author     Dimitry Andric <dim@FreeBSD.org>   2013-12-22 00:04:03 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2013-12-22 00:04:03 +0000
commit     f8af5cf600354830d4ccf59732403f0f073eccb9 (patch)
tree       2ba0398b4c42ad4f55561327538044fd2c925a8b /lib/Target
parent     59d6cff90eecf31cb3dd860c4e786674cfdd42eb (diff)
download   src-f8af5cf600354830d4ccf59732403f0f073eccb9.tar.gz
           src-f8af5cf600354830d4ccf59732403f0f073eccb9.zip
Vendor import of llvm release_34 branch r197841 (effectively, 3.4 RC3)
tag: vendor/llvm/llvm-release_34-r197841
Notes:
    svn path=/vendor/llvm/dist/; revision=259698
    svn path=/vendor/llvm/llvm-release_34-r197841/; revision=259700; tag=vendor/llvm/llvm-release_34-r197841
Diffstat (limited to 'lib/Target')
-rw-r--r--lib/Target/AArch64/AArch64.td7
-rw-r--r--lib/Target/AArch64/AArch64AsmPrinter.cpp172
-rw-r--r--lib/Target/AArch64/AArch64AsmPrinter.h4
-rw-r--r--lib/Target/AArch64/AArch64BranchFixupPass.cpp2
-rw-r--r--lib/Target/AArch64/AArch64CallingConv.td9
-rw-r--r--lib/Target/AArch64/AArch64FrameLowering.cpp35
-rw-r--r--lib/Target/AArch64/AArch64FrameLowering.h2
-rw-r--r--lib/Target/AArch64/AArch64ISelDAGToDAG.cpp1066
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.cpp1814
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.h147
-rw-r--r--lib/Target/AArch64/AArch64InstrFormats.td528
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.cpp105
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.h4
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.td87
-rw-r--r--lib/Target/AArch64/AArch64InstrNEON.td8671
-rw-r--r--lib/Target/AArch64/AArch64MCInstLower.cpp7
-rw-r--r--lib/Target/AArch64/AArch64RegisterInfo.cpp7
-rw-r--r--lib/Target/AArch64/AArch64RegisterInfo.h7
-rw-r--r--lib/Target/AArch64/AArch64RegisterInfo.td176
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.cpp28
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.h20
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.cpp1
-rw-r--r--lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp521
-rw-r--r--lib/Target/AArch64/CMakeLists.txt2
-rw-r--r--lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp795
-rw-r--r--lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp131
-rw-r--r--lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h14
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp4
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp20
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp5
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h10
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp106
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp21
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h5
-rw-r--r--lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp2
-rw-r--r--lib/Target/AArch64/Utils/AArch64BaseInfo.cpp68
-rw-r--r--lib/Target/AArch64/Utils/AArch64BaseInfo.h65
-rw-r--r--lib/Target/AArch64/Utils/CMakeLists.txt2
-rw-r--r--lib/Target/ARM/A15SDOptimizer.cpp13
-rw-r--r--lib/Target/ARM/ARM.td94
-rw-r--r--lib/Target/ARM/ARMAsmPrinter.cpp605
-rw-r--r--lib/Target/ARM/ARMAsmPrinter.h8
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.cpp470
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.h28
-rw-r--r--lib/Target/ARM/ARMBaseRegisterInfo.cpp99
-rw-r--r--lib/Target/ARM/ARMBaseRegisterInfo.h27
-rw-r--r--lib/Target/ARM/ARMBuildAttrs.h71
-rw-r--r--lib/Target/ARM/ARMCallingConv.td26
-rw-r--r--lib/Target/ARM/ARMCodeEmitter.cpp6
-rw-r--r--lib/Target/ARM/ARMConstantIslandPass.cpp3
-rw-r--r--lib/Target/ARM/ARMConstantPoolValue.cpp50
-rw-r--r--lib/Target/ARM/ARMConstantPoolValue.h33
-rw-r--r--lib/Target/ARM/ARMExpandPseudoInsts.cpp75
-rw-r--r--lib/Target/ARM/ARMFPUName.def32
-rw-r--r--lib/Target/ARM/ARMFPUName.h26
-rw-r--r--lib/Target/ARM/ARMFastISel.cpp375
-rw-r--r--lib/Target/ARM/ARMFeatures.h93
-rw-r--r--lib/Target/ARM/ARMFrameLowering.cpp156
-rw-r--r--lib/Target/ARM/ARMHazardRecognizer.cpp10
-rw-r--r--lib/Target/ARM/ARMHazardRecognizer.h13
-rw-r--r--lib/Target/ARM/ARMISelDAGToDAG.cpp566
-rw-r--r--lib/Target/ARM/ARMISelLowering.cpp1722
-rw-r--r--lib/Target/ARM/ARMISelLowering.h72
-rw-r--r--lib/Target/ARM/ARMInstrFormats.td290
-rw-r--r--lib/Target/ARM/ARMInstrInfo.cpp34
-rw-r--r--lib/Target/ARM/ARMInstrInfo.td885
-rw-r--r--lib/Target/ARM/ARMInstrNEON.td451
-rw-r--r--lib/Target/ARM/ARMInstrThumb.td263
-rw-r--r--lib/Target/ARM/ARMInstrThumb2.td849
-rw-r--r--lib/Target/ARM/ARMInstrVFP.td348
-rw-r--r--lib/Target/ARM/ARMLoadStoreOptimizer.cpp115
-rw-r--r--lib/Target/ARM/ARMMCInstLower.cpp2
-rw-r--r--lib/Target/ARM/ARMMachineFunctionInfo.h78
-rw-r--r--lib/Target/ARM/ARMRegisterInfo.cpp5
-rw-r--r--lib/Target/ARM/ARMRegisterInfo.h6
-rw-r--r--lib/Target/ARM/ARMRegisterInfo.td84
-rw-r--r--lib/Target/ARM/ARMSchedule.td18
-rw-r--r--lib/Target/ARM/ARMScheduleA9.td196
-rw-r--r--lib/Target/ARM/ARMScheduleSwift.td944
-rw-r--r--lib/Target/ARM/ARMSelectionDAGInfo.cpp4
-rw-r--r--lib/Target/ARM/ARMSelectionDAGInfo.h4
-rw-r--r--lib/Target/ARM/ARMSubtarget.cpp120
-rw-r--r--lib/Target/ARM/ARMSubtarget.h79
-rw-r--r--lib/Target/ARM/ARMTargetMachine.cpp15
-rw-r--r--lib/Target/ARM/ARMTargetObjectFile.cpp2
-rw-r--r--lib/Target/ARM/ARMTargetTransformInfo.cpp113
-rw-r--r--lib/Target/ARM/AsmParser/ARMAsmParser.cpp1429
-rw-r--r--lib/Target/ARM/CMakeLists.txt2
-rw-r--r--lib/Target/ARM/Disassembler/ARMDisassembler.cpp817
-rw-r--r--lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp284
-rw-r--r--lib/Target/ARM/InstPrinter/ARMInstPrinter.h4
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h12
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp53
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h61
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp735
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMELFStreamer.h27
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp2
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h3
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp102
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp93
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h16
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp43
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp129
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp157
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h73
-rw-r--r--lib/Target/ARM/MCTargetDesc/CMakeLists.txt1
-rw-r--r--lib/Target/ARM/Thumb1FrameLowering.cpp36
-rw-r--r--lib/Target/ARM/Thumb1InstrInfo.cpp2
-rw-r--r--lib/Target/ARM/Thumb1RegisterInfo.cpp20
-rw-r--r--lib/Target/ARM/Thumb1RegisterInfo.h2
-rw-r--r--lib/Target/ARM/Thumb2ITBlockPass.cpp71
-rw-r--r--lib/Target/ARM/Thumb2InstrInfo.cpp25
-rw-r--r--lib/Target/ARM/Thumb2RegisterInfo.cpp6
-rw-r--r--lib/Target/ARM/Thumb2RegisterInfo.h6
-rw-r--r--lib/Target/CppBackend/CPPBackend.cpp27
-rw-r--r--lib/Target/Hexagon/CMakeLists.txt7
-rw-r--r--lib/Target/Hexagon/Hexagon.h6
-rw-r--r--lib/Target/Hexagon/Hexagon.td30
-rw-r--r--lib/Target/Hexagon/HexagonAsmPrinter.cpp4
-rw-r--r--lib/Target/Hexagon/HexagonCallingConvLower.cpp8
-rw-r--r--lib/Target/Hexagon/HexagonCallingConvLower.h5
-rw-r--r--lib/Target/Hexagon/HexagonCopyToCombine.cpp677
-rw-r--r--lib/Target/Hexagon/HexagonFrameLowering.cpp80
-rw-r--r--lib/Target/Hexagon/HexagonHardwareLoops.cpp10
-rw-r--r--lib/Target/Hexagon/HexagonISelDAGToDAG.cpp76
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.cpp104
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.h17
-rw-r--r--lib/Target/Hexagon/HexagonInstrFormats.td5
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.cpp857
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.h15
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.td105
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfoV4.td265
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfoV5.td23
-rw-r--r--lib/Target/Hexagon/HexagonMCInstLower.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonMachineFunctionInfo.cpp (renamed from lib/Target/MBlaze/MBlazeMachineFunction.cpp)8
-rw-r--r--lib/Target/Hexagon/HexagonMachineFunctionInfo.h6
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.cpp21
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.h5
-rw-r--r--lib/Target/Hexagon/HexagonNewValueJump.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonPeephole.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonRegisterInfo.cpp26
-rw-r--r--lib/Target/Hexagon/HexagonRegisterInfo.h8
-rw-r--r--lib/Target/Hexagon/HexagonRegisterInfo.td4
-rw-r--r--lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonSelectionDAGInfo.h2
-rw-r--r--lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp174
-rw-r--r--lib/Target/Hexagon/HexagonSubtarget.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonSubtarget.h2
-rw-r--r--lib/Target/Hexagon/HexagonTargetMachine.cpp31
-rw-r--r--lib/Target/Hexagon/HexagonTargetObjectFile.cpp8
-rw-r--r--lib/Target/Hexagon/HexagonTargetObjectFile.h1
-rw-r--r--lib/Target/Hexagon/HexagonVLIWPacketizer.cpp1833
-rw-r--r--lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp15
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h5
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp6
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h9
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp11
-rw-r--r--lib/Target/LLVMBuild.txt2
-rw-r--r--lib/Target/MBlaze/AsmParser/CMakeLists.txt8
-rw-r--r--lib/Target/MBlaze/AsmParser/LLVMBuild.txt23
-rw-r--r--lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp572
-rw-r--r--lib/Target/MBlaze/AsmParser/Makefile15
-rw-r--r--lib/Target/MBlaze/CMakeLists.txt37
-rw-r--r--lib/Target/MBlaze/Disassembler/CMakeLists.txt16
-rw-r--r--lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp719
-rw-r--r--lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h49
-rw-r--r--lib/Target/MBlaze/Disassembler/Makefile16
-rw-r--r--lib/Target/MBlaze/InstPrinter/CMakeLists.txt8
-rw-r--r--lib/Target/MBlaze/InstPrinter/LLVMBuild.txt23
-rw-r--r--lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp71
-rw-r--r--lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h43
-rw-r--r--lib/Target/MBlaze/InstPrinter/Makefile16
-rw-r--r--lib/Target/MBlaze/LLVMBuild.txt34
-rw-r--r--lib/Target/MBlaze/MBlaze.h32
-rw-r--r--lib/Target/MBlaze/MBlaze.td73
-rw-r--r--lib/Target/MBlaze/MBlazeAsmPrinter.cpp326
-rw-r--r--lib/Target/MBlaze/MBlazeCallingConv.td24
-rw-r--r--lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp254
-rw-r--r--lib/Target/MBlaze/MBlazeFrameLowering.cpp488
-rw-r--r--lib/Target/MBlaze/MBlazeFrameLowering.h56
-rw-r--r--lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp277
-rw-r--r--lib/Target/MBlaze/MBlazeISelLowering.cpp1154
-rw-r--r--lib/Target/MBlaze/MBlazeISelLowering.h179
-rw-r--r--lib/Target/MBlaze/MBlazeInstrFPU.td219
-rw-r--r--lib/Target/MBlaze/MBlazeInstrFSL.td229
-rw-r--r--lib/Target/MBlaze/MBlazeInstrFormats.td228
-rw-r--r--lib/Target/MBlaze/MBlazeInstrInfo.cpp297
-rw-r--r--lib/Target/MBlaze/MBlazeInstrInfo.h240
-rw-r--r--lib/Target/MBlaze/MBlazeInstrInfo.td1051
-rw-r--r--lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp112
-rw-r--r--lib/Target/MBlaze/MBlazeIntrinsicInfo.h33
-rw-r--r--lib/Target/MBlaze/MBlazeIntrinsics.td131
-rw-r--r--lib/Target/MBlaze/MBlazeMCInstLower.cpp167
-rw-r--r--lib/Target/MBlaze/MBlazeMCInstLower.h47
-rw-r--r--lib/Target/MBlaze/MBlazeMachineFunction.h169
-rw-r--r--lib/Target/MBlaze/MBlazeRegisterInfo.cpp145
-rw-r--r--lib/Target/MBlaze/MBlazeRegisterInfo.h71
-rw-r--r--lib/Target/MBlaze/MBlazeRegisterInfo.td148
-rw-r--r--lib/Target/MBlaze/MBlazeRelocations.h47
-rw-r--r--lib/Target/MBlaze/MBlazeSchedule.td50
-rw-r--r--lib/Target/MBlaze/MBlazeSchedule3.td236
-rw-r--r--lib/Target/MBlaze/MBlazeSchedule5.td267
-rw-r--r--lib/Target/MBlaze/MBlazeSelectionDAGInfo.h31
-rw-r--r--lib/Target/MBlaze/MBlazeSubtarget.cpp56
-rw-r--r--lib/Target/MBlaze/MBlazeSubtarget.h75
-rw-r--r--lib/Target/MBlaze/MBlazeTargetMachine.cpp81
-rw-r--r--lib/Target/MBlaze/MBlazeTargetMachine.h80
-rw-r--r--lib/Target/MBlaze/MBlazeTargetObjectFile.cpp90
-rw-r--r--lib/Target/MBlaze/MBlazeTargetObjectFile.h40
-rw-r--r--lib/Target/MBlaze/MCTargetDesc/CMakeLists.txt9
-rw-r--r--lib/Target/MBlaze/MCTargetDesc/LLVMBuild.txt23
-rw-r--r--lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp171
-rw-r--r--lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h237
-rw-r--r--lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp77
-rw-r--r--lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp26
-rw-r--r--lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h30
-rw-r--r--lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp222
-rw-r--r--lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp141
-rw-r--r--lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h56
-rw-r--r--lib/Target/MBlaze/Makefile23
-rw-r--r--lib/Target/MBlaze/TODO21
-rw-r--r--lib/Target/MBlaze/TargetInfo/CMakeLists.txt8
-rw-r--r--lib/Target/MBlaze/TargetInfo/LLVMBuild.txt23
-rw-r--r--lib/Target/MBlaze/TargetInfo/MBlazeTargetInfo.cpp19
-rw-r--r--lib/Target/MBlaze/TargetInfo/Makefile15
-rw-r--r--lib/Target/MSP430/CMakeLists.txt2
-rw-r--r--lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp5
-rw-r--r--lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h7
-rw-r--r--lib/Target/MSP430/MSP430AsmPrinter.cpp2
-rw-r--r--lib/Target/MSP430/MSP430CallingConv.td7
-rw-r--r--lib/Target/MSP430/MSP430FrameLowering.h4
-rw-r--r--lib/Target/MSP430/MSP430ISelDAGToDAG.cpp10
-rw-r--r--lib/Target/MSP430/MSP430ISelLowering.cpp191
-rw-r--r--lib/Target/MSP430/MSP430ISelLowering.h13
-rw-r--r--lib/Target/MSP430/MSP430InstrInfo.cpp7
-rw-r--r--lib/Target/MSP430/MSP430InstrInfo.h1
-rw-r--r--lib/Target/MSP430/MSP430InstrInfo.td4
-rw-r--r--lib/Target/MSP430/MSP430MCInstLower.cpp2
-rw-r--r--lib/Target/MSP430/MSP430RegisterInfo.cpp6
-rw-r--r--lib/Target/MSP430/MSP430RegisterInfo.h3
-rw-r--r--lib/Target/MSP430/MSP430RegisterInfo.td2
-rw-r--r--lib/Target/MSP430/MSP430TargetMachine.cpp4
-rw-r--r--lib/Target/Mangler.cpp149
-rw-r--r--lib/Target/Mips/AsmParser/MipsAsmParser.cpp1572
-rw-r--r--lib/Target/Mips/CMakeLists.txt4
-rw-r--r--lib/Target/Mips/Disassembler/MipsDisassembler.cpp446
-rw-r--r--lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp89
-rw-r--r--lib/Target/Mips/InstPrinter/MipsInstPrinter.h9
-rw-r--r--lib/Target/Mips/MCTargetDesc/CMakeLists.txt3
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp43
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp81
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h28
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp39
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp89
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h43
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h39
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp3
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h7
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp210
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp49
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h16
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp67
-rw-r--r--lib/Target/Mips/MSA.txt78
-rw-r--r--lib/Target/Mips/MicroMipsInstrFormats.td196
-rw-r--r--lib/Target/Mips/MicroMipsInstrInfo.td228
-rw-r--r--lib/Target/Mips/Mips.h1
-rw-r--r--lib/Target/Mips/Mips.td3
-rw-r--r--lib/Target/Mips/Mips16FrameLowering.cpp26
-rw-r--r--lib/Target/Mips/Mips16FrameLowering.h2
-rw-r--r--lib/Target/Mips/Mips16HardFloat.cpp517
-rw-r--r--lib/Target/Mips/Mips16HardFloat.h54
-rw-r--r--lib/Target/Mips/Mips16ISelDAGToDAG.cpp21
-rw-r--r--lib/Target/Mips/Mips16ISelDAGToDAG.h2
-rw-r--r--lib/Target/Mips/Mips16ISelLowering.cpp239
-rw-r--r--lib/Target/Mips/Mips16ISelLowering.h4
-rw-r--r--lib/Target/Mips/Mips16InstrFormats.td18
-rw-r--r--lib/Target/Mips/Mips16InstrInfo.cpp203
-rw-r--r--lib/Target/Mips/Mips16InstrInfo.h15
-rw-r--r--lib/Target/Mips/Mips16InstrInfo.td188
-rw-r--r--lib/Target/Mips/Mips16RegisterInfo.cpp17
-rw-r--r--lib/Target/Mips/Mips16RegisterInfo.h4
-rw-r--r--lib/Target/Mips/Mips64InstrInfo.td440
-rw-r--r--lib/Target/Mips/MipsAnalyzeImmediate.cpp2
-rw-r--r--lib/Target/Mips/MipsAnalyzeImmediate.h10
-rw-r--r--lib/Target/Mips/MipsAsmPrinter.cpp141
-rw-r--r--lib/Target/Mips/MipsAsmPrinter.h25
-rw-r--r--lib/Target/Mips/MipsCallingConv.td30
-rw-r--r--lib/Target/Mips/MipsCodeEmitter.cpp40
-rw-r--r--lib/Target/Mips/MipsCondMov.td210
-rw-r--r--lib/Target/Mips/MipsConstantIslandPass.cpp1470
-rw-r--r--lib/Target/Mips/MipsDSPInstrInfo.td468
-rw-r--r--lib/Target/Mips/MipsDelaySlotFiller.cpp38
-rw-r--r--lib/Target/Mips/MipsISelDAGToDAG.cpp81
-rw-r--r--lib/Target/Mips/MipsISelDAGToDAG.h38
-rw-r--r--lib/Target/Mips/MipsISelLowering.cpp824
-rw-r--r--lib/Target/Mips/MipsISelLowering.h184
-rw-r--r--lib/Target/Mips/MipsInstrFPU.td456
-rw-r--r--lib/Target/Mips/MipsInstrFormats.td143
-rw-r--r--lib/Target/Mips/MipsInstrInfo.cpp43
-rw-r--r--lib/Target/Mips/MipsInstrInfo.h16
-rw-r--r--lib/Target/Mips/MipsInstrInfo.td983
-rw-r--r--lib/Target/Mips/MipsJITInfo.cpp6
-rw-r--r--lib/Target/Mips/MipsLongBranch.cpp25
-rw-r--r--lib/Target/Mips/MipsMCInstLower.cpp5
-rw-r--r--lib/Target/Mips/MipsMCInstLower.h4
-rw-r--r--lib/Target/Mips/MipsMSAInstrFormats.td406
-rw-r--r--lib/Target/Mips/MipsMSAInstrInfo.td3694
-rw-r--r--lib/Target/Mips/MipsMachineFunction.cpp72
-rw-r--r--lib/Target/Mips/MipsMachineFunction.h103
-rw-r--r--lib/Target/Mips/MipsOs16.cpp45
-rw-r--r--lib/Target/Mips/MipsRegisterInfo.cpp79
-rw-r--r--lib/Target/Mips/MipsRegisterInfo.h8
-rw-r--r--lib/Target/Mips/MipsRegisterInfo.td411
-rw-r--r--lib/Target/Mips/MipsSEFrameLowering.cpp184
-rw-r--r--lib/Target/Mips/MipsSEFrameLowering.h2
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.cpp443
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.h48
-rw-r--r--lib/Target/Mips/MipsSEISelLowering.cpp2329
-rw-r--r--lib/Target/Mips/MipsSEISelLowering.h48
-rw-r--r--lib/Target/Mips/MipsSEInstrInfo.cpp310
-rw-r--r--lib/Target/Mips/MipsSEInstrInfo.h40
-rw-r--r--lib/Target/Mips/MipsSERegisterInfo.cpp85
-rw-r--r--lib/Target/Mips/MipsSERegisterInfo.h5
-rw-r--r--lib/Target/Mips/MipsSchedule.td15
-rw-r--r--lib/Target/Mips/MipsSubtarget.cpp34
-rw-r--r--lib/Target/Mips/MipsSubtarget.h31
-rw-r--r--lib/Target/Mips/MipsTargetMachine.cpp21
-rw-r--r--lib/Target/Mips/MipsTargetMachine.h6
-rw-r--r--lib/Target/Mips/MipsTargetStreamer.h44
-rw-r--r--lib/Target/NVPTX/CMakeLists.txt4
-rw-r--r--lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp290
-rw-r--r--lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h53
-rw-r--r--lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h1
-rw-r--r--lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp12
-rw-r--r--lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h2
-rw-r--r--lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp17
-rw-r--r--lib/Target/NVPTX/ManagedStringPool.h2
-rw-r--r--lib/Target/NVPTX/NVPTX.h55
-rw-r--r--lib/Target/NVPTX/NVPTX.td6
-rw-r--r--lib/Target/NVPTX/NVPTXAllocaHoisting.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXAsmPrinter.cpp595
-rw-r--r--lib/Target/NVPTX/NVPTXAsmPrinter.h27
-rw-r--r--lib/Target/NVPTX/NVPTXFrameLowering.cpp41
-rw-r--r--lib/Target/NVPTX/NVPTXGenericToNVVM.cpp4
-rw-r--r--lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp1035
-rw-r--r--lib/Target/NVPTX/NVPTXISelDAGToDAG.h31
-rw-r--r--lib/Target/NVPTX/NVPTXISelLowering.cpp1627
-rw-r--r--lib/Target/NVPTX/NVPTXISelLowering.h39
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.cpp51
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.h1
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.td2114
-rw-r--r--lib/Target/NVPTX/NVPTXIntrinsics.td632
-rw-r--r--lib/Target/NVPTX/NVPTXMCExpr.cpp46
-rw-r--r--lib/Target/NVPTX/NVPTXMCExpr.h83
-rw-r--r--lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp225
-rw-r--r--lib/Target/NVPTX/NVPTXRegisterInfo.cpp13
-rw-r--r--lib/Target/NVPTX/NVPTXRegisterInfo.h2
-rw-r--r--lib/Target/NVPTX/NVPTXRegisterInfo.td26
-rw-r--r--lib/Target/NVPTX/NVPTXSection.h4
-rw-r--r--lib/Target/NVPTX/NVPTXSplitBBatBar.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXSubtarget.cpp20
-rw-r--r--lib/Target/NVPTX/NVPTXSubtarget.h2
-rw-r--r--lib/Target/NVPTX/NVPTXTargetMachine.cpp64
-rw-r--r--lib/Target/NVPTX/NVPTXTargetObjectFile.h44
-rw-r--r--lib/Target/NVPTX/NVVMReflect.cpp4
-rw-r--r--lib/Target/PowerPC/AsmParser/LLVMBuild.txt2
-rw-r--r--lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp738
-rw-r--r--lib/Target/PowerPC/CMakeLists.txt5
-rw-r--r--lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp189
-rw-r--r--lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h18
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/CMakeLists.txt2
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp101
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp340
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h28
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp9
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h3
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp102
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp155
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h96
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp84
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h8
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp389
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp32
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h32
-rw-r--r--lib/Target/PowerPC/Makefile2
-rw-r--r--lib/Target/PowerPC/PPC.h24
-rw-r--r--lib/Target/PowerPC/PPC.td66
-rw-r--r--lib/Target/PowerPC/PPCAsmPrinter.cpp218
-rw-r--r--lib/Target/PowerPC/PPCCTRLoops.cpp1164
-rw-r--r--lib/Target/PowerPC/PPCCallingConv.td70
-rw-r--r--lib/Target/PowerPC/PPCCodeEmitter.cpp51
-rw-r--r--lib/Target/PowerPC/PPCFastISel.cpp2236
-rw-r--r--lib/Target/PowerPC/PPCFrameLowering.cpp635
-rw-r--r--lib/Target/PowerPC/PPCFrameLowering.h10
-rw-r--r--lib/Target/PowerPC/PPCHazardRecognizers.cpp6
-rw-r--r--lib/Target/PowerPC/PPCHazardRecognizers.h4
-rw-r--r--lib/Target/PowerPC/PPCISelDAGToDAG.cpp115
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.cpp1056
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.h143
-rw-r--r--lib/Target/PowerPC/PPCInstr64Bit.td165
-rw-r--r--lib/Target/PowerPC/PPCInstrAltivec.td52
-rw-r--r--lib/Target/PowerPC/PPCInstrFormats.td71
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.cpp65
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.h7
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.td805
-rw-r--r--lib/Target/PowerPC/PPCJITInfo.cpp40
-rw-r--r--lib/Target/PowerPC/PPCMCInstLower.cpp62
-rw-r--r--lib/Target/PowerPC/PPCMachineFunctionInfo.h9
-rw-r--r--lib/Target/PowerPC/PPCRegisterInfo.cpp264
-rw-r--r--lib/Target/PowerPC/PPCRegisterInfo.h11
-rw-r--r--lib/Target/PowerPC/PPCRegisterInfo.td20
-rw-r--r--lib/Target/PowerPC/PPCSchedule.td8
-rw-r--r--lib/Target/PowerPC/PPCScheduleA2.td841
-rw-r--r--lib/Target/PowerPC/PPCScheduleE500mc.td2
-rw-r--r--lib/Target/PowerPC/PPCScheduleE5500.td1
-rw-r--r--lib/Target/PowerPC/PPCSubtarget.cpp154
-rw-r--r--lib/Target/PowerPC/PPCSubtarget.h28
-rw-r--r--lib/Target/PowerPC/PPCTargetMachine.cpp16
-rw-r--r--lib/Target/PowerPC/PPCTargetObjectFile.cpp67
-rw-r--r--lib/Target/PowerPC/PPCTargetObjectFile.h35
-rw-r--r--lib/Target/PowerPC/PPCTargetStreamer.h23
-rw-r--r--lib/Target/PowerPC/PPCTargetTransformInfo.cpp9
-rw-r--r--lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp5
-rw-r--r--lib/Target/R600/AMDGPU.h66
-rw-r--r--lib/Target/R600/AMDGPU.td85
-rw-r--r--lib/Target/R600/AMDGPUAsmPrinter.cpp86
-rw-r--r--lib/Target/R600/AMDGPUAsmPrinter.h12
-rw-r--r--lib/Target/R600/AMDGPUCallingConv.td46
-rw-r--r--lib/Target/R600/AMDGPUFrameLowering.cpp23
-rw-r--r--lib/Target/R600/AMDGPUISelDAGToDAG.cpp585
-rw-r--r--lib/Target/R600/AMDGPUISelLowering.cpp445
-rw-r--r--lib/Target/R600/AMDGPUISelLowering.h56
-rw-r--r--lib/Target/R600/AMDGPUIndirectAddressing.cpp343
-rw-r--r--lib/Target/R600/AMDGPUInstrInfo.cpp140
-rw-r--r--lib/Target/R600/AMDGPUInstrInfo.h50
-rw-r--r--lib/Target/R600/AMDGPUInstrInfo.td22
-rw-r--r--lib/Target/R600/AMDGPUInstructions.td268
-rw-r--r--lib/Target/R600/AMDGPUIntrinsics.td2
-rw-r--r--lib/Target/R600/AMDGPUMCInstLower.cpp43
-rw-r--r--lib/Target/R600/AMDGPUMachineFunction.cpp9
-rw-r--r--lib/Target/R600/AMDGPUMachineFunction.h9
-rw-r--r--lib/Target/R600/AMDGPURegisterInfo.cpp38
-rw-r--r--lib/Target/R600/AMDGPURegisterInfo.h11
-rw-r--r--lib/Target/R600/AMDGPURegisterInfo.td3
-rw-r--r--lib/Target/R600/AMDGPUStructurizeCFG.cpp896
-rw-r--r--lib/Target/R600/AMDGPUSubtarget.cpp78
-rw-r--r--lib/Target/R600/AMDGPUSubtarget.h33
-rw-r--r--lib/Target/R600/AMDGPUTargetMachine.cpp81
-rw-r--r--lib/Target/R600/AMDGPUTargetMachine.h61
-rw-r--r--lib/Target/R600/AMDGPUTargetTransformInfo.cpp90
-rw-r--r--lib/Target/R600/AMDIL.h121
-rw-r--r--lib/Target/R600/AMDIL7XXDevice.cpp115
-rw-r--r--lib/Target/R600/AMDIL7XXDevice.h72
-rw-r--r--lib/Target/R600/AMDILBase.td64
-rw-r--r--lib/Target/R600/AMDILCFGStructurizer.cpp4133
-rw-r--r--lib/Target/R600/AMDILDevice.cpp132
-rw-r--r--lib/Target/R600/AMDILDevice.h117
-rw-r--r--lib/Target/R600/AMDILDeviceInfo.cpp97
-rw-r--r--lib/Target/R600/AMDILDeviceInfo.h88
-rw-r--r--lib/Target/R600/AMDILDevices.h19
-rw-r--r--lib/Target/R600/AMDILEvergreenDevice.cpp169
-rw-r--r--lib/Target/R600/AMDILEvergreenDevice.h93
-rw-r--r--lib/Target/R600/AMDILISelDAGToDAG.cpp666
-rw-r--r--lib/Target/R600/AMDILISelLowering.cpp61
-rw-r--r--lib/Target/R600/AMDILInstrInfo.td67
-rw-r--r--lib/Target/R600/AMDILIntrinsicInfo.cpp4
-rw-r--r--lib/Target/R600/AMDILNIDevice.cpp65
-rw-r--r--lib/Target/R600/AMDILNIDevice.h57
-rw-r--r--lib/Target/R600/AMDILSIDevice.cpp48
-rw-r--r--lib/Target/R600/AMDILSIDevice.h39
-rw-r--r--lib/Target/R600/CMakeLists.txt18
-rw-r--r--lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp137
-rw-r--r--lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h4
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp8
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp13
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h4
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp (renamed from lib/Target/MBlaze/MBlazeSelectionDAGInfo.cpp)16
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h1
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp2
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h4
-rw-r--r--lib/Target/R600/MCTargetDesc/CMakeLists.txt1
-rw-r--r--lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp102
-rw-r--r--lib/Target/R600/Processors.td48
-rw-r--r--lib/Target/R600/R600ClauseMergePass.cpp204
-rw-r--r--lib/Target/R600/R600ControlFlowFinalizer.cpp152
-rw-r--r--lib/Target/R600/R600Defines.h122
-rw-r--r--lib/Target/R600/R600EmitClauseMarkers.cpp164
-rw-r--r--lib/Target/R600/R600ExpandSpecialInstrs.cpp90
-rw-r--r--lib/Target/R600/R600ISelLowering.cpp1299
-rw-r--r--lib/Target/R600/R600ISelLowering.h14
-rw-r--r--lib/Target/R600/R600InstrFormats.td492
-rw-r--r--lib/Target/R600/R600InstrInfo.cpp825
-rw-r--r--lib/Target/R600/R600InstrInfo.h121
-rw-r--r--lib/Target/R600/R600Instructions.td1715
-rw-r--r--lib/Target/R600/R600Intrinsics.td44
-rw-r--r--lib/Target/R600/R600MachineFunctionInfo.cpp6
-rw-r--r--lib/Target/R600/R600MachineFunctionInfo.h3
-rw-r--r--lib/Target/R600/R600MachineScheduler.cpp300
-rw-r--r--lib/Target/R600/R600MachineScheduler.h44
-rw-r--r--lib/Target/R600/R600OptimizeVectorRegisters.cpp380
-rw-r--r--lib/Target/R600/R600Packetizer.cpp311
-rw-r--r--lib/Target/R600/R600RegisterInfo.cpp53
-rw-r--r--lib/Target/R600/R600RegisterInfo.h13
-rw-r--r--lib/Target/R600/R600RegisterInfo.td79
-rw-r--r--lib/Target/R600/R600Schedule.td6
-rw-r--r--lib/Target/R600/R600TextureIntrinsicsReplacer.cpp303
-rw-r--r--lib/Target/R600/SIAnnotateControlFlow.cpp16
-rw-r--r--lib/Target/R600/SIDefines.h16
-rw-r--r--lib/Target/R600/SIFixSGPRCopies.cpp263
-rw-r--r--lib/Target/R600/SIISelLowering.cpp756
-rw-r--r--lib/Target/R600/SIISelLowering.h31
-rw-r--r--lib/Target/R600/SIInsertWaits.cpp34
-rw-r--r--lib/Target/R600/SIInstrFormats.td116
-rw-r--r--lib/Target/R600/SIInstrInfo.cpp524
-rw-r--r--lib/Target/R600/SIInstrInfo.h80
-rw-r--r--lib/Target/R600/SIInstrInfo.td286
-rw-r--r--lib/Target/R600/SIInstructions.td798
-rw-r--r--lib/Target/R600/SIIntrinsics.td26
-rw-r--r--lib/Target/R600/SILowerControlFlow.cpp38
-rw-r--r--lib/Target/R600/SIMachineFunctionInfo.cpp4
-rw-r--r--lib/Target/R600/SIMachineFunctionInfo.h1
-rw-r--r--lib/Target/R600/SIRegisterInfo.cpp88
-rw-r--r--lib/Target/R600/SIRegisterInfo.h26
-rw-r--r--lib/Target/R600/SIRegisterInfo.td18
-rw-r--r--lib/Target/R600/SITypeRewriter.cpp162
-rw-r--r--lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp2
-rw-r--r--lib/Target/Sparc/CMakeLists.txt6
-rw-r--r--lib/Target/Sparc/DelaySlotFiller.cpp267
-rw-r--r--lib/Target/Sparc/FPMover.cpp141
-rw-r--r--lib/Target/Sparc/LLVMBuild.txt4
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h22
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp15
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h7
-rw-r--r--lib/Target/Sparc/Makefile3
-rw-r--r--lib/Target/Sparc/README.txt8
-rw-r--r--lib/Target/Sparc/Sparc.h24
-rw-r--r--lib/Target/Sparc/Sparc.td6
-rw-r--r--lib/Target/Sparc/SparcAsmPrinter.cpp112
-rw-r--r--lib/Target/Sparc/SparcCallingConv.td13
-rw-r--r--lib/Target/Sparc/SparcCodeEmitter.cpp245
-rw-r--r--lib/Target/Sparc/SparcFrameLowering.cpp228
-rw-r--r--lib/Target/Sparc/SparcFrameLowering.h20
-rw-r--r--lib/Target/Sparc/SparcISelDAGToDAG.cpp24
-rw-r--r--lib/Target/Sparc/SparcISelLowering.cpp1169
-rw-r--r--lib/Target/Sparc/SparcISelLowering.h50
-rw-r--r--lib/Target/Sparc/SparcInstr64Bit.td103
-rw-r--r--lib/Target/Sparc/SparcInstrFormats.td92
-rw-r--r--lib/Target/Sparc/SparcInstrInfo.cpp177
-rw-r--r--lib/Target/Sparc/SparcInstrInfo.h15
-rw-r--r--lib/Target/Sparc/SparcInstrInfo.td528
-rw-r--r--lib/Target/Sparc/SparcJITInfo.cpp165
-rw-r--r--lib/Target/Sparc/SparcJITInfo.h67
-rw-r--r--lib/Target/Sparc/SparcMachineFunctionInfo.h12
-rw-r--r--lib/Target/Sparc/SparcRegisterInfo.cpp166
-rw-r--r--lib/Target/Sparc/SparcRegisterInfo.h10
-rw-r--r--lib/Target/Sparc/SparcRegisterInfo.td124
-rw-r--r--lib/Target/Sparc/SparcRelocations.h41
-rw-r--r--lib/Target/Sparc/SparcSubtarget.cpp33
-rw-r--r--lib/Target/Sparc/SparcSubtarget.h16
-rw-r--r--lib/Target/Sparc/SparcTargetMachine.cpp9
-rw-r--r--lib/Target/Sparc/SparcTargetMachine.h6
-rw-r--r--lib/Target/Sparc/TargetInfo/SparcTargetInfo.cpp8
-rw-r--r--lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp475
-rw-r--r--lib/Target/SystemZ/CMakeLists.txt9
-rw-r--r--lib/Target/SystemZ/Disassembler/CMakeLists.txt7
-rw-r--r--lib/Target/SystemZ/Disassembler/LLVMBuild.txt (renamed from lib/Target/MBlaze/Disassembler/LLVMBuild.txt)10
-rw-r--r--lib/Target/SystemZ/Disassembler/Makefile (renamed from lib/Target/MBlaze/MCTargetDesc/Makefile)6
-rw-r--r--lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp323
-rw-r--r--lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp23
-rw-r--r--lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h3
-rw-r--r--lib/Target/SystemZ/LLVMBuild.txt3
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp49
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp4
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h7
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp114
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp81
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h38
-rw-r--r--lib/Target/SystemZ/Makefile3
-rw-r--r--lib/Target/SystemZ/README.txt65
-rw-r--r--lib/Target/SystemZ/SystemZ.h42
-rw-r--r--lib/Target/SystemZ/SystemZ.td7
-rw-r--r--lib/Target/SystemZ/SystemZAsmPrinter.cpp136
-rw-r--r--lib/Target/SystemZ/SystemZCallingConv.td4
-rw-r--r--lib/Target/SystemZ/SystemZConstantPoolValue.cpp2
-rw-r--r--lib/Target/SystemZ/SystemZElimCompare.cpp471
-rw-r--r--lib/Target/SystemZ/SystemZFrameLowering.cpp108
-rw-r--r--lib/Target/SystemZ/SystemZFrameLowering.h27
-rw-r--r--lib/Target/SystemZ/SystemZISelDAGToDAG.cpp604
-rw-r--r--lib/Target/SystemZ/SystemZISelLowering.cpp1523
-rw-r--r--lib/Target/SystemZ/SystemZISelLowering.h139
-rw-r--r--lib/Target/SystemZ/SystemZInstrFP.td320
-rw-r--r--lib/Target/SystemZ/SystemZInstrFormats.td1242
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.cpp907
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.h150
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.td1226
-rw-r--r--lib/Target/SystemZ/SystemZLongBranch.cpp462
-rw-r--r--lib/Target/SystemZ/SystemZMCInstLower.cpp116
-rw-r--r--lib/Target/SystemZ/SystemZMCInstLower.h15
-rw-r--r--lib/Target/SystemZ/SystemZMachineFunctionInfo.cpp (renamed from lib/Target/NVPTX/NVPTXNumRegisters.h)13
-rw-r--r--lib/Target/SystemZ/SystemZMachineFunctionInfo.h12
-rw-r--r--lib/Target/SystemZ/SystemZOperands.td153
-rw-r--r--lib/Target/SystemZ/SystemZOperators.td208
-rw-r--r--lib/Target/SystemZ/SystemZPatterns.td99
-rw-r--r--lib/Target/SystemZ/SystemZProcessors.td46
-rw-r--r--lib/Target/SystemZ/SystemZRegisterInfo.cpp37
-rw-r--r--lib/Target/SystemZ/SystemZRegisterInfo.h16
-rw-r--r--lib/Target/SystemZ/SystemZRegisterInfo.td71
-rw-r--r--lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp293
-rw-r--r--lib/Target/SystemZ/SystemZSelectionDAGInfo.h80
-rw-r--r--lib/Target/SystemZ/SystemZShortenInst.cpp163
-rw-r--r--lib/Target/SystemZ/SystemZSubtarget.cpp15
-rw-r--r--lib/Target/SystemZ/SystemZSubtarget.h22
-rw-r--r--lib/Target/SystemZ/SystemZTargetMachine.cpp50
-rw-r--r--lib/Target/SystemZ/SystemZTargetMachine.h4
-rw-r--r--lib/Target/Target.cpp8
-rw-r--r--lib/Target/TargetLibraryInfo.cpp44
-rw-r--r--lib/Target/TargetLoweringObjectFile.cpp20
-rw-r--r--lib/Target/TargetMachine.cpp6
-rw-r--r--lib/Target/TargetMachineC.cpp56
-rw-r--r--lib/Target/TargetSubtargetInfo.cpp19
-rw-r--r--lib/Target/X86/AsmParser/X86AsmParser.cpp420
-rw-r--r--lib/Target/X86/CMakeLists.txt2
-rw-r--r--lib/Target/X86/Disassembler/X86Disassembler.cpp145
-rw-r--r--lib/Target/X86/Disassembler/X86DisassemblerDecoder.c267
-rw-r--r--lib/Target/X86/Disassembler/X86DisassemblerDecoder.h106
-rw-r--r--lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h165
-rw-r--r--lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp37
-rw-r--r--lib/Target/X86/InstPrinter/X86ATTInstPrinter.h22
-rw-r--r--lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp32
-rw-r--r--lib/Target/X86/InstPrinter/X86IntelInstPrinter.h52
-rw-r--r--lib/Target/X86/MCTargetDesc/CMakeLists.txt2
-rw-r--r--lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp401
-rw-r--r--lib/Target/X86/MCTargetDesc/X86BaseInfo.h82
-rw-r--r--lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp22
-rw-r--r--lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp135
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp6
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h3
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp335
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp35
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h13
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp116
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp177
-rw-r--r--lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp2
-rw-r--r--lib/Target/X86/README-SSE.txt31
-rw-r--r--lib/Target/X86/X86.td71
-rw-r--r--lib/Target/X86/X86AsmPrinter.cpp86
-rw-r--r--lib/Target/X86/X86AsmPrinter.h20
-rw-r--r--lib/Target/X86/X86CallingConv.h35
-rw-r--r--lib/Target/X86/X86CallingConv.td102
-rw-r--r--lib/Target/X86/X86CodeEmitter.cpp47
-rw-r--r--lib/Target/X86/X86FastISel.cpp488
-rw-r--r--lib/Target/X86/X86FixupLEAs.cpp11
-rw-r--r--lib/Target/X86/X86FloatingPoint.cpp18
-rw-r--r--lib/Target/X86/X86FrameLowering.cpp337
-rw-r--r--lib/Target/X86/X86FrameLowering.h27
-rw-r--r--lib/Target/X86/X86ISelDAGToDAG.cpp226
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp3138
-rw-r--r--lib/Target/X86/X86ISelLowering.h136
-rw-r--r--lib/Target/X86/X86InstrAVX512.td3526
-rw-r--r--lib/Target/X86/X86InstrArithmetic.td250
-rw-r--r--lib/Target/X86/X86InstrCompiler.td130
-rw-r--r--lib/Target/X86/X86InstrControl.td5
-rw-r--r--lib/Target/X86/X86InstrExtension.td68
-rw-r--r--lib/Target/X86/X86InstrFMA.td77
-rw-r--r--lib/Target/X86/X86InstrFPStack.td30
-rw-r--r--lib/Target/X86/X86InstrFormats.td181
-rw-r--r--lib/Target/X86/X86InstrFragmentsSIMD.td141
-rw-r--r--lib/Target/X86/X86InstrInfo.cpp1000
-rw-r--r--lib/Target/X86/X86InstrInfo.h26
-rw-r--r--lib/Target/X86/X86InstrInfo.td654
-rw-r--r--lib/Target/X86/X86InstrMMX.td93
-rw-r--r--lib/Target/X86/X86InstrSSE.td1833
-rw-r--r--lib/Target/X86/X86InstrSVM.td18
-rw-r--r--lib/Target/X86/X86InstrShiftRotate.td136
-rw-r--r--lib/Target/X86/X86InstrSystem.td76
-rw-r--r--lib/Target/X86/X86InstrTSX.td7
-rw-r--r--lib/Target/X86/X86InstrXOP.td146
-rw-r--r--lib/Target/X86/X86JITInfo.cpp3
-rw-r--r--lib/Target/X86/X86MCInstLower.cpp270
-rw-r--r--lib/Target/X86/X86RegisterInfo.cpp89
-rw-r--r--lib/Target/X86/X86RegisterInfo.h10
-rw-r--r--lib/Target/X86/X86RegisterInfo.td92
-rw-r--r--lib/Target/X86/X86SchedHaswell.td14
-rw-r--r--lib/Target/X86/X86SchedSandyBridge.td13
-rw-r--r--lib/Target/X86/X86Schedule.td73
-rw-r--r--lib/Target/X86/X86ScheduleAtom.td41
-rw-r--r--lib/Target/X86/X86ScheduleSLM.td668
-rw-r--r--lib/Target/X86/X86SelectionDAGInfo.cpp6
-rw-r--r--lib/Target/X86/X86SelectionDAGInfo.h4
-rw-r--r--lib/Target/X86/X86Subtarget.cpp60
-rw-r--r--lib/Target/X86/X86Subtarget.h51
-rw-r--r--lib/Target/X86/X86TargetMachine.cpp10
-rw-r--r--lib/Target/X86/X86TargetObjectFile.cpp10
-rw-r--r--lib/Target/X86/X86TargetObjectFile.h3
-rw-r--r--lib/Target/X86/X86TargetTransformInfo.cpp234
-rw-r--r--lib/Target/X86/X86VZeroUpper.cpp45
-rw-r--r--lib/Target/XCore/CMakeLists.txt3
-rw-r--r--lib/Target/XCore/Disassembler/XCoreDisassembler.cpp8
-rw-r--r--lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp10
-rw-r--r--lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h6
-rw-r--r--lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp10
-rw-r--r--lib/Target/XCore/README.txt1
-rw-r--r--lib/Target/XCore/XCore.h2
-rw-r--r--lib/Target/XCore/XCoreAsmPrinter.cpp80
-rw-r--r--lib/Target/XCore/XCoreFrameLowering.cpp107
-rw-r--r--lib/Target/XCore/XCoreISelDAGToDAG.cpp14
-rw-r--r--lib/Target/XCore/XCoreISelLowering.cpp200
-rw-r--r--lib/Target/XCore/XCoreISelLowering.h24
-rw-r--r--lib/Target/XCore/XCoreInstrInfo.cpp17
-rw-r--r--lib/Target/XCore/XCoreInstrInfo.h7
-rw-r--r--lib/Target/XCore/XCoreInstrInfo.td26
-rw-r--r--lib/Target/XCore/XCoreLowerThreadLocal.cpp114
-rw-r--r--lib/Target/XCore/XCoreMCInstLower.cpp2
-rw-r--r--lib/Target/XCore/XCoreRegisterInfo.cpp6
-rw-r--r--lib/Target/XCore/XCoreRegisterInfo.h4
-rw-r--r--lib/Target/XCore/XCoreTargetMachine.cpp9
-rw-r--r--lib/Target/XCore/XCoreTargetMachine.h2
-rw-r--r--lib/Target/XCore/XCoreTargetTransformInfo.cpp83
718 files changed, 89996 insertions, 42671 deletions
diff --git a/lib/Target/AArch64/AArch64.td b/lib/Target/AArch64/AArch64.td
index e17052b4a565..9c2c69a65935 100644
--- a/lib/Target/AArch64/AArch64.td
+++ b/lib/Target/AArch64/AArch64.td
@@ -21,8 +21,11 @@ include "llvm/Target/Target.td"
// AArch64 Subtarget features.
//
+def FeatureFPARMv8 : SubtargetFeature<"fp-armv8", "HasFPARMv8", "true",
+ "Enable ARMv8 FP">;
+
def FeatureNEON : SubtargetFeature<"neon", "HasNEON", "true",
- "Enable Advanced SIMD instructions">;
+ "Enable Advanced SIMD instructions", [FeatureFPARMv8]>;
def FeatureCrypto : SubtargetFeature<"crypto", "HasCrypto", "true",
"Enable cryptographic instructions">;
@@ -33,7 +36,7 @@ def FeatureCrypto : SubtargetFeature<"crypto", "HasCrypto", "true",
include "AArch64Schedule.td"
-def : Processor<"generic", GenericItineraries, [FeatureNEON, FeatureCrypto]>;
+def : Processor<"generic", GenericItineraries, [FeatureFPARMv8]>;
//===----------------------------------------------------------------------===//
// Register File Description
diff --git a/lib/Target/AArch64/AArch64AsmPrinter.cpp b/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 47ebb826e0d0..d59ca56ba998 100644
--- a/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -27,32 +27,23 @@
using namespace llvm;
-MachineLocation
-AArch64AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
- // See emitFrameIndexDebugValue in InstrInfo for where this instruction is
- // expected to be created.
- assert(MI->getNumOperands() == 4 && MI->getOperand(0).isReg()
- && MI->getOperand(1).isImm() && "unexpected custom DBG_VALUE");
- return MachineLocation(MI->getOperand(0).getReg(),
- MI->getOperand(1).getImm());
-}
-
/// Try to print a floating-point register as if it belonged to a specified
/// register-class. For example the inline asm operand modifier "b" requires its
/// argument to be printed as "bN".
static bool printModifiedFPRAsmOperand(const MachineOperand &MO,
const TargetRegisterInfo *TRI,
- const TargetRegisterClass &RegClass,
- raw_ostream &O) {
+ char RegType, raw_ostream &O) {
if (!MO.isReg())
return true;
for (MCRegAliasIterator AR(MO.getReg(), TRI, true); AR.isValid(); ++AR) {
- if (RegClass.contains(*AR)) {
- O << AArch64InstPrinter::getRegisterName(*AR);
+ if (AArch64::FPR8RegClass.contains(*AR)) {
+ O << RegType << TRI->getEncodingValue(MO.getReg());
return false;
}
}
+
+ // The register doesn't correspond to anything floating-point like.
return true;
}
@@ -91,9 +82,9 @@ bool AArch64AsmPrinter::printSymbolicAddress(const MachineOperand &MO,
StringRef Modifier;
switch (MO.getType()) {
default:
- llvm_unreachable("Unexpected operand for symbolic address constraint");
+ return true;
case MachineOperand::MO_GlobalAddress:
- Name = Mang->getSymbol(MO.getGlobal())->getName();
+ Name = getSymbol(MO.getGlobal())->getName();
// Global variables may be accessed either via a GOT or in various fun and
// interesting TLS-model specific ways. Set the prefix modifier as
@@ -155,57 +146,29 @@ bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
unsigned AsmVariant,
const char *ExtraCode, raw_ostream &O) {
const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
- if (!ExtraCode || !ExtraCode[0]) {
- // There's actually no operand modifier, which leads to a slightly eclectic
- // set of behaviour which we have to handle here.
- const MachineOperand &MO = MI->getOperand(OpNum);
- switch (MO.getType()) {
- default:
- llvm_unreachable("Unexpected operand for inline assembly");
- case MachineOperand::MO_Register:
- // GCC prints the unmodified operand of a 'w' constraint as the vector
- // register. Technically, we could allocate the argument as a VPR128, but
- // that leads to extremely dodgy copies being generated to get the data
- // there.
- if (printModifiedFPRAsmOperand(MO, TRI, AArch64::VPR128RegClass, O))
- O << AArch64InstPrinter::getRegisterName(MO.getReg());
- break;
- case MachineOperand::MO_Immediate:
- O << '#' << MO.getImm();
- break;
- case MachineOperand::MO_FPImmediate:
- assert(MO.getFPImm()->isExactlyValue(0.0) && "Only FP 0.0 expected");
- O << "#0.0";
- break;
- case MachineOperand::MO_BlockAddress:
- case MachineOperand::MO_ConstantPoolIndex:
- case MachineOperand::MO_GlobalAddress:
- case MachineOperand::MO_ExternalSymbol:
- return printSymbolicAddress(MO, false, "", O);
- }
- return false;
- }
- // We have a real modifier to handle.
+ if (!ExtraCode)
+ ExtraCode = "";
+
switch(ExtraCode[0]) {
default:
- // See if this is a generic operand
- return AsmPrinter::PrintAsmOperand(MI, OpNum, AsmVariant, ExtraCode, O);
- case 'c': // Don't print "#" before an immediate operand.
- if (!MI->getOperand(OpNum).isImm())
- return true;
- O << MI->getOperand(OpNum).getImm();
- return false;
+ if (!AsmPrinter::PrintAsmOperand(MI, OpNum, AsmVariant, ExtraCode, O))
+ return false;
+ break;
case 'w':
// Output 32-bit general register operand, constant zero as wzr, or stack
// pointer as wsp. Ignored when used with other operand types.
- return printModifiedGPRAsmOperand(MI->getOperand(OpNum), TRI,
- AArch64::GPR32RegClass, O);
+ if (!printModifiedGPRAsmOperand(MI->getOperand(OpNum), TRI,
+ AArch64::GPR32RegClass, O))
+ return false;
+ break;
case 'x':
// Output 64-bit general register operand, constant zero as xzr, or stack
// pointer as sp. Ignored when used with other operand types.
- return printModifiedGPRAsmOperand(MI->getOperand(OpNum), TRI,
- AArch64::GPR64RegClass, O);
+ if (!printModifiedGPRAsmOperand(MI->getOperand(OpNum), TRI,
+ AArch64::GPR64RegClass, O))
+ return false;
+ break;
case 'H':
// Output higher numbered of a 64-bit general register pair
case 'Q':
@@ -221,40 +184,65 @@ bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
// copies ...).
llvm_unreachable("FIXME: Unimplemented register pairs");
case 'b':
- // Output 8-bit FP/SIMD scalar register operand, prefixed with b.
- return printModifiedFPRAsmOperand(MI->getOperand(OpNum), TRI,
- AArch64::FPR8RegClass, O);
case 'h':
- // Output 16-bit FP/SIMD scalar register operand, prefixed with h.
- return printModifiedFPRAsmOperand(MI->getOperand(OpNum), TRI,
- AArch64::FPR16RegClass, O);
case 's':
- // Output 32-bit FP/SIMD scalar register operand, prefixed with s.
- return printModifiedFPRAsmOperand(MI->getOperand(OpNum), TRI,
- AArch64::FPR32RegClass, O);
case 'd':
- // Output 64-bit FP/SIMD scalar register operand, prefixed with d.
- return printModifiedFPRAsmOperand(MI->getOperand(OpNum), TRI,
- AArch64::FPR64RegClass, O);
case 'q':
- // Output 128-bit FP/SIMD scalar register operand, prefixed with q.
- return printModifiedFPRAsmOperand(MI->getOperand(OpNum), TRI,
- AArch64::FPR128RegClass, O);
+ if (!printModifiedFPRAsmOperand(MI->getOperand(OpNum), TRI,
+ ExtraCode[0], O))
+ return false;
+ break;
case 'A':
// Output symbolic address with appropriate relocation modifier (also
// suitable for ADRP).
- return printSymbolicAddress(MI->getOperand(OpNum), false, "", O);
+ if (!printSymbolicAddress(MI->getOperand(OpNum), false, "", O))
+ return false;
+ break;
case 'L':
// Output bits 11:0 of symbolic address with appropriate :lo12: relocation
// modifier.
- return printSymbolicAddress(MI->getOperand(OpNum), true, "lo12", O);
+ if (!printSymbolicAddress(MI->getOperand(OpNum), true, "lo12", O))
+ return false;
+ break;
case 'G':
// Output bits 23:12 of symbolic address with appropriate :hi12: relocation
// modifier (currently only for TLS local exec).
- return printSymbolicAddress(MI->getOperand(OpNum), true, "hi12", O);
+ if (!printSymbolicAddress(MI->getOperand(OpNum), true, "hi12", O))
+ return false;
+ break;
+ case 'a':
+ return PrintAsmMemoryOperand(MI, OpNum, AsmVariant, ExtraCode, O);
}
+ // There's actually no operand modifier, which leads to a slightly eclectic
+ // set of behaviour which we have to handle here.
+ const MachineOperand &MO = MI->getOperand(OpNum);
+ switch (MO.getType()) {
+ default:
+ llvm_unreachable("Unexpected operand for inline assembly");
+ case MachineOperand::MO_Register:
+ // GCC prints the unmodified operand of a 'w' constraint as the vector
+ // register. Technically, we could allocate the argument as a VPR128, but
+ // that leads to extremely dodgy copies being generated to get the data
+ // there.
+ if (printModifiedFPRAsmOperand(MO, TRI, 'v', O))
+ O << AArch64InstPrinter::getRegisterName(MO.getReg());
+ break;
+ case MachineOperand::MO_Immediate:
+ O << '#' << MO.getImm();
+ break;
+ case MachineOperand::MO_FPImmediate:
+ assert(MO.getFPImm()->isExactlyValue(0.0) && "Only FP 0.0 expected");
+ O << "#0.0";
+ break;
+ case MachineOperand::MO_BlockAddress:
+ case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_ExternalSymbol:
+ return printSymbolicAddress(MO, false, "", O);
+ }
+ return false;
}
bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
@@ -271,24 +259,6 @@ bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
return false;
}
-void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
- raw_ostream &OS) {
- unsigned NOps = MI->getNumOperands();
- assert(NOps==4);
- OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
- // cast away const; DIetc do not take const operands for some reason.
- DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata()));
- OS << V.getName();
- OS << " <- ";
- // Frame address. Currently handles register +- offset only.
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
- OS << '[' << AArch64InstPrinter::getRegisterName(MI->getOperand(0).getReg());
- OS << '+' << MI->getOperand(1).getImm();
- OS << ']';
- OS << "+" << MI->getOperand(NOps - 2).getImm();
-}
-
-
#include "AArch64GenMCPseudoLowering.inc"
void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) {
@@ -296,18 +266,6 @@ void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) {
if (emitPseudoExpansionLowering(OutStreamer, MI))
return;
- switch (MI->getOpcode()) {
- case AArch64::DBG_VALUE: {
- if (isVerbose() && OutStreamer.hasRawTextSupport()) {
- SmallString<128> TmpStr;
- raw_svector_ostream OS(TmpStr);
- PrintDebugValueComment(MI, OS);
- OutStreamer.EmitRawText(StringRef(OS.str()));
- }
- return;
- }
- }
-
MCInst TmpInst;
LowerAArch64MachineInstrToMCInst(MI, TmpInst, *this);
OutStreamer.EmitInstruction(TmpInst);
@@ -329,7 +287,7 @@ void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) {
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
OutStreamer.EmitLabel(Stubs[i].first);
OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(),
- TD->getPointerSize(0), 0);
+ TD->getPointerSize(0));
}
Stubs.clear();
}
diff --git a/lib/Target/AArch64/AArch64AsmPrinter.h b/lib/Target/AArch64/AArch64AsmPrinter.h
index af0c9fed066f..824f0036bc5b 100644
--- a/lib/Target/AArch64/AArch64AsmPrinter.h
+++ b/lib/Target/AArch64/AArch64AsmPrinter.h
@@ -55,8 +55,6 @@ class LLVM_LIBRARY_VISIBILITY AArch64AsmPrinter : public AsmPrinter {
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &O);
- void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
-
/// printSymbolicAddress - Given some kind of reasonably bare symbolic
/// reference, print out the appropriate asm string to represent it. If
/// appropriate, a relocation-specifier will be produced, composed of a
@@ -67,8 +65,6 @@ class LLVM_LIBRARY_VISIBILITY AArch64AsmPrinter : public AsmPrinter {
bool PrintImmediatePrefix,
StringRef Suffix, raw_ostream &O);
- MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
-
virtual const char *getPassName() const {
return "AArch64 Assembly Printer";
}
diff --git a/lib/Target/AArch64/AArch64BranchFixupPass.cpp b/lib/Target/AArch64/AArch64BranchFixupPass.cpp
index 71233ba5c3dc..11e7f41a3964 100644
--- a/lib/Target/AArch64/AArch64BranchFixupPass.cpp
+++ b/lib/Target/AArch64/AArch64BranchFixupPass.cpp
@@ -87,7 +87,7 @@ namespace {
// If the block size isn't a multiple of the known bits, assume the
// worst case padding.
if (Size & ((1u << Bits) - 1))
- Bits = CountTrailingZeros_32(Size);
+ Bits = countTrailingZeros(Size);
return Bits;
}
diff --git a/lib/Target/AArch64/AArch64CallingConv.td b/lib/Target/AArch64/AArch64CallingConv.td
index b880d8373deb..a2a9f3f67455 100644
--- a/lib/Target/AArch64/AArch64CallingConv.td
+++ b/lib/Target/AArch64/AArch64CallingConv.td
@@ -59,9 +59,9 @@ def CC_A64_APCS : CallingConv<[
// Canonicalise the various types that live in different floating-point
// registers. This makes sense because the PCS does not distinguish Short
// Vectors and Floating-point types.
- CCIfType<[v2i8], CCBitConvertToType<f16>>,
- CCIfType<[v4i8, v2i16], CCBitConvertToType<f32>>,
- CCIfType<[v8i8, v4i16, v2i32, v2f32], CCBitConvertToType<f64>>,
+ CCIfType<[v1i16, v2i8], CCBitConvertToType<f16>>,
+ CCIfType<[v1i32, v4i8, v2i16, v1f32], CCBitConvertToType<f32>>,
+ CCIfType<[v8i8, v4i16, v2i32, v2f32, v1i64, v1f64], CCBitConvertToType<f64>>,
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
CCBitConvertToType<f128>>,
@@ -70,7 +70,8 @@ def CC_A64_APCS : CallingConv<[
// argument is allocated to the least significant bits of register
// v[NSRN]. The NSRN is incremented by one. The argument has now been
// allocated."
- CCIfType<[f16], CCAssignToReg<[B0, B1, B2, B3, B4, B5, B6, B7]>>,
+ CCIfType<[v1i8], CCAssignToReg<[B0, B1, B2, B3, B4, B5, B6, B7]>>,
+ CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
CCIfType<[f128], CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index daa7f1d5ef49..731823017c13 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -54,7 +54,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
bool NeedsFrameMoves = MMI.hasDebugInfo()
|| MF.getFunction()->needsUnwindTableEntry();
@@ -97,8 +97,9 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
.addSym(SPLabel);
MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(AArch64::XSP, NumInitialBytes);
- Moves.push_back(MachineMove(SPLabel, Dst, Src));
+ unsigned Reg = MRI->getDwarfRegNum(AArch64::XSP, true);
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfa(SPLabel, Reg, -NumInitialBytes));
}
// Otherwise we need to set the frame pointer and/or add a second stack
@@ -131,9 +132,9 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
MCSymbol *FPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::PROLOG_LABEL))
.addSym(FPLabel);
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(AArch64::X29, -MFI->getObjectOffset(X29FrameIdx));
- Moves.push_back(MachineMove(FPLabel, Dst, Src));
+ unsigned Reg = MRI->getDwarfRegNum(AArch64::X29, true);
+ unsigned Offset = MFI->getObjectOffset(X29FrameIdx);
+ MMI.addFrameInst(MCCFIInstruction::createDefCfa(FPLabel, Reg, Offset));
}
FPNeedsSetting = false;
@@ -164,8 +165,9 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
.addSym(CSLabel);
MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(AArch64::XSP, NumResidualBytes + NumInitialBytes);
- Moves.push_back(MachineMove(CSLabel, Dst, Src));
+ unsigned Reg = MRI->getDwarfRegNum(AArch64::XSP, true);
+ unsigned Offset = NumResidualBytes + NumInitialBytes;
+ MMI.addFrameInst(MCCFIInstruction::createDefCfa(CSLabel, Reg, -Offset));
}
// And any callee-saved registers (it's fine to leave them to the end here,
@@ -180,10 +182,9 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
E = CSI.end(); I != E; ++I) {
- MachineLocation Dst(MachineLocation::VirtualFP,
- MFI->getObjectOffset(I->getFrameIdx()));
- MachineLocation Src(I->getReg());
- Moves.push_back(MachineMove(CSLabel, Dst, Src));
+ unsigned Offset = MFI->getObjectOffset(I->getFrameIdx());
+ unsigned Reg = MRI->getDwarfRegNum(I->getReg(), true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, Reg, Offset));
}
}
}
@@ -424,7 +425,7 @@ AArch64FrameLowering::emitFrameMemOps(bool isPrologue, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI,
- LoadStoreMethod PossClasses[],
+ const LoadStoreMethod PossClasses[],
unsigned NumClasses) const {
DebugLoc DL = MBB.findDebugLoc(MBBI);
MachineFunction &MF = *MBB.getParent();
@@ -527,11 +528,11 @@ AArch64FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
if (CSI.empty())
return false;
- static LoadStoreMethod PossibleClasses[] = {
+ static const LoadStoreMethod PossibleClasses[] = {
{&AArch64::GPR64RegClass, AArch64::LSPair64_STR, AArch64::LS64_STR},
{&AArch64::FPR64RegClass, AArch64::LSFPPair64_STR, AArch64::LSFP64_STR},
};
- unsigned NumClasses = llvm::array_lengthof(PossibleClasses);
+ const unsigned NumClasses = llvm::array_lengthof(PossibleClasses);
emitFrameMemOps(/* isPrologue = */ true, MBB, MBBI, CSI, TRI,
PossibleClasses, NumClasses);
@@ -548,11 +549,11 @@ AArch64FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
if (CSI.empty())
return false;
- static LoadStoreMethod PossibleClasses[] = {
+ static const LoadStoreMethod PossibleClasses[] = {
{&AArch64::GPR64RegClass, AArch64::LSPair64_LDR, AArch64::LS64_LDR},
{&AArch64::FPR64RegClass, AArch64::LSFPPair64_LDR, AArch64::LSFP64_LDR},
};
- unsigned NumClasses = llvm::array_lengthof(PossibleClasses);
+ const unsigned NumClasses = llvm::array_lengthof(PossibleClasses);
emitFrameMemOps(/* isPrologue = */ false, MBB, MBBI, CSI, TRI,
PossibleClasses, NumClasses);
diff --git a/lib/Target/AArch64/AArch64FrameLowering.h b/lib/Target/AArch64/AArch64FrameLowering.h
index 45ea0ec8e071..032dd90fa0e6 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/lib/Target/AArch64/AArch64FrameLowering.h
@@ -90,7 +90,7 @@ public:
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI,
- LoadStoreMethod PossibleClasses[],
+ const LoadStoreMethod PossibleClasses[],
unsigned NumClasses) const;
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 102c71b5d989..ef99541c1700 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -33,7 +33,6 @@ namespace {
class AArch64DAGToDAGISel : public SelectionDAGISel {
AArch64TargetMachine &TM;
- const AArch64InstrInfo *TII;
/// Keep a pointer to the AArch64Subtarget around so that we can
/// make the right decision when generating code for different targets.
@@ -43,7 +42,6 @@ public:
explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
CodeGenOpt::Level OptLevel)
: SelectionDAGISel(tm, OptLevel), TM(tm),
- TII(static_cast<const AArch64InstrInfo*>(TM.getInstrInfo())),
Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
}
@@ -72,10 +70,11 @@ public:
/// Used for pre-lowered address-reference nodes, so we already know
/// the fields match. This operand's job is simply to add an
- /// appropriate shift operand (i.e. 0) to the MOVZ/MOVK instruction.
+ /// appropriate shift operand to the MOVZ/MOVK instruction.
+ template<unsigned LogShift>
bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) {
Imm = N;
- Shift = CurDAG->getTargetConstant(0, MVT::i32);
+ Shift = CurDAG->getTargetConstant(LogShift, MVT::i32);
return true;
}
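// --- Editorial sketch (not part of this patch): the LogShift template
// parameter lets each MOVZ/MOVK operand carry its 16-bit granule index, which
// the constant-pool lowering below uses as shifts 3..0 for MO_ABS_G3..G0.
// Standalone model of assembling a 64-bit value from four granules,
// hypothetical name:
static unsigned long long assembleFromGranules(const unsigned short Granule[4]) {
  unsigned long long Value = 0;
  for (unsigned i = 0; i < 4; ++i)
    Value |= (unsigned long long)Granule[i] << (16 * i); // granule i <-> shift i
  return Value;
}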
@@ -102,7 +101,7 @@ public:
/// Put the given constant into a pool and return a DAG which will give its
/// address.
- SDValue getConstantPoolItemAddress(DebugLoc DL, const Constant *CV);
+ SDValue getConstantPoolItemAddress(SDLoc DL, const Constant *CV);
SDNode *TrySelectToMoveImm(SDNode *N);
SDNode *LowerToFPLitPool(SDNode *Node);
@@ -110,6 +109,45 @@ public:
SDNode* Select(SDNode*);
private:
+ /// Get the opcode for the table lookup instruction
+ unsigned getTBLOpc(bool IsExt, bool Is64Bit, unsigned NumOfVec);
+
+ /// Select NEON table lookup intrinsics. NumVecs should be 1, 2, 3 or 4.
+ /// IsExt indicates whether the result will be extended with an argument.
+ SDNode *SelectVTBL(SDNode *N, unsigned NumVecs, bool IsExt);
+
+ /// Select NEON load intrinsics. NumVecs should be 1, 2, 3 or 4.
+ SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
+ const uint16_t *Opcode);
+
+ /// Select NEON store intrinsics. NumVecs should be 1, 2, 3 or 4.
+ SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
+ const uint16_t *Opcodes);
+
+ /// Form sequences of consecutive 64/128-bit registers for use in NEON
+ /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
+ /// between 1 and 4 elements. If it contains a single element, that element is
+ /// returned unchanged; otherwise a REG_SEQUENCE value is returned.
+ SDValue createDTuple(ArrayRef<SDValue> Vecs);
+ SDValue createQTuple(ArrayRef<SDValue> Vecs);
+
+ /// Generic helper for the createDTuple/createQTuple
+ /// functions. Those should almost always be called instead.
+ SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
+ unsigned SubRegs[]);
+
+ /// Select NEON load-duplicate intrinsics. NumVecs should be 2, 3 or 4.
+ /// The opcode array specifies the instructions used for load.
+ SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
+ const uint16_t *Opcodes);
+
+ /// Select NEON load/store lane intrinsics. NumVecs should be 2, 3 or 4.
+ /// The opcode arrays specify the instructions used for load/store.
+ SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
+ unsigned NumVecs, const uint16_t *Opcodes);
+
+ SDValue getTargetSubregToReg(int SRIdx, SDLoc DL, EVT VT, EVT VTD,
+ SDValue Operand);
};
}
@@ -191,7 +229,7 @@ bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) {
SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
SDNode *ResNode;
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
EVT DestType = Node->getValueType(0);
unsigned DestWidth = DestType.getSizeInBits();
@@ -241,14 +279,14 @@ SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
}
SDValue
-AArch64DAGToDAGISel::getConstantPoolItemAddress(DebugLoc DL,
+AArch64DAGToDAGISel::getConstantPoolItemAddress(SDLoc DL,
const Constant *CV) {
- EVT PtrVT = TLI.getPointerTy();
+ EVT PtrVT = getTargetLowering()->getPointerTy();
- switch (TLI.getTargetMachine().getCodeModel()) {
+ switch (getTargetLowering()->getTargetMachine().getCodeModel()) {
case CodeModel::Small: {
unsigned Alignment =
- TLI.getDataLayout()->getABITypeAlignment(CV->getType());
+ getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());
return CurDAG->getNode(
AArch64ISD::WrapperSmall, DL, PtrVT,
CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_NO_FLAG),
@@ -260,15 +298,15 @@ AArch64DAGToDAGISel::getConstantPoolItemAddress(DebugLoc DL,
LitAddr = CurDAG->getMachineNode(
AArch64::MOVZxii, DL, PtrVT,
CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
- CurDAG->getTargetConstant(0, MVT::i32));
+ CurDAG->getTargetConstant(3, MVT::i32));
LitAddr = CurDAG->getMachineNode(
AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
- CurDAG->getTargetConstant(0, MVT::i32));
+ CurDAG->getTargetConstant(2, MVT::i32));
LitAddr = CurDAG->getMachineNode(
AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
- CurDAG->getTargetConstant(0, MVT::i32));
+ CurDAG->getTargetConstant(1, MVT::i32));
LitAddr = CurDAG->getMachineNode(
AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC),
@@ -281,7 +319,7 @@ AArch64DAGToDAGISel::getConstantPoolItemAddress(DebugLoc DL,
}
SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue();
int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue();
EVT DestType = Node->getValueType(0);
@@ -312,7 +350,8 @@ SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
MemType.getSizeInBits()),
UnsignedVal);
SDValue PoolAddr = getConstantPoolItemAddress(DL, CV);
- unsigned Alignment = TLI.getDataLayout()->getABITypeAlignment(CV->getType());
+ unsigned Alignment =
+ getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());
return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(),
PoolAddr,
@@ -323,11 +362,12 @@ SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
}
SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
EVT DestType = Node->getValueType(0);
- unsigned Alignment = TLI.getDataLayout()->getABITypeAlignment(FV->getType());
+ unsigned Alignment =
+ getTargetLowering()->getDataLayout()->getABITypeAlignment(FV->getType());
SDValue PoolAddr = getConstantPoolItemAddress(DL, FV);
return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
@@ -389,12 +429,607 @@ SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
&Ops[0], Ops.size());
}
+SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
+ static unsigned RegClassIDs[] = { AArch64::DPairRegClassID,
+ AArch64::DTripleRegClassID,
+ AArch64::DQuadRegClassID };
+ static unsigned SubRegs[] = { AArch64::dsub_0, AArch64::dsub_1,
+ AArch64::dsub_2, AArch64::dsub_3 };
+
+ return createTuple(Regs, RegClassIDs, SubRegs);
+}
+
+SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
+ static unsigned RegClassIDs[] = { AArch64::QPairRegClassID,
+ AArch64::QTripleRegClassID,
+ AArch64::QQuadRegClassID };
+ static unsigned SubRegs[] = { AArch64::qsub_0, AArch64::qsub_1,
+ AArch64::qsub_2, AArch64::qsub_3 };
+
+ return createTuple(Regs, RegClassIDs, SubRegs);
+}
+
+SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
+ unsigned RegClassIDs[],
+ unsigned SubRegs[]) {
+ // There's no special register-class for a vector-list of 1 element: it's just
+ // a vector.
+ if (Regs.size() == 1)
+ return Regs[0];
+
+ assert(Regs.size() >= 2 && Regs.size() <= 4);
+
+ SDLoc DL(Regs[0].getNode());
+
+ SmallVector<SDValue, 4> Ops;
+
+ // First operand of REG_SEQUENCE is the desired RegClass.
+ Ops.push_back(
+ CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));
+
+ // Then we get pairs of source & subregister-position for the components.
+ for (unsigned i = 0; i < Regs.size(); ++i) {
+ Ops.push_back(Regs[i]);
+ Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
+ }
+
+ SDNode *N =
+ CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
+ return SDValue(N, 0);
+}
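// --- Editorial sketch (not part of this patch): createDTuple/createQTuple map
// the vector-list length onto a register class via RegClassIDs[Regs.size() - 2]
// and pair each input with SubRegs[i]. Standalone model of that indexing,
// hypothetical names:
enum TupleKind { TuplePair, TupleTriple, TupleQuad };
static TupleKind pickTupleKind(unsigned NumRegs) {
  assert(NumRegs >= 2 && NumRegs <= 4 && "vector list must have 2-4 registers");
  return static_cast<TupleKind>(TuplePair + (NumRegs - 2));
}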
+
+
+// Get the register stride update opcode of a VLD/VST instruction that
+// is otherwise equivalent to the given fixed stride updating instruction.
+static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
+ switch (Opc) {
+ default: break;
+ case AArch64::LD1WB_8B_fixed: return AArch64::LD1WB_8B_register;
+ case AArch64::LD1WB_4H_fixed: return AArch64::LD1WB_4H_register;
+ case AArch64::LD1WB_2S_fixed: return AArch64::LD1WB_2S_register;
+ case AArch64::LD1WB_1D_fixed: return AArch64::LD1WB_1D_register;
+ case AArch64::LD1WB_16B_fixed: return AArch64::LD1WB_16B_register;
+ case AArch64::LD1WB_8H_fixed: return AArch64::LD1WB_8H_register;
+ case AArch64::LD1WB_4S_fixed: return AArch64::LD1WB_4S_register;
+ case AArch64::LD1WB_2D_fixed: return AArch64::LD1WB_2D_register;
+
+ case AArch64::LD2WB_8B_fixed: return AArch64::LD2WB_8B_register;
+ case AArch64::LD2WB_4H_fixed: return AArch64::LD2WB_4H_register;
+ case AArch64::LD2WB_2S_fixed: return AArch64::LD2WB_2S_register;
+ case AArch64::LD2WB_16B_fixed: return AArch64::LD2WB_16B_register;
+ case AArch64::LD2WB_8H_fixed: return AArch64::LD2WB_8H_register;
+ case AArch64::LD2WB_4S_fixed: return AArch64::LD2WB_4S_register;
+ case AArch64::LD2WB_2D_fixed: return AArch64::LD2WB_2D_register;
+
+ case AArch64::LD3WB_8B_fixed: return AArch64::LD3WB_8B_register;
+ case AArch64::LD3WB_4H_fixed: return AArch64::LD3WB_4H_register;
+ case AArch64::LD3WB_2S_fixed: return AArch64::LD3WB_2S_register;
+ case AArch64::LD3WB_16B_fixed: return AArch64::LD3WB_16B_register;
+ case AArch64::LD3WB_8H_fixed: return AArch64::LD3WB_8H_register;
+ case AArch64::LD3WB_4S_fixed: return AArch64::LD3WB_4S_register;
+ case AArch64::LD3WB_2D_fixed: return AArch64::LD3WB_2D_register;
+
+ case AArch64::LD4WB_8B_fixed: return AArch64::LD4WB_8B_register;
+ case AArch64::LD4WB_4H_fixed: return AArch64::LD4WB_4H_register;
+ case AArch64::LD4WB_2S_fixed: return AArch64::LD4WB_2S_register;
+ case AArch64::LD4WB_16B_fixed: return AArch64::LD4WB_16B_register;
+ case AArch64::LD4WB_8H_fixed: return AArch64::LD4WB_8H_register;
+ case AArch64::LD4WB_4S_fixed: return AArch64::LD4WB_4S_register;
+ case AArch64::LD4WB_2D_fixed: return AArch64::LD4WB_2D_register;
+
+ case AArch64::LD1x2WB_8B_fixed: return AArch64::LD1x2WB_8B_register;
+ case AArch64::LD1x2WB_4H_fixed: return AArch64::LD1x2WB_4H_register;
+ case AArch64::LD1x2WB_2S_fixed: return AArch64::LD1x2WB_2S_register;
+ case AArch64::LD1x2WB_1D_fixed: return AArch64::LD1x2WB_1D_register;
+ case AArch64::LD1x2WB_16B_fixed: return AArch64::LD1x2WB_16B_register;
+ case AArch64::LD1x2WB_8H_fixed: return AArch64::LD1x2WB_8H_register;
+ case AArch64::LD1x2WB_4S_fixed: return AArch64::LD1x2WB_4S_register;
+ case AArch64::LD1x2WB_2D_fixed: return AArch64::LD1x2WB_2D_register;
+
+ case AArch64::LD1x3WB_8B_fixed: return AArch64::LD1x3WB_8B_register;
+ case AArch64::LD1x3WB_4H_fixed: return AArch64::LD1x3WB_4H_register;
+ case AArch64::LD1x3WB_2S_fixed: return AArch64::LD1x3WB_2S_register;
+ case AArch64::LD1x3WB_1D_fixed: return AArch64::LD1x3WB_1D_register;
+ case AArch64::LD1x3WB_16B_fixed: return AArch64::LD1x3WB_16B_register;
+ case AArch64::LD1x3WB_8H_fixed: return AArch64::LD1x3WB_8H_register;
+ case AArch64::LD1x3WB_4S_fixed: return AArch64::LD1x3WB_4S_register;
+ case AArch64::LD1x3WB_2D_fixed: return AArch64::LD1x3WB_2D_register;
+
+ case AArch64::LD1x4WB_8B_fixed: return AArch64::LD1x4WB_8B_register;
+ case AArch64::LD1x4WB_4H_fixed: return AArch64::LD1x4WB_4H_register;
+ case AArch64::LD1x4WB_2S_fixed: return AArch64::LD1x4WB_2S_register;
+ case AArch64::LD1x4WB_1D_fixed: return AArch64::LD1x4WB_1D_register;
+ case AArch64::LD1x4WB_16B_fixed: return AArch64::LD1x4WB_16B_register;
+ case AArch64::LD1x4WB_8H_fixed: return AArch64::LD1x4WB_8H_register;
+ case AArch64::LD1x4WB_4S_fixed: return AArch64::LD1x4WB_4S_register;
+ case AArch64::LD1x4WB_2D_fixed: return AArch64::LD1x4WB_2D_register;
+
+ case AArch64::ST1WB_8B_fixed: return AArch64::ST1WB_8B_register;
+ case AArch64::ST1WB_4H_fixed: return AArch64::ST1WB_4H_register;
+ case AArch64::ST1WB_2S_fixed: return AArch64::ST1WB_2S_register;
+ case AArch64::ST1WB_1D_fixed: return AArch64::ST1WB_1D_register;
+ case AArch64::ST1WB_16B_fixed: return AArch64::ST1WB_16B_register;
+ case AArch64::ST1WB_8H_fixed: return AArch64::ST1WB_8H_register;
+ case AArch64::ST1WB_4S_fixed: return AArch64::ST1WB_4S_register;
+ case AArch64::ST1WB_2D_fixed: return AArch64::ST1WB_2D_register;
+
+ case AArch64::ST2WB_8B_fixed: return AArch64::ST2WB_8B_register;
+ case AArch64::ST2WB_4H_fixed: return AArch64::ST2WB_4H_register;
+ case AArch64::ST2WB_2S_fixed: return AArch64::ST2WB_2S_register;
+ case AArch64::ST2WB_16B_fixed: return AArch64::ST2WB_16B_register;
+ case AArch64::ST2WB_8H_fixed: return AArch64::ST2WB_8H_register;
+ case AArch64::ST2WB_4S_fixed: return AArch64::ST2WB_4S_register;
+ case AArch64::ST2WB_2D_fixed: return AArch64::ST2WB_2D_register;
+
+ case AArch64::ST3WB_8B_fixed: return AArch64::ST3WB_8B_register;
+ case AArch64::ST3WB_4H_fixed: return AArch64::ST3WB_4H_register;
+ case AArch64::ST3WB_2S_fixed: return AArch64::ST3WB_2S_register;
+ case AArch64::ST3WB_16B_fixed: return AArch64::ST3WB_16B_register;
+ case AArch64::ST3WB_8H_fixed: return AArch64::ST3WB_8H_register;
+ case AArch64::ST3WB_4S_fixed: return AArch64::ST3WB_4S_register;
+ case AArch64::ST3WB_2D_fixed: return AArch64::ST3WB_2D_register;
+
+ case AArch64::ST4WB_8B_fixed: return AArch64::ST4WB_8B_register;
+ case AArch64::ST4WB_4H_fixed: return AArch64::ST4WB_4H_register;
+ case AArch64::ST4WB_2S_fixed: return AArch64::ST4WB_2S_register;
+ case AArch64::ST4WB_16B_fixed: return AArch64::ST4WB_16B_register;
+ case AArch64::ST4WB_8H_fixed: return AArch64::ST4WB_8H_register;
+ case AArch64::ST4WB_4S_fixed: return AArch64::ST4WB_4S_register;
+ case AArch64::ST4WB_2D_fixed: return AArch64::ST4WB_2D_register;
+
+ case AArch64::ST1x2WB_8B_fixed: return AArch64::ST1x2WB_8B_register;
+ case AArch64::ST1x2WB_4H_fixed: return AArch64::ST1x2WB_4H_register;
+ case AArch64::ST1x2WB_2S_fixed: return AArch64::ST1x2WB_2S_register;
+ case AArch64::ST1x2WB_1D_fixed: return AArch64::ST1x2WB_1D_register;
+ case AArch64::ST1x2WB_16B_fixed: return AArch64::ST1x2WB_16B_register;
+ case AArch64::ST1x2WB_8H_fixed: return AArch64::ST1x2WB_8H_register;
+ case AArch64::ST1x2WB_4S_fixed: return AArch64::ST1x2WB_4S_register;
+ case AArch64::ST1x2WB_2D_fixed: return AArch64::ST1x2WB_2D_register;
+
+ case AArch64::ST1x3WB_8B_fixed: return AArch64::ST1x3WB_8B_register;
+ case AArch64::ST1x3WB_4H_fixed: return AArch64::ST1x3WB_4H_register;
+ case AArch64::ST1x3WB_2S_fixed: return AArch64::ST1x3WB_2S_register;
+ case AArch64::ST1x3WB_1D_fixed: return AArch64::ST1x3WB_1D_register;
+ case AArch64::ST1x3WB_16B_fixed: return AArch64::ST1x3WB_16B_register;
+ case AArch64::ST1x3WB_8H_fixed: return AArch64::ST1x3WB_8H_register;
+ case AArch64::ST1x3WB_4S_fixed: return AArch64::ST1x3WB_4S_register;
+ case AArch64::ST1x3WB_2D_fixed: return AArch64::ST1x3WB_2D_register;
+
+ case AArch64::ST1x4WB_8B_fixed: return AArch64::ST1x4WB_8B_register;
+ case AArch64::ST1x4WB_4H_fixed: return AArch64::ST1x4WB_4H_register;
+ case AArch64::ST1x4WB_2S_fixed: return AArch64::ST1x4WB_2S_register;
+ case AArch64::ST1x4WB_1D_fixed: return AArch64::ST1x4WB_1D_register;
+ case AArch64::ST1x4WB_16B_fixed: return AArch64::ST1x4WB_16B_register;
+ case AArch64::ST1x4WB_8H_fixed: return AArch64::ST1x4WB_8H_register;
+ case AArch64::ST1x4WB_4S_fixed: return AArch64::ST1x4WB_4S_register;
+ case AArch64::ST1x4WB_2D_fixed: return AArch64::ST1x4WB_2D_register;
+
+ // Post-indexed duplicate loads
+ case AArch64::LD2R_WB_8B_fixed: return AArch64::LD2R_WB_8B_register;
+ case AArch64::LD2R_WB_4H_fixed: return AArch64::LD2R_WB_4H_register;
+ case AArch64::LD2R_WB_2S_fixed: return AArch64::LD2R_WB_2S_register;
+ case AArch64::LD2R_WB_1D_fixed: return AArch64::LD2R_WB_1D_register;
+ case AArch64::LD2R_WB_16B_fixed: return AArch64::LD2R_WB_16B_register;
+ case AArch64::LD2R_WB_8H_fixed: return AArch64::LD2R_WB_8H_register;
+ case AArch64::LD2R_WB_4S_fixed: return AArch64::LD2R_WB_4S_register;
+ case AArch64::LD2R_WB_2D_fixed: return AArch64::LD2R_WB_2D_register;
+
+ case AArch64::LD3R_WB_8B_fixed: return AArch64::LD3R_WB_8B_register;
+ case AArch64::LD3R_WB_4H_fixed: return AArch64::LD3R_WB_4H_register;
+ case AArch64::LD3R_WB_2S_fixed: return AArch64::LD3R_WB_2S_register;
+ case AArch64::LD3R_WB_1D_fixed: return AArch64::LD3R_WB_1D_register;
+ case AArch64::LD3R_WB_16B_fixed: return AArch64::LD3R_WB_16B_register;
+ case AArch64::LD3R_WB_8H_fixed: return AArch64::LD3R_WB_8H_register;
+ case AArch64::LD3R_WB_4S_fixed: return AArch64::LD3R_WB_4S_register;
+ case AArch64::LD3R_WB_2D_fixed: return AArch64::LD3R_WB_2D_register;
+
+ case AArch64::LD4R_WB_8B_fixed: return AArch64::LD4R_WB_8B_register;
+ case AArch64::LD4R_WB_4H_fixed: return AArch64::LD4R_WB_4H_register;
+ case AArch64::LD4R_WB_2S_fixed: return AArch64::LD4R_WB_2S_register;
+ case AArch64::LD4R_WB_1D_fixed: return AArch64::LD4R_WB_1D_register;
+ case AArch64::LD4R_WB_16B_fixed: return AArch64::LD4R_WB_16B_register;
+ case AArch64::LD4R_WB_8H_fixed: return AArch64::LD4R_WB_8H_register;
+ case AArch64::LD4R_WB_4S_fixed: return AArch64::LD4R_WB_4S_register;
+ case AArch64::LD4R_WB_2D_fixed: return AArch64::LD4R_WB_2D_register;
+
+ // Post-indexed lane loads
+ case AArch64::LD2LN_WB_B_fixed: return AArch64::LD2LN_WB_B_register;
+ case AArch64::LD2LN_WB_H_fixed: return AArch64::LD2LN_WB_H_register;
+ case AArch64::LD2LN_WB_S_fixed: return AArch64::LD2LN_WB_S_register;
+ case AArch64::LD2LN_WB_D_fixed: return AArch64::LD2LN_WB_D_register;
+
+ case AArch64::LD3LN_WB_B_fixed: return AArch64::LD3LN_WB_B_register;
+ case AArch64::LD3LN_WB_H_fixed: return AArch64::LD3LN_WB_H_register;
+ case AArch64::LD3LN_WB_S_fixed: return AArch64::LD3LN_WB_S_register;
+ case AArch64::LD3LN_WB_D_fixed: return AArch64::LD3LN_WB_D_register;
+
+ case AArch64::LD4LN_WB_B_fixed: return AArch64::LD4LN_WB_B_register;
+ case AArch64::LD4LN_WB_H_fixed: return AArch64::LD4LN_WB_H_register;
+ case AArch64::LD4LN_WB_S_fixed: return AArch64::LD4LN_WB_S_register;
+ case AArch64::LD4LN_WB_D_fixed: return AArch64::LD4LN_WB_D_register;
+
+ // Post-indexed lane stores
+ case AArch64::ST2LN_WB_B_fixed: return AArch64::ST2LN_WB_B_register;
+ case AArch64::ST2LN_WB_H_fixed: return AArch64::ST2LN_WB_H_register;
+ case AArch64::ST2LN_WB_S_fixed: return AArch64::ST2LN_WB_S_register;
+ case AArch64::ST2LN_WB_D_fixed: return AArch64::ST2LN_WB_D_register;
+
+ case AArch64::ST3LN_WB_B_fixed: return AArch64::ST3LN_WB_B_register;
+ case AArch64::ST3LN_WB_H_fixed: return AArch64::ST3LN_WB_H_register;
+ case AArch64::ST3LN_WB_S_fixed: return AArch64::ST3LN_WB_S_register;
+ case AArch64::ST3LN_WB_D_fixed: return AArch64::ST3LN_WB_D_register;
+
+ case AArch64::ST4LN_WB_B_fixed: return AArch64::ST4LN_WB_B_register;
+ case AArch64::ST4LN_WB_H_fixed: return AArch64::ST4LN_WB_H_register;
+ case AArch64::ST4LN_WB_S_fixed: return AArch64::ST4LN_WB_S_register;
+ case AArch64::ST4LN_WB_D_fixed: return AArch64::ST4LN_WB_D_register;
+ }
+ return Opc; // If not one we handle, return it unchanged.
+}
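// --- Editorial sketch (not part of this patch): post-indexed NEON loads and
// stores have a "_fixed" form (immediate post-increment equal to the transfer
// size) and a "_register" form (increment held in a GPR); the selection code
// below switches to the register form only when the increment operand is not
// a constant. Standalone model, hypothetical names:
enum WritebackForm { WritebackFixed, WritebackRegister };
static WritebackForm pickWritebackForm(bool IncIsConstant) {
  return IncIsConstant ? WritebackFixed : WritebackRegister;
}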
+
+SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating,
+ unsigned NumVecs,
+ const uint16_t *Opcodes) {
+ assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
+
+ EVT VT = N->getValueType(0);
+ unsigned OpcodeIndex;
+ bool is64BitVector = VT.is64BitVector();
+ switch (VT.getScalarType().getSizeInBits()) {
+ case 8: OpcodeIndex = is64BitVector ? 0 : 4; break;
+ case 16: OpcodeIndex = is64BitVector ? 1 : 5; break;
+ case 32: OpcodeIndex = is64BitVector ? 2 : 6; break;
+ case 64: OpcodeIndex = is64BitVector ? 3 : 7; break;
+ default: llvm_unreachable("unhandled vector load type");
+ }
+ unsigned Opc = Opcodes[OpcodeIndex];
+
+ SmallVector<SDValue, 2> Ops;
+ unsigned AddrOpIdx = isUpdating ? 1 : 2;
+ Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
+
+ if (isUpdating) {
+ SDValue Inc = N->getOperand(AddrOpIdx + 1);
+ if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
+ Opc = getVLDSTRegisterUpdateOpcode(Opc);
+ Ops.push_back(Inc);
+ }
+
+ Ops.push_back(N->getOperand(0)); // Push back the Chain
+
+ SmallVector<EVT, 3> ResTys;
+ // Push back the type of return super register
+ if (NumVecs == 1)
+ ResTys.push_back(VT);
+ else if (NumVecs == 3)
+ ResTys.push_back(MVT::Untyped);
+ else {
+ EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
+ is64BitVector ? NumVecs : NumVecs * 2);
+ ResTys.push_back(ResTy);
+ }
+
+ if (isUpdating)
+ ResTys.push_back(MVT::i64); // Type of the updated register
+ ResTys.push_back(MVT::Other); // Type of the Chain
+ SDLoc dl(N);
+ SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+
+ // Transfer memoperands.
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
+
+ if (NumVecs == 1)
+ return VLd;
+
+ // If NumVecs > 1, the return result is a super register containing 2-4
+ // consecutive vector registers.
+ SDValue SuperReg = SDValue(VLd, 0);
+
+ unsigned Sub0 = is64BitVector ? AArch64::dsub_0 : AArch64::qsub_0;
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ ReplaceUses(SDValue(N, Vec),
+ CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
+ // Update users of the Chain
+ ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
+ if (isUpdating)
+ ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
+
+ return NULL;
+}
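// --- Editorial sketch (not part of this patch): each Select* routine above
// and below indexes its opcode table the same way: entries 0-3 are the 64-bit
// vector forms (8B/4H/2S/1D), entries 4-7 the 128-bit forms (16B/8H/4S/2D).
// Standalone model of that indexing, hypothetical name:
static unsigned opcodeIndexFor(unsigned ScalarBits, bool Is64BitVector) {
  unsigned SizeIdx;
  switch (ScalarBits) {
  default: llvm_unreachable("unhandled element size");
  case 8:  SizeIdx = 0; break;
  case 16: SizeIdx = 1; break;
  case 32: SizeIdx = 2; break;
  case 64: SizeIdx = 3; break;
  }
  return Is64BitVector ? SizeIdx : SizeIdx + 4;
}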
+
+SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, bool isUpdating,
+ unsigned NumVecs,
+ const uint16_t *Opcodes) {
+ assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
+ SDLoc dl(N);
+
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+
+ unsigned AddrOpIdx = isUpdating ? 1 : 2;
+ unsigned Vec0Idx = 3;
+ EVT VT = N->getOperand(Vec0Idx).getValueType();
+ unsigned OpcodeIndex;
+ bool is64BitVector = VT.is64BitVector();
+ switch (VT.getScalarType().getSizeInBits()) {
+ case 8: OpcodeIndex = is64BitVector ? 0 : 4; break;
+ case 16: OpcodeIndex = is64BitVector ? 1 : 5; break;
+ case 32: OpcodeIndex = is64BitVector ? 2 : 6; break;
+ case 64: OpcodeIndex = is64BitVector ? 3 : 7; break;
+ default: llvm_unreachable("unhandled vector store type");
+ }
+ unsigned Opc = Opcodes[OpcodeIndex];
+
+ SmallVector<EVT, 2> ResTys;
+ if (isUpdating)
+ ResTys.push_back(MVT::i64);
+ ResTys.push_back(MVT::Other); // Type for the Chain
+
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
+
+ if (isUpdating) {
+ SDValue Inc = N->getOperand(AddrOpIdx + 1);
+ if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
+ Opc = getVLDSTRegisterUpdateOpcode(Opc);
+ Ops.push_back(Inc);
+ }
+
+ SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
+ N->op_begin() + Vec0Idx + NumVecs);
+ SDValue SrcReg = is64BitVector ? createDTuple(Regs) : createQTuple(Regs);
+ Ops.push_back(SrcReg);
+
+ // Push back the Chain
+ Ops.push_back(N->getOperand(0));
+
+ // Transfer memoperands.
+ SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+ cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
+
+ return VSt;
+}
+
+SDValue
+AArch64DAGToDAGISel::getTargetSubregToReg(int SRIdx, SDLoc DL, EVT VT, EVT VTD,
+ SDValue Operand) {
+ SDNode *Reg = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, DL,
+ VT, VTD, MVT::Other,
+ CurDAG->getTargetConstant(0, MVT::i64),
+ Operand,
+ CurDAG->getTargetConstant(AArch64::sub_64, MVT::i32));
+ return SDValue(Reg, 0);
+}
+
+SDNode *AArch64DAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
+ unsigned NumVecs,
+ const uint16_t *Opcodes) {
+ assert(NumVecs >= 2 && NumVecs <= 4 && "Load Dup NumVecs out-of-range");
+ SDLoc dl(N);
+
+ EVT VT = N->getValueType(0);
+ unsigned OpcodeIndex;
+ bool is64BitVector = VT.is64BitVector();
+ switch (VT.getScalarType().getSizeInBits()) {
+ case 8: OpcodeIndex = is64BitVector ? 0 : 4; break;
+ case 16: OpcodeIndex = is64BitVector ? 1 : 5; break;
+ case 32: OpcodeIndex = is64BitVector ? 2 : 6; break;
+ case 64: OpcodeIndex = is64BitVector ? 3 : 7; break;
+ default: llvm_unreachable("unhandled vector duplicate lane load type");
+ }
+ unsigned Opc = Opcodes[OpcodeIndex];
+
+ SDValue SuperReg;
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(N->getOperand(1)); // Push back the Memory Address
+ if (isUpdating) {
+ SDValue Inc = N->getOperand(2);
+ if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
+ Opc = getVLDSTRegisterUpdateOpcode(Opc);
+ Ops.push_back(Inc);
+ }
+ Ops.push_back(N->getOperand(0)); // Push back the Chain
+
+ SmallVector<EVT, 3> ResTys;
+ // Push back the type of return super register
+ if (NumVecs == 3)
+ ResTys.push_back(MVT::Untyped);
+ else {
+ EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
+ is64BitVector ? NumVecs : NumVecs * 2);
+ ResTys.push_back(ResTy);
+ }
+ if (isUpdating)
+ ResTys.push_back(MVT::i64); // Type of the updated register
+ ResTys.push_back(MVT::Other); // Type of the Chain
+ SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+
+ // Transfer memoperands.
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
+
+ SuperReg = SDValue(VLdDup, 0);
+ unsigned Sub0 = is64BitVector ? AArch64::dsub_0 : AArch64::qsub_0;
+ // Update uses of each register in the super register
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ ReplaceUses(SDValue(N, Vec),
+ CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
+ // Update uses of the Chain
+ ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
+ if (isUpdating)
+ ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
+ return NULL;
+}
+
+// Load/store lane instructions only exist for 128-bit vector types, so a
+// 64-bit vector is also selected to the 128-bit instruction: SUBREG_TO_REG
+// widens the input to a 128-bit vector, and EXTRACT_SUBREG recovers the
+// 64-bit vector from the 128-bit output.
+SDNode *AArch64DAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
+ bool isUpdating, unsigned NumVecs,
+ const uint16_t *Opcodes) {
+ assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
+ SDLoc dl(N);
+ unsigned AddrOpIdx = isUpdating ? 1 : 2;
+ unsigned Vec0Idx = 3;
+
+ SDValue Chain = N->getOperand(0);
+ unsigned Lane =
+ cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
+ EVT VT = N->getOperand(Vec0Idx).getValueType();
+ bool is64BitVector = VT.is64BitVector();
+ EVT VT64; // 64-bit Vector Type
+
+ if (is64BitVector) {
+ VT64 = VT;
+ VT = EVT::getVectorVT(*CurDAG->getContext(), VT.getVectorElementType(),
+ VT.getVectorNumElements() * 2);
+ }
+
+ unsigned OpcodeIndex;
+ switch (VT.getScalarType().getSizeInBits()) {
+ case 8: OpcodeIndex = 0; break;
+ case 16: OpcodeIndex = 1; break;
+ case 32: OpcodeIndex = 2; break;
+ case 64: OpcodeIndex = 3; break;
+ default: llvm_unreachable("unhandled vector lane load/store type");
+ }
+ unsigned Opc = Opcodes[OpcodeIndex];
+
+ SmallVector<EVT, 3> ResTys;
+ if (IsLoad) {
+ // Push back the type of return super register
+ if (NumVecs == 3)
+ ResTys.push_back(MVT::Untyped);
+ else {
+ EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
+ is64BitVector ? NumVecs : NumVecs * 2);
+ ResTys.push_back(ResTy);
+ }
+ }
+ if (isUpdating)
+ ResTys.push_back(MVT::i64); // Type of the updated register
+ ResTys.push_back(MVT::Other); // Type of Chain
+ SmallVector<SDValue, 5> Ops;
+ Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
+ if (isUpdating) {
+ SDValue Inc = N->getOperand(AddrOpIdx + 1);
+ if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
+ Opc = getVLDSTRegisterUpdateOpcode(Opc);
+ Ops.push_back(Inc);
+ }
+
+ SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
+ N->op_begin() + Vec0Idx + NumVecs);
+ if (is64BitVector)
+ for (unsigned i = 0; i < Regs.size(); i++)
+ Regs[i] = getTargetSubregToReg(AArch64::sub_64, dl, VT, VT64, Regs[i]);
+ SDValue SuperReg = createQTuple(Regs);
+
+ Ops.push_back(SuperReg); // Source Reg
+ SDValue LaneValue = CurDAG->getTargetConstant(Lane, MVT::i32);
+ Ops.push_back(LaneValue);
+ Ops.push_back(Chain); // Push back the Chain
+
+ SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
+ if (!IsLoad)
+ return VLdLn;
+
+ // Extract the subregisters.
+ SuperReg = SDValue(VLdLn, 0);
+ unsigned Sub0 = AArch64::qsub_0;
+ // Update uses of each register in the super register
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
+ SDValue SUB0 = CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg);
+ if (is64BitVector) {
+ SUB0 = CurDAG->getTargetExtractSubreg(AArch64::sub_64, dl, VT64, SUB0);
+ }
+ ReplaceUses(SDValue(N, Vec), SUB0);
+ }
+ ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
+ if (isUpdating)
+ ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
+ return NULL;
+}
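// --- Editorial sketch (not part of this patch): SelectVLDSTLane handles a
// 64-bit vector by doubling its element count to form the 128-bit type the
// instruction operates on, then extracting the low 64-bit half afterwards.
// Standalone model of the type arithmetic, hypothetical names:
struct VectorShape { unsigned ElemBits; unsigned NumElems; };
static VectorShape widenTo128Bit(VectorShape V) {
  if (V.ElemBits * V.NumElems == 64) // e.g. 2 x i32 -> 4 x i32
    V.NumElems *= 2;
  return V;
}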
+
+unsigned AArch64DAGToDAGISel::getTBLOpc(bool IsExt, bool Is64Bit,
+ unsigned NumOfVec) {
+ assert(NumOfVec >= 1 && NumOfVec <= 4 && "TBL/TBX NumOfVec out-of-range");
+
+ unsigned Opc = 0;
+ switch (NumOfVec) {
+ default:
+ break;
+ case 1:
+ if (IsExt)
+ Opc = Is64Bit ? AArch64::TBX1_8b : AArch64::TBX1_16b;
+ else
+ Opc = Is64Bit ? AArch64::TBL1_8b : AArch64::TBL1_16b;
+ break;
+ case 2:
+ if (IsExt)
+ Opc = Is64Bit ? AArch64::TBX2_8b : AArch64::TBX2_16b;
+ else
+ Opc = Is64Bit ? AArch64::TBL2_8b : AArch64::TBL2_16b;
+ break;
+ case 3:
+ if (IsExt)
+ Opc = Is64Bit ? AArch64::TBX3_8b : AArch64::TBX3_16b;
+ else
+ Opc = Is64Bit ? AArch64::TBL3_8b : AArch64::TBL3_16b;
+ break;
+ case 4:
+ if (IsExt)
+ Opc = Is64Bit ? AArch64::TBX4_8b : AArch64::TBX4_16b;
+ else
+ Opc = Is64Bit ? AArch64::TBL4_8b : AArch64::TBL4_16b;
+ break;
+ }
+
+ return Opc;
+}
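// --- Editorial sketch (not part of this patch): getTBLOpc picks across a
// simple cross product: TBL vs TBX (IsExt), 1-4 table registers (NumOfVec),
// and the _8b form for a 64-bit result vs _16b for 128-bit (Is64Bit).
// Standalone model of the mnemonic it selects, hypothetical name:
#include <string> // for the sketch only
static std::string tblMnemonic(bool IsExt, bool Is64Bit, unsigned NumOfVec) {
  std::string Name = IsExt ? "TBX" : "TBL";
  Name += char('0' + NumOfVec);      // TBL1..TBL4 / TBX1..TBX4
  Name += Is64Bit ? "_8b" : "_16b";  // result width selects the suffix
  return Name;
}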
+
+SDNode *AArch64DAGToDAGISel::SelectVTBL(SDNode *N, unsigned NumVecs,
+ bool IsExt) {
+ assert(NumVecs >= 1 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
+ SDLoc dl(N);
+
+ // The vectors forming the lookup table for vtbl/vtbx must be 128-bit
+ unsigned Vec0Idx = IsExt ? 2 : 1;
+ assert(!N->getOperand(Vec0Idx + 0).getValueType().is64BitVector() &&
+ "The element of lookup table for vtbl and vtbx must be 128-bit");
+
+ // Check whether the return value type is a 64-bit vector
+ EVT ResVT = N->getValueType(0);
+ bool is64BitRes = ResVT.is64BitVector();
+
+ // Create new SDValue for vector list
+ SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
+ N->op_begin() + Vec0Idx + NumVecs);
+ SDValue TblReg = createQTuple(Regs);
+ unsigned Opc = getTBLOpc(IsExt, is64BitRes, NumVecs);
+
+ SmallVector<SDValue, 3> Ops;
+ if (IsExt)
+ Ops.push_back(N->getOperand(1));
+ Ops.push_back(TblReg);
+ Ops.push_back(N->getOperand(Vec0Idx + NumVecs));
+ return CurDAG->getMachineNode(Opc, dl, ResVT, Ops);
+}
+
SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
// Dump information about the Node being selected
DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n");
if (Node->isMachineOpcode()) {
DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
+ Node->setNodeId(-1);
return NULL;
}
@@ -473,7 +1108,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
AArch64::ATOMIC_CMP_SWAP_I64);
case ISD::FrameIndex: {
int FI = cast<FrameIndexSDNode>(Node)->getIndex();
- EVT PtrTy = TLI.getPointerTy();
+ EVT PtrTy = getTargetLowering()->getPointerTy();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy);
return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy,
TFI, CurDAG->getTargetConstant(0, PtrTy));
@@ -497,7 +1132,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type");
uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR;
ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
- Node->getDebugLoc(),
+ SDLoc(Node),
Register, Ty).getNode();
}
@@ -534,6 +1169,399 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
Node = ResNode;
break;
}
+ case AArch64ISD::NEON_LD1_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD1WB_8B_fixed, AArch64::LD1WB_4H_fixed,
+ AArch64::LD1WB_2S_fixed, AArch64::LD1WB_1D_fixed,
+ AArch64::LD1WB_16B_fixed, AArch64::LD1WB_8H_fixed,
+ AArch64::LD1WB_4S_fixed, AArch64::LD1WB_2D_fixed
+ };
+ return SelectVLD(Node, true, 1, Opcodes);
+ }
+ case AArch64ISD::NEON_LD2_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD2WB_8B_fixed, AArch64::LD2WB_4H_fixed,
+ AArch64::LD2WB_2S_fixed, AArch64::LD1x2WB_1D_fixed,
+ AArch64::LD2WB_16B_fixed, AArch64::LD2WB_8H_fixed,
+ AArch64::LD2WB_4S_fixed, AArch64::LD2WB_2D_fixed
+ };
+ return SelectVLD(Node, true, 2, Opcodes);
+ }
+ case AArch64ISD::NEON_LD3_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD3WB_8B_fixed, AArch64::LD3WB_4H_fixed,
+ AArch64::LD3WB_2S_fixed, AArch64::LD1x3WB_1D_fixed,
+ AArch64::LD3WB_16B_fixed, AArch64::LD3WB_8H_fixed,
+ AArch64::LD3WB_4S_fixed, AArch64::LD3WB_2D_fixed
+ };
+ return SelectVLD(Node, true, 3, Opcodes);
+ }
+ case AArch64ISD::NEON_LD4_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD4WB_8B_fixed, AArch64::LD4WB_4H_fixed,
+ AArch64::LD4WB_2S_fixed, AArch64::LD1x4WB_1D_fixed,
+ AArch64::LD4WB_16B_fixed, AArch64::LD4WB_8H_fixed,
+ AArch64::LD4WB_4S_fixed, AArch64::LD4WB_2D_fixed
+ };
+ return SelectVLD(Node, true, 4, Opcodes);
+ }
+ case AArch64ISD::NEON_LD1x2_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD1x2WB_8B_fixed, AArch64::LD1x2WB_4H_fixed,
+ AArch64::LD1x2WB_2S_fixed, AArch64::LD1x2WB_1D_fixed,
+ AArch64::LD1x2WB_16B_fixed, AArch64::LD1x2WB_8H_fixed,
+ AArch64::LD1x2WB_4S_fixed, AArch64::LD1x2WB_2D_fixed
+ };
+ return SelectVLD(Node, true, 2, Opcodes);
+ }
+ case AArch64ISD::NEON_LD1x3_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD1x3WB_8B_fixed, AArch64::LD1x3WB_4H_fixed,
+ AArch64::LD1x3WB_2S_fixed, AArch64::LD1x3WB_1D_fixed,
+ AArch64::LD1x3WB_16B_fixed, AArch64::LD1x3WB_8H_fixed,
+ AArch64::LD1x3WB_4S_fixed, AArch64::LD1x3WB_2D_fixed
+ };
+ return SelectVLD(Node, true, 3, Opcodes);
+ }
+ case AArch64ISD::NEON_LD1x4_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD1x4WB_8B_fixed, AArch64::LD1x4WB_4H_fixed,
+ AArch64::LD1x4WB_2S_fixed, AArch64::LD1x4WB_1D_fixed,
+ AArch64::LD1x4WB_16B_fixed, AArch64::LD1x4WB_8H_fixed,
+ AArch64::LD1x4WB_4S_fixed, AArch64::LD1x4WB_2D_fixed
+ };
+ return SelectVLD(Node, true, 4, Opcodes);
+ }
+ case AArch64ISD::NEON_ST1_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST1WB_8B_fixed, AArch64::ST1WB_4H_fixed,
+ AArch64::ST1WB_2S_fixed, AArch64::ST1WB_1D_fixed,
+ AArch64::ST1WB_16B_fixed, AArch64::ST1WB_8H_fixed,
+ AArch64::ST1WB_4S_fixed, AArch64::ST1WB_2D_fixed
+ };
+ return SelectVST(Node, true, 1, Opcodes);
+ }
+ case AArch64ISD::NEON_ST2_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST2WB_8B_fixed, AArch64::ST2WB_4H_fixed,
+ AArch64::ST2WB_2S_fixed, AArch64::ST1x2WB_1D_fixed,
+ AArch64::ST2WB_16B_fixed, AArch64::ST2WB_8H_fixed,
+ AArch64::ST2WB_4S_fixed, AArch64::ST2WB_2D_fixed
+ };
+ return SelectVST(Node, true, 2, Opcodes);
+ }
+ case AArch64ISD::NEON_ST3_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST3WB_8B_fixed, AArch64::ST3WB_4H_fixed,
+ AArch64::ST3WB_2S_fixed, AArch64::ST1x3WB_1D_fixed,
+ AArch64::ST3WB_16B_fixed, AArch64::ST3WB_8H_fixed,
+ AArch64::ST3WB_4S_fixed, AArch64::ST3WB_2D_fixed
+ };
+ return SelectVST(Node, true, 3, Opcodes);
+ }
+ case AArch64ISD::NEON_ST4_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST4WB_8B_fixed, AArch64::ST4WB_4H_fixed,
+ AArch64::ST4WB_2S_fixed, AArch64::ST1x4WB_1D_fixed,
+ AArch64::ST4WB_16B_fixed, AArch64::ST4WB_8H_fixed,
+ AArch64::ST4WB_4S_fixed, AArch64::ST4WB_2D_fixed
+ };
+ return SelectVST(Node, true, 4, Opcodes);
+ }
+ case AArch64ISD::NEON_LD2DUP: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD2R_8B, AArch64::LD2R_4H, AArch64::LD2R_2S,
+ AArch64::LD2R_1D, AArch64::LD2R_16B, AArch64::LD2R_8H,
+ AArch64::LD2R_4S, AArch64::LD2R_2D
+ };
+ return SelectVLDDup(Node, false, 2, Opcodes);
+ }
+ case AArch64ISD::NEON_LD3DUP: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD3R_8B, AArch64::LD3R_4H, AArch64::LD3R_2S,
+ AArch64::LD3R_1D, AArch64::LD3R_16B, AArch64::LD3R_8H,
+ AArch64::LD3R_4S, AArch64::LD3R_2D
+ };
+ return SelectVLDDup(Node, false, 3, Opcodes);
+ }
+ case AArch64ISD::NEON_LD4DUP: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD4R_8B, AArch64::LD4R_4H, AArch64::LD4R_2S,
+ AArch64::LD4R_1D, AArch64::LD4R_16B, AArch64::LD4R_8H,
+ AArch64::LD4R_4S, AArch64::LD4R_2D
+ };
+ return SelectVLDDup(Node, false, 4, Opcodes);
+ }
+ case AArch64ISD::NEON_LD2DUP_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD2R_WB_8B_fixed, AArch64::LD2R_WB_4H_fixed,
+ AArch64::LD2R_WB_2S_fixed, AArch64::LD2R_WB_1D_fixed,
+ AArch64::LD2R_WB_16B_fixed, AArch64::LD2R_WB_8H_fixed,
+ AArch64::LD2R_WB_4S_fixed, AArch64::LD2R_WB_2D_fixed
+ };
+ return SelectVLDDup(Node, true, 2, Opcodes);
+ }
+ case AArch64ISD::NEON_LD3DUP_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD3R_WB_8B_fixed, AArch64::LD3R_WB_4H_fixed,
+ AArch64::LD3R_WB_2S_fixed, AArch64::LD3R_WB_1D_fixed,
+ AArch64::LD3R_WB_16B_fixed, AArch64::LD3R_WB_8H_fixed,
+ AArch64::LD3R_WB_4S_fixed, AArch64::LD3R_WB_2D_fixed
+ };
+ return SelectVLDDup(Node, true, 3, Opcodes);
+ }
+ case AArch64ISD::NEON_LD4DUP_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD4R_WB_8B_fixed, AArch64::LD4R_WB_4H_fixed,
+ AArch64::LD4R_WB_2S_fixed, AArch64::LD4R_WB_1D_fixed,
+ AArch64::LD4R_WB_16B_fixed, AArch64::LD4R_WB_8H_fixed,
+ AArch64::LD4R_WB_4S_fixed, AArch64::LD4R_WB_2D_fixed
+ };
+ return SelectVLDDup(Node, true, 4, Opcodes);
+ }
+ case AArch64ISD::NEON_LD2LN_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD2LN_WB_B_fixed, AArch64::LD2LN_WB_H_fixed,
+ AArch64::LD2LN_WB_S_fixed, AArch64::LD2LN_WB_D_fixed
+ };
+ return SelectVLDSTLane(Node, true, true, 2, Opcodes);
+ }
+ case AArch64ISD::NEON_LD3LN_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD3LN_WB_B_fixed, AArch64::LD3LN_WB_H_fixed,
+ AArch64::LD3LN_WB_S_fixed, AArch64::LD3LN_WB_D_fixed
+ };
+ return SelectVLDSTLane(Node, true, true, 3, Opcodes);
+ }
+ case AArch64ISD::NEON_LD4LN_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD4LN_WB_B_fixed, AArch64::LD4LN_WB_H_fixed,
+ AArch64::LD4LN_WB_S_fixed, AArch64::LD4LN_WB_D_fixed
+ };
+ return SelectVLDSTLane(Node, true, true, 4, Opcodes);
+ }
+ case AArch64ISD::NEON_ST2LN_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST2LN_WB_B_fixed, AArch64::ST2LN_WB_H_fixed,
+ AArch64::ST2LN_WB_S_fixed, AArch64::ST2LN_WB_D_fixed
+ };
+ return SelectVLDSTLane(Node, false, true, 2, Opcodes);
+ }
+ case AArch64ISD::NEON_ST3LN_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST3LN_WB_B_fixed, AArch64::ST3LN_WB_H_fixed,
+ AArch64::ST3LN_WB_S_fixed, AArch64::ST3LN_WB_D_fixed
+ };
+ return SelectVLDSTLane(Node, false, true, 3, Opcodes);
+ }
+ case AArch64ISD::NEON_ST4LN_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST4LN_WB_B_fixed, AArch64::ST4LN_WB_H_fixed,
+ AArch64::ST4LN_WB_S_fixed, AArch64::ST4LN_WB_D_fixed
+ };
+ return SelectVLDSTLane(Node, false, true, 4, Opcodes);
+ }
+ case AArch64ISD::NEON_ST1x2_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST1x2WB_8B_fixed, AArch64::ST1x2WB_4H_fixed,
+ AArch64::ST1x2WB_2S_fixed, AArch64::ST1x2WB_1D_fixed,
+ AArch64::ST1x2WB_16B_fixed, AArch64::ST1x2WB_8H_fixed,
+ AArch64::ST1x2WB_4S_fixed, AArch64::ST1x2WB_2D_fixed
+ };
+ return SelectVST(Node, true, 2, Opcodes);
+ }
+ case AArch64ISD::NEON_ST1x3_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST1x3WB_8B_fixed, AArch64::ST1x3WB_4H_fixed,
+ AArch64::ST1x3WB_2S_fixed, AArch64::ST1x3WB_1D_fixed,
+ AArch64::ST1x3WB_16B_fixed, AArch64::ST1x3WB_8H_fixed,
+ AArch64::ST1x3WB_4S_fixed, AArch64::ST1x3WB_2D_fixed
+ };
+ return SelectVST(Node, true, 3, Opcodes);
+ }
+ case AArch64ISD::NEON_ST1x4_UPD: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST1x4WB_8B_fixed, AArch64::ST1x4WB_4H_fixed,
+ AArch64::ST1x4WB_2S_fixed, AArch64::ST1x4WB_1D_fixed,
+ AArch64::ST1x4WB_16B_fixed, AArch64::ST1x4WB_8H_fixed,
+ AArch64::ST1x4WB_4S_fixed, AArch64::ST1x4WB_2D_fixed
+ };
+ return SelectVST(Node, true, 4, Opcodes);
+ }
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
+ bool IsExt = false;
+ switch (IntNo) {
+ default:
+ break;
+ case Intrinsic::aarch64_neon_vtbx1:
+ IsExt = true;
+ case Intrinsic::aarch64_neon_vtbl1:
+ return SelectVTBL(Node, 1, IsExt);
+ case Intrinsic::aarch64_neon_vtbx2:
+ IsExt = true;
+ case Intrinsic::aarch64_neon_vtbl2:
+ return SelectVTBL(Node, 2, IsExt);
+ case Intrinsic::aarch64_neon_vtbx3:
+ IsExt = true;
+ case Intrinsic::aarch64_neon_vtbl3:
+ return SelectVTBL(Node, 3, IsExt);
+ case Intrinsic::aarch64_neon_vtbx4:
+ IsExt = true;
+ case Intrinsic::aarch64_neon_vtbl4:
+ return SelectVTBL(Node, 4, IsExt);
+ }
+ break;
+ }
+ case ISD::INTRINSIC_VOID:
+ case ISD::INTRINSIC_W_CHAIN: {
+ unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ default:
+ break;
+ case Intrinsic::arm_neon_vld1: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD1_8B, AArch64::LD1_4H, AArch64::LD1_2S, AArch64::LD1_1D,
+ AArch64::LD1_16B, AArch64::LD1_8H, AArch64::LD1_4S, AArch64::LD1_2D
+ };
+ return SelectVLD(Node, false, 1, Opcodes);
+ }
+ case Intrinsic::arm_neon_vld2: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD2_8B, AArch64::LD2_4H, AArch64::LD2_2S, AArch64::LD1x2_1D,
+ AArch64::LD2_16B, AArch64::LD2_8H, AArch64::LD2_4S, AArch64::LD2_2D
+ };
+ return SelectVLD(Node, false, 2, Opcodes);
+ }
+ case Intrinsic::arm_neon_vld3: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD3_8B, AArch64::LD3_4H, AArch64::LD3_2S, AArch64::LD1x3_1D,
+ AArch64::LD3_16B, AArch64::LD3_8H, AArch64::LD3_4S, AArch64::LD3_2D
+ };
+ return SelectVLD(Node, false, 3, Opcodes);
+ }
+ case Intrinsic::arm_neon_vld4: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD4_8B, AArch64::LD4_4H, AArch64::LD4_2S, AArch64::LD1x4_1D,
+ AArch64::LD4_16B, AArch64::LD4_8H, AArch64::LD4_4S, AArch64::LD4_2D
+ };
+ return SelectVLD(Node, false, 4, Opcodes);
+ }
+ case Intrinsic::aarch64_neon_vld1x2: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD1x2_8B, AArch64::LD1x2_4H, AArch64::LD1x2_2S,
+ AArch64::LD1x2_1D, AArch64::LD1x2_16B, AArch64::LD1x2_8H,
+ AArch64::LD1x2_4S, AArch64::LD1x2_2D
+ };
+ return SelectVLD(Node, false, 2, Opcodes);
+ }
+ case Intrinsic::aarch64_neon_vld1x3: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD1x3_8B, AArch64::LD1x3_4H, AArch64::LD1x3_2S,
+ AArch64::LD1x3_1D, AArch64::LD1x3_16B, AArch64::LD1x3_8H,
+ AArch64::LD1x3_4S, AArch64::LD1x3_2D
+ };
+ return SelectVLD(Node, false, 3, Opcodes);
+ }
+ case Intrinsic::aarch64_neon_vld1x4: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD1x4_8B, AArch64::LD1x4_4H, AArch64::LD1x4_2S,
+ AArch64::LD1x4_1D, AArch64::LD1x4_16B, AArch64::LD1x4_8H,
+ AArch64::LD1x4_4S, AArch64::LD1x4_2D
+ };
+ return SelectVLD(Node, false, 4, Opcodes);
+ }
+ case Intrinsic::arm_neon_vst1: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST1_8B, AArch64::ST1_4H, AArch64::ST1_2S, AArch64::ST1_1D,
+ AArch64::ST1_16B, AArch64::ST1_8H, AArch64::ST1_4S, AArch64::ST1_2D
+ };
+ return SelectVST(Node, false, 1, Opcodes);
+ }
+ case Intrinsic::arm_neon_vst2: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST2_8B, AArch64::ST2_4H, AArch64::ST2_2S, AArch64::ST1x2_1D,
+ AArch64::ST2_16B, AArch64::ST2_8H, AArch64::ST2_4S, AArch64::ST2_2D
+ };
+ return SelectVST(Node, false, 2, Opcodes);
+ }
+ case Intrinsic::arm_neon_vst3: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST3_8B, AArch64::ST3_4H, AArch64::ST3_2S, AArch64::ST1x3_1D,
+ AArch64::ST3_16B, AArch64::ST3_8H, AArch64::ST3_4S, AArch64::ST3_2D
+ };
+ return SelectVST(Node, false, 3, Opcodes);
+ }
+ case Intrinsic::arm_neon_vst4: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST4_8B, AArch64::ST4_4H, AArch64::ST4_2S, AArch64::ST1x4_1D,
+ AArch64::ST4_16B, AArch64::ST4_8H, AArch64::ST4_4S, AArch64::ST4_2D
+ };
+ return SelectVST(Node, false, 4, Opcodes);
+ }
+ case Intrinsic::aarch64_neon_vst1x2: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST1x2_8B, AArch64::ST1x2_4H, AArch64::ST1x2_2S,
+ AArch64::ST1x2_1D, AArch64::ST1x2_16B, AArch64::ST1x2_8H,
+ AArch64::ST1x2_4S, AArch64::ST1x2_2D
+ };
+ return SelectVST(Node, false, 2, Opcodes);
+ }
+ case Intrinsic::aarch64_neon_vst1x3: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST1x3_8B, AArch64::ST1x3_4H, AArch64::ST1x3_2S,
+ AArch64::ST1x3_1D, AArch64::ST1x3_16B, AArch64::ST1x3_8H,
+ AArch64::ST1x3_4S, AArch64::ST1x3_2D
+ };
+ return SelectVST(Node, false, 3, Opcodes);
+ }
+ case Intrinsic::aarch64_neon_vst1x4: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST1x4_8B, AArch64::ST1x4_4H, AArch64::ST1x4_2S,
+ AArch64::ST1x4_1D, AArch64::ST1x4_16B, AArch64::ST1x4_8H,
+ AArch64::ST1x4_4S, AArch64::ST1x4_2D
+ };
+ return SelectVST(Node, false, 4, Opcodes);
+ }
+ case Intrinsic::arm_neon_vld2lane: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD2LN_B, AArch64::LD2LN_H, AArch64::LD2LN_S, AArch64::LD2LN_D
+ };
+ return SelectVLDSTLane(Node, true, false, 2, Opcodes);
+ }
+ case Intrinsic::arm_neon_vld3lane: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD3LN_B, AArch64::LD3LN_H, AArch64::LD3LN_S, AArch64::LD3LN_D
+ };
+ return SelectVLDSTLane(Node, true, false, 3, Opcodes);
+ }
+ case Intrinsic::arm_neon_vld4lane: {
+ static const uint16_t Opcodes[] = {
+ AArch64::LD4LN_B, AArch64::LD4LN_H, AArch64::LD4LN_S, AArch64::LD4LN_D
+ };
+ return SelectVLDSTLane(Node, true, false, 4, Opcodes);
+ }
+ case Intrinsic::arm_neon_vst2lane: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST2LN_B, AArch64::ST2LN_H, AArch64::ST2LN_S, AArch64::ST2LN_D
+ };
+ return SelectVLDSTLane(Node, false, false, 2, Opcodes);
+ }
+ case Intrinsic::arm_neon_vst3lane: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST3LN_B, AArch64::ST3LN_H, AArch64::ST3LN_S, AArch64::ST3LN_D
+ };
+ return SelectVLDSTLane(Node, false, false, 3, Opcodes);
+ }
+ case Intrinsic::arm_neon_vst4lane: {
+ static const uint16_t Opcodes[] = {
+ AArch64::ST4LN_B, AArch64::ST4LN_H, AArch64::ST4LN_S, AArch64::ST4LN_D
+ };
+ return SelectVLDSTLane(Node, false, false, 4, Opcodes);
+ }
+ } // End of switch IntNo
+ break;
+ } // End of case ISD::INTRINSIC_VOID and ISD::INTRINSIC_W_CHAIN
default:
break; // Let generic code handle it
}
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 56f675183cb4..4fdb667b9539 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -39,12 +39,10 @@ static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
llvm_unreachable("unknown subtarget type");
}
-
AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
- : TargetLowering(TM, createTLOF(TM)),
- Subtarget(&TM.getSubtarget<AArch64Subtarget>()),
- RegInfo(TM.getRegisterInfo()),
- Itins(TM.getInstrItineraryData()) {
+ : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {
+
+ const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();
// SIMD compares set the entire lane's bits to 1
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
@@ -52,10 +50,34 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
// Scalar register <-> type mapping
addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);
- addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
- addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
- addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
- addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
+
+ if (Subtarget->hasFPARMv8()) {
+ addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
+ addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
+ addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
+ addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
+ }
+
+ if (Subtarget->hasNEON()) {
+ // And the vectors
+ addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass);
+ addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
+ addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
+ addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
+ addRegisterClass(MVT::v1f32, &AArch64::FPR32RegClass);
+ addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
+ addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
+ addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
+ addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
+ addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
+ addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
+ addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
+ addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
+ addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
+ addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
+ addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
+ addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
+ }
computeRegisterProperties();
@@ -64,6 +86,12 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
setTargetDAGCombine(ISD::AND);
setTargetDAGCombine(ISD::SRA);
+ setTargetDAGCombine(ISD::SRL);
+ setTargetDAGCombine(ISD::SHL);
+
+ setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
+ setTargetDAGCombine(ISD::INTRINSIC_VOID);
+ setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
// AArch64 does not have i1 loads, or much of anything for i1 really.
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@@ -253,14 +281,97 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
-
setExceptionPointerRegister(AArch64::X0);
setExceptionSelectorRegister(AArch64::X1);
+
+ if (Subtarget->hasNEON()) {
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v1f32, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
+
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
+
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
+ setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
+ setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
+ setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
+ setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
+ setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
+ setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
+ setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
+ setOperationAction(ISD::SETCC, MVT::v1f32, Custom);
+ setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
+ setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
+ setOperationAction(ISD::SETCC, MVT::v1f64, Custom);
+ setOperationAction(ISD::SETCC, MVT::v2f64, Custom);
+
+ setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::v1f64, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::FCEIL, MVT::v2f32, Legal);
+ setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
+ setOperationAction(ISD::FCEIL, MVT::v1f64, Legal);
+ setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::v1f64, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::FRINT, MVT::v2f32, Legal);
+ setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
+ setOperationAction(ISD::FRINT, MVT::v1f64, Legal);
+ setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::FROUND, MVT::v2f32, Legal);
+ setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
+ setOperationAction(ISD::FROUND, MVT::v1f64, Legal);
+ setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
+ }
}
-EVT AArch64TargetLowering::getSetCCResultType(EVT VT) const {
+EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
// It's reasonably important that this value matches the "natural" legal
// promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
// in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
@@ -271,16 +382,16 @@ EVT AArch64TargetLowering::getSetCCResultType(EVT VT) const {
static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
unsigned &LdrOpc,
unsigned &StrOpc) {
- static unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
- AArch64::LDXR_word, AArch64::LDXR_dword};
- static unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
- AArch64::LDAXR_word, AArch64::LDAXR_dword};
- static unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
- AArch64::STXR_word, AArch64::STXR_dword};
- static unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword,
- AArch64::STLXR_word, AArch64::STLXR_dword};
-
- unsigned *LoadOps, *StoreOps;
+ static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
+ AArch64::LDXR_word, AArch64::LDXR_dword};
+ static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
+ AArch64::LDAXR_word, AArch64::LDAXR_dword};
+ static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
+ AArch64::STXR_word, AArch64::STXR_dword};
+ static const unsigned StoreRels[] = {AArch64::STLXR_byte,AArch64::STLXR_hword,
+ AArch64::STLXR_word, AArch64::STLXR_dword};
+
+ const unsigned *LoadOps, *StoreOps;
if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
LoadOps = LoadAcqs;
else
@@ -298,6 +409,29 @@ static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
StrOpc = StoreOps[Log2_32(Size)];
}
+// FIXME: AArch64::DTripleRegClass and AArch64::QTripleRegClass don't really
+// have a value type mapped; both are defined as MVT::untyped. Without knowing
+// the MVT type, MachineLICM::getRegisterClassIDAndCost cannot figure out the
+// register pressure correctly.
+std::pair<const TargetRegisterClass*, uint8_t>
+AArch64TargetLowering::findRepresentativeClass(MVT VT) const{
+ const TargetRegisterClass *RRC = 0;
+ uint8_t Cost = 1;
+ switch (VT.SimpleTy) {
+ default:
+ return TargetLowering::findRepresentativeClass(VT);
+ case MVT::v4i64:
+ RRC = &AArch64::QPairRegClass;
+ Cost = 2;
+ break;
+ case MVT::v8i64:
+ RRC = &AArch64::QQuadRegClass;
+ Cost = 4;
+ break;
+ }
+ return std::make_pair(RRC, Cost);
+}
+
MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned Size,
@@ -623,6 +757,12 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
MBB->addSuccessor(TrueBB);
MBB->addSuccessor(EndBB);
+ if (!NZCVKilled) {
+ // NZCV is live-through TrueBB.
+ TrueBB->addLiveIn(AArch64::NZCV);
+ EndBB->addLiveIn(AArch64::NZCV);
+ }
+
// IfTrue:
// str qIFTRUE, [sp]
BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
@@ -637,8 +777,6 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
// Done:
// ldr qDEST, [sp]
// [... rest of incoming MBB ...]
- if (!NZCVKilled)
- EndBB->addLiveIn(AArch64::NZCV);
MachineInstr *StartOfEnd = EndBB->begin();
BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
.addFrameIndex(ScratchFI)
@@ -784,7 +922,102 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge";
case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall";
- default: return NULL;
+ case AArch64ISD::NEON_BSL:
+ return "AArch64ISD::NEON_BSL";
+ case AArch64ISD::NEON_MOVIMM:
+ return "AArch64ISD::NEON_MOVIMM";
+ case AArch64ISD::NEON_MVNIMM:
+ return "AArch64ISD::NEON_MVNIMM";
+ case AArch64ISD::NEON_FMOVIMM:
+ return "AArch64ISD::NEON_FMOVIMM";
+ case AArch64ISD::NEON_CMP:
+ return "AArch64ISD::NEON_CMP";
+ case AArch64ISD::NEON_CMPZ:
+ return "AArch64ISD::NEON_CMPZ";
+ case AArch64ISD::NEON_TST:
+ return "AArch64ISD::NEON_TST";
+ case AArch64ISD::NEON_QSHLs:
+ return "AArch64ISD::NEON_QSHLs";
+ case AArch64ISD::NEON_QSHLu:
+ return "AArch64ISD::NEON_QSHLu";
+ case AArch64ISD::NEON_VDUP:
+ return "AArch64ISD::NEON_VDUP";
+ case AArch64ISD::NEON_VDUPLANE:
+ return "AArch64ISD::NEON_VDUPLANE";
+ case AArch64ISD::NEON_REV16:
+ return "AArch64ISD::NEON_REV16";
+ case AArch64ISD::NEON_REV32:
+ return "AArch64ISD::NEON_REV32";
+ case AArch64ISD::NEON_REV64:
+ return "AArch64ISD::NEON_REV64";
+ case AArch64ISD::NEON_UZP1:
+ return "AArch64ISD::NEON_UZP1";
+ case AArch64ISD::NEON_UZP2:
+ return "AArch64ISD::NEON_UZP2";
+ case AArch64ISD::NEON_ZIP1:
+ return "AArch64ISD::NEON_ZIP1";
+ case AArch64ISD::NEON_ZIP2:
+ return "AArch64ISD::NEON_ZIP2";
+ case AArch64ISD::NEON_TRN1:
+ return "AArch64ISD::NEON_TRN1";
+ case AArch64ISD::NEON_TRN2:
+ return "AArch64ISD::NEON_TRN2";
+ case AArch64ISD::NEON_LD1_UPD:
+ return "AArch64ISD::NEON_LD1_UPD";
+ case AArch64ISD::NEON_LD2_UPD:
+ return "AArch64ISD::NEON_LD2_UPD";
+ case AArch64ISD::NEON_LD3_UPD:
+ return "AArch64ISD::NEON_LD3_UPD";
+ case AArch64ISD::NEON_LD4_UPD:
+ return "AArch64ISD::NEON_LD4_UPD";
+ case AArch64ISD::NEON_ST1_UPD:
+ return "AArch64ISD::NEON_ST1_UPD";
+ case AArch64ISD::NEON_ST2_UPD:
+ return "AArch64ISD::NEON_ST2_UPD";
+ case AArch64ISD::NEON_ST3_UPD:
+ return "AArch64ISD::NEON_ST3_UPD";
+ case AArch64ISD::NEON_ST4_UPD:
+ return "AArch64ISD::NEON_ST4_UPD";
+ case AArch64ISD::NEON_LD1x2_UPD:
+ return "AArch64ISD::NEON_LD1x2_UPD";
+ case AArch64ISD::NEON_LD1x3_UPD:
+ return "AArch64ISD::NEON_LD1x3_UPD";
+ case AArch64ISD::NEON_LD1x4_UPD:
+ return "AArch64ISD::NEON_LD1x4_UPD";
+ case AArch64ISD::NEON_ST1x2_UPD:
+ return "AArch64ISD::NEON_ST1x2_UPD";
+ case AArch64ISD::NEON_ST1x3_UPD:
+ return "AArch64ISD::NEON_ST1x3_UPD";
+ case AArch64ISD::NEON_ST1x4_UPD:
+ return "AArch64ISD::NEON_ST1x4_UPD";
+ case AArch64ISD::NEON_LD2DUP:
+ return "AArch64ISD::NEON_LD2DUP";
+ case AArch64ISD::NEON_LD3DUP:
+ return "AArch64ISD::NEON_LD3DUP";
+ case AArch64ISD::NEON_LD4DUP:
+ return "AArch64ISD::NEON_LD4DUP";
+ case AArch64ISD::NEON_LD2DUP_UPD:
+ return "AArch64ISD::NEON_LD2DUP_UPD";
+ case AArch64ISD::NEON_LD3DUP_UPD:
+ return "AArch64ISD::NEON_LD3DUP_UPD";
+ case AArch64ISD::NEON_LD4DUP_UPD:
+ return "AArch64ISD::NEON_LD4DUP_UPD";
+ case AArch64ISD::NEON_LD2LN_UPD:
+ return "AArch64ISD::NEON_LD2LN_UPD";
+ case AArch64ISD::NEON_LD3LN_UPD:
+ return "AArch64ISD::NEON_LD3LN_UPD";
+ case AArch64ISD::NEON_LD4LN_UPD:
+ return "AArch64ISD::NEON_LD4LN_UPD";
+ case AArch64ISD::NEON_ST2LN_UPD:
+ return "AArch64ISD::NEON_ST2LN_UPD";
+ case AArch64ISD::NEON_ST3LN_UPD:
+ return "AArch64ISD::NEON_ST3LN_UPD";
+ case AArch64ISD::NEON_ST4LN_UPD:
+ return "AArch64ISD::NEON_ST4LN_UPD";
+ case AArch64ISD::NEON_VEXTRACT:
+ return "AArch64ISD::NEON_VEXTRACT";
+ default:
+ return NULL;
}
}
@@ -826,7 +1059,7 @@ CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
void
AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
- DebugLoc DL, SDValue &Chain) const {
+ SDLoc DL, SDValue &Chain) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
AArch64MachineFunctionInfo *FuncInfo
@@ -858,24 +1091,31 @@ AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
}
}
+ if (getSubtarget()->hasFPARMv8()) {
unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
int FPRIdx = 0;
- if (FPRSaveSize != 0) {
- FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);
-
- SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());
-
- for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
- unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
- &AArch64::FPR128RegClass);
- SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
- SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
- MachinePointerInfo::getStack(i * 16),
- false, false, 0);
- MemOps.push_back(Store);
- FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
- DAG.getConstant(16, getPointerTy()));
+ // According to the AArch64 Procedure Call Standard, section B.1/B.3, we
+ // can omit a register save area if we know we'll never use registers of
+ // that class.
+ if (FPRSaveSize != 0) {
+ FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);
+
+ SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());
+
+ for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
+ unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
+ &AArch64::FPR128RegClass);
+ SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
+ SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
+ MachinePointerInfo::getStack(i * 16),
+ false, false, 0);
+ MemOps.push_back(Store);
+ FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
+ DAG.getConstant(16, getPointerTy()));
+ }
}
+ FuncInfo->setVariadicFPRIdx(FPRIdx);
+ FuncInfo->setVariadicFPRSize(FPRSaveSize);
}
int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);
@@ -883,8 +1123,6 @@ AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
FuncInfo->setVariadicStackIdx(StackIdx);
FuncInfo->setVariadicGPRIdx(GPRIdx);
FuncInfo->setVariadicGPRSize(GPRSaveSize);
- FuncInfo->setVariadicFPRIdx(FPRIdx);
- FuncInfo->setVariadicFPRSize(FPRSaveSize);
if (!MemOps.empty()) {
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
@@ -897,7 +1135,7 @@ SDValue
AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
AArch64MachineFunctionInfo *FuncInfo
@@ -1012,7 +1250,7 @@ AArch64TargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+ SDLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location.
SmallVector<CCValAssign, 16> RVLocs;
@@ -1085,10 +1323,10 @@ SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &IsTailCall = CLI.IsTailCall;
@@ -1151,7 +1389,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
}
if (!IsSibCall)
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ dl);
SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
getPointerTy());
@@ -1282,7 +1521,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// in the correct location.
if (IsTailCall && !IsSibCall) {
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag);
+ DAG.getIntPtrConstant(0, true), InFlag, dl);
InFlag = Chain.getValue(1);
}
@@ -1336,7 +1575,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(CalleePopBytes, true),
- InFlag);
+ InFlag, dl);
InFlag = Chain.getValue(1);
}
@@ -1348,7 +1587,7 @@ SDValue
AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@@ -1537,7 +1776,7 @@ SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
}
// Build a tokenfactor for all the chains.
- return DAG.getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
+ return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
&ArgChains[0], ArgChains.size());
}
@@ -1570,7 +1809,7 @@ bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
ISD::CondCode CC, SDValue &A64cc,
- SelectionDAG &DAG, DebugLoc &dl) const {
+ SelectionDAG &DAG, SDLoc &dl) const {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
int64_t C = 0;
EVT VT = RHSC->getValueType(0);
@@ -1663,7 +1902,7 @@ static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC,
SDValue
AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT PtrVT = getPointerTy();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
@@ -1693,7 +1932,7 @@ AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
// (BRCOND chain, val, dest)
SDValue
AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue Chain = Op.getOperand(0);
SDValue TheBit = Op.getOperand(1);
SDValue DestBB = Op.getOperand(2);
@@ -1716,7 +1955,7 @@ AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
// (BR_CC chain, condcode, lhs, rhs, dest)
SDValue
AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue Chain = Op.getOperand(0);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
SDValue LHS = Op.getOperand(2);
@@ -1802,7 +2041,7 @@ AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
0, getLibcallCallingConv(Call), isTailCall,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Callee, Args, DAG, Op->getDebugLoc());
+ Callee, Args, DAG, SDLoc(Op));
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
if (!CallInfo.second.getNode())
@@ -1824,7 +2063,7 @@ AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
SDValue SrcVal = Op.getOperand(0);
return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
- /*isSigned*/ false, Op.getDebugLoc());
+ /*isSigned*/ false, SDLoc(Op)).first;
}
SDValue
@@ -1854,6 +2093,45 @@ AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
return LowerF128ToCall(Op, DAG, LC);
}
+SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
+                                               SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MFI->setReturnAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ SDLoc dl(Op);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
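+  // For Depth > 0, the return address is loaded from the frame record at the
+  // requested depth, 8 bytes past that frame's frame pointer.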
+ if (Depth) {
+ SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
+ SDValue Offset = DAG.getConstant(8, MVT::i64);
+ return DAG.getLoad(VT, dl, DAG.getEntryNode(),
+ DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
+ MachinePointerInfo(), false, false, false, 0);
+ }
+
+ // Return X30, which contains the return address. Mark it an implicit live-in.
+ unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64));
+ return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64);
+}
+
+SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG)
+ const {
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ SDLoc dl(Op);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned FrameReg = AArch64::X29;
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
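+  // Walk up the frame chain: each frame record starts with the saved frame
+  // pointer of the caller, so a load from the current frame address yields
+  // the next one up.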
+ while (Depth--)
+ FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
+ MachinePointerInfo(),
+ false, false, false, 0);
+ return FrameAddr;
+}
+
SDValue
AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
SelectionDAG &DAG) const {
@@ -1861,7 +2139,7 @@ AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
assert(getTargetMachine().getRelocationModel() == Reloc::Static);
EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GN->getGlobal();
@@ -1885,7 +2163,7 @@ AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
assert(getTargetMachine().getCodeModel() == CodeModel::Small);
EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GN->getGlobal();
unsigned Alignment = GV->getAlignment();
@@ -1927,7 +2205,7 @@ AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
}
unsigned char HiFixup, LoFixup;
- bool UseGOT = Subtarget->GVIsIndirectSymbol(GV, RelocM);
+ bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM);
if (UseGOT) {
HiFixup = AArch64II::MO_GOT;
@@ -1978,7 +2256,7 @@ AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
SDValue DescAddr,
- DebugLoc DL,
+ SDLoc DL,
SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy();
@@ -2023,7 +2301,7 @@ SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
SDValue
AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
- assert(Subtarget->isTargetELF() &&
+ assert(getSubtarget()->isTargetELF() &&
"TLS not implemented for non-ELF targets");
assert(getTargetMachine().getCodeModel() == CodeModel::Small
&& "TLS only supported in small memory model");
@@ -2033,7 +2311,7 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
SDValue TPOff;
EVT PtrVT = getPointerTy();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
const GlobalValue *GV = GA->getGlobal();
SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
@@ -2054,7 +2332,7 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
AArch64II::MO_TPREL_G0_NC);
TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
- DAG.getTargetConstant(0, MVT::i32)), 0);
+ DAG.getTargetConstant(1, MVT::i32)), 0);
TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
TPOff, LoVar,
DAG.getTargetConstant(0, MVT::i32)), 0);
@@ -2134,7 +2412,7 @@ AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
SDValue
AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
- DebugLoc dl = JT->getDebugLoc();
+ SDLoc dl(JT);
EVT PtrVT = getPointerTy();
// When compiling PIC, jump tables get put in the code section so a static
@@ -2161,7 +2439,7 @@ AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
SDValue
AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
SDValue IfTrue = Op.getOperand(2);
@@ -2217,7 +2495,7 @@ AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
// (SELECT testbit, iftrue, iffalse)
SDValue
AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue TheBit = Op.getOperand(0);
SDValue IfTrue = Op.getOperand(1);
SDValue IfFalse = Op.getOperand(2);
@@ -2236,15 +2514,225 @@ AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
DAG.getConstant(A64CC::NE, MVT::i32));
}
+static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ EVT VT = Op.getValueType();
+ bool Invert = false;
+ SDValue Op0, Op1;
+ unsigned Opcode;
+
+ if (LHS.getValueType().isInteger()) {
+
+ // Attempt to use Vector Integer Compare Mask Test instruction.
+ // TST = icmp ne (and (op0, op1), zero).
+ if (CC == ISD::SETNE) {
+ if (((LHS.getOpcode() == ISD::AND) &&
+ ISD::isBuildVectorAllZeros(RHS.getNode())) ||
+ ((RHS.getOpcode() == ISD::AND) &&
+ ISD::isBuildVectorAllZeros(LHS.getNode()))) {
+
+ SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS;
+ SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0));
+ SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1));
+ return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS);
+ }
+ }
+
+ // Attempt to use Vector Integer Compare Mask against Zero instr (Signed).
+ // Note: Compare against Zero does not support unsigned predicates.
+ if ((ISD::isBuildVectorAllZeros(RHS.getNode()) ||
+ ISD::isBuildVectorAllZeros(LHS.getNode())) &&
+ !isUnsignedIntSetCC(CC)) {
+
+ // If LHS is the zero value, swap operands and CondCode.
+ if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
+ CC = getSetCCSwappedOperands(CC);
+ Op0 = RHS;
+ } else
+ Op0 = LHS;
+
+ // Ensure valid CondCode for Compare Mask against Zero instruction:
+ // EQ, GE, GT, LE, LT.
+ if (ISD::SETNE == CC) {
+ Invert = true;
+ CC = ISD::SETEQ;
+ }
+
+ // Using constant type to differentiate integer and FP compares with zero.
+ Op1 = DAG.getConstant(0, MVT::i32);
+ Opcode = AArch64ISD::NEON_CMPZ;
+
+ } else {
+ // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned).
+ // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT.
+ bool Swap = false;
+ switch (CC) {
+ default:
+ llvm_unreachable("Illegal integer comparison.");
+ case ISD::SETEQ:
+ case ISD::SETGT:
+ case ISD::SETGE:
+ case ISD::SETUGT:
+ case ISD::SETUGE:
+ break;
+ case ISD::SETNE:
+ Invert = true;
+ CC = ISD::SETEQ;
+ break;
+ case ISD::SETULT:
+ case ISD::SETULE:
+ case ISD::SETLT:
+ case ISD::SETLE:
+ Swap = true;
+ CC = getSetCCSwappedOperands(CC);
+ }
+
+ if (Swap)
+ std::swap(LHS, RHS);
+
+ Opcode = AArch64ISD::NEON_CMP;
+ Op0 = LHS;
+ Op1 = RHS;
+ }
+
+ // Generate Compare Mask instr or Compare Mask against Zero instr.
+ SDValue NeonCmp =
+ DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
+
+ if (Invert)
+ NeonCmp = DAG.getNOT(DL, NeonCmp, VT);
+
+ return NeonCmp;
+ }
+
+ // Now handle Floating Point cases.
+ // Attempt to use Vector Floating Point Compare Mask against Zero instruction.
+ if (ISD::isBuildVectorAllZeros(RHS.getNode()) ||
+ ISD::isBuildVectorAllZeros(LHS.getNode())) {
+
+ // If LHS is the zero value, swap operands and CondCode.
+ if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
+ CC = getSetCCSwappedOperands(CC);
+ Op0 = RHS;
+ } else
+ Op0 = LHS;
+
+ // Using constant type to differentiate integer and FP compares with zero.
+ Op1 = DAG.getConstantFP(0, MVT::f32);
+ Opcode = AArch64ISD::NEON_CMPZ;
+ } else {
+ // Attempt to use Vector Floating Point Compare Mask instruction.
+ Op0 = LHS;
+ Op1 = RHS;
+ Opcode = AArch64ISD::NEON_CMP;
+ }
+
+ SDValue NeonCmpAlt;
+ // Some register compares have to be implemented with swapped CC and operands,
+ // e.g.: OLT implemented as OGT with swapped operands.
+ bool SwapIfRegArgs = false;
+
+ // Ensure valid CondCode for FP Compare Mask against Zero instruction:
+ // EQ, GE, GT, LE, LT.
+ // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT.
+ switch (CC) {
+ default:
+ llvm_unreachable("Illegal FP comparison");
+ case ISD::SETUNE:
+ case ISD::SETNE:
+ Invert = true; // Fallthrough
+ case ISD::SETOEQ:
+ case ISD::SETEQ:
+ CC = ISD::SETEQ;
+ break;
+ case ISD::SETOLT:
+ case ISD::SETLT:
+ CC = ISD::SETLT;
+ SwapIfRegArgs = true;
+ break;
+ case ISD::SETOGT:
+ case ISD::SETGT:
+ CC = ISD::SETGT;
+ break;
+ case ISD::SETOLE:
+ case ISD::SETLE:
+ CC = ISD::SETLE;
+ SwapIfRegArgs = true;
+ break;
+ case ISD::SETOGE:
+ case ISD::SETGE:
+ CC = ISD::SETGE;
+ break;
+ case ISD::SETUGE:
+ Invert = true;
+ CC = ISD::SETLT;
+ SwapIfRegArgs = true;
+ break;
+ case ISD::SETULE:
+ Invert = true;
+ CC = ISD::SETGT;
+ break;
+ case ISD::SETUGT:
+ Invert = true;
+ CC = ISD::SETLE;
+ SwapIfRegArgs = true;
+ break;
+ case ISD::SETULT:
+ Invert = true;
+ CC = ISD::SETGE;
+ break;
+ case ISD::SETUEQ:
+ Invert = true; // Fallthrough
+ case ISD::SETONE:
+    // Expand this to (OGT | OLT).
+ NeonCmpAlt =
+ DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT));
+ CC = ISD::SETLT;
+ SwapIfRegArgs = true;
+ break;
+ case ISD::SETUO:
+ Invert = true; // Fallthrough
+ case ISD::SETO:
+ // Expand this to (OGE | OLT).
+ NeonCmpAlt =
+ DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE));
+ CC = ISD::SETLT;
+ SwapIfRegArgs = true;
+ break;
+ }
+
+ if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) {
+ CC = getSetCCSwappedOperands(CC);
+ std::swap(Op0, Op1);
+ }
+
+ // Generate FP Compare Mask instr or FP Compare Mask against Zero instr
+ SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
+
+ if (NeonCmpAlt.getNode())
+ NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt);
+
+ if (Invert)
+ NeonCmp = DAG.getNOT(DL, NeonCmp, VT);
+
+ return NeonCmp;
+}
+
// (SETCC lhs, rhs, condcode)
SDValue
AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
EVT VT = Op.getValueType();
+ if (VT.isVector())
+ return LowerVectorSETCC(Op, DAG);
+
if (LHS.getValueType() == MVT::f128) {
// f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
// for the rest of the function (some i32 or i64 values).
@@ -2298,7 +2786,7 @@ AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
// We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
// rather than just 8.
- return DAG.getMemcpy(Op.getOperand(0), Op.getDebugLoc(),
+ return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op),
Op.getOperand(1), Op.getOperand(2),
DAG.getConstant(32, MVT::i32), 8, false, false,
MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
@@ -2311,7 +2799,7 @@ AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
AArch64MachineFunctionInfo *FuncInfo
= MF.getInfo<AArch64MachineFunctionInfo>();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Chain = Op.getOperand(0);
SDValue VAList = Op.getOperand(1);
@@ -2389,6 +2877,8 @@ AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
+ case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
@@ -2401,16 +2891,161 @@ AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::VACOPY: return LowerVACOPY(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
+ case ISD::BUILD_VECTOR:
+ return LowerBUILD_VECTOR(Op, DAG, getSubtarget());
+ case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
}
return SDValue();
}
+/// Check if the specified splat value corresponds to a valid vector constant
+/// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If
+/// so, return the encoded 8-bit immediate and the OpCmode instruction field
+/// values.
+static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
+ unsigned SplatBitSize, SelectionDAG &DAG,
+ bool is128Bits, NeonModImmType type, EVT &VT,
+ unsigned &Imm, unsigned &OpCmode) {
+ switch (SplatBitSize) {
+ default:
+ llvm_unreachable("unexpected size for isNeonModifiedImm");
+ case 8: {
+ if (type != Neon_Mov_Imm)
+ return false;
+ assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
+ // Neon movi per byte: Op=0, Cmode=1110.
+ OpCmode = 0xe;
+ Imm = SplatBits;
+ VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
+ break;
+ }
+ case 16: {
+ // Neon move inst per halfword
+ VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
+ if ((SplatBits & ~0xff) == 0) {
+ // Value = 0x00nn is 0x00nn LSL 0
+ // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000
+ // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001
+ // Op=x, Cmode=100y
+ Imm = SplatBits;
+ OpCmode = 0x8;
+ break;
+ }
+ if ((SplatBits & ~0xff00) == 0) {
+ // Value = 0xnn00 is 0x00nn LSL 8
+ // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010
+ // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011
+ // Op=x, Cmode=101x
+ Imm = SplatBits >> 8;
+ OpCmode = 0xa;
+ break;
+ }
+ // can't handle any other
+ return false;
+ }
+
+ case 32: {
+ // First the LSL variants (MSL is unusable by some interested instructions).
+
+ // Neon move instr per word, shift zeros
+ VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
+ if ((SplatBits & ~0xff) == 0) {
+ // Value = 0x000000nn is 0x000000nn LSL 0
+ // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000
+ // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001
+ // Op=x, Cmode=000x
+ Imm = SplatBits;
+ OpCmode = 0;
+ break;
+ }
+ if ((SplatBits & ~0xff00) == 0) {
+ // Value = 0x0000nn00 is 0x000000nn LSL 8
+ // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010
+ // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011
+ // Op=x, Cmode=001x
+ Imm = SplatBits >> 8;
+ OpCmode = 0x2;
+ break;
+ }
+ if ((SplatBits & ~0xff0000) == 0) {
+ // Value = 0x00nn0000 is 0x000000nn LSL 16
+ // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100
+ // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101
+ // Op=x, Cmode=010x
+ Imm = SplatBits >> 16;
+ OpCmode = 0x4;
+ break;
+ }
+ if ((SplatBits & ~0xff000000) == 0) {
+ // Value = 0xnn000000 is 0x000000nn LSL 24
+ // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110
+ // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111
+ // Op=x, Cmode=011x
+ Imm = SplatBits >> 24;
+ OpCmode = 0x6;
+ break;
+ }
+
+ // Now the MSL immediates.
+
+ // Neon move instr per word, shift ones
+ if ((SplatBits & ~0xffff) == 0 &&
+ ((SplatBits | SplatUndef) & 0xff) == 0xff) {
+ // Value = 0x0000nnff is 0x000000nn MSL 8
+ // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100
+ // Op=x, Cmode=1100
+ Imm = SplatBits >> 8;
+ OpCmode = 0xc;
+ break;
+ }
+ if ((SplatBits & ~0xffffff) == 0 &&
+ ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
+ // Value = 0x00nnffff is 0x000000nn MSL 16
+      // movi: Op=0, Cmode= 1101; mvni: Op=1, Cmode= 1101
+ // Op=x, Cmode=1101
+ Imm = SplatBits >> 16;
+ OpCmode = 0xd;
+ break;
+ }
+ // can't handle any other
+ return false;
+ }
+
+ case 64: {
+ if (type != Neon_Mov_Imm)
+ return false;
+ // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
+ // movi Op=1, Cmode=1110.
+ OpCmode = 0x1e;
+ uint64_t BitMask = 0xff;
+ uint64_t Val = 0;
+ unsigned ImmMask = 1;
+ Imm = 0;
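+    // Build the 8-bit immediate: set bit N of Imm when byte N of the splat is
+    // all-ones, and reject the splat if any byte is neither 0x00 nor 0xff.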
+ for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
+ if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
+ Val |= BitMask;
+ Imm |= ImmMask;
+ } else if ((SplatBits & BitMask) != 0) {
+ return false;
+ }
+ BitMask <<= 8;
+ ImmMask <<= 1;
+ }
+ SplatBits = Val;
+ VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
+ break;
+ }
+ }
+
+ return true;
+}
+
static SDValue PerformANDCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
// We're looking for an SRA/SHL pair which form an SBFX.
@@ -2448,7 +3083,7 @@ static SDValue PerformANDCombine(SDNode *N,
/// a compatible SHL operation (unless they're already low). This function
/// checks that condition and returns the least-significant bit that's
/// intended. If the operation not a field preparation, -1 is returned.
-static int32_t getLSBForBFI(SelectionDAG &DAG, DebugLoc DL, EVT VT,
+static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
SDValue &MaskedVal, uint64_t Mask) {
if (!isShiftedMask_64(Mask))
return -1;
@@ -2464,7 +3099,7 @@ static int32_t getLSBForBFI(SelectionDAG &DAG, DebugLoc DL, EVT VT,
// cases (e.g. bitfield to bitfield copy) may still need a real shift before
// the BFI.
- uint64_t LSB = CountTrailingZeros_64(Mask);
+ uint64_t LSB = countTrailingZeros(Mask);
int64_t ShiftRightRequired = LSB;
if (MaskedVal.getOpcode() == ISD::SHL &&
isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
@@ -2524,7 +3159,7 @@ static SDValue tryCombineToBFI(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
assert(N->getOpcode() == ISD::OR && "Unexpected root");
@@ -2605,7 +3240,7 @@ static SDValue tryCombineToLargerBFI(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
// First job is to hunt for a MaskedBFI on either the left or right. Swap
@@ -2687,7 +3322,7 @@ static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
static SDValue tryCombineToEXTR(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
assert(N->getOpcode() == ISD::OR && "Unexpected root");
@@ -2731,6 +3366,7 @@ static SDValue PerformORCombine(SDNode *N,
const AArch64Subtarget *Subtarget) {
SelectionDAG &DAG = DCI.DAG;
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
@@ -2751,6 +3387,44 @@ static SDValue PerformORCombine(SDNode *N,
if (Res.getNode())
return Res;
+ if (!Subtarget->hasNEON())
+ return SDValue();
+
+ // Attempt to use vector immediate-form BSL
+ // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
+
+ SDValue N0 = N->getOperand(0);
+ if (N0.getOpcode() != ISD::AND)
+ return SDValue();
+
+ SDValue N1 = N->getOperand(1);
+ if (N1.getOpcode() != ISD::AND)
+ return SDValue();
+
+ if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
+ APInt SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
+ APInt SplatBits0;
+ if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
+ HasAnyUndefs) &&
+ !HasAnyUndefs) {
+ BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
+ APInt SplatBits1;
+ if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
+ HasAnyUndefs) &&
+ !HasAnyUndefs && SplatBits0 == ~SplatBits1) {
+ // Canonicalize the vector type to make instruction selection simpler.
+ EVT CanonicalVT = VT.is128BitVector() ? MVT::v16i8 : MVT::v8i8;
+ SDValue Result = DAG.getNode(AArch64ISD::NEON_BSL, DL, CanonicalVT,
+ N0->getOperand(1), N0->getOperand(0),
+ N1->getOperand(0));
+ return DAG.getNode(ISD::BITCAST, DL, VT, Result);
+ }
+ }
+ }
+
return SDValue();
}
@@ -2759,7 +3433,7 @@ static SDValue PerformSRACombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
// We're looking for an SRA/SHL pair which form an SBFX.
@@ -2791,6 +3465,336 @@ static SDValue PerformSRACombine(SDNode *N,
DAG.getConstant(LSB + Width - 1, MVT::i64));
}
+/// Check if this is a valid build_vector for the immediate operand of
+/// a vector shift operation, where all the elements of the build_vector
+/// must have the same constant integer value.
+static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
+ // Ignore bit_converts.
+ while (Op.getOpcode() == ISD::BITCAST)
+ Op = Op.getOperand(0);
+ BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
+ APInt SplatBits, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
+ HasAnyUndefs, ElementBits) ||
+ SplatBitSize > ElementBits)
+ return false;
+ Cnt = SplatBits.getSExtValue();
+ return true;
+}
+
+/// Check if this is a valid build_vector for the immediate operand of
+/// a vector shift left operation. That value must be in the range:
+/// 0 <= Value < ElementBits
+static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) {
+ assert(VT.isVector() && "vector shift count is not a vector type");
+ unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
+ if (!getVShiftImm(Op, ElementBits, Cnt))
+ return false;
+ return (Cnt >= 0 && Cnt < ElementBits);
+}
+
+/// Check if this is a valid build_vector for the immediate operand of a
+/// vector shift right operation. The value must be in the range:
+/// 1 <= Value <= ElementBits
+static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) {
+ assert(VT.isVector() && "vector shift count is not a vector type");
+ unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
+ if (!getVShiftImm(Op, ElementBits, Cnt))
+ return false;
+ return (Cnt >= 1 && Cnt <= ElementBits);
+}
+
+/// Checks for immediate versions of vector shifts and lowers them.
+static SDValue PerformShiftCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const AArch64Subtarget *ST) {
+ SelectionDAG &DAG = DCI.DAG;
+ EVT VT = N->getValueType(0);
+ if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64))
+ return PerformSRACombine(N, DCI);
+
+ // Nothing to be done for scalar shifts.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (!VT.isVector() || !TLI.isTypeLegal(VT))
+ return SDValue();
+
+ assert(ST->hasNEON() && "unexpected vector shift");
+ int64_t Cnt;
+
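+  // Rewrite vector shifts whose amount is a constant splat so that the amount
+  // is carried by a single NEON_VDUP node of the immediate.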
+ switch (N->getOpcode()) {
+ default:
+ llvm_unreachable("unexpected shift opcode");
+
+ case ISD::SHL:
+ if (isVShiftLImm(N->getOperand(1), VT, Cnt)) {
+ SDValue RHS =
+ DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
+ DAG.getConstant(Cnt, MVT::i32));
+ return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS);
+ }
+ break;
+
+ case ISD::SRA:
+ case ISD::SRL:
+ if (isVShiftRImm(N->getOperand(1), VT, Cnt)) {
+ SDValue RHS =
+ DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
+ DAG.getConstant(Cnt, MVT::i32));
+ return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS);
+ }
+ break;
+ }
+
+ return SDValue();
+}
+
+/// AArch64-specific DAG combining for intrinsics.
+static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+
+ switch (IntNo) {
+ default:
+ // Don't do anything for most intrinsics.
+ break;
+
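+  // Saturating shifts by a constant splat amount are lowered directly to the
+  // signed/unsigned NEON saturating-shift nodes.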
+ case Intrinsic::arm_neon_vqshifts:
+ case Intrinsic::arm_neon_vqshiftu:
+ EVT VT = N->getOperand(1).getValueType();
+ int64_t Cnt;
+ if (!isVShiftLImm(N->getOperand(2), VT, Cnt))
+ break;
+ unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts)
+ ? AArch64ISD::NEON_QSHLs
+ : AArch64ISD::NEON_QSHLu;
+ return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
+ N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
+ }
+
+ return SDValue();
+}
+
+/// Target-specific DAG combine function for NEON load/store intrinsics
+/// to merge base address updates.
+static SDValue CombineBaseUpdate(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
+ N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
+ unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
+ SDValue Addr = N->getOperand(AddrOpIdx);
+
+ // Search for a use of the address operand that is an increment.
+ for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
+ UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ if (User->getOpcode() != ISD::ADD ||
+ UI.getUse().getResNo() != Addr.getResNo())
+ continue;
+
+ // Check that the add is independent of the load/store. Otherwise, folding
+ // it would create a cycle.
+ if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
+ continue;
+
+ // Find the new opcode for the updating load/store.
+ bool isLoad = true;
+ bool isLaneOp = false;
+ unsigned NewOpc = 0;
+ unsigned NumVecs = 0;
+ if (isIntrinsic) {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ default: llvm_unreachable("unexpected intrinsic for Neon base update");
+ case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD;
+ NumVecs = 1; break;
+ case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD;
+ NumVecs = 2; break;
+ case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD;
+ NumVecs = 3; break;
+ case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD;
+ NumVecs = 4; break;
+ case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD;
+ NumVecs = 1; isLoad = false; break;
+ case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD;
+ NumVecs = 2; isLoad = false; break;
+ case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD;
+ NumVecs = 3; isLoad = false; break;
+ case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD;
+ NumVecs = 4; isLoad = false; break;
+ case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD;
+ NumVecs = 2; break;
+ case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD;
+ NumVecs = 3; break;
+ case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD;
+ NumVecs = 4; break;
+ case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD;
+ NumVecs = 2; isLoad = false; break;
+ case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD;
+ NumVecs = 3; isLoad = false; break;
+ case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD;
+ NumVecs = 4; isLoad = false; break;
+ case Intrinsic::arm_neon_vld2lane: NewOpc = AArch64ISD::NEON_LD2LN_UPD;
+ NumVecs = 2; isLaneOp = true; break;
+ case Intrinsic::arm_neon_vld3lane: NewOpc = AArch64ISD::NEON_LD3LN_UPD;
+ NumVecs = 3; isLaneOp = true; break;
+ case Intrinsic::arm_neon_vld4lane: NewOpc = AArch64ISD::NEON_LD4LN_UPD;
+ NumVecs = 4; isLaneOp = true; break;
+ case Intrinsic::arm_neon_vst2lane: NewOpc = AArch64ISD::NEON_ST2LN_UPD;
+ NumVecs = 2; isLoad = false; isLaneOp = true; break;
+ case Intrinsic::arm_neon_vst3lane: NewOpc = AArch64ISD::NEON_ST3LN_UPD;
+ NumVecs = 3; isLoad = false; isLaneOp = true; break;
+ case Intrinsic::arm_neon_vst4lane: NewOpc = AArch64ISD::NEON_ST4LN_UPD;
+ NumVecs = 4; isLoad = false; isLaneOp = true; break;
+ }
+ } else {
+ isLaneOp = true;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("unexpected opcode for Neon base update");
+ case AArch64ISD::NEON_LD2DUP: NewOpc = AArch64ISD::NEON_LD2DUP_UPD;
+ NumVecs = 2; break;
+ case AArch64ISD::NEON_LD3DUP: NewOpc = AArch64ISD::NEON_LD3DUP_UPD;
+ NumVecs = 3; break;
+ case AArch64ISD::NEON_LD4DUP: NewOpc = AArch64ISD::NEON_LD4DUP_UPD;
+ NumVecs = 4; break;
+ }
+ }
+
+ // Find the size of memory referenced by the load/store.
+ EVT VecTy;
+ if (isLoad)
+ VecTy = N->getValueType(0);
+ else
+ VecTy = N->getOperand(AddrOpIdx + 1).getValueType();
+ unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
+ if (isLaneOp)
+ NumBytes /= VecTy.getVectorNumElements();
+
+ // If the increment is a constant, it must match the memory ref size.
+ SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
+ if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
+ uint32_t IncVal = CInc->getZExtValue();
+ if (IncVal != NumBytes)
+ continue;
+ Inc = DAG.getTargetConstant(IncVal, MVT::i32);
+ }
+
+ // Create the new updating load/store node.
+ EVT Tys[6];
+ unsigned NumResultVecs = (isLoad ? NumVecs : 0);
+ unsigned n;
+ for (n = 0; n < NumResultVecs; ++n)
+ Tys[n] = VecTy;
+ Tys[n++] = MVT::i64;
+ Tys[n] = MVT::Other;
+ SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2);
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(N->getOperand(0)); // incoming chain
+ Ops.push_back(N->getOperand(AddrOpIdx));
+ Ops.push_back(Inc);
+ for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
+ Ops.push_back(N->getOperand(i));
+ }
+ MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
+ SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
+ Ops.data(), Ops.size(),
+ MemInt->getMemoryVT(),
+ MemInt->getMemOperand());
+
+ // Update the uses.
+ std::vector<SDValue> NewResults;
+ for (unsigned i = 0; i < NumResultVecs; ++i) {
+ NewResults.push_back(SDValue(UpdN.getNode(), i));
+ }
+ NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
+ DCI.CombineTo(N, NewResults);
+ DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
+
+ break;
+ }
+ return SDValue();
+}
+
+/// For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1)
+/// intrinsic, and if all the other uses of that intrinsic are also VDUPLANEs.
+/// If so, combine them to a vldN-dup operation and return the combined node.
+static SDValue CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+ SelectionDAG &DAG = DCI.DAG;
+ EVT VT = N->getValueType(0);
+
+ // Check if the VDUPLANE operand is a vldN-dup intrinsic.
+ SDNode *VLD = N->getOperand(0).getNode();
+ if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
+ return SDValue();
+ unsigned NumVecs = 0;
+ unsigned NewOpc = 0;
+ unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
+ if (IntNo == Intrinsic::arm_neon_vld2lane) {
+ NumVecs = 2;
+ NewOpc = AArch64ISD::NEON_LD2DUP;
+ } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
+ NumVecs = 3;
+ NewOpc = AArch64ISD::NEON_LD3DUP;
+ } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
+ NumVecs = 4;
+ NewOpc = AArch64ISD::NEON_LD4DUP;
+ } else {
+ return SDValue();
+ }
+
+ // First check that all the vldN-lane uses are VDUPLANEs and that the lane
+ // numbers match the load.
+ unsigned VLDLaneNo =
+ cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue();
+ for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
+ UI != UE; ++UI) {
+ // Ignore uses of the chain result.
+ if (UI.getUse().getResNo() == NumVecs)
+ continue;
+ SDNode *User = *UI;
+ if (User->getOpcode() != AArch64ISD::NEON_VDUPLANE ||
+ VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
+ return SDValue();
+ }
+
+ // Create the vldN-dup node.
+ EVT Tys[5];
+ unsigned n;
+ for (n = 0; n < NumVecs; ++n)
+ Tys[n] = VT;
+ Tys[n] = MVT::Other;
+ SDVTList SDTys = DAG.getVTList(Tys, NumVecs + 1);
+ SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
+ MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
+ SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2,
+ VLDMemInt->getMemoryVT(),
+ VLDMemInt->getMemOperand());
+
+ // Update the uses.
+ for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
+ UI != UE; ++UI) {
+ unsigned ResNo = UI.getUse().getResNo();
+ // Ignore uses of the chain result.
+ if (ResNo == NumVecs)
+ continue;
+ SDNode *User = *UI;
+ DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
+ }
+
+ // Now the vldN-lane intrinsic is dead except for its chain result.
+ // Update uses of the chain.
+ std::vector<SDValue> VLDDupResults;
+ for (unsigned n = 0; n < NumVecs; ++n)
+ VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
+ VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
+ DCI.CombineTo(VLD, VLDDupResults);
+
+ return SDValue(N, 0);
+}
SDValue
AArch64TargetLowering::PerformDAGCombine(SDNode *N,
@@ -2798,12 +3802,578 @@ AArch64TargetLowering::PerformDAGCombine(SDNode *N,
switch (N->getOpcode()) {
default: break;
case ISD::AND: return PerformANDCombine(N, DCI);
- case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
- case ISD::SRA: return PerformSRACombine(N, DCI);
+ case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
+ case ISD::SHL:
+ case ISD::SRA:
+ case ISD::SRL:
+ return PerformShiftCombine(N, DCI, getSubtarget());
+ case ISD::INTRINSIC_WO_CHAIN:
+ return PerformIntrinsicCombine(N, DCI.DAG);
+ case AArch64ISD::NEON_VDUPLANE:
+ return CombineVLDDUP(N, DCI);
+ case AArch64ISD::NEON_LD2DUP:
+ case AArch64ISD::NEON_LD3DUP:
+ case AArch64ISD::NEON_LD4DUP:
+ return CombineBaseUpdate(N, DCI);
+ case ISD::INTRINSIC_VOID:
+ case ISD::INTRINSIC_W_CHAIN:
+ switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ case Intrinsic::arm_neon_vld1:
+ case Intrinsic::arm_neon_vld2:
+ case Intrinsic::arm_neon_vld3:
+ case Intrinsic::arm_neon_vld4:
+ case Intrinsic::arm_neon_vst1:
+ case Intrinsic::arm_neon_vst2:
+ case Intrinsic::arm_neon_vst3:
+ case Intrinsic::arm_neon_vst4:
+ case Intrinsic::arm_neon_vld2lane:
+ case Intrinsic::arm_neon_vld3lane:
+ case Intrinsic::arm_neon_vld4lane:
+ case Intrinsic::aarch64_neon_vld1x2:
+ case Intrinsic::aarch64_neon_vld1x3:
+ case Intrinsic::aarch64_neon_vld1x4:
+ case Intrinsic::aarch64_neon_vst1x2:
+ case Intrinsic::aarch64_neon_vst1x3:
+ case Intrinsic::aarch64_neon_vst1x4:
+ case Intrinsic::arm_neon_vst2lane:
+ case Intrinsic::arm_neon_vst3lane:
+ case Intrinsic::arm_neon_vst4lane:
+ return CombineBaseUpdate(N, DCI);
+ default:
+ break;
+ }
}
return SDValue();
}
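+
+// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster than
+// a pair of fmul and fadd instructions; this is the case for f16, f32 and
+// f64, but not for f128, which is lowered to library calls.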
+bool
+AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
+ VT = VT.getScalarType();
+
+ if (!VT.isSimple())
+ return false;
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::f16:
+ case MVT::f32:
+ case MVT::f64:
+ return true;
+ case MVT::f128:
+ return false;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+// Check whether a Build Vector could be represented as a Shuffle Vector. If
+// so, try to call LowerVECTOR_SHUFFLE to lower it.
+bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG,
+ SDValue &Res) const {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned V0NumElts = 0;
+ int Mask[16];
+ SDValue V0, V1;
+
+  // Check that all elements are extracted from at most two vectors.
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Elt = Op.getOperand(i);
+ if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return false;
+
+ if (V0.getNode() == 0) {
+ V0 = Elt.getOperand(0);
+ V0NumElts = V0.getValueType().getVectorNumElements();
+ }
+ if (Elt.getOperand(0) == V0) {
+ Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue());
+ continue;
+ } else if (V1.getNode() == 0) {
+ V1 = Elt.getOperand(0);
+ }
+ if (Elt.getOperand(0) == V1) {
+ unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue();
+ Mask[i] = (Lane + V0NumElts);
+ continue;
+ } else {
+ return false;
+ }
+ }
+
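+  // All elements were extracted from a single source twice as wide as the
+  // result, so split it into two halves and shuffle those instead.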
+ if (!V1.getNode() && V0NumElts == NumElts * 2) {
+ V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
+ DAG.getConstant(NumElts, MVT::i64));
+ V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
+ DAG.getConstant(0, MVT::i64));
+ V0NumElts = V0.getValueType().getVectorNumElements();
+ }
+
+ if (V1.getNode() && NumElts == V0NumElts &&
+ V0NumElts == V1.getValueType().getVectorNumElements()) {
+ SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask);
+ Res = LowerVECTOR_SHUFFLE(Shuffle, DAG);
+ return true;
+ } else
+ return false;
+}
+
+// If this is a case we can't handle, return null and let the default
+// expansion code take care of it.
+SDValue
+AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
+ const AArch64Subtarget *ST) const {
+
+ BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+
+ APInt SplatBits, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+
+  bool UseNeonMov = VT.getSizeInBits() >= 64;
+
+ // Note we favor lowering MOVI over MVNI.
+ // This has implications on the definition of patterns in TableGen to select
+ // BIC immediate instructions but not ORR immediate instructions.
+ // If this lowering order is changed, TableGen patterns for BIC immediate and
+ // ORR immediate instructions have to be updated.
+ if (UseNeonMov &&
+ BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
+ if (SplatBitSize <= 64) {
+ // First attempt to use vector immediate-form MOVI
+ EVT NeonMovVT;
+ unsigned Imm = 0;
+ unsigned OpCmode = 0;
+
+ if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
+ SplatBitSize, DAG, VT.is128BitVector(),
+ Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) {
+ SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
+ SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
+
+ if (ImmVal.getNode() && OpCmodeVal.getNode()) {
+ SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT,
+ ImmVal, OpCmodeVal);
+ return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
+ }
+ }
+
+ // Then attempt to use vector immediate-form MVNI
+ uint64_t NegatedImm = (~SplatBits).getZExtValue();
+ if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
+ DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT,
+ Imm, OpCmode)) {
+ SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
+ SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
+ if (ImmVal.getNode() && OpCmodeVal.getNode()) {
+ SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT,
+ ImmVal, OpCmodeVal);
+ return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
+ }
+ }
+
+ // Attempt to use vector immediate-form FMOV
+ if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) ||
+ (VT == MVT::v2f64 && SplatBitSize == 64)) {
+ APFloat RealVal(
+ SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble,
+ SplatBits);
+ uint32_t ImmVal;
+ if (A64Imms::isFPImm(RealVal, ImmVal)) {
+ SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
+ return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val);
+ }
+ }
+ }
+ }
+
+ unsigned NumElts = VT.getVectorNumElements();
+ bool isOnlyLowElement = true;
+ bool usesOnlyOneValue = true;
+ bool hasDominantValue = false;
+ bool isConstant = true;
+
+ // Map of the number of times a particular SDValue appears in the
+ // element list.
+ DenseMap<SDValue, unsigned> ValueCounts;
+ SDValue Value;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue V = Op.getOperand(i);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ if (i > 0)
+ isOnlyLowElement = false;
+ if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
+ isConstant = false;
+
+ ValueCounts.insert(std::make_pair(V, 0));
+ unsigned &Count = ValueCounts[V];
+
+ // Is this value dominant? (takes up more than half of the lanes)
+ if (++Count > (NumElts / 2)) {
+ hasDominantValue = true;
+ Value = V;
+ }
+ }
+ if (ValueCounts.size() != 1)
+ usesOnlyOneValue = false;
+ if (!Value.getNode() && ValueCounts.size() > 0)
+ Value = ValueCounts.begin()->first;
+
+ if (ValueCounts.size() == 0)
+ return DAG.getUNDEF(VT);
+
+  // Loads are better lowered with insert_vector_elt, so keep going in that
+  // case rather than using scalar_to_vector here.
+ if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
+ return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
+
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ if (hasDominantValue && EltSize <= 64) {
+ // Use VDUP for non-constant splats.
+ if (!isConstant) {
+ SDValue N;
+
+ // If we are DUPing a value that comes directly from a vector, we could
+ // just use DUPLANE. We can only do this if the lane being extracted
+ // is at a constant index, as the DUP from lane instructions only have
+ // constant-index forms.
+ if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ isa<ConstantSDNode>(Value->getOperand(1))) {
+ N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT,
+ Value->getOperand(0), Value->getOperand(1));
+ } else
+ N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
+
+ if (!usesOnlyOneValue) {
+ // The dominant value was splatted as 'N', but we now have to insert
+ // all differing elements.
+ for (unsigned I = 0; I < NumElts; ++I) {
+ if (Op.getOperand(I) == Value)
+ continue;
+ SmallVector<SDValue, 3> Ops;
+ Ops.push_back(N);
+ Ops.push_back(Op.getOperand(I));
+ Ops.push_back(DAG.getConstant(I, MVT::i64));
+ N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3);
+ }
+ }
+ return N;
+ }
+ if (usesOnlyOneValue && isConstant) {
+ return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
+ }
+ }
+ // If all elements are constants and the case above didn't get hit, fall back
+ // to the default expansion, which will generate a load from the constant
+ // pool.
+ if (isConstant)
+ return SDValue();
+
+  // Try to lower this as a vector shuffle via LowerVECTOR_SHUFFLE.
+ SDValue Shuf;
+ if (isKnownShuffleVector(Op, DAG, Shuf))
+ return Shuf;
+
+ // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
+ // know the default expansion would otherwise fall back on something even
+ // worse. For a vector with one or two non-undef values, that's
+ // scalar_to_vector for the elements followed by a shuffle (provided the
+ // shuffle is valid for the target) and materialization element by element
+ // on the stack followed by a load for everything else.
+ if (!isConstant && !usesOnlyOneValue) {
+ SDValue Vec = DAG.getUNDEF(VT);
+ for (unsigned i = 0 ; i < NumElts; ++i) {
+ SDValue V = Op.getOperand(i);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx);
+ }
+ return Vec;
+ }
+ return SDValue();
+}
+
+/// isREVMask - Check if a vector shuffle corresponds to a REV
+/// instruction with the specified blocksize. (The order of the elements
+/// within each block of the vector is reversed.)
+static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
+ assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
+ "Only possible block sizes for REV are: 16, 32, 64");
+
+ unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ if (EltSz == 64)
+ return false;
+
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned BlockElts = M[0] + 1;
+ // If the first shuffle index is UNDEF, be optimistic.
+ if (M[0] < 0)
+ BlockElts = BlockSize / EltSz;
+
+ if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
+ return false;
+
+ for (unsigned i = 0; i < NumElts; ++i) {
+ if (M[i] < 0)
+ continue; // ignore UNDEF indices
+ if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
+ return false;
+ }
+
+ return true;
+}
+
+// isPermuteMask - Check whether the vector shuffle mask matches a UZP, ZIP or
+// TRN instruction.
+static unsigned isPermuteMask(ArrayRef<int> M, EVT VT) {
+ unsigned NumElts = VT.getVectorNumElements();
+ if (NumElts < 4)
+ return 0;
+
+ bool ismatch = true;
+
+ // Check UZP1
+ for (unsigned i = 0; i < NumElts; ++i) {
+ if ((unsigned)M[i] != i * 2) {
+ ismatch = false;
+ break;
+ }
+ }
+ if (ismatch)
+ return AArch64ISD::NEON_UZP1;
+
+ // Check UZP2
+ ismatch = true;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ if ((unsigned)M[i] != i * 2 + 1) {
+ ismatch = false;
+ break;
+ }
+ }
+ if (ismatch)
+ return AArch64ISD::NEON_UZP2;
+
+ // Check ZIP1
+ ismatch = true;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ if ((unsigned)M[i] != i / 2 + NumElts * (i % 2)) {
+ ismatch = false;
+ break;
+ }
+ }
+ if (ismatch)
+ return AArch64ISD::NEON_ZIP1;
+
+ // Check ZIP2
+ ismatch = true;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ if ((unsigned)M[i] != (NumElts + i) / 2 + NumElts * (i % 2)) {
+ ismatch = false;
+ break;
+ }
+ }
+ if (ismatch)
+ return AArch64ISD::NEON_ZIP2;
+
+ // Check TRN1
+ ismatch = true;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ if ((unsigned)M[i] != i + (NumElts - 1) * (i % 2)) {
+ ismatch = false;
+ break;
+ }
+ }
+ if (ismatch)
+ return AArch64ISD::NEON_TRN1;
+
+ // Check TRN2
+ ismatch = true;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ if ((unsigned)M[i] != 1 + i + (NumElts - 1) * (i % 2)) {
+ ismatch = false;
+ break;
+ }
+ }
+ if (ismatch)
+ return AArch64ISD::NEON_TRN2;
+
+ return 0;
+}
+
+SDValue
+AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+ ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
+
+ // Convert shuffles that are directly supported on NEON to target-specific
+ // DAG nodes, instead of keeping them as shuffles and matching them again
+ // during code selection. This is more efficient and avoids the possibility
+ // of inconsistencies between legalization and selection.
+ ArrayRef<int> ShuffleMask = SVN->getMask();
+
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ if (EltSize > 64)
+ return SDValue();
+
+ if (isREVMask(ShuffleMask, VT, 64))
+ return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1);
+ if (isREVMask(ShuffleMask, VT, 32))
+ return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1);
+ if (isREVMask(ShuffleMask, VT, 16))
+ return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1);
+
+ unsigned ISDNo = isPermuteMask(ShuffleMask, VT);
+ if (ISDNo)
+ return DAG.getNode(ISDNo, dl, VT, V1, V2);
+
+  // If the elements of the shuffle mask are all the same constant, we can
+  // transform it into either NEON_VDUP or NEON_VDUPLANE.
+ if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
+ int Lane = SVN->getSplatIndex();
+    // If this is an undef splat, generate it via a plain VDUP, if possible.
+ if (Lane == -1) Lane = 0;
+
+ // Test if V1 is a SCALAR_TO_VECTOR.
+ if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
+ return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
+ }
+ // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
+ if (V1.getOpcode() == ISD::BUILD_VECTOR) {
+ bool IsScalarToVector = true;
+ for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
+ if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
+ i != (unsigned)Lane) {
+ IsScalarToVector = false;
+ break;
+ }
+ if (IsScalarToVector)
+ return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
+ V1.getOperand(Lane));
+ }
+
+    // Test if V1 is an EXTRACT_SUBVECTOR.
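+    // (For example, splatting lane 1 of a v2i32 extract_subvector taken at
+    // index 2 of a v4i32 becomes a DUPLANE of lane 3 of the original vector.)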
+ if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
+ int ExtLane = cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue();
+ return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1.getOperand(0),
+ DAG.getConstant(Lane + ExtLane, MVT::i64));
+ }
+ // Test if V1 is a CONCAT_VECTORS.
+ if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
+ V1.getOperand(1).getOpcode() == ISD::UNDEF) {
+ SDValue Op0 = V1.getOperand(0);
+ assert((unsigned)Lane < Op0.getValueType().getVectorNumElements() &&
+ "Invalid vector lane access");
+ return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, Op0,
+ DAG.getConstant(Lane, MVT::i64));
+ }
+
+ return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
+ DAG.getConstant(Lane, MVT::i64));
+ }
+
+ int Length = ShuffleMask.size();
+ int V1EltNum = V1.getValueType().getVectorNumElements();
+
+  // If the number of V1 elements is the same as the number of shuffle mask
+  // elements and the shuffle mask values are sequential, we can transform
+  // it into NEON_VEXTRACT.
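+  // For example, a v8i8 shuffle of V1 and V2 with the mask
+  // <3, 4, 5, 6, 7, 8, 9, 10> is sequential starting at 3, so it becomes an
+  // extract (EXT) with byte index 3.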
+ if (V1EltNum == Length) {
+ // Check if the shuffle mask is sequential.
+ bool IsSequential = true;
+ int CurMask = ShuffleMask[0];
+ for (int I = 0; I < Length; ++I) {
+ if (ShuffleMask[I] != CurMask) {
+ IsSequential = false;
+ break;
+ }
+ CurMask++;
+ }
+ if (IsSequential) {
+ assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
+ unsigned VecSize = EltSize * V1EltNum;
+ unsigned Index = (EltSize/8) * ShuffleMask[0];
+ if (VecSize == 64 || VecSize == 128)
+ return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2,
+ DAG.getConstant(Index, MVT::i64));
+ }
+ }
+
+  // For a shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate an
+  // insert by element from V2 into V1.
+  // If the shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 is a
+  // better insertion target than V1 since it needs fewer inserts, so we
+  // count the elements that need to be inserted for both V1 and V2 and
+  // select the one needing fewer inserts as the insertion target.
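+  // (For example, with the mask "0, 1, 2, 3, 4, 5, 13, 7" only lane 6
+  // mismatches V1, so lane 5 of V2 is extracted and inserted into V1 at
+  // lane 6.)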
+
+  // Collect the elements that need to be inserted and their indices.
+ SmallVector<int, 8> NV1Elt;
+ SmallVector<int, 8> N1Index;
+ SmallVector<int, 8> NV2Elt;
+ SmallVector<int, 8> N2Index;
+ for (int I = 0; I != Length; ++I) {
+ if (ShuffleMask[I] != I) {
+ NV1Elt.push_back(ShuffleMask[I]);
+ N1Index.push_back(I);
+ }
+ }
+ for (int I = 0; I != Length; ++I) {
+ if (ShuffleMask[I] != (I + V1EltNum)) {
+ NV2Elt.push_back(ShuffleMask[I]);
+ N2Index.push_back(I);
+ }
+ }
+
+  // Decide which vector to insert into. If every lane mismatches both V1 and
+  // V2, neither is reused and we start from an UNDEF vector instead.
+ SDValue InsV = V1;
+ SmallVector<int, 8> InsMasks = NV1Elt;
+ SmallVector<int, 8> InsIndex = N1Index;
+ if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
+ if (NV1Elt.size() > NV2Elt.size()) {
+ InsV = V2;
+ InsMasks = NV2Elt;
+ InsIndex = N2Index;
+ }
+ } else {
+ InsV = DAG.getNode(ISD::UNDEF, dl, VT);
+ }
+
+ for (int I = 0, E = InsMasks.size(); I != E; ++I) {
+ SDValue ExtV = V1;
+ int Mask = InsMasks[I];
+ if (Mask >= V1EltNum) {
+ ExtV = V2;
+ Mask -= V1EltNum;
+ }
+    // Any value type smaller than i32 is illegal in AArch64, and this
+    // lowering function is called after the legalize pass, so we need to
+    // legalize the result here.
+ EVT EltVT;
+ if (VT.getVectorElementType().isFloatingPoint())
+ EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32;
+ else
+ EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32;
+
+ if (Mask >= 0) {
+ ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV,
+ DAG.getConstant(Mask, MVT::i64));
+ InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV,
+ DAG.getConstant(InsIndex[I], MVT::i64));
+ }
+ }
+ return InsV;
+}
+
AArch64TargetLowering::ConstraintType
AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
if (Constraint.size() == 1) {
@@ -2899,7 +4469,7 @@ AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'S': {
// An absolute symbolic address or label reference.
if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
- Result = DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getDebugLoc(),
+ Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
GA->getValueType(0));
} else if (const BlockAddressSDNode *BA
= dyn_cast<BlockAddressSDNode>(Op)) {
@@ -2935,7 +4505,7 @@ AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::pair<unsigned, const TargetRegisterClass*>
AArch64TargetLowering::getRegForInlineAsmConstraint(
const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':
@@ -2949,14 +4519,10 @@ AArch64TargetLowering::getRegForInlineAsmConstraint(
return std::make_pair(0U, &AArch64::FPR16RegClass);
else if (VT == MVT::f32)
return std::make_pair(0U, &AArch64::FPR32RegClass);
- else if (VT == MVT::f64)
- return std::make_pair(0U, &AArch64::FPR64RegClass);
else if (VT.getSizeInBits() == 64)
- return std::make_pair(0U, &AArch64::VPR64RegClass);
- else if (VT == MVT::f128)
- return std::make_pair(0U, &AArch64::FPR128RegClass);
+ return std::make_pair(0U, &AArch64::FPR64RegClass);
else if (VT.getSizeInBits() == 128)
- return std::make_pair(0U, &AArch64::VPR128RegClass);
+ return std::make_pair(0U, &AArch64::FPR128RegClass);
break;
}
}
@@ -2965,3 +4531,69 @@ AArch64TargetLowering::getRegForInlineAsmConstraint(
// constraint into a member of a register class.
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
+
+/// Represent NEON load and store intrinsics as MemIntrinsicNodes.
+/// The associated MachineMemOperands record the alignment specified
+/// in the intrinsic calls.
+bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+ const CallInst &I,
+ unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ case Intrinsic::arm_neon_vld1:
+ case Intrinsic::arm_neon_vld2:
+ case Intrinsic::arm_neon_vld3:
+ case Intrinsic::arm_neon_vld4:
+ case Intrinsic::aarch64_neon_vld1x2:
+ case Intrinsic::aarch64_neon_vld1x3:
+ case Intrinsic::aarch64_neon_vld1x4:
+ case Intrinsic::arm_neon_vld2lane:
+ case Intrinsic::arm_neon_vld3lane:
+ case Intrinsic::arm_neon_vld4lane: {
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ // Conservatively set memVT to the entire set of vectors loaded.
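+    // (For example, an aarch64_neon_vld1x3 of three v4i32 vectors loads 48
+    // bytes and is modelled here as a v6i64 memory access.)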
+ uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
+ Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
+ Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
+ Info.vol = false; // volatile loads with NEON intrinsics not supported
+ Info.readMem = true;
+ Info.writeMem = false;
+ return true;
+ }
+ case Intrinsic::arm_neon_vst1:
+ case Intrinsic::arm_neon_vst2:
+ case Intrinsic::arm_neon_vst3:
+ case Intrinsic::arm_neon_vst4:
+ case Intrinsic::aarch64_neon_vst1x2:
+ case Intrinsic::aarch64_neon_vst1x3:
+ case Intrinsic::aarch64_neon_vst1x4:
+ case Intrinsic::arm_neon_vst2lane:
+ case Intrinsic::arm_neon_vst3lane:
+ case Intrinsic::arm_neon_vst4lane: {
+ Info.opc = ISD::INTRINSIC_VOID;
+ // Conservatively set memVT to the entire set of vectors stored.
+ unsigned NumElts = 0;
+ for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
+ Type *ArgTy = I.getArgOperand(ArgI)->getType();
+ if (!ArgTy->isVectorTy())
+ break;
+ NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
+ }
+ Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
+ Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
+ Info.vol = false; // volatile stores with NEON intrinsics not supported
+ Info.readMem = false;
+ Info.writeMem = true;
+ return true;
+ }
+ default:
+ break;
+ }
+
+ return false;
+}
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h
index d49b3ee453f9..8ad5a79a33ee 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/lib/Target/AArch64/AArch64ISelLowering.h
@@ -19,7 +19,7 @@
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
-
+#include "llvm/IR/Intrinsics.h"
namespace llvm {
namespace AArch64ISD {
@@ -111,7 +111,92 @@ namespace AArch64ISD {
// created using the small memory model style: i.e. adrp/add or
// adrp/mem-op. This exists to prevent bare TargetAddresses which may never
// get selected.
- WrapperSmall
+ WrapperSmall,
+
+ // Vector bitwise select
+ NEON_BSL,
+
+ // Vector move immediate
+ NEON_MOVIMM,
+
+ // Vector Move Inverted Immediate
+ NEON_MVNIMM,
+
+ // Vector FP move immediate
+ NEON_FMOVIMM,
+
+ // Vector permute
+ NEON_UZP1,
+ NEON_UZP2,
+ NEON_ZIP1,
+ NEON_ZIP2,
+ NEON_TRN1,
+ NEON_TRN2,
+
+ // Vector Element reverse
+ NEON_REV64,
+ NEON_REV32,
+ NEON_REV16,
+
+ // Vector compare
+ NEON_CMP,
+
+ // Vector compare zero
+ NEON_CMPZ,
+
+ // Vector compare bitwise test
+ NEON_TST,
+
+ // Vector saturating shift
+ NEON_QSHLs,
+ NEON_QSHLu,
+
+ // Vector dup
+ NEON_VDUP,
+
+ // Vector dup by lane
+ NEON_VDUPLANE,
+
+ // Vector extract
+ NEON_VEXTRACT,
+
+ // NEON duplicate lane loads
+ NEON_LD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ NEON_LD3DUP,
+ NEON_LD4DUP,
+
+ // NEON loads with post-increment base updates:
+ NEON_LD1_UPD,
+ NEON_LD2_UPD,
+ NEON_LD3_UPD,
+ NEON_LD4_UPD,
+ NEON_LD1x2_UPD,
+ NEON_LD1x3_UPD,
+ NEON_LD1x4_UPD,
+
+ // NEON stores with post-increment base updates:
+ NEON_ST1_UPD,
+ NEON_ST2_UPD,
+ NEON_ST3_UPD,
+ NEON_ST4_UPD,
+ NEON_ST1x2_UPD,
+ NEON_ST1x3_UPD,
+ NEON_ST1x4_UPD,
+
+ // NEON duplicate lane loads with post-increment base updates:
+ NEON_LD2DUP_UPD,
+ NEON_LD3DUP_UPD,
+ NEON_LD4DUP_UPD,
+
+ // NEON lane loads with post-increment base updates:
+ NEON_LD2LN_UPD,
+ NEON_LD3LN_UPD,
+ NEON_LD4LN_UPD,
+
+    // NEON lane stores with post-increment base updates:
+ NEON_ST2LN_UPD,
+ NEON_ST3LN_UPD,
+ NEON_ST4LN_UPD
};
}
@@ -130,14 +215,14 @@ public:
SDValue LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
@@ -145,12 +230,18 @@ public:
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- void SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
- DebugLoc DL, SDValue &Chain) const;
+ bool isKnownShuffleVector(SDValue Op, SelectionDAG &DAG, SDValue &Res) const;
+
+ SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
+ const AArch64Subtarget *ST) const;
+
+ SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ void SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
+ SDValue &Chain) const;
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
@@ -171,7 +262,7 @@ public:
SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
MachineFrameInfo *MFI, int ClobberedFI) const;
- EVT getSetCCResultType(EVT VT) const;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
@@ -181,7 +272,7 @@ public:
bool isLegalICmpImmediate(int64_t Val) const;
SDValue getSelectableIntSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &A64cc, SelectionDAG &DAG, DebugLoc &dl) const;
+ SDValue &A64cc, SelectionDAG &DAG, SDLoc &dl) const;
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
@@ -211,12 +302,14 @@ public:
SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, bool IsSigned) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddressELFSmall(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddressELFLarge(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, DebugLoc DL,
+ SDValue LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, SDLoc DL,
SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool IsSigned) const;
@@ -229,11 +322,11 @@ public:
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
- /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
- /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
- /// is expanded to mul + add.
- virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; }
+ /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
+ /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
+ /// expanded to FMAs when this method returns true, otherwise fmuladd is
+ /// expanded to fmul + fadd.
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
ConstraintType getConstraintType(const std::string &Constraint) const;
@@ -245,12 +338,30 @@ public:
SelectionDAG &DAG) const;
std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
+ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
+
+ virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
+ unsigned Intrinsic) const LLVM_OVERRIDE;
+
+protected:
+ std::pair<const TargetRegisterClass*, uint8_t>
+ findRepresentativeClass(MVT VT) const;
+
private:
- const AArch64Subtarget *Subtarget;
- const TargetRegisterInfo *RegInfo;
const InstrItineraryData *Itins;
+
+ const AArch64Subtarget *getSubtarget() const {
+ return &getTargetMachine().getSubtarget<AArch64Subtarget>();
+ }
};
+enum NeonModImmType {
+ Neon_Mov_Imm,
+ Neon_Mvn_Imm
+};
+
+extern SDValue ScanBUILD_VECTOR(SDValue Op, bool &isOnlyLowElement,
+ bool &usesOnlyOneValue, bool &hasDominantValue,
+ bool &isConstant, bool &isUNDEF);
} // namespace llvm
#endif // LLVM_TARGET_AARCH64_ISELLOWERING_H
diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td
index 9dd122f14941..34f917caabe7 100644
--- a/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/lib/Target/AArch64/AArch64InstrFormats.td
@@ -120,6 +120,14 @@ class A64InstRdnm<dag outs, dag ins, string asmstr,
let Inst{20-16} = Rm;
}
+class A64InstRtnm<dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRtn<outs, ins, asmstr, patterns, itin> {
+ bits<5> Rm;
+
+ let Inst{20-16} = Rm;
+}
+
//===----------------------------------------------------------------------===//
//
// Actual A64 Instruction Formats
@@ -383,6 +391,8 @@ class A64I_extract<bit sf, bits<3> op, bit n,
// Inherits Rd in 4-0
}
+let Predicates = [HasFPARMv8] in {
+
// Format for floating-point compare instructions.
class A64I_fpcmp<bit m, bit s, bits<2> type, bits<2> op, bits<5> opcode2,
dag outs, dag ins, string asmstr,
@@ -562,6 +572,8 @@ class A64I_fpimm<bit m, bit s, bits<2> type, bits<5> imm5,
// Inherit Rd in 4-0
}
+}
+
// Format for load-register (literal) instructions.
class A64I_LDRlit<bits<2> opc, bit v,
dag outs, dag ins, string asmstr,
@@ -959,3 +971,519 @@ class A64I_Breg<bits<4> opc, bits<5> op2, bits<6> op3, bits<5> op4,
let Inst{4-0} = op4;
}
+
+//===----------------------------------------------------------------------===//
+//
+// Neon Instruction Format Definitions.
+//
+
+let Predicates = [HasNEON] in {
+
+class NeonInstAlias<string Asm, dag Result, bit Emit = 0b1>
+ : InstAlias<Asm, Result, Emit> {
+}
+
+// Format AdvSIMD bitwise extract
+class NeonI_BitExtract<bit q, bits<2> op2,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29-24} = 0b101110;
+ let Inst{23-22} = op2;
+ let Inst{21} = 0b0;
+ // Inherit Rm in 20-16
+ let Inst{15} = 0b0;
+ // imm4 in 14-11
+ let Inst{10} = 0b0;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD perm
+class NeonI_Perm<bit q, bits<2> size, bits<3> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29-24} = 0b001110;
+ let Inst{23-22} = size;
+ let Inst{21} = 0b0;
+ // Inherit Rm in 20-16
+ let Inst{15} = 0b0;
+ let Inst{14-12} = opcode;
+ let Inst{11-10} = 0b10;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD table lookup
+class NeonI_TBL<bit q, bits<2> op2, bits<2> len, bit op,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29-24} = 0b001110;
+ let Inst{23-22} = op2;
+ let Inst{21} = 0b0;
+ // Inherit Rm in 20-16
+ let Inst{15} = 0b0;
+ let Inst{14-13} = len;
+ let Inst{12} = op;
+ let Inst{11-10} = 0b00;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD 3 vector registers with same vector type
+class NeonI_3VSame<bit q, bit u, bits<2> size, bits<5> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21} = 0b1;
+ // Inherit Rm in 20-16
+ let Inst{15-11} = opcode;
+ let Inst{10} = 0b1;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD 3 vector registers with different vector type
+class NeonI_3VDiff<bit q, bit u, bits<2> size, bits<4> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21} = 0b1;
+ // Inherit Rm in 20-16
+ let Inst{15-12} = opcode;
+ let Inst{11} = 0b0;
+ let Inst{10} = 0b0;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD two registers and an element
+class NeonI_2VElem<bit q, bit u, bits<2> size, bits<4> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b01111;
+ let Inst{23-22} = size;
+ // l in Inst{21}
+ // m in Inst{20}
+ // Inherit Rm in 19-16
+ let Inst{15-12} = opcode;
+ // h in Inst{11}
+ let Inst{10} = 0b0;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD 1 vector register with modified immediate
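+// (The 8-bit immediate is split across the encoding: abc goes into
+// Inst{18-16} and defgh into Inst{9-5}.)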
+class NeonI_1VModImm<bit q, bit op,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRd<outs,ins, asmstr, patterns, itin> {
+ bits<8> Imm;
+ bits<4> cmode;
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29} = op;
+ let Inst{28-19} = 0b0111100000;
+ let Inst{15-12} = cmode;
+ let Inst{11} = 0b0; // o2
+ let Inst{10} = 1;
+ // Inherit Rd in 4-0
+ let Inst{18-16} = Imm{7-5}; // imm a:b:c
+ let Inst{9-5} = Imm{4-0}; // imm d:e:f:g:h
+}
+
+// Format AdvSIMD 3 scalar registers with same type
+
+class NeonI_Scalar3Same<bit u, bits<2> size, bits<5> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = 0b1;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b11110;
+ let Inst{23-22} = size;
+ let Inst{21} = 0b1;
+ // Inherit Rm in 20-16
+ let Inst{15-11} = opcode;
+ let Inst{10} = 0b1;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+
+// Format AdvSIMD 2 vector registers miscellaneous
+class NeonI_2VMisc<bit q, bit u, bits<2> size, bits<5> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD 2 vector 1 immediate shift
+class NeonI_2VShiftImm<bit q, bit u, bits<5> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
+ bits<7> Imm;
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29} = u;
+ let Inst{28-23} = 0b011110;
+ let Inst{22-16} = Imm;
+ let Inst{15-11} = opcode;
+ let Inst{10} = 0b1;
+
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD duplicate and insert
+class NeonI_copy<bit q, bit op, bits<4> imm4,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
+ bits<5> Imm5;
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29} = op;
+ let Inst{28-21} = 0b01110000;
+ let Inst{20-16} = Imm5;
+ let Inst{15} = 0b0;
+ let Inst{14-11} = imm4;
+ let Inst{10} = 0b1;
+
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+// Format AdvSIMD insert from element to vector
+class NeonI_insert<bit q, bit op,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
+ bits<5> Imm5;
+ bits<4> Imm4;
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29} = op;
+ let Inst{28-21} = 0b01110000;
+ let Inst{20-16} = Imm5;
+ let Inst{15} = 0b0;
+ let Inst{14-11} = Imm4;
+ let Inst{10} = 0b1;
+
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD scalar pairwise
+class NeonI_ScalarPair<bit u, bits<2> size, bits<5> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = 0b1;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b11110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b11000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD 2 vector across lanes
+class NeonI_2VAcross<bit q, bit u, bits<2> size, bits<5> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin>
+{
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b11000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD scalar two registers miscellaneous
+class NeonI_Scalar2SameMisc<bit u, bits<2> size, bits<5> opcode, dag outs, dag ins,
+ string asmstr, list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = 0b1;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b11110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD vector load/store multiple N-element structure
+class NeonI_LdStMult<bit q, bit l, bits<4> opcode, bits<2> size,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRtn<outs, ins, asmstr, patterns, itin>
+{
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29-23} = 0b0011000;
+ let Inst{22} = l;
+ let Inst{21-16} = 0b000000;
+ let Inst{15-12} = opcode;
+ let Inst{11-10} = size;
+
+ // Inherit Rn in 9-5
+ // Inherit Rt in 4-0
+}
+
+// Format AdvSIMD vector load/store multiple N-element structure (post-index)
+class NeonI_LdStMult_Post<bit q, bit l, bits<4> opcode, bits<2> size,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRtnm<outs, ins, asmstr, patterns, itin>
+{
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29-23} = 0b0011001;
+ let Inst{22} = l;
+ let Inst{21} = 0b0;
+ // Inherit Rm in 20-16
+ let Inst{15-12} = opcode;
+ let Inst{11-10} = size;
+ // Inherit Rn in 9-5
+ // Inherit Rt in 4-0
+}
+
+// Format AdvSIMD vector load Single N-element structure to all lanes
+class NeonI_LdOne_Dup<bit q, bit r, bits<3> opcode, bits<2> size, dag outs,
+ dag ins, string asmstr, list<dag> patterns,
+ InstrItinClass itin>
+ : A64InstRtn<outs, ins, asmstr, patterns, itin>
+{
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29-23} = 0b0011010;
+ let Inst{22} = 0b1;
+ let Inst{21} = r;
+ let Inst{20-16} = 0b00000;
+ let Inst{15-13} = opcode;
+ let Inst{12} = 0b0;
+ let Inst{11-10} = size;
+
+ // Inherit Rn in 9-5
+ // Inherit Rt in 4-0
+}
+
+// Format AdvSIMD vector load/store Single N-element structure to/from one lane
+class NeonI_LdStOne_Lane<bit l, bit r, bits<2> op2_1, bit op0, dag outs,
+ dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRtn<outs, ins, asmstr, patterns, itin>
+{
+ bits<4> lane;
+ let Inst{31} = 0b0;
+ let Inst{29-23} = 0b0011010;
+ let Inst{22} = l;
+ let Inst{21} = r;
+ let Inst{20-16} = 0b00000;
+ let Inst{15-14} = op2_1;
+ let Inst{13} = op0;
+
+ // Inherit Rn in 9-5
+ // Inherit Rt in 4-0
+}
+
+// Format AdvSIMD post-index vector load Single N-element structure to all lanes
+class NeonI_LdOne_Dup_Post<bit q, bit r, bits<3> opcode, bits<2> size, dag outs,
+ dag ins, string asmstr, list<dag> patterns,
+ InstrItinClass itin>
+ : A64InstRtnm<outs, ins, asmstr, patterns, itin>
+{
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29-23} = 0b0011011;
+ let Inst{22} = 0b1;
+ let Inst{21} = r;
+ // Inherit Rm in 20-16
+ let Inst{15-13} = opcode;
+ let Inst{12} = 0b0;
+ let Inst{11-10} = size;
+
+ // Inherit Rn in 9-5
+ // Inherit Rt in 4-0
+}
+
+// Format AdvSIMD post-index vector load/store Single N-element structure
+// to/from one lane
+class NeonI_LdStOne_Lane_Post<bit l, bit r, bits<2> op2_1, bit op0, dag outs,
+ dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRtnm<outs, ins, asmstr, patterns, itin>
+{
+ bits<4> lane;
+ let Inst{31} = 0b0;
+ let Inst{29-23} = 0b0011011;
+ let Inst{22} = l;
+ let Inst{21} = r;
+ // Inherit Rm in 20-16
+ let Inst{15-14} = op2_1;
+ let Inst{13} = op0;
+
+ // Inherit Rn in 9-5
+ // Inherit Rt in 4-0
+}
+
+// Format AdvSIMD 3 scalar registers with different type
+
+class NeonI_Scalar3Diff<bit u, bits<2> size, bits<4> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
+ let Inst{31-30} = 0b01;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b11110;
+ let Inst{23-22} = size;
+ let Inst{21} = 0b1;
+ // Inherit Rm in 20-16
+ let Inst{15-12} = opcode;
+ let Inst{11-10} = 0b00;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD scalar shift by immediate
+
+class NeonI_ScalarShiftImm<bit u, bits<5> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
+ bits<4> Imm4;
+ bits<3> Imm3;
+ let Inst{31-30} = 0b01;
+ let Inst{29} = u;
+ let Inst{28-23} = 0b111110;
+ let Inst{22-19} = Imm4;
+ let Inst{18-16} = Imm3;
+ let Inst{15-11} = opcode;
+ let Inst{10} = 0b1;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD crypto AES
+class NeonI_Crypto_AES<bits<2> size, bits<5> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
+ let Inst{31-24} = 0b01001110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b10100;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD crypto SHA
+class NeonI_Crypto_SHA<bits<2> size, bits<5> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
+ let Inst{31-24} = 0b01011110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b10100;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD crypto 3V SHA
+class NeonI_Crypto_3VSHA<bits<2> size, bits<3> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
+ let Inst{31-24} = 0b01011110;
+ let Inst{23-22} = size;
+ let Inst{21} = 0b0;
+ // Inherit Rm in 20-16
+ let Inst{15} = 0b0;
+ let Inst{14-12} = opcode;
+ let Inst{11-10} = 0b00;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
+// Format AdvSIMD scalar x indexed element
+class NeonI_ScalarXIndexedElem<bit u, bit szhi, bit szlo,
+ bits<4> opcode, dag outs, dag ins,
+ string asmstr, list<dag> patterns,
+ InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin>
+{
+ let Inst{31} = 0b0;
+ let Inst{30} = 0b1;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b11111;
+ let Inst{23} = szhi;
+ let Inst{22} = szlo;
+ // l in Inst{21}
+  // m in Inst{20}
+ // Inherit Rm in 19-16
+ let Inst{15-12} = opcode;
+ // h in Inst{11}
+ let Inst{10} = 0b0;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+// Format AdvSIMD scalar copy - insert from element to scalar
+class NeonI_ScalarCopy<dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : NeonI_copy<0b1, 0b0, 0b0000, outs, ins, asmstr, patterns, itin> {
+ let Inst{28} = 0b1;
+}
+}
+
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index cf3a2c3707d9..180110a84dd6 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -29,14 +29,14 @@
#include <algorithm>
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"
using namespace llvm;
AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
: AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
- RI(*this, STI), Subtarget(STI) {}
+ Subtarget(STI) {}
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
@@ -68,43 +68,71 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
BuildMI(MBB, I, DL, get(AArch64::MRSxi), DestReg)
.addImm(A64SysReg::NZCV);
} else if (AArch64::GPR64RegClass.contains(DestReg)) {
- assert(AArch64::GPR64RegClass.contains(SrcReg));
- Opc = AArch64::ORRxxx_lsl;
- ZeroReg = AArch64::XZR;
+ if(AArch64::GPR64RegClass.contains(SrcReg)){
+ Opc = AArch64::ORRxxx_lsl;
+ ZeroReg = AArch64::XZR;
+ } else{
+ assert(AArch64::FPR64RegClass.contains(SrcReg));
+ BuildMI(MBB, I, DL, get(AArch64::FMOVxd), DestReg)
+ .addReg(SrcReg);
+ return;
+ }
} else if (AArch64::GPR32RegClass.contains(DestReg)) {
- assert(AArch64::GPR32RegClass.contains(SrcReg));
- Opc = AArch64::ORRwww_lsl;
- ZeroReg = AArch64::WZR;
+ if(AArch64::GPR32RegClass.contains(SrcReg)){
+ Opc = AArch64::ORRwww_lsl;
+ ZeroReg = AArch64::WZR;
+ } else{
+ assert(AArch64::FPR32RegClass.contains(SrcReg));
+ BuildMI(MBB, I, DL, get(AArch64::FMOVws), DestReg)
+ .addReg(SrcReg);
+ return;
+ }
} else if (AArch64::FPR32RegClass.contains(DestReg)) {
- assert(AArch64::FPR32RegClass.contains(SrcReg));
- BuildMI(MBB, I, DL, get(AArch64::FMOVss), DestReg)
- .addReg(SrcReg);
- return;
+ if(AArch64::FPR32RegClass.contains(SrcReg)){
+ BuildMI(MBB, I, DL, get(AArch64::FMOVss), DestReg)
+ .addReg(SrcReg);
+ return;
+ }
+ else {
+ assert(AArch64::GPR32RegClass.contains(SrcReg));
+ BuildMI(MBB, I, DL, get(AArch64::FMOVsw), DestReg)
+ .addReg(SrcReg);
+ return;
+ }
} else if (AArch64::FPR64RegClass.contains(DestReg)) {
- assert(AArch64::FPR64RegClass.contains(SrcReg));
- BuildMI(MBB, I, DL, get(AArch64::FMOVdd), DestReg)
- .addReg(SrcReg);
- return;
+ if(AArch64::FPR64RegClass.contains(SrcReg)){
+ BuildMI(MBB, I, DL, get(AArch64::FMOVdd), DestReg)
+ .addReg(SrcReg);
+ return;
+ }
+ else {
+ assert(AArch64::GPR64RegClass.contains(SrcReg));
+ BuildMI(MBB, I, DL, get(AArch64::FMOVdx), DestReg)
+ .addReg(SrcReg);
+ return;
+ }
} else if (AArch64::FPR128RegClass.contains(DestReg)) {
assert(AArch64::FPR128RegClass.contains(SrcReg));
- // FIXME: there's no good way to do this, at least without NEON:
- // + There's no single move instruction for q-registers
- // + We can't create a spill slot and use normal STR/LDR because stack
- // allocation has already happened
- // + We can't go via X-registers with FMOV because register allocation has
- // already happened.
- // This may not be efficient, but at least it works.
- BuildMI(MBB, I, DL, get(AArch64::LSFP128_PreInd_STR), AArch64::XSP)
- .addReg(SrcReg)
- .addReg(AArch64::XSP)
- .addImm(0x1ff & -16);
-
- BuildMI(MBB, I, DL, get(AArch64::LSFP128_PostInd_LDR), DestReg)
- .addReg(AArch64::XSP, RegState::Define)
- .addReg(AArch64::XSP)
- .addImm(16);
- return;
+    // If NEON is enabled, we use ORR to implement this copy.
+    // If NEON isn't available, emit STR and LDR to handle it.
+ if(getSubTarget().hasNEON()) {
+ BuildMI(MBB, I, DL, get(AArch64::ORRvvv_16B), DestReg)
+ .addReg(SrcReg)
+ .addReg(SrcReg);
+ return;
+ } else {
+ BuildMI(MBB, I, DL, get(AArch64::LSFP128_PreInd_STR), AArch64::XSP)
+ .addReg(SrcReg)
+ .addReg(AArch64::XSP)
+ .addImm(0x1ff & -16);
+
+ BuildMI(MBB, I, DL, get(AArch64::LSFP128_PostInd_LDR), DestReg)
+ .addReg(AArch64::XSP, RegState::Define)
+ .addReg(AArch64::XSP)
+ .addImm(16);
+ return;
+ }
} else {
llvm_unreachable("Unknown register class in copyPhysReg");
}
@@ -116,17 +144,6 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
.addImm(0);
}
-MachineInstr *
-AArch64InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
- uint64_t Offset, const MDNode *MDPtr,
- DebugLoc DL) const {
- MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
- .addFrameIndex(FrameIx).addImm(0)
- .addImm(Offset)
- .addMetadata(MDPtr);
- return &*MIB;
-}
-
/// Does the Opcode represent a conditional branch that we can remove and re-add
/// at the end of a basic block?
static bool isCondBranch(unsigned Opc) {
diff --git a/lib/Target/AArch64/AArch64InstrInfo.h b/lib/Target/AArch64/AArch64InstrInfo.h
index 22a2ab4cf60a..620ecc93b170 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/lib/Target/AArch64/AArch64InstrInfo.h
@@ -43,10 +43,6 @@ public:
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const;
- MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
- uint64_t Offset, const MDNode *MDPtr,
- DebugLoc DL) const;
-
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIndex,
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index d2cfc7db2232..23d81fc478e8 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -11,6 +11,19 @@
//
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// AArch64 Instruction Predicate Definitions.
+//
+def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">,
+ AssemblerPredicate<"FeatureFPARMv8", "fp-armv8">;
+def HasNEON : Predicate<"Subtarget->hasNEON()">,
+ AssemblerPredicate<"FeatureNEON", "neon">;
+def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
+ AssemblerPredicate<"FeatureCrypto","crypto">;
+
+// Use fused MAC if more precision in FP computation is allowed.
+def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion =="
+ " FPOpFusion::Fast)">;
include "AArch64InstrFormats.td"
//===----------------------------------------------------------------------===//
@@ -114,6 +127,8 @@ def A64Sbfx : SDNode<"AArch64ISD::SBFX", SDTA64BFX>;
def A64Ubfx : SDNode<"AArch64ISD::UBFX", SDTA64BFX>;
+class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
+
//===----------------------------------------------------------------------===//
// Call sequence pseudo-instructions
//===----------------------------------------------------------------------===//
@@ -1263,7 +1278,7 @@ def : Pat<(i64 (sext_inreg (anyext i32:$Rn), i1)),
// UBFX makes sense as an implementation of a 64-bit zero-extension too. Could
// use either 64-bit or 32-bit variant, but 32-bit might be more efficient.
-def : Pat<(zext i32:$Rn), (SUBREG_TO_REG (i64 0), (UBFXwwii $Rn, 0, 31),
+def : Pat<(i64 (zext i32:$Rn)), (SUBREG_TO_REG (i64 0), (UBFXwwii $Rn, 0, 31),
sub_32)>;
//===-------------------------------
@@ -1967,6 +1982,13 @@ def fpz64 : Operand<f64>,
let DecoderMethod = "DecodeFPZeroOperand";
}
+def fpz64movi : Operand<i64>,
+ ComplexPattern<f64, 1, "SelectFPZeroOperand", [fpimm]> {
+ let ParserMatchClass = fpzero_asmoperand;
+ let PrintMethod = "printFPZeroOperand";
+ let DecoderMethod = "DecodeFPZeroOperand";
+}
+
multiclass A64I_fpcmpSignal<bits<2> type, bit imm, dag ins, dag pattern> {
def _quiet : A64I_fpcmp<0b0, 0b0, type, 0b00, {0b0, imm, 0b0, 0b0, 0b0},
(outs), ins, "fcmp\t$Rn, $Rm", [pattern],
@@ -2173,6 +2195,29 @@ def FMSUBdddd : A64I_fpdp3Impl<"fmsub", FPR64, f64, 0b01, 0b0, 0b1, fmsub>;
def FNMADDdddd : A64I_fpdp3Impl<"fnmadd", FPR64, f64, 0b01, 0b1, 0b0, fnmadd>;
def FNMSUBdddd : A64I_fpdp3Impl<"fnmsub", FPR64, f64, 0b01, 0b1, 0b1, fnmsub>;
+// Extra patterns for when we're allowed to optimise separate multiplication and
+// addition.
+let Predicates = [HasFPARMv8, UseFusedMAC] in {
+def : Pat<(f32 (fadd FPR32:$Ra, (f32 (fmul FPR32:$Rn, FPR32:$Rm)))),
+ (FMADDssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
+def : Pat<(f32 (fsub FPR32:$Ra, (f32 (fmul FPR32:$Rn, FPR32:$Rm)))),
+ (FMSUBssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
+def : Pat<(f32 (fsub (f32 (fmul FPR32:$Rn, FPR32:$Rm)), FPR32:$Ra)),
+ (FNMADDssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
+def : Pat<(f32 (fsub (f32 (fneg FPR32:$Ra)), (f32 (fmul FPR32:$Rn, FPR32:$Rm)))),
+ (FNMSUBssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
+
+def : Pat<(f64 (fadd FPR64:$Ra, (f64 (fmul FPR64:$Rn, FPR64:$Rm)))),
+ (FMADDdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
+def : Pat<(f64 (fsub FPR64:$Ra, (f64 (fmul FPR64:$Rn, FPR64:$Rm)))),
+ (FMSUBdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
+def : Pat<(f64 (fsub (f64 (fmul FPR64:$Rn, FPR64:$Rm)), FPR64:$Ra)),
+ (FNMADDdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
+def : Pat<(f64 (fsub (f64 (fneg FPR64:$Ra)), (f64 (fmul FPR64:$Rn, FPR64:$Rm)))),
+ (FNMSUBdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
+}
+
+
//===----------------------------------------------------------------------===//
// Floating-point <-> fixed-point conversion instructions
//===----------------------------------------------------------------------===//
@@ -2308,6 +2353,7 @@ defm FCVTM : A64I_fptointRM<0b10, 0b0, "fcvtm">;
defm FCVTZ : A64I_fptointRM<0b11, 0b0, "fcvtz">;
defm FCVTA : A64I_fptointRM<0b00, 0b1, "fcvta">;
+let Predicates = [HasFPARMv8] in {
def : Pat<(i32 (fp_to_sint f32:$Rn)), (FCVTZSws $Rn)>;
def : Pat<(i64 (fp_to_sint f32:$Rn)), (FCVTZSxs $Rn)>;
def : Pat<(i32 (fp_to_uint f32:$Rn)), (FCVTZUws $Rn)>;
@@ -2316,6 +2362,7 @@ def : Pat<(i32 (fp_to_sint f64:$Rn)), (FCVTZSwd $Rn)>;
def : Pat<(i64 (fp_to_sint f64:$Rn)), (FCVTZSxd $Rn)>;
def : Pat<(i32 (fp_to_uint f64:$Rn)), (FCVTZUwd $Rn)>;
def : Pat<(i64 (fp_to_uint f64:$Rn)), (FCVTZUxd $Rn)>;
+}
multiclass A64I_inttofp<bit o0, string asmop> {
def CVTFsw : A64I_fpintI<0b0, 0b00, 0b00, {0, 1, o0}, FPR32, GPR32, asmop>;
@@ -2327,6 +2374,7 @@ multiclass A64I_inttofp<bit o0, string asmop> {
defm S : A64I_inttofp<0b0, "scvtf">;
defm U : A64I_inttofp<0b1, "ucvtf">;
+let Predicates = [HasFPARMv8] in {
def : Pat<(f32 (sint_to_fp i32:$Rn)), (SCVTFsw $Rn)>;
def : Pat<(f32 (sint_to_fp i64:$Rn)), (SCVTFsx $Rn)>;
def : Pat<(f64 (sint_to_fp i32:$Rn)), (SCVTFdw $Rn)>;
@@ -2335,16 +2383,19 @@ def : Pat<(f32 (uint_to_fp i32:$Rn)), (UCVTFsw $Rn)>;
def : Pat<(f32 (uint_to_fp i64:$Rn)), (UCVTFsx $Rn)>;
def : Pat<(f64 (uint_to_fp i32:$Rn)), (UCVTFdw $Rn)>;
def : Pat<(f64 (uint_to_fp i64:$Rn)), (UCVTFdx $Rn)>;
+}
def FMOVws : A64I_fpintI<0b0, 0b00, 0b00, 0b110, GPR32, FPR32, "fmov">;
def FMOVsw : A64I_fpintI<0b0, 0b00, 0b00, 0b111, FPR32, GPR32, "fmov">;
def FMOVxd : A64I_fpintI<0b1, 0b01, 0b00, 0b110, GPR64, FPR64, "fmov">;
def FMOVdx : A64I_fpintI<0b1, 0b01, 0b00, 0b111, FPR64, GPR64, "fmov">;
+let Predicates = [HasFPARMv8] in {
def : Pat<(i32 (bitconvert f32:$Rn)), (FMOVws $Rn)>;
def : Pat<(f32 (bitconvert i32:$Rn)), (FMOVsw $Rn)>;
def : Pat<(i64 (bitconvert f64:$Rn)), (FMOVxd $Rn)>;
def : Pat<(f64 (bitconvert i64:$Rn)), (FMOVdx $Rn)>;
+}
def lane1_asmoperand : AsmOperandClass {
let Name = "Lane1";
@@ -2367,11 +2418,13 @@ let DecoderMethod = "DecodeFMOVLaneInstruction" in {
"fmov\t$Rd.d[$Lane], $Rn", [], NoItinerary>;
}
+let Predicates = [HasFPARMv8] in {
def : InstAlias<"fmov $Rd, $Rn.2d[$Lane]",
(FMOVxv GPR64:$Rd, VPR128:$Rn, lane1:$Lane), 0b0>;
def : InstAlias<"fmov $Rd.2d[$Lane], $Rn",
(FMOVvx VPR128:$Rd, GPR64:$Rn, lane1:$Lane), 0b0>;
+}
//===----------------------------------------------------------------------===//
// Floating-point immediate instructions
@@ -2465,11 +2518,15 @@ let mayLoad = 1 in {
def LDRx_lit : A64I_LDRlitSimple<0b01, 0b0, GPR64>;
}
+let Predicates = [HasFPARMv8] in {
def LDRs_lit : A64I_LDRlitSimple<0b00, 0b1, FPR32>;
def LDRd_lit : A64I_LDRlitSimple<0b01, 0b1, FPR64>;
+}
let mayLoad = 1 in {
+ let Predicates = [HasFPARMv8] in {
def LDRq_lit : A64I_LDRlitSimple<0b10, 0b1, FPR128>;
+ }
def LDRSWx_lit : A64I_LDRlit<0b10, 0b0,
@@ -3063,6 +3120,7 @@ defm LS32
defm LS64
: A64I_LDRSTR_unsigned<"LS64", 0b11, 0b0, 0b0, "", GPR64, dword_addrparams>;
+let Predicates = [HasFPARMv8] in {
// STR/LDR to/from a B register
defm LSFP8
: A64I_LDRSTR_unsigned<"LSFP8", 0b00, 0b1, 0b0, "", FPR8, byte_addrparams>;
@@ -3081,6 +3139,7 @@ defm LSFP64
defm LSFP128
: A64I_LDRSTR_unsigned<"LSFP128", 0b00, 0b1, 0b1, "", FPR128,
qword_addrparams>;
+}
//===------------------------------
// 2.3 Signed loads
@@ -3536,10 +3595,13 @@ multiclass A64I_LSPsimple<bits<2> opc, bit v, RegisterClass SomeReg,
defm LSPair32 : A64I_LSPsimple<0b00, 0b0, GPR32, word_simm7, "LSPair32">;
defm LSPair64 : A64I_LSPsimple<0b10, 0b0, GPR64, dword_simm7, "LSPair64">;
+
+let Predicates = [HasFPARMv8] in {
defm LSFPPair32 : A64I_LSPsimple<0b00, 0b1, FPR32, word_simm7, "LSFPPair32">;
defm LSFPPair64 : A64I_LSPsimple<0b01, 0b1, FPR64, dword_simm7, "LSFPPair64">;
defm LSFPPair128 : A64I_LSPsimple<0b10, 0b1, FPR128, qword_simm7,
"LSFPPair128">;
+}
def LDPSWx : A64I_LSPoffset<0b01, 0b0, 0b1,
@@ -3974,14 +4036,17 @@ def : movalias<MOVZxii, GPR64, movz64_movimm>;
def : movalias<MOVNwii, GPR32, movn32_movimm>;
def : movalias<MOVNxii, GPR64, movn64_movimm>;
-def movw_addressref : ComplexPattern<i64, 2, "SelectMOVWAddressRef">;
+def movw_addressref_g0 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<0>">;
+def movw_addressref_g1 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<1>">;
+def movw_addressref_g2 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<2>">;
+def movw_addressref_g3 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<3>">;
-def : Pat<(A64WrapperLarge movw_addressref:$G3, movw_addressref:$G2,
- movw_addressref:$G1, movw_addressref:$G0),
- (MOVKxii (MOVKxii (MOVKxii (MOVZxii movw_addressref:$G3),
- movw_addressref:$G2),
- movw_addressref:$G1),
- movw_addressref:$G0)>;
+def : Pat<(A64WrapperLarge movw_addressref_g3:$G3, movw_addressref_g2:$G2,
+ movw_addressref_g1:$G1, movw_addressref_g0:$G0),
+ (MOVKxii (MOVKxii (MOVKxii (MOVZxii movw_addressref_g3:$G3),
+ movw_addressref_g2:$G2),
+ movw_addressref_g1:$G1),
+ movw_addressref_g0:$G0)>;
//===----------------------------------------------------------------------===//
// PC-relative addressing instructions
@@ -5120,3 +5185,9 @@ defm : regoff_pats<"Xm", (add i64:$Rn, i64:$Rm),
defm : regoff_pats<"Xm", (add i64:$Rn, (shl i64:$Rm, SHIFT)),
(i64 i64:$Rn), (i64 i64:$Rm), (i64 3)>;
+
+//===----------------------------------------------------------------------===//
+// Advanced SIMD (NEON) Support
+//
+
+include "AArch64InstrNEON.td"
diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
new file mode 100644
index 000000000000..d71749d0e61b
--- /dev/null
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -0,0 +1,8671 @@
+//===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the AArch64 NEON instruction set.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// NEON-specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+def Neon_bsl : SDNode<"AArch64ISD::NEON_BSL", SDTypeProfile<1, 3,
+ [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisSameAs<0, 3>]>>;
+
+// (outs Result), (ins Imm, OpCmode)
+def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
+
+def Neon_movi : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;
+
+def Neon_mvni : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;
+
+// (outs Result), (ins Imm)
+def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
+ [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
+
+// (outs Result), (ins LHS, RHS, CondCode)
+def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
+ [SDTCisVec<0>, SDTCisSameAs<1, 2>]>>;
+
+// (outs Result), (ins LHS, 0/0.0 constant, CondCode)
+def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
+ [SDTCisVec<0>, SDTCisVec<1>]>>;
+
+// (outs Result), (ins LHS, RHS)
+def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
+ [SDTCisVec<0>, SDTCisSameAs<1, 2>]>>;
+
+def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
+ SDTCisVT<2, i32>]>;
+def Neon_sqrshlImm : SDNode<"AArch64ISD::NEON_QSHLs", SDTARMVSH>;
+def Neon_uqrshlImm : SDNode<"AArch64ISD::NEON_QSHLu", SDTARMVSH>;
+
+def SDTPERMUTE : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>]>;
+def Neon_uzp1 : SDNode<"AArch64ISD::NEON_UZP1", SDTPERMUTE>;
+def Neon_uzp2 : SDNode<"AArch64ISD::NEON_UZP2", SDTPERMUTE>;
+def Neon_zip1 : SDNode<"AArch64ISD::NEON_ZIP1", SDTPERMUTE>;
+def Neon_zip2 : SDNode<"AArch64ISD::NEON_ZIP2", SDTPERMUTE>;
+def Neon_trn1 : SDNode<"AArch64ISD::NEON_TRN1", SDTPERMUTE>;
+def Neon_trn2 : SDNode<"AArch64ISD::NEON_TRN2", SDTPERMUTE>;
+
+def SDTVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
+def Neon_rev64 : SDNode<"AArch64ISD::NEON_REV64", SDTVSHUF>;
+def Neon_rev32 : SDNode<"AArch64ISD::NEON_REV32", SDTVSHUF>;
+def Neon_rev16 : SDNode<"AArch64ISD::NEON_REV16", SDTVSHUF>;
+def Neon_vdup : SDNode<"AArch64ISD::NEON_VDUP", SDTypeProfile<1, 1,
+ [SDTCisVec<0>]>>;
+def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
+ [SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
+def Neon_vextract : SDNode<"AArch64ISD::NEON_VEXTRACT", SDTypeProfile<1, 3,
+ [SDTCisVec<0>, SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>, SDTCisVT<3, i64>]>>;
+
+def SDT_assertext : SDTypeProfile<1, 1,
+ [SDTCisInt<0>, SDTCisInt<1>, SDTCisSameAs<1, 0>]>;
+def assertsext : SDNode<"ISD::AssertSext", SDT_assertext>;
+def assertzext : SDNode<"ISD::AssertZext", SDT_assertext>;
+
+//===----------------------------------------------------------------------===//
+// Multiclasses
+//===----------------------------------------------------------------------===//
+
+multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size, bits<5> opcode,
+ string asmop, SDPatternOperator opnode8B,
+ SDPatternOperator opnode16B,
+ bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8B : NeonI_3VSame<0b0, u, size, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
+ asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
+ [(set (v8i8 VPR64:$Rd),
+ (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
+ NoItinerary>;
+
+ def _16B : NeonI_3VSame<0b1, u, size, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
+ [(set (v16i8 VPR128:$Rd),
+ (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
+ NoItinerary>;
+ }
+
+}
+
+multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
+ string asmop, SDPatternOperator opnode,
+ bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
+ asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
+ [(set (v4i16 VPR64:$Rd),
+ (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
+ NoItinerary>;
+
+ def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
+ [(set (v8i16 VPR128:$Rd),
+ (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
+ NoItinerary>;
+
+ def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
+ asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
+ [(set (v2i32 VPR64:$Rd),
+ (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
+ NoItinerary>;
+
+ def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
+ NoItinerary>;
+ }
+}
+multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
+ string asmop, SDPatternOperator opnode,
+ bit Commutable = 0>
+ : NeonI_3VSame_HS_sizes<u, opcode, asmop, opnode, Commutable> {
+ let isCommutable = Commutable in {
+ def _8B : NeonI_3VSame<0b0, u, 0b00, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
+ asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
+ [(set (v8i8 VPR64:$Rd),
+ (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
+ NoItinerary>;
+
+ def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
+ [(set (v16i8 VPR128:$Rd),
+ (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
+ NoItinerary>;
+ }
+}
+
+multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
+ string asmop, SDPatternOperator opnode,
+ bit Commutable = 0>
+ : NeonI_3VSame_BHS_sizes<u, opcode, asmop, opnode, Commutable> {
+ let isCommutable = Commutable in {
+ def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
+ [(set (v2i64 VPR128:$Rd),
+ (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
+ NoItinerary>;
+ }
+}
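+
+// For example, "defm ADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add",
+// add, 1>" further below expands through this multiclass hierarchy into the
+// ADDvvv_8B, _16B, _4H, _8H, _2S, _4S and _2D instruction definitions.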
+
+// Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types,
+// but Result types can be integer or floating point types.
+multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
+ string asmop, SDPatternOperator opnode2S,
+ SDPatternOperator opnode4S,
+ SDPatternOperator opnode2D,
+ ValueType ResTy2S, ValueType ResTy4S,
+ ValueType ResTy2D, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
+ asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
+ [(set (ResTy2S VPR64:$Rd),
+ (ResTy2S (opnode2S (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
+ NoItinerary>;
+
+ def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
+ [(set (ResTy4S VPR128:$Rd),
+ (ResTy4S (opnode4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
+ NoItinerary>;
+
+ def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
+ [(set (ResTy2D VPR128:$Rd),
+ (ResTy2D (opnode2D (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
+ NoItinerary>;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction Definitions
+//===----------------------------------------------------------------------===//
+
+// Vector Arithmetic Instructions
+
+// Vector Add (Integer and Floating-Point)
+
+defm ADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
+defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd, fadd, fadd,
+ v2f32, v4f32, v2f64, 1>;
+
+// Vector Sub (Integer and Floating-Point)
+
+defm SUBvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
+defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub, fsub, fsub,
+ v2f32, v4f32, v2f64, 0>;
+
+// Vector Multiply (Integer and Floating-Point)
+
+defm MULvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
+defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul, fmul, fmul,
+ v2f32, v4f32, v2f64, 1>;
+
+// Vector Multiply (Polynomial)
+
+defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
+ int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
+
+// Vector Multiply-accumulate and Multiply-subtract (Integer)
+
+// class NeonI_3VSame_Constraint_impl: NeonI_3VSame specialized for a single
+// data type, with the accumulator source tied to the destination ($src = $Rd).
+class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
+ RegisterOperand VPRC, ValueType OpTy, bit q, bit u, bits<2> size,
+ bits<5> opcode, SDPatternOperator opnode>
+ : NeonI_3VSame<q, u, size, opcode,
+ (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
+ asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
+ [(set (OpTy VPRC:$Rd),
+ (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+}
+
+def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
+ (add node:$Ra, (mul node:$Rn, node:$Rm))>;
+
+def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
+ (sub node:$Ra, (mul node:$Rn, node:$Rm))>;
+
+
+def MLAvvv_8B: NeonI_3VSame_Constraint_impl<"mla", ".8b", VPR64, v8i8,
+ 0b0, 0b0, 0b00, 0b10010, Neon_mla>;
+def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
+ 0b1, 0b0, 0b00, 0b10010, Neon_mla>;
+def MLAvvv_4H: NeonI_3VSame_Constraint_impl<"mla", ".4h", VPR64, v4i16,
+ 0b0, 0b0, 0b01, 0b10010, Neon_mla>;
+def MLAvvv_8H: NeonI_3VSame_Constraint_impl<"mla", ".8h", VPR128, v8i16,
+ 0b1, 0b0, 0b01, 0b10010, Neon_mla>;
+def MLAvvv_2S: NeonI_3VSame_Constraint_impl<"mla", ".2s", VPR64, v2i32,
+ 0b0, 0b0, 0b10, 0b10010, Neon_mla>;
+def MLAvvv_4S: NeonI_3VSame_Constraint_impl<"mla", ".4s", VPR128, v4i32,
+ 0b1, 0b0, 0b10, 0b10010, Neon_mla>;
+
+def MLSvvv_8B: NeonI_3VSame_Constraint_impl<"mls", ".8b", VPR64, v8i8,
+ 0b0, 0b1, 0b00, 0b10010, Neon_mls>;
+def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
+ 0b1, 0b1, 0b00, 0b10010, Neon_mls>;
+def MLSvvv_4H: NeonI_3VSame_Constraint_impl<"mls", ".4h", VPR64, v4i16,
+ 0b0, 0b1, 0b01, 0b10010, Neon_mls>;
+def MLSvvv_8H: NeonI_3VSame_Constraint_impl<"mls", ".8h", VPR128, v8i16,
+ 0b1, 0b1, 0b01, 0b10010, Neon_mls>;
+def MLSvvv_2S: NeonI_3VSame_Constraint_impl<"mls", ".2s", VPR64, v2i32,
+ 0b0, 0b1, 0b10, 0b10010, Neon_mls>;
+def MLSvvv_4S: NeonI_3VSame_Constraint_impl<"mls", ".4s", VPR128, v4i32,
+ 0b1, 0b1, 0b10, 0b10010, Neon_mls>;
+
+// Vector Multiply-accumulate and Multiply-subtract (Floating Point)
+
+def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
+ (fadd node:$Ra, (fmul node:$Rn, node:$Rm))>;
+
+def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
+ (fsub node:$Ra, (fmul node:$Rn, node:$Rm))>;
+
+let Predicates = [HasNEON, UseFusedMAC] in {
+def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s", VPR64, v2f32,
+ 0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
+def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s", VPR128, v4f32,
+ 0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
+def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d", VPR128, v2f64,
+ 0b1, 0b0, 0b01, 0b11001, Neon_fmla>;
+
+def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s", VPR64, v2f32,
+ 0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
+def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s", VPR128, v4f32,
+ 0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
+def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d", VPR128, v2f64,
+ 0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
+}
+
+// We're also allowed to match the fma instruction regardless of compile
+// options.
+def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
+ (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
+def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
+ (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
+def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
+ (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
+
+def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
+ (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
+def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
+ (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
+def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
+ (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
+
+// Vector Divide (Floating-Point)
+
+defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv, fdiv, fdiv,
+ v2f32, v4f32, v2f64, 0>;
+
+// Vector Bitwise Operations
+
+// Vector Bitwise AND
+
+defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;
+
+// Vector Bitwise Exclusive OR
+
+defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;
+
+// Vector Bitwise OR
+
+defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
+
+// ORR is disassembled as MOV when Vn == Vm.
+
+// Vector Move - register
+// Alias for ORR when Vn == Vm.
+// FIXME: This is actually the preferred syntax but TableGen can't deal with
+// custom printing of aliases.
+def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
+ (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn), 0>;
+def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
+ (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn), 0>;
+
+// The MOVI instruction takes two immediate operands. The first is the
+// immediate encoding, while the second is the cmode. A cmode of 14, or
+// 0b1110, produces a MOVI operation, rather than a MVNI, ORR, or BIC.
+def Neon_AllZero : PatFrag<(ops), (Neon_movi (i32 0), (i32 14))>;
+def Neon_AllOne : PatFrag<(ops), (Neon_movi (i32 255), (i32 14))>;
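+// With cmode 14 the 8-bit immediate is replicated into every byte, so these
+// two fragments materialize the all-zeros and all-ones vectors used (via
+// bitconvert) by the NOT/ORN/BIC fragments below.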
+
+def Neon_not8B : PatFrag<(ops node:$in),
+ (xor node:$in, (bitconvert (v8i8 Neon_AllOne)))>;
+def Neon_not16B : PatFrag<(ops node:$in),
+ (xor node:$in, (bitconvert (v16i8 Neon_AllOne)))>;
+
+def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm),
+ (or node:$Rn, (Neon_not8B node:$Rm))>;
+
+def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
+ (or node:$Rn, (Neon_not16B node:$Rm))>;
+
+def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm),
+ (and node:$Rn, (Neon_not8B node:$Rm))>;
+
+def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
+ (and node:$Rn, (Neon_not16B node:$Rm))>;
+
+
+// Vector Bitwise OR NOT - register
+
+defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
+ Neon_orn8B, Neon_orn16B, 0>;
+
+// Vector Bitwise Bit Clear (AND NOT) - register
+
+defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
+ Neon_bic8B, Neon_bic16B, 0>;
+
+multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
+ SDPatternOperator opnode16B,
+ Instruction INST8B,
+ Instruction INST16B> {
+ def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
+ (INST8B VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
+ (INST8B VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
+ (INST8B VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
+ (INST16B VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
+ (INST16B VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
+ (INST16B VPR128:$Rn, VPR128:$Rm)>;
+}
+
+// Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
+defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
+defm : Neon_bitwise2V_patterns<or, or, ORRvvv_8B, ORRvvv_16B>;
+defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
+defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
+defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;
+
+// Vector Bitwise Select
+def BSLvvv_8B : NeonI_3VSame_Constraint_impl<"bsl", ".8b", VPR64, v8i8,
+ 0b0, 0b1, 0b01, 0b00011, Neon_bsl>;
+
+def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
+ 0b1, 0b1, 0b01, 0b00011, Neon_bsl>;
+
+multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
+ Instruction INST8B,
+ Instruction INST16B> {
+ // Disassociate type from instruction definition
+ def : Pat<(v2i32 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
+ (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v4i16 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
+ (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v1i64 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
+ (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v4i32 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
+ (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v8i16 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
+ (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v2i64 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
+ (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
+
+ // Allow matching the BSL instruction pattern with a non-constant operand.
+ def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
+ (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
+ (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
+ (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
+ (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
+ (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
+ (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
+ (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
+ (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
+ (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
+ (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
+ (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
+ (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
+ (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
+ (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
+ (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
+ (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
+
+ // Allow matching llvm.arm.* intrinsics.
+ def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
+ (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
+ (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
+ (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
+ (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
+ (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
+ (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
+ (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
+ (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
+ (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
+ (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v1f64 (int_arm_neon_vbsl (v1f64 VPR64:$src),
+ (v1f64 VPR64:$Rn), (v1f64 VPR64:$Rm))),
+ (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
+ def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
+ (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
+ (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
+ (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
+ (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
+ (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
+ (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
+ (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
+ (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
+ (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
+ (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
+ def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
+ (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
+ (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
+}
+
+// Additional patterns for bitwise instruction BSL
+defm : Neon_bitwise3V_patterns<Neon_bsl, BSLvvv_8B, BSLvvv_16B>;
+
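+// Neon_NoBSLop never matches (its predicate always returns false); it exists
+// only so that the BIT/BIF definitions below get an instruction encoding
+// without a selection pattern.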
+def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
+ (Neon_bsl node:$src, node:$Rn, node:$Rm),
+ [{ (void)N; return false; }]>;
+
+// Vector Bitwise Insert if True
+
+def BITvvv_8B : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64, v8i8,
+ 0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
+def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
+ 0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
+
+// Vector Bitwise Insert if False
+
+def BIFvvv_8B : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64, v8i8,
+ 0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
+def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
+ 0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
+
+// Vector Absolute Difference and Accumulate (Signed, Unsigned)
+
+def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
+ (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
+def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
+ (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;
+
+// Vector Absolute Difference and Accumulate (Unsigned)
+def UABAvvv_8B : NeonI_3VSame_Constraint_impl<"uaba", ".8b", VPR64, v8i8,
+ 0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
+def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
+ 0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
+def UABAvvv_4H : NeonI_3VSame_Constraint_impl<"uaba", ".4h", VPR64, v4i16,
+ 0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
+def UABAvvv_8H : NeonI_3VSame_Constraint_impl<"uaba", ".8h", VPR128, v8i16,
+ 0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
+def UABAvvv_2S : NeonI_3VSame_Constraint_impl<"uaba", ".2s", VPR64, v2i32,
+ 0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
+def UABAvvv_4S : NeonI_3VSame_Constraint_impl<"uaba", ".4s", VPR128, v4i32,
+ 0b1, 0b1, 0b10, 0b01111, Neon_uaba>;
+
+// Vector Absolute Difference and Accumulate (Signed)
+def SABAvvv_8B : NeonI_3VSame_Constraint_impl<"saba", ".8b", VPR64, v8i8,
+ 0b0, 0b0, 0b00, 0b01111, Neon_saba>;
+def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
+ 0b1, 0b0, 0b00, 0b01111, Neon_saba>;
+def SABAvvv_4H : NeonI_3VSame_Constraint_impl<"saba", ".4h", VPR64, v4i16,
+ 0b0, 0b0, 0b01, 0b01111, Neon_saba>;
+def SABAvvv_8H : NeonI_3VSame_Constraint_impl<"saba", ".8h", VPR128, v8i16,
+ 0b1, 0b0, 0b01, 0b01111, Neon_saba>;
+def SABAvvv_2S : NeonI_3VSame_Constraint_impl<"saba", ".2s", VPR64, v2i32,
+ 0b0, 0b0, 0b10, 0b01111, Neon_saba>;
+def SABAvvv_4S : NeonI_3VSame_Constraint_impl<"saba", ".4s", VPR128, v4i32,
+ 0b1, 0b0, 0b10, 0b01111, Neon_saba>;
+
+
+// Vector Absolute Difference (Signed, Unsigned)
+defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
+defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;
+
+// Vector Absolute Difference (Floating Point)
+defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
+ int_arm_neon_vabds, int_arm_neon_vabds,
+ int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;
+
+// Vector Reciprocal Step (Floating Point)
+defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
+ int_arm_neon_vrecps, int_arm_neon_vrecps,
+ int_arm_neon_vrecps,
+ v2f32, v4f32, v2f64, 0>;
+
+// Vector Reciprocal Square Root Step (Floating Point)
+defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
+ int_arm_neon_vrsqrts,
+ int_arm_neon_vrsqrts,
+ int_arm_neon_vrsqrts,
+ v2f32, v4f32, v2f64, 0>;
+
+// Vector Comparisons
+
+def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs),
+ (Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
+def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
+ (Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
+def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs),
+ (Neon_cmp node:$lhs, node:$rhs, SETGE)>;
+def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs),
+ (Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
+def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
+ (Neon_cmp node:$lhs, node:$rhs, SETGT)>;
+
+// NeonI_compare_aliases class: swaps register operands to implement
+// comparison aliases, e.g., CMLE is an alias for CMGE with operands reversed.
+class NeonI_compare_aliases<string asmop, string asmlane,
+ Instruction inst, RegisterOperand VPRC>
+ : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
+ ", $Rm" # asmlane,
+ (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
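+// For example, "cmle v0.8b, v1.8b, v2.8b" is accepted by the assembler and
+// encoded as "cmge v0.8b, v2.8b, v1.8b"; the trailing 0b0 keeps the alias
+// parse-only, so the disassembler still prints the cmge form.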
+
+// Vector Comparisons (Integer)
+
+// Vector Compare Mask Equal (Integer)
+let isCommutable = 1 in {
+defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>;
+}
+
+// Vector Compare Mask Higher or Same (Unsigned Integer)
+defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;
+
+// Vector Compare Mask Greater Than or Equal (Integer)
+defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;
+
+// Vector Compare Mask Higher (Unsigned Integer)
+defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;
+
+// Vector Compare Mask Greater Than (Integer)
+defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;
+
+// Vector Compare Mask Bitwise Test (Integer)
+defm CMTSTvvv: NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
+
+// Vector Compare Mask Less or Same (Unsigned Integer)
+// CMLS is an alias for CMHS with operands reversed.
+def CMLSvvv_8B : NeonI_compare_aliases<"cmls", ".8b", CMHSvvv_8B, VPR64>;
+def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
+def CMLSvvv_4H : NeonI_compare_aliases<"cmls", ".4h", CMHSvvv_4H, VPR64>;
+def CMLSvvv_8H : NeonI_compare_aliases<"cmls", ".8h", CMHSvvv_8H, VPR128>;
+def CMLSvvv_2S : NeonI_compare_aliases<"cmls", ".2s", CMHSvvv_2S, VPR64>;
+def CMLSvvv_4S : NeonI_compare_aliases<"cmls", ".4s", CMHSvvv_4S, VPR128>;
+def CMLSvvv_2D : NeonI_compare_aliases<"cmls", ".2d", CMHSvvv_2D, VPR128>;
+
+// Vector Compare Mask Less Than or Equal (Integer)
+// CMLE is an alias for CMGE with operands reversed.
+def CMLEvvv_8B : NeonI_compare_aliases<"cmle", ".8b", CMGEvvv_8B, VPR64>;
+def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
+def CMLEvvv_4H : NeonI_compare_aliases<"cmle", ".4h", CMGEvvv_4H, VPR64>;
+def CMLEvvv_8H : NeonI_compare_aliases<"cmle", ".8h", CMGEvvv_8H, VPR128>;
+def CMLEvvv_2S : NeonI_compare_aliases<"cmle", ".2s", CMGEvvv_2S, VPR64>;
+def CMLEvvv_4S : NeonI_compare_aliases<"cmle", ".4s", CMGEvvv_4S, VPR128>;
+def CMLEvvv_2D : NeonI_compare_aliases<"cmle", ".2d", CMGEvvv_2D, VPR128>;
+
+// Vector Compare Mask Lower (Unsigned Integer)
+// CMLO is an alias for CMHI with operands reversed.
+def CMLOvvv_8B : NeonI_compare_aliases<"cmlo", ".8b", CMHIvvv_8B, VPR64>;
+def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
+def CMLOvvv_4H : NeonI_compare_aliases<"cmlo", ".4h", CMHIvvv_4H, VPR64>;
+def CMLOvvv_8H : NeonI_compare_aliases<"cmlo", ".8h", CMHIvvv_8H, VPR128>;
+def CMLOvvv_2S : NeonI_compare_aliases<"cmlo", ".2s", CMHIvvv_2S, VPR64>;
+def CMLOvvv_4S : NeonI_compare_aliases<"cmlo", ".4s", CMHIvvv_4S, VPR128>;
+def CMLOvvv_2D : NeonI_compare_aliases<"cmlo", ".2d", CMHIvvv_2D, VPR128>;
+
+// Vector Compare Mask Less Than (Integer)
+// CMLT is an alias for CMGT with operands reversed.
+def CMLTvvv_8B : NeonI_compare_aliases<"cmlt", ".8b", CMGTvvv_8B, VPR64>;
+def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
+def CMLTvvv_4H : NeonI_compare_aliases<"cmlt", ".4h", CMGTvvv_4H, VPR64>;
+def CMLTvvv_8H : NeonI_compare_aliases<"cmlt", ".8h", CMGTvvv_8H, VPR128>;
+def CMLTvvv_2S : NeonI_compare_aliases<"cmlt", ".2s", CMGTvvv_2S, VPR64>;
+def CMLTvvv_4S : NeonI_compare_aliases<"cmlt", ".4s", CMGTvvv_4S, VPR128>;
+def CMLTvvv_2D : NeonI_compare_aliases<"cmlt", ".2d", CMGTvvv_2D, VPR128>;
+
+
+def neon_uimm0_asmoperand : AsmOperandClass
+{
+ let Name = "UImm0";
+ let PredicateMethod = "isUImm<0>";
+ let RenderMethod = "addImmOperands";
+}
+
+def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
+ let ParserMatchClass = neon_uimm0_asmoperand;
+ let PrintMethod = "printNeonUImm0Operand";
+
+}
+
+multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC>
+{
+ def _8B : NeonI_2VMisc<0b0, u, 0b00, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
+ asmop # "\t$Rd.8b, $Rn.8b, $Imm",
+ [(set (v8i8 VPR64:$Rd),
+ (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
+ NoItinerary>;
+
+ def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
+ asmop # "\t$Rd.16b, $Rn.16b, $Imm",
+ [(set (v16i8 VPR128:$Rd),
+ (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
+ NoItinerary>;
+
+ def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
+ asmop # "\t$Rd.4h, $Rn.4h, $Imm",
+ [(set (v4i16 VPR64:$Rd),
+ (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
+ NoItinerary>;
+
+ def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
+ asmop # "\t$Rd.8h, $Rn.8h, $Imm",
+ [(set (v8i16 VPR128:$Rd),
+ (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
+ NoItinerary>;
+
+ def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
+ asmop # "\t$Rd.2s, $Rn.2s, $Imm",
+ [(set (v2i32 VPR64:$Rd),
+ (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
+ NoItinerary>;
+
+ def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
+ asmop # "\t$Rd.4s, $Rn.4s, $Imm",
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
+ NoItinerary>;
+
+ def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
+ asmop # "\t$Rd.2d, $Rn.2d, $Imm",
+ [(set (v2i64 VPR128:$Rd),
+ (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
+ NoItinerary>;
+}
+
+// Vector Compare Mask Equal to Zero (Integer)
+defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;
+
+// Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
+defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;
+
+// Vector Compare Mask Greater Than Zero (Signed Integer)
+defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;
+
+// Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
+defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;
+
+// Vector Compare Mask Less Than Zero (Signed Integer)
+defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
+
+// Vector Comparisons (Floating Point)
+
+// Vector Compare Mask Equal (Floating Point)
+let isCommutable = 1 in {
+defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
+ Neon_cmeq, Neon_cmeq,
+ v2i32, v4i32, v2i64, 0>;
+}
+
+// Vector Compare Mask Greater Than Or Equal (Floating Point)
+defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
+ Neon_cmge, Neon_cmge,
+ v2i32, v4i32, v2i64, 0>;
+
+// Vector Compare Mask Greater Than (Floating Point)
+defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
+ Neon_cmgt, Neon_cmgt,
+ v2i32, v4i32, v2i64, 0>;
+
+// Vector Compare Mask Less Than Or Equal (Floating Point)
+// FCMLE is an alias for FCMGE with operands reversed.
+def FCMLEvvv_2S : NeonI_compare_aliases<"fcmle", ".2s", FCMGEvvv_2S, VPR64>;
+def FCMLEvvv_4S : NeonI_compare_aliases<"fcmle", ".4s", FCMGEvvv_4S, VPR128>;
+def FCMLEvvv_2D : NeonI_compare_aliases<"fcmle", ".2d", FCMGEvvv_2D, VPR128>;
+
+// Vector Compare Mask Less Than (Floating Point)
+// FCMLT is an alias for FCMGT with operands reversed.
+def FCMLTvvv_2S : NeonI_compare_aliases<"fcmlt", ".2s", FCMGTvvv_2S, VPR64>;
+def FCMLTvvv_4S : NeonI_compare_aliases<"fcmlt", ".4s", FCMGTvvv_4S, VPR128>;
+def FCMLTvvv_2D : NeonI_compare_aliases<"fcmlt", ".2d", FCMGTvvv_2D, VPR128>;
+
+
+multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
+ string asmop, CondCode CC>
+{
+ def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn, fpz32:$FPImm),
+ asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
+ [(set (v2i32 VPR64:$Rd),
+ (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpimm:$FPImm), CC)))],
+ NoItinerary>;
+
+ def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
+ asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
+ NoItinerary>;
+
+ def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
+ asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
+ [(set (v2i64 VPR128:$Rd),
+ (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
+ NoItinerary>;
+}
+
+// Vector Compare Mask Equal to Zero (Floating Point)
+defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;
+
+// Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
+defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;
+
+// Vector Compare Mask Greater Than Zero (Floating Point)
+defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;
+
+// Vector Compare Mask Less Than or Equal To Zero (Floating Point)
+defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;
+
+// Vector Compare Mask Less Than Zero (Floating Point)
+defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;
+
+// Vector Absolute Comparisons (Floating Point)
+
+// Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
+defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
+ int_arm_neon_vacged, int_arm_neon_vacgeq,
+ int_aarch64_neon_vacgeq,
+ v2i32, v4i32, v2i64, 0>;
+
+// Vector Absolute Compare Mask Greater Than (Floating Point)
+defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
+ int_arm_neon_vacgtd, int_arm_neon_vacgtq,
+ int_aarch64_neon_vacgtq,
+ v2i32, v4i32, v2i64, 0>;
+
+// Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
+// FACLE is an alias for FACGE with operands reversed.
+def FACLEvvv_2S : NeonI_compare_aliases<"facle", ".2s", FACGEvvv_2S, VPR64>;
+def FACLEvvv_4S : NeonI_compare_aliases<"facle", ".4s", FACGEvvv_4S, VPR128>;
+def FACLEvvv_2D : NeonI_compare_aliases<"facle", ".2d", FACGEvvv_2D, VPR128>;
+
+// Vector Absolute Compare Mask Less Than (Floating Point)
+// FACLT is an alias for FACGT with operands reversed.
+def FACLTvvv_2S : NeonI_compare_aliases<"faclt", ".2s", FACGTvvv_2S, VPR64>;
+def FACLTvvv_4S : NeonI_compare_aliases<"faclt", ".4s", FACGTvvv_4S, VPR128>;
+def FACLTvvv_2D : NeonI_compare_aliases<"faclt", ".2d", FACGTvvv_2D, VPR128>;
+
+// Vector halving add (Integer Signed, Unsigned)
+defm SHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
+ int_arm_neon_vhadds, 1>;
+defm UHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
+ int_arm_neon_vhaddu, 1>;
+
+// Vector halving sub (Integer Signed, Unsigned)
+defm SHSUBvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
+ int_arm_neon_vhsubs, 0>;
+defm UHSUBvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
+ int_arm_neon_vhsubu, 0>;
+
+// Vector rounding halving add (Integer Signed, Unsigned)
+defm SRHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
+ int_arm_neon_vrhadds, 1>;
+defm URHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
+ int_arm_neon_vrhaddu, 1>;
+
+// Vector Saturating add (Integer Signed, Unsigned)
+defm SQADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
+ int_arm_neon_vqadds, 1>;
+defm UQADDvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
+ int_arm_neon_vqaddu, 1>;
+
+// Vector Saturating sub (Integer Signed, Unsigned)
+defm SQSUBvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
+ int_arm_neon_vqsubs, 1>;
+defm UQSUBvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
+ int_arm_neon_vqsubu, 1>;
+
+// Vector Shift Left (Signed and Unsigned Integer)
+defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
+ int_arm_neon_vshifts, 1>;
+defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
+ int_arm_neon_vshiftu, 1>;
+
+// Vector Saturating Shift Left (Signed and Unsigned Integer)
+defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
+ int_arm_neon_vqshifts, 1>;
+defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
+ int_arm_neon_vqshiftu, 1>;
+
+// Vector Rounding Shift Left (Signed and Unsigned Integer)
+defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
+ int_arm_neon_vrshifts, 1>;
+defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
+ int_arm_neon_vrshiftu, 1>;
+
+// Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
+defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
+ int_arm_neon_vqrshifts, 1>;
+defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
+ int_arm_neon_vqrshiftu, 1>;
+
+// Vector Maximum (Signed and Unsigned Integer)
+defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
+defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
+
+// Vector Minimum (Signed and Unsigned Integer)
+defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
+defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
+
+// Vector Maximum (Floating Point)
+defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
+ int_arm_neon_vmaxs, int_arm_neon_vmaxs,
+ int_arm_neon_vmaxs, v2f32, v4f32, v2f64, 1>;
+
+// Vector Minimum (Floating Point)
+defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
+ int_arm_neon_vmins, int_arm_neon_vmins,
+ int_arm_neon_vmins, v2f32, v4f32, v2f64, 1>;
+
+// Vector maxNum (Floating Point) - prefers a number over a quiet NaN
+defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
+ int_aarch64_neon_vmaxnm,
+ int_aarch64_neon_vmaxnm,
+ int_aarch64_neon_vmaxnm,
+ v2f32, v4f32, v2f64, 1>;
+
+// Vector minNum (Floating Point) - prefers a number over a quiet NaN
+defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
+ int_aarch64_neon_vminnm,
+ int_aarch64_neon_vminnm,
+ int_aarch64_neon_vminnm,
+ v2f32, v4f32, v2f64, 1>;
+
+// Vector Maximum Pairwise (Signed and Unsigned Integer)
+defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
+defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
+
+// Vector Minimum Pairwise (Signed and Unsigned Integer)
+defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
+defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
+
+// Vector Maximum Pairwise (Floating Point)
+defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
+ int_arm_neon_vpmaxs, int_arm_neon_vpmaxs,
+ int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
+
+// Vector Minimum Pairwise (Floating Point)
+defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
+ int_arm_neon_vpmins, int_arm_neon_vpmins,
+ int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
+
+// Vector maxNum Pairwise (Floating Point) - prefers a number over a quiet NaN
+defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
+ int_aarch64_neon_vpmaxnm,
+ int_aarch64_neon_vpmaxnm,
+ int_aarch64_neon_vpmaxnm,
+ v2f32, v4f32, v2f64, 1>;
+
+// Vector minNum Pairwise (Floating Point) - prefers a number over a quiet NaN
+defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
+ int_aarch64_neon_vpminnm,
+ int_aarch64_neon_vpminnm,
+ int_aarch64_neon_vpminnm,
+ v2f32, v4f32, v2f64, 1>;
+
+// Vector Addition Pairwise (Integer)
+defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
+
+// Vector Addition Pairwise (Floating Point)
+defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
+ int_arm_neon_vpadd,
+ int_arm_neon_vpadd,
+ int_arm_neon_vpadd,
+ v2f32, v4f32, v2f64, 1>;
+
+// Vector Saturating Doubling Multiply High
+defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
+ int_arm_neon_vqdmulh, 1>;
+
+// Vector Saturating Rounding Doubling Multiply High
+defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
+ int_arm_neon_vqrdmulh, 1>;
+
+// Vector Multiply Extended (Floating Point)
+defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
+ int_aarch64_neon_vmulx,
+ int_aarch64_neon_vmulx,
+ int_aarch64_neon_vmulx,
+ v2f32, v4f32, v2f64, 1>;
+
+// Vector Immediate Instructions
+
+multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
+{
+ def _asmoperand : AsmOperandClass
+ {
+ let Name = "NeonMovImmShift" # PREFIX;
+ let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
+ let PredicateMethod = "isNeonMovImmShift" # PREFIX;
+ }
+}
+
+// Definition of vector immediate shift operands
+
+// The selectable use cases extract the shift operation information from the
+// OpCmode field encoded in the immediate.
+def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
+ uint64_t OpCmode = N->getZExtValue();
+ unsigned ShiftImm;
+ unsigned ShiftOnesIn;
+ unsigned HasShift =
+ A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
+ if (!HasShift) return SDValue();
+ return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
+}]>;
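+// That is, the XForm recovers the LSL/MSL shift amount from the encoded
+// OpCmode value so it can be re-emitted as an explicit i32 target constant.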
+
+// Vector immediate shift operands that accept the LSL and MSL shift operators,
+// with shift amounts of 0, 8, 16, or 24 (LSL), 0 or 8 (LSLH), and 8 or 16
+// (MSL).
+defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
+defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
+// LSLH restricts the shift amount to 0 or 8 (out of 0, 8, 16, 24).
+defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;
+
+multiclass neon_mov_imm_shift_operands<string PREFIX,
+ string HALF, string ISHALF, code pred>
+{
+ def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
+ {
+ let PrintMethod =
+ "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
+ let DecoderMethod =
+ "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
+ }
+}
+
+defm neon_mov_imm_LSL : neon_mov_imm_shift_operands<"LSL", "", "false", [{
+ unsigned ShiftImm;
+ unsigned ShiftOnesIn;
+ unsigned HasShift =
+ A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
+ return (HasShift && !ShiftOnesIn);
+}]>;
+
+defm neon_mov_imm_MSL : neon_mov_imm_shift_operands<"MSL", "", "false", [{
+ unsigned ShiftImm;
+ unsigned ShiftOnesIn;
+ unsigned HasShift =
+ A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
+ return (HasShift && ShiftOnesIn);
+}]>;
+
+defm neon_mov_imm_LSLH : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
+ unsigned ShiftImm;
+ unsigned ShiftOnesIn;
+ unsigned HasShift =
+ A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
+ return (HasShift && !ShiftOnesIn);
+}]>;
+
+def neon_uimm1_asmoperand : AsmOperandClass
+{
+ let Name = "UImm1";
+ let PredicateMethod = "isUImm<1>";
+ let RenderMethod = "addImmOperands";
+}
+
+def neon_uimm2_asmoperand : AsmOperandClass
+{
+ let Name = "UImm2";
+ let PredicateMethod = "isUImm<2>";
+ let RenderMethod = "addImmOperands";
+}
+
+def neon_uimm8_asmoperand : AsmOperandClass
+{
+ let Name = "UImm8";
+ let PredicateMethod = "isUImm<8>";
+ let RenderMethod = "addImmOperands";
+}
+
+def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
+ let ParserMatchClass = neon_uimm8_asmoperand;
+ let PrintMethod = "printUImmHexOperand";
+}
+
+def neon_uimm64_mask_asmoperand : AsmOperandClass
+{
+ let Name = "NeonUImm64Mask";
+ let PredicateMethod = "isNeonUImm64Mask";
+ let RenderMethod = "addNeonUImm64MaskOperands";
+}
+
+// A 64-bit bytemask, in which each byte is either 0x00 or 0xff, is encoded as
+// an unsigned 8-bit value with one bit per byte.
+def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
+ let ParserMatchClass = neon_uimm64_mask_asmoperand;
+ let PrintMethod = "printNeonUImm64MaskOperand";
+}
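+// For example, the 64-bit mask 0xff00ff00ff00ff00 corresponds to the 8-bit
+// immediate 0b10101010 (0xaa), one mask bit per byte.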
+
+multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
+ SDPatternOperator opnode>
+{
+ // shift zeros, per word
+ def _2S : NeonI_1VModImm<0b0, op,
+ (outs VPR64:$Rd),
+ (ins neon_uimm8:$Imm,
+ neon_mov_imm_LSL_operand:$Simm),
+ !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
+ [(set (v2i32 VPR64:$Rd),
+ (v2i32 (opnode (timm:$Imm),
+ (neon_mov_imm_LSL_operand:$Simm))))],
+ NoItinerary> {
+ bits<2> Simm;
+ let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
+ }
+
+ def _4S : NeonI_1VModImm<0b1, op,
+ (outs VPR128:$Rd),
+ (ins neon_uimm8:$Imm,
+ neon_mov_imm_LSL_operand:$Simm),
+ !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (opnode (timm:$Imm),
+ (neon_mov_imm_LSL_operand:$Simm))))],
+ NoItinerary> {
+ bits<2> Simm;
+ let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
+ }
+
+ // shift zeros, per halfword
+ def _4H : NeonI_1VModImm<0b0, op,
+ (outs VPR64:$Rd),
+ (ins neon_uimm8:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm),
+ !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
+ [(set (v4i16 VPR64:$Rd),
+ (v4i16 (opnode (timm:$Imm),
+ (neon_mov_imm_LSLH_operand:$Simm))))],
+ NoItinerary> {
+ bit Simm;
+ let cmode = {0b1, 0b0, Simm, 0b0};
+ }
+
+ def _8H : NeonI_1VModImm<0b1, op,
+ (outs VPR128:$Rd),
+ (ins neon_uimm8:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm),
+ !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
+ [(set (v8i16 VPR128:$Rd),
+ (v8i16 (opnode (timm:$Imm),
+ (neon_mov_imm_LSLH_operand:$Simm))))],
+ NoItinerary> {
+ bit Simm;
+ let cmode = {0b1, 0b0, Simm, 0b0};
+ }
+}
+
+multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
+ SDPatternOperator opnode,
+ SDPatternOperator neonopnode>
+{
+ let Constraints = "$src = $Rd" in {
+ // shift zeros, per word
+ def _2S : NeonI_1VModImm<0b0, op,
+ (outs VPR64:$Rd),
+ (ins VPR64:$src, neon_uimm8:$Imm,
+ neon_mov_imm_LSL_operand:$Simm),
+ !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
+ [(set (v2i32 VPR64:$Rd),
+ (v2i32 (opnode (v2i32 VPR64:$src),
+ (v2i32 (bitconvert (v2i32 (neonopnode timm:$Imm,
+ neon_mov_imm_LSL_operand:$Simm)))))))],
+ NoItinerary> {
+ bits<2> Simm;
+ let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
+ }
+
+ def _4S : NeonI_1VModImm<0b1, op,
+ (outs VPR128:$Rd),
+ (ins VPR128:$src, neon_uimm8:$Imm,
+ neon_mov_imm_LSL_operand:$Simm),
+ !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (opnode (v4i32 VPR128:$src),
+ (v4i32 (bitconvert (v4i32 (neonopnode timm:$Imm,
+ neon_mov_imm_LSL_operand:$Simm)))))))],
+ NoItinerary> {
+ bits<2> Simm;
+ let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
+ }
+
+ // shift zeros, per halfword
+ def _4H : NeonI_1VModImm<0b0, op,
+ (outs VPR64:$Rd),
+ (ins VPR64:$src, neon_uimm8:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm),
+ !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
+ [(set (v4i16 VPR64:$Rd),
+ (v4i16 (opnode (v4i16 VPR64:$src),
+ (v4i16 (bitconvert (v4i16 (neonopnode timm:$Imm,
+ neon_mov_imm_LSL_operand:$Simm)))))))],
+ NoItinerary> {
+ bit Simm;
+ let cmode = {0b1, 0b0, Simm, 0b1};
+ }
+
+ def _8H : NeonI_1VModImm<0b1, op,
+ (outs VPR128:$Rd),
+ (ins VPR128:$src, neon_uimm8:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm),
+ !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
+ [(set (v8i16 VPR128:$Rd),
+ (v8i16 (opnode (v8i16 VPR128:$src),
+ (v8i16 (bitconvert (v8i16 (neonopnode timm:$Imm,
+ neon_mov_imm_LSL_operand:$Simm)))))))],
+ NoItinerary> {
+ bit Simm;
+ let cmode = {0b1, 0b0, Simm, 0b1};
+ }
+ }
+}
+
+multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
+ SDPatternOperator opnode>
+{
+ // shift ones, per word
+ def _2S : NeonI_1VModImm<0b0, op,
+ (outs VPR64:$Rd),
+ (ins neon_uimm8:$Imm,
+ neon_mov_imm_MSL_operand:$Simm),
+ !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
+ [(set (v2i32 VPR64:$Rd),
+ (v2i32 (opnode (timm:$Imm),
+ (neon_mov_imm_MSL_operand:$Simm))))],
+ NoItinerary> {
+ bit Simm;
+ let cmode = {0b1, 0b1, 0b0, Simm};
+ }
+
+ def _4S : NeonI_1VModImm<0b1, op,
+ (outs VPR128:$Rd),
+ (ins neon_uimm8:$Imm,
+ neon_mov_imm_MSL_operand:$Simm),
+ !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (opnode (timm:$Imm),
+ (neon_mov_imm_MSL_operand:$Simm))))],
+ NoItinerary> {
+ bit Simm;
+ let cmode = {0b1, 0b1, 0b0, Simm};
+ }
+}
+
+// Vector Move Immediate Shifted
+let isReMaterializable = 1 in {
+defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
+}
+
+// Vector Move Inverted Immediate Shifted
+let isReMaterializable = 1 in {
+defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
+}
+
+// Vector Bitwise Bit Clear (AND NOT) - immediate
+let isReMaterializable = 1 in {
+defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
+ and, Neon_mvni>;
+}
+
+// Vector Bitwise OR - immediate
+
+let isReMaterializable = 1 in {
+defm ORRvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
+ or, Neon_movi>;
+}
+
+// Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
+// LowerBUILD_VECTOR favors lowering MOVI over MVNI.
+// BIC immediate instruction selection requires additional patterns to
+// transform Neon_movi operands into BIC immediate operands.
+
+def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
+ uint64_t OpCmode = N->getZExtValue();
+ unsigned ShiftImm;
+ unsigned ShiftOnesIn;
+ (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
+ // LSLH restricts the shift amount to 0 or 8, which are encoded as 0 and 1.
+ // Transform the encoded shift amount 0 to 1 and 1 to 0.
+ return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
+}]>;
+
+def neon_mov_imm_LSLH_transform_operand
+ : ImmLeaf<i32, [{
+ unsigned ShiftImm;
+ unsigned ShiftOnesIn;
+ unsigned HasShift =
+ A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
+ return (HasShift && !ShiftOnesIn); }],
+ neon_mov_imm_LSLH_transform_XFORM>;
+
+// Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0x00, LSL 8)
+// Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0x00)
+def : Pat<(v4i16 (and VPR64:$src,
+ (v4i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
+ (BICvi_lsl_4H VPR64:$src, 0,
+ neon_mov_imm_LSLH_transform_operand:$Simm)>;
+
+// Transform (and A, (8h Neon_movi 0xff)) -> BIC 8h (A, 0x00, LSL 8)
+// Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0x00)
+def : Pat<(v8i16 (and VPR128:$src,
+ (v8i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
+ (BICvi_lsl_8H VPR128:$src, 0,
+ neon_mov_imm_LSLH_transform_operand:$Simm)>;
+
+
+multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
+ SDPatternOperator neonopnode,
+ Instruction INST4H,
+ Instruction INST8H> {
+ def : Pat<(v8i8 (opnode VPR64:$src,
+ (bitconvert(v4i16 (neonopnode timm:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm))))),
+ (INST4H VPR64:$src, neon_uimm8:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm)>;
+ def : Pat<(v1i64 (opnode VPR64:$src,
+ (bitconvert(v4i16 (neonopnode timm:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm))))),
+ (INST4H VPR64:$src, neon_uimm8:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm)>;
+
+ def : Pat<(v16i8 (opnode VPR128:$src,
+ (bitconvert(v8i16 (neonopnode timm:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm))))),
+ (INST8H VPR128:$src, neon_uimm8:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm)>;
+ def : Pat<(v4i32 (opnode VPR128:$src,
+ (bitconvert(v8i16 (neonopnode timm:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm))))),
+ (INST8H VPR128:$src, neon_uimm8:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm)>;
+ def : Pat<(v2i64 (opnode VPR128:$src,
+ (bitconvert(v8i16 (neonopnode timm:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm))))),
+ (INST8H VPR128:$src, neon_uimm8:$Imm,
+ neon_mov_imm_LSLH_operand:$Simm)>;
+}
+
+// Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
+defm : Neon_bitwiseVi_patterns<or, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H>;
+
+// Additional patterns for Vector Bitwise OR - immediate
+defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H>;
+
+
+// Vector Move Immediate Masked
+let isReMaterializable = 1 in {
+defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
+}
+
+// Vector Move Inverted Immediate Masked
+let isReMaterializable = 1 in {
+defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
+}
+
+class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
+ Instruction inst, RegisterOperand VPRC>
+ : NeonInstAlias<!strconcat(asmop, "\t$Rd," # asmlane # ", $Imm"),
+ (inst VPRC:$Rd, neon_uimm8:$Imm, 0), 0b0>;
+
+// Aliases for Vector Move Immediate Shifted
+def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
+def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
+
+// Aliases for Vector Move Inverted Immediate Shifted
+def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
+def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
+
+// Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
+def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
+def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
+
+// Aliases for Vector Bitwise OR - immediate
+def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
+def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
+
+// Vector Move Immediate - per byte
+let isReMaterializable = 1 in {
+def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
+ (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
+ "movi\t$Rd.8b, $Imm",
+ [(set (v8i8 VPR64:$Rd),
+ (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
+ NoItinerary> {
+ let cmode = 0b1110;
+}
+
+def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
+ (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
+ "movi\t$Rd.16b, $Imm",
+ [(set (v16i8 VPR128:$Rd),
+ (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
+ NoItinerary> {
+ let cmode = 0b1110;
+}
+}
+
+// Vector Move Immediate - bytemask, per doubleword
+let isReMaterializable = 1 in {
+def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
+ (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
+ "movi\t $Rd.2d, $Imm",
+ [(set (v2i64 VPR128:$Rd),
+ (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
+ NoItinerary> {
+ let cmode = 0b1110;
+}
+}
+
+// Vector Move Immediate - bytemask, one doubleword
+
+let isReMaterializable = 1 in {
+def MOVIdi : NeonI_1VModImm<0b0, 0b1,
+ (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
+ "movi\t $Rd, $Imm",
+ [(set (v1i64 FPR64:$Rd),
+ (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))],
+ NoItinerary> {
+ let cmode = 0b1110;
+}
+}
+
+// Vector Floating Point Move Immediate
+
+class NeonI_FMOV_impl<string asmlane, RegisterOperand VPRC, ValueType OpTy,
+ Operand immOpType, bit q, bit op>
+ : NeonI_1VModImm<q, op,
+ (outs VPRC:$Rd), (ins immOpType:$Imm),
+ "fmov\t$Rd" # asmlane # ", $Imm",
+ [(set (OpTy VPRC:$Rd),
+ (OpTy (Neon_fmovi (timm:$Imm))))],
+ NoItinerary> {
+ let cmode = 0b1111;
+ }
+
+let isReMaterializable = 1 in {
+def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64, v2f32, fmov32_operand, 0b0, 0b0>;
+def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
+def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
+}
+
+// Vector Shift (Immediate)
+// Immediate in [0, 63]
+def imm0_63 : Operand<i32> {
+ let ParserMatchClass = uimm6_asmoperand;
+}
+
+// Shift Right/Left Immediate - The immh:immb field of these shifts is encoded
+// as follows:
+//
+// Offset Encoding
+// 8 immh:immb<6:3> = '0001', <imm> is encoded in immh:immb<2:0>
+// 16 immh:immb<6:4> = '001', <imm> is encoded in immh:immb<3:0>
+// 32 immh:immb<6:5> = '01', <imm> is encoded in immh:immb<4:0>
+// 64 immh:immb<6> = '1', <imm> is encoded in immh:immb<5:0>
+//
+// The shift right immediate amount, in the range 1 to element bits, is computed
+// as 2 * Offset - UInt(immh:immb). The shift left immediate amount, in the
+// range 0 to element bits - 1, is computed as UInt(immh:immb) - Offset.
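+// Illustrative example: for 8-bit elements, "sshr Vd.8b, Vn.8b, #3" encodes
+// immh:immb = 2 * 8 - 3 = 13 = 0b0001101, while "shl Vd.8b, Vn.8b, #3"
+// encodes immh:immb = 8 + 3 = 11 = 0b0001011.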
+
+class shr_imm_asmoperands<string OFFSET> : AsmOperandClass {
+ let Name = "ShrImm" # OFFSET;
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = "ShrImm" # OFFSET;
+}
+
+class shr_imm<string OFFSET> : Operand<i32> {
+ let EncoderMethod = "getShiftRightImm" # OFFSET;
+ let DecoderMethod = "DecodeShiftRightImm" # OFFSET;
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("shr_imm" # OFFSET # "_asmoperand");
+}
+
+def shr_imm8_asmoperand : shr_imm_asmoperands<"8">;
+def shr_imm16_asmoperand : shr_imm_asmoperands<"16">;
+def shr_imm32_asmoperand : shr_imm_asmoperands<"32">;
+def shr_imm64_asmoperand : shr_imm_asmoperands<"64">;
+
+def shr_imm8 : shr_imm<"8">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 8;}]>;
+def shr_imm16 : shr_imm<"16">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 16;}]>;
+def shr_imm32 : shr_imm<"32">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 32;}]>;
+def shr_imm64 : shr_imm<"64">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 64;}]>;
+
+class shl_imm_asmoperands<string OFFSET> : AsmOperandClass {
+ let Name = "ShlImm" # OFFSET;
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = "ShlImm" # OFFSET;
+}
+
+class shl_imm<string OFFSET> : Operand<i32> {
+ let EncoderMethod = "getShiftLeftImm" # OFFSET;
+ let DecoderMethod = "DecodeShiftLeftImm" # OFFSET;
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("shl_imm" # OFFSET # "_asmoperand");
+}
+
+def shl_imm8_asmoperand : shl_imm_asmoperands<"8">;
+def shl_imm16_asmoperand : shl_imm_asmoperands<"16">;
+def shl_imm32_asmoperand : shl_imm_asmoperands<"32">;
+def shl_imm64_asmoperand : shl_imm_asmoperands<"64">;
+
+def shl_imm8 : shl_imm<"8">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 8;}]>;
+def shl_imm16 : shl_imm<"16">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 16;}]>;
+def shl_imm32 : shl_imm<"32">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 32;}]>;
+def shl_imm64 : shl_imm<"64">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 64;}]>;
+
+class N2VShift<bit q, bit u, bits<5> opcode, string asmop, string T,
+ RegisterOperand VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
+ : NeonI_2VShiftImm<q, u, opcode,
+ (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
+ asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
+ [(set (Ty VPRC:$Rd),
+ (Ty (OpNode (Ty VPRC:$Rn),
+ (Ty (Neon_vdup (i32 ImmTy:$Imm))))))],
+ NoItinerary>;
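+// N2VShift matches a vector shift whose shift-amount operand is a splat
+// (Neon_vdup) of the immediate, the usual SelectionDAG form of a uniform
+// shift by a constant.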
+
+multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
+ // 64-bit vector types.
+ def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8, shl> {
+ let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
+ }
+
+ def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16, shl> {
+ let Inst{22-20} = 0b001; // immh:immb = 001xxxx
+ }
+
+ def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32, shl> {
+ let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
+ }
+
+ // 128-bit vector types.
+ def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8, shl> {
+ let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
+ }
+
+ def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16, shl> {
+ let Inst{22-20} = 0b001; // immh:immb = 001xxxx
+ }
+
+ def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32, shl> {
+ let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
+ }
+
+ def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64, shl> {
+ let Inst{22} = 0b1; // immh:immb = 1xxxxxx
+ }
+}
+
+multiclass NeonI_N2VShR<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
+ def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
+ OpNode> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
+ OpNode> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
+ OpNode> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
+ OpNode> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
+ OpNode> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
+ OpNode> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
+ OpNode> {
+ let Inst{22} = 0b1;
+ }
+}
+
+// Shift left
+defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
+
+// Shift right
+defm SSHRvvi : NeonI_N2VShR<0b0, 0b00000, "sshr", sra>;
+defm USHRvvi : NeonI_N2VShR<0b1, 0b00000, "ushr", srl>;
+
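+// Fragments extracting the high or low 64-bit half of a 128-bit vector.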
+def Neon_High16B : PatFrag<(ops node:$in),
+ (extract_subvector (v16i8 node:$in), (iPTR 8))>;
+def Neon_High8H : PatFrag<(ops node:$in),
+ (extract_subvector (v8i16 node:$in), (iPTR 4))>;
+def Neon_High4S : PatFrag<(ops node:$in),
+ (extract_subvector (v4i32 node:$in), (iPTR 2))>;
+def Neon_High2D : PatFrag<(ops node:$in),
+ (extract_subvector (v2i64 node:$in), (iPTR 1))>;
+def Neon_High4float : PatFrag<(ops node:$in),
+ (extract_subvector (v4f32 node:$in), (iPTR 2))>;
+def Neon_High2double : PatFrag<(ops node:$in),
+ (extract_subvector (v2f64 node:$in), (iPTR 1))>;
+
+def Neon_Low16B : PatFrag<(ops node:$in),
+ (v8i8 (extract_subvector (v16i8 node:$in),
+ (iPTR 0)))>;
+def Neon_Low8H : PatFrag<(ops node:$in),
+ (v4i16 (extract_subvector (v8i16 node:$in),
+ (iPTR 0)))>;
+def Neon_Low4S : PatFrag<(ops node:$in),
+ (v2i32 (extract_subvector (v4i32 node:$in),
+ (iPTR 0)))>;
+def Neon_Low2D : PatFrag<(ops node:$in),
+ (v1i64 (extract_subvector (v2i64 node:$in),
+ (iPTR 0)))>;
+def Neon_Low4float : PatFrag<(ops node:$in),
+ (v2f32 (extract_subvector (v4f32 node:$in),
+ (iPTR 0)))>;
+def Neon_Low2double : PatFrag<(ops node:$in),
+ (v1f64 (extract_subvector (v2f64 node:$in),
+ (iPTR 0)))>;
+
+class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
+ string SrcT, ValueType DestTy, ValueType SrcTy,
+ Operand ImmTy, SDPatternOperator ExtOp>
+ : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
+ (ins VPR64:$Rn, ImmTy:$Imm),
+ asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
+ [(set (DestTy VPR128:$Rd),
+ (DestTy (shl
+ (DestTy (ExtOp (SrcTy VPR64:$Rn))),
+ (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
+ NoItinerary>;
+
+class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
+ string SrcT, ValueType DestTy, ValueType SrcTy,
+ int StartIndex, Operand ImmTy,
+ SDPatternOperator ExtOp, PatFrag getTop>
+ : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
+ (ins VPR128:$Rn, ImmTy:$Imm),
+ asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
+ [(set (DestTy VPR128:$Rd),
+ (DestTy (shl
+ (DestTy (ExtOp
+ (SrcTy (getTop VPR128:$Rn)))),
+ (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
+ NoItinerary>;
+
+multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
+ SDNode ExtOp> {
+ // 64-bit vector types.
+ def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
+ shl_imm8, ExtOp> {
+ let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
+ }
+
+ def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
+ shl_imm16, ExtOp> {
+ let Inst{22-20} = 0b001; // immh:immb = 001xxxx
+ }
+
+ def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
+ shl_imm32, ExtOp> {
+ let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
+ }
+
+ // 128-bit vector types
+ def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b", v8i16, v8i8,
+ 8, shl_imm8, ExtOp, Neon_High16B> {
+ let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
+ }
+
+ def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h", v4i32, v4i16,
+ 4, shl_imm16, ExtOp, Neon_High8H> {
+ let Inst{22-20} = 0b001; // immh:immb = 001xxxx
+ }
+
+ def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s", v2i64, v2i32,
+ 2, shl_imm32, ExtOp, Neon_High4S> {
+ let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
+ }
+
+ // Use other patterns to match when the immediate is 0.
+ def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
+ (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;
+
+ def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
+ (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;
+
+ def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
+ (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
+
+ def : Pat<(v8i16 (ExtOp (v8i8 (Neon_High16B VPR128:$Rn)))),
+ (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
+
+ def : Pat<(v4i32 (ExtOp (v4i16 (Neon_High8H VPR128:$Rn)))),
+ (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
+
+ def : Pat<(v2i64 (ExtOp (v2i32 (Neon_High4S VPR128:$Rn)))),
+ (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
+}
+
+// Shift left long
+defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
+defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
+
+// Rounding/Saturating shift
+class N2VShift_RQ<bit q, bit u, bits<5> opcode, string asmop, string T,
+ RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
+ SDPatternOperator OpNode>
+ : NeonI_2VShiftImm<q, u, opcode,
+ (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
+ asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
+ [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$Rn),
+ (i32 ImmTy:$Imm))))],
+ NoItinerary>;
+
+// shift right (vector by immediate)
+multiclass NeonI_N2VShR_RQ<bit u, bits<5> opcode, string asmop,
+ SDPatternOperator OpNode> {
+ def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
+ OpNode> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
+ OpNode> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
+ OpNode> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
+ OpNode> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
+ OpNode> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
+ OpNode> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
+ OpNode> {
+ let Inst{22} = 0b1;
+ }
+}
+
+multiclass NeonI_N2VShL_Q<bit u, bits<5> opcode, string asmop,
+ SDPatternOperator OpNode> {
+ // 64-bit vector types.
+ def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8,
+ OpNode> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16,
+ OpNode> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32,
+ OpNode> {
+ let Inst{22-21} = 0b01;
+ }
+
+ // 128-bit vector types.
+ def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8,
+ OpNode> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16,
+ OpNode> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32,
+ OpNode> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64,
+ OpNode> {
+ let Inst{22} = 0b1;
+ }
+}
+
+// Rounding shift right
+defm SRSHRvvi : NeonI_N2VShR_RQ<0b0, 0b00100, "srshr",
+ int_aarch64_neon_vsrshr>;
+defm URSHRvvi : NeonI_N2VShR_RQ<0b1, 0b00100, "urshr",
+ int_aarch64_neon_vurshr>;
+
+// Saturating shift left unsigned
+defm SQSHLUvvi : NeonI_N2VShL_Q<0b1, 0b01100, "sqshlu", int_aarch64_neon_vsqshlu>;
+
+// Saturating shift left
+defm SQSHLvvi : NeonI_N2VShL_Q<0b0, 0b01110, "sqshl", Neon_sqrshlImm>;
+defm UQSHLvvi : NeonI_N2VShL_Q<0b1, 0b01110, "uqshl", Neon_uqrshlImm>;
+
+class N2VShiftAdd<bit q, bit u, bits<5> opcode, string asmop, string T,
+ RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
+ SDNode OpNode>
+ : NeonI_2VShiftImm<q, u, opcode,
+ (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
+ asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
+ [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
+ (Ty (OpNode (Ty VPRC:$Rn),
+ (Ty (Neon_vdup (i32 ImmTy:$Imm))))))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+}
+
+// Shift Right accumulate
+multiclass NeonI_N2VShRAdd<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
+ def _8B : N2VShiftAdd<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
+ OpNode> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _4H : N2VShiftAdd<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
+ OpNode> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _2S : N2VShiftAdd<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
+ OpNode> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _16B : N2VShiftAdd<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
+ OpNode> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _8H : N2VShiftAdd<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
+ OpNode> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _4S : N2VShiftAdd<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
+ OpNode> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _2D : N2VShiftAdd<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
+ OpNode> {
+ let Inst{22} = 0b1;
+ }
+}
+
+// Shift right and accumulate
+defm SSRAvvi : NeonI_N2VShRAdd<0, 0b00010, "ssra", sra>;
+defm USRAvvi : NeonI_N2VShRAdd<1, 0b00010, "usra", srl>;
+
+// Rounding shift accumulate
+class N2VShiftAdd_R<bit q, bit u, bits<5> opcode, string asmop, string T,
+ RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
+ SDPatternOperator OpNode>
+ : NeonI_2VShiftImm<q, u, opcode,
+ (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
+ asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
+ [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
+ (Ty (OpNode (Ty VPRC:$Rn), (i32 ImmTy:$Imm))))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+}
+
+multiclass NeonI_N2VShRAdd_R<bit u, bits<5> opcode, string asmop,
+ SDPatternOperator OpNode> {
+ def _8B : N2VShiftAdd_R<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
+ OpNode> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _4H : N2VShiftAdd_R<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
+ OpNode> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _2S : N2VShiftAdd_R<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
+ OpNode> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _16B : N2VShiftAdd_R<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
+ OpNode> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _8H : N2VShiftAdd_R<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
+ OpNode> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _4S : N2VShiftAdd_R<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
+ OpNode> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _2D : N2VShiftAdd_R<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
+ OpNode> {
+ let Inst{22} = 0b1;
+ }
+}
+
+// Rounding shift right and accumulate
+defm SRSRAvvi : NeonI_N2VShRAdd_R<0, 0b00110, "srsra", int_aarch64_neon_vsrshr>;
+defm URSRAvvi : NeonI_N2VShRAdd_R<1, 0b00110, "ursra", int_aarch64_neon_vurshr>;
+
+// Shift insert by immediate
+class N2VShiftIns<bit q, bit u, bits<5> opcode, string asmop, string T,
+ RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
+ SDPatternOperator OpNode>
+ : NeonI_2VShiftImm<q, u, opcode,
+ (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
+ asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
+ [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$src), (Ty VPRC:$Rn),
+ (i32 ImmTy:$Imm))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+}
+
+// shift left insert (vector by immediate)
+multiclass NeonI_N2VShLIns<bit u, bits<5> opcode, string asmop> {
+ def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8,
+ int_aarch64_neon_vsli> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16,
+ int_aarch64_neon_vsli> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32,
+ int_aarch64_neon_vsli> {
+ let Inst{22-21} = 0b01;
+ }
+
+ // 128-bit vector types
+ def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8,
+ int_aarch64_neon_vsli> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16,
+ int_aarch64_neon_vsli> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32,
+ int_aarch64_neon_vsli> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64,
+ int_aarch64_neon_vsli> {
+ let Inst{22} = 0b1;
+ }
+}
+
+// shift right insert (vector by immediate)
+multiclass NeonI_N2VShRIns<bit u, bits<5> opcode, string asmop> {
+ // 64-bit vector types.
+ def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
+ int_aarch64_neon_vsri> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
+ int_aarch64_neon_vsri> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
+ int_aarch64_neon_vsri> {
+ let Inst{22-21} = 0b01;
+ }
+
+ // 128-bit vector types
+ def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
+ int_aarch64_neon_vsri> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
+ int_aarch64_neon_vsri> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
+ int_aarch64_neon_vsri> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
+ int_aarch64_neon_vsri> {
+ let Inst{22} = 0b1;
+ }
+}
+
+// Shift left and insert
+defm SLIvvi : NeonI_N2VShLIns<0b1, 0b01010, "sli">;
+
+// Shift right and insert
+defm SRIvvi : NeonI_N2VShRIns<0b1, 0b01000, "sri">;
+
+class N2VShR_Narrow<bit q, bit u, bits<5> opcode, string asmop, string DestT,
+ string SrcT, Operand ImmTy>
+ : NeonI_2VShiftImm<q, u, opcode,
+ (outs VPR64:$Rd), (ins VPR128:$Rn, ImmTy:$Imm),
+ asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
+ [], NoItinerary>;
+
+class N2VShR_Narrow_Hi<bit q, bit u, bits<5> opcode, string asmop, string DestT,
+ string SrcT, Operand ImmTy>
+ : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
+ (ins VPR128:$src, VPR128:$Rn, ImmTy:$Imm),
+ asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
+ [], NoItinerary> {
+ let Constraints = "$src = $Rd";
+}
+
+// shift right narrow (vector by immediate)
+multiclass NeonI_N2VShR_Narrow<bit u, bits<5> opcode, string asmop> {
+ def _8B : N2VShR_Narrow<0b0, u, opcode, asmop, "8b", "8h", shr_imm8> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _4H : N2VShR_Narrow<0b0, u, opcode, asmop, "4h", "4s", shr_imm16> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _2S : N2VShR_Narrow<0b0, u, opcode, asmop, "2s", "2d", shr_imm32> {
+ let Inst{22-21} = 0b01;
+ }
+
+ // Shift Narrow High
+ def _16B : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "16b", "8h",
+ shr_imm8> {
+ let Inst{22-19} = 0b0001;
+ }
+
+ def _8H : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "8h", "4s",
+ shr_imm16> {
+ let Inst{22-20} = 0b001;
+ }
+
+ def _4S : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "4s", "2d",
+ shr_imm32> {
+ let Inst{22-21} = 0b01;
+ }
+}
+
+// Shift right narrow
+defm SHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10000, "shrn">;
+
+// Shift right narrow (prefix Q is saturating, prefix R is rounding)
+defm QSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10000, "sqshrun">;
+defm RSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10001, "rshrn">;
+defm QRSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10001, "sqrshrun">;
+defm SQSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10010, "sqshrn">;
+defm UQSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10010, "uqshrn">;
+defm SQRSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10011, "sqrshrn">;
+defm UQRSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10011, "uqrshrn">;
+
+def Neon_combine_2D : PatFrag<(ops node:$Rm, node:$Rn),
+ (v2i64 (concat_vectors (v1i64 node:$Rm),
+ (v1i64 node:$Rn)))>;
+def Neon_combine_8H : PatFrag<(ops node:$Rm, node:$Rn),
+ (v8i16 (concat_vectors (v4i16 node:$Rm),
+ (v4i16 node:$Rn)))>;
+def Neon_combine_4S : PatFrag<(ops node:$Rm, node:$Rn),
+ (v4i32 (concat_vectors (v2i32 node:$Rm),
+ (v2i32 node:$Rn)))>;
+def Neon_combine_4f : PatFrag<(ops node:$Rm, node:$Rn),
+ (v4f32 (concat_vectors (v2f32 node:$Rm),
+ (v2f32 node:$Rn)))>;
+def Neon_combine_2d : PatFrag<(ops node:$Rm, node:$Rn),
+ (v2f64 (concat_vectors (v1f64 node:$Rm),
+ (v1f64 node:$Rn)))>;
+
+def Neon_lshrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
+ (v8i16 (srl (v8i16 node:$lhs),
+ (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
+def Neon_lshrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
+ (v4i32 (srl (v4i32 node:$lhs),
+ (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
+def Neon_lshrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
+ (v2i64 (srl (v2i64 node:$lhs),
+ (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
+def Neon_ashrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
+ (v8i16 (sra (v8i16 node:$lhs),
+ (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
+def Neon_ashrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
+ (v4i32 (sra (v4i32 node:$lhs),
+ (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
+def Neon_ashrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
+ (v2i64 (sra (v2i64 node:$lhs),
+ (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
+
+// Normal shift right narrow is matched by IR (srl/sra, trunc, concat_vectors)
+multiclass Neon_shiftNarrow_patterns<string shr> {
+ def : Pat<(v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H") VPR128:$Rn,
+ (i32 shr_imm8:$Imm)))),
+ (SHRNvvi_8B VPR128:$Rn, imm:$Imm)>;
+ def : Pat<(v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S") VPR128:$Rn,
+ (i32 shr_imm16:$Imm)))),
+ (SHRNvvi_4H VPR128:$Rn, imm:$Imm)>;
+ def : Pat<(v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D") VPR128:$Rn,
+ (i32 shr_imm32:$Imm)))),
+ (SHRNvvi_2S VPR128:$Rn, imm:$Imm)>;
+
+ def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
+ (v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H")
+ VPR128:$Rn, (i32 shr_imm8:$Imm))))))),
+ (SHRNvvi_16B (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
+ VPR128:$Rn, imm:$Imm)>;
+ def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
+ (v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S")
+ VPR128:$Rn, (i32 shr_imm16:$Imm))))))),
+ (SHRNvvi_8H (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+ VPR128:$Rn, imm:$Imm)>;
+ def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
+ (v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D")
+ VPR128:$Rn, (i32 shr_imm32:$Imm))))))),
+ (SHRNvvi_4S (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+ VPR128:$Rn, imm:$Imm)>;
+}
+
+multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
+ def : Pat<(v8i8 (op (v8i16 VPR128:$Rn), shr_imm8:$Imm)),
+ (!cast<Instruction>(prefix # "_8B") VPR128:$Rn, imm:$Imm)>;
+ def : Pat<(v4i16 (op (v4i32 VPR128:$Rn), shr_imm16:$Imm)),
+ (!cast<Instruction>(prefix # "_4H") VPR128:$Rn, imm:$Imm)>;
+ def : Pat<(v2i32 (op (v2i64 VPR128:$Rn), shr_imm32:$Imm)),
+ (!cast<Instruction>(prefix # "_2S") VPR128:$Rn, imm:$Imm)>;
+
+ def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
+ (v1i64 (bitconvert (v8i8
+ (op (v8i16 VPR128:$Rn), shr_imm8:$Imm))))),
+ (!cast<Instruction>(prefix # "_16B")
+ (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+ VPR128:$Rn, imm:$Imm)>;
+ def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
+ (v1i64 (bitconvert (v4i16
+ (op (v4i32 VPR128:$Rn), shr_imm16:$Imm))))),
+ (!cast<Instruction>(prefix # "_8H")
+ (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+ VPR128:$Rn, imm:$Imm)>;
+ def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
+ (v1i64 (bitconvert (v2i32
+ (op (v2i64 VPR128:$Rn), shr_imm32:$Imm))))),
+ (!cast<Instruction>(prefix # "_4S")
+ (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+ VPR128:$Rn, imm:$Imm)>;
+}
+
+defm : Neon_shiftNarrow_patterns<"lshr">;
+defm : Neon_shiftNarrow_patterns<"ashr">;
+
+defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrun, "QSHRUNvvi">;
+defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vrshrn, "RSHRNvvi">;
+defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrun, "QRSHRUNvvi">;
+defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrn, "SQSHRNvvi">;
+defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqshrn, "UQSHRNvvi">;
+defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrn, "SQRSHRNvvi">;
+defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqrshrn, "UQRSHRNvvi">;
+
+// Convert between fixed-point and floating-point
+class N2VCvt_Fx<bit q, bit u, bits<5> opcode, string asmop, string T,
+ RegisterOperand VPRC, ValueType DestTy, ValueType SrcTy,
+ Operand ImmTy, SDPatternOperator IntOp>
+ : NeonI_2VShiftImm<q, u, opcode,
+ (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
+ asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
+ [(set (DestTy VPRC:$Rd), (DestTy (IntOp (SrcTy VPRC:$Rn),
+ (i32 ImmTy:$Imm))))],
+ NoItinerary>;
+
+multiclass NeonI_N2VCvt_Fx2fp<bit u, bits<5> opcode, string asmop,
+ SDPatternOperator IntOp> {
+ def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2f32, v2i32,
+ shr_imm32, IntOp> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4f32, v4i32,
+ shr_imm32, IntOp> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2f64, v2i64,
+ shr_imm64, IntOp> {
+ let Inst{22} = 0b1;
+ }
+}
+
+multiclass NeonI_N2VCvt_Fp2fx<bit u, bits<5> opcode, string asmop,
+ SDPatternOperator IntOp> {
+ def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2i32, v2f32,
+ shr_imm32, IntOp> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4i32, v4f32,
+ shr_imm32, IntOp> {
+ let Inst{22-21} = 0b01;
+ }
+
+ def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2i64, v2f64,
+ shr_imm64, IntOp> {
+ let Inst{22} = 0b1;
+ }
+}
+
+// Convert fixed-point to floating-point
+defm VCVTxs2f : NeonI_N2VCvt_Fx2fp<0, 0b11100, "scvtf",
+ int_arm_neon_vcvtfxs2fp>;
+defm VCVTxu2f : NeonI_N2VCvt_Fx2fp<1, 0b11100, "ucvtf",
+ int_arm_neon_vcvtfxu2fp>;
+
+// Convert floating-point to fixed-point
+defm VCVTf2xs : NeonI_N2VCvt_Fp2fx<0, 0b11111, "fcvtzs",
+ int_arm_neon_vcvtfp2fxs>;
+defm VCVTf2xu : NeonI_N2VCvt_Fp2fx<1, 0b11111, "fcvtzu",
+ int_arm_neon_vcvtfp2fxu>;
+
+multiclass Neon_sshll2_0<SDNode ext>
+{
+ def _v8i8 : PatFrag<(ops node:$Rn),
+ (v8i16 (ext (v8i8 (Neon_High16B node:$Rn))))>;
+ def _v4i16 : PatFrag<(ops node:$Rn),
+ (v4i32 (ext (v4i16 (Neon_High8H node:$Rn))))>;
+ def _v2i32 : PatFrag<(ops node:$Rn),
+ (v2i64 (ext (v2i32 (Neon_High4S node:$Rn))))>;
+}
+
+defm NI_sext_high : Neon_sshll2_0<sext>;
+defm NI_zext_high : Neon_sshll2_0<zext>;
+
+
+//===----------------------------------------------------------------------===//
+// Multiclasses for NeonI_Across
+//===----------------------------------------------------------------------===//
+
+// Variant 1
+
+multiclass NeonI_2VAcross_1<bit u, bits<5> opcode,
+ string asmop, SDPatternOperator opnode>
+{
+ def _1h8b: NeonI_2VAcross<0b0, u, 0b00, opcode,
+ (outs FPR16:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd, $Rn.8b",
+ [(set (v1i16 FPR16:$Rd),
+ (v1i16 (opnode (v8i8 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def _1h16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
+ (outs FPR16:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.16b",
+ [(set (v1i16 FPR16:$Rd),
+ (v1i16 (opnode (v16i8 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def _1s4h: NeonI_2VAcross<0b0, u, 0b01, opcode,
+ (outs FPR32:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd, $Rn.4h",
+ [(set (v1i32 FPR32:$Rd),
+ (v1i32 (opnode (v4i16 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def _1s8h: NeonI_2VAcross<0b1, u, 0b01, opcode,
+ (outs FPR32:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.8h",
+ [(set (v1i32 FPR32:$Rd),
+ (v1i32 (opnode (v8i16 VPR128:$Rn))))],
+ NoItinerary>;
+
+ // _1d2s doesn't exist!
+
+ def _1d4s: NeonI_2VAcross<0b1, u, 0b10, opcode,
+ (outs FPR64:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.4s",
+ [(set (v1i64 FPR64:$Rd),
+ (v1i64 (opnode (v4i32 VPR128:$Rn))))],
+ NoItinerary>;
+}
+
+defm SADDLV : NeonI_2VAcross_1<0b0, 0b00011, "saddlv", int_aarch64_neon_saddlv>;
+defm UADDLV : NeonI_2VAcross_1<0b1, 0b00011, "uaddlv", int_aarch64_neon_uaddlv>;
+
+// Variant 2
+
+multiclass NeonI_2VAcross_2<bit u, bits<5> opcode,
+ string asmop, SDPatternOperator opnode>
+{
+ def _1b8b: NeonI_2VAcross<0b0, u, 0b00, opcode,
+ (outs FPR8:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd, $Rn.8b",
+ [(set (v1i8 FPR8:$Rd),
+ (v1i8 (opnode (v8i8 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def _1b16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
+ (outs FPR8:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.16b",
+ [(set (v1i8 FPR8:$Rd),
+ (v1i8 (opnode (v16i8 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def _1h4h: NeonI_2VAcross<0b0, u, 0b01, opcode,
+ (outs FPR16:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd, $Rn.4h",
+ [(set (v1i16 FPR16:$Rd),
+ (v1i16 (opnode (v4i16 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def _1h8h: NeonI_2VAcross<0b1, u, 0b01, opcode,
+ (outs FPR16:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.8h",
+ [(set (v1i16 FPR16:$Rd),
+ (v1i16 (opnode (v8i16 VPR128:$Rn))))],
+ NoItinerary>;
+
+ // _1s2s doesn't exist!
+
+ def _1s4s: NeonI_2VAcross<0b1, u, 0b10, opcode,
+ (outs FPR32:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.4s",
+ [(set (v1i32 FPR32:$Rd),
+ (v1i32 (opnode (v4i32 VPR128:$Rn))))],
+ NoItinerary>;
+}
+
+defm SMAXV : NeonI_2VAcross_2<0b0, 0b01010, "smaxv", int_aarch64_neon_smaxv>;
+defm UMAXV : NeonI_2VAcross_2<0b1, 0b01010, "umaxv", int_aarch64_neon_umaxv>;
+
+defm SMINV : NeonI_2VAcross_2<0b0, 0b11010, "sminv", int_aarch64_neon_sminv>;
+defm UMINV : NeonI_2VAcross_2<0b1, 0b11010, "uminv", int_aarch64_neon_uminv>;
+
+defm ADDV : NeonI_2VAcross_2<0b0, 0b11011, "addv", int_aarch64_neon_vaddv>;
+
+// Variant 3
+
+multiclass NeonI_2VAcross_3<bit u, bits<5> opcode, bits<2> size,
+ string asmop, SDPatternOperator opnode> {
+ def _1s4s: NeonI_2VAcross<0b1, u, size, opcode,
+ (outs FPR32:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.4s",
+ [(set (v1f32 FPR32:$Rd),
+ (v1f32 (opnode (v4f32 VPR128:$Rn))))],
+ NoItinerary>;
+}
+
+defm FMAXNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b00, "fmaxnmv",
+ int_aarch64_neon_vmaxnmv>;
+defm FMINNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b10, "fminnmv",
+ int_aarch64_neon_vminnmv>;
+
+defm FMAXV : NeonI_2VAcross_3<0b1, 0b01111, 0b00, "fmaxv",
+ int_aarch64_neon_vmaxv>;
+defm FMINV : NeonI_2VAcross_3<0b1, 0b01111, 0b10, "fminv",
+ int_aarch64_neon_vminv>;
+
+// The following are for instruction class (Perm)
+
+class NeonI_Permute<bit q, bits<2> size, bits<3> opcode,
+ string asmop, RegisterOperand OpVPR, string OpS,
+ SDPatternOperator opnode, ValueType Ty>
+ : NeonI_Perm<q, size, opcode,
+ (outs OpVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
+ asmop # "\t$Rd." # OpS # ", $Rn." # OpS # ", $Rm." # OpS,
+ [(set (Ty OpVPR:$Rd),
+ (Ty (opnode (Ty OpVPR:$Rn), (Ty OpVPR:$Rm))))],
+ NoItinerary>;
+
+multiclass NeonI_Perm_pat<bits<3> opcode, string asmop,
+ SDPatternOperator opnode> {
+ def _8b : NeonI_Permute<0b0, 0b00, opcode, asmop,
+ VPR64, "8b", opnode, v8i8>;
+ def _16b : NeonI_Permute<0b1, 0b00, opcode, asmop,
+ VPR128, "16b",opnode, v16i8>;
+ def _4h : NeonI_Permute<0b0, 0b01, opcode, asmop,
+ VPR64, "4h", opnode, v4i16>;
+ def _8h : NeonI_Permute<0b1, 0b01, opcode, asmop,
+ VPR128, "8h", opnode, v8i16>;
+ def _2s : NeonI_Permute<0b0, 0b10, opcode, asmop,
+ VPR64, "2s", opnode, v2i32>;
+ def _4s : NeonI_Permute<0b1, 0b10, opcode, asmop,
+ VPR128, "4s", opnode, v4i32>;
+ def _2d : NeonI_Permute<0b1, 0b11, opcode, asmop,
+ VPR128, "2d", opnode, v2i64>;
+}
+
+defm UZP1vvv : NeonI_Perm_pat<0b001, "uzp1", Neon_uzp1>;
+defm TRN1vvv : NeonI_Perm_pat<0b010, "trn1", Neon_trn1>;
+defm ZIP1vvv : NeonI_Perm_pat<0b011, "zip1", Neon_zip1>;
+defm UZP2vvv : NeonI_Perm_pat<0b101, "uzp2", Neon_uzp2>;
+defm TRN2vvv : NeonI_Perm_pat<0b110, "trn2", Neon_trn2>;
+defm ZIP2vvv : NeonI_Perm_pat<0b111, "zip2", Neon_zip2>;
+
+multiclass NeonI_Perm_float_pat<string INS, SDPatternOperator opnode> {
+ def : Pat<(v2f32 (opnode (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
+ (!cast<Instruction>(INS # "_2s") VPR64:$Rn, VPR64:$Rm)>;
+
+ def : Pat<(v4f32 (opnode (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
+ (!cast<Instruction>(INS # "_4s") VPR128:$Rn, VPR128:$Rm)>;
+
+ def : Pat<(v2f64 (opnode (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
+ (!cast<Instruction>(INS # "_2d") VPR128:$Rn, VPR128:$Rm)>;
+}
+
+defm : NeonI_Perm_float_pat<"UZP1vvv", Neon_uzp1>;
+defm : NeonI_Perm_float_pat<"UZP2vvv", Neon_uzp2>;
+defm : NeonI_Perm_float_pat<"ZIP1vvv", Neon_zip1>;
+defm : NeonI_Perm_float_pat<"ZIP2vvv", Neon_zip2>;
+defm : NeonI_Perm_float_pat<"TRN1vvv", Neon_trn1>;
+defm : NeonI_Perm_float_pat<"TRN2vvv", Neon_trn2>;
+
+// The following are for instruction class (3V Diff)
+
+// normal long/long2 pattern
+class NeonI_3VDL<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS,
+ SDPatternOperator opnode, SDPatternOperator ext,
+ RegisterOperand OpVPR,
+ ValueType ResTy, ValueType OpTy>
+ : NeonI_3VDiff<q, u, size, opcode,
+ (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
+ [(set (ResTy VPR128:$Rd),
+ (ResTy (opnode (ResTy (ext (OpTy OpVPR:$Rn))),
+ (ResTy (ext (OpTy OpVPR:$Rm))))))],
+ NoItinerary>;
+
+multiclass NeonI_3VDL_s<bit u, bits<4> opcode,
+ string asmop, SDPatternOperator opnode,
+ bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
+ opnode, sext, VPR64, v8i16, v8i8>;
+ def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
+ opnode, sext, VPR64, v4i32, v4i16>;
+ def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
+ opnode, sext, VPR64, v2i64, v2i32>;
+ }
+}
+
+multiclass NeonI_3VDL2_s<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
+ opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
+ def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
+ opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
+ def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
+ opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
+ }
+}
+
+multiclass NeonI_3VDL_u<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
+ opnode, zext, VPR64, v8i16, v8i8>;
+ def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
+ opnode, zext, VPR64, v4i32, v4i16>;
+ def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
+ opnode, zext, VPR64, v2i64, v2i32>;
+ }
+}
+
+multiclass NeonI_3VDL2_u<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
+ opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
+ def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
+ opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
+ def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
+ opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
+ }
+}
+
+defm SADDLvvv : NeonI_3VDL_s<0b0, 0b0000, "saddl", add, 1>;
+defm UADDLvvv : NeonI_3VDL_u<0b1, 0b0000, "uaddl", add, 1>;
+
+defm SADDL2vvv : NeonI_3VDL2_s<0b0, 0b0000, "saddl2", add, 1>;
+defm UADDL2vvv : NeonI_3VDL2_u<0b1, 0b0000, "uaddl2", add, 1>;
+
+defm SSUBLvvv : NeonI_3VDL_s<0b0, 0b0010, "ssubl", sub, 0>;
+defm USUBLvvv : NeonI_3VDL_u<0b1, 0b0010, "usubl", sub, 0>;
+
+defm SSUBL2vvv : NeonI_3VDL2_s<0b0, 0b0010, "ssubl2", sub, 0>;
+defm USUBL2vvv : NeonI_3VDL2_u<0b1, 0b0010, "usubl2", sub, 0>;
+
+// normal wide/wide2 pattern
+class NeonI_3VDW<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS,
+ SDPatternOperator opnode, SDPatternOperator ext,
+ RegisterOperand OpVPR,
+ ValueType ResTy, ValueType OpTy>
+ : NeonI_3VDiff<q, u, size, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, OpVPR:$Rm),
+ asmop # "\t$Rd." # ResS # ", $Rn." # ResS # ", $Rm." # OpS,
+ [(set (ResTy VPR128:$Rd),
+ (ResTy (opnode (ResTy VPR128:$Rn),
+ (ResTy (ext (OpTy OpVPR:$Rm))))))],
+ NoItinerary>;
+
+multiclass NeonI_3VDW_s<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode> {
+ def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
+ opnode, sext, VPR64, v8i16, v8i8>;
+ def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
+ opnode, sext, VPR64, v4i32, v4i16>;
+ def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
+ opnode, sext, VPR64, v2i64, v2i32>;
+}
+
+defm SADDWvvv : NeonI_3VDW_s<0b0, 0b0001, "saddw", add>;
+defm SSUBWvvv : NeonI_3VDW_s<0b0, 0b0011, "ssubw", sub>;
+
+multiclass NeonI_3VDW2_s<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode> {
+ def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
+ opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
+ def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
+ opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
+ def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
+ opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
+}
+
+defm SADDW2vvv : NeonI_3VDW2_s<0b0, 0b0001, "saddw2", add>;
+defm SSUBW2vvv : NeonI_3VDW2_s<0b0, 0b0011, "ssubw2", sub>;
+
+multiclass NeonI_3VDW_u<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode> {
+ def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
+ opnode, zext, VPR64, v8i16, v8i8>;
+ def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
+ opnode, zext, VPR64, v4i32, v4i16>;
+ def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
+ opnode, zext, VPR64, v2i64, v2i32>;
+}
+
+defm UADDWvvv : NeonI_3VDW_u<0b1, 0b0001, "uaddw", add>;
+defm USUBWvvv : NeonI_3VDW_u<0b1, 0b0011, "usubw", sub>;
+
+multiclass NeonI_3VDW2_u<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode> {
+ def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
+ opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
+ def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
+ opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
+ def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
+ opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
+}
+
+defm UADDW2vvv : NeonI_3VDW2_u<0b1, 0b0001, "uaddw2", add>;
+defm USUBW2vvv : NeonI_3VDW2_u<0b1, 0b0011, "usubw2", sub>;
+
+// Get the high half of each vector element.
+multiclass NeonI_get_high {
+ def _8h : PatFrag<(ops node:$Rn),
+ (v8i8 (trunc (v8i16 (srl (v8i16 node:$Rn),
+ (v8i16 (Neon_vdup (i32 8)))))))>;
+ def _4s : PatFrag<(ops node:$Rn),
+ (v4i16 (trunc (v4i32 (srl (v4i32 node:$Rn),
+ (v4i32 (Neon_vdup (i32 16)))))))>;
+ def _2d : PatFrag<(ops node:$Rn),
+ (v2i32 (trunc (v2i64 (srl (v2i64 node:$Rn),
+ (v2i64 (Neon_vdup (i32 32)))))))>;
+}
+
+defm NI_get_hi : NeonI_get_high;
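+// For illustration: NI_get_hi_8h is (trunc (srl $Rn, (Neon_vdup 8))), i.e. the
+// most significant byte of each 16-bit element; pairing it with add/sub below
+// reproduces the addhn/subhn semantics.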
+
+// pattern for addhn/subhn with 2 operands
+class NeonI_3VDN_addhn_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS,
+ SDPatternOperator opnode, SDPatternOperator get_hi,
+ ValueType ResTy, ValueType OpTy>
+ : NeonI_3VDiff<q, u, size, opcode,
+ (outs VPR64:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
+ [(set (ResTy VPR64:$Rd),
+ (ResTy (get_hi
+ (OpTy (opnode (OpTy VPR128:$Rn),
+ (OpTy VPR128:$Rm))))))],
+ NoItinerary>;
+
+multiclass NeonI_3VDN_addhn_2Op<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8b8h : NeonI_3VDN_addhn_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
+ opnode, NI_get_hi_8h, v8i8, v8i16>;
+ def _4h4s : NeonI_3VDN_addhn_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
+ opnode, NI_get_hi_4s, v4i16, v4i32>;
+ def _2s2d : NeonI_3VDN_addhn_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
+ opnode, NI_get_hi_2d, v2i32, v2i64>;
+ }
+}
+
+defm ADDHNvvv : NeonI_3VDN_addhn_2Op<0b0, 0b0100, "addhn", add, 1>;
+defm SUBHNvvv : NeonI_3VDN_addhn_2Op<0b0, 0b0110, "subhn", sub, 0>;
+
+// pattern for operation with 2 operands
+class NeonI_3VD_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS,
+ SDPatternOperator opnode,
+ RegisterOperand ResVPR, RegisterOperand OpVPR,
+ ValueType ResTy, ValueType OpTy>
+ : NeonI_3VDiff<q, u, size, opcode,
+ (outs ResVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
+ [(set (ResTy ResVPR:$Rd),
+ (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))],
+ NoItinerary>;
+
+// normal narrow pattern
+multiclass NeonI_3VDN_2Op<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8b8h : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
+ opnode, VPR64, VPR128, v8i8, v8i16>;
+ def _4h4s : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
+ opnode, VPR64, VPR128, v4i16, v4i32>;
+ def _2s2d : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
+ opnode, VPR64, VPR128, v2i32, v2i64>;
+ }
+}
+
+defm RADDHNvvv : NeonI_3VDN_2Op<0b1, 0b0100, "raddhn", int_arm_neon_vraddhn, 1>;
+defm RSUBHNvvv : NeonI_3VDN_2Op<0b1, 0b0110, "rsubhn", int_arm_neon_vrsubhn, 0>;
+
+// pattern for ACLE intrinsics with 3 operands
+class NeonI_3VDN_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS>
+ : NeonI_3VDiff<q, u, size, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
+ [], NoItinerary> {
+ let Constraints = "$src = $Rd";
+ let neverHasSideEffects = 1;
+}
+
+multiclass NeonI_3VDN_3Op_v1<bit u, bits<4> opcode, string asmop> {
+ def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h">;
+ def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s">;
+ def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d">;
+}
+
+defm ADDHN2vvv : NeonI_3VDN_3Op_v1<0b0, 0b0100, "addhn2">;
+defm SUBHN2vvv : NeonI_3VDN_3Op_v1<0b0, 0b0110, "subhn2">;
+
+defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2">;
+defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2">;
+
+// Patterns have to be separate because there's a SUBREG_TO_REG in the output
+// part.
+class NarrowHighHalfPat<Instruction INST, ValueType DstTy, ValueType SrcTy,
+ SDPatternOperator coreop>
+ : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
+ (v1i64 (bitconvert (DstTy (coreop (SrcTy VPR128:$Rn),
+ (SrcTy VPR128:$Rm)))))),
+ (INST (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+ VPR128:$Rn, VPR128:$Rm)>;
+
+// addhn2 patterns
+def : NarrowHighHalfPat<ADDHN2vvv_16b8h, v8i8, v8i16,
+ BinOpFrag<(NI_get_hi_8h (add node:$LHS, node:$RHS))>>;
+def : NarrowHighHalfPat<ADDHN2vvv_8h4s, v4i16, v4i32,
+ BinOpFrag<(NI_get_hi_4s (add node:$LHS, node:$RHS))>>;
+def : NarrowHighHalfPat<ADDHN2vvv_4s2d, v2i32, v2i64,
+ BinOpFrag<(NI_get_hi_2d (add node:$LHS, node:$RHS))>>;
+
+// subhn2 patterns
+def : NarrowHighHalfPat<SUBHN2vvv_16b8h, v8i8, v8i16,
+ BinOpFrag<(NI_get_hi_8h (sub node:$LHS, node:$RHS))>>;
+def : NarrowHighHalfPat<SUBHN2vvv_8h4s, v4i16, v4i32,
+ BinOpFrag<(NI_get_hi_4s (sub node:$LHS, node:$RHS))>>;
+def : NarrowHighHalfPat<SUBHN2vvv_4s2d, v2i32, v2i64,
+ BinOpFrag<(NI_get_hi_2d (sub node:$LHS, node:$RHS))>>;
+
+// raddhn2 patterns
+def : NarrowHighHalfPat<RADDHN2vvv_16b8h, v8i8, v8i16, int_arm_neon_vraddhn>;
+def : NarrowHighHalfPat<RADDHN2vvv_8h4s, v4i16, v4i32, int_arm_neon_vraddhn>;
+def : NarrowHighHalfPat<RADDHN2vvv_4s2d, v2i32, v2i64, int_arm_neon_vraddhn>;
+
+// rsubhn2 patterns
+def : NarrowHighHalfPat<RSUBHN2vvv_16b8h, v8i8, v8i16, int_arm_neon_vrsubhn>;
+def : NarrowHighHalfPat<RSUBHN2vvv_8h4s, v4i16, v4i32, int_arm_neon_vrsubhn>;
+def : NarrowHighHalfPat<RSUBHN2vvv_4s2d, v2i32, v2i64, int_arm_neon_vrsubhn>;
+
+// patterns that need to extend the result
+class NeonI_3VDL_Ext<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS,
+ SDPatternOperator opnode,
+ RegisterOperand OpVPR,
+ ValueType ResTy, ValueType OpTy, ValueType OpSTy>
+ : NeonI_3VDiff<q, u, size, opcode,
+ (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
+ [(set (ResTy VPR128:$Rd),
+ (ResTy (zext (OpSTy (opnode (OpTy OpVPR:$Rn),
+ (OpTy OpVPR:$Rm))))))],
+ NoItinerary>;
+
+multiclass NeonI_3VDL_zext<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8h8b : NeonI_3VDL_Ext<0b0, u, 0b00, opcode, asmop, "8h", "8b",
+ opnode, VPR64, v8i16, v8i8, v8i8>;
+ def _4s4h : NeonI_3VDL_Ext<0b0, u, 0b01, opcode, asmop, "4s", "4h",
+ opnode, VPR64, v4i32, v4i16, v4i16>;
+ def _2d2s : NeonI_3VDL_Ext<0b0, u, 0b10, opcode, asmop, "2d", "2s",
+ opnode, VPR64, v2i64, v2i32, v2i32>;
+ }
+}
+
+defm SABDLvvv : NeonI_3VDL_zext<0b0, 0b0111, "sabdl", int_arm_neon_vabds, 1>;
+defm UABDLvvv : NeonI_3VDL_zext<0b1, 0b0111, "uabdl", int_arm_neon_vabdu, 1>;
+
+multiclass NeonI_Op_High<SDPatternOperator op> {
+ def _16B : PatFrag<(ops node:$Rn, node:$Rm),
+ (op (v8i8 (Neon_High16B node:$Rn)),
+ (v8i8 (Neon_High16B node:$Rm)))>;
+ def _8H : PatFrag<(ops node:$Rn, node:$Rm),
+ (op (v4i16 (Neon_High8H node:$Rn)),
+ (v4i16 (Neon_High8H node:$Rm)))>;
+ def _4S : PatFrag<(ops node:$Rn, node:$Rm),
+ (op (v2i32 (Neon_High4S node:$Rn)),
+ (v2i32 (Neon_High4S node:$Rm)))>;
+}
+
+defm NI_sabdl_hi : NeonI_Op_High<int_arm_neon_vabds>;
+defm NI_uabdl_hi : NeonI_Op_High<int_arm_neon_vabdu>;
+defm NI_smull_hi : NeonI_Op_High<int_arm_neon_vmulls>;
+defm NI_umull_hi : NeonI_Op_High<int_arm_neon_vmullu>;
+defm NI_qdmull_hi : NeonI_Op_High<int_arm_neon_vqdmull>;
+defm NI_pmull_hi : NeonI_Op_High<int_arm_neon_vmullp>;
+
+multiclass NeonI_3VDL_Abd_u<bit u, bits<4> opcode, string asmop, string opnode,
+ bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8h8b : NeonI_3VDL_Ext<0b1, u, 0b00, opcode, asmop, "8h", "16b",
+ !cast<PatFrag>(opnode # "_16B"),
+ VPR128, v8i16, v16i8, v8i8>;
+ def _4s4h : NeonI_3VDL_Ext<0b1, u, 0b01, opcode, asmop, "4s", "8h",
+ !cast<PatFrag>(opnode # "_8H"),
+ VPR128, v4i32, v8i16, v4i16>;
+ def _2d2s : NeonI_3VDL_Ext<0b1, u, 0b10, opcode, asmop, "2d", "4s",
+ !cast<PatFrag>(opnode # "_4S"),
+ VPR128, v2i64, v4i32, v2i32>;
+ }
+}
+
+defm SABDL2vvv : NeonI_3VDL_Abd_u<0b0, 0b0111, "sabdl2", "NI_sabdl_hi", 1>;
+defm UABDL2vvv : NeonI_3VDL_Abd_u<0b1, 0b0111, "uabdl2", "NI_uabdl_hi", 1>;
+
+// For patterns that need two operators chained together.
+class NeonI_3VDL_Aba<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS,
+ SDPatternOperator opnode, SDPatternOperator subop,
+ RegisterOperand OpVPR,
+ ValueType ResTy, ValueType OpTy, ValueType OpSTy>
+ : NeonI_3VDiff<q, u, size, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
+ [(set (ResTy VPR128:$Rd),
+ (ResTy (opnode
+ (ResTy VPR128:$src),
+ (ResTy (zext (OpSTy (subop (OpTy OpVPR:$Rn),
+ (OpTy OpVPR:$Rm))))))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+}
+
+multiclass NeonI_3VDL_Aba_v1<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, SDPatternOperator subop>{
+ def _8h8b : NeonI_3VDL_Aba<0b0, u, 0b00, opcode, asmop, "8h", "8b",
+ opnode, subop, VPR64, v8i16, v8i8, v8i8>;
+ def _4s4h : NeonI_3VDL_Aba<0b0, u, 0b01, opcode, asmop, "4s", "4h",
+ opnode, subop, VPR64, v4i32, v4i16, v4i16>;
+ def _2d2s : NeonI_3VDL_Aba<0b0, u, 0b10, opcode, asmop, "2d", "2s",
+ opnode, subop, VPR64, v2i64, v2i32, v2i32>;
+}
+
+defm SABALvvv : NeonI_3VDL_Aba_v1<0b0, 0b0101, "sabal",
+ add, int_arm_neon_vabds>;
+defm UABALvvv : NeonI_3VDL_Aba_v1<0b1, 0b0101, "uabal",
+ add, int_arm_neon_vabdu>;
+
+multiclass NeonI_3VDL2_Aba_v1<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, string subop> {
+ def _8h8b : NeonI_3VDL_Aba<0b1, u, 0b00, opcode, asmop, "8h", "16b",
+ opnode, !cast<PatFrag>(subop # "_16B"),
+ VPR128, v8i16, v16i8, v8i8>;
+ def _4s4h : NeonI_3VDL_Aba<0b1, u, 0b01, opcode, asmop, "4s", "8h",
+ opnode, !cast<PatFrag>(subop # "_8H"),
+ VPR128, v4i32, v8i16, v4i16>;
+ def _2d2s : NeonI_3VDL_Aba<0b1, u, 0b10, opcode, asmop, "2d", "4s",
+ opnode, !cast<PatFrag>(subop # "_4S"),
+ VPR128, v2i64, v4i32, v2i32>;
+}
+
+defm SABAL2vvv : NeonI_3VDL2_Aba_v1<0b0, 0b0101, "sabal2", add,
+ "NI_sabdl_hi">;
+defm UABAL2vvv : NeonI_3VDL2_Aba_v1<0b1, 0b0101, "uabal2", add,
+ "NI_uabdl_hi">;
+
+// Long pattern with 2 operands
+multiclass NeonI_3VDL_2Op<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
+ opnode, VPR128, VPR64, v8i16, v8i8>;
+ def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
+ opnode, VPR128, VPR64, v4i32, v4i16>;
+ def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
+ opnode, VPR128, VPR64, v2i64, v2i32>;
+ }
+}
+
+defm SMULLvvv : NeonI_3VDL_2Op<0b0, 0b1100, "smull", int_arm_neon_vmulls, 1>;
+defm UMULLvvv : NeonI_3VDL_2Op<0b1, 0b1100, "umull", int_arm_neon_vmullu, 1>;
+
+class NeonI_3VDL2_2Op_mull<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS,
+ SDPatternOperator opnode,
+ ValueType ResTy, ValueType OpTy>
+ : NeonI_3VDiff<q, u, size, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
+ [(set (ResTy VPR128:$Rd),
+ (ResTy (opnode (OpTy VPR128:$Rn), (OpTy VPR128:$Rm))))],
+ NoItinerary>;
+
+multiclass NeonI_3VDL2_2Op_mull_v1<bit u, bits<4> opcode, string asmop,
+ string opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
+ !cast<PatFrag>(opnode # "_16B"),
+ v8i16, v16i8>;
+ def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
+ !cast<PatFrag>(opnode # "_8H"),
+ v4i32, v8i16>;
+ def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
+ !cast<PatFrag>(opnode # "_4S"),
+ v2i64, v4i32>;
+ }
+}
+
+defm SMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b0, 0b1100, "smull2",
+ "NI_smull_hi", 1>;
+defm UMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b1, 0b1100, "umull2",
+ "NI_umull_hi", 1>;
+
+// Long pattern with 3 operands
+class NeonI_3VDL_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS,
+ SDPatternOperator opnode,
+ ValueType ResTy, ValueType OpTy>
+ : NeonI_3VDiff<q, u, size, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR64:$Rn, VPR64:$Rm),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
+ [(set (ResTy VPR128:$Rd),
+ (ResTy (opnode
+ (ResTy VPR128:$src),
+ (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+}
+
+multiclass NeonI_3VDL_3Op_v1<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode> {
+ def _8h8b : NeonI_3VDL_3Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
+ opnode, v8i16, v8i8>;
+ def _4s4h : NeonI_3VDL_3Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
+ opnode, v4i32, v4i16>;
+ def _2d2s : NeonI_3VDL_3Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
+ opnode, v2i64, v2i32>;
+}
+
+def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
+ (add node:$Rd,
+ (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
+
+def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
+ (add node:$Rd,
+ (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
+
+def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
+ (sub node:$Rd,
+ (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
+
+def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
+ (sub node:$Rd,
+ (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
+
+defm SMLALvvv : NeonI_3VDL_3Op_v1<0b0, 0b1000, "smlal", Neon_smlal>;
+defm UMLALvvv : NeonI_3VDL_3Op_v1<0b1, 0b1000, "umlal", Neon_umlal>;
+
+defm SMLSLvvv : NeonI_3VDL_3Op_v1<0b0, 0b1010, "smlsl", Neon_smlsl>;
+defm UMLSLvvv : NeonI_3VDL_3Op_v1<0b1, 0b1010, "umlsl", Neon_umlsl>;
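+// A minimal sketch: the SMLALvvv_8h8b variant matches
+// (add $Rd, (int_arm_neon_vmulls $Rn, $Rm)) with 8-bit operands and is
+// selected to "smlal $Rd.8h, $Rn.8b, $Rm.8b".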
+
+class NeonI_3VDL2_3Op_mlas<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS,
+ SDPatternOperator subop, SDPatternOperator opnode,
+ RegisterOperand OpVPR,
+ ValueType ResTy, ValueType OpTy>
+ : NeonI_3VDiff<q, u, size, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
+ [(set (ResTy VPR128:$Rd),
+ (ResTy (subop
+ (ResTy VPR128:$src),
+ (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+}
+
+multiclass NeonI_3VDL2_3Op_mlas_v1<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator subop, string opnode> {
+ def _8h16b : NeonI_3VDL2_3Op_mlas<0b1, u, 0b00, opcode, asmop, "8h", "16b",
+ subop, !cast<PatFrag>(opnode # "_16B"),
+ VPR128, v8i16, v16i8>;
+ def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
+ subop, !cast<PatFrag>(opnode # "_8H"),
+ VPR128, v4i32, v8i16>;
+ def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
+ subop, !cast<PatFrag>(opnode # "_4S"),
+ VPR128, v2i64, v4i32>;
+}
+
+defm SMLAL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1000, "smlal2",
+ add, "NI_smull_hi">;
+defm UMLAL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1000, "umlal2",
+ add, "NI_umull_hi">;
+
+defm SMLSL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1010, "smlsl2",
+ sub, "NI_smull_hi">;
+defm UMLSL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1010, "umlsl2",
+ sub, "NI_umull_hi">;
+
+multiclass NeonI_3VDL_qdmlal_3Op_v2<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode> {
+ def _4s4h : NeonI_3VDL2_3Op_mlas<0b0, u, 0b01, opcode, asmop, "4s", "4h",
+ opnode, int_arm_neon_vqdmull,
+ VPR64, v4i32, v4i16>;
+ def _2d2s : NeonI_3VDL2_3Op_mlas<0b0, u, 0b10, opcode, asmop, "2d", "2s",
+ opnode, int_arm_neon_vqdmull,
+ VPR64, v2i64, v2i32>;
+}
+
+defm SQDMLALvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1001, "sqdmlal",
+ int_arm_neon_vqadds>;
+defm SQDMLSLvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1011, "sqdmlsl",
+ int_arm_neon_vqsubs>;
+
+multiclass NeonI_3VDL_v2<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
+ opnode, VPR128, VPR64, v4i32, v4i16>;
+ def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
+ opnode, VPR128, VPR64, v2i64, v2i32>;
+ }
+}
+
+defm SQDMULLvvv : NeonI_3VDL_v2<0b0, 0b1101, "sqdmull",
+ int_arm_neon_vqdmull, 1>;
+
+multiclass NeonI_3VDL2_2Op_mull_v2<bit u, bits<4> opcode, string asmop,
+ string opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
+ !cast<PatFrag>(opnode # "_8H"),
+ v4i32, v8i16>;
+ def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
+ !cast<PatFrag>(opnode # "_4S"),
+ v2i64, v4i32>;
+ }
+}
+
+defm SQDMULL2vvv : NeonI_3VDL2_2Op_mull_v2<0b0, 0b1101, "sqdmull2",
+ "NI_qdmull_hi", 1>;
+
+multiclass NeonI_3VDL2_3Op_qdmlal_v2<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode> {
+ def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
+ opnode, NI_qdmull_hi_8H,
+ VPR128, v4i32, v8i16>;
+ def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
+ opnode, NI_qdmull_hi_4S,
+ VPR128, v2i64, v4i32>;
+}
+
+defm SQDMLAL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1001, "sqdmlal2",
+ int_arm_neon_vqadds>;
+defm SQDMLSL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1011, "sqdmlsl2",
+ int_arm_neon_vqsubs>;
+
+multiclass NeonI_3VDL_v3<bit u, bits<4> opcode, string asmop,
+ SDPatternOperator opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
+ opnode, VPR128, VPR64, v8i16, v8i8>;
+
+ def _1q1d : NeonI_3VDiff<0b0, u, 0b11, opcode,
+ (outs VPR128:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
+ asmop # "\t$Rd.1q, $Rn.1d, $Rm.1d",
+ [], NoItinerary>;
+ }
+}
+
+defm PMULLvvv : NeonI_3VDL_v3<0b0, 0b1110, "pmull", int_arm_neon_vmullp, 1>;
+
+multiclass NeonI_3VDL2_2Op_mull_v3<bit u, bits<4> opcode, string asmop,
+ string opnode, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
+ !cast<PatFrag>(opnode # "_16B"),
+ v8i16, v16i8>;
+
+ def _1q2d : NeonI_3VDiff<0b1, u, 0b11, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd.1q, $Rn.2d, $Rm.2d",
+ [], NoItinerary>;
+ }
+}
+
+defm PMULL2vvv : NeonI_3VDL2_2Op_mull_v3<0b0, 0b1110, "pmull2", "NI_pmull_hi",
+ 1>;
+
+// End of implementation for instruction class (3V Diff)
+
+// The following are the vector load/store multiple N-element structure
+// instructions (class SIMD lselem).
+
+// ld1: load multiple 1-element structures to 1/2/3/4 registers.
+// ld2/ld3/ld4: load multiple N-element structures to N registers (N = 2, 3, 4).
+// The structure consists of a sequence of sets of N values.
+// The first element of the structure is placed in the first lane
+// of the first vector, the second element in the first lane
+// of the second vector, and so on.
+// E.g. LD1_3V_2S will load 32-bit elements {A, B, C, D, E, F} sequentially into
+// the three 64-bit vectors list {BA, DC, FE}.
+// E.g. LD3_2S will load 32-bit elements {A, B, C, D, E, F} into the three
+// 64-bit vectors list {DA, EB, FC}.
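+// For example (illustrative syntax), "ld3 {v0.2s, v1.2s, v2.2s}, [x0]"
+// de-interleaves {A, B, C, D, E, F} so that v0 = {A, D}, v1 = {B, E} and
+// v2 = {C, F}, the {DA, EB, FC} layout above.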
+// Store instructions work the same way, writing multiple structures from N
+// registers to memory.
+
+
+class NeonI_LDVList<bit q, bits<4> opcode, bits<2> size,
+ RegisterOperand VecList, string asmop>
+ : NeonI_LdStMult<q, 1, opcode, size,
+ (outs VecList:$Rt), (ins GPR64xsp:$Rn),
+ asmop # "\t$Rt, [$Rn]",
+ [],
+ NoItinerary> {
+ let mayLoad = 1;
+ let neverHasSideEffects = 1;
+}
+
+multiclass LDVList_BHSD<bits<4> opcode, string List, string asmop> {
+ def _8B : NeonI_LDVList<0, opcode, 0b00,
+ !cast<RegisterOperand>(List # "8B_operand"), asmop>;
+
+ def _4H : NeonI_LDVList<0, opcode, 0b01,
+ !cast<RegisterOperand>(List # "4H_operand"), asmop>;
+
+ def _2S : NeonI_LDVList<0, opcode, 0b10,
+ !cast<RegisterOperand>(List # "2S_operand"), asmop>;
+
+ def _16B : NeonI_LDVList<1, opcode, 0b00,
+ !cast<RegisterOperand>(List # "16B_operand"), asmop>;
+
+ def _8H : NeonI_LDVList<1, opcode, 0b01,
+ !cast<RegisterOperand>(List # "8H_operand"), asmop>;
+
+ def _4S : NeonI_LDVList<1, opcode, 0b10,
+ !cast<RegisterOperand>(List # "4S_operand"), asmop>;
+
+ def _2D : NeonI_LDVList<1, opcode, 0b11,
+ !cast<RegisterOperand>(List # "2D_operand"), asmop>;
+}
+
+// Load multiple N-element structures to N consecutive registers (N = 1,2,3,4)
+defm LD1 : LDVList_BHSD<0b0111, "VOne", "ld1">;
+def LD1_1D : NeonI_LDVList<0, 0b0111, 0b11, VOne1D_operand, "ld1">;
+
+defm LD2 : LDVList_BHSD<0b1000, "VPair", "ld2">;
+
+defm LD3 : LDVList_BHSD<0b0100, "VTriple", "ld3">;
+
+defm LD4 : LDVList_BHSD<0b0000, "VQuad", "ld4">;
+
+// Load multiple 1-element structures to N consecutive registers (N = 2,3,4)
+defm LD1x2 : LDVList_BHSD<0b1010, "VPair", "ld1">;
+def LD1x2_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
+
+defm LD1x3 : LDVList_BHSD<0b0110, "VTriple", "ld1">;
+def LD1x3_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
+
+defm LD1x4 : LDVList_BHSD<0b0010, "VQuad", "ld1">;
+def LD1x4_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
+
+class NeonI_STVList<bit q, bits<4> opcode, bits<2> size,
+ RegisterOperand VecList, string asmop>
+ : NeonI_LdStMult<q, 0, opcode, size,
+ (outs), (ins GPR64xsp:$Rn, VecList:$Rt),
+ asmop # "\t$Rt, [$Rn]",
+ [],
+ NoItinerary> {
+ let mayStore = 1;
+ let neverHasSideEffects = 1;
+}
+
+multiclass STVList_BHSD<bits<4> opcode, string List, string asmop> {
+ def _8B : NeonI_STVList<0, opcode, 0b00,
+ !cast<RegisterOperand>(List # "8B_operand"), asmop>;
+
+ def _4H : NeonI_STVList<0, opcode, 0b01,
+ !cast<RegisterOperand>(List # "4H_operand"), asmop>;
+
+ def _2S : NeonI_STVList<0, opcode, 0b10,
+ !cast<RegisterOperand>(List # "2S_operand"), asmop>;
+
+ def _16B : NeonI_STVList<1, opcode, 0b00,
+ !cast<RegisterOperand>(List # "16B_operand"), asmop>;
+
+ def _8H : NeonI_STVList<1, opcode, 0b01,
+ !cast<RegisterOperand>(List # "8H_operand"), asmop>;
+
+ def _4S : NeonI_STVList<1, opcode, 0b10,
+ !cast<RegisterOperand>(List # "4S_operand"), asmop>;
+
+ def _2D : NeonI_STVList<1, opcode, 0b11,
+ !cast<RegisterOperand>(List # "2D_operand"), asmop>;
+}
+
+// Store multiple N-element structures from N registers (N = 1,2,3,4)
+defm ST1 : STVList_BHSD<0b0111, "VOne", "st1">;
+def ST1_1D : NeonI_STVList<0, 0b0111, 0b11, VOne1D_operand, "st1">;
+
+defm ST2 : STVList_BHSD<0b1000, "VPair", "st2">;
+
+defm ST3 : STVList_BHSD<0b0100, "VTriple", "st3">;
+
+defm ST4 : STVList_BHSD<0b0000, "VQuad", "st4">;
+
+// Store multiple 1-element structures from N consecutive registers (N = 2,3,4)
+defm ST1x2 : STVList_BHSD<0b1010, "VPair", "st1">;
+def ST1x2_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
+
+defm ST1x3 : STVList_BHSD<0b0110, "VTriple", "st1">;
+def ST1x3_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
+
+defm ST1x4 : STVList_BHSD<0b0010, "VQuad", "st1">;
+def ST1x4_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
+
+def : Pat<(v2f64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
+def : Pat<(v2i64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
+
+def : Pat<(v4f32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
+def : Pat<(v4i32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
+
+def : Pat<(v8i16 (load GPR64xsp:$addr)), (LD1_8H GPR64xsp:$addr)>;
+def : Pat<(v16i8 (load GPR64xsp:$addr)), (LD1_16B GPR64xsp:$addr)>;
+
+def : Pat<(v1f64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
+def : Pat<(v1i64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
+
+def : Pat<(v2f32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
+def : Pat<(v2i32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
+
+def : Pat<(v4i16 (load GPR64xsp:$addr)), (LD1_4H GPR64xsp:$addr)>;
+def : Pat<(v8i8 (load GPR64xsp:$addr)), (LD1_8B GPR64xsp:$addr)>;
+
+def : Pat<(store (v2i64 VPR128:$value), GPR64xsp:$addr),
+ (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
+def : Pat<(store (v2f64 VPR128:$value), GPR64xsp:$addr),
+ (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
+
+def : Pat<(store (v4i32 VPR128:$value), GPR64xsp:$addr),
+ (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
+def : Pat<(store (v4f32 VPR128:$value), GPR64xsp:$addr),
+ (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
+
+def : Pat<(store (v8i16 VPR128:$value), GPR64xsp:$addr),
+ (ST1_8H GPR64xsp:$addr, VPR128:$value)>;
+def : Pat<(store (v16i8 VPR128:$value), GPR64xsp:$addr),
+ (ST1_16B GPR64xsp:$addr, VPR128:$value)>;
+
+def : Pat<(store (v1i64 VPR64:$value), GPR64xsp:$addr),
+ (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
+def : Pat<(store (v1f64 VPR64:$value), GPR64xsp:$addr),
+ (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
+
+def : Pat<(store (v2i32 VPR64:$value), GPR64xsp:$addr),
+ (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
+def : Pat<(store (v2f32 VPR64:$value), GPR64xsp:$addr),
+ (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
+
+def : Pat<(store (v4i16 VPR64:$value), GPR64xsp:$addr),
+ (ST1_4H GPR64xsp:$addr, VPR64:$value)>;
+def : Pat<(store (v8i8 VPR64:$value), GPR64xsp:$addr),
+ (ST1_8B GPR64xsp:$addr, VPR64:$value)>;
+
+// End of vector load/store multiple N-element structure (class SIMD lselem)
+
+// The following are the post-index vector load/store multiple N-element
+// structure instructions (class SIMD lselem-post).
+def exact1_asmoperand : AsmOperandClass {
+ let Name = "Exact1";
+ let PredicateMethod = "isExactImm<1>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact1 : Operand<i32>, ImmLeaf<i32, [{return Imm == 1;}]> {
+ let ParserMatchClass = exact1_asmoperand;
+}
+
+def exact2_asmoperand : AsmOperandClass {
+ let Name = "Exact2";
+ let PredicateMethod = "isExactImm<2>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact2 : Operand<i32>, ImmLeaf<i32, [{return Imm == 2;}]> {
+ let ParserMatchClass = exact2_asmoperand;
+}
+
+def exact3_asmoperand : AsmOperandClass {
+ let Name = "Exact3";
+ let PredicateMethod = "isExactImm<3>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact3 : Operand<i32>, ImmLeaf<i32, [{return Imm == 3;}]> {
+ let ParserMatchClass = exact3_asmoperand;
+}
+
+def exact4_asmoperand : AsmOperandClass {
+ let Name = "Exact4";
+ let PredicateMethod = "isExactImm<4>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact4 : Operand<i32>, ImmLeaf<i32, [{return Imm == 4;}]> {
+ let ParserMatchClass = exact4_asmoperand;
+}
+
+def exact6_asmoperand : AsmOperandClass {
+ let Name = "Exact6";
+ let PredicateMethod = "isExactImm<6>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact6 : Operand<i32>, ImmLeaf<i32, [{return Imm == 6;}]> {
+ let ParserMatchClass = exact6_asmoperand;
+}
+
+def exact8_asmoperand : AsmOperandClass {
+ let Name = "Exact8";
+ let PredicateMethod = "isExactImm<8>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact8 : Operand<i32>, ImmLeaf<i32, [{return Imm == 8;}]> {
+ let ParserMatchClass = exact8_asmoperand;
+}
+
+def exact12_asmoperand : AsmOperandClass {
+ let Name = "Exact12";
+ let PredicateMethod = "isExactImm<12>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact12 : Operand<i32>, ImmLeaf<i32, [{return Imm == 12;}]> {
+ let ParserMatchClass = exact12_asmoperand;
+}
+
+def exact16_asmoperand : AsmOperandClass {
+ let Name = "Exact16";
+ let PredicateMethod = "isExactImm<16>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact16 : Operand<i32>, ImmLeaf<i32, [{return Imm == 16;}]> {
+ let ParserMatchClass = exact16_asmoperand;
+}
+
+def exact24_asmoperand : AsmOperandClass {
+ let Name = "Exact24";
+ let PredicateMethod = "isExactImm<24>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact24 : Operand<i32>, ImmLeaf<i32, [{return Imm == 24;}]> {
+ let ParserMatchClass = exact24_asmoperand;
+}
+
+def exact32_asmoperand : AsmOperandClass {
+ let Name = "Exact32";
+ let PredicateMethod = "isExactImm<32>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact32 : Operand<i32>, ImmLeaf<i32, [{return Imm == 32;}]> {
+ let ParserMatchClass = exact32_asmoperand;
+}
+
+def exact48_asmoperand : AsmOperandClass {
+ let Name = "Exact48";
+ let PredicateMethod = "isExactImm<48>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact48 : Operand<i32>, ImmLeaf<i32, [{return Imm == 48;}]> {
+ let ParserMatchClass = exact48_asmoperand;
+}
+
+def exact64_asmoperand : AsmOperandClass {
+ let Name = "Exact64";
+ let PredicateMethod = "isExactImm<64>";
+ let RenderMethod = "addImmOperands";
+}
+def uimm_exact64 : Operand<i32>, ImmLeaf<i32, [{return Imm == 64;}]> {
+ let ParserMatchClass = exact64_asmoperand;
+}
+
+multiclass NeonI_LDWB_VList<bit q, bits<4> opcode, bits<2> size,
+ RegisterOperand VecList, Operand ImmTy,
+ string asmop> {
+ let Constraints = "$Rn = $wb", mayLoad = 1, neverHasSideEffects = 1,
+ DecoderMethod = "DecodeVLDSTPostInstruction" in {
+ def _fixed : NeonI_LdStMult_Post<q, 1, opcode, size,
+ (outs VecList:$Rt, GPR64xsp:$wb),
+ (ins GPR64xsp:$Rn, ImmTy:$amt),
+ asmop # "\t$Rt, [$Rn], $amt",
+ [],
+ NoItinerary> {
+ let Rm = 0b11111;
+ }
+
+ def _register : NeonI_LdStMult_Post<q, 1, opcode, size,
+ (outs VecList:$Rt, GPR64xsp:$wb),
+ (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
+ asmop # "\t$Rt, [$Rn], $Rm",
+ [],
+ NoItinerary>;
+ }
+}
+
+multiclass LDWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
+ Operand ImmTy2, string asmop> {
+ defm _8B : NeonI_LDWB_VList<0, opcode, 0b00,
+ !cast<RegisterOperand>(List # "8B_operand"),
+ ImmTy, asmop>;
+
+ defm _4H : NeonI_LDWB_VList<0, opcode, 0b01,
+ !cast<RegisterOperand>(List # "4H_operand"),
+ ImmTy, asmop>;
+
+ defm _2S : NeonI_LDWB_VList<0, opcode, 0b10,
+ !cast<RegisterOperand>(List # "2S_operand"),
+ ImmTy, asmop>;
+
+ defm _16B : NeonI_LDWB_VList<1, opcode, 0b00,
+ !cast<RegisterOperand>(List # "16B_operand"),
+ ImmTy2, asmop>;
+
+ defm _8H : NeonI_LDWB_VList<1, opcode, 0b01,
+ !cast<RegisterOperand>(List # "8H_operand"),
+ ImmTy2, asmop>;
+
+ defm _4S : NeonI_LDWB_VList<1, opcode, 0b10,
+ !cast<RegisterOperand>(List # "4S_operand"),
+ ImmTy2, asmop>;
+
+ defm _2D : NeonI_LDWB_VList<1, opcode, 0b11,
+ !cast<RegisterOperand>(List # "2D_operand"),
+ ImmTy2, asmop>;
+}
+
+// Post-index load multiple N-element structures to N registers (N = 1,2,3,4)
+defm LD1WB : LDWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "ld1">;
+defm LD1WB_1D : NeonI_LDWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
+ "ld1">;
+
+defm LD2WB : LDWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "ld2">;
+
+defm LD3WB : LDWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
+ "ld3">;
+
+defm LD4WB : LDWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "ld4">;
+
+// Post-index load multiple 1-element structures to N consecutive registers
+// (N = 2,3,4)
+defm LD1x2WB : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
+ "ld1">;
+defm LD1x2WB_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
+ uimm_exact16, "ld1">;
+
+defm LD1x3WB : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
+ "ld1">;
+defm LD1x3WB_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
+ uimm_exact24, "ld1">;
+
+defm LD1x4WB : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
+ "ld1">;
+defm LD1x4WB_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
+ uimm_exact32, "ld1">;
+
+multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
+ RegisterOperand VecList, Operand ImmTy,
+ string asmop> {
+ let Constraints = "$Rn = $wb", mayStore = 1, neverHasSideEffects = 1,
+ DecoderMethod = "DecodeVLDSTPostInstruction" in {
+ def _fixed : NeonI_LdStMult_Post<q, 0, opcode, size,
+ (outs GPR64xsp:$wb),
+ (ins GPR64xsp:$Rn, ImmTy:$amt, VecList:$Rt),
+ asmop # "\t$Rt, [$Rn], $amt",
+ [],
+ NoItinerary> {
+ let Rm = 0b11111;
+ }
+
+ def _register : NeonI_LdStMult_Post<q, 0, opcode, size,
+ (outs GPR64xsp:$wb),
+ (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VecList:$Rt),
+ asmop # "\t$Rt, [$Rn], $Rm",
+ [],
+ NoItinerary>;
+ }
+}
+
+multiclass STWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
+ Operand ImmTy2, string asmop> {
+ defm _8B : NeonI_STWB_VList<0, opcode, 0b00,
+ !cast<RegisterOperand>(List # "8B_operand"), ImmTy, asmop>;
+
+ defm _4H : NeonI_STWB_VList<0, opcode, 0b01,
+ !cast<RegisterOperand>(List # "4H_operand"),
+ ImmTy, asmop>;
+
+ defm _2S : NeonI_STWB_VList<0, opcode, 0b10,
+ !cast<RegisterOperand>(List # "2S_operand"),
+ ImmTy, asmop>;
+
+ defm _16B : NeonI_STWB_VList<1, opcode, 0b00,
+ !cast<RegisterOperand>(List # "16B_operand"),
+ ImmTy2, asmop>;
+
+ defm _8H : NeonI_STWB_VList<1, opcode, 0b01,
+ !cast<RegisterOperand>(List # "8H_operand"),
+ ImmTy2, asmop>;
+
+ defm _4S : NeonI_STWB_VList<1, opcode, 0b10,
+ !cast<RegisterOperand>(List # "4S_operand"),
+ ImmTy2, asmop>;
+
+ defm _2D : NeonI_STWB_VList<1, opcode, 0b11,
+ !cast<RegisterOperand>(List # "2D_operand"),
+ ImmTy2, asmop>;
+}
+
+// Post-index store multiple N-element structures from N registers (N = 1,2,3,4)
+defm ST1WB : STWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "st1">;
+defm ST1WB_1D : NeonI_STWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
+ "st1">;
+
+defm ST2WB : STWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "st2">;
+
+defm ST3WB : STWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
+ "st3">;
+
+defm ST4WB : STWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "st4">;
+
+// Post-index store multiple 1-element structures from N consecutive registers
+// (N = 2,3,4)
+defm ST1x2WB : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
+ "st1">;
+defm ST1x2WB_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
+ uimm_exact16, "st1">;
+
+defm ST1x3WB : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
+ "st1">;
+defm ST1x3WB_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
+ uimm_exact24, "st1">;
+
+defm ST1x4WB : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
+ "st1">;
+defm ST1x4WB_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
+ uimm_exact32, "st1">;
+
+// End of post-index vector load/store multiple N-element structure
+// (class SIMD lselem-post)
+
+// The following are vector load/store single N-element structure
+// instructions (class SIMD lsone).
+def neon_uimm0_bare : Operand<i64>,
+ ImmLeaf<i64, [{return Imm == 0;}]> {
+ let ParserMatchClass = neon_uimm0_asmoperand;
+ let PrintMethod = "printUImmBareOperand";
+}
+
+def neon_uimm1_bare : Operand<i64>,
+ ImmLeaf<i64, [{return Imm < 2;}]> {
+ let ParserMatchClass = neon_uimm1_asmoperand;
+ let PrintMethod = "printUImmBareOperand";
+}
+
+def neon_uimm2_bare : Operand<i64>,
+ ImmLeaf<i64, [{return Imm < 4;}]> {
+ let ParserMatchClass = neon_uimm2_asmoperand;
+ let PrintMethod = "printUImmBareOperand";
+}
+
+def neon_uimm3_bare : Operand<i64>,
+ ImmLeaf<i64, [{return Imm < 8;}]> {
+ let ParserMatchClass = uimm3_asmoperand;
+ let PrintMethod = "printUImmBareOperand";
+}
+
+def neon_uimm4_bare : Operand<i64>,
+ ImmLeaf<i64, [{return Imm < 16;}]> {
+ let ParserMatchClass = uimm4_asmoperand;
+ let PrintMethod = "printUImmBareOperand";
+}
+
+class NeonI_LDN_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
+ RegisterOperand VecList, string asmop>
+ : NeonI_LdOne_Dup<q, r, opcode, size,
+ (outs VecList:$Rt), (ins GPR64xsp:$Rn),
+ asmop # "\t$Rt, [$Rn]",
+ [],
+ NoItinerary> {
+ let mayLoad = 1;
+ let neverHasSideEffects = 1;
+}
+
+multiclass LDN_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop> {
+ def _8B : NeonI_LDN_Dup<0, r, opcode, 0b00,
+ !cast<RegisterOperand>(List # "8B_operand"), asmop>;
+
+ def _4H : NeonI_LDN_Dup<0, r, opcode, 0b01,
+ !cast<RegisterOperand>(List # "4H_operand"), asmop>;
+
+ def _2S : NeonI_LDN_Dup<0, r, opcode, 0b10,
+ !cast<RegisterOperand>(List # "2S_operand"), asmop>;
+
+ def _1D : NeonI_LDN_Dup<0, r, opcode, 0b11,
+ !cast<RegisterOperand>(List # "1D_operand"), asmop>;
+
+ def _16B : NeonI_LDN_Dup<1, r, opcode, 0b00,
+ !cast<RegisterOperand>(List # "16B_operand"), asmop>;
+
+ def _8H : NeonI_LDN_Dup<1, r, opcode, 0b01,
+ !cast<RegisterOperand>(List # "8H_operand"), asmop>;
+
+ def _4S : NeonI_LDN_Dup<1, r, opcode, 0b10,
+ !cast<RegisterOperand>(List # "4S_operand"), asmop>;
+
+ def _2D : NeonI_LDN_Dup<1, r, opcode, 0b11,
+ !cast<RegisterOperand>(List # "2D_operand"), asmop>;
+}
+
+// Load single 1-element structure to all lanes of 1 register
+defm LD1R : LDN_Dup_BHSD<0b0, 0b110, "VOne", "ld1r">;
+
+// Load single N-element structure to all lanes of N consecutive
+// registers (N = 2,3,4)
+defm LD2R : LDN_Dup_BHSD<0b1, 0b110, "VPair", "ld2r">;
+defm LD3R : LDN_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r">;
+defm LD4R : LDN_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r">;
+
+
+class LD1R_pattern <ValueType VTy, ValueType DTy, PatFrag LoadOp,
+ Instruction INST>
+ : Pat<(VTy (Neon_vdup (DTy (LoadOp GPR64xsp:$Rn)))),
+ (VTy (INST GPR64xsp:$Rn))>;
+
+// Match all LD1R instructions
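+// For example, a broadcast of a loaded scalar such as
+//   (v8i8 (Neon_vdup (i32 (extloadi8 GPR64xsp:$Rn))))
+// is selected by the first pattern below to "ld1r {vd.8b}, [xn]"
+// (illustrative assembly).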
+def : LD1R_pattern<v8i8, i32, extloadi8, LD1R_8B>;
+
+def : LD1R_pattern<v16i8, i32, extloadi8, LD1R_16B>;
+
+def : LD1R_pattern<v4i16, i32, extloadi16, LD1R_4H>;
+
+def : LD1R_pattern<v8i16, i32, extloadi16, LD1R_8H>;
+
+def : LD1R_pattern<v2i32, i32, load, LD1R_2S>;
+def : LD1R_pattern<v2f32, f32, load, LD1R_2S>;
+
+def : LD1R_pattern<v4i32, i32, load, LD1R_4S>;
+def : LD1R_pattern<v4f32, f32, load, LD1R_4S>;
+
+def : LD1R_pattern<v1i64, i64, load, LD1R_1D>;
+def : LD1R_pattern<v1f64, f64, load, LD1R_1D>;
+
+def : LD1R_pattern<v2i64, i64, load, LD1R_2D>;
+def : LD1R_pattern<v2f64, f64, load, LD1R_2D>;
+
+
+multiclass VectorList_Bare_BHSD<string PREFIX, int Count,
+ RegisterClass RegList> {
+ defm B : VectorList_operands<PREFIX, "B", Count, RegList>;
+ defm H : VectorList_operands<PREFIX, "H", Count, RegList>;
+ defm S : VectorList_operands<PREFIX, "S", Count, RegList>;
+ defm D : VectorList_operands<PREFIX, "D", Count, RegList>;
+}
+
+// Special vector list operands of 128-bit vectors with bare layout,
+// i.e. only the element size suffix (".b", ".h", ".s", ".d") is printed.
+defm VOne : VectorList_Bare_BHSD<"VOne", 1, FPR128>;
+defm VPair : VectorList_Bare_BHSD<"VPair", 2, QPair>;
+defm VTriple : VectorList_Bare_BHSD<"VTriple", 3, QTriple>;
+defm VQuad : VectorList_Bare_BHSD<"VQuad", 4, QQuad>;
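+// With these lists, the lane instructions below print, for example,
+// "ld2 {v0.h, v1.h}[3], [x0]" (illustrative): the layout suffix gives the
+// element size and the bare immediate operand gives the lane index.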
+
+class NeonI_LDN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
+ Operand ImmOp, string asmop>
+ : NeonI_LdStOne_Lane<1, r, op2_1, op0,
+ (outs VList:$Rt),
+ (ins GPR64xsp:$Rn, VList:$src, ImmOp:$lane),
+ asmop # "\t$Rt[$lane], [$Rn]",
+ [],
+ NoItinerary> {
+ let mayLoad = 1;
+ let neverHasSideEffects = 1;
+ let hasExtraDefRegAllocReq = 1;
+ let Constraints = "$src = $Rt";
+}
+
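+// The lane index is spread over the architectural Q (Inst{30}) and S:size
+// (Inst{12-10}) fields; smaller elements need more index bits, so a byte
+// lane uses all four bits while a doubleword lane only uses Q.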
+multiclass LDN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
+ def _B : NeonI_LDN_Lane<r, 0b00, op0,
+ !cast<RegisterOperand>(List # "B_operand"),
+ neon_uimm4_bare, asmop> {
+ let Inst{12-10} = lane{2-0};
+ let Inst{30} = lane{3};
+ }
+
+ def _H : NeonI_LDN_Lane<r, 0b01, op0,
+ !cast<RegisterOperand>(List # "H_operand"),
+ neon_uimm3_bare, asmop> {
+ let Inst{12-10} = {lane{1}, lane{0}, 0b0};
+ let Inst{30} = lane{2};
+ }
+
+ def _S : NeonI_LDN_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "S_operand"),
+ neon_uimm2_bare, asmop> {
+ let Inst{12-10} = {lane{0}, 0b0, 0b0};
+ let Inst{30} = lane{1};
+ }
+
+ def _D : NeonI_LDN_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "D_operand"),
+ neon_uimm1_bare, asmop> {
+ let Inst{12-10} = 0b001;
+ let Inst{30} = lane{0};
+ }
+}
+
+// Load single 1-element structure to one lane of 1 register.
+defm LD1LN : LDN_Lane_BHSD<0b0, 0b0, "VOne", "ld1">;
+
+// Load single N-element structure to one lane of N consecutive registers
+// (N = 2,3,4)
+defm LD2LN : LDN_Lane_BHSD<0b1, 0b0, "VPair", "ld2">;
+defm LD3LN : LDN_Lane_BHSD<0b0, 0b1, "VTriple", "ld3">;
+defm LD4LN : LDN_Lane_BHSD<0b1, 0b1, "VQuad", "ld4">;
+
+multiclass LD1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
+ Operand ImmOp, Operand ImmOp2, PatFrag LoadOp,
+ Instruction INST> {
+ def : Pat<(VTy (vector_insert (VTy VPR64:$src),
+ (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp:$lane))),
+ (VTy (EXTRACT_SUBREG
+ (INST GPR64xsp:$Rn,
+ (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+ ImmOp:$lane),
+ sub_64))>;
+
+ def : Pat<(VTy2 (vector_insert (VTy2 VPR128:$src),
+ (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp2:$lane))),
+ (VTy2 (INST GPR64xsp:$Rn, VPR128:$src, ImmOp2:$lane))>;
+}
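+// In the 64-bit vector patterns the source is first widened with
+// SUBREG_TO_REG because the LD1LN instructions are defined on 128-bit
+// register lists, and the loaded result is narrowed back with
+// EXTRACT_SUBREG.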
+
+// Match all LD1LN instructions
+defm : LD1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
+ extloadi8, LD1LN_B>;
+
+defm : LD1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
+ extloadi16, LD1LN_H>;
+
+defm : LD1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
+ load, LD1LN_S>;
+defm : LD1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
+ load, LD1LN_S>;
+
+defm : LD1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
+ load, LD1LN_D>;
+defm : LD1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
+ load, LD1LN_D>;
+
+class NeonI_STN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
+ Operand ImmOp, string asmop>
+ : NeonI_LdStOne_Lane<0, r, op2_1, op0,
+ (outs), (ins GPR64xsp:$Rn, VList:$Rt, ImmOp:$lane),
+ asmop # "\t$Rt[$lane], [$Rn]",
+ [],
+ NoItinerary> {
+ let mayStore = 1;
+ let neverHasSideEffects = 1;
+ let hasExtraDefRegAllocReq = 1;
+}
+
+multiclass STN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
+ def _B : NeonI_STN_Lane<r, 0b00, op0,
+ !cast<RegisterOperand>(List # "B_operand"),
+ neon_uimm4_bare, asmop> {
+ let Inst{12-10} = lane{2-0};
+ let Inst{30} = lane{3};
+ }
+
+ def _H : NeonI_STN_Lane<r, 0b01, op0,
+ !cast<RegisterOperand>(List # "H_operand"),
+ neon_uimm3_bare, asmop> {
+ let Inst{12-10} = {lane{1}, lane{0}, 0b0};
+ let Inst{30} = lane{2};
+ }
+
+ def _S : NeonI_STN_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "S_operand"),
+ neon_uimm2_bare, asmop> {
+ let Inst{12-10} = {lane{0}, 0b0, 0b0};
+ let Inst{30} = lane{1};
+ }
+
+ def _D : NeonI_STN_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "D_operand"),
+ neon_uimm1_bare, asmop>{
+ let Inst{12-10} = 0b001;
+ let Inst{30} = lane{0};
+ }
+}
+
+// Store single 1-element structure from one lane of 1 register.
+defm ST1LN : STN_Lane_BHSD<0b0, 0b0, "VOne", "st1">;
+
+// Store single N-element structure from one lane of N consecutive registers
+// (N = 2,3,4)
+defm ST2LN : STN_Lane_BHSD<0b1, 0b0, "VPair", "st2">;
+defm ST3LN : STN_Lane_BHSD<0b0, 0b1, "VTriple", "st3">;
+defm ST4LN : STN_Lane_BHSD<0b1, 0b1, "VQuad", "st4">;
+
+multiclass ST1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
+ Operand ImmOp, Operand ImmOp2, PatFrag StoreOp,
+ Instruction INST> {
+ def : Pat<(StoreOp (DTy (vector_extract (VTy VPR64:$Rt), ImmOp:$lane)),
+ GPR64xsp:$Rn),
+ (INST GPR64xsp:$Rn,
+ (SUBREG_TO_REG (i64 0), VPR64:$Rt, sub_64),
+ ImmOp:$lane)>;
+
+ def : Pat<(StoreOp (DTy (vector_extract (VTy2 VPR128:$Rt), ImmOp2:$lane)),
+ GPR64xsp:$Rn),
+ (INST GPR64xsp:$Rn, VPR128:$Rt, ImmOp2:$lane)>;
+}
+
+// Match all ST1LN instructions
+defm : ST1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
+ truncstorei8, ST1LN_B>;
+
+defm : ST1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
+ truncstorei16, ST1LN_H>;
+
+defm : ST1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
+ store, ST1LN_S>;
+defm : ST1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
+ store, ST1LN_S>;
+
+defm : ST1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
+ store, ST1LN_D>;
+defm : ST1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
+ store, ST1LN_D>;
+
+// End of vector load/store single N-element structure (class SIMD lsone).
+
+
+// The following are post-index load/store single N-element instructions
+// (class SIMD lsone-post)
+
+multiclass NeonI_LDN_WB_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
+ RegisterOperand VecList, Operand ImmTy,
+ string asmop> {
+ let mayLoad = 1, neverHasSideEffects = 1, Constraints = "$wb = $Rn",
+ DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
+ def _fixed : NeonI_LdOne_Dup_Post<q, r, opcode, size,
+ (outs VecList:$Rt, GPR64xsp:$wb),
+ (ins GPR64xsp:$Rn, ImmTy:$amt),
+ asmop # "\t$Rt, [$Rn], $amt",
+ [],
+ NoItinerary> {
+ let Rm = 0b11111;
+ }
+
+ def _register : NeonI_LdOne_Dup_Post<q, r, opcode, size,
+ (outs VecList:$Rt, GPR64xsp:$wb),
+ (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
+ asmop # "\t$Rt, [$Rn], $Rm",
+ [],
+ NoItinerary>;
+ }
+}
+
+multiclass LDWB_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop,
+ Operand uimm_b, Operand uimm_h,
+ Operand uimm_s, Operand uimm_d> {
+ defm _8B : NeonI_LDN_WB_Dup<0, r, opcode, 0b00,
+ !cast<RegisterOperand>(List # "8B_operand"),
+ uimm_b, asmop>;
+
+ defm _4H : NeonI_LDN_WB_Dup<0, r, opcode, 0b01,
+ !cast<RegisterOperand>(List # "4H_operand"),
+ uimm_h, asmop>;
+
+ defm _2S : NeonI_LDN_WB_Dup<0, r, opcode, 0b10,
+ !cast<RegisterOperand>(List # "2S_operand"),
+ uimm_s, asmop>;
+
+ defm _1D : NeonI_LDN_WB_Dup<0, r, opcode, 0b11,
+ !cast<RegisterOperand>(List # "1D_operand"),
+ uimm_d, asmop>;
+
+ defm _16B : NeonI_LDN_WB_Dup<1, r, opcode, 0b00,
+ !cast<RegisterOperand>(List # "16B_operand"),
+ uimm_b, asmop>;
+
+ defm _8H : NeonI_LDN_WB_Dup<1, r, opcode, 0b01,
+ !cast<RegisterOperand>(List # "8H_operand"),
+ uimm_h, asmop>;
+
+ defm _4S : NeonI_LDN_WB_Dup<1, r, opcode, 0b10,
+ !cast<RegisterOperand>(List # "4S_operand"),
+ uimm_s, asmop>;
+
+ defm _2D : NeonI_LDN_WB_Dup<1, r, opcode, 0b11,
+ !cast<RegisterOperand>(List # "2D_operand"),
+ uimm_d, asmop>;
+}
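+// The fixed writeback amount equals the number of bytes loaded (element size
+// times register count), e.g. "ld2r {v0.8b, v1.8b}, [x0], #2" for byte
+// elements but "ld2r {v0.2d, v1.2d}, [x0], #16" for doubleword elements
+// (illustrative assembly).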
+
+// Post-index load single 1-element structure to all lanes of 1 register
+defm LD1R_WB : LDWB_Dup_BHSD<0b0, 0b110, "VOne", "ld1r", uimm_exact1,
+ uimm_exact2, uimm_exact4, uimm_exact8>;
+
+// Post-index load single N-element structure to all lanes of N consecutive
+// registers (N = 2,3,4)
+defm LD2R_WB : LDWB_Dup_BHSD<0b1, 0b110, "VPair", "ld2r", uimm_exact2,
+ uimm_exact4, uimm_exact8, uimm_exact16>;
+defm LD3R_WB : LDWB_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r", uimm_exact3,
+ uimm_exact6, uimm_exact12, uimm_exact24>;
+defm LD4R_WB : LDWB_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r", uimm_exact4,
+ uimm_exact8, uimm_exact16, uimm_exact32>;
+
+let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1,
+ Constraints = "$Rn = $wb, $Rt = $src",
+ DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
+ class LDN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
+ Operand ImmTy, Operand ImmOp, string asmop>
+ : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
+ (outs VList:$Rt, GPR64xsp:$wb),
+ (ins GPR64xsp:$Rn, ImmTy:$amt,
+ VList:$src, ImmOp:$lane),
+ asmop # "\t$Rt[$lane], [$Rn], $amt",
+ [],
+ NoItinerary> {
+ let Rm = 0b11111;
+ }
+
+ class LDN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
+ Operand ImmTy, Operand ImmOp, string asmop>
+ : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
+ (outs VList:$Rt, GPR64xsp:$wb),
+ (ins GPR64xsp:$Rn, GPR64noxzr:$Rm,
+ VList:$src, ImmOp:$lane),
+ asmop # "\t$Rt[$lane], [$Rn], $Rm",
+ [],
+ NoItinerary>;
+}
+
+multiclass LD_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
+ Operand uimm_b, Operand uimm_h,
+ Operand uimm_s, Operand uimm_d> {
+ def _B_fixed : LDN_WBFx_Lane<r, 0b00, op0,
+ !cast<RegisterOperand>(List # "B_operand"),
+ uimm_b, neon_uimm4_bare, asmop> {
+ let Inst{12-10} = lane{2-0};
+ let Inst{30} = lane{3};
+ }
+
+ def _B_register : LDN_WBReg_Lane<r, 0b00, op0,
+ !cast<RegisterOperand>(List # "B_operand"),
+ uimm_b, neon_uimm4_bare, asmop> {
+ let Inst{12-10} = lane{2-0};
+ let Inst{30} = lane{3};
+ }
+
+ def _H_fixed : LDN_WBFx_Lane<r, 0b01, op0,
+ !cast<RegisterOperand>(List # "H_operand"),
+ uimm_h, neon_uimm3_bare, asmop> {
+ let Inst{12-10} = {lane{1}, lane{0}, 0b0};
+ let Inst{30} = lane{2};
+ }
+
+ def _H_register : LDN_WBReg_Lane<r, 0b01, op0,
+ !cast<RegisterOperand>(List # "H_operand"),
+ uimm_h, neon_uimm3_bare, asmop> {
+ let Inst{12-10} = {lane{1}, lane{0}, 0b0};
+ let Inst{30} = lane{2};
+ }
+
+ def _S_fixed : LDN_WBFx_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "S_operand"),
+ uimm_s, neon_uimm2_bare, asmop> {
+ let Inst{12-10} = {lane{0}, 0b0, 0b0};
+ let Inst{30} = lane{1};
+ }
+
+ def _S_register : LDN_WBReg_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "S_operand"),
+ uimm_s, neon_uimm2_bare, asmop> {
+ let Inst{12-10} = {lane{0}, 0b0, 0b0};
+ let Inst{30} = lane{1};
+ }
+
+ def _D_fixed : LDN_WBFx_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "D_operand"),
+ uimm_d, neon_uimm1_bare, asmop> {
+ let Inst{12-10} = 0b001;
+ let Inst{30} = lane{0};
+ }
+
+ def _D_register : LDN_WBReg_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "D_operand"),
+ uimm_d, neon_uimm1_bare, asmop> {
+ let Inst{12-10} = 0b001;
+ let Inst{30} = lane{0};
+ }
+}
+
+// Post-index load single 1-element structure to one lane of 1 register.
+defm LD1LN_WB : LD_Lane_WB_BHSD<0b0, 0b0, "VOne", "ld1", uimm_exact1,
+ uimm_exact2, uimm_exact4, uimm_exact8>;
+
+// Post-index load single N-element structure to one lane of N consecutive
+// registers (N = 2,3,4)
+defm LD2LN_WB : LD_Lane_WB_BHSD<0b1, 0b0, "VPair", "ld2", uimm_exact2,
+ uimm_exact4, uimm_exact8, uimm_exact16>;
+defm LD3LN_WB : LD_Lane_WB_BHSD<0b0, 0b1, "VTriple", "ld3", uimm_exact3,
+ uimm_exact6, uimm_exact12, uimm_exact24>;
+defm LD4LN_WB : LD_Lane_WB_BHSD<0b1, 0b1, "VQuad", "ld4", uimm_exact4,
+ uimm_exact8, uimm_exact16, uimm_exact32>;
+
+let mayStore = 1, neverHasSideEffects = 1,
+ hasExtraDefRegAllocReq = 1, Constraints = "$Rn = $wb",
+ DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
+ class STN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
+ Operand ImmTy, Operand ImmOp, string asmop>
+ : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
+ (outs GPR64xsp:$wb),
+ (ins GPR64xsp:$Rn, ImmTy:$amt,
+ VList:$Rt, ImmOp:$lane),
+ asmop # "\t$Rt[$lane], [$Rn], $amt",
+ [],
+ NoItinerary> {
+ let Rm = 0b11111;
+ }
+
+ class STN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
+ Operand ImmTy, Operand ImmOp, string asmop>
+ : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
+ (outs GPR64xsp:$wb),
+ (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VList:$Rt,
+ ImmOp:$lane),
+ asmop # "\t$Rt[$lane], [$Rn], $Rm",
+ [],
+ NoItinerary>;
+}
+
+multiclass ST_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
+ Operand uimm_b, Operand uimm_h,
+ Operand uimm_s, Operand uimm_d> {
+ def _B_fixed : STN_WBFx_Lane<r, 0b00, op0,
+ !cast<RegisterOperand>(List # "B_operand"),
+ uimm_b, neon_uimm4_bare, asmop> {
+ let Inst{12-10} = lane{2-0};
+ let Inst{30} = lane{3};
+ }
+
+ def _B_register : STN_WBReg_Lane<r, 0b00, op0,
+ !cast<RegisterOperand>(List # "B_operand"),
+ uimm_b, neon_uimm4_bare, asmop> {
+ let Inst{12-10} = lane{2-0};
+ let Inst{30} = lane{3};
+ }
+
+ def _H_fixed : STN_WBFx_Lane<r, 0b01, op0,
+ !cast<RegisterOperand>(List # "H_operand"),
+ uimm_h, neon_uimm3_bare, asmop> {
+ let Inst{12-10} = {lane{1}, lane{0}, 0b0};
+ let Inst{30} = lane{2};
+ }
+
+ def _H_register : STN_WBReg_Lane<r, 0b01, op0,
+ !cast<RegisterOperand>(List # "H_operand"),
+ uimm_h, neon_uimm3_bare, asmop> {
+ let Inst{12-10} = {lane{1}, lane{0}, 0b0};
+ let Inst{30} = lane{2};
+ }
+
+ def _S_fixed : STN_WBFx_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "S_operand"),
+ uimm_s, neon_uimm2_bare, asmop> {
+ let Inst{12-10} = {lane{0}, 0b0, 0b0};
+ let Inst{30} = lane{1};
+ }
+
+ def _S_register : STN_WBReg_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "S_operand"),
+ uimm_s, neon_uimm2_bare, asmop> {
+ let Inst{12-10} = {lane{0}, 0b0, 0b0};
+ let Inst{30} = lane{1};
+ }
+
+ def _D_fixed : STN_WBFx_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "D_operand"),
+ uimm_d, neon_uimm1_bare, asmop> {
+ let Inst{12-10} = 0b001;
+ let Inst{30} = lane{0};
+ }
+
+ def _D_register : STN_WBReg_Lane<r, 0b10, op0,
+ !cast<RegisterOperand>(List # "D_operand"),
+ uimm_d, neon_uimm1_bare, asmop> {
+ let Inst{12-10} = 0b001;
+ let Inst{30} = lane{0};
+ }
+}
+
+// Post-index store single 1-element structure from one lane of 1 register.
+defm ST1LN_WB : ST_Lane_WB_BHSD<0b0, 0b0, "VOne", "st1", uimm_exact1,
+ uimm_exact2, uimm_exact4, uimm_exact8>;
+
+// Post-index store single N-element structure from one lane of N consecutive
+// registers (N = 2,3,4)
+defm ST2LN_WB : ST_Lane_WB_BHSD<0b1, 0b0, "VPair", "st2", uimm_exact2,
+ uimm_exact4, uimm_exact8, uimm_exact16>;
+defm ST3LN_WB : ST_Lane_WB_BHSD<0b0, 0b1, "VTriple", "st3", uimm_exact3,
+ uimm_exact6, uimm_exact12, uimm_exact24>;
+defm ST4LN_WB : ST_Lane_WB_BHSD<0b1, 0b1, "VQuad", "st4", uimm_exact4,
+ uimm_exact8, uimm_exact16, uimm_exact32>;
+
+// End of post-index load/store single N-element instructions
+// (class SIMD lsone-post)
+
+// Neon Scalar instruction implementations
+// Scalar Three Same
+
+class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,
+ RegisterClass FPRC>
+ : NeonI_Scalar3Same<u, size, opcode,
+ (outs FPRC:$Rd), (ins FPRC:$Rn, FPRC:$Rm),
+ !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
+ [],
+ NoItinerary>;
+
+class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
+ : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
+
+multiclass NeonI_Scalar3Same_HS_sizes<bit u, bits<5> opcode, string asmop,
+ bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
+ def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
+ }
+}
+
+multiclass NeonI_Scalar3Same_SD_sizes<bit u, bit size_high, bits<5> opcode,
+ string asmop, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def sss : NeonI_Scalar3Same_size<u, {size_high, 0b0}, opcode, asmop, FPR32>;
+ def ddd : NeonI_Scalar3Same_size<u, {size_high, 0b1}, opcode, asmop, FPR64>;
+ }
+}
+
+multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
+ string asmop, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def bbb : NeonI_Scalar3Same_size<u, 0b00, opcode, asmop, FPR8>;
+ def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
+ def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
+ def ddd : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
+ }
+}
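+// Each multiclass expands to one instruction per element size; e.g. a BHSD
+// defm named SQADD produces SQADDbbb .. SQADDddd, with assembly such as
+// "sqadd b0, b1, b2" and "sqadd d0, d1, d2" (illustrative).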
+
+multiclass Neon_Scalar3Same_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD> {
+ def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
+ (INSTD FPR64:$Rn, FPR64:$Rm)>;
+}
+
+multiclass Neon_Scalar3Same_BHSD_size_patterns<SDPatternOperator opnode,
+ Instruction INSTB,
+ Instruction INSTH,
+ Instruction INSTS,
+ Instruction INSTD>
+ : Neon_Scalar3Same_D_size_patterns<opnode, INSTD> {
+ def: Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
+ (INSTB FPR8:$Rn, FPR8:$Rm)>;
+
+ def: Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
+ (INSTH FPR16:$Rn, FPR16:$Rm)>;
+
+ def: Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
+ (INSTS FPR32:$Rn, FPR32:$Rm)>;
+}
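+// Scalar NEON values are modelled as single-element vectors (v1i8 .. v1i64)
+// held in the FPR8 .. FPR64 register classes; these pattern multiclasses map
+// those vector-typed operations onto the scalar instructions above.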
+
+class Neon_Scalar3Same_cmp_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD>
+ : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
+ (INSTD FPR64:$Rn, FPR64:$Rm)>;
+
+multiclass Neon_Scalar3Same_HS_size_patterns<SDPatternOperator opnode,
+ Instruction INSTH,
+ Instruction INSTS> {
+ def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
+ (INSTH FPR16:$Rn, FPR16:$Rm)>;
+ def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
+ (INSTS FPR32:$Rn, FPR32:$Rm)>;
+}
+
+multiclass Neon_Scalar3Same_SD_size_patterns<SDPatternOperator opnode,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))),
+ (INSTS FPR32:$Rn, FPR32:$Rm)>;
+ def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+ (INSTD FPR64:$Rn, FPR64:$Rm)>;
+}
+
+multiclass Neon_Scalar3Same_cmp_SD_size_patterns<SDPatternOperator opnode,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))),
+ (INSTS FPR32:$Rn, FPR32:$Rm)>;
+ def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+ (INSTD FPR64:$Rn, FPR64:$Rm)>;
+}
+
+class Neon_Scalar3Same_cmp_V1_D_size_patterns<CondCode CC,
+ Instruction INSTD>
+ : Pat<(v1i64 (Neon_cmp (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm), CC)),
+ (INSTD FPR64:$Rn, FPR64:$Rm)>;
+
+// Scalar Three Different
+
+class NeonI_Scalar3Diff_size<bit u, bits<2> size, bits<4> opcode, string asmop,
+ RegisterClass FPRCD, RegisterClass FPRCS>
+ : NeonI_Scalar3Diff<u, size, opcode,
+ (outs FPRCD:$Rd), (ins FPRCS:$Rn, FPRCS:$Rm),
+ !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
+ [],
+ NoItinerary>;
+
+multiclass NeonI_Scalar3Diff_HS_size<bit u, bits<4> opcode, string asmop> {
+ def shh : NeonI_Scalar3Diff_size<u, 0b01, opcode, asmop, FPR32, FPR16>;
+ def dss : NeonI_Scalar3Diff_size<u, 0b10, opcode, asmop, FPR64, FPR32>;
+}
+
+multiclass NeonI_Scalar3Diff_ml_HS_size<bit u, bits<4> opcode, string asmop> {
+ let Constraints = "$Src = $Rd" in {
+ def shh : NeonI_Scalar3Diff<u, 0b01, opcode,
+ (outs FPR32:$Rd), (ins FPR32:$Src, FPR16:$Rn, FPR16:$Rm),
+ !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
+ [],
+ NoItinerary>;
+ def dss : NeonI_Scalar3Diff<u, 0b10, opcode,
+ (outs FPR64:$Rd), (ins FPR64:$Src, FPR32:$Rn, FPR32:$Rm),
+ !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
+ [],
+ NoItinerary>;
+ }
+}
+
+multiclass Neon_Scalar3Diff_HS_size_patterns<SDPatternOperator opnode,
+ Instruction INSTH,
+ Instruction INSTS> {
+ def : Pat<(v1i32 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
+ (INSTH FPR16:$Rn, FPR16:$Rm)>;
+ def : Pat<(v1i64 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
+ (INSTS FPR32:$Rn, FPR32:$Rm)>;
+}
+
+multiclass Neon_Scalar3Diff_ml_HS_size_patterns<SDPatternOperator opnode,
+ Instruction INSTH,
+ Instruction INSTS> {
+ def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
+ (INSTH FPR32:$Src, FPR16:$Rn, FPR16:$Rm)>;
+ def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
+ (INSTS FPR64:$Src, FPR32:$Rn, FPR32:$Rm)>;
+}
+
+// Scalar Two Registers Miscellaneous
+
+class NeonI_Scalar2SameMisc_size<bit u, bits<2> size, bits<5> opcode, string asmop,
+ RegisterClass FPRCD, RegisterClass FPRCS>
+ : NeonI_Scalar2SameMisc<u, size, opcode,
+ (outs FPRCD:$Rd), (ins FPRCS:$Rn),
+ !strconcat(asmop, "\t$Rd, $Rn"),
+ [],
+ NoItinerary>;
+
+multiclass NeonI_Scalar2SameMisc_SD_size<bit u, bit size_high, bits<5> opcode,
+ string asmop> {
+ def ss : NeonI_Scalar2SameMisc_size<u, {size_high, 0b0}, opcode, asmop, FPR32,
+ FPR32>;
+ def dd : NeonI_Scalar2SameMisc_size<u, {size_high, 0b1}, opcode, asmop, FPR64,
+ FPR64>;
+}
+
+multiclass NeonI_Scalar2SameMisc_D_size<bit u, bits<5> opcode, string asmop> {
+ def dd : NeonI_Scalar2SameMisc_size<u, 0b11, opcode, asmop, FPR64, FPR64>;
+}
+
+multiclass NeonI_Scalar2SameMisc_BHSD_size<bit u, bits<5> opcode, string asmop>
+ : NeonI_Scalar2SameMisc_D_size<u, opcode, asmop> {
+ def bb : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR8>;
+ def hh : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR16>;
+ def ss : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR32>;
+}
+
+class NeonI_Scalar2SameMisc_fcvtxn_D_size<bit u, bits<5> opcode, string asmop>
+ : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR32, FPR64>;
+
+multiclass NeonI_Scalar2SameMisc_narrow_HSD_size<bit u, bits<5> opcode,
+ string asmop> {
+ def bh : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR16>;
+ def hs : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR32>;
+ def sd : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR64>;
+}
+
+class NeonI_Scalar2SameMisc_accum_size<bit u, bits<2> size, bits<5> opcode,
+ string asmop, RegisterClass FPRC>
+ : NeonI_Scalar2SameMisc<u, size, opcode,
+ (outs FPRC:$Rd), (ins FPRC:$Src, FPRC:$Rn),
+ !strconcat(asmop, "\t$Rd, $Rn"),
+ [],
+ NoItinerary>;
+
+multiclass NeonI_Scalar2SameMisc_accum_BHSD_size<bit u, bits<5> opcode,
+ string asmop> {
+
+ let Constraints = "$Src = $Rd" in {
+ def bb : NeonI_Scalar2SameMisc_accum_size<u, 0b00, opcode, asmop, FPR8>;
+ def hh : NeonI_Scalar2SameMisc_accum_size<u, 0b01, opcode, asmop, FPR16>;
+ def ss : NeonI_Scalar2SameMisc_accum_size<u, 0b10, opcode, asmop, FPR32>;
+ def dd : NeonI_Scalar2SameMisc_accum_size<u, 0b11, opcode, asmop, FPR64>;
+ }
+}
+
+class Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD>
+ : Pat<(v1f32 (opnode (v1f64 FPR64:$Rn))),
+ (INSTD FPR64:$Rn)>;
+
+multiclass Neon_Scalar2SameMisc_fcvt_SD_size_patterns<SDPatternOperator opnode,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn))),
+ (INSTS FPR32:$Rn)>;
+ def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))),
+ (INSTD FPR64:$Rn)>;
+}
+
+multiclass Neon_Scalar2SameMisc_cvt_SD_size_patterns<SDPatternOperator Sopnode,
+ SDPatternOperator Dopnode,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn))),
+ (INSTS FPR32:$Rn)>;
+ def : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn))),
+ (INSTD FPR64:$Rn)>;
+}
+
+multiclass Neon_Scalar2SameMisc_SD_size_patterns<SDPatternOperator opnode,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn))),
+ (INSTS FPR32:$Rn)>;
+ def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
+ (INSTD FPR64:$Rn)>;
+}
+
+class NeonI_Scalar2SameMisc_cmpz_D_size<bit u, bits<5> opcode, string asmop>
+ : NeonI_Scalar2SameMisc<u, 0b11, opcode,
+ (outs FPR64:$Rd), (ins FPR64:$Rn, neon_uimm0:$Imm),
+ !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
+ [],
+ NoItinerary>;
+
+multiclass NeonI_Scalar2SameMisc_cmpz_SD_size<bit u, bits<5> opcode,
+ string asmop> {
+ def ssi : NeonI_Scalar2SameMisc<u, 0b10, opcode,
+ (outs FPR32:$Rd), (ins FPR32:$Rn, fpz32:$FPImm),
+ !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
+ [],
+ NoItinerary>;
+ def ddi : NeonI_Scalar2SameMisc<u, 0b11, opcode,
+ (outs FPR64:$Rd), (ins FPR64:$Rn, fpz32:$FPImm),
+ !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
+ [],
+ NoItinerary>;
+}
+
+class Neon_Scalar2SameMisc_cmpz_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD>
+ : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
+ (v1i64 (bitconvert (v8i8 Neon_AllZero))))),
+ (INSTD FPR64:$Rn, 0)>;
+
+class Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<CondCode CC,
+ Instruction INSTD>
+ : Pat<(v1i64 (Neon_cmpz (v1i64 FPR64:$Rn),
+ (i32 neon_uimm0:$Imm), CC)),
+ (INSTD FPR64:$Rn, neon_uimm0:$Imm)>;
+
+multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns<SDPatternOperator opnode,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn),
+ (v1f32 (scalar_to_vector (f32 fpz32:$FPImm))))),
+ (INSTS FPR32:$Rn, fpz32:$FPImm)>;
+ def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn),
+ (v1f32 (scalar_to_vector (f32 fpz32:$FPImm))))),
+ (INSTD FPR64:$Rn, fpz32:$FPImm)>;
+}
+
+multiclass Neon_Scalar2SameMisc_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD> {
+ def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn))),
+ (INSTD FPR64:$Rn)>;
+}
+
+multiclass Neon_Scalar2SameMisc_BHSD_size_patterns<SDPatternOperator opnode,
+ Instruction INSTB,
+ Instruction INSTH,
+ Instruction INSTS,
+ Instruction INSTD>
+ : Neon_Scalar2SameMisc_D_size_patterns<opnode, INSTD> {
+ def : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn))),
+ (INSTB FPR8:$Rn)>;
+ def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn))),
+ (INSTH FPR16:$Rn)>;
+ def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn))),
+ (INSTS FPR32:$Rn)>;
+}
+
+multiclass Neon_Scalar2SameMisc_narrow_HSD_size_patterns<
+ SDPatternOperator opnode,
+ Instruction INSTH,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn))),
+ (INSTH FPR16:$Rn)>;
+ def : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn))),
+ (INSTS FPR32:$Rn)>;
+ def : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn))),
+ (INSTD FPR64:$Rn)>;
+
+}
+
+multiclass Neon_Scalar2SameMisc_accum_BHSD_size_patterns<
+ SDPatternOperator opnode,
+ Instruction INSTB,
+ Instruction INSTH,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def : Pat<(v1i8 (opnode (v1i8 FPR8:$Src), (v1i8 FPR8:$Rn))),
+ (INSTB FPR8:$Src, FPR8:$Rn)>;
+ def : Pat<(v1i16 (opnode (v1i16 FPR16:$Src), (v1i16 FPR16:$Rn))),
+ (INSTH FPR16:$Src, FPR16:$Rn)>;
+ def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i32 FPR32:$Rn))),
+ (INSTS FPR32:$Src, FPR32:$Rn)>;
+ def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn))),
+ (INSTD FPR64:$Src, FPR64:$Rn)>;
+}
+
+// Scalar Shift By Immediate
+
+class NeonI_ScalarShiftImm_size<bit u, bits<5> opcode, string asmop,
+ RegisterClass FPRC, Operand ImmTy>
+ : NeonI_ScalarShiftImm<u, opcode,
+ (outs FPRC:$Rd), (ins FPRC:$Rn, ImmTy:$Imm),
+ !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
+ [], NoItinerary>;
+
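+// In the immh:immb field (Inst{22-16}) the element size is identified by the
+// position of the leading set bit of immh, and the remaining low bits carry
+// the (suitably encoded) shift immediate; the comments below show the fixed
+// prefix used for each size.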
+multiclass NeonI_ScalarShiftRightImm_D_size<bit u, bits<5> opcode,
+ string asmop> {
+ def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
+ bits<6> Imm;
+ let Inst{22} = 0b1; // immh:immb = 1xxxxxx
+ let Inst{21-16} = Imm;
+ }
+}
+
+multiclass NeonI_ScalarShiftRightImm_BHSD_size<bit u, bits<5> opcode,
+ string asmop>
+ : NeonI_ScalarShiftRightImm_D_size<u, opcode, asmop> {
+ def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shr_imm8> {
+ bits<3> Imm;
+ let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
+ let Inst{18-16} = Imm;
+ }
+ def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shr_imm16> {
+ bits<4> Imm;
+ let Inst{22-20} = 0b001; // immh:immb = 001xxxx
+ let Inst{19-16} = Imm;
+ }
+ def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
+ bits<5> Imm;
+ let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
+ let Inst{20-16} = Imm;
+ }
+}
+
+multiclass NeonI_ScalarShiftLeftImm_D_size<bit u, bits<5> opcode,
+ string asmop> {
+ def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shl_imm64> {
+ bits<6> Imm;
+ let Inst{22} = 0b1; // immh:immb = 1xxxxxx
+ let Inst{21-16} = Imm;
+ }
+}
+
+multiclass NeonI_ScalarShiftLeftImm_BHSD_size<bit u, bits<5> opcode,
+ string asmop>
+ : NeonI_ScalarShiftLeftImm_D_size<u, opcode, asmop> {
+ def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shl_imm8> {
+ bits<3> Imm;
+ let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
+ let Inst{18-16} = Imm;
+ }
+ def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shl_imm16> {
+ bits<4> Imm;
+ let Inst{22-20} = 0b001; // immh:immb = 001xxxx
+ let Inst{19-16} = Imm;
+ }
+ def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shl_imm32> {
+ bits<5> Imm;
+ let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
+ let Inst{20-16} = Imm;
+ }
+}
+
+class NeonI_ScalarShiftRightImm_accum_D_size<bit u, bits<5> opcode, string asmop>
+ : NeonI_ScalarShiftImm<u, opcode,
+ (outs FPR64:$Rd),
+ (ins FPR64:$Src, FPR64:$Rn, shr_imm64:$Imm),
+ !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
+ [], NoItinerary> {
+ bits<6> Imm;
+ let Inst{22} = 0b1; // immh:immb = 1xxxxxx
+ let Inst{21-16} = Imm;
+ let Constraints = "$Src = $Rd";
+}
+
+class NeonI_ScalarShiftLeftImm_accum_D_size<bit u, bits<5> opcode, string asmop>
+ : NeonI_ScalarShiftImm<u, opcode,
+ (outs FPR64:$Rd),
+ (ins FPR64:$Src, FPR64:$Rn, shl_imm64:$Imm),
+ !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
+ [], NoItinerary> {
+ bits<6> Imm;
+ let Inst{22} = 0b1; // immh:immb = 1xxxxxx
+ let Inst{21-16} = Imm;
+ let Constraints = "$Src = $Rd";
+}
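+// The accumulating and insert forms (ssra/usra/srsra/ursra, sri/sli) read
+// and modify the destination register, hence the "$Src = $Rd" tie.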
+
+class NeonI_ScalarShiftImm_narrow_size<bit u, bits<5> opcode, string asmop,
+ RegisterClass FPRCD, RegisterClass FPRCS,
+ Operand ImmTy>
+ : NeonI_ScalarShiftImm<u, opcode,
+ (outs FPRCD:$Rd), (ins FPRCS:$Rn, ImmTy:$Imm),
+ !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
+ [], NoItinerary>;
+
+multiclass NeonI_ScalarShiftImm_narrow_HSD_size<bit u, bits<5> opcode,
+ string asmop> {
+ def bhi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR8, FPR16,
+ shr_imm8> {
+ bits<3> Imm;
+ let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
+ let Inst{18-16} = Imm;
+ }
+ def hsi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR16, FPR32,
+ shr_imm16> {
+ bits<4> Imm;
+ let Inst{22-20} = 0b001; // immh:immb = 001xxxx
+ let Inst{19-16} = Imm;
+ }
+ def sdi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR32, FPR64,
+ shr_imm32> {
+ bits<5> Imm;
+ let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
+ let Inst{20-16} = Imm;
+ }
+}
+
+multiclass NeonI_ScalarShiftImm_cvt_SD_size<bit u, bits<5> opcode, string asmop> {
+ def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
+ bits<5> Imm;
+ let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
+ let Inst{20-16} = Imm;
+ }
+ def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
+ bits<6> Imm;
+ let Inst{22} = 0b1; // immh:immb = 1xxxxxx
+ let Inst{21-16} = Imm;
+ }
+}
+
+multiclass Neon_ScalarShiftRImm_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD> {
+ def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
+ (INSTD FPR64:$Rn, imm:$Imm)>;
+}
+
+multiclass Neon_ScalarShiftLImm_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD> {
+ def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 shl_imm64:$Imm))),
+ (INSTD FPR64:$Rn, imm:$Imm)>;
+}
+
+class Neon_ScalarShiftImm_arm_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD>
+ : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
+ (v1i64 (Neon_vdup (i32 shr_imm64:$Imm))))),
+ (INSTD FPR64:$Rn, imm:$Imm)>;
+
+multiclass Neon_ScalarShiftLImm_BHSD_size_patterns<SDPatternOperator opnode,
+ Instruction INSTB,
+ Instruction INSTH,
+ Instruction INSTS,
+ Instruction INSTD>
+ : Neon_ScalarShiftLImm_D_size_patterns<opnode, INSTD> {
+ def bbi : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (i32 shl_imm8:$Imm))),
+ (INSTB FPR8:$Rn, imm:$Imm)>;
+ def hhi : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (i32 shl_imm16:$Imm))),
+ (INSTH FPR16:$Rn, imm:$Imm)>;
+ def ssi : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (i32 shl_imm32:$Imm))),
+ (INSTS FPR32:$Rn, imm:$Imm)>;
+}
+
+class Neon_ScalarShiftLImm_accum_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD>
+ : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn),
+ (i32 shl_imm64:$Imm))),
+ (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
+
+class Neon_ScalarShiftRImm_accum_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD>
+ : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn),
+ (i32 shr_imm64:$Imm))),
+ (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
+
+multiclass Neon_ScalarShiftImm_narrow_HSD_size_patterns<
+ SDPatternOperator opnode,
+ Instruction INSTH,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def bhi : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn), (i32 shr_imm16:$Imm))),
+ (INSTH FPR16:$Rn, imm:$Imm)>;
+ def hsi : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
+ (INSTS FPR32:$Rn, imm:$Imm)>;
+ def sdi : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
+ (INSTD FPR64:$Rn, imm:$Imm)>;
+}
+
+multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator Sopnode,
+ SDPatternOperator Dopnode,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def ssi : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
+ (INSTS FPR32:$Rn, imm:$Imm)>;
+ def ddi : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
+ (INSTD FPR64:$Rn, imm:$Imm)>;
+}
+
+multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator Sopnode,
+ SDPatternOperator Dopnode,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def ssi : Pat<(v1i32 (Sopnode (v1f32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
+ (INSTS FPR32:$Rn, imm:$Imm)>;
+ def ddi : Pat<(v1i64 (Dopnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
+ (INSTD FPR64:$Rn, imm:$Imm)>;
+}
+
+// Scalar Signed Shift Right (Immediate)
+defm SSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00000, "sshr">;
+defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrds_n, SSHRddi>;
+// Pattern to match llvm.arm.* intrinsic.
+def : Neon_ScalarShiftImm_arm_D_size_patterns<sra, SSHRddi>;
+
+// Scalar Unsigned Shift Right (Immediate)
+defm USHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00000, "ushr">;
+defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrdu_n, USHRddi>;
+// Pattern to match llvm.arm.* intrinsic.
+def : Neon_ScalarShiftImm_arm_D_size_patterns<srl, USHRddi>;
+
+// Scalar Signed Rounding Shift Right (Immediate)
+defm SRSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00100, "srshr">;
+defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vsrshr, SRSHRddi>;
+
+// Scalar Unsigned Rounding Shift Right (Immediate)
+defm URSHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00100, "urshr">;
+defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vurshr, URSHRddi>;
+
+// Scalar Signed Shift Right and Accumulate (Immediate)
+def SSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00010, "ssra">;
+def : Neon_ScalarShiftRImm_accum_D_size_patterns
+ <int_aarch64_neon_vsrads_n, SSRA>;
+
+// Scalar Unsigned Shift Right and Accumulate (Immediate)
+def USRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00010, "usra">;
+def : Neon_ScalarShiftRImm_accum_D_size_patterns
+ <int_aarch64_neon_vsradu_n, USRA>;
+
+// Scalar Signed Rounding Shift Right and Accumulate (Immediate)
+def SRSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00110, "srsra">;
+def : Neon_ScalarShiftRImm_accum_D_size_patterns
+ <int_aarch64_neon_vrsrads_n, SRSRA>;
+
+// Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
+def URSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00110, "ursra">;
+def : Neon_ScalarShiftRImm_accum_D_size_patterns
+ <int_aarch64_neon_vrsradu_n, URSRA>;
+
+// Scalar Shift Left (Immediate)
+defm SHL : NeonI_ScalarShiftLeftImm_D_size<0b0, 0b01010, "shl">;
+defm : Neon_ScalarShiftLImm_D_size_patterns<int_aarch64_neon_vshld_n, SHLddi>;
+// Pattern to match llvm.arm.* intrinsic.
+def : Neon_ScalarShiftImm_arm_D_size_patterns<shl, SHLddi>;
+
+// Signed Saturating Shift Left (Immediate)
+defm SQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b0, 0b01110, "sqshl">;
+defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vqshls_n,
+ SQSHLbbi, SQSHLhhi,
+ SQSHLssi, SQSHLddi>;
+// Pattern to match llvm.arm.* intrinsic.
+defm : Neon_ScalarShiftLImm_D_size_patterns<Neon_sqrshlImm, SQSHLddi>;
+
+// Unsigned Saturating Shift Left (Immediate)
+defm UQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01110, "uqshl">;
+defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vqshlu_n,
+ UQSHLbbi, UQSHLhhi,
+ UQSHLssi, UQSHLddi>;
+// Pattern to match llvm.arm.* intrinsic.
+defm : Neon_ScalarShiftLImm_D_size_patterns<Neon_uqrshlImm, UQSHLddi>;
+
+// Signed Saturating Shift Left Unsigned (Immediate)
+defm SQSHLU : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01100, "sqshlu">;
+defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vsqshlu,
+ SQSHLUbbi, SQSHLUhhi,
+ SQSHLUssi, SQSHLUddi>;
+
+// Shift Right And Insert (Immediate)
+def SRI : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b01000, "sri">;
+def : Neon_ScalarShiftRImm_accum_D_size_patterns
+ <int_aarch64_neon_vsri, SRI>;
+
+// Shift Left And Insert (Immediate)
+def SLI : NeonI_ScalarShiftLeftImm_accum_D_size<0b1, 0b01010, "sli">;
+def : Neon_ScalarShiftLImm_accum_D_size_patterns
+ <int_aarch64_neon_vsli, SLI>;
+
+// Signed Saturating Shift Right Narrow (Immediate)
+defm SQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10010, "sqshrn">;
+defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrn,
+ SQSHRNbhi, SQSHRNhsi,
+ SQSHRNsdi>;
+
+// Unsigned Saturating Shift Right Narrow (Immediate)
+defm UQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10010, "uqshrn">;
+defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqshrn,
+ UQSHRNbhi, UQSHRNhsi,
+ UQSHRNsdi>;
+
+// Signed Saturating Rounded Shift Right Narrow (Immediate)
+defm SQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10011, "sqrshrn">;
+defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrn,
+ SQRSHRNbhi, SQRSHRNhsi,
+ SQRSHRNsdi>;
+
+// Unsigned Saturating Rounded Shift Right Narrow (Immediate)
+defm UQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10011, "uqrshrn">;
+defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqrshrn,
+ UQRSHRNbhi, UQRSHRNhsi,
+ UQRSHRNsdi>;
+
+// Signed Saturating Shift Right Unsigned Narrow (Immediate)
+defm SQSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10000, "sqshrun">;
+defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrun,
+ SQSHRUNbhi, SQSHRUNhsi,
+ SQSHRUNsdi>;
+
+// Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
+defm SQRSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10001, "sqrshrun">;
+defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrun,
+ SQRSHRUNbhi, SQRSHRUNhsi,
+ SQRSHRUNsdi>;
+
+// Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
+defm SCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11100, "scvtf">;
+defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_s32,
+ int_aarch64_neon_vcvtf64_n_s64,
+ SCVTF_Nssi, SCVTF_Nddi>;
+
+// Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
+defm UCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11100, "ucvtf">;
+defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_u32,
+ int_aarch64_neon_vcvtf64_n_u64,
+ UCVTF_Nssi, UCVTF_Nddi>;
+
+// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
+defm FCVTZS_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11111, "fcvtzs">;
+defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_s32_f32,
+ int_aarch64_neon_vcvtd_n_s64_f64,
+ FCVTZS_Nssi, FCVTZS_Nddi>;
+
+// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
+defm FCVTZU_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11111, "fcvtzu">;
+defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_u32_f32,
+ int_aarch64_neon_vcvtd_n_u64_f64,
+ FCVTZU_Nssi, FCVTZU_Nddi>;
+
+// Patterns For Convert Instructions Between v1f64 and v1i64
+class Neon_ScalarShiftImm_cvtf_v1f64_pattern<SDPatternOperator opnode,
+ Instruction INST>
+ : Pat<(v1f64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
+ (INST FPR64:$Rn, imm:$Imm)>;
+
+class Neon_ScalarShiftImm_fcvt_v1f64_pattern<SDPatternOperator opnode,
+ Instruction INST>
+ : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
+ (INST FPR64:$Rn, imm:$Imm)>;
+
+def : Neon_ScalarShiftImm_cvtf_v1f64_pattern<int_arm_neon_vcvtfxs2fp,
+ SCVTF_Nddi>;
+
+def : Neon_ScalarShiftImm_cvtf_v1f64_pattern<int_arm_neon_vcvtfxu2fp,
+ UCVTF_Nddi>;
+
+def : Neon_ScalarShiftImm_fcvt_v1f64_pattern<int_arm_neon_vcvtfp2fxs,
+ FCVTZS_Nddi>;
+
+def : Neon_ScalarShiftImm_fcvt_v1f64_pattern<int_arm_neon_vcvtfp2fxu,
+ FCVTZU_Nddi>;
+
+// Scalar Integer Add
+let isCommutable = 1 in {
+def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
+}
+
+// Scalar Integer Sub
+def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
+
+// Patterns for Scalar Integer Add and Sub with D register only
+defm : Neon_Scalar3Same_D_size_patterns<add, ADDddd>;
+defm : Neon_Scalar3Same_D_size_patterns<sub, SUBddd>;
+
+// Patterns to match llvm.aarch64.* intrinsic for Scalar Add, Sub
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vaddds, ADDddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vadddu, ADDddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubds, SUBddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubdu, SUBddd>;
+
+// Scalar Integer Saturating Add (Signed, Unsigned)
+defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
+defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
+
+// Scalar Integer Saturating Sub (Signed, Unsigned)
+defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
+defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
+
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Saturating Add, Sub (Signed, Unsigned)
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqadds, SQADDbbb,
+ SQADDhhh, SQADDsss, SQADDddd>;
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqaddu, UQADDbbb,
+ UQADDhhh, UQADDsss, UQADDddd>;
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubs, SQSUBbbb,
+ SQSUBhhh, SQSUBsss, SQSUBddd>;
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubu, UQSUBbbb,
+ UQSUBhhh, UQSUBsss, UQSUBddd>;
+
+// Scalar Integer Saturating Doubling Multiply Half High
+defm SQDMULH : NeonI_Scalar3Same_HS_sizes<0b0, 0b10110, "sqdmulh", 1>;
+
+// Scalar Integer Saturating Rounding Doubling Multiply Half High
+defm SQRDMULH : NeonI_Scalar3Same_HS_sizes<0b1, 0b10110, "sqrdmulh", 1>;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Saturating Doubling Multiply Half High and
+// Scalar Integer Saturating Rounding Doubling Multiply Half High
+defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqdmulh, SQDMULHhhh,
+ SQDMULHsss>;
+defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqrdmulh, SQRDMULHhhh,
+ SQRDMULHsss>;
+
+// Scalar Floating-point Multiply Extended
+defm FMULX : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11011, "fmulx", 1>;
+
+// Scalar Floating-point Reciprocal Step
+defm FRECPS : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11111, "frecps", 0>;
+
+// Scalar Floating-point Reciprocal Square Root Step
+defm FRSQRTS : NeonI_Scalar3Same_SD_sizes<0b0, 0b1, 0b11111, "frsqrts", 0>;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Floating-point Reciprocal Step and
+// Scalar Floating-point Reciprocal Square Root Step
+defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrecps, FRECPSsss,
+ FRECPSddd>;
+defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrsqrts, FRSQRTSsss,
+ FRSQRTSddd>;
+
+def : Pat<(v1f64 (fsqrt (v1f64 FPR64:$Rn))), (FSQRTdd FPR64:$Rn)>;
+
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Floating-point Multiply Extended
+multiclass Neon_Scalar3Same_MULX_SD_size_patterns<SDPatternOperator opnode,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def : Pat<(f32 (opnode (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
+ (INSTS FPR32:$Rn, FPR32:$Rm)>;
+ def : Pat<(f64 (opnode (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
+ (INSTD FPR64:$Rn, FPR64:$Rm)>;
+}
+
+defm : Neon_Scalar3Same_MULX_SD_size_patterns<int_aarch64_neon_vmulx,
+ FMULXsss,FMULXddd>;
+
+// Scalar Integer Shift Left (Signed, Unsigned)
+def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
+def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
+
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Integer Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshlds, SSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshldu, USHLddd>;
+
+// Scalar Integer Saturating Shift Left (Signed, Unsigned)
+defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
+defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
+
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Integer Saturating Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshls, SQSHLbbb,
+ SQSHLhhh, SQSHLsss, SQSHLddd>;
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshlu, UQSHLbbb,
+ UQSHLhhh, UQSHLsss, UQSHLddd>;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Saturating Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
+
+// Scalar Integer Rounding Shift Left (Signed, Unsigned)
+def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
+def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
+
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Integer Rounding Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshlds, SRSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshldu, URSHLddd>;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Rounding Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
+
+// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
+defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
+defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
+
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshls, SQRSHLbbb,
+ SQRSHLhhh, SQRSHLsss, SQRSHLddd>;
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshlu, UQRSHLbbb,
+ UQRSHLhhh, UQRSHLsss, UQRSHLddd>;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
+
+// Signed Saturating Doubling Multiply-Add Long
+defm SQDMLAL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1001, "sqdmlal">;
+defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlal,
+ SQDMLALshh, SQDMLALdss>;
+
+// Signed Saturating Doubling Multiply-Subtract Long
+defm SQDMLSL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1011, "sqdmlsl">;
+defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlsl,
+ SQDMLSLshh, SQDMLSLdss>;
+
+// Signed Saturating Doubling Multiply Long
+defm SQDMULL : NeonI_Scalar3Diff_HS_size<0b0, 0b1101, "sqdmull">;
+defm : Neon_Scalar3Diff_HS_size_patterns<int_arm_neon_vqdmull,
+ SQDMULLshh, SQDMULLdss>;
+
+// Scalar Signed Integer Convert To Floating-point
+defm SCVTF : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11101, "scvtf">;
+defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtf32_s32,
+ int_aarch64_neon_vcvtf64_s64,
+ SCVTFss, SCVTFdd>;
+
+// Scalar Unsigned Integer Convert To Floating-point
+defm UCVTF : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11101, "ucvtf">;
+defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtf32_u32,
+ int_aarch64_neon_vcvtf64_u64,
+ UCVTFss, UCVTFdd>;
+
+// Scalar Floating-point Converts
+def FCVTXN : NeonI_Scalar2SameMisc_fcvtxn_D_size<0b1, 0b10110, "fcvtxn">;
+def : Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<int_aarch64_neon_fcvtxn,
+ FCVTXN>;
+
+defm FCVTNS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11010, "fcvtns">;
+defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtns,
+ FCVTNSss, FCVTNSdd>;
+
+defm FCVTNU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11010, "fcvtnu">;
+defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtnu,
+ FCVTNUss, FCVTNUdd>;
+
+defm FCVTMS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11011, "fcvtms">;
+defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtms,
+ FCVTMSss, FCVTMSdd>;
+
+defm FCVTMU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11011, "fcvtmu">;
+defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtmu,
+ FCVTMUss, FCVTMUdd>;
+
+defm FCVTAS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11100, "fcvtas">;
+defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtas,
+ FCVTASss, FCVTASdd>;
+
+defm FCVTAU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11100, "fcvtau">;
+defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtau,
+ FCVTAUss, FCVTAUdd>;
+
+defm FCVTPS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11010, "fcvtps">;
+defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtps,
+ FCVTPSss, FCVTPSdd>;
+
+defm FCVTPU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11010, "fcvtpu">;
+defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtpu,
+ FCVTPUss, FCVTPUdd>;
+
+defm FCVTZS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11011, "fcvtzs">;
+defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzs,
+ FCVTZSss, FCVTZSdd>;
+
+defm FCVTZU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11011, "fcvtzu">;
+defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzu,
+ FCVTZUss, FCVTZUdd>;
+
+// Patterns For Convert Instructions Between v1f64 and v1i64
+class Neon_Scalar2SameMisc_cvtf_v1f64_pattern<SDPatternOperator opnode,
+ Instruction INST>
+ : Pat<(v1f64 (opnode (v1i64 FPR64:$Rn))), (INST FPR64:$Rn)>;
+
+class Neon_Scalar2SameMisc_fcvt_v1f64_pattern<SDPatternOperator opnode,
+ Instruction INST>
+ : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
+
+def : Neon_Scalar2SameMisc_cvtf_v1f64_pattern<sint_to_fp, SCVTFdd>;
+def : Neon_Scalar2SameMisc_cvtf_v1f64_pattern<uint_to_fp, UCVTFdd>;
+
+def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern<fp_to_sint, FCVTZSdd>;
+def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern<fp_to_uint, FCVTZUdd>;
+
+// Scalar Floating-point Reciprocal Estimate
+defm FRECPE : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11101, "frecpe">;
+defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrecpe,
+ FRECPEss, FRECPEdd>;
+
+// Scalar Floating-point Reciprocal Exponent
+defm FRECPX : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11111, "frecpx">;
+defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpx,
+ FRECPXss, FRECPXdd>;
+
+// Scalar Floating-point Reciprocal Square Root Estimate
+defm FRSQRTE: NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11101, "frsqrte">;
+defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrsqrte,
+ FRSQRTEss, FRSQRTEdd>;
+
+// Scalar Floating-point Round
+class Neon_ScalarFloatRound_pattern<SDPatternOperator opnode, Instruction INST>
+ : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
+
+def : Neon_ScalarFloatRound_pattern<fceil, FRINTPdd>;
+def : Neon_ScalarFloatRound_pattern<ffloor, FRINTMdd>;
+def : Neon_ScalarFloatRound_pattern<ftrunc, FRINTZdd>;
+def : Neon_ScalarFloatRound_pattern<frint, FRINTXdd>;
+def : Neon_ScalarFloatRound_pattern<fnearbyint, FRINTIdd>;
+def : Neon_ScalarFloatRound_pattern<frnd, FRINTAdd>;
+def : Neon_ScalarFloatRound_pattern<int_aarch64_neon_frintn, FRINTNdd>;
+
+// Scalar Integer Compare
+
+// Scalar Compare Bitwise Equal
+def CMEQddd: NeonI_Scalar3Same_D_size<0b1, 0b10001, "cmeq">;
+def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vceq, CMEQddd>;
+
+class Neon_Scalar3Same_cmp_D_size_v1_patterns<SDPatternOperator opnode,
+ Instruction INSTD,
+ CondCode CC>
+ : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm), CC)),
+ (INSTD FPR64:$Rn, FPR64:$Rm)>;
+
+def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMEQddd, SETEQ>;
+
+// Scalar Compare Signed Greater Than Or Equal
+def CMGEddd: NeonI_Scalar3Same_D_size<0b0, 0b00111, "cmge">;
+def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vcge, CMGEddd>;
+def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMGEddd, SETGE>;
+
+// Scalar Compare Unsigned Higher Or Same
+def CMHSddd: NeonI_Scalar3Same_D_size<0b1, 0b00111, "cmhs">;
+def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vchs, CMHSddd>;
+def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMHSddd, SETUGE>;
+
+// Scalar Compare Unsigned Higher
+def CMHIddd: NeonI_Scalar3Same_D_size<0b1, 0b00110, "cmhi">;
+def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vchi, CMHIddd>;
+def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMHIddd, SETUGT>;
+
+// Scalar Compare Signed Greater Than
+def CMGTddd: NeonI_Scalar3Same_D_size<0b0, 0b00110, "cmgt">;
+def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vcgt, CMGTddd>;
+def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMGTddd, SETGT>;
+
+// Scalar Compare Bitwise Test Bits
+def CMTSTddd: NeonI_Scalar3Same_D_size<0b0, 0b10001, "cmtst">;
+def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vtstd, CMTSTddd>;
+def : Neon_Scalar3Same_cmp_D_size_patterns<Neon_tst, CMTSTddd>;
+
+// Scalar Compare Bitwise Equal To Zero
+def CMEQddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01001, "cmeq">;
+def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vceq,
+ CMEQddi>;
+def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETEQ, CMEQddi>;
+
+// Scalar Compare Signed Greater Than Or Equal To Zero
+def CMGEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01000, "cmge">;
+def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcge,
+ CMGEddi>;
+def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETGE, CMGEddi>;
+
+// Scalar Compare Signed Greater Than Zero
+def CMGTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01000, "cmgt">;
+def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcgt,
+ CMGTddi>;
+def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETGT, CMGTddi>;
+
+// Scalar Compare Signed Less Than Or Equal To Zero
+def CMLEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01001, "cmle">;
+def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vclez,
+ CMLEddi>;
+def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLE, CMLEddi>;
+
+// Scalar Compare Signed Less Than Zero
+def CMLTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01010, "cmlt">;
+def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcltz,
+ CMLTddi>;
+def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLT, CMLTddi>;
+
+// Scalar Floating-point Compare
+
+// Scalar Floating-point Compare Mask Equal
+defm FCMEQ: NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11100, "fcmeq">;
+defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vceq,
+ FCMEQsss, FCMEQddd>;
+def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETEQ, FCMEQddd>;
+
+// Scalar Floating-point Compare Mask Equal To Zero
+defm FCMEQZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01101, "fcmeq">;
+defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vceq,
+ FCMEQZssi, FCMEQZddi>;
+def : Pat<(v1i64 (Neon_cmpz (v1f64 FPR64:$Rn), (f32 fpz32:$FPImm), SETEQ)),
+ (FCMEQZddi FPR64:$Rn, fpz32:$FPImm)>;
+
+// Scalar Floating-point Compare Mask Greater Than Or Equal
+defm FCMGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11100, "fcmge">;
+defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcge,
+ FCMGEsss, FCMGEddd>;
+def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGE, FCMGEddd>;
+
+// Scalar Floating-point Compare Mask Greater Than Or Equal To Zero
+defm FCMGEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01100, "fcmge">;
+defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcge,
+ FCMGEZssi, FCMGEZddi>;
+
+// Scalar Floating-point Compare Mask Greater Than
+defm FCMGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11100, "fcmgt">;
+defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcgt,
+ FCMGTsss, FCMGTddd>;
+def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGT, FCMGTddd>;
+
+// Scalar Floating-point Compare Mask Greater Than Zero
+defm FCMGTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01100, "fcmgt">;
+defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcgt,
+ FCMGTZssi, FCMGTZddi>;
+
+// Scalar Floating-point Compare Mask Less Than Or Equal To Zero
+defm FCMLEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01101, "fcmle">;
+defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vclez,
+ FCMLEZssi, FCMLEZddi>;
+
+// Scalar Floating-point Compare Mask Less Than Zero
+defm FCMLTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01110, "fcmlt">;
+defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcltz,
+ FCMLTZssi, FCMLTZddi>;
+
+// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
+defm FACGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11101, "facge">;
+defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcage,
+ FACGEsss, FACGEddd>;
+
+// Scalar Floating-point Absolute Compare Mask Greater Than
+defm FACGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11101, "facgt">;
+defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcagt,
+ FACGTsss, FACGTddd>;
+
+// Scalar Floating-point Absolute Difference
+defm FABD: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11010, "fabd">;
+defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vabd,
+ FABDsss, FABDddd>;
+
+// Scalar Absolute Value
+defm ABS : NeonI_Scalar2SameMisc_D_size<0b0, 0b01011, "abs">;
+defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vabs, ABSdd>;
+
+// Scalar Signed Saturating Absolute Value
+defm SQABS : NeonI_Scalar2SameMisc_BHSD_size<0b0, 0b00111, "sqabs">;
+defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqabs,
+ SQABSbb, SQABShh, SQABSss, SQABSdd>;
+
+// Scalar Negate
+defm NEG : NeonI_Scalar2SameMisc_D_size<0b1, 0b01011, "neg">;
+defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vneg, NEGdd>;
+
+// Scalar Signed Saturating Negate
+defm SQNEG : NeonI_Scalar2SameMisc_BHSD_size<0b1, 0b00111, "sqneg">;
+defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqneg,
+ SQNEGbb, SQNEGhh, SQNEGss, SQNEGdd>;
+
+// Scalar Signed Saturating Accumulate of Unsigned Value
+defm SUQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b0, 0b00011, "suqadd">;
+defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vuqadd,
+ SUQADDbb, SUQADDhh,
+ SUQADDss, SUQADDdd>;
+
+// Scalar Unsigned Saturating Accumulate of Signed Value
+defm USQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b1, 0b00011, "usqadd">;
+defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vsqadd,
+ USQADDbb, USQADDhh,
+ USQADDss, USQADDdd>;
+
+def : Pat<(v1i64 (int_aarch64_neon_suqadd (v1i64 FPR64:$Src),
+ (v1i64 FPR64:$Rn))),
+ (SUQADDdd FPR64:$Src, FPR64:$Rn)>;
+
+def : Pat<(v1i64 (int_aarch64_neon_usqadd (v1i64 FPR64:$Src),
+ (v1i64 FPR64:$Rn))),
+ (USQADDdd FPR64:$Src, FPR64:$Rn)>;
+
+def : Pat<(v1i64 (int_arm_neon_vabs (v1i64 FPR64:$Rn))),
+ (ABSdd FPR64:$Rn)>;
+
+def : Pat<(v1i64 (int_arm_neon_vqabs (v1i64 FPR64:$Rn))),
+ (SQABSdd FPR64:$Rn)>;
+
+def : Pat<(v1i64 (int_arm_neon_vqneg (v1i64 FPR64:$Rn))),
+ (SQNEGdd FPR64:$Rn)>;
+
+def : Pat<(v1i64 (sub (v1i64 (bitconvert (v8i8 Neon_AllZero))),
+ (v1i64 FPR64:$Rn))),
+ (NEGdd FPR64:$Rn)>;
+
+// Scalar Signed Saturating Extract Unsigned Narrow
+defm SQXTUN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10010, "sqxtun">;
+defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnsu,
+ SQXTUNbh, SQXTUNhs,
+ SQXTUNsd>;
+
+// Scalar Signed Saturating Extract Narrow
+defm SQXTN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b0, 0b10100, "sqxtn">;
+defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovns,
+ SQXTNbh, SQXTNhs,
+ SQXTNsd>;
+
+// Scalar Unsigned Saturating Extract Narrow
+defm UQXTN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10100, "uqxtn">;
+defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnu,
+ UQXTNbh, UQXTNhs,
+ UQXTNsd>;
+
+// Scalar Reduce Pairwise
+
+multiclass NeonI_ScalarPair_D_sizes<bit u, bit size, bits<5> opcode,
+ string asmop, bit Commutable = 0> {
+ let isCommutable = Commutable in {
+ def _D_2D : NeonI_ScalarPair<u, {size, 0b1}, opcode,
+ (outs FPR64:$Rd), (ins VPR128:$Rn),
+ !strconcat(asmop, "\t$Rd, $Rn.2d"),
+ [],
+ NoItinerary>;
+ }
+}
+
+multiclass NeonI_ScalarPair_SD_sizes<bit u, bit size, bits<5> opcode,
+ string asmop, bit Commutable = 0>
+ : NeonI_ScalarPair_D_sizes<u, size, opcode, asmop, Commutable> {
+ let isCommutable = Commutable in {
+ def _S_2S : NeonI_ScalarPair<u, {size, 0b0}, opcode,
+ (outs FPR32:$Rd), (ins VPR64:$Rn),
+ !strconcat(asmop, "\t$Rd, $Rn.2s"),
+ [],
+ NoItinerary>;
+ }
+}
+
+// Scalar Reduce Addition Pairwise (Integer) with
+// Pattern to match llvm.arm.* intrinsic
+defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
+
+// Pattern to match llvm.aarch64.* intrinsic for
+// Scalar Reduce Addition Pairwise (Integer)
+def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
+ (ADDPvv_D_2D VPR128:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_vaddv (v2i64 VPR128:$Rn))),
+ (ADDPvv_D_2D VPR128:$Rn)>;
+
+// Scalar Reduce Addition Pairwise (Floating Point)
+defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
+
+// Scalar Reduce Maximum Pairwise (Floating Point)
+defm FMAXPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01111, "fmaxp", 0>;
+
+// Scalar Reduce Minimum Pairwise (Floating Point)
+defm FMINPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01111, "fminp", 0>;
+
+// Scalar Reduce maxNum Pairwise (Floating Point)
+defm FMAXNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01100, "fmaxnmp", 0>;
+
+// Scalar Reduce minNum Pairwise (Floating Point)
+defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
+
+multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnodeS,
+ SDPatternOperator opnodeD,
+ Instruction INSTS,
+ Instruction INSTD> {
+ def : Pat<(v1f32 (opnodeS (v2f32 VPR64:$Rn))),
+ (INSTS VPR64:$Rn)>;
+ def : Pat<(v1f64 (opnodeD (v2f64 VPR128:$Rn))),
+ (INSTD VPR128:$Rn)>;
+}
+
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Reduce Add, Max, Min, MaxNum, MinNum Pairwise (Floating Point)
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
+ int_aarch64_neon_vpfaddq, FADDPvv_S_2S, FADDPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmax,
+ int_aarch64_neon_vpmaxq, FMAXPvv_S_2S, FMAXPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmin,
+ int_aarch64_neon_vpminq, FMINPvv_S_2S, FMINPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
+ int_aarch64_neon_vpfmaxnmq, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm,
+ int_aarch64_neon_vpfminnmq, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vaddv,
+ int_aarch64_neon_vaddv, FADDPvv_S_2S, FADDPvv_D_2D>;
+
+def : Pat<(v1f32 (int_aarch64_neon_vaddv (v4f32 VPR128:$Rn))),
+ (FADDPvv_S_2S (v2f32
+ (EXTRACT_SUBREG
+ (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
+ sub_64)))>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxv,
+ int_aarch64_neon_vmaxv, FMAXPvv_S_2S, FMAXPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminv,
+ int_aarch64_neon_vminv, FMINPvv_S_2S, FMINPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxnmv,
+ int_aarch64_neon_vmaxnmv, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminnmv,
+ int_aarch64_neon_vminnmv, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
+
+// Scalar by element Arithmetic
+
+class NeonI_ScalarXIndexedElemArith<string asmop, bits<4> opcode,
+ string rmlane, bit u, bit szhi, bit szlo,
+ RegisterClass ResFPR, RegisterClass OpFPR,
+ RegisterOperand OpVPR, Operand OpImm>
+ : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
+ (outs ResFPR:$Rd),
+ (ins OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
+ asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
+ [],
+ NoItinerary> {
+ bits<3> Imm;
+ bits<5> MRm;
+}
+
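+// As NeonI_ScalarXIndexedElemArith, but with a tied accumulator operand
+// ("$src = $Rd") for the accumulating forms below (fmla/fmls, sqdmlal/sqdmlsl).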
+class NeonI_ScalarXIndexedElemArith_Constraint_Impl<string asmop, bits<4> opcode,
+ string rmlane,
+ bit u, bit szhi, bit szlo,
+ RegisterClass ResFPR,
+ RegisterClass OpFPR,
+ RegisterOperand OpVPR,
+ Operand OpImm>
+ : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
+ (outs ResFPR:$Rd),
+ (ins ResFPR:$src, OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
+ asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
+ [],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+ bits<3> Imm;
+ bits<5> MRm;
+}
+
+// Scalar Floating Point multiply (scalar, by element)
+def FMULssv_4S : NeonI_ScalarXIndexedElemArith<"fmul",
+ 0b1001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
+ let Inst{11} = Imm{1}; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+def FMULddv_2D : NeonI_ScalarXIndexedElemArith<"fmul",
+ 0b1001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
+ let Inst{11} = Imm{0}; // h
+ let Inst{21} = 0b0; // l
+ let Inst{20-16} = MRm;
+}
+
+// Scalar Floating Point multiply extended (scalar, by element)
+def FMULXssv_4S : NeonI_ScalarXIndexedElemArith<"fmulx",
+ 0b1001, ".s", 0b1, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
+ let Inst{11} = Imm{1}; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+def FMULXddv_2D : NeonI_ScalarXIndexedElemArith<"fmulx",
+ 0b1001, ".d", 0b1, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
+ let Inst{11} = Imm{0}; // h
+ let Inst{21} = 0b0; // l
+ let Inst{20-16} = MRm;
+}
+
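+// Note: the 64-bit vector operand forms below are widened with SUBREG_TO_REG
+// so the lane index applies to a full 128-bit register.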
+multiclass Neon_ScalarXIndexedElem_MUL_MULX_Patterns<
+ SDPatternOperator opnode,
+ Instruction INST,
+ ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
+ ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
+
+ def : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
+ (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)))),
+ (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+ def : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
+ (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)))),
+ (ResTy (INST (ResTy FPRC:$Rn),
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+ OpNImm:$Imm))>;
+
+ // swapped operands
+ def : Pat<(ResTy (opnode
+ (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
+ (ResTy FPRC:$Rn))),
+ (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+ def : Pat<(ResTy (opnode
+ (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
+ (ResTy FPRC:$Rn))),
+ (ResTy (INST (ResTy FPRC:$Rn),
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+ OpNImm:$Imm))>;
+}
+
+// Patterns for Scalar Floating Point multiply (scalar, by element)
+defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULssv_4S,
+ f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULddv_2D,
+ f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
+
+// Patterns for Scalar Floating Point multiply extended (scalar, by element)
+defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
+ FMULXssv_4S, f32, FPR32, v4f32, neon_uimm2_bare,
+ v2f32, v4f32, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
+ FMULXddv_2D, f64, FPR64, v2f64, neon_uimm1_bare,
+ v1f64, v2f64, neon_uimm0_bare>;
+
+
+// Scalar Floating Point fused multiply-add (scalar, by element)
+def FMLAssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
+ 0b0001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
+ let Inst{11} = Imm{1}; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+def FMLAddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
+ 0b0001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
+ let Inst{11} = Imm{0}; // h
+ let Inst{21} = 0b0; // l
+ let Inst{20-16} = MRm;
+}
+
+// Scalar Floating Point fused multiply-subtract (scalar, by element)
+def FMLSssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
+ 0b0101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
+ let Inst{11} = Imm{1}; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+def FMLSddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
+ 0b0101, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
+ let Inst{11} = Imm{0}; // h
+ let Inst{21} = 0b0; // l
+ let Inst{20-16} = MRm;
+}
+// We are allowed to match the fma instruction regardless of compile options.
+multiclass Neon_ScalarXIndexedElem_FMA_Patterns<
+ Instruction FMLAI, Instruction FMLSI,
+ ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
+ ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
+ // fmla
+ def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
+ (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
+ (ResTy FPRC:$Ra))),
+ (ResTy (FMLAI (ResTy FPRC:$Ra),
+ (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+ def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
+ (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
+ (ResTy FPRC:$Ra))),
+ (ResTy (FMLAI (ResTy FPRC:$Ra),
+ (ResTy FPRC:$Rn),
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+ OpNImm:$Imm))>;
+
+ // swapped fmla operands
+ def : Pat<(ResTy (fma
+ (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
+ (ResTy FPRC:$Rn),
+ (ResTy FPRC:$Ra))),
+ (ResTy (FMLAI (ResTy FPRC:$Ra),
+ (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+ def : Pat<(ResTy (fma
+ (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
+ (ResTy FPRC:$Rn),
+ (ResTy FPRC:$Ra))),
+ (ResTy (FMLAI (ResTy FPRC:$Ra),
+ (ResTy FPRC:$Rn),
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+ OpNImm:$Imm))>;
+
+ // fmls
+ def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
+ (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
+ (ResTy FPRC:$Ra))),
+ (ResTy (FMLSI (ResTy FPRC:$Ra),
+ (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+ def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
+ (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
+ (ResTy FPRC:$Ra))),
+ (ResTy (FMLSI (ResTy FPRC:$Ra),
+ (ResTy FPRC:$Rn),
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+ OpNImm:$Imm))>;
+
+ // swapped fmls operands
+ def : Pat<(ResTy (fma
+ (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
+ (ResTy FPRC:$Rn),
+ (ResTy FPRC:$Ra))),
+ (ResTy (FMLSI (ResTy FPRC:$Ra),
+ (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+ def : Pat<(ResTy (fma
+ (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
+ (ResTy FPRC:$Rn),
+ (ResTy FPRC:$Ra))),
+ (ResTy (FMLSI (ResTy FPRC:$Ra),
+ (ResTy FPRC:$Rn),
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+ OpNImm:$Imm))>;
+}
+
+// Scalar Floating Point fused multiply-add and
+// multiply-subtract (scalar, by element)
+defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAssv_4S, FMLSssv_4S,
+ f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAddv_2D, FMLSddv_2D,
+ f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
+
+// Scalar Signed saturating doubling multiply long (scalar, by element)
+def SQDMULLshv_4H : NeonI_ScalarXIndexedElemArith<"sqdmull",
+ 0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
+ let Inst{11} = 0b0; // h
+ let Inst{21} = Imm{1}; // l
+ let Inst{20} = Imm{0}; // m
+ let Inst{19-16} = MRm{3-0};
+}
+def SQDMULLshv_8H : NeonI_ScalarXIndexedElemArith<"sqdmull",
+ 0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
+ let Inst{11} = Imm{2}; // h
+ let Inst{21} = Imm{1}; // l
+ let Inst{20} = Imm{0}; // m
+ let Inst{19-16} = MRm{3-0};
+}
+def SQDMULLdsv_2S : NeonI_ScalarXIndexedElemArith<"sqdmull",
+ 0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
+ let Inst{11} = 0b0; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+def SQDMULLdsv_4S : NeonI_ScalarXIndexedElemArith<"sqdmull",
+ 0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
+ let Inst{11} = Imm{1}; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+
+multiclass Neon_ScalarXIndexedElem_MUL_Patterns<
+ SDPatternOperator opnode,
+ Instruction INST,
+ ValueType ResTy, RegisterClass FPRC,
+ ValueType OpVTy, ValueType OpTy,
+ ValueType VecOpTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
+
+ def : Pat<(ResTy (opnode (OpVTy FPRC:$Rn),
+ (OpVTy (scalar_to_vector
+ (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))))),
+ (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
+
+ // swapped operands
+ def : Pat<(ResTy (opnode
+ (OpVTy (scalar_to_vector
+ (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))),
+ (OpVTy FPRC:$Rn))),
+ (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
+}
+
+
+// Patterns for Scalar Signed saturating doubling
+// multiply long (scalar, by element)
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
+ SQDMULLshv_4H, v1i32, FPR16, v1i16, i16, v4i16,
+ i32, VPR64Lo, neon_uimm2_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
+ SQDMULLshv_8H, v1i32, FPR16, v1i16, i16, v8i16,
+ i32, VPR128Lo, neon_uimm3_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
+ SQDMULLdsv_2S, v1i64, FPR32, v1i32, i32, v2i32,
+ i32, VPR64Lo, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
+ SQDMULLdsv_4S, v1i64, FPR32, v1i32, i32, v4i32,
+ i32, VPR128Lo, neon_uimm2_bare>;
+
+// Scalar Signed saturating doubling multiply-add long (scalar, by element)
+def SQDMLALshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
+ 0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
+ let Inst{11} = 0b0; // h
+ let Inst{21} = Imm{1}; // l
+ let Inst{20} = Imm{0}; // m
+ let Inst{19-16} = MRm{3-0};
+}
+def SQDMLALshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
+ 0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
+ let Inst{11} = Imm{2}; // h
+ let Inst{21} = Imm{1}; // l
+ let Inst{20} = Imm{0}; // m
+ let Inst{19-16} = MRm{3-0};
+}
+def SQDMLALdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
+ 0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
+ let Inst{11} = 0b0; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+def SQDMLALdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
+ 0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
+ let Inst{11} = Imm{1}; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+
+// Scalar Signed saturating doubling
+// multiply-subtract long (scalar, by element)
+def SQDMLSLshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
+ 0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
+ let Inst{11} = 0b0; // h
+ let Inst{21} = Imm{1}; // l
+ let Inst{20} = Imm{0}; // m
+ let Inst{19-16} = MRm{3-0};
+}
+def SQDMLSLshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
+ 0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
+ let Inst{11} = Imm{2}; // h
+ let Inst{21} = Imm{1}; // l
+ let Inst{20} = Imm{0}; // m
+ let Inst{19-16} = MRm{3-0};
+}
+def SQDMLSLdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
+ 0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
+ let Inst{11} = 0b0; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+def SQDMLSLdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
+ 0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
+ let Inst{11} = Imm{1}; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+
+multiclass Neon_ScalarXIndexedElem_MLAL_Patterns<
+ SDPatternOperator opnode,
+ SDPatternOperator coreopnode,
+ Instruction INST,
+ ValueType ResTy, RegisterClass ResFPRC, RegisterClass FPRC,
+ ValueType OpTy,
+ ValueType OpVTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
+
+ def : Pat<(ResTy (opnode
+ (ResTy ResFPRC:$Ra),
+ (ResTy (coreopnode (OpTy FPRC:$Rn),
+ (OpTy (scalar_to_vector
+ (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))))))),
+ (ResTy (INST (ResTy ResFPRC:$Ra),
+ (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
+
+ // swapped operands
+ def : Pat<(ResTy (opnode
+ (ResTy ResFPRC:$Ra),
+ (ResTy (coreopnode
+ (OpTy (scalar_to_vector
+ (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))),
+ (OpTy FPRC:$Rn))))),
+ (ResTy (INST (ResTy ResFPRC:$Ra),
+ (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
+}
+
+// Patterns for Scalar Signed saturating
+// doubling multiply-add long (scalar, by element)
+defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
+ int_arm_neon_vqdmull, SQDMLALshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
+ i32, VPR64Lo, neon_uimm2_bare>;
+defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
+ int_arm_neon_vqdmull, SQDMLALshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
+ i32, VPR128Lo, neon_uimm3_bare>;
+defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
+ int_arm_neon_vqdmull, SQDMLALdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
+ i32, VPR64Lo, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
+ int_arm_neon_vqdmull, SQDMLALdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
+ i32, VPR128Lo, neon_uimm2_bare>;
+
+// Patterns for Scalar Signed saturating
+// doubling multiply-sub long (scalar, by element)
+defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
+ int_arm_neon_vqdmull, SQDMLSLshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
+ i32, VPR64Lo, neon_uimm2_bare>;
+defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
+ int_arm_neon_vqdmull, SQDMLSLshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
+ i32, VPR128Lo, neon_uimm3_bare>;
+defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
+ int_arm_neon_vqdmull, SQDMLSLdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
+ i32, VPR64Lo, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
+ int_arm_neon_vqdmull, SQDMLSLdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
+ i32, VPR128Lo, neon_uimm2_bare>;
+
+// Scalar general arithmetic operation
+class Neon_Scalar_GeneralMath2D_pattern<SDPatternOperator opnode,
+ Instruction INST>
+ : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
+
+class Neon_Scalar_GeneralMath3D_pattern<SDPatternOperator opnode,
+ Instruction INST>
+ : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+ (INST FPR64:$Rn, FPR64:$Rm)>;
+
+class Neon_Scalar_GeneralMath4D_pattern<SDPatternOperator opnode,
+ Instruction INST>
+ : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm),
+ (v1f64 FPR64:$Ra))),
+ (INST FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
+
+def : Neon_Scalar_GeneralMath3D_pattern<fadd, FADDddd>;
+def : Neon_Scalar_GeneralMath3D_pattern<fmul, FMULddd>;
+def : Neon_Scalar_GeneralMath3D_pattern<fsub, FSUBddd>;
+def : Neon_Scalar_GeneralMath3D_pattern<fdiv, FDIVddd>;
+def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vabds, FABDddd>;
+def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vmaxs, FMAXddd>;
+def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vmins, FMINddd>;
+def : Neon_Scalar_GeneralMath3D_pattern<int_aarch64_neon_vmaxnm, FMAXNMddd>;
+def : Neon_Scalar_GeneralMath3D_pattern<int_aarch64_neon_vminnm, FMINNMddd>;
+
+def : Neon_Scalar_GeneralMath2D_pattern<fabs, FABSdd>;
+def : Neon_Scalar_GeneralMath2D_pattern<fneg, FNEGdd>;
+
+def : Neon_Scalar_GeneralMath4D_pattern<fma, FMADDdddd>;
+def : Neon_Scalar_GeneralMath4D_pattern<fmsub, FMSUBdddd>;
+
+// Scalar Signed saturating doubling multiply returning
+// high half (scalar, by element)
+def SQDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
+ 0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
+ let Inst{11} = 0b0; // h
+ let Inst{21} = Imm{1}; // l
+ let Inst{20} = Imm{0}; // m
+ let Inst{19-16} = MRm{3-0};
+}
+def SQDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
+ 0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
+ let Inst{11} = Imm{2}; // h
+ let Inst{21} = Imm{1}; // l
+ let Inst{20} = Imm{0}; // m
+ let Inst{19-16} = MRm{3-0};
+}
+def SQDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
+ 0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
+ let Inst{11} = 0b0; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+def SQDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
+ 0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
+ let Inst{11} = Imm{1}; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+
+// Patterns for Scalar Signed saturating doubling multiply returning
+// high half (scalar, by element)
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
+ SQDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16,
+ i32, VPR64Lo, neon_uimm2_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
+ SQDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16,
+ i32, VPR128Lo, neon_uimm3_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
+ SQDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32,
+ i32, VPR64Lo, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
+ SQDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32,
+ i32, VPR128Lo, neon_uimm2_bare>;
+
+// Scalar Signed saturating rounding doubling multiply
+// returning high half (scalar, by element)
+def SQRDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
+ 0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
+ let Inst{11} = 0b0; // h
+ let Inst{21} = Imm{1}; // l
+ let Inst{20} = Imm{0}; // m
+ let Inst{19-16} = MRm{3-0};
+}
+def SQRDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
+ 0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
+ let Inst{11} = Imm{2}; // h
+ let Inst{21} = Imm{1}; // l
+ let Inst{20} = Imm{0}; // m
+ let Inst{19-16} = MRm{3-0};
+}
+def SQRDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
+ 0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
+ let Inst{11} = 0b0; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+def SQRDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
+ 0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
+ let Inst{11} = Imm{1}; // h
+ let Inst{21} = Imm{0}; // l
+ let Inst{20-16} = MRm;
+}
+
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
+ SQRDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16, i32,
+ VPR64Lo, neon_uimm2_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
+ SQRDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16, i32,
+ VPR128Lo, neon_uimm3_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
+ SQRDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32, i32,
+ VPR64Lo, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
+ SQRDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32, i32,
+ VPR128Lo, neon_uimm2_bare>;
+
+// Scalar Copy - DUP element to scalar
+class NeonI_Scalar_DUP<string asmop, string asmlane,
+ RegisterClass ResRC, RegisterOperand VPRC,
+ Operand OpImm>
+ : NeonI_ScalarCopy<(outs ResRC:$Rd), (ins VPRC:$Rn, OpImm:$Imm),
+ asmop # "\t$Rd, $Rn." # asmlane # "[$Imm]",
+ [],
+ NoItinerary> {
+ bits<4> Imm;
+}
+
+def DUPbv_B : NeonI_Scalar_DUP<"dup", "b", FPR8, VPR128, neon_uimm4_bare> {
+ let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+def DUPhv_H : NeonI_Scalar_DUP<"dup", "h", FPR16, VPR128, neon_uimm3_bare> {
+ let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+def DUPsv_S : NeonI_Scalar_DUP<"dup", "s", FPR32, VPR128, neon_uimm2_bare> {
+ let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
+}
+def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
+ let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
+}
+
+multiclass NeonI_Scalar_DUP_Elt_pattern<Instruction DUPI, ValueType ResTy,
+ ValueType OpTy, Operand OpImm,
+ ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
+ def : Pat<(ResTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
+ (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
+
+ def : Pat<(ResTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
+ (ResTy (DUPI
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ OpNImm:$Imm))>;
+}
+
+// Patterns for vector extract of FP data using scalar DUP instructions
+defm : NeonI_Scalar_DUP_Elt_pattern<DUPsv_S, f32,
+ v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
+defm : NeonI_Scalar_DUP_Elt_pattern<DUPdv_D, f64,
+ v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
+
+multiclass NeonI_Scalar_DUP_Ext_Vec_pattern<Instruction DUPI,
+ ValueType ResTy, ValueType OpTy, Operand OpLImm,
+ ValueType NOpTy, ValueType ExTy, Operand OpNImm> {
+
+ def : Pat<(ResTy (extract_subvector (OpTy VPR128:$Rn), OpLImm:$Imm)),
+ (ResTy (DUPI VPR128:$Rn, OpLImm:$Imm))>;
+
+ def : Pat<(ResTy (extract_subvector (NOpTy VPR64:$Rn), OpNImm:$Imm)),
+ (ResTy (DUPI
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ OpNImm:$Imm))>;
+}
+
+// Patterns for extracting subvectors of v1ix data using scalar DUP instructions.
+defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPbv_B, v1i8, v16i8, neon_uimm4_bare,
+ v8i8, v16i8, neon_uimm3_bare>;
+defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPhv_H, v1i16, v8i16, neon_uimm3_bare,
+ v4i16, v8i16, neon_uimm2_bare>;
+defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPsv_S, v1i32, v4i32, neon_uimm2_bare,
+ v2i32, v4i32, neon_uimm1_bare>;
+
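+// Copy_pattern1 matches an element extract re-inserted into an undef vector;
+// Copy_pattern2 matches the same extract fed through scalar_to_vector. Both
+// are selected to the scalar DUP instructions above.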
+multiclass NeonI_Scalar_DUP_Copy_pattern1<Instruction DUPI, ValueType ResTy,
+ ValueType OpTy, ValueType ElemTy,
+ Operand OpImm, ValueType OpNTy,
+ ValueType ExTy, Operand OpNImm> {
+
+ def : Pat<(ResTy (vector_insert (ResTy undef),
+ (ElemTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
+ (neon_uimm0_bare:$Imm))),
+ (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
+
+ def : Pat<(ResTy (vector_insert (ResTy undef),
+ (ElemTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
+ (OpNImm:$Imm))),
+ (ResTy (DUPI
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ OpNImm:$Imm))>;
+}
+
+multiclass NeonI_Scalar_DUP_Copy_pattern2<Instruction DUPI, ValueType ResTy,
+ ValueType OpTy, ValueType ElemTy,
+ Operand OpImm, ValueType OpNTy,
+ ValueType ExTy, Operand OpNImm> {
+
+ def : Pat<(ResTy (scalar_to_vector
+ (ElemTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)))),
+ (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
+
+ def : Pat<(ResTy (scalar_to_vector
+ (ElemTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)))),
+ (ResTy (DUPI
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ OpNImm:$Imm))>;
+}
+
+// Patterns for vector copy to v1ix and v1fx vectors using scalar DUP
+// instructions.
+defm : NeonI_Scalar_DUP_Copy_pattern1<DUPdv_D,
+ v1i64, v2i64, i64, neon_uimm1_bare,
+ v1i64, v2i64, neon_uimm0_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern1<DUPsv_S,
+ v1i32, v4i32, i32, neon_uimm2_bare,
+ v2i32, v4i32, neon_uimm1_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern1<DUPhv_H,
+ v1i16, v8i16, i32, neon_uimm3_bare,
+ v4i16, v8i16, neon_uimm2_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern1<DUPbv_B,
+ v1i8, v16i8, i32, neon_uimm4_bare,
+ v8i8, v16i8, neon_uimm3_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern1<DUPdv_D,
+ v1f64, v2f64, f64, neon_uimm1_bare,
+ v1f64, v2f64, neon_uimm0_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern1<DUPsv_S,
+ v1f32, v4f32, f32, neon_uimm2_bare,
+ v2f32, v4f32, neon_uimm1_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern2<DUPdv_D,
+ v1i64, v2i64, i64, neon_uimm1_bare,
+ v1i64, v2i64, neon_uimm0_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern2<DUPsv_S,
+ v1i32, v4i32, i32, neon_uimm2_bare,
+ v2i32, v4i32, neon_uimm1_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern2<DUPhv_H,
+ v1i16, v8i16, i32, neon_uimm3_bare,
+ v4i16, v8i16, neon_uimm2_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern2<DUPbv_B,
+ v1i8, v16i8, i32, neon_uimm4_bare,
+ v8i8, v16i8, neon_uimm3_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern2<DUPdv_D,
+ v1f64, v2f64, f64, neon_uimm1_bare,
+ v1f64, v2f64, neon_uimm0_bare>;
+defm : NeonI_Scalar_DUP_Copy_pattern2<DUPsv_S,
+ v1f32, v4f32, f32, neon_uimm2_bare,
+ v2f32, v4f32, neon_uimm1_bare>;
+
+multiclass NeonI_Scalar_DUP_alias<string asmop, string asmlane,
+ Instruction DUPI, Operand OpImm,
+ RegisterClass ResRC> {
+ def : NeonInstAlias<!strconcat(asmop, "$Rd, $Rn" # asmlane # "[$Imm]"),
+ (DUPI ResRC:$Rd, VPR128:$Rn, OpImm:$Imm), 0b0>;
+}
+
+// Aliases for Scalar copy - DUP element (scalar)
+// FIXME: This is actually the preferred syntax but TableGen can't deal with
+// custom printing of aliases.
+defm : NeonI_Scalar_DUP_alias<"mov", ".b", DUPbv_B, neon_uimm4_bare, FPR8>;
+defm : NeonI_Scalar_DUP_alias<"mov", ".h", DUPhv_H, neon_uimm3_bare, FPR16>;
+defm : NeonI_Scalar_DUP_alias<"mov", ".s", DUPsv_S, neon_uimm2_bare, FPR32>;
+defm : NeonI_Scalar_DUP_alias<"mov", ".d", DUPdv_D, neon_uimm1_bare, FPR64>;
+
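+// Taking the low half of a 128-bit vector is a plain subregister extract,
+// while the high half is selected as a DUP of d-element 1.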
+multiclass NeonI_SDUP<PatFrag GetLow, PatFrag GetHigh, ValueType ResTy,
+ ValueType OpTy> {
+ def : Pat<(ResTy (GetLow VPR128:$Rn)),
+ (ResTy (EXTRACT_SUBREG (OpTy VPR128:$Rn), sub_64))>;
+ def : Pat<(ResTy (GetHigh VPR128:$Rn)),
+ (ResTy (DUPdv_D (OpTy VPR128:$Rn), 1))>;
+}
+
+defm : NeonI_SDUP<Neon_Low16B, Neon_High16B, v8i8, v16i8>;
+defm : NeonI_SDUP<Neon_Low8H, Neon_High8H, v4i16, v8i16>;
+defm : NeonI_SDUP<Neon_Low4S, Neon_High4S, v2i32, v4i32>;
+defm : NeonI_SDUP<Neon_Low2D, Neon_High2D, v1i64, v2i64>;
+defm : NeonI_SDUP<Neon_Low4float, Neon_High4float, v2f32, v4f32>;
+defm : NeonI_SDUP<Neon_Low2double, Neon_High2double, v1f64, v2f64>;
+
+//===----------------------------------------------------------------------===//
+// Non-Instruction Patterns
+//===----------------------------------------------------------------------===//
+
+// 64-bit vector bitcasts...
+
+def : Pat<(v1i64 (bitconvert (v8i8 VPR64:$src))), (v1i64 VPR64:$src)>;
+def : Pat<(v2f32 (bitconvert (v8i8 VPR64:$src))), (v2f32 VPR64:$src)>;
+def : Pat<(v2i32 (bitconvert (v8i8 VPR64:$src))), (v2i32 VPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (v8i8 VPR64:$src))), (v4i16 VPR64:$src)>;
+
+def : Pat<(v1i64 (bitconvert (v4i16 VPR64:$src))), (v1i64 VPR64:$src)>;
+def : Pat<(v2i32 (bitconvert (v4i16 VPR64:$src))), (v2i32 VPR64:$src)>;
+def : Pat<(v2f32 (bitconvert (v4i16 VPR64:$src))), (v2f32 VPR64:$src)>;
+def : Pat<(v8i8 (bitconvert (v4i16 VPR64:$src))), (v8i8 VPR64:$src)>;
+
+def : Pat<(v1i64 (bitconvert (v2i32 VPR64:$src))), (v1i64 VPR64:$src)>;
+def : Pat<(v2f32 (bitconvert (v2i32 VPR64:$src))), (v2f32 VPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (v2i32 VPR64:$src))), (v4i16 VPR64:$src)>;
+def : Pat<(v8i8 (bitconvert (v2i32 VPR64:$src))), (v8i8 VPR64:$src)>;
+
+def : Pat<(v1i64 (bitconvert (v2f32 VPR64:$src))), (v1i64 VPR64:$src)>;
+def : Pat<(v2i32 (bitconvert (v2f32 VPR64:$src))), (v2i32 VPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (v2f32 VPR64:$src))), (v4i16 VPR64:$src)>;
+def : Pat<(v8i8 (bitconvert (v2f32 VPR64:$src))), (v8i8 VPR64:$src)>;
+
+def : Pat<(v2f32 (bitconvert (v1i64 VPR64:$src))), (v2f32 VPR64:$src)>;
+def : Pat<(v2i32 (bitconvert (v1i64 VPR64:$src))), (v2i32 VPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (v1i64 VPR64:$src))), (v4i16 VPR64:$src)>;
+def : Pat<(v8i8 (bitconvert (v1i64 VPR64:$src))), (v8i8 VPR64:$src)>;
+
+// ...and 128-bit vector bitcasts...
+
+def : Pat<(v2f64 (bitconvert (v16i8 VPR128:$src))), (v2f64 VPR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v16i8 VPR128:$src))), (v2i64 VPR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v16i8 VPR128:$src))), (v4f32 VPR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v16i8 VPR128:$src))), (v4i32 VPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v16i8 VPR128:$src))), (v8i16 VPR128:$src)>;
+
+def : Pat<(v2f64 (bitconvert (v8i16 VPR128:$src))), (v2f64 VPR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v8i16 VPR128:$src))), (v2i64 VPR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v8i16 VPR128:$src))), (v4i32 VPR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v8i16 VPR128:$src))), (v4f32 VPR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v8i16 VPR128:$src))), (v16i8 VPR128:$src)>;
+
+def : Pat<(v2f64 (bitconvert (v4i32 VPR128:$src))), (v2f64 VPR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v4i32 VPR128:$src))), (v2i64 VPR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v4i32 VPR128:$src))), (v4f32 VPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v4i32 VPR128:$src))), (v8i16 VPR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v4i32 VPR128:$src))), (v16i8 VPR128:$src)>;
+
+def : Pat<(v2f64 (bitconvert (v4f32 VPR128:$src))), (v2f64 VPR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v4f32 VPR128:$src))), (v2i64 VPR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v4f32 VPR128:$src))), (v4i32 VPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v4f32 VPR128:$src))), (v8i16 VPR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v4f32 VPR128:$src))), (v16i8 VPR128:$src)>;
+
+def : Pat<(v2f64 (bitconvert (v2i64 VPR128:$src))), (v2f64 VPR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v2i64 VPR128:$src))), (v4f32 VPR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v2i64 VPR128:$src))), (v4i32 VPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v2i64 VPR128:$src))), (v8i16 VPR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v2i64 VPR128:$src))), (v16i8 VPR128:$src)>;
+
+def : Pat<(v2i64 (bitconvert (v2f64 VPR128:$src))), (v2i64 VPR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v2f64 VPR128:$src))), (v4f32 VPR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v2f64 VPR128:$src))), (v4i32 VPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v2f64 VPR128:$src))), (v8i16 VPR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v2f64 VPR128:$src))), (v16i8 VPR128:$src)>;
+
+// ...and scalar bitcasts...
+def : Pat<(f16 (bitconvert (v1i16 FPR16:$src))), (f16 FPR16:$src)>;
+def : Pat<(f32 (bitconvert (v1i32 FPR32:$src))), (f32 FPR32:$src)>;
+def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
+def : Pat<(f32 (bitconvert (v1f32 FPR32:$src))), (f32 FPR32:$src)>;
+def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
+
+def : Pat<(i64 (bitconvert (v1i64 FPR64:$src))), (FMOVxd $src)>;
+def : Pat<(i64 (bitconvert (v1f64 FPR64:$src))), (FMOVxd $src)>;
+def : Pat<(i64 (bitconvert (v2i32 FPR64:$src))), (FMOVxd $src)>;
+def : Pat<(i64 (bitconvert (v2f32 FPR64:$src))), (FMOVxd $src)>;
+def : Pat<(i64 (bitconvert (v4i16 FPR64:$src))), (FMOVxd $src)>;
+def : Pat<(i64 (bitconvert (v8i8 FPR64:$src))), (FMOVxd $src)>;
+
+def : Pat<(i32 (bitconvert (v1i32 FPR32:$src))), (FMOVws $src)>;
+
+def : Pat<(v8i8 (bitconvert (v1i64 VPR64:$src))), (v8i8 VPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (v1i64 VPR64:$src))), (v4i16 VPR64:$src)>;
+def : Pat<(v2i32 (bitconvert (v1i64 VPR64:$src))), (v2i32 VPR64:$src)>;
+
+def : Pat<(f64 (bitconvert (v8i8 VPR64:$src))), (f64 VPR64:$src)>;
+def : Pat<(f64 (bitconvert (v4i16 VPR64:$src))), (f64 VPR64:$src)>;
+def : Pat<(f64 (bitconvert (v2i32 VPR64:$src))), (f64 VPR64:$src)>;
+def : Pat<(f64 (bitconvert (v2f32 VPR64:$src))), (f64 VPR64:$src)>;
+def : Pat<(f64 (bitconvert (v1i64 VPR64:$src))), (f64 VPR64:$src)>;
+
+def : Pat<(f128 (bitconvert (v16i8 VPR128:$src))), (f128 VPR128:$src)>;
+def : Pat<(f128 (bitconvert (v8i16 VPR128:$src))), (f128 VPR128:$src)>;
+def : Pat<(f128 (bitconvert (v4i32 VPR128:$src))), (f128 VPR128:$src)>;
+def : Pat<(f128 (bitconvert (v2i64 VPR128:$src))), (f128 VPR128:$src)>;
+def : Pat<(f128 (bitconvert (v4f32 VPR128:$src))), (f128 VPR128:$src)>;
+def : Pat<(f128 (bitconvert (v2f64 VPR128:$src))), (f128 VPR128:$src)>;
+
+def : Pat<(v1i16 (bitconvert (f16 FPR16:$src))), (v1i16 FPR16:$src)>;
+def : Pat<(v1i32 (bitconvert (f32 FPR32:$src))), (v1i32 FPR32:$src)>;
+def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
+def : Pat<(v1f32 (bitconvert (f32 FPR32:$src))), (v1f32 FPR32:$src)>;
+def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
+
+def : Pat<(v1i64 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
+def : Pat<(v1f64 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
+def : Pat<(v2i32 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
+def : Pat<(v2f32 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
+def : Pat<(v4i16 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
+def : Pat<(v8i8 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
+
+def : Pat<(v1i32 (bitconvert (i32 GPR32:$src))), (FMOVsw $src)>;
+
+def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
+def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
+def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
+
+def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), (v8i16 FPR128:$src)>;
+def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), (v4i32 FPR128:$src)>;
+def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), (v2i64 FPR128:$src)>;
+def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), (v4f32 FPR128:$src)>;
+def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), (v2f64 FPR128:$src)>;
+
+// Scalar Three Same
+
+def neon_uimm3 : Operand<i64>,
+ ImmLeaf<i64, [{return Imm < 8;}]> {
+ let ParserMatchClass = uimm3_asmoperand;
+ let PrintMethod = "printUImmHexOperand";
+}
+
+def neon_uimm4 : Operand<i64>,
+ ImmLeaf<i64, [{return Imm < 16;}]> {
+ let ParserMatchClass = uimm4_asmoperand;
+ let PrintMethod = "printUImmHexOperand";
+}
+
+// Bitwise Extract
+class NeonI_Extract<bit q, bits<2> op2, string asmop,
+ string OpS, RegisterOperand OpVPR, Operand OpImm>
+ : NeonI_BitExtract<q, op2, (outs OpVPR:$Rd),
+ (ins OpVPR:$Rn, OpVPR:$Rm, OpImm:$Index),
+ asmop # "\t$Rd." # OpS # ", $Rn." # OpS #
+ ", $Rm." # OpS # ", $Index",
+ [],
+ NoItinerary>{
+ bits<4> Index;
+}
+
+def EXTvvvi_8b : NeonI_Extract<0b0, 0b00, "ext", "8b",
+ VPR64, neon_uimm3> {
+ let Inst{14-11} = {0b0, Index{2}, Index{1}, Index{0}};
+}
+
+def EXTvvvi_16b: NeonI_Extract<0b1, 0b00, "ext", "16b",
+ VPR128, neon_uimm4> {
+ let Inst{14-11} = Index;
+}
+
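+// All 64-bit element types are selected with the byte-granular 8b form and
+// all 128-bit types with the 16b form; the immediate operand is passed
+// through unchanged.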
+class NI_Extract<ValueType OpTy, RegisterOperand OpVPR, Instruction INST,
+ Operand OpImm>
+ : Pat<(OpTy (Neon_vextract (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm),
+ (i64 OpImm:$Imm))),
+ (INST OpVPR:$Rn, OpVPR:$Rm, OpImm:$Imm)>;
+
+def : NI_Extract<v8i8, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v4i16, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v2i32, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v1i64, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v2f32, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v1f64, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v16i8, VPR128, EXTvvvi_16b, neon_uimm4>;
+def : NI_Extract<v8i16, VPR128, EXTvvvi_16b, neon_uimm4>;
+def : NI_Extract<v4i32, VPR128, EXTvvvi_16b, neon_uimm4>;
+def : NI_Extract<v2i64, VPR128, EXTvvvi_16b, neon_uimm4>;
+def : NI_Extract<v4f32, VPR128, EXTvvvi_16b, neon_uimm4>;
+def : NI_Extract<v2f64, VPR128, EXTvvvi_16b, neon_uimm4>;
+
+// Table lookup
+class NI_TBL<bit q, bits<2> op2, bits<2> len, bit op,
+ string asmop, string OpS, RegisterOperand OpVPR,
+ RegisterOperand VecList>
+ : NeonI_TBL<q, op2, len, op,
+ (outs OpVPR:$Rd), (ins VecList:$Rn, OpVPR:$Rm),
+ asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
+ [],
+ NoItinerary>;
+
+// The vectors in the lookup table are always 16b
+multiclass NI_TBL_pat<bits<2> len, bit op, string asmop, string List> {
+ def _8b : NI_TBL<0, 0b00, len, op, asmop, "8b", VPR64,
+ !cast<RegisterOperand>(List # "16B_operand")>;
+
+ def _16b : NI_TBL<1, 0b00, len, op, asmop, "16b", VPR128,
+ !cast<RegisterOperand>(List # "16B_operand")>;
+}
+
+defm TBL1 : NI_TBL_pat<0b00, 0b0, "tbl", "VOne">;
+defm TBL2 : NI_TBL_pat<0b01, 0b0, "tbl", "VPair">;
+defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">;
+defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">;
+
+// Table lookup extension
+class NI_TBX<bit q, bits<2> op2, bits<2> len, bit op,
+ string asmop, string OpS, RegisterOperand OpVPR,
+ RegisterOperand VecList>
+ : NeonI_TBL<q, op2, len, op,
+ (outs OpVPR:$Rd), (ins OpVPR:$src, VecList:$Rn, OpVPR:$Rm),
+ asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
+ [],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+}
+
+// The vectors in the lookup table are always 16b
+multiclass NI_TBX_pat<bits<2> len, bit op, string asmop, string List> {
+ def _8b : NI_TBX<0, 0b00, len, op, asmop, "8b", VPR64,
+ !cast<RegisterOperand>(List # "16B_operand")>;
+
+ def _16b : NI_TBX<1, 0b00, len, op, asmop, "16b", VPR128,
+ !cast<RegisterOperand>(List # "16B_operand")>;
+}
+
+defm TBX1 : NI_TBX_pat<0b00, 0b1, "tbx", "VOne">;
+defm TBX2 : NI_TBX_pat<0b01, 0b1, "tbx", "VPair">;
+defm TBX3 : NI_TBX_pat<0b10, 0b1, "tbx", "VTriple">;
+defm TBX4 : NI_TBX_pat<0b11, 0b1, "tbx", "VQuad">;
+
+class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
+ RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
+ : NeonI_copy<0b1, 0b0, 0b0011,
+ (outs VPR128:$Rd), (ins VPR128:$src, OpGPR:$Rn, OpImm:$Imm),
+ asmop # "\t$Rd." # Res # "[$Imm], $Rn",
+ [(set (ResTy VPR128:$Rd),
+ (ResTy (vector_insert
+ (ResTy VPR128:$src),
+ (OpTy OpGPR:$Rn),
+ (OpImm:$Imm))))],
+ NoItinerary> {
+ bits<4> Imm;
+ let Constraints = "$src = $Rd";
+}
+
+// Insert element (vector, from main)
+def INSbw : NeonI_INS_main<"ins", "b", v16i8, GPR32, i32,
+ neon_uimm4_bare> {
+ let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+def INShw : NeonI_INS_main<"ins", "h", v8i16, GPR32, i32,
+ neon_uimm3_bare> {
+ let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+def INSsw : NeonI_INS_main<"ins", "s", v4i32, GPR32, i32,
+ neon_uimm2_bare> {
+ let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
+}
+def INSdx : NeonI_INS_main<"ins", "d", v2i64, GPR64, i64,
+ neon_uimm1_bare> {
+ let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
+}
+
+def : NeonInstAlias<"mov $Rd.b[$Imm], $Rn",
+ (INSbw VPR128:$Rd, GPR32:$Rn, neon_uimm4_bare:$Imm), 0>;
+def : NeonInstAlias<"mov $Rd.h[$Imm], $Rn",
+ (INShw VPR128:$Rd, GPR32:$Rn, neon_uimm3_bare:$Imm), 0>;
+def : NeonInstAlias<"mov $Rd.s[$Imm], $Rn",
+ (INSsw VPR128:$Rd, GPR32:$Rn, neon_uimm2_bare:$Imm), 0>;
+def : NeonInstAlias<"mov $Rd.d[$Imm], $Rn",
+ (INSdx VPR128:$Rd, GPR64:$Rn, neon_uimm1_bare:$Imm), 0>;
+
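+// 64-bit vector inserts are widened with SUBREG_TO_REG, performed with the
+// 128-bit INS instructions, and the result narrowed back with EXTRACT_SUBREG.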
+class Neon_INS_main_pattern<ValueType ResTy, ValueType ExtResTy,
+ RegisterClass OpGPR, ValueType OpTy,
+ Operand OpImm, Instruction INS>
+ : Pat<(ResTy (vector_insert
+ (ResTy VPR64:$src),
+ (OpTy OpGPR:$Rn),
+ (OpImm:$Imm))),
+ (ResTy (EXTRACT_SUBREG
+ (ExtResTy (INS (ExtResTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
+ OpGPR:$Rn, OpImm:$Imm)), sub_64))>;
+
+def INSbw_pattern : Neon_INS_main_pattern<v8i8, v16i8, GPR32, i32,
+ neon_uimm3_bare, INSbw>;
+def INShw_pattern : Neon_INS_main_pattern<v4i16, v8i16, GPR32, i32,
+ neon_uimm2_bare, INShw>;
+def INSsw_pattern : Neon_INS_main_pattern<v2i32, v4i32, GPR32, i32,
+ neon_uimm1_bare, INSsw>;
+def INSdx_pattern : Neon_INS_main_pattern<v1i64, v2i64, GPR64, i64,
+ neon_uimm0_bare, INSdx>;
+
+class NeonI_INS_element<string asmop, string Res, Operand ResImm>
+ : NeonI_insert<0b1, 0b1,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn,
+ ResImm:$Immd, ResImm:$Immn),
+ asmop # "\t$Rd." # Res # "[$Immd], $Rn." # Res # "[$Immn]",
+ [],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+ bits<4> Immd;
+ bits<4> Immn;
+}
+
+// Insert element (vector, from element)
+def INSELb : NeonI_INS_element<"ins", "b", neon_uimm4_bare> {
+ let Inst{20-16} = {Immd{3}, Immd{2}, Immd{1}, Immd{0}, 0b1};
+ let Inst{14-11} = {Immn{3}, Immn{2}, Immn{1}, Immn{0}};
+}
+def INSELh : NeonI_INS_element<"ins", "h", neon_uimm3_bare> {
+ let Inst{20-16} = {Immd{2}, Immd{1}, Immd{0}, 0b1, 0b0};
+ let Inst{14-11} = {Immn{2}, Immn{1}, Immn{0}, 0b0};
+ // bit 11 is unspecified, but should be set to zero.
+}
+def INSELs : NeonI_INS_element<"ins", "s", neon_uimm2_bare> {
+ let Inst{20-16} = {Immd{1}, Immd{0}, 0b1, 0b0, 0b0};
+ let Inst{14-11} = {Immn{1}, Immn{0}, 0b0, 0b0};
+ // bits 11-12 are unspecified, but should be set to zero.
+}
+def INSELd : NeonI_INS_element<"ins", "d", neon_uimm1_bare> {
+ let Inst{20-16} = {Immd, 0b1, 0b0, 0b0, 0b0};
+ let Inst{14-11} = {Immn{0}, 0b0, 0b0, 0b0};
+ // bits 11-13 are unspecified, but should be set to zero.
+}
+
+def : NeonInstAlias<"mov $Rd.b[$Immd], $Rn.b[$Immn]",
+ (INSELb VPR128:$Rd, VPR128:$Rn,
+ neon_uimm4_bare:$Immd, neon_uimm4_bare:$Immn), 0>;
+def : NeonInstAlias<"mov $Rd.h[$Immd], $Rn.h[$Immn]",
+ (INSELh VPR128:$Rd, VPR128:$Rn,
+ neon_uimm3_bare:$Immd, neon_uimm3_bare:$Immn), 0>;
+def : NeonInstAlias<"mov $Rd.s[$Immd], $Rn.s[$Immn]",
+ (INSELs VPR128:$Rd, VPR128:$Rn,
+ neon_uimm2_bare:$Immd, neon_uimm2_bare:$Immn), 0>;
+def : NeonInstAlias<"mov $Rd.d[$Immd], $Rn.d[$Immn]",
+ (INSELd VPR128:$Rd, VPR128:$Rn,
+ neon_uimm1_bare:$Immd, neon_uimm1_bare:$Immn), 0>;
+
+multiclass Neon_INS_elt_pattern<ValueType ResTy, ValueType NaTy,
+ ValueType MidTy, Operand StImm, Operand NaImm,
+ Instruction INS> {
+def : Pat<(ResTy (vector_insert
+ (ResTy VPR128:$src),
+ (MidTy (vector_extract
+ (ResTy VPR128:$Rn),
+ (StImm:$Immn))),
+ (StImm:$Immd))),
+ (INS (ResTy VPR128:$src), (ResTy VPR128:$Rn),
+ StImm:$Immd, StImm:$Immn)>;
+
+def : Pat <(ResTy (vector_insert
+ (ResTy VPR128:$src),
+ (MidTy (vector_extract
+ (NaTy VPR64:$Rn),
+ (NaImm:$Immn))),
+ (StImm:$Immd))),
+ (INS (ResTy VPR128:$src),
+ (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
+ StImm:$Immd, NaImm:$Immn)>;
+
+def : Pat <(NaTy (vector_insert
+ (NaTy VPR64:$src),
+ (MidTy (vector_extract
+ (ResTy VPR128:$Rn),
+ (StImm:$Immn))),
+ (NaImm:$Immd))),
+ (NaTy (EXTRACT_SUBREG
+ (ResTy (INS
+ (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
+ (ResTy VPR128:$Rn),
+ NaImm:$Immd, StImm:$Immn)),
+ sub_64))>;
+
+def : Pat <(NaTy (vector_insert
+ (NaTy VPR64:$src),
+ (MidTy (vector_extract
+ (NaTy VPR64:$Rn),
+ (NaImm:$Immn))),
+ (NaImm:$Immd))),
+ (NaTy (EXTRACT_SUBREG
+ (ResTy (INS
+ (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
+ (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
+ NaImm:$Immd, NaImm:$Immn)),
+ sub_64))>;
+}
+
+defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, neon_uimm2_bare,
+ neon_uimm1_bare, INSELs>;
+defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, neon_uimm1_bare,
+ neon_uimm0_bare, INSELd>;
+defm : Neon_INS_elt_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
+ neon_uimm3_bare, INSELb>;
+defm : Neon_INS_elt_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
+ neon_uimm2_bare, INSELh>;
+defm : Neon_INS_elt_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
+ neon_uimm1_bare, INSELs>;
+defm : Neon_INS_elt_pattern<v2i64, v1i64, i64, neon_uimm1_bare,
+ neon_uimm0_bare, INSELd>;
+
+multiclass Neon_INS_elt_float_pattern<ValueType ResTy, ValueType NaTy,
+ ValueType MidTy,
+ RegisterClass OpFPR, Operand ResImm,
+ SubRegIndex SubIndex, Instruction INS> {
+def : Pat <(ResTy (vector_insert
+ (ResTy VPR128:$src),
+ (MidTy OpFPR:$Rn),
+ (ResImm:$Imm))),
+ (INS (ResTy VPR128:$src),
+ (ResTy (SUBREG_TO_REG (i64 0), OpFPR:$Rn, SubIndex)),
+ ResImm:$Imm,
+ (i64 0))>;
+
+def : Pat <(NaTy (vector_insert
+ (NaTy VPR64:$src),
+ (MidTy OpFPR:$Rn),
+ (ResImm:$Imm))),
+ (NaTy (EXTRACT_SUBREG
+ (ResTy (INS
+ (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
+ (ResTy (SUBREG_TO_REG (i64 0), (MidTy OpFPR:$Rn), SubIndex)),
+ ResImm:$Imm,
+ (i64 0))),
+ sub_64))>;
+}
+
+defm : Neon_INS_elt_float_pattern<v4f32, v2f32, f32, FPR32, neon_uimm2_bare,
+ sub_32, INSELs>;
+defm : Neon_INS_elt_float_pattern<v2f64, v1f64, f64, FPR64, neon_uimm1_bare,
+ sub_64, INSELd>;
+
+class NeonI_SMOV<string asmop, string Res, bit Q,
+ ValueType OpTy, ValueType eleTy,
+ Operand OpImm, RegisterClass ResGPR, ValueType ResTy>
+ : NeonI_copy<Q, 0b0, 0b0101,
+ (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
+ asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
+ [(set (ResTy ResGPR:$Rd),
+ (ResTy (sext_inreg
+ (ResTy (vector_extract
+ (OpTy VPR128:$Rn), (OpImm:$Imm))),
+ eleTy)))],
+ NoItinerary> {
+ bits<4> Imm;
+}
+
+// Signed integer move (main, from element)
+def SMOVwb : NeonI_SMOV<"smov", "b", 0b0, v16i8, i8, neon_uimm4_bare,
+ GPR32, i32> {
+ let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+def SMOVwh : NeonI_SMOV<"smov", "h", 0b0, v8i16, i16, neon_uimm3_bare,
+ GPR32, i32> {
+ let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+def SMOVxb : NeonI_SMOV<"smov", "b", 0b1, v16i8, i8, neon_uimm4_bare,
+ GPR64, i64> {
+ let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+def SMOVxh : NeonI_SMOV<"smov", "h", 0b1, v8i16, i16, neon_uimm3_bare,
+ GPR64, i64> {
+ let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+def SMOVxs : NeonI_SMOV<"smov", "s", 0b1, v4i32, i32, neon_uimm2_bare,
+ GPR64, i64> {
+ let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
+}
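+
+// For example (register numbers are illustrative), SMOVwb matches
+// "smov w0, v1.b[3]" and SMOVxs matches "smov x0, v1.s[1]"; the extracted
+// element is sign-extended into the general-purpose destination.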
+
+multiclass Neon_SMOVx_pattern <ValueType StTy, ValueType NaTy,
+ ValueType eleTy, Operand StImm, Operand NaImm,
+ Instruction SMOVI> {
+ def : Pat<(i64 (sext_inreg
+ (i64 (anyext
+ (i32 (vector_extract
+ (StTy VPR128:$Rn), (StImm:$Imm))))),
+ eleTy)),
+ (SMOVI VPR128:$Rn, StImm:$Imm)>;
+
+ def : Pat<(i64 (sext
+ (i32 (vector_extract
+ (StTy VPR128:$Rn), (StImm:$Imm))))),
+ (SMOVI VPR128:$Rn, StImm:$Imm)>;
+
+ def : Pat<(i64 (sext_inreg
+ (i64 (vector_extract
+ (NaTy VPR64:$Rn), (NaImm:$Imm))),
+ eleTy)),
+ (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ NaImm:$Imm)>;
+
+ def : Pat<(i64 (sext_inreg
+ (i64 (anyext
+ (i32 (vector_extract
+ (NaTy VPR64:$Rn), (NaImm:$Imm))))),
+ eleTy)),
+ (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ NaImm:$Imm)>;
+
+ def : Pat<(i64 (sext
+ (i32 (vector_extract
+ (NaTy VPR64:$Rn), (NaImm:$Imm))))),
+ (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ NaImm:$Imm)>;
+}
+
+defm : Neon_SMOVx_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
+ neon_uimm3_bare, SMOVxb>;
+defm : Neon_SMOVx_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
+ neon_uimm2_bare, SMOVxh>;
+defm : Neon_SMOVx_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
+ neon_uimm1_bare, SMOVxs>;
+
+class Neon_SMOVw_pattern <ValueType StTy, ValueType NaTy,
+ ValueType eleTy, Operand StImm, Operand NaImm,
+ Instruction SMOVI>
+ : Pat<(i32 (sext_inreg
+ (i32 (vector_extract
+ (NaTy VPR64:$Rn), (NaImm:$Imm))),
+ eleTy)),
+ (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ NaImm:$Imm)>;
+
+def : Neon_SMOVw_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
+ neon_uimm3_bare, SMOVwb>;
+def : Neon_SMOVw_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
+ neon_uimm2_bare, SMOVwh>;
+
+class NeonI_UMOV<string asmop, string Res, bit Q,
+ ValueType OpTy, Operand OpImm,
+ RegisterClass ResGPR, ValueType ResTy>
+ : NeonI_copy<Q, 0b0, 0b0111,
+ (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
+ asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
+ [(set (ResTy ResGPR:$Rd),
+ (ResTy (vector_extract
+ (OpTy VPR128:$Rn), (OpImm:$Imm))))],
+ NoItinerary> {
+ bits<4> Imm;
+}
+
+// Unsigned integer move (main, from element)
+def UMOVwb : NeonI_UMOV<"umov", "b", 0b0, v16i8, neon_uimm4_bare,
+ GPR32, i32> {
+ let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+def UMOVwh : NeonI_UMOV<"umov", "h", 0b0, v8i16, neon_uimm3_bare,
+ GPR32, i32> {
+ let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+def UMOVws : NeonI_UMOV<"umov", "s", 0b0, v4i32, neon_uimm2_bare,
+ GPR32, i32> {
+ let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
+}
+def UMOVxd : NeonI_UMOV<"umov", "d", 0b1, v2i64, neon_uimm1_bare,
+ GPR64, i64> {
+ let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
+}
+
+def : NeonInstAlias<"mov $Rd, $Rn.s[$Imm]",
+ (UMOVws GPR32:$Rd, VPR128:$Rn, neon_uimm2_bare:$Imm), 0>;
+def : NeonInstAlias<"mov $Rd, $Rn.d[$Imm]",
+ (UMOVxd GPR64:$Rd, VPR128:$Rn, neon_uimm1_bare:$Imm), 0>;
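+
+// For example (register numbers are illustrative), UMOVwh matches
+// "umov w0, v1.h[2]", which zero-extends the extracted element; the "mov"
+// aliases above also accept e.g. "mov w0, v1.s[1]" and "mov x0, v1.d[0]".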
+
+class Neon_UMOV_pattern <ValueType StTy, ValueType NaTy, ValueType ResTy,
+ Operand StImm, Operand NaImm,
+ Instruction SMOVI>
+ : Pat<(ResTy (vector_extract
+ (NaTy VPR64:$Rn), NaImm:$Imm)),
+ (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ NaImm:$Imm)>;
+
+def : Neon_UMOV_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
+ neon_uimm3_bare, UMOVwb>;
+def : Neon_UMOV_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
+ neon_uimm2_bare, UMOVwh>;
+def : Neon_UMOV_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
+ neon_uimm1_bare, UMOVws>;
+
+def : Pat<(i32 (and
+ (i32 (vector_extract
+ (v16i8 VPR128:$Rn), (neon_uimm4_bare:$Imm))),
+ 255)),
+ (UMOVwb VPR128:$Rn, neon_uimm4_bare:$Imm)>;
+
+def : Pat<(i32 (and
+ (i32 (vector_extract
+ (v8i16 VPR128:$Rn), (neon_uimm3_bare:$Imm))),
+ 65535)),
+ (UMOVwh VPR128:$Rn, neon_uimm3_bare:$Imm)>;
+
+def : Pat<(i64 (zext
+ (i32 (vector_extract
+ (v2i64 VPR128:$Rn), (neon_uimm1_bare:$Imm))))),
+ (UMOVxd VPR128:$Rn, neon_uimm1_bare:$Imm)>;
+
+def : Pat<(i32 (and
+ (i32 (vector_extract
+ (v8i8 VPR64:$Rn), (neon_uimm3_bare:$Imm))),
+ 255)),
+ (UMOVwb (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
+ neon_uimm3_bare:$Imm)>;
+
+def : Pat<(i32 (and
+ (i32 (vector_extract
+ (v4i16 VPR64:$Rn), (neon_uimm2_bare:$Imm))),
+ 65535)),
+ (UMOVwh (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
+ neon_uimm2_bare:$Imm)>;
+
+def : Pat<(i64 (zext
+ (i32 (vector_extract
+ (v1i64 VPR64:$Rn), (neon_uimm0_bare:$Imm))))),
+ (UMOVxd (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
+ neon_uimm0_bare:$Imm)>;
+
+// Additional copy patterns for scalar types
+def : Pat<(i32 (vector_extract (v1i8 FPR8:$Rn), (i64 0))),
+ (UMOVwb (v16i8
+ (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8)), (i64 0))>;
+
+def : Pat<(i32 (vector_extract (v1i16 FPR16:$Rn), (i64 0))),
+ (UMOVwh (v8i16
+ (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16)), (i64 0))>;
+
+def : Pat<(i32 (vector_extract (v1i32 FPR32:$Rn), (i64 0))),
+ (FMOVws FPR32:$Rn)>;
+
+def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
+ (FMOVxd FPR64:$Rn)>;
+
+def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
+ (f64 FPR64:$Rn)>;
+
+def : Pat<(f32 (vector_extract (v1f32 FPR32:$Rn), (i64 0))),
+ (f32 FPR32:$Rn)>;
+
+def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
+ (v1i8 (EXTRACT_SUBREG (v16i8
+ (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
+ sub_8))>;
+
+def : Pat<(v1i16 (scalar_to_vector GPR32:$Rn)),
+ (v1i16 (EXTRACT_SUBREG (v8i16
+ (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
+ sub_16))>;
+
+def : Pat<(v1i32 (scalar_to_vector GPR32:$src)),
+ (FMOVsw $src)>;
+
+def : Pat<(v1i64 (scalar_to_vector GPR64:$src)),
+ (FMOVdx $src)>;
+
+def : Pat<(v1f32 (scalar_to_vector (f32 FPR32:$Rn))),
+ (v1f32 FPR32:$Rn)>;
+def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Rn))),
+ (v1f64 FPR64:$Rn)>;
+
+def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
+ (FMOVdd $src)>;
+
+def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$src))),
+ (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
+ (f64 FPR64:$src), sub_64)>;
+
+class NeonI_DUP_Elt<bit Q, string asmop, string rdlane, string rnlane,
+ RegisterOperand ResVPR, Operand OpImm>
+ : NeonI_copy<Q, 0b0, 0b0000, (outs ResVPR:$Rd),
+ (ins VPR128:$Rn, OpImm:$Imm),
+ asmop # "\t$Rd" # rdlane # ", $Rn" # rnlane # "[$Imm]",
+ [],
+ NoItinerary> {
+ bits<4> Imm;
+}
+
+def DUPELT16b : NeonI_DUP_Elt<0b1, "dup", ".16b", ".b", VPR128,
+ neon_uimm4_bare> {
+ let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+
+def DUPELT8h : NeonI_DUP_Elt<0b1, "dup", ".8h", ".h", VPR128,
+ neon_uimm3_bare> {
+ let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+
+def DUPELT4s : NeonI_DUP_Elt<0b1, "dup", ".4s", ".s", VPR128,
+ neon_uimm2_bare> {
+ let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
+}
+
+def DUPELT2d : NeonI_DUP_Elt<0b1, "dup", ".2d", ".d", VPR128,
+ neon_uimm1_bare> {
+ let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
+}
+
+def DUPELT8b : NeonI_DUP_Elt<0b0, "dup", ".8b", ".b", VPR64,
+ neon_uimm4_bare> {
+ let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+
+def DUPELT4h : NeonI_DUP_Elt<0b0, "dup", ".4h", ".h", VPR64,
+ neon_uimm3_bare> {
+ let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+
+def DUPELT2s : NeonI_DUP_Elt<0b0, "dup", ".2s", ".s", VPR64,
+ neon_uimm2_bare> {
+ let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
+}
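+
+// For example, DUPELT4s matches "dup v0.4s, v1.s[2]" (register numbers are
+// illustrative), broadcasting element 2 of $Rn into every lane of $Rd.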
+
+multiclass NeonI_DUP_Elt_pattern<Instruction DUPELT, ValueType ResTy,
+ ValueType OpTy,ValueType NaTy,
+ ValueType ExTy, Operand OpLImm,
+ Operand OpNImm> {
+def : Pat<(ResTy (Neon_vduplane (OpTy VPR128:$Rn), OpLImm:$Imm)),
+ (ResTy (DUPELT (OpTy VPR128:$Rn), OpLImm:$Imm))>;
+
+def : Pat<(ResTy (Neon_vduplane
+ (NaTy VPR64:$Rn), OpNImm:$Imm)),
+ (ResTy (DUPELT
+ (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)), OpNImm:$Imm))>;
+}
+defm : NeonI_DUP_Elt_pattern<DUPELT16b, v16i8, v16i8, v8i8, v16i8,
+ neon_uimm4_bare, neon_uimm3_bare>;
+defm : NeonI_DUP_Elt_pattern<DUPELT8b, v8i8, v16i8, v8i8, v16i8,
+ neon_uimm4_bare, neon_uimm3_bare>;
+defm : NeonI_DUP_Elt_pattern<DUPELT8h, v8i16, v8i16, v4i16, v8i16,
+ neon_uimm3_bare, neon_uimm2_bare>;
+defm : NeonI_DUP_Elt_pattern<DUPELT4h, v4i16, v8i16, v4i16, v8i16,
+ neon_uimm3_bare, neon_uimm2_bare>;
+defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4i32, v4i32, v2i32, v4i32,
+ neon_uimm2_bare, neon_uimm1_bare>;
+defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2i32, v4i32, v2i32, v4i32,
+ neon_uimm2_bare, neon_uimm1_bare>;
+defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2i64, v2i64, v1i64, v2i64,
+ neon_uimm1_bare, neon_uimm0_bare>;
+defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4f32, v4f32, v2f32, v4f32,
+ neon_uimm2_bare, neon_uimm1_bare>;
+defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2f32, v4f32, v2f32, v4f32,
+ neon_uimm2_bare, neon_uimm1_bare>;
+defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2f64, v2f64, v1f64, v2f64,
+ neon_uimm1_bare, neon_uimm0_bare>;
+
+def : Pat<(v2f32 (Neon_vdup (f32 FPR32:$Rn))),
+ (v2f32 (DUPELT2s
+ (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
+ (i64 0)))>;
+def : Pat<(v4f32 (Neon_vdup (f32 FPR32:$Rn))),
+ (v4f32 (DUPELT4s
+ (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
+ (i64 0)))>;
+def : Pat<(v2f64 (Neon_vdup (f64 FPR64:$Rn))),
+ (v2f64 (DUPELT2d
+ (SUBREG_TO_REG (i64 0), FPR64:$Rn, sub_64),
+ (i64 0)))>;
+
+class NeonI_DUP<bit Q, string asmop, string rdlane,
+ RegisterOperand ResVPR, ValueType ResTy,
+ RegisterClass OpGPR, ValueType OpTy>
+ : NeonI_copy<Q, 0b0, 0b0001, (outs ResVPR:$Rd), (ins OpGPR:$Rn),
+ asmop # "\t$Rd" # rdlane # ", $Rn",
+ [(set (ResTy ResVPR:$Rd),
+ (ResTy (Neon_vdup (OpTy OpGPR:$Rn))))],
+ NoItinerary>;
+
+def DUP16b : NeonI_DUP<0b1, "dup", ".16b", VPR128, v16i8, GPR32, i32> {
+ let Inst{20-16} = 0b00001;
+ // bits 17-20 are unspecified, but should be set to zero.
+}
+
+def DUP8h : NeonI_DUP<0b1, "dup", ".8h", VPR128, v8i16, GPR32, i32> {
+ let Inst{20-16} = 0b00010;
+ // bits 18-20 are unspecified, but should be set to zero.
+}
+
+def DUP4s : NeonI_DUP<0b1, "dup", ".4s", VPR128, v4i32, GPR32, i32> {
+ let Inst{20-16} = 0b00100;
+ // bits 19-20 are unspecified, but should be set to zero.
+}
+
+def DUP2d : NeonI_DUP<0b1, "dup", ".2d", VPR128, v2i64, GPR64, i64> {
+ let Inst{20-16} = 0b01000;
+ // bit 20 is unspecified, but should be set to zero.
+}
+
+def DUP8b : NeonI_DUP<0b0, "dup", ".8b", VPR64, v8i8, GPR32, i32> {
+ let Inst{20-16} = 0b00001;
+ // bits 17-20 are unspecified, but should be set to zero.
+}
+
+def DUP4h : NeonI_DUP<0b0, "dup", ".4h", VPR64, v4i16, GPR32, i32> {
+ let Inst{20-16} = 0b00010;
+ // bits 18-20 are unspecified, but should be set to zero.
+}
+
+def DUP2s : NeonI_DUP<0b0, "dup", ".2s", VPR64, v2i32, GPR32, i32> {
+ let Inst{20-16} = 0b00100;
+ // bits 19-20 are unspecified, but should be set to zero.
+}
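+
+// For example, DUP8h matches "dup v0.8h, w1" (register numbers are
+// illustrative), broadcasting the low 16 bits of the general register into
+// every lane of the destination vector.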
+
+// Patterns for CONCAT_VECTORS
+multiclass Concat_Vector_Pattern<ValueType ResTy, ValueType OpTy> {
+def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), undef)),
+ (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)>;
+def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))),
+ (INSELd
+ (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rm, sub_64)),
+ (i64 1),
+ (i64 0))>;
+def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rn))),
+ (DUPELT2d
+ (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ (i64 0))> ;
+}
+
+defm : Concat_Vector_Pattern<v16i8, v8i8>;
+defm : Concat_Vector_Pattern<v8i16, v4i16>;
+defm : Concat_Vector_Pattern<v4i32, v2i32>;
+defm : Concat_Vector_Pattern<v2i64, v1i64>;
+defm : Concat_Vector_Pattern<v4f32, v2f32>;
+defm : Concat_Vector_Pattern<v2f64, v1f64>;
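+
+// In the patterns above, concatenation with undef is just a widening
+// subregister copy, concatenation of two different 64-bit values becomes an
+// INS of the second value into d[1], and concatenation of a value with itself
+// becomes a DUP of d[0].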
+
+// Patterns for EXTRACT_SUBVECTOR
+def : Pat<(v8i8 (extract_subvector (v16i8 VPR128:$Rn), (i64 0))),
+ (v8i8 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
+def : Pat<(v4i16 (extract_subvector (v8i16 VPR128:$Rn), (i64 0))),
+ (v4i16 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
+def : Pat<(v2i32 (extract_subvector (v4i32 VPR128:$Rn), (i64 0))),
+ (v2i32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
+def : Pat<(v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 0))),
+ (v1i64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
+def : Pat<(v2f32 (extract_subvector (v4f32 VPR128:$Rn), (i64 0))),
+ (v2f32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
+def : Pat<(v1f64 (extract_subvector (v2f64 VPR128:$Rn), (i64 0))),
+ (v1f64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
+
+// The following definitions are for the instruction class (3V Elem)
+
+// Variant 1
+
+class NI_2VE<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS, string EleOpS,
+ Operand OpImm, RegisterOperand ResVPR,
+ RegisterOperand OpVPR, RegisterOperand EleOpVPR>
+ : NeonI_2VElem<q, u, size, opcode,
+ (outs ResVPR:$Rd), (ins ResVPR:$src, OpVPR:$Rn,
+ EleOpVPR:$Re, OpImm:$Index),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
+ ", $Re." # EleOpS # "[$Index]",
+ [],
+ NoItinerary> {
+ bits<3> Index;
+ bits<5> Re;
+
+ let Constraints = "$src = $Rd";
+}
+
+multiclass NI_2VE_v1<bit u, bits<4> opcode, string asmop> {
+ // vector register class for element is always 128-bit to cover the max index
+ def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
+ neon_uimm2_bare, VPR64, VPR64, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
+ neon_uimm2_bare, VPR128, VPR128, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ // Index operations on 16-bit(H) elements are restricted to using v0-v15.
+ def _4h8h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
+ neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
+ let Inst{11} = {Index{2}};
+ let Inst{21} = {Index{1}};
+ let Inst{20} = {Index{0}};
+ let Inst{19-16} = Re{3-0};
+ }
+
+ def _8h8h : NI_2VE<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
+ neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
+ let Inst{11} = {Index{2}};
+ let Inst{21} = {Index{1}};
+ let Inst{20} = {Index{0}};
+ let Inst{19-16} = Re{3-0};
+ }
+}
+
+defm MLAvve : NI_2VE_v1<0b1, 0b0000, "mla">;
+defm MLSvve : NI_2VE_v1<0b1, 0b0100, "mls">;
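+
+// For example, the _4s4s MLA form matches "mla v0.4s, v1.4s, v2.s[3]"
+// (register numbers are illustrative); for the "h" element forms the element
+// register is limited to v0-v15, as noted above.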
+
+// Pattern for lane in 128-bit vector
+class NI_2VE_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand ResVPR, RegisterOperand OpVPR,
+ RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
+ ValueType EleOpTy>
+ : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
+ (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+ (INST ResVPR:$src, OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector
+class NI_2VE_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand ResVPR, RegisterOperand OpVPR,
+ RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
+ ValueType EleOpTy>
+ : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
+ (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+ (INST ResVPR:$src, OpVPR:$Rn,
+ (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
+
+multiclass NI_2VE_v1_pat<string subop, SDPatternOperator op>
+{
+ def : NI_2VE_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
+ op, VPR64, VPR64, VPR128, v2i32, v2i32, v4i32>;
+
+ def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
+ op, VPR128, VPR128, VPR128, v4i32, v4i32, v4i32>;
+
+ def : NI_2VE_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
+ op, VPR64, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
+
+ def : NI_2VE_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
+ op, VPR128, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
+
+ // Index can only be half of the max value for lane in 64-bit vector
+
+ def : NI_2VE_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
+ op, VPR64, VPR64, VPR64, v2i32, v2i32, v2i32>;
+
+ def : NI_2VE_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
+ op, VPR64, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
+}
+
+defm MLA_lane_v1 : NI_2VE_v1_pat<"MLAvve", Neon_mla>;
+defm MLS_lane_v1 : NI_2VE_v1_pat<"MLSvve", Neon_mls>;
+
+class NI_2VE_2op<bit q, bit u, bits<2> size, bits<4> opcode,
+ string asmop, string ResS, string OpS, string EleOpS,
+ Operand OpImm, RegisterOperand ResVPR,
+ RegisterOperand OpVPR, RegisterOperand EleOpVPR>
+ : NeonI_2VElem<q, u, size, opcode,
+ (outs ResVPR:$Rd), (ins OpVPR:$Rn,
+ EleOpVPR:$Re, OpImm:$Index),
+ asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
+ ", $Re." # EleOpS # "[$Index]",
+ [],
+ NoItinerary> {
+ bits<3> Index;
+ bits<5> Re;
+}
+
+multiclass NI_2VE_v1_2op<bit u, bits<4> opcode, string asmop> {
+ // vector register class for element is always 128-bit to cover the max index
+ def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
+ neon_uimm2_bare, VPR64, VPR64, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
+ neon_uimm2_bare, VPR128, VPR128, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ // Index operations on 16-bit(H) elements are restricted to using v0-v15.
+ def _4h8h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
+ neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
+ let Inst{11} = {Index{2}};
+ let Inst{21} = {Index{1}};
+ let Inst{20} = {Index{0}};
+ let Inst{19-16} = Re{3-0};
+ }
+
+ def _8h8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
+ neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
+ let Inst{11} = {Index{2}};
+ let Inst{21} = {Index{1}};
+ let Inst{20} = {Index{0}};
+ let Inst{19-16} = Re{3-0};
+ }
+}
+
+defm MULve : NI_2VE_v1_2op<0b0, 0b1000, "mul">;
+defm SQDMULHve : NI_2VE_v1_2op<0b0, 0b1100, "sqdmulh">;
+defm SQRDMULHve : NI_2VE_v1_2op<0b0, 0b1101, "sqrdmulh">;
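+
+// For example, the _2s4s MUL form matches "mul v0.2s, v1.2s, v2.s[1]" and the
+// _4h8h SQDMULH form matches "sqdmulh v0.4h, v1.4h, v2.h[5]" (register
+// numbers are illustrative).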
+
+// Pattern for lane in 128-bit vector
+class NI_2VE_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand OpVPR, RegisterOperand EleOpVPR,
+ ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
+ : Pat<(ResTy (op (OpTy OpVPR:$Rn),
+ (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+ (INST OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector
+class NI_2VE_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand OpVPR, RegisterOperand EleOpVPR,
+ ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
+ : Pat<(ResTy (op (OpTy OpVPR:$Rn),
+ (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+ (INST OpVPR:$Rn,
+ (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
+
+multiclass NI_2VE_mul_v1_pat<string subop, SDPatternOperator op> {
+ def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
+ op, VPR64, VPR128, v2i32, v2i32, v4i32>;
+
+ def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
+ op, VPR128, VPR128, v4i32, v4i32, v4i32>;
+
+ def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
+ op, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
+
+ def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
+ op, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
+
+ // Index can only be half of the max value for lane in 64-bit vector
+
+ def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
+ op, VPR64, VPR64, v2i32, v2i32, v2i32>;
+
+ def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
+ op, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
+}
+
+defm MUL_lane_v1 : NI_2VE_mul_v1_pat<"MULve", mul>;
+defm SQDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQDMULHve", int_arm_neon_vqdmulh>;
+defm SQRDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQRDMULHve", int_arm_neon_vqrdmulh>;
+
+// Variant 2
+
+multiclass NI_2VE_v2_2op<bit u, bits<4> opcode, string asmop> {
+ // vector register class for element is always 128-bit to cover the max index
+ def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
+ neon_uimm2_bare, VPR64, VPR64, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
+ neon_uimm2_bare, VPR128, VPR128, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ // _1d2d doesn't exist!
+
+ def _2d2d : NI_2VE_2op<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
+ neon_uimm1_bare, VPR128, VPR128, VPR128> {
+ let Inst{11} = {Index{0}};
+ let Inst{21} = 0b0;
+ let Inst{20-16} = Re;
+ }
+}
+
+defm FMULve : NI_2VE_v2_2op<0b0, 0b1001, "fmul">;
+defm FMULXve : NI_2VE_v2_2op<0b1, 0b1001, "fmulx">;
+
+class NI_2VE_mul_lane_2d<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand OpVPR, RegisterOperand EleOpVPR,
+ ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
+ SDPatternOperator coreop>
+ : Pat<(ResTy (op (OpTy OpVPR:$Rn),
+ (OpTy (coreop (EleOpTy EleOpVPR:$Re), (EleOpTy EleOpVPR:$Re))))),
+ (INST OpVPR:$Rn,
+ (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), 0)>;
+
+multiclass NI_2VE_mul_v2_pat<string subop, SDPatternOperator op> {
+ def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
+ op, VPR64, VPR128, v2f32, v2f32, v4f32>;
+
+ def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
+ op, VPR128, VPR128, v4f32, v4f32, v4f32>;
+
+ def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
+ op, VPR128, VPR128, v2f64, v2f64, v2f64>;
+
+ // Index can only be half of the max value for lane in 64-bit vector
+
+ def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
+ op, VPR64, VPR64, v2f32, v2f32, v2f32>;
+
+ def : NI_2VE_mul_lane_2d<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
+ op, VPR128, VPR64, v2f64, v2f64, v1f64,
+ BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
+}
+
+defm FMUL_lane_v2 : NI_2VE_mul_v2_pat<"FMULve", fmul>;
+defm FMULX_lane_v2 : NI_2VE_mul_v2_pat<"FMULXve", int_aarch64_neon_vmulx>;
+
+def : Pat<(v2f32 (fmul (v2f32 (Neon_vdup (f32 FPR32:$Re))),
+ (v2f32 VPR64:$Rn))),
+ (FMULve_2s4s VPR64:$Rn, (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
+
+def : Pat<(v4f32 (fmul (v4f32 (Neon_vdup (f32 FPR32:$Re))),
+ (v4f32 VPR128:$Rn))),
+ (FMULve_4s4s VPR128:$Rn, (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
+
+def : Pat<(v2f64 (fmul (v2f64 (Neon_vdup (f64 FPR64:$Re))),
+ (v2f64 VPR128:$Rn))),
+ (FMULve_2d2d VPR128:$Rn, (SUBREG_TO_REG (i64 0), $Re, sub_64), 0)>;
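+
+// The three patterns above fold a multiply by a scalar splat into the
+// by-element form: the scalar FPR is widened into a vector register with
+// SUBREG_TO_REG and lane 0 is used as the index.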
+
+// The following are patterns using fma;
+// -ffp-contract=fast generates fma nodes.
+
+multiclass NI_2VE_v2<bit u, bits<4> opcode, string asmop> {
+ // vector register class for element is always 128-bit to cover the max index
+ def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
+ neon_uimm2_bare, VPR64, VPR64, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
+ neon_uimm2_bare, VPR128, VPR128, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ // _1d2d doesn't exist!
+
+ def _2d2d : NI_2VE<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
+ neon_uimm1_bare, VPR128, VPR128, VPR128> {
+ let Inst{11} = {Index{0}};
+ let Inst{21} = 0b0;
+ let Inst{20-16} = Re;
+ }
+}
+
+defm FMLAvve : NI_2VE_v2<0b0, 0b0001, "fmla">;
+defm FMLSvve : NI_2VE_v2<0b0, 0b0101, "fmls">;
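+
+// For example, the _2d2d FMLA form matches "fmla v0.2d, v1.2d, v2.d[1]"
+// (register numbers are illustrative).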
+
+// Pattern for lane in 128-bit vector
+class NI_2VEswap_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand ResVPR, RegisterOperand OpVPR,
+ ValueType ResTy, ValueType OpTy,
+ SDPatternOperator coreop>
+ : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
+ (ResTy ResVPR:$src), (ResTy ResVPR:$Rn))),
+ (INST ResVPR:$src, ResVPR:$Rn, OpVPR:$Re, OpImm:$Index)>;
+
+// Pattern for lane 0
+class NI_2VEfma_lane0<Instruction INST, SDPatternOperator op,
+ RegisterOperand ResVPR, ValueType ResTy>
+ : Pat<(ResTy (op (ResTy ResVPR:$Rn),
+ (ResTy (Neon_vdup (f32 FPR32:$Re))),
+ (ResTy ResVPR:$src))),
+ (INST ResVPR:$src, ResVPR:$Rn,
+ (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
+
+// Pattern for lane in 64-bit vector
+class NI_2VEswap_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand ResVPR, RegisterOperand OpVPR,
+ ValueType ResTy, ValueType OpTy,
+ SDPatternOperator coreop>
+ : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
+ (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
+ (INST ResVPR:$src, ResVPR:$Rn,
+ (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector
+class NI_2VEswap_lane_2d2d<Instruction INST, Operand OpImm,
+ SDPatternOperator op,
+ RegisterOperand ResVPR, RegisterOperand OpVPR,
+ ValueType ResTy, ValueType OpTy,
+ SDPatternOperator coreop>
+ : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (OpTy OpVPR:$Re))),
+ (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
+ (INST ResVPR:$src, ResVPR:$Rn,
+ (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), 0)>;
+
+
+multiclass NI_2VE_fma_v2_pat<string subop, SDPatternOperator op> {
+ def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
+ neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
+ BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+ def : NI_2VEfma_lane0<!cast<Instruction>(subop # "_2s4s"),
+ op, VPR64, v2f32>;
+
+ def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
+ neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
+ BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+ def : NI_2VEfma_lane0<!cast<Instruction>(subop # "_4s4s"),
+ op, VPR128, v4f32>;
+
+ def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
+ neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
+ BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+ // Index can only be half of the max value for lane in 64-bit vector
+
+ def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
+ neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
+ BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+ def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
+ neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
+ BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
+}
+
+defm FMLA_lane_v2_s : NI_2VE_fma_v2_pat<"FMLAvve", fma>;
+
+// Pattern for lane 0
+class NI_2VEfms_lane0<Instruction INST, SDPatternOperator op,
+ RegisterOperand ResVPR, ValueType ResTy>
+ : Pat<(ResTy (op (ResTy (fneg ResVPR:$Rn)),
+ (ResTy (Neon_vdup (f32 FPR32:$Re))),
+ (ResTy ResVPR:$src))),
+ (INST ResVPR:$src, ResVPR:$Rn,
+ (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
+
+multiclass NI_2VE_fms_v2_pat<string subop, SDPatternOperator op>
+{
+ def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
+ neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
+ BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
+
+ def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
+ neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
+ BinOpFrag<(Neon_vduplane
+ (fneg node:$LHS), node:$RHS)>>;
+
+ def : NI_2VEfms_lane0<!cast<Instruction>(subop # "_2s4s"),
+ op, VPR64, v2f32>;
+
+ def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
+ neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
+ BinOpFrag<(fneg (Neon_vduplane
+ node:$LHS, node:$RHS))>>;
+
+ def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
+ neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
+ BinOpFrag<(Neon_vduplane
+ (fneg node:$LHS), node:$RHS)>>;
+
+ def : NI_2VEfms_lane0<!cast<Instruction>(subop # "_4s4s"),
+ op, VPR128, v4f32>;
+
+ def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
+ neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
+ BinOpFrag<(fneg (Neon_vduplane
+ node:$LHS, node:$RHS))>>;
+
+ def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
+ neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
+ BinOpFrag<(Neon_vduplane
+ (fneg node:$LHS), node:$RHS)>>;
+
+ // Index can only be half of the max value for lane in 64-bit vector
+
+ def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
+ neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
+ BinOpFrag<(fneg (Neon_vduplane
+ node:$LHS, node:$RHS))>>;
+
+ def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
+ neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
+ BinOpFrag<(Neon_vduplane
+ (fneg node:$LHS), node:$RHS)>>;
+
+ def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
+ neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
+ BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
+
+ def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
+ neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
+ BinOpFrag<(Neon_vduplane (fneg node:$LHS), node:$RHS)>>;
+
+ def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
+ neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
+ BinOpFrag<(fneg (Neon_combine_2d
+ node:$LHS, node:$RHS))>>;
+
+ def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
+ neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
+ BinOpFrag<(Neon_combine_2d
+ (fneg node:$LHS), (fneg node:$RHS))>>;
+}
+
+defm FMLS_lane_v2_s : NI_2VE_fms_v2_pat<"FMLSvve", fma>;
+
+// Variant 3: Long type
+// E.g. SMLAL : 4S/4H/H (v0-v15), 2D/2S/S
+// SMLAL2: 4S/8H/H (v0-v15), 2D/4S/S
+
+multiclass NI_2VE_v3<bit u, bits<4> opcode, string asmop> {
+ // vector register class for element is always 128-bit to cover the max index
+ def _2d2s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
+ neon_uimm2_bare, VPR128, VPR64, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ def _2d4s : NI_2VE<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
+ neon_uimm2_bare, VPR128, VPR128, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ // Index operations on 16-bit(H) elements are restricted to using v0-v15.
+ def _4s8h : NI_2VE<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
+ neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
+ let Inst{11} = {Index{2}};
+ let Inst{21} = {Index{1}};
+ let Inst{20} = {Index{0}};
+ let Inst{19-16} = Re{3-0};
+ }
+
+ def _4s4h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
+ neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
+ let Inst{11} = {Index{2}};
+ let Inst{21} = {Index{1}};
+ let Inst{20} = {Index{0}};
+ let Inst{19-16} = Re{3-0};
+ }
+}
+
+defm SMLALvve : NI_2VE_v3<0b0, 0b0010, "smlal">;
+defm UMLALvve : NI_2VE_v3<0b1, 0b0010, "umlal">;
+defm SMLSLvve : NI_2VE_v3<0b0, 0b0110, "smlsl">;
+defm UMLSLvve : NI_2VE_v3<0b1, 0b0110, "umlsl">;
+defm SQDMLALvve : NI_2VE_v3<0b0, 0b0011, "sqdmlal">;
+defm SQDMLSLvve : NI_2VE_v3<0b0, 0b0111, "sqdmlsl">;
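+
+// For example (register numbers are illustrative), the _4s4h SMLAL form
+// matches "smlal v0.4s, v1.4h, v2.h[3]", and the _4s8h form matches the
+// high-half variant "smlal2 v0.4s, v1.8h, v2.h[3]".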
+
+multiclass NI_2VE_v3_2op<bit u, bits<4> opcode, string asmop> {
+ // vector register class for element is always 128-bit to cover the max index
+ def _2d2s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
+ neon_uimm2_bare, VPR128, VPR64, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ def _2d4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
+ neon_uimm2_bare, VPR128, VPR128, VPR128> {
+ let Inst{11} = {Index{1}};
+ let Inst{21} = {Index{0}};
+ let Inst{20-16} = Re;
+ }
+
+ // Index operations on 16-bit(H) elements are restricted to using v0-v15.
+ def _4s8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
+ neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
+ let Inst{11} = {Index{2}};
+ let Inst{21} = {Index{1}};
+ let Inst{20} = {Index{0}};
+ let Inst{19-16} = Re{3-0};
+ }
+
+ def _4s4h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
+ neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
+ let Inst{11} = {Index{2}};
+ let Inst{21} = {Index{1}};
+ let Inst{20} = {Index{0}};
+ let Inst{19-16} = Re{3-0};
+ }
+}
+
+defm SMULLve : NI_2VE_v3_2op<0b0, 0b1010, "smull">;
+defm UMULLve : NI_2VE_v3_2op<0b1, 0b1010, "umull">;
+defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
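+
+// For example (register numbers are illustrative), the _2d2s SMULL form
+// matches "smull v0.2d, v1.2s, v2.s[1]", and the _2d4s form matches the
+// high-half variant "smull2 v0.2d, v1.4s, v2.s[1]".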
+
+def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
+ (FMOVdd $src)>;
+def : Pat<(v1f32 (scalar_to_vector (f32 FPR32:$src))),
+ (FMOVss $src)>;
+
+// Pattern for lane in 128-bit vector
+class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand EleOpVPR, ValueType ResTy,
+ ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
+ SDPatternOperator hiop>
+ : Pat<(ResTy (op (ResTy VPR128:$src),
+ (HalfOpTy (hiop (OpTy VPR128:$Rn))),
+ (HalfOpTy (Neon_vduplane
+ (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+ (INST VPR128:$src, VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector
+class NI_2VEL2_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand EleOpVPR, ValueType ResTy,
+ ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
+ SDPatternOperator hiop>
+ : Pat<(ResTy (op (ResTy VPR128:$src),
+ (HalfOpTy (hiop (OpTy VPR128:$Rn))),
+ (HalfOpTy (Neon_vduplane
+ (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+ (INST VPR128:$src, VPR128:$Rn,
+ (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
+
+class NI_2VEL2_lane0<Instruction INST, SDPatternOperator op,
+ ValueType ResTy, ValueType OpTy, ValueType HalfOpTy,
+ SDPatternOperator hiop, Instruction DupInst>
+ : Pat<(ResTy (op (ResTy VPR128:$src),
+ (HalfOpTy (hiop (OpTy VPR128:$Rn))),
+ (HalfOpTy (Neon_vdup (i32 GPR32:$Re))))),
+ (INST VPR128:$src, VPR128:$Rn, (DupInst $Re), 0)>;
+
+multiclass NI_2VEL_v3_pat<string subop, SDPatternOperator op> {
+ def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
+ op, VPR128, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
+
+ def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
+ op, VPR128, VPR64, VPR128, v2i64, v2i32, v4i32>;
+
+ def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
+ op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
+
+ def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
+ op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
+
+ def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_4s8h"),
+ op, v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
+
+ def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_2d4s"),
+ op, v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
+
+ // Index can only be half of the max value for lane in 64-bit vector
+
+ def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
+ op, VPR128, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
+
+ def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
+ op, VPR128, VPR64, VPR64, v2i64, v2i32, v2i32>;
+
+ def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
+ op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
+
+ def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
+ op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
+}
+
+defm SMLAL_lane_v3 : NI_2VEL_v3_pat<"SMLALvve", Neon_smlal>;
+defm UMLAL_lane_v3 : NI_2VEL_v3_pat<"UMLALvve", Neon_umlal>;
+defm SMLSL_lane_v3 : NI_2VEL_v3_pat<"SMLSLvve", Neon_smlsl>;
+defm UMLSL_lane_v3 : NI_2VEL_v3_pat<"UMLSLvve", Neon_umlsl>;
+
+// Pattern for lane in 128-bit vector
+class NI_2VEL2_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand EleOpVPR, ValueType ResTy,
+ ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
+ SDPatternOperator hiop>
+ : Pat<(ResTy (op
+ (HalfOpTy (hiop (OpTy VPR128:$Rn))),
+ (HalfOpTy (Neon_vduplane
+ (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+ (INST VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector
+class NI_2VEL2_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
+ RegisterOperand EleOpVPR, ValueType ResTy,
+ ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
+ SDPatternOperator hiop>
+ : Pat<(ResTy (op
+ (HalfOpTy (hiop (OpTy VPR128:$Rn))),
+ (HalfOpTy (Neon_vduplane
+ (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+ (INST VPR128:$Rn,
+ (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
+
+// Pattern for fixed lane 0
+class NI_2VEL2_mul_lane0<Instruction INST, SDPatternOperator op,
+ ValueType ResTy, ValueType OpTy, ValueType HalfOpTy,
+ SDPatternOperator hiop, Instruction DupInst>
+ : Pat<(ResTy (op
+ (HalfOpTy (hiop (OpTy VPR128:$Rn))),
+ (HalfOpTy (Neon_vdup (i32 GPR32:$Re))))),
+ (INST VPR128:$Rn, (DupInst $Re), 0)>;
+
+multiclass NI_2VEL_mul_v3_pat<string subop, SDPatternOperator op> {
+ def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
+ op, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
+
+ def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
+ op, VPR64, VPR128, v2i64, v2i32, v4i32>;
+
+ def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
+ op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
+
+ def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
+ op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
+
+ def : NI_2VEL2_mul_lane0<!cast<Instruction>(subop # "_4s8h"),
+ op, v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
+
+ def : NI_2VEL2_mul_lane0<!cast<Instruction>(subop # "_2d4s"),
+ op, v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
+
+ // Index can only be half of the max value for lane in 64-bit vector
+
+ def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
+ op, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
+
+ def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
+ op, VPR64, VPR64, v2i64, v2i32, v2i32>;
+
+ def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
+ op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
+
+ def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
+ op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
+}
+
+defm SMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SMULLve", int_arm_neon_vmulls>;
+defm UMULL_lane_v3 : NI_2VEL_mul_v3_pat<"UMULLve", int_arm_neon_vmullu>;
+defm SQDMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SQDMULLve", int_arm_neon_vqdmull>;
+
+multiclass NI_qdma<SDPatternOperator op> {
+ def _4s : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
+ (op node:$Ra,
+ (v4i32 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
+
+ def _2d : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
+ (op node:$Ra,
+ (v2i64 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
+}
+
+defm Neon_qdmlal : NI_qdma<int_arm_neon_vqadds>;
+defm Neon_qdmlsl : NI_qdma<int_arm_neon_vqsubs>;
+
+multiclass NI_2VEL_v3_qdma_pat<string subop, string op> {
+ def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
+ !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR128Lo,
+ v4i32, v4i16, v8i16>;
+
+ def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
+ !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR128,
+ v2i64, v2i32, v4i32>;
+
+ def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
+ !cast<PatFrag>(op # "_4s"), VPR128Lo,
+ v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
+
+ def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
+ !cast<PatFrag>(op # "_2d"), VPR128,
+ v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
+
+ def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_4s8h"),
+ !cast<PatFrag>(op # "_4s"),
+ v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
+
+ def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_2d4s"),
+ !cast<PatFrag>(op # "_2d"),
+ v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
+
+ // Index can only be half of the max value for lane in 64-bit vector
+
+ def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
+ !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR64Lo,
+ v4i32, v4i16, v4i16>;
+
+ def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
+ !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR64,
+ v2i64, v2i32, v2i32>;
+
+ def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
+ !cast<PatFrag>(op # "_4s"), VPR64Lo,
+ v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
+
+ def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
+ !cast<PatFrag>(op # "_2d"), VPR64,
+ v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
+}
+
+defm SQDMLAL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLALvve", "Neon_qdmlal">;
+defm SQDMLSL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLSLvve", "Neon_qdmlsl">;
+
+// End of implementation for instruction class (3V Elem)
+
+class NeonI_REV<string asmop, string Res, bits<2> size, bit Q, bit U,
+ bits<5> opcode, RegisterOperand ResVPR, ValueType ResTy,
+ SDPatternOperator Neon_Rev>
+ : NeonI_2VMisc<Q, U, size, opcode,
+ (outs ResVPR:$Rd), (ins ResVPR:$Rn),
+ asmop # "\t$Rd." # Res # ", $Rn." # Res,
+ [(set (ResTy ResVPR:$Rd),
+ (ResTy (Neon_Rev (ResTy ResVPR:$Rn))))],
+ NoItinerary> ;
+
+def REV64_16b : NeonI_REV<"rev64", "16b", 0b00, 0b1, 0b0, 0b00000, VPR128,
+ v16i8, Neon_rev64>;
+def REV64_8h : NeonI_REV<"rev64", "8h", 0b01, 0b1, 0b0, 0b00000, VPR128,
+ v8i16, Neon_rev64>;
+def REV64_4s : NeonI_REV<"rev64", "4s", 0b10, 0b1, 0b0, 0b00000, VPR128,
+ v4i32, Neon_rev64>;
+def REV64_8b : NeonI_REV<"rev64", "8b", 0b00, 0b0, 0b0, 0b00000, VPR64,
+ v8i8, Neon_rev64>;
+def REV64_4h : NeonI_REV<"rev64", "4h", 0b01, 0b0, 0b0, 0b00000, VPR64,
+ v4i16, Neon_rev64>;
+def REV64_2s : NeonI_REV<"rev64", "2s", 0b10, 0b0, 0b0, 0b00000, VPR64,
+ v2i32, Neon_rev64>;
+
+def : Pat<(v4f32 (Neon_rev64 (v4f32 VPR128:$Rn))), (REV64_4s VPR128:$Rn)>;
+def : Pat<(v2f32 (Neon_rev64 (v2f32 VPR64:$Rn))), (REV64_2s VPR64:$Rn)>;
+
+def REV32_16b : NeonI_REV<"rev32", "16b", 0b00, 0b1, 0b1, 0b00000, VPR128,
+ v16i8, Neon_rev32>;
+def REV32_8h : NeonI_REV<"rev32", "8h", 0b01, 0b1, 0b1, 0b00000, VPR128,
+ v8i16, Neon_rev32>;
+def REV32_8b : NeonI_REV<"rev32", "8b", 0b00, 0b0, 0b1, 0b00000, VPR64,
+ v8i8, Neon_rev32>;
+def REV32_4h : NeonI_REV<"rev32", "4h", 0b01, 0b0, 0b1, 0b00000, VPR64,
+ v4i16, Neon_rev32>;
+
+def REV16_16b : NeonI_REV<"rev16", "16b", 0b00, 0b1, 0b0, 0b00001, VPR128,
+ v16i8, Neon_rev16>;
+def REV16_8b : NeonI_REV<"rev16", "8b", 0b00, 0b0, 0b0, 0b00001, VPR64,
+ v8i8, Neon_rev16>;
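+
+// For example, REV64_4s matches "rev64 v0.4s, v1.4s" (register numbers are
+// illustrative), reversing the order of the 32-bit elements within each
+// 64-bit half of the vector.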
+
+multiclass NeonI_PairwiseAdd<string asmop, bit U, bits<5> opcode,
+ SDPatternOperator Neon_Padd> {
+ def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.8h, $Rn.16b",
+ [(set (v8i16 VPR128:$Rd),
+ (v8i16 (Neon_Padd (v16i8 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.4h, $Rn.8b",
+ [(set (v4i16 VPR64:$Rd),
+ (v4i16 (Neon_Padd (v8i8 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.4s, $Rn.8h",
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (Neon_Padd (v8i16 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.2s, $Rn.4h",
+ [(set (v2i32 VPR64:$Rd),
+ (v2i32 (Neon_Padd (v4i16 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.2d, $Rn.4s",
+ [(set (v2i64 VPR128:$Rd),
+ (v2i64 (Neon_Padd (v4i32 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.1d, $Rn.2s",
+ [(set (v1i64 VPR64:$Rd),
+ (v1i64 (Neon_Padd (v2i32 VPR64:$Rn))))],
+ NoItinerary>;
+}
+
+defm SADDLP : NeonI_PairwiseAdd<"saddlp", 0b0, 0b00010,
+ int_arm_neon_vpaddls>;
+defm UADDLP : NeonI_PairwiseAdd<"uaddlp", 0b1, 0b00010,
+ int_arm_neon_vpaddlu>;
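+
+// For example, the 8h4s SADDLP form matches "saddlp v0.4s, v1.8h" (register
+// numbers are illustrative), adding each pair of adjacent signed halfwords
+// and widening the sums to words.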
+
+multiclass NeonI_PairwiseAddAcc<string asmop, bit U, bits<5> opcode,
+ SDPatternOperator Neon_Padd> {
+ let Constraints = "$src = $Rd" in {
+ def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "\t$Rd.8h, $Rn.16b",
+ [(set (v8i16 VPR128:$Rd),
+ (v8i16 (Neon_Padd
+ (v8i16 VPR128:$src), (v16i8 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
+ asmop # "\t$Rd.4h, $Rn.8b",
+ [(set (v4i16 VPR64:$Rd),
+ (v4i16 (Neon_Padd
+ (v4i16 VPR64:$src), (v8i8 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "\t$Rd.4s, $Rn.8h",
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (Neon_Padd
+ (v4i32 VPR128:$src), (v8i16 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
+ asmop # "\t$Rd.2s, $Rn.4h",
+ [(set (v2i32 VPR64:$Rd),
+ (v2i32 (Neon_Padd
+ (v2i32 VPR64:$src), (v4i16 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "\t$Rd.2d, $Rn.4s",
+ [(set (v2i64 VPR128:$Rd),
+ (v2i64 (Neon_Padd
+ (v2i64 VPR128:$src), (v4i32 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
+ asmop # "\t$Rd.1d, $Rn.2s",
+ [(set (v1i64 VPR64:$Rd),
+ (v1i64 (Neon_Padd
+ (v1i64 VPR64:$src), (v2i32 VPR64:$Rn))))],
+ NoItinerary>;
+ }
+}
+
+defm SADALP : NeonI_PairwiseAddAcc<"sadalp", 0b0, 0b00110,
+ int_arm_neon_vpadals>;
+defm UADALP : NeonI_PairwiseAddAcc<"uadalp", 0b1, 0b00110,
+ int_arm_neon_vpadalu>;
+
+multiclass NeonI_2VMisc_BHSDsize_1Arg<string asmop, bit U, bits<5> opcode> {
+ def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.16b, $Rn.16b",
+ [], NoItinerary>;
+
+ def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.8h, $Rn.8h",
+ [], NoItinerary>;
+
+ def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.4s, $Rn.4s",
+ [], NoItinerary>;
+
+ def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.2d, $Rn.2d",
+ [], NoItinerary>;
+
+ def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.8b, $Rn.8b",
+ [], NoItinerary>;
+
+ def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.4h, $Rn.4h",
+ [], NoItinerary>;
+
+ def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.2s, $Rn.2s",
+ [], NoItinerary>;
+}
+
+defm SQABS : NeonI_2VMisc_BHSDsize_1Arg<"sqabs", 0b0, 0b00111>;
+defm SQNEG : NeonI_2VMisc_BHSDsize_1Arg<"sqneg", 0b1, 0b00111>;
+defm ABS : NeonI_2VMisc_BHSDsize_1Arg<"abs", 0b0, 0b01011>;
+defm NEG : NeonI_2VMisc_BHSDsize_1Arg<"neg", 0b1, 0b01011>;
+
+multiclass NeonI_2VMisc_BHSD_1Arg_Pattern<string Prefix,
+ SDPatternOperator Neon_Op> {
+ def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$Rn))),
+ (v16i8 (!cast<Instruction>(Prefix # 16b) (v16i8 VPR128:$Rn)))>;
+
+ def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$Rn))),
+ (v8i16 (!cast<Instruction>(Prefix # 8h) (v8i16 VPR128:$Rn)))>;
+
+ def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$Rn))),
+ (v4i32 (!cast<Instruction>(Prefix # 4s) (v4i32 VPR128:$Rn)))>;
+
+ def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$Rn))),
+ (v2i64 (!cast<Instruction>(Prefix # 2d) (v2i64 VPR128:$Rn)))>;
+
+ def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$Rn))),
+ (v8i8 (!cast<Instruction>(Prefix # 8b) (v8i8 VPR64:$Rn)))>;
+
+ def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$Rn))),
+ (v4i16 (!cast<Instruction>(Prefix # 4h) (v4i16 VPR64:$Rn)))>;
+
+ def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$Rn))),
+ (v2i32 (!cast<Instruction>(Prefix # 2s) (v2i32 VPR64:$Rn)))>;
+}
+
+defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQABS", int_arm_neon_vqabs>;
+defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQNEG", int_arm_neon_vqneg>;
+defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"ABS", int_arm_neon_vabs>;
+
+def : Pat<(v16i8 (sub
+ (v16i8 Neon_AllZero),
+ (v16i8 VPR128:$Rn))),
+ (v16i8 (NEG16b (v16i8 VPR128:$Rn)))>;
+def : Pat<(v8i8 (sub
+ (v8i8 Neon_AllZero),
+ (v8i8 VPR64:$Rn))),
+ (v8i8 (NEG8b (v8i8 VPR64:$Rn)))>;
+def : Pat<(v8i16 (sub
+ (v8i16 (bitconvert (v16i8 Neon_AllZero))),
+ (v8i16 VPR128:$Rn))),
+ (v8i16 (NEG8h (v8i16 VPR128:$Rn)))>;
+def : Pat<(v4i16 (sub
+ (v4i16 (bitconvert (v8i8 Neon_AllZero))),
+ (v4i16 VPR64:$Rn))),
+ (v4i16 (NEG4h (v4i16 VPR64:$Rn)))>;
+def : Pat<(v4i32 (sub
+ (v4i32 (bitconvert (v16i8 Neon_AllZero))),
+ (v4i32 VPR128:$Rn))),
+ (v4i32 (NEG4s (v4i32 VPR128:$Rn)))>;
+def : Pat<(v2i32 (sub
+ (v2i32 (bitconvert (v8i8 Neon_AllZero))),
+ (v2i32 VPR64:$Rn))),
+ (v2i32 (NEG2s (v2i32 VPR64:$Rn)))>;
+def : Pat<(v2i64 (sub
+ (v2i64 (bitconvert (v16i8 Neon_AllZero))),
+ (v2i64 VPR128:$Rn))),
+ (v2i64 (NEG2d (v2i64 VPR128:$Rn)))>;
+
+multiclass NeonI_2VMisc_BHSDsize_2Args<string asmop, bit U, bits<5> opcode> {
+ let Constraints = "$src = $Rd" in {
+ def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "\t$Rd.16b, $Rn.16b",
+ [], NoItinerary>;
+
+ def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "\t$Rd.8h, $Rn.8h",
+ [], NoItinerary>;
+
+ def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "\t$Rd.4s, $Rn.4s",
+ [], NoItinerary>;
+
+ def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "\t$Rd.2d, $Rn.2d",
+ [], NoItinerary>;
+
+ def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
+ asmop # "\t$Rd.8b, $Rn.8b",
+ [], NoItinerary>;
+
+ def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
+ asmop # "\t$Rd.4h, $Rn.4h",
+ [], NoItinerary>;
+
+ def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
+ asmop # "\t$Rd.2s, $Rn.2s",
+ [], NoItinerary>;
+ }
+}
+
+defm SUQADD : NeonI_2VMisc_BHSDsize_2Args<"suqadd", 0b0, 0b00011>;
+defm USQADD : NeonI_2VMisc_BHSDsize_2Args<"usqadd", 0b1, 0b00011>;
+
+multiclass NeonI_2VMisc_BHSD_2Args_Pattern<string Prefix,
+ SDPatternOperator Neon_Op> {
+ def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$src), (v16i8 VPR128:$Rn))),
+ (v16i8 (!cast<Instruction>(Prefix # 16b)
+ (v16i8 VPR128:$src), (v16i8 VPR128:$Rn)))>;
+
+ def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$src), (v8i16 VPR128:$Rn))),
+ (v8i16 (!cast<Instruction>(Prefix # 8h)
+ (v8i16 VPR128:$src), (v8i16 VPR128:$Rn)))>;
+
+ def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$src), (v4i32 VPR128:$Rn))),
+ (v4i32 (!cast<Instruction>(Prefix # 4s)
+ (v4i32 VPR128:$src), (v4i32 VPR128:$Rn)))>;
+
+ def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$src), (v2i64 VPR128:$Rn))),
+ (v2i64 (!cast<Instruction>(Prefix # 2d)
+ (v2i64 VPR128:$src), (v2i64 VPR128:$Rn)))>;
+
+ def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$src), (v8i8 VPR64:$Rn))),
+ (v8i8 (!cast<Instruction>(Prefix # 8b)
+ (v8i8 VPR64:$src), (v8i8 VPR64:$Rn)))>;
+
+ def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$src), (v4i16 VPR64:$Rn))),
+ (v4i16 (!cast<Instruction>(Prefix # 4h)
+ (v4i16 VPR64:$src), (v4i16 VPR64:$Rn)))>;
+
+ def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$src), (v2i32 VPR64:$Rn))),
+ (v2i32 (!cast<Instruction>(Prefix # 2s)
+ (v2i32 VPR64:$src), (v2i32 VPR64:$Rn)))>;
+}
+
+defm : NeonI_2VMisc_BHSD_2Args_Pattern<"SUQADD", int_aarch64_neon_suqadd>;
+defm : NeonI_2VMisc_BHSD_2Args_Pattern<"USQADD", int_aarch64_neon_usqadd>;
+
+multiclass NeonI_2VMisc_BHSsizes<string asmop, bit U,
+ SDPatternOperator Neon_Op> {
+ def 16b : NeonI_2VMisc<0b1, U, 0b00, 0b00100,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.16b, $Rn.16b",
+ [(set (v16i8 VPR128:$Rd),
+ (v16i8 (Neon_Op (v16i8 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 8h : NeonI_2VMisc<0b1, U, 0b01, 0b00100,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.8h, $Rn.8h",
+ [(set (v8i16 VPR128:$Rd),
+ (v8i16 (Neon_Op (v8i16 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 4s : NeonI_2VMisc<0b1, U, 0b10, 0b00100,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.4s, $Rn.4s",
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 8b : NeonI_2VMisc<0b0, U, 0b00, 0b00100,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.8b, $Rn.8b",
+ [(set (v8i8 VPR64:$Rd),
+ (v8i8 (Neon_Op (v8i8 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def 4h : NeonI_2VMisc<0b0, U, 0b01, 0b00100,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.4h, $Rn.4h",
+ [(set (v4i16 VPR64:$Rd),
+ (v4i16 (Neon_Op (v4i16 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def 2s : NeonI_2VMisc<0b0, U, 0b10, 0b00100,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.2s, $Rn.2s",
+ [(set (v2i32 VPR64:$Rd),
+ (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
+ NoItinerary>;
+}
+
+defm CLS : NeonI_2VMisc_BHSsizes<"cls", 0b0, int_arm_neon_vcls>;
+defm CLZ : NeonI_2VMisc_BHSsizes<"clz", 0b1, ctlz>;
+
+multiclass NeonI_2VMisc_Bsize<string asmop, bit U, bits<2> size,
+ bits<5> Opcode> {
+ def 16b : NeonI_2VMisc<0b1, U, size, Opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.16b, $Rn.16b",
+ [], NoItinerary>;
+
+ def 8b : NeonI_2VMisc<0b0, U, size, Opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.8b, $Rn.8b",
+ [], NoItinerary>;
+}
+
+defm CNT : NeonI_2VMisc_Bsize<"cnt", 0b0, 0b00, 0b00101>;
+defm NOT : NeonI_2VMisc_Bsize<"not", 0b1, 0b00, 0b00101>;
+defm RBIT : NeonI_2VMisc_Bsize<"rbit", 0b1, 0b01, 0b00101>;
+
+def : NeonInstAlias<"mvn $Rd.16b, $Rn.16b",
+ (NOT16b VPR128:$Rd, VPR128:$Rn), 0>;
+def : NeonInstAlias<"mvn $Rd.8b, $Rn.8b",
+ (NOT8b VPR64:$Rd, VPR64:$Rn), 0>;
+
+def : Pat<(v16i8 (ctpop (v16i8 VPR128:$Rn))),
+ (v16i8 (CNT16b (v16i8 VPR128:$Rn)))>;
+def : Pat<(v8i8 (ctpop (v8i8 VPR64:$Rn))),
+ (v8i8 (CNT8b (v8i8 VPR64:$Rn)))>;
+
+def : Pat<(v16i8 (xor
+ (v16i8 VPR128:$Rn),
+ (v16i8 Neon_AllOne))),
+ (v16i8 (NOT16b (v16i8 VPR128:$Rn)))>;
+def : Pat<(v8i8 (xor
+ (v8i8 VPR64:$Rn),
+ (v8i8 Neon_AllOne))),
+ (v8i8 (NOT8b (v8i8 VPR64:$Rn)))>;
+def : Pat<(v8i16 (xor
+ (v8i16 VPR128:$Rn),
+ (v8i16 (bitconvert (v16i8 Neon_AllOne))))),
+ (NOT16b VPR128:$Rn)>;
+def : Pat<(v4i16 (xor
+ (v4i16 VPR64:$Rn),
+ (v4i16 (bitconvert (v8i8 Neon_AllOne))))),
+ (NOT8b VPR64:$Rn)>;
+def : Pat<(v4i32 (xor
+ (v4i32 VPR128:$Rn),
+ (v4i32 (bitconvert (v16i8 Neon_AllOne))))),
+ (NOT16b VPR128:$Rn)>;
+def : Pat<(v2i32 (xor
+ (v2i32 VPR64:$Rn),
+ (v2i32 (bitconvert (v8i8 Neon_AllOne))))),
+ (NOT8b VPR64:$Rn)>;
+def : Pat<(v2i64 (xor
+ (v2i64 VPR128:$Rn),
+ (v2i64 (bitconvert (v16i8 Neon_AllOne))))),
+ (NOT16b VPR128:$Rn)>;
+
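+The xor-against-all-ones patterns above all select the byte-sized NOT instruction, because bitwise complement does not depend on the element width. A minimal scalar sketch of the identity these patterns rely on, with an illustrative helper name that is not part of the backend:
+
+  #include <cassert>
+  #include <cstdint>
+
+  // x ^ ~0 is the bitwise complement of x for any integer width, which is
+  // why XORs against an all-ones vector of any element size can all be
+  // selected as the plain byte-wise NOT instruction.
+  uint64_t complement_via_xor(uint64_t x) { return x ^ ~UINT64_C(0); }
+
+  int main() {
+    assert(complement_via_xor(0x00ff00ff00ff00ffULL) == 0xff00ff00ff00ff00ULL);
+    return 0;
+  }
+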
+def : Pat<(v16i8 (int_aarch64_neon_rbit (v16i8 VPR128:$Rn))),
+ (v16i8 (RBIT16b (v16i8 VPR128:$Rn)))>;
+def : Pat<(v8i8 (int_aarch64_neon_rbit (v8i8 VPR64:$Rn))),
+ (v8i8 (RBIT8b (v8i8 VPR64:$Rn)))>;
+
+multiclass NeonI_2VMisc_SDsizes<string asmop, bit U, bits<5> opcode,
+ SDPatternOperator Neon_Op> {
+ def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.4s, $Rn.4s",
+ [(set (v4f32 VPR128:$Rd),
+ (v4f32 (Neon_Op (v4f32 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.2d, $Rn.2d",
+ [(set (v2f64 VPR128:$Rd),
+ (v2f64 (Neon_Op (v2f64 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.2s, $Rn.2s",
+ [(set (v2f32 VPR64:$Rd),
+ (v2f32 (Neon_Op (v2f32 VPR64:$Rn))))],
+ NoItinerary>;
+}
+
+defm FABS : NeonI_2VMisc_SDsizes<"fabs", 0b0, 0b01111, fabs>;
+defm FNEG : NeonI_2VMisc_SDsizes<"fneg", 0b1, 0b01111, fneg>;
+
+multiclass NeonI_2VMisc_HSD_Narrow<string asmop, bit U, bits<5> opcode> {
+ def 8h8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
+ (outs VPR64:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.8b, $Rn.8h",
+ [], NoItinerary>;
+
+ def 4s4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
+ (outs VPR64:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.4h, $Rn.4s",
+ [], NoItinerary>;
+
+ def 2d2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
+ (outs VPR64:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.2s, $Rn.2d",
+ [], NoItinerary>;
+
+ let Constraints = "$Rd = $src" in {
+ def 8h16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "2\t$Rd.16b, $Rn.8h",
+ [], NoItinerary>;
+
+ def 4s8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "2\t$Rd.8h, $Rn.4s",
+ [], NoItinerary>;
+
+ def 2d4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "2\t$Rd.4s, $Rn.2d",
+ [], NoItinerary>;
+ }
+}
+
+defm XTN : NeonI_2VMisc_HSD_Narrow<"xtn", 0b0, 0b10010>;
+defm SQXTUN : NeonI_2VMisc_HSD_Narrow<"sqxtun", 0b1, 0b10010>;
+defm SQXTN : NeonI_2VMisc_HSD_Narrow<"sqxtn", 0b0, 0b10100>;
+defm UQXTN : NeonI_2VMisc_HSD_Narrow<"uqxtn", 0b1, 0b10100>;
+
+multiclass NeonI_2VMisc_Narrow_Patterns<string Prefix,
+ SDPatternOperator Neon_Op> {
+ def : Pat<(v8i8 (Neon_Op (v8i16 VPR128:$Rn))),
+ (v8i8 (!cast<Instruction>(Prefix # 8h8b) (v8i16 VPR128:$Rn)))>;
+
+ def : Pat<(v4i16 (Neon_Op (v4i32 VPR128:$Rn))),
+ (v4i16 (!cast<Instruction>(Prefix # 4s4h) (v4i32 VPR128:$Rn)))>;
+
+ def : Pat<(v2i32 (Neon_Op (v2i64 VPR128:$Rn))),
+ (v2i32 (!cast<Instruction>(Prefix # 2d2s) (v2i64 VPR128:$Rn)))>;
+
+ def : Pat<(v16i8 (concat_vectors
+ (v8i8 VPR64:$src),
+ (v8i8 (Neon_Op (v8i16 VPR128:$Rn))))),
+ (!cast<Instruction>(Prefix # 8h16b)
+ (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
+ VPR128:$Rn)>;
+
+ def : Pat<(v8i16 (concat_vectors
+ (v4i16 VPR64:$src),
+ (v4i16 (Neon_Op (v4i32 VPR128:$Rn))))),
+ (!cast<Instruction>(Prefix # 4s8h)
+ (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
+ VPR128:$Rn)>;
+
+ def : Pat<(v4i32 (concat_vectors
+ (v2i32 VPR64:$src),
+ (v2i32 (Neon_Op (v2i64 VPR128:$Rn))))),
+ (!cast<Instruction>(Prefix # 2d4s)
+ (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
+ VPR128:$Rn)>;
+}
+
+defm : NeonI_2VMisc_Narrow_Patterns<"XTN", trunc>;
+defm : NeonI_2VMisc_Narrow_Patterns<"SQXTUN", int_arm_neon_vqmovnsu>;
+defm : NeonI_2VMisc_Narrow_Patterns<"SQXTN", int_arm_neon_vqmovns>;
+defm : NeonI_2VMisc_Narrow_Patterns<"UQXTN", int_arm_neon_vqmovnu>;
+
+multiclass NeonI_2VMisc_SHIFT<string asmop, bit U, bits<5> opcode> {
+ let DecoderMethod = "DecodeSHLLInstruction" in {
+ def 8b8h : NeonI_2VMisc<0b0, U, 0b00, opcode,
+ (outs VPR128:$Rd),
+ (ins VPR64:$Rn, uimm_exact8:$Imm),
+ asmop # "\t$Rd.8h, $Rn.8b, $Imm",
+ [], NoItinerary>;
+
+ def 4h4s : NeonI_2VMisc<0b0, U, 0b01, opcode,
+ (outs VPR128:$Rd),
+ (ins VPR64:$Rn, uimm_exact16:$Imm),
+ asmop # "\t$Rd.4s, $Rn.4h, $Imm",
+ [], NoItinerary>;
+
+ def 2s2d : NeonI_2VMisc<0b0, U, 0b10, opcode,
+ (outs VPR128:$Rd),
+ (ins VPR64:$Rn, uimm_exact32:$Imm),
+ asmop # "\t$Rd.2d, $Rn.2s, $Imm",
+ [], NoItinerary>;
+
+ def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
+ (outs VPR128:$Rd),
+ (ins VPR128:$Rn, uimm_exact8:$Imm),
+ asmop # "2\t$Rd.8h, $Rn.16b, $Imm",
+ [], NoItinerary>;
+
+ def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
+ (outs VPR128:$Rd),
+ (ins VPR128:$Rn, uimm_exact16:$Imm),
+ asmop # "2\t$Rd.4s, $Rn.8h, $Imm",
+ [], NoItinerary>;
+
+ def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
+ (outs VPR128:$Rd),
+ (ins VPR128:$Rn, uimm_exact32:$Imm),
+ asmop # "2\t$Rd.2d, $Rn.4s, $Imm",
+ [], NoItinerary>;
+ }
+}
+
+defm SHLL : NeonI_2VMisc_SHIFT<"shll", 0b1, 0b10011>;
+
+class NeonI_SHLL_Patterns<ValueType OpTy, ValueType DesTy,
+ SDPatternOperator ExtOp, Operand Neon_Imm,
+ string suffix>
+ : Pat<(DesTy (shl
+ (DesTy (ExtOp (OpTy VPR64:$Rn))),
+ (DesTy (Neon_vdup
+ (i32 Neon_Imm:$Imm))))),
+ (!cast<Instruction>("SHLL" # suffix) VPR64:$Rn, Neon_Imm:$Imm)>;
+
+class NeonI_SHLL_High_Patterns<ValueType OpTy, ValueType DesTy,
+ SDPatternOperator ExtOp, Operand Neon_Imm,
+ string suffix, PatFrag GetHigh>
+ : Pat<(DesTy (shl
+ (DesTy (ExtOp
+ (OpTy (GetHigh VPR128:$Rn)))),
+ (DesTy (Neon_vdup
+ (i32 Neon_Imm:$Imm))))),
+ (!cast<Instruction>("SHLL" # suffix) VPR128:$Rn, Neon_Imm:$Imm)>;
+
+def : NeonI_SHLL_Patterns<v8i8, v8i16, zext, uimm_exact8, "8b8h">;
+def : NeonI_SHLL_Patterns<v8i8, v8i16, sext, uimm_exact8, "8b8h">;
+def : NeonI_SHLL_Patterns<v4i16, v4i32, zext, uimm_exact16, "4h4s">;
+def : NeonI_SHLL_Patterns<v4i16, v4i32, sext, uimm_exact16, "4h4s">;
+def : NeonI_SHLL_Patterns<v2i32, v2i64, zext, uimm_exact32, "2s2d">;
+def : NeonI_SHLL_Patterns<v2i32, v2i64, sext, uimm_exact32, "2s2d">;
+def : NeonI_SHLL_High_Patterns<v8i8, v8i16, zext, uimm_exact8, "16b8h",
+ Neon_High16B>;
+def : NeonI_SHLL_High_Patterns<v8i8, v8i16, sext, uimm_exact8, "16b8h",
+ Neon_High16B>;
+def : NeonI_SHLL_High_Patterns<v4i16, v4i32, zext, uimm_exact16, "8h4s",
+ Neon_High8H>;
+def : NeonI_SHLL_High_Patterns<v4i16, v4i32, sext, uimm_exact16, "8h4s",
+ Neon_High8H>;
+def : NeonI_SHLL_High_Patterns<v2i32, v2i64, zext, uimm_exact32, "4s2d",
+ Neon_High4S>;
+def : NeonI_SHLL_High_Patterns<v2i32, v2i64, sext, uimm_exact32, "4s2d",
+ Neon_High4S>;
+
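+The SHLL patterns above match a widening zero- or sign-extension followed by a shift left by a splatted constant that must equal the source element width (uimm_exact8/16/32). A rough scalar model of one unsigned lane, with illustrative names that are not part of the backend:
+
+  #include <cassert>
+  #include <cstdint>
+
+  // One lane of "shll vd.8h, vn.8b, #8": zero-extend the 8-bit element to
+  // 16 bits, then shift left by the element width. The signed variants
+  // sign-extend first; the "shll2" forms read the high half of the source.
+  uint16_t shll_lane_u8(uint8_t x) { return static_cast<uint16_t>(x) << 8; }
+
+  int main() {
+    assert(shll_lane_u8(0xAB) == 0xAB00);
+    return 0;
+  }
+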
+multiclass NeonI_2VMisc_SD_Narrow<string asmop, bit U, bits<5> opcode> {
+ def 4s4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
+ (outs VPR64:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.4h, $Rn.4s",
+ [], NoItinerary>;
+
+ def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
+ (outs VPR64:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.2s, $Rn.2d",
+ [], NoItinerary>;
+
+ let Constraints = "$src = $Rd" in {
+ def 4s8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "2\t$Rd.8h, $Rn.4s",
+ [], NoItinerary>;
+
+ def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "2\t$Rd.4s, $Rn.2d",
+ [], NoItinerary>;
+ }
+}
+
+defm FCVTN : NeonI_2VMisc_SD_Narrow<"fcvtn", 0b0, 0b10110>;
+
+multiclass NeonI_2VMisc_Narrow_Pattern<string prefix,
+ SDPatternOperator f32_to_f16_Op,
+ SDPatternOperator f64_to_f32_Op> {
+
+ def : Pat<(v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))),
+ (!cast<Instruction>(prefix # "4s4h") (v4f32 VPR128:$Rn))>;
+
+ def : Pat<(v8i16 (concat_vectors
+ (v4i16 VPR64:$src),
+ (v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))))),
+ (!cast<Instruction>(prefix # "4s8h")
+ (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
+ (v4f32 VPR128:$Rn))>;
+
+ def : Pat<(v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))),
+ (!cast<Instruction>(prefix # "2d2s") (v2f64 VPR128:$Rn))>;
+
+ def : Pat<(v4f32 (concat_vectors
+ (v2f32 VPR64:$src),
+ (v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))))),
+ (!cast<Instruction>(prefix # "2d4s")
+ (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
+ (v2f64 VPR128:$Rn))>;
+}
+
+defm : NeonI_2VMisc_Narrow_Pattern<"FCVTN", int_arm_neon_vcvtfp2hf, fround>;
+
+multiclass NeonI_2VMisc_D_Narrow<string asmop, string prefix, bit U,
+ bits<5> opcode> {
+ def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
+ (outs VPR64:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.2s, $Rn.2d",
+ [], NoItinerary>;
+
+ def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "2\t$Rd.4s, $Rn.2d",
+ [], NoItinerary> {
+ let Constraints = "$src = $Rd";
+ }
+
+ def : Pat<(v2f32 (int_aarch64_neon_fcvtxn (v2f64 VPR128:$Rn))),
+ (!cast<Instruction>(prefix # "2d2s") VPR128:$Rn)>;
+
+ def : Pat<(v4f32 (concat_vectors
+ (v2f32 VPR64:$src),
+ (v2f32 (int_aarch64_neon_fcvtxn (v2f64 VPR128:$Rn))))),
+ (!cast<Instruction>(prefix # "2d4s")
+ (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
+ VPR128:$Rn)>;
+}
+
+defm FCVTXN : NeonI_2VMisc_D_Narrow<"fcvtxn","FCVTXN", 0b1, 0b10110>;
+
+def Neon_High4Float : PatFrag<(ops node:$in),
+ (extract_subvector (v4f32 node:$in), (iPTR 2))>;
+
+multiclass NeonI_2VMisc_HS_Extend<string asmop, bit U, bits<5> opcode> {
+ def 4h4s : NeonI_2VMisc<0b0, U, 0b00, opcode,
+ (outs VPR128:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.4s, $Rn.4h",
+ [], NoItinerary>;
+
+ def 2s2d : NeonI_2VMisc<0b0, U, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.2d, $Rn.2s",
+ [], NoItinerary>;
+
+ def 8h4s : NeonI_2VMisc<0b1, U, 0b00, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "2\t$Rd.4s, $Rn.8h",
+ [], NoItinerary>;
+
+ def 4s2d : NeonI_2VMisc<0b1, U, 0b01, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "2\t$Rd.2d, $Rn.4s",
+ [], NoItinerary>;
+}
+
+defm FCVTL : NeonI_2VMisc_HS_Extend<"fcvtl", 0b0, 0b10111>;
+
+multiclass NeonI_2VMisc_Extend_Pattern<string prefix> {
+ def : Pat<(v4f32 (int_arm_neon_vcvthf2fp (v4i16 VPR64:$Rn))),
+ (!cast<Instruction>(prefix # "4h4s") VPR64:$Rn)>;
+
+ def : Pat<(v4f32 (int_arm_neon_vcvthf2fp
+ (v4i16 (Neon_High8H
+ (v8i16 VPR128:$Rn))))),
+ (!cast<Instruction>(prefix # "8h4s") VPR128:$Rn)>;
+
+ def : Pat<(v2f64 (fextend (v2f32 VPR64:$Rn))),
+ (!cast<Instruction>(prefix # "2s2d") VPR64:$Rn)>;
+
+ def : Pat<(v2f64 (fextend
+ (v2f32 (Neon_High4Float
+ (v4f32 VPR128:$Rn))))),
+ (!cast<Instruction>(prefix # "4s2d") VPR128:$Rn)>;
+}
+
+defm : NeonI_2VMisc_Extend_Pattern<"FCVTL">;
+
+multiclass NeonI_2VMisc_SD_Conv<string asmop, bit Size, bit U, bits<5> opcode,
+ ValueType ResTy4s, ValueType OpTy4s,
+ ValueType ResTy2d, ValueType OpTy2d,
+ ValueType ResTy2s, ValueType OpTy2s,
+ SDPatternOperator Neon_Op> {
+
+ def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.4s, $Rn.4s",
+ [(set (ResTy4s VPR128:$Rd),
+ (ResTy4s (Neon_Op (OpTy4s VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 2d : NeonI_2VMisc<0b1, U, {Size, 0b1}, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.2d, $Rn.2d",
+ [(set (ResTy2d VPR128:$Rd),
+ (ResTy2d (Neon_Op (OpTy2d VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.2s, $Rn.2s",
+ [(set (ResTy2s VPR64:$Rd),
+ (ResTy2s (Neon_Op (OpTy2s VPR64:$Rn))))],
+ NoItinerary>;
+}
+
+multiclass NeonI_2VMisc_fp_to_int<string asmop, bit Size, bit U,
+ bits<5> opcode, SDPatternOperator Neon_Op> {
+ defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4i32, v4f32, v2i64,
+ v2f64, v2i32, v2f32, Neon_Op>;
+}
+
+defm FCVTNS : NeonI_2VMisc_fp_to_int<"fcvtns", 0b0, 0b0, 0b11010,
+ int_aarch64_neon_fcvtns>;
+defm FCVTNU : NeonI_2VMisc_fp_to_int<"fcvtnu", 0b0, 0b1, 0b11010,
+ int_aarch64_neon_fcvtnu>;
+defm FCVTPS : NeonI_2VMisc_fp_to_int<"fcvtps", 0b1, 0b0, 0b11010,
+ int_aarch64_neon_fcvtps>;
+defm FCVTPU : NeonI_2VMisc_fp_to_int<"fcvtpu", 0b1, 0b1, 0b11010,
+ int_aarch64_neon_fcvtpu>;
+defm FCVTMS : NeonI_2VMisc_fp_to_int<"fcvtms", 0b0, 0b0, 0b11011,
+ int_aarch64_neon_fcvtms>;
+defm FCVTMU : NeonI_2VMisc_fp_to_int<"fcvtmu", 0b0, 0b1, 0b11011,
+ int_aarch64_neon_fcvtmu>;
+defm FCVTZS : NeonI_2VMisc_fp_to_int<"fcvtzs", 0b1, 0b0, 0b11011, fp_to_sint>;
+defm FCVTZU : NeonI_2VMisc_fp_to_int<"fcvtzu", 0b1, 0b1, 0b11011, fp_to_uint>;
+defm FCVTAS : NeonI_2VMisc_fp_to_int<"fcvtas", 0b0, 0b0, 0b11100,
+ int_aarch64_neon_fcvtas>;
+defm FCVTAU : NeonI_2VMisc_fp_to_int<"fcvtau", 0b0, 0b1, 0b11100,
+ int_aarch64_neon_fcvtau>;
+
+multiclass NeonI_2VMisc_int_to_fp<string asmop, bit Size, bit U,
+ bits<5> opcode, SDPatternOperator Neon_Op> {
+ defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4i32, v2f64,
+ v2i64, v2f32, v2i32, Neon_Op>;
+}
+
+defm SCVTF : NeonI_2VMisc_int_to_fp<"scvtf", 0b0, 0b0, 0b11101, sint_to_fp>;
+defm UCVTF : NeonI_2VMisc_int_to_fp<"ucvtf", 0b0, 0b1, 0b11101, uint_to_fp>;
+
+multiclass NeonI_2VMisc_fp_to_fp<string asmop, bit Size, bit U,
+ bits<5> opcode, SDPatternOperator Neon_Op> {
+ defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4f32, v2f64,
+ v2f64, v2f32, v2f32, Neon_Op>;
+}
+
+defm FRINTN : NeonI_2VMisc_fp_to_fp<"frintn", 0b0, 0b0, 0b11000,
+ int_aarch64_neon_frintn>;
+defm FRINTA : NeonI_2VMisc_fp_to_fp<"frinta", 0b0, 0b1, 0b11000, frnd>;
+defm FRINTP : NeonI_2VMisc_fp_to_fp<"frintp", 0b1, 0b0, 0b11000, fceil>;
+defm FRINTM : NeonI_2VMisc_fp_to_fp<"frintm", 0b0, 0b0, 0b11001, ffloor>;
+defm FRINTX : NeonI_2VMisc_fp_to_fp<"frintx", 0b0, 0b1, 0b11001, frint>;
+defm FRINTZ : NeonI_2VMisc_fp_to_fp<"frintz", 0b1, 0b0, 0b11001, ftrunc>;
+defm FRINTI : NeonI_2VMisc_fp_to_fp<"frinti", 0b1, 0b1, 0b11001, fnearbyint>;
+defm FRECPE : NeonI_2VMisc_fp_to_fp<"frecpe", 0b1, 0b0, 0b11101,
+ int_arm_neon_vrecpe>;
+defm FRSQRTE : NeonI_2VMisc_fp_to_fp<"frsqrte", 0b1, 0b1, 0b11101,
+ int_arm_neon_vrsqrte>;
+defm FSQRT : NeonI_2VMisc_fp_to_fp<"fsqrt", 0b1, 0b1, 0b11111, fsqrt>;
+
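+The FRINT*, FRECPE, FRSQRTE and FSQRT definitions above bind each operation to an existing ISel node or intrinsic. As a rough scalar reference for the rounding modes only (a sketch using <cmath>, not backend code):
+
+  #include <cmath>
+
+  // Scalar analogues of the rounding nodes selected above; the real
+  // instructions operate lane-wise on whole vectors.
+  double frinta(double x) { return std::round(x); }     // ties away from zero
+  double frintp(double x) { return std::ceil(x); }      // toward +infinity
+  double frintm(double x) { return std::floor(x); }     // toward -infinity
+  double frintz(double x) { return std::trunc(x); }     // toward zero
+  double frinti(double x) { return std::nearbyint(x); } // current mode, no inexact
+  double frintx(double x) { return std::rint(x); }      // current mode, raises inexact
+
+frintn (round to nearest, ties to even) has no direct <cmath> counterpart short of changing the rounding mode, which is why it remains an intrinsic above.
+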
+multiclass NeonI_2VMisc_S_Conv<string asmop, bit Size, bit U,
+ bits<5> opcode, SDPatternOperator Neon_Op> {
+ def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.4s, $Rn.4s",
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
+ (outs VPR64:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd.2s, $Rn.2s",
+ [(set (v2i32 VPR64:$Rd),
+ (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
+ NoItinerary>;
+}
+
+defm URECPE : NeonI_2VMisc_S_Conv<"urecpe", 0b1, 0b0, 0b11100,
+ int_arm_neon_vrecpe>;
+defm URSQRTE : NeonI_2VMisc_S_Conv<"ursqrte", 0b1, 0b1, 0b11100,
+ int_arm_neon_vrsqrte>;
+
+// Crypto Class
+class NeonI_Cryptoaes_2v<bits<2> size, bits<5> opcode,
+ string asmop, SDPatternOperator opnode>
+ : NeonI_Crypto_AES<size, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "\t$Rd.16b, $Rn.16b",
+ [(set (v16i8 VPR128:$Rd),
+ (v16i8 (opnode (v16i8 VPR128:$src),
+ (v16i8 VPR128:$Rn))))],
+ NoItinerary>{
+ let Constraints = "$src = $Rd";
+ let Predicates = [HasNEON, HasCrypto];
+}
+
+def AESE : NeonI_Cryptoaes_2v<0b00, 0b00100, "aese", int_arm_neon_aese>;
+def AESD : NeonI_Cryptoaes_2v<0b00, 0b00101, "aesd", int_arm_neon_aesd>;
+
+class NeonI_Cryptoaes<bits<2> size, bits<5> opcode,
+ string asmop, SDPatternOperator opnode>
+ : NeonI_Crypto_AES<size, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd.16b, $Rn.16b",
+ [(set (v16i8 VPR128:$Rd),
+ (v16i8 (opnode (v16i8 VPR128:$Rn))))],
+ NoItinerary>;
+
+def AESMC : NeonI_Cryptoaes<0b00, 0b00110, "aesmc", int_arm_neon_aesmc>;
+def AESIMC : NeonI_Cryptoaes<0b00, 0b00111, "aesimc", int_arm_neon_aesimc>;
+
+class NeonI_Cryptosha_vv<bits<2> size, bits<5> opcode,
+ string asmop, SDPatternOperator opnode>
+ : NeonI_Crypto_SHA<size, opcode,
+ (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
+ asmop # "\t$Rd.4s, $Rn.4s",
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (opnode (v4i32 VPR128:$src),
+ (v4i32 VPR128:$Rn))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+ let Predicates = [HasNEON, HasCrypto];
+}
+
+def SHA1SU1 : NeonI_Cryptosha_vv<0b00, 0b00001, "sha1su1",
+ int_arm_neon_sha1su1>;
+def SHA256SU0 : NeonI_Cryptosha_vv<0b00, 0b00010, "sha256su0",
+ int_arm_neon_sha256su0>;
+
+class NeonI_Cryptosha_ss<bits<2> size, bits<5> opcode,
+ string asmop, SDPatternOperator opnode>
+ : NeonI_Crypto_SHA<size, opcode,
+ (outs FPR32:$Rd), (ins FPR32:$Rn),
+ asmop # "\t$Rd, $Rn",
+ [(set (v1i32 FPR32:$Rd),
+ (v1i32 (opnode (v1i32 FPR32:$Rn))))],
+ NoItinerary> {
+ let Predicates = [HasNEON, HasCrypto];
+}
+
+def SHA1H : NeonI_Cryptosha_ss<0b00, 0b00000, "sha1h", int_arm_neon_sha1h>;
+
+class NeonI_Cryptosha3_vvv<bits<2> size, bits<3> opcode, string asmop,
+ SDPatternOperator opnode>
+ : NeonI_Crypto_3VSHA<size, opcode,
+ (outs VPR128:$Rd),
+ (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
+ [(set (v4i32 VPR128:$Rd),
+ (v4i32 (opnode (v4i32 VPR128:$src),
+ (v4i32 VPR128:$Rn),
+ (v4i32 VPR128:$Rm))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+ let Predicates = [HasNEON, HasCrypto];
+}
+
+def SHA1SU0 : NeonI_Cryptosha3_vvv<0b00, 0b011, "sha1su0",
+ int_arm_neon_sha1su0>;
+def SHA256SU1 : NeonI_Cryptosha3_vvv<0b00, 0b110, "sha256su1",
+ int_arm_neon_sha256su1>;
+
+class NeonI_Cryptosha3_qqv<bits<2> size, bits<3> opcode, string asmop,
+ SDPatternOperator opnode>
+ : NeonI_Crypto_3VSHA<size, opcode,
+ (outs FPR128:$Rd),
+ (ins FPR128:$src, FPR128:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd, $Rn, $Rm.4s",
+ [(set (v4i32 FPR128:$Rd),
+ (v4i32 (opnode (v4i32 FPR128:$src),
+ (v4i32 FPR128:$Rn),
+ (v4i32 VPR128:$Rm))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+ let Predicates = [HasNEON, HasCrypto];
+}
+
+def SHA256H : NeonI_Cryptosha3_qqv<0b00, 0b100, "sha256h",
+ int_arm_neon_sha256h>;
+def SHA256H2 : NeonI_Cryptosha3_qqv<0b00, 0b101, "sha256h2",
+ int_arm_neon_sha256h2>;
+
+class NeonI_Cryptosha3_qsv<bits<2> size, bits<3> opcode, string asmop,
+ SDPatternOperator opnode>
+ : NeonI_Crypto_3VSHA<size, opcode,
+ (outs FPR128:$Rd),
+ (ins FPR128:$src, FPR32:$Rn, VPR128:$Rm),
+ asmop # "\t$Rd, $Rn, $Rm.4s",
+ [(set (v4i32 FPR128:$Rd),
+ (v4i32 (opnode (v4i32 FPR128:$src),
+ (v1i32 FPR32:$Rn),
+ (v4i32 VPR128:$Rm))))],
+ NoItinerary> {
+ let Constraints = "$src = $Rd";
+ let Predicates = [HasNEON, HasCrypto];
+}
+
+def SHA1C : NeonI_Cryptosha3_qsv<0b00, 0b000, "sha1c", int_aarch64_neon_sha1c>;
+def SHA1P : NeonI_Cryptosha3_qsv<0b00, 0b001, "sha1p", int_aarch64_neon_sha1p>;
+def SHA1M : NeonI_Cryptosha3_qsv<0b00, 0b010, "sha1m", int_aarch64_neon_sha1m>;
+
+//
+// Patterns for handling half-precision values
+//
+
+// Convert an f16 value coming in as an i16 to f32
+def : Pat<(f32 (f16_to_f32 (i32 (and (i32 GPR32:$Rn), 65535)))),
+ (FCVTsh (EXTRACT_SUBREG (FMOVsw GPR32:$Rn), sub_16))>;
+def : Pat<(f32 (f16_to_f32 (i32 (assertzext GPR32:$Rn)))),
+ (FCVTsh (EXTRACT_SUBREG (FMOVsw GPR32:$Rn), sub_16))>;
+
+def : Pat<(f32 (f16_to_f32 (i32 (assertzext (i32 (
+ f32_to_f16 (f32 FPR32:$Rn))))))),
+ (f32 FPR32:$Rn)>;
+
+// Patterns for vector extract of half-precision FP value in i16 storage type
+def : Pat<(f32 (f16_to_f32 ( i32 (and (i32 (vector_extract
+ (v4i16 VPR64:$Rn), neon_uimm2_bare:$Imm)), 65535)))),
+ (FCVTsh (f16 (DUPhv_H
+ (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ neon_uimm2_bare:$Imm)))>;
+
+def : Pat<(f32 (f16_to_f32 ( i32 (and (i32 (vector_extract
+ (v8i16 VPR128:$Rn), neon_uimm3_bare:$Imm)), 65535)))),
+ (FCVTsh (f16 (DUPhv_H (v8i16 VPR128:$Rn), neon_uimm3_bare:$Imm)))>;
+
+// Patterns for vector insert of half-precision FP value 0 in i16 storage type
+def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
+ (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 0))))))),
+ (neon_uimm3_bare:$Imm))),
+ (v8i16 (INSELh (v8i16 VPR128:$Rn),
+ (v8i16 (SUBREG_TO_REG (i64 0),
+ (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 WZR))), sub_16)),
+ sub_16)),
+ neon_uimm3_bare:$Imm, 0))>;
+
+def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
+ (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 0))))))),
+ (neon_uimm2_bare:$Imm))),
+ (v4i16 (EXTRACT_SUBREG
+ (v8i16 (INSELh
+ (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ (v8i16 (SUBREG_TO_REG (i64 0),
+ (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 WZR))), sub_16)),
+ sub_16)),
+ neon_uimm2_bare:$Imm, 0)),
+ sub_64))>;
+
+// Patterns for vector insert of half-precision FP value in i16 storage type
+def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
+ (i32 (assertsext (i32 (fp_to_sint
+ (f32 (f16_to_f32 (i32 (and (i32 GPR32:$src), 65535)))))))),
+ (neon_uimm3_bare:$Imm))),
+ (v8i16 (INSELh (v8i16 VPR128:$Rn),
+ (v8i16 (SUBREG_TO_REG (i64 0),
+ (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 GPR32:$src))), sub_16)),
+ sub_16)),
+ neon_uimm3_bare:$Imm, 0))>;
+
+def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
+ (i32 (assertsext (i32 (fp_to_sint
+ (f32 (f16_to_f32 (i32 (and (i32 GPR32:$src), 65535)))))))),
+ (neon_uimm2_bare:$Imm))),
+ (v4i16 (EXTRACT_SUBREG
+ (v8i16 (INSELh
+ (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ (v8i16 (SUBREG_TO_REG (i64 0),
+ (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 GPR32:$src))), sub_16)),
+ sub_16)),
+ neon_uimm2_bare:$Imm, 0)),
+ sub_64))>;
+
+def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
+ (i32 (vector_extract (v8i16 VPR128:$src), neon_uimm3_bare:$Imm2)),
+ (neon_uimm3_bare:$Imm1))),
+ (v8i16 (INSELh (v8i16 VPR128:$Rn), (v8i16 VPR128:$src),
+ neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2))>;
+
+// Patterns for vector copy of half-precision FP value in i16 storage type
+def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
+ (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 (and (i32
+ (vector_extract (v8i16 VPR128:$src), neon_uimm3_bare:$Imm2)),
+ 65535)))))))),
+ (neon_uimm3_bare:$Imm1))),
+ (v8i16 (INSELh (v8i16 VPR128:$Rn), (v8i16 VPR128:$src),
+ neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2))>;
+
+def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
+ (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 (and (i32
+ (vector_extract (v4i16 VPR64:$src), neon_uimm3_bare:$Imm2)),
+ 65535)))))))),
+ (neon_uimm3_bare:$Imm1))),
+ (v4i16 (EXTRACT_SUBREG
+ (v8i16 (INSELh
+ (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+ (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
+ neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2)),
+ sub_64))>;
+
+
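+The half-precision patterns above treat an f16 as a 16-bit integer living in a GPR or a vector lane and leave the actual widening to FCVTsh; masking with 65535 is harmless because only the low 16 bits participate. For reference, a small software model of that widening (purely illustrative, the backend never executes this):
+
+  #include <cassert>
+  #include <cstdint>
+  #include <cstring>
+
+  // Reinterpret the low 16 bits as an IEEE-754 binary16 value and widen it
+  // to float. Widening is exact, so no rounding is involved.
+  float halfBitsToFloat(uint16_t h) {
+    uint32_t sign = (uint32_t(h) & 0x8000u) << 16;
+    uint32_t exp  = (h >> 10) & 0x1Fu;
+    uint32_t mant = h & 0x3FFu;
+    uint32_t bits;
+    if (exp == 0) {
+      if (mant == 0) {
+        bits = sign;                                   // signed zero
+      } else {
+        int e = -1;                                    // subnormal: renormalize
+        do { ++e; mant <<= 1; } while ((mant & 0x400u) == 0);
+        bits = sign | ((112u - e) << 23) | ((mant & 0x3FFu) << 13);
+      }
+    } else if (exp == 0x1Fu) {
+      bits = sign | 0x7F800000u | (mant << 13);        // infinity or NaN
+    } else {
+      bits = sign | ((exp + 112u) << 23) | (mant << 13);
+    }
+    float f;
+    std::memcpy(&f, &bits, sizeof f);
+    return f;
+  }
+
+  int main() {
+    assert(halfBitsToFloat(0x3C00) == 1.0f);   // 1.0
+    assert(halfBitsToFloat(0xC000) == -2.0f);  // -2.0
+    return 0;
+  }
+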
diff --git a/lib/Target/AArch64/AArch64MCInstLower.cpp b/lib/Target/AArch64/AArch64MCInstLower.cpp
index 3d22330afe72..8cfb968237e4 100644
--- a/lib/Target/AArch64/AArch64MCInstLower.cpp
+++ b/lib/Target/AArch64/AArch64MCInstLower.cpp
@@ -109,6 +109,11 @@ bool AArch64AsmPrinter::lowerOperand(const MachineOperand &MO,
case MachineOperand::MO_Immediate:
MCOp = MCOperand::CreateImm(MO.getImm());
break;
+ case MachineOperand::MO_FPImmediate: {
+ assert(MO.getFPImm()->isZero() && "Only fp imm 0.0 is supported");
+ MCOp = MCOperand::CreateFPImm(0.0);
+ break;
+ }
case MachineOperand::MO_BlockAddress:
MCOp = lowerSymbolOperand(MO, GetBlockAddressSymbol(MO.getBlockAddress()));
break;
@@ -116,7 +121,7 @@ bool AArch64AsmPrinter::lowerOperand(const MachineOperand &MO,
MCOp = lowerSymbolOperand(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
break;
case MachineOperand::MO_GlobalAddress:
- MCOp = lowerSymbolOperand(MO, Mang->getSymbol(MO.getGlobal()));
+ MCOp = lowerSymbolOperand(MO, getSymbol(MO.getGlobal()));
break;
case MachineOperand::MO_MachineBasicBlock:
MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
diff --git a/lib/Target/AArch64/AArch64RegisterInfo.cpp b/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 20b0dcf86f46..75ec44f3fecb 100644
--- a/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -29,9 +29,8 @@
using namespace llvm;
-AArch64RegisterInfo::AArch64RegisterInfo(const AArch64InstrInfo &tii,
- const AArch64Subtarget &sti)
- : AArch64GenRegisterInfo(AArch64::X30), TII(tii) {
+AArch64RegisterInfo::AArch64RegisterInfo()
+ : AArch64GenRegisterInfo(AArch64::X30) {
}
const uint16_t *
@@ -122,6 +121,8 @@ AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI,
return;
}
+ const AArch64InstrInfo &TII =
+ *static_cast<const AArch64InstrInfo*>(MF.getTarget().getInstrInfo());
int MinOffset, MaxOffset, OffsetScale;
if (MI.getOpcode() == AArch64::ADDxxi_lsl0_s) {
MinOffset = 0;
diff --git a/lib/Target/AArch64/AArch64RegisterInfo.h b/lib/Target/AArch64/AArch64RegisterInfo.h
index bb64fd55b2c3..4d679439936a 100644
--- a/lib/Target/AArch64/AArch64RegisterInfo.h
+++ b/lib/Target/AArch64/AArch64RegisterInfo.h
@@ -25,12 +25,7 @@ class AArch64InstrInfo;
class AArch64Subtarget;
struct AArch64RegisterInfo : public AArch64GenRegisterInfo {
-private:
- const AArch64InstrInfo &TII;
-
-public:
- AArch64RegisterInfo(const AArch64InstrInfo &tii,
- const AArch64Subtarget &sti);
+ AArch64RegisterInfo();
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const uint32_t *getCallPreservedMask(CallingConv::ID) const;
diff --git a/lib/Target/AArch64/AArch64RegisterInfo.td b/lib/Target/AArch64/AArch64RegisterInfo.td
index bd79546371c5..4e2022c06165 100644
--- a/lib/Target/AArch64/AArch64RegisterInfo.td
+++ b/lib/Target/AArch64/AArch64RegisterInfo.td
@@ -12,15 +12,25 @@
//===----------------------------------------------------------------------===//
let Namespace = "AArch64" in {
-def sub_128 : SubRegIndex;
-def sub_64 : SubRegIndex;
-def sub_32 : SubRegIndex;
-def sub_16 : SubRegIndex;
-def sub_8 : SubRegIndex;
-
-// The VPR registers are handled as sub-registers of FPR equivalents, but
-// they're really the same thing. We give this concept a special index.
-def sub_alias : SubRegIndex;
+def sub_128 : SubRegIndex<128>;
+def sub_64 : SubRegIndex<64>;
+def sub_32 : SubRegIndex<32>;
+def sub_16 : SubRegIndex<16>;
+def sub_8 : SubRegIndex<8>;
+
+// Note: Code depends on these having consecutive numbers.
+def qqsub : SubRegIndex<256, 256>;
+
+def qsub_0 : SubRegIndex<128>;
+def qsub_1 : SubRegIndex<128, 128>;
+def qsub_2 : ComposedSubRegIndex<qqsub, qsub_0>;
+def qsub_3 : ComposedSubRegIndex<qqsub, qsub_1>;
+
+def dsub_0 : SubRegIndex<64>;
+def dsub_1 : SubRegIndex<64, 64>;
+def dsub_2 : ComposedSubRegIndex<qsub_1, dsub_0>;
+def dsub_3 : ComposedSubRegIndex<qsub_1, dsub_1>;
+def dsub_4 : ComposedSubRegIndex<qsub_2, dsub_0>;
}
// Registers are identified with 5-bit ID numbers.
@@ -137,60 +147,51 @@ foreach Index = 0-31 in {
}
-def FPR8 : RegisterClass<"AArch64", [i8], 8,
+def FPR8 : RegisterClass<"AArch64", [i8, v1i8], 8,
(sequence "B%u", 0, 31)> {
}
-def FPR16 : RegisterClass<"AArch64", [f16], 16,
+def FPR16 : RegisterClass<"AArch64", [f16, v1i16], 16,
(sequence "H%u", 0, 31)> {
}
-def FPR32 : RegisterClass<"AArch64", [f32], 32,
+def FPR32 : RegisterClass<"AArch64", [f32, v1i32, v1f32], 32,
(sequence "S%u", 0, 31)> {
}
-def FPR64 : RegisterClass<"AArch64", [f64], 64,
- (sequence "D%u", 0, 31)> {
-}
+def FPR64 : RegisterClass<"AArch64",
+ [f64, v2f32, v2i32, v4i16, v8i8, v1i64, v1f64],
+ 64, (sequence "D%u", 0, 31)>;
-def FPR128 : RegisterClass<"AArch64", [f128], 128,
- (sequence "Q%u", 0, 31)> {
-}
+def FPR128 : RegisterClass<"AArch64",
+ [f128, v2f64, v2i64, v4f32, v4i32, v8i16, v16i8],
+ 128, (sequence "Q%u", 0, 31)>;
+def FPR64Lo : RegisterClass<"AArch64",
+ [f64, v2f32, v2i32, v4i16, v8i8, v1i64, v1f64],
+ 64, (sequence "D%u", 0, 15)>;
+
+def FPR128Lo : RegisterClass<"AArch64",
+ [f128, v2f64, v2i64, v4f32, v4i32, v8i16, v16i8],
+ 128, (sequence "Q%u", 0, 15)>;
//===----------------------------------------------------------------------===//
// Vector registers:
//===----------------------------------------------------------------------===//
-// NEON registers simply specify the overall vector, and it's expected that
-// Instructions will individually specify the acceptable data layout. In
-// principle this leaves two approaches open:
-// + An operand, giving a single ADDvvv instruction (for example). This turns
-// out to be unworkable in the assembly parser (without every Instruction
-// having a "cvt" function, at least) because the constraints can't be
-// properly enforced. It also complicates specifying patterns since each
-// instruction will accept many types.
-// + A bare token (e.g. ".2d"). This means the AsmParser has to know specific
-// details about NEON registers, but simplifies most other details.
-//
-// The second approach was taken.
-
-foreach Index = 0-31 in {
- def V # Index : AArch64RegWithSubs<Index, "v" # Index,
- [!cast<Register>("Q" # Index)],
- [sub_alias]>,
- DwarfRegNum<[!add(Index, 64)]>;
+def VPR64AsmOperand : AsmOperandClass {
+ let Name = "VPR";
+ let PredicateMethod = "isReg";
+ let RenderMethod = "addRegOperands";
}
-// These two classes contain the same registers, which should be reasonably
-// sensible for MC and allocation purposes, but allows them to be treated
-// separately for things like stack spilling.
-def VPR64 : RegisterClass<"AArch64", [v2f32, v2i32, v4i16, v8i8], 64,
- (sequence "V%u", 0, 31)>;
+def VPR64 : RegisterOperand<FPR64, "printVPRRegister">;
+
+def VPR128 : RegisterOperand<FPR128, "printVPRRegister">;
+
+def VPR64Lo : RegisterOperand<FPR64Lo, "printVPRRegister">;
-def VPR128 : RegisterClass<"AArch64",
- [v2f64, v2i64, v4f32, v4i32, v8i16, v16i8], 128,
- (sequence "V%u", 0, 31)>;
+def VPR128Lo : RegisterOperand<FPR128Lo, "printVPRRegister">;
// Flags register
def NZCV : Register<"nzcv"> {
@@ -201,3 +202,90 @@ def FlagClass : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
let CopyCost = -1;
let isAllocatable = 0;
}
+
+//===----------------------------------------------------------------------===//
+// Consecutive vector registers
+//===----------------------------------------------------------------------===//
+// 2 Consecutive 64-bit registers: D0_D1, D1_D2, ..., D30_D31
+def Tuples2D : RegisterTuples<[dsub_0, dsub_1],
+ [(rotl FPR64, 0), (rotl FPR64, 1)]>;
+
+// 3 Consecutive 64-bit registers: D0_D1_D2, ..., D31_D0_D1
+def Tuples3D : RegisterTuples<[dsub_0, dsub_1, dsub_2],
+ [(rotl FPR64, 0), (rotl FPR64, 1),
+ (rotl FPR64, 2)]>;
+
+// 4 Consecutive 64-bit registers: D0_D1_D2_D3, ..., D31_D0_D1_D2
+def Tuples4D : RegisterTuples<[dsub_0, dsub_1, dsub_2, dsub_3],
+ [(rotl FPR64, 0), (rotl FPR64, 1),
+ (rotl FPR64, 2), (rotl FPR64, 3)]>;
+
+// 2 Consecutive 128-bit registers: Q0_Q1, Q1_Q2, ..., Q30_Q31
+def Tuples2Q : RegisterTuples<[qsub_0, qsub_1],
+ [(rotl FPR128, 0), (rotl FPR128, 1)]>;
+
+// 3 Consecutive 128-bit registers: Q0_Q1_Q2, ..., Q31_Q0_Q1
+def Tuples3Q : RegisterTuples<[qsub_0, qsub_1, qsub_2],
+ [(rotl FPR128, 0), (rotl FPR128, 1),
+ (rotl FPR128, 2)]>;
+
+// 4 Consecutive 128-bit registers: Q0_Q1_Q2_Q3, ..., Q31_Q0_Q1_Q2
+def Tuples4Q : RegisterTuples<[qsub_0, qsub_1, qsub_2, qsub_3],
+ [(rotl FPR128, 0), (rotl FPR128, 1),
+ (rotl FPR128, 2), (rotl FPR128, 3)]>;
+
+// The following are super register classes to model 2/3/4 consecutive
+// 64-bit/128-bit registers.
+
+def DPair : RegisterClass<"AArch64", [v2i64], 64, (add Tuples2D)>;
+
+def DTriple : RegisterClass<"AArch64", [untyped], 64, (add Tuples3D)> {
+ let Size = 192; // 3 x 64 bits, we have no predefined type of that size.
+}
+
+def DQuad : RegisterClass<"AArch64", [v4i64], 64, (add Tuples4D)>;
+
+def QPair : RegisterClass<"AArch64", [v4i64], 128, (add Tuples2Q)>;
+
+def QTriple : RegisterClass<"AArch64", [untyped], 128, (add Tuples3Q)> {
+ let Size = 384; // 3 x 128 bits, we have no predefined type of that size.
+}
+
+def QQuad : RegisterClass<"AArch64", [v8i64], 128, (add Tuples4Q)>;
+
+
+// The following are vector list operands.
+multiclass VectorList_operands<string PREFIX, string LAYOUT, int Count,
+ RegisterClass RegList> {
+ def _asmoperand : AsmOperandClass {
+ let Name = PREFIX # LAYOUT # Count;
+ let RenderMethod = "addVectorListOperands";
+ let PredicateMethod =
+ "isVectorList<A64Layout::VL_" # LAYOUT # ", " # Count # ">";
+ let ParserMethod = "ParseVectorList";
+ }
+
+ def _operand : RegisterOperand<RegList,
+ "printVectorList<A64Layout::VL_" # LAYOUT # ", " # Count # ">"> {
+ let ParserMatchClass =
+ !cast<AsmOperandClass>(PREFIX # LAYOUT # "_asmoperand");
+ }
+}
+
+multiclass VectorList_BHSD<string PREFIX, int Count, RegisterClass DRegList,
+ RegisterClass QRegList> {
+ defm 8B : VectorList_operands<PREFIX, "8B", Count, DRegList>;
+ defm 4H : VectorList_operands<PREFIX, "4H", Count, DRegList>;
+ defm 2S : VectorList_operands<PREFIX, "2S", Count, DRegList>;
+ defm 1D : VectorList_operands<PREFIX, "1D", Count, DRegList>;
+ defm 16B : VectorList_operands<PREFIX, "16B", Count, QRegList>;
+ defm 8H : VectorList_operands<PREFIX, "8H", Count, QRegList>;
+ defm 4S : VectorList_operands<PREFIX, "4S", Count, QRegList>;
+ defm 2D : VectorList_operands<PREFIX, "2D", Count, QRegList>;
+}
+
+// Vector list operand with 1/2/3/4 registers: VOne8B_operand,..., VQuad2D_operand
+defm VOne : VectorList_BHSD<"VOne", 1, FPR64, FPR128>;
+defm VPair : VectorList_BHSD<"VPair", 2, DPair, QPair>;
+defm VTriple : VectorList_BHSD<"VTriple", 3, DTriple, QTriple>;
+defm VQuad : VectorList_BHSD<"VQuad", 4, DQuad, QQuad>; \ No newline at end of file
diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp
index d17b73820994..5c693c18c6a6 100644
--- a/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -25,13 +25,31 @@
using namespace llvm;
+// Pin the vtable to this file.
+void AArch64Subtarget::anchor() {}
+
AArch64Subtarget::AArch64Subtarget(StringRef TT, StringRef CPU, StringRef FS)
- : AArch64GenSubtargetInfo(TT, CPU, FS)
- , HasNEON(true)
- , HasCrypto(true)
- , TargetTriple(TT) {
+ : AArch64GenSubtargetInfo(TT, CPU, FS), HasFPARMv8(false), HasNEON(false),
+ HasCrypto(false), TargetTriple(TT), CPUString(CPU) {
+
+ initializeSubtargetFeatures(CPU, FS);
+}
+
+void AArch64Subtarget::initializeSubtargetFeatures(StringRef CPU,
+ StringRef FS) {
+ if (CPU.empty())
+ CPUString = "generic";
+
+ std::string FullFS = FS;
+ if (CPUString == "generic") {
+ // Enable FP by default.
+ if (FullFS.empty())
+ FullFS = "+fp-armv8";
+ else
+ FullFS = "+fp-armv8," + FullFS;
+ }
- ParseSubtargetFeatures(CPU, FS);
+ ParseSubtargetFeatures(CPU, FullFS);
}
bool AArch64Subtarget::GVIsIndirectSymbol(const GlobalValue *GV,
diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h
index 2e9205fc9924..bbfd3bc7dfac 100644
--- a/lib/Target/AArch64/AArch64Subtarget.h
+++ b/lib/Target/AArch64/AArch64Subtarget.h
@@ -27,18 +27,31 @@ class StringRef;
class GlobalValue;
class AArch64Subtarget : public AArch64GenSubtargetInfo {
+ virtual void anchor();
protected:
+ bool HasFPARMv8;
bool HasNEON;
bool HasCrypto;
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
+
+ /// CPUString - String name of the CPU in use.
+ std::string CPUString;
+
+private:
+ void initializeSubtargetFeatures(StringRef CPU, StringRef FS);
+
public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
AArch64Subtarget(StringRef TT, StringRef CPU, StringRef FS);
+ virtual bool enableMachineScheduler() const {
+ return true;
+ }
+
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
@@ -46,8 +59,13 @@ public:
bool GVIsIndirectSymbol(const GlobalValue *GV, Reloc::Model RelocM) const;
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
- bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
+ bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
+
+ bool hasFPARMv8() const { return HasFPARMv8; }
+ bool hasNEON() const { return HasNEON; }
+ bool hasCrypto() const { return HasCrypto; }
+ const std::string & getCPUString() const { return CPUString; }
};
} // End llvm namespace
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index df599d599dd6..f1695e2ce207 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -38,6 +38,7 @@ AArch64TargetMachine::AArch64TargetMachine(const Target &T, StringRef TT,
TLInfo(*this),
TSInfo(*this),
FrameLowering(Subtarget) {
+ initAsmInfo();
}
namespace {
diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 69bb80a48537..fbbce116ad82 100644
--- a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -54,8 +54,9 @@ public:
#include "AArch64GenAsmMatcher.inc"
};
- AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
- : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
+ AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
+ const MCInstrInfo &MII)
+ : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
MCAsmParserExtension::Initialize(_Parser);
// Initialize the set of available features.
@@ -126,6 +127,11 @@ public:
OperandMatchResultTy
ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
+ SMLoc &LayoutLoc);
+
+ OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);
+
bool validateInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
@@ -153,6 +159,7 @@ private:
k_Immediate, // Including expressions referencing symbols
k_Register,
k_ShiftExtend,
+ k_VectorList, // A sequential list of 1 to 4 registers.
k_SysReg, // The register operand of MRS and MSR instructions
k_Token, // The mnemonic; other raw tokens the auto-generated
k_WrappedRegister // Load/store exclusive permit a wrapped register.
@@ -188,6 +195,13 @@ private:
bool ImplicitAmount;
};
+ // A vector register list is a sequential list of 1 to 4 registers.
+ struct VectorListOp {
+ unsigned RegNum;
+ unsigned Count;
+ A64Layout::VectorLayout Layout;
+ };
+
struct SysRegOp {
const char *Data;
unsigned Length;
@@ -205,6 +219,7 @@ private:
struct ImmOp Imm;
struct RegOp Reg;
struct ShiftExtendOp ShiftExtend;
+ struct VectorListOp VectorList;
struct SysRegOp SysReg;
struct TokOp Tok;
};
@@ -454,7 +469,7 @@ public:
}
bool isMOVN32Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_SABS_G0,
AArch64MCExpr::VK_AARCH64_SABS_G1,
AArch64MCExpr::VK_AARCH64_DTPREL_G1,
@@ -463,13 +478,13 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1,
AArch64MCExpr::VK_AARCH64_TPREL_G0,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(32, PermittedModifiers, NumModifiers);
}
bool isMOVN64Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_SABS_G0,
AArch64MCExpr::VK_AARCH64_SABS_G1,
AArch64MCExpr::VK_AARCH64_SABS_G2,
@@ -481,14 +496,14 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1,
AArch64MCExpr::VK_AARCH64_TPREL_G0,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(64, PermittedModifiers, NumModifiers);
}
bool isMOVZ32Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_ABS_G0,
AArch64MCExpr::VK_AARCH64_ABS_G1,
AArch64MCExpr::VK_AARCH64_SABS_G0,
@@ -499,13 +514,13 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1,
AArch64MCExpr::VK_AARCH64_TPREL_G0,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(32, PermittedModifiers, NumModifiers);
}
bool isMOVZ64Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_ABS_G0,
AArch64MCExpr::VK_AARCH64_ABS_G1,
AArch64MCExpr::VK_AARCH64_ABS_G2,
@@ -521,13 +536,13 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1,
AArch64MCExpr::VK_AARCH64_TPREL_G0,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(64, PermittedModifiers, NumModifiers);
}
bool isMOVK32Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
@@ -536,13 +551,13 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(32, PermittedModifiers, NumModifiers);
}
bool isMOVK64Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
@@ -553,13 +568,13 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(64, PermittedModifiers, NumModifiers);
}
bool isMoveWideImm(unsigned RegWidth,
- AArch64MCExpr::VariantKind *PermittedModifiers,
+ const AArch64MCExpr::VariantKind *PermittedModifiers,
unsigned NumModifiers) const {
if (!isImmWithLSL()) return false;
@@ -664,8 +679,86 @@ public:
return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
}
- template<int MemSize> bool isSImm7Scaled() const {
- if (!isImm()) return false;
+ // if 0 < value <= w, return true
+ bool isShrFixedWidth(int w) const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return false;
+ int64_t Value = CE->getValue();
+ return Value > 0 && Value <= w;
+ }
+
+ bool isShrImm8() const { return isShrFixedWidth(8); }
+
+ bool isShrImm16() const { return isShrFixedWidth(16); }
+
+ bool isShrImm32() const { return isShrFixedWidth(32); }
+
+ bool isShrImm64() const { return isShrFixedWidth(64); }
+
+ // if 0 <= value < w, return true
+ bool isShlFixedWidth(int w) const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return false;
+ int64_t Value = CE->getValue();
+ return Value >= 0 && Value < w;
+ }
+
+ bool isShlImm8() const { return isShlFixedWidth(8); }
+
+ bool isShlImm16() const { return isShlFixedWidth(16); }
+
+ bool isShlImm32() const { return isShlFixedWidth(32); }
+
+ bool isShlImm64() const { return isShlFixedWidth(64); }
+
+ bool isNeonMovImmShiftLSL() const {
+ if (!isShiftOrExtend())
+ return false;
+
+ if (ShiftExtend.ShiftType != A64SE::LSL)
+ return false;
+
+ // Valid shift amounts are 0, 8, 16 and 24.
+ return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
+ }
+
+ bool isNeonMovImmShiftLSLH() const {
+ if (!isShiftOrExtend())
+ return false;
+
+ if (ShiftExtend.ShiftType != A64SE::LSL)
+ return false;
+
+ // Valid shift amounts are 0 and 8.
+ return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
+ }
+
+ bool isNeonMovImmShiftMSL() const {
+ if (!isShiftOrExtend())
+ return false;
+
+ if (ShiftExtend.ShiftType != A64SE::MSL)
+ return false;
+
+ // Valid shift amounts are 8 and 16.
+ return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
+ }
+
+ template <A64Layout::VectorLayout Layout, unsigned Count>
+ bool isVectorList() const {
+ return Kind == k_VectorList && VectorList.Layout == Layout &&
+ VectorList.Count == Count;
+ }
+
+ template <int MemSize> bool isSImm7Scaled() const {
+ if (!isImm())
+ return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
@@ -705,10 +798,38 @@ public:
return isa<MCConstantExpr>(getImm());
}
+ bool isNeonUImm64Mask() const {
+ if (!isImm())
+ return false;
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return false;
+
+ uint64_t Value = CE->getValue();
+
+ // i64 value with each byte being either 0x00 or 0xff.
+ for (unsigned i = 0; i < 8; ++i, Value >>= 8)
+ if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
+ return false;
+ return true;
+ }
+
+ // if value == N, return true
+ template<int N>
+ bool isExactImm() const {
+ if (!isImm()) return false;
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+
+ return CE->getValue() == N;
+ }
+
static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
unsigned ShiftAmount,
bool ImplicitAmount,
- SMLoc S, SMLoc E) {
+ SMLoc S,SMLoc E) {
AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
Op->ImmWithLSL.Val = Val;
Op->ImmWithLSL.ShiftAmount = ShiftAmount;
@@ -766,6 +887,18 @@ public:
return Op;
}
+ static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
+ A64Layout::VectorLayout Layout,
+ SMLoc S, SMLoc E) {
+ AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
+ Op->VectorList.RegNum = RegNum;
+ Op->VectorList.Count = Count;
+ Op->VectorList.Layout = Layout;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
Op->Tok.Data = Str.data();
@@ -1026,6 +1159,40 @@ public:
Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
}
+ // For the shifted immediate operands of vector-immediate instructions.
+ void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
+ llvm_unreachable("Invalid shift amount for vector immediate inst.");
+
+ // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
+ int64_t Imm = ShiftExtend.Amount / 8;
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
+
+ void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
+ llvm_unreachable("Invalid shift amount for vector immediate inst.");
+
+ // Encode LSLH shift amount 0, 8 as 0, 1.
+ int64_t Imm = ShiftExtend.Amount / 8;
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
+
+ void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
+ llvm_unreachable("Invalid shift amount for vector immediate inst.");
+
+ // Encode MSL shift amount 8, 16 as 0, 1.
+ int64_t Imm = ShiftExtend.Amount / 8 - 1;
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
+
// For the extend in load-store (register offset) instructions.
template<unsigned MemSize>
void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
@@ -1065,6 +1232,25 @@ public:
Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
}
+
+ void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ // A bit from each byte in the constant forms the encoded immediate
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ uint64_t Value = CE->getValue();
+
+ unsigned Imm = 0;
+ for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
+ Imm |= (Value & 1) << i;
+ }
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
+
+ void addVectorListOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
+ }
};
} // end anonymous namespace.
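isNeonUImm64Mask and addNeonUImm64MaskOperands above implement the 64-bit "byte mask" immediate: every byte of the constant must be 0x00 or 0xff, and one bit per byte is packed into an 8-bit field. A standalone sketch of the encode/decode pair (helper names are illustrative only):

  #include <cassert>
  #include <cstdint>

  // Encode: take bit 0 of each byte; the bytes are known to be all-zeros
  // or all-ones, so bit 0 represents the whole byte.
  unsigned encodeByteMask(uint64_t Value) {
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
      Imm |= unsigned(Value & 1) << i;
    return Imm;
  }

  // Decode: expand each of the 8 bits back into a full byte.
  uint64_t decodeByteMask(unsigned Imm) {
    uint64_t Value = 0;
    for (unsigned i = 0; i < 8; ++i)
      if (Imm & (1u << i))
        Value |= UINT64_C(0xff) << (8 * i);
    return Value;
  }

  int main() {
    uint64_t V = UINT64_C(0x00ff00ff00ff00ff);
    assert(encodeByteMask(V) == 0x55);
    assert(decodeByteMask(0x55) == V);
    return 0;
  }
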
@@ -1104,7 +1290,6 @@ AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
else
return MatchOperand_Success;
}
-
// ... or it might be a symbolish thing
}
// Fall through
@@ -1148,7 +1333,7 @@ AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
return ParseOperand(Operands, Mnemonic);
}
// The following will likely be useful later, but not in very early cases
- case AsmToken::LCurly: // Weird SIMD lists
+ case AsmToken::LCurly: // SIMD vector list is not parsed here
llvm_unreachable("Don't know how to deal with '{' in operand");
return MatchOperand_ParseFail;
}
@@ -1306,7 +1491,7 @@ AArch64AsmParser::ParseImmWithLSLOperand(
// The optional operand must be "lsl #N" where N is non-negative.
if (Parser.getTok().is(AsmToken::Identifier)
- && Parser.getTok().getIdentifier().lower() == "lsl") {
+ && Parser.getTok().getIdentifier().equals_lower("lsl")) {
Parser.Lex();
if (Parser.getTok().is(AsmToken::Hash)) {
@@ -1363,9 +1548,8 @@ AArch64AsmParser::ParseCRxOperand(
return MatchOperand_ParseFail;
}
- std::string LowerTok = Parser.getTok().getIdentifier().lower();
- StringRef Tok(LowerTok);
- if (Tok[0] != 'c') {
+ StringRef Tok = Parser.getTok().getIdentifier();
+ if (Tok[0] != 'c' && Tok[0] != 'C') {
Error(S, "Expected cN operand where 0 <= N <= 15");
return MatchOperand_ParseFail;
}
@@ -1437,22 +1621,11 @@ AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
std::string LowerReg = Tok.getString().lower();
size_t DotPos = LowerReg.find('.');
- RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
- if (RegNum == AArch64::NoRegister) {
- RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
- .Case("ip0", AArch64::X16)
- .Case("ip1", AArch64::X17)
- .Case("fp", AArch64::X29)
- .Case("lr", AArch64::X30)
- .Default(AArch64::NoRegister);
- }
- if (RegNum == AArch64::NoRegister)
- return false;
-
+ bool IsVec128 = false;
SMLoc S = Tok.getLoc();
RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);
- if (DotPos == StringRef::npos) {
+ if (DotPos == std::string::npos) {
Layout = StringRef();
} else {
// Everything afterwards needs to be a literal token, expected to be
@@ -1462,20 +1635,78 @@ AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
// gives us a permanent string to use in the token (a pointer into LowerReg
// would go out of scope when we return).
LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
- std::string LayoutText = LowerReg.substr(DotPos, StringRef::npos);
+ StringRef LayoutText = StringRef(LowerReg).substr(DotPos);
+
+ // See if it's a 128-bit layout first.
Layout = StringSwitch<const char *>(LayoutText)
- .Case(".d", ".d").Case(".1d", ".1d").Case(".2d", ".2d")
- .Case(".s", ".s").Case(".2s", ".2s").Case(".4s", ".4s")
- .Case(".h", ".h").Case(".4h", ".4h").Case(".8h", ".8h")
- .Case(".b", ".b").Case(".8b", ".8b").Case(".16b", ".16b")
+ .Case(".q", ".q").Case(".1q", ".1q")
+ .Case(".d", ".d").Case(".2d", ".2d")
+ .Case(".s", ".s").Case(".4s", ".4s")
+ .Case(".h", ".h").Case(".8h", ".8h")
+ .Case(".b", ".b").Case(".16b", ".16b")
.Default("");
+ if (Layout.size() != 0)
+ IsVec128 = true;
+ else {
+ Layout = StringSwitch<const char *>(LayoutText)
+ .Case(".1d", ".1d")
+ .Case(".2s", ".2s")
+ .Case(".4h", ".4h")
+ .Case(".8b", ".8b")
+ .Default("");
+ }
+
if (Layout.size() == 0) {
- // Malformed register
+ // If we've still not pinned it down, the register is malformed.
return false;
}
}
+ RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
+ if (RegNum == AArch64::NoRegister) {
+ RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
+ .Case("ip0", AArch64::X16)
+ .Case("ip1", AArch64::X17)
+ .Case("fp", AArch64::X29)
+ .Case("lr", AArch64::X30)
+ .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
+ .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
+ .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
+ .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
+ .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
+ .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
+ .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
+ .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
+ .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
+ .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
+ .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
+ .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
+ .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
+ .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
+ .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
+ .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
+ .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
+ .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
+ .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
+ .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
+ .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
+ .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
+ .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
+ .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
+ .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
+ .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
+ .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
+ .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
+ .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
+ .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
+ .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
+ .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
+ .Default(AArch64::NoRegister);
+ }
+ if (RegNum == AArch64::NoRegister)
+ return false;
+
return true;
}
@@ -1507,6 +1738,7 @@ AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
case 'h': NumLanes = 8; break;
case 's': NumLanes = 4; break;
case 'd': NumLanes = 2; break;
+ case 'q': NumLanes = 1; break;
}
}
@@ -1660,20 +1892,21 @@ AArch64AsmParser::ParseShiftExtend(
std::string LowerID = IDVal.lower();
A64SE::ShiftExtSpecifiers Spec =
- StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
- .Case("lsl", A64SE::LSL)
- .Case("lsr", A64SE::LSR)
- .Case("asr", A64SE::ASR)
- .Case("ror", A64SE::ROR)
- .Case("uxtb", A64SE::UXTB)
- .Case("uxth", A64SE::UXTH)
- .Case("uxtw", A64SE::UXTW)
- .Case("uxtx", A64SE::UXTX)
- .Case("sxtb", A64SE::SXTB)
- .Case("sxth", A64SE::SXTH)
- .Case("sxtw", A64SE::SXTW)
- .Case("sxtx", A64SE::SXTX)
- .Default(A64SE::Invalid);
+ StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
+ .Case("lsl", A64SE::LSL)
+ .Case("msl", A64SE::MSL)
+ .Case("lsr", A64SE::LSR)
+ .Case("asr", A64SE::ASR)
+ .Case("ror", A64SE::ROR)
+ .Case("uxtb", A64SE::UXTB)
+ .Case("uxth", A64SE::UXTH)
+ .Case("uxtw", A64SE::UXTW)
+ .Case("uxtx", A64SE::UXTX)
+ .Case("sxtb", A64SE::SXTB)
+ .Case("sxth", A64SE::SXTH)
+ .Case("sxtw", A64SE::SXTW)
+ .Case("sxtx", A64SE::SXTX)
+ .Default(A64SE::Invalid);
if (Spec == A64SE::Invalid)
return MatchOperand_NoMatch;
@@ -1683,8 +1916,8 @@ AArch64AsmParser::ParseShiftExtend(
S = Parser.getTok().getLoc();
Parser.Lex();
- if (Spec != A64SE::LSL && Spec != A64SE::LSR &&
- Spec != A64SE::ASR && Spec != A64SE::ROR) {
+ if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
+ Spec != A64SE::ROR && Spec != A64SE::MSL) {
// The shift amount can be omitted for the extending versions, but not real
// shifts:
// add x0, x0, x0, uxtb
@@ -1724,6 +1957,148 @@ AArch64AsmParser::ParseShiftExtend(
return MatchOperand_Success;
}
+/// Try to parse a vector register token. If it is a vector register, the
+/// token is eaten and true is returned; otherwise false is returned.
+bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
+ StringRef &Layout, SMLoc &LayoutLoc) {
+ bool IsVector = true;
+
+ if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
+ IsVector = false;
+ else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
+ .contains(RegNum) &&
+ !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
+ .contains(RegNum))
+ IsVector = false;
+ else if (Layout.size() == 0)
+ IsVector = false;
+
+ if (!IsVector)
+ Error(Parser.getTok().getLoc(), "expected vector type register");
+
+ Parser.Lex(); // Eat this token.
+ return IsVector;
+}
+
+
+// A vector list contains 1-4 consecutive registers.
+// There are two kinds of vector list when the number of vectors is > 1:
+// (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
+// (2) {Vn.layout - Vm.layout}
+// If the layout is .b/.h/.s/.d, also parse the lane.
+// (A standalone sketch of the register-range arithmetic follows this function.)
+AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ if (Parser.getTok().isNot(AsmToken::LCurly)) {
+ Error(Parser.getTok().getLoc(), "'{' expected");
+ return MatchOperand_ParseFail;
+ }
+ SMLoc SLoc = Parser.getTok().getLoc();
+ Parser.Lex(); // Eat '{' token.
+
+ unsigned Reg, Count = 1;
+ StringRef LayoutStr;
+ SMLoc RegEndLoc, LayoutLoc;
+ if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
+ return MatchOperand_ParseFail;
+
+ if (Parser.getTok().is(AsmToken::Minus)) {
+ Parser.Lex(); // Eat the minus.
+
+ unsigned Reg2;
+ StringRef LayoutStr2;
+ SMLoc RegEndLoc2, LayoutLoc2;
+ SMLoc RegLoc2 = Parser.getTok().getLoc();
+
+ if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
+ return MatchOperand_ParseFail;
+ unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);
+
+ if (LayoutStr != LayoutStr2) {
+ Error(LayoutLoc2, "expected the same vector layout");
+ return MatchOperand_ParseFail;
+ }
+ if (Space == 0 || Space > 3) {
+ Error(RegLoc2, "invalid number of vectors");
+ return MatchOperand_ParseFail;
+ }
+
+ Count += Space;
+ } else {
+ unsigned LastReg = Reg;
+ while (Parser.getTok().is(AsmToken::Comma)) {
+ Parser.Lex(); // Eat the comma.
+ unsigned Reg2;
+ StringRef LayoutStr2;
+ SMLoc RegEndLoc2, LayoutLoc2;
+ SMLoc RegLoc2 = Parser.getTok().getLoc();
+
+ if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
+ return MatchOperand_ParseFail;
+ unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
+ : (Reg2 + 32 - LastReg);
+ Count++;
+
+      // The space between two vectors should be 1, and they should have the
+      // same layout. The total count shouldn't be greater than 4.
+ if (Space != 1) {
+ Error(RegLoc2, "invalid space between two vectors");
+ return MatchOperand_ParseFail;
+ }
+ if (LayoutStr != LayoutStr2) {
+ Error(LayoutLoc2, "expected the same vector layout");
+ return MatchOperand_ParseFail;
+ }
+ if (Count > 4) {
+ Error(RegLoc2, "invalid number of vectors");
+ return MatchOperand_ParseFail;
+ }
+
+ LastReg = Reg2;
+ }
+ }
+
+ if (Parser.getTok().isNot(AsmToken::RCurly)) {
+ Error(Parser.getTok().getLoc(), "'}' expected");
+ return MatchOperand_ParseFail;
+ }
+ SMLoc ELoc = Parser.getTok().getLoc();
+ Parser.Lex(); // Eat '}' token.
+
+ A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
+ if (Count > 1) { // If count > 1, create vector list using super register.
+ bool IsVec64 = (Layout < A64Layout::VL_16B);
+ static unsigned SupRegIDs[3][2] = {
+ { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
+ { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
+ { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
+ };
+ unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
+ unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
+ const MCRegisterInfo *MRI = getContext().getRegisterInfo();
+ Reg = MRI->getMatchingSuperReg(Reg, Sub0,
+ &AArch64MCRegisterClasses[SupRegID]);
+ }
+ Operands.push_back(
+ AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));
+
+ if (Parser.getTok().is(AsmToken::LBrac)) {
+ uint32_t NumLanes = 0;
+ switch(Layout) {
+ case A64Layout::VL_B : NumLanes = 16; break;
+ case A64Layout::VL_H : NumLanes = 8; break;
+ case A64Layout::VL_S : NumLanes = 4; break;
+ case A64Layout::VL_D : NumLanes = 2; break;
+ default:
+ SMLoc Loc = getLexer().getLoc();
+ Error(Loc, "expected comma before next operand");
+ return MatchOperand_ParseFail;
+ }
+ return ParseNEONLane(Operands, NumLanes);
+ } else {
+ return MatchOperand_Success;
+ }
+}
+
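
A minimal standalone sketch of the {Vn - Vm} range arithmetic used above, assuming only the 0-31 vector register numbering; the helper name is illustrative, not LLVM API:

// Sketch: vector register numbers wrap modulo 32, so the list length for
// {Vfirst.layout - Vlast.layout} is the wrapped distance plus one (2-4 regs).
#include <cassert>

static unsigned vectorListCount(unsigned First, unsigned Last) {
  unsigned Space = (First < Last) ? (Last - First) : (Last + 32 - First);
  assert(Space >= 1 && Space <= 3 && "invalid number of vectors");
  return Space + 1;
}

int main() {
  assert(vectorListCount(0, 3) == 4);  // {v0.16b - v3.16b}
  assert(vectorListCount(31, 1) == 3); // {v31.16b - v1.16b} wraps around
}
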
// FIXME: We would really like to be able to tablegen'erate this.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
@@ -1918,7 +2293,7 @@ bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
if (getParser().parseExpression(Value))
return true;
- getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
+ getParser().getStreamer().EmitValue(Value, Size);
if (getLexer().is(AsmToken::EndOfStatement))
break;
@@ -2019,7 +2394,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
"expected compatible register or floating-point constant");
case Match_FPZero:
return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected floating-point constant #0.0");
+ "expected floating-point constant #0.0 or invalid register type");
case Match_Label:
return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
"expected label or encodable integer pc offset");
@@ -2140,6 +2515,30 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_Width64:
return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
"expected integer in range [<lsb>, 63]");
+ case Match_ShrImm8:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [1, 8]");
+ case Match_ShrImm16:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [1, 16]");
+ case Match_ShrImm32:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [1, 32]");
+ case Match_ShrImm64:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [1, 64]");
+ case Match_ShlImm8:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [0, 7]");
+ case Match_ShlImm16:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [0, 15]");
+ case Match_ShlImm32:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [0, 31]");
+ case Match_ShlImm64:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [0, 63]");
}
llvm_unreachable("Implement any new match types added!");
diff --git a/lib/Target/AArch64/CMakeLists.txt b/lib/Target/AArch64/CMakeLists.txt
index 8164d6f73c97..0f2e81693198 100644
--- a/lib/Target/AArch64/CMakeLists.txt
+++ b/lib/Target/AArch64/CMakeLists.txt
@@ -28,6 +28,8 @@ add_llvm_target(AArch64CodeGen
AArch64TargetObjectFile.cpp
)
+add_dependencies(LLVMAArch64CodeGen AArch64CommonTableGen)
+
add_subdirectory(AsmParser)
add_subdirectory(Disassembler)
add_subdirectory(InstPrinter)
diff --git a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index 12c1b8f4c81a..be4d7f22b2b1 100644
--- a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -38,7 +38,7 @@ typedef MCDisassembler::DecodeStatus DecodeStatus;
namespace {
/// AArch64 disassembler for all AArch64 platforms.
class AArch64Disassembler : public MCDisassembler {
- const MCRegisterInfo *RegInfo;
+ OwningPtr<const MCRegisterInfo> RegInfo;
public:
/// Initializes the disassembler.
///
@@ -46,8 +46,7 @@ public:
: MCDisassembler(STI), RegInfo(Info) {
}
- ~AArch64Disassembler() {
- }
+ ~AArch64Disassembler() {}
/// See MCDisassembler.
DecodeStatus getInstruction(MCInst &instr,
@@ -57,7 +56,7 @@ public:
raw_ostream &vStream,
raw_ostream &cStream) const;
- const MCRegisterInfo *getRegInfo() const { return RegInfo; }
+ const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); }
};
}
@@ -83,12 +82,38 @@ static DecodeStatus DecodeFPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeFPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeFPR64LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder);
static DecodeStatus DecodeFPR128RegisterClass(llvm::MCInst &Inst,
unsigned RegNo, uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeVPR128RegisterClass(llvm::MCInst &Inst,
- unsigned RegNo, uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeFPR128LoRegisterClass(llvm::MCInst &Inst,
+ unsigned RegNo, uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeGPR64noxzrRegisterClass(llvm::MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeDPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeQPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeDTripleRegisterClass(llvm::MCInst &Inst,
+ unsigned RegNo, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeQTripleRegisterClass(llvm::MCInst &Inst,
+ unsigned RegNo, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeDQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeQQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeAddrRegExtendOperand(llvm::MCInst &Inst,
unsigned OptionHiS,
@@ -111,6 +136,30 @@ static DecodeStatus DecodeFPZeroOperand(llvm::MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeShiftRightImm8(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeShiftRightImm16(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeShiftRightImm32(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeShiftRightImm64(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeShiftLeftImm8(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeShiftLeftImm16(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeShiftLeftImm32(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeShiftLeftImm64(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder);
+
template<int RegWidth>
static DecodeStatus DecodeMoveWideImmOperand(llvm::MCInst &Inst,
unsigned FullImm,
@@ -127,6 +176,10 @@ static DecodeStatus DecodeRegExtendOperand(llvm::MCInst &Inst,
unsigned ShiftAmount,
uint64_t Address,
const void *Decoder);
+template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf>
+static DecodeStatus
+DecodeNeonMovImmShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount,
+ uint64_t Address, const void *Decoder);
static DecodeStatus Decode32BitShiftOperand(llvm::MCInst &Inst,
unsigned ShiftAmount,
@@ -177,6 +230,17 @@ static DecodeStatus DecodeSingleIndexedInstruction(llvm::MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeVLDSTPostInstruction(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeVLDSTLanePostInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeSHLLInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
static bool Check(DecodeStatus &Out, DecodeStatus In);
@@ -208,7 +272,7 @@ DecodeStatus AArch64Disassembler::getInstruction(MCInst &MI, uint64_t &Size,
uint8_t bytes[4];
// We want to read exactly 4 bytes of data.
- if (Region.readBytes(Address, 4, (uint8_t*)bytes, NULL) == -1) {
+ if (Region.readBytes(Address, 4, bytes) == -1) {
Size = 0;
return MCDisassembler::Fail;
}
@@ -325,6 +389,14 @@ DecodeFPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
return MCDisassembler::Success;
}
+static DecodeStatus
+DecodeFPR64LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder) {
+ if (RegNo > 15)
+ return MCDisassembler::Fail;
+
+ return DecodeFPR64RegisterClass(Inst, RegNo, Address, Decoder);
+}
static DecodeStatus
DecodeFPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
@@ -338,16 +410,79 @@ DecodeFPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
}
static DecodeStatus
-DecodeVPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
+DecodeFPR128LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder) {
+ if (RegNo > 15)
+ return MCDisassembler::Fail;
+
+ return DecodeFPR128RegisterClass(Inst, RegNo, Address, Decoder);
+}
+
+static DecodeStatus DecodeGPR64noxzrRegisterClass(llvm::MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 30)
+ return MCDisassembler::Fail;
+
+ uint16_t Register = getReg(Decoder, AArch64::GPR64noxzrRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeRegisterClassByID(llvm::MCInst &Inst, unsigned RegNo,
+ unsigned RegID,
+ const void *Decoder) {
if (RegNo > 31)
return MCDisassembler::Fail;
- uint16_t Register = getReg(Decoder, AArch64::VPR128RegClassID, RegNo);
+ uint16_t Register = getReg(Decoder, RegID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Register));
return MCDisassembler::Success;
}
+static DecodeStatus DecodeDPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return DecodeRegisterClassByID(Inst, RegNo, AArch64::DPairRegClassID,
+ Decoder);
+}
+
+static DecodeStatus DecodeQPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return DecodeRegisterClassByID(Inst, RegNo, AArch64::QPairRegClassID,
+ Decoder);
+}
+
+static DecodeStatus DecodeDTripleRegisterClass(llvm::MCInst &Inst,
+ unsigned RegNo, uint64_t Address,
+ const void *Decoder) {
+ return DecodeRegisterClassByID(Inst, RegNo, AArch64::DTripleRegClassID,
+ Decoder);
+}
+
+static DecodeStatus DecodeQTripleRegisterClass(llvm::MCInst &Inst,
+ unsigned RegNo, uint64_t Address,
+ const void *Decoder) {
+ return DecodeRegisterClassByID(Inst, RegNo, AArch64::QTripleRegClassID,
+ Decoder);
+}
+
+static DecodeStatus DecodeDQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return DecodeRegisterClassByID(Inst, RegNo, AArch64::DQuadRegClassID,
+ Decoder);
+}
+
+static DecodeStatus DecodeQQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return DecodeRegisterClassByID(Inst, RegNo, AArch64::QQuadRegClassID,
+ Decoder);
+}
+
static DecodeStatus DecodeAddrRegExtendOperand(llvm::MCInst &Inst,
unsigned OptionHiS,
uint64_t Address,
@@ -396,7 +531,73 @@ static DecodeStatus DecodeFPZeroOperand(llvm::MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeShiftRightImm8(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(8 - Val));
+ return MCDisassembler::Success;
+}
+static DecodeStatus DecodeShiftRightImm16(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(16 - Val));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeShiftRightImm32(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(32 - Val));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeShiftRightImm64(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(64 - Val));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeShiftLeftImm8(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder) {
+ if (Val > 7)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateImm(Val));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeShiftLeftImm16(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder) {
+ if (Val > 15)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateImm(Val));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeShiftLeftImm32(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder) {
+ if (Val > 31)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateImm(Val));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeShiftLeftImm64(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder) {
+ if (Val > 63)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateImm(Val));
+ return MCDisassembler::Success;
+}
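
As a worked illustration of the right-shift immediate decoders above, here is a sketch assuming only that the instruction field stores ElemBits minus the shift; the helper name is hypothetical, not LLVM API:

#include <cassert>

// Sketch: a right shift by #s on ElemBits-wide elements is encoded as
// ElemBits - s, so decoding subtracts the field value from ElemBits again.
static unsigned decodeShrImm(unsigned ElemBits, unsigned Field) {
  return ElemBits - Field;
}

int main() {
  assert(decodeShrImm(8, 7) == 1);   // e.g. sshr v0.8b, v1.8b, #1
  assert(decodeShrImm(64, 1) == 63); // e.g. sshr d0, d1, #63
}
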
template<int RegWidth>
static DecodeStatus DecodeMoveWideImmOperand(llvm::MCInst &Inst,
@@ -553,11 +754,11 @@ static DecodeStatus DecodeFMOVLaneInstruction(llvm::MCInst &Inst, unsigned Insn,
unsigned IsToVec = fieldFromInstruction(Insn, 16, 1);
if (IsToVec) {
- DecodeVPR128RegisterClass(Inst, Rd, Address, Decoder);
+ DecodeFPR128RegisterClass(Inst, Rd, Address, Decoder);
DecodeGPR64RegisterClass(Inst, Rn, Address, Decoder);
} else {
DecodeGPR64RegisterClass(Inst, Rd, Address, Decoder);
- DecodeVPR128RegisterClass(Inst, Rn, Address, Decoder);
+ DecodeFPR128RegisterClass(Inst, Rn, Address, Decoder);
}
// Add the lane
@@ -800,4 +1001,572 @@ extern "C" void LLVMInitializeAArch64Disassembler() {
createAArch64Disassembler);
}
+template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf>
+static DecodeStatus
+DecodeNeonMovImmShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount,
+ uint64_t Address, const void *Decoder) {
+ bool IsLSL = false;
+ if (Ext == A64SE::LSL)
+ IsLSL = true;
+ else if (Ext != A64SE::MSL)
+ return MCDisassembler::Fail;
+
+  // MSL and LSLH accept an encoded shift amount of 0 or 1.
+ if ((!IsLSL || (IsLSL && IsHalf)) && ShiftAmount != 0 && ShiftAmount != 1)
+ return MCDisassembler::Fail;
+
+ // LSL accepts encoded shift amount 0, 1, 2 or 3.
+ if (IsLSL && ShiftAmount > 3)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateImm(ShiftAmount));
+ return MCDisassembler::Success;
+}
+
+// Decode post-index vector load/store instructions.
+// This is necessary as we need to decode Rm: if Rm == 0b11111, the last
+// operand is an immediate equal to the length of the vector list in bytes,
+// otherwise Rm is decoded to a GPR64noxzr register. (A standalone sketch of
+// the immediate case follows this function.)
+static DecodeStatus DecodeVLDSTPostInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned Rt = fieldFromInstruction(Insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(Insn, 5, 5);
+ unsigned Rm = fieldFromInstruction(Insn, 16, 5);
+ unsigned Opcode = fieldFromInstruction(Insn, 12, 4);
+ unsigned IsLoad = fieldFromInstruction(Insn, 22, 1);
+ // 0 for 64bit vector list, 1 for 128bit vector list
+ unsigned Is128BitVec = fieldFromInstruction(Insn, 30, 1);
+
+ unsigned NumVecs;
+ switch (Opcode) {
+ case 0: // ld4/st4
+ case 2: // ld1/st1 with 4 vectors
+ NumVecs = 4; break;
+ case 4: // ld3/st3
+ case 6: // ld1/st1 with 3 vectors
+ NumVecs = 3; break;
+ case 7: // ld1/st1 with 1 vector
+ NumVecs = 1; break;
+ case 8: // ld2/st2
+ case 10: // ld1/st1 with 2 vectors
+ NumVecs = 2; break;
+ default:
+ llvm_unreachable("Invalid opcode for post-index load/store instructions");
+ }
+
+ // Decode vector list of 1/2/3/4 vectors for load instructions.
+ if (IsLoad) {
+ switch (NumVecs) {
+ case 1:
+ Is128BitVec ? DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 2:
+ Is128BitVec ? DecodeQPairRegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeDPairRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 3:
+ Is128BitVec ? DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 4:
+ Is128BitVec ? DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ }
+ }
+
+  // Decode the write-back register, which is equal to Rn; Rn is added twice
+  // because the write-back operand is tied to the base register.
+ DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+ DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+
+  if (Rm == 31) // If Rm is 0b11111, add the vector list length in bytes
+ Inst.addOperand(MCOperand::CreateImm(NumVecs * (Is128BitVec ? 16 : 8)));
+ else // Decode Rm
+ DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
+
+  // Decode vector list of 1/2/3/4 vectors for store instructions.
+ if (!IsLoad) {
+ switch (NumVecs) {
+ case 1:
+ Is128BitVec ? DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 2:
+ Is128BitVec ? DecodeQPairRegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeDPairRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 3:
+ Is128BitVec ? DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 4:
+ Is128BitVec ? DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ }
+ }
+
+ return MCDisassembler::Success;
+}
+
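
A minimal sketch of the Rm == 0b11111 case above, where the post-index amount is just the vector list size in bytes; the helper name is an assumption for illustration:

#include <cassert>

// Sketch: with Rm == 0b11111 the write-back immediate equals the number of
// registers in the list times the register width in bytes.
static unsigned postIndexBytes(unsigned NumVecs, bool Is128BitVec) {
  return NumVecs * (Is128BitVec ? 16 : 8);
}

int main() {
  assert(postIndexBytes(4, true) == 64);  // e.g. ld4 {v0.4s-v3.4s}, [x0], #64
  assert(postIndexBytes(2, false) == 16); // e.g. ld2 {v0.8b, v1.8b}, [x0], #16
}
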
+// Decode post-index vector load/store lane instructions.
+// This is necessary as we need to decode Rm: if Rm == 0b11111, the last
+// operand is an immediate equal to the number of bytes transferred,
+// otherwise Rm is decoded to a GPR64noxzr register.
+static DecodeStatus DecodeVLDSTLanePostInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ bool Is64bitVec = false;
+ bool IsLoadDup = false;
+ bool IsLoad = false;
+ // The total number of bytes transferred.
+ // TransferBytes = NumVecs * OneLaneBytes
+ unsigned TransferBytes = 0;
+ unsigned NumVecs = 0;
+ unsigned Opc = Inst.getOpcode();
+ switch (Opc) {
+ case AArch64::LD1R_WB_8B_fixed: case AArch64::LD1R_WB_8B_register:
+ case AArch64::LD1R_WB_4H_fixed: case AArch64::LD1R_WB_4H_register:
+ case AArch64::LD1R_WB_2S_fixed: case AArch64::LD1R_WB_2S_register:
+ case AArch64::LD1R_WB_1D_fixed: case AArch64::LD1R_WB_1D_register: {
+ switch (Opc) {
+ case AArch64::LD1R_WB_8B_fixed: case AArch64::LD1R_WB_8B_register:
+ TransferBytes = 1; break;
+ case AArch64::LD1R_WB_4H_fixed: case AArch64::LD1R_WB_4H_register:
+ TransferBytes = 2; break;
+ case AArch64::LD1R_WB_2S_fixed: case AArch64::LD1R_WB_2S_register:
+ TransferBytes = 4; break;
+ case AArch64::LD1R_WB_1D_fixed: case AArch64::LD1R_WB_1D_register:
+ TransferBytes = 8; break;
+ }
+ Is64bitVec = true;
+ IsLoadDup = true;
+ NumVecs = 1;
+ break;
+ }
+
+ case AArch64::LD1R_WB_16B_fixed: case AArch64::LD1R_WB_16B_register:
+ case AArch64::LD1R_WB_8H_fixed: case AArch64::LD1R_WB_8H_register:
+ case AArch64::LD1R_WB_4S_fixed: case AArch64::LD1R_WB_4S_register:
+ case AArch64::LD1R_WB_2D_fixed: case AArch64::LD1R_WB_2D_register: {
+ switch (Opc) {
+ case AArch64::LD1R_WB_16B_fixed: case AArch64::LD1R_WB_16B_register:
+ TransferBytes = 1; break;
+ case AArch64::LD1R_WB_8H_fixed: case AArch64::LD1R_WB_8H_register:
+ TransferBytes = 2; break;
+ case AArch64::LD1R_WB_4S_fixed: case AArch64::LD1R_WB_4S_register:
+ TransferBytes = 4; break;
+ case AArch64::LD1R_WB_2D_fixed: case AArch64::LD1R_WB_2D_register:
+ TransferBytes = 8; break;
+ }
+ IsLoadDup = true;
+ NumVecs = 1;
+ break;
+ }
+
+ case AArch64::LD2R_WB_8B_fixed: case AArch64::LD2R_WB_8B_register:
+ case AArch64::LD2R_WB_4H_fixed: case AArch64::LD2R_WB_4H_register:
+ case AArch64::LD2R_WB_2S_fixed: case AArch64::LD2R_WB_2S_register:
+ case AArch64::LD2R_WB_1D_fixed: case AArch64::LD2R_WB_1D_register: {
+ switch (Opc) {
+ case AArch64::LD2R_WB_8B_fixed: case AArch64::LD2R_WB_8B_register:
+ TransferBytes = 2; break;
+ case AArch64::LD2R_WB_4H_fixed: case AArch64::LD2R_WB_4H_register:
+ TransferBytes = 4; break;
+ case AArch64::LD2R_WB_2S_fixed: case AArch64::LD2R_WB_2S_register:
+ TransferBytes = 8; break;
+ case AArch64::LD2R_WB_1D_fixed: case AArch64::LD2R_WB_1D_register:
+ TransferBytes = 16; break;
+ }
+ Is64bitVec = true;
+ IsLoadDup = true;
+ NumVecs = 2;
+ break;
+ }
+
+ case AArch64::LD2R_WB_16B_fixed: case AArch64::LD2R_WB_16B_register:
+ case AArch64::LD2R_WB_8H_fixed: case AArch64::LD2R_WB_8H_register:
+ case AArch64::LD2R_WB_4S_fixed: case AArch64::LD2R_WB_4S_register:
+ case AArch64::LD2R_WB_2D_fixed: case AArch64::LD2R_WB_2D_register: {
+ switch (Opc) {
+ case AArch64::LD2R_WB_16B_fixed: case AArch64::LD2R_WB_16B_register:
+ TransferBytes = 2; break;
+ case AArch64::LD2R_WB_8H_fixed: case AArch64::LD2R_WB_8H_register:
+ TransferBytes = 4; break;
+ case AArch64::LD2R_WB_4S_fixed: case AArch64::LD2R_WB_4S_register:
+ TransferBytes = 8; break;
+ case AArch64::LD2R_WB_2D_fixed: case AArch64::LD2R_WB_2D_register:
+ TransferBytes = 16; break;
+ }
+ IsLoadDup = true;
+ NumVecs = 2;
+ break;
+ }
+
+ case AArch64::LD3R_WB_8B_fixed: case AArch64::LD3R_WB_8B_register:
+ case AArch64::LD3R_WB_4H_fixed: case AArch64::LD3R_WB_4H_register:
+ case AArch64::LD3R_WB_2S_fixed: case AArch64::LD3R_WB_2S_register:
+ case AArch64::LD3R_WB_1D_fixed: case AArch64::LD3R_WB_1D_register: {
+ switch (Opc) {
+ case AArch64::LD3R_WB_8B_fixed: case AArch64::LD3R_WB_8B_register:
+ TransferBytes = 3; break;
+ case AArch64::LD3R_WB_4H_fixed: case AArch64::LD3R_WB_4H_register:
+ TransferBytes = 6; break;
+ case AArch64::LD3R_WB_2S_fixed: case AArch64::LD3R_WB_2S_register:
+ TransferBytes = 12; break;
+ case AArch64::LD3R_WB_1D_fixed: case AArch64::LD3R_WB_1D_register:
+ TransferBytes = 24; break;
+ }
+ Is64bitVec = true;
+ IsLoadDup = true;
+ NumVecs = 3;
+ break;
+ }
+
+ case AArch64::LD3R_WB_16B_fixed: case AArch64::LD3R_WB_16B_register:
+  case AArch64::LD3R_WB_8H_fixed: case AArch64::LD3R_WB_8H_register:
+  case AArch64::LD3R_WB_4S_fixed: case AArch64::LD3R_WB_4S_register:
+ case AArch64::LD3R_WB_2D_fixed: case AArch64::LD3R_WB_2D_register: {
+ switch (Opc) {
+ case AArch64::LD3R_WB_16B_fixed: case AArch64::LD3R_WB_16B_register:
+ TransferBytes = 3; break;
+ case AArch64::LD3R_WB_8H_fixed: case AArch64::LD3R_WB_8H_register:
+ TransferBytes = 6; break;
+ case AArch64::LD3R_WB_4S_fixed: case AArch64::LD3R_WB_4S_register:
+ TransferBytes = 12; break;
+ case AArch64::LD3R_WB_2D_fixed: case AArch64::LD3R_WB_2D_register:
+ TransferBytes = 24; break;
+ }
+ IsLoadDup = true;
+ NumVecs = 3;
+ break;
+ }
+
+ case AArch64::LD4R_WB_8B_fixed: case AArch64::LD4R_WB_8B_register:
+ case AArch64::LD4R_WB_4H_fixed: case AArch64::LD4R_WB_4H_register:
+ case AArch64::LD4R_WB_2S_fixed: case AArch64::LD4R_WB_2S_register:
+ case AArch64::LD4R_WB_1D_fixed: case AArch64::LD4R_WB_1D_register: {
+ switch (Opc) {
+ case AArch64::LD4R_WB_8B_fixed: case AArch64::LD4R_WB_8B_register:
+ TransferBytes = 4; break;
+ case AArch64::LD4R_WB_4H_fixed: case AArch64::LD4R_WB_4H_register:
+ TransferBytes = 8; break;
+ case AArch64::LD4R_WB_2S_fixed: case AArch64::LD4R_WB_2S_register:
+ TransferBytes = 16; break;
+ case AArch64::LD4R_WB_1D_fixed: case AArch64::LD4R_WB_1D_register:
+ TransferBytes = 32; break;
+ }
+ Is64bitVec = true;
+ IsLoadDup = true;
+ NumVecs = 4;
+ break;
+ }
+
+ case AArch64::LD4R_WB_16B_fixed: case AArch64::LD4R_WB_16B_register:
+  case AArch64::LD4R_WB_8H_fixed: case AArch64::LD4R_WB_8H_register:
+  case AArch64::LD4R_WB_4S_fixed: case AArch64::LD4R_WB_4S_register:
+ case AArch64::LD4R_WB_2D_fixed: case AArch64::LD4R_WB_2D_register: {
+ switch (Opc) {
+ case AArch64::LD4R_WB_16B_fixed: case AArch64::LD4R_WB_16B_register:
+ TransferBytes = 4; break;
+ case AArch64::LD4R_WB_8H_fixed: case AArch64::LD4R_WB_8H_register:
+ TransferBytes = 8; break;
+ case AArch64::LD4R_WB_4S_fixed: case AArch64::LD4R_WB_4S_register:
+ TransferBytes = 16; break;
+ case AArch64::LD4R_WB_2D_fixed: case AArch64::LD4R_WB_2D_register:
+ TransferBytes = 32; break;
+ }
+ IsLoadDup = true;
+ NumVecs = 4;
+ break;
+ }
+
+ case AArch64::LD1LN_WB_B_fixed: case AArch64::LD1LN_WB_B_register:
+ case AArch64::LD1LN_WB_H_fixed: case AArch64::LD1LN_WB_H_register:
+ case AArch64::LD1LN_WB_S_fixed: case AArch64::LD1LN_WB_S_register:
+ case AArch64::LD1LN_WB_D_fixed: case AArch64::LD1LN_WB_D_register: {
+ switch (Opc) {
+ case AArch64::LD1LN_WB_B_fixed: case AArch64::LD1LN_WB_B_register:
+ TransferBytes = 1; break;
+ case AArch64::LD1LN_WB_H_fixed: case AArch64::LD1LN_WB_H_register:
+ TransferBytes = 2; break;
+ case AArch64::LD1LN_WB_S_fixed: case AArch64::LD1LN_WB_S_register:
+ TransferBytes = 4; break;
+ case AArch64::LD1LN_WB_D_fixed: case AArch64::LD1LN_WB_D_register:
+ TransferBytes = 8; break;
+ }
+ IsLoad = true;
+ NumVecs = 1;
+ break;
+ }
+
+ case AArch64::LD2LN_WB_B_fixed: case AArch64::LD2LN_WB_B_register:
+ case AArch64::LD2LN_WB_H_fixed: case AArch64::LD2LN_WB_H_register:
+ case AArch64::LD2LN_WB_S_fixed: case AArch64::LD2LN_WB_S_register:
+ case AArch64::LD2LN_WB_D_fixed: case AArch64::LD2LN_WB_D_register: {
+ switch (Opc) {
+ case AArch64::LD2LN_WB_B_fixed: case AArch64::LD2LN_WB_B_register:
+ TransferBytes = 2; break;
+ case AArch64::LD2LN_WB_H_fixed: case AArch64::LD2LN_WB_H_register:
+ TransferBytes = 4; break;
+ case AArch64::LD2LN_WB_S_fixed: case AArch64::LD2LN_WB_S_register:
+ TransferBytes = 8; break;
+ case AArch64::LD2LN_WB_D_fixed: case AArch64::LD2LN_WB_D_register:
+ TransferBytes = 16; break;
+ }
+ IsLoad = true;
+ NumVecs = 2;
+ break;
+ }
+
+ case AArch64::LD3LN_WB_B_fixed: case AArch64::LD3LN_WB_B_register:
+ case AArch64::LD3LN_WB_H_fixed: case AArch64::LD3LN_WB_H_register:
+ case AArch64::LD3LN_WB_S_fixed: case AArch64::LD3LN_WB_S_register:
+ case AArch64::LD3LN_WB_D_fixed: case AArch64::LD3LN_WB_D_register: {
+ switch (Opc) {
+ case AArch64::LD3LN_WB_B_fixed: case AArch64::LD3LN_WB_B_register:
+ TransferBytes = 3; break;
+ case AArch64::LD3LN_WB_H_fixed: case AArch64::LD3LN_WB_H_register:
+ TransferBytes = 6; break;
+ case AArch64::LD3LN_WB_S_fixed: case AArch64::LD3LN_WB_S_register:
+ TransferBytes = 12; break;
+ case AArch64::LD3LN_WB_D_fixed: case AArch64::LD3LN_WB_D_register:
+ TransferBytes = 24; break;
+ }
+ IsLoad = true;
+ NumVecs = 3;
+ break;
+ }
+
+ case AArch64::LD4LN_WB_B_fixed: case AArch64::LD4LN_WB_B_register:
+ case AArch64::LD4LN_WB_H_fixed: case AArch64::LD4LN_WB_H_register:
+ case AArch64::LD4LN_WB_S_fixed: case AArch64::LD4LN_WB_S_register:
+ case AArch64::LD4LN_WB_D_fixed: case AArch64::LD4LN_WB_D_register: {
+ switch (Opc) {
+ case AArch64::LD4LN_WB_B_fixed: case AArch64::LD4LN_WB_B_register:
+ TransferBytes = 4; break;
+ case AArch64::LD4LN_WB_H_fixed: case AArch64::LD4LN_WB_H_register:
+ TransferBytes = 8; break;
+ case AArch64::LD4LN_WB_S_fixed: case AArch64::LD4LN_WB_S_register:
+ TransferBytes = 16; break;
+ case AArch64::LD4LN_WB_D_fixed: case AArch64::LD4LN_WB_D_register:
+ TransferBytes = 32; break;
+ }
+ IsLoad = true;
+ NumVecs = 4;
+ break;
+ }
+
+ case AArch64::ST1LN_WB_B_fixed: case AArch64::ST1LN_WB_B_register:
+ case AArch64::ST1LN_WB_H_fixed: case AArch64::ST1LN_WB_H_register:
+ case AArch64::ST1LN_WB_S_fixed: case AArch64::ST1LN_WB_S_register:
+ case AArch64::ST1LN_WB_D_fixed: case AArch64::ST1LN_WB_D_register: {
+ switch (Opc) {
+ case AArch64::ST1LN_WB_B_fixed: case AArch64::ST1LN_WB_B_register:
+ TransferBytes = 1; break;
+ case AArch64::ST1LN_WB_H_fixed: case AArch64::ST1LN_WB_H_register:
+ TransferBytes = 2; break;
+ case AArch64::ST1LN_WB_S_fixed: case AArch64::ST1LN_WB_S_register:
+ TransferBytes = 4; break;
+ case AArch64::ST1LN_WB_D_fixed: case AArch64::ST1LN_WB_D_register:
+ TransferBytes = 8; break;
+ }
+ NumVecs = 1;
+ break;
+ }
+
+ case AArch64::ST2LN_WB_B_fixed: case AArch64::ST2LN_WB_B_register:
+ case AArch64::ST2LN_WB_H_fixed: case AArch64::ST2LN_WB_H_register:
+ case AArch64::ST2LN_WB_S_fixed: case AArch64::ST2LN_WB_S_register:
+ case AArch64::ST2LN_WB_D_fixed: case AArch64::ST2LN_WB_D_register: {
+ switch (Opc) {
+ case AArch64::ST2LN_WB_B_fixed: case AArch64::ST2LN_WB_B_register:
+ TransferBytes = 2; break;
+ case AArch64::ST2LN_WB_H_fixed: case AArch64::ST2LN_WB_H_register:
+ TransferBytes = 4; break;
+ case AArch64::ST2LN_WB_S_fixed: case AArch64::ST2LN_WB_S_register:
+ TransferBytes = 8; break;
+ case AArch64::ST2LN_WB_D_fixed: case AArch64::ST2LN_WB_D_register:
+ TransferBytes = 16; break;
+ }
+ NumVecs = 2;
+ break;
+ }
+
+ case AArch64::ST3LN_WB_B_fixed: case AArch64::ST3LN_WB_B_register:
+ case AArch64::ST3LN_WB_H_fixed: case AArch64::ST3LN_WB_H_register:
+ case AArch64::ST3LN_WB_S_fixed: case AArch64::ST3LN_WB_S_register:
+ case AArch64::ST3LN_WB_D_fixed: case AArch64::ST3LN_WB_D_register: {
+ switch (Opc) {
+ case AArch64::ST3LN_WB_B_fixed: case AArch64::ST3LN_WB_B_register:
+ TransferBytes = 3; break;
+ case AArch64::ST3LN_WB_H_fixed: case AArch64::ST3LN_WB_H_register:
+ TransferBytes = 6; break;
+ case AArch64::ST3LN_WB_S_fixed: case AArch64::ST3LN_WB_S_register:
+ TransferBytes = 12; break;
+ case AArch64::ST3LN_WB_D_fixed: case AArch64::ST3LN_WB_D_register:
+ TransferBytes = 24; break;
+ }
+ NumVecs = 3;
+ break;
+ }
+
+ case AArch64::ST4LN_WB_B_fixed: case AArch64::ST4LN_WB_B_register:
+ case AArch64::ST4LN_WB_H_fixed: case AArch64::ST4LN_WB_H_register:
+ case AArch64::ST4LN_WB_S_fixed: case AArch64::ST4LN_WB_S_register:
+ case AArch64::ST4LN_WB_D_fixed: case AArch64::ST4LN_WB_D_register: {
+ switch (Opc) {
+ case AArch64::ST4LN_WB_B_fixed: case AArch64::ST4LN_WB_B_register:
+ TransferBytes = 4; break;
+ case AArch64::ST4LN_WB_H_fixed: case AArch64::ST4LN_WB_H_register:
+ TransferBytes = 8; break;
+ case AArch64::ST4LN_WB_S_fixed: case AArch64::ST4LN_WB_S_register:
+ TransferBytes = 16; break;
+ case AArch64::ST4LN_WB_D_fixed: case AArch64::ST4LN_WB_D_register:
+ TransferBytes = 32; break;
+ }
+ NumVecs = 4;
+ break;
+ }
+
+ default:
+ return MCDisassembler::Fail;
+ } // End of switch (Opc)
+
+ unsigned Rt = fieldFromInstruction(Insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(Insn, 5, 5);
+ unsigned Rm = fieldFromInstruction(Insn, 16, 5);
+
+ // Decode post-index of load duplicate lane
+ if (IsLoadDup) {
+ switch (NumVecs) {
+ case 1:
+ Is64bitVec ? DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 2:
+ Is64bitVec ? DecodeDPairRegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeQPairRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 3:
+ Is64bitVec ? DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 4:
+ Is64bitVec ? DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder)
+ : DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder);
+ }
+
+    // Decode the write-back register, which is equal to Rn; Rn is added
+    // twice because the write-back operand is tied to the base register.
+ DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+ DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+
+    if (Rm == 31) // If Rm is 0b11111, add the number of transferred bytes
+ Inst.addOperand(MCOperand::CreateImm(TransferBytes));
+ else // Decode Rm
+ DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
+
+ return MCDisassembler::Success;
+ }
+
+ // Decode post-index of load/store lane
+ // Loads have a vector list as output.
+ if (IsLoad) {
+ switch (NumVecs) {
+ case 1:
+ DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 2:
+ DecodeQPairRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 3:
+ DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 4:
+ DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder);
+ }
+ }
+
+  // Decode the write-back register, which is equal to Rn; Rn is added twice
+  // because the write-back operand is tied to the base register.
+ DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+ DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+
+  if (Rm == 31) // If Rm is 0b11111, add the number of transferred bytes
+ Inst.addOperand(MCOperand::CreateImm(TransferBytes));
+ else // Decode Rm
+ DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
+
+ // Decode the source vector list.
+ switch (NumVecs) {
+ case 1:
+ DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 2:
+ DecodeQPairRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 3:
+ DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder);
+ break;
+ case 4:
+ DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder);
+ }
+
+ // Decode lane
+ unsigned Q = fieldFromInstruction(Insn, 30, 1);
+ unsigned S = fieldFromInstruction(Insn, 10, 3);
+ unsigned lane = 0;
+  // Calculate the number of lanes from the number of vectors and the
+  // transferred bytes: NumLanes = 16 bytes / bytes of each lane.
+  // (A standalone sketch of this reconstruction follows this function.)
+ unsigned NumLanes = 16 / (TransferBytes / NumVecs);
+ switch (NumLanes) {
+  case 16: // A vector has 16 lanes, each lane is 1 byte.
+ lane = (Q << 3) | S;
+ break;
+ case 8:
+ lane = (Q << 2) | (S >> 1);
+ break;
+ case 4:
+ lane = (Q << 1) | (S >> 2);
+ break;
+ case 2:
+ lane = Q;
+ break;
+ }
+ Inst.addOperand(MCOperand::CreateImm(lane));
+
+ return MCDisassembler::Success;
+}
+
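
A worked sketch of the lane-index reconstruction above, assuming only the Q/S bit layout described in the code; the helper is illustrative, not LLVM API:

#include <cassert>

// Sketch: the lane index is rebuilt from the Q bit (bit 30) and the 3-bit S
// field; how many S bits are used depends on the element size.
static unsigned decodeLane(unsigned Q, unsigned S, unsigned NumLanes) {
  switch (NumLanes) {
  case 16: return (Q << 3) | S;        // 1-byte lanes
  case 8:  return (Q << 2) | (S >> 1); // 2-byte lanes
  case 4:  return (Q << 1) | (S >> 2); // 4-byte lanes
  default: return Q;                   // 8-byte lanes
  }
}

int main() {
  assert(decodeLane(1, 5, 16) == 13); // e.g. ld1 {v0.b}[13], [x0], #1
  assert(decodeLane(0, 4, 4) == 1);   // e.g. ld1 {v0.s}[1], [x0], #4
}
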
+static DecodeStatus DecodeSHLLInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned Rd = fieldFromInstruction(Insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(Insn, 5, 5);
+ unsigned size = fieldFromInstruction(Insn, 22, 2);
+ unsigned Q = fieldFromInstruction(Insn, 30, 1);
+
+ DecodeFPR128RegisterClass(Inst, Rd, Address, Decoder);
+
+ if(Q)
+ DecodeFPR128RegisterClass(Inst, Rn, Address, Decoder);
+ else
+ DecodeFPR64RegisterClass(Inst, Rn, Address, Decoder);
+
+ switch (size) {
+ case 0:
+ Inst.addOperand(MCOperand::CreateImm(8));
+ break;
+ case 1:
+ Inst.addOperand(MCOperand::CreateImm(16));
+ break;
+ case 2:
+ Inst.addOperand(MCOperand::CreateImm(32));
+ break;
+ default :
+ return MCDisassembler::Fail;
+ }
+ return MCDisassembler::Success;
+}
diff --git a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
index 82ce80c8b1a1..0438de3152e1 100644
--- a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
+++ b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
@@ -368,6 +368,14 @@ AArch64InstPrinter::printSImm7ScaledOperand(const MCInst *MI, unsigned OpNum,
O << "#" << (Imm * MemScale);
}
+void AArch64InstPrinter::printVPRRegister(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned Reg = MI->getOperand(OpNo).getReg();
+ std::string Name = getRegisterName(Reg);
+ Name[0] = 'v';
+ O << Name;
+}
+
void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
@@ -406,3 +414,126 @@ void AArch64InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
printAnnotation(O, Annot);
}
+
+template <A64SE::ShiftExtSpecifiers Ext, bool isHalf>
+void AArch64InstPrinter::printNeonMovImmShiftOperand(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(OpNum);
+
+ assert(MO.isImm() &&
+ "Immediate operand required for Neon vector immediate inst.");
+
+ bool IsLSL = false;
+ if (Ext == A64SE::LSL)
+ IsLSL = true;
+ else if (Ext != A64SE::MSL)
+ llvm_unreachable("Invalid shift specifier in movi instruction");
+
+ int64_t Imm = MO.getImm();
+
+  // MSL and LSLH accept an encoded shift amount of 0 or 1.
+ if ((!IsLSL || (IsLSL && isHalf)) && Imm != 0 && Imm != 1)
+ llvm_unreachable("Invalid shift amount in movi instruction");
+
+  // LSL accepts an encoded shift amount of 0, 1, 2 or 3.
+ if (IsLSL && (Imm < 0 || Imm > 3))
+ llvm_unreachable("Invalid shift amount in movi instruction");
+
+  // Print the shift amount as a multiple of 8; the MSL encoded shift amounts
+  // 0 and 1 are printed as 8 and 16.
+ if (!IsLSL)
+ Imm++;
+ Imm *= 8;
+
+ // LSL #0 is not printed
+ if (IsLSL) {
+ if (Imm == 0)
+ return;
+ O << ", lsl";
+ } else
+ O << ", msl";
+
+ O << " #" << Imm;
+}
+
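
A small sketch of the printed shift amount computed above, assuming only the encoded-value convention used in this function; the helper name is hypothetical:

#include <cassert>

// Sketch: LSL prints the encoded amount times 8 (and omits "#0"); MSL encodes
// shifts of 8 and 16 as 0 and 1, so the value is bumped before scaling.
static int printedShiftAmount(bool IsLSL, int Encoded) {
  int Imm = Encoded;
  if (!IsLSL)
    ++Imm; // MSL: encoded 0 -> 8, encoded 1 -> 16
  return Imm * 8;
}

int main() {
  assert(printedShiftAmount(false, 1) == 16); // movi v0.4s, #1, msl #16
  assert(printedShiftAmount(true, 2) == 16);  // movi v0.4s, #1, lsl #16
}
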
+void AArch64InstPrinter::printNeonUImm0Operand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &o) {
+ o << "#0x0";
+}
+
+void AArch64InstPrinter::printUImmHexOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ const MCOperand &MOUImm = MI->getOperand(OpNum);
+
+ assert(MOUImm.isImm() &&
+ "Immediate operand required for Neon vector immediate inst.");
+
+ unsigned Imm = MOUImm.getImm();
+
+ O << "#0x";
+ O.write_hex(Imm);
+}
+
+void AArch64InstPrinter::printUImmBareOperand(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ const MCOperand &MOUImm = MI->getOperand(OpNum);
+
+ assert(MOUImm.isImm()
+ && "Immediate operand required for Neon vector immediate inst.");
+
+ unsigned Imm = MOUImm.getImm();
+ O << Imm;
+}
+
+void AArch64InstPrinter::printNeonUImm64MaskOperand(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ const MCOperand &MOUImm8 = MI->getOperand(OpNum);
+
+ assert(MOUImm8.isImm() &&
+ "Immediate operand required for Neon vector immediate bytemask inst.");
+
+ uint32_t UImm8 = MOUImm8.getImm();
+ uint64_t Mask = 0;
+
+  // Each set bit of UImm8 selects an all-ones (0xff) byte, each clear bit a
+  // zero byte, of the 64-bit mask.
+ for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
+ if ((UImm8 >> ByteNum) & 1)
+ Mask |= (uint64_t)0xff << (8 * ByteNum);
+ }
+
+ O << "#0x";
+ O.write_hex(Mask);
+}
+
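
The byte-mask expansion above can be exercised in isolation; a minimal sketch with a hypothetical helper name:

#include <cassert>
#include <cstdint>

// Sketch: each set bit of the 8-bit immediate selects an all-ones byte of the
// printed 64-bit mask, so 0x81 expands to 0xff000000000000ff.
static uint64_t expandByteMask(uint32_t UImm8) {
  uint64_t Mask = 0;
  for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum)
    if ((UImm8 >> ByteNum) & 1)
      Mask |= (uint64_t)0xff << (8 * ByteNum);
  return Mask;
}

int main() {
  assert(expandByteMask(0x81) == 0xff000000000000ffULL);
  assert(expandByteMask(0x0f) == 0x00000000ffffffffULL);
}
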
+// If Count > 1, there are two valid kinds of vector list:
+// (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
+// (2) {Vn.layout - Vm.layout}
+// We choose the first kind as output.
+template <A64Layout::VectorLayout Layout, unsigned Count>
+void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ assert(Count >= 1 && Count <= 4 && "Invalid Number of Vectors");
+
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ std::string LayoutStr = A64VectorLayoutToString(Layout);
+ O << "{";
+ if (Count > 1) { // Print sub registers separately
+ bool IsVec64 = (Layout < A64Layout::VL_16B);
+ unsigned SubRegIdx = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
+ for (unsigned I = 0; I < Count; I++) {
+ std::string Name = getRegisterName(MRI.getSubReg(Reg, SubRegIdx++));
+ Name[0] = 'v';
+ O << Name << LayoutStr;
+ if (I != Count - 1)
+ O << ", ";
+ }
+ } else { // Print the register directly when NumVecs is 1.
+ std::string Name = getRegisterName(Reg);
+ Name[0] = 'v';
+ O << Name << LayoutStr;
+ }
+ O << "}";
+}
diff --git a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
index 639fa869c016..37b7273438dd 100644
--- a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
+++ b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
@@ -157,6 +157,7 @@ public:
void printRegExtendOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O, A64SE::ShiftExtSpecifiers Ext);
+ void printVPRRegister(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
@@ -164,9 +165,18 @@ public:
return RegNo == AArch64::XSP || RegNo == AArch64::WSP;
}
-
+ template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf>
+ void printNeonMovImmShiftOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printNeonUImm0Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printUImmHexOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printUImmBareOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printNeonUImm64MaskOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+
+ template <A64Layout::VectorLayout Layout, unsigned Count>
+ void printVectorList(const MCInst *MI, unsigned OpNum, raw_ostream &O);
};
-
}
#endif
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index a3373b1087bb..8a9077c1cab4 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -578,8 +578,8 @@ static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
}
MCAsmBackend *
-llvm::createAArch64AsmBackend(const Target &T, StringRef TT, StringRef CPU) {
+llvm::createAArch64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
Triple TheTriple(TT);
-
return new ELFAArch64AsmBackend(T, TT, TheTriple.getOS());
}
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
index 3b811df212d1..a64c463f9e5c 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
@@ -55,11 +55,10 @@ namespace {
/// by MachO. Beware!
class AArch64ELFStreamer : public MCELFStreamer {
public:
- AArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter)
- : MCELFStreamer(Context, TAB, OS, Emitter),
- MappingSymbolCounter(0), LastEMS(EMS_None) {
- }
+ AArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS,
+ MCCodeEmitter *Emitter)
+ : MCELFStreamer(Context, 0, TAB, OS, Emitter), MappingSymbolCounter(0),
+ LastEMS(EMS_None) {}
~AArch64ELFStreamer() {}
@@ -85,18 +84,17 @@ public:
/// This is one of the functions used to emit data into an ELF section, so the
/// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
/// if necessary.
- virtual void EmitBytes(StringRef Data, unsigned AddrSpace) {
+ virtual void EmitBytes(StringRef Data) {
EmitDataMappingSymbol();
- MCELFStreamer::EmitBytes(Data, AddrSpace);
+ MCELFStreamer::EmitBytes(Data);
}
/// This is one of the functions used to emit data into an ELF section, so the
/// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
/// if necessary.
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace) {
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) {
EmitDataMappingSymbol();
- MCELFStreamer::EmitValueImpl(Value, Size, AddrSpace);
+ MCELFStreamer::EmitValueImpl(Value, Size);
}
private:
@@ -130,7 +128,7 @@ private:
MCELF::SetType(SD, ELF::STT_NOTYPE);
MCELF::SetBinding(SD, ELF::STB_LOCAL);
SD.setExternal(false);
- Symbol->setSection(*getCurrentSection().first);
+ AssignSection(Symbol, getCurrentSection().first);
const MCExpr *Value = MCSymbolRefExpr::Create(Start, getContext());
Symbol->setVariableValue(Value);
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
index 8ec8cbf1c525..add874c12019 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
@@ -31,11 +31,12 @@ AArch64ELFMCAsmInfo::AArch64ELFMCAsmInfo() {
UseDataRegionDirectives = true;
- WeakRefDirective = "\t.weak\t";
-
HasLEB128 = true;
SupportsDebugInformation = true;
// Exceptions handling
ExceptionsType = ExceptionHandling::DwarfCFI;
}
+
+// Pin the vtable to this file.
+void AArch64ELFMCAsmInfo::anchor() {}
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
index a20bc471c20d..d1dd285c832c 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
@@ -14,13 +14,15 @@
#ifndef LLVM_AARCH64TARGETASMINFO_H
#define LLVM_AARCH64TARGETASMINFO_H
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
- struct AArch64ELFMCAsmInfo : public MCAsmInfo {
- explicit AArch64ELFMCAsmInfo();
- };
+struct AArch64ELFMCAsmInfo : public MCAsmInfoELF {
+ explicit AArch64ELFMCAsmInfo();
+private:
+ virtual void anchor();
+};
} // namespace llvm
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
index a5c591eee800..b41c566f612b 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -59,6 +59,23 @@ public:
unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
// Labels are handled mostly the same way: a symbol is needed, and
// just gets some fixup attached.
@@ -152,10 +169,10 @@ getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
switch (Expr->getKind()) {
default: llvm_unreachable("Unexpected operand modifier");
case AArch64MCExpr::VK_AARCH64_LO12: {
- unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
- AArch64::fixup_a64_ldst16_lo12,
- AArch64::fixup_a64_ldst32_lo12,
- AArch64::fixup_a64_ldst64_lo12,
+ static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
+ AArch64::fixup_a64_ldst16_lo12,
+ AArch64::fixup_a64_ldst32_lo12,
+ AArch64::fixup_a64_ldst64_lo12,
AArch64::fixup_a64_ldst128_lo12 };
assert(MemSize <= 16 && "Invalid fixup for operation");
FixupKind = FixupsBySize[Log2_32(MemSize)];
@@ -166,19 +183,23 @@ getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
break;
case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
- unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_dtprel_lo12,
- AArch64::fixup_a64_ldst16_dtprel_lo12,
- AArch64::fixup_a64_ldst32_dtprel_lo12,
- AArch64::fixup_a64_ldst64_dtprel_lo12 };
+ static const unsigned FixupsBySize[] = {
+ AArch64::fixup_a64_ldst8_dtprel_lo12,
+ AArch64::fixup_a64_ldst16_dtprel_lo12,
+ AArch64::fixup_a64_ldst32_dtprel_lo12,
+ AArch64::fixup_a64_ldst64_dtprel_lo12
+ };
assert(MemSize <= 8 && "Invalid fixup for operation");
FixupKind = FixupsBySize[Log2_32(MemSize)];
break;
}
case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
- unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
- AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
- AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
- AArch64::fixup_a64_ldst64_dtprel_lo12_nc };
+ static const unsigned FixupsBySize[] = {
+ AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
+ AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
+ AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
+ AArch64::fixup_a64_ldst64_dtprel_lo12_nc
+ };
assert(MemSize <= 8 && "Invalid fixup for operation");
FixupKind = FixupsBySize[Log2_32(MemSize)];
break;
@@ -188,19 +209,23 @@ getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
break;
case AArch64MCExpr::VK_AARCH64_TPREL_LO12:{
- unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_tprel_lo12,
- AArch64::fixup_a64_ldst16_tprel_lo12,
- AArch64::fixup_a64_ldst32_tprel_lo12,
- AArch64::fixup_a64_ldst64_tprel_lo12 };
+ static const unsigned FixupsBySize[] = {
+ AArch64::fixup_a64_ldst8_tprel_lo12,
+ AArch64::fixup_a64_ldst16_tprel_lo12,
+ AArch64::fixup_a64_ldst32_tprel_lo12,
+ AArch64::fixup_a64_ldst64_tprel_lo12
+ };
assert(MemSize <= 8 && "Invalid fixup for operation");
FixupKind = FixupsBySize[Log2_32(MemSize)];
break;
}
case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
- unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_tprel_lo12_nc,
- AArch64::fixup_a64_ldst16_tprel_lo12_nc,
- AArch64::fixup_a64_ldst32_tprel_lo12_nc,
- AArch64::fixup_a64_ldst64_tprel_lo12_nc };
+ static const unsigned FixupsBySize[] = {
+ AArch64::fixup_a64_ldst8_tprel_lo12_nc,
+ AArch64::fixup_a64_ldst16_tprel_lo12_nc,
+ AArch64::fixup_a64_ldst32_tprel_lo12_nc,
+ AArch64::fixup_a64_ldst64_tprel_lo12_nc
+ };
assert(MemSize <= 8 && "Invalid fixup for operation");
FixupKind = FixupsBySize[Log2_32(MemSize)];
break;
@@ -302,6 +327,45 @@ AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
}
+unsigned AArch64MCCodeEmitter::getShiftRightImm8(
+ const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
+ return 8 - MI.getOperand(Op).getImm();
+}
+
+unsigned AArch64MCCodeEmitter::getShiftRightImm16(
+ const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
+ return 16 - MI.getOperand(Op).getImm();
+}
+
+unsigned AArch64MCCodeEmitter::getShiftRightImm32(
+ const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
+ return 32 - MI.getOperand(Op).getImm();
+}
+
+unsigned AArch64MCCodeEmitter::getShiftRightImm64(
+ const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
+ return 64 - MI.getOperand(Op).getImm();
+}
+
+unsigned AArch64MCCodeEmitter::getShiftLeftImm8(
+ const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
+ return MI.getOperand(Op).getImm() - 8;
+}
+
+unsigned AArch64MCCodeEmitter::getShiftLeftImm16(
+ const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
+ return MI.getOperand(Op).getImm() - 16;
+}
+
+unsigned AArch64MCCodeEmitter::getShiftLeftImm32(
+ const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
+ return MI.getOperand(Op).getImm() - 32;
+}
+
+unsigned AArch64MCCodeEmitter::getShiftLeftImm64(
+ const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
+ return MI.getOperand(Op).getImm() - 64;
+}
template<AArch64::Fixups fixupDesired> unsigned
AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
@@ -346,7 +410,7 @@ AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
const MCOperand &MO,
SmallVectorImpl<MCFixup> &Fixups) const {
if (MO.isReg()) {
- return Ctx.getRegisterInfo().getEncodingValue(MO.getReg());
+ return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
} else if (MO.isImm()) {
return static_cast<unsigned>(MO.getImm());
}
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
index 819eeadb44e4..58fc95c2eaf6 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -40,7 +40,7 @@ MCSubtargetInfo *AArch64_MC::createAArch64MCSubtargetInfo(StringRef TT,
StringRef CPU,
StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
- InitAArch64MCSubtargetInfo(X, TT, CPU, "");
+ InitAArch64MCSubtargetInfo(X, TT, CPU, FS);
return X;
}
@@ -57,13 +57,14 @@ static MCRegisterInfo *createAArch64MCRegisterInfo(StringRef Triple) {
return X;
}
-static MCAsmInfo *createAArch64MCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI,
+ StringRef TT) {
Triple TheTriple(TT);
MCAsmInfo *MAI = new AArch64ELFMCAsmInfo();
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(AArch64::XSP, 0);
- MAI->addInitialFrameState(0, Dst, Src);
+ unsigned Reg = MRI.getDwarfRegNum(AArch64::XSP, true);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 0);
+ MAI->addInitialFrameState(Inst);
return MAI;
}
@@ -135,17 +136,17 @@ public:
return MCInstrAnalysis::isConditionalBranch(Inst);
}
- uint64_t evaluateBranch(const MCInst &Inst, uint64_t Addr,
- uint64_t Size) const {
+ bool evaluateBranch(const MCInst &Inst, uint64_t Addr,
+ uint64_t Size, uint64_t &Target) const {
unsigned LblOperand = Inst.getOpcode() == AArch64::Bcc ? 1 : 0;
// FIXME: We only handle PCRel branches for now.
if (Info->get(Inst.getOpcode()).OpInfo[LblOperand].OperandType
!= MCOI::OPERAND_PCREL)
- return -1ULL;
+ return false;
int64_t Imm = Inst.getOperand(LblOperand).getImm();
-
- return Addr + Imm;
+ Target = Addr + Imm;
+ return true;
}
};
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
index 3849fe379513..670e657ec73c 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
@@ -43,8 +43,9 @@ MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
MCObjectWriter *createAArch64ELFObjectWriter(raw_ostream &OS,
uint8_t OSABI);
-MCAsmBackend *createAArch64AsmBackend(const Target &T, StringRef TT,
- StringRef CPU);
+MCAsmBackend *createAArch64AsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
} // End llvm namespace
diff --git a/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp b/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp
index fc706a4cd42d..377b533be898 100644
--- a/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp
+++ b/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp
@@ -20,5 +20,5 @@ Target llvm::TheAArch64Target;
extern "C" void LLVMInitializeAArch64TargetInfo() {
RegisterTarget<Triple::aarch64, /*HasJIT=*/true>
- X(TheAArch64Target, "aarch64", "AArch64");
+ X(TheAArch64Target, "aarch64", "AArch64 (ARM 64-bit target)");
}
diff --git a/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp b/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp
index bedccb5438f6..2a97cd632560 100644
--- a/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp
+++ b/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp
@@ -972,7 +972,7 @@ bool A64Imms::isLogicalImm(unsigned RegWidth, uint64_t Imm, uint32_t &Bits) {
// Now we have to work out the amount of rotation needed. The first part of
// this calculation is actually independent of RepeatWidth, but the complex
// case will depend on it.
- Rotation = CountTrailingZeros_64(Imm);
+ Rotation = countTrailingZeros(Imm);
if (Rotation == 0) {
// There were no leading zeros, which means it's either in place or there
// are 1s at each end (e.g. 0x8003 needs rotating).
@@ -1105,3 +1105,69 @@ bool A64Imms::isOnlyMOVNImm(int RegWidth, uint64_t Value,
return isMOVNImm(RegWidth, Value, UImm16, Shift);
}
+
+// decodeNeonModShiftImm - Decode a Neon OpCmode value into the
+// shift amount and the shift type (shift zeros or ones in), and
+// return whether the OpCmode value implies a shift operation.
+bool A64Imms::decodeNeonModShiftImm(unsigned OpCmode, unsigned &ShiftImm,
+ unsigned &ShiftOnesIn) {
+ ShiftImm = 0;
+ ShiftOnesIn = false;
+ bool HasShift = true;
+
+ if (OpCmode == 0xe) {
+ // movi byte
+ HasShift = false;
+ } else if (OpCmode == 0x1e) {
+ // movi 64-bit bytemask
+ HasShift = false;
+ } else if ((OpCmode & 0xc) == 0x8) {
+ // shift zeros, per halfword
+ ShiftImm = ((OpCmode & 0x2) >> 1);
+ } else if ((OpCmode & 0x8) == 0) {
+ // shift zeros, per word
+ ShiftImm = ((OpCmode & 0x6) >> 1);
+ } else if ((OpCmode & 0xe) == 0xc) {
+ // shift ones, per word
+ ShiftOnesIn = true;
+ ShiftImm = (OpCmode & 0x1);
+ } else {
+ // per byte, per bytemask
+ llvm_unreachable("Unsupported Neon modified immediate");
+ }
+
+ return HasShift;
+}
+
+// decodeNeonModImm - Decode a NEON modified immediate and OpCmode values
+// into the element value and the element size in bits.
+uint64_t A64Imms::decodeNeonModImm(unsigned Val, unsigned OpCmode,
+ unsigned &EltBits) {
+ uint64_t DecodedVal = Val;
+ EltBits = 0;
+
+ if (OpCmode == 0xe) {
+ // movi byte
+ EltBits = 8;
+ } else if (OpCmode == 0x1e) {
+ // movi 64-bit bytemask
+ DecodedVal = 0;
+ for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
+ if ((Val >> ByteNum) & 1)
+ DecodedVal |= (uint64_t)0xff << (8 * ByteNum);
+ }
+ EltBits = 64;
+ } else if ((OpCmode & 0xc) == 0x8) {
+ // shift zeros, per halfword
+ EltBits = 16;
+ } else if ((OpCmode & 0x8) == 0) {
+ // shift zeros, per word
+ EltBits = 32;
+ } else if ((OpCmode & 0xe) == 0xc) {
+ // shift ones, per word
+ EltBits = 32;
+ } else {
+ llvm_unreachable("Unsupported Neon modified immediate");
+ }
+ return DecodedVal;
+}
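
A minimal usage sketch of the two decode helpers above, assuming the header is included as "Utils/AArch64BaseInfo.h" and linked against LLVMAArch64Utils; the function name is hypothetical and the expected values are read straight off the code in this hunk.

#include "Utils/AArch64BaseInfo.h"
#include <cassert>
#include <cstdint>

static void checkNeonModImmDecoding() {
  // OpCmode 0x1e is the 64-bit bytemask form: each set bit of Val expands to
  // a 0xff byte, so Val = 0x0f decodes to 0x00000000ffffffff with 64-bit lanes.
  unsigned EltBits = 0;
  uint64_t V = llvm::A64Imms::decodeNeonModImm(0x0f, 0x1e, EltBits);
  assert(V == 0x00000000ffffffffULL && EltBits == 64);

  // OpCmode 0xc is "shift ones, per word": a shift is implied, ones are
  // shifted in, and bit 0 of OpCmode selects the shift amount (0 here).
  unsigned ShiftImm = 0, ShiftOnesIn = 0;
  bool HasShift = llvm::A64Imms::decodeNeonModShiftImm(0xc, ShiftImm, ShiftOnesIn);
  assert(HasShift && ShiftOnesIn && ShiftImm == 0);
}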
diff --git a/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/lib/Target/AArch64/Utils/AArch64BaseInfo.h
index 9a1ca6127ae9..ce970b0a8aba 100644
--- a/lib/Target/AArch64/Utils/AArch64BaseInfo.h
+++ b/lib/Target/AArch64/Utils/AArch64BaseInfo.h
@@ -289,6 +289,7 @@ namespace A64SE {
enum ShiftExtSpecifiers {
Invalid = -1,
LSL,
+ MSL,
LSR,
ASR,
ROR,
@@ -305,6 +306,65 @@ namespace A64SE {
};
}
+namespace A64Layout {
+ enum VectorLayout {
+ Invalid = -1,
+ VL_8B,
+ VL_4H,
+ VL_2S,
+ VL_1D,
+
+ VL_16B,
+ VL_8H,
+ VL_4S,
+ VL_2D,
+
+ // Bare layout for the 128-bit vector
+ // (shows only ".b", ".h", ".s", ".d", without the vector element count)
+ VL_B,
+ VL_H,
+ VL_S,
+ VL_D
+ };
+}
+
+inline static const char *
+A64VectorLayoutToString(A64Layout::VectorLayout Layout) {
+ switch (Layout) {
+ case A64Layout::VL_8B: return ".8b";
+ case A64Layout::VL_4H: return ".4h";
+ case A64Layout::VL_2S: return ".2s";
+ case A64Layout::VL_1D: return ".1d";
+ case A64Layout::VL_16B: return ".16b";
+ case A64Layout::VL_8H: return ".8h";
+ case A64Layout::VL_4S: return ".4s";
+ case A64Layout::VL_2D: return ".2d";
+ case A64Layout::VL_B: return ".b";
+ case A64Layout::VL_H: return ".h";
+ case A64Layout::VL_S: return ".s";
+ case A64Layout::VL_D: return ".d";
+ default: llvm_unreachable("Unknown Vector Layout");
+ }
+}
+
+inline static A64Layout::VectorLayout
+A64StringToVectorLayout(StringRef LayoutStr) {
+ return StringSwitch<A64Layout::VectorLayout>(LayoutStr)
+ .Case(".8b", A64Layout::VL_8B)
+ .Case(".4h", A64Layout::VL_4H)
+ .Case(".2s", A64Layout::VL_2S)
+ .Case(".1d", A64Layout::VL_1D)
+ .Case(".16b", A64Layout::VL_16B)
+ .Case(".8h", A64Layout::VL_8H)
+ .Case(".4s", A64Layout::VL_4S)
+ .Case(".2d", A64Layout::VL_2D)
+ .Case(".b", A64Layout::VL_B)
+ .Case(".h", A64Layout::VL_H)
+ .Case(".s", A64Layout::VL_S)
+ .Case(".d", A64Layout::VL_D)
+ .Default(A64Layout::Invalid);
+}
+
namespace A64SysReg {
enum SysRegROValues {
MDCCSR_EL0 = 0x9808, // 10 011 0000 0001 000
@@ -1068,7 +1128,10 @@ namespace A64Imms {
// MOVN but *not* with a MOVZ (because that would take priority).
bool isOnlyMOVNImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift);
-}
+ uint64_t decodeNeonModImm(unsigned Val, unsigned OpCmode, unsigned &EltBits);
+ bool decodeNeonModShiftImm(unsigned OpCmode, unsigned &ShiftImm,
+ unsigned &ShiftOnesIn);
+ }
} // end namespace llvm;
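
A short round-trip through the layout helpers added to this header above, illustrative only; it assumes the same "Utils/AArch64BaseInfo.h" include path and a hypothetical test function name.

#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>

static void checkVectorLayoutRoundTrip() {
  // ".4s" names a 128-bit vector of four 32-bit lanes and should survive a
  // string -> enum -> string round trip.
  llvm::A64Layout::VectorLayout L = llvm::A64StringToVectorLayout(".4s");
  assert(L == llvm::A64Layout::VL_4S);
  assert(llvm::StringRef(llvm::A64VectorLayoutToString(L)) == ".4s");
  // Unknown layout strings map to the Invalid sentinel.
  assert(llvm::A64StringToVectorLayout(".3h") == llvm::A64Layout::Invalid);
}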
diff --git a/lib/Target/AArch64/Utils/CMakeLists.txt b/lib/Target/AArch64/Utils/CMakeLists.txt
index 2c28348d7d81..2348e44f850b 100644
--- a/lib/Target/AArch64/Utils/CMakeLists.txt
+++ b/lib/Target/AArch64/Utils/CMakeLists.txt
@@ -3,3 +3,5 @@ include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/
add_llvm_library(LLVMAArch64Utils
AArch64BaseInfo.cpp
)
+
+add_dependencies(LLVMAArch64Utils AArch64CommonTableGen)
diff --git a/lib/Target/ARM/A15SDOptimizer.cpp b/lib/Target/ARM/A15SDOptimizer.cpp
index f0d4dbe2bfb3..ff585b41a2aa 100644
--- a/lib/Target/ARM/A15SDOptimizer.cpp
+++ b/lib/Target/ARM/A15SDOptimizer.cpp
@@ -615,7 +615,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) {
SmallVector<unsigned, 8> Defs = getReadDPRs(MI);
bool Modified = false;
- for (SmallVector<unsigned, 8>::iterator I = Defs.begin(), E = Defs.end();
+ for (SmallVectorImpl<unsigned>::iterator I = Defs.begin(), E = Defs.end();
I != E; ++I) {
// Follow the def-use chain for this DPR through COPYs, and also through
// PHIs (which are essentially multi-way COPYs). It is because of PHIs that
@@ -630,7 +630,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) {
elideCopiesAndPHIs(Def, DefSrcs);
- for (SmallVector<MachineInstr*, 8>::iterator II = DefSrcs.begin(),
+ for (SmallVectorImpl<MachineInstr *>::iterator II = DefSrcs.begin(),
EE = DefSrcs.end(); II != EE; ++II) {
MachineInstr *MI = *II;
@@ -655,8 +655,15 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) {
if (NewReg != 0) {
Modified = true;
- for (SmallVector<MachineOperand*, 8>::const_iterator I = Uses.begin(),
+ for (SmallVectorImpl<MachineOperand *>::const_iterator I = Uses.begin(),
E = Uses.end(); I != E; ++I) {
+ // Make sure to constrain the register class of the new register to
+ // match what we're replacing. Otherwise we can optimize a DPR_VFP2
+ // reference into a plain DPR, and that will end poorly. NewReg is
+ // always virtual here, so there will always be a matching subclass
+ // to find.
+ MRI->constrainRegClass(NewReg, MRI->getRegClass((*I)->getReg()));
+
DEBUG(dbgs() << "Replacing operand "
<< **I << " with "
<< PrintReg(NewReg) << "\n");
diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td
index 2d7470919dc4..36e5680ca4e0 100644
--- a/lib/Target/ARM/ARM.td
+++ b/lib/Target/ARM/ARM.td
@@ -38,12 +38,16 @@ def FeatureNEON : SubtargetFeature<"neon", "HasNEON", "true",
def FeatureThumb2 : SubtargetFeature<"thumb2", "HasThumb2", "true",
"Enable Thumb2 instructions">;
def FeatureNoARM : SubtargetFeature<"noarm", "NoARM", "true",
- "Does not support ARM mode execution">;
+ "Does not support ARM mode execution",
+ [ModeThumb]>;
def FeatureFP16 : SubtargetFeature<"fp16", "HasFP16", "true",
"Enable half-precision floating point">;
def FeatureVFP4 : SubtargetFeature<"vfp4", "HasVFPv4", "true",
"Enable VFP4 instructions",
[FeatureVFP3, FeatureFP16]>;
+def FeatureFPARMv8 : SubtargetFeature<"fp-armv8", "HasFPARMv8",
+ "true", "Enable ARMv8 FP",
+ [FeatureVFP4]>;
def FeatureD16 : SubtargetFeature<"d16", "HasD16", "true",
"Restrict VFP3 to 16 double registers">;
def FeatureHWDiv : SubtargetFeature<"hwdiv", "HasHardwareDivide", "true",
@@ -59,8 +63,15 @@ def FeatureSlowFPBrcc : SubtargetFeature<"slow-fp-brcc", "SlowFPBrcc", "true",
"FP compare + branch is slow">;
def FeatureVFPOnlySP : SubtargetFeature<"fp-only-sp", "FPOnlySP", "true",
"Floating point unit supports single precision only">;
+def FeaturePerfMon : SubtargetFeature<"perfmon", "HasPerfMon", "true",
+ "Enable support for Performance Monitor extensions">;
def FeatureTrustZone : SubtargetFeature<"trustzone", "HasTrustZone", "true",
"Enable support for TrustZone security extensions">;
+def FeatureCrypto : SubtargetFeature<"crypto", "HasCrypto", "true",
+ "Enable support for Cryptography extensions",
+ [FeatureNEON]>;
+def FeatureCRC : SubtargetFeature<"crc", "HasCRC", "true",
+ "Enable support for CRC instructions">;
// Some processors have FP multiply-accumulate instructions that don't
// play nicely with other VFP / NEON instructions, and it's generally better
@@ -108,10 +119,24 @@ def FeatureDSPThumb2 : SubtargetFeature<"t2dsp", "Thumb2DSP", "true",
def FeatureMP : SubtargetFeature<"mp", "HasMPExtension", "true",
"Supports Multiprocessing extension">;
-// M-series ISA?
-def FeatureMClass : SubtargetFeature<"mclass", "IsMClass", "true",
+// Virtualization extension - requires HW divide (ARMv7-AR ARMARM - 4.4.8).
+def FeatureVirtualization : SubtargetFeature<"virtualization",
+ "HasVirtualization", "true",
+ "Supports Virtualization extension",
+ [FeatureHWDiv, FeatureHWDivARM]>;
+
+// M-series ISA
+def FeatureMClass : SubtargetFeature<"mclass", "ARMProcClass", "MClass",
"Is microcontroller profile ('M' series)">;
+// R-series ISA
+def FeatureRClass : SubtargetFeature<"rclass", "ARMProcClass", "RClass",
+ "Is realtime profile ('R' series)">;
+
+// A-series ISA
+def FeatureAClass : SubtargetFeature<"aclass", "ARMProcClass", "AClass",
+ "Is application profile ('A' series)">;
+
// Special TRAP encoding for NaCl, which looks like a TRAP in Thumb too.
// See ARMInstrInfo.td for details.
def FeatureNaClTrap : SubtargetFeature<"nacl-trap", "UseNaClTrap", "true",
@@ -129,12 +154,19 @@ def HasV5TEOps : SubtargetFeature<"v5te", "HasV5TEOps", "true",
def HasV6Ops : SubtargetFeature<"v6", "HasV6Ops", "true",
"Support ARM v6 instructions",
[HasV5TEOps]>;
+def HasV6MOps : SubtargetFeature<"v6m", "HasV6MOps", "true",
+ "Support ARM v6M instructions",
+ [HasV6Ops]>;
def HasV6T2Ops : SubtargetFeature<"v6t2", "HasV6T2Ops", "true",
"Support ARM v6t2 instructions",
- [HasV6Ops, FeatureThumb2]>;
+ [HasV6MOps, FeatureThumb2]>;
def HasV7Ops : SubtargetFeature<"v7", "HasV7Ops", "true",
"Support ARM v7 instructions",
- [HasV6T2Ops]>;
+ [HasV6T2Ops, FeaturePerfMon]>;
+def HasV8Ops : SubtargetFeature<"v8", "HasV8Ops", "true",
+ "Support ARM v8 instructions",
+ [HasV7Ops, FeatureVirtualization,
+ FeatureMP]>;
//===----------------------------------------------------------------------===//
// ARM Processors supported.
@@ -170,12 +202,27 @@ def ProcSwift : SubtargetFeature<"swift", "ARMProcFamily", "Swift",
// FIXME: It has not been determined if A15 has these features.
def ProcA15 : SubtargetFeature<"a15", "ARMProcFamily", "CortexA15",
"Cortex-A15 ARM processors",
- [FeatureT2XtPk, FeatureFP16,
+ [FeatureT2XtPk, FeatureVFP4,
+ FeatureMP, FeatureHWDiv, FeatureHWDivARM,
FeatureAvoidPartialCPSR,
- FeatureTrustZone]>;
+ FeatureTrustZone, FeatureVirtualization]>;
+
+def ProcA53 : SubtargetFeature<"a53", "ARMProcFamily", "CortexA53",
+ "Cortex-A53 ARM processors",
+ [FeatureHWDiv, FeatureHWDivARM,
+ FeatureTrustZone, FeatureT2XtPk,
+ FeatureCrypto, FeatureCRC]>;
+
+def ProcA57 : SubtargetFeature<"a57", "ARMProcFamily", "CortexA57",
+ "Cortex-A57 ARM processors",
+ [FeatureHWDiv, FeatureHWDivARM,
+ FeatureTrustZone, FeatureT2XtPk,
+ FeatureCrypto, FeatureCRC]>;
+
def ProcR5 : SubtargetFeature<"r5", "ARMProcFamily", "CortexR5",
"Cortex-R5 ARM processors",
- [FeatureSlowFPBrcc, FeatureHWDivARM,
+ [FeatureSlowFPBrcc,
+ FeatureHWDiv, FeatureHWDivARM,
FeatureHasSlowFPVMLx,
FeatureAvoidPartialCPSR,
FeatureT2XtPk]>;
@@ -233,7 +280,7 @@ def : Processor<"mpcore", ARMV6Itineraries, [HasV6Ops, FeatureVFP2,
FeatureHasSlowFPVMLx]>;
// V6M Processors.
-def : Processor<"cortex-m0", ARMV6Itineraries, [HasV6Ops, FeatureNoARM,
+def : Processor<"cortex-m0", ARMV6Itineraries, [HasV6MOps, FeatureNoARM,
FeatureDB, FeatureMClass]>;
// V6T2 Processors.
@@ -248,26 +295,30 @@ def : Processor<"arm1156t2f-s", ARMV6Itineraries, [HasV6T2Ops, FeatureVFP2,
def : ProcessorModel<"cortex-a5", CortexA8Model,
[ProcA5, HasV7Ops, FeatureNEON, FeatureDB,
FeatureVFP4, FeatureDSPThumb2,
- FeatureHasRAS]>;
+ FeatureHasRAS, FeatureAClass]>;
def : ProcessorModel<"cortex-a8", CortexA8Model,
[ProcA8, HasV7Ops, FeatureNEON, FeatureDB,
- FeatureDSPThumb2, FeatureHasRAS]>;
+ FeatureDSPThumb2, FeatureHasRAS,
+ FeatureAClass]>;
def : ProcessorModel<"cortex-a9", CortexA9Model,
[ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
- FeatureDSPThumb2, FeatureHasRAS]>;
+ FeatureDSPThumb2, FeatureHasRAS,
+ FeatureAClass]>;
def : ProcessorModel<"cortex-a9-mp", CortexA9Model,
[ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
FeatureDSPThumb2, FeatureMP,
- FeatureHasRAS]>;
+ FeatureHasRAS, FeatureAClass]>;
// FIXME: A15 has currently the same ProcessorModel as A9.
def : ProcessorModel<"cortex-a15", CortexA9Model,
[ProcA15, HasV7Ops, FeatureNEON, FeatureDB,
- FeatureDSPThumb2, FeatureHasRAS]>;
+ FeatureDSPThumb2, FeatureHasRAS,
+ FeatureAClass]>;
// FIXME: R5 has currently the same ProcessorModel as A8.
def : ProcessorModel<"cortex-r5", CortexA8Model,
[ProcR5, HasV7Ops, FeatureDB,
FeatureVFP3, FeatureDSPThumb2,
- FeatureHasRAS]>;
+ FeatureHasRAS, FeatureVFPOnlySP,
+ FeatureD16, FeatureRClass]>;
// V7M Processors.
def : ProcNoItin<"cortex-m3", [HasV7Ops,
@@ -279,13 +330,22 @@ def : ProcNoItin<"cortex-m4", [HasV7Ops,
FeatureThumb2, FeatureNoARM, FeatureDB,
FeatureHWDiv, FeatureDSPThumb2,
FeatureT2XtPk, FeatureVFP4,
- FeatureVFPOnlySP, FeatureMClass]>;
+ FeatureVFPOnlySP, FeatureD16,
+ FeatureMClass]>;
// Swift uArch Processors.
def : ProcessorModel<"swift", SwiftModel,
[ProcSwift, HasV7Ops, FeatureNEON,
FeatureDB, FeatureDSPThumb2,
- FeatureHasRAS]>;
+ FeatureHasRAS, FeatureAClass]>;
+
+// V8 Processors
+def : ProcNoItin<"cortex-a53", [ProcA53, HasV8Ops, FeatureAClass,
+ FeatureDB, FeatureFPARMv8,
+ FeatureNEON, FeatureDSPThumb2]>;
+def : ProcNoItin<"cortex-a57", [ProcA57, HasV8Ops, FeatureAClass,
+ FeatureDB, FeatureFPARMv8,
+ FeatureNEON, FeatureDSPThumb2]>;
//===----------------------------------------------------------------------===//
// Register File Description
diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp
index 13ec2087938a..e79f88d4b6f1 100644
--- a/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -17,6 +17,7 @@
#include "ARM.h"
#include "ARMBuildAttrs.h"
#include "ARMConstantPoolValue.h"
+#include "ARMFPUName.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
@@ -55,235 +56,67 @@
#include <cctype>
using namespace llvm;
-namespace {
-
- // Per section and per symbol attributes are not supported.
- // To implement them we would need the ability to delay this emission
- // until the assembly file is fully parsed/generated as only then do we
- // know the symbol and section numbers.
- class AttributeEmitter {
- public:
- virtual void MaybeSwitchVendor(StringRef Vendor) = 0;
- virtual void EmitAttribute(unsigned Attribute, unsigned Value) = 0;
- virtual void EmitTextAttribute(unsigned Attribute, StringRef String) = 0;
- virtual void Finish() = 0;
- virtual ~AttributeEmitter() {}
- };
-
- class AsmAttributeEmitter : public AttributeEmitter {
- MCStreamer &Streamer;
-
- public:
- AsmAttributeEmitter(MCStreamer &Streamer_) : Streamer(Streamer_) {}
- void MaybeSwitchVendor(StringRef Vendor) { }
-
- void EmitAttribute(unsigned Attribute, unsigned Value) {
- Streamer.EmitRawText("\t.eabi_attribute " +
- Twine(Attribute) + ", " + Twine(Value));
- }
-
- void EmitTextAttribute(unsigned Attribute, StringRef String) {
- switch (Attribute) {
- default: llvm_unreachable("Unsupported Text attribute in ASM Mode");
- case ARMBuildAttrs::CPU_name:
- Streamer.EmitRawText(StringRef("\t.cpu ") + String.lower());
- break;
- /* GAS requires .fpu to be emitted regardless of EABI attribute */
- case ARMBuildAttrs::Advanced_SIMD_arch:
- case ARMBuildAttrs::VFP_arch:
- Streamer.EmitRawText(StringRef("\t.fpu ") + String.lower());
- break;
- }
- }
- void Finish() { }
- };
-
- class ObjectAttributeEmitter : public AttributeEmitter {
- // This structure holds all attributes, accounting for
- // their string/numeric value, so we can later emmit them
- // in declaration order, keeping all in the same vector
- struct AttributeItemType {
- enum {
- HiddenAttribute = 0,
- NumericAttribute,
- TextAttribute
- } Type;
- unsigned Tag;
- unsigned IntValue;
- StringRef StringValue;
- } AttributeItem;
-
- MCObjectStreamer &Streamer;
- StringRef CurrentVendor;
- SmallVector<AttributeItemType, 64> Contents;
-
- // Account for the ULEB/String size of each item,
- // not just the number of items
- size_t ContentsSize;
- // FIXME: this should be in a more generic place, but
- // getULEBSize() is in MCAsmInfo and will be moved to MCDwarf
- size_t getULEBSize(int Value) {
- size_t Size = 0;
- do {
- Value >>= 7;
- Size += sizeof(int8_t); // Is this really necessary?
- } while (Value);
- return Size;
- }
-
- public:
- ObjectAttributeEmitter(MCObjectStreamer &Streamer_) :
- Streamer(Streamer_), CurrentVendor(""), ContentsSize(0) { }
-
- void MaybeSwitchVendor(StringRef Vendor) {
- assert(!Vendor.empty() && "Vendor cannot be empty.");
-
- if (CurrentVendor.empty())
- CurrentVendor = Vendor;
- else if (CurrentVendor == Vendor)
- return;
- else
- Finish();
-
- CurrentVendor = Vendor;
-
- assert(Contents.size() == 0);
- }
-
- void EmitAttribute(unsigned Attribute, unsigned Value) {
- AttributeItemType attr = {
- AttributeItemType::NumericAttribute,
- Attribute,
- Value,
- StringRef("")
- };
- ContentsSize += getULEBSize(Attribute);
- ContentsSize += getULEBSize(Value);
- Contents.push_back(attr);
- }
-
- void EmitTextAttribute(unsigned Attribute, StringRef String) {
- AttributeItemType attr = {
- AttributeItemType::TextAttribute,
- Attribute,
- 0,
- String
- };
- ContentsSize += getULEBSize(Attribute);
- // String + \0
- ContentsSize += String.size()+1;
-
- Contents.push_back(attr);
- }
-
- void Finish() {
- // Vendor size + Vendor name + '\0'
- const size_t VendorHeaderSize = 4 + CurrentVendor.size() + 1;
-
- // Tag + Tag Size
- const size_t TagHeaderSize = 1 + 4;
-
- Streamer.EmitIntValue(VendorHeaderSize + TagHeaderSize + ContentsSize, 4);
- Streamer.EmitBytes(CurrentVendor);
- Streamer.EmitIntValue(0, 1); // '\0'
-
- Streamer.EmitIntValue(ARMBuildAttrs::File, 1);
- Streamer.EmitIntValue(TagHeaderSize + ContentsSize, 4);
-
- // Size should have been accounted for already, now
- // emit each field as its type (ULEB or String)
- for (unsigned int i=0; i<Contents.size(); ++i) {
- AttributeItemType item = Contents[i];
- Streamer.EmitULEB128IntValue(item.Tag);
- switch (item.Type) {
- default: llvm_unreachable("Invalid attribute type");
- case AttributeItemType::NumericAttribute:
- Streamer.EmitULEB128IntValue(item.IntValue);
- break;
- case AttributeItemType::TextAttribute:
- Streamer.EmitBytes(item.StringValue.upper());
- Streamer.EmitIntValue(0, 1); // '\0'
- break;
- }
- }
-
- Contents.clear();
- }
- };
-
-} // end of anonymous namespace
-
-MachineLocation ARMAsmPrinter::
-getDebugValueLocation(const MachineInstr *MI) const {
- MachineLocation Location;
- assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
- // Frame address. Currently handles register +- offset only.
- if (MI->getOperand(0).isReg() && MI->getOperand(1).isImm())
- Location.set(MI->getOperand(0).getReg(), MI->getOperand(1).getImm());
- else {
- DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n");
- }
- return Location;
-}
-
/// EmitDwarfRegOp - Emit dwarf register operation.
-void ARMAsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc) const {
+void ARMAsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc,
+ bool Indirect) const {
const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->getDwarfRegNum(MLoc.getReg(), false) != -1)
- AsmPrinter::EmitDwarfRegOp(MLoc);
- else {
- unsigned Reg = MLoc.getReg();
- if (Reg >= ARM::S0 && Reg <= ARM::S31) {
- assert(ARM::S0 + 31 == ARM::S31 && "Unexpected ARM S register numbering");
- // S registers are described as bit-pieces of a register
- // S[2x] = DW_OP_regx(256 + (x>>1)) DW_OP_bit_piece(32, 0)
- // S[2x+1] = DW_OP_regx(256 + (x>>1)) DW_OP_bit_piece(32, 32)
-
- unsigned SReg = Reg - ARM::S0;
- bool odd = SReg & 0x1;
- unsigned Rx = 256 + (SReg >> 1);
-
- OutStreamer.AddComment("DW_OP_regx for S register");
- EmitInt8(dwarf::DW_OP_regx);
-
- OutStreamer.AddComment(Twine(SReg));
- EmitULEB128(Rx);
-
- if (odd) {
- OutStreamer.AddComment("DW_OP_bit_piece 32 32");
- EmitInt8(dwarf::DW_OP_bit_piece);
- EmitULEB128(32);
- EmitULEB128(32);
- } else {
- OutStreamer.AddComment("DW_OP_bit_piece 32 0");
- EmitInt8(dwarf::DW_OP_bit_piece);
- EmitULEB128(32);
- EmitULEB128(0);
- }
- } else if (Reg >= ARM::Q0 && Reg <= ARM::Q15) {
- assert(ARM::Q0 + 15 == ARM::Q15 && "Unexpected ARM Q register numbering");
- // Q registers Q0-Q15 are described by composing two D registers together.
- // Qx = DW_OP_regx(256+2x) DW_OP_piece(8) DW_OP_regx(256+2x+1)
- // DW_OP_piece(8)
-
- unsigned QReg = Reg - ARM::Q0;
- unsigned D1 = 256 + 2 * QReg;
- unsigned D2 = D1 + 1;
-
- OutStreamer.AddComment("DW_OP_regx for Q register: D1");
- EmitInt8(dwarf::DW_OP_regx);
- EmitULEB128(D1);
- OutStreamer.AddComment("DW_OP_piece 8");
- EmitInt8(dwarf::DW_OP_piece);
- EmitULEB128(8);
-
- OutStreamer.AddComment("DW_OP_regx for Q register: D2");
- EmitInt8(dwarf::DW_OP_regx);
- EmitULEB128(D2);
- OutStreamer.AddComment("DW_OP_piece 8");
- EmitInt8(dwarf::DW_OP_piece);
- EmitULEB128(8);
+ if (RI->getDwarfRegNum(MLoc.getReg(), false) != -1) {
+ AsmPrinter::EmitDwarfRegOp(MLoc, Indirect);
+ return;
+ }
+ assert(MLoc.isReg() && !Indirect &&
+ "This doesn't support offset/indirection - implement it if needed");
+ unsigned Reg = MLoc.getReg();
+ if (Reg >= ARM::S0 && Reg <= ARM::S31) {
+ assert(ARM::S0 + 31 == ARM::S31 && "Unexpected ARM S register numbering");
+ // S registers are described as bit-pieces of a register
+ // S[2x] = DW_OP_regx(256 + (x>>1)) DW_OP_bit_piece(32, 0)
+ // S[2x+1] = DW_OP_regx(256 + (x>>1)) DW_OP_bit_piece(32, 32)
+
+ unsigned SReg = Reg - ARM::S0;
+ bool odd = SReg & 0x1;
+ unsigned Rx = 256 + (SReg >> 1);
+
+ OutStreamer.AddComment("DW_OP_regx for S register");
+ EmitInt8(dwarf::DW_OP_regx);
+
+ OutStreamer.AddComment(Twine(SReg));
+ EmitULEB128(Rx);
+
+ if (odd) {
+ OutStreamer.AddComment("DW_OP_bit_piece 32 32");
+ EmitInt8(dwarf::DW_OP_bit_piece);
+ EmitULEB128(32);
+ EmitULEB128(32);
+ } else {
+ OutStreamer.AddComment("DW_OP_bit_piece 32 0");
+ EmitInt8(dwarf::DW_OP_bit_piece);
+ EmitULEB128(32);
+ EmitULEB128(0);
}
+ } else if (Reg >= ARM::Q0 && Reg <= ARM::Q15) {
+ assert(ARM::Q0 + 15 == ARM::Q15 && "Unexpected ARM Q register numbering");
+ // Q registers Q0-Q15 are described by composing two D registers together.
+ // Qx = DW_OP_regx(256+2x) DW_OP_piece(8) DW_OP_regx(256+2x+1)
+ // DW_OP_piece(8)
+
+ unsigned QReg = Reg - ARM::Q0;
+ unsigned D1 = 256 + 2 * QReg;
+ unsigned D2 = D1 + 1;
+
+ OutStreamer.AddComment("DW_OP_regx for Q register: D1");
+ EmitInt8(dwarf::DW_OP_regx);
+ EmitULEB128(D1);
+ OutStreamer.AddComment("DW_OP_piece 8");
+ EmitInt8(dwarf::DW_OP_piece);
+ EmitULEB128(8);
+
+ OutStreamer.AddComment("DW_OP_regx for Q register: D2");
+ EmitInt8(dwarf::DW_OP_regx);
+ EmitULEB128(D2);
+ OutStreamer.AddComment("DW_OP_piece 8");
+ EmitInt8(dwarf::DW_OP_piece);
+ EmitULEB128(8);
}
}
@@ -312,7 +145,7 @@ void ARMAsmPrinter::EmitXXStructor(const Constant *CV) {
const GlobalValue *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts());
assert(GV && "C++ constructor pointer was not a GlobalValue!");
- const MCExpr *E = MCSymbolRefExpr::Create(Mang->getSymbol(GV),
+ const MCExpr *E = MCSymbolRefExpr::Create(getSymbol(GV),
(Subtarget->isTargetDarwin()
? MCSymbolRefExpr::VK_None
: MCSymbolRefExpr::VK_ARM_TARGET1),
@@ -373,7 +206,7 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
else if ((Modifier && strcmp(Modifier, "hi16") == 0) ||
(TF & ARMII::MO_HI16))
O << ":upper16:";
- O << *Mang->getSymbol(GV);
+ O << *getSymbol(GV);
printOffset(MO.getOffset(), O);
if (TF == ARMII::MO_PLT)
@@ -474,8 +307,14 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
// This takes advantage of the 2 operand-ness of ldm/stm and that we've
// already got the operands in registers that are operands to the
// inline asm statement.
-
- O << "{" << ARMInstPrinter::getRegisterName(RegBegin);
+ O << "{";
+ if (ARM::GPRPairRegClass.contains(RegBegin)) {
+ const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
+ unsigned Reg0 = TRI->getSubReg(RegBegin, ARM::gsub_0);
+ O << ARMInstPrinter::getRegisterName(Reg0) << ", ";
+ RegBegin = TRI->getSubReg(RegBegin, ARM::gsub_1);
+ }
+ O << ARMInstPrinter::getRegisterName(RegBegin);
// FIXME: The register allocator not only may not have given us the
// registers in sequence, but may not be in ascending registers. This
@@ -500,7 +339,38 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
if (!FlagsOP.isImm())
return true;
unsigned Flags = FlagsOP.getImm();
+
+ // This operand may not be the one that actually provides the register. If
+ // it's tied to a previous one then we should refer instead to that one
+ // for registers and their classes.
+ unsigned TiedIdx;
+ if (InlineAsm::isUseOperandTiedToDef(Flags, TiedIdx)) {
+ for (OpNum = InlineAsm::MIOp_FirstOperand; TiedIdx; --TiedIdx) {
+ unsigned OpFlags = MI->getOperand(OpNum).getImm();
+ OpNum += InlineAsm::getNumOperandRegisters(OpFlags) + 1;
+ }
+ Flags = MI->getOperand(OpNum).getImm();
+
+ // Later code expects OpNum to be pointing at the register rather than
+ // the flags.
+ OpNum += 1;
+ }
+
unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
+ unsigned RC;
+ InlineAsm::hasRegClassConstraint(Flags, RC);
+ if (RC == ARM::GPRPairRegClassID) {
+ if (NumVals != 1)
+ return true;
+ const MachineOperand &MO = MI->getOperand(OpNum);
+ if (!MO.isReg())
+ return true;
+ const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
+ unsigned Reg = TRI->getSubReg(MO.getReg(), ExtraCode[0] == 'Q' ?
+ ARM::gsub_0 : ARM::gsub_1);
+ O << ARMInstPrinter::getRegisterName(Reg);
+ return false;
+ }
if (NumVals != 2)
return true;
unsigned RegOp = ExtraCode[0] == 'Q' ? OpNum : OpNum + 1;
@@ -704,11 +574,6 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
// generates code that does this, it is always safe to set.
OutStreamer.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
}
- // FIXME: This should eventually end up somewhere else where more
- // intelligent flag decisions can be made. For now we are just maintaining
- // the status quo for ARM and setting EF_ARM_EABI_VER5 as the default.
- if (MCELFStreamer *MES = dyn_cast<MCELFStreamer>(&OutStreamer))
- MES->getAssembler().setELFHeaderEFlags(ELF::EF_ARM_EABI_VER5);
}
//===----------------------------------------------------------------------===//
@@ -718,145 +583,150 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
// to appear in the .ARM.attributes section in ELF.
// Instead of subclassing the MCELFStreamer, we do the work here.
-void ARMAsmPrinter::emitAttributes() {
-
- emitARMAttributeSection();
-
- /* GAS expect .fpu to be emitted, regardless of VFP build attribute */
- bool emitFPU = false;
- AttributeEmitter *AttrEmitter;
- if (OutStreamer.hasRawTextSupport()) {
- AttrEmitter = new AsmAttributeEmitter(OutStreamer);
- emitFPU = true;
- } else {
- MCObjectStreamer &O = static_cast<MCObjectStreamer&>(OutStreamer);
- AttrEmitter = new ObjectAttributeEmitter(O);
- }
-
- AttrEmitter->MaybeSwitchVendor("aeabi");
-
- std::string CPUString = Subtarget->getCPUString();
-
- if (CPUString == "cortex-a8" ||
- Subtarget->isCortexA8()) {
- AttrEmitter->EmitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a8");
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v7);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch_profile,
- ARMBuildAttrs::ApplicationProfile);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ARM_ISA_use,
- ARMBuildAttrs::Allowed);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use,
- ARMBuildAttrs::AllowThumb32);
- // Fixme: figure out when this is emitted.
- //AttrEmitter->EmitAttribute(ARMBuildAttrs::WMMX_arch,
- // ARMBuildAttrs::AllowWMMXv1);
- //
-
- /// ADD additional Else-cases here!
- } else if (CPUString == "xscale") {
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v5TEJ);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ARM_ISA_use,
- ARMBuildAttrs::Allowed);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use,
- ARMBuildAttrs::Allowed);
- } else if (CPUString == "generic") {
- // For a generic CPU, we assume a standard v7a architecture in Subtarget.
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v7);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch_profile,
- ARMBuildAttrs::ApplicationProfile);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ARM_ISA_use,
- ARMBuildAttrs::Allowed);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use,
- ARMBuildAttrs::AllowThumb32);
- } else if (Subtarget->hasV7Ops()) {
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v7);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use,
- ARMBuildAttrs::AllowThumb32);
+static ARMBuildAttrs::CPUArch getArchForCPU(StringRef CPU,
+ const ARMSubtarget *Subtarget) {
+ if (CPU == "xscale")
+ return ARMBuildAttrs::v5TEJ;
+
+ if (Subtarget->hasV8Ops())
+ return ARMBuildAttrs::v8;
+ else if (Subtarget->hasV7Ops()) {
+ if (Subtarget->isMClass() && Subtarget->hasThumb2DSP())
+ return ARMBuildAttrs::v7E_M;
+ return ARMBuildAttrs::v7;
} else if (Subtarget->hasV6T2Ops())
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v6T2);
+ return ARMBuildAttrs::v6T2;
+ else if (Subtarget->hasV6MOps())
+ return ARMBuildAttrs::v6S_M;
else if (Subtarget->hasV6Ops())
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v6);
+ return ARMBuildAttrs::v6;
else if (Subtarget->hasV5TEOps())
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v5TE);
+ return ARMBuildAttrs::v5TE;
else if (Subtarget->hasV5TOps())
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v5T);
+ return ARMBuildAttrs::v5T;
else if (Subtarget->hasV4TOps())
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v4T);
+ return ARMBuildAttrs::v4T;
+ else
+ return ARMBuildAttrs::v4;
+}
- if (Subtarget->hasNEON() && emitFPU) {
- /* NEON is not exactly a VFP architecture, but GAS emit one of
- * neon/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
- if (Subtarget->hasVFP4())
- AttrEmitter->EmitTextAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
- "neon-vfpv4");
- else
- AttrEmitter->EmitTextAttribute(ARMBuildAttrs::Advanced_SIMD_arch, "neon");
- /* If emitted for NEON, omit from VFP below, since you can have both
- * NEON and VFP in build attributes but only one .fpu */
- emitFPU = false;
+void ARMAsmPrinter::emitAttributes() {
+ MCTargetStreamer &TS = OutStreamer.getTargetStreamer();
+ ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS);
+
+ ATS.switchVendor("aeabi");
+
+ std::string CPUString = Subtarget->getCPUString();
+
+ if (CPUString != "generic")
+ ATS.emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
+
+ ATS.emitAttribute(ARMBuildAttrs::CPU_arch,
+ getArchForCPU(CPUString, Subtarget));
+
+ if (Subtarget->isAClass()) {
+ ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::ApplicationProfile);
+ } else if (Subtarget->isRClass()) {
+ ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::RealTimeProfile);
+ } else if (Subtarget->isMClass()){
+ ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::MicroControllerProfile);
}
- /* VFPv4 + .fpu */
- if (Subtarget->hasVFP4()) {
- AttrEmitter->EmitAttribute(ARMBuildAttrs::VFP_arch,
- ARMBuildAttrs::AllowFPv4A);
- if (emitFPU)
- AttrEmitter->EmitTextAttribute(ARMBuildAttrs::VFP_arch, "vfpv4");
-
- /* VFPv3 + .fpu */
- } else if (Subtarget->hasVFP3()) {
- AttrEmitter->EmitAttribute(ARMBuildAttrs::VFP_arch,
- ARMBuildAttrs::AllowFPv3A);
- if (emitFPU)
- AttrEmitter->EmitTextAttribute(ARMBuildAttrs::VFP_arch, "vfpv3");
-
- /* VFPv2 + .fpu */
- } else if (Subtarget->hasVFP2()) {
- AttrEmitter->EmitAttribute(ARMBuildAttrs::VFP_arch,
- ARMBuildAttrs::AllowFPv2);
- if (emitFPU)
- AttrEmitter->EmitTextAttribute(ARMBuildAttrs::VFP_arch, "vfpv2");
+ ATS.emitAttribute(ARMBuildAttrs::ARM_ISA_use, Subtarget->hasARMOps() ?
+ ARMBuildAttrs::Allowed : ARMBuildAttrs::Not_Allowed);
+ if (Subtarget->isThumb1Only()) {
+ ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
+ ARMBuildAttrs::Allowed);
+ } else if (Subtarget->hasThumb2()) {
+ ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
+ ARMBuildAttrs::AllowThumb32);
}
- /* TODO: ARMBuildAttrs::Allowed is not completely accurate,
- * since NEON can have 1 (allowed) or 2 (MAC operations) */
if (Subtarget->hasNEON()) {
- AttrEmitter->EmitAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
- ARMBuildAttrs::Allowed);
+ /* NEON is not exactly a VFP architecture, but GAS emits one of
+ * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
+ if (Subtarget->hasFPARMv8()) {
+ if (Subtarget->hasCrypto())
+ ATS.emitFPU(ARM::CRYPTO_NEON_FP_ARMV8);
+ else
+ ATS.emitFPU(ARM::NEON_FP_ARMV8);
+ }
+ else if (Subtarget->hasVFP4())
+ ATS.emitFPU(ARM::NEON_VFPV4);
+ else
+ ATS.emitFPU(ARM::NEON);
+ // Emit Tag_Advanced_SIMD_arch for ARMv8 architecture
+ if (Subtarget->hasV8Ops())
+ ATS.emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
+ ARMBuildAttrs::AllowNeonARMv8);
+ } else {
+ if (Subtarget->hasFPARMv8())
+ ATS.emitFPU(ARM::FP_ARMV8);
+ else if (Subtarget->hasVFP4())
+ ATS.emitFPU(Subtarget->hasD16() ? ARM::VFPV4_D16 : ARM::VFPV4);
+ else if (Subtarget->hasVFP3())
+ ATS.emitFPU(Subtarget->hasD16() ? ARM::VFPV3_D16 : ARM::VFPV3);
+ else if (Subtarget->hasVFP2())
+ ATS.emitFPU(ARM::VFPV2);
}
// Signal various FP modes.
if (!TM.Options.UnsafeFPMath) {
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_denormal,
- ARMBuildAttrs::Allowed);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_exceptions,
- ARMBuildAttrs::Allowed);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal, ARMBuildAttrs::Allowed);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_FP_exceptions,
+ ARMBuildAttrs::Allowed);
}
if (TM.Options.NoInfsFPMath && TM.Options.NoNaNsFPMath)
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_number_model,
- ARMBuildAttrs::Allowed);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_FP_number_model,
+ ARMBuildAttrs::Allowed);
else
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_number_model,
- ARMBuildAttrs::AllowIEE754);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_FP_number_model,
+ ARMBuildAttrs::AllowIEE754);
// FIXME: add more flags to ARMBuildAttrs.h
// 8-bytes alignment stuff.
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_align8_needed, 1);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_align8_preserved, 1);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_align8_needed, 1);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_align8_preserved, 1);
+
+ // ABI_HardFP_use attribute to indicate single precision FP.
+ if (Subtarget->isFPOnlySP())
+ ATS.emitAttribute(ARMBuildAttrs::ABI_HardFP_use,
+ ARMBuildAttrs::HardFPSinglePrecision);
// Hard float. Use both S and D registers and conform to AAPCS-VFP.
- if (Subtarget->isAAPCS_ABI() && TM.Options.FloatABIType == FloatABI::Hard) {
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_HardFP_use, 3);
- AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_VFP_args, 1);
- }
+ if (Subtarget->isAAPCS_ABI() && TM.Options.FloatABIType == FloatABI::Hard)
+ ATS.emitAttribute(ARMBuildAttrs::ABI_VFP_args, ARMBuildAttrs::HardFPAAPCS);
+
// FIXME: Should we signal R9 usage?
- if (Subtarget->hasDivide())
- AttrEmitter->EmitAttribute(ARMBuildAttrs::DIV_use, 1);
+ if (Subtarget->hasFP16())
+ ATS.emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP);
+
+ if (Subtarget->hasMPExtension())
+ ATS.emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);
+
+ if (Subtarget->hasDivide()) {
+ // Check whether hardware divide is available only in Thumb2, or in ARM mode as well.
+ ATS.emitAttribute(ARMBuildAttrs::DIV_use,
+ Subtarget->hasDivideInARMMode() ? ARMBuildAttrs::AllowDIVExt :
+ ARMBuildAttrs::AllowDIVIfExists);
+ }
- AttrEmitter->Finish();
- delete AttrEmitter;
+ if (Subtarget->hasTrustZone() && Subtarget->hasVirtualization())
+ ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
+ ARMBuildAttrs::AllowTZVirtualization);
+ else if (Subtarget->hasTrustZone())
+ ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
+ ARMBuildAttrs::AllowTZ);
+ else if (Subtarget->hasVirtualization())
+ ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
+ ARMBuildAttrs::AllowVirtualization);
+
+ ATS.finishAttributeSection();
}
void ARMAsmPrinter::emitARMAttributeSection() {
@@ -908,7 +778,7 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV) {
bool isIndirect = Subtarget->isTargetDarwin() &&
Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel());
if (!isIndirect)
- return Mang->getSymbol(GV);
+ return getSymbol(GV);
// FIXME: Remove this when Darwin transition to @GOT like syntax.
MCSymbol *MCSym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
@@ -919,7 +789,7 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV) {
MMIMachO.getGVStubEntry(MCSym);
if (StubSym.getPointer() == 0)
StubSym = MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
+ StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
return MCSym;
}
@@ -1092,27 +962,12 @@ void ARMAsmPrinter::EmitJump2Table(const MachineInstr *MI) {
OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
}
-void ARMAsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
- raw_ostream &OS) {
- unsigned NOps = MI->getNumOperands();
- assert(NOps==4);
- OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
- // cast away const; DIetc do not take const operands for some reason.
- DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata()));
- OS << V.getName();
- OS << " <- ";
- // Frame address. Currently handles register +- offset only.
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
- OS << '['; printOperand(MI, 0, OS); OS << '+'; printOperand(MI, 1, OS);
- OS << ']';
- OS << "+";
- printOperand(MI, NOps-2, OS);
-}
-
void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
assert(MI->getFlag(MachineInstr::FrameSetup) &&
"Only instruction which are involved into frame setup code are allowed");
+ MCTargetStreamer &TS = OutStreamer.getTargetStreamer();
+ ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS);
const MachineFunction &MF = *MI->getParent()->getParent();
const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
const ARMFunctionInfo &AFI = *MF.getInfo<ARMFunctionInfo>();
@@ -1175,7 +1030,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
RegList.push_back(SrcReg);
break;
}
- OutStreamer.EmitRegSave(RegList, Opc == ARM::VSTMDDB_UPD);
+ ATS.emitRegSave(RegList, Opc == ARM::VSTMDDB_UPD);
} else {
// Changes of stack / frame pointer.
if (SrcReg == ARM::SP) {
@@ -1223,11 +1078,11 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
if (DstReg == FramePtr && FramePtr != ARM::SP)
// Set-up of the frame pointer. Positive values correspond to "add"
// instruction.
- OutStreamer.EmitSetFP(FramePtr, ARM::SP, -Offset);
+ ATS.emitSetFP(FramePtr, ARM::SP, -Offset);
else if (DstReg == ARM::SP) {
// Change of SP by an offset. Positive values correspond to "sub"
// instruction.
- OutStreamer.EmitPad(Offset);
+ ATS.emitPad(Offset);
} else {
MI->dump();
llvm_unreachable("Unsupported opcode for unwinding information");
@@ -1272,15 +1127,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
unsigned Opc = MI->getOpcode();
switch (Opc) {
case ARM::t2MOVi32imm: llvm_unreachable("Should be lowered by thumb2it pass");
- case ARM::DBG_VALUE: {
- if (isVerbose() && OutStreamer.hasRawTextSupport()) {
- SmallString<128> TmpStr;
- raw_svector_ostream OS(TmpStr);
- PrintDebugValueComment(MI, OS);
- OutStreamer.EmitRawText(StringRef(OS.str()));
- }
- return;
- }
+ case ARM::DBG_VALUE: llvm_unreachable("Should be handled by generic printing");
case ARM::LEApcrel:
case ARM::tLEApcrel:
case ARM::t2LEApcrel: {
@@ -1376,7 +1223,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addReg(0));
const GlobalValue *GV = MI->getOperand(0).getGlobal();
- MCSymbol *GVSym = Mang->getSymbol(GV);
+ MCSymbol *GVSym = getSymbol(GV);
const MCExpr *GVSymExpr = MCSymbolRefExpr::Create(GVSym, OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(ARM::Bcc)
.addExpr(GVSymExpr)
diff --git a/lib/Target/ARM/ARMAsmPrinter.h b/lib/Target/ARM/ARMAsmPrinter.h
index c945e4f28699..de72e063e0d5 100644
--- a/lib/Target/ARM/ARMAsmPrinter.h
+++ b/lib/Target/ARM/ARMAsmPrinter.h
@@ -97,13 +97,9 @@ private:
const MachineInstr *MI);
public:
- void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
-
- virtual MachineLocation
- getDebugValueLocation(const MachineInstr *MI) const LLVM_OVERRIDE;
-
/// EmitDwarfRegOp - Emit dwarf register operation.
- virtual void EmitDwarfRegOp(const MachineLocation &MLoc) const LLVM_OVERRIDE;
+ virtual void EmitDwarfRegOp(const MachineLocation &MLoc, bool Indirect) const
+ LLVM_OVERRIDE;
virtual unsigned getISAEncoding() LLVM_OVERRIDE {
// ARM/Darwin adds ISA to the DWARF info for each function.
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 60050542716c..f835a4e5b5fe 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -11,10 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#include "ARMBaseInstrInfo.h"
#include "ARM.h"
+#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
+#include "ARMFeatures.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
@@ -36,7 +37,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"
using namespace llvm;
@@ -113,8 +114,7 @@ ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAG *DAG) const {
if (Subtarget.isThumb2() || Subtarget.hasVFP2())
- return (ScheduleHazardRecognizer *)
- new ARMHazardRecognizer(II, *this, getRegisterInfo(), Subtarget, DAG);
+ return (ScheduleHazardRecognizer *)new ARMHazardRecognizer(II, DAG);
return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
}
@@ -273,104 +273,90 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
- // If the block has no terminators, it just falls into the block after it.
+ TBB = 0;
+ FBB = 0;
+
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin())
- return false;
+ return false; // Empty blocks are easy.
--I;
- while (I->isDebugValue()) {
- if (I == MBB.begin())
- return false;
- --I;
- }
-
- // Get the last instruction in the block.
- MachineInstr *LastInst = I;
- unsigned LastOpc = LastInst->getOpcode();
- // Check if it's an indirect branch first, this should return 'unanalyzable'
- // even if it's predicated.
- if (isIndirectBranchOpcode(LastOpc))
- return true;
+ // Walk backwards from the end of the basic block until the branch is
+ // analyzed or we give up.
+ while (isPredicated(I) || I->isTerminator()) {
- if (!isUnpredicatedTerminator(I))
- return false;
+ // Flag to be raised on unanalyzable instructions. This is useful in cases
+ // where we want to clean up at the end of the basic block before we bail
+ // out.
+ bool CantAnalyze = false;
- // If there is only one terminator instruction, process it.
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
- if (isUncondBranchOpcode(LastOpc)) {
- TBB = LastInst->getOperand(0).getMBB();
- return false;
+ // Skip over DEBUG values and predicated nonterminators.
+ while (I->isDebugValue() || !I->isTerminator()) {
+ if (I == MBB.begin())
+ return false;
+ --I;
}
- if (isCondBranchOpcode(LastOpc)) {
- // Block ends with fall-through condbranch.
- TBB = LastInst->getOperand(0).getMBB();
- Cond.push_back(LastInst->getOperand(1));
- Cond.push_back(LastInst->getOperand(2));
- return false;
+
+ if (isIndirectBranchOpcode(I->getOpcode()) ||
+ isJumpTableBranchOpcode(I->getOpcode())) {
+ // Indirect branches and jump tables can't be analyzed, but we still want
+ // to clean up any instructions at the tail of the basic block.
+ CantAnalyze = true;
+ } else if (isUncondBranchOpcode(I->getOpcode())) {
+ TBB = I->getOperand(0).getMBB();
+ } else if (isCondBranchOpcode(I->getOpcode())) {
+ // Bail out if we encounter multiple conditional branches.
+ if (!Cond.empty())
+ return true;
+
+ assert(!FBB && "FBB should have been null.");
+ FBB = TBB;
+ TBB = I->getOperand(0).getMBB();
+ Cond.push_back(I->getOperand(1));
+ Cond.push_back(I->getOperand(2));
+ } else if (I->isReturn()) {
+ // Returns can't be analyzed, but we should run cleanup.
+ CantAnalyze = !isPredicated(I);
+ } else {
+ // We encountered some other unrecognized terminator. Bail out immediately.
+ return true;
}
- return true; // Can't handle indirect branch.
- }
- // Get the instruction before it if it is a terminator.
- MachineInstr *SecondLastInst = I;
- unsigned SecondLastOpc = SecondLastInst->getOpcode();
-
- // If AllowModify is true and the block ends with two or more unconditional
- // branches, delete all but the first unconditional branch.
- if (AllowModify && isUncondBranchOpcode(LastOpc)) {
- while (isUncondBranchOpcode(SecondLastOpc)) {
- LastInst->eraseFromParent();
- LastInst = SecondLastInst;
- LastOpc = LastInst->getOpcode();
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
- // Return now the only terminator is an unconditional branch.
- TBB = LastInst->getOperand(0).getMBB();
- return false;
- } else {
- SecondLastInst = I;
- SecondLastOpc = SecondLastInst->getOpcode();
+ // Cleanup code - to be run for unpredicated unconditional branches and
+ // returns.
+ if (!isPredicated(I) &&
+ (isUncondBranchOpcode(I->getOpcode()) ||
+ isIndirectBranchOpcode(I->getOpcode()) ||
+ isJumpTableBranchOpcode(I->getOpcode()) ||
+ I->isReturn())) {
+ // Forget any previous conditional branch information - it no longer applies.
+ Cond.clear();
+ FBB = 0;
+
+ // If we can modify the function, delete everything below this
+ // unconditional branch.
+ if (AllowModify) {
+ MachineBasicBlock::iterator DI = llvm::next(I);
+ while (DI != MBB.end()) {
+ MachineInstr *InstToDelete = DI;
+ ++DI;
+ InstToDelete->eraseFromParent();
+ }
}
}
- }
- // If there are three terminators, we don't know what sort of block this is.
- if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
- return true;
-
- // If the block ends with a B and a Bcc, handle it.
- if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
- TBB = SecondLastInst->getOperand(0).getMBB();
- Cond.push_back(SecondLastInst->getOperand(1));
- Cond.push_back(SecondLastInst->getOperand(2));
- FBB = LastInst->getOperand(0).getMBB();
- return false;
- }
+ if (CantAnalyze)
+ return true;
- // If the block ends with two unconditional branches, handle it. The second
- // one is not executed, so remove it.
- if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
- TBB = SecondLastInst->getOperand(0).getMBB();
- I = LastInst;
- if (AllowModify)
- I->eraseFromParent();
- return false;
- }
+ if (I == MBB.begin())
+ return false;
- // ...likewise if it ends with a branch table followed by an unconditional
- // branch. The branch folder can create these, and we must get rid of them for
- // correctness of Thumb constant islands.
- if ((isJumpTableBranchOpcode(SecondLastOpc) ||
- isIndirectBranchOpcode(SecondLastOpc)) &&
- isUncondBranchOpcode(LastOpc)) {
- I = LastInst;
- if (AllowModify)
- I->eraseFromParent();
- return true;
+ --I;
}
- // Otherwise, can't handle this.
- return true;
+ // We made it past the terminators without bailing out - we must have
+ // analyzed this branch successfully.
+ return false;
}
@@ -535,11 +521,17 @@ bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
if (!MI->isPredicable())
return false;
- if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
- ARMFunctionInfo *AFI =
- MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
- return AFI->isThumb2Function();
+ ARMFunctionInfo *AFI =
+ MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
+
+ if (AFI->isThumb2Function()) {
+ if (getSubtarget().restrictIT())
+ return isV8EligibleForIT(MI);
+ } else { // non-Thumb
+ if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
+ return false;
}
+
return true;
}
@@ -660,16 +652,16 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
bool GPRDest = ARM::GPRRegClass.contains(DestReg);
- bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
+ bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
if (GPRDest && GPRSrc) {
AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc))));
+ .addReg(SrcReg, getKillRegState(KillSrc))));
return;
}
bool SPRDest = ARM::SPRRegClass.contains(DestReg);
- bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
+ bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
unsigned Opc = 0;
if (SPRDest && SPRSrc)
@@ -698,26 +690,47 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
int Spacing = 1;
// Use VORRq when possible.
- if (ARM::QQPRRegClass.contains(DestReg, SrcReg))
- Opc = ARM::VORRq, BeginIdx = ARM::qsub_0, SubRegs = 2;
- else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg))
- Opc = ARM::VORRq, BeginIdx = ARM::qsub_0, SubRegs = 4;
+ if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
+ Opc = ARM::VORRq;
+ BeginIdx = ARM::qsub_0;
+ SubRegs = 2;
+ } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
+ Opc = ARM::VORRq;
+ BeginIdx = ARM::qsub_0;
+ SubRegs = 4;
// Fall back to VMOVD.
- else if (ARM::DPairRegClass.contains(DestReg, SrcReg))
- Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2;
- else if (ARM::DTripleRegClass.contains(DestReg, SrcReg))
- Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3;
- else if (ARM::DQuadRegClass.contains(DestReg, SrcReg))
- Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4;
- else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg))
- Opc = ARM::MOVr, BeginIdx = ARM::gsub_0, SubRegs = 2;
-
- else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg))
- Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2, Spacing = 2;
- else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg))
- Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3, Spacing = 2;
- else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg))
- Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4, Spacing = 2;
+ } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
+ Opc = ARM::VMOVD;
+ BeginIdx = ARM::dsub_0;
+ SubRegs = 2;
+ } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
+ Opc = ARM::VMOVD;
+ BeginIdx = ARM::dsub_0;
+ SubRegs = 3;
+ } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
+ Opc = ARM::VMOVD;
+ BeginIdx = ARM::dsub_0;
+ SubRegs = 4;
+ } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
+ Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
+ BeginIdx = ARM::gsub_0;
+ SubRegs = 2;
+ } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
+ Opc = ARM::VMOVD;
+ BeginIdx = ARM::dsub_0;
+ SubRegs = 2;
+ Spacing = 2;
+ } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
+ Opc = ARM::VMOVD;
+ BeginIdx = ARM::dsub_0;
+ SubRegs = 3;
+ Spacing = 2;
+ } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
+ Opc = ARM::VMOVD;
+ BeginIdx = ARM::dsub_0;
+ SubRegs = 4;
+ Spacing = 2;
+ }
assert(Opc && "Impossible reg-to-reg copy");
@@ -726,26 +739,28 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// Copy register tuples backward when the first Dest reg overlaps with SrcReg.
if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
- BeginIdx = BeginIdx + ((SubRegs-1)*Spacing);
+ BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
Spacing = -Spacing;
}
#ifndef NDEBUG
SmallSet<unsigned, 4> DstRegs;
#endif
for (unsigned i = 0; i != SubRegs; ++i) {
- unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i*Spacing);
- unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i*Spacing);
+ unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
+ unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
assert(Dst && Src && "Bad sub-register");
#ifndef NDEBUG
assert(!DstRegs.count(Src) && "destructive vector copy");
DstRegs.insert(Dst);
#endif
- Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst)
- .addReg(Src);
+ Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
// VORR takes two source operands.
if (Opc == ARM::VORRq)
Mov.addReg(Src);
Mov = AddDefaultPred(Mov);
+ // MOVr can set CC.
+ if (Opc == ARM::MOVr)
+ Mov = AddDefaultCC(Mov);
}
// Add implicit super-register defs and kills to the last instruction.
Mov->addRegisterDefined(DestReg, TRI);
@@ -1214,16 +1229,6 @@ bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{
return true;
}
-MachineInstr*
-ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx, uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const {
- MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE))
- .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
- return &*MIB;
-}
-
/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
@@ -1426,9 +1431,11 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case ARM::VLDRD:
case ARM::VLDRS:
case ARM::t2LDRi8:
+ case ARM::t2LDRBi8:
case ARM::t2LDRDi8:
case ARM::t2LDRSHi8:
case ARM::t2LDRi12:
+ case ARM::t2LDRBi12:
case ARM::t2LDRSHi12:
break;
}
@@ -1445,8 +1452,10 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case ARM::VLDRD:
case ARM::VLDRS:
case ARM::t2LDRi8:
+ case ARM::t2LDRBi8:
case ARM::t2LDRSHi8:
case ARM::t2LDRi12:
+ case ARM::t2LDRBi12:
case ARM::t2LDRSHi12:
break;
}
@@ -1493,7 +1502,16 @@ bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
if ((Offset2 - Offset1) / 8 > 64)
return false;
- if (Load1->getMachineOpcode() != Load2->getMachineOpcode())
+ // If the machine opcodes differ, treat the loads as having different base
+ // addresses, EXCEPT for Thumb2 byte loads where one is LDRBi8 and the other
+ // is LDRBi12. Those are just different encoding forms of the same basic
+ // instruction, so they are considered the same.
+ if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) &&
+ !((Load1->getMachineOpcode() == ARM::t2LDRBi8 &&
+ Load2->getMachineOpcode() == ARM::t2LDRBi12) ||
+ (Load1->getMachineOpcode() == ARM::t2LDRBi12 &&
+ Load2->getMachineOpcode() == ARM::t2LDRBi8)))
return false; // FIXME: overly conservative?
// Four loads in a row should be sufficient.
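The opcode check a few lines above can be read as the following hypothetical helper; this is only an equivalent restatement for illustration, the patch itself keeps the condition inline.

// Hypothetical helper equivalent to the inline check above: opcodes must match
// exactly, except that the two Thumb2 byte-load encodings pair with each other.
static bool isSameLoadKind(unsigned Opc1, unsigned Opc2) {
  if (Opc1 == Opc2)
    return true;
  return (Opc1 == ARM::t2LDRBi8 && Opc2 == ARM::t2LDRBi12) ||
         (Opc1 == ARM::t2LDRBi12 && Opc2 == ARM::t2LDRBi8);
}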
@@ -1708,7 +1726,7 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
bool PreferFalse) const {
assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
"Unknown select instruction");
- const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
MachineInstr *DefMI = canFoldIntoMOVCC(MI->getOperand(2).getReg(), MRI, this);
bool Invert = !DefMI;
if (!DefMI)
@@ -1716,11 +1734,17 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
if (!DefMI)
return 0;
+ // Find new register class to use.
+ MachineOperand FalseReg = MI->getOperand(Invert ? 2 : 1);
+ unsigned DestReg = MI->getOperand(0).getReg();
+ const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
+ if (!MRI.constrainRegClass(DestReg, PreviousClass))
+ return 0;
+
// Create a new predicated version of DefMI.
// Rfalse is the first use.
MachineInstrBuilder NewMI = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
- DefMI->getDesc(),
- MI->getOperand(0).getReg());
+ DefMI->getDesc(), DestReg);
// Copy all the DefMI operands, excluding its (null) predicate.
const MCInstrDesc &DefDesc = DefMI->getDesc();
@@ -1743,7 +1767,6 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
// register operand tied to the first def.
// The tie makes the register allocator ensure the FalseReg is allocated the
// same register as operand 0.
- MachineOperand FalseReg = MI->getOperand(Invert ? 2 : 1);
FalseReg.setImplicit();
NewMI.addOperand(FalseReg);
NewMI->tieOperands(0, NewMI->getNumOperands() - 1);
@@ -1803,6 +1826,14 @@ void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
unsigned DestReg, unsigned BaseReg, int NumBytes,
ARMCC::CondCodes Pred, unsigned PredReg,
const ARMBaseInstrInfo &TII, unsigned MIFlags) {
+ if (NumBytes == 0 && DestReg != BaseReg) {
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
+ .addReg(BaseReg, RegState::Kill)
+ .addImm((unsigned)Pred).addReg(PredReg).addReg(0)
+ .setMIFlags(MIFlags);
+ return;
+ }
+
bool isSub = NumBytes < 0;
if (isSub) NumBytes = -NumBytes;
@@ -1826,6 +1857,115 @@ void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
}
}
+bool llvm::tryFoldSPUpdateIntoPushPop(MachineFunction &MF,
+ MachineInstr *MI,
+ unsigned NumBytes) {
+ // This optimisation potentially adds lots of load and store
+ // micro-operations, so it is only really a benefit to code-size.
+ if (!MF.getFunction()->hasFnAttribute(Attribute::MinSize))
+ return false;
+
+ // If only one register is pushed/popped, LLVM can use an LDR/STR
+ // instead. We can't modify those so make sure we're dealing with an
+ // instruction we understand.
+ bool IsPop = isPopOpcode(MI->getOpcode());
+ bool IsPush = isPushOpcode(MI->getOpcode());
+ if (!IsPush && !IsPop)
+ return false;
+
+ bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
+ MI->getOpcode() == ARM::VLDMDIA_UPD;
+ bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
+ MI->getOpcode() == ARM::tPOP ||
+ MI->getOpcode() == ARM::tPOP_RET;
+
+ assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
+ MI->getOperand(1).getReg() == ARM::SP)) &&
+ "trying to fold sp update into non-sp-updating push/pop");
+
+ // The VFP push & pop act on D-registers, so we can only correctly fold an
+ // adjustment that is a multiple of 8 bytes. Similarly, each rN is 4 bytes.
+ // Don't try if this is violated.
+ if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
+ return false;
+
+ // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
+ // pred) so the list starts at 4. Thumb1 starts after the predicate.
+ int RegListIdx = IsT1PushPop ? 2 : 4;
+
+ // Calculate the space we'll need in terms of registers.
+ unsigned FirstReg = MI->getOperand(RegListIdx).getReg();
+ unsigned RD0Reg, RegsNeeded;
+ if (IsVFPPushPop) {
+ RD0Reg = ARM::D0;
+ RegsNeeded = NumBytes / 8;
+ } else {
+ RD0Reg = ARM::R0;
+ RegsNeeded = NumBytes / 4;
+ }
+
+ // We're going to have to strip all list operands off before
+ // re-adding them since the order matters, so save the existing ones
+ // for later.
+ SmallVector<MachineOperand, 4> RegList;
+ for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
+ RegList.push_back(MI->getOperand(i));
+
+ MachineBasicBlock *MBB = MI->getParent();
+ const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
+ const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
+
+ // Now try to find enough space in the reglist to allocate NumBytes.
+ for (unsigned CurReg = FirstReg - 1; CurReg >= RD0Reg && RegsNeeded;
+ --CurReg) {
+ if (!IsPop) {
+ // Pushing any register is completely harmless; mark the
+ // register involved as undef since we don't care about it in
+ // the slightest.
+ RegList.push_back(MachineOperand::CreateReg(CurReg, false, false,
+ false, false, true));
+ --RegsNeeded;
+ continue;
+ }
+
+ // However, we can only pop an extra register if it's not live. For
+ // registers live within the function we might clobber a return value
+ // register; the other way a register can be live here is if it's
+ // callee-saved.
+ if (isCalleeSavedRegister(CurReg, CSRegs) ||
+ MBB->computeRegisterLiveness(TRI, CurReg, MI) !=
+ MachineBasicBlock::LQR_Dead) {
+ // VFP pops don't allow holes in the register list, so any skip is fatal
+ // for our transformation. GPR pops do, so we should just keep looking.
+ if (IsVFPPushPop)
+ return false;
+ else
+ continue;
+ }
+
+ // Mark the unimportant registers as <def,dead> in the POP.
+ RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false,
+ true));
+ --RegsNeeded;
+ }
+
+ if (RegsNeeded > 0)
+ return false;
+
+ // Finally we know we can profitably perform the optimisation so go
+ // ahead: strip all existing registers off and add them back again
+ // in the right order.
+ for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
+ MI->RemoveOperand(i);
+
+ // Add the complete list back in.
+ MachineInstrBuilder MIB(MF, &*MI);
+ for (int i = RegList.size() - 1; i >= 0; --i)
+ MIB.addOperand(RegList[i]);
+
+ return true;
+}
+
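As a rough standalone sketch of the scan above, with liveness and the callee-saved check collapsed into one stub; the register numbers and the stub are illustrative only, the real code queries MBB->computeRegisterLiveness() and the callee-saved list.

#include <cstdio>

static bool regUnavailable(int Reg) { return Reg == 4; } // stub: live or callee-saved

int main() {
  const bool IsPop = true, IsVFP = false;
  const unsigned NumBytes = 12;        // extra SP adjustment we want to fold
  const int FirstReg = 6, RD0Reg = 0;  // toy stand-ins for r6 and r0
  unsigned RegsNeeded = NumBytes / (IsVFP ? 8 : 4);

  for (int Reg = FirstReg - 1; Reg >= RD0Reg && RegsNeeded; --Reg) {
    if (IsPop && regUnavailable(Reg)) {
      if (IsVFP)
        break;      // VFP register lists cannot have holes: give up
      continue;     // GPR lists can skip an unavailable register
    }
    std::printf("add r%d to the register list\n", Reg); // r5, r3, r2
    --RegsNeeded;
  }
  std::printf(RegsNeeded ? "cannot fold\n" : "fold the SP update\n");
  return 0;
}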
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
const ARMBaseInstrInfo &TII) {
@@ -2232,8 +2372,32 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
isSafe = true;
break;
}
- // Condition code is after the operand before CPSR.
- ARMCC::CondCodes CC = (ARMCC::CondCodes)Instr.getOperand(IO-1).getImm();
+ // Condition code is after the operand before CPSR except for VSELs.
+ ARMCC::CondCodes CC;
+ bool IsInstrVSel = true;
+ switch (Instr.getOpcode()) {
+ default:
+ IsInstrVSel = false;
+ CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm();
+ break;
+ case ARM::VSELEQD:
+ case ARM::VSELEQS:
+ CC = ARMCC::EQ;
+ break;
+ case ARM::VSELGTD:
+ case ARM::VSELGTS:
+ CC = ARMCC::GT;
+ break;
+ case ARM::VSELGED:
+ case ARM::VSELGES:
+ CC = ARMCC::GE;
+ break;
+ case ARM::VSELVSS:
+ case ARM::VSELVSD:
+ CC = ARMCC::VS;
+ break;
+ }
+
if (Sub) {
ARMCC::CondCodes NewCC = getSwappedCondition(CC);
if (NewCC == ARMCC::AL)
@@ -2244,11 +2408,14 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// If it is safe to remove CmpInstr, the condition code of these
// operands will be modified.
if (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
- Sub->getOperand(2).getReg() == SrcReg)
- OperandsToUpdate.push_back(std::make_pair(&((*I).getOperand(IO-1)),
- NewCC));
- }
- else
+ Sub->getOperand(2).getReg() == SrcReg) {
+ // VSel doesn't support condition code update.
+ if (IsInstrVSel)
+ return false;
+ OperandsToUpdate.push_back(
+ std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
+ }
+ } else
switch (CC) {
default:
// CPSR can be used multiple times, we should continue.
@@ -3604,6 +3771,24 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
return Latency;
}
+unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr *MI) const {
+ if (MI->isCopyLike() || MI->isInsertSubreg() ||
+ MI->isRegSequence() || MI->isImplicitDef())
+ return 0;
+
+ if (MI->isBundle())
+ return 0;
+
+ const MCInstrDesc &MCID = MI->getDesc();
+
+ if (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR)) {
+ // When predicated, CPSR is an additional source operand for CPSR-updating
+ // instructions; this apparently increases their latencies.
+ return 1;
+ }
+ return 0;
+}
+
unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost) const {
@@ -3685,8 +3870,7 @@ hasHighOperandLatency(const InstrItineraryData *ItinData,
return true;
// Hoist VFP / NEON instructions with 4 or higher latency.
- int Latency = computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx,
- /*FindMin=*/false);
+ int Latency = computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
if (Latency < 0)
Latency = getInstrLatency(ItinData, DefMI);
if (Latency <= 3)
@@ -4137,7 +4321,7 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI,
// FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines
// the full D-register by loading the same value to both lanes. The
// instruction is micro-coded with 2 uops, so don't do this until we can
- // properly schedule micro-coded instuctions. The dispatcher stalls cause
+ // properly schedule micro-coded instructions. The dispatcher stalls cause
// too big regressions.
// Insert the dependency-breaking FCONSTD before MI.
@@ -4152,6 +4336,8 @@ bool ARMBaseInstrInfo::hasNOP() const {
}
bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {
+ if (MI->getNumOperands() < 4)
+ return true;
unsigned ShOpVal = MI->getOperand(3).getImm();
unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal);
// Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1.
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h
index 2ef659c23bd6..93e59647d220 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -46,7 +46,7 @@ public:
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const;
- virtual const ARMBaseRegisterInfo &getRegisterInfo() const =0;
+ virtual const ARMBaseRegisterInfo &getRegisterInfo() const = 0;
const ARMSubtarget &getSubtarget() const { return Subtarget; }
ScheduleHazardRecognizer *
@@ -125,12 +125,6 @@ public:
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
- virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx,
- uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const;
-
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
@@ -270,6 +264,8 @@ private:
const MCInstrDesc &UseMCID,
unsigned UseIdx, unsigned UseAlign) const;
+ unsigned getPredicationCost(const MachineInstr *MI) const;
+
unsigned getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost = 0) const;
@@ -366,6 +362,17 @@ bool isIndirectBranchOpcode(int Opc) {
return Opc == ARM::BX || Opc == ARM::MOVPCRX || Opc == ARM::tBRIND;
}
+static inline bool isPopOpcode(int Opc) {
+ return Opc == ARM::tPOP_RET || Opc == ARM::LDMIA_RET ||
+ Opc == ARM::t2LDMIA_RET || Opc == ARM::tPOP || Opc == ARM::LDMIA_UPD ||
+ Opc == ARM::t2LDMIA_UPD || Opc == ARM::VLDMDIA_UPD;
+}
+
+static inline bool isPushOpcode(int Opc) {
+ return Opc == ARM::tPUSH || Opc == ARM::t2STMDB_UPD ||
+ Opc == ARM::STMDB_UPD || Opc == ARM::VSTMDDB_UPD;
+}
+
/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
@@ -405,6 +412,13 @@ void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
const ARMBaseRegisterInfo& MRI,
unsigned MIFlags = 0);
+/// Tries to add registers to the reglist of a given base-updating
+/// push/pop instruction to adjust the stack by an additional
+/// NumBytes. This can save a few bytes per function in code-size, but
+/// obviously generates more memory traffic. As such, it only takes
+/// effect in functions being optimised for size.
+bool tryFoldSPUpdateIntoPushPop(MachineFunction &MF, MachineInstr *MI,
+ unsigned NumBytes);
/// rewriteARMFrameIndex / rewriteT2FrameIndex -
/// Rewrite MI to access 'Offset' bytes from the FP. Return false if the
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index b0d34a76b014..8717dc0cde90 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -43,46 +43,73 @@
using namespace llvm;
-ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
- const ARMSubtarget &sti)
- : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC), TII(tii), STI(sti),
+ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMSubtarget &sti)
+ : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC), STI(sti),
FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
BasePtr(ARM::R6) {
}
const uint16_t*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- bool ghcCall = false;
-
- if (MF) {
- const Function *F = MF->getFunction();
- ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
- }
-
- if (ghcCall) {
- return CSR_GHC_SaveList;
- }
- else {
- return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
- ? CSR_iOS_SaveList : CSR_AAPCS_SaveList;
+ const uint16_t *RegList = (STI.isTargetIOS() && !STI.isAAPCS_ABI())
+ ? CSR_iOS_SaveList
+ : CSR_AAPCS_SaveList;
+
+ if (!MF) return RegList;
+
+ const Function *F = MF->getFunction();
+ if (F->getCallingConv() == CallingConv::GHC) {
+ // GHC set of callee saved regs is empty as all those regs are
+ // used for passing STG regs around
+ return CSR_NoRegs_SaveList;
+ } else if (F->hasFnAttribute("interrupt")) {
+ if (STI.isMClass()) {
+ // M-class CPUs have hardware which saves the registers needed to allow an
+ // AAPCS-conforming function to act as a handler.
+ return CSR_AAPCS_SaveList;
+ } else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") {
+ // Fast interrupt mode gives the handler a private copy of R8-R14, so fewer
+ // registers need to be saved to restore user-mode state.
+ return CSR_FIQ_SaveList;
+ } else {
+ // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
+ // exception handling.
+ return CSR_GenericInt_SaveList;
+ }
}
+
+ return RegList;
}
const uint32_t*
-ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
+ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
+ if (CC == CallingConv::GHC)
+ // This is academic because all GHC calls are (supposed to be) tail calls
+ return CSR_NoRegs_RegMask;
return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}
const uint32_t*
-ARMBaseRegisterInfo::getThisReturnPreservedMask(CallingConv::ID) const {
- return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
- ? CSR_iOS_ThisReturn_RegMask : CSR_AAPCS_ThisReturn_RegMask;
+ARMBaseRegisterInfo::getNoPreservedMask() const {
+ return CSR_NoRegs_RegMask;
}
const uint32_t*
-ARMBaseRegisterInfo::getNoPreservedMask() const {
- return CSR_NoRegs_RegMask;
+ARMBaseRegisterInfo::getThisReturnPreservedMask(CallingConv::ID CC) const {
+ // This should return a register mask that is the same as that returned by
+ // getCallPreservedMask but that additionally preserves the register used for
+ // the first i32 argument (which must also be the register used to return a
+ // single i32 return value)
+ //
+ // In case that the calling convention does not use the same register for
+ // both or otherwise does not want to enable this optimization, the function
+ // should return NULL
+ if (CC == CallingConv::GHC)
+ // This is academic because all GHC calls are (supposed to be) tail calls
+ return NULL;
+ return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
+ ? CSR_iOS_ThisReturn_RegMask : CSR_AAPCS_ThisReturn_RegMask;
}
BitVector ARMBaseRegisterInfo::
@@ -94,6 +121,7 @@ getReservedRegs(const MachineFunction &MF) const {
Reserved.set(ARM::SP);
Reserved.set(ARM::PC);
Reserved.set(ARM::FPSCR);
+ Reserved.set(ARM::APSR_NZCV);
if (TFI->hasFP(MF))
Reserved.set(FramePtr);
if (hasBasePointer(MF))
@@ -309,7 +337,7 @@ bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
// 1. Dynamic stack realignment is explicitly disabled,
// 2. This is a Thumb1 function (it's not useful, so we don't bother), or
// 3. There are VLAs in the function and the base pointer is disabled.
- if (!MF.getTarget().Options.RealignStack)
+ if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
return false;
if (AFI->isThumb1OnlyFunction())
return false;
@@ -357,14 +385,6 @@ ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return ARM::SP;
}
-unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
- llvm_unreachable("What is the exception register");
-}
-
-unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
- llvm_unreachable("What is the exception handler register");
-}
-
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::
@@ -375,6 +395,7 @@ emitLoadConstPool(MachineBasicBlock &MBB,
ARMCC::CondCodes Pred,
unsigned PredReg, unsigned MIFlags) const {
MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
const Constant *C =
ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
@@ -556,9 +577,10 @@ materializeFrameBaseRegister(MachineBasicBlock *MBB,
if (Ins != MBB->end())
DL = Ins->getDebugLoc();
- const MCInstrDesc &MCID = TII.get(ADDriOpc);
- MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
const MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ const MCInstrDesc &MCID = TII.get(ADDriOpc);
MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));
MachineInstrBuilder MIB = AddDefaultPred(BuildMI(*MBB, Ins, DL, MCID, BaseReg)
@@ -574,6 +596,8 @@ ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
MachineInstr &MI = *I;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
int Off = Offset; // ARM doesn't need the general 64-bit offsets
unsigned i = 0;
@@ -671,6 +695,8 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
const ARMFrameLowering *TFI =
static_cast<const ARMFrameLowering*>(MF.getTarget().getFrameLowering());
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -696,12 +722,7 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
#endif // NDEBUG
- // Special handling of dbg_value instructions.
- if (MI.isDebugValue()) {
- MI.getOperand(FIOperandNum). ChangeToRegister(FrameReg, false /*isDef*/);
- MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
- return;
- }
+ assert(!MI.isDebugValue() && "DBG_VALUEs should be handled in target-independent code");
// Modify MI as necessary to handle as much of 'Offset' as possible
bool Done = false;
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.h b/lib/Target/ARM/ARMBaseRegisterInfo.h
index 0679919152c0..e28fff68f4e2 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.h
@@ -72,9 +72,16 @@ static inline bool isARMArea3Register(unsigned Reg, bool isIOS) {
}
}
+static inline bool isCalleeSavedRegister(unsigned Reg,
+ const MCPhysReg *CSRegs) {
+ for (unsigned i = 0; CSRegs[i]; ++i)
+ if (Reg == CSRegs[i])
+ return true;
+ return false;
+}
+
class ARMBaseRegisterInfo : public ARMGenRegisterInfo {
protected:
- const ARMBaseInstrInfo &TII;
const ARMSubtarget &STI;
/// FramePtr - ARM physical register used as frame ptr.
@@ -86,8 +93,7 @@ protected:
unsigned BasePtr;
// Can be only subclassed.
- explicit ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
- const ARMSubtarget &STI);
+ explicit ARMBaseRegisterInfo(const ARMSubtarget &STI);
// Return the opcode that implements 'Op', or 0 if no opcode
unsigned getOpcode(int Op) const;
@@ -96,9 +102,18 @@ public:
/// Code Generation virtual methods...
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const uint32_t *getCallPreservedMask(CallingConv::ID) const;
- const uint32_t *getThisReturnPreservedMask(CallingConv::ID) const;
const uint32_t *getNoPreservedMask() const;
+ /// getThisReturnPreservedMask - Returns a call preserved mask specific to the
+ /// case that 'returned' is on an i32 first argument if the calling convention
+ /// is one that can (partially) model this attribute with a preserved mask
+ /// (i.e. it is a calling convention that uses the same register for the first
+ /// i32 argument and an i32 return value)
+ ///
+ /// Should return NULL in the case that the calling convention does not have
+ /// this property
+ const uint32_t *getThisReturnPreservedMask(CallingConv::ID) const;
+
BitVector getReservedRegs(const MachineFunction &MF) const;
const TargetRegisterClass*
@@ -142,10 +157,6 @@ public:
unsigned getFrameRegister(const MachineFunction &MF) const;
unsigned getBaseRegister() const { return BasePtr; }
- // Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
-
bool isLowRegister(unsigned Reg) const;
diff --git a/lib/Target/ARM/ARMBuildAttrs.h b/lib/Target/ARM/ARMBuildAttrs.h
index 11bd6a4a8dbc..b16d4ef54b6d 100644
--- a/lib/Target/ARM/ARMBuildAttrs.h
+++ b/lib/Target/ARM/ARMBuildAttrs.h
@@ -15,11 +15,13 @@
#ifndef __TARGET_ARMBUILDATTRS_H__
#define __TARGET_ARMBUILDATTRS_H__
+namespace llvm {
namespace ARMBuildAttrs {
+
enum SpecialAttr {
// This is for the .cpu asm attr. It translates into one or more
// AttrType (below) entries in the .ARM.attributes section in the ELF.
- SEL_CPU
+ SEL_CPU
};
enum AttrType {
@@ -57,7 +59,7 @@ namespace ARMBuildAttrs {
ABI_FP_optimization_goals = 31,
compatibility = 32,
CPU_unaligned_access = 34,
- VFP_HP_extension = 36,
+ FP_HP_extension = 36,
ABI_FP_16bit_format = 38,
MPextension_use = 42, // was 70, 2.08 ABI
DIV_use = 44,
@@ -89,10 +91,11 @@ namespace ARMBuildAttrs {
v7 = 10, // e.g. Cortex A8, Cortex M3
v6_M = 11, // e.g. Cortex M1
v6S_M = 12, // v6_M with the System extensions
- v7E_M = 13 // v7_M with DSP extensions
+ v7E_M = 13, // v7_M with DSP extensions
+ v8 = 14 // v8, AArch32
};
- enum CPUArchProfile { // (=7), uleb128
+ enum CPUArchProfile { // (=7), uleb128
Not_Applicable = 0, // pre v7, or cross-profile code
ApplicationProfile = (0x41), // 'A' (e.g. for Cortex A8)
RealTimeProfile = (0x52), // 'R' (e.g. for Cortex R4)
@@ -101,31 +104,67 @@ namespace ARMBuildAttrs {
};
// The following have a lot of common use cases
- enum {
- //ARMISAUse (=8), uleb128 and THUMBISAUse (=9), uleb128
+ enum {
Not_Allowed = 0,
Allowed = 1,
- // FP_arch (=10), uleb128 (formerly Tag_VFP_arch = 10)
+ // Tag_ARM_ISA_use (=8), uleb128
+
+ // Tag_THUMB_ISA_use, (=9), uleb128
+ AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions)
+
+ // Tag_FP_arch (=10), uleb128 (formerly Tag_VFP_arch = 10)
AllowFPv2 = 2, // v2 FP ISA permitted (implies use of the v1 FP ISA)
AllowFPv3A = 3, // v3 FP ISA permitted (implies use of the v2 FP ISA)
- AllowFPv3B = 4, // v3 FP ISA permitted, but only D0-D15, S0-S31
- AllowFPv4A = 5, // v4 FP ISA permitted (implies use of v3 FP ISA)
+ AllowFPv3B = 4, // v3 FP ISA permitted, but only D0-D15, S0-S31
+ AllowFPv4A = 5, // v4 FP ISA permitted (implies use of v3 FP ISA)
AllowFPv4B = 6, // v4 FP ISA was permitted, but only D0-D15, S0-S31
+ AllowFPARMv8A = 7, // Use of the ARM v8-A FP ISA was permitted
+ AllowFPARMv8B = 8, // Use of the ARM v8-A FP ISA was permitted, but only D0-D15, S0-S31
// Tag_WMMX_arch, (=11), uleb128
- AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions)
-
- // Tag_WMMX_arch, (=11), uleb128
- AllowWMMXv1 = 2, // The user permitted this entity to use WMMX v2
+ AllowWMMXv1 = 1, // The user permitted this entity to use WMMX v1
+ AllowWMMXv2 = 2, // The user permitted this entity to use WMMX v2
+
+ // Tag_Advanced_SIMD_arch, (=12), uleb128
+ AllowNeon = 1, // SIMDv1 was permitted
+ AllowNeon2 = 2, // SIMDv2 was permitted (Half-precision FP, MAC operations)
+ AllowNeonARMv8 = 3, // ARM v8-A SIMD was permitted
- // Tag_ABI_FP_denormal, (=20), uleb128
+ // Tag_ABI_FP_denormal, (=20), uleb128
PreserveFPSign = 2, // sign when flushed-to-zero is preserved
// Tag_ABI_FP_number_model, (=23), uleb128
AllowRTABI = 2, // numbers, infinities, and one quiet NaN (see [RTABI])
- AllowIEE754 = 3 // this code to use all the IEEE 754-defined FP encodings
+ AllowIEE754 = 3, // this code to use all the IEEE 754-defined FP encodings
+
+ // Tag_ABI_HardFP_use, (=27), uleb128
+ HardFPImplied = 0, // FP use should be implied by Tag_FP_arch
+ HardFPSinglePrecision = 1, // Single-precision only
+
+ // Tag_ABI_VFP_args, (=28), uleb128
+ BaseAAPCS = 0,
+ HardFPAAPCS = 1,
+
+ // Tag_FP_HP_extension, (=36), uleb128
+ AllowHPFP = 1, // Allow use of Half Precision FP
+
+ // Tag_MPextension_use, (=42), uleb128
+ AllowMP = 1, // Allow use of MP extensions
+
+ // Tag_DIV_use, (=44), uleb128
+ AllowDIVIfExists = 0, // Allow hardware divide if available in arch, or no info exists.
+ DisallowDIV = 1, // Hardware divide explicitly disallowed
+ AllowDIVExt = 2, // Allow hardware divide as optional architecture extension above
+ // the base arch specified by Tag_CPU_arch and Tag_CPU_arch_profile.
+
+ // Tag_Virtualization_use, (=68), uleb128
+ AllowTZ = 1,
+ AllowVirtualization = 2,
+ AllowTZVirtualization = 3
};
-}
+
+} // namespace ARMBuildAttrs
+} // namespace llvm
#endif // __TARGET_ARMBUILDATTRS_H__
diff --git a/lib/Target/ARM/ARMCallingConv.td b/lib/Target/ARM/ARMCallingConv.td
index 8ff666ed2844..9bea4b2d68e9 100644
--- a/lib/Target/ARM/ARMCallingConv.td
+++ b/lib/Target/ARM/ARMCallingConv.td
@@ -207,10 +207,24 @@ def CSR_AAPCS_ThisReturn : CalleeSavedRegs<(add LR, R11, R10, R9, R8, R7, R6,
def CSR_iOS : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS, R9))>;
def CSR_iOS_ThisReturn : CalleeSavedRegs<(add LR, R7, R6, R5, R4,
- (sub CSR_AAPCS_ThisReturn, R9))>;
+ (sub CSR_AAPCS_ThisReturn, R9))>;
+
+// The "interrupt" attribute is used to generate code that is acceptable in
+// exception-handlers of various kinds. It makes us use a different return
+// instruction (handled elsewhere) and affects which registers we must return to
+// our "caller" in the same state as we receive them.
+
+// For most interrupts, all registers except SP and LR are shared with
+// user-space. We mark LR to be saved anyway, since this is what the ARM backend
+// generally does rather than tracking its liveness as a normal register.
+def CSR_GenericInt : CalleeSavedRegs<(add LR, (sequence "R%u", 12, 0))>;
+
+// The fast interrupt handlers have more private state and get their own copies
+// of R8-R12, in addition to SP and LR. As before, mark LR for saving too.
+
+// FIXME: we mark R11 as callee-saved since it's often the frame-pointer, and
+// current frame lowering expects to encounter it while processing callee-saved
+// registers.
+def CSR_FIQ : CalleeSavedRegs<(add LR, R11, (sequence "R%u", 7, 0))>;
+
-// GHC set of callee saved regs is empty as all those regs are
-// used for passing STG regs around
-// add is a workaround for not being able to compile empty list:
-// def CSR_GHC : CalleeSavedRegs<()>;
-def CSR_GHC : CalleeSavedRegs<(add)>;
diff --git a/lib/Target/ARM/ARMCodeEmitter.cpp b/lib/Target/ARM/ARMCodeEmitter.cpp
index 5e8e1739a984..568ca858c4d2 100644
--- a/lib/Target/ARM/ARMCodeEmitter.cpp
+++ b/lib/Target/ARM/ARMCodeEmitter.cpp
@@ -167,6 +167,8 @@ namespace {
const { return 0; }
unsigned NEONThumb2DupPostEncoder(const MachineInstr &MI,unsigned Val)
const { return 0; }
+ unsigned NEONThumb2V8PostEncoder(const MachineInstr &MI,unsigned Val)
+ const { return 0; }
unsigned VFPThumb2PostEncoder(const MachineInstr&MI, unsigned Val)
const { return 0; }
unsigned getAdrLabelOpValue(const MachineInstr &MI, unsigned Op)
@@ -1044,8 +1046,8 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
return;
} else if ((MCID.Opcode == ARM::BFC) || (MCID.Opcode == ARM::BFI)) {
uint32_t v = ~MI.getOperand(2).getImm();
- int32_t lsb = CountTrailingZeros_32(v);
- int32_t msb = (32 - CountLeadingZeros_32(v)) - 1;
+ int32_t lsb = countTrailingZeros(v);
+ int32_t msb = (32 - countLeadingZeros(v)) - 1;
// Instr{20-16} = msb, Instr{11-7} = lsb
Binary |= (msb & 0x1F) << 16;
Binary |= (lsb & 0x1F) << 7;
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index 4891609b336f..cff5ce27bca6 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -128,7 +128,7 @@ namespace {
// If the block size isn't a multiple of the known bits, assume the
// worst case padding.
if (Size & ((1u << Bits) - 1))
- Bits = CountTrailingZeros_32(Size);
+ Bits = countTrailingZeros(Size);
return Bits;
}
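A small worked example of the known-bits adjustment above; it is standalone, and countTrailingZeros is approximated with the GCC/Clang builtin rather than the LLVM helper.

#include <cstdint>
#include <cstdio>

static unsigned ctz32(uint32_t V) { return V ? __builtin_ctz(V) : 32; }

int main() {
  unsigned Bits = 3;  // the block start is known to be 8-byte aligned
  uint32_t Size = 6;  // but the block is 6 bytes long
  if (Size & ((1u << Bits) - 1))
    Bits = ctz32(Size);                  // 6 == 0b110, so only 1 bit survives
  std::printf("alignment known after the block: %u bytes\n", 1u << Bits); // 2
  return 0;
}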
@@ -753,6 +753,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
Scale = 4;
break;
+ case ARM::LDRBi12:
case ARM::LDRi12:
case ARM::LDRcp:
case ARM::t2LDRpci:
diff --git a/lib/Target/ARM/ARMConstantPoolValue.cpp b/lib/Target/ARM/ARMConstantPoolValue.cpp
index 4e703ec3c1a8..7d41c69f08b8 100644
--- a/lib/Target/ARM/ARMConstantPoolValue.cpp
+++ b/lib/Target/ARM/ARMConstantPoolValue.cpp
@@ -163,21 +163,7 @@ const BlockAddress *ARMConstantPoolConstant::getBlockAddress() const {
int ARMConstantPoolConstant::getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment) {
- unsigned AlignMask = Alignment - 1;
- const std::vector<MachineConstantPoolEntry> Constants = CP->getConstants();
- for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
- if (Constants[i].isMachineConstantPoolEntry() &&
- (Constants[i].getAlignment() & AlignMask) == 0) {
- ARMConstantPoolValue *CPV =
- (ARMConstantPoolValue *)Constants[i].Val.MachineCPVal;
- ARMConstantPoolConstant *APC = dyn_cast<ARMConstantPoolConstant>(CPV);
- if (!APC) continue;
- if (APC->CVal == CVal && equals(APC))
- return i;
- }
- }
-
- return -1;
+ return getExistingMachineCPValueImpl<ARMConstantPoolConstant>(CP, Alignment);
}
bool ARMConstantPoolConstant::hasSameValue(ARMConstantPoolValue *ACPV) {
@@ -216,22 +202,7 @@ ARMConstantPoolSymbol::Create(LLVMContext &C, const char *s,
int ARMConstantPoolSymbol::getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment) {
- unsigned AlignMask = Alignment - 1;
- const std::vector<MachineConstantPoolEntry> Constants = CP->getConstants();
- for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
- if (Constants[i].isMachineConstantPoolEntry() &&
- (Constants[i].getAlignment() & AlignMask) == 0) {
- ARMConstantPoolValue *CPV =
- (ARMConstantPoolValue *)Constants[i].Val.MachineCPVal;
- ARMConstantPoolSymbol *APS = dyn_cast<ARMConstantPoolSymbol>(CPV);
- if (!APS) continue;
-
- if (APS->S == S && equals(APS))
- return i;
- }
- }
-
- return -1;
+ return getExistingMachineCPValueImpl<ARMConstantPoolSymbol>(CP, Alignment);
}
bool ARMConstantPoolSymbol::hasSameValue(ARMConstantPoolValue *ACPV) {
@@ -271,22 +242,7 @@ ARMConstantPoolMBB *ARMConstantPoolMBB::Create(LLVMContext &C,
int ARMConstantPoolMBB::getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment) {
- unsigned AlignMask = Alignment - 1;
- const std::vector<MachineConstantPoolEntry> Constants = CP->getConstants();
- for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
- if (Constants[i].isMachineConstantPoolEntry() &&
- (Constants[i].getAlignment() & AlignMask) == 0) {
- ARMConstantPoolValue *CPV =
- (ARMConstantPoolValue *)Constants[i].Val.MachineCPVal;
- ARMConstantPoolMBB *APMBB = dyn_cast<ARMConstantPoolMBB>(CPV);
- if (!APMBB) continue;
-
- if (APMBB->MBB == MBB && equals(APMBB))
- return i;
- }
- }
-
- return -1;
+ return getExistingMachineCPValueImpl<ARMConstantPoolMBB>(CP, Alignment);
}
bool ARMConstantPoolMBB::hasSameValue(ARMConstantPoolValue *ACPV) {
diff --git a/lib/Target/ARM/ARMConstantPoolValue.h b/lib/Target/ARM/ARMConstantPoolValue.h
index 93812fe6bb37..7ae7bf46f19d 100644
--- a/lib/Target/ARM/ARMConstantPoolValue.h
+++ b/lib/Target/ARM/ARMConstantPoolValue.h
@@ -15,6 +15,7 @@
#define LLVM_TARGET_ARM_CONSTANTPOOLVALUE_H
#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstddef>
@@ -64,6 +65,26 @@ protected:
ARMConstantPoolValue(LLVMContext &C, unsigned id, ARMCP::ARMCPKind Kind,
unsigned char PCAdj, ARMCP::ARMCPModifier Modifier,
bool AddCurrentAddress);
+
+ template <typename Derived>
+ int getExistingMachineCPValueImpl(MachineConstantPool *CP,
+ unsigned Alignment) {
+ unsigned AlignMask = Alignment - 1;
+ const std::vector<MachineConstantPoolEntry> &Constants = CP->getConstants();
+ for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
+ if (Constants[i].isMachineConstantPoolEntry() &&
+ (Constants[i].getAlignment() & AlignMask) == 0) {
+ ARMConstantPoolValue *CPV =
+ (ARMConstantPoolValue *)Constants[i].Val.MachineCPVal;
+ if (Derived *APC = dyn_cast<Derived>(CPV))
+ if (cast<Derived>(this)->equals(APC))
+ return i;
+ }
+ }
+
+ return -1;
+ }
+
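The template above is a CRTP-style factoring: the base class walks the constant pool and defers both the type test and the comparison to the derived class, which is why each subclass below gains a typed equals() overload. A self-contained sketch of the same shape follows; the class and member names are illustrative, not the LLVM classes, and plain dynamic_cast stands in for LLVM's dyn_cast.

#include <vector>

struct PoolValue {
  virtual ~PoolValue() {}
protected:
  template <typename Derived>
  int findExistingImpl(const std::vector<PoolValue *> &Pool) const {
    for (unsigned i = 0, e = Pool.size(); i != e; ++i)
      if (const Derived *D = dynamic_cast<const Derived *>(Pool[i]))
        if (static_cast<const Derived *>(this)->equals(D))
          return (int)i;
    return -1;
  }
};

struct ConstPoolValue : PoolValue {
  int Val;
  explicit ConstPoolValue(int V) : Val(V) {}
  bool equals(const ConstPoolValue *A) const { return Val == A->Val; }
  int findExisting(const std::vector<PoolValue *> &Pool) const {
    return findExistingImpl<ConstPoolValue>(Pool);
  }
};

int main() {
  ConstPoolValue A(42), B(7);
  std::vector<PoolValue *> Pool;
  Pool.push_back(&A);
  return (A.findExisting(Pool) == 0 && B.findExisting(Pool) == -1) ? 0 : 1;
}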
public:
virtual ~ARMConstantPoolValue();
@@ -156,6 +177,10 @@ public:
static bool classof(const ARMConstantPoolValue *APV) {
return APV->isGlobalValue() || APV->isBlockAddress() || APV->isLSDA();
}
+
+ bool equals(const ARMConstantPoolConstant *A) const {
+ return CVal == A->CVal && ARMConstantPoolValue::equals(A);
+ }
};
/// ARMConstantPoolSymbol - ARM-specific constantpool values for external
@@ -187,6 +212,10 @@ public:
static bool classof(const ARMConstantPoolValue *ACPV) {
return ACPV->isExtSymbol();
}
+
+ bool equals(const ARMConstantPoolSymbol *A) const {
+ return S == A->S && ARMConstantPoolValue::equals(A);
+ }
};
/// ARMConstantPoolMBB - ARM-specific constantpool value of a machine basic
@@ -219,6 +248,10 @@ public:
static bool classof(const ARMConstantPoolValue *ACPV) {
return ACPV->isMachineBasicBlock();
}
+
+ bool equals(const ARMConstantPoolMBB *A) const {
+ return MBB == A->MBB && ARMConstantPoolValue::equals(A);
+ }
};
} // End llvm namespace
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index beb843ca9aa8..e6f7f86c5587 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -692,10 +692,9 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
unsigned newOpc = Opcode == ARM::VMOVScc ? ARM::VMOVS : ARM::VMOVD;
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(newOpc),
MI.getOperand(1).getReg())
- .addReg(MI.getOperand(2).getReg(),
- getKillRegState(MI.getOperand(2).isKill()))
+ .addOperand(MI.getOperand(2))
.addImm(MI.getOperand(3).getImm()) // 'pred'
- .addReg(MI.getOperand(4).getReg());
+ .addOperand(MI.getOperand(4));
MI.eraseFromParent();
return true;
@@ -705,10 +704,9 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVr : ARM::MOVr;
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
MI.getOperand(1).getReg())
- .addReg(MI.getOperand(2).getReg(),
- getKillRegState(MI.getOperand(2).isKill()))
+ .addOperand(MI.getOperand(2))
.addImm(MI.getOperand(3).getImm()) // 'pred'
- .addReg(MI.getOperand(4).getReg())
+ .addOperand(MI.getOperand(4))
.addReg(0); // 's' bit
MI.eraseFromParent();
@@ -717,39 +715,36 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
case ARM::MOVCCsi: {
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
(MI.getOperand(1).getReg()))
- .addReg(MI.getOperand(2).getReg(),
- getKillRegState(MI.getOperand(2).isKill()))
+ .addOperand(MI.getOperand(2))
.addImm(MI.getOperand(3).getImm())
.addImm(MI.getOperand(4).getImm()) // 'pred'
- .addReg(MI.getOperand(5).getReg())
+ .addOperand(MI.getOperand(5))
.addReg(0); // 's' bit
MI.eraseFromParent();
return true;
}
-
case ARM::MOVCCsr: {
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsr),
(MI.getOperand(1).getReg()))
- .addReg(MI.getOperand(2).getReg(),
- getKillRegState(MI.getOperand(2).isKill()))
- .addReg(MI.getOperand(3).getReg(),
- getKillRegState(MI.getOperand(3).isKill()))
+ .addOperand(MI.getOperand(2))
+ .addOperand(MI.getOperand(3))
.addImm(MI.getOperand(4).getImm())
.addImm(MI.getOperand(5).getImm()) // 'pred'
- .addReg(MI.getOperand(6).getReg())
+ .addOperand(MI.getOperand(6))
.addReg(0); // 's' bit
MI.eraseFromParent();
return true;
}
+ case ARM::t2MOVCCi16:
case ARM::MOVCCi16: {
- BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVi16),
+ unsigned NewOpc = AFI->isThumbFunction() ? ARM::t2MOVi16 : ARM::MOVi16;
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
MI.getOperand(1).getReg())
.addImm(MI.getOperand(2).getImm())
.addImm(MI.getOperand(3).getImm()) // 'pred'
- .addReg(MI.getOperand(4).getReg());
-
+ .addOperand(MI.getOperand(4));
MI.eraseFromParent();
return true;
}
@@ -760,23 +755,47 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MI.getOperand(1).getReg())
.addImm(MI.getOperand(2).getImm())
.addImm(MI.getOperand(3).getImm()) // 'pred'
- .addReg(MI.getOperand(4).getReg())
+ .addOperand(MI.getOperand(4))
.addReg(0); // 's' bit
MI.eraseFromParent();
return true;
}
+ case ARM::t2MVNCCi:
case ARM::MVNCCi: {
- BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MVNi),
+ unsigned Opc = AFI->isThumbFunction() ? ARM::t2MVNi : ARM::MVNi;
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
MI.getOperand(1).getReg())
.addImm(MI.getOperand(2).getImm())
.addImm(MI.getOperand(3).getImm()) // 'pred'
- .addReg(MI.getOperand(4).getReg())
+ .addOperand(MI.getOperand(4))
.addReg(0); // 's' bit
MI.eraseFromParent();
return true;
}
+ case ARM::t2MOVCClsl:
+ case ARM::t2MOVCClsr:
+ case ARM::t2MOVCCasr:
+ case ARM::t2MOVCCror: {
+ unsigned NewOpc;
+ switch (Opcode) {
+ case ARM::t2MOVCClsl: NewOpc = ARM::t2LSLri; break;
+ case ARM::t2MOVCClsr: NewOpc = ARM::t2LSRri; break;
+ case ARM::t2MOVCCasr: NewOpc = ARM::t2ASRri; break;
+ case ARM::t2MOVCCror: NewOpc = ARM::t2RORri; break;
+ default: llvm_unreachable("unexpected conditional move");
+ }
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
+ MI.getOperand(1).getReg())
+ .addOperand(MI.getOperand(2))
+ .addImm(MI.getOperand(3).getImm())
+ .addImm(MI.getOperand(4).getImm()) // 'pred'
+ .addOperand(MI.getOperand(5))
+ .addReg(0); // 's' bit
+ MI.eraseFromParent();
+ return true;
+ }
case ARM::Int_eh_sjlj_dispatchsetup: {
MachineFunction &MF = *MI.getParent()->getParent();
const ARMBaseInstrInfo *AII =
@@ -823,7 +842,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
case ARM::MOVsrl_flag:
case ARM::MOVsra_flag: {
- // These are just fancy MOVs insructions.
+ // These are just fancy MOVs instructions.
AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
MI.getOperand(0).getReg())
.addOperand(MI.getOperand(1))
@@ -938,6 +957,18 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
ExpandMOV32BitImm(MBB, MBBI);
return true;
+ case ARM::SUBS_PC_LR: {
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::SUBri), ARM::PC)
+ .addReg(ARM::LR)
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addOperand(MI.getOperand(2))
+ .addReg(ARM::CPSR, RegState::Undef);
+ TransferImpOps(MI, MIB, MIB);
+ MI.eraseFromParent();
+ return true;
+ }
case ARM::VLDMQIA: {
unsigned NewOpc = ARM::VLDMDIA;
MachineInstrBuilder MIB =
diff --git a/lib/Target/ARM/ARMFPUName.def b/lib/Target/ARM/ARMFPUName.def
new file mode 100644
index 000000000000..9a1bbe703d99
--- /dev/null
+++ b/lib/Target/ARM/ARMFPUName.def
@@ -0,0 +1,32 @@
+//===-- ARMFPUName.def - List of the ARM FPU names --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the list of the supported ARM FPU names.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef ARM_FPU_NAME
+#error "You must define ARM_FPU_NAME(NAME, ID) before including ARMFPUName.h"
+#endif
+
+ARM_FPU_NAME("vfp", VFP)
+ARM_FPU_NAME("vfpv2", VFPV2)
+ARM_FPU_NAME("vfpv3", VFPV3)
+ARM_FPU_NAME("vfpv3-d16", VFPV3_D16)
+ARM_FPU_NAME("vfpv4", VFPV4)
+ARM_FPU_NAME("vfpv4-d16", VFPV4_D16)
+ARM_FPU_NAME("fp-armv8", FP_ARMV8)
+ARM_FPU_NAME("neon", NEON)
+ARM_FPU_NAME("neon-vfpv4", NEON_VFPV4)
+ARM_FPU_NAME("neon-fp-armv8", NEON_FP_ARMV8)
+ARM_FPU_NAME("crypto-neon-fp-armv8", CRYPTO_NEON_FP_ARMV8)
+
+#undef ARM_FPU_NAME
diff --git a/lib/Target/ARM/ARMFPUName.h b/lib/Target/ARM/ARMFPUName.h
new file mode 100644
index 000000000000..2a64cce4880d
--- /dev/null
+++ b/lib/Target/ARM/ARMFPUName.h
@@ -0,0 +1,26 @@
+//===-- ARMFPUName.h - List of the ARM FPU names ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ARMFPUNAME_H
+#define ARMFPUNAME_H
+
+namespace llvm {
+namespace ARM {
+
+enum FPUKind {
+ INVALID_FPU = 0
+
+#define ARM_FPU_NAME(NAME, ID) , ID
+#include "ARMFPUName.def"
+};
+
+} // namespace ARM
+} // namespace llvm
+
+#endif // ARMFPUNAME_H
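The .def/.h pair above is an X-macro list: every includer defines ARM_FPU_NAME to stamp out its own expansion of the same table, and the enum above is one such expansion. As a hedged illustration, a hypothetical name-to-enum lookup (not part of this patch) could reuse the list like this:

#include <cstring>
#include "ARMFPUName.h"

// Hypothetical consumer of ARMFPUName.def: each entry expands to one strcmp.
static llvm::ARM::FPUKind parseFPUName(const char *Name) {
#define ARM_FPU_NAME(NAME, ID)                                                 \
  if (std::strcmp(Name, NAME) == 0)                                            \
    return llvm::ARM::ID;
#include "ARMFPUName.def"
  return llvm::ARM::INVALID_FPU;
}

With that sketch, parseFPUName("neon-vfpv4") would yield llvm::ARM::NEON_VFPV4, and an unknown string falls through to INVALID_FPU.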
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 5d45f6491240..a4004f32db37 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -20,6 +20,7 @@
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
@@ -175,6 +176,8 @@ class ARMFastISel : public FastISel {
// Utility routines.
private:
+ unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned OpNum,
+ unsigned Op);
bool isTypeLegal(Type *Ty, MVT &VT);
bool isLoadTypeLegal(Type *Ty, MVT &VT);
bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
@@ -251,10 +254,10 @@ bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
const MCInstrDesc &MCID = MI->getDesc();
- // If we're a thumb2 or not NEON function we were handled via isPredicable.
+ // For Thumb2 functions, or instructions outside the NEON domain, defer to isPredicable.
if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
AFI->isThumb2Function())
- return false;
+ return MI->isPredicable();
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
if (MCID.OpInfo[i].isPredicate())
@@ -275,7 +278,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
// Do we use a predicate? or...
// Are we NEON in ARM mode and have a predicate operand? If so, I know
// we're not predicable but add it anyways.
- if (TII.isPredicable(MI) || isARMNEONPred(MI))
+ if (isARMNEONPred(MI))
AddDefaultPred(MIB);
// Do we optionally set a predicate? Preds is size > 0 iff the predicate
@@ -290,6 +293,23 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
return MIB;
}
+unsigned ARMFastISel::constrainOperandRegClass(const MCInstrDesc &II,
+ unsigned Op, unsigned OpNum) {
+ if (TargetRegisterInfo::isVirtualRegister(Op)) {
+ const TargetRegisterClass *RegClass =
+ TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
+ if (!MRI.constrainRegClass(Op, RegClass)) {
+ // If it's not legal to COPY between the register classes, something
+ // has gone very wrong before we got here.
+ unsigned NewOp = createResultReg(RegClass);
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::COPY), NewOp).addReg(Op));
+ return NewOp;
+ }
+ }
+ return Op;
+}
+
unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass* RC) {
unsigned ResultReg = createResultReg(RC);
@@ -305,6 +325,9 @@ unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ // Make sure the input operand is sufficiently constrained to be legal
+ // for this instruction.
+ Op0 = constrainOperandRegClass(II, Op0, 1);
if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill));
@@ -325,6 +348,11 @@ unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ // Make sure the input operands are sufficiently constrained to be legal
+ // for this instruction.
+ Op0 = constrainOperandRegClass(II, Op0, 1);
+ Op1 = constrainOperandRegClass(II, Op1, 2);
+
if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
@@ -348,6 +376,12 @@ unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ // Make sure the input operands are sufficiently constrained to be legal
+ // for this instruction.
+ Op0 = constrainOperandRegClass(II, Op0, 1);
+ Op1 = constrainOperandRegClass(II, Op1, 2);
+ Op2 = constrainOperandRegClass(II, Op2, 3);
+
if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
@@ -372,6 +406,9 @@ unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ // Make sure the input operand is sufficiently constrained to be legal
+ // for this instruction.
+ Op0 = constrainOperandRegClass(II, Op0, 1);
if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
@@ -394,6 +431,9 @@ unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ // Make sure the input operand is sufficiently constrained to be legal
+ // for this instruction.
+ Op0 = constrainOperandRegClass(II, Op0, 1);
if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
@@ -417,6 +457,10 @@ unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ // Make sure the input operands are sufficiently constrained to be legal
+ // for this instruction.
+ Op0 = constrainOperandRegClass(II, Op0, 1);
+ Op1 = constrainOperandRegClass(II, Op1, 2);
if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
@@ -609,6 +653,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
.addConstantPoolIndex(Idx));
else
// The extra immediate is for addrmode2.
+ DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::LDRcp), DestReg)
.addConstantPoolIndex(Idx)
@@ -628,6 +673,11 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
(const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned DestReg = createResultReg(RC);
+ // FastISel TLS support on non-Darwin is broken, punt to SelectionDAG.
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
+ bool IsThreadLocal = GVar && GVar->isThreadLocal();
+ if (!Subtarget->isTargetDarwin() && IsThreadLocal) return 0;
+
// Use movw+movt when possible, it avoids constant pool entries.
// Darwin targets don't support movt with Reloc::Static, see
// ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support
@@ -679,6 +729,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
AddOptionalDefs(MIB);
} else {
// The extra immediate is for addrmode2.
+ DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
DestReg)
.addConstantPoolIndex(Idx)
@@ -814,22 +865,19 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
switch (Opcode) {
default:
break;
- case Instruction::BitCast: {
+ case Instruction::BitCast:
// Look through bitcasts.
return ARMComputeAddress(U->getOperand(0), Addr);
- }
- case Instruction::IntToPtr: {
+ case Instruction::IntToPtr:
// Look past no-op inttoptrs.
if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
return ARMComputeAddress(U->getOperand(0), Addr);
break;
- }
- case Instruction::PtrToInt: {
+ case Instruction::PtrToInt:
// Look past no-op ptrtoints.
if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
return ARMComputeAddress(U->getOperand(0), Addr);
break;
- }
case Instruction::GetElementPtr: {
Address SavedAddr = Addr;
int TmpOffset = Addr.Offset;
@@ -852,13 +900,8 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
TmpOffset += CI->getSExtValue() * S;
break;
}
- if (isa<AddOperator>(Op) &&
- (!isa<Instruction>(Op) ||
- FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
- == FuncInfo.MBB) &&
- isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
- // An add (in the same block) with a constant operand. Fold the
- // constant.
+ if (canFoldAddIntoGEP(U, Op)) {
+ // A compatible add with a constant operand. Fold the constant.
ConstantInt *CI =
cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
TmpOffset += CI->getSExtValue() * S;
@@ -1025,7 +1068,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
useAM3 = true;
}
}
- RC = &ARM::GPRRegClass;
+ RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
break;
case MVT::i16:
if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
@@ -1040,7 +1083,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
useAM3 = true;
}
- RC = &ARM::GPRRegClass;
+ RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
break;
case MVT::i32:
if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
@@ -1054,7 +1097,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
} else {
Opc = ARM::LDRi12;
}
- RC = &ARM::GPRRegClass;
+ RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
break;
case MVT::f32:
if (!Subtarget->hasVFP2()) return false;
@@ -1063,7 +1106,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
needVMOV = true;
VT = MVT::i32;
Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
- RC = &ARM::GPRRegClass;
+ RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
} else {
Opc = ARM::VLDRS;
RC = TLI.getRegClassFor(VT);
@@ -1136,6 +1179,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
(const TargetRegisterClass*)&ARM::tGPRRegClass :
(const TargetRegisterClass*)&ARM::GPRRegClass);
unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
+ SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), Res)
.addReg(SrcReg).addImm(1));
@@ -1207,6 +1251,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
ARMSimplifyAddress(Addr, VT, useAM3);
// Create the base instruction, then add the operands.
+ SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(StrOpc))
.addReg(SrcReg);
@@ -1330,6 +1375,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
(isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
unsigned OpReg = getRegForValue(TI->getOperand(0));
+ OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(TstOpc))
.addReg(OpReg).addImm(1));
@@ -1367,6 +1413,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
// and it left a value for us in a virtual register. Ergo, we test
// the one-bit value left in the virtual register.
unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
+ CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
.addReg(CmpReg).addImm(1));
@@ -1491,13 +1538,15 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
}
}
+ const MCInstrDesc &II = TII.get(CmpOpc);
+ SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
if (!UseImm) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CmpOpc))
+ SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(SrcReg1).addReg(SrcReg2));
} else {
MachineInstrBuilder MIB;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(SrcReg1);
// Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
@@ -1696,6 +1745,7 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
}
unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
+ CondReg = constrainOperandRegClass(TII.get(CmpOpc), CondReg, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
.addReg(CondReg).addImm(0));
@@ -1712,12 +1762,16 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
}
unsigned ResultReg = createResultReg(RC);
- if (!UseImm)
+ if (!UseImm) {
+ Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
+ Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
.addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
- else
+ } else {
+ Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
.addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ }
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1802,7 +1856,9 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
unsigned SrcReg2 = getRegForValue(I->getOperand(1));
if (SrcReg2 == 0) return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+ unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
+ SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
+ SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg)
.addReg(SrcReg1).addReg(SrcReg2));
@@ -1930,7 +1986,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
!VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
return false;
} else {
- switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
+ switch (ArgVT.SimpleTy) {
default:
return false;
case MVT::i1:
@@ -1985,7 +2041,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
case CCValAssign::ZExt: {
MVT DestVT = VA.getLocVT();
Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
- assert (Arg != 0 && "Failed to emit a sext");
+ assert (Arg != 0 && "Failed to emit a zext");
ArgVT = DestVT;
break;
}
@@ -2182,10 +2238,14 @@ unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
}
unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
+ // Manually compute the global's type to avoid building it when unnecessary.
+ Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0);
+ EVT LCREVT = TLI.getValueType(GVTy);
+ if (!LCREVT.isSimple()) return 0;
+
GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
GlobalValue::ExternalLinkage, 0, Name);
- EVT LCREVT = TLI.getValueType(GV->getType());
- if (!LCREVT.isSimple()) return 0;
+ assert(GV->getType() == GVTy && "We miscomputed the type for the global!");
return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
}
@@ -2403,15 +2463,22 @@ bool ARMFastISel::SelectCall(const Instruction *I,
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DL, TII.get(CallOpc));
+ unsigned char OpFlags = 0;
+
+ // Add MO_PLT for global address or external symbol in the PIC relocation
+ // model.
+ if (Subtarget->isTargetELF() && TM.getRelocationModel() == Reloc::PIC_)
+ OpFlags = ARMII::MO_PLT;
+
// ARM calls don't take a predicate, but tBL / tBLX do.
if(isThumb2)
AddDefaultPred(MIB);
if (UseReg)
MIB.addReg(CalleeReg);
else if (!IntrMemName)
- MIB.addGlobalAddress(GV, 0, 0);
+ MIB.addGlobalAddress(GV, 0, OpFlags);
else
- MIB.addExternalSymbol(IntrMemName, 0);
+ MIB.addExternalSymbol(IntrMemName, OpFlags);
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
@@ -2602,47 +2669,136 @@ unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
bool isZExt) {
if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
return 0;
+ if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
+ return 0;
- unsigned Opc;
- bool isBoolZext = false;
- const TargetRegisterClass *RC;
- switch (SrcVT.SimpleTy) {
- default: return 0;
- case MVT::i16:
- if (!Subtarget->hasV6Ops()) return 0;
- RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
- if (isZExt)
- Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
- else
- Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
- break;
- case MVT::i8:
- if (!Subtarget->hasV6Ops()) return 0;
- RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
- if (isZExt)
- Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
- else
- Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
- break;
- case MVT::i1:
- if (isZExt) {
- RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
- Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
- isBoolZext = true;
- break;
+ // Table of which combinations can be emitted as a single instruction,
+ // and which will require two.
+ static const uint8_t isSingleInstrTbl[3][2][2][2] = {
+ // ARM Thumb
+ // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops
+ // ext: s z s z s z s z
+ /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
+ /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
+ /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
+ };
+
+  // Target register constraints:
+  //  - For ARM, the target can never be PC.
+  //  - For 16-bit Thumb, it is restricted to the lower 8 registers.
+  //  - For 32-bit Thumb, it is restricted to non-SP and non-PC registers.
+ static const TargetRegisterClass *RCTbl[2][2] = {
+ // Instructions: Two Single
+ /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
+ /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass }
+ };
+
+ // Table governing the instruction(s) to be emitted.
+ static const struct InstructionTable {
+ uint32_t Opc : 16;
+ uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0.
+ uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi.
+ uint32_t Imm : 8; // All instructions have either a shift or a mask.
+ } IT[2][2][3][2] = {
+ { // Two instructions (first is left shift, second is in this table).
+ { // ARM Opc S Shift Imm
+ /* 1 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 31 },
+ /* 1 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 31 } },
+ /* 8 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 24 },
+ /* 8 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 24 } },
+ /* 16 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 16 },
+ /* 16 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 16 } }
+ },
+ { // Thumb Opc S Shift Imm
+ /* 1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 },
+ /* 1 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } },
+ /* 8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 },
+ /* 8 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } },
+ /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 },
+ /* 16 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } }
+ }
+ },
+ { // Single instruction.
+ { // ARM Opc S Shift Imm
+ /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 },
+ /* 1 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 1 } },
+ /* 8 bit sext */ { { ARM::SXTB , 0, ARM_AM::no_shift, 0 },
+ /* 8 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 255 } },
+ /* 16 bit sext */ { { ARM::SXTH , 0, ARM_AM::no_shift, 0 },
+ /* 16 bit zext */ { ARM::UXTH , 0, ARM_AM::no_shift, 0 } }
+ },
+ { // Thumb Opc S Shift Imm
+ /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 },
+ /* 1 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 1 } },
+ /* 8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift, 0 },
+ /* 8 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } },
+ /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift, 0 },
+ /* 16 bit zext */ { ARM::t2UXTH , 0, ARM_AM::no_shift, 0 } }
+ }
}
- return 0;
+ };
+
+ unsigned SrcBits = SrcVT.getSizeInBits();
+ unsigned DestBits = DestVT.getSizeInBits();
+ (void) DestBits;
+ assert((SrcBits < DestBits) && "can only extend to larger types");
+ assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
+ "other sizes unimplemented");
+ assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
+ "other sizes unimplemented");
+
+ bool hasV6Ops = Subtarget->hasV6Ops();
+ unsigned Bitness = SrcBits / 8; // {1,8,16}=>{0,1,2}
+ assert((Bitness < 3) && "sanity-check table bounds");
+
+ bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
+ const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
+ const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
+ unsigned Opc = ITP->Opc;
+ assert(ARM::KILL != Opc && "Invalid table entry");
+ unsigned hasS = ITP->hasS;
+ ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift;
+ assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
+ "only MOVsi has shift operand addressing mode");
+ unsigned Imm = ITP->Imm;
+
+ // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
+ bool setsCPSR = &ARM::tGPRRegClass == RC;
+ unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
+ unsigned ResultReg;
+  // MOVsi encodes the shift and the immediate in its shift-operand
+  // addressing mode. The condition below also holds when a two-instruction
+  // sequence is emitted, since both of those instructions are shifts.
+ bool ImmIsSO = (Shift != ARM_AM::no_shift);
+
+ // Either one or two instructions are emitted.
+ // They're always of the form:
+ // dst = in OP imm
+ // CPSR is set only by 16-bit Thumb instructions.
+ // Predicate, if any, is AL.
+ // S bit, if available, is always 0.
+  // When two are emitted, the first's result feeds the second's input and is
+  // then dead.
+ unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
+ for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
+ ResultReg = createResultReg(RC);
+ bool isLsl = (0 == Instr) && !isSingleInstr;
+ unsigned Opcode = isLsl ? LSLOpc : Opc;
+ ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift;
+ unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
+ bool isKill = 1 == Instr;
+ MachineInstrBuilder MIB = BuildMI(
+ *FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opcode), ResultReg);
+ if (setsCPSR)
+ MIB.addReg(ARM::CPSR, RegState::Define);
+ SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
+ AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc));
+ if (hasS)
+ AddDefaultCC(MIB);
+ // Second instruction consumes the first's result.
+ SrcReg = ResultReg;
}
- unsigned ResultReg = createResultReg(RC);
- MachineInstrBuilder MIB;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
- .addReg(SrcReg);
- if (isBoolZext)
- MIB.addImm(1);
- else
- MIB.addImm(0);
- AddOptionalDefs(MIB);
return ResultReg;
}
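As an aside, here is a minimal sketch (not part of the patch; function names are illustrative only) of what the two-instruction sequences selected by the tables above compute on targets that lack the UXTB/SXTB/UXTH/SXTH extend instructions: the value is shifted up to the top of the register and then shifted back down, arithmetically for a sign extend and logically for a zero extend.

  #include <cstdint>
  // Equivalent of "lsl r, r, #24" followed by "asr r, r, #24" (the ARM table
  // row for an 8-bit sign extend); relies on the usual two's-complement
  // behaviour, as the emitted code does.
  int32_t sext8_via_shifts(uint32_t In) {
    return (int32_t)(In << 24) >> 24;   // arithmetic shift restores the sign
  }
  // Equivalent of "lsl r, r, #24" followed by "lsr r, r, #24" for an 8-bit
  // zero extend.
  uint32_t zext8_via_shifts(uint32_t In) {
    return (In << 24) >> 24;            // logical shift clears the top bits
  }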
@@ -2707,7 +2863,7 @@ bool ARMFastISel::SelectShift(const Instruction *I,
if (Reg2 == 0) return false;
}
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+ unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
if(ResultReg == 0) return false;
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -2797,6 +2953,25 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
return false;
}
+namespace {
+// This table describes sign- and zero-extend instructions which can be
+// folded into a preceding load. All of these extends have an immediate
+// (sometimes a mask and sometimes a shift) that's applied after
+// extension.
+const struct FoldableLoadExtendsStruct {
+ uint16_t Opc[2]; // ARM, Thumb.
+ uint8_t ExpectedImm;
+ uint8_t isZExt : 1;
+ uint8_t ExpectedVT : 7;
+} FoldableLoadExtends[] = {
+ { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 },
+ { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 },
+ { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 },
+ { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 },
+ { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 }
+};
+}
+
/// \brief The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
@@ -2812,26 +2987,23 @@ bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
// ldrb r1, [r0] ldrb r1, [r0]
// uxtb r2, r1 =>
// mov r3, r2 mov r3, r1
- bool isZExt = true;
- switch(MI->getOpcode()) {
- default: return false;
- case ARM::SXTH:
- case ARM::t2SXTH:
- isZExt = false;
- case ARM::UXTH:
- case ARM::t2UXTH:
- if (VT != MVT::i16)
- return false;
- break;
- case ARM::SXTB:
- case ARM::t2SXTB:
- isZExt = false;
- case ARM::UXTB:
- case ARM::t2UXTB:
- if (VT != MVT::i8)
- return false;
- break;
+ if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
+ return false;
+ const uint64_t Imm = MI->getOperand(2).getImm();
+
+ bool Found = false;
+ bool isZExt;
+ for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends);
+ i != e; ++i) {
+ if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() &&
+ (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm &&
+ MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) {
+ Found = true;
+ isZExt = FoldableLoadExtends[i].isZExt;
+ }
}
+ if (!Found) return false;
+
// See if we can handle this address.
Address Addr;
if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
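To make the table-driven match above concrete, here is a hedged sketch (not the backend's code) of why the listed extends are safe to fold: the narrow loads already produce the extended value, so the extend's mask or shift is a no-op whenever its immediate matches the table's ExpectedImm.

  #include <cstdint>
  // ldrb already zero-extends the loaded byte, so the "and #255" that the
  // table matches (ANDri / t2ANDri with ExpectedImm = 255) adds nothing.
  uint32_t load_then_mask(const uint8_t *P) {
    uint32_t V = *P;        // zero-extending byte load (ldrb)
    return V & 0xff;        // redundant; the fold drops this instruction
  }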
@@ -2854,12 +3026,14 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT));
// Load value.
if (isThumb2) {
+ DestReg1 = constrainOperandRegClass(TII.get(ARM::t2LDRpci), DestReg1, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::t2LDRpci), DestReg1)
.addConstantPoolIndex(Idx));
Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
} else {
// The extra immediate is for addrmode2.
+ DestReg1 = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg1, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DL, TII.get(ARM::LDRcp), DestReg1)
.addConstantPoolIndex(Idx).addImm(0));
@@ -2873,6 +3047,9 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
}
unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT));
+ DestReg2 = constrainOperandRegClass(TII.get(Opc), DestReg2, 0);
+ DestReg1 = constrainOperandRegClass(TII.get(Opc), DestReg1, 1);
+ GlobalBaseReg = constrainOperandRegClass(TII.get(Opc), GlobalBaseReg, 2);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DL, TII.get(Opc), DestReg2)
.addReg(DestReg1)
@@ -2938,12 +3115,10 @@ bool ARMFastISel::FastLowerArguments() {
ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
- const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::i32);
+ const TargetRegisterClass *RC = &ARM::rGPRRegClass;
Idx = 0;
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I, ++Idx) {
- if (I->use_empty())
- continue;
unsigned SrcReg = GPRArgRegs[Idx];
unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
@@ -2961,13 +3136,23 @@ bool ARMFastISel::FastLowerArguments() {
namespace llvm {
FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) {
- // Completely untested on non-iOS.
const TargetMachine &TM = funcInfo.MF->getTarget();
- // Darwin and thumb1 only for now.
const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
- if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
+ // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
+ bool UseFastISel = false;
+ UseFastISel |= Subtarget->isTargetIOS() && !Subtarget->isThumb1Only();
+ UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb();
+ UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb();
+
+ if (UseFastISel) {
+    // iOS always keeps a frame pointer for backtraces; force the other
+    // targets to keep their FP when doing FastISel as well. The emitted
+    // code is currently superior, and in cases like test-suite's lencod,
+    // FastISel isn't quite correct when the FP is eliminated.
+ TM.Options.NoFramePointerElim = true;
return new ARMFastISel(funcInfo, libInfo);
+ }
return 0;
}
}
diff --git a/lib/Target/ARM/ARMFeatures.h b/lib/Target/ARM/ARMFeatures.h
new file mode 100644
index 000000000000..dafc4b3a82bd
--- /dev/null
+++ b/lib/Target/ARM/ARMFeatures.h
@@ -0,0 +1,93 @@
+//===-- ARMFeatures.h - Checks for ARM instruction features ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the code shared between ARM CodeGen and ARM MC
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TARGET_ARM_FEATURES_H
+#define TARGET_ARM_FEATURES_H
+
+#include "ARM.h"
+
+namespace llvm {
+
+template<typename InstrType> // could be MachineInstr or MCInst
+inline bool isV8EligibleForIT(InstrType *Instr, int BLXOperandIndex = 0) {
+ switch (Instr->getOpcode()) {
+ default:
+ return false;
+ case ARM::tADC:
+ case ARM::tADDi3:
+ case ARM::tADDi8:
+ case ARM::tADDrSPi:
+ case ARM::tADDrr:
+ case ARM::tAND:
+ case ARM::tASRri:
+ case ARM::tASRrr:
+ case ARM::tBIC:
+ case ARM::tCMNz:
+ case ARM::tCMPi8:
+ case ARM::tCMPr:
+ case ARM::tEOR:
+ case ARM::tLDRBi:
+ case ARM::tLDRBr:
+ case ARM::tLDRHi:
+ case ARM::tLDRHr:
+ case ARM::tLDRSB:
+ case ARM::tLDRSH:
+ case ARM::tLDRi:
+ case ARM::tLDRr:
+ case ARM::tLDRspi:
+ case ARM::tLSLri:
+ case ARM::tLSLrr:
+ case ARM::tLSRri:
+ case ARM::tLSRrr:
+ case ARM::tMOVi8:
+ case ARM::tMUL:
+ case ARM::tMVN:
+ case ARM::tORR:
+ case ARM::tROR:
+ case ARM::tRSB:
+ case ARM::tSBC:
+ case ARM::tSTRBi:
+ case ARM::tSTRBr:
+ case ARM::tSTRHi:
+ case ARM::tSTRHr:
+ case ARM::tSTRi:
+ case ARM::tSTRr:
+ case ARM::tSTRspi:
+ case ARM::tSUBi3:
+ case ARM::tSUBi8:
+ case ARM::tSUBrr:
+ case ARM::tTST:
+ return true;
+  // There are some "conditionally deprecated" opcodes.
+ case ARM::tADDspr:
+ return Instr->getOperand(2).getReg() != ARM::PC;
+  // ADD PC, SP and BLX PC were always unpredictable; now, on top of that,
+  // they're also deprecated.
+ case ARM::tADDrSP:
+ case ARM::tBX:
+ return Instr->getOperand(0).getReg() != ARM::PC;
+ case ARM::tBLXr:
+ return Instr->getOperand(BLXOperandIndex).getReg() != ARM::PC;
+ case ARM::tADDhirr:
+ return Instr->getOperand(0).getReg() != ARM::PC &&
+ Instr->getOperand(2).getReg() != ARM::PC;
+ case ARM::tCMPhir:
+ case ARM::tMOVr:
+ return Instr->getOperand(0).getReg() != ARM::PC &&
+ Instr->getOperand(1).getReg() != ARM::PC;
+ }
+}
+
+}
+
+#endif
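As a usage illustration only (the caller name and flag below are assumptions, not part of this patch), a pass that forms Thumb2 IT blocks could consult the new helper before predicating an instruction when targeting ARMv8, where many 16-bit encodings inside IT blocks are deprecated:

  #include "ARMFeatures.h"
  #include "llvm/CodeGen/MachineInstr.h"
  // Hypothetical guard: pre-v8 behaviour is unchanged; on v8 only the
  // opcodes whitelisted in isV8EligibleForIT stay eligible.
  static bool canPredicateInITBlock(const llvm::MachineInstr *MI, bool IsV8) {
    return !IsV8 || llvm::isV8EligibleForIT(MI);
  }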
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index 483802b130b5..d32bdbc58939 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -82,22 +82,11 @@ ARMFrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
}
-static bool isCalleeSavedRegister(unsigned Reg, const uint16_t *CSRegs) {
- for (unsigned i = 0; CSRegs[i]; ++i)
- if (Reg == CSRegs[i])
- return true;
- return false;
-}
-
static bool isCSRestore(MachineInstr *MI,
const ARMBaseInstrInfo &TII,
const uint16_t *CSRegs) {
// Integer spill area is handled with "pop".
- if (MI->getOpcode() == ARM::LDMIA_RET ||
- MI->getOpcode() == ARM::t2LDMIA_RET ||
- MI->getOpcode() == ARM::LDMIA_UPD ||
- MI->getOpcode() == ARM::t2LDMIA_UPD ||
- MI->getOpcode() == ARM::VLDMDIA_UPD) {
+ if (isPopOpcode(MI->getOpcode())) {
// The first two operands are predicates. The last two are
// imp-def and imp-use of SP. Check everything in between.
for (int i = 5, e = MI->getNumOperands(); i != e; ++i)
@@ -115,20 +104,31 @@ static bool isCSRestore(MachineInstr *MI,
return false;
}
-static void
-emitSPUpdate(bool isARM,
- MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
- DebugLoc dl, const ARMBaseInstrInfo &TII,
- int NumBytes, unsigned MIFlags = MachineInstr::NoFlags,
- ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
+static void emitRegPlusImmediate(bool isARM, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI, DebugLoc dl,
+ const ARMBaseInstrInfo &TII, unsigned DestReg,
+ unsigned SrcReg, int NumBytes,
+ unsigned MIFlags = MachineInstr::NoFlags,
+ ARMCC::CondCodes Pred = ARMCC::AL,
+ unsigned PredReg = 0) {
if (isARM)
- emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
+ emitARMRegPlusImmediate(MBB, MBBI, dl, DestReg, SrcReg, NumBytes,
Pred, PredReg, TII, MIFlags);
else
- emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
+ emitT2RegPlusImmediate(MBB, MBBI, dl, DestReg, SrcReg, NumBytes,
Pred, PredReg, TII, MIFlags);
}
+static void emitSPUpdate(bool isARM, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI, DebugLoc dl,
+ const ARMBaseInstrInfo &TII, int NumBytes,
+ unsigned MIFlags = MachineInstr::NoFlags,
+ ARMCC::CondCodes Pred = ARMCC::AL,
+ unsigned PredReg = 0) {
+ emitRegPlusImmediate(isARM, MBB, MBBI, dl, TII, ARM::SP, ARM::SP, NumBytes,
+ MIFlags, Pred, PredReg);
+}
+
void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineBasicBlock::iterator MBBI = MBB.begin();
@@ -141,7 +141,8 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
assert(!AFI->isThumb1OnlyFunction() &&
"This emitPrologue does not support Thumb1!");
bool isARM = !AFI->isThumbFunction();
- unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize();
+ unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
+ unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
unsigned NumBytes = MFI->getStackSize();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
@@ -174,6 +175,10 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned Reg = CSI[i].getReg();
int FI = CSI[i].getFrameIdx();
switch (Reg) {
+ case ARM::R0:
+ case ARM::R1:
+ case ARM::R2:
+ case ARM::R3:
case ARM::R4:
case ARM::R5:
case ARM::R6:
@@ -181,73 +186,61 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
case ARM::LR:
if (Reg == FramePtr)
FramePtrSpillFI = FI;
- AFI->addGPRCalleeSavedArea1Frame(FI);
GPRCS1Size += 4;
break;
case ARM::R8:
case ARM::R9:
case ARM::R10:
case ARM::R11:
+ case ARM::R12:
if (Reg == FramePtr)
FramePtrSpillFI = FI;
- if (STI.isTargetIOS()) {
- AFI->addGPRCalleeSavedArea2Frame(FI);
+ if (STI.isTargetIOS())
GPRCS2Size += 4;
- } else {
- AFI->addGPRCalleeSavedArea1Frame(FI);
+ else
GPRCS1Size += 4;
- }
break;
default:
// This is a DPR. Exclude the aligned DPRCS2 spills.
if (Reg == ARM::D8)
D8SpillFI = FI;
- if (Reg < ARM::D8 || Reg >= ARM::D8 + AFI->getNumAlignedDPRCS2Regs()) {
- AFI->addDPRCalleeSavedAreaFrame(FI);
+ if (Reg < ARM::D8 || Reg >= ARM::D8 + AFI->getNumAlignedDPRCS2Regs())
DPRCSSize += 8;
- }
}
}
// Move past area 1.
- if (GPRCS1Size > 0) MBBI++;
-
- // Set FP to point to the stack slot that contains the previous FP.
- // For iOS, FP is R7, which has now been stored in spill area 1.
- // Otherwise, if this is not iOS, all the callee-saved registers go
- // into spill area 1, including the FP in R11. In either case, it is
- // now safe to emit this assignment.
- bool HasFP = hasFP(MF);
- if (HasFP) {
- unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
- .addFrameIndex(FramePtrSpillFI).addImm(0)
- .setMIFlag(MachineInstr::FrameSetup);
- AddDefaultCC(AddDefaultPred(MIB));
- }
-
- // Move past area 2.
- if (GPRCS2Size > 0) MBBI++;
+ MachineBasicBlock::iterator LastPush = MBB.end(), FramePtrPush;
+ if (GPRCS1Size > 0)
+ FramePtrPush = LastPush = MBBI++;
// Determine starting offsets of spill areas.
+ bool HasFP = hasFP(MF);
unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
- if (HasFP)
+ int FramePtrOffsetInPush = 0;
+ if (HasFP) {
+ FramePtrOffsetInPush = MFI->getObjectOffset(FramePtrSpillFI) + GPRCS1Size;
AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
NumBytes);
+ }
AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
+ // Move past area 2.
+ if (GPRCS2Size > 0) {
+ LastPush = MBBI++;
+ }
+
// Move past area 3.
if (DPRCSSize > 0) {
- MBBI++;
+ LastPush = MBBI++;
// Since vpush register list cannot have gaps, there may be multiple vpush
// instructions in the prologue.
while (MBBI->getOpcode() == ARM::VSTMDDB_UPD)
- MBBI++;
+ LastPush = MBBI++;
}
// Move past the aligned DPRCS2 area.
@@ -263,8 +256,13 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
if (NumBytes) {
// Adjust SP after all the callee-save spills.
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
- MachineInstr::FrameSetup);
+ if (tryFoldSPUpdateIntoPushPop(MF, LastPush, NumBytes)) {
+ if (LastPush == FramePtrPush)
+ FramePtrOffsetInPush += NumBytes;
+ } else
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
+ MachineInstr::FrameSetup);
+
if (HasFP && isARM)
// Restore from fp only in ARM mode: e.g. sub sp, r7, #24
// Note it's not safe to do this in Thumb2 mode because it would have
@@ -277,6 +275,18 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
AFI->setShouldRestoreSPFromFP(true);
}
+ // Set FP to point to the stack slot that contains the previous FP.
+ // For iOS, FP is R7, which has now been stored in spill area 1.
+  // Otherwise (non-iOS), all the callee-saved registers go into spill
+  // area 1, including the FP in R11. In either case the FP spill is in
+  // area 1, and the adjustment needs to take place just after that push.
+ if (HasFP)
+ emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, ++FramePtrPush, dl, TII,
+ FramePtr, ARM::SP, FramePtrOffsetInPush,
+ MachineInstr::FrameSetup);
+
if (STI.isTargetELF() && hasFP(MF))
MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
AFI->getFramePtrSpillOffset());
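A worked example of the new FramePtrOffsetInPush computation, with assumed numbers rather than values taken from the patch: the frame pointer is now set from SP immediately after the area-1 push, so the spill slot's (negative) object offset has to be rebased onto the post-push stack pointer.

  // Hypothetical helper mirroring the prologue arithmetic above.
  int framePtrOffsetInPush(int FPObjectOffset, int GPRCS1Size) {
    return FPObjectOffset + GPRCS1Size;
  }
  // e.g. push {r4-r7, r11, lr} stores 24 bytes; if the FP slot sits at
  // object offset -8, the prologue emits "add r11, sp, #16" right after the
  // push. If an SP update of NumBytes was folded into that same push,
  // NumBytes is added on top, exactly as the code above does.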
@@ -357,7 +367,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
"This emitEpilogue does not support Thumb1!");
bool isARM = !AFI->isThumbFunction();
- unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize();
+ unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
+ unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
int NumBytes = (int)MFI->getStackSize();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
@@ -371,11 +382,11 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
} else {
// Unwind MBBI to point to first LDR / VLDRD.
- const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
+ const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
if (MBBI != MBB.begin()) {
- do
+ do {
--MBBI;
- while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
+ } while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
if (!isCSRestore(MBBI, TII, CSRegs))
++MBBI;
}
@@ -419,8 +430,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
ARM::SP)
.addReg(FramePtr));
}
- } else if (NumBytes)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
+ } else if (NumBytes && !tryFoldSPUpdateIntoPushPop(MF, MBBI, NumBytes))
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
// Increment past our save areas.
if (AFI->getDPRCalleeSavedAreaSize()) {
@@ -499,12 +510,6 @@ ARMFrameLowering::ResolveFrameIndexReference(const MachineFunction &MF,
FrameReg = ARM::SP;
Offset += SPAdj;
- if (AFI->isGPRCalleeSavedArea1Frame(FI))
- return Offset - AFI->getGPRCalleeSavedArea1Offset();
- else if (AFI->isGPRCalleeSavedArea2Frame(FI))
- return Offset - AFI->getGPRCalleeSavedArea2Offset();
- else if (AFI->isDPRCalleeSavedAreaFrame(FI))
- return Offset - AFI->getDPRCalleeSavedAreaOffset();
// SP can move around if there are allocas. We may also lose track of SP
// when emergency spilling inside a non-reserved call frame setup.
@@ -656,6 +661,8 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
unsigned RetOpcode = MI->getOpcode();
bool isTailCall = (RetOpcode == ARM::TCRETURNdi ||
RetOpcode == ARM::TCRETURNri);
+ bool isInterrupt =
+ RetOpcode == ARM::SUBS_PC_LR || RetOpcode == ARM::t2SUBS_PC_LR;
SmallVector<unsigned, 4> Regs;
unsigned i = CSI.size();
@@ -670,7 +677,8 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
continue;
- if (Reg == ARM::LR && !isTailCall && !isVarArg && STI.hasV5TOps()) {
+ if (Reg == ARM::LR && !isTailCall && !isVarArg && !isInterrupt &&
+ STI.hasV5TOps()) {
Reg = ARM::PC;
LdmOpc = AFI->isThumbFunction() ? ARM::t2LDMIA_RET : ARM::LDMIA_RET;
// Fold the return instruction into the LDM.
@@ -1197,7 +1205,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// Don't spill FP if the frame can be eliminated. This is determined
// by scanning the callee-save registers to see if any is used.
- const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
+ const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
bool Spilled = false;
@@ -1224,6 +1232,8 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
case ARM::LR:
LRSpilled = true;
// Fallthrough
+ case ARM::R0: case ARM::R1:
+ case ARM::R2: case ARM::R3:
case ARM::R4: case ARM::R5:
case ARM::R6: case ARM::R7:
CS1Spilled = true;
@@ -1238,6 +1248,8 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
}
switch (Reg) {
+ case ARM::R0: case ARM::R1:
+ case ARM::R2: case ARM::R3:
case ARM::R4: case ARM::R5:
case ARM::R6: case ARM::R7:
case ARM::LR:
@@ -1293,8 +1305,12 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
if (!LRSpilled && CS1Spilled) {
MRI.setPhysRegUsed(ARM::LR);
NumGPRSpills++;
- UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
- UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
+ SmallVectorImpl<unsigned>::iterator LRPos;
+ LRPos = std::find(UnspilledCS1GPRs.begin(), UnspilledCS1GPRs.end(),
+ (unsigned)ARM::LR);
+ if (LRPos != UnspilledCS1GPRs.end())
+ UnspilledCS1GPRs.erase(LRPos);
+
ForceLRSpill = false;
ExtraCSSpill = true;
}
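The LR bookkeeping fix above is an instance of a generic guarded-erase idiom; a minimal sketch (using std::vector in place of the SmallVector used in the code, names assumed) of why the check matters: erasing the result of std::find without testing it invites undefined behaviour when the element is absent, here when LR was never in the unspilled-CS1 list.

  #include <algorithm>
  #include <vector>
  void eraseIfPresent(std::vector<unsigned> &Regs, unsigned Reg) {
    std::vector<unsigned>::iterator I =
        std::find(Regs.begin(), Regs.end(), Reg);
    if (I != Regs.end())   // only erase when the register was actually there
      Regs.erase(I);
  }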
diff --git a/lib/Target/ARM/ARMHazardRecognizer.cpp b/lib/Target/ARM/ARMHazardRecognizer.cpp
index 1240169e84ed..c69d313fd9ce 100644
--- a/lib/Target/ARM/ARMHazardRecognizer.cpp
+++ b/lib/Target/ARM/ARMHazardRecognizer.cpp
@@ -44,10 +44,16 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
if (LastMI && (MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainGeneral) {
MachineInstr *DefMI = LastMI;
const MCInstrDesc &LastMCID = LastMI->getDesc();
+ const TargetMachine &TM =
+ MI->getParent()->getParent()->getTarget();
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());
+
// Skip over one non-VFP / NEON instruction.
if (!LastMI->isBarrier() &&
// On A9, AGU and NEON/FPU are muxed.
- !(STI.isLikeA9() && (LastMI->mayLoad() || LastMI->mayStore())) &&
+ !(TII.getSubtarget().isLikeA9() &&
+ (LastMI->mayLoad() || LastMI->mayStore())) &&
(LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
MachineBasicBlock::iterator I = LastMI;
if (I != LastMI->getParent()->begin()) {
@@ -58,7 +64,7 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
if (TII.isFpMLxInstruction(DefMI->getOpcode()) &&
(TII.canCauseFpMLxStall(MI->getOpcode()) ||
- hasRAWHazard(DefMI, MI, TRI))) {
+ hasRAWHazard(DefMI, MI, TII.getRegisterInfo()))) {
// Try to schedule another instruction for the next 4 cycles.
if (FpMLxStalls == 0)
FpMLxStalls = 4;
diff --git a/lib/Target/ARM/ARMHazardRecognizer.h b/lib/Target/ARM/ARMHazardRecognizer.h
index 98bfc4cf0cc5..e1dcec3d1cc8 100644
--- a/lib/Target/ARM/ARMHazardRecognizer.h
+++ b/lib/Target/ARM/ARMHazardRecognizer.h
@@ -28,21 +28,14 @@ class MachineInstr;
/// ARM preRA scheduler uses an unspecialized instance of the
/// ScoreboardHazardRecognizer.
class ARMHazardRecognizer : public ScoreboardHazardRecognizer {
- const ARMBaseInstrInfo &TII;
- const ARMBaseRegisterInfo &TRI;
- const ARMSubtarget &STI;
-
MachineInstr *LastMI;
unsigned FpMLxStalls;
public:
ARMHazardRecognizer(const InstrItineraryData *ItinData,
- const ARMBaseInstrInfo &tii,
- const ARMBaseRegisterInfo &tri,
- const ARMSubtarget &sti,
- const ScheduleDAG *DAG) :
- ScoreboardHazardRecognizer(ItinData, DAG, "post-RA-sched"), TII(tii),
- TRI(tri), STI(sti), LastMI(0) {}
+ const ScheduleDAG *DAG)
+ : ScoreboardHazardRecognizer(ItinData, DAG, "post-RA-sched"),
+ LastMI(0) {}
virtual HazardType getHazardType(SUnit *SU, int Stalls);
virtual void Reset();
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 9e1782e1191c..87d15226947a 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -61,7 +61,6 @@ enum AddrMode2Type {
class ARMDAGToDAGISel : public SelectionDAGISel {
ARMBaseTargetMachine &TM;
- const ARMBaseInstrInfo *TII;
/// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
/// make the right decision when generating code for different targets.
@@ -71,7 +70,6 @@ public:
explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
CodeGenOpt::Level OptLevel)
: SelectionDAGISel(tm, OptLevel), TM(tm),
- TII(static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo())),
Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
}
@@ -132,6 +130,13 @@ public:
return true;
}
+ bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
+ const ConstantSDNode *CN = cast<ConstantSDNode>(N);
+ Pred = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
+ Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
+ return true;
+ }
+
bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
SDValue &Offset, SDValue &Opc);
bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
@@ -177,6 +182,7 @@ public:
SDValue &OffImm);
bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
SDValue &OffReg, SDValue &ShImm);
+ bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);
inline bool is_so_imm(unsigned Imm) const {
return ARM_AM::getSOImmVal(Imm) != -1;
@@ -240,21 +246,6 @@ private:
/// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
- /// SelectCMOVOp - Select CMOV instructions for ARM.
- SDNode *SelectCMOVOp(SDNode *N);
- SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR,
- SDValue InFlag);
- SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR,
- SDValue InFlag);
- SDNode *SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR,
- SDValue InFlag);
- SDNode *SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR,
- SDValue InFlag);
-
// Select special operations if node forms integer ABS pattern
SDNode *SelectABSOp(SDNode *N);
@@ -262,7 +253,7 @@ private:
SDNode *SelectConcatVector(SDNode *N);
- SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
+ SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32, unsigned Op64);
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
@@ -364,7 +355,7 @@ void ARMDAGToDAGISel::PreprocessISelDAG() {
continue;
// Check if the AND mask is an immediate of the form: 000.....1111111100
- unsigned TZ = CountTrailingZeros_32(And_imm);
+ unsigned TZ = countTrailingZeros(And_imm);
if (TZ != 1 && TZ != 2)
// Be conservative here. Shifter operands aren't always free. e.g. On
// Swift, left shifter operand of 1 / 2 for free but others are not.
@@ -402,12 +393,12 @@ void ARMDAGToDAGISel::PreprocessISelDAG() {
}
// Now make the transformation.
- Srl = CurDAG->getNode(ISD::SRL, Srl.getDebugLoc(), MVT::i32,
+ Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
Srl.getOperand(0),
CurDAG->getConstant(Srl_imm+TZ, MVT::i32));
- N1 = CurDAG->getNode(ISD::AND, N1.getDebugLoc(), MVT::i32,
+ N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
Srl, CurDAG->getConstant(And_imm, MVT::i32));
- N1 = CurDAG->getNode(ISD::SHL, N1.getDebugLoc(), MVT::i32,
+ N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
N1, CurDAG->getConstant(TZ, MVT::i32));
CurDAG->UpdateNodeOperands(N, N0, N1);
}
@@ -423,7 +414,7 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
if (!CheckVMLxHazard)
return true;
- if (!Subtarget->isCortexA8() && !Subtarget->isLikeA9() &&
+ if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9() &&
!Subtarget->isSwift())
return true;
@@ -434,6 +425,9 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
if (Use->getOpcode() == ISD::CopyToReg)
return true;
if (Use->isMachineOpcode()) {
+ const ARMBaseInstrInfo *TII =
+ static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());
+
const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
if (MCID.mayStore())
return true;
@@ -533,7 +527,8 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
if (N.getOpcode() == ISD::FrameIndex) {
// Match frame index.
int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@@ -557,7 +552,8 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
}
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
return true;
@@ -703,7 +699,8 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
Base = N;
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
} else if (N.getOpcode() == ARMISD::Wrapper &&
!(Subtarget->useMovt() &&
N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
@@ -724,7 +721,8 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
}
Offset = CurDAG->getRegister(0, MVT::i32);
@@ -901,7 +899,8 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
Base = N;
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
}
Offset = CurDAG->getRegister(0, MVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
@@ -915,7 +914,8 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
}
Offset = CurDAG->getRegister(0, MVT::i32);
@@ -960,7 +960,8 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
Base = N;
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
} else if (N.getOpcode() == ARMISD::Wrapper &&
!(Subtarget->useMovt() &&
N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
@@ -978,7 +979,8 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
}
ARM_AM::AddrOpc AddSub = ARM_AM::add;
@@ -1202,7 +1204,8 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
SDValue &Base, SDValue &OffImm) {
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@@ -1219,7 +1222,8 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
}
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
return true;
@@ -1267,7 +1271,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
if (N.getOpcode() == ISD::FrameIndex) {
// Match frame index.
int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@@ -1297,7 +1302,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
}
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
return true;
@@ -1326,7 +1332,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
}
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
return true;
@@ -1403,6 +1410,34 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
return true;
}
+bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
+ SDValue &OffImm) {
+ // This *must* succeed since it's used for the irreplacable ldrex and strex
+ // instructions.
+ Base = N;
+ OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+
+ if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
+ return true;
+
+ ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ if (!RHS)
+ return true;
+
+ uint32_t RHSC = (int)RHS->getZExtValue();
+ if (RHSC > 1020 || RHSC % 4 != 0)
+ return true;
+
+ Base = N.getOperand(0);
+ if (Base.getOpcode() == ISD::FrameIndex) {
+ int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FI, getTargetLowering()->getPointerTy());
+ }
+
+ OffImm = CurDAG->getTargetConstant(RHSC / 4, MVT::i32);
+ return true;
+}
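A small sketch (assumed helper, not in the patch) of the constraint this addressing mode encodes: Thumb2 LDREX/STREX offsets are an 8-bit immediate scaled by 4, so anything outside 0-1020 or not word-aligned falls back to a plain base register with a zero offset, as the code above does.

  #include <cstdint>
  bool isValidT2ExclusiveOffset(uint32_t Off) {
    return Off <= 1020 && Off % 4 == 0;  // encoded in the instruction as Off / 4
  }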
+
//===--------------------------------------------------------------------===//
/// getAL - Returns a ARMCC::AL immediate node.
@@ -1468,14 +1503,14 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
SDValue Base = LD->getBasePtr();
SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
CurDAG->getRegister(0, MVT::i32), Chain };
- return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
+ return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
MVT::i32, MVT::Other, Ops);
} else {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
CurDAG->getRegister(0, MVT::i32), Chain };
- return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
+ return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
MVT::i32, MVT::Other, Ops);
}
}
@@ -1524,7 +1559,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
SDValue Base = LD->getBasePtr();
SDValue Ops[]= { Base, Offset, getAL(CurDAG),
CurDAG->getRegister(0, MVT::i32), Chain };
- return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
+ return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
MVT::Other, Ops);
}
@@ -1533,7 +1568,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
/// \brief Form a GPRPair pseudo register from a pair of GPR regs.
SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
- DebugLoc dl = V0.getNode()->getDebugLoc();
+ SDLoc dl(V0.getNode());
SDValue RegClass =
CurDAG->getTargetConstant(ARM::GPRPairRegClassID, MVT::i32);
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
@@ -1544,7 +1579,7 @@ SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
/// \brief Form a D register from a pair of S registers.
SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
- DebugLoc dl = V0.getNode()->getDebugLoc();
+ SDLoc dl(V0.getNode());
SDValue RegClass =
CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
@@ -1555,7 +1590,7 @@ SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
/// \brief Form a quad register from a pair of D registers.
SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
- DebugLoc dl = V0.getNode()->getDebugLoc();
+ SDLoc dl(V0.getNode());
SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
@@ -1565,7 +1600,7 @@ SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
/// \brief Form 4 consecutive D registers from a pair of Q registers.
SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
- DebugLoc dl = V0.getNode()->getDebugLoc();
+ SDLoc dl(V0.getNode());
SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
@@ -1576,7 +1611,7 @@ SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
/// \brief Form 4 consecutive S registers.
SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
SDValue V2, SDValue V3) {
- DebugLoc dl = V0.getNode()->getDebugLoc();
+ SDLoc dl(V0.getNode());
SDValue RegClass =
CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
@@ -1591,7 +1626,7 @@ SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
/// \brief Form 4 consecutive D registers.
SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
SDValue V2, SDValue V3) {
- DebugLoc dl = V0.getNode()->getDebugLoc();
+ SDLoc dl(V0.getNode());
SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
@@ -1605,7 +1640,7 @@ SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
/// \brief Form 4 consecutive Q registers.
SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
SDValue V2, SDValue V3) {
- DebugLoc dl = V0.getNode()->getDebugLoc();
+ SDLoc dl(V0.getNode());
SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
@@ -1689,7 +1724,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
const uint16_t *QOpcodes0,
const uint16_t *QOpcodes1) {
assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue MemAddr, Align;
unsigned AddrOpIdx = isUpdating ? 1 : 2;
@@ -1821,7 +1856,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
const uint16_t *QOpcodes0,
const uint16_t *QOpcodes1) {
assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue MemAddr, Align;
unsigned AddrOpIdx = isUpdating ? 1 : 2;
@@ -1966,7 +2001,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
const uint16_t *DOpcodes,
const uint16_t *QOpcodes) {
assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue MemAddr, Align;
unsigned AddrOpIdx = isUpdating ? 1 : 2;
@@ -2084,7 +2119,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
unsigned NumVecs,
const uint16_t *Opcodes) {
assert(NumVecs >=2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue MemAddr, Align;
if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
@@ -2166,7 +2201,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
unsigned Opc) {
assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
EVT VT = N->getValueType(0);
unsigned FirstTblReg = IsExt ? 2 : 1;
@@ -2278,204 +2313,6 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
return NULL;
}
-SDNode *ARMDAGToDAGISel::
-SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
- SDValue CPTmp0;
- SDValue CPTmp1;
- if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
- unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
- unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
- unsigned Opc = 0;
- switch (SOShOp) {
- case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
- case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
- case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
- case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
- default:
- llvm_unreachable("Unknown so_reg opcode!");
- }
- SDValue SOShImm =
- CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
- SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32,Ops, 6);
- }
- return 0;
-}
-
-SDNode *ARMDAGToDAGISel::
-SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
- SDValue CPTmp0;
- SDValue CPTmp1;
- SDValue CPTmp2;
- if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
- SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N, ARM::MOVCCsi, MVT::i32, Ops, 6);
- }
-
- if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
- SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N, ARM::MOVCCsr, MVT::i32, Ops, 7);
- }
- return 0;
-}
-
-SDNode *ARMDAGToDAGISel::
-SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
- ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
- if (!T)
- return 0;
-
- unsigned Opc = 0;
- unsigned TrueImm = T->getZExtValue();
- if (is_t2_so_imm(TrueImm)) {
- Opc = ARM::t2MOVCCi;
- } else if (TrueImm <= 0xffff) {
- Opc = ARM::t2MOVCCi16;
- } else if (is_t2_so_imm_not(TrueImm)) {
- TrueImm = ~TrueImm;
- Opc = ARM::t2MVNCCi;
- } else if (TrueVal.getNode()->hasOneUse() && Subtarget->hasV6T2Ops()) {
- // Large immediate.
- Opc = ARM::t2MOVCCi32imm;
- }
-
- if (Opc) {
- SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
- SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
- }
-
- return 0;
-}
-
-SDNode *ARMDAGToDAGISel::
-SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
- ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
- if (!T)
- return 0;
-
- unsigned Opc = 0;
- unsigned TrueImm = T->getZExtValue();
- bool isSoImm = is_so_imm(TrueImm);
- if (isSoImm) {
- Opc = ARM::MOVCCi;
- } else if (Subtarget->hasV6T2Ops() && TrueImm <= 0xffff) {
- Opc = ARM::MOVCCi16;
- } else if (is_so_imm_not(TrueImm)) {
- TrueImm = ~TrueImm;
- Opc = ARM::MVNCCi;
- } else if (TrueVal.getNode()->hasOneUse() &&
- (Subtarget->hasV6T2Ops() || ARM_AM::isSOImmTwoPartVal(TrueImm))) {
- // Large immediate.
- Opc = ARM::MOVCCi32imm;
- }
-
- if (Opc) {
- SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
- SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
- }
-
- return 0;
-}
-
-SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
- EVT VT = N->getValueType(0);
- SDValue FalseVal = N->getOperand(0);
- SDValue TrueVal = N->getOperand(1);
- SDValue CC = N->getOperand(2);
- SDValue CCR = N->getOperand(3);
- SDValue InFlag = N->getOperand(4);
- assert(CC.getOpcode() == ISD::Constant);
- assert(CCR.getOpcode() == ISD::Register);
- ARMCC::CondCodes CCVal =
- (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();
-
- if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
- // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
- // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
- // Pattern complexity = 18 cost = 1 size = 0
- if (Subtarget->isThumb()) {
- SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
- CCVal, CCR, InFlag);
- if (!Res)
- Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
- ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
- if (Res)
- return Res;
- } else {
- SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
- CCVal, CCR, InFlag);
- if (!Res)
- Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
- ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
- if (Res)
- return Res;
- }
-
- // Pattern: (ARMcmov:i32 GPR:i32:$false,
- // (imm:i32)<<P:Pred_so_imm>>:$true,
- // (imm:i32):$cc)
- // Emits: (MOVCCi:i32 GPR:i32:$false,
- // (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
- // Pattern complexity = 10 cost = 1 size = 0
- if (Subtarget->isThumb()) {
- SDNode *Res = SelectT2CMOVImmOp(N, FalseVal, TrueVal,
- CCVal, CCR, InFlag);
- if (!Res)
- Res = SelectT2CMOVImmOp(N, TrueVal, FalseVal,
- ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
- if (Res)
- return Res;
- } else {
- SDNode *Res = SelectARMCMOVImmOp(N, FalseVal, TrueVal,
- CCVal, CCR, InFlag);
- if (!Res)
- Res = SelectARMCMOVImmOp(N, TrueVal, FalseVal,
- ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
- if (Res)
- return Res;
- }
- }
-
- // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
- // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
- // Pattern complexity = 6 cost = 1 size = 0
- //
- // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
- // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
- // Pattern complexity = 6 cost = 11 size = 0
- //
- // Also VMOVScc and VMOVDcc.
- SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag };
- unsigned Opc = 0;
- switch (VT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Illegal conditional move type!");
- case MVT::i32:
- Opc = Subtarget->isThumb()
- ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
- : ARM::MOVCCr;
- break;
- case MVT::f32:
- Opc = ARM::VMOVScc;
- break;
- case MVT::f64:
- Opc = ARM::VMOVDcc;
- break;
- }
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
-}
-
/// Target-specific DAG combining for ISD::XOR.
/// Target-independent combining lowers SELECT_CC nodes of the form
/// select_cc setg[ge] X, 0, X, -X
@@ -2524,30 +2361,45 @@ SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1));
}
-SDNode *ARMDAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
+SDNode *ARMDAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
+ unsigned Op16,unsigned Op32,
+ unsigned Op64) {
+ // Mostly direct translation to the given operations, except that we preserve
+ // the AtomicOrdering for use later on.
+ AtomicSDNode *AN = cast<AtomicSDNode>(Node);
+ EVT VT = AN->getMemoryVT();
+
+ unsigned Op;
+ SDVTList VTs = CurDAG->getVTList(AN->getValueType(0), MVT::Other);
+ if (VT == MVT::i8)
+ Op = Op8;
+ else if (VT == MVT::i16)
+ Op = Op16;
+ else if (VT == MVT::i32)
+ Op = Op32;
+ else if (VT == MVT::i64) {
+ Op = Op64;
+ VTs = CurDAG->getVTList(MVT::i32, MVT::i32, MVT::Other);
+ } else
+ llvm_unreachable("Unexpected atomic operation");
+
SmallVector<SDValue, 6> Ops;
- Ops.push_back(Node->getOperand(1)); // Ptr
- Ops.push_back(Node->getOperand(2)); // Low part of Val1
- Ops.push_back(Node->getOperand(3)); // High part of Val1
- if (Opc == ARM::ATOMCMPXCHG6432) {
- Ops.push_back(Node->getOperand(4)); // Low part of Val2
- Ops.push_back(Node->getOperand(5)); // High part of Val2
- }
- Ops.push_back(Node->getOperand(0)); // Chain
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
- SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
- MVT::i32, MVT::i32, MVT::Other,
- Ops);
- cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
- return ResNode;
+ for (unsigned i = 1; i < AN->getNumOperands(); ++i)
+ Ops.push_back(AN->getOperand(i));
+
+ Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
+ Ops.push_back(AN->getOperand(0)); // Chain moves to the end
+
+ return CurDAG->SelectNodeTo(Node, Op, VTs, &Ops[0], Ops.size());
}
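The ordering operand that SelectAtomic now appends is what lets the later pseudo expansion choose its memory barriers. The sketch below only illustrates that decision; the enum is a local stand-in for LLVM's AtomicOrdering and the rule shown is the usual pre-ARMv8 mapping, not a quote of the expansion code.

  enum Ordering { Monotonic, Acquire, Release, AcquireRelease, SeqCst };
  // Barrier before the ll/sc loop for release semantics, after it for
  // acquire semantics; sequentially consistent operations need both.
  void barriersFor(Ordering O, bool &DMBBefore, bool &DMBAfter) {
    DMBBefore = (O == Release || O == AcquireRelease || O == SeqCst);
    DMBAfter  = (O == Acquire || O == AcquireRelease || O == SeqCst);
  }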
SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
- if (N->isMachineOpcode())
+ if (N->isMachineOpcode()) {
+ N->setNodeId(-1);
return NULL; // Already selected.
+ }
switch (N->getOpcode()) {
default: break;
@@ -2587,7 +2439,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue CPIdx =
CurDAG->getTargetConstantPool(ConstantInt::get(
Type::getInt32Ty(*CurDAG->getContext()), Val),
- TLI.getPointerTy());
+ getTargetLowering()->getPointerTy());
SDNode *ResNode;
if (Subtarget->isThumb1Only()) {
@@ -2617,7 +2469,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case ISD::FrameIndex: {
// Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
int FI = cast<FrameIndexSDNode>(N)->getIndex();
- SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ SDValue TFI = CurDAG->getTargetFrameIndex(FI,
+ getTargetLowering()->getPointerTy());
if (Subtarget->isThumb1Only()) {
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
@@ -2838,8 +2691,6 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue(Chain.getNode(), Chain.getResNo()));
return NULL;
}
- case ARMISD::CMOV:
- return SelectCMOVOp(N);
case ARMISD::VZIP: {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
@@ -3121,7 +2972,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case Intrinsic::arm_ldrexd: {
SDValue MemAddr = N->getOperand(2);
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue Chain = N->getOperand(0);
bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
@@ -3179,7 +3030,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case Intrinsic::arm_strexd: {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue Chain = N->getOperand(0);
SDValue Val0 = N->getOperand(2);
SDValue Val1 = N->getOperand(3);
@@ -3383,7 +3234,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case ARMISD::VTBL1: {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
EVT VT = N->getValueType(0);
SmallVector<SDValue, 6> Ops;
@@ -3394,7 +3245,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops);
}
case ARMISD::VTBL2: {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
EVT VT = N->getValueType(0);
// Form a REG_SEQUENCE to force register allocation.
@@ -3413,31 +3264,90 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case ISD::CONCAT_VECTORS:
return SelectConcatVector(N);
- case ARMISD::ATOMOR64_DAG:
- return SelectAtomic64(N, ARM::ATOMOR6432);
- case ARMISD::ATOMXOR64_DAG:
- return SelectAtomic64(N, ARM::ATOMXOR6432);
- case ARMISD::ATOMADD64_DAG:
- return SelectAtomic64(N, ARM::ATOMADD6432);
- case ARMISD::ATOMSUB64_DAG:
- return SelectAtomic64(N, ARM::ATOMSUB6432);
- case ARMISD::ATOMNAND64_DAG:
- return SelectAtomic64(N, ARM::ATOMNAND6432);
- case ARMISD::ATOMAND64_DAG:
- return SelectAtomic64(N, ARM::ATOMAND6432);
- case ARMISD::ATOMSWAP64_DAG:
- return SelectAtomic64(N, ARM::ATOMSWAP6432);
- case ARMISD::ATOMCMPXCHG64_DAG:
- return SelectAtomic64(N, ARM::ATOMCMPXCHG6432);
-
- case ARMISD::ATOMMIN64_DAG:
- return SelectAtomic64(N, ARM::ATOMMIN6432);
- case ARMISD::ATOMUMIN64_DAG:
- return SelectAtomic64(N, ARM::ATOMUMIN6432);
- case ARMISD::ATOMMAX64_DAG:
- return SelectAtomic64(N, ARM::ATOMMAX6432);
- case ARMISD::ATOMUMAX64_DAG:
- return SelectAtomic64(N, ARM::ATOMUMAX6432);
+ case ISD::ATOMIC_LOAD:
+ if (cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64)
+ return SelectAtomic(N, 0, 0, 0, ARM::ATOMIC_LOAD_I64);
+ else
+ break;
+
+ case ISD::ATOMIC_STORE:
+ if (cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64)
+ return SelectAtomic(N, 0, 0, 0, ARM::ATOMIC_STORE_I64);
+ else
+ break;
+
+ case ISD::ATOMIC_LOAD_ADD:
+ return SelectAtomic(N,
+ ARM::ATOMIC_LOAD_ADD_I8,
+ ARM::ATOMIC_LOAD_ADD_I16,
+ ARM::ATOMIC_LOAD_ADD_I32,
+ ARM::ATOMIC_LOAD_ADD_I64);
+ case ISD::ATOMIC_LOAD_SUB:
+ return SelectAtomic(N,
+ ARM::ATOMIC_LOAD_SUB_I8,
+ ARM::ATOMIC_LOAD_SUB_I16,
+ ARM::ATOMIC_LOAD_SUB_I32,
+ ARM::ATOMIC_LOAD_SUB_I64);
+ case ISD::ATOMIC_LOAD_AND:
+ return SelectAtomic(N,
+ ARM::ATOMIC_LOAD_AND_I8,
+ ARM::ATOMIC_LOAD_AND_I16,
+ ARM::ATOMIC_LOAD_AND_I32,
+ ARM::ATOMIC_LOAD_AND_I64);
+ case ISD::ATOMIC_LOAD_OR:
+ return SelectAtomic(N,
+ ARM::ATOMIC_LOAD_OR_I8,
+ ARM::ATOMIC_LOAD_OR_I16,
+ ARM::ATOMIC_LOAD_OR_I32,
+ ARM::ATOMIC_LOAD_OR_I64);
+ case ISD::ATOMIC_LOAD_XOR:
+ return SelectAtomic(N,
+ ARM::ATOMIC_LOAD_XOR_I8,
+ ARM::ATOMIC_LOAD_XOR_I16,
+ ARM::ATOMIC_LOAD_XOR_I32,
+ ARM::ATOMIC_LOAD_XOR_I64);
+ case ISD::ATOMIC_LOAD_NAND:
+ return SelectAtomic(N,
+ ARM::ATOMIC_LOAD_NAND_I8,
+ ARM::ATOMIC_LOAD_NAND_I16,
+ ARM::ATOMIC_LOAD_NAND_I32,
+ ARM::ATOMIC_LOAD_NAND_I64);
+ case ISD::ATOMIC_LOAD_MIN:
+ return SelectAtomic(N,
+ ARM::ATOMIC_LOAD_MIN_I8,
+ ARM::ATOMIC_LOAD_MIN_I16,
+ ARM::ATOMIC_LOAD_MIN_I32,
+ ARM::ATOMIC_LOAD_MIN_I64);
+ case ISD::ATOMIC_LOAD_MAX:
+ return SelectAtomic(N,
+ ARM::ATOMIC_LOAD_MAX_I8,
+ ARM::ATOMIC_LOAD_MAX_I16,
+ ARM::ATOMIC_LOAD_MAX_I32,
+ ARM::ATOMIC_LOAD_MAX_I64);
+ case ISD::ATOMIC_LOAD_UMIN:
+ return SelectAtomic(N,
+ ARM::ATOMIC_LOAD_UMIN_I8,
+ ARM::ATOMIC_LOAD_UMIN_I16,
+ ARM::ATOMIC_LOAD_UMIN_I32,
+ ARM::ATOMIC_LOAD_UMIN_I64);
+ case ISD::ATOMIC_LOAD_UMAX:
+ return SelectAtomic(N,
+ ARM::ATOMIC_LOAD_UMAX_I8,
+ ARM::ATOMIC_LOAD_UMAX_I16,
+ ARM::ATOMIC_LOAD_UMAX_I32,
+ ARM::ATOMIC_LOAD_UMAX_I64);
+ case ISD::ATOMIC_SWAP:
+ return SelectAtomic(N,
+ ARM::ATOMIC_SWAP_I8,
+ ARM::ATOMIC_SWAP_I16,
+ ARM::ATOMIC_SWAP_I32,
+ ARM::ATOMIC_SWAP_I64);
+ case ISD::ATOMIC_CMP_SWAP:
+ return SelectAtomic(N,
+ ARM::ATOMIC_CMP_SWAP_I8,
+ ARM::ATOMIC_CMP_SWAP_I16,
+ ARM::ATOMIC_CMP_SWAP_I32,
+ ARM::ATOMIC_CMP_SWAP_I64);
}
return SelectCode(N);
@@ -3449,24 +3359,20 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
bool Changed = false;
unsigned NumOps = N->getNumOperands();
- ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(
- N->getOperand(InlineAsm::Op_AsmString));
- StringRef AsmString = StringRef(S->getSymbol());
-
  // Normally, i64 data is bound to two arbitrary GPRs for the "%r" constraint.
  // However, some instructions (e.g. ldrexd/strexd in ARM mode) require
// (even/even+1) GPRs and use %n and %Hn to refer to the individual regs
// respectively. Since there is no constraint to explicitly specify a
- // reg pair, we search %H operand inside the asm string. If it is found, the
- // transformation below enforces a GPRPair reg class for "%r" for 64-bit data.
- if (AsmString.find(":H}") == StringRef::npos)
- return NULL;
+ // reg pair, we use GPRPair reg class for "%r" for 64-bit data. For Thumb,
+  // the 64-bit data may be referred to by H, Q, R modifiers, so we still pack
+ // them into a GPRPair.
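For context, a sketch of the source-level pattern this transformation targets (ordinary GCC/Clang-style inline asm, ARM-only, not part of the patch). A 64-bit value bound to an "r" constraint is referenced as %0 for the low register and %H0 for the high register; ldrexd additionally needs an even/odd pair, which is what forcing the GPRPair register class above guarantees.

  // Assumes a 32-bit ARM target and GNU inline-asm syntax.
  static inline unsigned long long load_exclusive_64(const unsigned long long *p) {
    unsigned long long val;
    __asm__ volatile("ldrexd %0, %H0, [%1]"
                     : "=&r"(val)     // %0 = low reg, %H0 = high reg of the pair
                     : "r"(p)
                     : "memory");
    return val;
  }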
- DebugLoc dl = N->getDebugLoc();
- SDValue Glue = N->getOperand(NumOps-1);
+ SDLoc dl(N);
+ SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1) : SDValue(0,0);
+ SmallVector<bool, 8> OpChanged;
// Glue node will be appended late.
- for(unsigned i = 0; i < NumOps -1; ++i) {
+ for(unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
SDValue op = N->getOperand(i);
AsmNodeOperands.push_back(op);
@@ -3480,17 +3386,38 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
else
continue;
+ // Immediate operands to inline asm in the SelectionDAG are modeled with
+ // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
+ // the second is a constant with the value of the immediate. If we get here
+ // and we have a Kind_Imm, skip the next operand, and continue.
+ if (Kind == InlineAsm::Kind_Imm) {
+ SDValue op = N->getOperand(++i);
+ AsmNodeOperands.push_back(op);
+ continue;
+ }
+
+ unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
+ if (NumRegs)
+ OpChanged.push_back(false);
+
+ unsigned DefIdx = 0;
+ bool IsTiedToChangedOp = false;
+ // If it's a use that is tied with a previous def, it has no
+ // reg class constraint.
+ if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
+ IsTiedToChangedOp = OpChanged[DefIdx];
+
if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
&& Kind != InlineAsm::Kind_RegDefEarlyClobber)
continue;
- unsigned RegNum = InlineAsm::getNumOperandRegisters(Flag);
unsigned RC;
bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
- if (!HasRC || RC != ARM::GPRRegClassID || RegNum != 2)
+ if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
+ || NumRegs != 2)
continue;
- assert((i+2 < NumOps-1) && "Invalid number of operands in inline asm");
+ assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
SDValue V0 = N->getOperand(i+1);
SDValue V1 = N->getOperand(i+2);
unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
@@ -3551,8 +3478,12 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
Changed = true;
if(PairedReg.getNode()) {
+        OpChanged[OpChanged.size() - 1] = true;
Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/);
- Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
+ if (IsTiedToChangedOp)
+ Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
+ else
+ Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
// Replace the current flag.
AsmNodeOperands[AsmNodeOperands.size() -1] = CurDAG->getTargetConstant(
Flag, MVT::i32);
@@ -3563,11 +3494,12 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
}
}
- AsmNodeOperands.push_back(Glue);
+ if (Glue.getNode())
+ AsmNodeOperands.push_back(Glue);
if (!Changed)
return NULL;
- SDValue New = CurDAG->getNode(ISD::INLINEASM, N->getDebugLoc(),
+ SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
CurDAG->getVTList(MVT::Other, MVT::Glue), &AsmNodeOperands[0],
AsmNodeOperands.size());
New->setNodeId(-1);
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index e49cfc49854a..76a0a831f695 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -48,6 +48,7 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
+#include <utility>
using namespace llvm;
STATISTIC(NumTailCalls, "Number of tail calls");
@@ -74,7 +75,7 @@ namespace {
class ARMCCState : public CCState {
public:
ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
- const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
+ const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
LLVMContext &C, ParmContext PC)
: CCState(CC, isVarArg, MF, TM, locs, C) {
assert(((PC == Call) || (PC == Prologue)) &&
@@ -174,9 +175,10 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
- if (Subtarget->isTargetDarwin()) {
+ if (Subtarget->isTargetIOS()) {
// Uses VFP for Thumb libfuncs if available.
- if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
+ if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
+ Subtarget->hasARMOps()) {
// Single-precision floating-point arithmetic.
setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
@@ -421,7 +423,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
}
// Use divmod compiler-rt calls for iOS 5.0 and later.
- if (Subtarget->getTargetTriple().getOS() == Triple::IOS &&
+ if (Subtarget->getTargetTriple().isiOS() &&
!Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
@@ -452,6 +454,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
}
setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
+ setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
if (Subtarget->hasNEON()) {
addDRTypeForNEON(MVT::v2f32);
@@ -564,16 +567,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);
- // Custom expand long extensions to vectors.
- setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
- setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
- setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
- setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
- setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
- setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
- setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
- setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
-
// NEON does not have single instruction CTPOP for vectors with element
// types wider than 8-bits. However, custom lowering can leverage the
// v8i8/v16i8 vcnt instruction.
@@ -681,6 +674,8 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i32 , Expand);
setOperationAction(ISD::CTLZ_ZERO_UNDEF , MVT::i32 , Expand);
+ setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
+
// Only ARMv6 has BSWAP.
if (!Subtarget->hasV6Ops())
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
@@ -691,10 +686,36 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::SDIV, MVT::i32, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
}
+
+ // FIXME: Also set divmod for SREM on EABI
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ // Register based DivRem for AEABI (RTABI 4.2)
+ if (Subtarget->isTargetAEABI()) {
+ setLibcallName(RTLIB::SDIVREM_I8, "__aeabi_idivmod");
+ setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
+ setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
+ setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
+ setLibcallName(RTLIB::UDIVREM_I8, "__aeabi_uidivmod");
+ setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
+ setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
+ setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");
+
+ setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);
+
+ setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
+ } else {
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ }
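A plain C++ sketch of the semantics the __aeabi_idivmod-style libcalls provide: a single call yields both quotient and remainder (the AEABI run-time routines hand them back together, per the RTABI document referenced above). This is only the semantic equivalent, not the calling convention.

  struct DivModResult { int quot; int rem; };

  // b must be non-zero; one conceptual "divmod" producing both results.
  DivModResult sdivmod(int a, int b) {
    DivModResult r;
    r.quot = a / b;
    r.rem  = a % b;
    return r;
  }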
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
@@ -715,8 +736,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
if (!Subtarget->isTargetDarwin()) {
// Non-Darwin platforms may return values in these registers via the
// personality function.
- setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
setExceptionPointerRegister(ARM::R0);
setExceptionSelectorRegister(ARM::R1);
}
@@ -724,12 +743,10 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
// ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
// the default expansion.
- // FIXME: This should be checking for v6k, not just v6.
- if (Subtarget->hasDataBarrier() ||
- (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
- // membarrier needs custom lowering; the rest are legal and handled
- // normally.
- setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+ if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only()) {
+ // ATOMIC_FENCE needs custom lowering; the other 32-bit ones are legal and
+ // handled normally.
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
// Custom lowering for 64-bit ops
setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
@@ -742,11 +759,20 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom);
setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
- // Automatically insert fences (dmb ist) around ATOMIC_SWAP etc.
- setInsertFencesForAtomic(true);
+ // On v8, we have particularly efficient implementations of atomic fences
+ // if they can be combined with nearby atomic loads and stores.
+ if (!Subtarget->hasV8Ops()) {
+ // Automatically insert fences (dmb ist) around ATOMIC_SWAP etc.
+ setInsertFencesForAtomic(true);
+ }
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
} else {
+ // If there's anything we can use as a barrier, go through custom lowering
+ // for ATOMIC_FENCE.
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other,
+ Subtarget->hasAnyDataBarrier() ? Custom : Expand);
+
// Set them all for expansion, which will force libcalls.
- setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
@@ -843,6 +869,18 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
}
}
+
+ // Combine sin / cos into one node or libcall if possible.
+ if (Subtarget->hasSinCos()) {
+ setLibcallName(RTLIB::SINCOS_F32, "sincosf");
+ setLibcallName(RTLIB::SINCOS_F64, "sincos");
+ if (Subtarget->getTargetTriple().getOS() == Triple::IOS) {
+      // For iOS, we don't want the normal expansion of a libcall to
+ // sincos. We want to issue a libcall to __sincos_stret.
+ setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
+ setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
+ }
+ }
// We have target-specific dag combine patterns for the following nodes:
// ARMISD::VMOVRRD - No need to call setTargetDAGCombine
@@ -882,6 +920,44 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}
+static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
+ bool isThumb2, unsigned &LdrOpc,
+ unsigned &StrOpc) {
+ static const unsigned LoadBares[4][2] = {{ARM::LDREXB, ARM::t2LDREXB},
+ {ARM::LDREXH, ARM::t2LDREXH},
+ {ARM::LDREX, ARM::t2LDREX},
+ {ARM::LDREXD, ARM::t2LDREXD}};
+ static const unsigned LoadAcqs[4][2] = {{ARM::LDAEXB, ARM::t2LDAEXB},
+ {ARM::LDAEXH, ARM::t2LDAEXH},
+ {ARM::LDAEX, ARM::t2LDAEX},
+ {ARM::LDAEXD, ARM::t2LDAEXD}};
+ static const unsigned StoreBares[4][2] = {{ARM::STREXB, ARM::t2STREXB},
+ {ARM::STREXH, ARM::t2STREXH},
+ {ARM::STREX, ARM::t2STREX},
+ {ARM::STREXD, ARM::t2STREXD}};
+ static const unsigned StoreRels[4][2] = {{ARM::STLEXB, ARM::t2STLEXB},
+ {ARM::STLEXH, ARM::t2STLEXH},
+ {ARM::STLEX, ARM::t2STLEX},
+ {ARM::STLEXD, ARM::t2STLEXD}};
+
+ const unsigned (*LoadOps)[2], (*StoreOps)[2];
+ if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
+ LoadOps = LoadAcqs;
+ else
+ LoadOps = LoadBares;
+
+ if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
+ StoreOps = StoreRels;
+ else
+ StoreOps = StoreBares;
+
+ assert(isPowerOf2_32(Size) && Size <= 8 &&
+ "unsupported size for atomic binary op!");
+
+ LdrOpc = LoadOps[Log2_32(Size)][isThumb2];
+ StrOpc = StoreOps[Log2_32(Size)][isThumb2];
+}
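A standalone sketch (plain C++, placeholder opcode numbers rather than the real ARM:: enumerators) of the table lookup getExclusiveOperation performs above: the row is log2 of the access size in bytes, the column picks the Thumb2 encoding, and acquire-flavoured orderings switch to the LDAEX-style table.

  #include <cassert>

  enum Ordering { Monotonic, Acquire, Release, AcquireRelease, SeqCst };

  unsigned pickLoadExclusive(unsigned SizeInBytes, Ordering Ord, bool IsThumb2) {
    static const unsigned Bare[4][2] = {{10, 11}, {20, 21}, {30, 31}, {40, 41}};
    static const unsigned Acq[4][2]  = {{12, 13}, {22, 23}, {32, 33}, {42, 43}};
    assert(SizeInBytes == 1 || SizeInBytes == 2 || SizeInBytes == 4 ||
           SizeInBytes == 8);
    unsigned Row = 0;
    for (unsigned S = SizeInBytes; S > 1; S >>= 1)
      ++Row;                              // Row = log2(SizeInBytes)
    bool NeedsAcquire = Ord == Acquire || Ord == AcquireRelease || Ord == SeqCst;
    const unsigned (*Table)[2] = NeedsAcquire ? Acq : Bare;
    return Table[Row][IsThumb2];
  }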
+
// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
@@ -944,6 +1020,7 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::BR_JT: return "ARMISD::BR_JT";
case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
+ case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
case ARMISD::CMP: return "ARMISD::CMP";
case ARMISD::CMN: return "ARMISD::CMN";
@@ -983,7 +1060,6 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";
- case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
case ARMISD::PRELOAD: return "ARMISD::PRELOAD";
@@ -1042,6 +1118,8 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
case ARMISD::FMAX: return "ARMISD::FMAX";
case ARMISD::FMIN: return "ARMISD::FMIN";
+  case ARMISD::VMAXNM: return "ARMISD::VMAXNM";
+  case ARMISD::VMINNM: return "ARMISD::VMINNM";
case ARMISD::BFI: return "ARMISD::BFI";
case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
@@ -1069,7 +1147,7 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
}
}
-EVT ARMTargetLowering::getSetCCResultType(EVT VT) const {
+EVT ARMTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
if (!VT.isVector()) return getPointerTy();
return VT.changeVectorElementTypeToInteger();
}
@@ -1233,7 +1311,7 @@ SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
bool isThisReturn, SDValue ThisVal) const {
@@ -1314,7 +1392,7 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
SDValue StackPtr, SDValue Arg,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const {
unsigned LocMemOffset = VA.getLocMemOffset();
@@ -1325,12 +1403,12 @@ ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
false, false, 0);
}
-void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
+void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
SDValue Chain, SDValue &Arg,
RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA,
SDValue &StackPtr,
- SmallVector<SDValue, 8> &MemOpChains,
+ SmallVectorImpl<SDValue> &MemOpChains,
ISD::ArgFlagsTy Flags) const {
SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
@@ -1357,10 +1435,10 @@ SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
@@ -1406,7 +1484,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
if (!isSibCall)
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ dl);
SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
@@ -1496,7 +1575,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
MachinePointerInfo(),
- false, false, false, 0);
+ false, false, false,
+ DAG.InferPtrAlignment(AddArg));
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(j, Load));
}
@@ -1705,17 +1785,26 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
RegsToPass[i].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
- const uint32_t *Mask;
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
- const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo*>(TRI);
- if (isThisReturn)
- // For 'this' returns, use the R0-preserving mask
- Mask = ARI->getThisReturnPreservedMask(CallConv);
- else
- Mask = ARI->getCallPreservedMask(CallConv);
+ if (!isTailCall) {
+ const uint32_t *Mask;
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo*>(TRI);
+ if (isThisReturn) {
+ // For 'this' returns, use the R0-preserving mask if applicable
+ Mask = ARI->getThisReturnPreservedMask(CallConv);
+ if (!Mask) {
+ // Set isThisReturn to false if the calling convention is not one that
+ // allows 'returned' to be modeled in this way, so LowerCallResult does
+ // not try to pass 'this' straight through
+ isThisReturn = false;
+ Mask = ARI->getCallPreservedMask(CallConv);
+ }
+ } else
+ Mask = ARI->getCallPreservedMask(CallConv);
- assert(Mask && "Missing call preserved mask for calling convention");
- Ops.push_back(DAG.getRegisterMask(Mask));
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+ }
if (InFlag.getNode())
Ops.push_back(InFlag);
@@ -1729,7 +1818,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag);
+ DAG.getIntPtrConstant(0, true), InFlag, dl);
if (!Ins.empty())
InFlag = Chain.getValue(1);
@@ -1795,7 +1884,7 @@ ARMTargetLowering::HandleByVal(
  // else the parameter would be split between registers and stack,
// end register would be r4 in this case.
unsigned ByValRegBegin = reg;
- unsigned ByValRegEnd = (size < excess) ? reg + size/4 : ARM::R4;
+ unsigned ByValRegEnd = (size < excess) ? reg + size/4 : (unsigned)ARM::R4;
State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
// Note, first register is allocated in the beginning of function already,
// allocate remained amount of registers we need.
@@ -1886,6 +1975,12 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
if (isVarArg && !Outs.empty())
return false;
+ // Exception-handling functions need a special set of instructions to indicate
+ // a return to the hardware. Tail-calling another function would probably
+ // break this.
+ if (CallerF->hasFnAttribute("interrupt"))
+ return false;
+
// Also avoid sibcall optimization if either caller or callee uses struct
// return semantics.
if (isCalleeStructRet || isCallerStructRet)
@@ -2014,12 +2109,45 @@ ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
isVarArg));
}
+static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
+ SDLoc DL, SelectionDAG &DAG) {
+ const MachineFunction &MF = DAG.getMachineFunction();
+ const Function *F = MF.getFunction();
+
+ StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
+
+ // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
+ // version of the "preferred return address". These offsets affect the return
+ // instruction if this is a return from PL1 without hypervisor extensions.
+ // IRQ/FIQ: +4 "subs pc, lr, #4"
+ // SWI: 0 "subs pc, lr, #0"
+ // ABORT: +4 "subs pc, lr, #4"
+ // UNDEF: +4/+2 "subs pc, lr, #0"
+  // UNDEF varies depending on where the exception came from, ARM or Thumb
+ // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
+
+ int64_t LROffset;
+ if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
+ IntKind == "ABORT")
+ LROffset = 4;
+ else if (IntKind == "SWI" || IntKind == "UNDEF")
+ LROffset = 0;
+ else
+ report_fatal_error("Unsupported interrupt attribute. If present, value "
+ "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
+
+ RetOps.insert(RetOps.begin() + 1, DAG.getConstant(LROffset, MVT::i32, false));
+
+ return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other,
+ RetOps.data(), RetOps.size());
+}
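A standalone sketch (ordinary C++, no LLVM types) of the LR-offset selection LowerInterruptReturn performs above: the value of the "interrupt" attribute decides the immediate in "subs pc, lr, #N", following the table in the comment.

  #include <stdexcept>
  #include <string>

  int lrOffsetForInterruptKind(const std::string &Kind) {
    if (Kind.empty() || Kind == "IRQ" || Kind == "FIQ" || Kind == "ABORT")
      return 4;
    if (Kind == "SWI" || Kind == "UNDEF")
      return 0;
    throw std::invalid_argument("unsupported interrupt kind: " + Kind);
  }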
+
SDValue
ARMTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+ SDLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location.
SmallVector<CCValAssign, 16> RVLocs;
@@ -2099,6 +2227,19 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
if (Flag.getNode())
RetOps.push_back(Flag);
+ // CPUs which aren't M-class use a special sequence to return from
+ // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
+ // though we use "subs pc, lr, #N").
+ //
+ // M-class CPUs actually use a normal return sequence with a special
+ // (hardware-provided) value in LR, so the normal code path works.
+ if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
+ !Subtarget->isMClass()) {
+ if (Subtarget->isThumb1Only())
+ report_fatal_error("interrupt attribute is not supported in Thumb1");
+ return LowerInterruptReturn(RetOps, dl, DAG);
+ }
+
return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other,
RetOps.data(), RetOps.size());
}
@@ -2147,7 +2288,7 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
Copy = *Copy->use_begin();
if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
return false;
- Chain = Copy->getOperand(0);
+ TCChain = Copy->getOperand(0);
} else {
return false;
}
@@ -2155,7 +2296,8 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
bool HasRet = false;
for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
UI != UE; ++UI) {
- if (UI->getOpcode() != ARMISD::RET_FLAG)
+ if (UI->getOpcode() != ARMISD::RET_FLAG &&
+ UI->getOpcode() != ARMISD::INTRET_FLAG)
return false;
HasRet = true;
}
@@ -2186,7 +2328,7 @@ bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
EVT PtrVT = Op.getValueType();
// FIXME there is no actual debug info here
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
SDValue Res;
if (CP->isMachineConstantPoolEntry())
@@ -2207,7 +2349,7 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
unsigned ARMPCLabelIndex = 0;
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT PtrVT = getPointerTy();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
Reloc::Model RelocM = getTargetMachine().getRelocationModel();
@@ -2236,7 +2378,7 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
SelectionDAG &DAG) const {
- DebugLoc dl = GA->getDebugLoc();
+ SDLoc dl(GA);
EVT PtrVT = getPointerTy();
unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
MachineFunction &MF = DAG.getMachineFunction();
@@ -2279,7 +2421,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
SelectionDAG &DAG,
TLSModel::Model model) const {
const GlobalValue *GV = GA->getGlobal();
- DebugLoc dl = GA->getDebugLoc();
+ SDLoc dl(GA);
SDValue Offset;
SDValue Chain = DAG.getEntryNode();
EVT PtrVT = getPointerTy();
@@ -2349,7 +2491,7 @@ ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
@@ -2392,7 +2534,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
Reloc::Model RelocM = getTargetMachine().getRelocationModel();
@@ -2457,7 +2599,7 @@ SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
ARMConstantPoolValue *CPV =
ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_",
@@ -2473,7 +2615,7 @@ SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue Val = DAG.getConstant(0, MVT::i32);
return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
@@ -2482,7 +2624,7 @@ ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
Op.getOperand(1), DAG.getConstant(0, MVT::i32));
}
@@ -2491,7 +2633,7 @@ SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) const {
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
case Intrinsic::arm_thread_pointer: {
@@ -2527,7 +2669,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
case Intrinsic::arm_neon_vmullu: {
unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
? ARMISD::VMULLs : ARMISD::VMULLu;
- return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(),
+ return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
}
}
@@ -2536,19 +2678,33 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) {
// FIXME: handle "fence singlethread" more efficiently.
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (!Subtarget->hasDataBarrier()) {
// Some ARMv6 cpus can support data barriers with an mcr instruction.
// Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
// here.
assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
- "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
+ "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
DAG.getConstant(0, MVT::i32));
}
- return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
- DAG.getConstant(ARM_MB::ISH, MVT::i32));
+ ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
+ AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
+ unsigned Domain = ARM_MB::ISH;
+ if (Subtarget->isMClass()) {
+ // Only a full system barrier exists in the M-class architectures.
+ Domain = ARM_MB::SY;
+ } else if (Subtarget->isSwift() && Ord == Release) {
+ // Swift happens to implement ISHST barriers in a way that's compatible with
+ // Release semantics but weaker than ISH so we'd be fools not to use
+ // it. Beware: other processors probably don't!
+ Domain = ARM_MB::ISHST;
+ }
+
+ return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
+ DAG.getConstant(Intrinsic::arm_dmb, MVT::i32),
+ DAG.getConstant(Domain, MVT::i32));
}
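A standalone sketch (plain C++; the enumerator values are placeholders, not the real ARM_MB:: encodings) of the barrier-domain choice LowerATOMIC_FENCE makes above: M-class cores only have the full-system barrier, and Swift can use a store-only barrier for release fences.

  enum BarrierDomain { ISH, ISHST, SY };
  enum MemOrdering { Relaxed, AcquireOrd, ReleaseOrd, AcqRelOrd, SeqCstOrd };

  BarrierDomain pickDmbDomain(bool IsMClass, bool IsSwift, MemOrdering Ord) {
    if (IsMClass)
      return SY;               // M-class only has the full-system barrier.
    if (IsSwift && Ord == ReleaseOrd)
      return ISHST;            // Store-only barrier suffices for release here.
    return ISH;                // Default inner-shareable barrier.
  }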
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
@@ -2559,7 +2715,7 @@ static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
// Just preserve the chain.
return Op.getOperand(0);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
if (!isRead &&
(!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
@@ -2584,7 +2740,7 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
@@ -2595,7 +2751,7 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
SDValue &Root, SelectionDAG &DAG,
- DebugLoc dl) const {
+ SDLoc dl) const {
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -2630,6 +2786,7 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
void
ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
unsigned InRegsParamRecordIdx,
+ unsigned ArgSize,
unsigned &ArgRegsSize,
unsigned &ArgRegsSaveSize)
const {
@@ -2648,7 +2805,29 @@ ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
ArgRegsSize = NumGPRs * 4;
- ArgRegsSaveSize = (ArgRegsSize + Align - 1) & ~(Align - 1);
+
+ // If parameter is split between stack and GPRs...
+  // If the parameter is split between stack and GPRs...
+ (ArgRegsSize < ArgSize ||
+ InRegsParamRecordIdx >= CCInfo.getInRegsParamsCount())) {
+    // Add padding for the part of the param recovered from GPRs, so that
+    // its last byte ends up at an address of the form K*8 - 1.
+    // We need this because the remaining (stack) part of the parameter is
+    // stack-aligned, and we need to "attach" the "GPRs head" to it without
+    // gaps:
+ // Stack:
+ // |---- 8 bytes block ----| |---- 8 bytes block ----| |---- 8 bytes...
+ // [ [padding] [GPRs head] ] [ Tail passed via stack ....
+ //
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ unsigned Padding =
+ ((ArgRegsSize + AFI->getArgRegsSaveSize() + Align - 1) & ~(Align-1)) -
+ (ArgRegsSize + AFI->getArgRegsSaveSize());
+ ArgRegsSaveSize = ArgRegsSize + Padding;
+ } else
+ // We don't need to extend regs save size for byval parameters if they
+ // are passed via GPRs only.
+ ArgRegsSaveSize = ArgRegsSize;
}
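A minimal sketch (plain C++) of the padding computation above: round the bytes already saved plus the new register area up to the alignment, and the difference is the padding placed before the "GPRs head".

  unsigned byvalPadding(unsigned ArgRegsSize, unsigned AlreadySaved,
                        unsigned Align /* e.g. 8 */) {
    unsigned Total = ArgRegsSize + AlreadySaved;
    unsigned Rounded = (Total + Align - 1) & ~(Align - 1);
    return Rounded - Total;
  }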
// The remaining GPRs hold either the beginning of variable-argument
@@ -2661,11 +2840,12 @@ ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
// Return: The frame index registers were stored into.
int
ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
- DebugLoc dl, SDValue &Chain,
+ SDLoc dl, SDValue &Chain,
const Value *OrigArg,
unsigned InRegsParamRecordIdx,
unsigned OffsetFromOrigArg,
unsigned ArgOffset,
+ unsigned ArgSize,
bool ForceMutable) const {
  // Currently, two use-cases are possible:
@@ -2690,12 +2870,13 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
lastRegToSaveIndex = REnd - ARM::R0;
} else {
firstRegToSaveIndex = CCInfo.getFirstUnallocated
- (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
+ (GPRArgRegs, array_lengthof(GPRArgRegs));
lastRegToSaveIndex = 4;
}
unsigned ArgRegsSize, ArgRegsSaveSize;
- computeRegArea(CCInfo, MF, InRegsParamRecordIdx, ArgRegsSize, ArgRegsSaveSize);
+ computeRegArea(CCInfo, MF, InRegsParamRecordIdx, ArgSize,
+ ArgRegsSize, ArgRegsSaveSize);
// Store any by-val regs to their spots on the stack so that they may be
  // loaded by dereferencing the result of formal parameter pointer or va_next.
@@ -2703,9 +2884,17 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
// was initialized, it can't be initialized again.
if (ArgRegsSaveSize) {
+ unsigned Padding = ArgRegsSaveSize - ArgRegsSize;
+
+ if (Padding) {
+ assert(AFI->getStoredByValParamsPadding() == 0 &&
+ "The only parameter may be padded.");
+ AFI->setStoredByValParamsPadding(Padding);
+ }
+
int FrameIndex = MFI->CreateFixedObject(
ArgRegsSaveSize,
- ArgOffset + ArgRegsSaveSize - ArgRegsSize,
+ Padding + ArgOffset,
false);
SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy());
@@ -2737,13 +2926,14 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
return FrameIndex;
} else
// This will point to the next argument passed via stack.
- return MFI->CreateFixedObject(4, ArgOffset, !ForceMutable);
+ return MFI->CreateFixedObject(
+ 4, AFI->getStoredByValParamsPadding() + ArgOffset, !ForceMutable);
}
// Setup stack frame, the va_list pointer will start from.
void
ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
- DebugLoc dl, SDValue &Chain,
+ SDLoc dl, SDValue &Chain,
unsigned ArgOffset,
bool ForceMutable) const {
MachineFunction &MF = DAG.getMachineFunction();
@@ -2756,7 +2946,7 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
// argument passed via stack.
int FrameIndex =
StoreByValRegs(CCInfo, DAG, dl, Chain, 0, CCInfo.getInRegsParamsCount(),
- 0, ArgOffset, ForceMutable);
+ 0, ArgOffset, 0, ForceMutable);
AFI->setVarArgsFrameIndex(FrameIndex);
}
@@ -2766,7 +2956,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
MachineFunction &MF = DAG.getMachineFunction();
@@ -2896,12 +3086,15 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
CurByValIndex,
Ins[VA.getValNo()].PartOffset,
VA.getLocMemOffset(),
+ Flags.getByValSize(),
true /*force mutable frames*/);
InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy()));
CCInfo.nextInRegsParam();
} else {
+ unsigned FIOffset = VA.getLocMemOffset() +
+ AFI->getStoredByValParamsPadding();
int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
- VA.getLocMemOffset(), true);
+ FIOffset, true);
// Create load nodes to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
@@ -2943,7 +3136,7 @@ static bool isFloatingPointZero(SDValue Op) {
SDValue
ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &ARMcc, SelectionDAG &DAG,
- DebugLoc dl) const {
+ SDLoc dl) const {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
unsigned C = RHSC->getZExtValue();
if (!isLegalICmpImmediate(C)) {
@@ -3001,7 +3194,7 @@ ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
SDValue
ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
- DebugLoc dl) const {
+ SDLoc dl) const {
SDValue Cmp;
if (!isFloatingPointZero(RHS))
Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
@@ -3015,7 +3208,7 @@ ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
SDValue
ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
unsigned Opc = Cmp.getOpcode();
- DebugLoc DL = Cmp.getDebugLoc();
+ SDLoc DL(Cmp);
if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
@@ -3035,7 +3228,7 @@ SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue Cond = Op.getOperand(0);
SDValue SelectTrue = Op.getOperand(1);
SDValue SelectFalse = Op.getOperand(2);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// Convert:
//
@@ -3083,6 +3276,61 @@ SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SelectTrue, SelectFalse, ISD::SETNE);
}
+static ISD::CondCode getInverseCCForVSEL(ISD::CondCode CC) {
+ if (CC == ISD::SETNE)
+ return ISD::SETEQ;
+ return ISD::getSetCCSwappedOperands(CC);
+}
+
+static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
+ bool &swpCmpOps, bool &swpVselOps) {
+ // Start by selecting the GE condition code for opcodes that return true for
+ // 'equality'
+ if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
+ CC == ISD::SETULE)
+ CondCode = ARMCC::GE;
+
+ // and GT for opcodes that return false for 'equality'.
+ else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
+ CC == ISD::SETULT)
+ CondCode = ARMCC::GT;
+
+ // Since we are constrained to GE/GT, if the opcode contains 'less', we need
+ // to swap the compare operands.
+ if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
+ CC == ISD::SETULT)
+ swpCmpOps = true;
+
+ // Both GT and GE are ordered comparisons, and return false for 'unordered'.
+ // If we have an unordered opcode, we need to swap the operands to the VSEL
+ // instruction (effectively negating the condition).
+ //
+ // This also has the effect of swapping which one of 'less' or 'greater'
+ // returns true, so we also swap the compare operands. It also switches
+ // whether we return true for 'equality', so we compensate by picking the
+ // opposite condition code to our original choice.
+ if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
+ CC == ISD::SETUGT) {
+ swpCmpOps = !swpCmpOps;
+ swpVselOps = !swpVselOps;
+ CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
+ }
+
+ // 'ordered' is 'anything but unordered', so use the VS condition code and
+ // swap the VSEL operands.
+ if (CC == ISD::SETO) {
+ CondCode = ARMCC::VS;
+ swpVselOps = true;
+ }
+
+ // 'unordered or not equal' is 'anything but equal', so use the EQ condition
+ // code and swap the VSEL operands.
+ if (CC == ISD::SETUNE) {
+ CondCode = ARMCC::EQ;
+ swpVselOps = true;
+ }
+}
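A tiny check (ordinary C++, no LLVM) of one of the rewrites encoded in checkVSELConstraints above: "select (a <= b), x, y" can be implemented as "select (b >= a), x, y", i.e. SETOLE maps to the GE condition with the compare operands swapped; the equivalence also holds when either input is NaN, since both ordered comparisons are then false.

  #include <cassert>

  double selLE(double a, double b, double x, double y) { return a <= b ? x : y; }
  double selGESwapped(double a, double b, double x, double y) {
    return b >= a ? x : y;
  }

  int main() {
    assert(selLE(1.0, 2.0, 10.0, 20.0) == selGESwapped(1.0, 2.0, 10.0, 20.0));
    assert(selLE(3.0, 2.0, 10.0, 20.0) == selGESwapped(3.0, 2.0, 10.0, 20.0));
    return 0;
  }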
+
SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
@@ -3090,18 +3338,69 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
SDValue TrueVal = Op.getOperand(2);
SDValue FalseVal = Op.getOperand(3);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (LHS.getValueType() == MVT::i32) {
+ // Try to generate VSEL on ARMv8.
+ // The VSEL instruction can't use all the usual ARM condition
+ // codes: it only has two bits to select the condition code, so it's
+ // constrained to use only GE, GT, VS and EQ.
+ //
+ // To implement all the various ISD::SETXXX opcodes, we sometimes need to
+ // swap the operands of the previous compare instruction (effectively
+ // inverting the compare condition, swapping 'less' and 'greater') and
+ // sometimes need to swap the operands to the VSEL (which inverts the
+    // condition in the sense of firing whenever the previous condition didn't).
+ if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
+ TrueVal.getValueType() == MVT::f64)) {
+ ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
+ if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
+ CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
+ CC = getInverseCCForVSEL(CC);
+ std::swap(TrueVal, FalseVal);
+ }
+ }
+
SDValue ARMcc;
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
- return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp);
+ return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
+ Cmp);
}
ARMCC::CondCodes CondCode, CondCode2;
FPCCToARMCC(CC, CondCode, CondCode2);
+ // Try to generate VSEL on ARMv8.
+ if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
+ TrueVal.getValueType() == MVT::f64)) {
+ // We can select VMAXNM/VMINNM from a compare followed by a select with the
+ // same operands, as follows:
+ // c = fcmp [ogt, olt, ugt, ult] a, b
+ // select c, a, b
+ // We only do this in unsafe-fp-math, because signed zeros and NaNs are
+ // handled differently than the original code sequence.
+ if (getTargetMachine().Options.UnsafeFPMath && LHS == TrueVal &&
+ RHS == FalseVal) {
+ if (CC == ISD::SETOGT || CC == ISD::SETUGT)
+ return DAG.getNode(ARMISD::VMAXNM, dl, VT, TrueVal, FalseVal);
+ if (CC == ISD::SETOLT || CC == ISD::SETULT)
+ return DAG.getNode(ARMISD::VMINNM, dl, VT, TrueVal, FalseVal);
+ }
+
+ bool swpCmpOps = false;
+ bool swpVselOps = false;
+ checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
+
+ if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
+ CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
+ if (swpCmpOps)
+ std::swap(LHS, RHS);
+ if (swpVselOps)
+ std::swap(TrueVal, FalseVal);
+ }
+ }
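A short illustration (plain C++) of why the compare-plus-select form and a true maxNum differ on NaN inputs, which is why the VMAXNM/VMINNM rewrite above is gated on unsafe-fp-math (signed zeros differ as well, though that is not shown here): with a NaN second operand, the select returns the NaN, while std::fmax, like IEEE maxNum and VMAXNM, returns the non-NaN operand.

  #include <cmath>
  #include <cstdio>

  double selectMax(double a, double b) { return a > b ? a : b; }

  int main() {
    double nan = std::nan("");
    // selectMax(1.0, nan) yields nan; std::fmax(1.0, nan) yields 1.0.
    std::printf("select: %f  fmax: %f\n", selectMax(1.0, nan),
                std::fmax(1.0, nan));
    return 0;
  }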
+
SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
@@ -3145,7 +3444,7 @@ static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
return DAG.getConstant(0, MVT::i32);
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
- return DAG.getLoad(MVT::i32, Op.getDebugLoc(),
+ return DAG.getLoad(MVT::i32, SDLoc(Op),
Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
Ld->isVolatile(), Ld->isNonTemporal(),
Ld->isInvariant(), Ld->getAlignment());
@@ -3163,7 +3462,7 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
SDValue Ptr = Ld->getBasePtr();
- RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
+ RetVal1 = DAG.getLoad(MVT::i32, SDLoc(Op),
Ld->getChain(), Ptr,
Ld->getPointerInfo(),
Ld->isVolatile(), Ld->isNonTemporal(),
@@ -3171,9 +3470,9 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
EVT PtrType = Ptr.getValueType();
unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
- SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(),
+ SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(Op),
PtrType, Ptr, DAG.getConstant(4, PtrType));
- RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
+ RetVal2 = DAG.getLoad(MVT::i32, SDLoc(Op),
Ld->getChain(), NewPtr,
Ld->getPointerInfo().getWithOffset(4),
Ld->isVolatile(), Ld->isNonTemporal(),
@@ -3193,7 +3492,7 @@ ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
SDValue LHS = Op.getOperand(2);
SDValue RHS = Op.getOperand(3);
SDValue Dest = Op.getOperand(4);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
bool LHSSeenZero = false;
bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
@@ -3243,7 +3542,7 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue LHS = Op.getOperand(2);
SDValue RHS = Op.getOperand(3);
SDValue Dest = Op.getOperand(4);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (LHS.getValueType() == MVT::i32) {
SDValue ARMcc;
@@ -3284,7 +3583,7 @@ SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Table = Op.getOperand(1);
SDValue Index = Op.getOperand(2);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT PTy = getPointerTy();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
@@ -3320,7 +3619,7 @@ SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (Op.getValueType().getVectorElementType() == MVT::i32) {
if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
@@ -3342,7 +3641,7 @@ static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
if (VT.isVector())
return LowerVectorFP_TO_INT(Op, DAG);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned Opc;
switch (Op.getOpcode()) {
@@ -3360,7 +3659,7 @@ static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
if (VT.getVectorElementType() == MVT::f32)
@@ -3396,7 +3695,7 @@ static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
if (VT.isVector())
return LowerVectorINT_TO_FP(Op, DAG);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned Opc;
switch (Op.getOpcode()) {
@@ -3417,7 +3716,7 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
// Implement fcopysign with a fabs and a conditional fneg.
SDValue Tmp0 = Op.getOperand(0);
SDValue Tmp1 = Op.getOperand(1);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT VT = Op.getValueType();
EVT SrcVT = Tmp1.getValueType();
bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
@@ -3501,7 +3800,7 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
MFI->setReturnAddressIsTaken(true);
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
@@ -3521,7 +3820,7 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful
+ SDLoc dl(Op); // FIXME probably not meaningful
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
? ARM::R7 : ARM::R11;
@@ -3533,47 +3832,6 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
return FrameAddr;
}
-/// Custom Expand long vector extensions, where size(DestVec) > 2*size(SrcVec),
-/// and size(DestVec) > 128-bits.
-/// This is achieved by doing the one extension from the SrcVec, splitting the
-/// result, extending these parts, and then concatenating these into the
-/// destination.
-static SDValue ExpandVectorExtension(SDNode *N, SelectionDAG &DAG) {
- SDValue Op = N->getOperand(0);
- EVT SrcVT = Op.getValueType();
- EVT DestVT = N->getValueType(0);
-
- assert(DestVT.getSizeInBits() > 128 &&
- "Custom sext/zext expansion needs >128-bit vector.");
- // If this is a normal length extension, use the default expansion.
- if (SrcVT.getSizeInBits()*4 != DestVT.getSizeInBits() &&
- SrcVT.getSizeInBits()*8 != DestVT.getSizeInBits())
- return SDValue();
-
- DebugLoc dl = N->getDebugLoc();
- unsigned SrcEltSize = SrcVT.getVectorElementType().getSizeInBits();
- unsigned DestEltSize = DestVT.getVectorElementType().getSizeInBits();
- unsigned NumElts = SrcVT.getVectorNumElements();
- LLVMContext &Ctx = *DAG.getContext();
- SDValue Mid, SplitLo, SplitHi, ExtLo, ExtHi;
-
- EVT MidVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, SrcEltSize*2),
- NumElts);
- EVT SplitVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, SrcEltSize*2),
- NumElts/2);
- EVT ExtVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, DestEltSize),
- NumElts/2);
-
- Mid = DAG.getNode(N->getOpcode(), dl, MidVT, Op);
- SplitLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SplitVT, Mid,
- DAG.getIntPtrConstant(0));
- SplitHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SplitVT, Mid,
- DAG.getIntPtrConstant(NumElts/2));
- ExtLo = DAG.getNode(N->getOpcode(), dl, ExtVT, SplitLo);
- ExtHi = DAG.getNode(N->getOpcode(), dl, ExtVT, SplitHi);
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, ExtLo, ExtHi);
-}
-
/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
@@ -3581,7 +3839,7 @@ static SDValue ExpandVectorExtension(SDNode *N, SelectionDAG &DAG) {
/// vectors), since the legalizer won't know what to do with that.
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue Op = N->getOperand(0);
// This function is only supposed to be called for i64 types, either as the
@@ -3618,7 +3876,7 @@ static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed. Regardless, use a canonical VMOV to create the
/// zero vector.
-static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
+static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
assert(VT.isVector() && "Expected a vector type");
// The canonical modified immediate encoding of a zero vector is....0!
SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
@@ -3634,7 +3892,7 @@ SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
EVT VT = Op.getValueType();
unsigned VTBits = VT.getSizeInBits();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue ShOpLo = Op.getOperand(0);
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
@@ -3670,7 +3928,7 @@ SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
EVT VT = Op.getValueType();
unsigned VTBits = VT.getSizeInBits();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue ShOpLo = Op.getOperand(0);
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
@@ -3703,7 +3961,7 @@ SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
// The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
// The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
// so that the shift + and get folded into a bitfield extract.
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
DAG.getConstant(Intrinsic::arm_get_fpscr,
MVT::i32));
@@ -3718,7 +3976,7 @@ SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *ST) {
EVT VT = N->getValueType(0);
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
if (!ST->hasV6T2Ops())
return SDValue();
@@ -3742,7 +4000,7 @@ static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
/// vuzp: = [k0 k1 k2 k3 k0 k1 k2 k3] each ki is 8-bits)
static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
@@ -3764,7 +4022,7 @@ static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
/// v4i16:Extracted = [k0 k1 k2 k3 ]
static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
if (VT.is64BitVector()) {
@@ -3799,7 +4057,7 @@ static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
///
static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
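The CTPOP lowerings above build 16-bit and 32-bit counts by summing narrower per-lane counts (vcnt.8 followed by pairwise adds). A scalar sketch of the same identity, for illustration only (helper names invented):

    #include <cstdint>

    static unsigned popcnt8(uint8_t V)   { return __builtin_popcount(V); }
    static unsigned popcnt16(uint16_t V) {
      return popcnt8(V & 0xFF) + popcnt8(V >> 8);       // vcnt.8 counts + pairwise add
    }
    static unsigned popcnt32(uint32_t V) {
      return popcnt16(V & 0xFFFF) + popcnt16(V >> 16);  // vpaddl.u16 over the 16-bit counts
    }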
@@ -3838,7 +4096,7 @@ static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *ST) {
EVT VT = N->getValueType(0);
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
if (!VT.isVector())
return SDValue();
@@ -3873,7 +4131,7 @@ static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *ST) {
EVT VT = N->getValueType(0);
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
// We can get here for a node like i32 = ISD::SHL i32, i64
if (VT != MVT::i64)
@@ -3919,7 +4177,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
SDValue CC = Op.getOperand(2);
EVT VT = Op.getValueType();
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (Op.getOperand(1).getValueType().isFloatingPoint()) {
switch (SetCCOpcode) {
@@ -4177,18 +4435,26 @@ static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) const {
- if (!ST->useNEONForSinglePrecisionFP() || !ST->hasVFP3() || ST->hasD16())
+ if (!ST->hasVFP3())
return SDValue();
+ bool IsDouble = Op.getValueType() == MVT::f64;
ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
- assert(Op.getValueType() == MVT::f32 &&
- "ConstantFP custom lowering should only occur for f32.");
// Try splatting with a VMOV.f32...
APFloat FPVal = CFP->getValueAPF();
- int ImmVal = ARM_AM::getFP32Imm(FPVal);
+ int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
+
if (ImmVal != -1) {
- DebugLoc DL = Op.getDebugLoc();
+ if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
+ // We have code in place to select a valid ConstantFP already, no need to
+ // do any mangling.
+ return Op;
+ }
+
+ // It's a float and we are trying to use NEON operations where
+ // possible. Lower it to a splat followed by an extract.
+ SDLoc DL(Op);
SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32);
SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
NewVal);
@@ -4196,15 +4462,31 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(0, MVT::i32));
}
- // If that fails, try a VMOV.i32
+ // The rest of our options are NEON only; make sure that's allowed before
+ // proceeding.
+ if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
+ return SDValue();
+
EVT VMovVT;
- unsigned iVal = FPVal.bitcastToAPInt().getZExtValue();
- SDValue NewVal = isNEONModifiedImm(iVal, 0, 32, DAG, VMovVT, false,
- VMOVModImm);
+ uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
+
+ // It wouldn't really be worth bothering for doubles except for one very
+ // important value, which does happen to match: 0.0. So make sure we don't do
+ // anything stupid.
+ if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
+ return SDValue();
+
+ // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
+ SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
+ false, VMOVModImm);
if (NewVal != SDValue()) {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
NewVal);
+ if (IsDouble)
+ return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
+
+ // It's a float: cast and extract a vector element.
SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
VecConstant);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
@@ -4212,11 +4494,16 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
}
// Finally, try a VMVN.i32
- NewVal = isNEONModifiedImm(~iVal & 0xffffffff, 0, 32, DAG, VMovVT, false,
- VMVNModImm);
+ NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
+ false, VMVNModImm);
if (NewVal != SDValue()) {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
+
+ if (IsDouble)
+ return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
+
+ // It's a float: cast and extract a vector element.
SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
VecConstant);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
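For the f64 path added above, the 32-bit VMOV/VMVN splat can only materialize the constant when both 32-bit halves of its bit pattern match (which is why 0.0 is singled out). A minimal sketch of that guard, not the patch's code:

    #include <cstdint>

    static bool halvesMatch(uint64_t Bits) {
      return (Bits & 0xffffffffULL) == (Bits >> 32);
    }
    // halvesMatch(0x0000000000000000) == true   (0.0 -- splatable)
    // halvesMatch(0x3ff0000000000000) == false  (1.0 -- not reachable this way)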
@@ -4475,7 +4762,7 @@ static bool isReverseMask(ArrayRef<int> M, EVT VT) {
// instruction, return an SDValue of such a constant (will become a MOV
// instruction). Otherwise return null.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
- const ARMSubtarget *ST, DebugLoc dl) {
+ const ARMSubtarget *ST, SDLoc dl) {
uint64_t Val;
if (!isa<ConstantSDNode>(N))
return SDValue();
@@ -4496,7 +4783,7 @@ static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) const {
BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT VT = Op.getValueType();
APInt SplatBits, SplatUndef;
@@ -4580,7 +4867,9 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
if (ValueCounts.size() == 0)
return DAG.getUNDEF(VT);
- if (isOnlyLowElement)
+ // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
+ // Keep going if we are hitting this case.
+ if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
unsigned EltSize = VT.getVectorElementType().getSizeInBits();
@@ -4679,6 +4968,24 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
+ // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
+ // know the default expansion would otherwise fall back on something even
+ // worse. For a vector with one or two non-undef values, that's
+ // scalar_to_vector for the elements followed by a shuffle (provided the
+ // shuffle is valid for the target) and materialization element by element
+ // on the stack followed by a load for everything else.
+ if (!isConstant && !usesOnlyOneValue) {
+ SDValue Vec = DAG.getUNDEF(VT);
+ for (unsigned i = 0 ; i < NumElts; ++i) {
+ SDValue V = Op.getOperand(i);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ SDValue LaneIdx = DAG.getConstant(i, MVT::i32);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
+ }
+ return Vec;
+ }
+
return SDValue();
}
@@ -4686,7 +4993,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
// shuffle in combination with VEXTs.
SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT VT = Op.getValueType();
unsigned NumElts = VT.getVectorNumElements();
@@ -4875,7 +5182,7 @@ ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
SDValue RHS, SelectionDAG &DAG,
- DebugLoc dl) {
+ SDLoc dl) {
unsigned OpNum = (PFEntry >> 26) & 0x0F;
unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
@@ -4955,7 +5262,7 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
// Check to see if we can use the VTBL instruction.
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SmallVector<SDValue, 8> VTBLMask;
for (ArrayRef<int>::iterator
@@ -4974,7 +5281,7 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
SelectionDAG &DAG) {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue OpLHS = Op.getOperand(0);
EVT VT = OpLHS.getValueType();
@@ -4992,7 +5299,7 @@ static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT VT = Op.getValueType();
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
@@ -5156,7 +5463,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
SDValue Vec = Op.getOperand(0);
if (Op.getValueType() == MVT::i32 &&
Vec.getValueType().getVectorElementType().getSizeInBits() < 32) {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
}
@@ -5168,7 +5475,7 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
// two 64-bit vectors are concatenated to a 128-bit vector.
assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
"unexpected CONCAT_VECTORS");
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue Val = DAG.getUNDEF(MVT::v2f64);
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
@@ -5291,7 +5598,7 @@ static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
// Must extend size to at least 64 bits to be used as an operand for VMULL.
EVT NewVT = getExtensionTo64Bits(OrigTy);
- return DAG.getNode(ExtOpcode, N->getDebugLoc(), NewVT, N);
+ return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
}
/// SkipLoadExtensionForVMULL - return a load of the original vector size that
@@ -5304,7 +5611,7 @@ static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
// The load already has the right type.
if (ExtendedTy == LD->getMemoryVT())
- return DAG.getLoad(LD->getMemoryVT(), LD->getDebugLoc(), LD->getChain(),
+ return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(),
LD->isNonTemporal(), LD->isInvariant(),
LD->getAlignment());
@@ -5312,7 +5619,7 @@ static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
// We need to create a zextload/sextload. We cannot just create a load
// followed by a zext/sext node because LowerMUL is also run during normal
// operation legalization where we can't create illegal types.
- return DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(), ExtendedTy,
+ return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
LD->getMemoryVT(), LD->isVolatile(),
LD->isNonTemporal(), LD->getAlignment());
@@ -5341,7 +5648,7 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
- return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32,
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), MVT::v2i32,
BVN->getOperand(LowElt), BVN->getOperand(LowElt+2));
}
// Construct a new BUILD_VECTOR with elements truncated to half the size.
@@ -5358,7 +5665,7 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
// The values are implicitly truncated so sext vs. zext doesn't matter.
Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32));
}
- return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N),
MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts);
}
@@ -5430,7 +5737,7 @@ static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
}
// Legalize to a VMULL instruction.
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Op0;
SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
if (!isMLA) {
@@ -5460,7 +5767,7 @@ static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
}
static SDValue
-LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) {
+LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) {
// Convert to float
// float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
// float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
@@ -5489,7 +5796,7 @@ LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) {
}
static SDValue
-LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) {
+LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG) {
SDValue N2;
// Convert to float.
// float4 yf = vcvt_f32_s32(vmovl_s16(y));
@@ -5530,7 +5837,7 @@ static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
"unexpected type for custom-lowering ISD::SDIV");
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
SDValue N2, N3;
@@ -5565,7 +5872,7 @@ static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
"unexpected type for custom-lowering ISD::UDIV");
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
SDValue N2, N3;
@@ -5649,12 +5956,76 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
}
if (!ExtraOp)
- return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
+ return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
Op.getOperand(1));
- return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
+ return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
Op.getOperand(1), Op.getOperand(2));
}
+SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
+ assert(Subtarget->isTargetDarwin());
+
+ // For iOS, we want to call an alternative entry point: __sincos_stret;
+ // the return values are passed via sret.
+ SDLoc dl(Op);
+ SDValue Arg = Op.getOperand(0);
+ EVT ArgVT = Arg.getValueType();
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+
+ MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+ // Pair of floats / doubles used to pass the result.
+ StructType *RetTy = StructType::get(ArgTy, ArgTy, NULL);
+
+ // Create stack object for sret.
+ const uint64_t ByteSize = TLI.getDataLayout()->getTypeAllocSize(RetTy);
+ const unsigned StackAlign = TLI.getDataLayout()->getPrefTypeAlignment(RetTy);
+ int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
+ SDValue SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy());
+
+ ArgListTy Args;
+ ArgListEntry Entry;
+
+ Entry.Node = SRet;
+ Entry.Ty = RetTy->getPointerTo();
+ Entry.isSExt = false;
+ Entry.isZExt = false;
+ Entry.isSRet = true;
+ Args.push_back(Entry);
+
+ Entry.Node = Arg;
+ Entry.Ty = ArgTy;
+ Entry.isSExt = false;
+ Entry.isZExt = false;
+ Args.push_back(Entry);
+
+ const char *LibcallName = (ArgVT == MVT::f64)
+ ? "__sincos_stret" : "__sincosf_stret";
+ SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy());
+
+ TargetLowering::
+ CallLoweringInfo CLI(DAG.getEntryNode(), Type::getVoidTy(*DAG.getContext()),
+ false, false, false, false, 0,
+ CallingConv::C, /*isTailCall=*/false,
+ /*doesNotRet=*/false, /*isReturnValueUsed=*/false,
+ Callee, Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
+
+ SDValue LoadSin = DAG.getLoad(ArgVT, dl, CallResult.second, SRet,
+ MachinePointerInfo(), false, false, false, 0);
+
+ // Address of cos field.
+ SDValue Add = DAG.getNode(ISD::ADD, dl, getPointerTy(), SRet,
+ DAG.getIntPtrConstant(ArgVT.getStoreSize()));
+ SDValue LoadCos = DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add,
+ MachinePointerInfo(), false, false, false, 0);
+
+ SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
+ return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
+ LoadSin.getValue(0), LoadCos.getValue(0));
+}
+
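A rough C-level picture of what LowerFSINCOS emits for an f32 argument, as the code above models it: one call to __sincosf_stret with an sret pointer to a two-element stack object, then two loads. The struct and wrapper names here are invented; only the symbol name and the sret convention come from the lowering above.

    struct SinCosF { float Sin; float Cos; };               // the stack object created for sret
    extern "C" void __sincosf_stret(SinCosF *Ret, float X); // sret argument passed first

    static void sincosViaStret(float X, float &S, float &C) {
      SinCosF Tmp;
      __sincosf_stret(&Tmp, X); // a single call produces both results
      S = Tmp.Sin;              // load at offset 0
      C = Tmp.Cos;              // load at offset ArgVT.getStoreSize()
    }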
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
// Monotonic load/store is legal for all targets
if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
@@ -5665,40 +6036,73 @@ static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
return SDValue();
}
-
static void
ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
- SelectionDAG &DAG, unsigned NewOp) {
- DebugLoc dl = Node->getDebugLoc();
+ SelectionDAG &DAG) {
+ SDLoc dl(Node);
assert (Node->getValueType(0) == MVT::i64 &&
"Only know how to expand i64 atomics");
+ AtomicSDNode *AN = cast<AtomicSDNode>(Node);
SmallVector<SDValue, 6> Ops;
Ops.push_back(Node->getOperand(0)); // Chain
Ops.push_back(Node->getOperand(1)); // Ptr
- // Low part of Val1
- Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Node->getOperand(2), DAG.getIntPtrConstant(0)));
- // High part of Val1
- Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Node->getOperand(2), DAG.getIntPtrConstant(1)));
- if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) {
- // High part of Val1
+ for(unsigned i=2; i<Node->getNumOperands(); i++) {
+ // Low part
Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Node->getOperand(3), DAG.getIntPtrConstant(0)));
- // High part of Val2
+ Node->getOperand(i), DAG.getIntPtrConstant(0)));
+ // High part
Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Node->getOperand(3), DAG.getIntPtrConstant(1)));
+ Node->getOperand(i), DAG.getIntPtrConstant(1)));
}
SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
SDValue Result =
- DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64,
- cast<MemSDNode>(Node)->getMemOperand());
+ DAG.getAtomic(Node->getOpcode(), dl, MVT::i64, Tys, Ops.data(), Ops.size(),
+ cast<MemSDNode>(Node)->getMemOperand(), AN->getOrdering(),
+ AN->getSynchScope());
SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) };
Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
Results.push_back(Result.getValue(2));
}
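Each i64 value operand is split with EXTRACT_ELEMENT (index 0 = low word, index 1 = high word) and the i64 result is reassembled with BUILD_PAIR. The scalar equivalent, shown only for illustration:

    #include <cstdint>

    static void splitI64(uint64_t V, uint32_t &Lo, uint32_t &Hi) {
      Lo = static_cast<uint32_t>(V);        // EXTRACT_ELEMENT ..., 0
      Hi = static_cast<uint32_t>(V >> 32);  // EXTRACT_ELEMENT ..., 1
    }
    static uint64_t buildPair(uint32_t Lo, uint32_t Hi) {
      return (static_cast<uint64_t>(Hi) << 32) | Lo;  // BUILD_PAIR
    }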
+static void ReplaceREADCYCLECOUNTER(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG,
+ const ARMSubtarget *Subtarget) {
+ SDLoc DL(N);
+ SDValue Cycles32, OutChain;
+
+ if (Subtarget->hasPerfMon()) {
+ // Under the Performance Monitors extension, the cycle-count is:
+ // mrc p15, #0, <Rt>, c9, c13, #0
+ SDValue Ops[] = { N->getOperand(0), // Chain
+ DAG.getConstant(Intrinsic::arm_mrc, MVT::i32),
+ DAG.getConstant(15, MVT::i32),
+ DAG.getConstant(0, MVT::i32),
+ DAG.getConstant(9, MVT::i32),
+ DAG.getConstant(13, MVT::i32),
+ DAG.getConstant(0, MVT::i32)
+ };
+
+ Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
+ DAG.getVTList(MVT::i32, MVT::Other), &Ops[0],
+ array_lengthof(Ops));
+ OutChain = Cycles32.getValue(1);
+ } else {
+ // Intrinsic is defined to return 0 on unsupported platforms. Technically
+ // there are older ARM CPUs that have implementation-specific ways of
+ // obtaining this information (FIXME!).
+ Cycles32 = DAG.getConstant(0, MVT::i32);
+ OutChain = DAG.getEntryNode();
+ }
+
+
+ SDValue Cycles64 = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64,
+ Cycles32, DAG.getConstant(0, MVT::i32));
+ Results.push_back(Cycles64);
+ Results.push_back(OutChain);
+}
+
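When the subtarget reports hasPerfMon(), the INTRINSIC_W_CHAIN above selects to a read of the PMU cycle counter (PMCCNTR). For illustration only, the same read written as inline assembly; this is a sketch that assumes a 32-bit ARM target and that user access to the counter has been enabled:

    #include <cstdint>

    static inline uint32_t readPMCCNTR() {
      uint32_t Cycles;
      asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(Cycles)); // same encoding as the operands above
      return Cycles; // widened to i64 with a zero high word, as the BUILD_PAIR does
    }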
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Don't know how to custom lower this!");
@@ -5753,6 +6157,9 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
case ISD::ATOMIC_LOAD:
case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
+ case ISD::FSINCOS: return LowerFSINCOS(Op, DAG);
+ case ISD::SDIVREM:
+ case ISD::UDIVREM: return LowerDivRem(Op, DAG);
}
}
@@ -5768,49 +6175,28 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::BITCAST:
Res = ExpandBITCAST(N, DAG);
break;
- case ISD::SIGN_EXTEND:
- case ISD::ZERO_EXTEND:
- Res = ExpandVectorExtension(N, DAG);
- break;
case ISD::SRL:
case ISD::SRA:
Res = Expand64BitShift(N, DAG, Subtarget);
break;
- case ISD::ATOMIC_LOAD_ADD:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG);
+ case ISD::READCYCLECOUNTER:
+ ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
return;
+ case ISD::ATOMIC_STORE:
+ case ISD::ATOMIC_LOAD:
+ case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_AND:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG);
- return;
case ISD::ATOMIC_LOAD_NAND:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG);
- return;
case ISD::ATOMIC_LOAD_OR:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG);
- return;
case ISD::ATOMIC_LOAD_SUB:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG);
- return;
case ISD::ATOMIC_LOAD_XOR:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG);
- return;
case ISD::ATOMIC_SWAP:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG);
- return;
case ISD::ATOMIC_CMP_SWAP:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG);
- return;
case ISD::ATOMIC_LOAD_MIN:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMMIN64_DAG);
- return;
case ISD::ATOMIC_LOAD_UMIN:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMUMIN64_DAG);
- return;
case ISD::ATOMIC_LOAD_MAX:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMMAX64_DAG);
- return;
case ISD::ATOMIC_LOAD_UMAX:
- ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMUMAX64_DAG);
+ ReplaceATOMIC_OP_64(N, Results, DAG);
return;
}
if (Res.getNode())
@@ -5830,6 +6216,7 @@ ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
unsigned oldval = MI->getOperand(2).getReg();
unsigned newval = MI->getOperand(3).getReg();
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
DebugLoc dl = MI->getDebugLoc();
bool isThumb2 = Subtarget->isThumb2();
@@ -5845,21 +6232,7 @@ ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
}
unsigned ldrOpc, strOpc;
- switch (Size) {
- default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
- case 1:
- ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
- strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
- break;
- case 2:
- ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
- strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
- break;
- case 4:
- ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
- strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
- break;
- }
+ getExclusiveOperation(Size, Ord, isThumb2, ldrOpc, strOpc);
MachineFunction *MF = BB->getParent();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
@@ -5939,6 +6312,7 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned dest = MI->getOperand(0).getReg();
unsigned ptr = MI->getOperand(1).getReg();
unsigned incr = MI->getOperand(2).getReg();
+ AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
DebugLoc dl = MI->getDebugLoc();
bool isThumb2 = Subtarget->isThumb2();
@@ -5946,24 +6320,11 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
if (isThumb2) {
MRI.constrainRegClass(dest, &ARM::rGPRRegClass);
MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(incr, &ARM::rGPRRegClass);
}
unsigned ldrOpc, strOpc;
- switch (Size) {
- default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
- case 1:
- ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
- strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
- break;
- case 2:
- ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
- strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
- break;
- case 4:
- ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
- strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
- break;
- }
+ getExclusiveOperation(Size, Ord, isThumb2, ldrOpc, strOpc);
MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
@@ -6047,6 +6408,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
unsigned ptr = MI->getOperand(1).getReg();
unsigned incr = MI->getOperand(2).getReg();
unsigned oldval = dest;
+ AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
DebugLoc dl = MI->getDebugLoc();
bool isThumb2 = Subtarget->isThumb2();
@@ -6054,24 +6416,20 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
if (isThumb2) {
MRI.constrainRegClass(dest, &ARM::rGPRRegClass);
MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(incr, &ARM::rGPRRegClass);
}
unsigned ldrOpc, strOpc, extendOpc;
+ getExclusiveOperation(Size, Ord, isThumb2, ldrOpc, strOpc);
switch (Size) {
- default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
+ default: llvm_unreachable("unsupported size for AtomicBinaryMinMax!");
case 1:
- ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
- strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
break;
case 2:
- ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
- strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
break;
case 4:
- ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
- strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
extendOpc = 0;
break;
}
@@ -6115,7 +6473,10 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
// Sign extend the value, if necessary.
if (signExtend && extendOpc) {
- oldval = MRI.createVirtualRegister(&ARM::GPRRegClass);
+ oldval = MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass
+ : &ARM::GPRnopcRegClass);
+ if (!isThumb2)
+ MRI.constrainRegClass(dest, &ARM::GPRnopcRegClass);
AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval)
.addReg(dest)
.addImm(0));
@@ -6153,7 +6514,7 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
unsigned Op1, unsigned Op2,
bool NeedsCarry, bool IsCmpxchg,
bool IsMinMax, ARMCC::CondCodes CC) const {
- // This also handles ATOMIC_SWAP, indicated by Op1==0.
+ // This also handles ATOMIC_SWAP and ATOMIC_STORE, indicated by Op1==0.
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
@@ -6161,11 +6522,15 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
MachineFunction::iterator It = BB;
++It;
+ bool isStore = (MI->getOpcode() == ARM::ATOMIC_STORE_I64);
+ unsigned offset = (isStore ? -2 : 0);
unsigned destlo = MI->getOperand(0).getReg();
unsigned desthi = MI->getOperand(1).getReg();
- unsigned ptr = MI->getOperand(2).getReg();
- unsigned vallo = MI->getOperand(3).getReg();
- unsigned valhi = MI->getOperand(4).getReg();
+ unsigned ptr = MI->getOperand(offset+2).getReg();
+ unsigned vallo = MI->getOperand(offset+3).getReg();
+ unsigned valhi = MI->getOperand(offset+4).getReg();
+ unsigned OrdIdx = offset + (IsCmpxchg ? 7 : 5);
+ AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(OrdIdx).getImm());
DebugLoc dl = MI->getDebugLoc();
bool isThumb2 = Subtarget->isThumb2();
@@ -6174,8 +6539,13 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
MRI.constrainRegClass(destlo, &ARM::rGPRRegClass);
MRI.constrainRegClass(desthi, &ARM::rGPRRegClass);
MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(vallo, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(valhi, &ARM::rGPRRegClass);
}
+ unsigned ldrOpc, strOpc;
+ getExclusiveOperation(8, Ord, isThumb2, ldrOpc, strOpc);
+
MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *contBB = 0, *cont2BB = 0;
if (IsCmpxchg || IsMinMax)
@@ -6215,21 +6585,23 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
// fallthrough --> exitMBB
BB = loopMBB;
- // Load
- if (isThumb2) {
- AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2LDREXD))
- .addReg(destlo, RegState::Define)
- .addReg(desthi, RegState::Define)
- .addReg(ptr));
- } else {
- unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
- AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDREXD))
- .addReg(GPRPair0, RegState::Define).addReg(ptr));
- // Copy r2/r3 into dest. (This copy will normally be coalesced.)
- BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo)
- .addReg(GPRPair0, 0, ARM::gsub_0);
- BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi)
- .addReg(GPRPair0, 0, ARM::gsub_1);
+ if (!isStore) {
+ // Load
+ if (isThumb2) {
+ AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc))
+ .addReg(destlo, RegState::Define)
+ .addReg(desthi, RegState::Define)
+ .addReg(ptr));
+ } else {
+ unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
+ AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc))
+ .addReg(GPRPair0, RegState::Define).addReg(ptr));
+ // Copy r2/r3 into dest. (This copy will normally be coalesced.)
+ BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo)
+ .addReg(GPRPair0, 0, ARM::gsub_0);
+ BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi)
+ .addReg(GPRPair0, 0, ARM::gsub_1);
+ }
}
unsigned StoreLo, StoreHi;
@@ -6281,7 +6653,9 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
// Store
if (isThumb2) {
- AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2STREXD), storesuccess)
+ MRI.constrainRegClass(StoreLo, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(StoreHi, &ARM::rGPRRegClass);
+ AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess)
.addReg(StoreLo).addReg(StoreHi).addReg(ptr));
} else {
// Marshal a pair...
@@ -6299,7 +6673,7 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
.addImm(ARM::gsub_1);
// ...and store it
- AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::STREXD), storesuccess)
+ AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess)
.addReg(StorePair).addReg(ptr));
}
// Cmp+jump
@@ -6320,6 +6694,51 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
return BB;
}
+MachineBasicBlock *
+ARMTargetLowering::EmitAtomicLoad64(MachineInstr *MI, MachineBasicBlock *BB) const {
+
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+
+ unsigned destlo = MI->getOperand(0).getReg();
+ unsigned desthi = MI->getOperand(1).getReg();
+ unsigned ptr = MI->getOperand(2).getReg();
+ AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
+ DebugLoc dl = MI->getDebugLoc();
+ bool isThumb2 = Subtarget->isThumb2();
+
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+ if (isThumb2) {
+ MRI.constrainRegClass(destlo, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(desthi, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
+ }
+ unsigned ldrOpc, strOpc;
+ getExclusiveOperation(8, Ord, isThumb2, ldrOpc, strOpc);
+
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(ldrOpc));
+
+ if (isThumb2) {
+ MIB.addReg(destlo, RegState::Define)
+ .addReg(desthi, RegState::Define)
+ .addReg(ptr);
+
+ } else {
+ unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
+ MIB.addReg(GPRPair0, RegState::Define).addReg(ptr);
+
+ // Copy GPRPair0 into dest. (This copy will normally be coalesced.)
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), destlo)
+ .addReg(GPRPair0, 0, ARM::gsub_0);
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), desthi)
+ .addReg(GPRPair0, 0, ARM::gsub_1);
+ }
+ AddDefaultPred(MIB);
+
+ MI->eraseFromParent(); // The instruction is gone now.
+
+ return BB;
+}
+
/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
/// registers the function context.
void ARMTargetLowering::
@@ -6851,8 +7270,109 @@ MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
llvm_unreachable("Expecting a BB with two successors!");
}
-MachineBasicBlock *ARMTargetLowering::
-EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
+/// Return the load opcode for a given load size. If load size >= 8,
+/// a NEON opcode will be returned.
+static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
+ if (LdSize >= 8)
+ return LdSize == 16 ? ARM::VLD1q32wb_fixed
+ : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
+ if (IsThumb1)
+ return LdSize == 4 ? ARM::tLDRi
+ : LdSize == 2 ? ARM::tLDRHi
+ : LdSize == 1 ? ARM::tLDRBi : 0;
+ if (IsThumb2)
+ return LdSize == 4 ? ARM::t2LDR_POST
+ : LdSize == 2 ? ARM::t2LDRH_POST
+ : LdSize == 1 ? ARM::t2LDRB_POST : 0;
+ return LdSize == 4 ? ARM::LDR_POST_IMM
+ : LdSize == 2 ? ARM::LDRH_POST
+ : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
+}
+
+/// Return the store opcode for a given store size. If store size >= 8,
+/// a NEON opcode will be returned.
+static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
+ if (StSize >= 8)
+ return StSize == 16 ? ARM::VST1q32wb_fixed
+ : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
+ if (IsThumb1)
+ return StSize == 4 ? ARM::tSTRi
+ : StSize == 2 ? ARM::tSTRHi
+ : StSize == 1 ? ARM::tSTRBi : 0;
+ if (IsThumb2)
+ return StSize == 4 ? ARM::t2STR_POST
+ : StSize == 2 ? ARM::t2STRH_POST
+ : StSize == 1 ? ARM::t2STRB_POST : 0;
+ return StSize == 4 ? ARM::STR_POST_IMM
+ : StSize == 2 ? ARM::STRH_POST
+ : StSize == 1 ? ARM::STRB_POST_IMM : 0;
+}
+
+/// Emit a post-increment load operation with the given size. The instructions
+/// will be added to BB at Pos.
+static void emitPostLd(MachineBasicBlock *BB, MachineInstr *Pos,
+ const TargetInstrInfo *TII, DebugLoc dl,
+ unsigned LdSize, unsigned Data, unsigned AddrIn,
+ unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
+ unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
+ assert(LdOpc != 0 && "Should have a load opcode");
+ if (LdSize >= 8) {
+ AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
+ .addReg(AddrOut, RegState::Define).addReg(AddrIn)
+ .addImm(0));
+ } else if (IsThumb1) {
+ // load + update AddrIn
+ AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
+ .addReg(AddrIn).addImm(0));
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
+ MIB = AddDefaultT1CC(MIB);
+ MIB.addReg(AddrIn).addImm(LdSize);
+ AddDefaultPred(MIB);
+ } else if (IsThumb2) {
+ AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
+ .addReg(AddrOut, RegState::Define).addReg(AddrIn)
+ .addImm(LdSize));
+ } else { // arm
+ AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
+ .addReg(AddrOut, RegState::Define).addReg(AddrIn)
+ .addReg(0).addImm(LdSize));
+ }
+}
+
+/// Emit a post-increment store operation with the given size. The instructions
+/// will be added to BB at Pos.
+static void emitPostSt(MachineBasicBlock *BB, MachineInstr *Pos,
+ const TargetInstrInfo *TII, DebugLoc dl,
+ unsigned StSize, unsigned Data, unsigned AddrIn,
+ unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
+ unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
+ assert(StOpc != 0 && "Should have a store opcode");
+ if (StSize >= 8) {
+ AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
+ .addReg(AddrIn).addImm(0).addReg(Data));
+ } else if (IsThumb1) {
+ // store + update AddrIn
+ AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc)).addReg(Data)
+ .addReg(AddrIn).addImm(0));
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
+ MIB = AddDefaultT1CC(MIB);
+ MIB.addReg(AddrIn).addImm(StSize);
+ AddDefaultPred(MIB);
+ } else if (IsThumb2) {
+ AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
+ .addReg(Data).addReg(AddrIn).addImm(StSize));
+ } else { // arm
+ AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
+ .addReg(Data).addReg(AddrIn).addReg(0)
+ .addImm(StSize));
+ }
+}
+
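The separate Thumb1 paths in emitPostLd/emitPostSt exist because Thumb1 has no post-indexed addressing: the access and the address bump take two instructions (tLDRBi/tSTRBi plus tADDi8), where ARM and Thumb2 fold both into one. A scalar sketch of the effect being emulated:

    static unsigned char loadBytePostInc(const unsigned char *&Addr) {
      unsigned char Data = *Addr; // the load itself (tLDRBi on Thumb1)
      Addr += 1;                  // the separate address update (tADDi8 on Thumb1)
      return Data;                // ARM/Thumb2 do both in one post-indexed load
    }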
+MachineBasicBlock *
+ARMTargetLowering::EmitStructByval(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
// This pseudo instruction has 3 operands: dst, src, size
// We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
// Otherwise, we will generate unrolled scalar copies.
@@ -6867,23 +7387,18 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
unsigned Align = MI->getOperand(3).getImm();
DebugLoc dl = MI->getDebugLoc();
- bool isThumb2 = Subtarget->isThumb2();
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
- unsigned ldrOpc, strOpc, UnitSize = 0;
+ unsigned UnitSize = 0;
+ const TargetRegisterClass *TRC = 0;
+ const TargetRegisterClass *VecTRC = 0;
- const TargetRegisterClass *TRC = isThumb2 ?
- (const TargetRegisterClass*)&ARM::tGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass;
- const TargetRegisterClass *TRC_Vec = 0;
+ bool IsThumb1 = Subtarget->isThumb1Only();
+ bool IsThumb2 = Subtarget->isThumb2();
if (Align & 1) {
- ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM;
- strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM;
UnitSize = 1;
} else if (Align & 2) {
- ldrOpc = isThumb2 ? ARM::t2LDRH_POST : ARM::LDRH_POST;
- strOpc = isThumb2 ? ARM::t2STRH_POST : ARM::STRH_POST;
UnitSize = 2;
} else {
// Check whether we can use NEON instructions.
@@ -6891,27 +7406,27 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
hasAttribute(AttributeSet::FunctionIndex,
Attribute::NoImplicitFloat) &&
Subtarget->hasNEON()) {
- if ((Align % 16 == 0) && SizeVal >= 16) {
- ldrOpc = ARM::VLD1q32wb_fixed;
- strOpc = ARM::VST1q32wb_fixed;
+ if ((Align % 16 == 0) && SizeVal >= 16)
UnitSize = 16;
- TRC_Vec = (const TargetRegisterClass*)&ARM::DPairRegClass;
- }
- else if ((Align % 8 == 0) && SizeVal >= 8) {
- ldrOpc = ARM::VLD1d32wb_fixed;
- strOpc = ARM::VST1d32wb_fixed;
+ else if ((Align % 8 == 0) && SizeVal >= 8)
UnitSize = 8;
- TRC_Vec = (const TargetRegisterClass*)&ARM::DPRRegClass;
- }
}
// Can't use NEON instructions.
- if (UnitSize == 0) {
- ldrOpc = isThumb2 ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
- strOpc = isThumb2 ? ARM::t2STR_POST : ARM::STR_POST_IMM;
+ if (UnitSize == 0)
UnitSize = 4;
- }
}
+ // Select the correct opcode and register class for unit size load/store
+ bool IsNeon = UnitSize >= 8;
+ TRC = (IsThumb1 || IsThumb2) ? (const TargetRegisterClass *)&ARM::tGPRRegClass
+ : (const TargetRegisterClass *)&ARM::GPRRegClass;
+ if (IsNeon)
+ VecTRC = UnitSize == 16
+ ? (const TargetRegisterClass *)&ARM::DPairRegClass
+ : UnitSize == 8
+ ? (const TargetRegisterClass *)&ARM::DPRRegClass
+ : 0;
+
unsigned BytesLeft = SizeVal % UnitSize;
unsigned LoopSize = SizeVal - BytesLeft;
@@ -6922,34 +7437,13 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
unsigned srcIn = src;
unsigned destIn = dest;
for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
- unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? TRC_Vec:TRC);
unsigned srcOut = MRI.createVirtualRegister(TRC);
unsigned destOut = MRI.createVirtualRegister(TRC);
- if (UnitSize >= 8) {
- AddDefaultPred(BuildMI(*BB, MI, dl,
- TII->get(ldrOpc), scratch)
- .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(0));
-
- AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
- .addReg(destIn).addImm(0).addReg(scratch));
- } else if (isThumb2) {
- AddDefaultPred(BuildMI(*BB, MI, dl,
- TII->get(ldrOpc), scratch)
- .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(UnitSize));
-
- AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
- .addReg(scratch).addReg(destIn)
- .addImm(UnitSize));
- } else {
- AddDefaultPred(BuildMI(*BB, MI, dl,
- TII->get(ldrOpc), scratch)
- .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0)
- .addImm(UnitSize));
-
- AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
- .addReg(scratch).addReg(destIn)
- .addReg(0).addImm(UnitSize));
- }
+ unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
+ emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
+ IsThumb1, IsThumb2);
+ emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
+ IsThumb1, IsThumb2);
srcIn = srcOut;
destIn = destOut;
}
@@ -6957,30 +7451,14 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
// Handle the leftover bytes with LDRB and STRB.
// [scratch, srcOut] = LDRB_POST(srcIn, 1)
// [destOut] = STRB_POST(scratch, destIn, 1)
- ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM;
- strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM;
for (unsigned i = 0; i < BytesLeft; i++) {
- unsigned scratch = MRI.createVirtualRegister(TRC);
unsigned srcOut = MRI.createVirtualRegister(TRC);
unsigned destOut = MRI.createVirtualRegister(TRC);
- if (isThumb2) {
- AddDefaultPred(BuildMI(*BB, MI, dl,
- TII->get(ldrOpc),scratch)
- .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1));
-
- AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
- .addReg(scratch).addReg(destIn)
- .addReg(0).addImm(1));
- } else {
- AddDefaultPred(BuildMI(*BB, MI, dl,
- TII->get(ldrOpc),scratch)
- .addReg(srcOut, RegState::Define).addReg(srcIn)
- .addReg(0).addImm(1));
-
- AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
- .addReg(scratch).addReg(destIn)
- .addReg(0).addImm(1));
- }
+ unsigned scratch = MRI.createVirtualRegister(TRC);
+ emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
+ IsThumb1, IsThumb2);
+ emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
+ IsThumb1, IsThumb2);
srcIn = srcOut;
destIn = destOut;
}
@@ -7021,17 +7499,16 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
// Load an immediate to varEnd.
unsigned varEnd = MRI.createVirtualRegister(TRC);
- if (isThumb2) {
- unsigned VReg1 = varEnd;
+ if (IsThumb2) {
+ unsigned Vtmp = varEnd;
if ((LoopSize & 0xFFFF0000) != 0)
- VReg1 = MRI.createVirtualRegister(TRC);
- AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVi16), VReg1)
- .addImm(LoopSize & 0xFFFF));
+ Vtmp = MRI.createVirtualRegister(TRC);
+ AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVi16), Vtmp)
+ .addImm(LoopSize & 0xFFFF));
if ((LoopSize & 0xFFFF0000) != 0)
AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVTi16), varEnd)
- .addReg(VReg1)
- .addImm(LoopSize >> 16));
+ .addReg(Vtmp).addImm(LoopSize >> 16));
} else {
MachineConstantPool *ConstantPool = MF->getConstantPool();
Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
@@ -7043,10 +7520,12 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
Align = getDataLayout()->getTypeAllocSize(C->getType());
unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
- AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDRcp))
- .addReg(varEnd, RegState::Define)
- .addConstantPoolIndex(Idx)
- .addImm(0));
+ if (IsThumb1)
+ AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)).addReg(
+ varEnd, RegState::Define).addConstantPoolIndex(Idx));
+ else
+ AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)).addReg(
+ varEnd, RegState::Define).addConstantPoolIndex(Idx).addImm(0));
}
BB->addSuccessor(loopMBB);
@@ -7075,39 +7554,30 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
// [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
// [destLoop] = STR_POST(scratch, destPhi, UnitSize)
- unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? TRC_Vec:TRC);
- if (UnitSize >= 8) {
- AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch)
- .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(0));
-
- AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop)
- .addReg(destPhi).addImm(0).addReg(scratch));
- } else if (isThumb2) {
- AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch)
- .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(UnitSize));
-
- AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop)
- .addReg(scratch).addReg(destPhi)
- .addImm(UnitSize));
- } else {
- AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch)
- .addReg(srcLoop, RegState::Define).addReg(srcPhi).addReg(0)
- .addImm(UnitSize));
-
- AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop)
- .addReg(scratch).addReg(destPhi)
- .addReg(0).addImm(UnitSize));
- }
+ unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
+ emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
+ IsThumb1, IsThumb2);
+ emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
+ IsThumb1, IsThumb2);
// Decrement loop variable by UnitSize.
- MachineInstrBuilder MIB = BuildMI(BB, dl,
- TII->get(isThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
- AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize)));
- MIB->getOperand(5).setReg(ARM::CPSR);
- MIB->getOperand(5).setIsDef(true);
-
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
+ if (IsThumb1) {
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop);
+ MIB = AddDefaultT1CC(MIB);
+ MIB.addReg(varPhi).addImm(UnitSize);
+ AddDefaultPred(MIB);
+ } else {
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, BB->end(), dl,
+ TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
+ AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize)));
+ MIB->getOperand(5).setReg(ARM::CPSR);
+ MIB->getOperand(5).setIsDef(true);
+ }
+ BuildMI(*BB, BB->end(), dl,
+ TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
+ .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
// loopMBB can loop back to loopMBB or fall through to exitMBB.
BB->addSuccessor(loopMBB);
@@ -7116,34 +7586,19 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
// Add epilogue to handle BytesLeft.
BB = exitMBB;
MachineInstr *StartOfExit = exitMBB->begin();
- ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM;
- strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM;
// [scratch, srcOut] = LDRB_POST(srcLoop, 1)
// [destOut] = STRB_POST(scratch, destLoop, 1)
unsigned srcIn = srcLoop;
unsigned destIn = destLoop;
for (unsigned i = 0; i < BytesLeft; i++) {
- unsigned scratch = MRI.createVirtualRegister(TRC);
unsigned srcOut = MRI.createVirtualRegister(TRC);
unsigned destOut = MRI.createVirtualRegister(TRC);
- if (isThumb2) {
- AddDefaultPred(BuildMI(*BB, StartOfExit, dl,
- TII->get(ldrOpc),scratch)
- .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1));
-
- AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut)
- .addReg(scratch).addReg(destIn)
- .addImm(1));
- } else {
- AddDefaultPred(BuildMI(*BB, StartOfExit, dl,
- TII->get(ldrOpc),scratch)
- .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0).addImm(1));
-
- AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut)
- .addReg(scratch).addReg(destIn)
- .addReg(0).addImm(1));
- }
+ unsigned scratch = MRI.createVirtualRegister(TRC);
+ emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
+ IsThumb1, IsThumb2);
+ emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
+ IsThumb1, IsThumb2);
srcIn = srcOut;
destIn = destOut;
}
@@ -7293,46 +7748,49 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);
+ case ARM::ATOMIC_LOAD_I64:
+ return EmitAtomicLoad64(MI, BB);
- case ARM::ATOMADD6432:
+ case ARM::ATOMIC_LOAD_ADD_I64:
return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr,
isThumb2 ? ARM::t2ADCrr : ARM::ADCrr,
/*NeedsCarry*/ true);
- case ARM::ATOMSUB6432:
+ case ARM::ATOMIC_LOAD_SUB_I64:
return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
/*NeedsCarry*/ true);
- case ARM::ATOMOR6432:
+ case ARM::ATOMIC_LOAD_OR_I64:
return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr,
isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
- case ARM::ATOMXOR6432:
+ case ARM::ATOMIC_LOAD_XOR_I64:
return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr,
isThumb2 ? ARM::t2EORrr : ARM::EORrr);
- case ARM::ATOMAND6432:
+ case ARM::ATOMIC_LOAD_AND_I64:
return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr,
isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
- case ARM::ATOMSWAP6432:
+ case ARM::ATOMIC_STORE_I64:
+ case ARM::ATOMIC_SWAP_I64:
return EmitAtomicBinary64(MI, BB, 0, 0, false);
- case ARM::ATOMCMPXCHG6432:
+ case ARM::ATOMIC_CMP_SWAP_I64:
return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
/*NeedsCarry*/ false, /*IsCmpxchg*/true);
- case ARM::ATOMMIN6432:
+ case ARM::ATOMIC_LOAD_MIN_I64:
return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
/*NeedsCarry*/ true, /*IsCmpxchg*/false,
/*IsMinMax*/ true, ARMCC::LT);
- case ARM::ATOMMAX6432:
+ case ARM::ATOMIC_LOAD_MAX_I64:
return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
/*NeedsCarry*/ true, /*IsCmpxchg*/false,
/*IsMinMax*/ true, ARMCC::GE);
- case ARM::ATOMUMIN6432:
+ case ARM::ATOMIC_LOAD_UMIN_I64:
return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
/*NeedsCarry*/ true, /*IsCmpxchg*/false,
/*IsMinMax*/ true, ARMCC::LO);
- case ARM::ATOMUMAX6432:
+ case ARM::ATOMIC_LOAD_UMAX_I64:
return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
/*NeedsCarry*/ true, /*IsCmpxchg*/false,
@@ -7710,13 +8168,13 @@ SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
// Slct is now known to be the desired identity constant when CC is true.
SDValue TrueVal = OtherOp;
- SDValue FalseVal = DAG.getNode(N->getOpcode(), N->getDebugLoc(), VT,
+ SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
OtherOp, NonConstantVal);
// Unless SwapSelectOps says CC should be false.
if (SwapSelectOps)
std::swap(TrueVal, FalseVal);
- return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
+ return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
CCOp, TrueVal, FalseVal);
}
@@ -7823,9 +8281,9 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
llvm_unreachable("Invalid vector element type for padd optimization.");
}
- SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
+ SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
widenType, &Ops[0], Ops.size());
- return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp);
+ return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, tmp);
}
static SDValue findMUL_LOHI(SDValue V) {
@@ -7868,8 +8326,11 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
assert(AddcNode->getNumValues() == 2 &&
AddcNode->getValueType(0) == MVT::i32 &&
- AddcNode->getValueType(1) == MVT::Glue &&
- "Expect ADDC with two result values: i32, glue");
+ "Expect ADDC with two result values. First: i32");
+
+ // Check that we have a glued ADDC node.
+ if (AddcNode->getValueType(1) != MVT::Glue)
+ return SDValue();
// Check that the ADDC adds the low result of the S/UMUL_LOHI.
if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
@@ -7950,7 +8411,7 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
Ops.push_back(*LowAdd);
Ops.push_back(*HiAdd);
- SDValue MLALNode = DAG.getNode(FinalOpc, AddcNode->getDebugLoc(),
+ SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcNode),
DAG.getVTList(MVT::i32, MVT::i32),
&Ops[0], Ops.size());
@@ -8038,6 +8499,13 @@ static SDValue PerformSUBCombine(SDNode *N,
/// is faster than
/// vadd d3, d0, d1
/// vmul d3, d3, d2
+/// However, for (A + B) * (A + B),
+/// vadd d2, d0, d1
+/// vmul d3, d0, d2
+/// vmla d3, d1, d2
+/// is slower than
+/// vadd d2, d0, d1
+/// vmul d3, d2, d2
static SDValue PerformVMULCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) {
@@ -8057,8 +8525,11 @@ static SDValue PerformVMULCombine(SDNode *N,
std::swap(N0, N1);
}
+ if (N0 == N1)
+ return SDValue();
+
EVT VT = N->getValueType(0);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDValue N00 = N0->getOperand(0);
SDValue N01 = N0->getOperand(1);
return DAG.getNode(Opcode, DL, VT,
@@ -8088,11 +8559,11 @@ static SDValue PerformMULCombine(SDNode *N,
return SDValue();
int64_t MulAmt = C->getSExtValue();
- unsigned ShiftAmt = CountTrailingZeros_64(MulAmt);
+ unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
ShiftAmt = ShiftAmt & (32 - 1);
SDValue V = N->getOperand(0);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDValue Res;
MulAmt >>= ShiftAmt;
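The countTrailingZeros call above peels powers of two off the multiplier before the rest of the combine (outside this hunk) tries to match a 2^N +/- 1 pattern. A worked example, assuming that is what the remainder of PerformMULCombine checks: for x * 10, ShiftAmt becomes 1 and MulAmt becomes 5 = 4 + 1, so the multiply can be rewritten with shifts and an add:

    static unsigned mulBy10(unsigned X) {
      return ((X << 2) + X) << 1;  // (x * 5) << 1 == x * 10, no multiply needed
    }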
@@ -8156,7 +8627,7 @@ static SDValue PerformANDCombine(SDNode *N,
// Attempt to use immediate-form VBIC
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
EVT VT = N->getValueType(0);
SelectionDAG &DAG = DCI.DAG;
@@ -8199,7 +8670,7 @@ static SDValue PerformORCombine(SDNode *N,
const ARMSubtarget *Subtarget) {
// Attempt to use immediate-form VORR
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
EVT VT = N->getValueType(0);
SelectionDAG &DAG = DCI.DAG;
@@ -8248,22 +8719,29 @@ static SDValue PerformORCombine(SDNode *N,
unsigned SplatBitSize;
bool HasAnyUndefs;
+ APInt SplatBits0, SplatBits1;
BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
- APInt SplatBits0;
+ BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
+ // Ensure that the second operand of both ands is a constant splat.
if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
- HasAnyUndefs) && !HasAnyUndefs) {
- BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
- APInt SplatBits1;
- if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
- HasAnyUndefs) && !HasAnyUndefs &&
- SplatBits0 == ~SplatBits1) {
- // Canonicalize the vector type to make instruction selection simpler.
- EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
- SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
- N0->getOperand(1), N0->getOperand(0),
- N1->getOperand(0));
- return DAG.getNode(ISD::BITCAST, dl, VT, Result);
- }
+ HasAnyUndefs) && !HasAnyUndefs) {
+ if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
+ HasAnyUndefs) && !HasAnyUndefs) {
+ // Ensure that the bit widths of the constants are the same and that
+ // the splat arguments are logical inverses as per the pattern we
+ // are trying to simplify.
+ if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
+ SplatBits0 == ~SplatBits1) {
+ // Canonicalize the vector type to make instruction selection
+ // simpler.
+ EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
+ SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
+ N0->getOperand(1),
+ N0->getOperand(0),
+ N1->getOperand(0));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Result);
+ }
+ }
}
}
@@ -8274,7 +8752,7 @@ static SDValue PerformORCombine(SDNode *N,
if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
return SDValue();
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
// 1) or (and A, mask), val => ARMbfi A, val, mask
// iff (val & mask) == val
//
@@ -8309,7 +8787,7 @@ static SDValue PerformORCombine(SDNode *N,
return SDValue();
if (ARM::isBitFieldInvertedMask(Mask)) {
- Val >>= CountTrailingZeros_32(~Mask);
+ Val >>= countTrailingZeros(~Mask);
Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
DAG.getConstant(Val, MVT::i32),
@@ -8336,7 +8814,7 @@ static SDValue PerformORCombine(SDNode *N,
(Mask == 0xffff || Mask == 0xffff0000))
return SDValue();
// 2a
- unsigned amt = CountTrailingZeros_32(Mask2);
+ unsigned amt = countTrailingZeros(Mask2);
Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
DAG.getConstant(amt, MVT::i32));
Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
@@ -8352,7 +8830,7 @@ static SDValue PerformORCombine(SDNode *N,
(Mask2 == 0xffff || Mask2 == 0xffff0000))
return SDValue();
// 2b
- unsigned lsb = CountTrailingZeros_32(Mask);
+ unsigned lsb = countTrailingZeros(Mask);
Res = DAG.getNode(ISD::SRL, DL, VT, N00,
DAG.getConstant(lsb, MVT::i32));
Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
@@ -8370,7 +8848,7 @@ static SDValue PerformORCombine(SDNode *N,
// where lsb(mask) == #shamt and masked bits of B are known zero.
SDValue ShAmt = N00.getOperand(1);
unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
- unsigned LSB = CountTrailingZeros_32(Mask);
+ unsigned LSB = countTrailingZeros(Mask);
if (ShAmtC != LSB)
return SDValue();
@@ -8413,12 +8891,12 @@ static SDValue PerformBFICombine(SDNode *N,
if (!N11C)
return SDValue();
unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
- unsigned LSB = CountTrailingZeros_32(~InvMask);
- unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB;
+ unsigned LSB = countTrailingZeros(~InvMask);
+ unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
unsigned Mask = (1 << Width)-1;
unsigned Mask2 = N11C->getZExtValue();
if ((Mask & (~Mask2)) == 0)
- return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0),
+ return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
N->getOperand(0), N1.getOperand(0),
N->getOperand(2));
}
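A worked instance of the mask arithmetic above, for an inverted mask describing an 8-bit field at bit 8 (values illustrative):

    // InvMask  = 0xFFFF00FF
    // ~InvMask = 0x0000FF00
    // LSB      = countTrailingZeros(0x0000FF00)            = 8
    // Width    = (32 - countLeadingZeros(0x0000FF00)) - LSB = 16 - 8 = 8
    // Mask     = (1 << Width) - 1                           = 0xFF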
@@ -8444,7 +8922,7 @@ static SDValue PerformVMOVRRDCombine(SDNode *N,
LoadSDNode *LD = cast<LoadSDNode>(InNode);
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = LD->getDebugLoc();
+ SDLoc DL(LD);
SDValue BasePtr = LD->getBasePtr();
SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr,
LD->getPointerInfo(), LD->isVolatile(),
@@ -8481,7 +8959,7 @@ static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
if (Op0.getOpcode() == ARMISD::VMOVRRD &&
Op0.getNode() == Op1.getNode() &&
Op0.getResNo() == 0 && Op1.getResNo() == 1)
- return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
+ return DAG.getNode(ISD::BITCAST, SDLoc(N),
N->getValueType(0), Op0.getOperand(0));
return SDValue();
}
@@ -8523,7 +9001,7 @@ static SDValue PerformSTORECombine(SDNode *N,
NumElems*SizeRatio);
assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
- DebugLoc DL = St->getDebugLoc();
+ SDLoc DL(St);
SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i < NumElems; ++i) ShuffleVec[i] = i * SizeRatio;
@@ -8584,7 +9062,7 @@ static SDValue PerformSTORECombine(SDNode *N,
if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
StVal.getNode()->hasOneUse()) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = St->getDebugLoc();
+ SDLoc DL(St);
SDValue BasePtr = St->getBasePtr();
SDValue NewST1 = DAG.getStore(St->getChain(), DL,
StVal.getNode()->getOperand(0), BasePtr,
@@ -8606,14 +9084,14 @@ static SDValue PerformSTORECombine(SDNode *N,
// Bitcast an i64 store extracted from a vector to f64.
// Otherwise, the i64 value will be legalized to a pair of i32 values.
SelectionDAG &DAG = DCI.DAG;
- DebugLoc dl = StVal.getDebugLoc();
+ SDLoc dl(StVal);
SDValue IntVec = StVal.getOperand(0);
EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
IntVec.getValueType().getVectorNumElements());
SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
Vec, StVal.getOperand(1));
- dl = N->getDebugLoc();
+ dl = SDLoc(N);
SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
// Make the DAGCombiner fold the bitcasts.
DCI.AddToWorklist(Vec.getNode());
@@ -8659,7 +9137,7 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N,
EVT VT = N->getValueType(0);
if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
return SDValue();
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SmallVector<SDValue, 8> Ops;
unsigned NumElts = VT.getVectorNumElements();
for (unsigned i = 0; i < NumElts; ++i) {
@@ -8673,6 +9151,98 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N,
return DAG.getNode(ISD::BITCAST, dl, VT, BV);
}
+/// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
+static SDValue
+PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+ // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
+ // At that time, we may have inserted bitcasts from integer to float.
+ // If these bitcasts have survived DAGCombine, change the lowering of this
+ // BUILD_VECTOR into something more vector friendly, i.e., something that
+ // does not force the use of floating point types.
+
+ // Make sure we can change the type of the vector.
+ // This is possible iff:
+ // 1. The vector is only used in a bitcast to an integer type. I.e.,
+ // 1.1. Vector is used only once.
+ // 1.2. Use is a bit convert to an integer type.
+ // 2. The size of its operands is 32 bits (64 bits are not legal).
+ EVT VT = N->getValueType(0);
+ EVT EltVT = VT.getVectorElementType();
+
+ // Check 1.1. and 2.
+ if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
+ return SDValue();
+
+ // By construction, the input type must be float.
+ assert(EltVT == MVT::f32 && "Unexpected type!");
+
+ // Check 1.2.
+ SDNode *Use = *N->use_begin();
+ if (Use->getOpcode() != ISD::BITCAST ||
+ Use->getValueType(0).isFloatingPoint())
+ return SDValue();
+
+ // Check profitability.
+ // The model is: if more than half of the relevant operands are bitcast from
+ // i32, turn the build_vector into a sequence of insert_vector_elt.
+ // Relevant operands are all operands that are not statically
+ // (i.e., at compile time) bitcasted.
+ unsigned NumOfBitCastedElts = 0;
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned NumOfRelevantElts = NumElts;
+ for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
+ SDValue Elt = N->getOperand(Idx);
+ if (Elt->getOpcode() == ISD::BITCAST) {
+ // Assume only bit cast to i32 will go away.
+ if (Elt->getOperand(0).getValueType() == MVT::i32)
+ ++NumOfBitCastedElts;
+ } else if (Elt.getOpcode() == ISD::UNDEF || isa<ConstantSDNode>(Elt))
+ // Constants are statically casted, thus do not count them as
+ // relevant operands.
+ --NumOfRelevantElts;
+ }
+
+ // Check if more than half of the elements require a non-free bitcast.
+ if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ // Create the new vector type.
+ EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
+ // Check if the type is legal.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (!TLI.isTypeLegal(VecVT))
+ return SDValue();
+
+ // Combine:
+ // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
+ // => BITCAST INSERT_VECTOR_ELT
+ // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
+ // (BITCAST EN), N.
+ SDValue Vec = DAG.getUNDEF(VecVT);
+ SDLoc dl(N);
+ for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
+ SDValue V = N->getOperand(Idx);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ if (V.getOpcode() == ISD::BITCAST &&
+ V->getOperand(0).getValueType() == MVT::i32)
+ // Fold obvious case.
+ V = V.getOperand(0);
+ else {
+ V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
+ // Make the DAGCombiner fold the bitcasts.
+ DCI.AddToWorklist(V.getNode());
+ }
+ SDValue LaneIdx = DAG.getConstant(Idx, MVT::i32);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
+ }
+ Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
+ // Make the DAGCombiner fold the bitcasts.
+ DCI.AddToWorklist(Vec.getNode());
+ return Vec;
+}
+
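A minimal sketch of the profitability rule described in the comments above, assuming each element has already been classified; the enum and function names below are hypothetical, not part of the patch:

#include <vector>

// Elements bitcast from i32 favor an integer build; constants and undefs are
// materialized statically and therefore do not count as "relevant".
enum class EltKind { BitcastFromI32, ConstantOrUndef, Other };

// Switch to i32 insert_vector_elt only if more than half of the relevant
// elements would otherwise need a non-free bitcast through an FP register.
static bool shouldUseIntegerInserts(const std::vector<EltKind> &Elts) {
  unsigned NumBitCasted = 0;
  unsigned NumRelevant = Elts.size();
  for (EltKind K : Elts) {
    if (K == EltKind::BitcastFromI32)
      ++NumBitCasted;
    else if (K == EltKind::ConstantOrUndef)
      --NumRelevant;
  }
  return NumBitCasted > NumRelevant / 2;
}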
/// PerformInsertEltCombine - Target-specific dag combine xforms for
/// ISD::INSERT_VECTOR_ELT.
static SDValue PerformInsertEltCombine(SDNode *N,
@@ -8686,7 +9256,7 @@ static SDValue PerformInsertEltCombine(SDNode *N,
return SDValue();
SelectionDAG &DAG = DCI.DAG;
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
VT.getVectorNumElements());
SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
@@ -8732,7 +9302,7 @@ static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
!TLI.isTypeLegal(Concat1Op1.getValueType()))
return SDValue();
- SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
+ SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
Op0.getOperand(0), Op1.getOperand(0));
// Translate the shuffle mask.
SmallVector<int, 16> NewMask;
@@ -8748,7 +9318,7 @@ static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
NewElt = HalfElts + MaskElt - NumElts;
NewMask.push_back(NewElt);
}
- return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat,
+ return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
DAG.getUNDEF(VT), NewMask.data());
}
@@ -8865,7 +9435,7 @@ static SDValue CombineBaseUpdate(SDNode *N,
Ops.push_back(N->getOperand(i));
}
MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
- SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys,
+ SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
Ops.data(), Ops.size(),
MemInt->getMemoryVT(),
MemInt->getMemOperand());
@@ -8939,7 +9509,7 @@ static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1);
SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
- SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys,
+ SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
Ops, 2, VLDMemInt->getMemoryVT(),
VLDMemInt->getMemOperand());
@@ -8994,7 +9564,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N,
if (EltSize > VT.getVectorElementType().getSizeInBits())
return SDValue();
- return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
+ return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}
// isConstVecPow2 - Return true if each vector element is a power of 2, all
@@ -9051,12 +9621,27 @@ static SDValue PerformVCVTCombine(SDNode *N,
!isConstVecPow2(ConstVec, isSigned, C))
return SDValue();
+ MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
+ MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
+ if (FloatTy.getSizeInBits() != 32 || IntTy.getSizeInBits() > 32) {
+ // These instructions only exist for converting from f32 to i32. We can handle
+ // smaller integers by generating an extra truncate, but larger ones would
+ // be lossy.
+ return SDValue();
+ }
+
unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
Intrinsic::arm_neon_vcvtfp2fxu;
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
- N->getValueType(0),
- DAG.getConstant(IntrinsicOpcode, MVT::i32), N0,
- DAG.getConstant(Log2_64(C), MVT::i32));
+ unsigned NumLanes = Op.getValueType().getVectorNumElements();
+ SDValue FixConv = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
+ NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
+ DAG.getConstant(IntrinsicOpcode, MVT::i32), N0,
+ DAG.getConstant(Log2_64(C), MVT::i32));
+
+ if (IntTy.getSizeInBits() < FloatTy.getSizeInBits())
+ FixConv = DAG.getNode(ISD::TRUNCATE, SDLoc(N), N->getValueType(0), FixConv);
+
+ return FixConv;
}
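For reference, a scalar sketch of the source pattern PerformVCVTCombine targets: a multiply by a power-of-two constant feeding a float-to-int conversion, which NEON can express as one fixed-point VCVT (the exponent becomes #fbits). Illustrative only:

#include <cstdint>

// out[i] = (int32_t)(in[i] * 2^3). Vectorized, the combine above turns the
// vmul + vcvt pair into a single vcvt.s32.f32 with #fbits = 3.
static void float_to_fixed(const float *in, int32_t *out, int n) {
  for (int i = 0; i < n; ++i)
    out[i] = (int32_t)(in[i] * 8.0f);
}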
/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
@@ -9087,12 +9672,28 @@ static SDValue PerformVDIVCombine(SDNode *N,
!isConstVecPow2(ConstVec, isSigned, C))
return SDValue();
+ MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
+ MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
+ if (FloatTy.getSizeInBits() != 32 || IntTy.getSizeInBits() > 32) {
+ // These instructions only exist for converting from i32 to f32. We can handle
+ // smaller integers by generating an extra extend, but larger ones would
+ // be lossy.
+ return SDValue();
+ }
+
+ SDValue ConvInput = Op.getOperand(0);
+ unsigned NumLanes = Op.getValueType().getVectorNumElements();
+ if (IntTy.getSizeInBits() < FloatTy.getSizeInBits())
+ ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
+ SDLoc(N), NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
+ ConvInput);
+
unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
Intrinsic::arm_neon_vcvtfxu2fp;
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
Op.getValueType(),
DAG.getConstant(IntrinsicOpcode, MVT::i32),
- Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32));
+ ConvInput, DAG.getConstant(Log2_64(C), MVT::i32));
}
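And the inverse pattern handled just above by PerformVDIVCombine: an int-to-float conversion followed by a divide by a power-of-two constant, which maps onto the fixed-point to floating-point form of VCVT. Again a hedged scalar sketch, not the lowering itself:

#include <cstdint>

// out[i] = (float)in[i] / 2^3. Vectorized, this becomes a single
// vcvt.f32.s32 with #fbits = 3 instead of vcvt followed by vdiv.
static void fixed_to_float(const int32_t *in, float *out, int n) {
  for (int i = 0; i < n; ++i)
    out[i] = (float)in[i] / 8.0f;
}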
/// Getvshiftimm - Check if this is a valid build_vector for the immediate
@@ -9273,7 +9874,7 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
VShiftOpc = ARMISD::VQRSHRNsu; break;
}
- return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
+ return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
}
@@ -9290,7 +9891,7 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
}
- return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
+ return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
N->getOperand(1), N->getOperand(2),
DAG.getConstant(Cnt, MVT::i32));
}
@@ -9321,7 +9922,7 @@ static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
DAG.MaskedValueIsZero(N0.getOperand(0),
APInt::getHighBitsSet(32, 16)))
- return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, N0, N1);
+ return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
}
}
@@ -9338,7 +9939,7 @@ static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
case ISD::SHL:
if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
- return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
+ return DAG.getNode(ARMISD::VSHL, SDLoc(N), VT, N->getOperand(0),
DAG.getConstant(Cnt, MVT::i32));
break;
@@ -9347,7 +9948,7 @@ static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
ARMISD::VSHRs : ARMISD::VSHRu);
- return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
+ return DAG.getNode(VShiftOpc, SDLoc(N), VT, N->getOperand(0),
DAG.getConstant(Cnt, MVT::i32));
}
}
@@ -9387,7 +9988,7 @@ static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
Opc = ARMISD::VGETLANEu;
break;
}
- return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
+ return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
}
}
@@ -9476,7 +10077,7 @@ static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
if (!Opcode)
return SDValue();
- return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS);
+ return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), LHS, RHS);
}
/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
@@ -9488,7 +10089,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
return SDValue();
EVT VT = N->getValueType(0);
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue LHS = Cmp.getOperand(0);
SDValue RHS = Cmp.getOperand(1);
SDValue FalseVal = N->getOperand(0);
@@ -9578,6 +10179,8 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ARMISD::VLD3DUP:
case ARMISD::VLD4DUP:
return CombineBaseUpdate(N, DCI);
+ case ARMISD::BUILD_VECTOR:
+ return PerformARMBUILD_VECTORCombine(N, DCI);
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN:
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
@@ -9702,6 +10305,21 @@ bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
return false;
}
+bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
+ if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
+ return false;
+
+ if (!isTypeLegal(EVT::getEVT(Ty1)))
+ return false;
+
+ assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
+
+ // Assuming the caller doesn't have a zeroext or signext return parameter,
+ // truncation all the way down to i1 is valid.
+ return true;
+}
+
+
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
if (V < 0)
return false;
@@ -10101,9 +10719,19 @@ void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
- KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
+ unsigned BitWidth = KnownOne.getBitWidth();
+ KnownZero = KnownOne = APInt(BitWidth, 0);
switch (Op.getOpcode()) {
default: break;
+ case ARMISD::ADDC:
+ case ARMISD::ADDE:
+ case ARMISD::SUBC:
+ case ARMISD::SUBE:
+ // These nodes' second result is a boolean
+ if (Op.getResNo() == 0)
+ break;
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
+ break;
case ARMISD::CMOV: {
// Bits are known zero/one if known on the LHS and RHS.
DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
@@ -10217,7 +10845,7 @@ ARMTargetLowering::getSingleConstraintMatchWeight(
typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
RCPair
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
// GCC ARM Constraint Letters
switch (Constraint[0]) {
@@ -10232,6 +10860,8 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case 'r':
return RCPair(0U, &ARM::GPRRegClass);
case 'w':
+ if (VT == MVT::Other)
+ break;
if (VT == MVT::f32)
return RCPair(0U, &ARM::SPRRegClass);
if (VT.getSizeInBits() == 64)
@@ -10240,6 +10870,8 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
return RCPair(0U, &ARM::QPRRegClass);
break;
case 'x':
+ if (VT == MVT::Other)
+ break;
if (VT == MVT::f32)
return RCPair(0U, &ARM::SPR_8RegClass);
if (VT.getSizeInBits() == 64)
@@ -10426,6 +11058,54 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
+SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
+ assert(Subtarget->isTargetAEABI() && "Register-based DivRem lowering only");
+ unsigned Opcode = Op->getOpcode();
+ assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
+ "Invalid opcode for Div/Rem lowering");
+ bool isSigned = (Opcode == ISD::SDIVREM);
+ EVT VT = Op->getValueType(0);
+ Type *Ty = VT.getTypeForEVT(*DAG.getContext());
+
+ RTLIB::Libcall LC;
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unexpected request for libcall!");
+ case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
+ case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
+ case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
+ case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
+ }
+
+ SDValue InChain = DAG.getEntryNode();
+
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
+ EVT ArgVT = Op->getOperand(i).getValueType();
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Entry.Node = Op->getOperand(i);
+ Entry.Ty = ArgTy;
+ Entry.isSExt = isSigned;
+ Entry.isZExt = !isSigned;
+ Args.push_back(Entry);
+ }
+
+ SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
+ getPointerTy());
+
+ Type *RetTy = (Type*)StructType::get(Ty, Ty, NULL);
+
+ SDLoc dl(Op);
+ TargetLowering::
+ CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, true,
+ 0, getLibcallCallingConv(LC), /*isTailCall=*/false,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
+ Callee, Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
+
+ return CallInfo.first;
+}
+
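LowerDivRem above folds a combined divide/remainder node into one AEABI runtime call whose result carries both values (for i32 this is __aeabi_idivmod or __aeabi_uidivmod). A rough scalar model of that contract, with a hypothetical stand-in for the runtime routine:

#include <cstdint>
#include <utility>

// Stand-in for the AEABI helper: quotient and remainder come back from a
// single call, which is why SDIVREM/UDIVREM are lowered to one libcall that
// returns a two-field struct rather than to separate divide and modulo calls.
static std::pair<int32_t, int32_t> divmod(int32_t a, int32_t b) {
  return {a / b, a % b};
}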
bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// The ARM target isn't yet aware of offsets.
@@ -10434,17 +11114,15 @@ ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
bool ARM::isBitFieldInvertedMask(unsigned v) {
if (v == 0xffffffff)
- return 0;
+ return false;
+
// there can be 1's on either or both "outsides", all the "inside"
// bits must be 0's
- unsigned int lsb = 0, msb = 31;
- while (v & (1 << msb)) --msb;
- while (v & (1 << lsb)) ++lsb;
- for (unsigned int i = lsb; i <= msb; ++i) {
- if (v & (1 << i))
- return 0;
- }
- return 1;
+ unsigned TO = CountTrailingOnes_32(v);
+ unsigned LO = CountLeadingOnes_32(v);
+ v = (v >> TO) << TO;
+ v = (v << LO) >> LO;
+ return v == 0;
}
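The rewritten predicate above leans on the count-ones helpers: strip the run of trailing ones, strip the run of leading ones, and everything left must be zero. A standalone sketch using GCC/Clang builtins; the function name is hypothetical:

#include <cstdint>

// True when the zero bits of v form one contiguous run, i.e. ones appear only
// on the "outsides" of the word (e.g. 0xff0000ff or 0x0000ffff), the shape a
// single BFI can clear.
static bool isInvertedBitFieldMask(uint32_t v) {
  if (v == 0xffffffffu)
    return false;
  unsigned TrailingOnes = __builtin_ctz(~v); // length of the low run of ones
  unsigned LeadingOnes = __builtin_clz(~v);  // length of the high run of ones
  v = (v >> TrailingOnes) << TrailingOnes;   // drop the low ones
  v = (v << LeadingOnes) >> LeadingOnes;     // drop the high ones
  return v == 0;
}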
/// isFPImmLegal - Returns true if the target can instruction select the
@@ -10513,6 +11191,30 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.writeMem = true;
return true;
}
+ case Intrinsic::arm_ldrex: {
+ PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
+ Info.vol = true;
+ Info.readMem = true;
+ Info.writeMem = false;
+ return true;
+ }
+ case Intrinsic::arm_strex: {
+ PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.ptrVal = I.getArgOperand(1);
+ Info.offset = 0;
+ Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
+ Info.vol = true;
+ Info.readMem = false;
+ Info.writeMem = true;
+ return true;
+ }
case Intrinsic::arm_strexd: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i64;
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 426010e295d7..90facddeb02b 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -52,6 +52,7 @@ namespace llvm {
BR_JT, // Jumptable branch.
BR2_JT, // Jumptable branch (2 level - jumptable entry is a jump).
RET_FLAG, // Return with a flag operand.
+ INTRET_FLAG, // Interrupt return with an LR-offset and a flag operand.
PIC_ADD, // Add with a PC operand and a PIC label.
@@ -94,7 +95,6 @@ namespace llvm {
DYN_ALLOC, // Dynamic allocation on the stack.
- MEMBARRIER, // Memory barrier (DMB)
MEMBARRIER_MCR, // Memory barrier (MCR)
PRELOAD, // Preload
@@ -186,6 +186,8 @@ namespace llvm {
// Floating-point max and min:
FMAX,
FMIN,
+ VMAXNM,
+ VMINNM,
// Bit-field insert
BFI,
@@ -222,21 +224,7 @@ namespace llvm {
VST4_UPD,
VST2LN_UPD,
VST3LN_UPD,
- VST4LN_UPD,
-
- // 64-bit atomic ops (value split into two registers)
- ATOMADD64_DAG,
- ATOMSUB64_DAG,
- ATOMOR64_DAG,
- ATOMXOR64_DAG,
- ATOMAND64_DAG,
- ATOMNAND64_DAG,
- ATOMSWAP64_DAG,
- ATOMCMPXCHG64_DAG,
- ATOMMIN64_DAG,
- ATOMUMIN64_DAG,
- ATOMMAX64_DAG,
- ATOMUMAX64_DAG
+ VST4LN_UPD
};
}
@@ -270,7 +258,7 @@ namespace llvm {
}
/// getSetCCResultType - Return the value type to use for ISD::SETCC.
- virtual EVT getSetCCResultType(EVT VT) const;
+ virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
@@ -298,6 +286,9 @@ namespace llvm {
using TargetLowering::isZExtFree;
virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+ virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
+
+
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
@@ -349,7 +340,7 @@ namespace llvm {
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
+ MVT VT) const;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
@@ -372,6 +363,12 @@ namespace llvm {
/// be used for loads / stores from the global.
virtual unsigned getMaximalGlobalOffset() const;
+ /// Returns true if a cast between SrcAS and DestAS is a noop.
+ virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+ // Addrspacecasts are always noops.
+ return true;
+ }
+
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
@@ -412,21 +409,21 @@ namespace llvm {
void addQRTypeForNEON(MVT VT);
typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
- void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
+ void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
SDValue Chain, SDValue &Arg,
RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA,
SDValue &StackPtr,
- SmallVector<SDValue, 8> &MemOpChains,
+ SmallVectorImpl<SDValue> &MemOpChains,
ISD::ArgFlagsTy Flags) const;
SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
SDValue &Root, SelectionDAG &DAG,
- DebugLoc dl) const;
+ SDLoc dl) const;
CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
bool isVarArg) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const;
SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
@@ -457,13 +454,26 @@ namespace llvm {
const ARMSubtarget *ST) const;
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) const;
+ SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
+
+ /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
+ /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
+ /// expanded to FMAs when this method returns true, otherwise fmuladd is
+ /// expanded to fmul + fadd.
+ ///
+ /// ARM supports both fused and unfused multiply-add operations; we already
+ /// lower a pair of fmul and fadd to the latter so it's not clear that there
+ /// would be a gain or that the gain would be worthwhile enough to risk
+ /// correctness bugs.
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const { return false; }
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
bool isThisReturn, SDValue ThisVal) const;
@@ -471,24 +481,26 @@ namespace llvm {
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
- DebugLoc dl, SDValue &Chain,
+ SDLoc dl, SDValue &Chain,
const Value *OrigArg,
unsigned InRegsParamRecordIdx,
unsigned OffsetFromOrigArg,
unsigned ArgOffset,
+ unsigned ArgSize,
bool ForceMutable) const;
void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
- DebugLoc dl, SDValue &Chain,
+ SDLoc dl, SDValue &Chain,
unsigned ArgOffset,
bool ForceMutable = false) const;
void computeRegArea(CCState &CCInfo, MachineFunction &MF,
unsigned InRegsParamRecordIdx,
+ unsigned ArgSize,
unsigned &ArgRegsSize,
unsigned &ArgRegsSaveSize) const;
@@ -522,16 +534,16 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
+ SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
SDValue getVFPCmp(SDValue LHS, SDValue RHS,
- SelectionDAG &DAG, DebugLoc dl) const;
+ SelectionDAG &DAG, SDLoc dl) const;
SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;
SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
@@ -556,6 +568,8 @@ namespace llvm {
unsigned Size,
bool signExtend,
ARMCC::CondCodes Cond) const;
+ MachineBasicBlock *EmitAtomicLoad64(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
void SetupEntryBlockForSjLj(MachineInstr *MI,
MachineBasicBlock *MBB,
diff --git a/lib/Target/ARM/ARMInstrFormats.td b/lib/Target/ARM/ARMInstrFormats.td
index 67a6820932fc..f93504fddb8e 100644
--- a/lib/Target/ARM/ARMInstrFormats.td
+++ b/lib/Target/ARM/ARMInstrFormats.td
@@ -155,6 +155,16 @@ def pred : PredicateOperand<OtherVT, (ops i32imm, i32imm),
let DecoderMethod = "DecodePredicateOperand";
}
+// Selectable predicate operand for CMOV instructions. We can't use a normal
+// predicate because the default values interfere with instruction selection. In
+// all other respects it is identical though: pseudo-instruction expansion
+// relies on the MachineOperands being compatible.
+def cmovpred : Operand<i32>, PredicateOp,
+ ComplexPattern<i32, 2, "SelectCMOVPred"> {
+ let MIOperandInfo = (ops i32imm, i32imm);
+ let PrintMethod = "printPredicateOperand";
+}
+
// Conditional code result for instructions whose 's' bit is set, e.g. subs.
def CCOutOperand : AsmOperandClass { let Name = "CCOut"; }
def cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 zero_reg))> {
@@ -237,6 +247,8 @@ class t2InstAlias<string Asm, dag Result, bit Emit = 0b1>
: InstAlias<Asm, Result, Emit>, Requires<[IsThumb2]>;
class VFP2InstAlias<string Asm, dag Result, bit Emit = 0b1>
: InstAlias<Asm, Result, Emit>, Requires<[HasVFP2]>;
+class VFP2DPInstAlias<string Asm, dag Result, bit Emit = 0b1>
+ : InstAlias<Asm, Result, Emit>, Requires<[HasVFP2,HasDPVFP]>;
class VFP3InstAlias<string Asm, dag Result, bit Emit = 0b1>
: InstAlias<Asm, Result, Emit>, Requires<[HasVFP3]>;
class NEONInstAlias<string Asm, dag Result, bit Emit = 0b1>
@@ -490,8 +502,7 @@ class JTI<dag oops, dag iops, InstrItinClass itin,
: XI<oops, iops, AddrModeNone, 0, IndexModeNone, BrMiscFrm, itin,
asm, "", pattern>;
-// Atomic load/store instructions
-class AIldrex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
+class AIldr_ex_or_acq<bits<2> opcod, bits<2> opcod2, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: I<oops, iops, AddrModeNone, 4, IndexModeNone, LdStExFrm, itin,
opc, asm, "", pattern> {
@@ -502,23 +513,52 @@ class AIldrex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
let Inst{20} = 1;
let Inst{19-16} = addr;
let Inst{15-12} = Rt;
- let Inst{11-0} = 0b111110011111;
+ let Inst{11-10} = 0b11;
+ let Inst{9-8} = opcod2;
+ let Inst{7-0} = 0b10011111;
}
-class AIstrex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
+class AIstr_ex_or_rel<bits<2> opcod, bits<2> opcod2, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: I<oops, iops, AddrModeNone, 4, IndexModeNone, LdStExFrm, itin,
opc, asm, "", pattern> {
- bits<4> Rd;
bits<4> Rt;
bits<4> addr;
let Inst{27-23} = 0b00011;
let Inst{22-21} = opcod;
let Inst{20} = 0;
let Inst{19-16} = addr;
- let Inst{15-12} = Rd;
- let Inst{11-4} = 0b11111001;
+ let Inst{11-10} = 0b11;
+ let Inst{9-8} = opcod2;
+ let Inst{7-4} = 0b1001;
let Inst{3-0} = Rt;
}
+// Atomic load/store instructions
+class AIldrex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : AIldr_ex_or_acq<opcod, 0b11, oops, iops, itin, opc, asm, pattern>;
+
+class AIstrex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : AIstr_ex_or_rel<opcod, 0b11, oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ let Inst{15-12} = Rd;
+}
+
+// Exclusive load/store instructions
+
+class AIldaex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : AIldr_ex_or_acq<opcod, 0b10, oops, iops, itin, opc, asm, pattern>,
+ Requires<[IsARM, HasV8]>;
+
+class AIstlex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : AIstr_ex_or_rel<opcod, 0b10, oops, iops, itin, opc, asm, pattern>,
+ Requires<[IsARM, HasV8]> {
+ bits<4> Rd;
+ let Inst{15-12} = Rd;
+}
+
class AIswp<bit b, dag oops, dag iops, string opc, list<dag> pattern>
: AI<oops, iops, MiscFrm, NoItinerary, opc, "\t$Rt, $Rt2, $addr", pattern> {
bits<4> Rt;
@@ -535,6 +575,18 @@ class AIswp<bit b, dag oops, dag iops, string opc, list<dag> pattern>
let Unpredictable{11-8} = 0b1111;
let DecoderMethod = "DecodeSwap";
}
+// Acquire/Release load/store instructions
+class AIldracq<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : AIldr_ex_or_acq<opcod, 0b00, oops, iops, itin, opc, asm, pattern>,
+ Requires<[IsARM, HasV8]>;
+
+class AIstrrel<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : AIstr_ex_or_rel<opcod, 0b00, oops, iops, itin, opc, asm, pattern>,
+ Requires<[IsARM, HasV8]> {
+ let Inst{15-12} = 0b1111;
+}
// addrmode1 instructions
class AI1<bits<4> opcod, dag oops, dag iops, Format f, InstrItinClass itin,
@@ -1230,8 +1282,9 @@ class T2JTI<dag oops, dag iops, InstrItinClass itin,
: Thumb2XI<oops, iops, AddrModeNone, 0, itin, asm, "", pattern>;
// Move to/from coprocessor instructions
-class T2Cop<bits<4> opc, dag oops, dag iops, string asm, list<dag> pattern>
- : T2XI <oops, iops, NoItinerary, asm, pattern>, Requires<[IsThumb2]> {
+class T2Cop<bits<4> opc, dag oops, dag iops, string opcstr, string asm,
+ list<dag> pattern>
+ : T2I <oops, iops, NoItinerary, opcstr, asm, pattern>, Requires<[IsThumb2]> {
let Inst{31-28} = opc;
}
@@ -1389,7 +1442,6 @@ class ADI5<bits<4> opcod1, bits<2> opcod2, dag oops, dag iops,
let Inst{15-12} = Dd{3-0};
let Inst{7-0} = addr{7-0}; // imm8
- // TODO: Mark the instructions with the appropriate subtarget info.
let Inst{27-24} = opcod1;
let Inst{21-20} = opcod2;
let Inst{11-9} = 0b101;
@@ -1415,7 +1467,6 @@ class ASI5<bits<4> opcod1, bits<2> opcod2, dag oops, dag iops,
let Inst{15-12} = Sd{4-1};
let Inst{7-0} = addr{7-0}; // imm8
- // TODO: Mark the instructions with the appropriate subtarget info.
let Inst{27-24} = opcod1;
let Inst{21-20} = opcod2;
let Inst{11-9} = 0b101;
@@ -1437,6 +1488,28 @@ class PseudoVFPLdStM<dag oops, dag iops, InstrItinClass itin, string cstr,
}
// Load / store multiple
+
+// Unknown precision
+class AXXI4<dag oops, dag iops, IndexMode im,
+ string asm, string cstr, list<dag> pattern>
+ : VFPXI<oops, iops, AddrMode4, 4, im,
+ VFPLdStFrm, NoItinerary, asm, cstr, pattern> {
+ // Instruction operands.
+ bits<4> Rn;
+ bits<13> regs;
+
+ // Encode instruction operands.
+ let Inst{19-16} = Rn;
+ let Inst{22} = 0;
+ let Inst{15-12} = regs{11-8};
+ let Inst{7-1} = regs{7-1};
+
+ let Inst{27-25} = 0b110;
+ let Inst{11-8} = 0b1011;
+ let Inst{0} = 1;
+}
+
+// Double precision
class AXDI4<dag oops, dag iops, IndexMode im, InstrItinClass itin,
string asm, string cstr, list<dag> pattern>
: VFPXI<oops, iops, AddrMode4, 4, im,
@@ -1449,14 +1522,15 @@ class AXDI4<dag oops, dag iops, IndexMode im, InstrItinClass itin,
let Inst{19-16} = Rn;
let Inst{22} = regs{12};
let Inst{15-12} = regs{11-8};
- let Inst{7-0} = regs{7-0};
+ let Inst{7-1} = regs{7-1};
- // TODO: Mark the instructions with the appropriate subtarget info.
let Inst{27-25} = 0b110;
let Inst{11-9} = 0b101;
let Inst{8} = 1; // Double precision
+ let Inst{0} = 0;
}
+// Single precision
class AXSI4<dag oops, dag iops, IndexMode im, InstrItinClass itin,
string asm, string cstr, list<dag> pattern>
: VFPXI<oops, iops, AddrMode4, 4, im,
@@ -1471,7 +1545,6 @@ class AXSI4<dag oops, dag iops, IndexMode im, InstrItinClass itin,
let Inst{15-12} = regs{12-9};
let Inst{7-0} = regs{7-0};
- // TODO: Mark the instructions with the appropriate subtarget info.
let Inst{27-25} = 0b110;
let Inst{11-9} = 0b101;
let Inst{8} = 0; // Single precision
@@ -1499,6 +1572,34 @@ class ADuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
let Inst{8} = 1; // Double precision
let Inst{7-6} = opcod4;
let Inst{4} = opcod5;
+
+ let Predicates = [HasVFP2, HasDPVFP];
+}
+
+// Double precision, unary, not-predicated
+class ADuInp<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
+ bit opcod5, dag oops, dag iops, InstrItinClass itin,
+ string asm, list<dag> pattern>
+ : VFPXI<oops, iops, AddrModeNone, 4, IndexModeNone, VFPUnaryFrm, itin, asm, "", pattern> {
+ // Instruction operands.
+ bits<5> Dd;
+ bits<5> Dm;
+
+ let Inst{31-28} = 0b1111;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{15-12} = Dd{3-0};
+ let Inst{22} = Dd{4};
+
+ let Inst{27-23} = opcod1;
+ let Inst{21-20} = opcod2;
+ let Inst{19-16} = opcod3;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 1; // Double precision
+ let Inst{7-6} = opcod4;
+ let Inst{4} = opcod5;
}
// Double precision, binary
@@ -1525,9 +1626,42 @@ class ADbI<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
let Inst{8} = 1; // Double precision
let Inst{6} = op6;
let Inst{4} = op4;
+
+ let Predicates = [HasVFP2, HasDPVFP];
+}
+
+// FP, binary, not predicated
+class ADbInp<bits<5> opcod1, bits<2> opcod2, bit opcod3, dag oops, dag iops,
+ InstrItinClass itin, string asm, list<dag> pattern>
+ : VFPXI<oops, iops, AddrModeNone, 4, IndexModeNone, VFPBinaryFrm, itin,
+ asm, "", pattern>
+{
+ // Instruction operands.
+ bits<5> Dd;
+ bits<5> Dn;
+ bits<5> Dm;
+
+ let Inst{31-28} = 0b1111;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{19-16} = Dn{3-0};
+ let Inst{7} = Dn{4};
+ let Inst{15-12} = Dd{3-0};
+ let Inst{22} = Dd{4};
+
+ let Inst{27-23} = opcod1;
+ let Inst{21-20} = opcod2;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 1; // double precision
+ let Inst{6} = opcod3;
+ let Inst{4} = 0;
+
+ let Predicates = [HasVFP2, HasDPVFP];
}
-// Single precision, unary
+// Single precision, unary, predicated
class ASuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
bit opcod5, dag oops, dag iops, InstrItinClass itin, string opc,
string asm, list<dag> pattern>
@@ -1551,6 +1685,33 @@ class ASuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
let Inst{4} = opcod5;
}
+// Single precision, unary, non-predicated
+class ASuInp<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
+ bit opcod5, dag oops, dag iops, InstrItinClass itin,
+ string asm, list<dag> pattern>
+ : VFPXI<oops, iops, AddrModeNone, 4, IndexModeNone,
+ VFPUnaryFrm, itin, asm, "", pattern> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sm;
+
+ let Inst{31-28} = 0b1111;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
+ let Inst{27-23} = opcod1;
+ let Inst{21-20} = opcod2;
+ let Inst{19-16} = opcod3;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 0; // Single precision
+ let Inst{7-6} = opcod4;
+ let Inst{4} = opcod5;
+}
+
// Single precision unary, if no NEON. Same as ASuI except not available if
// NEON is enabled.
class ASuIn<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
@@ -1586,6 +1747,35 @@ class ASbI<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops, dag iops,
let Inst{4} = op4;
}
+// Single precision, binary, not predicated
+class ASbInp<bits<5> opcod1, bits<2> opcod2, bit opcod3, dag oops, dag iops,
+ InstrItinClass itin, string asm, list<dag> pattern>
+ : VFPXI<oops, iops, AddrModeNone, 4, IndexModeNone,
+ VFPBinaryFrm, itin, asm, "", pattern>
+{
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sn;
+ bits<5> Sm;
+
+ let Inst{31-28} = 0b1111;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{19-16} = Sn{4-1};
+ let Inst{7} = Sn{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
+ let Inst{27-23} = opcod1;
+ let Inst{21-20} = opcod2;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 0; // Single precision
+ let Inst{6} = opcod3;
+ let Inst{4} = 0;
+}
+
// Single precision binary, if no NEON. Same as ASbI except not available if
// NEON is enabled.
class ASbIn<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
@@ -1698,6 +1888,21 @@ class NeonXI<dag oops, dag iops, AddrMode am, IndexMode im, Format f,
let DecoderNamespace = "NEON";
}
+// Same as NeonI except it is not predicated
+class NeonInp<dag oops, dag iops, AddrMode am, IndexMode im, Format f,
+ InstrItinClass itin, string opc, string dt, string asm, string cstr,
+ list<dag> pattern>
+ : InstARM<am, 4, im, f, NeonDomain, cstr, itin> {
+ let OutOperandList = oops;
+ let InOperandList = iops;
+ let AsmString = !strconcat(opc, ".", dt, "\t", asm);
+ let Pattern = pattern;
+ list<Predicate> Predicates = [HasNEON];
+ let DecoderNamespace = "NEON";
+
+ let Inst{31-28} = 0b1111;
+}
+
class NLdSt<bit op23, bits<2> op21_20, bits<4> op11_8, bits<4> op7_4,
dag oops, dag iops, InstrItinClass itin,
string opc, string dt, string asm, string cstr, list<dag> pattern>
@@ -1817,6 +2022,35 @@ class N2V<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16,
let Inst{5} = Vm{4};
}
+// Same as N2V but not predicated.
+class N2Vnp<bits<2> op19_18, bits<2> op17_16, bits<3> op10_8, bit op7, bit op6,
+ dag oops, dag iops, InstrItinClass itin, string OpcodeStr,
+ string Dt, ValueType ResTy, ValueType OpTy, list<dag> pattern>
+ : NeonInp<oops, iops, AddrModeNone, IndexModeNone, N2RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vm", "", pattern> {
+ bits<5> Vd;
+ bits<5> Vm;
+
+ // Encode instruction operands
+ let Inst{22} = Vd{4};
+ let Inst{15-12} = Vd{3-0};
+ let Inst{5} = Vm{4};
+ let Inst{3-0} = Vm{3-0};
+
+ // Encode constant bits
+ let Inst{27-23} = 0b00111;
+ let Inst{21-20} = 0b11;
+ let Inst{19-18} = op19_18;
+ let Inst{17-16} = op17_16;
+ let Inst{11} = 0;
+ let Inst{10-8} = op10_8;
+ let Inst{7} = op7;
+ let Inst{6} = op6;
+ let Inst{4} = 0;
+
+ let DecoderNamespace = "NEON";
+}
+
// Same as N2V except it doesn't have a datatype suffix.
class N2VX<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16,
bits<5> op11_7, bit op6, bit op4,
@@ -1898,6 +2132,32 @@ class N3V<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6, bit op4,
let Inst{5} = Vm{4};
}
+class N3Vnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6,
+ bit op4, dag oops, dag iops,Format f, InstrItinClass itin,
+ string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
+ SDPatternOperator IntOp, bit Commutable, list<dag> pattern>
+ : NeonInp<oops, iops, AddrModeNone, IndexModeNone, f, itin, OpcodeStr,
+ Dt, "$Vd, $Vn, $Vm", "", pattern> {
+ bits<5> Vd;
+ bits<5> Vn;
+ bits<5> Vm;
+
+ // Encode instruction operands
+ let Inst{22} = Vd{4};
+ let Inst{15-12} = Vd{3-0};
+ let Inst{19-16} = Vn{3-0};
+ let Inst{7} = Vn{4};
+ let Inst{5} = Vm{4};
+ let Inst{3-0} = Vm{3-0};
+
+ // Encode constant bits
+ let Inst{27-23} = op27_23;
+ let Inst{21-20} = op21_20;
+ let Inst{11-8} = op11_8;
+ let Inst{6} = op6;
+ let Inst{4} = op4;
+}
+
class N3VLane32<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6,
bit op4, dag oops, dag iops, Format f, InstrItinClass itin,
string opc, string dt, string asm, string cstr,
diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp
index 80f0ec74376a..df867b49ab57 100644
--- a/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/lib/Target/ARM/ARMInstrInfo.cpp
@@ -22,6 +22,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -29,7 +30,7 @@
using namespace llvm;
ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
- : ARMBaseInstrInfo(STI), RI(*this, STI) {
+ : ARMBaseInstrInfo(STI), RI(STI) {
}
/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
@@ -106,29 +107,42 @@ namespace {
if (TM->getRelocationModel() != Reloc::PIC_)
return false;
- LLVMContext* Context = &MF.getFunction()->getContext();
- GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
- GlobalValue::ExternalLinkage, 0,
- "_GLOBAL_OFFSET_TABLE_");
- unsigned Id = AFI->createPICLabelUId();
- ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id);
- unsigned Align = TM->getDataLayout()->getPrefTypeAlignment(GV->getType());
+ LLVMContext *Context = &MF.getFunction()->getContext();
+ unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
+ unsigned PCAdj = TM->getSubtarget<ARMSubtarget>().isThumb() ? 4 : 8;
+ ARMConstantPoolValue *CPV = ARMConstantPoolSymbol::Create(
+ *Context, "_GLOBAL_OFFSET_TABLE_", ARMPCLabelIndex, PCAdj);
+
+ unsigned Align = TM->getDataLayout()
+ ->getPrefTypeAlignment(Type::getInt32PtrTy(*Context));
unsigned Idx = MF.getConstantPool()->getConstantPoolIndex(CPV, Align);
MachineBasicBlock &FirstMBB = MF.front();
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
- unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
+ unsigned TempReg =
+ MF.getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
unsigned Opc = TM->getSubtarget<ARMSubtarget>().isThumb2() ?
ARM::t2LDRpci : ARM::LDRcp;
const TargetInstrInfo &TII = *TM->getInstrInfo();
MachineInstrBuilder MIB = BuildMI(FirstMBB, MBBI, DL,
- TII.get(Opc), GlobalBaseReg)
+ TII.get(Opc), TempReg)
.addConstantPoolIndex(Idx);
if (Opc == ARM::LDRcp)
MIB.addImm(0);
AddDefaultPred(MIB);
+ // Fix the GOT address by adding pc.
+ unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
+ Opc = TM->getSubtarget<ARMSubtarget>().isThumb2() ? ARM::tPICADD
+ : ARM::PICADD;
+ MIB = BuildMI(FirstMBB, MBBI, DL, TII.get(Opc), GlobalBaseReg)
+ .addReg(TempReg)
+ .addImm(ARMPCLabelIndex);
+ if (Opc == ARM::PICADD)
+ AddDefaultPred(MIB);
+
+
return true;
}
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index 1bd174e34162..2042c0460932 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -71,6 +71,9 @@ def SDT_ARMTCRET : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDT_ARMBFI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
+def SDT_ARMVMAXNM : SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisFP<1>, SDTCisFP<2>]>;
+def SDT_ARMVMINNM : SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisFP<1>, SDTCisFP<2>]>;
+
def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
[SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>,
@@ -118,7 +121,8 @@ def ARMcall_nolink : SDNode<"ARMISD::CALL_NOLINK", SDT_ARMcall,
def ARMretflag : SDNode<"ARMISD::RET_FLAG", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
-
+def ARMintretflag : SDNode<"ARMISD::INTRET_FLAG", SDT_ARMcall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def ARMcmov : SDNode<"ARMISD::CMOV", SDT_ARMCMov,
[SDNPInGlue]>;
@@ -162,8 +166,6 @@ def ARMeh_sjlj_longjmp: SDNode<"ARMISD::EH_SJLJ_LONGJMP",
SDT_ARMEH_SJLJ_Longjmp,
[SDNPHasChain, SDNPSideEffect]>;
-def ARMMemBarrier : SDNode<"ARMISD::MEMBARRIER", SDT_ARMMEMBARRIER,
- [SDNPHasChain, SDNPSideEffect]>;
def ARMMemBarrierMCR : SDNode<"ARMISD::MEMBARRIER_MCR", SDT_ARMMEMBARRIER,
[SDNPHasChain, SDNPSideEffect]>;
def ARMPreload : SDNode<"ARMISD::PRELOAD", SDT_ARMPREFETCH,
@@ -174,9 +176,11 @@ def ARMrbit : SDNode<"ARMISD::RBIT", SDTIntUnaryOp>;
def ARMtcret : SDNode<"ARMISD::TC_RETURN", SDT_ARMTCRET,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
-
def ARMbfi : SDNode<"ARMISD::BFI", SDT_ARMBFI>;
+def ARMvmaxnm : SDNode<"ARMISD::VMAXNM", SDT_ARMVMAXNM, []>;
+def ARMvminnm : SDNode<"ARMISD::VMINNM", SDT_ARMVMINNM, []>;
+
//===----------------------------------------------------------------------===//
// ARM Instruction Predicate Definitions.
//
@@ -189,11 +193,18 @@ def HasV5TE : Predicate<"Subtarget->hasV5TEOps()">,
def HasV6 : Predicate<"Subtarget->hasV6Ops()">,
AssemblerPredicate<"HasV6Ops", "armv6">;
def NoV6 : Predicate<"!Subtarget->hasV6Ops()">;
+def HasV6M : Predicate<"Subtarget->hasV6MOps()">,
+ AssemblerPredicate<"HasV6MOps",
+ "armv6m or armv6t2">;
def HasV6T2 : Predicate<"Subtarget->hasV6T2Ops()">,
AssemblerPredicate<"HasV6T2Ops", "armv6t2">;
def NoV6T2 : Predicate<"!Subtarget->hasV6T2Ops()">;
def HasV7 : Predicate<"Subtarget->hasV7Ops()">,
AssemblerPredicate<"HasV7Ops", "armv7">;
+def HasV8 : Predicate<"Subtarget->hasV8Ops()">,
+ AssemblerPredicate<"HasV8Ops", "armv8">;
+def PreV8 : Predicate<"!Subtarget->hasV8Ops()">,
+ AssemblerPredicate<"!HasV8Ops", "armv7 or earlier">;
def NoVFP : Predicate<"!Subtarget->hasVFP2()">;
def HasVFP2 : Predicate<"Subtarget->hasVFP2()">,
AssemblerPredicate<"FeatureVFP2", "VFP2">;
@@ -201,14 +212,23 @@ def HasVFP3 : Predicate<"Subtarget->hasVFP3()">,
AssemblerPredicate<"FeatureVFP3", "VFP3">;
def HasVFP4 : Predicate<"Subtarget->hasVFP4()">,
AssemblerPredicate<"FeatureVFP4", "VFP4">;
+def HasDPVFP : Predicate<"!Subtarget->isFPOnlySP()">,
+ AssemblerPredicate<"!FeatureVFPOnlySP",
+ "double precision VFP">;
+def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">,
+ AssemblerPredicate<"FeatureFPARMv8", "FPARMv8">;
def HasNEON : Predicate<"Subtarget->hasNEON()">,
AssemblerPredicate<"FeatureNEON", "NEON">;
+def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
+ AssemblerPredicate<"FeatureCrypto", "crypto">;
+def HasCRC : Predicate<"Subtarget->hasCRC()">,
+ AssemblerPredicate<"FeatureCRC", "crc">;
def HasFP16 : Predicate<"Subtarget->hasFP16()">,
AssemblerPredicate<"FeatureFP16","half-float">;
def HasDivide : Predicate<"Subtarget->hasDivide()">,
- AssemblerPredicate<"FeatureHWDiv", "divide">;
+ AssemblerPredicate<"FeatureHWDiv", "divide in THUMB">;
def HasDivideInARM : Predicate<"Subtarget->hasDivideInARMMode()">,
- AssemblerPredicate<"FeatureHWDivARM">;
+ AssemblerPredicate<"FeatureHWDivARM", "divide in ARM">;
def HasT2ExtractPack : Predicate<"Subtarget->hasT2ExtractPack()">,
AssemblerPredicate<"FeatureT2XtPk",
"pack/extract">;
@@ -233,10 +253,10 @@ def IsThumb2 : Predicate<"Subtarget->isThumb2()">,
AssemblerPredicate<"ModeThumb,FeatureThumb2",
"thumb2">;
def IsMClass : Predicate<"Subtarget->isMClass()">,
- AssemblerPredicate<"FeatureMClass", "armv7m">;
-def IsARClass : Predicate<"!Subtarget->isMClass()">,
+ AssemblerPredicate<"FeatureMClass", "armv*m">;
+def IsNotMClass : Predicate<"!Subtarget->isMClass()">,
AssemblerPredicate<"!FeatureMClass",
- "armv7a/r">;
+ "!armv*m">;
def IsARM : Predicate<"!Subtarget->isThumb()">,
AssemblerPredicate<"!ModeThumb", "arm-mode">;
def IsIOS : Predicate<"Subtarget->isTargetIOS()">;
@@ -258,7 +278,9 @@ def UseMulOps : Predicate<"Subtarget->useMulOps()">;
def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion =="
" FPOpFusion::Fast) && "
"!Subtarget->isTargetDarwin()">;
-def DontUseFusedMAC : Predicate<"!Subtarget->hasVFP4() || "
+def DontUseFusedMAC : Predicate<"!(TM.Options.AllowFPOpFusion =="
+ " FPOpFusion::Fast &&"
+ " Subtarget->hasVFP4()) || "
"Subtarget->isTargetDarwin()">;
// VGETLNi32 is microcoded on Swift - prefer VMOV.
@@ -275,8 +297,8 @@ def HasSlowVDUP32 : Predicate<"Subtarget->isSwift()">;
def UseVMOVSR : Predicate<"Subtarget->isCortexA9() || !Subtarget->useNEONForSinglePrecisionFP()">;
def DontUseVMOVSR : Predicate<"!Subtarget->isCortexA9() && Subtarget->useNEONForSinglePrecisionFP()">;
-def IsLE : Predicate<"TLI.isLittleEndian()">;
-def IsBE : Predicate<"TLI.isBigEndian()">;
+def IsLE : Predicate<"getTargetLowering()->isLittleEndian()">;
+def IsBE : Predicate<"getTargetLowering()->isBigEndian()">;
//===----------------------------------------------------------------------===//
// ARM Flag Definitions.
@@ -456,7 +478,7 @@ def AdrLabelAsmOperand : AsmOperandClass { let Name = "AdrLabel"; }
def adrlabel : Operand<i32> {
let EncoderMethod = "getAdrLabelOpValue";
let ParserMatchClass = AdrLabelAsmOperand;
- let PrintMethod = "printAdrLabelOperand";
+ let PrintMethod = "printAdrLabelOperand<0>";
}
def neon_vcvt_imm32 : Operand<i32> {
@@ -581,17 +603,6 @@ def imm0_1 : Operand<i32> { let ParserMatchClass = Imm0_1AsmOperand; }
def Imm0_3AsmOperand: ImmAsmOperand { let Name = "Imm0_3"; }
def imm0_3 : Operand<i32> { let ParserMatchClass = Imm0_3AsmOperand; }
-/// imm0_4 predicate - Immediate in the range [0,4].
-def Imm0_4AsmOperand : ImmAsmOperand
-{
- let Name = "Imm0_4";
- let DiagnosticType = "ImmRange0_4";
-}
-def imm0_4 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 5; }]> {
- let ParserMatchClass = Imm0_4AsmOperand;
- let DecoderMethod = "DecodeImm0_4";
-}
-
/// imm0_7 predicate - Immediate in the range [0,7].
def Imm0_7AsmOperand: ImmAsmOperand { let Name = "Imm0_7"; }
def imm0_7 : Operand<i32>, ImmLeaf<i32, [{
@@ -671,6 +682,15 @@ def imm0_63 : Operand<i32>, ImmLeaf<i32, [{
let ParserMatchClass = Imm0_63AsmOperand;
}
+/// imm0_239 predicate - Immediate in the range [0,239].
+def Imm0_239AsmOperand : ImmAsmOperand {
+ let Name = "Imm0_239";
+ let DiagnosticType = "ImmRange0_239";
+}
+def imm0_239 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 240; }]> {
+ let ParserMatchClass = Imm0_239AsmOperand;
+}
+
/// imm0_255 predicate - Immediate in the range [0,255].
def Imm0_255AsmOperand : ImmAsmOperand { let Name = "Imm0_255"; }
def imm0_255 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 256; }]> {
@@ -702,6 +722,11 @@ def imm0_65535_expr : Operand<i32> {
let ParserMatchClass = Imm0_65535ExprAsmOperand;
}
+def Imm256_65535ExprAsmOperand: ImmAsmOperand { let Name = "Imm256_65535Expr"; }
+def imm256_65535_expr : Operand<i32> {
+ let ParserMatchClass = Imm256_65535ExprAsmOperand;
+}
+
/// imm24b - True if the 32-bit immediate is encodable in 24 bits.
def Imm24bitAsmOperand: ImmAsmOperand { let Name = "Imm24bit"; }
def imm24b : Operand<i32>, ImmLeaf<i32, [{
@@ -1007,11 +1032,6 @@ def p_imm : Operand<i32> {
let DecoderMethod = "DecodeCoprocessor";
}
-def pf_imm : Operand<i32> {
- let PrintMethod = "printPImmediate";
- let ParserMatchClass = CoprocNumAsmOperand;
-}
-
def CoprocRegAsmOperand : AsmOperandClass {
let Name = "CoprocReg";
let ParserMethod = "parseCoprocRegOperand";
@@ -1327,7 +1347,7 @@ class AI_ext_rrot<bits<8> opcod, string opc, PatFrag opnode>
: AExtI<opcod, (outs GPRnopc:$Rd), (ins GPRnopc:$Rm, rot_imm:$rot),
IIC_iEXTr, opc, "\t$Rd, $Rm$rot",
[(set GPRnopc:$Rd, (opnode (rotr GPRnopc:$Rm, rot_imm:$rot)))]>,
- Requires<[IsARM, HasV6]> {
+ Requires<[IsARM, HasV6]>, Sched<[WriteALUsi]> {
bits<4> Rd;
bits<4> Rm;
bits<2> rot;
@@ -1340,11 +1360,11 @@ class AI_ext_rrot<bits<8> opcod, string opc, PatFrag opnode>
class AI_ext_rrot_np<bits<8> opcod, string opc>
: AExtI<opcod, (outs GPRnopc:$Rd), (ins GPRnopc:$Rm, rot_imm:$rot),
IIC_iEXTr, opc, "\t$Rd, $Rm$rot", []>,
- Requires<[IsARM, HasV6]> {
+ Requires<[IsARM, HasV6]>, Sched<[WriteALUsi]> {
bits<2> rot;
let Inst{19-16} = 0b1111;
let Inst{11-10} = rot;
-}
+ }
/// AI_exta_rrot - A binary operation with two forms: one whose operand is a
/// register and one whose operand is a register rotated by 8/16/24.
@@ -1353,7 +1373,7 @@ class AI_exta_rrot<bits<8> opcod, string opc, PatFrag opnode>
IIC_iEXTAr, opc, "\t$Rd, $Rn, $Rm$rot",
[(set GPRnopc:$Rd, (opnode GPR:$Rn,
(rotr GPRnopc:$Rm, rot_imm:$rot)))]>,
- Requires<[IsARM, HasV6]> {
+ Requires<[IsARM, HasV6]>, Sched<[WriteALUsr]> {
bits<4> Rd;
bits<4> Rm;
bits<4> Rn;
@@ -1368,7 +1388,7 @@ class AI_exta_rrot<bits<8> opcod, string opc, PatFrag opnode>
class AI_exta_rrot_np<bits<8> opcod, string opc>
: AExtI<opcod, (outs GPRnopc:$Rd), (ins GPR:$Rn, GPRnopc:$Rm, rot_imm:$rot),
IIC_iEXTAr, opc, "\t$Rd, $Rn, $Rm$rot", []>,
- Requires<[IsARM, HasV6]> {
+ Requires<[IsARM, HasV6]>, Sched<[WriteALUsr]> {
bits<4> Rn;
bits<2> rot;
let Inst{19-16} = Rn;
@@ -1664,53 +1684,11 @@ PseudoInst<(outs), (ins i32imm:$amt, pred:$p), NoItinerary,
[(ARMcallseq_start timm:$amt)]>;
}
-// Atomic pseudo-insts which will be lowered to ldrexd/strexd loops.
-// (These pseudos use a hand-written selection code).
-let usesCustomInserter = 1, Defs = [CPSR], mayLoad = 1, mayStore = 1 in {
-def ATOMOR6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-def ATOMXOR6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-def ATOMADD6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-def ATOMSUB6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-def ATOMNAND6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-def ATOMAND6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-def ATOMSWAP6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-def ATOMCMPXCHG6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$cmp1, GPR:$cmp2,
- GPR:$set1, GPR:$set2),
- NoItinerary, []>;
-def ATOMMIN6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-def ATOMUMIN6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-def ATOMMAX6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-def ATOMUMAX6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2),
- NoItinerary, []>;
-}
-
-def HINT : AI<(outs), (ins imm0_4:$imm), MiscFrm, NoItinerary,
+def HINT : AI<(outs), (ins imm0_239:$imm), MiscFrm, NoItinerary,
"hint", "\t$imm", []>, Requires<[IsARM, HasV6]> {
- bits<3> imm;
- let Inst{27-3} = 0b0011001000001111000000000;
- let Inst{2-0} = imm;
+ bits<8> imm;
+ let Inst{27-8} = 0b00110010000011110000;
+ let Inst{7-0} = imm;
}
def : InstAlias<"nop$p", (HINT 0, pred:$p)>, Requires<[IsARM, HasV6T2]>;
@@ -1718,6 +1696,9 @@ def : InstAlias<"yield$p", (HINT 1, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def : InstAlias<"wfe$p", (HINT 2, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def : InstAlias<"wfi$p", (HINT 3, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def : InstAlias<"sev$p", (HINT 4, pred:$p)>, Requires<[IsARM, HasV6T2]>;
+def : InstAlias<"sevl$p", (HINT 5, pred:$p)>, Requires<[IsARM, HasV8]>;
+
+def : Pat<(int_arm_sevl), (HINT 5)>;
def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel",
"\t$Rd, $Rn, $Rm", []>, Requires<[IsARM, HasV6]> {
@@ -1735,12 +1716,23 @@ def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel",
// The 16-bit operand $val can be used by a debugger to store more information
// about the breakpoint.
-def BKPT : AI<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
- "bkpt", "\t$val", []>, Requires<[IsARM]> {
+def BKPT : AInoP<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
+ "bkpt", "\t$val", []>, Requires<[IsARM]> {
bits<16> val;
let Inst{3-0} = val{3-0};
let Inst{19-8} = val{15-4};
let Inst{27-20} = 0b00010010;
+ let Inst{31-28} = 0xe; // AL
+ let Inst{7-4} = 0b0111;
+}
+
+def HLT : AInoP<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
+ "hlt", "\t$val", []>, Requires<[IsARM, HasV8]> {
+ bits<16> val;
+ let Inst{3-0} = val{3-0};
+ let Inst{19-8} = val{15-4};
+ let Inst{27-20} = 0b00010000;
+ let Inst{31-28} = 0xe; // AL
let Inst{7-4} = 0b0111;
}
@@ -1780,7 +1772,8 @@ multiclass APreLoad<bits<1> read, bits<1> data, string opc> {
def i12 : AXI<(outs), (ins addrmode_imm12:$addr), MiscFrm, IIC_Preload,
!strconcat(opc, "\t$addr"),
- [(ARMPreload addrmode_imm12:$addr, (i32 read), (i32 data))]> {
+ [(ARMPreload addrmode_imm12:$addr, (i32 read), (i32 data))]>,
+ Sched<[WritePreLd]> {
bits<4> Rt;
bits<17> addr;
let Inst{31-26} = 0b111101;
@@ -1796,7 +1789,8 @@ multiclass APreLoad<bits<1> read, bits<1> data, string opc> {
def rs : AXI<(outs), (ins ldst_so_reg:$shift), MiscFrm, IIC_Preload,
!strconcat(opc, "\t$shift"),
- [(ARMPreload ldst_so_reg:$shift, (i32 read), (i32 data))]> {
+ [(ARMPreload ldst_so_reg:$shift, (i32 read), (i32 data))]>,
+ Sched<[WritePreLd]> {
bits<17> shift;
let Inst{31-26} = 0b111101;
let Inst{25} = 1; // 1 for register form
@@ -1816,7 +1810,7 @@ defm PLDW : APreLoad<0, 1, "pldw">, Requires<[IsARM,HasV7,HasMP]>;
defm PLI : APreLoad<1, 0, "pli">, Requires<[IsARM,HasV7]>;
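For reference, a hedged C-level sketch of how these preload forms are usually reached (the builtin is the generic GCC/Clang one, not something defined in this file): a read prefetch is a candidate for pld, and a write prefetch for pldw on cores with the MP extension, per the Requires lines above.

    void warm_cache(const int *src, int *dst) {
        __builtin_prefetch(src);      /* read prefetch: candidate for pld       */
        __builtin_prefetch(dst, 1);   /* write prefetch: pldw where HasMP holds */
    }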
def SETEND : AXI<(outs), (ins setend_op:$end), MiscFrm, NoItinerary,
- "setend\t$end", []>, Requires<[IsARM]> {
+ "setend\t$end", []>, Requires<[IsARM]>, Deprecated<HasV8Ops> {
bits<1> end;
let Inst{31-10} = 0b1111000100000001000000;
let Inst{9} = end;
@@ -1863,7 +1857,8 @@ def TRAP : AXI<(outs), (ins), MiscFrm, NoItinerary,
let isNotDuplicable = 1 in {
def PICADD : ARMPseudoInst<(outs GPR:$dst), (ins GPR:$a, pclabel:$cp, pred:$p),
4, IIC_iALUr,
- [(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>;
+ [(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>,
+ Sched<[WriteALU, ReadALU]>;
let AddedComplexity = 10 in {
def PICLDR : ARMPseudoInst<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
@@ -1923,11 +1918,11 @@ def ADR : AI1<{0,?,?,0}, (outs GPR:$Rd), (ins adrlabel:$label),
let hasSideEffects = 1 in {
def LEApcrel : ARMPseudoInst<(outs GPR:$Rd), (ins i32imm:$label, pred:$p),
- 4, IIC_iALUi, []>;
+ 4, IIC_iALUi, []>, Sched<[WriteALU, ReadALU]>;
def LEApcrelJT : ARMPseudoInst<(outs GPR:$Rd),
(ins i32imm:$label, nohash_imm:$id, pred:$p),
- 4, IIC_iALUi, []>;
+ 4, IIC_iALUi, []>, Sched<[WriteALU, ReadALU]>;
}
//===----------------------------------------------------------------------===//
@@ -1938,16 +1933,22 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
// ARMV4T and above
def BX_RET : AI<(outs), (ins), BrMiscFrm, IIC_Br,
"bx", "\tlr", [(ARMretflag)]>,
- Requires<[IsARM, HasV4T]> {
+ Requires<[IsARM, HasV4T]>, Sched<[WriteBr]> {
let Inst{27-0} = 0b0001001011111111111100011110;
}
// ARMV4 only
def MOVPCLR : AI<(outs), (ins), BrMiscFrm, IIC_Br,
"mov", "\tpc, lr", [(ARMretflag)]>,
- Requires<[IsARM, NoV4T]> {
+ Requires<[IsARM, NoV4T]>, Sched<[WriteBr]> {
let Inst{27-0} = 0b0001101000001111000000001110;
}
+
+ // Exception return: N.b. doesn't set CPSR as far as we're concerned (it sets
+ // the user-space one).
+ def SUBS_PC_LR : ARMPseudoInst<(outs), (ins i32imm:$offset, pred:$p),
+ 4, IIC_Br,
+ [(ARMintretflag imm:$offset)]>;
}
// Indirect branches
@@ -1955,7 +1956,7 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
// ARMV4T and above
def BX : AXI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br, "bx\t$dst",
[(brind GPR:$dst)]>,
- Requires<[IsARM, HasV4T]> {
+ Requires<[IsARM, HasV4T]>, Sched<[WriteBr]> {
bits<4> dst;
let Inst{31-4} = 0b1110000100101111111111110001;
let Inst{3-0} = dst;
@@ -1963,7 +1964,7 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def BX_pred : AI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br,
"bx", "\t$dst", [/* pattern left blank */]>,
- Requires<[IsARM, HasV4T]> {
+ Requires<[IsARM, HasV4T]>, Sched<[WriteBr]> {
bits<4> dst;
let Inst{27-4} = 0b000100101111111111110001;
let Inst{3-0} = dst;
@@ -1980,7 +1981,7 @@ let isCall = 1,
def BL : ABXI<0b1011, (outs), (ins bl_target:$func),
IIC_Br, "bl\t$func",
[(ARMcall tglobaladdr:$func)]>,
- Requires<[IsARM]> {
+ Requires<[IsARM]>, Sched<[WriteBrL]> {
let Inst{31-28} = 0b1110;
bits<24> func;
let Inst{23-0} = func;
@@ -1990,7 +1991,7 @@ let isCall = 1,
def BL_pred : ABI<0b1011, (outs), (ins bl_target:$func),
IIC_Br, "bl", "\t$func",
[(ARMcall_pred tglobaladdr:$func)]>,
- Requires<[IsARM]> {
+ Requires<[IsARM]>, Sched<[WriteBrL]> {
bits<24> func;
let Inst{23-0} = func;
let DecoderMethod = "DecodeBranchImmInstruction";
@@ -2000,7 +2001,7 @@ let isCall = 1,
def BLX : AXI<(outs), (ins GPR:$func), BrMiscFrm,
IIC_Br, "blx\t$func",
[(ARMcall GPR:$func)]>,
- Requires<[IsARM, HasV5T]> {
+ Requires<[IsARM, HasV5T]>, Sched<[WriteBrL]> {
bits<4> func;
let Inst{31-4} = 0b1110000100101111111111110011;
let Inst{3-0} = func;
@@ -2009,7 +2010,7 @@ let isCall = 1,
def BLX_pred : AI<(outs), (ins GPR:$func), BrMiscFrm,
IIC_Br, "blx", "\t$func",
[(ARMcall_pred GPR:$func)]>,
- Requires<[IsARM, HasV5T]> {
+ Requires<[IsARM, HasV5T]>, Sched<[WriteBrL]> {
bits<4> func;
let Inst{27-4} = 0b000100101111111111110011;
let Inst{3-0} = func;
@@ -2019,18 +2020,18 @@ let isCall = 1,
// Note: Restrict $func to the tGPR regclass to prevent it being in LR.
def BX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, HasV4T]>;
+ Requires<[IsARM, HasV4T]>, Sched<[WriteBr]>;
// ARMv4
def BMOVPCRX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, NoV4T]>;
+ Requires<[IsARM, NoV4T]>, Sched<[WriteBr]>;
// mov lr, pc; b if callee is marked noreturn to avoid confusing the
// return stack predictor.
def BMOVPCB_CALL : ARMPseudoInst<(outs), (ins bl_target:$func),
8, IIC_Br, [(ARMcall_nolink tglobaladdr:$func)]>,
- Requires<[IsARM]>;
+ Requires<[IsARM]>, Sched<[WriteBr]>;
}
let isBranch = 1, isTerminator = 1 in {
@@ -2038,7 +2039,8 @@ let isBranch = 1, isTerminator = 1 in {
// a two-value operand where a dag node expects two operands. :(
def Bcc : ABI<0b1010, (outs), (ins br_target:$target),
IIC_Br, "b", "\t$target",
- [/*(ARMbrcond bb:$target, imm:$cc, CCR:$ccr)*/]> {
+ [/*(ARMbrcond bb:$target, imm:$cc, CCR:$ccr)*/]>,
+ Sched<[WriteBr]> {
bits<24> target;
let Inst{23-0} = target;
let DecoderMethod = "DecodeBranchImmInstruction";
@@ -2051,25 +2053,27 @@ let isBranch = 1, isTerminator = 1 in {
// should be sufficient.
// FIXME: Is B really a Barrier? That doesn't seem right.
def B : ARMPseudoExpand<(outs), (ins br_target:$target), 4, IIC_Br,
- [(br bb:$target)], (Bcc br_target:$target, (ops 14, zero_reg))>;
+ [(br bb:$target)], (Bcc br_target:$target, (ops 14, zero_reg))>,
+ Sched<[WriteBr]>;
let isNotDuplicable = 1, isIndirectBranch = 1 in {
def BR_JTr : ARMPseudoInst<(outs),
(ins GPR:$target, i32imm:$jt, i32imm:$id),
0, IIC_Br,
- [(ARMbrjt GPR:$target, tjumptable:$jt, imm:$id)]>;
+ [(ARMbrjt GPR:$target, tjumptable:$jt, imm:$id)]>,
+ Sched<[WriteBr]>;
// FIXME: This shouldn't use the generic "addrmode2," but rather be split
// into i12 and rs suffixed versions.
def BR_JTm : ARMPseudoInst<(outs),
(ins addrmode2:$target, i32imm:$jt, i32imm:$id),
0, IIC_Br,
[(ARMbrjt (i32 (load addrmode2:$target)), tjumptable:$jt,
- imm:$id)]>;
+ imm:$id)]>, Sched<[WriteBrTbl]>;
def BR_JTadd : ARMPseudoInst<(outs),
(ins GPR:$target, GPR:$idx, i32imm:$jt, i32imm:$id),
0, IIC_Br,
[(ARMbrjt (add GPR:$target, GPR:$idx), tjumptable:$jt,
- imm:$id)]>;
+ imm:$id)]>, Sched<[WriteBrTbl]>;
} // isNotDuplicable = 1, isIndirectBranch = 1
} // isBarrier = 1
@@ -2078,7 +2082,7 @@ let isBranch = 1, isTerminator = 1 in {
// BLX (immediate)
def BLXi : AXI<(outs), (ins blx_target:$target), BrMiscFrm, NoItinerary,
"blx\t$target", []>,
- Requires<[IsARM, HasV5T]> {
+ Requires<[IsARM, HasV5T]>, Sched<[WriteBrL]> {
let Inst{31-25} = 0b1111101;
bits<25> target;
let Inst{23-0} = target{24-1};
@@ -2087,7 +2091,7 @@ def BLXi : AXI<(outs), (ins blx_target:$target), BrMiscFrm, NoItinerary,
// Branch and Exchange Jazelle
def BXJ : ABI<0b0001, (outs), (ins GPR:$func), NoItinerary, "bxj", "\t$func",
- [/* pattern left blank */]> {
+ [/* pattern left blank */]>, Sched<[WriteBr]> {
bits<4> func;
let Inst{23-20} = 0b0010;
let Inst{19-8} = 0xfff;
@@ -2098,18 +2102,20 @@ def BXJ : ABI<0b0001, (outs), (ins GPR:$func), NoItinerary, "bxj", "\t$func",
// Tail calls.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
- def TCRETURNdi : PseudoInst<(outs), (ins i32imm:$dst), IIC_Br, []>;
+ def TCRETURNdi : PseudoInst<(outs), (ins i32imm:$dst), IIC_Br, []>,
+ Sched<[WriteBr]>;
- def TCRETURNri : PseudoInst<(outs), (ins tcGPR:$dst), IIC_Br, []>;
+ def TCRETURNri : PseudoInst<(outs), (ins tcGPR:$dst), IIC_Br, []>,
+ Sched<[WriteBr]>;
def TAILJMPd : ARMPseudoExpand<(outs), (ins br_target:$dst),
4, IIC_Br, [],
(Bcc br_target:$dst, (ops 14, zero_reg))>,
- Requires<[IsARM]>;
+ Requires<[IsARM]>, Sched<[WriteBr]>;
def TAILJMPr : ARMPseudoExpand<(outs), (ins tcGPR:$dst),
4, IIC_Br, [],
- (BX GPR:$dst)>,
+ (BX GPR:$dst)>, Sched<[WriteBr]>,
Requires<[IsARM]>;
}
@@ -2123,7 +2129,8 @@ def SMC : ABI<0b0001, (outs), (ins imm0_15:$opt), NoItinerary, "smc", "\t$opt",
// Supervisor Call (Software Interrupt)
let isCall = 1, Uses = [SP] in {
-def SVC : ABI<0b1111, (outs), (ins imm24b:$svc), IIC_Br, "svc", "\t$svc", []> {
+def SVC : ABI<0b1111, (outs), (ins imm24b:$svc), IIC_Br, "svc", "\t$svc", []>,
+ Sched<[WriteBr]> {
bits<24> svc;
let Inst{23-0} = svc;
}
@@ -2272,6 +2279,13 @@ def LDRD : AI3ld<0b1101, 0, (outs GPR:$Rd, GPR:$dst2),
[]>, Requires<[IsARM, HasV5TE]>;
}
+def LDA : AIldracq<0b00, (outs GPR:$Rt), (ins addr_offset_none:$addr),
+ NoItinerary, "lda", "\t$Rt, $addr", []>;
+def LDAB : AIldracq<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
+ NoItinerary, "ldab", "\t$Rt, $addr", []>;
+def LDAH : AIldracq<0b11, (outs GPR:$Rt), (ins addr_offset_none:$addr),
+ NoItinerary, "ldah", "\t$Rt, $addr", []>;
+
// Indexed loads
multiclass AI2_ldridx<bit isByte, string opc,
InstrItinClass iii, InstrItinClass iir> {
@@ -2284,7 +2298,6 @@ multiclass AI2_ldridx<bit isByte, string opc,
let Inst{19-16} = addr{16-13};
let Inst{11-0} = addr{11-0};
let DecoderMethod = "DecodeLDRPreImm";
- let AsmMatchConverter = "cvtLdWriteBackRegAddrModeImm12";
}
def _PRE_REG : AI2ldstidx<1, isByte, 1, (outs GPR:$Rt, GPR:$Rn_wb),
@@ -2297,7 +2310,6 @@ multiclass AI2_ldridx<bit isByte, string opc,
let Inst{11-0} = addr{11-0};
let Inst{4} = 0;
let DecoderMethod = "DecodeLDRPreReg";
- let AsmMatchConverter = "cvtLdWriteBackRegAddrMode2";
}
def _POST_REG : AI2ldstidx<1, isByte, 0, (outs GPR:$Rt, GPR:$Rn_wb),
@@ -2355,7 +2367,6 @@ multiclass AI3_ldridx<bits<4> op, string opc, InstrItinClass itin> {
let Inst{19-16} = addr{12-9}; // Rn
let Inst{11-8} = addr{7-4}; // imm7_4/zero
let Inst{3-0} = addr{3-0}; // imm3_0/Rm
- let AsmMatchConverter = "cvtLdWriteBackRegAddrMode3";
let DecoderMethod = "DecodeAddrMode3Instruction";
}
def _POST : AI3ldstidx<op, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
@@ -2391,7 +2402,6 @@ def LDRD_PRE : AI3ldstidx<0b1101, 0, 1, (outs GPR:$Rt, GPR:$Rt2, GPR:$Rn_wb),
let Inst{11-8} = addr{7-4}; // imm7_4/zero
let Inst{3-0} = addr{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
- let AsmMatchConverter = "cvtLdrdPre";
}
def LDRD_POST: AI3ldstidx<0b1101, 0, 0, (outs GPR:$Rt, GPR:$Rt2, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am3offset:$offset),
@@ -2494,7 +2504,6 @@ multiclass AI3ldrT<bits<4> op, string opc> {
let Inst{22} = 1;
let Inst{11-8} = offset{7-4};
let Inst{3-0} = offset{3-0};
- let AsmMatchConverter = "cvtLdExtTWriteBackImm";
}
def r : AI3ldstidxT<op, 1, (outs GPRnopc:$Rt, GPRnopc:$base_wb),
(ins addr_offset_none:$addr, postidx_reg:$Rm),
@@ -2506,7 +2515,6 @@ multiclass AI3ldrT<bits<4> op, string opc> {
let Inst{11-8} = 0;
let Unpredictable{11-8} = 0b1111;
let Inst{3-0} = Rm{3-0};
- let AsmMatchConverter = "cvtLdExtTWriteBackReg";
let DecoderMethod = "DecodeLDR";
}
}
@@ -2544,7 +2552,6 @@ multiclass AI2_stridx<bit isByte, string opc,
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = addr{16-13}; // Rn
let Inst{11-0} = addr{11-0}; // imm12
- let AsmMatchConverter = "cvtStWriteBackRegAddrModeImm12";
let DecoderMethod = "DecodeSTRPreImm";
}
@@ -2558,7 +2565,6 @@ multiclass AI2_stridx<bit isByte, string opc,
let Inst{19-16} = addr{16-13}; // Rn
let Inst{11-0} = addr{11-0};
let Inst{4} = 0; // Inst{4} = 0
- let AsmMatchConverter = "cvtStWriteBackRegAddrMode2";
let DecoderMethod = "DecodeSTRPreReg";
}
def _POST_REG : AI2ldstidx<0, isByte, 0, (outs GPR:$Rn_wb),
@@ -2667,7 +2673,6 @@ def STRH_PRE : AI3ldstidx<0b1011, 0, 1, (outs GPR:$Rn_wb),
let Inst{19-16} = addr{12-9}; // Rn
let Inst{11-8} = addr{7-4}; // imm7_4/zero
let Inst{3-0} = addr{3-0}; // imm3_0/Rm
- let AsmMatchConverter = "cvtStWriteBackRegAddrMode3";
let DecoderMethod = "DecodeAddrMode3Instruction";
}
@@ -2701,7 +2706,6 @@ def STRD_PRE : AI3ldstidx<0b1111, 0, 1, (outs GPR:$Rn_wb),
let Inst{11-8} = addr{7-4}; // imm7_4/zero
let Inst{3-0} = addr{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
- let AsmMatchConverter = "cvtStrdPre";
}
def STRD_POST: AI3ldstidx<0b1111, 0, 0, (outs GPR:$Rn_wb),
@@ -2808,7 +2812,6 @@ multiclass AI3strT<bits<4> op, string opc> {
let Inst{22} = 1;
let Inst{11-8} = offset{7-4};
let Inst{3-0} = offset{3-0};
- let AsmMatchConverter = "cvtStExtTWriteBackImm";
}
def r : AI3ldstidxT<op, 0, (outs GPR:$base_wb),
(ins GPR:$Rt, addr_offset_none:$addr, postidx_reg:$Rm),
@@ -2819,13 +2822,18 @@ multiclass AI3strT<bits<4> op, string opc> {
let Inst{22} = 0;
let Inst{11-8} = 0;
let Inst{3-0} = Rm{3-0};
- let AsmMatchConverter = "cvtStExtTWriteBackReg";
}
}
defm STRHT : AI3strT<0b1011, "strht">;
+def STL : AIstrrel<0b00, (outs), (ins GPR:$Rt, addr_offset_none:$addr),
+ NoItinerary, "stl", "\t$Rt, $addr", []>;
+def STLB : AIstrrel<0b10, (outs), (ins GPR:$Rt, addr_offset_none:$addr),
+ NoItinerary, "stlb", "\t$Rt, $addr", []>;
+def STLH : AIstrrel<0b11, (outs), (ins GPR:$Rt, addr_offset_none:$addr),
+ NoItinerary, "stlh", "\t$Rt, $addr", []>;
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
@@ -2955,7 +2963,7 @@ defm sysSTM : arm_ldst_mult<"stm", " ^", 0, 1, LdStMulFrm, IIC_iStore_m,
let neverHasSideEffects = 1 in
def MOVr : AsI1<0b1101, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMOVr,
- "mov", "\t$Rd, $Rm", []>, UnaryDP {
+ "mov", "\t$Rd, $Rm", []>, UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<4> Rm;
@@ -2969,7 +2977,7 @@ def MOVr : AsI1<0b1101, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMOVr,
// A version for the smaller set of tail call registers.
let neverHasSideEffects = 1 in
def MOVr_TC : AsI1<0b1101, (outs tcGPR:$Rd), (ins tcGPR:$Rm), DPFrm,
- IIC_iMOVr, "mov", "\t$Rd, $Rm", []>, UnaryDP {
+ IIC_iMOVr, "mov", "\t$Rd, $Rm", []>, UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<4> Rm;
@@ -2982,7 +2990,8 @@ def MOVr_TC : AsI1<0b1101, (outs tcGPR:$Rd), (ins tcGPR:$Rm), DPFrm,
def MOVsr : AsI1<0b1101, (outs GPRnopc:$Rd), (ins shift_so_reg_reg:$src),
DPSoRegRegFrm, IIC_iMOVsr,
"mov", "\t$Rd, $src",
- [(set GPRnopc:$Rd, shift_so_reg_reg:$src)]>, UnaryDP {
+ [(set GPRnopc:$Rd, shift_so_reg_reg:$src)]>, UnaryDP,
+ Sched<[WriteALU]> {
bits<4> Rd;
bits<12> src;
let Inst{15-12} = Rd;
@@ -2998,7 +3007,7 @@ def MOVsr : AsI1<0b1101, (outs GPRnopc:$Rd), (ins shift_so_reg_reg:$src),
def MOVsi : AsI1<0b1101, (outs GPR:$Rd), (ins shift_so_reg_imm:$src),
DPSoRegImmFrm, IIC_iMOVsr,
"mov", "\t$Rd, $src", [(set GPR:$Rd, shift_so_reg_imm:$src)]>,
- UnaryDP {
+ UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<12> src;
let Inst{15-12} = Rd;
@@ -3011,7 +3020,8 @@ def MOVsi : AsI1<0b1101, (outs GPR:$Rd), (ins shift_so_reg_imm:$src),
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
def MOVi : AsI1<0b1101, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm, IIC_iMOVi,
- "mov", "\t$Rd, $imm", [(set GPR:$Rd, so_imm:$imm)]>, UnaryDP {
+ "mov", "\t$Rd, $imm", [(set GPR:$Rd, so_imm:$imm)]>, UnaryDP,
+ Sched<[WriteALU]> {
bits<4> Rd;
bits<12> imm;
let Inst{25} = 1;
@@ -3025,7 +3035,7 @@ def MOVi16 : AI1<0b1000, (outs GPR:$Rd), (ins imm0_65535_expr:$imm),
DPFrm, IIC_iMOVi,
"movw", "\t$Rd, $imm",
[(set GPR:$Rd, imm0_65535:$imm)]>,
- Requires<[IsARM, HasV6T2]>, UnaryDP {
+ Requires<[IsARM, HasV6T2]>, UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<16> imm;
let Inst{15-12} = Rd;
@@ -3041,7 +3051,8 @@ def : InstAlias<"mov${p} $Rd, $imm",
Requires<[IsARM]>;
def MOVi16_ga_pcrel : PseudoInst<(outs GPR:$Rd),
- (ins i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
+ (ins i32imm:$addr, pclabel:$id), IIC_iMOVi, []>,
+ Sched<[WriteALU]>;
let Constraints = "$src = $Rd" in {
def MOVTi16 : AI1<0b1010, (outs GPRnopc:$Rd),
@@ -3051,7 +3062,7 @@ def MOVTi16 : AI1<0b1010, (outs GPRnopc:$Rd),
[(set GPRnopc:$Rd,
(or (and GPR:$src, 0xffff),
lo16AllZero:$imm))]>, UnaryDP,
- Requires<[IsARM, HasV6T2]> {
+ Requires<[IsARM, HasV6T2]>, Sched<[WriteALU]> {
bits<4> Rd;
bits<16> imm;
let Inst{15-12} = Rd;
@@ -3063,7 +3074,8 @@ def MOVTi16 : AI1<0b1010, (outs GPRnopc:$Rd),
}
def MOVTi16_ga_pcrel : PseudoInst<(outs GPR:$Rd),
- (ins GPR:$src, i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
+ (ins GPR:$src, i32imm:$addr, pclabel:$id), IIC_iMOVi, []>,
+ Sched<[WriteALU]>;
} // Constraints
@@ -3073,7 +3085,7 @@ def : ARMPat<(or GPR:$src, 0xffff0000), (MOVTi16 GPR:$src, 0xffff)>,
let Uses = [CPSR] in
def RRX: PseudoInst<(outs GPR:$Rd), (ins GPR:$Rm), IIC_iMOVsi,
[(set GPR:$Rd, (ARMrrx GPR:$Rm))]>, UnaryDP,
- Requires<[IsARM]>;
+ Requires<[IsARM]>, Sched<[WriteALU]>;
// These aren't really mov instructions, but we have to define them this way
// due to flag operands.
@@ -3081,10 +3093,10 @@ def RRX: PseudoInst<(outs GPR:$Rd), (ins GPR:$Rm), IIC_iMOVsi,
let Defs = [CPSR] in {
def MOVsrl_flag : PseudoInst<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
[(set GPR:$dst, (ARMsrl_flag GPR:$src))]>, UnaryDP,
- Requires<[IsARM]>;
+ Sched<[WriteALU]>, Requires<[IsARM]>;
def MOVsra_flag : PseudoInst<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
[(set GPR:$dst, (ARMsra_flag GPR:$src))]>, UnaryDP,
- Requires<[IsARM]>;
+ Sched<[WriteALU]>, Requires<[IsARM]>;
}
//===----------------------------------------------------------------------===//
@@ -3250,7 +3262,8 @@ class AAI<bits<8> op27_20, bits<8> op11_4, string opc,
list<dag> pattern = [],
dag iops = (ins GPRnopc:$Rn, GPRnopc:$Rm),
string asm = "\t$Rd, $Rn, $Rm">
- : AI<(outs GPRnopc:$Rd), iops, DPFrm, IIC_iALUr, opc, asm, pattern> {
+ : AI<(outs GPRnopc:$Rd), iops, DPFrm, IIC_iALUr, opc, asm, pattern>,
+ Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rn;
bits<4> Rd;
bits<4> Rm;
@@ -3265,9 +3278,11 @@ class AAI<bits<8> op27_20, bits<8> op11_4, string opc,
// Saturating add/subtract
+let DecoderMethod = "DecodeQADDInstruction" in
def QADD : AAI<0b00010000, 0b00000101, "qadd",
[(set GPRnopc:$Rd, (int_arm_qadd GPRnopc:$Rm, GPRnopc:$Rn))],
(ins GPRnopc:$Rm, GPRnopc:$Rn), "\t$Rd, $Rm, $Rn">;
+
def QSUB : AAI<0b00010010, 0b00000101, "qsub",
[(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm, GPRnopc:$Rn))],
(ins GPRnopc:$Rm, GPRnopc:$Rn), "\t$Rd, $Rm, $Rn">;
@@ -3326,7 +3341,7 @@ def UHSUB8 : AAI<0b01100111, 0b11111111, "uhsub8">;
def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
MulFrm /* for convenience */, NoItinerary, "usad8",
"\t$Rd, $Rn, $Rm", []>,
- Requires<[IsARM, HasV6]> {
+ Requires<[IsARM, HasV6]>, Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
@@ -3340,7 +3355,7 @@ def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
def USADA8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
MulFrm /* for convenience */, NoItinerary, "usada8",
"\t$Rd, $Rn, $Rm, $Ra", []>,
- Requires<[IsARM, HasV6]> {
+ Requires<[IsARM, HasV6]>, Sched<[WriteALU, ReadALU, ReadALU]>{
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
@@ -3473,7 +3488,7 @@ def BFI:I<(outs GPRnopc:$Rd), (ins GPRnopc:$src, GPR:$Rn, bf_inv_mask_imm:$imm),
def MVNr : AsI1<0b1111, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMVNr,
"mvn", "\t$Rd, $Rm",
- [(set GPR:$Rd, (not GPR:$Rm))]>, UnaryDP {
+ [(set GPR:$Rd, (not GPR:$Rm))]>, UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<4> Rm;
let Inst{25} = 0;
@@ -3484,7 +3499,8 @@ def MVNr : AsI1<0b1111, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMVNr,
}
def MVNsi : AsI1<0b1111, (outs GPR:$Rd), (ins so_reg_imm:$shift),
DPSoRegImmFrm, IIC_iMVNsr, "mvn", "\t$Rd, $shift",
- [(set GPR:$Rd, (not so_reg_imm:$shift))]>, UnaryDP {
+ [(set GPR:$Rd, (not so_reg_imm:$shift))]>, UnaryDP,
+ Sched<[WriteALU]> {
bits<4> Rd;
bits<12> shift;
let Inst{25} = 0;
@@ -3496,7 +3512,8 @@ def MVNsi : AsI1<0b1111, (outs GPR:$Rd), (ins so_reg_imm:$shift),
}
def MVNsr : AsI1<0b1111, (outs GPR:$Rd), (ins so_reg_reg:$shift),
DPSoRegRegFrm, IIC_iMVNsr, "mvn", "\t$Rd, $shift",
- [(set GPR:$Rd, (not so_reg_reg:$shift))]>, UnaryDP {
+ [(set GPR:$Rd, (not so_reg_reg:$shift))]>, UnaryDP,
+ Sched<[WriteALU]> {
bits<4> Rd;
bits<12> shift;
let Inst{25} = 0;
@@ -3511,7 +3528,7 @@ def MVNsr : AsI1<0b1111, (outs GPR:$Rd), (ins so_reg_reg:$shift),
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
def MVNi : AsI1<0b1111, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm,
IIC_iMVNi, "mvn", "\t$Rd, $imm",
- [(set GPR:$Rd, so_imm_not:$imm)]>,UnaryDP {
+ [(set GPR:$Rd, so_imm_not:$imm)]>,UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<12> imm;
let Inst{25} = 1;
@@ -3993,14 +4010,58 @@ def PKHTB : APKHI<0b01101000, 1, (outs GPRnopc:$Rd),
// Alternate cases for PKHTB where identities eliminate some nodes. Note that
// a shift amount of 0 is *not legal* here, it is PKHBT instead.
+// We also cannot replace an srl (17..31) by the arithmetic shift we would use in
+// pkhtb src1, src2, asr (17..31).
def : ARMV6Pat<(or (and GPRnopc:$src1, 0xFFFF0000),
- (srl GPRnopc:$src2, imm16_31:$sh)),
+ (srl GPRnopc:$src2, imm16:$sh)),
+ (PKHTB GPRnopc:$src1, GPRnopc:$src2, imm16:$sh)>;
+def : ARMV6Pat<(or (and GPRnopc:$src1, 0xFFFF0000),
+ (sra GPRnopc:$src2, imm16_31:$sh)),
(PKHTB GPRnopc:$src1, GPRnopc:$src2, imm16_31:$sh)>;
def : ARMV6Pat<(or (and GPRnopc:$src1, 0xFFFF0000),
(and (srl GPRnopc:$src2, imm1_15:$sh), 0xFFFF)),
(PKHTB GPRnopc:$src1, GPRnopc:$src2, imm1_15:$sh)>;
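A small C sketch of the distinction the comment above draws (assumptions mine, not from this file): a logical shift by exactly 16 produces the same low halfword as the asr form PKHTB uses, while a logical shift by 17..31 differs whenever the source is negative, so only the sra pattern covers that range.

    #include <stdint.h>

    uint32_t pkhtb_ok(uint32_t hi, uint32_t lo) {
        return (hi & 0xFFFF0000u) | (lo >> 16);   /* pkhtb hi, lo, asr #16  */
    }
    uint32_t pkhtb_not_ok(uint32_t hi, uint32_t lo) {
        return (hi & 0xFFFF0000u) | (lo >> 20);   /* stays a shift plus orr */
    }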
//===----------------------------------------------------------------------===//
+// CRC Instructions
+//
+// Polynomials:
+// + CRC32{B,H,W} 0x04C11DB7
+// + CRC32C{B,H,W} 0x1EDC6F41
+//
+
+class AI_crc32<bit C, bits<2> sz, string suffix, SDPatternOperator builtin>
+ : AInoP<(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm), MiscFrm, NoItinerary,
+ !strconcat("crc32", suffix), "\t$Rd, $Rn, $Rm",
+ [(set GPRnopc:$Rd, (builtin GPRnopc:$Rn, GPRnopc:$Rm))]>,
+ Requires<[IsARM, HasV8, HasCRC]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+
+ let Inst{31-28} = 0b1110;
+ let Inst{27-23} = 0b00010;
+ let Inst{22-21} = sz;
+ let Inst{20} = 0;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
+ let Inst{11-10} = 0b00;
+ let Inst{9} = C;
+ let Inst{8} = 0;
+ let Inst{7-4} = 0b0100;
+ let Inst{3-0} = Rm;
+
+ let Unpredictable{11-8} = 0b1101;
+}
+
+def CRC32B : AI_crc32<0, 0b00, "b", int_arm_crc32b>;
+def CRC32CB : AI_crc32<1, 0b00, "cb", int_arm_crc32cb>;
+def CRC32H : AI_crc32<0, 0b01, "h", int_arm_crc32h>;
+def CRC32CH : AI_crc32<1, 0b01, "ch", int_arm_crc32ch>;
+def CRC32W : AI_crc32<0, 0b10, "w", int_arm_crc32w>;
+def CRC32CW : AI_crc32<1, 0b10, "cw", int_arm_crc32cw>;
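At the source level these instructions correspond to the ACLE CRC32 intrinsics; a sketch under the assumption that arm_acle.h and __ARM_FEATURE_CRC32 are available (the __crc32b name comes from ACLE, not from this file):

    #include <arm_acle.h>
    #include <stdint.h>

    /* CRC32 (poly 0x04C11DB7) over a byte buffer, one crc32b per byte. */
    uint32_t crc32_bytes(uint32_t crc, const uint8_t *p, int n) {
        while (n--)
            crc = __crc32b(crc, *p++);
        return crc;
    }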
+
+//===----------------------------------------------------------------------===//
// Comparison Instructions...
//
@@ -4022,7 +4083,8 @@ def : ARMPat<(ARMcmpZ GPR:$src, so_reg_reg:$rhs),
let isCompare = 1, Defs = [CPSR] in {
def CMNri : AI1<0b1011, (outs), (ins GPR:$Rn, so_imm:$imm), DPFrm, IIC_iCMPi,
"cmn", "\t$Rn, $imm",
- [(ARMcmn GPR:$Rn, so_imm:$imm)]> {
+ [(ARMcmn GPR:$Rn, so_imm:$imm)]>,
+ Sched<[WriteCMP, ReadALU]> {
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
@@ -4038,7 +4100,7 @@ def CMNri : AI1<0b1011, (outs), (ins GPR:$Rn, so_imm:$imm), DPFrm, IIC_iCMPi,
def CMNzrr : AI1<0b1011, (outs), (ins GPR:$Rn, GPR:$Rm), DPFrm, IIC_iCMPr,
"cmn", "\t$Rn, $Rm",
[(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
- GPR:$Rn, GPR:$Rm)]> {
+ GPR:$Rn, GPR:$Rm)]>, Sched<[WriteCMP, ReadALU, ReadALU]> {
bits<4> Rn;
bits<4> Rm;
let isCommutable = 1;
@@ -4056,7 +4118,8 @@ def CMNzrsi : AI1<0b1011, (outs),
(ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, IIC_iCMPsr,
"cmn", "\t$Rn, $shift",
[(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
- GPR:$Rn, so_reg_imm:$shift)]> {
+ GPR:$Rn, so_reg_imm:$shift)]>,
+ Sched<[WriteCMPsi, ReadALU]> {
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
@@ -4074,7 +4137,8 @@ def CMNzrsr : AI1<0b1011, (outs),
(ins GPRnopc:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, IIC_iCMPsr,
"cmn", "\t$Rn, $shift",
[(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
- GPRnopc:$Rn, so_reg_reg:$shift)]> {
+ GPRnopc:$Rn, so_reg_reg:$shift)]>,
+ Sched<[WriteCMPsr, ReadALU]> {
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
@@ -4112,65 +4176,77 @@ let usesCustomInserter = 1, isBranch = 1, isTerminator = 1,
def BCCi64 : PseudoInst<(outs),
(ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, brtarget:$dst),
IIC_Br,
- [(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, bb:$dst)]>;
+ [(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, bb:$dst)]>,
+ Sched<[WriteBr]>;
def BCCZi64 : PseudoInst<(outs),
(ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, brtarget:$dst), IIC_Br,
- [(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, 0, 0, bb:$dst)]>;
+ [(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, 0, 0, bb:$dst)]>,
+ Sched<[WriteBr]>;
} // usesCustomInserter
// Conditional moves
-// FIXME: should be able to write a pattern for ARMcmov, but can't use
-// a two-value operand where a dag node expects two operands. :(
let neverHasSideEffects = 1 in {
let isCommutable = 1, isSelect = 1 in
-def MOVCCr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$false, GPR:$Rm, pred:$p),
+def MOVCCr : ARMPseudoInst<(outs GPR:$Rd),
+ (ins GPR:$false, GPR:$Rm, cmovpred:$p),
4, IIC_iCMOVr,
- [/*(set GPR:$Rd, (ARMcmov GPR:$false, GPR:$Rm, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $Rd">;
+ [(set GPR:$Rd, (ARMcmov GPR:$false, GPR:$Rm,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
def MOVCCsi : ARMPseudoInst<(outs GPR:$Rd),
- (ins GPR:$false, so_reg_imm:$shift, pred:$p),
- 4, IIC_iCMOVsr,
- [/*(set GPR:$Rd, (ARMcmov GPR:$false, so_reg_imm:$shift,
- imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $Rd">;
+ (ins GPR:$false, so_reg_imm:$shift, cmovpred:$p),
+ 4, IIC_iCMOVsr,
+ [(set GPR:$Rd,
+ (ARMcmov GPR:$false, so_reg_imm:$shift,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
def MOVCCsr : ARMPseudoInst<(outs GPR:$Rd),
- (ins GPR:$false, so_reg_reg:$shift, pred:$p),
+ (ins GPR:$false, so_reg_reg:$shift, cmovpred:$p),
4, IIC_iCMOVsr,
- [/*(set GPR:$Rd, (ARMcmov GPR:$false, so_reg_reg:$shift,
- imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $Rd">;
+ [(set GPR:$Rd, (ARMcmov GPR:$false, so_reg_reg:$shift,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
let isMoveImm = 1 in
-def MOVCCi16 : ARMPseudoInst<(outs GPR:$Rd),
- (ins GPR:$false, imm0_65535_expr:$imm, pred:$p),
- 4, IIC_iMOVi,
- []>,
- RegConstraint<"$false = $Rd">, Requires<[IsARM, HasV6T2]>;
+def MOVCCi16
+ : ARMPseudoInst<(outs GPR:$Rd),
+ (ins GPR:$false, imm0_65535_expr:$imm, cmovpred:$p),
+ 4, IIC_iMOVi,
+ [(set GPR:$Rd, (ARMcmov GPR:$false, imm0_65535:$imm,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Requires<[IsARM, HasV6T2]>,
+ Sched<[WriteALU]>;
let isMoveImm = 1 in
def MOVCCi : ARMPseudoInst<(outs GPR:$Rd),
- (ins GPR:$false, so_imm:$imm, pred:$p),
+ (ins GPR:$false, so_imm:$imm, cmovpred:$p),
4, IIC_iCMOVi,
- [/*(set GPR:$Rd, (ARMcmov GPR:$false, so_imm:$imm, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $Rd">;
+ [(set GPR:$Rd, (ARMcmov GPR:$false, so_imm:$imm,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
// Two instruction predicate mov immediate.
let isMoveImm = 1 in
-def MOVCCi32imm : ARMPseudoInst<(outs GPR:$Rd),
- (ins GPR:$false, i32imm:$src, pred:$p),
- 8, IIC_iCMOVix2, []>, RegConstraint<"$false = $Rd">;
+def MOVCCi32imm
+ : ARMPseudoInst<(outs GPR:$Rd),
+ (ins GPR:$false, i32imm:$src, cmovpred:$p),
+ 8, IIC_iCMOVix2,
+ [(set GPR:$Rd, (ARMcmov GPR:$false, imm:$src,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Requires<[IsARM, HasV6T2]>;
let isMoveImm = 1 in
def MVNCCi : ARMPseudoInst<(outs GPR:$Rd),
- (ins GPR:$false, so_imm:$imm, pred:$p),
+ (ins GPR:$false, so_imm:$imm, cmovpred:$p),
4, IIC_iCMOVi,
- [/*(set GPR:$Rd, (ARMcmov GPR:$false, so_imm_not:$imm, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $Rd">;
+ [(set GPR:$Rd, (ARMcmov GPR:$false, so_imm_not:$imm,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
} // neverHasSideEffects
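Switching these pseudos from pred to cmovpred lets ordinary ARMcmov selection patterns emit them directly instead of relying on custom lowering. A hedged C-level example of the kind of code that typically ends up as one compare plus a MOVCC* pseudo rather than a branch:

    /* One cmp plus a predicated move; no branch needed. */
    int select_max(int a, int b) {
        return (a > b) ? a : b;
    }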
@@ -4189,10 +4265,20 @@ def memb_opt : Operand<i32> {
let DecoderMethod = "DecodeMemBarrierOption";
}
+def InstSyncBarrierOptOperand : AsmOperandClass {
+ let Name = "InstSyncBarrierOpt";
+ let ParserMethod = "parseInstSyncBarrierOptOperand";
+}
+def instsyncb_opt : Operand<i32> {
+ let PrintMethod = "printInstSyncBOption";
+ let ParserMatchClass = InstSyncBarrierOptOperand;
+ let DecoderMethod = "DecodeInstSyncBarrierOption";
+}
+
// memory barriers protect the atomic sequences
let hasSideEffects = 1 in {
def DMB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
- "dmb", "\t$opt", [(ARMMemBarrier (i32 imm:$opt))]>,
+ "dmb", "\t$opt", [(int_arm_dmb (i32 imm0_15:$opt))]>,
Requires<[IsARM, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf57ff05;
@@ -4201,7 +4287,7 @@ def DMB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
}
def DSB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
- "dsb", "\t$opt", []>,
+ "dsb", "\t$opt", [(int_arm_dsb (i32 imm0_15:$opt))]>,
Requires<[IsARM, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf57ff04;
@@ -4209,7 +4295,7 @@ def DSB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
}
// ISB has only full system option
-def ISB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
+def ISB : AInoP<(outs), (ins instsyncb_opt:$opt), MiscFrm, NoItinerary,
"isb", "\t$opt", []>,
Requires<[IsARM, HasDB]> {
bits<4> opt;
@@ -4217,124 +4303,219 @@ def ISB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
let Inst{3-0} = opt;
}
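The dmb/dsb definitions now match the int_arm_dmb/int_arm_dsb intrinsics with an explicit 4-bit option, while isb gets its own operand class since it only accepts the full-system option. A minimal sketch using the ACLE barrier wrappers (assuming arm_acle.h; 15 encodes SY and 11 encodes ISH):

    #include <arm_acle.h>

    void publish(volatile int *data, volatile int *flag) {
        *data = 42;
        __dmb(11);   /* dmb ish: order the data store before the flag store */
        *flag = 1;
        __dsb(15);   /* dsb sy                                              */
        __isb(15);   /* isb sy: the only architected ISB option             */
    }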
+let usesCustomInserter = 1, Defs = [CPSR] in {
+
// Pseudo instruction that combines movs + predicated rsbmi
// to implement integer ABS
-let usesCustomInserter = 1, Defs = [CPSR] in
-def ABS : ARMPseudoInst<(outs GPR:$dst), (ins GPR:$src), 8, NoItinerary, []>;
+ def ABS : ARMPseudoInst<(outs GPR:$dst), (ins GPR:$src), 8, NoItinerary, []>;
-let usesCustomInserter = 1 in {
- let Defs = [CPSR] in {
+// Atomic pseudo-insts which will be lowered to ldrex/strex loops.
+// (The 64-bit pseudos use hand-written selection code.)
+ let mayLoad = 1, mayStore = 1 in {
def ATOMIC_LOAD_ADD_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_add_8 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_SUB_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_sub_8 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_AND_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_and_8 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_OR_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_or_8 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_XOR_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_xor_8 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_NAND_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_nand_8 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_MIN_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_min_8 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_MAX_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_max_8 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_UMIN_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_umin_8 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_UMAX_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_umax_8 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_SWAP_I8 : PseudoInst<
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$new, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_CMP_SWAP_I8 : PseudoInst<
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$old, GPR:$new, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_ADD_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_add_16 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_SUB_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_sub_16 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_AND_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_and_16 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_OR_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_or_16 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_XOR_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_xor_16 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_NAND_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_nand_16 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_MIN_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_min_16 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_MAX_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_max_16 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_UMIN_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_umin_16 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_UMAX_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_umax_16 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_SWAP_I16 : PseudoInst<
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$new, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_CMP_SWAP_I16 : PseudoInst<
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$old, GPR:$new, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_ADD_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_add_32 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_SUB_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_sub_32 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_AND_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_and_32 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_OR_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_or_32 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_XOR_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_xor_32 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_NAND_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- [(set GPR:$dst, (atomic_load_nand_32 GPR:$ptr, GPR:$incr))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_MIN_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_min_32 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_MAX_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_max_32 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_UMIN_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_umin_32 GPR:$ptr, GPR:$val))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_LOAD_UMAX_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_umax_32 GPR:$ptr, GPR:$val))]>;
-
- def ATOMIC_SWAP_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
- [(set GPR:$dst, (atomic_swap_8 GPR:$ptr, GPR:$new))]>;
- def ATOMIC_SWAP_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
- [(set GPR:$dst, (atomic_swap_16 GPR:$ptr, GPR:$new))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_SWAP_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
- [(set GPR:$dst, (atomic_swap_32 GPR:$ptr, GPR:$new))]>;
-
- def ATOMIC_CMP_SWAP_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
- [(set GPR:$dst, (atomic_cmp_swap_8 GPR:$ptr, GPR:$old, GPR:$new))]>;
- def ATOMIC_CMP_SWAP_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
- [(set GPR:$dst, (atomic_cmp_swap_16 GPR:$ptr, GPR:$old, GPR:$new))]>;
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$new, i32imm:$ordering),
+ NoItinerary, []>;
def ATOMIC_CMP_SWAP_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
- [(set GPR:$dst, (atomic_cmp_swap_32 GPR:$ptr, GPR:$old, GPR:$new))]>;
-}
+ (outs GPR:$dst),
+ (ins GPR:$ptr, GPR:$old, GPR:$new, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_LOAD_ADD_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_LOAD_SUB_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_LOAD_AND_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_LOAD_OR_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_LOAD_XOR_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_LOAD_NAND_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_LOAD_MIN_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_LOAD_MAX_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_LOAD_UMIN_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_LOAD_UMAX_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_SWAP_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
+ def ATOMIC_CMP_SWAP_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$cmp1, GPR:$cmp2,
+ GPR:$set1, GPR:$set2, i32imm:$ordering),
+ NoItinerary, []>;
+ }
+ let mayLoad = 1 in
+ def ATOMIC_LOAD_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, i32imm:$ordering),
+ NoItinerary, []>;
+ let mayStore = 1 in
+ def ATOMIC_STORE_I64 : PseudoInst<
+ (outs GPR:$dst1, GPR:$dst2),
+ (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
+ NoItinerary, []>;
}
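The custom inserter expands each pseudo above into an exclusive-access retry loop, and the new $ordering operand tells it how much fencing (or which acquire/release variant) to wrap around that loop. A hand-written C sketch of the basic shape for a 32-bit fetch-and-add (my inline asm, not the backend's actual output):

    static inline int atomic_fetch_add32(int *p, int incr) {
        int old, tmp, fail;
        __asm__ volatile(
            "1: ldrex   %0, [%3]      \n"   /* old  = *p (exclusive)            */
            "   add     %1, %0, %4    \n"   /* tmp  = old + incr                */
            "   strex   %2, %1, [%3]  \n"   /* fail = 0 iff the store succeeded */
            "   cmp     %2, #0        \n"
            "   bne     1b            \n"   /* lost the reservation: retry      */
            : "=&r"(old), "=&r"(tmp), "=&r"(fail)
            : "r"(p), "Ir"(incr)
            : "cc", "memory");
        return old;
    }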
let usesCustomInserter = 1 in {
@@ -4344,48 +4525,147 @@ let usesCustomInserter = 1 in {
[(ARMcopystructbyval GPR:$dst, GPR:$src, imm:$size, imm:$alignment)]>;
}
+def ldrex_1 : PatFrag<(ops node:$ptr), (int_arm_ldrex node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def ldrex_2 : PatFrag<(ops node:$ptr), (int_arm_ldrex node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def ldrex_4 : PatFrag<(ops node:$ptr), (int_arm_ldrex node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def strex_1 : PatFrag<(ops node:$val, node:$ptr),
+ (int_arm_strex node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def strex_2 : PatFrag<(ops node:$val, node:$ptr),
+ (int_arm_strex node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def strex_4 : PatFrag<(ops node:$val, node:$ptr),
+ (int_arm_strex node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
let mayLoad = 1 in {
def LDREXB : AIldrex<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
- NoItinerary,
- "ldrexb", "\t$Rt, $addr", []>;
+ NoItinerary, "ldrexb", "\t$Rt, $addr",
+ [(set GPR:$Rt, (ldrex_1 addr_offset_none:$addr))]>;
def LDREXH : AIldrex<0b11, (outs GPR:$Rt), (ins addr_offset_none:$addr),
- NoItinerary, "ldrexh", "\t$Rt, $addr", []>;
+ NoItinerary, "ldrexh", "\t$Rt, $addr",
+ [(set GPR:$Rt, (ldrex_2 addr_offset_none:$addr))]>;
def LDREX : AIldrex<0b00, (outs GPR:$Rt), (ins addr_offset_none:$addr),
- NoItinerary, "ldrex", "\t$Rt, $addr", []>;
+ NoItinerary, "ldrex", "\t$Rt, $addr",
+ [(set GPR:$Rt, (ldrex_4 addr_offset_none:$addr))]>;
let hasExtraDefRegAllocReq = 1 in
-def LDREXD: AIldrex<0b01, (outs GPRPairOp:$Rt),(ins addr_offset_none:$addr),
+def LDREXD : AIldrex<0b01, (outs GPRPairOp:$Rt),(ins addr_offset_none:$addr),
NoItinerary, "ldrexd", "\t$Rt, $addr", []> {
let DecoderMethod = "DecodeDoubleRegLoad";
}
+
+def LDAEXB : AIldaex<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
+ NoItinerary, "ldaexb", "\t$Rt, $addr", []>;
+def LDAEXH : AIldaex<0b11, (outs GPR:$Rt), (ins addr_offset_none:$addr),
+ NoItinerary, "ldaexh", "\t$Rt, $addr", []>;
+def LDAEX : AIldaex<0b00, (outs GPR:$Rt), (ins addr_offset_none:$addr),
+ NoItinerary, "ldaex", "\t$Rt, $addr", []>;
+let hasExtraDefRegAllocReq = 1 in
+def LDAEXD : AIldaex<0b01, (outs GPRPairOp:$Rt),(ins addr_offset_none:$addr),
+ NoItinerary, "ldaexd", "\t$Rt, $addr", []> {
+ let DecoderMethod = "DecodeDoubleRegLoad";
+}
}
let mayStore = 1, Constraints = "@earlyclobber $Rd" in {
def STREXB: AIstrex<0b10, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
- NoItinerary, "strexb", "\t$Rd, $Rt, $addr", []>;
+ NoItinerary, "strexb", "\t$Rd, $Rt, $addr",
+ [(set GPR:$Rd, (strex_1 GPR:$Rt, addr_offset_none:$addr))]>;
def STREXH: AIstrex<0b11, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
- NoItinerary, "strexh", "\t$Rd, $Rt, $addr", []>;
+ NoItinerary, "strexh", "\t$Rd, $Rt, $addr",
+ [(set GPR:$Rd, (strex_2 GPR:$Rt, addr_offset_none:$addr))]>;
def STREX : AIstrex<0b00, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
- NoItinerary, "strex", "\t$Rd, $Rt, $addr", []>;
+ NoItinerary, "strex", "\t$Rd, $Rt, $addr",
+ [(set GPR:$Rd, (strex_4 GPR:$Rt, addr_offset_none:$addr))]>;
let hasExtraSrcRegAllocReq = 1 in
def STREXD : AIstrex<0b01, (outs GPR:$Rd),
(ins GPRPairOp:$Rt, addr_offset_none:$addr),
NoItinerary, "strexd", "\t$Rd, $Rt, $addr", []> {
let DecoderMethod = "DecodeDoubleRegStore";
}
+def STLEXB: AIstlex<0b10, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
+ NoItinerary, "stlexb", "\t$Rd, $Rt, $addr",
+ []>;
+def STLEXH: AIstlex<0b11, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
+ NoItinerary, "stlexh", "\t$Rd, $Rt, $addr",
+ []>;
+def STLEX : AIstlex<0b00, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
+ NoItinerary, "stlex", "\t$Rd, $Rt, $addr",
+ []>;
+let hasExtraSrcRegAllocReq = 1 in
+def STLEXD : AIstlex<0b01, (outs GPR:$Rd),
+ (ins GPRPairOp:$Rt, addr_offset_none:$addr),
+ NoItinerary, "stlexd", "\t$Rd, $Rt, $addr", []> {
+ let DecoderMethod = "DecodeDoubleRegStore";
+}
}
-
-def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex", []>,
+def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex",
+ [(int_arm_clrex)]>,
Requires<[IsARM, HasV7]> {
let Inst{31-0} = 0b11110101011111111111000000011111;
}
+def : ARMPat<(and (ldrex_1 addr_offset_none:$addr), 0xff),
+ (LDREXB addr_offset_none:$addr)>;
+def : ARMPat<(and (ldrex_2 addr_offset_none:$addr), 0xffff),
+ (LDREXH addr_offset_none:$addr)>;
+def : ARMPat<(strex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
+ (STREXB GPR:$Rt, addr_offset_none:$addr)>;
+def : ARMPat<(strex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
+ (STREXH GPR:$Rt, addr_offset_none:$addr)>;
+
+class acquiring_load<PatFrag base>
+ : PatFrag<(ops node:$ptr), (base node:$ptr), [{
+ AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
+ return Ordering == Acquire || Ordering == SequentiallyConsistent;
+}]>;
+
+def atomic_load_acquire_8 : acquiring_load<atomic_load_8>;
+def atomic_load_acquire_16 : acquiring_load<atomic_load_16>;
+def atomic_load_acquire_32 : acquiring_load<atomic_load_32>;
+
+class releasing_store<PatFrag base>
+ : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
+ AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
+ return Ordering == Release || Ordering == SequentiallyConsistent;
+}]>;
+
+def atomic_store_release_8 : releasing_store<atomic_store_8>;
+def atomic_store_release_16 : releasing_store<atomic_store_16>;
+def atomic_store_release_32 : releasing_store<atomic_store_32>;
+
+let AddedComplexity = 8 in {
+ def : ARMPat<(atomic_load_acquire_8 addr_offset_none:$addr), (LDAB addr_offset_none:$addr)>;
+ def : ARMPat<(atomic_load_acquire_16 addr_offset_none:$addr), (LDAH addr_offset_none:$addr)>;
+ def : ARMPat<(atomic_load_acquire_32 addr_offset_none:$addr), (LDA addr_offset_none:$addr)>;
+ def : ARMPat<(atomic_store_release_8 addr_offset_none:$addr, GPR:$val), (STLB GPR:$val, addr_offset_none:$addr)>;
+ def : ARMPat<(atomic_store_release_16 addr_offset_none:$addr, GPR:$val), (STLH GPR:$val, addr_offset_none:$addr)>;
+ def : ARMPat<(atomic_store_release_32 addr_offset_none:$addr, GPR:$val), (STL GPR:$val, addr_offset_none:$addr)>;
+}
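With these AddedComplexity patterns, ordinary C11/C++11 acquire loads and release stores can select the new LDA*/STL* forms directly instead of a plain access bracketed by barriers. A small sketch using only standard <stdatomic.h> (nothing target-specific assumed):

    #include <stdatomic.h>

    int consume_flag(atomic_int *flag) {
        /* acquire load: candidate for lda on ARMv8 */
        return atomic_load_explicit(flag, memory_order_acquire);
    }
    void publish_flag(atomic_int *flag) {
        /* release store: candidate for stl on ARMv8 */
        atomic_store_explicit(flag, 1, memory_order_release);
    }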
+
// SWP/SWPB are deprecated in V6/V7.
let mayLoad = 1, mayStore = 1 in {
def SWP : AIswp<0, (outs GPRnopc:$Rt),
- (ins GPRnopc:$Rt2, addr_offset_none:$addr), "swp", []>;
+ (ins GPRnopc:$Rt2, addr_offset_none:$addr), "swp", []>,
+ Requires<[PreV8]>;
def SWPB: AIswp<1, (outs GPRnopc:$Rt),
- (ins GPRnopc:$Rt2, addr_offset_none:$addr), "swpb", []>;
+ (ins GPRnopc:$Rt2, addr_offset_none:$addr), "swpb", []>,
+ Requires<[PreV8]>;
}
//===----------------------------------------------------------------------===//
@@ -4396,7 +4676,8 @@ def CDP : ABI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2),
NoItinerary, "cdp", "\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[(int_arm_cdp imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn,
- imm:$CRm, imm:$opc2)]> {
+ imm:$CRm, imm:$opc2)]>,
+ Requires<[PreV8]> {
bits<4> opc1;
bits<4> CRn;
bits<4> CRd;
@@ -4413,11 +4694,12 @@ def CDP : ABI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
let Inst{23-20} = opc1;
}
-def CDP2 : ABXI<0b1110, (outs), (ins pf_imm:$cop, imm0_15:$opc1,
+def CDP2 : ABXI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2),
NoItinerary, "cdp2\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[(int_arm_cdp2 imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn,
- imm:$CRm, imm:$opc2)]> {
+ imm:$CRm, imm:$opc2)]>,
+ Requires<[PreV8]> {
let Inst{31-28} = 0b1111;
bits<4> opc1;
bits<4> CRn;
@@ -4595,10 +4877,10 @@ defm LDC : LdStCop <1, 0, "ldc">;
defm LDCL : LdStCop <1, 1, "ldcl">;
defm STC : LdStCop <0, 0, "stc">;
defm STCL : LdStCop <0, 1, "stcl">;
-defm LDC2 : LdSt2Cop<1, 0, "ldc2">;
-defm LDC2L : LdSt2Cop<1, 1, "ldc2l">;
-defm STC2 : LdSt2Cop<0, 0, "stc2">;
-defm STC2L : LdSt2Cop<0, 1, "stc2l">;
+defm LDC2 : LdSt2Cop<1, 0, "ldc2">, Requires<[PreV8]>;
+defm LDC2L : LdSt2Cop<1, 1, "ldc2l">, Requires<[PreV8]>;
+defm STC2 : LdSt2Cop<0, 0, "stc2">, Requires<[PreV8]>;
+defm STC2L : LdSt2Cop<0, 1, "stc2l">, Requires<[PreV8]>;
//===----------------------------------------------------------------------===//
// Move between coprocessor and ARM core register.
@@ -4631,16 +4913,17 @@ def MCR : MovRCopro<"mcr", 0 /* from ARM core register to coprocessor */,
(ins p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
- imm:$CRm, imm:$opc2)]>;
+ imm:$CRm, imm:$opc2)]>,
+ ComplexDeprecationPredicate<"MCR">;
def : ARMInstAlias<"mcr${p} $cop, $opc1, $Rt, $CRn, $CRm",
(MCR p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, 0, pred:$p)>;
def MRC : MovRCopro<"mrc", 1 /* from coprocessor to ARM core register */,
- (outs GPR:$Rt),
+ (outs GPRwithAPSR:$Rt),
(ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm,
imm0_7:$opc2), []>;
def : ARMInstAlias<"mrc${p} $cop, $opc1, $Rt, $CRn, $CRm",
- (MRC GPR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
+ (MRC GPRwithAPSR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
c_imm:$CRm, 0, pred:$p)>;
def : ARMPat<(int_arm_mrc imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2),
@@ -4650,7 +4933,7 @@ class MovRCopro2<string opc, bit direction, dag oops, dag iops,
list<dag> pattern>
: ABXI<0b1110, oops, iops, NoItinerary,
!strconcat(opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2"), pattern> {
- let Inst{31-28} = 0b1111;
+ let Inst{31-24} = 0b11111110;
let Inst{20} = direction;
let Inst{4} = 1;
@@ -4674,16 +4957,18 @@ def MCR2 : MovRCopro2<"mcr2", 0 /* from ARM core register to coprocessor */,
(ins p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr2 imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
- imm:$CRm, imm:$opc2)]>;
+ imm:$CRm, imm:$opc2)]>,
+ Requires<[PreV8]>;
def : ARMInstAlias<"mcr2$ $cop, $opc1, $Rt, $CRn, $CRm",
(MCR2 p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, 0)>;
def MRC2 : MovRCopro2<"mrc2", 1 /* from coprocessor to ARM core register */,
- (outs GPR:$Rt),
+ (outs GPRwithAPSR:$Rt),
(ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm,
- imm0_7:$opc2), []>;
+ imm0_7:$opc2), []>,
+ Requires<[PreV8]>;
def : ARMInstAlias<"mrc2$ $cop, $opc1, $Rt, $CRn, $CRm",
- (MRC2 GPR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
+ (MRC2 GPRwithAPSR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
c_imm:$CRm, 0)>;
def : ARMV5TPat<(int_arm_mrc2 imm:$cop, imm:$opc1, imm:$CRn,
@@ -4718,7 +5003,8 @@ def MRRC : MovRRCopro<"mrrc", 1 /* from coprocessor to ARM core register */>;
class MovRRCopro2<string opc, bit direction, list<dag> pattern = []>
: ABXI<0b1100, (outs), (ins p_imm:$cop, imm0_15:$opc1,
GPRnopc:$Rt, GPRnopc:$Rt2, c_imm:$CRm), NoItinerary,
- !strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"), pattern> {
+ !strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"), pattern>,
+ Requires<[PreV8]> {
let Inst{31-28} = 0b1111;
let Inst{23-21} = 0b010;
let Inst{20} = direction;
@@ -4820,7 +5106,7 @@ def MSRi : ABI<0b0011, (outs), (ins msr_mask:$mask, so_imm:$a), NoItinerary,
let isCall = 1,
Defs = [R0, R12, LR, CPSR], Uses = [SP] in {
def TPsoft : PseudoInst<(outs), (ins), IIC_Br,
- [(set R0, ARMthread_pointer)]>;
+ [(set R0, ARMthread_pointer)]>, Sched<[WriteBr]>;
}
//===----------------------------------------------------------------------===//
@@ -4884,7 +5170,7 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in
def MOVPCRX : ARMPseudoExpand<(outs), (ins GPR:$dst),
4, IIC_Br, [(brind GPR:$dst)],
(MOVr PC, GPR:$dst, (ops 14, zero_reg), zero_reg)>,
- Requires<[IsARM, NoV4T]>;
+ Requires<[IsARM, NoV4T]>, Sched<[WriteBr]>;
// Large immediate handling.
@@ -5153,10 +5439,10 @@ def : MnemonicAlias<"rfeed", "rfeib">;
def : MnemonicAlias<"rfe", "rfeia">;
// SRS aliases
-def : MnemonicAlias<"srsfa", "srsda">;
-def : MnemonicAlias<"srsea", "srsdb">;
-def : MnemonicAlias<"srsfd", "srsia">;
-def : MnemonicAlias<"srsed", "srsib">;
+def : MnemonicAlias<"srsfa", "srsib">;
+def : MnemonicAlias<"srsea", "srsia">;
+def : MnemonicAlias<"srsfd", "srsdb">;
+def : MnemonicAlias<"srsed", "srsda">;
def : MnemonicAlias<"srs", "srsia">;
// QSAX == QSUBADDX
@@ -5233,7 +5519,7 @@ def RORi : ARMAsmPseudo<"ror${s}${p} $Rd, $Rm, $imm",
cc_out:$s)>;
}
def RRXi : ARMAsmPseudo<"rrx${s}${p} $Rd, $Rm",
- (ins GPRnopc:$Rd, GPRnopc:$Rm, pred:$p, cc_out:$s)>;
+ (ins GPR:$Rd, GPR:$Rm, pred:$p, cc_out:$s)>;
let TwoOperandAliasConstraint = "$Rn = $Rd" in {
def ASRr : ARMAsmPseudo<"asr${s}${p} $Rd, $Rn, $Rm",
(ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
@@ -5269,4 +5555,5 @@ def : InstAlias<"umull${s}${p} $RdLo, $RdHi, $Rn, $Rm",
// 'it' blocks in ARM mode just validate the predicates. The IT itself
// is discarded.
-def ITasm : ARMAsmPseudo<"it$mask $cc", (ins it_pred:$cc, it_mask:$mask)>;
+def ITasm : ARMAsmPseudo<"it$mask $cc", (ins it_pred:$cc, it_mask:$mask)>,
+ ComplexDeprecationPredicate<"IT">;
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 896fd0f7850c..43bd4c21dc39 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -626,7 +626,7 @@ class VLD1D<bits<4> op7_4, string Dt>
"vld1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLDInstruction";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
class VLD1Q<bits<4> op7_4, string Dt>
: NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd),
@@ -634,7 +634,7 @@ class VLD1Q<bits<4> op7_4, string Dt>
"vld1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
def VLD1d8 : VLD1D<{0,0,0,?}, "8">;
@@ -655,16 +655,14 @@ multiclass VLD1DWB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLDInstruction";
- let AsmMatchConverter = "cvtVLDwbFixed";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLDInstruction";
- let AsmMatchConverter = "cvtVLDwbRegister";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
}
multiclass VLD1QWB<bits<4> op7_4, string Dt> {
@@ -674,16 +672,14 @@ multiclass VLD1QWB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
- let AsmMatchConverter = "cvtVLDwbFixed";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd, GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
- let AsmMatchConverter = "cvtVLDwbRegister";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
}
@@ -703,7 +699,7 @@ class VLD1D3<bits<4> op7_4, string Dt>
"$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLDInstruction";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
multiclass VLD1D3WB<bits<4> op7_4, string Dt> {
def _fixed : NLdSt<0,0b10,0b0110, op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
@@ -712,16 +708,14 @@ multiclass VLD1D3WB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLDInstruction";
- let AsmMatchConverter = "cvtVLDwbFixed";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLDInstruction";
- let AsmMatchConverter = "cvtVLDwbRegister";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
}
@@ -744,7 +738,7 @@ class VLD1D4<bits<4> op7_4, string Dt>
"$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
multiclass VLD1D4WB<bits<4> op7_4, string Dt> {
def _fixed : NLdSt<0,0b10,0b0010, op7_4, (outs VecListFourD:$Vd, GPR:$wb),
@@ -753,16 +747,14 @@ multiclass VLD1D4WB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
- let AsmMatchConverter = "cvtVLDwbFixed";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b0010,op7_4, (outs VecListFourD:$Vd, GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
- let AsmMatchConverter = "cvtVLDwbRegister";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
}
@@ -786,7 +778,7 @@ class VLD2<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy,
"vld2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
+ let DecoderMethod = "DecodeVLDST2Instruction";
}
def VLD2d8 : VLD2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2>;
@@ -810,16 +802,14 @@ multiclass VLD2WB<bits<4> op11_8, bits<4> op7_4, string Dt,
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
- let AsmMatchConverter = "cvtVLDwbFixed";
+ let DecoderMethod = "DecodeVLDST2Instruction";
}
def _register : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm), itin,
"vld2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
- let AsmMatchConverter = "cvtVLDwbRegister";
+ let DecoderMethod = "DecodeVLDST2Instruction";
}
}
@@ -853,7 +843,7 @@ class VLD3D<bits<4> op11_8, bits<4> op7_4, string Dt>
"vld3", Dt, "\\{$Vd, $dst2, $dst3\\}, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLDInstruction";
+ let DecoderMethod = "DecodeVLDST3Instruction";
}
def VLD3d8 : VLD3D<0b0100, {0,0,0,?}, "8">;
@@ -872,7 +862,7 @@ class VLD3DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
"vld3", Dt, "\\{$Vd, $dst2, $dst3\\}, $Rn$Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLDInstruction";
+ let DecoderMethod = "DecodeVLDST3Instruction";
}
def VLD3d8_UPD : VLD3DWB<0b0100, {0,0,0,?}, "8">;
@@ -912,7 +902,7 @@ class VLD4D<bits<4> op11_8, bits<4> op7_4, string Dt>
"vld4", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
+ let DecoderMethod = "DecodeVLDST4Instruction";
}
def VLD4d8 : VLD4D<0b0000, {0,0,?,?}, "8">;
@@ -931,7 +921,7 @@ class VLD4DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
"vld4", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn$Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
+ let DecoderMethod = "DecodeVLDST4Instruction";
}
def VLD4d8_UPD : VLD4DWB<0b0000, {0,0,?,?}, "8">;
@@ -1348,7 +1338,6 @@ multiclass VLD1DUPWB<bits<4> op7_4, string Dt> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
- let AsmMatchConverter = "cvtVLDwbFixed";
}
def _register : NLdSt<1, 0b10, 0b1100, op7_4,
(outs VecListOneDAllLanes:$Vd, GPR:$wb),
@@ -1357,7 +1346,6 @@ multiclass VLD1DUPWB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
- let AsmMatchConverter = "cvtVLDwbRegister";
}
}
multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> {
@@ -1369,7 +1357,6 @@ multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
- let AsmMatchConverter = "cvtVLDwbFixed";
}
def _register : NLdSt<1, 0b10, 0b1100, op7_4,
(outs VecListDPairAllLanes:$Vd, GPR:$wb),
@@ -1378,7 +1365,6 @@ multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
- let AsmMatchConverter = "cvtVLDwbRegister";
}
}
@@ -1419,7 +1405,6 @@ multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD2DupInstruction";
- let AsmMatchConverter = "cvtVLDwbFixed";
}
def _register : NLdSt<1, 0b10, 0b1101, op7_4,
(outs VdTy:$Vd, GPR:$wb),
@@ -1428,7 +1413,6 @@ multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy> {
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD2DupInstruction";
- let AsmMatchConverter = "cvtVLDwbRegister";
}
}
@@ -1580,14 +1564,14 @@ class VST1D<bits<4> op7_4, string Dt>
IIC_VST1, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVSTInstruction";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
class VST1Q<bits<4> op7_4, string Dt>
: NLdSt<0,0b00,0b1010,op7_4, (outs), (ins addrmode6:$Rn, VecListDPair:$Vd),
IIC_VST1x2, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
def VST1d8 : VST1D<{0,0,0,?}, "8">;
@@ -1608,8 +1592,7 @@ multiclass VST1DWB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbFixed";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b0111,op7_4, (outs GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm, VecListOneD:$Vd),
@@ -1617,8 +1600,7 @@ multiclass VST1DWB<bits<4> op7_4, string Dt> {
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbRegister";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
}
multiclass VST1QWB<bits<4> op7_4, string Dt> {
@@ -1628,8 +1610,7 @@ multiclass VST1QWB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbFixed";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm, VecListDPair:$Vd),
@@ -1637,8 +1618,7 @@ multiclass VST1QWB<bits<4> op7_4, string Dt> {
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbRegister";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
}
@@ -1659,7 +1639,7 @@ class VST1D3<bits<4> op7_4, string Dt>
IIC_VST1x3, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVSTInstruction";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
multiclass VST1D3WB<bits<4> op7_4, string Dt> {
def _fixed : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb),
@@ -1668,8 +1648,7 @@ multiclass VST1D3WB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbFixed";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm, VecListThreeD:$Vd),
@@ -1677,8 +1656,7 @@ multiclass VST1D3WB<bits<4> op7_4, string Dt> {
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbRegister";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
}
@@ -1704,7 +1682,7 @@ class VST1D4<bits<4> op7_4, string Dt>
[]> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
multiclass VST1D4WB<bits<4> op7_4, string Dt> {
def _fixed : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb),
@@ -1713,8 +1691,7 @@ multiclass VST1D4WB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbFixed";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd),
@@ -1722,8 +1699,7 @@ multiclass VST1D4WB<bits<4> op7_4, string Dt> {
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbRegister";
+ let DecoderMethod = "DecodeVLDST1Instruction";
}
}
@@ -1748,7 +1724,7 @@ class VST2<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy,
itin, "vst2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
+ let DecoderMethod = "DecodeVLDST2Instruction";
}
def VST2d8 : VST2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VST2>;
@@ -1772,16 +1748,14 @@ multiclass VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt,
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbFixed";
+ let DecoderMethod = "DecodeVLDST2Instruction";
}
def _register : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm, VdTy:$Vd), IIC_VLD1u,
"vst2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbRegister";
+ let DecoderMethod = "DecodeVLDST2Instruction";
}
}
multiclass VST2QWB<bits<4> op7_4, string Dt> {
@@ -1791,8 +1765,7 @@ multiclass VST2QWB<bits<4> op7_4, string Dt> {
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbFixed";
+ let DecoderMethod = "DecodeVLDST2Instruction";
}
def _register : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
(ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd),
@@ -1800,8 +1773,7 @@ multiclass VST2QWB<bits<4> op7_4, string Dt> {
"vst2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
- let AsmMatchConverter = "cvtVSTwbRegister";
+ let DecoderMethod = "DecodeVLDST2Instruction";
}
}
@@ -1835,7 +1807,7 @@ class VST3D<bits<4> op11_8, bits<4> op7_4, string Dt>
"vst3", Dt, "\\{$Vd, $src2, $src3\\}, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVSTInstruction";
+ let DecoderMethod = "DecodeVLDST3Instruction";
}
def VST3d8 : VST3D<0b0100, {0,0,0,?}, "8">;
@@ -1854,7 +1826,7 @@ class VST3DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
"vst3", Dt, "\\{$Vd, $src2, $src3\\}, $Rn$Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVSTInstruction";
+ let DecoderMethod = "DecodeVLDST3Instruction";
}
def VST3d8_UPD : VST3DWB<0b0100, {0,0,0,?}, "8">;
@@ -1894,7 +1866,7 @@ class VST4D<bits<4> op11_8, bits<4> op7_4, string Dt>
"", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
+ let DecoderMethod = "DecodeVLDST4Instruction";
}
def VST4d8 : VST4D<0b0000, {0,0,?,?}, "8">;
@@ -1913,7 +1885,7 @@ class VST4DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
"vst4", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
+ let DecoderMethod = "DecodeVLDST4Instruction";
}
def VST4d8_UPD : VST4DWB<0b0000, {0,0,?,?}, "8">;
@@ -2379,6 +2351,40 @@ class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
(ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
[(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
+// Same as above, but not predicated.
+class N2VDIntnp<bits<2> op17_16, bits<3> op10_8, bit op7,
+ InstrItinClass itin, string OpcodeStr, string Dt,
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
+ : N2Vnp<0b10, op17_16, op10_8, op7, 0, (outs DPR:$Vd), (ins DPR:$Vm),
+ itin, OpcodeStr, Dt, ResTy, OpTy,
+ [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
+
+class N2VQIntnp<bits<2> op17_16, bits<3> op10_8, bit op7,
+ InstrItinClass itin, string OpcodeStr, string Dt,
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
+ : N2Vnp<0b10, op17_16, op10_8, op7, 1, (outs QPR:$Vd), (ins QPR:$Vm),
+ itin, OpcodeStr, Dt, ResTy, OpTy,
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
+
+// Similar to N2VQIntnp with some more encoding bits exposed (crypto).
+class N2VQIntXnp<bits<2> op19_18, bits<2> op17_16, bits<3> op10_8, bit op6,
+ bit op7, InstrItinClass itin, string OpcodeStr, string Dt,
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
+ : N2Vnp<op19_18, op17_16, op10_8, op7, op6, (outs QPR:$Vd), (ins QPR:$Vm),
+ itin, OpcodeStr, Dt, ResTy, OpTy,
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
+
+// Same as N2VQIntXnp but with Vd as a src register.
+class N2VQIntX2np<bits<2> op19_18, bits<2> op17_16, bits<3> op10_8, bit op6,
+ bit op7, InstrItinClass itin, string OpcodeStr, string Dt,
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
+ : N2Vnp<op19_18, op17_16, op10_8, op7, op6,
+ (outs QPR:$Vd), (ins QPR:$src, QPR:$Vm),
+ itin, OpcodeStr, Dt, ResTy, OpTy,
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$src), (OpTy QPR:$Vm))))]> {
+ let Constraints = "$src = $Vd";
+}
+
// Narrow 2-register operations.
class N2VN<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
@@ -2541,6 +2547,16 @@ class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = Commutable;
}
+
+class N3VDIntnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6,
+ bit op4, Format f, InstrItinClass itin, string OpcodeStr,
+ string Dt, ValueType ResTy, ValueType OpTy,
+ SDPatternOperator IntOp, bit Commutable>
+ : N3Vnp<op27_23, op21_20, op11_8, op6, op4,
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin, OpcodeStr, Dt,
+ ResTy, OpTy, IntOp, Commutable,
+ [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>;
+
class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt, ValueType Ty, SDPatternOperator IntOp>
: N3VLane32<0, 1, op21_20, op11_8, 1, 0,
@@ -2552,6 +2568,7 @@ class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
imm:$lane)))))]> {
let isCommutable = 0;
}
+
class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt, ValueType Ty, SDPatternOperator IntOp>
: N3VLane16<0, 1, op21_20, op11_8, 1, 0,
@@ -2584,6 +2601,29 @@ class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = Commutable;
}
+
+class N3VQIntnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6,
+ bit op4, Format f, InstrItinClass itin, string OpcodeStr,
+ string Dt, ValueType ResTy, ValueType OpTy,
+ SDPatternOperator IntOp, bit Commutable>
+ : N3Vnp<op27_23, op21_20, op11_8, op6, op4,
+ (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), f, itin, OpcodeStr, Dt,
+ ResTy, OpTy, IntOp, Commutable,
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>;
+
+// Same as N3VQIntnp but with Vd as a src register.
+class N3VQInt3np<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6,
+ bit op4, Format f, InstrItinClass itin, string OpcodeStr,
+ string Dt, ValueType ResTy, ValueType OpTy,
+ SDPatternOperator IntOp, bit Commutable>
+ : N3Vnp<op27_23, op21_20, op11_8, op6, op4,
+ (outs QPR:$Vd), (ins QPR:$src, QPR:$Vn, QPR:$Vm), f, itin, OpcodeStr,
+ Dt, ResTy, OpTy, IntOp, Commutable,
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$src), (OpTy QPR:$Vn),
+ (OpTy QPR:$Vm))))]> {
+ let Constraints = "$src = $Vd";
+}
+
class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
@@ -2834,6 +2874,7 @@ class N3VL<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
[(set QPR:$Vd, (TyQ (OpNode (TyD DPR:$Vn), (TyD DPR:$Vm))))]> {
let isCommutable = Commutable;
}
+
class N3VLSL<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode OpNode>
@@ -2889,6 +2930,17 @@ class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
[(set QPR:$Vd, (TyQ (IntOp (TyD DPR:$Vn), (TyD DPR:$Vm))))]> {
let isCommutable = Commutable;
}
+
+// Same as above, but not predicated.
+class N3VLIntnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6,
+ bit op4, InstrItinClass itin, string OpcodeStr,
+ string Dt, ValueType ResTy, ValueType OpTy,
+ SDPatternOperator IntOp, bit Commutable>
+ : N3Vnp<op27_23, op21_20, op11_8, op6, op4,
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin, OpcodeStr, Dt,
+ ResTy, OpTy, IntOp, Commutable,
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>;
+
class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
@@ -3965,12 +4017,18 @@ defm VQADDu : N3VInt_QHSD<1, 0, 0b0000, 1, N3RegFrm,
IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
"vqadd", "u", int_arm_neon_vqaddu, 1>;
// VADDHN : Vector Add and Narrow Returning High Half (D = Q + Q)
-defm VADDHN : N3VNInt_HSD<0,1,0b0100,0, "vaddhn", "i",
- int_arm_neon_vaddhn, 1>;
+defm VADDHN : N3VNInt_HSD<0,1,0b0100,0, "vaddhn", "i", null_frag, 1>;
// VRADDHN : Vector Rounding Add and Narrow Returning High Half (D = Q + Q)
defm VRADDHN : N3VNInt_HSD<1,1,0b0100,0, "vraddhn", "i",
int_arm_neon_vraddhn, 1>;
+def : Pat<(v8i8 (trunc (NEONvshru (add (v8i16 QPR:$Vn), QPR:$Vm), 8))),
+ (VADDHNv8i8 QPR:$Vn, QPR:$Vm)>;
+def : Pat<(v4i16 (trunc (NEONvshru (add (v4i32 QPR:$Vn), QPR:$Vm), 16))),
+ (VADDHNv4i16 QPR:$Vn, QPR:$Vm)>;
+def : Pat<(v2i32 (trunc (NEONvshru (add (v2i64 QPR:$Vn), QPR:$Vm), 32))),
+ (VADDHNv2i32 QPR:$Vn, QPR:$Vm)>;
+
// Vector Multiply Operations.
// VMUL : Vector Multiply (integer, polynomial and floating-point)
@@ -4008,6 +4066,17 @@ def : Pat<(v4f32 (fmul (v4f32 QPR:$src1),
(DSubReg_i32_reg imm:$lane))),
(SubReg_i32_lane imm:$lane)))>;
+
+def : Pat<(v2f32 (fmul DPR:$Rn, (NEONvdup (f32 SPR:$Rm)))),
+ (VMULslfd DPR:$Rn,
+ (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$Rm, ssub_0),
+ (i32 0))>;
+def : Pat<(v4f32 (fmul QPR:$Rn, (NEONvdup (f32 SPR:$Rm)))),
+ (VMULslfq QPR:$Rn,
+ (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$Rm, ssub_0),
+ (i32 0))>;
+
+
// VQDMULH : Vector Saturating Doubling Multiply Returning High Half
defm VQDMULH : N3VInt_HS<0, 0, 0b1011, 0, N3RegFrm, IIC_VMULi16D, IIC_VMULi32D,
IIC_VMULi16Q, IIC_VMULi32Q,
@@ -4053,12 +4122,18 @@ def : Pat<(v4i32 (int_arm_neon_vqrdmulh (v4i32 QPR:$src1),
(SubReg_i32_lane imm:$lane)))>;
// VMULL : Vector Multiply Long (integer and polynomial) (Q = D * D)
-defm VMULLs : N3VL_QHS<0,1,0b1100,0, IIC_VMULi16D, IIC_VMULi32D,
- "vmull", "s", NEONvmulls, 1>;
-defm VMULLu : N3VL_QHS<1,1,0b1100,0, IIC_VMULi16D, IIC_VMULi32D,
- "vmull", "u", NEONvmullu, 1>;
-def VMULLp : N3VLInt<0, 1, 0b00, 0b1110, 0, IIC_VMULi16D, "vmull", "p8",
- v8i16, v8i8, int_arm_neon_vmullp, 1>;
+let PostEncoderMethod = "NEONThumb2DataIPostEncoder",
+ DecoderNamespace = "NEONData" in {
+ defm VMULLs : N3VL_QHS<0,1,0b1100,0, IIC_VMULi16D, IIC_VMULi32D,
+ "vmull", "s", NEONvmulls, 1>;
+ defm VMULLu : N3VL_QHS<1,1,0b1100,0, IIC_VMULi16D, IIC_VMULi32D,
+ "vmull", "u", NEONvmullu, 1>;
+ def VMULLp8 : N3VLInt<0, 1, 0b00, 0b1110, 0, IIC_VMULi16D, "vmull", "p8",
+ v8i16, v8i8, int_arm_neon_vmullp, 1>;
+ def VMULLp64 : N3VLIntnp<0b00101, 0b10, 0b1110, 0, 0, NoItinerary,
+ "vmull", "p64", v2i64, v1i64, int_arm_neon_vmullp, 1>,
+ Requires<[HasV8, HasCrypto]>;
+}
defm VMULLsls : N3VLSL_HS<0, 0b1010, IIC_VMULi16D, "vmull", "s", NEONvmulls>;
defm VMULLslu : N3VLSL_HS<1, 0b1010, IIC_VMULi16D, "vmull", "u", NEONvmullu>;
@@ -4125,8 +4200,27 @@ defm VMLALslu : N3VLMulOpSL_HS<1, 0b0010, "vmlal", "u", NEONvmullu, add>;
// VQDMLAL : Vector Saturating Doubling Multiply Accumulate Long (Q += D * D)
defm VQDMLAL : N3VLInt3_HS<0, 1, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
- "vqdmlal", "s", int_arm_neon_vqdmlal>;
-defm VQDMLALsl: N3VLInt3SL_HS<0, 0b0011, "vqdmlal", "s", int_arm_neon_vqdmlal>;
+ "vqdmlal", "s", null_frag>;
+defm VQDMLALsl: N3VLInt3SL_HS<0, 0b0011, "vqdmlal", "s", null_frag>;
+
+def : Pat<(v4i32 (int_arm_neon_vqadds (v4i32 QPR:$src1),
+ (v4i32 (int_arm_neon_vqdmull (v4i16 DPR:$Vn),
+ (v4i16 DPR:$Vm))))),
+ (VQDMLALv4i32 QPR:$src1, DPR:$Vn, DPR:$Vm)>;
+def : Pat<(v2i64 (int_arm_neon_vqadds (v2i64 QPR:$src1),
+ (v2i64 (int_arm_neon_vqdmull (v2i32 DPR:$Vn),
+ (v2i32 DPR:$Vm))))),
+ (VQDMLALv2i64 QPR:$src1, DPR:$Vn, DPR:$Vm)>;
+def : Pat<(v4i32 (int_arm_neon_vqadds (v4i32 QPR:$src1),
+ (v4i32 (int_arm_neon_vqdmull (v4i16 DPR:$Vn),
+ (v4i16 (NEONvduplane (v4i16 DPR_8:$Vm),
+ imm:$lane)))))),
+ (VQDMLALslv4i16 QPR:$src1, DPR:$Vn, DPR_8:$Vm, imm:$lane)>;
+def : Pat<(v2i64 (int_arm_neon_vqadds (v2i64 QPR:$src1),
+ (v2i64 (int_arm_neon_vqdmull (v2i32 DPR:$Vn),
+ (v2i32 (NEONvduplane (v2i32 DPR_VFP2:$Vm),
+ imm:$lane)))))),
+ (VQDMLALslv2i32 QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, imm:$lane)>;
// VMLS : Vector Multiply Subtract (integer and floating-point)
defm VMLS : N3VMulOp_QHS<1, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
@@ -4182,25 +4276,44 @@ defm VMLSLslu : N3VLMulOpSL_HS<1, 0b0110, "vmlsl", "u", NEONvmullu, sub>;
// VQDMLSL : Vector Saturating Doubling Multiply Subtract Long (Q -= D * D)
defm VQDMLSL : N3VLInt3_HS<0, 1, 0b1011, 0, IIC_VMACi16D, IIC_VMACi32D,
- "vqdmlsl", "s", int_arm_neon_vqdmlsl>;
-defm VQDMLSLsl: N3VLInt3SL_HS<0, 0b111, "vqdmlsl", "s", int_arm_neon_vqdmlsl>;
+ "vqdmlsl", "s", null_frag>;
+defm VQDMLSLsl: N3VLInt3SL_HS<0, 0b111, "vqdmlsl", "s", null_frag>;
+
+def : Pat<(v4i32 (int_arm_neon_vqsubs (v4i32 QPR:$src1),
+ (v4i32 (int_arm_neon_vqdmull (v4i16 DPR:$Vn),
+ (v4i16 DPR:$Vm))))),
+ (VQDMLSLv4i32 QPR:$src1, DPR:$Vn, DPR:$Vm)>;
+def : Pat<(v2i64 (int_arm_neon_vqsubs (v2i64 QPR:$src1),
+ (v2i64 (int_arm_neon_vqdmull (v2i32 DPR:$Vn),
+ (v2i32 DPR:$Vm))))),
+ (VQDMLSLv2i64 QPR:$src1, DPR:$Vn, DPR:$Vm)>;
+def : Pat<(v4i32 (int_arm_neon_vqsubs (v4i32 QPR:$src1),
+ (v4i32 (int_arm_neon_vqdmull (v4i16 DPR:$Vn),
+ (v4i16 (NEONvduplane (v4i16 DPR_8:$Vm),
+ imm:$lane)))))),
+ (VQDMLSLslv4i16 QPR:$src1, DPR:$Vn, DPR_8:$Vm, imm:$lane)>;
+def : Pat<(v2i64 (int_arm_neon_vqsubs (v2i64 QPR:$src1),
+ (v2i64 (int_arm_neon_vqdmull (v2i32 DPR:$Vn),
+ (v2i32 (NEONvduplane (v2i32 DPR_VFP2:$Vm),
+ imm:$lane)))))),
+ (VQDMLSLslv2i32 QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, imm:$lane)>;
// Fused Vector Multiply-Accumulate and Fused Multiply-Subtract Operations.
def VFMAfd : N3VDMulOp<0, 0, 0b00, 0b1100, 1, IIC_VFMACD, "vfma", "f32",
v2f32, fmul_su, fadd_mlx>,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasNEON,HasVFP4,UseFusedMAC]>;
def VFMAfq : N3VQMulOp<0, 0, 0b00, 0b1100, 1, IIC_VFMACQ, "vfma", "f32",
v4f32, fmul_su, fadd_mlx>,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasNEON,HasVFP4,UseFusedMAC]>;
// Fused Vector Multiply Subtract (floating-point)
def VFMSfd : N3VDMulOp<0, 0, 0b10, 0b1100, 1, IIC_VFMACD, "vfms", "f32",
v2f32, fmul_su, fsub_mlx>,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasNEON,HasVFP4,UseFusedMAC]>;
def VFMSfq : N3VQMulOp<0, 0, 0b10, 0b1100, 1, IIC_VFMACQ, "vfms", "f32",
v4f32, fmul_su, fsub_mlx>,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasNEON,HasVFP4,UseFusedMAC]>;
// Match @llvm.fma.* intrinsics
def : Pat<(v2f32 (fma DPR:$Vn, DPR:$Vm, DPR:$src1)),
@@ -4248,12 +4361,18 @@ defm VQSUBu : N3VInt_QHSD<1, 0, 0b0010, 1, N3RegFrm,
IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
"vqsub", "u", int_arm_neon_vqsubu, 0>;
// VSUBHN : Vector Subtract and Narrow Returning High Half (D = Q - Q)
-defm VSUBHN : N3VNInt_HSD<0,1,0b0110,0, "vsubhn", "i",
- int_arm_neon_vsubhn, 0>;
+defm VSUBHN : N3VNInt_HSD<0,1,0b0110,0, "vsubhn", "i", null_frag, 0>;
// VRSUBHN : Vector Rounding Subtract and Narrow Returning High Half (D=Q-Q)
defm VRSUBHN : N3VNInt_HSD<1,1,0b0110,0, "vrsubhn", "i",
int_arm_neon_vrsubhn, 0>;
+def : Pat<(v8i8 (trunc (NEONvshru (sub (v8i16 QPR:$Vn), QPR:$Vm), 8))),
+ (VSUBHNv8i8 QPR:$Vn, QPR:$Vm)>;
+def : Pat<(v4i16 (trunc (NEONvshru (sub (v4i32 QPR:$Vn), QPR:$Vm), 16))),
+ (VSUBHNv4i16 QPR:$Vn, QPR:$Vm)>;
+def : Pat<(v2i32 (trunc (NEONvshru (sub (v2i64 QPR:$Vn), QPR:$Vm), 32))),
+ (VSUBHNv2i32 QPR:$Vn, QPR:$Vm)>;
+
// Vector Comparisons.
// VCEQ : Vector Compare Equal
@@ -4659,6 +4778,18 @@ def VMAXfq : N3VQInt<0, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VBINQ,
"vmax", "f32",
v4f32, v4f32, int_arm_neon_vmaxs, 1>;
+// VMAXNM
+let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" in {
+ def VMAXNMND : N3VDIntnp<0b00110, 0b00, 0b1111, 0, 1,
+ N3RegFrm, NoItinerary, "vmaxnm", "f32",
+ v2f32, v2f32, int_arm_neon_vmaxnm, 1>,
+ Requires<[HasV8, HasNEON]>;
+ def VMAXNMNQ : N3VQIntnp<0b00110, 0b00, 0b1111, 1, 1,
+ N3RegFrm, NoItinerary, "vmaxnm", "f32",
+ v4f32, v4f32, int_arm_neon_vmaxnm, 1>,
+ Requires<[HasV8, HasNEON]>;
+}
+
// VMIN : Vector Minimum
defm VMINs : N3VInt_QHS<0, 0, 0b0110, 1, N3RegFrm,
IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
@@ -4673,6 +4804,18 @@ def VMINfq : N3VQInt<0, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VBINQ,
"vmin", "f32",
v4f32, v4f32, int_arm_neon_vmins, 1>;
+// VMINNM
+let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" in {
+ def VMINNMND : N3VDIntnp<0b00110, 0b10, 0b1111, 0, 1,
+ N3RegFrm, NoItinerary, "vminnm", "f32",
+ v2f32, v2f32, int_arm_neon_vminnm, 1>,
+ Requires<[HasV8, HasNEON]>;
+ def VMINNMNQ : N3VQIntnp<0b00110, 0b10, 0b1111, 1, 1,
+ N3RegFrm, NoItinerary, "vminnm", "f32",
+ v4f32, v4f32, int_arm_neon_vminnm, 1>,
+ Requires<[HasV8, HasNEON]>;
+}
+
// Vector Pairwise Operations.
// VPADD : Vector Pairwise Add
@@ -5015,10 +5158,10 @@ def VSWPq : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 1, 0,
// Vector Move Operations.
// VMOV : Vector Move (Register)
-def : InstAlias<"vmov${p} $Vd, $Vm",
- (VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>;
-def : InstAlias<"vmov${p} $Vd, $Vm",
- (VORRq QPR:$Vd, QPR:$Vm, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmov${p} $Vd, $Vm",
+ (VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmov${p} $Vd, $Vm",
+ (VORRq QPR:$Vd, QPR:$Vm, QPR:$Vm, pred:$p)>;
// VMOV : Vector Move (Immediate)
@@ -5386,6 +5529,26 @@ def VCVTs2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
v4f32, v4i32, uint_to_fp>;
+// VCVT{A, N, P, M}
+multiclass VCVT_FPI<string op, bits<3> op10_8, SDPatternOperator IntS,
+ SDPatternOperator IntU> {
+ let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" in {
+ def SD : N2VDIntnp<0b11, op10_8, 0, NoItinerary, !strconcat("vcvt", op),
+ "s32.f32", v2i32, v2f32, IntS>, Requires<[HasV8, HasNEON]>;
+ def SQ : N2VQIntnp<0b11, op10_8, 0, NoItinerary, !strconcat("vcvt", op),
+ "s32.f32", v4i32, v4f32, IntS>, Requires<[HasV8, HasNEON]>;
+ def UD : N2VDIntnp<0b11, op10_8, 1, NoItinerary, !strconcat("vcvt", op),
+ "u32.f32", v2i32, v2f32, IntU>, Requires<[HasV8, HasNEON]>;
+ def UQ : N2VQIntnp<0b11, op10_8, 1, NoItinerary, !strconcat("vcvt", op),
+ "u32.f32", v4i32, v4f32, IntU>, Requires<[HasV8, HasNEON]>;
+ }
+}
+
+defm VCVTAN : VCVT_FPI<"a", 0b000, int_arm_neon_vcvtas, int_arm_neon_vcvtau>;
+defm VCVTNN : VCVT_FPI<"n", 0b001, int_arm_neon_vcvtns, int_arm_neon_vcvtnu>;
+defm VCVTPN : VCVT_FPI<"p", 0b010, int_arm_neon_vcvtps, int_arm_neon_vcvtpu>;
+defm VCVTMN : VCVT_FPI<"m", 0b011, int_arm_neon_vcvtms, int_arm_neon_vcvtmu>;
+
// VCVT : Vector Convert Between Floating-Point and Fixed-Point.
let DecoderMethod = "DecodeVCVTD" in {
def VCVTf2xsd : N2VCvtD<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
@@ -5409,6 +5572,25 @@ def VCVTxu2fq : N2VCvtQ<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
}
+def : NEONInstAlias<"vcvt${p}.s32.f32 $Dd, $Dm, #0",
+ (VCVTf2sd DPR:$Dd, DPR:$Dm, pred:$p)>;
+def : NEONInstAlias<"vcvt${p}.u32.f32 $Dd, $Dm, #0",
+ (VCVTf2ud DPR:$Dd, DPR:$Dm, pred:$p)>;
+def : NEONInstAlias<"vcvt${p}.f32.s32 $Dd, $Dm, #0",
+ (VCVTs2fd DPR:$Dd, DPR:$Dm, pred:$p)>;
+def : NEONInstAlias<"vcvt${p}.f32.u32 $Dd, $Dm, #0",
+ (VCVTu2fd DPR:$Dd, DPR:$Dm, pred:$p)>;
+
+def : NEONInstAlias<"vcvt${p}.s32.f32 $Qd, $Qm, #0",
+ (VCVTf2sq QPR:$Qd, QPR:$Qm, pred:$p)>;
+def : NEONInstAlias<"vcvt${p}.u32.f32 $Qd, $Qm, #0",
+ (VCVTf2uq QPR:$Qd, QPR:$Qm, pred:$p)>;
+def : NEONInstAlias<"vcvt${p}.f32.s32 $Qd, $Qm, #0",
+ (VCVTs2fq QPR:$Qd, QPR:$Qm, pred:$p)>;
+def : NEONInstAlias<"vcvt${p}.f32.u32 $Qd, $Qm, #0",
+ (VCVTu2fq QPR:$Qd, QPR:$Qm, pred:$p)>;
+
+
// VCVT : Vector Convert Between Half-Precision and Single-Precision.
def VCVTf2h : N2VNInt<0b11, 0b11, 0b01, 0b10, 0b01100, 0, 0,
IIC_VUNAQ, "vcvt", "f16.f32",
@@ -5509,8 +5691,9 @@ class VEXTd<string OpcodeStr, string Dt, ValueType Ty, Operand immTy>
IIC_VEXTD, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
[(set DPR:$Vd, (Ty (NEONvext (Ty DPR:$Vn),
(Ty DPR:$Vm), imm:$index)))]> {
- bits<4> index;
- let Inst{11-8} = index{3-0};
+ bits<3> index;
+ let Inst{11} = 0b0;
+ let Inst{10-8} = index{2-0};
}
class VEXTq<string OpcodeStr, string Dt, ValueType Ty, Operand immTy>
@@ -5525,14 +5708,14 @@ class VEXTq<string OpcodeStr, string Dt, ValueType Ty, Operand immTy>
}
def VEXTd8 : VEXTd<"vext", "8", v8i8, imm0_7> {
- let Inst{11-8} = index{3-0};
+ let Inst{10-8} = index{2-0};
}
def VEXTd16 : VEXTd<"vext", "16", v4i16, imm0_3> {
- let Inst{11-9} = index{2-0};
+ let Inst{10-9} = index{1-0};
let Inst{8} = 0b0;
}
def VEXTd32 : VEXTd<"vext", "32", v2i32, imm0_1> {
- let Inst{11-10} = index{1-0};
+ let Inst{10} = index{0};
let Inst{9-8} = 0b00;
}
def : Pat<(v2f32 (NEONvext (v2f32 DPR:$Vn),
@@ -5657,6 +5840,77 @@ def VTBX4Pseudo
IIC_VTBX4, "$orig = $dst", []>;
} // DecoderMethod = "DecodeTBLInstruction"
+// VRINT : Vector Rounding
+multiclass VRINT_FPI<string op, bits<3> op9_7, SDPatternOperator Int> {
+ let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" in {
+ def D : N2VDIntnp<0b10, 0b100, 0, NoItinerary,
+ !strconcat("vrint", op), "f32",
+ v2f32, v2f32, Int>, Requires<[HasV8, HasNEON]> {
+ let Inst{9-7} = op9_7;
+ }
+ def Q : N2VQIntnp<0b10, 0b100, 0, NoItinerary,
+ !strconcat("vrint", op), "f32",
+ v4f32, v4f32, Int>, Requires<[HasV8, HasNEON]> {
+ let Inst{9-7} = op9_7;
+ }
+ }
+
+ def : NEONInstAlias<!strconcat("vrint", op, ".f32.f32\t$Dd, $Dm"),
+ (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm)>;
+ def : NEONInstAlias<!strconcat("vrint", op, ".f32.f32\t$Qd, $Qm"),
+ (!cast<Instruction>(NAME#"Q") QPR:$Qd, QPR:$Qm)>;
+}
+
+defm VRINTNN : VRINT_FPI<"n", 0b000, int_arm_neon_vrintn>;
+defm VRINTXN : VRINT_FPI<"x", 0b001, int_arm_neon_vrintx>;
+defm VRINTAN : VRINT_FPI<"a", 0b010, int_arm_neon_vrinta>;
+defm VRINTZN : VRINT_FPI<"z", 0b011, int_arm_neon_vrintz>;
+defm VRINTMN : VRINT_FPI<"m", 0b101, int_arm_neon_vrintm>;
+defm VRINTPN : VRINT_FPI<"p", 0b111, int_arm_neon_vrintp>;
+
+// Cryptography instructions
+let PostEncoderMethod = "NEONThumb2DataIPostEncoder",
+ DecoderNamespace = "v8Crypto" in {
+ class AES<string op, bit op7, bit op6, SDPatternOperator Int>
+ : N2VQIntXnp<0b00, 0b00, 0b011, op6, op7, NoItinerary,
+ !strconcat("aes", op), "8", v16i8, v16i8, Int>,
+ Requires<[HasV8, HasCrypto]>;
+ class AES2Op<string op, bit op7, bit op6, SDPatternOperator Int>
+ : N2VQIntX2np<0b00, 0b00, 0b011, op6, op7, NoItinerary,
+ !strconcat("aes", op), "8", v16i8, v16i8, Int>,
+ Requires<[HasV8, HasCrypto]>;
+ class N2SHA<string op, bits<2> op17_16, bits<3> op10_8, bit op7, bit op6,
+ SDPatternOperator Int>
+ : N2VQIntXnp<0b10, op17_16, op10_8, op6, op7, NoItinerary,
+ !strconcat("sha", op), "32", v4i32, v4i32, Int>,
+ Requires<[HasV8, HasCrypto]>;
+ class N2SHA2Op<string op, bits<2> op17_16, bits<3> op10_8, bit op7, bit op6,
+ SDPatternOperator Int>
+ : N2VQIntX2np<0b10, op17_16, op10_8, op6, op7, NoItinerary,
+ !strconcat("sha", op), "32", v4i32, v4i32, Int>,
+ Requires<[HasV8, HasCrypto]>;
+ class N3SHA3Op<string op, bits<5> op27_23, bits<2> op21_20, SDPatternOperator Int>
+ : N3VQInt3np<op27_23, op21_20, 0b1100, 1, 0, N3RegFrm, NoItinerary,
+ !strconcat("sha", op), "32", v4i32, v4i32, Int, 0>,
+ Requires<[HasV8, HasCrypto]>;
+}
+
+def AESD : AES2Op<"d", 0, 1, int_arm_neon_aesd>;
+def AESE : AES2Op<"e", 0, 0, int_arm_neon_aese>;
+def AESIMC : AES<"imc", 1, 1, int_arm_neon_aesimc>;
+def AESMC : AES<"mc", 1, 0, int_arm_neon_aesmc>;
+
+def SHA1H : N2SHA<"1h", 0b01, 0b010, 1, 1, int_arm_neon_sha1h>;
+def SHA1SU1 : N2SHA2Op<"1su1", 0b10, 0b011, 1, 0, int_arm_neon_sha1su1>;
+def SHA256SU0 : N2SHA2Op<"256su0", 0b10, 0b011, 1, 1, int_arm_neon_sha256su0>;
+def SHA1C : N3SHA3Op<"1c", 0b00100, 0b00, int_arm_neon_sha1c>;
+def SHA1M : N3SHA3Op<"1m", 0b00100, 0b10, int_arm_neon_sha1m>;
+def SHA1P : N3SHA3Op<"1p", 0b00100, 0b01, int_arm_neon_sha1p>;
+def SHA1SU0 : N3SHA3Op<"1su0", 0b00100, 0b11, int_arm_neon_sha1su0>;
+def SHA256H : N3SHA3Op<"256h", 0b00110, 0b00, int_arm_neon_sha256h>;
+def SHA256H2 : N3SHA3Op<"256h2", 0b00110, 0b01, int_arm_neon_sha256h2>;
+def SHA256SU1 : N3SHA3Op<"256su1", 0b00110, 0b10, int_arm_neon_sha256su1>;
+
//===----------------------------------------------------------------------===//
// NEON instructions for single-precision FP math
//===----------------------------------------------------------------------===//
@@ -6697,12 +6951,17 @@ def VST4qWB_register_Asm_32 :
(ins VecListFourQ:$list, addrmode6:$addr,
rGPR:$Rm, pred:$p)>;
-// VMOV takes an optional datatype suffix
+// VMOV/VMVN takes an optional datatype suffix
defm : NEONDTAnyInstAlias<"vmov${p}", "$Vd, $Vm",
(VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>;
defm : NEONDTAnyInstAlias<"vmov${p}", "$Vd, $Vm",
(VORRq QPR:$Vd, QPR:$Vm, QPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vmvn${p}", "$Vd, $Vm",
+ (VMVNd DPR:$Vd, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vmvn${p}", "$Vd, $Vm",
+ (VMVNq QPR:$Vd, QPR:$Vm, pred:$p)>;
+
// VCLT (register) is an assembler alias for VCGT w/ the operands reversed.
// D-register versions.
def : NEONInstAlias<"vcle${p}.s8 $Dd, $Dn, $Dm",
diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td
index ae7a5c00bd74..af5ef537b536 100644
--- a/lib/Target/ARM/ARMInstrThumb.td
+++ b/lib/Target/ARM/ARMInstrThumb.td
@@ -69,11 +69,6 @@ def thumb_immshifted_shamt : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(V, MVT::i32);
}]>;
-// ADR instruction labels.
-def t_adrlabel : Operand<i32> {
- let EncoderMethod = "getThumbAdrLabelOpValue";
-}
-
// Scaled 4 immediate.
def t_imm0_1020s4_asmoperand: AsmOperandClass { let Name = "Imm0_1020s4"; }
def t_imm0_1020s4 : Operand<i32> {
@@ -97,12 +92,34 @@ def t_imm0_508s4_neg : Operand<i32> {
// Define Thumb specific addressing modes.
+// unsigned 8-bit, 2-scaled memory offset
+class OperandUnsignedOffset_b8s2 : AsmOperandClass {
+ let Name = "UnsignedOffset_b8s2";
+ let PredicateMethod = "isUnsignedOffset<8, 2>";
+}
+
+def UnsignedOffset_b8s2 : OperandUnsignedOffset_b8s2;
+
+// thumb style PC relative operand. signed, 8 bits magnitude,
+// two bits shift. can be represented as either [pc, #imm], #imm,
+// or relocatable expression...
+def ThumbMemPC : AsmOperandClass {
+ let Name = "ThumbMemPC";
+}
+
let OperandType = "OPERAND_PCREL" in {
def t_brtarget : Operand<OtherVT> {
let EncoderMethod = "getThumbBRTargetOpValue";
let DecoderMethod = "DecodeThumbBROperand";
}
+// ADR instruction labels.
+def t_adrlabel : Operand<i32> {
+ let EncoderMethod = "getThumbAdrLabelOpValue";
+ let PrintMethod = "printAdrLabelOperand<2>";
+ let ParserMatchClass = UnsignedOffset_b8s2;
+}
+
def t_bcctarget : Operand<i32> {
let EncoderMethod = "getThumbBCCTargetOpValue";
let DecoderMethod = "DecodeThumbBCCTargetOperand";
@@ -122,6 +139,15 @@ def t_blxtarget : Operand<i32> {
let EncoderMethod = "getThumbBLXTargetOpValue";
let DecoderMethod = "DecodeThumbBLXOffset";
}
+
+// t_addrmode_pc := <label> => pc + imm8 * 4
+//
+def t_addrmode_pc : Operand<i32> {
+ let EncoderMethod = "getAddrModePCOpValue";
+ let DecoderMethod = "DecodeThumbAddrModePC";
+ let PrintMethod = "printThumbLdrLabelOperand";
+ let ParserMatchClass = ThumbMemPC;
+}
}
// t_addrmode_rr := reg + reg
@@ -218,14 +244,6 @@ def t_addrmode_sp : Operand<i32>,
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
}
-// t_addrmode_pc := <label> => pc + imm8 * 4
-//
-def t_addrmode_pc : Operand<i32> {
- let EncoderMethod = "getAddrModePCOpValue";
- let DecoderMethod = "DecodeThumbAddrModePC";
- let PrintMethod = "printThumbLdrLabelOperand";
-}
-
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions.
//
@@ -251,25 +269,26 @@ class T1SystemEncoding<bits<8> opc>
let Inst{7-0} = opc;
}
-def tNOP : T1pI<(outs), (ins), NoItinerary, "nop", "", []>,
- T1SystemEncoding<0x00>, // A8.6.110
- Requires<[IsThumb2]>;
-
-def tYIELD : T1pI<(outs), (ins), NoItinerary, "yield", "", []>,
- T1SystemEncoding<0x10>, // A8.6.410
- Requires<[IsThumb2]>;
-
-def tWFE : T1pI<(outs), (ins), NoItinerary, "wfe", "", []>,
- T1SystemEncoding<0x20>, // A8.6.408
- Requires<[IsThumb2]>;
+def tHINT : T1pI<(outs), (ins imm0_15:$imm), NoItinerary, "hint", "\t$imm", []>,
+ T1SystemEncoding<0x00>,
+ Requires<[IsThumb, HasV6M]> {
+ bits<4> imm;
+ let Inst{7-4} = imm;
+}
-def tWFI : T1pI<(outs), (ins), NoItinerary, "wfi", "", []>,
- T1SystemEncoding<0x30>, // A8.6.409
- Requires<[IsThumb2]>;
+class tHintAlias<string Asm, dag Result> : tInstAlias<Asm, Result> {
+ let Predicates = [IsThumb, HasV6M];
+}
-def tSEV : T1pI<(outs), (ins), NoItinerary, "sev", "", []>,
- T1SystemEncoding<0x40>, // A8.6.157
- Requires<[IsThumb2]>;
+def : tHintAlias<"nop$p", (tHINT 0, pred:$p)>; // A8.6.110
+def : tHintAlias<"yield$p", (tHINT 1, pred:$p)>; // A8.6.410
+def : tHintAlias<"wfe$p", (tHINT 2, pred:$p)>; // A8.6.408
+def : tHintAlias<"wfi$p", (tHINT 3, pred:$p)>; // A8.6.409
+def : tHintAlias<"sev$p", (tHINT 4, pred:$p)>; // A8.6.157
+def : tInstAlias<"sevl$p", (tHINT 5, pred:$p)> {
+ let Predicates = [IsThumb2, HasV8];
+}
+def : T2Pat<(int_arm_sevl), (tHINT 5)>;
// The imm operand $val can be used by a debugger to store more information
// about the breakpoint.
@@ -282,8 +301,15 @@ def tBKPT : T1I<(outs), (ins imm0_255:$val), NoItinerary, "bkpt\t$val",
let Inst{7-0} = val;
}
+def tHLT : T1I<(outs), (ins imm0_63:$val), NoItinerary, "hlt\t$val",
+ []>, T1Encoding<0b101110>, Requires<[IsThumb, HasV8]> {
+ let Inst{9-6} = 0b1010;
+ bits<6> val;
+ let Inst{5-0} = val;
+}
+
def tSETEND : T1I<(outs), (ins setend_op:$end), NoItinerary, "setend\t$end",
- []>, T1Encoding<0b101101> {
+ []>, T1Encoding<0b101101>, Deprecated<HasV8Ops> {
bits<1> end;
// A8.6.156
let Inst{9-5} = 0b10010;
@@ -310,7 +336,7 @@ def tCPS : T1I<(outs), (ins imod_op:$imod, iflags_op:$iflags),
let isNotDuplicable = 1, isCodeGenOnly = 1 in
def tPICADD : TIt<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp), IIC_iALUr, "",
[(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>,
- T1Special<{0,0,?,?}> {
+ T1Special<{0,0,?,?}>, Sched<[WriteALU]> {
// A8.6.6
bits<3> dst;
let Inst{6-3} = 0b1111; // Rm = pc
@@ -323,7 +349,7 @@ def tPICADD : TIt<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp), IIC_iALUr, "",
// probably because the instruction can be moved around.
def tADDrSPi : T1pI<(outs tGPR:$dst), (ins GPRsp:$sp, t_imm0_1020s4:$imm),
IIC_iALUi, "add", "\t$dst, $sp, $imm", []>,
- T1Encoding<{1,0,1,0,1,?}> {
+ T1Encoding<{1,0,1,0,1,?}>, Sched<[WriteALU]> {
// A6.2 & A8.6.8
bits<3> dst;
bits<8> imm;
@@ -335,7 +361,7 @@ def tADDrSPi : T1pI<(outs tGPR:$dst), (ins GPRsp:$sp, t_imm0_1020s4:$imm),
// ADD sp, sp, #<imm7>
def tADDspi : T1pIt<(outs GPRsp:$Rdn), (ins GPRsp:$Rn, t_imm0_508s4:$imm),
IIC_iALUi, "add", "\t$Rdn, $imm", []>,
- T1Misc<{0,0,0,0,0,?,?}> {
+ T1Misc<{0,0,0,0,0,?,?}>, Sched<[WriteALU]> {
// A6.2.5 & A8.6.8
bits<7> imm;
let Inst{6-0} = imm;
@@ -346,7 +372,7 @@ def tADDspi : T1pIt<(outs GPRsp:$Rdn), (ins GPRsp:$Rn, t_imm0_508s4:$imm),
// FIXME: The encoding and the ASM string don't match up.
def tSUBspi : T1pIt<(outs GPRsp:$Rdn), (ins GPRsp:$Rn, t_imm0_508s4:$imm),
IIC_iALUi, "sub", "\t$Rdn, $imm", []>,
- T1Misc<{0,0,0,0,1,?,?}> {
+ T1Misc<{0,0,0,0,1,?,?}>, Sched<[WriteALU]> {
// A6.2.5 & A8.6.214
bits<7> imm;
let Inst{6-0} = imm;
@@ -367,7 +393,7 @@ def : tInstAlias<"sub${p} sp, sp, $imm",
// ADD <Rm>, sp
def tADDrSP : T1pI<(outs GPR:$Rdn), (ins GPRsp:$sp, GPR:$Rn), IIC_iALUr,
"add", "\t$Rdn, $sp, $Rn", []>,
- T1Special<{0,0,?,?}> {
+ T1Special<{0,0,?,?}>, Sched<[WriteALU]> {
// A8.6.9 Encoding T1
bits<4> Rdn;
let Inst{7} = Rdn{3};
@@ -379,7 +405,7 @@ def tADDrSP : T1pI<(outs GPR:$Rdn), (ins GPRsp:$sp, GPR:$Rn), IIC_iALUr,
// ADD sp, <Rm>
def tADDspr : T1pIt<(outs GPRsp:$Rdn), (ins GPRsp:$Rn, GPR:$Rm), IIC_iALUr,
"add", "\t$Rdn, $Rm", []>,
- T1Special<{0,0,?,?}> {
+ T1Special<{0,0,?,?}>, Sched<[WriteALU]> {
// A8.6.9 Encoding T2
bits<4> Rm;
let Inst{7} = 1;
@@ -395,7 +421,7 @@ def tADDspr : T1pIt<(outs GPRsp:$Rdn), (ins GPRsp:$Rn, GPR:$Rm), IIC_iALUr,
// Indirect branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def tBX : TI<(outs), (ins GPR:$Rm, pred:$p), IIC_Br, "bx${p}\t$Rm", []>,
- T1Special<{1,1,0,?}> {
+ T1Special<{1,1,0,?}>, Sched<[WriteBr]> {
// A6.2.3 & A8.6.25
bits<4> Rm;
let Inst{6-3} = Rm;
@@ -406,12 +432,12 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
def tBX_RET : tPseudoExpand<(outs), (ins pred:$p), 2, IIC_Br,
- [(ARMretflag)], (tBX LR, pred:$p)>;
+ [(ARMretflag)], (tBX LR, pred:$p)>, Sched<[WriteBr]>;
// Alternative return instruction used by vararg functions.
def tBX_RET_vararg : tPseudoExpand<(outs), (ins tGPR:$Rm, pred:$p),
2, IIC_Br, [],
- (tBX GPR:$Rm, pred:$p)>;
+ (tBX GPR:$Rm, pred:$p)>, Sched<[WriteBr]>;
}
// All calls clobber the non-callee saved registers. SP is marked as a use to
@@ -424,7 +450,7 @@ let isCall = 1,
(outs), (ins pred:$p, t_bltarget:$func), IIC_Br,
"bl${p}\t$func",
[(ARMtcall tglobaladdr:$func)]>,
- Requires<[IsThumb]> {
+ Requires<[IsThumb]>, Sched<[WriteBrL]> {
bits<24> func;
let Inst{26} = func{23};
let Inst{25-16} = func{20-11};
@@ -438,7 +464,7 @@ let isCall = 1,
(outs), (ins pred:$p, t_blxtarget:$func), IIC_Br,
"blx${p}\t$func",
[(ARMcall tglobaladdr:$func)]>,
- Requires<[IsThumb, HasV5T]> {
+ Requires<[IsThumb, HasV5T]>, Sched<[WriteBrL]> {
bits<24> func;
let Inst{26} = func{23};
let Inst{25-16} = func{20-11};
@@ -453,7 +479,7 @@ let isCall = 1,
"blx${p}\t$func",
[(ARMtcall GPR:$func)]>,
Requires<[IsThumb, HasV5T]>,
- T1Special<{1,1,1,?}> { // A6.2.3 & A8.6.24;
+ T1Special<{1,1,1,?}>, Sched<[WriteBrL]> { // A6.2.3 & A8.6.24;
bits<4> func;
let Inst{6-3} = func;
let Inst{2-0} = 0b000;
@@ -463,29 +489,32 @@ let isCall = 1,
def tBX_CALL : tPseudoInst<(outs), (ins tGPR:$func),
4, IIC_Br,
[(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsThumb, IsThumb1Only]>;
+ Requires<[IsThumb, IsThumb1Only]>, Sched<[WriteBr]>;
}
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
let isPredicable = 1 in
def tB : T1pI<(outs), (ins t_brtarget:$target), IIC_Br,
"b", "\t$target", [(br bb:$target)]>,
- T1Encoding<{1,1,1,0,0,?}> {
+ T1Encoding<{1,1,1,0,0,?}>, Sched<[WriteBr]> {
bits<11> target;
let Inst{10-0} = target;
- }
+ let AsmMatchConverter = "cvtThumbBranches";
+ }
// Far jump
// Just a pseudo for a tBL instruction. Needed to let regalloc know about
// the clobber of LR.
let Defs = [LR] in
def tBfar : tPseudoExpand<(outs), (ins t_bltarget:$target, pred:$p),
- 4, IIC_Br, [], (tBL pred:$p, t_bltarget:$target)>;
+ 4, IIC_Br, [], (tBL pred:$p, t_bltarget:$target)>,
+ Sched<[WriteBrTbl]>;
def tBR_JTr : tPseudoInst<(outs),
(ins tGPR:$target, i32imm:$jt, i32imm:$id),
0, IIC_Br,
- [(ARMbrjt tGPR:$target, tjumptable:$jt, imm:$id)]> {
+ [(ARMbrjt tGPR:$target, tjumptable:$jt, imm:$id)]>,
+ Sched<[WriteBrTbl]> {
list<Predicate> Predicates = [IsThumb, IsThumb1Only];
}
}
@@ -496,13 +525,15 @@ let isBranch = 1, isTerminator = 1 in
def tBcc : T1I<(outs), (ins t_bcctarget:$target, pred:$p), IIC_Br,
"b${p}\t$target",
[/*(ARMbrcond bb:$target, imm:$cc)*/]>,
- T1BranchCond<{1,1,0,1}> {
+ T1BranchCond<{1,1,0,1}>, Sched<[WriteBr]> {
bits<4> p;
bits<8> target;
let Inst{11-8} = p;
let Inst{7-0} = target;
+ let AsmMatchConverter = "cvtThumbBranches";
}
+
// Tail calls
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
// IOS versions.
@@ -510,7 +541,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
def tTAILJMPr : tPseudoExpand<(outs), (ins tcGPR:$dst),
4, IIC_Br, [],
(tBX GPR:$dst, (ops 14, zero_reg))>,
- Requires<[IsThumb]>;
+ Requires<[IsThumb]>, Sched<[WriteBr]>;
}
// tTAILJMPd: IOS version uses a Thumb2 branch (no Thumb1 tail calls
// on IOS), so it's in ARMInstrThumb2.td.
@@ -520,7 +551,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
(ins t_brtarget:$dst, pred:$p),
4, IIC_Br, [],
(tB t_brtarget:$dst, pred:$p)>,
- Requires<[IsThumb, IsNotIOS]>;
+ Requires<[IsThumb, IsNotIOS]>, Sched<[WriteBr]>;
}
}
@@ -530,7 +561,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
// If Inst{11-8} == 0b1111 then SEE SVC
let isCall = 1, Uses = [SP] in
def tSVC : T1pI<(outs), (ins imm0_255:$imm), IIC_Br,
- "svc", "\t$imm", []>, Encoding16 {
+ "svc", "\t$imm", []>, Encoding16, Sched<[WriteBr]> {
bits<8> imm;
let Inst{15-12} = 0b1101;
let Inst{11-8} = 0b1111;
@@ -540,7 +571,7 @@ def tSVC : T1pI<(outs), (ins imm0_255:$imm), IIC_Br,
// The assembler uses 0xDEFE for a trap instruction.
let isBarrier = 1, isTerminator = 1 in
def tTRAP : TI<(outs), (ins), IIC_Br,
- "trap", [(trap)]>, Encoding16 {
+ "trap", [(trap)]>, Encoding16, Sched<[WriteBr]> {
let Inst = 0xdefe;
}
@@ -627,11 +658,9 @@ def tLDRspi : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_sp:$addr), IIC_iLoad_i,
let Inst{7-0} = addr;
}
-// Load tconstpool
-// FIXME: Use ldr.n to work around a darwin assembler bug.
-let canFoldAsLoad = 1, isReMaterializable = 1, isCodeGenOnly = 1 in
+let canFoldAsLoad = 1, isReMaterializable = 1 in
def tLDRpci : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i,
- "ldr", ".n\t$Rt, $addr",
+ "ldr", "\t$Rt, $addr",
[(set tGPR:$Rt, (load (ARMWrapper tconstpool:$addr)))]>,
T1Encoding<{0,1,0,0,1,?}> {
// A6.2 & A8.6.59
@@ -641,18 +670,6 @@ def tLDRpci : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i,
let Inst{7-0} = addr;
}
-// FIXME: Remove this entry when the above ldr.n workaround is fixed.
-// For assembly/disassembly use only.
-def tLDRpciASM : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i,
- "ldr", "\t$Rt, $addr", []>,
- T1Encoding<{0,1,0,0,1,?}> {
- // A6.2 & A8.6.59
- bits<3> Rt;
- bits<8> addr;
- let Inst{10-8} = Rt;
- let Inst{7-0} = addr;
-}
-
// A8.6.194 & A8.6.192
defm tSTR : thumb_st_rr_ri_enc<0b000, 0b0110, t_addrmode_rrs4,
t_addrmode_is4, AddrModeT1_4,
@@ -833,14 +850,15 @@ let isCommutable = 1, Uses = [CPSR] in
def tADC : // A8.6.2
T1sItDPEncode<0b0101, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm), IIC_iALUr,
"adc", "\t$Rdn, $Rm",
- [(set tGPR:$Rdn, (adde tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rdn, (adde tGPR:$Rn, tGPR:$Rm))]>, Sched<[WriteALU]>;
// Add immediate
def tADDi3 : // A8.6.4 T1
T1sIGenEncodeImm<0b01110, (outs tGPR:$Rd), (ins tGPR:$Rm, imm0_7:$imm3),
IIC_iALUi,
"add", "\t$Rd, $Rm, $imm3",
- [(set tGPR:$Rd, (add tGPR:$Rm, imm0_7:$imm3))]> {
+ [(set tGPR:$Rd, (add tGPR:$Rm, imm0_7:$imm3))]>,
+ Sched<[WriteALU]> {
bits<3> imm3;
let Inst{8-6} = imm3;
}
@@ -849,7 +867,8 @@ def tADDi8 : // A8.6.4 T2
T1sItGenEncodeImm<{1,1,0,?,?}, (outs tGPR:$Rdn),
(ins tGPR:$Rn, imm0_255:$imm8), IIC_iALUi,
"add", "\t$Rdn, $imm8",
- [(set tGPR:$Rdn, (add tGPR:$Rn, imm8_255:$imm8))]>;
+ [(set tGPR:$Rdn, (add tGPR:$Rn, imm8_255:$imm8))]>,
+ Sched<[WriteALU]>;
// Add register
let isCommutable = 1 in
@@ -857,12 +876,12 @@ def tADDrr : // A8.6.6 T1
T1sIGenEncode<0b01100, (outs tGPR:$Rd), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iALUr,
"add", "\t$Rd, $Rn, $Rm",
- [(set tGPR:$Rd, (add tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rd, (add tGPR:$Rn, tGPR:$Rm))]>, Sched<[WriteALU]>;
let neverHasSideEffects = 1 in
def tADDhirr : T1pIt<(outs GPR:$Rdn), (ins GPR:$Rn, GPR:$Rm), IIC_iALUr,
"add", "\t$Rdn, $Rm", []>,
- T1Special<{0,0,?,?}> {
+ T1Special<{0,0,?,?}>, Sched<[WriteALU]> {
// A8.6.6 T2
bits<4> Rdn;
bits<4> Rm;
@@ -877,14 +896,15 @@ def tAND : // A8.6.12
T1sItDPEncode<0b0000, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iBITr,
"and", "\t$Rdn, $Rm",
- [(set tGPR:$Rdn, (and tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rdn, (and tGPR:$Rn, tGPR:$Rm))]>, Sched<[WriteALU]>;
// ASR immediate
def tASRri : // A8.6.14
T1sIGenEncodeImm<{0,1,0,?,?}, (outs tGPR:$Rd), (ins tGPR:$Rm, imm_sr:$imm5),
IIC_iMOVsi,
"asr", "\t$Rd, $Rm, $imm5",
- [(set tGPR:$Rd, (sra tGPR:$Rm, (i32 imm_sr:$imm5)))]> {
+ [(set tGPR:$Rd, (sra tGPR:$Rm, (i32 imm_sr:$imm5)))]>,
+ Sched<[WriteALU]> {
bits<5> imm5;
let Inst{10-6} = imm5;
}
@@ -894,14 +914,15 @@ def tASRrr : // A8.6.15
T1sItDPEncode<0b0100, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iMOVsr,
"asr", "\t$Rdn, $Rm",
- [(set tGPR:$Rdn, (sra tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rdn, (sra tGPR:$Rn, tGPR:$Rm))]>, Sched<[WriteALU]>;
// BIC register
def tBIC : // A8.6.20
T1sItDPEncode<0b1110, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iBITr,
"bic", "\t$Rdn, $Rm",
- [(set tGPR:$Rdn, (and tGPR:$Rn, (not tGPR:$Rm)))]>;
+ [(set tGPR:$Rdn, (and tGPR:$Rn, (not tGPR:$Rm)))]>,
+ Sched<[WriteALU]>;
// CMN register
let isCompare = 1, Defs = [CPSR] in {
@@ -917,7 +938,7 @@ def tCMNz : // A8.6.33
T1pIDPEncode<0b1011, (outs), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iCMPr,
"cmn", "\t$Rn, $Rm",
- [(ARMcmpZ tGPR:$Rn, (ineg tGPR:$Rm))]>;
+ [(ARMcmpZ tGPR:$Rn, (ineg tGPR:$Rm))]>, Sched<[WriteCMP]>;
} // isCompare = 1, Defs = [CPSR]
@@ -926,7 +947,7 @@ let isCompare = 1, Defs = [CPSR] in {
def tCMPi8 : T1pI<(outs), (ins tGPR:$Rn, imm0_255:$imm8), IIC_iCMPi,
"cmp", "\t$Rn, $imm8",
[(ARMcmp tGPR:$Rn, imm0_255:$imm8)]>,
- T1General<{1,0,1,?,?}> {
+ T1General<{1,0,1,?,?}>, Sched<[WriteCMP]> {
// A8.6.35
bits<3> Rn;
bits<8> imm8;
@@ -939,11 +960,11 @@ def tCMPr : // A8.6.36 T1
T1pIDPEncode<0b1010, (outs), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iCMPr,
"cmp", "\t$Rn, $Rm",
- [(ARMcmp tGPR:$Rn, tGPR:$Rm)]>;
+ [(ARMcmp tGPR:$Rn, tGPR:$Rm)]>, Sched<[WriteCMP]>;
def tCMPhir : T1pI<(outs), (ins GPR:$Rn, GPR:$Rm), IIC_iCMPr,
"cmp", "\t$Rn, $Rm", []>,
- T1Special<{0,1,?,?}> {
+ T1Special<{0,1,?,?}>, Sched<[WriteCMP]> {
// A8.6.36 T2
bits<4> Rm;
bits<4> Rn;
@@ -960,14 +981,15 @@ def tEOR : // A8.6.45
T1sItDPEncode<0b0001, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iBITr,
"eor", "\t$Rdn, $Rm",
- [(set tGPR:$Rdn, (xor tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rdn, (xor tGPR:$Rn, tGPR:$Rm))]>, Sched<[WriteALU]>;
// LSL immediate
def tLSLri : // A8.6.88
T1sIGenEncodeImm<{0,0,0,?,?}, (outs tGPR:$Rd), (ins tGPR:$Rm, imm0_31:$imm5),
IIC_iMOVsi,
"lsl", "\t$Rd, $Rm, $imm5",
- [(set tGPR:$Rd, (shl tGPR:$Rm, (i32 imm:$imm5)))]> {
+ [(set tGPR:$Rd, (shl tGPR:$Rm, (i32 imm:$imm5)))]>,
+ Sched<[WriteALU]> {
bits<5> imm5;
let Inst{10-6} = imm5;
}
@@ -977,14 +999,15 @@ def tLSLrr : // A8.6.89
T1sItDPEncode<0b0010, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iMOVsr,
"lsl", "\t$Rdn, $Rm",
- [(set tGPR:$Rdn, (shl tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rdn, (shl tGPR:$Rn, tGPR:$Rm))]>, Sched<[WriteALU]>;
// LSR immediate
def tLSRri : // A8.6.90
T1sIGenEncodeImm<{0,0,1,?,?}, (outs tGPR:$Rd), (ins tGPR:$Rm, imm_sr:$imm5),
IIC_iMOVsi,
"lsr", "\t$Rd, $Rm, $imm5",
- [(set tGPR:$Rd, (srl tGPR:$Rm, (i32 imm_sr:$imm5)))]> {
+ [(set tGPR:$Rd, (srl tGPR:$Rm, (i32 imm_sr:$imm5)))]>,
+ Sched<[WriteALU]> {
bits<5> imm5;
let Inst{10-6} = imm5;
}
@@ -994,14 +1017,14 @@ def tLSRrr : // A8.6.91
T1sItDPEncode<0b0011, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iMOVsr,
"lsr", "\t$Rdn, $Rm",
- [(set tGPR:$Rdn, (srl tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rdn, (srl tGPR:$Rn, tGPR:$Rm))]>, Sched<[WriteALU]>;
// Move register
let isMoveImm = 1 in
def tMOVi8 : T1sI<(outs tGPR:$Rd), (ins imm0_255:$imm8), IIC_iMOVi,
"mov", "\t$Rd, $imm8",
[(set tGPR:$Rd, imm0_255:$imm8)]>,
- T1General<{1,0,0,?,?}> {
+ T1General<{1,0,0,?,?}>, Sched<[WriteALU]> {
// A8.6.96
bits<3> Rd;
bits<8> imm8;
@@ -1019,7 +1042,7 @@ let neverHasSideEffects = 1 in {
def tMOVr : Thumb1pI<(outs GPR:$Rd), (ins GPR:$Rm), AddrModeNone,
2, IIC_iMOVr,
"mov", "\t$Rd, $Rm", "", []>,
- T1Special<{1,0,?,?}> {
+ T1Special<{1,0,?,?}>, Sched<[WriteALU]> {
// A8.6.97
bits<4> Rd;
bits<4> Rm;
@@ -1029,7 +1052,7 @@ def tMOVr : Thumb1pI<(outs GPR:$Rd), (ins GPR:$Rm), AddrModeNone,
}
let Defs = [CPSR] in
def tMOVSr : T1I<(outs tGPR:$Rd), (ins tGPR:$Rm), IIC_iMOVr,
- "movs\t$Rd, $Rm", []>, Encoding16 {
+ "movs\t$Rd, $Rm", []>, Encoding16, Sched<[WriteALU]> {
// A8.6.97
bits<3> Rd;
bits<3> Rm;
@@ -1060,7 +1083,7 @@ def :tInstAlias<"mul${s}${p} $Rdm, $Rn", (tMUL tGPR:$Rdm, s_cc_out:$s, tGPR:$Rn,
def tMVN : // A8.6.107
T1sIDPEncode<0b1111, (outs tGPR:$Rd), (ins tGPR:$Rn), IIC_iMVNr,
"mvn", "\t$Rd, $Rn",
- [(set tGPR:$Rd, (not tGPR:$Rn))]>;
+ [(set tGPR:$Rd, (not tGPR:$Rn))]>, Sched<[WriteALU]>;
// Bitwise or register
let isCommutable = 1 in
@@ -1068,7 +1091,7 @@ def tORR : // A8.6.114
T1sItDPEncode<0b1100, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iBITr,
"orr", "\t$Rdn, $Rm",
- [(set tGPR:$Rdn, (or tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rdn, (or tGPR:$Rn, tGPR:$Rm))]>, Sched<[WriteALU]>;
// Swaps
def tREV : // A8.6.134
@@ -1076,35 +1099,36 @@ def tREV : // A8.6.134
IIC_iUNAr,
"rev", "\t$Rd, $Rm",
[(set tGPR:$Rd, (bswap tGPR:$Rm))]>,
- Requires<[IsThumb, IsThumb1Only, HasV6]>;
+ Requires<[IsThumb, IsThumb1Only, HasV6]>, Sched<[WriteALU]>;
def tREV16 : // A8.6.135
T1pIMiscEncode<{1,0,1,0,0,1,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
IIC_iUNAr,
"rev16", "\t$Rd, $Rm",
[(set tGPR:$Rd, (rotr (bswap tGPR:$Rm), (i32 16)))]>,
- Requires<[IsThumb, IsThumb1Only, HasV6]>;
+ Requires<[IsThumb, IsThumb1Only, HasV6]>, Sched<[WriteALU]>;
def tREVSH : // A8.6.136
T1pIMiscEncode<{1,0,1,0,1,1,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
IIC_iUNAr,
"revsh", "\t$Rd, $Rm",
[(set tGPR:$Rd, (sra (bswap tGPR:$Rm), (i32 16)))]>,
- Requires<[IsThumb, IsThumb1Only, HasV6]>;
+ Requires<[IsThumb, IsThumb1Only, HasV6]>, Sched<[WriteALU]>;
// Rotate right register
def tROR : // A8.6.139
T1sItDPEncode<0b0111, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iMOVsr,
"ror", "\t$Rdn, $Rm",
- [(set tGPR:$Rdn, (rotr tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rdn, (rotr tGPR:$Rn, tGPR:$Rm))]>,
+ Sched<[WriteALU]>;
// Negate register
def tRSB : // A8.6.141
T1sIDPEncode<0b1001, (outs tGPR:$Rd), (ins tGPR:$Rn),
IIC_iALUi,
"rsb", "\t$Rd, $Rn, #0",
- [(set tGPR:$Rd, (ineg tGPR:$Rn))]>;
+ [(set tGPR:$Rd, (ineg tGPR:$Rn))]>, Sched<[WriteALU]>;
// Subtract with carry register
let Uses = [CPSR] in
@@ -1112,14 +1136,16 @@ def tSBC : // A8.6.151
T1sItDPEncode<0b0110, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iALUr,
"sbc", "\t$Rdn, $Rm",
- [(set tGPR:$Rdn, (sube tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rdn, (sube tGPR:$Rn, tGPR:$Rm))]>,
+ Sched<[WriteALU]>;
// Subtract immediate
def tSUBi3 : // A8.6.210 T1
T1sIGenEncodeImm<0b01111, (outs tGPR:$Rd), (ins tGPR:$Rm, imm0_7:$imm3),
IIC_iALUi,
"sub", "\t$Rd, $Rm, $imm3",
- [(set tGPR:$Rd, (add tGPR:$Rm, imm0_7_neg:$imm3))]> {
+ [(set tGPR:$Rd, (add tGPR:$Rm, imm0_7_neg:$imm3))]>,
+ Sched<[WriteALU]> {
bits<3> imm3;
let Inst{8-6} = imm3;
}
@@ -1128,14 +1154,16 @@ def tSUBi8 : // A8.6.210 T2
T1sItGenEncodeImm<{1,1,1,?,?}, (outs tGPR:$Rdn),
(ins tGPR:$Rn, imm0_255:$imm8), IIC_iALUi,
"sub", "\t$Rdn, $imm8",
- [(set tGPR:$Rdn, (add tGPR:$Rn, imm8_255_neg:$imm8))]>;
+ [(set tGPR:$Rdn, (add tGPR:$Rn, imm8_255_neg:$imm8))]>,
+ Sched<[WriteALU]>;
// Subtract register
def tSUBrr : // A8.6.212
T1sIGenEncode<0b01101, (outs tGPR:$Rd), (ins tGPR:$Rn, tGPR:$Rm),
IIC_iALUr,
"sub", "\t$Rd, $Rn, $Rm",
- [(set tGPR:$Rd, (sub tGPR:$Rn, tGPR:$Rm))]>;
+ [(set tGPR:$Rd, (sub tGPR:$Rn, tGPR:$Rm))]>,
+ Sched<[WriteALU]>;
// Sign-extend byte
def tSXTB : // A8.6.222
@@ -1143,7 +1171,8 @@ def tSXTB : // A8.6.222
IIC_iUNAr,
"sxtb", "\t$Rd, $Rm",
[(set tGPR:$Rd, (sext_inreg tGPR:$Rm, i8))]>,
- Requires<[IsThumb, IsThumb1Only, HasV6]>;
+ Requires<[IsThumb, IsThumb1Only, HasV6]>,
+ Sched<[WriteALU]>;
// Sign-extend short
def tSXTH : // A8.6.224
@@ -1151,14 +1180,16 @@ def tSXTH : // A8.6.224
IIC_iUNAr,
"sxth", "\t$Rd, $Rm",
[(set tGPR:$Rd, (sext_inreg tGPR:$Rm, i16))]>,
- Requires<[IsThumb, IsThumb1Only, HasV6]>;
+ Requires<[IsThumb, IsThumb1Only, HasV6]>,
+ Sched<[WriteALU]>;
// Test
let isCompare = 1, isCommutable = 1, Defs = [CPSR] in
def tTST : // A8.6.230
T1pIDPEncode<0b1000, (outs), (ins tGPR:$Rn, tGPR:$Rm), IIC_iTSTr,
"tst", "\t$Rn, $Rm",
- [(ARMcmpZ (and_su tGPR:$Rn, tGPR:$Rm), 0)]>;
+ [(ARMcmpZ (and_su tGPR:$Rn, tGPR:$Rm), 0)]>,
+ Sched<[WriteALU]>;
// Zero-extend byte
def tUXTB : // A8.6.262
@@ -1166,7 +1197,8 @@ def tUXTB : // A8.6.262
IIC_iUNAr,
"uxtb", "\t$Rd, $Rm",
[(set tGPR:$Rd, (and tGPR:$Rm, 0xFF))]>,
- Requires<[IsThumb, IsThumb1Only, HasV6]>;
+ Requires<[IsThumb, IsThumb1Only, HasV6]>,
+ Sched<[WriteALU]>;
// Zero-extend short
def tUXTH : // A8.6.264
@@ -1174,22 +1206,22 @@ def tUXTH : // A8.6.264
IIC_iUNAr,
"uxth", "\t$Rd, $Rm",
[(set tGPR:$Rd, (and tGPR:$Rm, 0xFFFF))]>,
- Requires<[IsThumb, IsThumb1Only, HasV6]>;
+ Requires<[IsThumb, IsThumb1Only, HasV6]>, Sched<[WriteALU]>;
// Conditional move tMOVCCr - Used to implement the Thumb SELECT_CC operation.
// Expanded after instruction selection into a branch sequence.
let usesCustomInserter = 1 in // Expanded after instruction selection.
def tMOVCCr_pseudo :
- PseudoInst<(outs tGPR:$dst), (ins tGPR:$false, tGPR:$true, pred:$cc),
- NoItinerary,
- [/*(set tGPR:$dst, (ARMcmov tGPR:$false, tGPR:$true, imm:$cc))*/]>;
+ PseudoInst<(outs tGPR:$dst), (ins tGPR:$false, tGPR:$true, cmovpred:$p),
+ NoItinerary,
+ [(set tGPR:$dst, (ARMcmov tGPR:$false, tGPR:$true, cmovpred:$p))]>;
// tLEApcrel - Load a pc-relative address into a register without offending the
// assembler.
def tADR : T1I<(outs tGPR:$Rd), (ins t_adrlabel:$addr, pred:$p),
IIC_iALUi, "adr{$p}\t$Rd, $addr", []>,
- T1Encoding<{1,0,1,0,0,?}> {
+ T1Encoding<{1,0,1,0,0,?}>, Sched<[WriteALU]> {
bits<3> Rd;
bits<8> addr;
let Inst{10-8} = Rd;
@@ -1199,12 +1231,12 @@ def tADR : T1I<(outs tGPR:$Rd), (ins t_adrlabel:$addr, pred:$p),
let neverHasSideEffects = 1, isReMaterializable = 1 in
def tLEApcrel : tPseudoInst<(outs tGPR:$Rd), (ins i32imm:$label, pred:$p),
- 2, IIC_iALUi, []>;
+ 2, IIC_iALUi, []>, Sched<[WriteALU]>;
let hasSideEffects = 1 in
def tLEApcrelJT : tPseudoInst<(outs tGPR:$Rd),
(ins i32imm:$label, nohash_imm:$id, pred:$p),
- 2, IIC_iALUi, []>;
+ 2, IIC_iALUi, []>, Sched<[WriteALU]>;
//===----------------------------------------------------------------------===//
// TLS Instructions
@@ -1215,7 +1247,8 @@ def tLEApcrelJT : tPseudoInst<(outs tGPR:$Rd),
// complete with fixup for the aeabi_read_tp function.
let isCall = 1, Defs = [R0, R12, LR, CPSR], Uses = [SP] in
def tTPsoft : tPseudoInst<(outs), (ins), 4, IIC_Br,
- [(set R0, ARMthread_pointer)]>;
+ [(set R0, ARMthread_pointer)]>,
+ Sched<[WriteBr]>;
//===----------------------------------------------------------------------===//
// SJLJ Exception handling intrinsics
@@ -1381,13 +1414,13 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
hasExtraDefRegAllocReq = 1 in
def tPOP_RET : tPseudoExpand<(outs), (ins pred:$p, reglist:$regs, variable_ops),
2, IIC_iPop_Br, [],
- (tPOP pred:$p, reglist:$regs)>;
+ (tPOP pred:$p, reglist:$regs)>, Sched<[WriteBrL]>;
// Indirect branch using "mov pc, $Rm"
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def tBRIND : tPseudoExpand<(outs), (ins GPR:$Rm, pred:$p),
2, IIC_Br, [(brind GPR:$Rm)],
- (tMOVr PC, GPR:$Rm, pred:$p)>;
+ (tMOVr PC, GPR:$Rm, pred:$p)>, Sched<[WriteBr]>;
}
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index 4dacb86df4ce..48acffd3a64e 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -173,14 +173,13 @@ def t2ldr_pcrel_imm12 : Operand<i32> {
// ADR instruction labels.
def t2adrlabel : Operand<i32> {
let EncoderMethod = "getT2AdrLabelOpValue";
- let PrintMethod = "printAdrLabelOperand";
+ let PrintMethod = "printAdrLabelOperand<0>";
}
-
// t2addrmode_posimm8 := reg + imm8
def MemPosImm8OffsetAsmOperand : AsmOperandClass {let Name="MemPosImm8Offset";}
def t2addrmode_posimm8 : Operand<i32> {
- let PrintMethod = "printT2AddrModeImm8Operand";
+ let PrintMethod = "printT2AddrModeImm8Operand<false>";
let EncoderMethod = "getT2AddrModeImm8OpValue";
let DecoderMethod = "DecodeT2AddrModeImm8";
let ParserMatchClass = MemPosImm8OffsetAsmOperand;
@@ -191,7 +190,7 @@ def t2addrmode_posimm8 : Operand<i32> {
def MemNegImm8OffsetAsmOperand : AsmOperandClass {let Name="MemNegImm8Offset";}
def t2addrmode_negimm8 : Operand<i32>,
ComplexPattern<i32, 2, "SelectT2AddrModeImm8", []> {
- let PrintMethod = "printT2AddrModeImm8Operand";
+ let PrintMethod = "printT2AddrModeImm8Operand<false>";
let EncoderMethod = "getT2AddrModeImm8OpValue";
let DecoderMethod = "DecodeT2AddrModeImm8";
let ParserMatchClass = MemNegImm8OffsetAsmOperand;
@@ -200,15 +199,22 @@ def t2addrmode_negimm8 : Operand<i32>,
// t2addrmode_imm8 := reg +/- imm8
def MemImm8OffsetAsmOperand : AsmOperandClass { let Name = "MemImm8Offset"; }
-def t2addrmode_imm8 : Operand<i32>,
- ComplexPattern<i32, 2, "SelectT2AddrModeImm8", []> {
- let PrintMethod = "printT2AddrModeImm8Operand";
+class T2AddrMode_Imm8 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectT2AddrModeImm8", []> {
let EncoderMethod = "getT2AddrModeImm8OpValue";
let DecoderMethod = "DecodeT2AddrModeImm8";
let ParserMatchClass = MemImm8OffsetAsmOperand;
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
}
+def t2addrmode_imm8 : T2AddrMode_Imm8 {
+ let PrintMethod = "printT2AddrModeImm8Operand<false>";
+}
+
+def t2addrmode_imm8_pre : T2AddrMode_Imm8 {
+ let PrintMethod = "printT2AddrModeImm8Operand<true>";
+}
+
def t2am_imm8_offset : Operand<i32>,
ComplexPattern<i32, 1, "SelectT2AddrModeImm8Offset",
[], [SDNPWantRoot]> {
@@ -219,14 +225,21 @@ def t2am_imm8_offset : Operand<i32>,
// t2addrmode_imm8s4 := reg +/- (imm8 << 2)
def MemImm8s4OffsetAsmOperand : AsmOperandClass {let Name = "MemImm8s4Offset";}
-def t2addrmode_imm8s4 : Operand<i32> {
- let PrintMethod = "printT2AddrModeImm8s4Operand";
+class T2AddrMode_Imm8s4 : Operand<i32> {
let EncoderMethod = "getT2AddrModeImm8s4OpValue";
let DecoderMethod = "DecodeT2AddrModeImm8s4";
let ParserMatchClass = MemImm8s4OffsetAsmOperand;
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
}
+def t2addrmode_imm8s4 : T2AddrMode_Imm8s4 {
+ let PrintMethod = "printT2AddrModeImm8s4Operand<false>";
+}
+
+def t2addrmode_imm8s4_pre : T2AddrMode_Imm8s4 {
+ let PrintMethod = "printT2AddrModeImm8s4Operand<true>";
+}
+
def t2am_imm8s4_offset_asmoperand : AsmOperandClass { let Name = "Imm8s4"; }
def t2am_imm8s4_offset : Operand<i32> {
let PrintMethod = "printT2AddrModeImm8s4OffsetOperand";
@@ -238,7 +251,8 @@ def t2am_imm8s4_offset : Operand<i32> {
def MemImm0_1020s4OffsetAsmOperand : AsmOperandClass {
let Name = "MemImm0_1020s4Offset";
}
-def t2addrmode_imm0_1020s4 : Operand<i32> {
+def t2addrmode_imm0_1020s4 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectT2AddrModeExclusive"> {
let PrintMethod = "printT2AddrModeImm0_1020s4Operand";
let EncoderMethod = "getT2AddrModeImm0_1020s4OpValue";
let DecoderMethod = "DecodeT2AddrModeImm0_1020s4";
@@ -451,6 +465,18 @@ class T2ThreeReg<dag oops, dag iops, InstrItinClass itin,
let Inst{3-0} = Rm;
}
+class T2ThreeRegNoP<dag oops, dag iops, InstrItinClass itin,
+ string asm, list<dag> pattern>
+ : T2XI<oops, iops, itin, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{3-0} = Rm;
+}
+
class T2sThreeReg<dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: T2sI<oops, iops, itin, opc, asm, pattern> {
@@ -554,7 +580,8 @@ multiclass T2I_bin_irs<bits<4> opcod, string opc,
def ri : T2sTwoRegImm<
(outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), iii,
opc, "\t$Rd, $Rn, $imm",
- [(set rGPR:$Rd, (opnode rGPR:$Rn, t2_so_imm:$imm))]> {
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, t2_so_imm:$imm))]>,
+ Sched<[WriteALU, ReadALU]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
@@ -563,7 +590,8 @@ multiclass T2I_bin_irs<bits<4> opcod, string opc,
// register
def rr : T2sThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), iir,
opc, !strconcat(wide, "\t$Rd, $Rn, $Rm"),
- [(set rGPR:$Rd, (opnode rGPR:$Rn, rGPR:$Rm))]> {
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, rGPR:$Rm))]>,
+ Sched<[WriteALU, ReadALU, ReadALU]> {
let isCommutable = Commutable;
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
@@ -576,7 +604,8 @@ multiclass T2I_bin_irs<bits<4> opcod, string opc,
def rs : T2sTwoRegShiftedReg<
(outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_reg:$ShiftedRm), iis,
opc, !strconcat(wide, "\t$Rd, $Rn, $ShiftedRm"),
- [(set rGPR:$Rd, (opnode rGPR:$Rn, t2_so_reg:$ShiftedRm))]> {
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, t2_so_reg:$ShiftedRm))]>,
+ Sched<[WriteALUsi, ReadALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -635,7 +664,8 @@ multiclass T2I_rbin_irs<bits<4> opcod, string opc, PatFrag opnode> {
def ri : T2sTwoRegImm<
(outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), IIC_iALUi,
opc, ".w\t$Rd, $Rn, $imm",
- [(set rGPR:$Rd, (opnode t2_so_imm:$imm, rGPR:$Rn))]> {
+ [(set rGPR:$Rd, (opnode t2_so_imm:$imm, rGPR:$Rn))]>,
+ Sched<[WriteALU, ReadALU]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
@@ -645,7 +675,8 @@ multiclass T2I_rbin_irs<bits<4> opcod, string opc, PatFrag opnode> {
def rr : T2sThreeReg<
(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUr,
opc, "\t$Rd, $Rn, $Rm",
- [/* For disassembly only; pattern left blank */]> {
+ [/* For disassembly only; pattern left blank */]>,
+ Sched<[WriteALU, ReadALU, ReadALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -657,7 +688,8 @@ multiclass T2I_rbin_irs<bits<4> opcod, string opc, PatFrag opnode> {
def rs : T2sTwoRegShiftedReg<
(outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_reg:$ShiftedRm),
IIC_iALUsir, opc, "\t$Rd, $Rn, $ShiftedRm",
- [(set rGPR:$Rd, (opnode t2_so_reg:$ShiftedRm, rGPR:$Rn))]> {
+ [(set rGPR:$Rd, (opnode t2_so_reg:$ShiftedRm, rGPR:$Rn))]>,
+ Sched<[WriteALUsi, ReadALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -678,12 +710,14 @@ multiclass T2I_bin_s_irs<InstrItinClass iii, InstrItinClass iir,
(ins GPRnopc:$Rn, t2_so_imm:$imm, pred:$p),
4, iii,
[(set rGPR:$Rd, CPSR, (opnode GPRnopc:$Rn,
- t2_so_imm:$imm))]>;
+ t2_so_imm:$imm))]>,
+ Sched<[WriteALU, ReadALU]>;
// register
def rr : t2PseudoInst<(outs rGPR:$Rd), (ins GPRnopc:$Rn, rGPR:$Rm, pred:$p),
4, iir,
[(set rGPR:$Rd, CPSR, (opnode GPRnopc:$Rn,
- rGPR:$Rm))]> {
+ rGPR:$Rm))]>,
+ Sched<[WriteALU, ReadALU, ReadALU]> {
let isCommutable = Commutable;
}
// shifted register
@@ -691,7 +725,8 @@ multiclass T2I_bin_s_irs<InstrItinClass iii, InstrItinClass iir,
(ins GPRnopc:$Rn, t2_so_reg:$ShiftedRm, pred:$p),
4, iis,
[(set rGPR:$Rd, CPSR, (opnode GPRnopc:$Rn,
- t2_so_reg:$ShiftedRm))]>;
+ t2_so_reg:$ShiftedRm))]>,
+ Sched<[WriteALUsi, ReadALUsr]>;
}
}
@@ -704,13 +739,15 @@ multiclass T2I_rbin_s_is<PatFrag opnode> {
(ins rGPR:$Rn, t2_so_imm:$imm, pred:$p),
4, IIC_iALUi,
[(set rGPR:$Rd, CPSR, (opnode t2_so_imm:$imm,
- rGPR:$Rn))]>;
+ rGPR:$Rn))]>,
+ Sched<[WriteALU, ReadALU]>;
// shifted register
def rs : t2PseudoInst<(outs rGPR:$Rd),
(ins rGPR:$Rn, t2_so_reg:$ShiftedRm, pred:$p),
4, IIC_iALUsi,
[(set rGPR:$Rd, CPSR, (opnode t2_so_reg:$ShiftedRm,
- rGPR:$Rn))]>;
+ rGPR:$Rn))]>,
+ Sched<[WriteALUsi, ReadALU]>;
}
}
@@ -725,7 +762,8 @@ multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode,
def ri : T2sTwoRegImm<
(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, t2_so_imm:$imm), IIC_iALUi,
opc, ".w\t$Rd, $Rn, $imm",
- [(set GPRnopc:$Rd, (opnode GPRnopc:$Rn, t2_so_imm:$imm))]> {
+ [(set GPRnopc:$Rd, (opnode GPRnopc:$Rn, t2_so_imm:$imm))]>,
+ Sched<[WriteALU, ReadALU]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24} = 1;
@@ -737,7 +775,8 @@ multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode,
def ri12 : T2I<
(outs GPRnopc:$Rd), (ins GPR:$Rn, imm0_4095:$imm), IIC_iALUi,
!strconcat(opc, "w"), "\t$Rd, $Rn, $imm",
- [(set GPRnopc:$Rd, (opnode GPR:$Rn, imm0_4095:$imm))]> {
+ [(set GPRnopc:$Rd, (opnode GPR:$Rn, imm0_4095:$imm))]>,
+ Sched<[WriteALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<12> imm;
@@ -755,7 +794,8 @@ multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode,
// register
def rr : T2sThreeReg<(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, rGPR:$Rm),
IIC_iALUr, opc, ".w\t$Rd, $Rn, $Rm",
- [(set GPRnopc:$Rd, (opnode GPRnopc:$Rn, rGPR:$Rm))]> {
+ [(set GPRnopc:$Rd, (opnode GPRnopc:$Rn, rGPR:$Rm))]>,
+ Sched<[WriteALU, ReadALU, ReadALU]> {
let isCommutable = Commutable;
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
@@ -769,7 +809,8 @@ multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode,
def rs : T2sTwoRegShiftedReg<
(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, t2_so_reg:$ShiftedRm),
IIC_iALUsi, opc, ".w\t$Rd, $Rn, $ShiftedRm",
- [(set GPRnopc:$Rd, (opnode GPRnopc:$Rn, t2_so_reg:$ShiftedRm))]> {
+ [(set GPRnopc:$Rd, (opnode GPRnopc:$Rn, t2_so_reg:$ShiftedRm))]>,
+ Sched<[WriteALUsi, ReadALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24} = 1;
@@ -787,7 +828,7 @@ multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
def ri : T2sTwoRegImm<(outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm),
IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
[(set rGPR:$Rd, CPSR, (opnode rGPR:$Rn, t2_so_imm:$imm, CPSR))]>,
- Requires<[IsThumb2]> {
+ Requires<[IsThumb2]>, Sched<[WriteALU, ReadALU]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
@@ -797,7 +838,7 @@ multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
def rr : T2sThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUr,
opc, ".w\t$Rd, $Rn, $Rm",
[(set rGPR:$Rd, CPSR, (opnode rGPR:$Rn, rGPR:$Rm, CPSR))]>,
- Requires<[IsThumb2]> {
+ Requires<[IsThumb2]>, Sched<[WriteALU, ReadALU, ReadALU]> {
let isCommutable = Commutable;
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
@@ -811,7 +852,7 @@ multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
(outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_reg:$ShiftedRm),
IIC_iALUsi, opc, ".w\t$Rd, $Rn, $ShiftedRm",
[(set rGPR:$Rd, CPSR, (opnode rGPR:$Rn, t2_so_reg:$ShiftedRm, CPSR))]>,
- Requires<[IsThumb2]> {
+ Requires<[IsThumb2]>, Sched<[WriteALUsi, ReadALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -826,7 +867,8 @@ multiclass T2I_sh_ir<bits<2> opcod, string opc, Operand ty, PatFrag opnode> {
def ri : T2sTwoRegShiftImm<
(outs rGPR:$Rd), (ins rGPR:$Rm, ty:$imm), IIC_iMOVsi,
opc, ".w\t$Rd, $Rm, $imm",
- [(set rGPR:$Rd, (opnode rGPR:$Rm, (i32 ty:$imm)))]> {
+ [(set rGPR:$Rd, (opnode rGPR:$Rm, (i32 ty:$imm)))]>,
+ Sched<[WriteALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-21} = 0b010010;
let Inst{19-16} = 0b1111; // Rn
@@ -836,7 +878,8 @@ multiclass T2I_sh_ir<bits<2> opcod, string opc, Operand ty, PatFrag opnode> {
def rr : T2sThreeReg<
(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMOVsr,
opc, ".w\t$Rd, $Rn, $Rm",
- [(set rGPR:$Rd, (opnode rGPR:$Rn, rGPR:$Rm))]> {
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, rGPR:$Rm))]>,
+ Sched<[WriteALU]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-21} = opcod;
@@ -880,7 +923,7 @@ let isCompare = 1, Defs = [CPSR] in {
def ri : T2OneRegCmpImm<
(outs), (ins GPRnopc:$Rn, t2_so_imm:$imm), iii,
opc, ".w\t$Rn, $imm",
- [(opnode GPRnopc:$Rn, t2_so_imm:$imm)]> {
+ [(opnode GPRnopc:$Rn, t2_so_imm:$imm)]>, Sched<[WriteCMP]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
@@ -892,7 +935,7 @@ let isCompare = 1, Defs = [CPSR] in {
def rr : T2TwoRegCmp<
(outs), (ins GPRnopc:$Rn, rGPR:$Rm), iir,
opc, ".w\t$Rn, $Rm",
- [(opnode GPRnopc:$Rn, rGPR:$Rm)]> {
+ [(opnode GPRnopc:$Rn, rGPR:$Rm)]>, Sched<[WriteCMP]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -906,7 +949,8 @@ let isCompare = 1, Defs = [CPSR] in {
def rs : T2OneRegCmpShiftedReg<
(outs), (ins GPRnopc:$Rn, t2_so_reg:$ShiftedRm), iis,
opc, ".w\t$Rn, $ShiftedRm",
- [(opnode GPRnopc:$Rn, t2_so_reg:$ShiftedRm)]> {
+ [(opnode GPRnopc:$Rn, t2_so_reg:$ShiftedRm)]>,
+ Sched<[WriteCMPsi]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -941,6 +985,8 @@ multiclass T2I_ld<bit signed, bits<2> opcod, string opc,
let Inst{19-16} = addr{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm
+
+ let DecoderMethod = "DecodeT2LoadImm12";
}
def i8 : T2Ii8 <(outs target:$Rt), (ins t2addrmode_negimm8:$addr), iii,
opc, "\t$Rt, $addr",
@@ -961,6 +1007,8 @@ multiclass T2I_ld<bit signed, bits<2> opcod, string opc,
let Inst{9} = addr{8}; // U
let Inst{8} = 0; // The W bit.
let Inst{7-0} = addr{7-0}; // imm
+
+ let DecoderMethod = "DecodeT2LoadImm8";
}
def s : T2Iso <(outs target:$Rt), (ins t2addrmode_so_reg:$addr), iis,
opc, ".w\t$Rt, $addr",
@@ -993,14 +1041,18 @@ multiclass T2I_ld<bit signed, bits<2> opcod, string opc,
let Inst{31-27} = 0b11111;
let Inst{26-25} = 0b00;
let Inst{24} = signed;
- let Inst{23} = ?; // add = (U == '1')
let Inst{22-21} = opcod;
let Inst{20} = 1; // load
let Inst{19-16} = 0b1111; // Rn
+
bits<4> Rt;
- bits<12> addr;
let Inst{15-12} = Rt{3-0};
+
+ bits<13> addr;
+ let Inst{23} = addr{12}; // add = (U == '1')
let Inst{11-0} = addr{11-0};
+
+ let DecoderMethod = "DecodeT2LoadLabel";
}
}
@@ -1167,7 +1219,8 @@ class T2PCOneRegImm<dag oops, dag iops, InstrItinClass itin,
// assembler.
def t2ADR : T2PCOneRegImm<(outs rGPR:$Rd),
(ins t2adrlabel:$addr, pred:$p),
- IIC_iALUi, "adr{$p}.w\t$Rd, $addr", []> {
+ IIC_iALUi, "adr{$p}.w\t$Rd, $addr", []>,
+ Sched<[WriteALU, ReadALU]> {
let Inst{31-27} = 0b11110;
let Inst{25-24} = 0b10;
// Inst{23:21} = '11' (add = FALSE) or '00' (add = TRUE)
@@ -1190,12 +1243,12 @@ def t2ADR : T2PCOneRegImm<(outs rGPR:$Rd),
let neverHasSideEffects = 1, isReMaterializable = 1 in
def t2LEApcrel : t2PseudoInst<(outs rGPR:$Rd), (ins i32imm:$label, pred:$p),
- 4, IIC_iALUi, []>;
+ 4, IIC_iALUi, []>, Sched<[WriteALU, ReadALU]>;
let hasSideEffects = 1 in
def t2LEApcrelJT : t2PseudoInst<(outs rGPR:$Rd),
(ins i32imm:$label, nohash_imm:$id, pred:$p),
4, IIC_iALUi,
- []>;
+ []>, Sched<[WriteALU, ReadALU]>;
//===----------------------------------------------------------------------===//
@@ -1209,15 +1262,15 @@ defm t2LDR : T2I_ld<0, 0b10, "ldr", IIC_iLoad_i, IIC_iLoad_si, GPR,
// Loads with zero extension
defm t2LDRH : T2I_ld<0, 0b01, "ldrh", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
- rGPR, UnOpFrag<(zextloadi16 node:$Src)>>;
+ GPR, UnOpFrag<(zextloadi16 node:$Src)>>;
defm t2LDRB : T2I_ld<0, 0b00, "ldrb", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
- rGPR, UnOpFrag<(zextloadi8 node:$Src)>>;
+ GPR, UnOpFrag<(zextloadi8 node:$Src)>>;
// Loads with sign extension
defm t2LDRSH : T2I_ld<1, 0b01, "ldrsh", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
- rGPR, UnOpFrag<(sextloadi16 node:$Src)>>;
+ GPR, UnOpFrag<(sextloadi16 node:$Src)>>;
defm t2LDRSB : T2I_ld<1, 0b00, "ldrsb", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
- rGPR, UnOpFrag<(sextloadi8 node:$Src)>>;
+ GPR, UnOpFrag<(sextloadi8 node:$Src)>>;
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// Load doubleword
@@ -1275,12 +1328,9 @@ def : T2Pat<(extloadi16 (ARMWrapper tconstpool:$addr)),
let mayLoad = 1, neverHasSideEffects = 1 in {
def t2LDR_PRE : T2Ipreldst<0, 0b10, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb),
- (ins t2addrmode_imm8:$addr),
+ (ins t2addrmode_imm8_pre:$addr),
AddrModeT2_i8, IndexModePre, IIC_iLoad_iu,
- "ldr", "\t$Rt, $addr!", "$addr.base = $Rn_wb",
- []> {
- let AsmMatchConverter = "cvtLdWriteBackRegT2AddrModeImm8";
-}
+ "ldr", "\t$Rt, $addr!", "$addr.base = $Rn_wb", []>;
def t2LDR_POST : T2Ipostldst<0, 0b10, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$Rn, t2am_imm8_offset:$offset),
@@ -1288,48 +1338,42 @@ def t2LDR_POST : T2Ipostldst<0, 0b10, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
"ldr", "\t$Rt, $Rn$offset", "$Rn = $Rn_wb", []>;
def t2LDRB_PRE : T2Ipreldst<0, 0b00, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb),
- (ins t2addrmode_imm8:$addr),
+ (ins t2addrmode_imm8_pre:$addr),
AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu,
- "ldrb", "\t$Rt, $addr!", "$addr.base = $Rn_wb",
- []> {
- let AsmMatchConverter = "cvtLdWriteBackRegT2AddrModeImm8";
-}
+ "ldrb", "\t$Rt, $addr!", "$addr.base = $Rn_wb", []>;
+
def t2LDRB_POST : T2Ipostldst<0, 0b00, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$Rn, t2am_imm8_offset:$offset),
AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu,
"ldrb", "\t$Rt, $Rn$offset", "$Rn = $Rn_wb", []>;
def t2LDRH_PRE : T2Ipreldst<0, 0b01, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb),
- (ins t2addrmode_imm8:$addr),
+ (ins t2addrmode_imm8_pre:$addr),
AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu,
- "ldrh", "\t$Rt, $addr!", "$addr.base = $Rn_wb",
- []> {
- let AsmMatchConverter = "cvtLdWriteBackRegT2AddrModeImm8";
-}
+ "ldrh", "\t$Rt, $addr!", "$addr.base = $Rn_wb", []>;
+
def t2LDRH_POST : T2Ipostldst<0, 0b01, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$Rn, t2am_imm8_offset:$offset),
AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu,
"ldrh", "\t$Rt, $Rn$offset", "$Rn = $Rn_wb", []>;
def t2LDRSB_PRE : T2Ipreldst<1, 0b00, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb),
- (ins t2addrmode_imm8:$addr),
+ (ins t2addrmode_imm8_pre:$addr),
AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu,
"ldrsb", "\t$Rt, $addr!", "$addr.base = $Rn_wb",
- []> {
- let AsmMatchConverter = "cvtLdWriteBackRegT2AddrModeImm8";
-}
+ []>;
+
def t2LDRSB_POST : T2Ipostldst<1, 0b00, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$Rn, t2am_imm8_offset:$offset),
AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu,
"ldrsb", "\t$Rt, $Rn$offset", "$Rn = $Rn_wb", []>;
def t2LDRSH_PRE : T2Ipreldst<1, 0b01, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb),
- (ins t2addrmode_imm8:$addr),
+ (ins t2addrmode_imm8_pre:$addr),
AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu,
"ldrsh", "\t$Rt, $addr!", "$addr.base = $Rn_wb",
- []> {
- let AsmMatchConverter = "cvtLdWriteBackRegT2AddrModeImm8";
-}
+ []>;
+
def t2LDRSH_POST : T2Ipostldst<1, 0b01, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$Rn, t2am_imm8_offset:$offset),
AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu,
@@ -1354,6 +1398,8 @@ class T2IldT<bit signed, bits<2> type, string opc, InstrItinClass ii>
let Inst{11} = 1;
let Inst{10-8} = 0b110; // PUW.
let Inst{7-0} = addr{7-0};
+
+ let DecoderMethod = "DecodeT2LoadT";
}
def t2LDRT : T2IldT<0, 0b10, "ldrt", IIC_iLoad_i>;
@@ -1362,6 +1408,32 @@ def t2LDRHT : T2IldT<0, 0b01, "ldrht", IIC_iLoad_bh_i>;
def t2LDRSBT : T2IldT<1, 0b00, "ldrsbt", IIC_iLoad_bh_i>;
def t2LDRSHT : T2IldT<1, 0b01, "ldrsht", IIC_iLoad_bh_i>;
+class T2Ildacq<bits<4> bits23_20, bits<2> bit54, dag oops, dag iops,
+ string opc, string asm, list<dag> pattern>
+ : Thumb2I<oops, iops, AddrModeNone, 4, NoItinerary,
+ opc, asm, "", pattern>, Requires<[IsThumb, HasV8]> {
+ bits<4> Rt;
+ bits<4> addr;
+
+ let Inst{31-27} = 0b11101;
+ let Inst{26-24} = 0b000;
+ let Inst{23-20} = bits23_20;
+ let Inst{11-6} = 0b111110;
+ let Inst{5-4} = bit54;
+ let Inst{3-0} = 0b1111;
+
+ // Encode instruction operands
+ let Inst{19-16} = addr;
+ let Inst{15-12} = Rt;
+}
+
+def t2LDA : T2Ildacq<0b1101, 0b10, (outs rGPR:$Rt),
+ (ins addr_offset_none:$addr), "lda", "\t$Rt, $addr", []>;
+def t2LDAB : T2Ildacq<0b1101, 0b00, (outs rGPR:$Rt),
+ (ins addr_offset_none:$addr), "ldab", "\t$Rt, $addr", []>;
+def t2LDAH : T2Ildacq<0b1101, 0b01, (outs rGPR:$Rt),
+ (ins addr_offset_none:$addr), "ldah", "\t$Rt, $addr", []>;
+
// Store
defm t2STR :T2I_st<0b10,"str", IIC_iStore_i, IIC_iStore_si, GPR,
BinOpFrag<(store node:$LHS, node:$RHS)>>;
@@ -1380,27 +1452,22 @@ def t2STRDi8 : T2Ii8s4<1, 0, 0, (outs),
let mayStore = 1, neverHasSideEffects = 1 in {
def t2STR_PRE : T2Ipreldst<0, 0b10, 0, 1, (outs GPRnopc:$Rn_wb),
- (ins GPRnopc:$Rt, t2addrmode_imm8:$addr),
+ (ins GPRnopc:$Rt, t2addrmode_imm8_pre:$addr),
AddrModeT2_i8, IndexModePre, IIC_iStore_iu,
"str", "\t$Rt, $addr!",
- "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> {
- let AsmMatchConverter = "cvtStWriteBackRegT2AddrModeImm8";
-}
+ "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []>;
+
def t2STRH_PRE : T2Ipreldst<0, 0b01, 0, 1, (outs GPRnopc:$Rn_wb),
- (ins rGPR:$Rt, t2addrmode_imm8:$addr),
+ (ins rGPR:$Rt, t2addrmode_imm8_pre:$addr),
AddrModeT2_i8, IndexModePre, IIC_iStore_iu,
"strh", "\t$Rt, $addr!",
- "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> {
- let AsmMatchConverter = "cvtStWriteBackRegT2AddrModeImm8";
-}
+ "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []>;
def t2STRB_PRE : T2Ipreldst<0, 0b00, 0, 1, (outs GPRnopc:$Rn_wb),
- (ins rGPR:$Rt, t2addrmode_imm8:$addr),
+ (ins rGPR:$Rt, t2addrmode_imm8_pre:$addr),
AddrModeT2_i8, IndexModePre, IIC_iStore_bh_iu,
"strb", "\t$Rt, $addr!",
- "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> {
- let AsmMatchConverter = "cvtStWriteBackRegT2AddrModeImm8";
-}
+ "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []>;
} // mayStore = 1, neverHasSideEffects = 1
def t2STR_POST : T2Ipostldst<0, 0b10, 0, 0, (outs GPRnopc:$Rn_wb),
@@ -1487,9 +1554,8 @@ def t2STRHT : T2IstT<0b01, "strht", IIC_iStore_bh_i>;
// For disassembly only.
def t2LDRD_PRE : T2Ii8s4<1, 1, 1, (outs rGPR:$Rt, rGPR:$Rt2, GPR:$wb),
- (ins t2addrmode_imm8s4:$addr), IIC_iLoad_d_ru,
+ (ins t2addrmode_imm8s4_pre:$addr), IIC_iLoad_d_ru,
"ldrd", "\t$Rt, $Rt2, $addr!", "$addr.base = $wb", []> {
- let AsmMatchConverter = "cvtT2LdrdPre";
let DecoderMethod = "DecodeT2LDRDPreInstruction";
}
@@ -1499,10 +1565,9 @@ def t2LDRD_POST : T2Ii8s4post<0, 1, 1, (outs rGPR:$Rt, rGPR:$Rt2, GPR:$wb),
"$addr.base = $wb", []>;
def t2STRD_PRE : T2Ii8s4<1, 1, 0, (outs GPR:$wb),
- (ins rGPR:$Rt, rGPR:$Rt2, t2addrmode_imm8s4:$addr),
+ (ins rGPR:$Rt, rGPR:$Rt2, t2addrmode_imm8s4_pre:$addr),
IIC_iStore_d_ru, "strd", "\t$Rt, $Rt2, $addr!",
"$addr.base = $wb", []> {
- let AsmMatchConverter = "cvtT2StrdPre";
let DecoderMethod = "DecodeT2STRDPreInstruction";
}
@@ -1512,6 +1577,31 @@ def t2STRD_POST : T2Ii8s4post<0, 1, 0, (outs GPR:$wb),
IIC_iStore_d_ru, "strd", "\t$Rt, $Rt2, $addr$imm",
"$addr.base = $wb", []>;
+class T2Istrrel<bits<2> bit54, dag oops, dag iops,
+ string opc, string asm, list<dag> pattern>
+ : Thumb2I<oops, iops, AddrModeNone, 4, NoItinerary, opc,
+ asm, "", pattern>, Requires<[IsThumb, HasV8]> {
+ bits<4> Rt;
+ bits<4> addr;
+
+ let Inst{31-27} = 0b11101;
+ let Inst{26-20} = 0b0001100;
+ let Inst{11-6} = 0b111110;
+ let Inst{5-4} = bit54;
+ let Inst{3-0} = 0b1111;
+
+ // Encode instruction operands
+ let Inst{19-16} = addr;
+ let Inst{15-12} = Rt;
+}
+
+def t2STL : T2Istrrel<0b10, (outs), (ins rGPR:$Rt, addr_offset_none:$addr),
+ "stl", "\t$Rt, $addr", []>;
+def t2STLB : T2Istrrel<0b00, (outs), (ins rGPR:$Rt, addr_offset_none:$addr),
+ "stlb", "\t$Rt, $addr", []>;
+def t2STLH : T2Istrrel<0b01, (outs), (ins rGPR:$Rt, addr_offset_none:$addr),
+ "stlh", "\t$Rt, $addr", []>;
+
// T2Ipl (Preload Data/Instruction) signals the memory system of possible future
// data/instruction access.
// instr_write is inverted for Thumb mode: (prefetch 3) -> (preload 0),
@@ -1520,24 +1610,27 @@ multiclass T2Ipl<bits<1> write, bits<1> instr, string opc> {
def i12 : T2Ii12<(outs), (ins t2addrmode_imm12:$addr), IIC_Preload, opc,
"\t$addr",
- [(ARMPreload t2addrmode_imm12:$addr, (i32 write), (i32 instr))]> {
+ [(ARMPreload t2addrmode_imm12:$addr, (i32 write), (i32 instr))]>,
+ Sched<[WritePreLd]> {
let Inst{31-25} = 0b1111100;
let Inst{24} = instr;
+ let Inst{23} = 1;
let Inst{22} = 0;
let Inst{21} = write;
let Inst{20} = 1;
let Inst{15-12} = 0b1111;
bits<17> addr;
- let addr{12} = 1; // add = TRUE
let Inst{19-16} = addr{16-13}; // Rn
- let Inst{23} = addr{12}; // U
let Inst{11-0} = addr{11-0}; // imm12
+
+ let DecoderMethod = "DecodeT2LoadImm12";
}
def i8 : T2Ii8<(outs), (ins t2addrmode_negimm8:$addr), IIC_Preload, opc,
"\t$addr",
- [(ARMPreload t2addrmode_negimm8:$addr, (i32 write), (i32 instr))]> {
+ [(ARMPreload t2addrmode_negimm8:$addr, (i32 write), (i32 instr))]>,
+ Sched<[WritePreLd]> {
let Inst{31-25} = 0b1111100;
let Inst{24} = instr;
let Inst{23} = 0; // U = 0
@@ -1550,11 +1643,14 @@ multiclass T2Ipl<bits<1> write, bits<1> instr, string opc> {
bits<13> addr;
let Inst{19-16} = addr{12-9}; // Rn
let Inst{7-0} = addr{7-0}; // imm8
+
+ let DecoderMethod = "DecodeT2LoadImm8";
}
def s : T2Iso<(outs), (ins t2addrmode_so_reg:$addr), IIC_Preload, opc,
"\t$addr",
- [(ARMPreload t2addrmode_so_reg:$addr, (i32 write), (i32 instr))]> {
+ [(ARMPreload t2addrmode_so_reg:$addr, (i32 write), (i32 instr))]>,
+ Sched<[WritePreLd]> {
let Inst{31-25} = 0b1111100;
let Inst{24} = instr;
let Inst{23} = 0; // add = TRUE for T1
@@ -1562,7 +1658,7 @@ multiclass T2Ipl<bits<1> write, bits<1> instr, string opc> {
let Inst{21} = write;
let Inst{20} = 1;
let Inst{15-12} = 0b1111;
- let Inst{11-6} = 0000000;
+ let Inst{11-6} = 0b000000;
bits<10> addr;
let Inst{19-16} = addr{9-6}; // Rn
@@ -1571,15 +1667,33 @@ multiclass T2Ipl<bits<1> write, bits<1> instr, string opc> {
let DecoderMethod = "DecodeT2LoadShift";
}
- // FIXME: We should have a separate 'pci' variant here. As-is we represent
- // it via the i12 variant, which it's related to, but that means we can
- // represent negative immediates, which aren't legal for anything except
- // the 'pci' case (Rn == 15).
}
-defm t2PLD : T2Ipl<0, 0, "pld">, Requires<[IsThumb2]>;
-defm t2PLDW : T2Ipl<1, 0, "pldw">, Requires<[IsThumb2,HasV7,HasMP]>;
-defm t2PLI : T2Ipl<0, 1, "pli">, Requires<[IsThumb2,HasV7]>;
+defm t2PLD : T2Ipl<0, 0, "pld">, Requires<[IsThumb2]>;
+defm t2PLDW : T2Ipl<1, 0, "pldw">, Requires<[IsThumb2,HasV7,HasMP]>;
+defm t2PLI : T2Ipl<0, 1, "pli">, Requires<[IsThumb2,HasV7]>;
+
+// The pci variant is very similar to i12, but supports negative offsets
+// from the PC. Only PLD and PLI have pci variants (not PLDW).
+class T2Iplpci<bits<1> inst, string opc> : T2Iso<(outs), (ins t2ldrlabel:$addr),
+ IIC_Preload, opc, "\t$addr",
+ [(ARMPreload (ARMWrapper tconstpool:$addr),
+ (i32 0), (i32 inst))]>, Sched<[WritePreLd]> {
+ let Inst{31-25} = 0b1111100;
+ let Inst{24} = inst;
+ let Inst{22-20} = 0b001;
+ let Inst{19-16} = 0b1111;
+ let Inst{15-12} = 0b1111;
+
+ bits<13> addr;
+ let Inst{23} = addr{12}; // add = (U == '1')
+ let Inst{11-0} = addr{11-0}; // imm12
+
+ let DecoderMethod = "DecodeT2LoadLabel";
+}
+
+def t2PLDpci : T2Iplpci<0, "pld">, Requires<[IsThumb2]>;
+def t2PLIpci : T2Iplpci<1, "pli">, Requires<[IsThumb2,HasV7]>;
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
@@ -1743,7 +1857,7 @@ defm t2STM : thumb2_st_mult<"stm", IIC_iStore_m, IIC_iStore_mu, 0>;
let neverHasSideEffects = 1 in
def t2MOVr : T2sTwoReg<(outs GPRnopc:$Rd), (ins GPR:$Rm), IIC_iMOVr,
- "mov", ".w\t$Rd, $Rm", []> {
+ "mov", ".w\t$Rd, $Rm", []>, Sched<[WriteALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b0010;
@@ -1763,7 +1877,7 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
AddedComplexity = 1 in
def t2MOVi : T2sOneRegImm<(outs rGPR:$Rd), (ins t2_so_imm:$imm), IIC_iMOVi,
"mov", ".w\t$Rd, $imm",
- [(set rGPR:$Rd, t2_so_imm:$imm)]> {
+ [(set rGPR:$Rd, t2_so_imm:$imm)]>, Sched<[WriteALU]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = 0b0010;
@@ -1786,7 +1900,7 @@ def : t2InstAlias<"mov${p} $Rd, $imm", (t2MOVi rGPR:$Rd, t2_so_imm:$imm,
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
def t2MOVi16 : T2I<(outs rGPR:$Rd), (ins imm0_65535_expr:$imm), IIC_iMOVi,
"movw", "\t$Rd, $imm",
- [(set rGPR:$Rd, imm0_65535:$imm)]> {
+ [(set rGPR:$Rd, imm0_65535:$imm)]>, Sched<[WriteALU]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 1;
let Inst{24-21} = 0b0010;
@@ -1804,6 +1918,9 @@ def t2MOVi16 : T2I<(outs rGPR:$Rd), (ins imm0_65535_expr:$imm), IIC_iMOVi,
let DecoderMethod = "DecodeT2MOVTWInstruction";
}
+def : t2InstAlias<"mov${p} $Rd, $imm",
+ (t2MOVi16 rGPR:$Rd, imm256_65535_expr:$imm, pred:$p)>;
+
def t2MOVi16_ga_pcrel : PseudoInst<(outs rGPR:$Rd),
(ins i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
@@ -1812,7 +1929,8 @@ def t2MOVTi16 : T2I<(outs rGPR:$Rd),
(ins rGPR:$src, imm0_65535_expr:$imm), IIC_iMOVi,
"movt", "\t$Rd, $imm",
[(set rGPR:$Rd,
- (or (and rGPR:$src, 0xffff), lo16AllZero:$imm))]> {
+ (or (and rGPR:$src, 0xffff), lo16AllZero:$imm))]>,
+ Sched<[WriteALU]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 1;
let Inst{24-21} = 0b0110;
@@ -1831,7 +1949,8 @@ def t2MOVTi16 : T2I<(outs rGPR:$Rd),
}
def t2MOVTi16_ga_pcrel : PseudoInst<(outs rGPR:$Rd),
- (ins rGPR:$src, i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
+ (ins rGPR:$src, i32imm:$addr, pclabel:$id), IIC_iMOVi, []>,
+ Sched<[WriteALU]>;
} // Constraints
def : T2Pat<(or rGPR:$src, 0xffff0000), (t2MOVTi16 rGPR:$src, 0xffff)>;
@@ -2171,7 +2290,7 @@ def : T2Pat<(rotr rGPR:$lhs, (and rGPR:$rhs, lo5AllOne)),
let Uses = [CPSR] in {
def t2RRX : T2sTwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iMOVsi,
"rrx", "\t$Rd, $Rm",
- [(set rGPR:$Rd, (ARMrrx rGPR:$Rm))]> {
+ [(set rGPR:$Rd, (ARMrrx rGPR:$Rm))]>, Sched<[WriteALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b0010;
@@ -2185,7 +2304,8 @@ let isCodeGenOnly = 1, Defs = [CPSR] in {
def t2MOVsrl_flag : T2TwoRegShiftImm<
(outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iMOVsi,
"lsrs", ".w\t$Rd, $Rm, #1",
- [(set rGPR:$Rd, (ARMsrl_flag rGPR:$Rm))]> {
+ [(set rGPR:$Rd, (ARMsrl_flag rGPR:$Rm))]>,
+ Sched<[WriteALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b0010;
@@ -2199,7 +2319,8 @@ def t2MOVsrl_flag : T2TwoRegShiftImm<
def t2MOVsra_flag : T2TwoRegShiftImm<
(outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iMOVsi,
"asrs", ".w\t$Rd, $Rm, #1",
- [(set rGPR:$Rd, (ARMsra_flag rGPR:$Rm))]> {
+ [(set rGPR:$Rd, (ARMsra_flag rGPR:$Rm))]>,
+ Sched<[WriteALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b0010;
@@ -2320,7 +2441,7 @@ multiclass T2I_un_irs<bits<4> opcod, string opc,
// shifted imm
def i : T2sOneRegImm<(outs rGPR:$Rd), (ins t2_so_imm:$imm), iii,
opc, "\t$Rd, $imm",
- [(set rGPR:$Rd, (opnode t2_so_imm:$imm))]> {
+ [(set rGPR:$Rd, (opnode t2_so_imm:$imm))]>, Sched<[WriteALU]> {
let isAsCheapAsAMove = Cheap;
let isReMaterializable = ReMat;
let isMoveImm = MoveImm;
@@ -2333,7 +2454,7 @@ multiclass T2I_un_irs<bits<4> opcod, string opc,
// register
def r : T2sTwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm), iir,
opc, ".w\t$Rd, $Rm",
- [(set rGPR:$Rd, (opnode rGPR:$Rm))]> {
+ [(set rGPR:$Rd, (opnode rGPR:$Rm))]>, Sched<[WriteALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -2345,7 +2466,8 @@ multiclass T2I_un_irs<bits<4> opcod, string opc,
// shifted register
def s : T2sOneRegShiftedReg<(outs rGPR:$Rd), (ins t2_so_reg:$ShiftedRm), iis,
opc, ".w\t$Rd, $ShiftedRm",
- [(set rGPR:$Rd, (opnode t2_so_reg:$ShiftedRm))]> {
+ [(set rGPR:$Rd, (opnode t2_so_reg:$ShiftedRm))]>,
+ Sched<[WriteALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -2804,22 +2926,27 @@ class T2I_misc<bits<2> op1, bits<2> op2, dag oops, dag iops,
}
def t2CLZ : T2I_misc<0b11, 0b00, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
- "clz", "\t$Rd, $Rm", [(set rGPR:$Rd, (ctlz rGPR:$Rm))]>;
+ "clz", "\t$Rd, $Rm", [(set rGPR:$Rd, (ctlz rGPR:$Rm))]>,
+ Sched<[WriteALU]>;
def t2RBIT : T2I_misc<0b01, 0b10, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
"rbit", "\t$Rd, $Rm",
- [(set rGPR:$Rd, (ARMrbit rGPR:$Rm))]>;
+ [(set rGPR:$Rd, (ARMrbit rGPR:$Rm))]>,
+ Sched<[WriteALU]>;
def t2REV : T2I_misc<0b01, 0b00, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
- "rev", ".w\t$Rd, $Rm", [(set rGPR:$Rd, (bswap rGPR:$Rm))]>;
+ "rev", ".w\t$Rd, $Rm", [(set rGPR:$Rd, (bswap rGPR:$Rm))]>,
+ Sched<[WriteALU]>;
def t2REV16 : T2I_misc<0b01, 0b01, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
"rev16", ".w\t$Rd, $Rm",
- [(set rGPR:$Rd, (rotr (bswap rGPR:$Rm), (i32 16)))]>;
+ [(set rGPR:$Rd, (rotr (bswap rGPR:$Rm), (i32 16)))]>,
+ Sched<[WriteALU]>;
def t2REVSH : T2I_misc<0b01, 0b11, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
"revsh", ".w\t$Rd, $Rm",
- [(set rGPR:$Rd, (sra (bswap rGPR:$Rm), (i32 16)))]>;
+ [(set rGPR:$Rd, (sra (bswap rGPR:$Rm), (i32 16)))]>,
+ Sched<[WriteALU]>;
def : T2Pat<(or (sra (shl rGPR:$Rm, (i32 24)), (i32 16)),
(and (srl rGPR:$Rm, (i32 8)), 0xFF)),
@@ -2831,7 +2958,8 @@ def t2PKHBT : T2ThreeReg<
[(set rGPR:$Rd, (or (and rGPR:$Rn, 0xFFFF),
(and (shl rGPR:$Rm, pkh_lsl_amt:$sh),
0xFFFF0000)))]>,
- Requires<[HasT2ExtractPack, IsThumb2]> {
+ Requires<[HasT2ExtractPack, IsThumb2]>,
+ Sched<[WriteALUsi, ReadALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-20} = 0b01100;
@@ -2859,7 +2987,8 @@ def t2PKHTB : T2ThreeReg<
[(set rGPR:$Rd, (or (and rGPR:$Rn, 0xFFFF0000),
(and (sra rGPR:$Rm, pkh_asr_amt:$sh),
0xFFFF)))]>,
- Requires<[HasT2ExtractPack, IsThumb2]> {
+ Requires<[HasT2ExtractPack, IsThumb2]>,
+ Sched<[WriteALUsi, ReadALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-20} = 0b01100;
@@ -2873,7 +3002,12 @@ def t2PKHTB : T2ThreeReg<
// Alternate cases for PKHTB where identities eliminate some nodes. Note that
// a shift amount of 0 is *not legal* here, it is PKHBT instead.
-def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000), (srl rGPR:$src2, imm16_31:$sh)),
+// We also cannot replace an srl (17..31) with the arithmetic shift we would
+// use in pkhtb src1, src2, asr (17..31).
+def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000), (srl rGPR:$src2, imm16:$sh)),
+ (t2PKHTB rGPR:$src1, rGPR:$src2, imm16:$sh)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
+def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000), (sra rGPR:$src2, imm16_31:$sh)),
(t2PKHTB rGPR:$src1, rGPR:$src2, imm16_31:$sh)>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000),
@@ -2882,6 +3016,34 @@ def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000),
Requires<[HasT2ExtractPack, IsThumb2]>;
//===----------------------------------------------------------------------===//
+// CRC32 Instructions
+//
+// Polynomials:
+// + CRC32{B,H,W} 0x04C11DB7
+// + CRC32C{B,H,W} 0x1EDC6F41
+//
+
+class T2I_crc32<bit C, bits<2> sz, string suffix, SDPatternOperator builtin>
+ : T2ThreeRegNoP<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), NoItinerary,
+ !strconcat("crc32", suffix, "\t$Rd, $Rn, $Rm"),
+ [(set rGPR:$Rd, (builtin rGPR:$Rn, rGPR:$Rm))]>,
+ Requires<[IsThumb2, HasV8, HasCRC]> {
+ let Inst{31-27} = 0b11111;
+ let Inst{26-21} = 0b010110;
+ let Inst{20} = C;
+ let Inst{15-12} = 0b1111;
+ let Inst{7-6} = 0b10;
+ let Inst{5-4} = sz;
+}
+
+def t2CRC32B : T2I_crc32<0, 0b00, "b", int_arm_crc32b>;
+def t2CRC32CB : T2I_crc32<1, 0b00, "cb", int_arm_crc32cb>;
+def t2CRC32H : T2I_crc32<0, 0b01, "h", int_arm_crc32h>;
+def t2CRC32CH : T2I_crc32<1, 0b01, "ch", int_arm_crc32ch>;
+def t2CRC32W : T2I_crc32<0, 0b10, "w", int_arm_crc32w>;
+def t2CRC32CW : T2I_crc32<1, 0b10, "cw", int_arm_crc32cw>;
+
+//===----------------------------------------------------------------------===//
// Comparison Instructions...
//
defm t2CMP : T2I_cmp_irs<0b1101, "cmp",
@@ -2900,7 +3062,8 @@ let isCompare = 1, Defs = [CPSR] in {
def t2CMNri : T2OneRegCmpImm<
(outs), (ins GPRnopc:$Rn, t2_so_imm:$imm), IIC_iCMPi,
"cmn", ".w\t$Rn, $imm",
- [(ARMcmn GPRnopc:$Rn, (ineg t2_so_imm:$imm))]> {
+ [(ARMcmn GPRnopc:$Rn, (ineg t2_so_imm:$imm))]>,
+ Sched<[WriteCMP, ReadALU]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = 0b1000;
@@ -2913,7 +3076,7 @@ let isCompare = 1, Defs = [CPSR] in {
(outs), (ins GPRnopc:$Rn, rGPR:$Rm), IIC_iCMPr,
"cmn", ".w\t$Rn, $Rm",
[(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
- GPRnopc:$Rn, rGPR:$Rm)]> {
+ GPRnopc:$Rn, rGPR:$Rm)]>, Sched<[WriteCMP, ReadALU, ReadALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b1000;
@@ -2928,7 +3091,8 @@ let isCompare = 1, Defs = [CPSR] in {
(outs), (ins GPRnopc:$Rn, t2_so_reg:$ShiftedRm), IIC_iCMPsi,
"cmn", ".w\t$Rn, $ShiftedRm",
[(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
- GPRnopc:$Rn, t2_so_reg:$ShiftedRm)]> {
+ GPRnopc:$Rn, t2_so_reg:$ShiftedRm)]>,
+ Sched<[WriteCMPsi, ReadALU, ReadALU]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b1000;
@@ -2959,92 +3123,67 @@ defm t2TEQ : T2I_cmp_irs<0b0100, "teq",
BinOpFrag<(ARMcmpZ (xor_su node:$LHS, node:$RHS), 0)>>;
// Conditional moves
-// FIXME: should be able to write a pattern for ARMcmov, but can't use
-// a two-value operand where a dag node expects two operands. :(
let neverHasSideEffects = 1 in {
let isCommutable = 1, isSelect = 1 in
def t2MOVCCr : t2PseudoInst<(outs rGPR:$Rd),
- (ins rGPR:$false, rGPR:$Rm, pred:$p),
+ (ins rGPR:$false, rGPR:$Rm, cmovpred:$p),
4, IIC_iCMOVr,
- [/*(set rGPR:$Rd, (ARMcmov rGPR:$false, rGPR:$Rm, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $Rd">;
+ [(set rGPR:$Rd, (ARMcmov rGPR:$false, rGPR:$Rm,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
let isMoveImm = 1 in
-def t2MOVCCi : t2PseudoInst<(outs rGPR:$Rd),
- (ins rGPR:$false, t2_so_imm:$imm, pred:$p),
+def t2MOVCCi
+ : t2PseudoInst<(outs rGPR:$Rd),
+ (ins rGPR:$false, t2_so_imm:$imm, cmovpred:$p),
4, IIC_iCMOVi,
-[/*(set rGPR:$Rd,(ARMcmov rGPR:$false,t2_so_imm:$imm, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $Rd">;
+ [(set rGPR:$Rd, (ARMcmov rGPR:$false,t2_so_imm:$imm,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
-// FIXME: Pseudo-ize these. For now, just mark codegen only.
let isCodeGenOnly = 1 in {
let isMoveImm = 1 in
-def t2MOVCCi16 : T2I<(outs rGPR:$Rd), (ins rGPR:$false, imm0_65535_expr:$imm),
- IIC_iCMOVi,
- "movw", "\t$Rd, $imm", []>,
- RegConstraint<"$false = $Rd"> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-21} = 0b0010;
- let Inst{20} = 0; // The S bit.
- let Inst{15} = 0;
-
- bits<4> Rd;
- bits<16> imm;
-
- let Inst{11-8} = Rd;
- let Inst{19-16} = imm{15-12};
- let Inst{26} = imm{11};
- let Inst{14-12} = imm{10-8};
- let Inst{7-0} = imm{7-0};
-}
+def t2MOVCCi16
+ : t2PseudoInst<(outs rGPR:$Rd),
+ (ins rGPR:$false, imm0_65535_expr:$imm, cmovpred:$p),
+ 4, IIC_iCMOVi,
+ [(set rGPR:$Rd, (ARMcmov rGPR:$false, imm0_65535:$imm,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
let isMoveImm = 1 in
-def t2MOVCCi32imm : PseudoInst<(outs rGPR:$dst),
- (ins rGPR:$false, i32imm:$src, pred:$p),
- IIC_iCMOVix2, []>, RegConstraint<"$false = $dst">;
+def t2MVNCCi
+ : t2PseudoInst<(outs rGPR:$Rd),
+ (ins rGPR:$false, t2_so_imm:$imm, cmovpred:$p),
+ 4, IIC_iCMOVi,
+ [(set rGPR:$Rd,
+ (ARMcmov rGPR:$false, t2_so_imm_not:$imm,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
+
+class MOVCCShPseudo<SDPatternOperator opnode, Operand ty>
+ : t2PseudoInst<(outs rGPR:$Rd),
+ (ins rGPR:$false, rGPR:$Rm, i32imm:$imm, cmovpred:$p),
+ 4, IIC_iCMOVsi,
+ [(set rGPR:$Rd, (ARMcmov rGPR:$false,
+ (opnode rGPR:$Rm, (i32 ty:$imm)),
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
+
+def t2MOVCClsl : MOVCCShPseudo<shl, imm0_31>;
+def t2MOVCClsr : MOVCCShPseudo<srl, imm_sr>;
+def t2MOVCCasr : MOVCCShPseudo<sra, imm_sr>;
+def t2MOVCCror : MOVCCShPseudo<rotr, imm0_31>;
let isMoveImm = 1 in
-def t2MVNCCi : T2OneRegImm<(outs rGPR:$Rd), (ins rGPR:$false, t2_so_imm:$imm),
- IIC_iCMOVi, "mvn", "\t$Rd, $imm",
-[/*(set rGPR:$Rd,(ARMcmov rGPR:$false,t2_so_imm_not:$imm,
- imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $Rd"> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = 0b0011;
- let Inst{20} = 0; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15} = 0;
-}
-
-class T2I_movcc_sh<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : T2TwoRegShiftImm<oops, iops, itin, opc, asm, pattern> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = 0b0010;
- let Inst{20} = 0; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{5-4} = opcod; // Shift type.
-}
-def t2MOVCClsl : T2I_movcc_sh<0b00, (outs rGPR:$Rd),
- (ins rGPR:$false, rGPR:$Rm, i32imm:$imm),
- IIC_iCMOVsi, "lsl", ".w\t$Rd, $Rm, $imm", []>,
- RegConstraint<"$false = $Rd">;
-def t2MOVCClsr : T2I_movcc_sh<0b01, (outs rGPR:$Rd),
- (ins rGPR:$false, rGPR:$Rm, i32imm:$imm),
- IIC_iCMOVsi, "lsr", ".w\t$Rd, $Rm, $imm", []>,
- RegConstraint<"$false = $Rd">;
-def t2MOVCCasr : T2I_movcc_sh<0b10, (outs rGPR:$Rd),
- (ins rGPR:$false, rGPR:$Rm, i32imm:$imm),
- IIC_iCMOVsi, "asr", ".w\t$Rd, $Rm, $imm", []>,
- RegConstraint<"$false = $Rd">;
-def t2MOVCCror : T2I_movcc_sh<0b11, (outs rGPR:$Rd),
- (ins rGPR:$false, rGPR:$Rm, i32imm:$imm),
- IIC_iCMOVsi, "ror", ".w\t$Rd, $Rm, $imm", []>,
- RegConstraint<"$false = $Rd">;
+def t2MOVCCi32imm
+ : t2PseudoInst<(outs rGPR:$dst),
+ (ins rGPR:$false, i32imm:$src, cmovpred:$p),
+ 8, IIC_iCMOVix2,
+ [(set rGPR:$dst, (ARMcmov rGPR:$false, imm:$src,
+ cmovpred:$p))]>,
+ RegConstraint<"$false = $dst">;
} // isCodeGenOnly = 1
} // neverHasSideEffects
@@ -3055,40 +3194,38 @@ def t2MOVCCror : T2I_movcc_sh<0b11, (outs rGPR:$Rd),
// memory barriers protect the atomic sequences
let hasSideEffects = 1 in {
-def t2DMB : AInoP<(outs), (ins memb_opt:$opt), ThumbFrm, NoItinerary,
- "dmb", "\t$opt", [(ARMMemBarrier (i32 imm:$opt))]>,
- Requires<[IsThumb, HasDB]> {
+def t2DMB : T2I<(outs), (ins memb_opt:$opt), NoItinerary,
+ "dmb", "\t$opt", [(int_arm_dmb (i32 imm0_15:$opt))]>,
+ Requires<[HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf3bf8f5;
let Inst{3-0} = opt;
}
}
-def t2DSB : AInoP<(outs), (ins memb_opt:$opt), ThumbFrm, NoItinerary,
- "dsb", "\t$opt", []>,
- Requires<[IsThumb, HasDB]> {
+def t2DSB : T2I<(outs), (ins memb_opt:$opt), NoItinerary,
+ "dsb", "\t$opt", [(int_arm_dsb (i32 imm0_15:$opt))]>,
+ Requires<[HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf3bf8f4;
let Inst{3-0} = opt;
}
-def t2ISB : AInoP<(outs), (ins memb_opt:$opt), ThumbFrm, NoItinerary,
- "isb", "\t$opt",
- []>, Requires<[IsThumb, HasDB]> {
+def t2ISB : T2I<(outs), (ins instsyncb_opt:$opt), NoItinerary,
+ "isb", "\t$opt", []>, Requires<[HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf3bf8f6;
let Inst{3-0} = opt;
}
-class T2I_ldrex<bits<2> opcod, dag oops, dag iops, AddrMode am, int sz,
+class T2I_ldrex<bits<4> opcod, dag oops, dag iops, AddrMode am, int sz,
InstrItinClass itin, string opc, string asm, string cstr,
list<dag> pattern, bits<4> rt2 = 0b1111>
: Thumb2I<oops, iops, am, sz, itin, opc, asm, cstr, pattern> {
let Inst{31-27} = 0b11101;
let Inst{26-20} = 0b0001101;
let Inst{11-8} = rt2;
- let Inst{7-6} = 0b01;
- let Inst{5-4} = opcod;
+ let Inst{7-4} = opcod;
let Inst{3-0} = 0b1111;
bits<4> addr;
@@ -3096,15 +3233,14 @@ class T2I_ldrex<bits<2> opcod, dag oops, dag iops, AddrMode am, int sz,
let Inst{19-16} = addr;
let Inst{15-12} = Rt;
}
-class T2I_strex<bits<2> opcod, dag oops, dag iops, AddrMode am, int sz,
+class T2I_strex<bits<4> opcod, dag oops, dag iops, AddrMode am, int sz,
InstrItinClass itin, string opc, string asm, string cstr,
list<dag> pattern, bits<4> rt2 = 0b1111>
: Thumb2I<oops, iops, am, sz, itin, opc, asm, cstr, pattern> {
let Inst{31-27} = 0b11101;
let Inst{26-20} = 0b0001100;
let Inst{11-8} = rt2;
- let Inst{7-6} = 0b01;
- let Inst{5-4} = opcod;
+ let Inst{7-4} = opcod;
bits<4> Rd;
bits<4> addr;
@@ -3115,15 +3251,18 @@ class T2I_strex<bits<2> opcod, dag oops, dag iops, AddrMode am, int sz,
}
let mayLoad = 1 in {
-def t2LDREXB : T2I_ldrex<0b00, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
+def t2LDREXB : T2I_ldrex<0b0100, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
- "ldrexb", "\t$Rt, $addr", "", []>;
-def t2LDREXH : T2I_ldrex<0b01, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
+ "ldrexb", "\t$Rt, $addr", "",
+ [(set rGPR:$Rt, (ldrex_1 addr_offset_none:$addr))]>;
+def t2LDREXH : T2I_ldrex<0b0101, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
- "ldrexh", "\t$Rt, $addr", "", []>;
+ "ldrexh", "\t$Rt, $addr", "",
+ [(set rGPR:$Rt, (ldrex_2 addr_offset_none:$addr))]>;
def t2LDREX : Thumb2I<(outs rGPR:$Rt), (ins t2addrmode_imm0_1020s4:$addr),
AddrModeNone, 4, NoItinerary,
- "ldrex", "\t$Rt, $addr", "", []> {
+ "ldrex", "\t$Rt, $addr", "",
+ [(set rGPR:$Rt, (ldrex_4 t2addrmode_imm0_1020s4:$addr))]> {
bits<4> Rt;
bits<12> addr;
let Inst{31-27} = 0b11101;
@@ -3134,7 +3273,7 @@ def t2LDREX : Thumb2I<(outs rGPR:$Rt), (ins t2addrmode_imm0_1020s4:$addr),
let Inst{7-0} = addr{7-0};
}
let hasExtraDefRegAllocReq = 1 in
-def t2LDREXD : T2I_ldrex<0b11, (outs rGPR:$Rt, rGPR:$Rt2),
+def t2LDREXD : T2I_ldrex<0b0111, (outs rGPR:$Rt, rGPR:$Rt2),
(ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"ldrexd", "\t$Rt, $Rt2, $addr", "",
@@ -3142,22 +3281,60 @@ def t2LDREXD : T2I_ldrex<0b11, (outs rGPR:$Rt, rGPR:$Rt2),
bits<4> Rt2;
let Inst{11-8} = Rt2;
}
+def t2LDAEXB : T2I_ldrex<0b1100, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
+ AddrModeNone, 4, NoItinerary,
+ "ldaexb", "\t$Rt, $addr", "",
+ []>, Requires<[IsThumb, HasV8]>;
+def t2LDAEXH : T2I_ldrex<0b1101, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
+ AddrModeNone, 4, NoItinerary,
+ "ldaexh", "\t$Rt, $addr", "",
+ []>, Requires<[IsThumb, HasV8]>;
+def t2LDAEX : Thumb2I<(outs rGPR:$Rt), (ins addr_offset_none:$addr),
+ AddrModeNone, 4, NoItinerary,
+ "ldaex", "\t$Rt, $addr", "",
+ []>, Requires<[IsThumb, HasV8]> {
+ bits<4> Rt;
+ bits<4> addr;
+ let Inst{31-27} = 0b11101;
+ let Inst{26-20} = 0b0001101;
+ let Inst{19-16} = addr;
+ let Inst{15-12} = Rt;
+ let Inst{11-8} = 0b1111;
+ let Inst{7-0} = 0b11101111;
+}
+let hasExtraDefRegAllocReq = 1 in
+def t2LDAEXD : T2I_ldrex<0b1111, (outs rGPR:$Rt, rGPR:$Rt2),
+ (ins addr_offset_none:$addr),
+ AddrModeNone, 4, NoItinerary,
+ "ldaexd", "\t$Rt, $Rt2, $addr", "",
+ [], {?, ?, ?, ?}>, Requires<[IsThumb, HasV8]> {
+ bits<4> Rt2;
+ let Inst{11-8} = Rt2;
+
+ let Inst{7} = 1;
+}
}
let mayStore = 1, Constraints = "@earlyclobber $Rd" in {
-def t2STREXB : T2I_strex<0b00, (outs rGPR:$Rd),
+def t2STREXB : T2I_strex<0b0100, (outs rGPR:$Rd),
(ins rGPR:$Rt, addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
- "strexb", "\t$Rd, $Rt, $addr", "", []>;
-def t2STREXH : T2I_strex<0b01, (outs rGPR:$Rd),
+ "strexb", "\t$Rd, $Rt, $addr", "",
+ [(set rGPR:$Rd, (strex_1 rGPR:$Rt,
+ addr_offset_none:$addr))]>;
+def t2STREXH : T2I_strex<0b0101, (outs rGPR:$Rd),
(ins rGPR:$Rt, addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
- "strexh", "\t$Rd, $Rt, $addr", "", []>;
+ "strexh", "\t$Rd, $Rt, $addr", "",
+ [(set rGPR:$Rd, (strex_2 rGPR:$Rt,
+ addr_offset_none:$addr))]>;
+
def t2STREX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt,
t2addrmode_imm0_1020s4:$addr),
AddrModeNone, 4, NoItinerary,
"strex", "\t$Rd, $Rt, $addr", "",
- []> {
+ [(set rGPR:$Rd, (strex_4 rGPR:$Rt,
+ t2addrmode_imm0_1020s4:$addr))]> {
bits<4> Rd;
bits<4> Rt;
bits<12> addr;
@@ -3169,7 +3346,7 @@ def t2STREX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt,
let Inst{7-0} = addr{7-0};
}
let hasExtraSrcRegAllocReq = 1 in
-def t2STREXD : T2I_strex<0b11, (outs rGPR:$Rd),
+def t2STREXD : T2I_strex<0b0111, (outs rGPR:$Rd),
(ins rGPR:$Rt, rGPR:$Rt2, addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"strexd", "\t$Rd, $Rt, $Rt2, $addr", "", [],
@@ -3177,9 +3354,45 @@ def t2STREXD : T2I_strex<0b11, (outs rGPR:$Rd),
bits<4> Rt2;
let Inst{11-8} = Rt2;
}
+def t2STLEXB : T2I_strex<0b1100, (outs rGPR:$Rd),
+ (ins rGPR:$Rt, addr_offset_none:$addr),
+ AddrModeNone, 4, NoItinerary,
+ "stlexb", "\t$Rd, $Rt, $addr", "",
+ []>, Requires<[IsThumb, HasV8]>;
+
+def t2STLEXH : T2I_strex<0b1101, (outs rGPR:$Rd),
+ (ins rGPR:$Rt, addr_offset_none:$addr),
+ AddrModeNone, 4, NoItinerary,
+ "stlexh", "\t$Rd, $Rt, $addr", "",
+ []>, Requires<[IsThumb, HasV8]>;
+
+def t2STLEX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt,
+ addr_offset_none:$addr),
+ AddrModeNone, 4, NoItinerary,
+ "stlex", "\t$Rd, $Rt, $addr", "",
+ []>, Requires<[IsThumb, HasV8]> {
+ bits<4> Rd;
+ bits<4> Rt;
+ bits<4> addr;
+ let Inst{31-27} = 0b11101;
+ let Inst{26-20} = 0b0001100;
+ let Inst{19-16} = addr;
+ let Inst{15-12} = Rt;
+ let Inst{11-4} = 0b11111110;
+ let Inst{3-0} = Rd;
+}
+let hasExtraSrcRegAllocReq = 1 in
+def t2STLEXD : T2I_strex<0b1111, (outs rGPR:$Rd),
+ (ins rGPR:$Rt, rGPR:$Rt2, addr_offset_none:$addr),
+ AddrModeNone, 4, NoItinerary,
+ "stlexd", "\t$Rd, $Rt, $Rt2, $addr", "", [],
+ {?, ?, ?, ?}>, Requires<[IsThumb, HasV8]> {
+ bits<4> Rt2;
+ let Inst{11-8} = Rt2;
+}
}
-def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "", []>,
+def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "", [(int_arm_clrex)]>,
Requires<[IsThumb2, HasV7]> {
let Inst{31-16} = 0xf3bf;
let Inst{15-14} = 0b10;
@@ -3190,6 +3403,15 @@ def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "", []>,
let Inst{3-0} = 0b1111;
}
+def : T2Pat<(and (ldrex_1 addr_offset_none:$addr), 0xff),
+ (t2LDREXB addr_offset_none:$addr)>;
+def : T2Pat<(and (ldrex_2 addr_offset_none:$addr), 0xffff),
+ (t2LDREXH addr_offset_none:$addr)>;
+def : T2Pat<(strex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
+ (t2STREXB GPR:$Rt, addr_offset_none:$addr)>;
+def : T2Pat<(strex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
+ (t2STREXH GPR:$Rt, addr_offset_none:$addr)>;
+
//===----------------------------------------------------------------------===//
// SJLJ Exception handling intrinsics
// eh_sjlj_setjmp() is an instruction sequence to store the return
@@ -3243,35 +3465,39 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
let isPredicable = 1 in
def t2B : T2I<(outs), (ins uncondbrtarget:$target), IIC_Br,
"b", ".w\t$target",
- [(br bb:$target)]> {
+ [(br bb:$target)]>, Sched<[WriteBr]> {
let Inst{31-27} = 0b11110;
let Inst{15-14} = 0b10;
let Inst{12} = 1;
bits<24> target;
- let Inst{26} = target{19};
- let Inst{11} = target{18};
- let Inst{13} = target{17};
+ let Inst{26} = target{23};
+ let Inst{13} = target{22};
+ let Inst{11} = target{21};
let Inst{25-16} = target{20-11};
let Inst{10-0} = target{10-0};
let DecoderMethod = "DecodeT2BInstruction";
-}
+ let AsmMatchConverter = "cvtThumbBranches";
+}
let isNotDuplicable = 1, isIndirectBranch = 1 in {
def t2BR_JT : t2PseudoInst<(outs),
(ins GPR:$target, GPR:$index, i32imm:$jt, i32imm:$id),
0, IIC_Br,
- [(ARMbr2jt GPR:$target, GPR:$index, tjumptable:$jt, imm:$id)]>;
+ [(ARMbr2jt GPR:$target, GPR:$index, tjumptable:$jt, imm:$id)]>,
+ Sched<[WriteBr]>;
// FIXME: Add a non-pc based case that can be predicated.
def t2TBB_JT : t2PseudoInst<(outs),
- (ins GPR:$index, i32imm:$jt, i32imm:$id), 0, IIC_Br, []>;
+ (ins GPR:$index, i32imm:$jt, i32imm:$id), 0, IIC_Br, []>,
+ Sched<[WriteBr]>;
def t2TBH_JT : t2PseudoInst<(outs),
- (ins GPR:$index, i32imm:$jt, i32imm:$id), 0, IIC_Br, []>;
+ (ins GPR:$index, i32imm:$jt, i32imm:$id), 0, IIC_Br, []>,
+ Sched<[WriteBr]>;
def t2TBB : T2I<(outs), (ins addrmode_tbb:$addr), IIC_Br,
- "tbb", "\t$addr", []> {
+ "tbb", "\t$addr", []>, Sched<[WriteBrTbl]> {
bits<4> Rn;
bits<4> Rm;
let Inst{31-20} = 0b111010001101;
@@ -3284,7 +3510,7 @@ def t2TBB : T2I<(outs), (ins addrmode_tbb:$addr), IIC_Br,
}
def t2TBH : T2I<(outs), (ins addrmode_tbh:$addr), IIC_Br,
- "tbh", "\t$addr", []> {
+ "tbh", "\t$addr", []>, Sched<[WriteBrTbl]> {
bits<4> Rn;
bits<4> Rm;
let Inst{31-20} = 0b111010001101;
@@ -3304,7 +3530,7 @@ def t2TBH : T2I<(outs), (ins addrmode_tbh:$addr), IIC_Br,
let isBranch = 1, isTerminator = 1 in
def t2Bcc : T2I<(outs), (ins brtarget:$target), IIC_Br,
"b", ".w\t$target",
- [/*(ARMbrcond bb:$target, imm:$cc)*/]> {
+ [/*(ARMbrcond bb:$target, imm:$cc)*/]>, Sched<[WriteBr]> {
let Inst{31-27} = 0b11110;
let Inst{15-14} = 0b10;
let Inst{12} = 0;
@@ -3320,6 +3546,7 @@ def t2Bcc : T2I<(outs), (ins brtarget:$target), IIC_Br,
let Inst{10-0} = target{11-1};
let DecoderMethod = "DecodeThumb2BCCInstruction";
+ let AsmMatchConverter = "cvtThumbBranches";
}
// Tail calls. The IOS version of thumb tail calls uses a t2 branch, so
@@ -3331,14 +3558,15 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
(ins uncondbrtarget:$dst, pred:$p),
4, IIC_Br, [],
(t2B uncondbrtarget:$dst, pred:$p)>,
- Requires<[IsThumb2, IsIOS]>;
+ Requires<[IsThumb2, IsIOS]>, Sched<[WriteBr]>;
}
// IT block
let Defs = [ITSTATE] in
def t2IT : Thumb2XI<(outs), (ins it_pred:$cc, it_mask:$mask),
AddrModeNone, 2, IIC_iALUx,
- "it$mask\t$cc", "", []> {
+ "it$mask\t$cc", "", []>,
+ ComplexDeprecationPredicate<"IT"> {
// 16-bit instruction.
let Inst{31-16} = 0x0000;
let Inst{15-8} = 0b10111111;
@@ -3353,7 +3581,8 @@ def t2IT : Thumb2XI<(outs), (ins it_pred:$cc, it_mask:$mask),
// Branch and Exchange Jazelle -- for disassembly only
// Rm = Inst{19-16}
-def t2BXJ : T2I<(outs), (ins rGPR:$func), NoItinerary, "bxj", "\t$func", []> {
+def t2BXJ : T2I<(outs), (ins rGPR:$func), NoItinerary, "bxj", "\t$func", []>,
+ Sched<[WriteBr]> {
bits<4> func;
let Inst{31-27} = 0b11110;
let Inst{26} = 0;
@@ -3367,7 +3596,7 @@ let isBranch = 1, isTerminator = 1 in {
def tCBZ : T1I<(outs), (ins tGPR:$Rn, t_cbtarget:$target), IIC_Br,
"cbz\t$Rn, $target", []>,
T1Misc<{0,0,?,1,?,?,?}>,
- Requires<[IsThumb2]> {
+ Requires<[IsThumb2]>, Sched<[WriteBr]> {
// A8.6.27
bits<6> target;
bits<3> Rn;
@@ -3379,7 +3608,7 @@ let isBranch = 1, isTerminator = 1 in {
def tCBNZ : T1I<(outs), (ins tGPR:$Rn, t_cbtarget:$target), IIC_Br,
"cbnz\t$Rn, $target", []>,
T1Misc<{1,0,?,1,?,?,?}>,
- Requires<[IsThumb2]> {
+ Requires<[IsThumb2]>, Sched<[WriteBr]> {
// A8.6.27
bits<6> target;
bits<3> Rn;
@@ -3411,27 +3640,34 @@ class t2CPS<dag iops, string asm_op> : T2XI<(outs), iops, NoItinerary,
let M = 1 in
def t2CPS3p : t2CPS<(ins imod_op:$imod, iflags_op:$iflags, i32imm:$mode),
- "$imod.w\t$iflags, $mode">;
+ "$imod\t$iflags, $mode">;
let mode = 0, M = 0 in
def t2CPS2p : t2CPS<(ins imod_op:$imod, iflags_op:$iflags),
"$imod.w\t$iflags">;
let imod = 0, iflags = 0, M = 1 in
def t2CPS1p : t2CPS<(ins imm0_31:$mode), "\t$mode">;
+def : t2InstAlias<"cps$imod.w $iflags, $mode",
+ (t2CPS3p imod_op:$imod, iflags_op:$iflags, i32imm:$mode), 0>;
+def : t2InstAlias<"cps.w $mode", (t2CPS1p imm0_31:$mode), 0>;
+
// A6.3.4 Branches and miscellaneous control
// Table A6-14 Change Processor State, and hint instructions
-def t2HINT : T2I<(outs), (ins imm0_4:$imm), NoItinerary, "hint", "\t$imm",[]> {
- bits<3> imm;
+def t2HINT : T2I<(outs), (ins imm0_239:$imm), NoItinerary, "hint", ".w\t$imm",[]> {
+ bits<8> imm;
let Inst{31-3} = 0b11110011101011111000000000000;
- let Inst{2-0} = imm;
+ let Inst{7-0} = imm;
}
-def : t2InstAlias<"hint$p.w $imm", (t2HINT imm0_4:$imm, pred:$p)>;
+def : t2InstAlias<"hint$p $imm", (t2HINT imm0_239:$imm, pred:$p)>;
def : t2InstAlias<"nop$p.w", (t2HINT 0, pred:$p)>;
def : t2InstAlias<"yield$p.w", (t2HINT 1, pred:$p)>;
def : t2InstAlias<"wfe$p.w", (t2HINT 2, pred:$p)>;
def : t2InstAlias<"wfi$p.w", (t2HINT 3, pred:$p)>;
def : t2InstAlias<"sev$p.w", (t2HINT 4, pred:$p)>;
+def : t2InstAlias<"sevl$p.w", (t2HINT 5, pred:$p)> {
+ let Predicates = [IsThumb2, HasV8];
+}
def t2DBG : T2I<(outs), (ins imm0_15:$opt), NoItinerary, "dbg", "\t$opt", []> {
bits<4> opt;
@@ -3454,6 +3690,20 @@ def t2SMC : T2I<(outs), (ins imm0_15:$opt), NoItinerary, "smc", "\t$opt",
let Inst{19-16} = opt;
}
+class T2DCPS<bits<2> opt, string opc>
+ : T2I<(outs), (ins), NoItinerary, opc, "", []>, Requires<[IsThumb2, HasV8]> {
+ let Inst{31-27} = 0b11110;
+ let Inst{26-20} = 0b1111000;
+ let Inst{19-16} = 0b1111;
+ let Inst{15-12} = 0b1000;
+ let Inst{11-2} = 0b0000000000;
+ let Inst{1-0} = opt;
+}
+
+def t2DCPS1 : T2DCPS<0b01, "dcps1">;
+def t2DCPS2 : T2DCPS<0b10, "dcps2">;
+def t2DCPS3 : T2DCPS<0b11, "dcps3">;
+
class T2SRS<bits<2> Op, bit W, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: T2I<oops, iops, itin, opc, asm, pattern> {
@@ -3508,6 +3758,19 @@ def t2RFEIA : T2RFE<0b111010011001,
(outs), (ins GPR:$Rn), NoItinerary, "rfeia", "\t$Rn",
[/* For disassembly only; pattern left blank */]>;
+// B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction.
+// Exception return instruction is "subs pc, lr, #imm".
+let isReturn = 1, isBarrier = 1, isTerminator = 1, Defs = [PC] in
+def t2SUBS_PC_LR : T2I <(outs), (ins imm0_255:$imm), NoItinerary,
+ "subs", "\tpc, lr, $imm",
+ [(ARMintretflag imm0_255:$imm)]>,
+ Requires<[IsThumb2]> {
+ let Inst{31-8} = 0b111100111101111010001111;
+
+ bits<8> imm;
+ let Inst{7-0} = imm;
+}
+
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//
@@ -3591,7 +3854,7 @@ multiclass t2LdStCop<bits<4> op31_28, bit load, bit Dbit, string asm> {
let DecoderMethod = "DecodeCopMemInstruction";
}
def _PRE : T2CI<op31_28,
- (outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5:$addr),
+ (outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5_pre:$addr),
asm, "\t$cop, $CRd, $addr!"> {
bits<13> addr;
bits<4> cop;
@@ -3651,10 +3914,10 @@ defm t2LDC : t2LdStCop<0b1110, 1, 0, "ldc">;
defm t2LDCL : t2LdStCop<0b1110, 1, 1, "ldcl">;
defm t2STC : t2LdStCop<0b1110, 0, 0, "stc">;
defm t2STCL : t2LdStCop<0b1110, 0, 1, "stcl">;
-defm t2LDC2 : t2LdStCop<0b1111, 1, 0, "ldc2">;
-defm t2LDC2L : t2LdStCop<0b1111, 1, 1, "ldc2l">;
-defm t2STC2 : t2LdStCop<0b1111, 0, 0, "stc2">;
-defm t2STC2L : t2LdStCop<0b1111, 0, 1, "stc2l">;
+defm t2LDC2 : t2LdStCop<0b1111, 1, 0, "ldc2">, Requires<[PreV8]>;
+defm t2LDC2L : t2LdStCop<0b1111, 1, 1, "ldc2l">, Requires<[PreV8]>;
+defm t2STC2 : t2LdStCop<0b1111, 0, 0, "stc2">, Requires<[PreV8]>;
+defm t2STC2L : t2LdStCop<0b1111, 0, 1, "stc2l">, Requires<[PreV8]>;
//===----------------------------------------------------------------------===//
@@ -3666,7 +3929,7 @@ defm t2STC2L : t2LdStCop<0b1111, 0, 1, "stc2l">;
//
// A/R class can only move from CPSR or SPSR.
def t2MRS_AR : T2I<(outs GPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, apsr",
- []>, Requires<[IsThumb2,IsARClass]> {
+ []>, Requires<[IsThumb2,IsNotMClass]> {
bits<4> Rd;
let Inst{31-12} = 0b11110011111011111000;
let Inst{11-8} = Rd;
@@ -3676,7 +3939,7 @@ def t2MRS_AR : T2I<(outs GPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, apsr",
def : t2InstAlias<"mrs${p} $Rd, cpsr", (t2MRS_AR GPR:$Rd, pred:$p)>;
def t2MRSsys_AR: T2I<(outs GPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, spsr",
- []>, Requires<[IsThumb2,IsARClass]> {
+ []>, Requires<[IsThumb2,IsNotMClass]> {
bits<4> Rd;
let Inst{31-12} = 0b11110011111111111000;
let Inst{11-8} = Rd;
@@ -3709,7 +3972,7 @@ def t2MRS_M : T2I<(outs rGPR:$Rd), (ins msr_mask:$mask), NoItinerary,
// the mask with the fields to be accessed in the special register.
def t2MSR_AR : T2I<(outs), (ins msr_mask:$mask, rGPR:$Rn),
NoItinerary, "msr", "\t$mask, $Rn", []>,
- Requires<[IsThumb2,IsARClass]> {
+ Requires<[IsThumb2,IsNotMClass]> {
bits<5> mask;
bits<4> Rn;
let Inst{31-21} = 0b11110011100;
@@ -3742,8 +4005,7 @@ def t2MSR_M : T2I<(outs), (ins msr_mask:$SYSm, rGPR:$Rn),
class t2MovRCopro<bits<4> Op, string opc, bit direction, dag oops, dag iops,
list<dag> pattern>
- : T2Cop<Op, oops, iops,
- !strconcat(opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2"),
+ : T2Cop<Op, oops, iops, opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2",
pattern> {
let Inst{27-24} = 0b1110;
let Inst{20} = direction;
@@ -3768,7 +4030,7 @@ class t2MovRRCopro<bits<4> Op, string opc, bit direction,
list<dag> pattern = []>
: T2Cop<Op, (outs),
(ins p_imm:$cop, imm0_15:$opc1, GPR:$Rt, GPR:$Rt2, c_imm:$CRm),
- !strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"), pattern> {
+ opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm", pattern> {
let Inst{27-24} = 0b1100;
let Inst{23-21} = 0b010;
let Inst{20} = direction;
@@ -3792,33 +4054,38 @@ def t2MCR : t2MovRCopro<0b1110, "mcr", 0,
(ins p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
- imm:$CRm, imm:$opc2)]>;
-def : t2InstAlias<"mcr $cop, $opc1, $Rt, $CRn, $CRm",
+ imm:$CRm, imm:$opc2)]>,
+ ComplexDeprecationPredicate<"MCR">;
+def : t2InstAlias<"mcr${p} $cop, $opc1, $Rt, $CRn, $CRm",
(t2MCR p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
- c_imm:$CRm, 0)>;
+ c_imm:$CRm, 0, pred:$p)>;
def t2MCR2 : t2MovRCopro<0b1111, "mcr2", 0,
(outs), (ins p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr2 imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
- imm:$CRm, imm:$opc2)]>;
-def : t2InstAlias<"mcr2 $cop, $opc1, $Rt, $CRn, $CRm",
+ imm:$CRm, imm:$opc2)]> {
+ let Predicates = [IsThumb2, PreV8];
+}
+def : t2InstAlias<"mcr2${p} $cop, $opc1, $Rt, $CRn, $CRm",
(t2MCR2 p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
- c_imm:$CRm, 0)>;
+ c_imm:$CRm, 0, pred:$p)>;
/* from coprocessor to ARM core register */
def t2MRC : t2MovRCopro<0b1110, "mrc", 1,
- (outs GPR:$Rt), (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
+ (outs GPRwithAPSR:$Rt), (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2), []>;
-def : t2InstAlias<"mrc $cop, $opc1, $Rt, $CRn, $CRm",
- (t2MRC GPR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
- c_imm:$CRm, 0)>;
+def : t2InstAlias<"mrc${p} $cop, $opc1, $Rt, $CRn, $CRm",
+ (t2MRC GPRwithAPSR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
+ c_imm:$CRm, 0, pred:$p)>;
def t2MRC2 : t2MovRCopro<0b1111, "mrc2", 1,
- (outs GPR:$Rt), (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
- c_imm:$CRm, imm0_7:$opc2), []>;
-def : t2InstAlias<"mrc2 $cop, $opc1, $Rt, $CRn, $CRm",
- (t2MRC2 GPR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
- c_imm:$CRm, 0)>;
+ (outs GPRwithAPSR:$Rt), (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
+ c_imm:$CRm, imm0_7:$opc2), []> {
+ let Predicates = [IsThumb2, PreV8];
+}
+def : t2InstAlias<"mrc2${p} $cop, $opc1, $Rt, $CRn, $CRm",
+ (t2MRC2 GPRwithAPSR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
+ c_imm:$CRm, 0, pred:$p)>;
def : T2v6Pat<(int_arm_mrc imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2),
(t2MRC imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2)>;
@@ -3833,19 +4100,24 @@ def t2MCRR : t2MovRRCopro<0b1110, "mcrr", 0,
imm:$CRm)]>;
def t2MCRR2 : t2MovRRCopro<0b1111, "mcrr2", 0,
[(int_arm_mcrr2 imm:$cop, imm:$opc1, GPR:$Rt,
- GPR:$Rt2, imm:$CRm)]>;
+ GPR:$Rt2, imm:$CRm)]> {
+ let Predicates = [IsThumb2, PreV8];
+}
+
/* from coprocessor to ARM core register */
def t2MRRC : t2MovRRCopro<0b1110, "mrrc", 1>;
-def t2MRRC2 : t2MovRRCopro<0b1111, "mrrc2", 1>;
+def t2MRRC2 : t2MovRRCopro<0b1111, "mrrc2", 1> {
+ let Predicates = [IsThumb2, PreV8];
+}
//===----------------------------------------------------------------------===//
// Other Coprocessor Instructions.
//
-def tCDP : T2Cop<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
+def t2CDP : T2Cop<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2),
- "cdp\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
+ "cdp", "\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[(int_arm_cdp imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn,
imm:$CRm, imm:$opc2)]> {
let Inst{27-24} = 0b1110;
@@ -3864,11 +4136,13 @@ def tCDP : T2Cop<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
let Inst{15-12} = CRd;
let Inst{19-16} = CRn;
let Inst{23-20} = opc1;
+
+ let Predicates = [IsThumb2, PreV8];
}
def t2CDP2 : T2Cop<0b1111, (outs), (ins p_imm:$cop, imm0_15:$opc1,
c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2),
- "cdp2\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
+ "cdp2", "\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[(int_arm_cdp2 imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn,
imm:$CRm, imm:$opc2)]> {
let Inst{27-24} = 0b1110;
@@ -3887,6 +4161,8 @@ def t2CDP2 : T2Cop<0b1111, (outs), (ins p_imm:$cop, imm0_15:$opc1,
let Inst{15-12} = CRd;
let Inst{19-16} = CRn;
let Inst{23-20} = opc1;
+
+ let Predicates = [IsThumb2, PreV8];
}
@@ -3960,6 +4236,15 @@ def : T2Pat<(atomic_store_32 t2addrmode_negimm8:$addr, GPR:$val),
def : T2Pat<(atomic_store_32 t2addrmode_so_reg:$addr, GPR:$val),
(t2STRs GPR:$val, t2addrmode_so_reg:$addr)>;
+let AddedComplexity = 8 in {
+ def : T2Pat<(atomic_load_acquire_8 addr_offset_none:$addr), (t2LDAB addr_offset_none:$addr)>;
+ def : T2Pat<(atomic_load_acquire_16 addr_offset_none:$addr), (t2LDAH addr_offset_none:$addr)>;
+ def : T2Pat<(atomic_load_acquire_32 addr_offset_none:$addr), (t2LDA addr_offset_none:$addr)>;
+ def : T2Pat<(atomic_store_release_8 addr_offset_none:$addr, GPR:$val), (t2STLB GPR:$val, addr_offset_none:$addr)>;
+ def : T2Pat<(atomic_store_release_16 addr_offset_none:$addr, GPR:$val), (t2STLH GPR:$val, addr_offset_none:$addr)>;
+ def : T2Pat<(atomic_store_release_32 addr_offset_none:$addr, GPR:$val), (t2STL GPR:$val, addr_offset_none:$addr)>;
+}
+
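For reference, a minimal sketch (assuming a C++11 compiler targeting an ARMv8 Thumb2 subtarget) of the code these AddedComplexity patterns are meant to cover, letting acquire loads and release stores select lda/stl directly instead of a plain access plus a barrier:

    #include <atomic>

    int load_flag(const std::atomic<int> &flag) {
      return flag.load(std::memory_order_acquire);   // candidate for lda
    }

    void publish(std::atomic<int> &flag, int v) {
      flag.store(v, std::memory_order_release);      // candidate for stl
    }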
//===----------------------------------------------------------------------===//
// Assembler aliases
@@ -3981,7 +4266,8 @@ def : t2InstAlias<"sbc${s}${p} $Rd, $Rn, $ShiftedRm",
// Aliases for ADD without the ".w" optional width specifier.
def : t2InstAlias<"add${s}${p} $Rd, $Rn, $imm",
- (t2ADDri GPRnopc:$Rd, GPRnopc:$Rn, t2_so_imm:$imm, pred:$p, cc_out:$s)>;
+ (t2ADDri GPRnopc:$Rd, GPRnopc:$Rn, t2_so_imm:$imm, pred:$p,
+ cc_out:$s)>;
def : t2InstAlias<"add${p} $Rd, $Rn, $imm",
(t2ADDri12 GPRnopc:$Rd, GPR:$Rn, imm0_4095:$imm, pred:$p)>;
def : t2InstAlias<"add${s}${p} $Rd, $Rn, $Rm",
@@ -4056,9 +4342,9 @@ def : t2InstAlias<"tst${p} $Rn, $Rm",
(t2TSTrr GPRnopc:$Rn, rGPR:$Rm, pred:$p)>;
// Memory barriers
-def : InstAlias<"dmb", (t2DMB 0xf)>, Requires<[IsThumb, HasDB]>;
-def : InstAlias<"dsb", (t2DSB 0xf)>, Requires<[IsThumb, HasDB]>;
-def : InstAlias<"isb", (t2ISB 0xf)>, Requires<[IsThumb, HasDB]>;
+def : InstAlias<"dmb${p}", (t2DMB 0xf, pred:$p)>, Requires<[HasDB]>;
+def : InstAlias<"dsb${p}", (t2DSB 0xf, pred:$p)>, Requires<[HasDB]>;
+def : InstAlias<"isb${p}", (t2ISB 0xf, pred:$p)>, Requires<[HasDB]>;
// Alias for LDR, LDRB, LDRH, LDRSB, and LDRSH without the ".w" optional
// width specifier.
@@ -4085,7 +4371,7 @@ def : t2InstAlias<"ldrsh${p} $Rt, $addr",
(t2LDRSHs rGPR:$Rt, t2addrmode_so_reg:$addr, pred:$p)>;
def : t2InstAlias<"ldr${p} $Rt, $addr",
- (t2LDRpci GPR:$Rt, t2ldrlabel:$addr, pred:$p)>;
+ (t2LDRpci rGPR:$Rt, t2ldrlabel:$addr, pred:$p)>;
def : t2InstAlias<"ldrb${p} $Rt, $addr",
(t2LDRBpci rGPR:$Rt, t2ldrlabel:$addr, pred:$p)>;
def : t2InstAlias<"ldrh${p} $Rt, $addr",
@@ -4247,16 +4533,16 @@ def : t2InstAlias<"mvn${p} $Rd, $imm",
(t2MOVi rGPR:$Rd, t2_so_imm_not:$imm, pred:$p, zero_reg)>;
// Same for AND <--> BIC
def : t2InstAlias<"bic${s}${p} $Rd, $Rn, $imm",
- (t2ANDri rGPR:$Rd, rGPR:$Rn, so_imm_not:$imm,
+ (t2ANDri rGPR:$Rd, rGPR:$Rn, t2_so_imm_not:$imm,
pred:$p, cc_out:$s)>;
def : t2InstAlias<"bic${s}${p} $Rdn, $imm",
- (t2ANDri rGPR:$Rdn, rGPR:$Rdn, so_imm_not:$imm,
+ (t2ANDri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
pred:$p, cc_out:$s)>;
def : t2InstAlias<"and${s}${p} $Rd, $Rn, $imm",
- (t2BICri rGPR:$Rd, rGPR:$Rn, so_imm_not:$imm,
+ (t2BICri rGPR:$Rd, rGPR:$Rn, t2_so_imm_not:$imm,
pred:$p, cc_out:$s)>;
def : t2InstAlias<"and${s}${p} $Rdn, $imm",
- (t2BICri rGPR:$Rdn, rGPR:$Rdn, so_imm_not:$imm,
+ (t2BICri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
pred:$p, cc_out:$s)>;
// Likewise, "add Rd, t2_so_imm_neg" -> sub
def : t2InstAlias<"add${s}${p} $Rd, $Rn, $imm",
@@ -4298,7 +4584,7 @@ def : t2InstAlias<"adr${p} $Rd, $addr",
// LDR(literal) w/ alternate [pc, #imm] syntax.
def t2LDRpcrel : t2AsmPseudo<"ldr${p} $Rt, $addr",
- (ins GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+ (ins GPR:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
def t2LDRBpcrel : t2AsmPseudo<"ldrb${p} $Rt, $addr",
(ins GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
def t2LDRHpcrel : t2AsmPseudo<"ldrh${p} $Rt, $addr",
@@ -4309,7 +4595,7 @@ def t2LDRSHpcrel : t2AsmPseudo<"ldrsh${p} $Rt, $addr",
(ins GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
// Version w/ the .w suffix.
def : t2InstAlias<"ldr${p}.w $Rt, $addr",
- (t2LDRpcrel GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+ (t2LDRpcrel GPR:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p), 0>;
def : t2InstAlias<"ldrb${p}.w $Rt, $addr",
(t2LDRBpcrel GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
def : t2InstAlias<"ldrh${p}.w $Rt, $addr",
@@ -4321,3 +4607,10 @@ def : t2InstAlias<"ldrsh${p}.w $Rt, $addr",
def : t2InstAlias<"add${p} $Rd, pc, $imm",
(t2ADR rGPR:$Rd, imm0_4095:$imm, pred:$p)>;
+
+// PLD/PLDW/PLI with alternate literal form.
+def : t2InstAlias<"pld${p} $addr",
+ (t2PLDpci t2ldr_pcrel_imm12:$addr, pred:$p)>;
+def : InstAlias<"pli${p} $addr",
+ (t2PLIpci t2ldr_pcrel_imm12:$addr, pred:$p)>,
+ Requires<[IsThumb2,HasV7]>;
diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td
index b5a896c69985..a8cdc5ca0637 100644
--- a/lib/Target/ARM/ARMInstrVFP.td
+++ b/lib/Target/ARM/ARMInstrVFP.td
@@ -224,7 +224,36 @@ defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
(VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;
-// FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
+// FLDMX, FSTMX - Load and store multiple unknown precision registers for
+// pre-armv6 cores.
+// These instructions are deprecated, so we don't want them to get selected.
+multiclass vfp_ldstx_mult<string asm, bit L_bit> {
+ // Unknown precision
+ def XIA :
+ AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
+ IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+ }
+ def XIA_UPD :
+ AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
+ IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+ }
+ def XDB_UPD :
+ AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
+ IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{21} = 1;
+ let Inst{20} = L_bit;
+ }
+}
+
+defm FLDM : vfp_ldstx_mult<"fldm", 1>;
+defm FSTM : vfp_ldstx_mult<"fstm", 0>;
//===----------------------------------------------------------------------===//
// FP Binary Operations.
@@ -304,9 +333,52 @@ def VNMULS : ASbI<0b11100, 0b10, 1, 0,
let D = VFPNeonA8Domain;
}
+multiclass vsel_inst<string op, bits<2> opc, int CC> {
+ let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
+ Uses = [CPSR], AddedComplexity = 4 in {
+ def S : ASbInp<0b11100, opc, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
+ [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>,
+ Requires<[HasFPARMv8]>;
+
+ def D : ADbInp<0b11100, opc, 0,
+ (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
+ NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
+ [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>,
+ Requires<[HasFPARMv8, HasDPVFP]>;
+ }
+}
+
+// The CC constants here match ARMCC::CondCodes.
+defm VSELGT : vsel_inst<"gt", 0b11, 12>;
+defm VSELGE : vsel_inst<"ge", 0b10, 10>;
+defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
+defm VSELVS : vsel_inst<"vs", 0b01, 6>;
+
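As a rough illustration (not taken from the patch), the sort of source the vsel patterns above let an FPARMv8 target lower without a branch:

    float select_ge(float a, float b, float x, float y) {
      return a >= b ? x : y;   // vcmp sets the flags, then a vselge candidate
    }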
+multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
+ let DecoderNamespace = "VFPV8", PostEncoderMethod = "" in {
+ def S : ASbInp<0b11101, 0b00, opc,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
+ [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
+ Requires<[HasFPARMv8]>;
+
+ def D : ADbInp<0b11101, 0b00, opc,
+ (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
+ NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
+ [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
+ Requires<[HasFPARMv8, HasDPVFP]>;
+ }
+}
+
+defm VMAXNM : vmaxmin_inst<"vmaxnm", 0, ARMvmaxnm>;
+defm VMINNM : vmaxmin_inst<"vminnm", 1, ARMvminnm>;
+
// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
- (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
+ (VNMULD DPR:$a, DPR:$b)>,
+ Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
(VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
@@ -437,9 +509,11 @@ def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
let Inst{11-8} = 0b1011;
let Inst{7-6} = 0b11;
let Inst{4} = 0;
+
+ let Predicates = [HasVFP2, HasDPVFP];
}
-// Between half-precision and single-precision. For disassembly only.
+// Between half, single and double-precision. For disassembly only.
// FIXME: Verify encoding after integrated assembler is working.
def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
@@ -464,6 +538,111 @@ def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
/* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
[/* For disassembly only; pattern left blank */]>;
+def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
+ (outs DPR:$Dd), (ins SPR:$Sm),
+ NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm",
+ []>, Requires<[HasFPARMv8, HasDPVFP]> {
+ // Instruction operands.
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+}
+
+def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
+ (outs SPR:$Sd), (ins DPR:$Dm),
+ NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm",
+ []>, Requires<[HasFPARMv8, HasDPVFP]> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Dm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+}
+
+def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
+ (outs DPR:$Dd), (ins SPR:$Sm),
+ NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm",
+ []>, Requires<[HasFPARMv8, HasDPVFP]> {
+ // Instruction operands.
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+}
+
+def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
+ (outs SPR:$Sd), (ins DPR:$Dm),
+ NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm",
+ []>, Requires<[HasFPARMv8, HasDPVFP]> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Dm;
+
+ // Encode instruction operands.
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+}
+
+multiclass vcvt_inst<string opc, bits<2> rm> {
+ let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
+ def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
+ []>, Requires<[HasFPARMv8]> {
+ let Inst{17-16} = rm;
+ }
+
+ def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
+ []>, Requires<[HasFPARMv8]> {
+ let Inst{17-16} = rm;
+ }
+
+ def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
+ (outs SPR:$Sd), (ins DPR:$Dm),
+ NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
+ []>, Requires<[HasFPARMv8, HasDPVFP]> {
+ bits<5> Dm;
+
+ let Inst{17-16} = rm;
+
+ // Encode instruction operands
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{8} = 1;
+ }
+
+ def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
+ (outs SPR:$Sd), (ins DPR:$Dm),
+ NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
+ []>, Requires<[HasFPARMv8, HasDPVFP]> {
+ bits<5> Dm;
+
+ let Inst{17-16} = rm;
+
+ // Encode instruction operands
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{8} = 1;
+ }
+ }
+}
+
+defm VCVTA : vcvt_inst<"a", 0b00>;
+defm VCVTN : vcvt_inst<"n", 0b01>;
+defm VCVTP : vcvt_inst<"p", 0b10>;
+defm VCVTM : vcvt_inst<"m", 0b11>;
+
def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
(outs DPR:$Dd), (ins DPR:$Dm),
IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
@@ -478,6 +657,63 @@ def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
let D = VFPNeonA8Domain;
}
+multiclass vrint_inst_zrx<string opc, bit op, bit op2> {
+ def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm",
+ []>, Requires<[HasFPARMv8]> {
+ let Inst{7} = op2;
+ let Inst{16} = op;
+ }
+ def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
+ (outs DPR:$Dd), (ins DPR:$Dm),
+ NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm",
+ []>, Requires<[HasFPARMv8, HasDPVFP]> {
+ let Inst{7} = op2;
+ let Inst{16} = op;
+ }
+
+ def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
+ (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p)>,
+ Requires<[HasFPARMv8]>;
+ def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
+ (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p)>,
+ Requires<[HasFPARMv8,HasDPVFP]>;
+}
+
+defm VRINTZ : vrint_inst_zrx<"z", 0, 1>;
+defm VRINTR : vrint_inst_zrx<"r", 0, 0>;
+defm VRINTX : vrint_inst_zrx<"x", 1, 0>;
+
+multiclass vrint_inst_anpm<string opc, bits<2> rm> {
+ let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
+ def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
+ []>, Requires<[HasFPARMv8]> {
+ let Inst{17-16} = rm;
+ }
+ def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
+ (outs DPR:$Dd), (ins DPR:$Dm),
+ NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
+ []>, Requires<[HasFPARMv8, HasDPVFP]> {
+ let Inst{17-16} = rm;
+ }
+ }
+
+ def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
+ (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm)>,
+ Requires<[HasFPARMv8]>;
+ def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
+ (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm)>,
+ Requires<[HasFPARMv8,HasDPVFP]>;
+}
+
+defm VRINTA : vrint_inst_anpm<"a", 0b00>;
+defm VRINTN : vrint_inst_anpm<"n", 0b01>;
+defm VRINTP : vrint_inst_anpm<"p", 0b10>;
+defm VRINTM : vrint_inst_anpm<"m", 0b11>;
+
def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
(outs DPR:$Dd), (ins DPR:$Dm),
IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
@@ -667,6 +903,8 @@ class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
let Inst{5} = Sm{0};
let Inst{15-12} = Dd{3-0};
let Inst{22} = Dd{4};
+
+ let Predicates = [HasVFP2, HasDPVFP];
}
class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
@@ -738,6 +976,8 @@ class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
let Inst{5} = Dm{4};
let Inst{15-12} = Sd{4-1};
let Inst{22} = Sd{0};
+
+ let Predicates = [HasVFP2, HasDPVFP];
}
class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
@@ -841,7 +1081,8 @@ let Constraints = "$a = $dst" in {
class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
bit op5, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
- : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
+ : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern>,
+ Sched<[WriteCvtFP]> {
bits<5> dst;
// if dp_operation then UInt(D:Vd) else UInt(Vd:D);
let Inst{22} = dst{0};
@@ -852,11 +1093,14 @@ class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
bit op5, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
- : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
+ : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern>,
+ Sched<[WriteCvtFP]> {
bits<5> dst;
// if dp_operation then UInt(D:Vd) else UInt(Vd:D);
let Inst{22} = dst{4};
let Inst{15-12} = dst{3-0};
+
+ let Predicates = [HasVFP2, HasDPVFP];
}
def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
@@ -969,7 +1213,7 @@ def VMLAD : ADbI<0b11100, 0b00, 0, 0,
[(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
+ Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -985,7 +1229,7 @@ def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
(VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
+ Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
(VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx,DontUseFusedMAC]>;
@@ -996,7 +1240,7 @@ def VMLSD : ADbI<0b11100, 0b00, 1, 0,
[(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
+ Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -1012,7 +1256,7 @@ def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
(VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
+ Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
(VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
@@ -1023,7 +1267,7 @@ def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
[(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
+ Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -1039,7 +1283,7 @@ def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
(VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
+ Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
(VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
@@ -1050,7 +1294,7 @@ def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
[(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
+ Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -1065,7 +1309,7 @@ def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
(VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
+ Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
(VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
@@ -1079,7 +1323,7 @@ def VFMAD : ADbI<0b11101, 0b10, 0, 0,
[(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -1094,7 +1338,7 @@ def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
(VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
(VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
@@ -1103,7 +1347,7 @@ def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
// (fma x, y, z) -> (vfms z, x, y)
def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
(VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
- Requires<[HasVFP4]>;
+ Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
(VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
@@ -1114,7 +1358,7 @@ def VFMSD : ADbI<0b11101, 0b10, 1, 0,
[(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -1129,7 +1373,7 @@ def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
(VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
(VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
@@ -1138,14 +1382,14 @@ def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
// (fma (fneg x), y, z) -> (vfms z, x, y)
def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
(VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
- Requires<[HasVFP4]>;
+ Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
(VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
// (fma x, (fneg y), z) -> (vfms z, x, y)
def : Pat<(f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin)),
(VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
- Requires<[HasVFP4]>;
+ Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin)),
(VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
@@ -1156,7 +1400,7 @@ def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
[(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -1171,7 +1415,7 @@ def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
(VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
(VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
@@ -1180,14 +1424,14 @@ def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
// (fneg (fma x, y, z)) -> (vfnma z, x, y)
def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
(VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
- Requires<[HasVFP4]>;
+ Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
(VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
// (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
(VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
- Requires<[HasVFP4]>;
+ Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
(VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
@@ -1198,7 +1442,7 @@ def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
[(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -1212,7 +1456,7 @@ def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
(VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP4,UseFusedMAC]>;
+ Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
(VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
@@ -1222,21 +1466,21 @@ def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
// (fma x, y, (fneg z)) -> (vfnms z, x, y))
def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
(VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
- Requires<[HasVFP4]>;
+ Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
(VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
// (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
(VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
- Requires<[HasVFP4]>;
+ Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
(VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
// (fneg (fma x, (fneg y), z) -> (vfnms z, x, y)
def : Pat<(fneg (f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin))),
(VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
- Requires<[HasVFP4]>;
+ Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin))),
(VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
@@ -1246,15 +1490,17 @@ def : Pat<(fneg (f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin))),
//
let neverHasSideEffects = 1 in {
-def VMOVDcc : ARMPseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, pred:$p),
- 4, IIC_fpUNA64,
- [/*(set DPR:$Dd, (ARMcmov DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
- RegConstraint<"$Dn = $Dd">;
-
-def VMOVScc : ARMPseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, pred:$p),
- 4, IIC_fpUNA32,
- [/*(set SPR:$Sd, (ARMcmov SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
- RegConstraint<"$Sn = $Sd">;
+def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p),
+ IIC_fpUNA64,
+ [(set (f64 DPR:$Dd),
+ (ARMcmov DPR:$Dn, DPR:$Dm, cmovpred:$p))]>,
+ RegConstraint<"$Dn = $Dd">, Requires<[HasVFP2,HasDPVFP]>;
+
+def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p),
+ IIC_fpUNA32,
+ [(set (f32 SPR:$Sd),
+ (ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>,
+ RegConstraint<"$Sn = $Sd">, Requires<[HasVFP2]>;
} // neverHasSideEffects
//===----------------------------------------------------------------------===//
@@ -1300,6 +1546,12 @@ let Uses = [FPSCR] in {
"vmrs", "\t$Rt, mvfr0", []>;
def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPR:$Rt), (ins),
"vmrs", "\t$Rt, mvfr1", []>;
+ def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPR:$Rt), (ins),
+ "vmrs", "\t$Rt, mvfr2", []>, Requires<[HasFPARMv8]>;
+ def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPR:$Rt), (ins),
+ "vmrs", "\t$Rt, fpinst", []>;
+ def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPR:$Rt), (ins),
+ "vmrs", "\t$Rt, fpinst2", []>;
}
//===----------------------------------------------------------------------===//
@@ -1333,6 +1585,11 @@ let Defs = [FPSCR] in {
// System level GPR -> FPSID
def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPR:$src),
"vmsr", "\tfpsid, $src", []>;
+
+ def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPR:$src),
+ "vmsr", "\tfpinst, $src", []>;
+ def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPR:$src),
+ "vmsr", "\tfpinst2, $src", []>;
}
//===----------------------------------------------------------------------===//
@@ -1344,7 +1601,8 @@ let isReMaterializable = 1 in {
def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
VFPMiscFrm, IIC_fpUNA64,
"vmov", ".f64\t$Dd, $imm",
- [(set DPR:$Dd, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
+ [(set DPR:$Dd, vfp_f64imm:$imm)]>,
+ Requires<[HasVFP3,HasDPVFP]> {
bits<5> Dd;
bits<8> imm;
@@ -1426,23 +1684,23 @@ def : VFP2MnemonicAlias<"fmrx", "vmrs">;
def : VFP2MnemonicAlias<"fmxr", "vmsr">;
// Be friendly and accept the old form of zero-compare
-def : VFP2InstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
+def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;
def : VFP2InstAlias<"fmstat${p}", (FMSTAT pred:$p)>;
def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
(VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
-def : VFP2InstAlias<"faddd${p} $Dd, $Dn, $Dm",
- (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
+def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn, $Dm",
+ (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
(VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
-def : VFP2InstAlias<"fsubd${p} $Dd, $Dn, $Dm",
- (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
+def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm",
+ (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
// No need for the size suffix on VSQRT. It's implied by the register classes.
def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
-def : VFP2InstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;
+def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;
// VLDR/VSTR accept an optional type suffix.
def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index c8ed5760f935..61596d54b692 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -90,6 +90,10 @@ namespace {
typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
typedef MemOpQueue::iterator MemOpQueueIter;
+ void findUsesOfImpDef(SmallVectorImpl<MachineOperand *> &UsesOfImpDefs,
+ const MemOpQueue &MemOps, unsigned DefReg,
+ unsigned RangeBegin, unsigned RangeEnd);
+
bool MergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
int Offset, unsigned Base, bool BaseKill, int Opcode,
ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch,
@@ -109,12 +113,12 @@ namespace {
unsigned PredReg,
unsigned Scratch,
DebugLoc dl,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges);
+ SmallVectorImpl<MachineBasicBlock::iterator> &Merges);
void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
int Opcode, unsigned Size,
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned Scratch, MemOpQueue &MemOps,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges);
+ SmallVectorImpl<MachineBasicBlock::iterator> &Merges);
void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
@@ -360,6 +364,62 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
return true;
}
+/// \brief Find all instructions using a given imp-def within a range.
+///
+/// We are trying to combine a range of instructions, one of which (located at
+/// position RangeBegin) implicitly defines a register. The final LDM/STM will
+/// be placed at RangeEnd, and so any uses of this definition between RangeStart
+/// and RangeEnd must be modified to use an undefined value.
+///
+/// The live range continues until we find a second definition or one of the
+/// uses we find is a kill. Unfortunately MemOps is not sorted by Position, so
+/// we must consider all uses and decide which are relevant in a second pass.
+void ARMLoadStoreOpt::findUsesOfImpDef(
+ SmallVectorImpl<MachineOperand *> &UsesOfImpDefs, const MemOpQueue &MemOps,
+ unsigned DefReg, unsigned RangeBegin, unsigned RangeEnd) {
+ std::map<unsigned, MachineOperand *> Uses;
+ unsigned LastLivePos = RangeEnd;
+
+ // First we find all uses of this register with Position between RangeBegin
+ // and RangeEnd; any or all of these could be uses of a definition at
+ // RangeBegin. We also record the latest position a definition at RangeBegin
+ // would be considered live.
+ for (unsigned i = 0; i < MemOps.size(); ++i) {
+ MachineInstr &MI = *MemOps[i].MBBI;
+ unsigned MIPosition = MemOps[i].Position;
+ if (MIPosition <= RangeBegin || MIPosition > RangeEnd)
+ continue;
+
+ // If this instruction defines the register, then any later use will be of
+ // that definition rather than ours.
+ if (MI.definesRegister(DefReg))
+ LastLivePos = std::min(LastLivePos, MIPosition);
+
+ MachineOperand *UseOp = MI.findRegisterUseOperand(DefReg);
+ if (!UseOp)
+ continue;
+
+ // If this instruction kills the register then (assuming liveness is
+ // correct when we start) we don't need to think about anything after here.
+ if (UseOp->isKill())
+ LastLivePos = std::min(LastLivePos, MIPosition);
+
+ Uses[MIPosition] = UseOp;
+ }
+
+ // Now we traverse the list of all uses, and append the ones that actually use
+ // our definition to the requested list.
+ for (std::map<unsigned, MachineOperand *>::iterator I = Uses.begin(),
+ E = Uses.end();
+ I != E; ++I) {
+ // List is sorted by position so once we've found one out of range there
+ // will be no more to consider.
+ if (I->first > LastLivePos)
+ break;
+ UsesOfImpDefs.push_back(I->second);
+ }
+}
+
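A compact sketch of the selection rule the comment above describes, assuming uses have already been keyed by position (the helper name and types here are illustrative only): every recorded use at or before LastLivePos, the first later re-definition or kill, belongs to the imp-def being moved, and nothing after it does.

    #include <map>
    #include <vector>

    template <typename OperandT>
    void takeLiveUses(const std::map<unsigned, OperandT *> &UsesByPos,
                      unsigned LastLivePos, std::vector<OperandT *> &Out) {
      for (const auto &Entry : UsesByPos) {
        if (Entry.first > LastLivePos)
          break;                 // map is position-sorted; the rest are later
        Out.push_back(Entry.second);
      }
    }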
// MergeOpsUpdate - call MergeOps and update MemOps and merges accordingly on
// success.
void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
@@ -371,7 +431,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned Scratch,
DebugLoc dl,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
+ SmallVectorImpl<MachineBasicBlock::iterator> &Merges) {
// First calculate which of the registers should be killed by the merged
// instruction.
const unsigned insertPos = memOps[insertAfter].Position;
@@ -392,6 +452,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
SmallVector<std::pair<unsigned, bool>, 8> Regs;
SmallVector<unsigned, 8> ImpDefs;
+ SmallVector<MachineOperand *, 8> UsesOfImpDefs;
for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
unsigned Reg = memOps[i].Reg;
// If we are inserting the merged operation after an operation that
@@ -406,6 +467,12 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
unsigned DefReg = MO->getReg();
if (std::find(ImpDefs.begin(), ImpDefs.end(), DefReg) == ImpDefs.end())
ImpDefs.push_back(DefReg);
+
+ // There may be other uses of the definition between this instruction and
+ // the eventual LDM/STM position. These should be marked undef if the
+ // merge takes place.
+ findUsesOfImpDef(UsesOfImpDefs, memOps, DefReg, memOps[i].Position,
+ insertPos);
}
}
@@ -418,6 +485,16 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
// Merge succeeded, update records.
Merges.push_back(prior(Loc));
+
+ // In gathering loads together, we may have moved the imp-def of a register
+ // past one of its uses. This is OK, since we know better than the rest of
+ // LLVM what's OK with ARM loads and stores; but we still have to adjust the
+ // affected uses.
+ for (SmallVectorImpl<MachineOperand *>::iterator I = UsesOfImpDefs.begin(),
+ E = UsesOfImpDefs.end();
+ I != E; ++I)
+ (*I)->setIsUndef();
+
for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
// Remove kill flags from any memops that come before insertPos.
if (Regs[i-memOpsBegin].second) {
@@ -444,10 +521,10 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
/// load / store multiple instructions.
void
ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
- unsigned Base, int Opcode, unsigned Size,
- ARMCC::CondCodes Pred, unsigned PredReg,
- unsigned Scratch, MemOpQueue &MemOps,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
+ unsigned Base, int Opcode, unsigned Size,
+ ARMCC::CondCodes Pred, unsigned PredReg,
+ unsigned Scratch, MemOpQueue &MemOps,
+ SmallVectorImpl<MachineBasicBlock::iterator> &Merges) {
bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
int Offset = MemOps[SIndex].Offset;
int SOffset = Offset;
@@ -489,7 +566,10 @@ ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
if (Reg != ARM::SP &&
NewOffset == Offset + (int)Size &&
((isNotVFP && RegNum > PRegNum) ||
- ((Count < Limit) && RegNum == PRegNum+1))) {
+ ((Count < Limit) && RegNum == PRegNum+1)) &&
+ // On Swift we don't want vldm/vstm to start with an odd register number
+ // because Q register unaligned vldm/vstm need more uops.
+ (!STI->isSwift() || isNotVFP || Count != 1 || !(PRegNum & 0x1))) {
Offset += Size;
PRegNum = RegNum;
++Count;
@@ -1484,7 +1564,7 @@ namespace {
unsigned &PredReg, ARMCC::CondCodes &Pred,
bool &isT2);
bool RescheduleOps(MachineBasicBlock *MBB,
- SmallVector<MachineInstr*, 4> &Ops,
+ SmallVectorImpl<MachineInstr *> &Ops,
unsigned Base, bool isLd,
DenseMap<MachineInstr*, unsigned> &MI2LocMap);
bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
@@ -1602,8 +1682,9 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
return false;
// Make sure the base address satisfies i64 ld / st alignment requirement.
+ // At the moment, we ignore the memory operand's value.
+ // If we want to use AliasAnalysis, we should check it accordingly.
if (!Op0->hasOneMemOperand() ||
- !(*Op0->memoperands_begin())->getValue() ||
(*Op0->memoperands_begin())->isVolatile())
return false;
@@ -1655,7 +1736,7 @@ namespace {
}
bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
- SmallVector<MachineInstr*, 4> &Ops,
+ SmallVectorImpl<MachineInstr *> &Ops,
unsigned Base, bool isLd,
DenseMap<MachineInstr*, unsigned> &MI2LocMap) {
bool RetVal = false;
@@ -1857,9 +1938,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
if (!StopHere)
BI->second.push_back(MI);
} else {
- SmallVector<MachineInstr*, 4> MIs;
- MIs.push_back(MI);
- Base2LdsMap[Base] = MIs;
+ Base2LdsMap[Base].push_back(MI);
LdBases.push_back(Base);
}
} else {
@@ -1875,9 +1954,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
if (!StopHere)
BI->second.push_back(MI);
} else {
- SmallVector<MachineInstr*, 4> MIs;
- MIs.push_back(MI);
- Base2StsMap[Base] = MIs;
+ Base2StsMap[Base].push_back(MI);
StBases.push_back(Base);
}
}
@@ -1893,7 +1970,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
// Re-schedule loads.
for (unsigned i = 0, e = LdBases.size(); i != e; ++i) {
unsigned Base = LdBases[i];
- SmallVector<MachineInstr*, 4> &Lds = Base2LdsMap[Base];
+ SmallVectorImpl<MachineInstr *> &Lds = Base2LdsMap[Base];
if (Lds.size() > 1)
RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap);
}
@@ -1901,7 +1978,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
// Re-schedule stores.
for (unsigned i = 0, e = StBases.size(); i != e; ++i) {
unsigned Base = StBases[i];
- SmallVector<MachineInstr*, 4> &Sts = Base2StsMap[Base];
+ SmallVectorImpl<MachineInstr *> &Sts = Base2StsMap[Base];
if (Sts.size() > 1)
RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap);
}
diff --git a/lib/Target/ARM/ARMMCInstLower.cpp b/lib/Target/ARM/ARMMCInstLower.cpp
index b6414832003d..e12c9c61ab14 100644
--- a/lib/Target/ARM/ARMMCInstLower.cpp
+++ b/lib/Target/ARM/ARMMCInstLower.cpp
@@ -82,7 +82,7 @@ bool ARMAsmPrinter::lowerOperand(const MachineOperand &MO,
MO.getMBB()->getSymbol(), OutContext));
break;
case MachineOperand::MO_GlobalAddress:
- MCOp = GetSymbolRef(MO, Mang->getSymbol(MO.getGlobal()));
+ MCOp = GetSymbolRef(MO, getSymbol(MO.getGlobal()));
break;
case MachineOperand::MO_ExternalSymbol:
MCOp = GetSymbolRef(MO,
diff --git a/lib/Target/ARM/ARMMachineFunctionInfo.h b/lib/Target/ARM/ARMMachineFunctionInfo.h
index f4248fcfcc75..010edf33cea4 100644
--- a/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -36,6 +36,13 @@ class ARMFunctionInfo : public MachineFunctionInfo {
/// 'isThumb'.
bool hasThumb2;
+ /// StByValParamsPadding - For a parameter that is split between
+ /// GPRs and memory: while recovering the GPR part, when
+ /// StackAlignment == 8 and the GPR-part size mod 8 != 0,
+ /// we need to insert a gap before the parameter's start address so that
+ /// the GPR part can be "attached" to the part passed via the stack.
+ unsigned StByValParamsPadding;
+
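A hypothetical sketch of the gap the comment describes (the helper and exact formula are illustrative, not the in-tree computation): with StackAlignment == 8, a 12-byte GPR part needs a 4-byte gap so the register-saved part lines up with the stack-passed remainder.

    unsigned byValGPRPadding(unsigned GPRPartSize, unsigned StackAlign = 8) {
      unsigned Rem = GPRPartSize % StackAlign;
      return Rem ? StackAlign - Rem : 0;   // e.g. 12 % 8 == 4  ->  pad by 4
    }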
/// VarArgsRegSaveSize - Size of the register save area for vararg functions.
///
unsigned ArgRegsSaveSize;
@@ -77,12 +84,6 @@ class ARMFunctionInfo : public MachineFunctionInfo {
unsigned GPRCS2Size;
unsigned DPRCSSize;
- /// GPRCS1Frames, GPRCS2Frames, DPRCSFrames - Keeps track of frame indices
- /// which belong to these spill areas.
- BitVector GPRCS1Frames;
- BitVector GPRCS2Frames;
- BitVector DPRCSFrames;
-
/// NumAlignedDPRCS2Regs - The number of callee-saved DPRs that are saved in
/// the aligned portion of the stack frame. This is always a contiguous
/// sequence of D-registers starting from d8.
@@ -121,7 +122,6 @@ public:
LRSpilledForFarJump(false),
FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
- GPRCS1Frames(0), GPRCS2Frames(0), DPRCSFrames(0),
NumAlignedDPRCS2Regs(0),
JumpTableUId(0), PICLabelUId(0),
VarArgsFrameIndex(0), HasITBlocks(false), GlobalBaseReg(0) {}
@@ -129,11 +129,11 @@ public:
explicit ARMFunctionInfo(MachineFunction &MF) :
isThumb(MF.getTarget().getSubtarget<ARMSubtarget>().isThumb()),
hasThumb2(MF.getTarget().getSubtarget<ARMSubtarget>().hasThumb2()),
+ StByValParamsPadding(0),
ArgRegsSaveSize(0), HasStackFrame(false), RestoreSPFromFP(false),
LRSpilledForFarJump(false),
FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
- GPRCS1Frames(32), GPRCS2Frames(32), DPRCSFrames(32),
JumpTableUId(0), PICLabelUId(0),
VarArgsFrameIndex(0), HasITBlocks(false), GlobalBaseReg(0) {}
@@ -141,7 +141,14 @@ public:
bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; }
bool isThumb2Function() const { return isThumb && hasThumb2; }
- unsigned getArgRegsSaveSize() const { return ArgRegsSaveSize; }
+ unsigned getStoredByValParamsPadding() const { return StByValParamsPadding; }
+ void setStoredByValParamsPadding(unsigned p) { StByValParamsPadding = p; }
+
+ unsigned getArgRegsSaveSize(unsigned Align = 0) const {
+ if (!Align)
+ return ArgRegsSaveSize;
+ return (ArgRegsSaveSize + Align - 1) & ~(Align - 1);
+ }
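The expression above is the usual power-of-two align-up identity; a minimal standalone sketch (the name is illustrative):

    unsigned alignTo(unsigned Size, unsigned Align) {  // Align: power of two
      return (Size + Align - 1) & ~(Align - 1);
    }
    // alignTo(12, 8) == 16, alignTo(16, 8) == 16, alignTo(0, 8) == 0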
void setArgRegsSaveSize(unsigned s) { ArgRegsSaveSize = s; }
bool hasStackFrame() const { return HasStackFrame; }
@@ -175,59 +182,6 @@ public:
void setGPRCalleeSavedArea2Size(unsigned s) { GPRCS2Size = s; }
void setDPRCalleeSavedAreaSize(unsigned s) { DPRCSSize = s; }
- bool isGPRCalleeSavedArea1Frame(int fi) const {
- if (fi < 0 || fi >= (int)GPRCS1Frames.size())
- return false;
- return GPRCS1Frames[fi];
- }
- bool isGPRCalleeSavedArea2Frame(int fi) const {
- if (fi < 0 || fi >= (int)GPRCS2Frames.size())
- return false;
- return GPRCS2Frames[fi];
- }
- bool isDPRCalleeSavedAreaFrame(int fi) const {
- if (fi < 0 || fi >= (int)DPRCSFrames.size())
- return false;
- return DPRCSFrames[fi];
- }
-
- void addGPRCalleeSavedArea1Frame(int fi) {
- if (fi >= 0) {
- int Size = GPRCS1Frames.size();
- if (fi >= Size) {
- Size *= 2;
- if (fi >= Size)
- Size = fi+1;
- GPRCS1Frames.resize(Size);
- }
- GPRCS1Frames[fi] = true;
- }
- }
- void addGPRCalleeSavedArea2Frame(int fi) {
- if (fi >= 0) {
- int Size = GPRCS2Frames.size();
- if (fi >= Size) {
- Size *= 2;
- if (fi >= Size)
- Size = fi+1;
- GPRCS2Frames.resize(Size);
- }
- GPRCS2Frames[fi] = true;
- }
- }
- void addDPRCalleeSavedAreaFrame(int fi) {
- if (fi >= 0) {
- int Size = DPRCSFrames.size();
- if (fi >= Size) {
- Size *= 2;
- if (fi >= Size)
- Size = fi+1;
- DPRCSFrames.resize(Size);
- }
- DPRCSFrames[fi] = true;
- }
- }
-
unsigned createJumpTableUId() {
return JumpTableUId++;
}
diff --git a/lib/Target/ARM/ARMRegisterInfo.cpp b/lib/Target/ARM/ARMRegisterInfo.cpp
index 6f3819afd0a5..a7880364d89a 100644
--- a/lib/Target/ARM/ARMRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMRegisterInfo.cpp
@@ -18,7 +18,6 @@ using namespace llvm;
void ARMRegisterInfo::anchor() { }
-ARMRegisterInfo::ARMRegisterInfo(const ARMBaseInstrInfo &tii,
- const ARMSubtarget &sti)
- : ARMBaseRegisterInfo(tii, sti) {
+ARMRegisterInfo::ARMRegisterInfo(const ARMSubtarget &sti)
+ : ARMBaseRegisterInfo(sti) {
}
diff --git a/lib/Target/ARM/ARMRegisterInfo.h b/lib/Target/ARM/ARMRegisterInfo.h
index 8a248425c33c..fb1537cf9425 100644
--- a/lib/Target/ARM/ARMRegisterInfo.h
+++ b/lib/Target/ARM/ARMRegisterInfo.h
@@ -19,13 +19,13 @@
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
- class ARMSubtarget;
- class ARMBaseInstrInfo;
+
+class ARMSubtarget;
struct ARMRegisterInfo : public ARMBaseRegisterInfo {
virtual void anchor();
public:
- ARMRegisterInfo(const ARMBaseInstrInfo &tii, const ARMSubtarget &STI);
+ ARMRegisterInfo(const ARMSubtarget &STI);
};
} // end namespace llvm
diff --git a/lib/Target/ARM/ARMRegisterInfo.td b/lib/Target/ARM/ARMRegisterInfo.td
index b0f576bc2b6f..d0457618ef6f 100644
--- a/lib/Target/ARM/ARMRegisterInfo.td
+++ b/lib/Target/ARM/ARMRegisterInfo.td
@@ -27,31 +27,31 @@ class ARMFReg<bits<16> Enc, string n> : Register<n> {
// Subregister indices.
let Namespace = "ARM" in {
-def qqsub_0 : SubRegIndex;
-def qqsub_1 : SubRegIndex;
+def qqsub_0 : SubRegIndex<256>;
+def qqsub_1 : SubRegIndex<256, 256>;
// Note: Code depends on these having consecutive numbers.
-def qsub_0 : SubRegIndex;
-def qsub_1 : SubRegIndex;
-def qsub_2 : SubRegIndex<[qqsub_1, qsub_0]>;
-def qsub_3 : SubRegIndex<[qqsub_1, qsub_1]>;
-
-def dsub_0 : SubRegIndex;
-def dsub_1 : SubRegIndex;
-def dsub_2 : SubRegIndex<[qsub_1, dsub_0]>;
-def dsub_3 : SubRegIndex<[qsub_1, dsub_1]>;
-def dsub_4 : SubRegIndex<[qsub_2, dsub_0]>;
-def dsub_5 : SubRegIndex<[qsub_2, dsub_1]>;
-def dsub_6 : SubRegIndex<[qsub_3, dsub_0]>;
-def dsub_7 : SubRegIndex<[qsub_3, dsub_1]>;
-
-def ssub_0 : SubRegIndex;
-def ssub_1 : SubRegIndex;
-def ssub_2 : SubRegIndex<[dsub_1, ssub_0]>;
-def ssub_3 : SubRegIndex<[dsub_1, ssub_1]>;
-
-def gsub_0 : SubRegIndex;
-def gsub_1 : SubRegIndex;
+def qsub_0 : SubRegIndex<128>;
+def qsub_1 : SubRegIndex<128, 128>;
+def qsub_2 : ComposedSubRegIndex<qqsub_1, qsub_0>;
+def qsub_3 : ComposedSubRegIndex<qqsub_1, qsub_1>;
+
+def dsub_0 : SubRegIndex<64>;
+def dsub_1 : SubRegIndex<64, 64>;
+def dsub_2 : ComposedSubRegIndex<qsub_1, dsub_0>;
+def dsub_3 : ComposedSubRegIndex<qsub_1, dsub_1>;
+def dsub_4 : ComposedSubRegIndex<qsub_2, dsub_0>;
+def dsub_5 : ComposedSubRegIndex<qsub_2, dsub_1>;
+def dsub_6 : ComposedSubRegIndex<qsub_3, dsub_0>;
+def dsub_7 : ComposedSubRegIndex<qsub_3, dsub_1>;
+
+def ssub_0 : SubRegIndex<32>;
+def ssub_1 : SubRegIndex<32, 32>;
+def ssub_2 : ComposedSubRegIndex<dsub_1, ssub_0>;
+def ssub_3 : ComposedSubRegIndex<dsub_1, ssub_1>;
+
+def gsub_0 : SubRegIndex<32>;
+def gsub_1 : SubRegIndex<32, 32>;
// Let TableGen synthesize the remaining 12 ssub_* indices.
// We don't need to name them.
}
@@ -157,21 +157,27 @@ def Q15 : ARMReg<15, "q15", [D30, D31]>;
// Current Program Status Register.
// We model fpscr with two registers: FPSCR models the control bits and will be
-// reserved. FPSCR_NZCV models the flag bits and will be unreserved.
-def CPSR : ARMReg<0, "cpsr">;
-def APSR : ARMReg<1, "apsr">;
-def SPSR : ARMReg<2, "spsr">;
-def FPSCR : ARMReg<3, "fpscr">;
-def FPSCR_NZCV : ARMReg<3, "fpscr_nzcv"> {
+// reserved. FPSCR_NZCV models the flag bits and will be unreserved. APSR_NZCV
+// models the APSR when it's accessed by some special instructions. In such cases
+// it has the same encoding as PC.
+def CPSR : ARMReg<0, "cpsr">;
+def APSR : ARMReg<1, "apsr">;
+def APSR_NZCV : ARMReg<15, "apsr_nzcv">;
+def SPSR : ARMReg<2, "spsr">;
+def FPSCR : ARMReg<3, "fpscr">;
+def FPSCR_NZCV : ARMReg<3, "fpscr_nzcv"> {
let Aliases = [FPSCR];
}
def ITSTATE : ARMReg<4, "itstate">;
// Special Registers - only available in privileged mode.
-def FPSID : ARMReg<0, "fpsid">;
-def MVFR1 : ARMReg<6, "mvfr1">;
-def MVFR0 : ARMReg<7, "mvfr0">;
-def FPEXC : ARMReg<8, "fpexc">;
+def FPSID : ARMReg<0, "fpsid">;
+def MVFR2 : ARMReg<5, "mvfr2">;
+def MVFR1 : ARMReg<6, "mvfr1">;
+def MVFR0 : ARMReg<7, "mvfr0">;
+def FPEXC : ARMReg<8, "fpexc">;
+def FPINST : ARMReg<9, "fpinst">;
+def FPINST2 : ARMReg<10, "fpinst2">;
// Register classes.
//
@@ -207,6 +213,16 @@ def GPRnopc : RegisterClass<"ARM", [i32], 32, (sub GPR, PC)> {
}];
}
+// GPRs without the PC but with APSR. Some instructions allow accessing the
+// APSR, while actually encoding PC in the register field. This is useful
+// for assembly and disassembly only.
+def GPRwithAPSR : RegisterClass<"ARM", [i32], 32, (add (sub GPR, PC), APSR_NZCV)> {
+ let AltOrders = [(add LR, GPRnopc), (trunc GPRnopc, 8)];
+ let AltOrderSelect = [{
+ return 1 + MF.getTarget().getSubtarget<ARMSubtarget>().isThumb1Only();
+ }];
+}
+
// GPRsp - Only the SP is legal. Used by Thumb1 instructions that want the
// implied SP argument list.
// FIXME: It would be better to not use this at all and refactor the
@@ -236,7 +252,7 @@ def hGPR : RegisterClass<"ARM", [i32], 32, (sub GPR, tGPR)>;
// to the saved value before the tail call, which would clobber a call address.
// Note, getMinimalPhysRegClass(R0) returns tGPR because of the names of
// this class and the preceding one(!) This is what we want.
-def tcGPR : RegisterClass<"ARM", [i32], 32, (add R0, R1, R2, R3, R9, R12)> {
+def tcGPR : RegisterClass<"ARM", [i32], 32, (add R0, R1, R2, R3, R12)> {
let AltOrders = [(and tcGPR, tGPR)];
let AltOrderSelect = [{
return MF.getTarget().getSubtarget<ARMSubtarget>().isThumb1Only();
diff --git a/lib/Target/ARM/ARMSchedule.td b/lib/Target/ARM/ARMSchedule.td
index 2d088de96e27..528c4ec73781 100644
--- a/lib/Target/ARM/ARMSchedule.td
+++ b/lib/Target/ARM/ARMSchedule.td
@@ -69,6 +69,24 @@ def WriteCMP : SchedWrite;
def WriteCMPsi : SchedWrite;
def WriteCMPsr : SchedWrite;
+// Division.
+def WriteDiv : SchedWrite;
+
+// Loads.
+def WriteLd : SchedWrite;
+def WritePreLd : SchedWrite;
+
+// Branches.
+def WriteBr : SchedWrite;
+def WriteBrL : SchedWrite;
+def WriteBrTbl : SchedWrite;
+
+// Fixed-point conversions.
+def WriteCvtFP : SchedWrite;
+
+// Noop.
+def WriteNoop : SchedWrite;
+
// Define TII for use in SchedVariant Predicates.
def : PredicateProlog<[{
const ARMBaseInstrInfo *TII =
diff --git a/lib/Target/ARM/ARMScheduleA9.td b/lib/Target/ARM/ARMScheduleA9.td
index 9739ed20ce2e..603e775d351d 100644
--- a/lib/Target/ARM/ARMScheduleA9.td
+++ b/lib/Target/ARM/ARMScheduleA9.td
@@ -1879,17 +1879,18 @@ def CortexA9Itineraries : ProcessorItineraries<
// The following definitions describe the simpler per-operand machine model.
// This works with MachineScheduler and will eventually replace itineraries.
+class A9WriteLMOpsListType<list<WriteSequence> writes> {
+ list <WriteSequence> Writes = writes;
+ SchedMachineModel SchedModel = ?;
+}
// Cortex-A9 machine model for scheduling and other instruction cost heuristics.
def CortexA9Model : SchedMachineModel {
let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
- let MinLatency = 0; // Data dependencies are allowed within dispatch groups.
+ let MicroOpBufferSize = 56; // Based on available renamed registers.
let LoadLatency = 2; // Optimistic load latency assuming bypass.
// This is overriden by OperandCycles if the
// Itineraries are queried instead.
- let ILPWindow = 10; // Don't reschedule small blocks to hide
- // latency. Minimum latency requirements are already
- // modeled strictly by reserving resources.
let MispredictPenalty = 8; // Based on estimate of pipeline depth.
let Itineraries = CortexA9Itineraries;
@@ -1904,7 +1905,7 @@ def A9UnitALU : ProcResource<2>;
def A9UnitMul : ProcResource<1> { let Super = A9UnitALU; }
def A9UnitAGU : ProcResource<1>;
def A9UnitLS : ProcResource<1>;
-def A9UnitFP : ProcResource<1> { let Buffered = 0; }
+def A9UnitFP : ProcResource<1> { let BufferSize = 0; }
def A9UnitB : ProcResource<1>;
//===----------------------------------------------------------------------===//
@@ -2014,7 +2015,7 @@ def A9WriteAdr#NumAddr : WriteSequence<[A9WriteAdr], NumAddr>;
// Define a predicate to select the LDM based on number of memory addresses.
def A9LMAdr#NumAddr#Pred :
- SchedPredicate<"TII->getNumLDMAddresses(MI) == "#NumAddr>;
+ SchedPredicate<"(TII->getNumLDMAddresses(MI)+1)/2 == "#NumAddr>;
} // foreach NumAddr
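
The predicate change in this hunk appears to select the A9 LDM variants by 64-bit register pairs rather than by individual addresses: the address count is halved with rounding up, so an odd register count still picks the variant with one extra Hi write. A tiny illustration of that rounding (hypothetical helper, not LLVM code):

#include <cstdio>

// Ceiling division by two: the number of 64-bit pairs needed to cover N loads.
static unsigned numLoadPairs(unsigned NumLDMAddresses) {
  return (NumLDMAddresses + 1) / 2;
}

int main() {
  std::printf("%u\n", numLoadPairs(5)); // 3 pairs cover 5 registers
  std::printf("%u\n", numLoadPairs(8)); // 4 pairs cover 8 registers
  return 0;
}
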
@@ -2057,48 +2058,30 @@ def A9WriteL#NumAddr#Hi : WriteSequence<
//===----------------------------------------------------------------------===//
// LDM: Load multiple into 32-bit integer registers.
+def A9WriteLMOpsList : A9WriteLMOpsListType<
+ [A9WriteL1, A9WriteL1Hi,
+ A9WriteL2, A9WriteL2Hi,
+ A9WriteL3, A9WriteL3Hi,
+ A9WriteL4, A9WriteL4Hi,
+ A9WriteL5, A9WriteL5Hi,
+ A9WriteL6, A9WriteL6Hi,
+ A9WriteL7, A9WriteL7Hi,
+ A9WriteL8, A9WriteL8Hi]>;
+
// A9WriteLM variants expand into a pair of writes for each 64-bit
// value loaded. When the number of registers is odd, the last
// A9WriteLnHi is naturally ignored because the instruction has no
// following def operands. These variants take no issue resource, so
// they may need to be part of a WriteSequence that includes A9WriteIssue.
def A9WriteLM : SchedWriteVariant<[
- SchedVar<A9LMAdr1Pred, [A9WriteL1, A9WriteL1Hi]>,
- SchedVar<A9LMAdr2Pred, [A9WriteL1, A9WriteL1Hi,
- A9WriteL2, A9WriteL2Hi]>,
- SchedVar<A9LMAdr3Pred, [A9WriteL1, A9WriteL1Hi,
- A9WriteL2, A9WriteL2Hi,
- A9WriteL3, A9WriteL3Hi]>,
- SchedVar<A9LMAdr4Pred, [A9WriteL1, A9WriteL1Hi,
- A9WriteL2, A9WriteL2Hi,
- A9WriteL3, A9WriteL3Hi,
- A9WriteL4, A9WriteL4Hi]>,
- SchedVar<A9LMAdr5Pred, [A9WriteL1, A9WriteL1Hi,
- A9WriteL2, A9WriteL2Hi,
- A9WriteL3, A9WriteL3Hi,
- A9WriteL4, A9WriteL4Hi,
- A9WriteL5, A9WriteL5Hi]>,
- SchedVar<A9LMAdr6Pred, [A9WriteL1, A9WriteL1Hi,
- A9WriteL2, A9WriteL2Hi,
- A9WriteL3, A9WriteL3Hi,
- A9WriteL4, A9WriteL4Hi,
- A9WriteL5, A9WriteL5Hi,
- A9WriteL6, A9WriteL6Hi]>,
- SchedVar<A9LMAdr7Pred, [A9WriteL1, A9WriteL1Hi,
- A9WriteL2, A9WriteL2Hi,
- A9WriteL3, A9WriteL3Hi,
- A9WriteL4, A9WriteL4Hi,
- A9WriteL5, A9WriteL5Hi,
- A9WriteL6, A9WriteL6Hi,
- A9WriteL7, A9WriteL7Hi]>,
- SchedVar<A9LMAdr8Pred, [A9WriteL1, A9WriteL1Hi,
- A9WriteL2, A9WriteL2Hi,
- A9WriteL3, A9WriteL3Hi,
- A9WriteL4, A9WriteL4Hi,
- A9WriteL5, A9WriteL5Hi,
- A9WriteL6, A9WriteL6Hi,
- A9WriteL7, A9WriteL7Hi,
- A9WriteL8, A9WriteL8Hi]>,
+ SchedVar<A9LMAdr1Pred, A9WriteLMOpsList.Writes[0-1]>,
+ SchedVar<A9LMAdr2Pred, A9WriteLMOpsList.Writes[0-3]>,
+ SchedVar<A9LMAdr3Pred, A9WriteLMOpsList.Writes[0-5]>,
+ SchedVar<A9LMAdr4Pred, A9WriteLMOpsList.Writes[0-7]>,
+ SchedVar<A9LMAdr5Pred, A9WriteLMOpsList.Writes[0-9]>,
+ SchedVar<A9LMAdr6Pred, A9WriteLMOpsList.Writes[0-11]>,
+ SchedVar<A9LMAdr7Pred, A9WriteLMOpsList.Writes[0-13]>,
+ SchedVar<A9LMAdr8Pred, A9WriteLMOpsList.Writes[0-15]>,
// For unknown LDMs, define the maximum number of writes, but only
// make the first two consume resources.
SchedVar<A9LMUnknownPred, [A9WriteL1, A9WriteL1Hi,
@@ -2180,49 +2163,39 @@ def A9WriteLMfp#NumAddr#Hi : WriteSequence<
// pair of writes for each 64-bit data loaded. When the number of
// registers is odd, the last WriteLMfpnHi is naturally ignored because
// the instruction has no following def operands.
+
+def A9WriteLMfpPostRAOpsList : A9WriteLMOpsListType<
+ [A9WriteLMfp1, A9WriteLMfp2, // 0-1
+ A9WriteLMfp3, A9WriteLMfp4, // 2-3
+ A9WriteLMfp5, A9WriteLMfp6, // 4-5
+ A9WriteLMfp7, A9WriteLMfp8, // 6-7
+ A9WriteLMfp1Hi, // 8-8
+ A9WriteLMfp2Hi, A9WriteLMfp2Hi, // 9-10
+ A9WriteLMfp3Hi, A9WriteLMfp3Hi, // 11-12
+ A9WriteLMfp4Hi, A9WriteLMfp4Hi, // 13-14
+ A9WriteLMfp5Hi, A9WriteLMfp5Hi, // 15-16
+ A9WriteLMfp6Hi, A9WriteLMfp6Hi, // 17-18
+ A9WriteLMfp7Hi, A9WriteLMfp7Hi, // 19-20
+ A9WriteLMfp8Hi, A9WriteLMfp8Hi]>; // 21-22
+
def A9WriteLMfpPostRA : SchedWriteVariant<[
- SchedVar<A9LMAdr1Pred, [A9WriteLMfp1, A9WriteLMfp1Hi]>,
- SchedVar<A9LMAdr2Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
- A9WriteLMfp2, A9WriteLMfp2Hi]>,
- SchedVar<A9LMAdr3Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
- A9WriteLMfp2, A9WriteLMfp2Hi,
- A9WriteLMfp3, A9WriteLMfp3Hi]>,
- SchedVar<A9LMAdr4Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
- A9WriteLMfp2, A9WriteLMfp2Hi,
- A9WriteLMfp3, A9WriteLMfp3Hi,
- A9WriteLMfp4, A9WriteLMfp4Hi]>,
- SchedVar<A9LMAdr5Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
- A9WriteLMfp2, A9WriteLMfp2Hi,
- A9WriteLMfp3, A9WriteLMfp3Hi,
- A9WriteLMfp4, A9WriteLMfp4Hi,
- A9WriteLMfp5, A9WriteLMfp5Hi]>,
- SchedVar<A9LMAdr6Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
- A9WriteLMfp2, A9WriteLMfp2Hi,
- A9WriteLMfp3, A9WriteLMfp3Hi,
- A9WriteLMfp4, A9WriteLMfp4Hi,
- A9WriteLMfp5, A9WriteLMfp5Hi,
- A9WriteLMfp6, A9WriteLMfp6Hi]>,
- SchedVar<A9LMAdr7Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
- A9WriteLMfp2, A9WriteLMfp2Hi,
- A9WriteLMfp3, A9WriteLMfp3Hi,
- A9WriteLMfp4, A9WriteLMfp4Hi,
- A9WriteLMfp5, A9WriteLMfp5Hi,
- A9WriteLMfp6, A9WriteLMfp6Hi,
- A9WriteLMfp7, A9WriteLMfp7Hi]>,
- SchedVar<A9LMAdr8Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
- A9WriteLMfp2, A9WriteLMfp2Hi,
- A9WriteLMfp3, A9WriteLMfp3Hi,
- A9WriteLMfp4, A9WriteLMfp4Hi,
- A9WriteLMfp5, A9WriteLMfp5Hi,
- A9WriteLMfp6, A9WriteLMfp6Hi,
- A9WriteLMfp7, A9WriteLMfp7Hi,
- A9WriteLMfp8, A9WriteLMfp8Hi]>,
+ SchedVar<A9LMAdr1Pred, A9WriteLMfpPostRAOpsList.Writes[0-0, 8-8]>,
+ SchedVar<A9LMAdr2Pred, A9WriteLMfpPostRAOpsList.Writes[0-1, 9-10]>,
+ SchedVar<A9LMAdr3Pred, A9WriteLMfpPostRAOpsList.Writes[0-2, 10-12]>,
+ SchedVar<A9LMAdr4Pred, A9WriteLMfpPostRAOpsList.Writes[0-3, 11-14]>,
+ SchedVar<A9LMAdr5Pred, A9WriteLMfpPostRAOpsList.Writes[0-4, 12-16]>,
+ SchedVar<A9LMAdr6Pred, A9WriteLMfpPostRAOpsList.Writes[0-5, 13-18]>,
+ SchedVar<A9LMAdr7Pred, A9WriteLMfpPostRAOpsList.Writes[0-6, 14-20]>,
+ SchedVar<A9LMAdr8Pred, A9WriteLMfpPostRAOpsList.Writes[0-7, 15-22]>,
// For unknown LDMs, define the maximum number of writes, but only
- // make the first two consume resources.
- SchedVar<A9LMUnknownPred, [A9WriteLMfp1, A9WriteLMfp1Hi,
- A9WriteLMfp2, A9WriteLMfp2Hi,
- A9WriteLMfp3Hi, A9WriteLMfp3Hi,
- A9WriteLMfp4Hi, A9WriteLMfp4Hi,
+ // make the first two consume resources. We are optimizing for the case
+ // where the operands are DPRs, and this determines the first eight
+ // types. The remaining eight types are filled to cover the case
+ // where the operands are SPRs.
+ SchedVar<A9LMUnknownPred, [A9WriteLMfp1, A9WriteLMfp2,
+ A9WriteLMfp3Hi, A9WriteLMfp4Hi,
+ A9WriteLMfp5Hi, A9WriteLMfp6Hi,
+ A9WriteLMfp7Hi, A9WriteLMfp8Hi,
A9WriteLMfp5Hi, A9WriteLMfp5Hi,
A9WriteLMfp6Hi, A9WriteLMfp6Hi,
A9WriteLMfp7Hi, A9WriteLMfp7Hi,
@@ -2275,10 +2248,10 @@ def A9Read4 : SchedReadAdvance<3>;
// This table follows the ARM Cortex-A9 Technical Reference Manuals,
// mostly in order.
-def :ItinRW<[A9WriteI], [IIC_iMOVi,IIC_iMOVr,IIC_iMOVsi,
+def :ItinRW<[WriteALU], [IIC_iMOVi,IIC_iMOVr,IIC_iMOVsi,
IIC_iMVNi,IIC_iMVNsi,
IIC_iCMOVi,IIC_iCMOVr,IIC_iCMOVsi]>;
-def :ItinRW<[A9WriteI,A9ReadALU],[IIC_iMVNr]>;
+def :ItinRW<[WriteALU, A9ReadALU],[IIC_iMVNr]>;
def :ItinRW<[A9WriteIsr], [IIC_iMOVsr,IIC_iMVNsr,IIC_iCMOVsr]>;
def :ItinRW<[A9WriteI2], [IIC_iMOVix2,IIC_iCMOVix2]>;
@@ -2487,10 +2460,59 @@ def : SchedAlias<WriteALUsr, A9WriteALUsr>;
def : SchedAlias<WriteALUSsr, A9WriteALUsr>;
def : SchedAlias<ReadALU, A9ReadALU>;
def : SchedAlias<ReadALUsr, A9ReadALU>;
-// FIXME: need to special case AND, ORR, EOR, BIC because they don't read
-// advance. But our instrinfo claims it does.
+def : InstRW< [WriteALU],
+ (instregex "ANDri", "ORRri", "EORri", "BICri", "ANDrr", "ORRrr", "EORrr",
+ "BICrr")>;
+def : InstRW< [WriteALUsi], (instregex "ANDrsi", "ORRrsi", "EORrsi", "BICrsi")>;
+def : InstRW< [WriteALUsr], (instregex "ANDrsr", "ORRrsr", "EORrsr", "BICrsr")>;
+
def : SchedAlias<WriteCMP, A9WriteALU>;
def : SchedAlias<WriteCMPsi, A9WriteALU>;
def : SchedAlias<WriteCMPsr, A9WriteALU>;
+
+def : InstRW< [A9WriteIsr], (instregex "MOVsr", "MOVsi", "MVNsr", "MOVCCsi",
+ "MOVCCsr")>;
+def : InstRW< [WriteALU, A9ReadALU], (instregex "MVNr")>;
+def : InstRW< [A9WriteI2], (instregex "MOVCCi32imm", "MOVi32imm",
+ "MOV_ga_dyn")>;
+def : InstRW< [A9WriteI2pc], (instregex "MOV_ga_pcrel")>;
+def : InstRW< [A9WriteI2ld], (instregex "MOV_ga_pcrel_ldr")>;
+
+def : InstRW< [WriteALU], (instregex "SEL")>;
+
+def : InstRW< [WriteALUsi], (instregex "BFC", "BFI", "UBFX", "SBFX")>;
+
+def : InstRW< [A9WriteM],
+ (instregex "MUL", "MULv5", "SMMUL", "SMMULR", "MLA", "MLAv5", "MLS",
+ "SMMLA", "SMMLAR", "SMMLS", "SMMLSR")>;
+def : InstRW< [A9WriteM, A9WriteMHi],
+ (instregex "SMULL", "SMULLv5", "UMULL", "UMULLv5", "SMLAL$", "UMLAL",
+ "UMAAL", "SMLALv5", "UMLALv5", "UMAALv5", "SMLALBB", "SMLALBT", "SMLALTB",
+ "SMLALTT")>;
+// FIXME: These instructions used to have NoItinerary. Just copied the one from above.
+def : InstRW< [A9WriteM, A9WriteMHi],
+ (instregex "SMLAD", "SMLADX", "SMLALD", "SMLALDX", "SMLSD", "SMLSDX",
+ "SMLSLD", "SMLLDX", "SMUAD", "SMUADX", "SMUSD", "SMUSDX")>;
+
+def : InstRW<[A9WriteM16, A9WriteM16Hi],
+ (instregex "SMULBB", "SMULBT", "SMULTB", "SMULTT", "SMULWB", "SMULWT")>;
+def : InstRW<[A9WriteM16, A9WriteM16Hi],
+ (instregex "SMLABB", "SMLABT", "SMLATB", "SMLATT", "SMLAWB", "SMLAWT")>;
+
+def : InstRW<[A9WriteL], (instregex "LDRi12", "PICLDR$")>;
+def : InstRW<[A9WriteLsi], (instregex "LDRrs")>;
+def : InstRW<[A9WriteLb],
+ (instregex "LDRBi12", "PICLDRH", "PICLDRB", "PICLDRSH", "PICLDRSB",
+ "LDRH", "LDRSH", "LDRSB")>;
+def : InstRW<[A9WriteLbsi], (instregex "LDRrs")>;
+
+def : WriteRes<WriteDiv, []> { let Latency = 0; }
+
+def : WriteRes<WriteBr, [A9UnitB]>;
+def : WriteRes<WriteBrL, [A9UnitB]>;
+def : WriteRes<WriteBrTbl, [A9UnitB]>;
+def : WriteRes<WritePreLd, []>;
+def : SchedAlias<WriteCvtFP, A9WriteF>;
+def : WriteRes<WriteNoop, []> { let Latency = 0; let NumMicroOps = 0; }
} // SchedModel = CortexA9Model
diff --git a/lib/Target/ARM/ARMScheduleSwift.td b/lib/Target/ARM/ARMScheduleSwift.td
index 7c6df410706e..8d7dbc246095 100644
--- a/lib/Target/ARM/ARMScheduleSwift.td
+++ b/lib/Target/ARM/ARMScheduleSwift.td
@@ -1076,7 +1076,7 @@ def SwiftItineraries : ProcessorItineraries<
// Swift machine model for scheduling and other instruction cost heuristics.
def SwiftModel : SchedMachineModel {
let IssueWidth = 3; // 3 micro-ops are dispatched per cycle.
- let MinLatency = 0; // Data dependencies are allowed within dispatch groups.
+ let MicroOpBufferSize = 45; // Based on NEON renamed registers.
let LoadLatency = 3;
let MispredictPenalty = 14; // A branch direction mispredict.
@@ -1096,9 +1096,27 @@ let SchedModel = SwiftModel in {
def SwiftUnitDiv : ProcResource<1>;
// Generic resource requirements.
+ def SwiftWriteP0OneCycle : SchedWriteRes<[SwiftUnitP0]>;
+ def SwiftWriteP0TwoCycle : SchedWriteRes<[SwiftUnitP0]> { let Latency = 2; }
+ def SwiftWriteP0FourCycle : SchedWriteRes<[SwiftUnitP0]> { let Latency = 4; }
+ def SwiftWriteP0SixCycle : SchedWriteRes<[SwiftUnitP0]> { let Latency = 6; }
+ def SwiftWriteP0P1FourCycle : SchedWriteRes<[SwiftUnitP0, SwiftUnitP1]> {
+ let Latency = 4;
+ }
+ def SwiftWriteP0P1SixCycle : SchedWriteRes<[SwiftUnitP0, SwiftUnitP1]> {
+ let Latency = 6;
+ }
+ def SwiftWriteP01OneCycle : SchedWriteRes<[SwiftUnitP01]>;
+ def SwiftWriteP1TwoCycle : SchedWriteRes<[SwiftUnitP1]> { let Latency = 2; }
+ def SwiftWriteP1FourCycle : SchedWriteRes<[SwiftUnitP1]> { let Latency = 4; }
+ def SwiftWriteP1SixCycle : SchedWriteRes<[SwiftUnitP1]> { let Latency = 6; }
+ def SwiftWriteP1EightCycle : SchedWriteRes<[SwiftUnitP1]> { let Latency = 8; }
+ def SwiftWriteP1TwelveCyc : SchedWriteRes<[SwiftUnitP1]> { let Latency = 12; }
+ def SwiftWriteP01OneCycle2x : WriteSequence<[SwiftWriteP01OneCycle], 2>;
+ def SwiftWriteP01OneCycle3x : WriteSequence<[SwiftWriteP01OneCycle], 3>;
def SwiftWriteP01TwoCycle : SchedWriteRes<[SwiftUnitP01]> { let Latency = 2; }
- def SwiftWriteP01ThreeCycleTwoUops :
- SchedWriteRes<[SwiftUnitP01, SwiftUnitP01]> {
+ def SwiftWriteP01ThreeCycleTwoUops : SchedWriteRes<[SwiftUnitP01,
+ SwiftUnitP01]> {
let Latency = 3;
let NumMicroOps = 2;
}
@@ -1107,7 +1125,23 @@ let SchedModel = SwiftModel in {
let NumMicroOps = 3;
let ResourceCycles = [3];
}
-
+ // Plain load without writeback.
+ def SwiftWriteP2ThreeCycle : SchedWriteRes<[SwiftUnitP2]> {
+ let Latency = 3;
+ }
+ def SwiftWriteP2FourCycle : SchedWriteRes<[SwiftUnitP2]> {
+ let Latency = 4;
+ }
+ // A store does not write to a register.
+ def SwiftWriteP2 : SchedWriteRes<[SwiftUnitP2]> {
+ let Latency = 0;
+ }
+ foreach Num = 1-4 in {
+ def SwiftWrite#Num#xP2 : WriteSequence<[SwiftWriteP2], Num>;
+ }
+ def SwiftWriteP01OneCycle2x_load : WriteSequence<[SwiftWriteP01OneCycle,
+ SwiftWriteP01OneCycle,
+ SwiftWriteP2ThreeCycle]>;
// 4.2.4 Arithmetic and Logical.
// ALU operation register shifted by immediate variant.
def SwiftWriteALUsi : SchedWriteVariant<[
@@ -1137,8 +1171,906 @@ let SchedModel = SwiftModel in {
def : ReadAdvance<ReadALU, 0>;
def : SchedAlias<ReadALUsr, SwiftReadAdvanceALUsr>;
+
+ def SwiftChooseShiftKindP01OneOrTwoCycle : SchedWriteVariant<[
+ SchedVar<IsFastImmShiftSwiftPred, [SwiftWriteP01OneCycle]>,
+ SchedVar<NoSchedPred, [SwiftWriteP01TwoCycle]>
+ ]>;
+
// 4.2.5 Integer comparison
def : WriteRes<WriteCMP, [SwiftUnitP01]>;
- def : WriteRes<WriteCMPsi, [SwiftUnitP01]>;
- def : WriteRes<WriteCMPsr, [SwiftUnitP01]>;
+ def : SchedAlias<WriteCMPsi, SwiftChooseShiftKindP01OneOrTwoCycle>;
+ def : SchedAlias<WriteCMPsr, SwiftWriteP01TwoCycle>;
+
+ // 4.2.6 Shift, Move
+ // Shift
+ // ASR,LSL,ROR,RRX
+ // MOV(register-shiftedregister) MVN(register-shiftedregister)
+ // Move
+ // MOV,MVN
+ // MOVT
+ // Sign/Zero extension
+ def : InstRW<[SwiftWriteP01OneCycle],
+ (instregex "SXTB", "SXTH", "SXTB16", "UXTB", "UXTH", "UXTB16",
+ "t2SXTB", "t2SXTH", "t2SXTB16", "t2UXTB", "t2UXTH",
+ "t2UXTB16")>;
+ // Pseudo instructions.
+ def : InstRW<[SwiftWriteP01OneCycle2x],
+ (instregex "MOVCCi32imm", "MOVi32imm", "MOV_ga_dyn", "t2MOVCCi32imm",
+ "t2MOVi32imm", "t2MOV_ga_dyn")>;
+ def : InstRW<[SwiftWriteP01OneCycle3x],
+ (instregex "MOV_ga_pcrel", "t2MOV_ga_pcrel", "t2MOVi16_ga_pcrel")>;
+ def : InstRW<[SwiftWriteP01OneCycle2x_load],
+ (instregex "MOV_ga_pcrel_ldr", "t2MOV_ga_pcrel_ldr")>;
+
+ def SwiftWriteP0TwoCyleTwoUops : WriteSequence<[SwiftWriteP0OneCycle], 2>;
+
+ def SwiftPredP0OneOrTwoCycle : SchedWriteVariant<[
+ SchedVar<IsPredicatedPred, [ SwiftWriteP0TwoCyleTwoUops ]>,
+ SchedVar<NoSchedPred, [ SwiftWriteP0OneCycle ]>
+ ]>;
+
+ // 4.2.7 Select
+ // SEL
+ def : InstRW<[SwiftPredP0OneOrTwoCycle], (instregex "SEL", "t2SEL")>;
+
+ // 4.2.8 Bitfield
+ // BFI,BFC, SBFX,UBFX
+ def : InstRW< [SwiftWriteP01TwoCycle],
+ (instregex "BFC", "BFI", "UBFX", "SBFX", "(t|t2)BFC", "(t|t2)BFI",
+ "(t|t2)UBFX", "(t|t2)SBFX")>;
+
+ // 4.2.9 Saturating arithmetic
+ def : InstRW< [SwiftWriteP01TwoCycle],
+ (instregex "QADD", "QSUB", "QDADD", "QDSUB", "SSAT", "SSAT16", "USAT",
+ "USAT16", "QADD8", "QADD16", "QSUB8", "QSUB16", "QASX", "QSAX",
+ "UQADD8", "UQADD16","UQSUB8","UQSUB16","UQASX","UQSAX", "t2QADD",
+ "t2QSUB", "t2QDADD", "t2QDSUB", "t2SSAT", "t2SSAT16", "t2USAT",
+ "t2QADD8", "t2QADD16", "t2QSUB8", "t2QSUB16", "t2QASX", "t2QSAX",
+ "t2UQADD8", "t2UQADD16","t2UQSUB8","t2UQSUB16","t2UQASX","t2UQSAX")>;
+
+ // 4.2.10 Parallel Arithmetic
+ // Not flag setting.
+ def : InstRW< [SwiftWriteALUsr],
+ (instregex "SADD8", "SADD16", "SSUB8", "SSUB16", "SASX", "SSAX",
+ "UADD8", "UADD16", "USUB8", "USUB16", "UASX", "USAX", "t2SADD8",
+ "t2SADD16", "t2SSUB8", "t2SSUB16", "t2SASX", "t2SSAX", "t2UADD8",
+ "t2UADD16", "t2USUB8", "t2USUB16", "t2UASX", "t2USAX")>;
+ // Flag setting.
+ def : InstRW< [SwiftWriteP01TwoCycle],
+ (instregex "SHADD8", "SHADD16", "SHSUB8", "SHSUB16", "SHASX", "SHSAX",
+ "SXTAB", "SXTAB16", "SXTAH", "UHADD8", "UHADD16", "UHSUB8", "UHSUB16",
+ "UHASX", "UHSAX", "UXTAB", "UXTAB16", "UXTAH", "t2SHADD8", "t2SHADD16",
+ "t2SHSUB8", "t2SHSUB16", "t2SHASX", "t2SHSAX", "t2SXTAB", "t2SXTAB16",
+ "t2SXTAH", "t2UHADD8", "t2UHADD16", "t2UHSUB8", "t2UHSUB16", "t2UHASX",
+ "t2UHSAX", "t2UXTAB", "t2UXTAB16", "t2UXTAH")>;
+
+ // 4.2.11 Sum of Absolute Difference
+ def : InstRW< [SwiftWriteP0P1FourCycle], (instregex "USAD8") >;
+ def : InstRW<[SwiftWriteP0P1FourCycle, ReadALU, ReadALU, SchedReadAdvance<2>],
+ (instregex "USADA8")>;
+
+ // 4.2.12 Integer Multiply (32-bit result)
+ // Two sources.
+ def : InstRW< [SwiftWriteP0FourCycle],
+ (instregex "MULS", "MUL", "SMMUL", "SMMULR", "SMULBB", "SMULBT",
+ "SMULTB", "SMULTT", "SMULWB", "SMULWT", "SMUSD", "SMUSDXi", "t2MUL",
+ "t2SMMUL", "t2SMMULR", "t2SMULBB", "t2SMULBT", "t2SMULTB", "t2SMULTT",
+ "t2SMULWB", "t2SMULWT", "t2SMUSD")>;
+
+ def SwiftWriteP0P01FiveCycleTwoUops :
+ SchedWriteRes<[SwiftUnitP0, SwiftUnitP01]> {
+ let Latency = 5;
+ }
+
+ def SwiftPredP0P01FourFiveCycle : SchedWriteVariant<[
+ SchedVar<IsPredicatedPred, [ SwiftWriteP0P01FiveCycleTwoUops ]>,
+ SchedVar<NoSchedPred, [ SwiftWriteP0FourCycle ]>
+ ]>;
+
+ def SwiftReadAdvanceFourCyclesPred : SchedReadVariant<[
+ SchedVar<IsPredicatedPred, [SchedReadAdvance<4>]>,
+ SchedVar<NoSchedPred, [ReadALU]>
+ ]>;
+
+ // Multiply accumulate, three sources
+ def : InstRW< [SwiftPredP0P01FourFiveCycle, ReadALU, ReadALU,
+ SwiftReadAdvanceFourCyclesPred],
+ (instregex "MLAS", "MLA", "MLS", "SMMLA", "SMMLAR", "SMMLS", "SMMLSR",
+ "t2MLA", "t2MLS", "t2MLAS", "t2SMMLA", "t2SMMLAR", "t2SMMLS",
+ "t2SMMLSR")>;
+
+ // 4.2.13 Integer Multiply (32-bit result, Q flag)
+ def : InstRW< [SwiftWriteP0FourCycle],
+ (instregex "SMUAD", "SMUADX", "t2SMUAD", "t2SMUADX")>;
+ def : InstRW< [SwiftPredP0P01FourFiveCycle, ReadALU, ReadALU,
+ SwiftReadAdvanceFourCyclesPred],
+ (instregex "SMLABB", "SMLABT", "SMLATB", "SMLATT", "SMLSD", "SMLSDX",
+ "SMLAWB", "SMLAWT", "t2SMLABB", "t2SMLABT", "t2SMLATB", "t2SMLATT",
+ "t2SMLSD", "t2SMLSDX", "t2SMLAWB", "t2SMLAWT")>;
+ def : InstRW< [SwiftPredP0P01FourFiveCycle],
+ (instregex "SMLAD", "SMLADX", "t2SMLAD", "t2SMLADX")>;
+
+ def SwiftP0P0P01FiveCycle : SchedWriteRes<[SwiftUnitP0, SwiftUnitP01]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+ let ResourceCycles = [2, 1];
+ }
+ def SwiftWrite1Cycle : SchedWriteRes<[]> {
+ let Latency = 1;
+ let NumMicroOps = 0;
+ }
+ def SwiftWrite5Cycle : SchedWriteRes<[]> {
+ let Latency = 5;
+ let NumMicroOps = 0;
+ }
+ def SwiftWrite6Cycle : SchedWriteRes<[]> {
+ let Latency = 6;
+ let NumMicroOps = 0;
+ }
+
+ // 4.2.14 Integer Multiply, Long
+ def : InstRW< [SwiftP0P0P01FiveCycle, SwiftWrite5Cycle],
+ (instregex "SMULL$", "UMULL$", "t2SMULL$", "t2UMULL$")>;
+
+ def Swift2P03P01FiveCycle : SchedWriteRes<[SwiftUnitP0, SwiftUnitP01]> {
+ let Latency = 7;
+ let NumMicroOps = 5;
+ let ResourceCycles = [2, 3];
+ }
+
+ // 4.2.15 Integer Multiply Accumulate, Long
+ // 4.2.16 Integer Multiply Accumulate, Dual
+ // 4.2.17 Integer Multiply Accumulate Accumulate, Long
+ // We are being a bit inaccurate here.
+ def : InstRW< [SwiftWrite5Cycle, Swift2P03P01FiveCycle, ReadALU, ReadALU,
+ SchedReadAdvance<4>, SchedReadAdvance<3>],
+ (instregex "SMLALS", "UMLALS", "SMLAL", "UMLAL", "MLALBB", "SMLALBT",
+ "SMLALTB", "SMLALTT", "SMLALD", "SMLALDX", "SMLSLD", "SMLSLDX",
+ "UMAAL", "t2SMLALS", "t2UMLALS", "t2SMLAL", "t2UMLAL", "t2MLALBB", "t2SMLALBT",
+ "t2SMLALTB", "t2SMLALTT", "t2SMLALD", "t2SMLALDX", "t2SMLSLD", "t2SMLSLDX",
+ "t2UMAAL")>;
+
+ def SwiftDiv : SchedWriteRes<[SwiftUnitP0, SwiftUnitDiv]> {
+ let NumMicroOps = 1;
+ let Latency = 14;
+ let ResourceCycles = [1, 14];
+ }
+ // 4.2.18 Integer Divide
+ def : WriteRes<WriteDiv, [SwiftUnitDiv]>; // Workaround.
+ def : InstRW <[SwiftDiv],
+ (instregex "SDIV", "UDIV", "t2SDIV", "t2UDIV")>;
+
+ // 4.2.19 Integer Load Single Element
+ // 4.2.20 Integer Load Signextended
+ def SwiftWriteP2P01ThreeCycle : SchedWriteRes<[SwiftUnitP2, SwiftUnitP01]> {
+ let Latency = 3;
+ let NumMicroOps = 2;
+ }
+ def SwiftWriteP2P01FourCyle : SchedWriteRes<[SwiftUnitP2, SwiftUnitP01]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+ }
+ def SwiftWriteP2P01P01FourCycle : SchedWriteRes<[SwiftUnitP2, SwiftUnitP01,
+ SwiftUnitP01]> {
+ let Latency = 4;
+ let NumMicroOps = 3;
+ }
+ def SwiftWriteP2P2ThreeCycle : SchedWriteRes<[SwiftUnitP2, SwiftUnitP2]> {
+ let Latency = 3;
+ let NumMicroOps = 2;
+ }
+ def SwiftWriteP2P2P01ThreeCycle : SchedWriteRes<[SwiftUnitP2, SwiftUnitP2,
+ SwiftUnitP01]> {
+ let Latency = 3;
+ let NumMicroOps = 3;
+ }
+ def SwiftWrBackOne : SchedWriteRes<[]> {
+ let Latency = 1;
+ let NumMicroOps = 0;
+ }
+ def SwiftWriteLdFour : SchedWriteRes<[]> {
+ let Latency = 4;
+ let NumMicroOps = 0;
+ }
+ // Not accurate.
+ def : InstRW<[SwiftWriteP2ThreeCycle],
+ (instregex "LDR(i12|rs)$", "LDRB(i12|rs)$", "t2LDR(i8|i12|s|pci)",
+ "t2LDR(H|B)(i8|i12|s|pci)", "LDREX", "tLDR[BH](r|i|spi|pci|pciASM)",
+ "tLDR(r|i|spi|pci|pciASM)")>;
+ def : InstRW<[SwiftWriteP2ThreeCycle],
+ (instregex "LDRH$", "PICLDR$", "PICLDR(H|B)$", "LDRcp$")>;
+ def : InstRW<[SwiftWriteP2P01FourCyle],
+ (instregex "PICLDRS(H|B)$", "t2LDRS(H|B)(i|r|p|s)", "LDRS(H|B)$",
+ "t2LDRpci_pic", "tLDRS(B|H)")>;
+ def : InstRW<[SwiftWriteP2P01ThreeCycle, SwiftWrBackOne],
+ (instregex "LD(RB|R)(_|T_)(POST|PRE)_(IMM|REG)", "LDRH(_PRE|_POST)",
+ "LDR(T|BT)_POST_(REG|IMM)", "LDRHT(i|r)",
+ "t2LD(R|RB|RH)_(PRE|POST)", "t2LD(R|RB|RH)T")>;
+ def : InstRW<[SwiftWriteP2P01P01FourCycle, SwiftWrBackOne],
+ (instregex "LDR(SH|SB)(_POST|_PRE)", "t2LDR(SH|SB)(_POST|_PRE)",
+ "LDRS(B|H)T(i|r)", "t2LDRS(B|H)T(i|r)", "t2LDRS(B|H)T")>;
+
+ // 4.2.21 Integer Dual Load
+ // Not accurate.
+ def : InstRW<[SwiftWriteP2P2ThreeCycle, SwiftWriteLdFour],
+ (instregex "t2LDRDi8", "LDRD$")>;
+ def : InstRW<[SwiftWriteP2P2P01ThreeCycle, SwiftWriteLdFour, SwiftWrBackOne],
+ (instregex "LDRD_(POST|PRE)", "t2LDRD_(POST|PRE)")>;
+
+ // 4.2.22 Integer Load, Multiple
+ // NumReg = 1 .. 16
+ foreach Lat = 3-25 in {
+ def SwiftWriteLM#Lat#Cy : SchedWriteRes<[SwiftUnitP2]> {
+ let Latency = Lat;
+ }
+ def SwiftWriteLM#Lat#CyNo : SchedWriteRes<[]> {
+ let Latency = Lat;
+ let NumMicroOps = 0;
+ }
+ }
+ // Predicate.
+ foreach NumAddr = 1-16 in {
+ def SwiftLMAddr#NumAddr#Pred : SchedPredicate<"TII->getNumLDMAddresses(MI) == "#NumAddr>;
+ }
+ def SwiftWriteLDMAddrNoWB : SchedWriteRes<[SwiftUnitP01]> { let Latency = 0; }
+ def SwiftWriteLDMAddrWB : SchedWriteRes<[SwiftUnitP01, SwiftUnitP01]>;
+ def SwiftWriteLM : SchedWriteVariant<[
+ SchedVar<SwiftLMAddr2Pred, [SwiftWriteLM3Cy, SwiftWriteLM4Cy]>,
+ SchedVar<SwiftLMAddr3Pred, [SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy]>,
+ SchedVar<SwiftLMAddr4Pred, [SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy]>,
+ SchedVar<SwiftLMAddr5Pred, [SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy]>,
+ SchedVar<SwiftLMAddr6Pred, [SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy]>,
+ SchedVar<SwiftLMAddr7Pred, [SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM9Cy]>,
+ SchedVar<SwiftLMAddr8Pred, [SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM9Cy, SwiftWriteLM10Cy]>,
+ SchedVar<SwiftLMAddr9Pred, [SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM11Cy]>,
+ SchedVar<SwiftLMAddr10Pred,[SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM11Cy, SwiftWriteLM12Cy]>,
+ SchedVar<SwiftLMAddr11Pred,[SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM11Cy, SwiftWriteLM12Cy,
+ SwiftWriteLM13Cy]>,
+ SchedVar<SwiftLMAddr12Pred,[SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM11Cy, SwiftWriteLM12Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM14Cy]>,
+ SchedVar<SwiftLMAddr13Pred,[SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM11Cy, SwiftWriteLM12Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM14Cy,
+ SwiftWriteLM15Cy]>,
+ SchedVar<SwiftLMAddr14Pred,[SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM11Cy, SwiftWriteLM12Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM14Cy,
+ SwiftWriteLM15Cy, SwiftWriteLM16Cy]>,
+ SchedVar<SwiftLMAddr15Pred,[SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM11Cy, SwiftWriteLM12Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM14Cy,
+ SwiftWriteLM15Cy, SwiftWriteLM16Cy,
+ SwiftWriteLM17Cy]>,
+ SchedVar<SwiftLMAddr16Pred,[SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5Cy, SwiftWriteLM6Cy,
+ SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM11Cy, SwiftWriteLM12Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM14Cy,
+ SwiftWriteLM15Cy, SwiftWriteLM16Cy,
+ SwiftWriteLM17Cy, SwiftWriteLM18Cy]>,
+ // Unknown number of registers, just use resources for two registers.
+ SchedVar<NoSchedPred, [SwiftWriteLM3Cy, SwiftWriteLM4Cy,
+ SwiftWriteLM5CyNo, SwiftWriteLM6CyNo,
+ SwiftWriteLM7CyNo, SwiftWriteLM8CyNo,
+ SwiftWriteLM9CyNo, SwiftWriteLM10CyNo,
+ SwiftWriteLM11CyNo, SwiftWriteLM12CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM14CyNo,
+ SwiftWriteLM15CyNo, SwiftWriteLM16CyNo,
+ SwiftWriteLM17CyNo, SwiftWriteLM18CyNo]>
+
+ ]> { let Variadic=1; }
+
+ def : InstRW<[SwiftWriteLM, SwiftWriteLDMAddrNoWB],
+ (instregex "LDM(IA|DA|DB|IB)$", "t2LDM(IA|DA|DB|IB)$",
+ "(t|sys)LDM(IA|DA|DB|IB)$")>;
+ def : InstRW<[SwiftWriteLDMAddrWB, SwiftWriteLM],
+ (instregex /*"t2LDMIA_RET", "tLDMIA_RET", "LDMIA_RET",*/
+ "LDM(IA|DA|DB|IB)_UPD", "(t2|sys|t)LDM(IA|DA|DB|IB)_UPD")>;
+ def : InstRW<[SwiftWriteLDMAddrWB, SwiftWriteLM, SwiftWriteP1TwoCycle],
+ (instregex "LDMIA_RET", "(t|t2)LDMIA_RET", "POP", "tPOP")>;
+ // 4.2.23 Integer Store, Single Element
+ def : InstRW<[SwiftWriteP2],
+ (instregex "PICSTR", "STR(i12|rs)", "STRB(i12|rs)", "STRH$", "STREX",
+ "t2STR(i12|i8|s)$", "t2STR[BH](i12|i8|s)$", "tSTR[BH](i|r)", "tSTR(i|r)", "tSTRspi")>;
+
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWriteP2],
+ (instregex "STR(B_|_|BT_|T_)(PRE_IMM|PRE_REG|POST_REG|POST_IMM)",
+ "STR(i|r)_preidx", "STRB(i|r)_preidx", "STRH_preidx", "STR(H_|HT_)(PRE|POST)",
+ "STR(BT|HT|T)", "t2STR_(PRE|POST)", "t2STR[BH]_(PRE|POST)",
+ "t2STR_preidx", "t2STR[BH]_preidx", "t2ST(RB|RH|R)T")>;
+
+ // 4.2.24 Integer Store, Dual
+ def : InstRW<[SwiftWriteP2, SwiftWriteP2, SwiftWriteP01OneCycle],
+ (instregex "STRD$", "t2STRDi8")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWriteP2, SwiftWriteP2,
+ SwiftWriteP01OneCycle],
+ (instregex "(t2|t)STRD_(POST|PRE)", "STRD_(POST|PRE)")>;
+
+ // 4.2.25 Integer Store, Multiple
+ def SwiftWriteStIncAddr : SchedWriteRes<[SwiftUnitP2, SwiftUnitP01]> {
+ let Latency = 0;
+ let NumMicroOps = 2;
+ }
+ foreach NumAddr = 1-16 in {
+ def SwiftWriteSTM#NumAddr : WriteSequence<[SwiftWriteStIncAddr], NumAddr>;
+ }
+ def SwiftWriteSTM : SchedWriteVariant<[
+ SchedVar<SwiftLMAddr2Pred, [SwiftWriteSTM2]>,
+ SchedVar<SwiftLMAddr3Pred, [SwiftWriteSTM3]>,
+ SchedVar<SwiftLMAddr4Pred, [SwiftWriteSTM4]>,
+ SchedVar<SwiftLMAddr5Pred, [SwiftWriteSTM5]>,
+ SchedVar<SwiftLMAddr6Pred, [SwiftWriteSTM6]>,
+ SchedVar<SwiftLMAddr7Pred, [SwiftWriteSTM7]>,
+ SchedVar<SwiftLMAddr8Pred, [SwiftWriteSTM8]>,
+ SchedVar<SwiftLMAddr9Pred, [SwiftWriteSTM9]>,
+ SchedVar<SwiftLMAddr10Pred,[SwiftWriteSTM10]>,
+ SchedVar<SwiftLMAddr11Pred,[SwiftWriteSTM11]>,
+ SchedVar<SwiftLMAddr12Pred,[SwiftWriteSTM12]>,
+ SchedVar<SwiftLMAddr13Pred,[SwiftWriteSTM13]>,
+ SchedVar<SwiftLMAddr14Pred,[SwiftWriteSTM14]>,
+ SchedVar<SwiftLMAddr15Pred,[SwiftWriteSTM15]>,
+ SchedVar<SwiftLMAddr16Pred,[SwiftWriteSTM16]>,
+ // Unknown number of registers, just use resources for two registers.
+ SchedVar<NoSchedPred, [SwiftWriteSTM2]>
+ ]>;
+ def : InstRW<[SwiftWriteSTM],
+ (instregex "STM(IB|IA|DB|DA)$", "(t2|sys|t)STM(IB|IA|DB|DA)$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWriteSTM],
+ (instregex "STM(IB|IA|DB|DA)_UPD", "(t2|sys|t)STM(IB|IA|DB|DA)_UPD",
+ "PUSH", "tPUSH")>;
+
+ // 4.2.26 Branch
+ def : WriteRes<WriteBr, [SwiftUnitP1]> { let Latency = 0; }
+ def : WriteRes<WriteBrL, [SwiftUnitP1]> { let Latency = 2; }
+ def : WriteRes<WriteBrTbl, [SwiftUnitP1, SwiftUnitP2]> { let Latency = 0; }
+
+ // 4.2.27 Not issued
+ def : WriteRes<WriteNoop, []> { let Latency = 0; let NumMicroOps = 0; }
+ def : InstRW<[WriteNoop], (instregex "t2IT", "IT", "NOP")>;
+
+ // 4.2.28 Advanced SIMD, Integer, 2 cycle
+ def : InstRW<[SwiftWriteP0TwoCycle],
+ (instregex "VADDv", "VSUBv", "VNEG(s|f|v)", "VADDL", "VSUBL",
+ "VADDW", "VSUBW", "VHADD", "VHSUB", "VRHADD", "VPADDi",
+ "VPADDL", "VAND", "VBIC", "VEOR", "VORN", "VORR", "VTST",
+ "VSHL", "VSHR(s|u)", "VSHLL", "VQSHL", "VQSHLU", "VBIF",
+ "VBIT", "VBSL", "VSLI", "VSRI", "VCLS", "VCLZ", "VCNT")>;
+
+ def : InstRW<[SwiftWriteP1TwoCycle],
+ (instregex "VEXT", "VREV16", "VREV32", "VREV64")>;
+
+ // 4.2.29 Advanced SIMD, Integer, 4 cycle
+ // 4.2.30 Advanced SIMD, Integer with Accumulate
+ def : InstRW<[SwiftWriteP0FourCycle],
+ (instregex "VABA", "VABAL", "VPADAL", "VRSRA", "VSRA", "VACGE", "VACGT",
+ "VACLE", "VACLT", "VCEQ", "VCGE", "VCGT", "VCLE", "VCLT", "VRSHL",
+ "VQRSHL", "VRSHR(u|s)", "VABS(f|v)", "VQABS", "VQNEG", "VQADD",
+ "VQSUB")>;
+ def : InstRW<[SwiftWriteP1FourCycle],
+ (instregex "VRECPE", "VRSQRTE")>;
+
+ // 4.2.31 Advanced SIMD, Add and Shift with Narrow
+ def : InstRW<[SwiftWriteP0P1FourCycle],
+ (instregex "VADDHN", "VSUBHN", "VSHRN")>;
+ def : InstRW<[SwiftWriteP0P1SixCycle],
+ (instregex "VRADDHN", "VRSUBHN", "VRSHRN", "VQSHRN", "VQSHRUN",
+ "VQRSHRN", "VQRSHRUN")>;
+
+ // 4.2.32 Advanced SIMD, Vector Table Lookup
+ foreach Num = 1-4 in {
+ def SwiftWrite#Num#xP1TwoCycle : WriteSequence<[SwiftWriteP1TwoCycle], Num>;
+ }
+ def : InstRW<[SwiftWrite1xP1TwoCycle],
+ (instregex "VTB(L|X)1")>;
+ def : InstRW<[SwiftWrite2xP1TwoCycle],
+ (instregex "VTB(L|X)2")>;
+ def : InstRW<[SwiftWrite3xP1TwoCycle],
+ (instregex "VTB(L|X)3")>;
+ def : InstRW<[SwiftWrite4xP1TwoCycle],
+ (instregex "VTB(L|X)4")>;
+
+ // 4.2.33 Advanced SIMD, Transpose
+ def : InstRW<[SwiftWriteP1FourCycle, SwiftWriteP1FourCycle,
+ SwiftWriteP1TwoCycle/*RsrcOnly*/, SchedReadAdvance<2>],
+ (instregex "VSWP", "VTRN", "VUZP", "VZIP")>;
+
+ // 4.2.34 Advanced SIMD and VFP, Floating Point
+ def : InstRW<[SwiftWriteP0TwoCycle], (instregex "VABS(S|D)$", "VNEG(S|D)$")>;
+ def : InstRW<[SwiftWriteP0FourCycle],
+ (instregex "VCMP(D|S|ZD|ZS)$", "VCMPE(D|S|ZD|ZS)")>;
+ def : InstRW<[SwiftWriteP0FourCycle],
+ (instregex "VADD(S|f)", "VSUB(S|f)", "VABD", "VPADDf", "VMAX", "VMIN", "VPMAX",
+ "VPMIN")>;
+ def : InstRW<[SwiftWriteP0SixCycle], (instregex "VADDD$", "VSUBD$")>;
+ def : InstRW<[SwiftWriteP1EightCycle], (instregex "VRECPS", "VRSQRTS")>;
+
+ // 4.2.35 Advanced SIMD and VFP, Multiply
+ def : InstRW<[SwiftWriteP1FourCycle],
+ (instregex "VMUL(S|v|p|f|s)", "VNMULS", "VQDMULH", "VQRDMULH",
+ "VMULL", "VQDMULL")>;
+ def : InstRW<[SwiftWriteP1SixCycle],
+ (instregex "VMULD", "VNMULD")>;
+ def : InstRW<[SwiftWriteP1FourCycle],
+ (instregex "VMLA", "VMLS", "VNMLA", "VNMLS", "VFMA(S|D)", "VFMS(S|D)",
+ "VFNMA", "VFNMS", "VMLAL", "VMLSL","VQDMLAL", "VQDMLSL")>;
+ def : InstRW<[SwiftWriteP1EightCycle], (instregex "VFMAfd", "VFMSfd")>;
+ def : InstRW<[SwiftWriteP1TwelveCyc], (instregex "VFMAfq", "VFMSfq")>;
+
+ // 4.2.36 Advanced SIMD and VFP, Convert
+ def : InstRW<[SwiftWriteP1FourCycle], (instregex "VCVT", "V(S|U)IT", "VTO(S|U)")>;
+ // Fixed-point conversions.
+ def : WriteRes<WriteCvtFP, [SwiftUnitP1]> { let Latency = 4; }
+
+ // 4.2.37 Advanced SIMD and VFP, Move
+ def : InstRW<[SwiftWriteP0TwoCycle],
+ (instregex "VMOVv", "VMOV(S|D)$", "VMOV(S|D)cc",
+ "VMVNv", "VMVN(d|q)", "VMVN(S|D)cc",
+ "FCONST(D|S)")>;
+ def : InstRW<[SwiftWriteP1TwoCycle], (instregex "VMOVN", "VMOVL")>;
+ def : InstRW<[WriteSequence<[SwiftWriteP0FourCycle, SwiftWriteP1TwoCycle]>],
+ (instregex "VQMOVN")>;
+ def : InstRW<[SwiftWriteP1TwoCycle], (instregex "VDUPLN", "VDUPf")>;
+ def : InstRW<[WriteSequence<[SwiftWriteP2FourCycle, SwiftWriteP1TwoCycle]>],
+ (instregex "VDUP(8|16|32)")>;
+ def : InstRW<[SwiftWriteP2ThreeCycle], (instregex "VMOVRS$")>;
+ def : InstRW<[WriteSequence<[SwiftWriteP2FourCycle, SwiftWriteP0TwoCycle]>],
+ (instregex "VMOVSR$", "VSETLN")>;
+ def : InstRW<[SwiftWriteP2ThreeCycle, SwiftWriteP2FourCycle],
+ (instregex "VMOVRR(D|S)$")>;
+ def : InstRW<[SwiftWriteP2FourCycle], (instregex "VMOVDRR$")>;
+ def : InstRW<[WriteSequence<[SwiftWriteP2FourCycle, SwiftWriteP1TwoCycle]>,
+ WriteSequence<[SwiftWrite1Cycle, SwiftWriteP2FourCycle,
+ SwiftWriteP1TwoCycle]>],
+ (instregex "VMOVSRR$")>;
+ def : InstRW<[WriteSequence<[SwiftWriteP1TwoCycle, SwiftWriteP2ThreeCycle]>],
+ (instregex "VGETLN(u|i)")>;
+ def : InstRW<[WriteSequence<[SwiftWriteP1TwoCycle, SwiftWriteP2ThreeCycle,
+ SwiftWriteP01OneCycle]>],
+ (instregex "VGETLNs")>;
+
+ // 4.2.38 Advanced SIMD and VFP, Move FPSCR
+ // Serializing instructions.
+ def SwiftWaitP0For15Cy : SchedWriteRes<[SwiftUnitP0]> {
+ let Latency = 15;
+ let ResourceCycles = [15];
+ }
+ def SwiftWaitP1For15Cy : SchedWriteRes<[SwiftUnitP1]> {
+ let Latency = 15;
+ let ResourceCycles = [15];
+ }
+ def SwiftWaitP2For15Cy : SchedWriteRes<[SwiftUnitP2]> {
+ let Latency = 15;
+ let ResourceCycles = [15];
+ }
+ def : InstRW<[SwiftWaitP0For15Cy, SwiftWaitP1For15Cy, SwiftWaitP2For15Cy],
+ (instregex "VMRS")>;
+ def : InstRW<[SwiftWaitP0For15Cy, SwiftWaitP1For15Cy, SwiftWaitP2For15Cy],
+ (instregex "VMSR")>;
+ // Not serializing.
+ def : InstRW<[SwiftWriteP0TwoCycle], (instregex "FMSTAT")>;
+
+ // 4.2.39 Advanced SIMD and VFP, Load Single Element
+ def : InstRW<[SwiftWriteLM4Cy], (instregex "VLDRD$", "VLDRS$")>;
+
+ // 4.2.40 Advanced SIMD and VFP, Store Single Element
+ def : InstRW<[SwiftWriteLM4Cy], (instregex "VSTRD$", "VSTRS$")>;
+
+ // 4.2.41 Advanced SIMD and VFP, Load Multiple
+ // 4.2.42 Advanced SIMD and VFP, Store Multiple
+
+ // Resource requirement for permuting, just reserves the resources.
+ foreach Num = 1-28 in {
+ def SwiftVLDMPerm#Num : SchedWriteRes<[SwiftUnitP1]> {
+ let Latency = 0;
+ let NumMicroOps = Num;
+ let ResourceCycles = [Num];
+ }
+ }
+
+ // Pre RA pseudos - load/store to a Q register as a D register pair.
+ def : InstRW<[SwiftWriteLM4Cy], (instregex "VLDMQIA$", "VSTMQIA$")>;
+
+ // Post RA not modelled accurately. We assume that a register use of width 64
+ // bits maps to a D register and one of width 128 maps to a Q register. Not all
+ // the different kinds are accurately represented.
+ def SwiftWriteVLDM : SchedWriteVariant<[
+ // Load of one S register.
+ SchedVar<SwiftLMAddr1Pred, [SwiftWriteLM4Cy]>,
+ // Load of one D register.
+ SchedVar<SwiftLMAddr2Pred, [SwiftWriteLM4Cy, SwiftWriteLM4CyNo]>,
+ // Load of 3 S registers.
+ SchedVar<SwiftLMAddr3Pred, [SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM13CyNo, SwiftWriteP01OneCycle,
+ SwiftVLDMPerm3]>,
+ // Load of a Q register (not necessarily true). We should not be mapping to
+ // 4 S registers, either.
+ SchedVar<SwiftLMAddr4Pred, [SwiftWriteLM4Cy, SwiftWriteLM4CyNo,
+ SwiftWriteLM4CyNo, SwiftWriteLM4CyNo]>,
+ // Load of 5 S registers.
+ SchedVar<SwiftLMAddr5Pred, [SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM13CyNo, SwiftWriteLM14CyNo,
+ SwiftWriteLM17CyNo, SwiftWriteP01OneCycle,
+ SwiftVLDMPerm5]>,
+ // Load of 3 D registers. (Must also be able to handle an S register list,
+ // though this is not accurate.)
+ SchedVar<SwiftLMAddr6Pred, [SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM10Cy, SwiftWriteLM14CyNo,
+ SwiftWriteLM14CyNo, SwiftWriteLM14CyNo,
+ SwiftWriteP01OneCycle, SwiftVLDMPerm5]>,
+ // Load of 7 S registers.
+ SchedVar<SwiftLMAddr7Pred, [SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM14CyNo,
+ SwiftWriteLM17CyNo, SwiftWriteLM18CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteP01OneCycle,
+ SwiftVLDMPerm7]>,
+ // Load of two Q registers.
+ SchedVar<SwiftLMAddr8Pred, [SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteP01OneCycle, SwiftVLDMPerm2]>,
+ // Load of 9 S registers.
+ SchedVar<SwiftLMAddr9Pred, [SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM14CyNo,
+ SwiftWriteLM17CyNo, SwiftWriteLM18CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM25CyNo, SwiftWriteP01OneCycle,
+ SwiftVLDMPerm9]>,
+ // Load of 5 D registers.
+ SchedVar<SwiftLMAddr10Pred,[SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM10Cy, SwiftWriteLM14Cy,
+ SwiftWriteLM14CyNo, SwiftWriteLM14CyNo,
+ SwiftWriteLM14CyNo, SwiftWriteLM14CyNo,
+ SwiftWriteLM14CyNo, SwiftWriteLM14CyNo,
+ SwiftWriteP01OneCycle, SwiftVLDMPerm5]>,
+ // Inaccurate: reuse description from 9 S registers.
+ SchedVar<SwiftLMAddr11Pred,[SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM14CyNo,
+ SwiftWriteLM17CyNo, SwiftWriteLM18CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM25CyNo, SwiftWriteP01OneCycle,
+ SwiftVLDMPerm9]>,
+ // Load of three Q registers.
+ SchedVar<SwiftLMAddr12Pred,[SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM11Cy, SwiftWriteLM11Cy,
+ SwiftWriteLM11CyNo, SwiftWriteLM11CyNo,
+ SwiftWriteLM11CyNo, SwiftWriteLM11CyNo,
+ SwiftWriteLM11CyNo, SwiftWriteLM11CyNo,
+ SwiftWriteLM11CyNo, SwiftWriteLM11CyNo,
+ SwiftWriteP01OneCycle, SwiftVLDMPerm3]>,
+ // Inaccurate: reuse description from 9 S registers.
+ SchedVar<SwiftLMAddr13Pred, [SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM14CyNo,
+ SwiftWriteLM17CyNo, SwiftWriteLM18CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM25CyNo, SwiftWriteP01OneCycle,
+ SwiftVLDMPerm9]>,
+ // Load of 7 D registers (inaccurate).
+ SchedVar<SwiftLMAddr14Pred,[SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM10Cy, SwiftWriteLM14Cy,
+ SwiftWriteLM14Cy, SwiftWriteLM14CyNo,
+ SwiftWriteLM14CyNo, SwiftWriteLM14CyNo,
+ SwiftWriteLM14CyNo, SwiftWriteLM14CyNo,
+ SwiftWriteLM14CyNo, SwiftWriteLM14CyNo,
+ SwiftWriteP01OneCycle, SwiftVLDMPerm7]>,
+ SchedVar<SwiftLMAddr15Pred,[SwiftWriteLM9Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM14Cy,
+ SwiftWriteLM17Cy, SwiftWriteLM18CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM21CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM25CyNo, SwiftWriteP01OneCycle,
+ SwiftVLDMPerm9]>,
+ // Load of 4 Q registers.
+ SchedVar<SwiftLMAddr16Pred,[SwiftWriteLM7Cy, SwiftWriteLM10Cy,
+ SwiftWriteLM11Cy, SwiftWriteLM14Cy,
+ SwiftWriteLM15Cy, SwiftWriteLM18CyNo,
+ SwiftWriteLM19CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM19CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM19CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM19CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteLM19CyNo, SwiftWriteLM22CyNo,
+ SwiftWriteP01OneCycle, SwiftVLDMPerm4]>,
+ // Unknown number of registers, just use resources for two registers.
+ SchedVar<NoSchedPred, [SwiftWriteLM7Cy, SwiftWriteLM8Cy,
+ SwiftWriteLM13Cy, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteLM13CyNo, SwiftWriteLM13CyNo,
+ SwiftWriteP01OneCycle, SwiftVLDMPerm2]>
+ ]> { let Variadic = 1; }
+
+ def : InstRW<[SwiftWriteVLDM], (instregex "VLDM[SD](IA|DB)$")>;
+
+ def : InstRW<[SwiftWriteP01OneCycle2x, SwiftWriteVLDM],
+ (instregex "VLDM[SD](IA|DB)_UPD$")>;
+
+ def SwiftWriteVSTM : SchedWriteVariant<[
+ // One S register.
+ SchedVar<SwiftLMAddr1Pred, [SwiftWriteSTM1]>,
+ // One D register.
+ SchedVar<SwiftLMAddr2Pred, [SwiftWriteSTM1]>,
+ // Three S registers.
+ SchedVar<SwiftLMAddr3Pred, [SwiftWriteSTM4]>,
+ // Assume one Q register.
+ SchedVar<SwiftLMAddr4Pred, [SwiftWriteSTM1]>,
+ SchedVar<SwiftLMAddr5Pred, [SwiftWriteSTM6]>,
+ // Assume three D registers.
+ SchedVar<SwiftLMAddr6Pred, [SwiftWriteSTM4]>,
+ SchedVar<SwiftLMAddr7Pred, [SwiftWriteSTM8]>,
+ // Assume two Q registers.
+ SchedVar<SwiftLMAddr8Pred, [SwiftWriteSTM3]>,
+ SchedVar<SwiftLMAddr9Pred, [SwiftWriteSTM10]>,
+ // Assume 5 D registers.
+ SchedVar<SwiftLMAddr10Pred, [SwiftWriteSTM6]>,
+ SchedVar<SwiftLMAddr11Pred, [SwiftWriteSTM12]>,
+ // Assume three Q registers.
+ SchedVar<SwiftLMAddr12Pred, [SwiftWriteSTM4]>,
+ SchedVar<SwiftLMAddr13Pred, [SwiftWriteSTM14]>,
+ // Assume 7 D registers.
+ SchedVar<SwiftLMAddr14Pred, [SwiftWriteSTM8]>,
+ SchedVar<SwiftLMAddr15Pred, [SwiftWriteSTM16]>,
+ // Assume four Q registers.
+ SchedVar<SwiftLMAddr16Pred, [SwiftWriteSTM5]>,
+ // Assume two Q registers.
+ SchedVar<NoSchedPred, [SwiftWriteSTM3]>
+ ]> { let Variadic = 1; }
+
+ def : InstRW<[SwiftWriteVSTM], (instregex "VSTM[SD](IA|DB)$")>;
+
+ def : InstRW<[SwiftWriteP01OneCycle2x, SwiftWriteVSTM],
+ (instregex "VSTM[SD](IA|DB)_UPD")>;
+
+ // 4.2.43 Advanced SIMD, Element or Structure Load and Store
+ def SwiftWrite2xP2FourCy : SchedWriteRes<[SwiftUnitP2]> {
+ let Latency = 4;
+ let ResourceCycles = [2];
+ }
+ def SwiftWrite3xP2FourCy : SchedWriteRes<[SwiftUnitP2]> {
+ let Latency = 4;
+ let ResourceCycles = [3];
+ }
+ foreach Num = 1-2 in {
+ def SwiftExt#Num#xP0 : SchedWriteRes<[SwiftUnitP0]> {
+ let Latency = 0;
+ let NumMicroOps = Num;
+ let ResourceCycles = [Num];
+ }
+ }
+ // VLDx
+ // Multiple structures.
+ // Single element structure loads.
+ // We assume aligned accesses.
+ // Single/two registers.
+ def : InstRW<[SwiftWriteLM4Cy], (instregex "VLD1(d|q)(8|16|32|64)$")>;
+ def : InstRW<[SwiftWriteLM4Cy, SwiftWriteP01OneCycle],
+ (instregex "VLD1(d|q)(8|16|32|64)wb")>;
+ // Three register.
+ def : InstRW<[SwiftWrite3xP2FourCy],
+ (instregex "VLD1(d|q)(8|16|32|64)T$", "VLD1d64TPseudo")>;
+ def : InstRW<[SwiftWrite3xP2FourCy, SwiftWriteP01OneCycle],
+ (instregex "VLD1(d|q)(8|16|32|64)Twb")>;
+ /// Four Register.
+ def : InstRW<[SwiftWrite2xP2FourCy],
+ (instregex "VLD1(d|q)(8|16|32|64)Q$", "VLD1d64QPseudo")>;
+ def : InstRW<[SwiftWrite2xP2FourCy, SwiftWriteP01OneCycle],
+ (instregex "VLD1(d|q)(8|16|32|64)Qwb")>;
+ // Two element structure loads.
+ // Two/four register.
+ def : InstRW<[SwiftWriteLM9Cy, SwiftExt2xP0, SwiftVLDMPerm2],
+ (instregex "VLD2(d|q|b)(8|16|32)$", "VLD2q(8|16|32)Pseudo$")>;
+ def : InstRW<[SwiftWriteLM9Cy, SwiftWriteP01OneCycle, SwiftExt2xP0,
+ SwiftVLDMPerm2],
+ (instregex "VLD2(d|q|b)(8|16|32)wb", "VLD2q(8|16|32)PseudoWB")>;
+ // Three element structure.
+ def : InstRW<[SwiftWriteLM9Cy, SwiftWriteLM9CyNo, SwiftWriteLM9CyNo,
+ SwiftVLDMPerm3, SwiftWrite3xP2FourCy],
+ (instregex "VLD3(d|q)(8|16|32)$")>;
+ def : InstRW<[SwiftWriteLM9Cy, SwiftVLDMPerm3, SwiftWrite3xP2FourCy],
+ (instregex "VLD3(d|q)(8|16|32)(oddP|P)seudo$")>;
+
+ def : InstRW<[SwiftWriteLM9Cy, SwiftWriteLM9CyNo, SwiftWriteLM9CyNo,
+ SwiftWriteP01OneCycle, SwiftVLDMPerm3, SwiftWrite3xP2FourCy],
+ (instregex "VLD3(d|q)(8|16|32)_UPD$")>;
+ def : InstRW<[SwiftWriteLM9Cy, SwiftWriteP01OneCycle, SwiftVLDMPerm3,
+ SwiftWrite3xP2FourCy],
+ (instregex "VLD3(d|q)(8|16|32)(oddP|P)seudo_UPD")>;
+ // Four element structure loads.
+ def : InstRW<[SwiftWriteLM11Cy, SwiftWriteLM11Cy, SwiftWriteLM11Cy,
+ SwiftWriteLM11Cy, SwiftExt2xP0, SwiftVLDMPerm4,
+ SwiftWrite3xP2FourCy],
+ (instregex "VLD4(d|q)(8|16|32)$")>;
+ def : InstRW<[SwiftWriteLM11Cy, SwiftExt2xP0, SwiftVLDMPerm4,
+ SwiftWrite3xP2FourCy],
+ (instregex "VLD4(d|q)(8|16|32)(oddP|P)seudo$")>;
+ def : InstRW<[SwiftWriteLM11Cy, SwiftWriteLM11Cy, SwiftWriteLM11Cy,
+ SwiftWriteLM11Cy, SwiftWriteP01OneCycle, SwiftExt2xP0,
+ SwiftVLDMPerm4, SwiftWrite3xP2FourCy],
+ (instregex "VLD4(d|q)(8|16|32)_UPD")>;
+ def : InstRW<[SwiftWriteLM11Cy, SwiftWriteP01OneCycle, SwiftExt2xP0,
+ SwiftVLDMPerm4, SwiftWrite3xP2FourCy],
+ (instregex "VLD4(d|q)(8|16|32)(oddP|P)seudo_UPD")>;
+
+ // Single/all lane loads.
+ // One element structure.
+ def : InstRW<[SwiftWriteLM6Cy, SwiftVLDMPerm2],
+ (instregex "VLD1(LN|DUP)(d|q)(8|16|32)$", "VLD1(LN|DUP)(d|q)(8|16|32)Pseudo$")>;
+ def : InstRW<[SwiftWriteLM6Cy, SwiftWriteP01OneCycle, SwiftVLDMPerm2],
+ (instregex "VLD1(LN|DUP)(d|q)(8|16|32)(wb|_UPD)",
+ "VLD1LNq(8|16|32)Pseudo_UPD")>;
+ // Two element structure.
+ def : InstRW<[SwiftWriteLM6Cy, SwiftWriteLM6Cy, SwiftExt1xP0, SwiftVLDMPerm2],
+ (instregex "VLD2(DUP|LN)(d|q)(8|16|32|8x2|16x2|32x2)$",
+ "VLD2LN(d|q)(8|16|32)Pseudo$")>;
+ def : InstRW<[SwiftWriteLM6Cy, SwiftWriteLM6Cy, SwiftWriteP01OneCycle,
+ SwiftExt1xP0, SwiftVLDMPerm2],
+ (instregex "VLD2LN(d|q)(8|16|32)_UPD$")>;
+ def : InstRW<[SwiftWriteLM6Cy, SwiftWriteP01OneCycle, SwiftWriteLM6Cy,
+ SwiftExt1xP0, SwiftVLDMPerm2],
+ (instregex "VLD2DUPd(8|16|32|8x2|16x2|32x2)wb")>;
+ def : InstRW<[SwiftWriteLM6Cy, SwiftWriteP01OneCycle, SwiftWriteLM6Cy,
+ SwiftExt1xP0, SwiftVLDMPerm2],
+ (instregex "VLD2LN(d|q)(8|16|32)Pseudo_UPD")>;
+ // Three element structure.
+ def : InstRW<[SwiftWriteLM7Cy, SwiftWriteLM8Cy, SwiftWriteLM8Cy, SwiftExt1xP0,
+ SwiftVLDMPerm3],
+ (instregex "VLD3(DUP|LN)(d|q)(8|16|32)$",
+ "VLD3(LN|DUP)(d|q)(8|16|32)Pseudo$")>;
+ def : InstRW<[SwiftWriteLM7Cy, SwiftWriteLM8Cy, SwiftWriteLM8Cy,
+ SwiftWriteP01OneCycle, SwiftExt1xP0, SwiftVLDMPerm3],
+ (instregex "VLD3(LN|DUP)(d|q)(8|16|32)_UPD")>;
+ def : InstRW<[SwiftWriteLM7Cy, SwiftWriteP01OneCycle, SwiftWriteLM8Cy,
+ SwiftWriteLM8Cy, SwiftExt1xP0, SwiftVLDMPerm3],
+ (instregex "VLD3(LN|DUP)(d|q)(8|16|32)Pseudo_UPD")>;
+ // Four element structure.
+ def : InstRW<[SwiftWriteLM8Cy, SwiftWriteLM9Cy, SwiftWriteLM10CyNo,
+ SwiftWriteLM10CyNo, SwiftExt1xP0, SwiftVLDMPerm5],
+ (instregex "VLD4(LN|DUP)(d|q)(8|16|32)$",
+ "VLD4(LN|DUP)(d|q)(8|16|32)Pseudo$")>;
+ def : InstRW<[SwiftWriteLM8Cy, SwiftWriteLM9Cy, SwiftWriteLM10CyNo,
+ SwiftWriteLM10CyNo, SwiftWriteP01OneCycle, SwiftExt1xP0,
+ SwiftVLDMPerm5],
+ (instregex "VLD4(DUP|LN)(d|q)(8|16|32)_UPD")>;
+ def : InstRW<[SwiftWriteLM8Cy, SwiftWriteP01OneCycle, SwiftWriteLM9Cy,
+ SwiftWriteLM10CyNo, SwiftWriteLM10CyNo, SwiftExt1xP0,
+ SwiftVLDMPerm5],
+ (instregex "VLD4(DUP|LN)(d|q)(8|16|32)Pseudo_UPD")>;
+ // VSTx
+ // Multiple structures.
+ // Single element structure store.
+ def : InstRW<[SwiftWrite1xP2], (instregex "VST1d(8|16|32|64)$")>;
+ def : InstRW<[SwiftWrite2xP2], (instregex "VST1q(8|16|32|64)$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite1xP2],
+ (instregex "VST1d(8|16|32|64)wb")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite2xP2],
+ (instregex "VST1q(8|16|32|64)wb")>;
+ def : InstRW<[SwiftWrite3xP2],
+ (instregex "VST1d(8|16|32|64)T$", "VST1d64TPseudo$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite3xP2],
+ (instregex "VST1d(8|16|32|64)Twb", "VST1d64TPseudoWB")>;
+ def : InstRW<[SwiftWrite4xP2],
+ (instregex "VST1d(8|16|32|64)(Q|QPseudo)$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite4xP2],
+ (instregex "VST1d(8|16|32|64)(Qwb|QPseudoWB)")>;
+ // Two element structure store.
+ def : InstRW<[SwiftWrite1xP2, SwiftVLDMPerm1],
+ (instregex "VST2(d|b)(8|16|32)$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite1xP2, SwiftVLDMPerm1],
+ (instregex "VST2(b|d)(8|16|32)wb")>;
+ def : InstRW<[SwiftWrite2xP2, SwiftVLDMPerm2],
+ (instregex "VST2q(8|16|32)$", "VST2q(8|16|32)Pseudo$")>;
+ def : InstRW<[SwiftWrite2xP2, SwiftVLDMPerm2],
+ (instregex "VST2q(8|16|32)wb", "VST2q(8|16|32)PseudoWB")>;
+ // Three element structure store.
+ def : InstRW<[SwiftWrite4xP2, SwiftVLDMPerm2],
+ (instregex "VST3(d|q)(8|16|32)$", "VST3(d|q)(8|16|32)(oddP|P)seudo$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite4xP2, SwiftVLDMPerm2],
+ (instregex "VST3(d|q)(8|16|32)_UPD",
+ "VST3(d|q)(8|16|32)(oddP|P)seudo_UPD$")>;
+ // Four element structure store.
+ def : InstRW<[SwiftWrite4xP2, SwiftVLDMPerm2],
+ (instregex "VST4(d|q)(8|16|32)$", "VST4(d|q)(8|16|32)(oddP|P)seudo$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite4xP2, SwiftVLDMPerm4],
+ (instregex "VST4(d|q)(8|16|32)_UPD",
+ "VST4(d|q)(8|16|32)(oddP|P)seudo_UPD$")>;
+ // Single/all lane store.
+ // One element structure.
+ def : InstRW<[SwiftWrite1xP2, SwiftVLDMPerm1],
+ (instregex "VST1LNd(8|16|32)$", "VST1LNq(8|16|32)Pseudo$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite1xP2, SwiftVLDMPerm1],
+ (instregex "VST1LNd(8|16|32)_UPD", "VST1LNq(8|16|32)Pseudo_UPD")>;
+ // Two element structure.
+ def : InstRW<[SwiftWrite1xP2, SwiftVLDMPerm2],
+ (instregex "VST2LN(d|q)(8|16|32)$", "VST2LN(d|q)(8|16|32)Pseudo$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite1xP2, SwiftVLDMPerm2],
+ (instregex "VST2LN(d|q)(8|16|32)_UPD",
+ "VST2LN(d|q)(8|16|32)Pseudo_UPD")>;
+ // Three element structure.
+ def : InstRW<[SwiftWrite4xP2, SwiftVLDMPerm2],
+ (instregex "VST3LN(d|q)(8|16|32)$", "VST3LN(d|q)(8|16|32)Pseudo$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite4xP2, SwiftVLDMPerm2],
+ (instregex "VST3LN(d|q)(8|16|32)_UPD",
+ "VST3LN(d|q)(8|16|32)Pseudo_UPD")>;
+ // Four element structure.
+ def : InstRW<[SwiftWrite2xP2, SwiftVLDMPerm2],
+ (instregex "VST4LN(d|q)(8|16|32)$", "VST4LN(d|q)(8|16|32)Pseudo$")>;
+ def : InstRW<[SwiftWriteP01OneCycle, SwiftWrite2xP2, SwiftVLDMPerm2],
+ (instregex "VST4LN(d|q)(8|16|32)_UPD",
+ "VST4LN(d|q)(8|16|32)Pseudo_UPD")>;
+
+ // 4.2.44 VFP, Divide and Square Root
+ def SwiftDiv17 : SchedWriteRes<[SwiftUnitP0, SwiftUnitDiv]> {
+ let NumMicroOps = 1;
+ let Latency = 17;
+ let ResourceCycles = [1, 15];
+ }
+ def SwiftDiv32 : SchedWriteRes<[SwiftUnitP0, SwiftUnitDiv]> {
+ let NumMicroOps = 1;
+ let Latency = 32;
+ let ResourceCycles = [1, 30];
+ }
+ def : InstRW<[SwiftDiv17], (instregex "VDIVS", "VSQRTS")>;
+ def : InstRW<[SwiftDiv32], (instregex "VDIVD", "VSQRTD")>;
+
+ // Not specified.
+ def : InstRW<[SwiftWriteP01OneCycle2x], (instregex "ABS")>;
+ // Preload.
+ def : WriteRes<WritePreLd, [SwiftUnitP2]> { let Latency = 0;
+ let ResourceCycles = [0];
+ }
+
}
diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index 41a7e0c2c8a5..93add6ee33cf 100644
--- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -26,7 +26,7 @@ ARMSelectionDAGInfo::~ARMSelectionDAGInfo() {
}
SDValue
-ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
+ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
@@ -140,7 +140,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
// GNU library uses (ptr, value, size)
// See RTABI section 4.3.4
SDValue ARMSelectionDAGInfo::
-EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
+EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, SDValue Size,
unsigned Align, bool isVolatile,
diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.h b/lib/Target/ARM/ARMSelectionDAGInfo.h
index 6419a737295a..56c9375855dd 100644
--- a/lib/Target/ARM/ARMSelectionDAGInfo.h
+++ b/lib/Target/ARM/ARMSelectionDAGInfo.h
@@ -45,7 +45,7 @@ public:
~ARMSelectionDAGInfo();
virtual
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
@@ -55,7 +55,7 @@ public:
// Adjust parameters for memset, see RTABI section 4.3.4
virtual
- SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align,
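The two hunks above are a mechanical signature change from DebugLoc to SDLoc. As a rough illustration of what a caller looks like after this migration (lowerSomething is a hypothetical helper, not code from this patch):

    SDValue lowerSomething(SelectionDAG &DAG, SDValue Op) {
      // SDLoc is built from the node being lowered and carries both the debug
      // location and the IR order used by the scheduler.
      SDLoc dl(Op);                       // was: DebugLoc dl = Op.getDebugLoc();
      return DAG.getNode(ISD::ADD, dl, Op.getValueType(), Op, Op);
    }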
diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp
index 8653c462f06b..a11629852ad7 100644
--- a/lib/Target/ARM/ARMSubtarget.cpp
+++ b/lib/Target/ARM/ARMSubtarget.cpp
@@ -32,20 +32,53 @@ ReserveR9("arm-reserve-r9", cl::Hidden,
cl::desc("Reserve R9, making it unavailable as GPR"));
static cl::opt<bool>
-DarwinUseMOVT("arm-darwin-use-movt", cl::init(true), cl::Hidden);
+ArmUseMOVT("arm-use-movt", cl::init(true), cl::Hidden);
static cl::opt<bool>
UseFusedMulOps("arm-use-mulops",
cl::init(true), cl::Hidden);
-static cl::opt<bool>
-StrictAlign("arm-strict-align", cl::Hidden,
- cl::desc("Disallow all unaligned memory accesses"));
+enum AlignMode {
+ DefaultAlign,
+ StrictAlign,
+ NoStrictAlign
+};
+
+static cl::opt<AlignMode>
+Align(cl::desc("Load/store alignment support"),
+ cl::Hidden, cl::init(DefaultAlign),
+ cl::values(
+ clEnumValN(DefaultAlign, "arm-default-align",
+ "Generate unaligned accesses only on hardware/OS "
+ "combinations that are known to support them"),
+ clEnumValN(StrictAlign, "arm-strict-align",
+ "Disallow all unaligned memory accesses"),
+ clEnumValN(NoStrictAlign, "arm-no-strict-align",
+ "Allow unaligned memory accesses"),
+ clEnumValEnd));
+
+enum ITMode {
+ DefaultIT,
+ RestrictedIT,
+ NoRestrictedIT
+};
+
+static cl::opt<ITMode>
+IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT),
+ cl::ZeroOrMore,
+ cl::values(clEnumValN(DefaultIT, "arm-default-it",
+ "Generate IT block based on arch"),
+ clEnumValN(RestrictedIT, "arm-restrict-it",
+ "Disallow deprecated IT based on ARMv8"),
+ clEnumValN(NoRestrictedIT, "arm-no-restrict-it",
+ "Allow IT blocks based on ARMv7"),
+ clEnumValEnd));
ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, const TargetOptions &Options)
: ARMGenSubtargetInfo(TT, CPU, FS)
, ARMProcFamily(Others)
+ , ARMProcClass(None)
, stackAlignment(4)
, CPUString(CPU)
, TargetTriple(TT)
@@ -60,11 +93,14 @@ void ARMSubtarget::initializeEnvironment() {
HasV5TOps = false;
HasV5TEOps = false;
HasV6Ops = false;
+ HasV6MOps = false;
HasV6T2Ops = false;
HasV7Ops = false;
+ HasV8Ops = false;
HasVFPv2 = false;
HasVFPv3 = false;
HasVFPv4 = false;
+ HasFPARMv8 = false;
HasNEON = false;
UseNEONForSinglePrecisionFP = false;
UseMulOps = UseFusedMulOps;
@@ -73,7 +109,6 @@ void ARMSubtarget::initializeEnvironment() {
SlowFPBrcc = false;
InThumbMode = false;
HasThumb2 = false;
- IsMClass = false;
NoARM = false;
PostRAScheduler = false;
IsR9Reserved = ReserveR9;
@@ -90,8 +125,12 @@ void ARMSubtarget::initializeEnvironment() {
AvoidMOVsShifterOperand = false;
HasRAS = false;
HasMPExtension = false;
+ HasVirtualization = false;
FPOnlySP = false;
+ HasPerfMon = false;
HasTrustZone = false;
+ HasCrypto = false;
+ HasCRC = false;
AllowsUnalignedMem = false;
Thumb2DSP = false;
UseNaClTrap = false;
@@ -115,8 +154,13 @@ void ARMSubtarget::resetSubtargetFeatures(const MachineFunction *MF) {
}
void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
- if (CPUString.empty())
- CPUString = "generic";
+ if (CPUString.empty()) {
+ if (isTargetIOS() && TargetTriple.getArchName().endswith("v7s"))
+ // Default to the Swift CPU when targeting armv7s/thumbv7s.
+ CPUString = "swift";
+ else
+ CPUString = "generic";
+ }
// Insert the architecture feature derived from the target triple into the
// feature string. This is important for setting features that are implied
@@ -134,7 +178,7 @@ void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
// Thumb2 implies at least V6T2. FIXME: Fix tests to explicitly specify a
// ARM version or CPU and then remove this.
if (!HasV6T2Ops && hasThumb2())
- HasV4TOps = HasV5TOps = HasV5TEOps = HasV6Ops = HasV6T2Ops = true;
+ HasV4TOps = HasV5TOps = HasV5TEOps = HasV6Ops = HasV6MOps = HasV6T2Ops = true;
// Keep a pointer to static instruction cost data for the specified CPU.
SchedModel = getSchedModelForCPU(CPUString);
@@ -151,21 +195,56 @@ void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
if (isAAPCS_ABI())
stackAlignment = 8;
- if (!isTargetIOS())
- UseMovt = hasV6T2Ops();
- else {
+ UseMovt = hasV6T2Ops() && ArmUseMOVT;
+
+ if (!isTargetIOS()) {
+ IsR9Reserved = ReserveR9;
+ } else {
IsR9Reserved = ReserveR9 | !HasV6Ops;
- UseMovt = DarwinUseMOVT && hasV6T2Ops();
SupportsTailCall = !getTargetTriple().isOSVersionLT(5, 0);
}
if (!isThumb() || hasThumb2())
PostRAScheduler = true;
- // v6+ may or may not support unaligned mem access depending on the system
- // configuration.
- if (!StrictAlign && hasV6Ops() && isTargetDarwin())
- AllowsUnalignedMem = true;
+ switch (Align) {
+ case DefaultAlign:
+ // Assume pre-ARMv6 doesn't support unaligned accesses.
+ //
+ // ARMv6 may or may not support unaligned accesses depending on the
+ // SCTLR.U bit, which is architecture-specific. We assume ARMv6
+ // Darwin targets support unaligned accesses, and others don't.
+ //
+ // ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit
+ // which raises an alignment fault on unaligned accesses. Linux
+ // defaults this bit to 0 and handles it as a system-wide (not
+ // per-process) setting. It is therefore safe to assume that ARMv7+
+ // Linux targets support unaligned accesses. The same goes for NaCl.
+ //
+ // The above behavior is consistent with GCC.
+ AllowsUnalignedMem = (
+ (hasV7Ops() && (isTargetLinux() || isTargetNaCl())) ||
+ (hasV6Ops() && isTargetDarwin()));
+ break;
+ case StrictAlign:
+ AllowsUnalignedMem = false;
+ break;
+ case NoStrictAlign:
+ AllowsUnalignedMem = true;
+ break;
+ }
+
+ switch (IT) {
+ case DefaultIT:
+ RestrictIT = hasV8Ops() ? true : false;
+ break;
+ case RestrictedIT:
+ RestrictIT = true;
+ break;
+ case NoRestrictedIT:
+ RestrictIT = false;
+ break;
+ }
// NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default.
uint64_t Bits = getFeatureBits();
@@ -231,12 +310,15 @@ unsigned ARMSubtarget::getMispredictionPenalty() const {
return SchedModel->MispredictPenalty;
}
+bool ARMSubtarget::hasSinCos() const {
+ return getTargetTriple().getOS() == Triple::IOS &&
+ !getTargetTriple().isOSVersionLT(7, 0);
+}
+
bool ARMSubtarget::enablePostRAScheduler(
CodeGenOpt::Level OptLevel,
TargetSubtargetInfo::AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const {
- Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
- CriticalPathRCs.clear();
- CriticalPathRCs.push_back(&ARM::GPRRegClass);
+ Mode = TargetSubtargetInfo::ANTIDEP_NONE;
return PostRAScheduler && OptLevel >= CodeGenOpt::Default;
}
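The new hasSinCos() predicate only reports that the target's runtime (iOS 7.0 and later here) ships a combined sin/cos entry point. A minimal sketch of how such a hook is typically consumed during call lowering; the libcall names and operation actions below are assumptions for illustration, not taken from this hunk:

    if (Subtarget->hasSinCos()) {
      // Assumed iOS-style combined entry points; fold FSIN/FCOS pairs of the
      // same operand into a single runtime call.
      setLibcallName(RTLIB::SINCOS_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_F64, "__sincos_stret");
      setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
      setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
    }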
diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h
index 038eb76ae1d2..5276901bbb92 100644
--- a/lib/Target/ARM/ARMSubtarget.h
+++ b/lib/Target/ARM/ARMSubtarget.h
@@ -31,26 +31,36 @@ class TargetOptions;
class ARMSubtarget : public ARMGenSubtargetInfo {
protected:
enum ARMProcFamilyEnum {
- Others, CortexA5, CortexA8, CortexA9, CortexA15, CortexR5, Swift
+ Others, CortexA5, CortexA8, CortexA9, CortexA15, CortexR5, Swift, CortexA53, CortexA57
+ };
+ enum ARMProcClassEnum {
+ None, AClass, RClass, MClass
};
/// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
ARMProcFamilyEnum ARMProcFamily;
- /// HasV4TOps, HasV5TOps, HasV5TEOps, HasV6Ops, HasV6T2Ops, HasV7Ops -
+ /// ARMProcClass - ARM processor class: None, AClass, RClass or MClass.
+ ARMProcClassEnum ARMProcClass;
+
+ /// HasV4TOps, HasV5TOps, HasV5TEOps,
+ /// HasV6Ops, HasV6MOps, HasV6T2Ops, HasV7Ops, HasV8Ops -
/// Specify whether target support specific ARM ISA variants.
bool HasV4TOps;
bool HasV5TOps;
bool HasV5TEOps;
bool HasV6Ops;
+ bool HasV6MOps;
bool HasV6T2Ops;
bool HasV7Ops;
+ bool HasV8Ops;
- /// HasVFPv2, HasVFPv3, HasVFPv4, HasNEON - Specify what
+ /// HasVFPv2, HasVFPv3, HasVFPv4, HasFPARMv8, HasNEON - Specify what
/// floating point ISAs are supported.
bool HasVFPv2;
bool HasVFPv3;
bool HasVFPv4;
+ bool HasFPARMv8;
bool HasNEON;
/// UseNEONForSinglePrecisionFP - if the NEONFP attribute has been
@@ -79,10 +89,6 @@ protected:
/// HasThumb2 - True if Thumb2 instructions are supported.
bool HasThumb2;
- /// IsMClass - True if the subtarget belongs to the 'M' profile of CPUs -
- /// v6m, v7m for example.
- bool IsMClass;
-
/// NoARM - True if subtarget does not support ARM mode execution.
bool NoARM;
@@ -144,18 +150,37 @@ protected:
/// extension (ARMv7 only).
bool HasMPExtension;
+ /// HasVirtualization - True if the subtarget supports the Virtualization
+ /// extension.
+ bool HasVirtualization;
+
/// FPOnlySP - If true, the floating point unit only supports single
/// precision.
bool FPOnlySP;
+ /// If true, the processor supports the Performance Monitor Extensions. These
+ /// include a generic cycle-counter as well as more fine-grained (often
+ /// implementation-specific) events.
+ bool HasPerfMon;
+
/// HasTrustZone - if true, processor supports TrustZone security extensions
bool HasTrustZone;
+ /// HasCrypto - if true, processor supports Cryptography extensions
+ bool HasCrypto;
+
+ /// HasCRC - if true, processor supports CRC instructions
+ bool HasCRC;
+
/// AllowsUnalignedMem - If true, the subtarget allows unaligned memory
/// accesses for some types. For details, see
/// ARMTargetLowering::allowsUnalignedMemoryAccesses().
bool AllowsUnalignedMem;
+ /// RestrictIT - If true, the subtarget disallows generation of deprecated IT
+ /// blocks to conform to ARMv8 rule.
+ bool RestrictIT;
+
/// Thumb2DSP - If true, the subtarget supports the v7 DSP (saturating arith
/// and such) instructions in Thumb2 code.
bool Thumb2DSP;
@@ -187,10 +212,6 @@ protected:
public:
enum {
- isELF, isDarwin
- } TargetType;
-
- enum {
ARM_ABI_APCS,
ARM_ABI_AAPCS // ARM EABI
} TargetABI;
@@ -224,8 +245,10 @@ public:
bool hasV5TOps() const { return HasV5TOps; }
bool hasV5TEOps() const { return HasV5TEOps; }
bool hasV6Ops() const { return HasV6Ops; }
+ bool hasV6MOps() const { return HasV6MOps; }
bool hasV6T2Ops() const { return HasV6T2Ops; }
bool hasV7Ops() const { return HasV7Ops; }
+ bool hasV8Ops() const { return HasV8Ops; }
bool isCortexA5() const { return ARMProcFamily == CortexA5; }
bool isCortexA8() const { return ARMProcFamily == CortexA8; }
@@ -241,7 +264,11 @@ public:
bool hasVFP2() const { return HasVFPv2; }
bool hasVFP3() const { return HasVFPv3; }
bool hasVFP4() const { return HasVFPv4; }
+ bool hasFPARMv8() const { return HasFPARMv8; }
bool hasNEON() const { return HasNEON; }
+ bool hasCrypto() const { return HasCrypto; }
+ bool hasCRC() const { return HasCRC; }
+ bool hasVirtualization() const { return HasVirtualization; }
bool useNEONForSinglePrecisionFP() const {
return hasNEON() && UseNEONForSinglePrecisionFP; }
@@ -249,11 +276,15 @@ public:
bool hasDivideInARMMode() const { return HasHardwareDivideInARM; }
bool hasT2ExtractPack() const { return HasT2ExtractPack; }
bool hasDataBarrier() const { return HasDataBarrier; }
+ bool hasAnyDataBarrier() const {
+ return HasDataBarrier || (hasV6Ops() && !isThumb());
+ }
bool useMulOps() const { return UseMulOps; }
bool useFPVMLx() const { return !SlowFPVMLx; }
bool hasVMLxForwarding() const { return HasVMLxForwarding; }
bool isFPBrccSlow() const { return SlowFPBrcc; }
bool isFPOnlySP() const { return FPOnlySP; }
+ bool hasPerfMon() const { return HasPerfMon; }
bool hasTrustZone() const { return HasTrustZone; }
bool prefers32BitThumb() const { return Pref32BitThumb; }
bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; }
@@ -268,12 +299,19 @@ public:
const Triple &getTargetTriple() const { return TargetTriple; }
- bool isTargetIOS() const { return TargetTriple.getOS() == Triple::IOS; }
+ bool isTargetIOS() const { return TargetTriple.isiOS(); }
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
- bool isTargetNaCl() const {
- return TargetTriple.getOS() == Triple::NaCl;
- }
+ bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
+ bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
bool isTargetELF() const { return !isTargetDarwin(); }
+ // ARM EABI is the bare-metal EABI described in ARM ABI documents and
+ // can be accessed via -target arm-none-eabi. This is NOT GNUEABI.
+ // FIXME: Add a flag for bare-metal for that target and set Triple::EABI
+ // even for GNUEABI, so we can make a distinction here and still conform to
+ // the EABI on GNU (and Android) mode. This requires change in Clang, too.
+ bool isTargetAEABI() const {
+ return TargetTriple.getEnvironment() == Triple::EABI;
+ }
bool isAPCS_ABI() const { return TargetABI == ARM_ABI_APCS; }
bool isAAPCS_ABI() const { return TargetABI == ARM_ABI_AAPCS; }
@@ -282,8 +320,9 @@ public:
bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
bool isThumb2() const { return InThumbMode && HasThumb2; }
bool hasThumb2() const { return HasThumb2; }
- bool isMClass() const { return IsMClass; }
- bool isARClass() const { return !IsMClass; }
+ bool isMClass() const { return ARMProcClass == MClass; }
+ bool isRClass() const { return ARMProcClass == RClass; }
+ bool isAClass() const { return ARMProcClass == AClass; }
bool isR9Reserved() const { return IsR9Reserved; }
@@ -292,9 +331,15 @@ public:
bool allowsUnalignedMem() const { return AllowsUnalignedMem; }
+ bool restrictIT() const { return RestrictIT; }
+
const std::string & getCPUString() const { return CPUString; }
unsigned getMispredictionPenalty() const;
+
+ /// This function returns true if the target has sincos() routine in its
+ /// compiler runtime or math libraries.
+ bool hasSinCos() const;
/// enablePostRAScheduler - True at 'More' optimization.
bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp
index 42c7d2c437e0..c2bf78877875 100644
--- a/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/lib/Target/ARM/ARMTargetMachine.cpp
@@ -60,7 +60,7 @@ void ARMBaseTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
// Add first the target-independent BasicTTI pass, then our ARM pass. This
// allows the ARM pass to delegate to the target independent layer when
// appropriate.
- PM.add(createBasicTargetTransformInfoPass(getTargetLowering()));
+ PM.add(createBasicTargetTransformInfoPass(this));
PM.add(createARMTargetTransformInfoPass(this));
}
@@ -85,6 +85,7 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
TLInfo(*this),
TSInfo(*this),
FrameLowering(Subtarget) {
+ initAsmInfo();
if (!Subtarget.hasARMOps())
report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
"support ARM mode execution!");
@@ -117,6 +118,7 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
FrameLowering(Subtarget.hasThumb2()
? new ARMFrameLowering(Subtarget)
: (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)) {
+ initAsmInfo();
}
namespace {
@@ -148,7 +150,7 @@ TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
bool ARMPassConfig::addPreISel() {
if (TM->getOptLevel() != CodeGenOpt::None && EnableGlobalMerge)
- addPass(createGlobalMergePass(TM->getTargetLowering()));
+ addPass(createGlobalMergePass(TM));
return false;
}
@@ -167,7 +169,7 @@ bool ARMPassConfig::addPreRegAlloc() {
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only())
addPass(createARMLoadStoreOptimizationPass(true));
- if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isLikeA9())
+ if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
addPass(createMLxExpansionPass());
// Since the A15SDOptimizer pass can insert VDUP instructions, it can only be
// enabled when NEON is available.
@@ -194,8 +196,13 @@ bool ARMPassConfig::addPreSched2() {
addPass(createARMExpandPseudoPass());
if (getOptLevel() != CodeGenOpt::None) {
- if (!getARMSubtarget().isThumb1Only())
+ if (!getARMSubtarget().isThumb1Only()) {
+ // in v8, IfConversion depends on Thumb instruction widths
+ if (getARMSubtarget().restrictIT() &&
+ !getARMSubtarget().prefers32BitThumb())
+ addPass(createThumb2SizeReductionPass());
addPass(&IfConverterID);
+ }
}
if (getARMSubtarget().isThumb2())
addPass(createThumb2ITBlockPass());
diff --git a/lib/Target/ARM/ARMTargetObjectFile.cpp b/lib/Target/ARM/ARMTargetObjectFile.cpp
index dfdf6ab356a3..7ec71b20c64d 100644
--- a/lib/Target/ARM/ARMTargetObjectFile.cpp
+++ b/lib/Target/ARM/ARMTargetObjectFile.cpp
@@ -47,7 +47,7 @@ getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
MCStreamer &Streamer) const {
assert(Encoding == DW_EH_PE_absptr && "Can handle absptr encoding only");
- return MCSymbolRefExpr::Create(Mang->getSymbol(GV),
+ return MCSymbolRefExpr::Create(getSymbol(*Mang, GV),
MCSymbolRefExpr::VK_ARM_TARGET2,
getContext());
}
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 53ece668c3f5..6bbb38facc24 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -124,11 +124,14 @@ public:
unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) const;
- unsigned getAddressComputationCost(Type *Val) const;
+ unsigned getAddressComputationCost(Type *Val, bool IsComplex) const;
unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
OperandValueKind Op1Info = OK_AnyValue,
OperandValueKind Op2Info = OK_AnyValue) const;
+
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) const;
/// @}
};
@@ -182,7 +185,7 @@ unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
assert(ISD && "Invalid opcode");
// Single to/from double precision conversions.
- static const CostTblEntry<MVT> NEONFltDblTbl[] = {
+ static const CostTblEntry<MVT::SimpleValueType> NEONFltDblTbl[] = {
// Vector fptrunc/fpext conversions.
{ ISD::FP_ROUND, MVT::v2f64, 2 },
{ ISD::FP_EXTEND, MVT::v2f32, 2 },
@@ -192,8 +195,7 @@ unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
ISD == ISD::FP_EXTEND)) {
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
- int Idx = CostTableLookup<MVT>(NEONFltDblTbl, array_lengthof(NEONFltDblTbl),
- ISD, LT.second);
+ int Idx = CostTableLookup(NEONFltDblTbl, ISD, LT.second);
if (Idx != -1)
return LT.first * NEONFltDblTbl[Idx].Cost;
}
@@ -207,7 +209,8 @@ unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
// Some arithmetic, load and store operations have specific instructions
// to cast up/down their types automatically at no extra cost.
// TODO: Get these tables to know at least what the related operations are.
- static const TypeConversionCostTblEntry<MVT> NEONVectorConversionTbl[] = {
+ static const TypeConversionCostTblEntry<MVT::SimpleValueType>
+ NEONVectorConversionTbl[] = {
{ ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
{ ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
{ ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
@@ -283,15 +286,15 @@ unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
};
if (SrcTy.isVector() && ST->hasNEON()) {
- int Idx = ConvertCostTableLookup<MVT>(NEONVectorConversionTbl,
- array_lengthof(NEONVectorConversionTbl),
- ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT());
+ int Idx = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
+ DstTy.getSimpleVT(), SrcTy.getSimpleVT());
if (Idx != -1)
return NEONVectorConversionTbl[Idx].Cost;
}
// Scalar float to integer conversions.
- static const TypeConversionCostTblEntry<MVT> NEONFloatConversionTbl[] = {
+ static const TypeConversionCostTblEntry<MVT::SimpleValueType>
+ NEONFloatConversionTbl[] = {
{ ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
{ ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
{ ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
@@ -314,16 +317,15 @@ unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
{ ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
};
if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
- int Idx = ConvertCostTableLookup<MVT>(NEONFloatConversionTbl,
- array_lengthof(NEONFloatConversionTbl),
- ISD, DstTy.getSimpleVT(),
- SrcTy.getSimpleVT());
+ int Idx = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
+ DstTy.getSimpleVT(), SrcTy.getSimpleVT());
if (Idx != -1)
return NEONFloatConversionTbl[Idx].Cost;
}
// Scalar integer to float conversions.
- static const TypeConversionCostTblEntry<MVT> NEONIntegerConversionTbl[] = {
+ static const TypeConversionCostTblEntry<MVT::SimpleValueType>
+ NEONIntegerConversionTbl[] = {
{ ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
{ ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
{ ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
@@ -347,16 +349,15 @@ unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
};
if (SrcTy.isInteger() && ST->hasNEON()) {
- int Idx = ConvertCostTableLookup<MVT>(NEONIntegerConversionTbl,
- array_lengthof(NEONIntegerConversionTbl),
- ISD, DstTy.getSimpleVT(),
- SrcTy.getSimpleVT());
+ int Idx = ConvertCostTableLookup(NEONIntegerConversionTbl, ISD,
+ DstTy.getSimpleVT(), SrcTy.getSimpleVT());
if (Idx != -1)
return NEONIntegerConversionTbl[Idx].Cost;
}
// Scalar integer conversion costs.
- static const TypeConversionCostTblEntry<MVT> ARMIntegerConversionTbl[] = {
+ static const TypeConversionCostTblEntry<MVT::SimpleValueType>
+ ARMIntegerConversionTbl[] = {
// i16 -> i64 requires two dependent operations.
{ ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
@@ -368,11 +369,8 @@ unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
};
if (SrcTy.isInteger()) {
- int Idx =
- ConvertCostTableLookup<MVT>(ARMIntegerConversionTbl,
- array_lengthof(ARMIntegerConversionTbl),
- ISD, DstTy.getSimpleVT(),
- SrcTy.getSimpleVT());
+ int Idx = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
+ DstTy.getSimpleVT(), SrcTy.getSimpleVT());
if (Idx != -1)
return ARMIntegerConversionTbl[Idx].Cost;
}
@@ -400,7 +398,8 @@ unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
// On NEON a vector select gets lowered to vbsl.
if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
// Lowering of some vector selects is currently far from perfect.
- static const TypeConversionCostTblEntry<MVT> NEONVectorSelectTbl[] = {
+ static const TypeConversionCostTblEntry<MVT::SimpleValueType>
+ NEONVectorSelectTbl[] = {
{ ISD::SELECT, MVT::v16i1, MVT::v16i16, 2*16 + 1 + 3*1 + 4*1 },
{ ISD::SELECT, MVT::v8i1, MVT::v8i32, 4*8 + 1*3 + 1*4 + 1*2 },
{ ISD::SELECT, MVT::v16i1, MVT::v16i32, 4*16 + 1*6 + 1*8 + 1*4 },
@@ -411,12 +410,13 @@ unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
EVT SelCondTy = TLI->getValueType(CondTy);
EVT SelValTy = TLI->getValueType(ValTy);
- int Idx = ConvertCostTableLookup<MVT>(NEONVectorSelectTbl,
- array_lengthof(NEONVectorSelectTbl),
- ISD, SelCondTy.getSimpleVT(),
- SelValTy.getSimpleVT());
- if (Idx != -1)
- return NEONVectorSelectTbl[Idx].Cost;
+ if (SelCondTy.isSimple() && SelValTy.isSimple()) {
+ int Idx = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
+ SelCondTy.getSimpleVT(),
+ SelValTy.getSimpleVT());
+ if (Idx != -1)
+ return NEONVectorSelectTbl[Idx].Cost;
+ }
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
return LT.first;
@@ -425,7 +425,16 @@ unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
-unsigned ARMTTI::getAddressComputationCost(Type *Ty) const {
+unsigned ARMTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
+ // Address computations in vectorized code with non-consecutive addresses will
+ // likely result in more instructions compared to scalar code where the
+ // computation can more often be merged into the index mode. The resulting
+ // extra micro-ops can significantly decrease throughput.
+ unsigned NumVectorInstToHideOverhead = 10;
+
+ if (Ty->isVectorTy() && IsComplex)
+ return NumVectorInstToHideOverhead;
+
// In many cases the address computation is not merged into the instruction
// addressing mode.
return 1;
@@ -437,7 +446,7 @@ unsigned ARMTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
if (Kind != SK_Reverse)
return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
- static const CostTblEntry<MVT> NEONShuffleTbl[] = {
+ static const CostTblEntry<MVT::SimpleValueType> NEONShuffleTbl[] = {
// Reverse shuffle cost one instruction if we are shuffling within a double
// word (vrev) or two if we shuffle a quad word (vrev, vext).
{ ISD::VECTOR_SHUFFLE, MVT::v2i32, 1 },
@@ -453,8 +462,7 @@ unsigned ARMTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
- int Idx = CostTableLookup<MVT>(NEONShuffleTbl, array_lengthof(NEONShuffleTbl),
- ISD::VECTOR_SHUFFLE, LT.second);
+ int Idx = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
if (Idx == -1)
return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
@@ -469,7 +477,7 @@ unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueK
const unsigned FunctionCallDivCost = 20;
const unsigned ReciprocalDivCost = 10;
- static const CostTblEntry<MVT> CostTbl[] = {
+ static const CostTblEntry<MVT::SimpleValueType> CostTbl[] = {
// Division.
// These costs are somewhat random. Choose a cost of 20 to indicate that
// vectorizing division (added function call) is going to be very expensive.
@@ -513,14 +521,37 @@ unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueK
int Idx = -1;
if (ST->hasNEON())
- Idx = CostTableLookup<MVT>(CostTbl, array_lengthof(CostTbl), ISDOpcode,
- LT.second);
+ Idx = CostTableLookup(CostTbl, ISDOpcode, LT.second);
if (Idx != -1)
return LT.first * CostTbl[Idx].Cost;
-
- return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
- Op2Info);
+ unsigned Cost =
+ TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
+
+ // This is somewhat of a hack. The problem that we are facing is that SROA
+ // creates a sequence of shift, and, or instructions to construct values.
+ // These sequences are recognized by the ISel and have zero-cost. Not so for
+ // the vectorized code. Because we have support for v2i64 but not i64 those
+ // sequences look particularly beneficial to vectorize.
+ // To work around this we increase the cost of v2i64 operations to make them
+ // seem less beneficial.
+ if (LT.second == MVT::v2i64 &&
+ Op2Info == TargetTransformInfo::OK_UniformConstantValue)
+ Cost += 4;
+
+ return Cost;
}
+unsigned ARMTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) const {
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
+
+ if (Src->isVectorTy() && Alignment != 16 &&
+ Src->getVectorElementType()->isDoubleTy()) {
+ // Unaligned loads/stores are extremely inefficient.
+ // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
+ return LT.first * 4;
+ }
+ return LT.first;
+}
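The getMemoryOpCost() override above charges roughly four times the legalized cost for vector-of-double accesses that are not 16-byte aligned, since those lower to vld1/vst1 (about 4 uops) instead of vldr/vstr; the v2i64 adjustment in getArithmeticInstrCost() plays a similar role for SROA-produced shift/and/or chains. A standalone restatement of the memory-cost rule, with hypothetical helper and parameter names:

    // Illustrative sketch only; mirrors the rule in ARMTTI::getMemoryOpCost above.
    unsigned vectorOfDoubleMemCost(unsigned LegalizedParts, unsigned Alignment,
                                   bool ElementIsDouble) {
      if (ElementIsDouble && Alignment != 16)
        return LegalizedParts * 4;  // unaligned: vld1/vst1 path
      return LegalizedParts;        // aligned: vldr/vstr path
    }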
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 1dd2953b29b9..e3f9e0dc609a 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -7,6 +7,9 @@
//
//===----------------------------------------------------------------------===//
+#include "ARMBuildAttrs.h"
+#include "ARMFPUName.h"
+#include "ARMFeatures.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
@@ -24,6 +27,7 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
@@ -47,11 +51,33 @@ enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
class ARMAsmParser : public MCTargetAsmParser {
MCSubtargetInfo &STI;
MCAsmParser &Parser;
+ const MCInstrInfo &MII;
const MCRegisterInfo *MRI;
+ ARMTargetStreamer &getTargetStreamer() {
+ MCTargetStreamer &TS = getParser().getStreamer().getTargetStreamer();
+ return static_cast<ARMTargetStreamer &>(TS);
+ }
+
+ // Unwind directives state
+ SMLoc FnStartLoc;
+ SMLoc CantUnwindLoc;
+ SMLoc PersonalityLoc;
+ SMLoc HandlerDataLoc;
+ int FPReg;
+ void resetUnwindDirectiveParserState() {
+ FnStartLoc = SMLoc();
+ CantUnwindLoc = SMLoc();
+ PersonalityLoc = SMLoc();
+ HandlerDataLoc = SMLoc();
+ FPReg = -1;
+ }
+
// Map of register aliases registers via the .req directive.
StringMap<unsigned> RegisterReqs;
+ bool NextSymbolIsThumb;
+
struct {
ARMCC::CondCodes Cond; // Condition for IT block.
unsigned Mask:4; // Condition mask for instructions.
@@ -76,7 +102,7 @@ class ARMAsmParser : public MCTargetAsmParser {
if (!inITBlock()) return;
// Move to the next instruction in the IT block, if there is one. If not,
// mark the block as done.
- unsigned TZ = CountTrailingZeros_32(ITState.Mask);
+ unsigned TZ = countTrailingZeros(ITState.Mask);
if (++ITState.CurPosition == 5 - TZ)
ITState.CurPosition = ~0U; // Done with the IT block after this.
}
@@ -113,11 +139,22 @@ class ARMAsmParser : public MCTargetAsmParser {
bool parseDirectiveUnreq(SMLoc L);
bool parseDirectiveArch(SMLoc L);
bool parseDirectiveEabiAttr(SMLoc L);
+ bool parseDirectiveCPU(SMLoc L);
+ bool parseDirectiveFPU(SMLoc L);
+ bool parseDirectiveFnStart(SMLoc L);
+ bool parseDirectiveFnEnd(SMLoc L);
+ bool parseDirectiveCantUnwind(SMLoc L);
+ bool parseDirectivePersonality(SMLoc L);
+ bool parseDirectiveHandlerData(SMLoc L);
+ bool parseDirectiveSetFP(SMLoc L);
+ bool parseDirectivePad(SMLoc L);
+ bool parseDirectiveRegSave(SMLoc L, bool IsVector);
StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
bool &CarrySetting, unsigned &ProcessorIMod,
StringRef &ITMask);
- void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
+ void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
+ bool &CanAcceptCarrySet,
bool &CanAcceptPredicationCode);
bool isThumb() const {
@@ -130,12 +167,25 @@ class ARMAsmParser : public MCTargetAsmParser {
bool isThumbTwo() const {
return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
}
+ bool hasThumb() const {
+ return STI.getFeatureBits() & ARM::HasV4TOps;
+ }
bool hasV6Ops() const {
return STI.getFeatureBits() & ARM::HasV6Ops;
}
+ bool hasV6MOps() const {
+ return STI.getFeatureBits() & ARM::HasV6MOps;
+ }
bool hasV7Ops() const {
return STI.getFeatureBits() & ARM::HasV7Ops;
}
+ bool hasV8Ops() const {
+ return STI.getFeatureBits() & ARM::HasV8Ops;
+ }
+ bool hasARM() const {
+ return !(STI.getFeatureBits() & ARM::FeatureNoARM);
+ }
+
void SwitchMode() {
unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
setAvailableFeatures(FB);
@@ -161,6 +211,8 @@ class ARMAsmParser : public MCTargetAsmParser {
SmallVectorImpl<MCParsedAsmOperand*>&);
OperandMatchResultTy parseMemBarrierOptOperand(
SmallVectorImpl<MCParsedAsmOperand*>&);
+ OperandMatchResultTy parseInstSyncBarrierOptOperand(
+ SmallVectorImpl<MCParsedAsmOperand*>&);
OperandMatchResultTy parseProcIFlagsOperand(
SmallVectorImpl<MCParsedAsmOperand*>&);
OperandMatchResultTy parseMSRMaskOperand(
@@ -185,51 +237,19 @@ class ARMAsmParser : public MCTargetAsmParser {
SMLoc &EndLoc);
// Asm Match Converter Methods
- void cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtLdWriteBackRegAddrMode2(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtStWriteBackRegAddrMode2(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtStWriteBackRegAddrMode3(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtLdExtTWriteBackImm(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtLdExtTWriteBackReg(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtStExtTWriteBackImm(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtStExtTWriteBackReg(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtLdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtStrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtLdWriteBackRegAddrMode3(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
void cvtThumbMultiply(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtVLDwbFixed(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtVLDwbRegister(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtVSTwbFixed(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtVSTwbRegister(MCInst &Inst,
+ void cvtThumbBranches(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
+
bool validateInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
bool processInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
bool shouldOmitCCOutOperand(StringRef Mnemonic,
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
+ bool shouldOmitPredicateOperand(StringRef Mnemonic,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands);
public:
enum ARMMatchResultTy {
Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
@@ -241,12 +261,13 @@ public:
};
- ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
- : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
+ ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
+ const MCInstrInfo &MII)
+ : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(MII), FPReg(-1) {
MCAsmParserExtension::Initialize(_Parser);
// Cache the MCRegisterInfo.
- MRI = &getContext().getRegisterInfo();
+ MRI = getContext().getRegisterInfo();
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
@@ -254,12 +275,7 @@ public:
// Not in an ITBlock to start with.
ITState.CurPosition = ~0U;
- // Set ELF header flags.
- // FIXME: This should eventually end up somewhere else where more
- // intelligent flag decisions can be made. For now we are just maintaining
- // the status quo for ARM and setting EF_ARM_EABI_VER5 as the default.
- if (MCELFStreamer *MES = dyn_cast<MCELFStreamer>(&Parser.getStreamer()))
- MES->getAssembler().setELFHeaderEFlags(ELF::EF_ARM_EABI_VER5);
+ NextSymbolIsThumb = false;
}
// Implementation of the MCTargetAsmParser interface:
@@ -276,6 +292,8 @@ public:
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
MCStreamer &Out, unsigned &ErrorInfo,
bool MatchingInlineAsm);
+ void onLabelParsed(MCSymbol *Symbol);
+
};
} // end anonymous namespace
@@ -293,6 +311,7 @@ class ARMOperand : public MCParsedAsmOperand {
k_CoprocOption,
k_Immediate,
k_MemBarrierOpt,
+ k_InstSyncBarrierOpt,
k_Memory,
k_PostIndexRegister,
k_MSRMask,
@@ -336,6 +355,10 @@ class ARMOperand : public MCParsedAsmOperand {
ARM_MB::MemBOpt Val;
};
+ struct ISBOptOp {
+ ARM_ISB::InstSyncBOpt Val;
+ };
+
struct IFlagsOp {
ARM_PROC::IFlags Val;
};
@@ -422,6 +445,7 @@ class ARMOperand : public MCParsedAsmOperand {
struct CopOp Cop;
struct CoprocOptionOp CoprocOption;
struct MBOptOp MBOpt;
+ struct ISBOptOp ISBOpt;
struct ITMaskOp ITMask;
struct IFlagsOp IFlags;
struct MMaskOp MMask;
@@ -482,6 +506,8 @@ public:
case k_MemBarrierOpt:
MBOpt = o.MBOpt;
break;
+ case k_InstSyncBarrierOpt:
+ ISBOpt = o.ISBOpt;
case k_Memory:
Memory = o.Memory;
break;
@@ -564,6 +590,11 @@ public:
return MBOpt.Val;
}
+ ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
+ assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
+ return ISBOpt.Val;
+ }
+
ARM_PROC::IFlags getProcIFlags() const {
assert(Kind == k_ProcIFlags && "Invalid access!");
return IFlags.Val;
@@ -582,6 +613,56 @@ public:
bool isITMask() const { return Kind == k_ITCondMask; }
bool isITCondCode() const { return Kind == k_CondCode; }
bool isImm() const { return Kind == k_Immediate; }
+ // checks whether this operand is an unsigned offset which fits in a field
+ // of specified width and scaled by a specific number of bits
+ template<unsigned width, unsigned scale>
+ bool isUnsignedOffset() const {
+ if (!isImm()) return false;
+ if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+ int64_t Val = CE->getValue();
+ int64_t Align = 1LL << scale;
+ int64_t Max = Align * ((1LL << width) - 1);
+ return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
+ }
+ return false;
+ }
+ // checks whether this operand is a signed offset which fits in a field
+ // of specified width and scaled by a specific number of bits
+ template<unsigned width, unsigned scale>
+ bool isSignedOffset() const {
+ if (!isImm()) return false;
+ if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+ int64_t Val = CE->getValue();
+ int64_t Align = 1LL << scale;
+ int64_t Max = Align * ((1LL << (width-1)) - 1);
+ int64_t Min = -Align * (1LL << (width-1));
+ return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
+ }
+ return false;
+ }
+
+ // checks whether this operand is a memory operand computed as an offset
+ // applied to PC. The offset may have 8 bits of magnitude and is represented
+ // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
+ // relocatable expression...
+ bool isThumbMemPC() const {
+ int64_t Val = 0;
+ if (isImm()) {
+ if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
+ if (!CE) return false;
+ Val = CE->getValue();
+ }
+ else if (isMem()) {
+ if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
+ if(Memory.BaseRegNum != ARM::PC) return false;
+ Val = Memory.OffsetImm->getValue();
+ }
+ else return false;
+ return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
+ }
bool isFPImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
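The isUnsignedOffset/isSignedOffset templates added in the hunk above boil down to a small width/scale computation. A self-contained sketch of the unsigned case (not part of the patch); with width=8 and scale=2 it accepts exactly the multiples of 4 in [0, 1020] that isThumbMemPC() also checks:

    #include <cassert>
    #include <cstdint>

    // Standalone restatement of isUnsignedOffset<width, scale>, illustrative only.
    template <unsigned width, unsigned scale>
    bool fitsUnsignedOffset(int64_t Val) {
      int64_t Align = 1LL << scale;                // offset must be a multiple of 2^scale
      int64_t Max = Align * ((1LL << width) - 1);  // largest encodable offset
      return (Val % Align) == 0 && Val >= 0 && Val <= Max;
    }

    int main() {
      assert(fitsUnsignedOffset<8, 2>(1020));   // in range, multiple of 4
      assert(!fitsUnsignedOffset<8, 2>(1022));  // not a multiple of 4
      assert(!fitsUnsignedOffset<8, 2>(1024));  // out of range
      return 0;
    }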
@@ -610,13 +691,6 @@ public:
int64_t Value = CE->getValue();
return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
}
- bool isImm0_4() const {
- if (!isImm()) return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
- int64_t Value = CE->getValue();
- return Value >= 0 && Value < 5;
- }
bool isImm0_1020s4() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
@@ -639,6 +713,13 @@ public:
// explicitly exclude zero. we want that to use the normal 0_508 version.
return ((Value & 3) == 0) && Value > 0 && Value <= 508;
}
+ bool isImm0_239() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value >= 0 && Value < 240;
+ }
bool isImm0_255() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
@@ -800,6 +881,15 @@ public:
int64_t Value = CE->getValue();
return Value >= 0 && Value < 65536;
}
+ bool isImm256_65535Expr() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // If it's not a constant expression, it'll generate a fixup and be
+ // handled later.
+ if (!CE) return true;
+ int64_t Value = CE->getValue();
+ return Value >= 256 && Value < 65536;
+ }
bool isImm0_65535Expr() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
@@ -879,7 +969,8 @@ public:
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
- return ARM_AM::getT2SOImmVal(~Value) != -1;
+ return ARM_AM::getT2SOImmVal(Value) == -1 &&
+ ARM_AM::getT2SOImmVal(~Value) != -1;
}
bool isT2SOImmNeg() const {
if (!isImm()) return false;
@@ -903,6 +994,7 @@ public:
bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
bool isToken() const { return Kind == k_Token; }
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
+ bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
bool isMem() const { return Kind == k_Memory; }
bool isShifterImm() const { return Kind == k_ShifterImmediate; }
bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
@@ -949,7 +1041,7 @@ public:
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
- return Val > -4096 && Val < 4096;
+ return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
}
bool isAddrMode3() const {
// If we have an immediate that's not a constant, treat it as a label
@@ -1659,6 +1751,37 @@ public:
Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
}
+ void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
+ if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue() >> 2));
+ return;
+ }
+
+ const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
+ assert(SR && "Unknown value type!");
+ Inst.addOperand(MCOperand::CreateExpr(SR));
+ }
+
+ void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ if (isImm()) {
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (CE) {
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ return;
+ }
+
+ const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
+ assert(SR && "Unknown value type!");
+ Inst.addOperand(MCOperand::CreateExpr(SR));
+ return;
+ }
+
+ assert(isMem() && "Unknown value type!");
+ assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
+ Inst.addOperand(MCOperand::CreateImm(Memory.OffsetImm->getValue()));
+ }
+
void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The operand is actually a so_imm, but we have its bitwise
@@ -1680,6 +1803,11 @@ public:
Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
}
+ void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(unsigned(getInstSyncBarrierOpt())));
+ }
+
void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
@@ -1688,8 +1816,6 @@ public:
void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
int32_t Imm = Memory.OffsetImm->getValue();
- // FIXME: Handle #-0
- if (Imm == INT32_MIN) Imm = 0;
Inst.addOperand(MCOperand::CreateImm(Imm));
}
@@ -2228,21 +2354,24 @@ public:
}
static ARMOperand *
- CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
+ CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned> > &Regs,
SMLoc StartLoc, SMLoc EndLoc) {
+ assert (Regs.size() > 0 && "RegList contains no registers?");
KindTy Kind = k_RegisterList;
- if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
+ if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
Kind = k_DPRRegisterList;
else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
- contains(Regs.front().first))
+ contains(Regs.front().second))
Kind = k_SPRRegisterList;
+ // Sort based on the register encoding values.
+ array_pod_sort(Regs.begin(), Regs.end());
+
ARMOperand *Op = new ARMOperand(Kind);
- for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
+ for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
I = Regs.begin(), E = Regs.end(); I != E; ++I)
- Op->Registers.push_back(I->first);
- array_pod_sort(Op->Registers.begin(), Op->Registers.end());
+ Op->Registers.push_back(I->second);
Op->StartLoc = StartLoc;
Op->EndLoc = EndLoc;
return Op;
@@ -2345,6 +2474,15 @@ public:
return Op;
}
+ static ARMOperand *CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt,
+ SMLoc S) {
+ ARMOperand *Op = new ARMOperand(k_InstSyncBarrierOpt);
+ Op->ISBOpt.Val = Opt;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
+
static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
ARMOperand *Op = new ARMOperand(k_ProcIFlags);
Op->IFlags.Val = IFlags;
@@ -2397,7 +2535,10 @@ void ARMOperand::print(raw_ostream &OS) const {
getImm()->print(OS);
break;
case k_MemBarrierOpt:
- OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
+ OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
+ break;
+ case k_InstSyncBarrierOpt:
+ OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
break;
case k_Memory:
OS << "<memory "
@@ -2731,8 +2872,9 @@ static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
return -1;
switch (Name[2]) {
default: return -1;
- case '0': return 10;
- case '1': return 11;
+ // p10 and p11 are invalid for coproc instructions (reserved for FP/NEON)
+ case '0': return CoprocOp == 'p'? -1: 10;
+ case '1': return CoprocOp == 'p'? -1: 11;
case '2': return 12;
case '3': return 13;
case '4': return 14;
@@ -2910,12 +3052,14 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// The reglist instructions have at most 16 registers, so reserve
// space for that many.
- SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
+ int EReg = 0;
+ SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
// Allow Q regs and just interpret them as the two D sub-registers.
if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
Reg = getDRegFromQReg(Reg);
- Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+ EReg = MRI->getEncodingValue(Reg);
+ Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
++Reg;
}
const MCRegisterClass *RC;
@@ -2929,7 +3073,8 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return Error(RegLoc, "invalid register in register list");
// Store the register.
- Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+ EReg = MRI->getEncodingValue(Reg);
+ Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
// This starts immediately after the first register token in the list,
// so we can see either a comma or a minus (range separator) as a legal
@@ -2959,7 +3104,8 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Add all the registers in the range to the register list.
while (Reg != EndReg) {
Reg = getNextRegister(Reg);
- Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+ EReg = MRI->getEncodingValue(Reg);
+ Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
}
continue;
}
@@ -2992,14 +3138,15 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
continue;
}
// VFP register lists must also be contiguous.
- // It's OK to use the enumeration values directly here rather, as the
- // VFP register classes have the enum sorted properly.
if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
Reg != OldReg + 1)
return Error(RegLoc, "non-contiguous register range");
- Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
- if (isQReg)
- Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
+ EReg = MRI->getEncodingValue(Reg);
+ Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
+ if (isQReg) {
+ EReg = MRI->getEncodingValue(++Reg);
+ Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
+ }
}
if (Parser.getTok().isNot(AsmToken::RCurly))
@@ -3036,7 +3183,7 @@ parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
// There's an optional '#' token here. Normally there wouldn't be, but
// inline assemble puts one in, and it's friendly to accept that.
if (Parser.getTok().is(AsmToken::Hash))
- Parser.Lex(); // Eat the '#'
+ Parser.Lex(); // Eat '#' or '$'.
const MCExpr *LaneIndex;
SMLoc Loc = Parser.getTok().getLoc();
@@ -3334,18 +3481,27 @@ parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
.Case("sy", ARM_MB::SY)
.Case("st", ARM_MB::ST)
+ .Case("ld", ARM_MB::LD)
.Case("sh", ARM_MB::ISH)
.Case("ish", ARM_MB::ISH)
.Case("shst", ARM_MB::ISHST)
.Case("ishst", ARM_MB::ISHST)
+ .Case("ishld", ARM_MB::ISHLD)
.Case("nsh", ARM_MB::NSH)
.Case("un", ARM_MB::NSH)
.Case("nshst", ARM_MB::NSHST)
+ .Case("nshld", ARM_MB::NSHLD)
.Case("unst", ARM_MB::NSHST)
.Case("osh", ARM_MB::OSH)
.Case("oshst", ARM_MB::OSHST)
+ .Case("oshld", ARM_MB::OSHLD)
.Default(~0U);
+ // ishld, oshld, nshld and ld are only available from ARMv8.
+ if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
+ Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
+ Opt = ~0U;
+
if (Opt == ~0U)
return MatchOperand_NoMatch;
@@ -3354,7 +3510,7 @@ parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Tok.is(AsmToken::Dollar) ||
Tok.is(AsmToken::Integer)) {
if (Parser.getTok().isNot(AsmToken::Integer))
- Parser.Lex(); // Eat the '#'.
+ Parser.Lex(); // Eat '#' or '$'.
SMLoc Loc = Parser.getTok().getLoc();
const MCExpr *MemBarrierID;
@@ -3383,6 +3539,57 @@ parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return MatchOperand_Success;
}
+/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
+ARMAsmParser::OperandMatchResultTy ARMAsmParser::
+parseInstSyncBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ SMLoc S = Parser.getTok().getLoc();
+ const AsmToken &Tok = Parser.getTok();
+ unsigned Opt;
+
+ if (Tok.is(AsmToken::Identifier)) {
+ StringRef OptStr = Tok.getString();
+
+ if (OptStr.equals_lower("sy"))
+ Opt = ARM_ISB::SY;
+ else
+ return MatchOperand_NoMatch;
+
+ Parser.Lex(); // Eat identifier token.
+ } else if (Tok.is(AsmToken::Hash) ||
+ Tok.is(AsmToken::Dollar) ||
+ Tok.is(AsmToken::Integer)) {
+ if (Parser.getTok().isNot(AsmToken::Integer))
+ Parser.Lex(); // Eat '#' or '$'.
+ SMLoc Loc = Parser.getTok().getLoc();
+
+ const MCExpr *ISBarrierID;
+ if (getParser().parseExpression(ISBarrierID)) {
+ Error(Loc, "illegal expression");
+ return MatchOperand_ParseFail;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
+ if (!CE) {
+ Error(Loc, "constant expression expected");
+ return MatchOperand_ParseFail;
+ }
+
+ int Val = CE->getValue();
+ if (Val & ~0xf) {
+ Error(Loc, "immediate value out of range");
+ return MatchOperand_ParseFail;
+ }
+
+ Opt = ARM_ISB::RESERVED_0 + Val;
+ } else
+ return MatchOperand_ParseFail;
+
+ Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
+ (ARM_ISB::InstSyncBOpt)Opt, S));
+ return MatchOperand_Success;
+}
+
+
/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -3602,7 +3809,7 @@ parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Error(S, "'be' or 'le' operand expected");
return MatchOperand_ParseFail;
}
- int Val = StringSwitch<int>(Tok.getString())
+ int Val = StringSwitch<int>(Tok.getString().lower())
.Case("be", 1)
.Case("le", 0)
.Default(-1);
@@ -3875,7 +4082,7 @@ parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Do immediates first, as we always parse those if we have a '#'.
if (Parser.getTok().is(AsmToken::Hash) ||
Parser.getTok().is(AsmToken::Dollar)) {
- Parser.Lex(); // Eat the '#'.
+ Parser.Lex(); // Eat '#' or '$'.
// Explicitly look for a '-', as we need to encode negative zero
// differently.
bool isNegative = Parser.getTok().is(AsmToken::Minus);
@@ -3926,260 +4133,9 @@ parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return MatchOperand_Success;
}
-/// cvtT2LdrdPre - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtT2LdrdPre(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Rt, Rt2
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateReg(0));
- // addr
- ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtT2StrdPre - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtT2StrdPre(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateReg(0));
- // Rt, Rt2
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
- // addr
- ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
-
- ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdWriteBackRegAddrMode2(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
-
- ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
-
- ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-
-/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStWriteBackRegAddrMode2(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStWriteBackRegAddrMode3(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdExtTWriteBackImm(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Rt
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- // addr
- ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
- // offset
- ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdExtTWriteBackReg(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Rt
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- // addr
- ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
- // offset
- ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStExtTWriteBackImm(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- // Rt
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- // addr
- ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
- // offset
- ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStExtTWriteBackReg(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- // Rt
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- // addr
- ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
- // offset
- ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdrdPre - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdrdPre(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Rt, Rt2
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- // addr
- ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStrdPre - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStrdPre(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- // Rt, Rt2
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
- // addr
- ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdWriteBackRegAddrMode3(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtThumbMultiply - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
+/// Convert parsed operands to MCInst. Needed here because this instruction
+/// only has two register operands, but multiplication is commutative so
+/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
void ARMAsmParser::
cvtThumbMultiply(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -4198,59 +4154,62 @@ cvtThumbMultiply(MCInst &Inst,
}
void ARMAsmParser::
-cvtVLDwbFixed(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Vd
- ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- // Vn
- ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
+cvtThumbBranches(MCInst &Inst,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ int CondOp = -1, ImmOp = -1;
+ switch(Inst.getOpcode()) {
+ case ARM::tB:
+ case ARM::tBcc: CondOp = 1; ImmOp = 2; break;
-void ARMAsmParser::
-cvtVLDwbRegister(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Vd
- ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- // Vn
- ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
- // Vm
- ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
+ case ARM::t2B:
+ case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
-void ARMAsmParser::
-cvtVSTwbFixed(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- // Vn
- ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
- // Vt
- ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-void ARMAsmParser::
-cvtVSTwbRegister(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Create a writeback register dummy placeholder.
- Inst.addOperand(MCOperand::CreateImm(0));
- // Vn
- ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
- // Vm
- ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
- // Vt
- ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
- // pred
- ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
+ default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
+ }
+ // first decide whether or not the branch should be conditional
+ // by looking at its location relative to an IT block
+ if(inITBlock()) {
+ // inside an IT block we cannot have any conditional branches; any
+ // such instruction needs to be converted to unconditional form
+ switch(Inst.getOpcode()) {
+ case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
+ case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
+ }
+ } else {
+ // outside IT blocks we can only have unconditional branches with AL
+ // condition code or conditional branches with non-AL condition code
+ unsigned Cond = static_cast<ARMOperand*>(Operands[CondOp])->getCondCode();
+ switch(Inst.getOpcode()) {
+ case ARM::tB:
+ case ARM::tBcc:
+ Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
+ break;
+ case ARM::t2B:
+ case ARM::t2Bcc:
+ Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
+ break;
+ }
+ }
+
+ // now decide on encoding size based on branch target range
+ switch(Inst.getOpcode()) {
+ // classify tB as either t2B or t1B based on range of immediate operand
+ case ARM::tB: {
+ ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
+ if(!op->isSignedOffset<11, 1>() && isThumbTwo())
+ Inst.setOpcode(ARM::t2B);
+ break;
+ }
+ // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
+ case ARM::tBcc: {
+ ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
+ if(!op->isSignedOffset<8, 1>() && isThumbTwo())
+ Inst.setOpcode(ARM::t2Bcc);
+ break;
+ }
+ }
+ ((ARMOperand*)Operands[ImmOp])->addImmOperands(Inst, 1);
+ ((ARMOperand*)Operands[CondOp])->addCondCodeOperands(Inst, 2);
}
/// Parse an ARM memory expression; return false if successful, else return true
@@ -4354,7 +4313,7 @@ parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Parser.getTok().is(AsmToken::Dollar) ||
Parser.getTok().is(AsmToken::Integer)) {
if (Parser.getTok().isNot(AsmToken::Integer))
- Parser.Lex(); // Eat the '#'.
+ Parser.Lex(); // Eat '#' or '$'.
E = Parser.getTok().getLoc();
bool isNegative = getParser().getTok().is(AsmToken::Minus);
@@ -4536,7 +4495,7 @@ parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
TyOp->getToken() != ".f64"))
return MatchOperand_NoMatch;
- Parser.Lex(); // Eat the '#'.
+ Parser.Lex(); // Eat '#' or '$'.
// Handle negation, as that still comes through as a separate token.
bool isNegative = false;
@@ -4752,11 +4711,14 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
- Mnemonic == "vaclt" || Mnemonic == "vacle" ||
+ Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
- Mnemonic == "fmuls")
+ Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
+ Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
+ Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
+ Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic.startswith("vsel"))
return Mnemonic;
// First, split out any predication code. Ignore mnemonics we know aren't
@@ -4836,8 +4798,8 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::
-getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
- bool &CanAcceptPredicationCode) {
+getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
+ bool &CanAcceptCarrySet, bool &CanAcceptPredicationCode) {
if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
Mnemonic == "add" || Mnemonic == "adc" ||
@@ -4853,28 +4815,35 @@ getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
} else
CanAcceptCarrySet = false;
- if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
- Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
- Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
- Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
- Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
- (Mnemonic == "clrex" && !isThumb()) ||
- (Mnemonic == "nop" && isThumbOne()) ||
- ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
- Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
- Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
- ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
- !isThumb()) ||
- Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
+ if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
+ Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
+ Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic.startswith("crc32") ||
+ Mnemonic.startswith("cps") || Mnemonic.startswith("vsel") ||
+ Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
+ Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
+ Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
+ Mnemonic == "vrintm" || Mnemonic.startswith("aes") ||
+ Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
+ (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
+ // These mnemonics are never predicable
CanAcceptPredicationCode = false;
+ } else if (!isThumb()) {
+ // Some instructions are only predicable in Thumb mode
+ CanAcceptPredicationCode
+ = Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
+ Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
+ Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
+ Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
+ Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
+ Mnemonic != "stc2" && Mnemonic != "stc2l" &&
+ !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
+ } else if (isThumbOne()) {
+ if (hasV6MOps())
+ CanAcceptPredicationCode = Mnemonic != "movs";
+ else
+ CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
} else
CanAcceptPredicationCode = true;
-
- if (isThumb()) {
- if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
- Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
- CanAcceptPredicationCode = false;
- }
}
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
@@ -4929,15 +4898,6 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
static_cast<ARMOperand*>(Operands[5])->isImm()) {
// Nest conditions rather than one big 'if' statement for readability.
//
- // If either register is a high reg, it's either one of the SP
- // variants (handled above) or a 32-bit encoding, so we just
- // check against T3. If the second register is the PC, this is an
- // alternate form of ADR, which uses encoding T4, so check for that too.
- if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
- !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
- static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
- static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
- return false;
// If both registers are low, we're in an IT block, and the immediate is
// in range, we should use encoding T1 instead, which has a cc_out.
if (inITBlock() &&
@@ -4945,6 +4905,11 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
static_cast<ARMOperand*>(Operands[5])->isImm0_7())
return false;
+ // Check against T3. If the second register is the PC, this is an
+ // alternate form of ADR, which uses encoding T4, so check for that too.
+ if (static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
+ static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
+ return false;
// Otherwise, we use encoding T4, which does not have a cc_out
// operand.
@@ -5007,6 +4972,26 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
return false;
}
+bool ARMAsmParser::shouldOmitPredicateOperand(
+ StringRef Mnemonic, SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
+ unsigned RegIdx = 3;
+ if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
+ static_cast<ARMOperand *>(Operands[2])->getToken() == ".f32") {
+ if (static_cast<ARMOperand *>(Operands[3])->isToken() &&
+ static_cast<ARMOperand *>(Operands[3])->getToken() == ".f32")
+ RegIdx = 4;
+
+ if (static_cast<ARMOperand *>(Operands[RegIdx])->isReg() &&
+ (ARMMCRegisterClasses[ARM::DPRRegClassID]
+ .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg()) ||
+ ARMMCRegisterClasses[ARM::QPRRegClassID]
+ .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg())))
+ return true;
+ }
+ return false;
+}
+
static bool isDataTypeToken(StringRef Tok) {
return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
@@ -5102,7 +5087,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// the matcher deal with finding the right instruction or generating an
// appropriate error.
bool CanAcceptCarrySet, CanAcceptPredicationCode;
- getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
+ getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
// If we had a carry-set on an instruction that can't do that, issue an
// error.
@@ -5153,7 +5138,17 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
continue;
- if (ExtraToken != ".n") {
+ // In ARM mode, generate an error if the .n qualifier is used.
+ if (ExtraToken == ".n" && !isThumb()) {
+ SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
+ return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
+ "arm mode");
+ }
+
+ // The .n qualifier is always discarded as that is what the tables
+ // and matcher expect. In ARM mode the .w qualifier has no effect,
+ // so discard it to avoid errors that can be caused by the matcher.
+ if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
}
@@ -5199,6 +5194,15 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
delete Op;
}
+ // Some instructions have the same mnemonic, but don't always
+ // have a predicate. Distinguish them here and delete the
+ // predicate if needed.
+ if (shouldOmitPredicateOperand(Mnemonic, Operands)) {
+ ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
+ Operands.erase(Operands.begin() + 1);
+ delete Op;
+ }
+
// ARM mode 'blx' need special handling, as the register operand version
// is predicable, but the label operand version is not. So, we can't rely
// on the Mnemonic based checking to correctly figure out when to put
@@ -5218,8 +5222,9 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// expressed as a GPRPair, so we have to manually merge them.
// FIXME: We would really like to be able to tablegen'erate this.
if (!isThumb() && Operands.size() > 4 &&
- (Mnemonic == "ldrexd" || Mnemonic == "strexd")) {
- bool isLoad = (Mnemonic == "ldrexd");
+ (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
+ Mnemonic == "stlexd")) {
+ bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
unsigned Idx = isLoad ? 2 : 3;
ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]);
ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]);
@@ -5250,6 +5255,26 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
}
}
+ // FIXME: As said above, this is all a pretty gross hack. This instruction
+ // does not fit with other "subs" and tblgen.
+ // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
+ // so the Mnemonic is the original name "subs" and delete the predicate
+ // operand so it will match the table entry.
+ if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
+ static_cast<ARMOperand*>(Operands[3])->isReg() &&
+ static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::PC &&
+ static_cast<ARMOperand*>(Operands[4])->isReg() &&
+ static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::LR &&
+ static_cast<ARMOperand*>(Operands[5])->isImm()) {
+ ARMOperand *Op0 = static_cast<ARMOperand*>(Operands[0]);
+ Operands.erase(Operands.begin());
+ delete Op0;
+ Operands.insert(Operands.begin(), ARMOperand::CreateToken(Name, NameLoc));
+
+ ARMOperand *Op1 = static_cast<ARMOperand*>(Operands[1]);
+ Operands.erase(Operands.begin() + 1);
+ delete Op1;
+ }
return false;
}
@@ -5283,45 +5308,44 @@ static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
return false;
}
-// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
-// the ARMInsts array) instead. Getting that here requires awkward
-// API changes, though. Better way?
-namespace llvm {
-extern const MCInstrDesc ARMInsts[];
-}
-static const MCInstrDesc &getInstDesc(unsigned Opcode) {
- return ARMInsts[Opcode];
+// Return true if the instruction has the interesting property of being
+// allowed in IT blocks, but not being predicable.
+static bool instIsBreakpoint(const MCInst &Inst) {
+ return Inst.getOpcode() == ARM::tBKPT ||
+ Inst.getOpcode() == ARM::BKPT ||
+ Inst.getOpcode() == ARM::tHLT ||
+ Inst.getOpcode() == ARM::HLT;
+
}
// FIXME: We would really like to be able to tablegen'erate this.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
+ const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
SMLoc Loc = Operands[0]->getStartLoc();
+
// Check the IT block state first.
- // NOTE: BKPT instruction has the interesting property of being
- // allowed in IT blocks, but not being predicable. It just always
- // executes.
- if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
- Inst.getOpcode() != ARM::BKPT) {
- unsigned bit = 1;
+ // NOTE: BKPT and HLT instructions have the interesting property of being
+ // allowed in IT blocks, but not being predicable. They just always execute.
+ if (inITBlock() && !instIsBreakpoint(Inst)) {
+ unsigned Bit = 1;
if (ITState.FirstCond)
ITState.FirstCond = false;
else
- bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
+ Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
// The instruction must be predicable.
if (!MCID.isPredicable())
return Error(Loc, "instructions in IT block must be predicable");
unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
- unsigned ITCond = bit ? ITState.Cond :
+ unsigned ITCond = Bit ? ITState.Cond :
ARMCC::getOppositeCondition(ITState.Cond);
if (Cond != ITCond) {
// Find the condition code Operand to get its SMLoc information.
SMLoc CondLoc;
- for (unsigned i = 1; i < Operands.size(); ++i)
- if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
- CondLoc = Operands[i]->getStartLoc();
+ for (unsigned I = 1; I < Operands.size(); ++I)
+ if (static_cast<ARMOperand*>(Operands[I])->isCondCode())
+ CondLoc = Operands[I]->getStartLoc();
return Error(CondLoc, "incorrect condition in IT block; got '" +
StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
"', but expected '" +
@@ -5330,20 +5354,55 @@ validateInstruction(MCInst &Inst,
// Check for non-'al' condition codes outside of the IT block.
} else if (isThumbTwo() && MCID.isPredicable() &&
Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
- ARMCC::AL && Inst.getOpcode() != ARM::tB &&
- Inst.getOpcode() != ARM::t2B)
+ ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
+ Inst.getOpcode() != ARM::t2Bcc)
return Error(Loc, "predicated instructions must be in IT block");
- switch (Inst.getOpcode()) {
+ const unsigned Opcode = Inst.getOpcode();
+ switch (Opcode) {
case ARM::LDRD:
case ARM::LDRD_PRE:
case ARM::LDRD_POST: {
+ const unsigned RtReg = Inst.getOperand(0).getReg();
+
+ // Rt can't be R14.
+ if (RtReg == ARM::LR)
+ return Error(Operands[3]->getStartLoc(),
+ "Rt can't be R14");
+
+ const unsigned Rt = MRI->getEncodingValue(RtReg);
+ // Rt must be even-numbered.
+ if ((Rt & 1) == 1)
+ return Error(Operands[3]->getStartLoc(),
+ "Rt must be even-numbered");
+
// Rt2 must be Rt + 1.
- unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
- unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
+ const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
if (Rt2 != Rt + 1)
return Error(Operands[3]->getStartLoc(),
"destination operands must be sequential");
+
+ if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
+ const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
+ // For addressing modes with writeback, the base register needs to be
+ // different from the destination registers.
+ if (Rn == Rt || Rn == Rt2)
+ return Error(Operands[3]->getStartLoc(),
+ "base register needs to be different from destination "
+ "registers");
+ }
+
+ return false;
+ }
+ case ARM::t2LDRDi8:
+ case ARM::t2LDRD_PRE:
+ case ARM::t2LDRD_POST: {
+ // Rt2 must be different from Rt.
+ unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
+ unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
+ if (Rt2 == Rt)
+ return Error(Operands[3]->getStartLoc(),
+ "destination operands can't be identical");
return false;
}
case ARM::STRD: {
@@ -5367,50 +5426,77 @@ validateInstruction(MCInst &Inst,
}
case ARM::SBFX:
case ARM::UBFX: {
- // width must be in range [1, 32-lsb]
- unsigned lsb = Inst.getOperand(2).getImm();
- unsigned widthm1 = Inst.getOperand(3).getImm();
- if (widthm1 >= 32 - lsb)
+ // Width must be in range [1, 32-lsb].
+ unsigned LSB = Inst.getOperand(2).getImm();
+ unsigned Widthm1 = Inst.getOperand(3).getImm();
+ if (Widthm1 >= 32 - LSB)
return Error(Operands[5]->getStartLoc(),
"bitfield width must be in range [1,32-lsb]");
return false;
}
+ // Notionally handles ARM::tLDMIA_UPD too.
case ARM::tLDMIA: {
// If we're parsing Thumb2, the .w variant is available and handles
- // most cases that are normally illegal for a Thumb1 LDM
- // instruction. We'll make the transformation in processInstruction()
- // if necessary.
+ // most cases that are normally illegal for a Thumb1 LDM instruction.
+ // We'll make the transformation in processInstruction() if necessary.
//
// Thumb LDM instructions are writeback iff the base register is not
// in the register list.
unsigned Rn = Inst.getOperand(0).getReg();
- bool hasWritebackToken =
+ bool HasWritebackToken =
(static_cast<ARMOperand*>(Operands[3])->isToken() &&
static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
- bool listContainsBase;
- if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
- return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
+ bool ListContainsBase;
+ if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
+ return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
"registers must be in range r0-r7");
// If we should have writeback, then there should be a '!' token.
- if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
+ if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
return Error(Operands[2]->getStartLoc(),
"writeback operator '!' expected");
// If we should not have writeback, there must not be a '!'. This is
// true even for the 32-bit wide encodings.
- if (listContainsBase && hasWritebackToken)
+ if (ListContainsBase && HasWritebackToken)
return Error(Operands[3]->getStartLoc(),
"writeback operator '!' not allowed when base register "
"in register list");
break;
}
- case ARM::t2LDMIA_UPD: {
+ case ARM::LDMIA_UPD:
+ case ARM::LDMDB_UPD:
+ case ARM::LDMIB_UPD:
+ case ARM::LDMDA_UPD:
+ // ARM variants loading and updating the same register are only officially
+ // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
+ if (!hasV7Ops())
+ break;
+ // Fallthrough
+ case ARM::t2LDMIA_UPD:
+ case ARM::t2LDMDB_UPD:
+ case ARM::t2STMIA_UPD:
+ case ARM::t2STMDB_UPD: {
if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
- return Error(Operands[4]->getStartLoc(),
- "writeback operator '!' not allowed when base register "
- "in register list");
+ return Error(Operands.back()->getStartLoc(),
+ "writeback register not allowed in register list");
break;
}
+ case ARM::sysLDMIA_UPD:
+ case ARM::sysLDMDA_UPD:
+ case ARM::sysLDMDB_UPD:
+ case ARM::sysLDMIB_UPD:
+ if (!listContainsReg(Inst, 3, ARM::PC))
+ return Error(Operands[4]->getStartLoc(),
+ "writeback register only allowed on system LDM "
+ "if PC in register-list");
+ break;
+ case ARM::sysSTMIA_UPD:
+ case ARM::sysSTMDA_UPD:
+ case ARM::sysSTMDB_UPD:
+ case ARM::sysSTMIB_UPD:
+ return Error(Operands[2]->getStartLoc(),
+ "system STM cannot have writeback register");
+ break;
case ARM::tMUL: {
// The second source operand must be the same register as the destination
// operand.
@@ -5434,26 +5520,35 @@ validateInstruction(MCInst &Inst,
// so only issue a diagnostic for thumb1. The instructions will be
// switched to the t2 encodings in processInstruction() if necessary.
case ARM::tPOP: {
- bool listContainsBase;
- if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
+ bool ListContainsBase;
+ if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
!isThumbTwo())
return Error(Operands[2]->getStartLoc(),
"registers must be in range r0-r7 or pc");
break;
}
case ARM::tPUSH: {
- bool listContainsBase;
- if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
+ bool ListContainsBase;
+ if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
!isThumbTwo())
return Error(Operands[2]->getStartLoc(),
"registers must be in range r0-r7 or lr");
break;
}
case ARM::tSTMIA_UPD: {
- bool listContainsBase;
- if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
+ bool ListContainsBase, InvalidLowList;
+ InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
+ 0, ListContainsBase);
+ if (InvalidLowList && !isThumbTwo())
return Error(Operands[4]->getStartLoc(),
"registers must be in range r0-r7");
+
+ // This would be converted to a 32-bit stm, but that's not valid if the
+ // writeback register is in the list.
+ if (InvalidLowList && ListContainsBase)
+ return Error(Operands[4]->getStartLoc(),
+ "writeback operator '!' not allowed when base register "
+ "in register list");
break;
}
case ARM::tADDrSP: {
@@ -5466,6 +5561,28 @@ validateInstruction(MCInst &Inst,
}
break;
}
+ // Final range checking for Thumb unconditional branch instructions.
+ case ARM::tB:
+ if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<11, 1>())
+ return Error(Operands[2]->getStartLoc(), "branch target out of range");
+ break;
+ case ARM::t2B: {
+ int op = (Operands[2]->isImm()) ? 2 : 3;
+ if (!(static_cast<ARMOperand*>(Operands[op]))->isSignedOffset<24, 1>())
+ return Error(Operands[op]->getStartLoc(), "branch target out of range");
+ break;
+ }
+ // Final range checking for Thumb conditional branch instructions.
+ case ARM::tBcc:
+ if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<8, 1>())
+ return Error(Operands[2]->getStartLoc(), "branch target out of range");
+ break;
+ case ARM::t2Bcc: {
+ int Op = (Operands[2]->isImm()) ? 2 : 3;
+ if (!(static_cast<ARMOperand*>(Operands[Op]))->isSignedOffset<20, 1>())
+ return Error(Operands[Op]->getStartLoc(), "branch target out of range");
+ break;
+ }
}
return false;
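The tB/t2B/tBcc/t2Bcc range checks above rely on ARMOperand::isSignedOffset<width, scale>, which is defined elsewhere in this file. A minimal standalone sketch, assuming the usual interpretation (a 'width'-bit signed immediate scaled by 2^scale), with the Thumb branch ranges worked out:

// Illustrative sketch, not part of the patch: assumed model of
// ARMOperand::isSignedOffset<width, scale> as used by the branch checks.
#include <cassert>
#include <cstdint>

template <unsigned width, unsigned scale>
static bool fitsSignedOffset(int64_t Val) {
  int64_t Align = int64_t(1) << scale;                       // granularity
  int64_t Max = Align * ((int64_t(1) << (width - 1)) - 1);   // largest offset
  int64_t Min = -Align * (int64_t(1) << (width - 1));        // smallest offset
  return (Val % Align) == 0 && Val >= Min && Val <= Max;
}

int main() {
  // tBcc uses <8, 1>: halfword-aligned offsets in [-256, +254].
  assert(fitsSignedOffset<8, 1>(-256) && fitsSignedOffset<8, 1>(254));
  assert(!fitsSignedOffset<8, 1>(256) && !fitsSignedOffset<8, 1>(3));
  // tB uses <11, 1>: halfword-aligned offsets in [-2048, +2046].
  assert(fitsSignedOffset<11, 1>(2046) && !fitsSignedOffset<11, 1>(2048));
  return 0;
}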
@@ -5749,7 +5866,9 @@ processInstruction(MCInst &Inst,
case ARM::t2LDRpcrel:
// Select the narrow version if the immediate will fit.
if (Inst.getOperand(1).getImm() > 0 &&
- Inst.getOperand(1).getImm() <= 0xff)
+ Inst.getOperand(1).getImm() <= 0xff &&
+ !(static_cast<ARMOperand*>(Operands[2])->isToken() &&
+ static_cast<ARMOperand*>(Operands[2])->getToken() == ".w"))
Inst.setOpcode(ARM::tLDRpci);
else
Inst.setOpcode(ARM::t2LDRpci);
@@ -7398,11 +7517,10 @@ processInstruction(MCInst &Inst,
MCOperand &MO = Inst.getOperand(1);
unsigned Mask = MO.getImm();
unsigned OrigMask = Mask;
- unsigned TZ = CountTrailingZeros_32(Mask);
+ unsigned TZ = countTrailingZeros(Mask);
if ((Inst.getOperand(0).getImm() & 1) == 0) {
assert(Mask && TZ <= 3 && "illegal IT mask value!");
- for (unsigned i = 3; i != TZ; --i)
- Mask ^= 1 << i;
+ Mask ^= (0xE << TZ) & 0xF;
}
MO.setImm(Mask);
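The closed-form Mask ^= (0xE << TZ) & 0xF above is equivalent to the loop it replaces for every legal 4-bit IT mask (non-zero, at most three trailing zeros); a small self-contained check of that equivalence, included purely as an illustration:

// Illustrative check, not part of the patch: both forms flip mask bits
// (TZ+1)..3 and leave the trailing '1' terminator and lower bits untouched.
#include <cassert>

static unsigned flipWithLoop(unsigned Mask, unsigned TZ) {
  for (unsigned i = 3; i != TZ; --i)     // old form
    Mask ^= 1u << i;
  return Mask;
}

static unsigned flipClosedForm(unsigned Mask, unsigned TZ) {
  return Mask ^ ((0xE << TZ) & 0xF);     // new form
}

int main() {
  for (unsigned Mask = 1; Mask <= 0xF; ++Mask) {
    unsigned TZ = __builtin_ctz(Mask);   // legal IT masks have TZ <= 3
    assert(flipWithLoop(Mask, TZ) == flipClosedForm(Mask, TZ));
  }
  return 0;
}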
@@ -7503,7 +7621,7 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
// 16-bit thumb arithmetic instructions either require or preclude the 'S'
// suffix depending on whether they're in an IT block or not.
unsigned Opc = Inst.getOpcode();
- const MCInstrDesc &MCID = getInstDesc(Opc);
+ const MCInstrDesc &MCID = MII.get(Opc);
if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
assert(MCID.hasOptionalDef() &&
"optionally flag setting instruction missing optional def operand");
@@ -7564,12 +7682,22 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return true;
}
- // Some instructions need post-processing to, for example, tweak which
- // encoding is selected. Loop on it while changes happen so the
- // individual transformations can chain off each other. E.g.,
- // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
- while (processInstruction(Inst, Operands))
- ;
+ { // processInstruction() updates the inITBlock state, so we need to save it away
+ bool wasInITBlock = inITBlock();
+
+ // Some instructions need post-processing to, for example, tweak which
+ // encoding is selected. Loop on it while changes happen so the
+ // individual transformations can chain off each other. E.g.,
+ // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
+ while (processInstruction(Inst, Operands))
+ ;
+
+ // Only after the instruction is fully processed can we validate it
+ if (wasInITBlock && hasV8Ops() && isThumb() &&
+ !isV8EligibleForIT(&Inst, 2)) {
+ Warning(IDLoc, "deprecated instruction in IT block");
+ }
+ }
// Only move forward at the very end so that everything in validate
// and process gets a consistent answer about whether we're in an IT
@@ -7622,15 +7750,15 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return Error(IDLoc, "instruction variant requires ARMv6 or later");
case Match_RequiresThumb2:
return Error(IDLoc, "instruction variant requires Thumb2");
- case Match_ImmRange0_4: {
+ case Match_ImmRange0_15: {
SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
- return Error(ErrorLoc, "immediate operand must be in the range [0,4]");
+ return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
}
- case Match_ImmRange0_15: {
+ case Match_ImmRange0_239: {
SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
- return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
+ return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
}
}
@@ -7658,6 +7786,28 @@ bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
return parseDirectiveArch(DirectiveID.getLoc());
else if (IDVal == ".eabi_attribute")
return parseDirectiveEabiAttr(DirectiveID.getLoc());
+ else if (IDVal == ".cpu")
+ return parseDirectiveCPU(DirectiveID.getLoc());
+ else if (IDVal == ".fpu")
+ return parseDirectiveFPU(DirectiveID.getLoc());
+ else if (IDVal == ".fnstart")
+ return parseDirectiveFnStart(DirectiveID.getLoc());
+ else if (IDVal == ".fnend")
+ return parseDirectiveFnEnd(DirectiveID.getLoc());
+ else if (IDVal == ".cantunwind")
+ return parseDirectiveCantUnwind(DirectiveID.getLoc());
+ else if (IDVal == ".personality")
+ return parseDirectivePersonality(DirectiveID.getLoc());
+ else if (IDVal == ".handlerdata")
+ return parseDirectiveHandlerData(DirectiveID.getLoc());
+ else if (IDVal == ".setfp")
+ return parseDirectiveSetFP(DirectiveID.getLoc());
+ else if (IDVal == ".pad")
+ return parseDirectivePad(DirectiveID.getLoc());
+ else if (IDVal == ".save")
+ return parseDirectiveRegSave(DirectiveID.getLoc(), false);
+ else if (IDVal == ".vsave")
+ return parseDirectiveRegSave(DirectiveID.getLoc(), true);
return true;
}
@@ -7693,6 +7843,9 @@ bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
return Error(L, "unexpected token in directive");
Parser.Lex();
+ if (!hasThumb())
+ return Error(L, "target does not support Thumb mode");
+
if (!isThumb())
SwitchMode();
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
@@ -7706,19 +7859,27 @@ bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
return Error(L, "unexpected token in directive");
Parser.Lex();
+ if (!hasARM())
+ return Error(L, "target does not support ARM mode");
+
if (isThumb())
SwitchMode();
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
return false;
}
+void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
+ if (NextSymbolIsThumb) {
+ getParser().getStreamer().EmitThumbFunc(Symbol);
+ NextSymbolIsThumb = false;
+ }
+}
+
/// parseDirectiveThumbFunc
/// ::= .thumbfunc symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
- const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
- bool isMachO = MAI.hasSubsectionsViaSymbols();
- StringRef Name;
- bool needFuncName = true;
+ const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
+ bool isMachO = MAI->hasSubsectionsViaSymbols();
// Darwin asm optionally has the function name after the .thumb_func directive;
// ELF doesn't.
@@ -7727,29 +7888,19 @@ bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
if (Tok.isNot(AsmToken::EndOfStatement)) {
if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
return Error(L, "unexpected token in .thumb_func directive");
- Name = Tok.getIdentifier();
+ MCSymbol *Func =
+ getParser().getContext().GetOrCreateSymbol(Tok.getIdentifier());
+ getParser().getStreamer().EmitThumbFunc(Func);
Parser.Lex(); // Consume the identifier token.
- needFuncName = false;
+ return false;
}
}
if (getLexer().isNot(AsmToken::EndOfStatement))
return Error(L, "unexpected token in directive");
- // Eat the end of statement and any blank lines that follow.
- while (getLexer().is(AsmToken::EndOfStatement))
- Parser.Lex();
-
- // FIXME: assuming function name will be the line following .thumb_func
- // We really should be checking the next symbol definition even if there's
- // stuff in between.
- if (needFuncName) {
- Name = Parser.getTok().getIdentifier();
- }
+ NextSymbolIsThumb = true;
- // Mark symbol as a thumb symbol.
- MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
- getParser().getStreamer().EmitThumbFunc(Func);
return false;
}
@@ -7795,10 +7946,16 @@ bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
Parser.Lex();
if (Val == 16) {
+ if (!hasThumb())
+ return Error(L, "target does not support Thumb mode");
+
if (!isThumb())
SwitchMode();
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
} else {
+ if (!hasARM())
+ return Error(L, "target does not support ARM mode");
+
if (isThumb())
SwitchMode();
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
@@ -7855,7 +8012,267 @@ bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
/// parseDirectiveEabiAttr
/// ::= .eabi_attribute int, int
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
- return true;
+ if (Parser.getTok().isNot(AsmToken::Integer))
+ return Error(L, "integer expected");
+ int64_t Tag = Parser.getTok().getIntVal();
+ Parser.Lex(); // eat tag integer
+
+ if (Parser.getTok().isNot(AsmToken::Comma))
+ return Error(L, "comma expected");
+ Parser.Lex(); // skip comma
+
+ L = Parser.getTok().getLoc();
+ if (Parser.getTok().isNot(AsmToken::Integer))
+ return Error(L, "integer expected");
+ int64_t Value = Parser.getTok().getIntVal();
+ Parser.Lex(); // eat value integer
+
+ getTargetStreamer().emitAttribute(Tag, Value);
+ return false;
+}
+
+/// parseDirectiveCPU
+/// ::= .cpu str
+bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
+ StringRef CPU = getParser().parseStringToEndOfStatement().trim();
+ getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
+ return false;
+}
+
+/// parseDirectiveFPU
+/// ::= .fpu str
+bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
+ StringRef FPU = getParser().parseStringToEndOfStatement().trim();
+
+ unsigned ID = StringSwitch<unsigned>(FPU)
+#define ARM_FPU_NAME(NAME, ID) .Case(NAME, ARM::ID)
+#include "ARMFPUName.def"
+ .Default(ARM::INVALID_FPU);
+
+ if (ID == ARM::INVALID_FPU)
+ return Error(L, "Unknown FPU name");
+
+ getTargetStreamer().emitFPU(ID);
+ return false;
+}
+
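The StringSwitch in parseDirectiveFPU above is populated by X-macro expansion of ARMFPUName.def. A sketch of that pattern with hypothetical placeholder entries (the real names and IDs live in the .def file):

// Illustrative sketch only; the entry names below are placeholders.
#include <string>

enum FPUKind { INVALID_FPU, VFPV2, NEON };    // stand-ins for the ARM:: IDs

static FPUKind lookupFPU(const std::string &Name) {
  // With the real header this body is generated by
  //   #define ARM_FPU_NAME(NAME, ID) .Case(NAME, ARM::ID)
  //   #include "ARMFPUName.def"
  if (Name == "vfpv2") return VFPV2;          // ARM_FPU_NAME("vfpv2", VFPV2)
  if (Name == "neon")  return NEON;           // ARM_FPU_NAME("neon",  NEON)
  return INVALID_FPU;                         // .Default(ARM::INVALID_FPU)
}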
+/// parseDirectiveFnStart
+/// ::= .fnstart
+bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
+ if (FnStartLoc.isValid()) {
+ Error(L, ".fnstart starts before the end of previous one");
+ Error(FnStartLoc, "previous .fnstart starts here");
+ return true;
+ }
+
+ FnStartLoc = L;
+ getTargetStreamer().emitFnStart();
+ return false;
+}
+
+/// parseDirectiveFnEnd
+/// ::= .fnend
+bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
+ // Check the ordering of unwind directives
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .fnend directive");
+
+ // Reset the unwind directives parser state
+ resetUnwindDirectiveParserState();
+ getTargetStreamer().emitFnEnd();
+ return false;
+}
+
+/// parseDirectiveCantUnwind
+/// ::= .cantunwind
+bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
+ // Check the ordering of unwind directives
+ CantUnwindLoc = L;
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .cantunwind directive");
+ if (HandlerDataLoc.isValid()) {
+ Error(L, ".cantunwind can't be used with .handlerdata directive");
+ Error(HandlerDataLoc, ".handlerdata was specified here");
+ return true;
+ }
+ if (PersonalityLoc.isValid()) {
+ Error(L, ".cantunwind can't be used with .personality directive");
+ Error(PersonalityLoc, ".personality was specified here");
+ return true;
+ }
+
+ getTargetStreamer().emitCantUnwind();
+ return false;
+}
+
+/// parseDirectivePersonality
+/// ::= .personality name
+bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
+ // Check the ordering of unwind directives
+ PersonalityLoc = L;
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .personality directive");
+ if (CantUnwindLoc.isValid()) {
+ Error(L, ".personality can't be used with .cantunwind directive");
+ Error(CantUnwindLoc, ".cantunwind was specified here");
+ return true;
+ }
+ if (HandlerDataLoc.isValid()) {
+ Error(L, ".personality must precede .handlerdata directive");
+ Error(HandlerDataLoc, ".handlerdata was specified here");
+ return true;
+ }
+
+ // Parse the name of the personality routine
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Parser.eatToEndOfStatement();
+ return Error(L, "unexpected input in .personality directive.");
+ }
+ StringRef Name(Parser.getTok().getIdentifier());
+ Parser.Lex();
+
+ MCSymbol *PR = getParser().getContext().GetOrCreateSymbol(Name);
+ getTargetStreamer().emitPersonality(PR);
+ return false;
+}
+
+/// parseDirectiveHandlerData
+/// ::= .handlerdata
+bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
+ // Check the ordering of unwind directives
+ HandlerDataLoc = L;
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .personality directive");
+ if (CantUnwindLoc.isValid()) {
+ Error(L, ".handlerdata can't be used with .cantunwind directive");
+ Error(CantUnwindLoc, ".cantunwind was specified here");
+ return true;
+ }
+
+ getTargetStreamer().emitHandlerData();
+ return false;
+}
+
+/// parseDirectiveSetFP
+/// ::= .setfp fpreg, spreg [, offset]
+bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
+ // Check the ordering of unwind directives
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .setfp directive");
+ if (HandlerDataLoc.isValid())
+ return Error(L, ".setfp must precede .handlerdata directive");
+
+ // Parse fpreg
+ SMLoc NewFPRegLoc = Parser.getTok().getLoc();
+ int NewFPReg = tryParseRegister();
+ if (NewFPReg == -1)
+ return Error(NewFPRegLoc, "frame pointer register expected");
+
+ // Consume comma
+ if (!Parser.getTok().is(AsmToken::Comma))
+ return Error(Parser.getTok().getLoc(), "comma expected");
+ Parser.Lex(); // skip comma
+
+ // Parse spreg
+ SMLoc NewSPRegLoc = Parser.getTok().getLoc();
+ int NewSPReg = tryParseRegister();
+ if (NewSPReg == -1)
+ return Error(NewSPRegLoc, "stack pointer register expected");
+
+ if (NewSPReg != ARM::SP && NewSPReg != FPReg)
+ return Error(NewSPRegLoc,
+ "register should be either $sp or the latest fp register");
+
+ // Update the frame pointer register
+ FPReg = NewFPReg;
+
+ // Parse offset
+ int64_t Offset = 0;
+ if (Parser.getTok().is(AsmToken::Comma)) {
+ Parser.Lex(); // skip comma
+
+ if (Parser.getTok().isNot(AsmToken::Hash) &&
+ Parser.getTok().isNot(AsmToken::Dollar)) {
+ return Error(Parser.getTok().getLoc(), "'#' expected");
+ }
+ Parser.Lex(); // skip hash token.
+
+ const MCExpr *OffsetExpr;
+ SMLoc ExLoc = Parser.getTok().getLoc();
+ SMLoc EndLoc;
+ if (getParser().parseExpression(OffsetExpr, EndLoc))
+ return Error(ExLoc, "malformed setfp offset");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
+ if (!CE)
+ return Error(ExLoc, "setfp offset must be an immediate");
+
+ Offset = CE->getValue();
+ }
+
+ getTargetStreamer().emitSetFP(static_cast<unsigned>(NewFPReg),
+ static_cast<unsigned>(NewSPReg), Offset);
+ return false;
+}
+
+/// parseDirectivePad
+/// ::= .pad offset
+bool ARMAsmParser::parseDirectivePad(SMLoc L) {
+ // Check the ordering of unwind directives
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .pad directive");
+ if (HandlerDataLoc.isValid())
+ return Error(L, ".pad must precede .handlerdata directive");
+
+ // Parse the offset
+ if (Parser.getTok().isNot(AsmToken::Hash) &&
+ Parser.getTok().isNot(AsmToken::Dollar)) {
+ return Error(Parser.getTok().getLoc(), "'#' expected");
+ }
+ Parser.Lex(); // skip hash token.
+
+ const MCExpr *OffsetExpr;
+ SMLoc ExLoc = Parser.getTok().getLoc();
+ SMLoc EndLoc;
+ if (getParser().parseExpression(OffsetExpr, EndLoc))
+ return Error(ExLoc, "malformed pad offset");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
+ if (!CE)
+ return Error(ExLoc, "pad offset must be an immediate");
+
+ getTargetStreamer().emitPad(CE->getValue());
+ return false;
+}
+
+/// parseDirectiveRegSave
+/// ::= .save { registers }
+/// ::= .vsave { registers }
+bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
+ // Check the ordering of unwind directives
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .save or .vsave directives");
+ if (HandlerDataLoc.isValid())
+ return Error(L, ".save or .vsave must precede .handlerdata directive");
+
+ // RAII object to make sure parsed operands are deleted.
+ struct CleanupObject {
+ SmallVector<MCParsedAsmOperand *, 1> Operands;
+ ~CleanupObject() {
+ for (unsigned I = 0, E = Operands.size(); I != E; ++I)
+ delete Operands[I];
+ }
+ } CO;
+
+ // Parse the register list
+ if (parseRegisterList(CO.Operands))
+ return true;
+ ARMOperand *Op = (ARMOperand*)CO.Operands[0];
+ if (!IsVector && !Op->isRegList())
+ return Error(L, ".save expects GPR registers");
+ if (IsVector && !Op->isDPRRegList())
+ return Error(L, ".vsave expects DPR registers");
+
+ getTargetStreamer().emitRegSave(Op->getRegList(), IsVector);
+ return false;
}
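Taken together, the unwind directive parsers added above enforce a simple ordering contract between .fnstart, .fnend, .cantunwind, .personality, .handlerdata, .setfp, .pad and .save/.vsave. A condensed sketch of those rules, folded into one hypothetical checker for illustration (derived from the error messages above; the names are invented):

// Illustrative sketch, not part of the patch.
#include <string>

struct UnwindState {
  bool SeenFnStart = false, SeenHandlerData = false;
  bool SeenPersonality = false, SeenCantUnwind = false;
};

// Returns an error string, or nullptr if the directive is acceptable here.
static const char *checkUnwindDirective(UnwindState &S, const std::string &Dir) {
  if (Dir != ".fnstart" && !S.SeenFnStart)
    return ".fnstart must precede all other unwind directives";
  if (Dir == ".fnstart" && S.SeenFnStart)
    return ".fnstart starts before the end of the previous one";
  if ((Dir == ".setfp" || Dir == ".pad" || Dir == ".save" ||
       Dir == ".vsave" || Dir == ".personality") && S.SeenHandlerData)
    return "directive must precede .handlerdata";
  if ((Dir == ".personality" || Dir == ".handlerdata") && S.SeenCantUnwind)
    return "directive can't be used with .cantunwind";
  if (Dir == ".cantunwind" && (S.SeenPersonality || S.SeenHandlerData))
    return ".cantunwind can't follow .personality or .handlerdata";
  if (Dir == ".fnstart")     S.SeenFnStart = true;
  if (Dir == ".personality") S.SeenPersonality = true;
  if (Dir == ".handlerdata") S.SeenHandlerData = true;
  if (Dir == ".cantunwind")  S.SeenCantUnwind = true;
  if (Dir == ".fnend")       S = UnwindState();   // .fnend resets the state
  return nullptr;
}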
/// Force static initialization.
diff --git a/lib/Target/ARM/CMakeLists.txt b/lib/Target/ARM/CMakeLists.txt
index b832508a086c..f271a932b540 100644
--- a/lib/Target/ARM/CMakeLists.txt
+++ b/lib/Target/ARM/CMakeLists.txt
@@ -49,7 +49,7 @@ add_llvm_target(ARMCodeGen
Thumb2SizeReduction.cpp
)
-add_dependencies(LLVMARMCodeGen intrinsics_gen)
+add_dependencies(LLVMARMCodeGen ARMCommonTableGen intrinsics_gen)
# workaround for hanging compilation on MSVC9, 10
if( MSVC_VERSION EQUAL 1600 OR MSVC_VERSION EQUAL 1500 )
diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index ac937f35345e..9c7988f8a857 100644
--- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -65,7 +65,7 @@ namespace {
void setITState(char Firstcond, char Mask) {
// (3 - the number of trailing zeros) is the number of then / else.
unsigned CondBit0 = Firstcond & 1;
- unsigned NumTZ = CountTrailingZeros_32(Mask);
+ unsigned NumTZ = countTrailingZeros<uint8_t>(Mask);
unsigned char CCBits = static_cast<unsigned char>(Firstcond & 0xf);
assert(NumTZ <= 3 && "Invalid IT mask!");
// push condition codes onto the stack the correct order for the pops
@@ -156,12 +156,17 @@ static DecodeStatus DecodeGPRRegisterClass(MCInst &Inst, unsigned RegNo,
static DecodeStatus DecodeGPRnopcRegisterClass(MCInst &Inst,
unsigned RegNo, uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeGPRwithAPSRRegisterClass(MCInst &Inst,
+ unsigned RegNo, uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodetGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodetcGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
static DecodeStatus DecoderGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeGPRPairRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder);
static DecodeStatus DecodeSPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeDPRRegisterClass(MCInst &Inst, unsigned RegNo,
@@ -236,6 +241,14 @@ static DecodeStatus DecodeBranchImmInstruction(MCInst &Inst,unsigned Insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeAddrMode6Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeVLDST1Instruction(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeVLDST2Instruction(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeVLDST3Instruction(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeVLDST4Instruction(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder);
static DecodeStatus DecodeVLDInstruction(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeVSTInstruction(MCInst &Inst, unsigned Val,
@@ -268,6 +281,8 @@ static DecodeStatus DecodeCoprocessor(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeMemBarrierOption(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeInstSyncBarrierOption(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
static DecodeStatus DecodeMSRMask(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeDoubleRegLoad(MCInst &Inst, unsigned Insn,
@@ -308,8 +323,6 @@ static DecodeStatus DecodeVCVTD(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeVCVTQ(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeImm0_4(MCInst &Inst, unsigned Insn, uint64_t Address,
- const void *Decoder);
static DecodeStatus DecodeThumbAddSpecialReg(MCInst &Inst, uint16_t Insn,
@@ -332,6 +345,14 @@ static DecodeStatus DecodeT2AddrModeSOReg(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeT2LoadImm8(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void* Decoder);
+static DecodeStatus DecodeT2LoadImm12(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void* Decoder);
+static DecodeStatus DecodeT2LoadT(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void* Decoder);
+static DecodeStatus DecodeT2LoadLabel(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void* Decoder);
static DecodeStatus DecodeT2Imm8S4(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeT2AddrModeImm8s4(MCInst &Inst, unsigned Val,
@@ -348,6 +369,8 @@ static DecodeStatus DecodeThumbAddSPReg(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeThumbCPS(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeQADDInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
static DecodeStatus DecodeThumbBLXOffset(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeT2AddrModeImm12(MCInst &Inst, unsigned Val,
@@ -402,7 +425,7 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
"Asked to disassemble an ARM instruction but Subtarget is in Thumb mode!");
// We want to read exactly 4 bytes of data.
- if (Region.readBytes(Address, 4, (uint8_t*)bytes, NULL) == -1) {
+ if (Region.readBytes(Address, 4, bytes) == -1) {
Size = 0;
return MCDisassembler::Fail;
}
@@ -431,6 +454,13 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
MI.clear();
+ result = decodeInstruction(DecoderTableVFPV832, MI, insn, Address, this, STI);
+ if (result != MCDisassembler::Fail) {
+ Size = 4;
+ return result;
+ }
+
+ MI.clear();
result = decodeInstruction(DecoderTableNEONData32, MI, insn, Address,
this, STI);
if (result != MCDisassembler::Fail) {
@@ -467,7 +497,22 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
MI.clear();
+ result = decodeInstruction(DecoderTablev8NEON32, MI, insn, Address,
+ this, STI);
+ if (result != MCDisassembler::Fail) {
+ Size = 4;
+ return result;
+ }
+
+ MI.clear();
+ result = decodeInstruction(DecoderTablev8Crypto32, MI, insn, Address,
+ this, STI);
+ if (result != MCDisassembler::Fail) {
+ Size = 4;
+ return result;
+ }
+ MI.clear();
Size = 0;
return MCDisassembler::Fail;
}
@@ -492,102 +537,9 @@ static bool tryAddingSymbolicOperand(uint64_t Address, int32_t Value,
bool isBranch, uint64_t InstSize,
MCInst &MI, const void *Decoder) {
const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
- LLVMOpInfoCallback getOpInfo = Dis->getLLVMOpInfoCallback();
- struct LLVMOpInfo1 SymbolicOp;
- memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1));
- SymbolicOp.Value = Value;
- void *DisInfo = Dis->getDisInfoBlock();
-
- if (!getOpInfo ||
- !getOpInfo(DisInfo, Address, 0 /* Offset */, InstSize, 1, &SymbolicOp)) {
- // Clear SymbolicOp.Value from above and also all other fields.
- memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1));
- LLVMSymbolLookupCallback SymbolLookUp = Dis->getLLVMSymbolLookupCallback();
- if (!SymbolLookUp)
- return false;
- uint64_t ReferenceType;
- if (isBranch)
- ReferenceType = LLVMDisassembler_ReferenceType_In_Branch;
- else
- ReferenceType = LLVMDisassembler_ReferenceType_InOut_None;
- const char *ReferenceName;
- uint64_t SymbolValue = 0x00000000ffffffffULL & Value;
- const char *Name = SymbolLookUp(DisInfo, SymbolValue, &ReferenceType,
- Address, &ReferenceName);
- if (Name) {
- SymbolicOp.AddSymbol.Name = Name;
- SymbolicOp.AddSymbol.Present = true;
- }
- // For branches always create an MCExpr so it gets printed as hex address.
- else if (isBranch) {
- SymbolicOp.Value = Value;
- }
- if(ReferenceType == LLVMDisassembler_ReferenceType_Out_SymbolStub)
- (*Dis->CommentStream) << "symbol stub for: " << ReferenceName;
- if (!Name && !isBranch)
- return false;
- }
-
- MCContext *Ctx = Dis->getMCContext();
- const MCExpr *Add = NULL;
- if (SymbolicOp.AddSymbol.Present) {
- if (SymbolicOp.AddSymbol.Name) {
- StringRef Name(SymbolicOp.AddSymbol.Name);
- MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
- Add = MCSymbolRefExpr::Create(Sym, *Ctx);
- } else {
- Add = MCConstantExpr::Create(SymbolicOp.AddSymbol.Value, *Ctx);
- }
- }
-
- const MCExpr *Sub = NULL;
- if (SymbolicOp.SubtractSymbol.Present) {
- if (SymbolicOp.SubtractSymbol.Name) {
- StringRef Name(SymbolicOp.SubtractSymbol.Name);
- MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
- Sub = MCSymbolRefExpr::Create(Sym, *Ctx);
- } else {
- Sub = MCConstantExpr::Create(SymbolicOp.SubtractSymbol.Value, *Ctx);
- }
- }
-
- const MCExpr *Off = NULL;
- if (SymbolicOp.Value != 0)
- Off = MCConstantExpr::Create(SymbolicOp.Value, *Ctx);
-
- const MCExpr *Expr;
- if (Sub) {
- const MCExpr *LHS;
- if (Add)
- LHS = MCBinaryExpr::CreateSub(Add, Sub, *Ctx);
- else
- LHS = MCUnaryExpr::CreateMinus(Sub, *Ctx);
- if (Off != 0)
- Expr = MCBinaryExpr::CreateAdd(LHS, Off, *Ctx);
- else
- Expr = LHS;
- } else if (Add) {
- if (Off != 0)
- Expr = MCBinaryExpr::CreateAdd(Add, Off, *Ctx);
- else
- Expr = Add;
- } else {
- if (Off != 0)
- Expr = Off;
- else
- Expr = MCConstantExpr::Create(0, *Ctx);
- }
-
- if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_ARM_HI16)
- MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateUpper16(Expr, *Ctx)));
- else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_ARM_LO16)
- MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateLower16(Expr, *Ctx)));
- else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_None)
- MI.addOperand(MCOperand::CreateExpr(Expr));
- else
- llvm_unreachable("bad SymbolicOp.VariantKind");
-
- return true;
+ // FIXME: Does it make sense for value to be negative?
+ return Dis->tryAddingSymbolicOperand(MI, (uint32_t)Value, Address, isBranch,
+ /* Offset */ 0, InstSize);
}
/// tryAddingPcLoadReferenceComment - tries to add a comment as to what is being
@@ -602,17 +554,7 @@ static bool tryAddingSymbolicOperand(uint64_t Address, int32_t Value,
static void tryAddingPcLoadReferenceComment(uint64_t Address, int Value,
const void *Decoder) {
const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
- LLVMSymbolLookupCallback SymbolLookUp = Dis->getLLVMSymbolLookupCallback();
- if (SymbolLookUp) {
- void *DisInfo = Dis->getDisInfoBlock();
- uint64_t ReferenceType;
- ReferenceType = LLVMDisassembler_ReferenceType_In_PCrel_Load;
- const char *ReferenceName;
- (void)SymbolLookUp(DisInfo, Value, &ReferenceType, Address, &ReferenceName);
- if(ReferenceType == LLVMDisassembler_ReferenceType_Out_LitPool_SymAddr ||
- ReferenceType == LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr)
- (*Dis->CommentStream) << "literal pool for: " << ReferenceName;
- }
+ Dis->tryAddingPcLoadReferenceComment(Value, Address);
}
// Thumb1 instructions don't have explicit S bits. Rather, they
@@ -751,7 +693,7 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
"Asked to disassemble in Thumb mode but Subtarget is in ARM mode!");
// We want to read exactly 2 bytes of data.
- if (Region.readBytes(Address, 2, (uint8_t*)bytes, NULL) == -1) {
+ if (Region.readBytes(Address, 2, bytes) == -1) {
Size = 0;
return MCDisassembler::Fail;
}
@@ -803,7 +745,7 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
// We want to read exactly 4 bytes of data.
- if (Region.readBytes(Address, 4, (uint8_t*)bytes, NULL) == -1) {
+ if (Region.readBytes(Address, 4, bytes) == -1) {
Size = 0;
return MCDisassembler::Fail;
}
@@ -832,23 +774,34 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
return result;
}
- MI.clear();
- result = decodeInstruction(DecoderTableVFP32, MI, insn32, Address, this, STI);
- if (result != MCDisassembler::Fail) {
- Size = 4;
- UpdateThumbVFPPredicate(MI);
- return result;
+ if (fieldFromInstruction(insn32, 28, 4) == 0xE) {
+ MI.clear();
+ result = decodeInstruction(DecoderTableVFP32, MI, insn32, Address, this, STI);
+ if (result != MCDisassembler::Fail) {
+ Size = 4;
+ UpdateThumbVFPPredicate(MI);
+ return result;
+ }
}
MI.clear();
- result = decodeInstruction(DecoderTableNEONDup32, MI, insn32, Address,
- this, STI);
+ result = decodeInstruction(DecoderTableVFPV832, MI, insn32, Address, this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
- Check(result, AddThumbPredicate(MI));
return result;
}
+ if (fieldFromInstruction(insn32, 28, 4) == 0xE) {
+ MI.clear();
+ result = decodeInstruction(DecoderTableNEONDup32, MI, insn32, Address,
+ this, STI);
+ if (result != MCDisassembler::Fail) {
+ Size = 4;
+ Check(result, AddThumbPredicate(MI));
+ return result;
+ }
+ }
+
if (fieldFromInstruction(insn32, 24, 8) == 0xF9) {
MI.clear();
uint32_t NEONLdStInsn = insn32;
@@ -876,8 +829,31 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
Check(result, AddThumbPredicate(MI));
return result;
}
+
+ MI.clear();
+ uint32_t NEONCryptoInsn = insn32;
+ NEONCryptoInsn &= 0xF0FFFFFF; // Clear bits 27-24
+ NEONCryptoInsn |= (NEONCryptoInsn & 0x10000000) >> 4; // Move bit 28 to bit 24
+ NEONCryptoInsn |= 0x12000000; // Set bits 28 and 25
+ result = decodeInstruction(DecoderTablev8Crypto32, MI, NEONCryptoInsn,
+ Address, this, STI);
+ if (result != MCDisassembler::Fail) {
+ Size = 4;
+ return result;
+ }
+
+ MI.clear();
+ uint32_t NEONv8Insn = insn32;
+ NEONv8Insn &= 0xF3FFFFFF; // Clear bits 27-26
+ result = decodeInstruction(DecoderTablev8NEON32, MI, NEONv8Insn, Address,
+ this, STI);
+ if (result != MCDisassembler::Fail) {
+ Size = 4;
+ return result;
+ }
}
+ MI.clear();
Size = 0;
return MCDisassembler::Fail;
}
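The Thumb2 decoder above, like every decoder in this file, slices bit fields out of the raw encoding with fieldFromInstruction. As a rough, illustrative stand-in (assumed name; the real helper is shared decoder support code), the operation is just a shift and mask:

#include <cassert>
#include <cstdint>

// Sketch only: extract numBits bits of Insn starting at startBit.
static uint32_t fieldFromInstructionSketch(uint32_t Insn, unsigned startBit,
                                           unsigned numBits) {
  assert(startBit + numBits <= 32 && "field out of range");
  if (numBits == 32)
    return Insn;
  return (Insn >> startBit) & ((1u << numBits) - 1);
}

int main() {
  // The ARM condition field lives in bits 31:28; 0xE is the AL (always) code.
  assert(fieldFromInstructionSketch(0xE0000000u, 28, 4) == 0xE);
  return 0;
}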
@@ -920,6 +896,21 @@ DecodeGPRnopcRegisterClass(MCInst &Inst, unsigned RegNo,
return S;
}
+static DecodeStatus
+DecodeGPRwithAPSRRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
+ if (RegNo == 15)
+ {
+ Inst.addOperand(MCOperand::CreateReg(ARM::APSR_NZCV));
+ return MCDisassembler::Success;
+ }
+
+ Check(S, DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder));
+ return S;
+}
+
static DecodeStatus DecodetGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
if (RegNo > 7)
@@ -927,6 +918,26 @@ static DecodeStatus DecodetGPRRegisterClass(MCInst &Inst, unsigned RegNo,
return DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder);
}
+static const uint16_t GPRPairDecoderTable[] = {
+ ARM::R0_R1, ARM::R2_R3, ARM::R4_R5, ARM::R6_R7,
+ ARM::R8_R9, ARM::R10_R11, ARM::R12_SP
+};
+
+static DecodeStatus DecodeGPRPairRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
+ if (RegNo > 13)
+ return MCDisassembler::Fail;
+
+ if ((RegNo & 1) || RegNo == 0xe)
+ S = MCDisassembler::SoftFail;
+
+ unsigned RegisterPair = GPRPairDecoderTable[RegNo/2];
+ Inst.addOperand(MCOperand::CreateReg(RegisterPair));
+ return S;
+}
+
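DecodeGPRPairRegisterClass above maps an even register number to the pair starting at that register and tolerates odd numbers as a soft failure. A minimal, self-contained sketch of that rule, with plain strings standing in for the ARM::* pair registers (names are illustrative only):

#include <cstdio>

static const char *const PairNames[] = {
  "R0_R1", "R2_R3", "R4_R5", "R6_R7", "R8_R9", "R10_R11", "R12_SP"
};

enum Status { Success, SoftFail, Fail };

// Sketch of the decode rule: reject numbers above 13, soft-fail odd ones,
// and index the table by RegNo/2 either way.
static Status decodeGPRPairSketch(unsigned RegNo, const char *&Pair) {
  if (RegNo > 13)
    return Fail;
  Status S = Success;
  if (RegNo & 1)
    S = SoftFail;               // odd first register is an unpredictable encoding
  Pair = PairNames[RegNo / 2];  // RegNo 4 and 5 both map to "R4_R5"
  return S;
}

int main() {
  const char *Pair = "";
  Status S = decodeGPRPairSketch(5, Pair);
  std::printf("%s soft-fail=%d\n", Pair, S == SoftFail); // prints: R4_R5 soft-fail=1
  return 0;
}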
static DecodeStatus DecodetcGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
unsigned Register = 0;
@@ -959,8 +970,11 @@ static DecodeStatus DecodetcGPRRegisterClass(MCInst &Inst, unsigned RegNo,
static DecodeStatus DecoderGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
- if (RegNo == 13 || RegNo == 15) return MCDisassembler::Fail;
- return DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder);
+ DecodeStatus S = MCDisassembler::Success;
+ if (RegNo == 13 || RegNo == 15)
+ S = MCDisassembler::SoftFail;
+ Check(S, DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder));
+ return S;
}
static const uint16_t SPRDecoderTable[] = {
@@ -1030,7 +1044,7 @@ static const uint16_t QPRDecoderTable[] = {
static DecodeStatus DecodeQPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
- if (RegNo > 31)
+ if (RegNo > 31 || (RegNo & 1) != 0)
return MCDisassembler::Fail;
RegNo >>= 1;
@@ -1189,30 +1203,32 @@ static DecodeStatus DecodeRegListOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- bool writebackLoad = false;
- unsigned writebackReg = 0;
+ bool NeedDisjointWriteback = false;
+ unsigned WritebackReg = 0;
switch (Inst.getOpcode()) {
- default:
- break;
- case ARM::LDMIA_UPD:
- case ARM::LDMDB_UPD:
- case ARM::LDMIB_UPD:
- case ARM::LDMDA_UPD:
- case ARM::t2LDMIA_UPD:
- case ARM::t2LDMDB_UPD:
- writebackLoad = true;
- writebackReg = Inst.getOperand(0).getReg();
- break;
+ default:
+ break;
+ case ARM::LDMIA_UPD:
+ case ARM::LDMDB_UPD:
+ case ARM::LDMIB_UPD:
+ case ARM::LDMDA_UPD:
+ case ARM::t2LDMIA_UPD:
+ case ARM::t2LDMDB_UPD:
+ case ARM::t2STMIA_UPD:
+ case ARM::t2STMDB_UPD:
+ NeedDisjointWriteback = true;
+ WritebackReg = Inst.getOperand(0).getReg();
+ break;
}
// Empty register lists are not allowed.
- if (CountPopulation_32(Val) == 0) return MCDisassembler::Fail;
+ if (Val == 0) return MCDisassembler::Fail;
for (unsigned i = 0; i < 16; ++i) {
if (Val & (1 << i)) {
if (!Check(S, DecodeGPRRegisterClass(Inst, i, Address, Decoder)))
return MCDisassembler::Fail;
// Writeback not allowed if Rn is in the target list.
- if (writebackLoad && writebackReg == Inst.end()[-1].getReg())
+ if (NeedDisjointWriteback && WritebackReg == Inst.end()[-1].getReg())
Check(S, MCDisassembler::SoftFail);
}
}
@@ -1227,6 +1243,13 @@ static DecodeStatus DecodeSPRRegListOperand(MCInst &Inst, unsigned Val,
unsigned Vd = fieldFromInstruction(Val, 8, 5);
unsigned regs = fieldFromInstruction(Val, 0, 8);
+ // In case of unpredictable encoding, tweak the operands.
+ if (regs == 0 || (Vd + regs) > 32) {
+ regs = Vd + regs > 32 ? 32 - Vd : regs;
+ regs = std::max(1u, regs);
+ S = MCDisassembler::SoftFail;
+ }
+
if (!Check(S, DecodeSPRRegisterClass(Inst, Vd, Address, Decoder)))
return MCDisassembler::Fail;
for (unsigned i = 0; i < (regs - 1); ++i) {
@@ -1242,9 +1265,15 @@ static DecodeStatus DecodeDPRRegListOperand(MCInst &Inst, unsigned Val,
DecodeStatus S = MCDisassembler::Success;
unsigned Vd = fieldFromInstruction(Val, 8, 5);
- unsigned regs = fieldFromInstruction(Val, 0, 8);
+ unsigned regs = fieldFromInstruction(Val, 1, 7);
- regs = regs >> 1;
+ // In case of unpredictable encoding, tweak the operands.
+ if (regs == 0 || regs > 16 || (Vd + regs) > 32) {
+ regs = Vd + regs > 32 ? 32 - Vd : regs;
+ regs = std::max(1u, regs);
+ regs = std::min(16u, regs);
+ S = MCDisassembler::SoftFail;
+ }
if (!Check(S, DecodeDPRRegisterClass(Inst, Vd, Address, Decoder)))
return MCDisassembler::Fail;
@@ -1334,6 +1363,11 @@ static DecodeStatus DecodeCopMemInstruction(MCInst &Inst, unsigned Insn,
break;
}
+ uint64_t featureBits = ((const MCDisassembler*)Decoder)->getSubtargetInfo()
+ .getFeatureBits();
+ if ((featureBits & ARM::HasV8Ops) && (coproc != 14))
+ return MCDisassembler::Fail;
+
Inst.addOperand(MCOperand::CreateImm(coproc));
Inst.addOperand(MCOperand::CreateImm(CRd));
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
@@ -1797,6 +1831,29 @@ static DecodeStatus DecodeRFEInstruction(MCInst &Inst, unsigned Insn,
return S;
}
+static DecodeStatus DecodeQADDInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
+
+ if (pred == 0xF)
+ return DecodeCPSInstruction(Inst, Insn, Address, Decoder);
+
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rm, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rn, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodePredicateOperand(Inst, pred, Address, Decoder)))
+ return MCDisassembler::Fail;
+ return S;
+}
+
static DecodeStatus DecodeMemMultipleWritebackInstruction(MCInst &Inst,
unsigned Insn,
uint64_t Address, const void *Decoder) {
@@ -1807,6 +1864,7 @@ static DecodeStatus DecodeMemMultipleWritebackInstruction(MCInst &Inst,
unsigned reglist = fieldFromInstruction(Insn, 0, 16);
if (pred == 0xF) {
+ // Ambiguous with RFE and SRS
switch (Inst.getOpcode()) {
case ARM::LDMDA:
Inst.setOpcode(ARM::RFEDA);
@@ -1857,11 +1915,16 @@ static DecodeStatus DecodeMemMultipleWritebackInstruction(MCInst &Inst,
Inst.setOpcode(ARM::SRSIB_UPD);
break;
default:
- if (!Check(S, MCDisassembler::Fail)) return MCDisassembler::Fail;
+ return MCDisassembler::Fail;
}
// For stores (which become SRS's), the only operand is the mode.
if (fieldFromInstruction(Insn, 20, 1) == 0) {
+ // Check SRS encoding constraints
+ if (!(fieldFromInstruction(Insn, 22, 1) == 1 &&
+ fieldFromInstruction(Insn, 20, 1) == 0))
+ return MCDisassembler::Fail;
+
Inst.addOperand(
MCOperand::CreateImm(fieldFromInstruction(Insn, 0, 4)));
return S;
@@ -1891,6 +1954,13 @@ static DecodeStatus DecodeCPSInstruction(MCInst &Inst, unsigned Insn,
DecodeStatus S = MCDisassembler::Success;
+ // This decoder is called from multiple locations that do not check
+ // that the full encoding is valid before calling it.
+ if (fieldFromInstruction(Insn, 5, 1) != 0 ||
+ fieldFromInstruction(Insn, 16, 1) != 0 ||
+ fieldFromInstruction(Insn, 20, 8) != 0x10)
+ return MCDisassembler::Fail;
+
// imod == '01' --> UNPREDICTABLE
// NOTE: Even though this is technically UNPREDICTABLE, we choose to
// return failure here. The '01' imod value is unprintable, so there's
@@ -2106,7 +2176,7 @@ DecodeT2BInstruction(MCInst &Inst, unsigned Insn,
unsigned imm10 = fieldFromInstruction(Insn, 16, 10);
unsigned imm11 = fieldFromInstruction(Insn, 0, 11);
unsigned tmp = (S << 23) | (I1 << 22) | (I2 << 21) | (imm10 << 11) | imm11;
- int imm32 = SignExtend32<24>(tmp << 1);
+ int imm32 = SignExtend32<25>(tmp << 1);
if (!tryAddingSymbolicOperand(Address, Address + imm32 + 4,
true, 4, Inst, Decoder))
Inst.addOperand(MCOperand::CreateImm(imm32));
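For context on the SignExtend32<24> to SignExtend32<25> change above: tmp packs S:I1:I2:imm10:imm11, a 24-bit field, and the branch offset is tmp shifted left by one, i.e. a 25-bit two's-complement value, so 25 bits must be sign-extended. A minimal standalone check of that arithmetic, using a local helper in place of LLVM's (assumes the usual two's-complement arithmetic right shift):

#include <cassert>
#include <cstdint>

// Local stand-in: interpret the low B bits of X as a signed value.
template <unsigned B> static int32_t signExtend32(uint32_t X) {
  return int32_t(X << (32 - B)) >> (32 - B);
}

int main() {
  // Only the S bit (bit 23 of the 24-bit field) set: the offset should be -2^24.
  uint32_t tmp = 0x800000;
  assert(signExtend32<25>(tmp << 1) == -16777216); // correct with 25 bits
  assert(signExtend32<24>(tmp << 1) == 0);         // a 24-bit extend drops the sign bit
  return 0;
}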
@@ -2432,6 +2502,57 @@ static DecodeStatus DecodeVLDInstruction(MCInst &Inst, unsigned Insn,
return S;
}
+static DecodeStatus DecodeVLDST1Instruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned type = fieldFromInstruction(Insn, 8, 4);
+ unsigned align = fieldFromInstruction(Insn, 4, 2);
+ if (type == 6 && (align & 2)) return MCDisassembler::Fail;
+ if (type == 7 && (align & 2)) return MCDisassembler::Fail;
+ if (type == 10 && align == 3) return MCDisassembler::Fail;
+
+ unsigned load = fieldFromInstruction(Insn, 21, 1);
+ return load ? DecodeVLDInstruction(Inst, Insn, Address, Decoder)
+ : DecodeVSTInstruction(Inst, Insn, Address, Decoder);
+}
+
+static DecodeStatus DecodeVLDST2Instruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned size = fieldFromInstruction(Insn, 6, 2);
+ if (size == 3) return MCDisassembler::Fail;
+
+ unsigned type = fieldFromInstruction(Insn, 8, 4);
+ unsigned align = fieldFromInstruction(Insn, 4, 2);
+ if (type == 8 && align == 3) return MCDisassembler::Fail;
+ if (type == 9 && align == 3) return MCDisassembler::Fail;
+
+ unsigned load = fieldFromInstruction(Insn, 21, 1);
+ return load ? DecodeVLDInstruction(Inst, Insn, Address, Decoder)
+ : DecodeVSTInstruction(Inst, Insn, Address, Decoder);
+}
+
+static DecodeStatus DecodeVLDST3Instruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned size = fieldFromInstruction(Insn, 6, 2);
+ if (size == 3) return MCDisassembler::Fail;
+
+ unsigned align = fieldFromInstruction(Insn, 4, 2);
+ if (align & 2) return MCDisassembler::Fail;
+
+ unsigned load = fieldFromInstruction(Insn, 21, 1);
+ return load ? DecodeVLDInstruction(Inst, Insn, Address, Decoder)
+ : DecodeVSTInstruction(Inst, Insn, Address, Decoder);
+}
+
+static DecodeStatus DecodeVLDST4Instruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned size = fieldFromInstruction(Insn, 6, 2);
+ if (size == 3) return MCDisassembler::Fail;
+
+ unsigned load = fieldFromInstruction(Insn, 21, 1);
+ return load ? DecodeVLDInstruction(Inst, Insn, Address, Decoder)
+ : DecodeVSTInstruction(Inst, Insn, Address, Decoder);
+}
+
static DecodeStatus DecodeVSTInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3115,6 +3236,17 @@ static DecodeStatus DecodeT2AddrModeSOReg(MCInst &Inst, unsigned Val,
unsigned Rm = fieldFromInstruction(Val, 2, 4);
unsigned imm = fieldFromInstruction(Val, 0, 2);
+ // Thumb stores cannot use PC as dest register.
+ switch (Inst.getOpcode()) {
+ case ARM::t2STRHs:
+ case ARM::t2STRBs:
+ case ARM::t2STRs:
+ if (Rn == 15)
+ return MCDisassembler::Fail;
+ default:
+ break;
+ }
+
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
if (!Check(S, DecoderGPRRegisterClass(Inst, Rm, Address, Decoder)))
@@ -3128,53 +3260,282 @@ static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+
+ if (Rn == 15) {
+ switch (Inst.getOpcode()) {
+ case ARM::t2LDRBs:
+ Inst.setOpcode(ARM::t2LDRBpci);
+ break;
+ case ARM::t2LDRHs:
+ Inst.setOpcode(ARM::t2LDRHpci);
+ break;
+ case ARM::t2LDRSHs:
+ Inst.setOpcode(ARM::t2LDRSHpci);
+ break;
+ case ARM::t2LDRSBs:
+ Inst.setOpcode(ARM::t2LDRSBpci);
+ break;
+ case ARM::t2LDRs:
+ Inst.setOpcode(ARM::t2LDRpci);
+ break;
+ case ARM::t2PLDs:
+ Inst.setOpcode(ARM::t2PLDpci);
+ break;
+ case ARM::t2PLIs:
+ Inst.setOpcode(ARM::t2PLIpci);
+ break;
+ default:
+ return MCDisassembler::Fail;
+ }
+
+ return DecodeT2LoadLabel(Inst, Insn, Address, Decoder);
+ }
+
+ if (Rt == 15) {
+ switch (Inst.getOpcode()) {
+ case ARM::t2LDRSHs:
+ return MCDisassembler::Fail;
+ case ARM::t2LDRHs:
+ // FIXME: this instruction is only available with MP extensions;
+ // this should be checked first, but we don't have access to the
+ // feature bits here.
+ Inst.setOpcode(ARM::t2PLDWs);
+ break;
+ default:
+ break;
+ }
+ }
+
switch (Inst.getOpcode()) {
case ARM::t2PLDs:
case ARM::t2PLDWs:
case ARM::t2PLIs:
break;
- default: {
- unsigned Rt = fieldFromInstruction(Insn, 12, 4);
- if (!Check(S, DecoderGPRRegisterClass(Inst, Rt, Address, Decoder)))
+ default:
+ if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
+ }
+
+ unsigned addrmode = fieldFromInstruction(Insn, 4, 2);
+ addrmode |= fieldFromInstruction(Insn, 0, 4) << 2;
+ addrmode |= fieldFromInstruction(Insn, 16, 4) << 6;
+ if (!Check(S, DecodeT2AddrModeSOReg(Inst, addrmode, Address, Decoder)))
return MCDisassembler::Fail;
+
+ return S;
+}
+
+static DecodeStatus DecodeT2LoadImm8(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void* Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned U = fieldFromInstruction(Insn, 9, 1);
+ unsigned imm = fieldFromInstruction(Insn, 0, 8);
+ imm |= (U << 8);
+ imm |= (Rn << 9);
+
+ if (Rn == 15) {
+ switch (Inst.getOpcode()) {
+ case ARM::t2LDRi8:
+ Inst.setOpcode(ARM::t2LDRpci);
+ break;
+ case ARM::t2LDRBi8:
+ Inst.setOpcode(ARM::t2LDRBpci);
+ break;
+ case ARM::t2LDRSBi8:
+ Inst.setOpcode(ARM::t2LDRSBpci);
+ break;
+ case ARM::t2LDRHi8:
+ Inst.setOpcode(ARM::t2LDRHpci);
+ break;
+ case ARM::t2LDRSHi8:
+ Inst.setOpcode(ARM::t2LDRSHpci);
+ break;
+ case ARM::t2PLDi8:
+ Inst.setOpcode(ARM::t2PLDpci);
+ break;
+ case ARM::t2PLIi8:
+ Inst.setOpcode(ARM::t2PLIpci);
+ break;
+ default:
+ return MCDisassembler::Fail;
+ }
+ return DecodeT2LoadLabel(Inst, Insn, Address, Decoder);
+ }
+
+ if (Rt == 15) {
+ switch (Inst.getOpcode()) {
+ case ARM::t2LDRSHi8:
+ return MCDisassembler::Fail;
+ default:
+ break;
}
}
+ switch (Inst.getOpcode()) {
+ case ARM::t2PLDi8:
+ case ARM::t2PLIi8:
+ case ARM::t2PLDWi8:
+ break;
+ default:
+ if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
+ }
+
+ if (!Check(S, DecodeT2AddrModeImm8(Inst, imm, Address, Decoder)))
+ return MCDisassembler::Fail;
+ return S;
+}
+
+static DecodeStatus DecodeT2LoadImm12(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void* Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
unsigned Rn = fieldFromInstruction(Insn, 16, 4);
- if (Rn == 0xF) {
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned imm = fieldFromInstruction(Insn, 0, 12);
+ imm |= (Rn << 13);
+
+ if (Rn == 15) {
switch (Inst.getOpcode()) {
- case ARM::t2LDRBs:
- Inst.setOpcode(ARM::t2LDRBpci);
- break;
- case ARM::t2LDRHs:
- Inst.setOpcode(ARM::t2LDRHpci);
- break;
- case ARM::t2LDRSHs:
- Inst.setOpcode(ARM::t2LDRSHpci);
- break;
- case ARM::t2LDRSBs:
- Inst.setOpcode(ARM::t2LDRSBpci);
+ case ARM::t2LDRi12:
+ Inst.setOpcode(ARM::t2LDRpci);
+ break;
+ case ARM::t2LDRHi12:
+ Inst.setOpcode(ARM::t2LDRHpci);
+ break;
+ case ARM::t2LDRSHi12:
+ Inst.setOpcode(ARM::t2LDRSHpci);
+ break;
+ case ARM::t2LDRBi12:
+ Inst.setOpcode(ARM::t2LDRBpci);
+ break;
+ case ARM::t2LDRSBi12:
+ Inst.setOpcode(ARM::t2LDRSBpci);
+ break;
+ case ARM::t2PLDi12:
+ Inst.setOpcode(ARM::t2PLDpci);
+ break;
+ case ARM::t2PLIi12:
+ Inst.setOpcode(ARM::t2PLIpci);
+ break;
+ default:
+ return MCDisassembler::Fail;
+ }
+ return DecodeT2LoadLabel(Inst, Insn, Address, Decoder);
+ }
+
+ if (Rt == 15) {
+ switch (Inst.getOpcode()) {
+ case ARM::t2LDRSHi12:
+ return MCDisassembler::Fail;
+ case ARM::t2LDRHi12:
+ Inst.setOpcode(ARM::t2PLDi12);
+ break;
+ default:
+ break;
+ }
+ }
+
+ switch (Inst.getOpcode()) {
+ case ARM::t2PLDi12:
+ case ARM::t2PLDWi12:
+ case ARM::t2PLIi12:
+ break;
+ default:
+ if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
+ }
+
+ if (!Check(S, DecodeT2AddrModeImm12(Inst, imm, Address, Decoder)))
+ return MCDisassembler::Fail;
+ return S;
+}
+
+static DecodeStatus DecodeT2LoadT(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void* Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned imm = fieldFromInstruction(Insn, 0, 8);
+ imm |= (Rn << 9);
+
+ if (Rn == 15) {
+ switch (Inst.getOpcode()) {
+ case ARM::t2LDRT:
+ Inst.setOpcode(ARM::t2LDRpci);
+ break;
+ case ARM::t2LDRBT:
+ Inst.setOpcode(ARM::t2LDRBpci);
+ break;
+ case ARM::t2LDRHT:
+ Inst.setOpcode(ARM::t2LDRHpci);
+ break;
+ case ARM::t2LDRSBT:
+ Inst.setOpcode(ARM::t2LDRSBpci);
+ break;
+ case ARM::t2LDRSHT:
+ Inst.setOpcode(ARM::t2LDRSHpci);
+ break;
+ default:
+ return MCDisassembler::Fail;
+ }
+ return DecodeT2LoadLabel(Inst, Insn, Address, Decoder);
+ }
+
+ if (!Check(S, DecoderGPRRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeT2AddrModeImm8(Inst, imm, Address, Decoder)))
+ return MCDisassembler::Fail;
+ return S;
+}
+
+static DecodeStatus DecodeT2LoadLabel(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void* Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned U = fieldFromInstruction(Insn, 23, 1);
+ int imm = fieldFromInstruction(Insn, 0, 12);
+
+ if (Rt == 15) {
+ switch (Inst.getOpcode()) {
+ case ARM::t2LDRBpci:
+ case ARM::t2LDRHpci:
+ Inst.setOpcode(ARM::t2PLDpci);
break;
- case ARM::t2PLDs:
- Inst.setOpcode(ARM::t2PLDi12);
- Inst.addOperand(MCOperand::CreateReg(ARM::PC));
+ case ARM::t2LDRSBpci:
+ Inst.setOpcode(ARM::t2PLIpci);
break;
- default:
+ case ARM::t2LDRSHpci:
return MCDisassembler::Fail;
+ default:
+ break;
}
+ }
- int imm = fieldFromInstruction(Insn, 0, 12);
- if (!fieldFromInstruction(Insn, 23, 1)) imm *= -1;
- Inst.addOperand(MCOperand::CreateImm(imm));
-
- return S;
+ switch(Inst.getOpcode()) {
+ case ARM::t2PLDpci:
+ case ARM::t2PLIpci:
+ break;
+ default:
+ if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
}
- unsigned addrmode = fieldFromInstruction(Insn, 4, 2);
- addrmode |= fieldFromInstruction(Insn, 0, 4) << 2;
- addrmode |= fieldFromInstruction(Insn, 16, 4) << 6;
- if (!Check(S, DecodeT2AddrModeSOReg(Inst, addrmode, Address, Decoder)))
- return MCDisassembler::Fail;
+ if (!U) {
+ // Special case for #-0.
+ if (imm == 0)
+ imm = INT32_MIN;
+ else
+ imm = -imm;
+ }
+ Inst.addOperand(MCOperand::CreateImm(imm));
return S;
}
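A note on the special case just above: with U=0 an offset of zero encodes "#-0", which is distinct from "+0", so the decoder carries it as INT32_MIN and the printer (see printThumbLdrLabelOperand later in this diff) maps that sentinel back to "#-0". A minimal sketch of the convention, with standalone names chosen for illustration:

#include <cstdint>
#include <cstdio>

// Sketch only: mirror the decoder's sentinel (U=0, imm=0 -> INT32_MIN) and the
// printer's handling of it.
static int32_t decodeOffset(bool U, uint32_t imm) {
  if (U)
    return (int32_t)imm;
  return imm == 0 ? INT32_MIN : -(int32_t)imm;
}

static void printOffset(int32_t OffImm) {
  if (OffImm == INT32_MIN)
    std::printf("#-0");
  else if (OffImm < 0)
    std::printf("#-%d", -OffImm);
  else
    std::printf("#%d", OffImm);
}

int main() {
  printOffset(decodeOffset(/*U=*/false, /*imm=*/0)); // prints "#-0"
  std::printf("\n");
  return 0;
}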
@@ -3243,6 +3604,21 @@ static DecodeStatus DecodeT2AddrModeImm8(MCInst &Inst, unsigned Val,
unsigned Rn = fieldFromInstruction(Val, 9, 4);
unsigned imm = fieldFromInstruction(Val, 0, 9);
+ // Thumb stores cannot use PC as dest register.
+ switch (Inst.getOpcode()) {
+ case ARM::t2STRT:
+ case ARM::t2STRBT:
+ case ARM::t2STRHT:
+ case ARM::t2STRi8:
+ case ARM::t2STRHi8:
+ case ARM::t2STRBi8:
+ if (Rn == 15)
+ return MCDisassembler::Fail;
+ break;
+ default:
+ break;
+ }
+
// Some instructions always use an additive offset.
switch (Inst.getOpcode()) {
case ARM::t2LDRT:
@@ -3278,6 +3654,37 @@ static DecodeStatus DecodeT2LdStPre(MCInst &Inst, unsigned Insn,
addr |= Rn << 9;
unsigned load = fieldFromInstruction(Insn, 20, 1);
+ if (Rn == 15) {
+ switch (Inst.getOpcode()) {
+ case ARM::t2LDR_PRE:
+ case ARM::t2LDR_POST:
+ Inst.setOpcode(ARM::t2LDRpci);
+ break;
+ case ARM::t2LDRB_PRE:
+ case ARM::t2LDRB_POST:
+ Inst.setOpcode(ARM::t2LDRBpci);
+ break;
+ case ARM::t2LDRH_PRE:
+ case ARM::t2LDRH_POST:
+ Inst.setOpcode(ARM::t2LDRHpci);
+ break;
+ case ARM::t2LDRSB_PRE:
+ case ARM::t2LDRSB_POST:
+ if (Rt == 15)
+ Inst.setOpcode(ARM::t2PLIpci);
+ else
+ Inst.setOpcode(ARM::t2LDRSBpci);
+ break;
+ case ARM::t2LDRSH_PRE:
+ case ARM::t2LDRSH_POST:
+ Inst.setOpcode(ARM::t2LDRSHpci);
+ break;
+ default:
+ return MCDisassembler::Fail;
+ }
+ return DecodeT2LoadLabel(Inst, Insn, Address, Decoder);
+ }
+
if (!load) {
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3304,6 +3711,17 @@ static DecodeStatus DecodeT2AddrModeImm12(MCInst &Inst, unsigned Val,
unsigned Rn = fieldFromInstruction(Val, 13, 4);
unsigned imm = fieldFromInstruction(Val, 0, 12);
+ // Thumb stores cannot use PC as dest register.
+ switch (Inst.getOpcode()) {
+ case ARM::t2STRi12:
+ case ARM::t2STRBi12:
+ case ARM::t2STRHi12:
+ if (Rn == 15)
+ return MCDisassembler::Fail;
+ default:
+ break;
+ }
+
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
Inst.addOperand(MCOperand::CreateImm(imm));
@@ -3401,6 +3819,11 @@ static DecodeStatus DecodeCoprocessor(MCInst &Inst, unsigned Val,
if (Val == 0xA || Val == 0xB)
return MCDisassembler::Fail;
+ uint64_t featureBits = ((const MCDisassembler*)Decoder)->getSubtargetInfo()
+ .getFeatureBits();
+ if ((featureBits & ARM::HasV8Ops) && !(Val == 14 || Val == 15))
+ return MCDisassembler::Fail;
+
Inst.addOperand(MCOperand::CreateImm(Val));
return MCDisassembler::Success;
}
@@ -3536,6 +3959,15 @@ static DecodeStatus DecodeMemBarrierOption(MCInst &Inst, unsigned Val,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeInstSyncBarrierOption(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder) {
+ if (Val & ~0xf)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateImm(Val));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeMSRMask(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
if (!Val) return MCDisassembler::Fail;
@@ -3551,11 +3983,10 @@ static DecodeStatus DecodeDoubleRegLoad(MCInst &Inst, unsigned Insn,
unsigned Rn = fieldFromInstruction(Insn, 16, 4);
unsigned pred = fieldFromInstruction(Insn, 28, 4);
- if ((Rt & 1) || Rt == 0xE || Rn == 0xF) return MCDisassembler::Fail;
+ if (Rn == 0xF)
+ S = MCDisassembler::SoftFail;
- if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
- return MCDisassembler::Fail;
- if (!Check(S, DecodeGPRRegisterClass(Inst, Rt+1, Address, Decoder)))
+ if (!Check(S, DecodeGPRPairRegisterClass(Inst, Rt, Address, Decoder)))
return MCDisassembler::Fail;
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3565,7 +3996,6 @@ static DecodeStatus DecodeDoubleRegLoad(MCInst &Inst, unsigned Insn,
return S;
}
-
static DecodeStatus DecodeDoubleRegStore(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder){
DecodeStatus S = MCDisassembler::Success;
@@ -3578,12 +4008,10 @@ static DecodeStatus DecodeDoubleRegStore(MCInst &Inst, unsigned Insn,
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rd, Address, Decoder)))
return MCDisassembler::Fail;
- if ((Rt & 1) || Rt == 0xE || Rn == 0xF) return MCDisassembler::Fail;
- if (Rd == Rn || Rd == Rt || Rd == Rt+1) return MCDisassembler::Fail;
+ if (Rn == 0xF || Rd == Rn || Rd == Rt || Rd == Rt+1)
+ S = MCDisassembler::SoftFail;
- if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
- return MCDisassembler::Fail;
- if (!Check(S, DecodeGPRRegisterClass(Inst, Rt+1, Address, Decoder)))
+ if (!Check(S, DecodeGPRPairRegisterClass(Inst, Rt, Address, Decoder)))
return MCDisassembler::Fail;
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -4310,10 +4738,8 @@ static DecodeStatus DecodeIT(MCInst &Inst, unsigned Insn,
S = MCDisassembler::SoftFail;
}
- if (mask == 0x0) {
- mask |= 0x8;
- S = MCDisassembler::SoftFail;
- }
+ if (mask == 0x0)
+ return MCDisassembler::Fail;
Inst.addOperand(MCOperand::CreateImm(pred));
Inst.addOperand(MCOperand::CreateImm(mask));
@@ -4453,16 +4879,18 @@ static DecodeStatus DecodeVCVTD(MCInst &Inst, unsigned Insn,
Vm |= (fieldFromInstruction(Insn, 5, 1) << 4);
unsigned imm = fieldFromInstruction(Insn, 16, 6);
unsigned cmode = fieldFromInstruction(Insn, 8, 4);
+ unsigned op = fieldFromInstruction(Insn, 5, 1);
DecodeStatus S = MCDisassembler::Success;
// VMOVv2f32 is ambiguous with these decodings.
if (!(imm & 0x38) && cmode == 0xF) {
+ if (op == 1) return MCDisassembler::Fail;
Inst.setOpcode(ARM::VMOVv2f32);
return DecodeNEONModImmInstruction(Inst, Insn, Address, Decoder);
}
- if (!(imm & 0x20)) Check(S, MCDisassembler::SoftFail);
+ if (!(imm & 0x20)) return MCDisassembler::Fail;
if (!Check(S, DecodeDPRRegisterClass(Inst, Vd, Address, Decoder)))
return MCDisassembler::Fail;
@@ -4481,16 +4909,18 @@ static DecodeStatus DecodeVCVTQ(MCInst &Inst, unsigned Insn,
Vm |= (fieldFromInstruction(Insn, 5, 1) << 4);
unsigned imm = fieldFromInstruction(Insn, 16, 6);
unsigned cmode = fieldFromInstruction(Insn, 8, 4);
+ unsigned op = fieldFromInstruction(Insn, 5, 1);
DecodeStatus S = MCDisassembler::Success;
// VMOVv4f32 is ambiguous with these decodings.
if (!(imm & 0x38) && cmode == 0xF) {
+ if (op == 1) return MCDisassembler::Fail;
Inst.setOpcode(ARM::VMOVv4f32);
return DecodeNEONModImmInstruction(Inst, Insn, Address, Decoder);
}
- if (!(imm & 0x20)) Check(S, MCDisassembler::SoftFail);
+ if (!(imm & 0x20)) return MCDisassembler::Fail;
if (!Check(S, DecodeQPRRegisterClass(Inst, Vd, Address, Decoder)))
return MCDisassembler::Fail;
@@ -4501,15 +4931,6 @@ static DecodeStatus DecodeVCVTQ(MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeImm0_4(MCInst &Inst, unsigned Insn, uint64_t Address,
- const void *Decoder)
-{
- unsigned Imm = fieldFromInstruction(Insn, 0, 3);
- if (Imm > 4) return MCDisassembler::Fail;
- Inst.addOperand(MCOperand::CreateImm(Imm));
- return MCDisassembler::Success;
-}
-
static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index 3bcd083a35fb..f89702853d41 100644
--- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
+++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -76,14 +76,23 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
StringRef Annot) {
unsigned Opcode = MI->getOpcode();
+ switch(Opcode) {
+
// Check for HINT instructions w/ canonical names.
- if (Opcode == ARM::HINT || Opcode == ARM::t2HINT) {
+ case ARM::HINT:
+ case ARM::tHINT:
+ case ARM::t2HINT:
switch (MI->getOperand(0).getImm()) {
case 0: O << "\tnop"; break;
case 1: O << "\tyield"; break;
case 2: O << "\twfe"; break;
case 3: O << "\twfi"; break;
case 4: O << "\tsev"; break;
+ case 5:
+ if ((getAvailableFeatures() & ARM::HasV8Ops)) {
+ O << "\tsevl";
+ break;
+ } // Fallthrough for non-v8
default:
// Anything else should just print normally.
printInstruction(MI, O);
@@ -95,10 +104,9 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
O << ".w";
printAnnotation(O, Annot);
return;
- }
// Check for MOVs and print canonical forms, instead.
- if (Opcode == ARM::MOVsr) {
+ case ARM::MOVsr: {
// FIXME: Thumb variants?
const MCOperand &Dst = MI->getOperand(0);
const MCOperand &MO1 = MI->getOperand(1);
@@ -121,7 +129,7 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
return;
}
- if (Opcode == ARM::MOVsi) {
+ case ARM::MOVsi: {
// FIXME: Thumb variants?
const MCOperand &Dst = MI->getOperand(0);
const MCOperand &MO1 = MI->getOperand(1);
@@ -149,81 +157,91 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
return;
}
-
// A8.6.123 PUSH
- if ((Opcode == ARM::STMDB_UPD || Opcode == ARM::t2STMDB_UPD) &&
- MI->getOperand(0).getReg() == ARM::SP &&
- MI->getNumOperands() > 5) {
- // Should only print PUSH if there are at least two registers in the list.
- O << '\t' << "push";
- printPredicateOperand(MI, 2, O);
- if (Opcode == ARM::t2STMDB_UPD)
- O << ".w";
- O << '\t';
- printRegisterList(MI, 4, O);
- printAnnotation(O, Annot);
- return;
- }
- if (Opcode == ARM::STR_PRE_IMM && MI->getOperand(2).getReg() == ARM::SP &&
- MI->getOperand(3).getImm() == -4) {
- O << '\t' << "push";
- printPredicateOperand(MI, 4, O);
- O << "\t{";
- printRegName(O, MI->getOperand(1).getReg());
- O << "}";
- printAnnotation(O, Annot);
- return;
- }
+ case ARM::STMDB_UPD:
+ case ARM::t2STMDB_UPD:
+ if (MI->getOperand(0).getReg() == ARM::SP && MI->getNumOperands() > 5) {
+ // Should only print PUSH if there are at least two registers in the list.
+ O << '\t' << "push";
+ printPredicateOperand(MI, 2, O);
+ if (Opcode == ARM::t2STMDB_UPD)
+ O << ".w";
+ O << '\t';
+ printRegisterList(MI, 4, O);
+ printAnnotation(O, Annot);
+ return;
+ } else
+ break;
+
+ case ARM::STR_PRE_IMM:
+ if (MI->getOperand(2).getReg() == ARM::SP &&
+ MI->getOperand(3).getImm() == -4) {
+ O << '\t' << "push";
+ printPredicateOperand(MI, 4, O);
+ O << "\t{";
+ printRegName(O, MI->getOperand(1).getReg());
+ O << "}";
+ printAnnotation(O, Annot);
+ return;
+ } else
+ break;
// A8.6.122 POP
- if ((Opcode == ARM::LDMIA_UPD || Opcode == ARM::t2LDMIA_UPD) &&
- MI->getOperand(0).getReg() == ARM::SP &&
- MI->getNumOperands() > 5) {
- // Should only print POP if there are at least two registers in the list.
- O << '\t' << "pop";
- printPredicateOperand(MI, 2, O);
- if (Opcode == ARM::t2LDMIA_UPD)
- O << ".w";
- O << '\t';
- printRegisterList(MI, 4, O);
- printAnnotation(O, Annot);
- return;
- }
- if (Opcode == ARM::LDR_POST_IMM && MI->getOperand(2).getReg() == ARM::SP &&
- MI->getOperand(4).getImm() == 4) {
- O << '\t' << "pop";
- printPredicateOperand(MI, 5, O);
- O << "\t{";
- printRegName(O, MI->getOperand(0).getReg());
- O << "}";
- printAnnotation(O, Annot);
- return;
- }
-
+ case ARM::LDMIA_UPD:
+ case ARM::t2LDMIA_UPD:
+ if (MI->getOperand(0).getReg() == ARM::SP && MI->getNumOperands() > 5) {
+ // Should only print POP if there are at least two registers in the list.
+ O << '\t' << "pop";
+ printPredicateOperand(MI, 2, O);
+ if (Opcode == ARM::t2LDMIA_UPD)
+ O << ".w";
+ O << '\t';
+ printRegisterList(MI, 4, O);
+ printAnnotation(O, Annot);
+ return;
+ } else
+ break;
+
+ case ARM::LDR_POST_IMM:
+ if (MI->getOperand(2).getReg() == ARM::SP &&
+ MI->getOperand(4).getImm() == 4) {
+ O << '\t' << "pop";
+ printPredicateOperand(MI, 5, O);
+ O << "\t{";
+ printRegName(O, MI->getOperand(0).getReg());
+ O << "}";
+ printAnnotation(O, Annot);
+ return;
+ } else
+ break;
// A8.6.355 VPUSH
- if ((Opcode == ARM::VSTMSDB_UPD || Opcode == ARM::VSTMDDB_UPD) &&
- MI->getOperand(0).getReg() == ARM::SP) {
- O << '\t' << "vpush";
- printPredicateOperand(MI, 2, O);
- O << '\t';
- printRegisterList(MI, 4, O);
- printAnnotation(O, Annot);
- return;
- }
+ case ARM::VSTMSDB_UPD:
+ case ARM::VSTMDDB_UPD:
+ if (MI->getOperand(0).getReg() == ARM::SP) {
+ O << '\t' << "vpush";
+ printPredicateOperand(MI, 2, O);
+ O << '\t';
+ printRegisterList(MI, 4, O);
+ printAnnotation(O, Annot);
+ return;
+ } else
+ break;
// A8.6.354 VPOP
- if ((Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMDIA_UPD) &&
- MI->getOperand(0).getReg() == ARM::SP) {
- O << '\t' << "vpop";
- printPredicateOperand(MI, 2, O);
- O << '\t';
- printRegisterList(MI, 4, O);
- printAnnotation(O, Annot);
- return;
- }
+ case ARM::VLDMSIA_UPD:
+ case ARM::VLDMDIA_UPD:
+ if (MI->getOperand(0).getReg() == ARM::SP) {
+ O << '\t' << "vpop";
+ printPredicateOperand(MI, 2, O);
+ O << '\t';
+ printRegisterList(MI, 4, O);
+ printAnnotation(O, Annot);
+ return;
+ } else
+ break;
- if (Opcode == ARM::tLDMIA) {
+ case ARM::tLDMIA: {
bool Writeback = true;
unsigned BaseReg = MI->getOperand(0).getReg();
for (unsigned i = 3; i < MI->getNumOperands(); ++i) {
@@ -243,24 +261,16 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
return;
}
- // Thumb1 NOP
- if (Opcode == ARM::tMOVr && MI->getOperand(0).getReg() == ARM::R8 &&
- MI->getOperand(1).getReg() == ARM::R8) {
- O << "\tnop";
- printPredicateOperand(MI, 2, O);
- printAnnotation(O, Annot);
- return;
- }
-
// Combine 2 GPRs from the disassembler into a GPRPair to match with instr def.
// ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
// GPRs. However, when decoding them, the two GPRs cannot be automatically
// GPRs. However, when decoding them, the two GRPs cannot be automatically
// expressed as a GPRPair, so we have to manually merge them.
// FIXME: We would really like to be able to tablegen'erate this.
- if (Opcode == ARM::LDREXD || Opcode == ARM::STREXD) {
+ case ARM::LDREXD: case ARM::STREXD:
+ case ARM::LDAEXD: case ARM::STLEXD:
const MCRegisterClass& MRC = MRI.getRegClass(ARM::GPRRegClassID);
- bool isStore = Opcode == ARM::STREXD;
+ bool isStore = Opcode == ARM::STREXD || Opcode == ARM::STLEXD;
unsigned Reg = MI->getOperand(isStore ? 1 : 0).getReg();
if (MRC.contains(Reg)) {
MCInst NewMI;
@@ -315,15 +325,29 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
void ARMInstPrinter::printThumbLdrLabelOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(OpNum);
- if (MO1.isExpr())
+ if (MO1.isExpr()) {
O << *MO1.getExpr();
- else if (MO1.isImm()) {
- O << markup("<mem:") << "[pc, "
- << markup("<imm:") << "#" << formatImm(MO1.getImm())
- << markup(">]>", "]");
+ return;
}
- else
- llvm_unreachable("Unknown LDR label operand?");
+
+ O << markup("<mem:") << "[pc, ";
+
+ int32_t OffImm = (int32_t)MO1.getImm();
+ bool isSub = OffImm < 0;
+
+ // Special value for #-0. All others are normal.
+ if (OffImm == INT32_MIN)
+ OffImm = 0;
+ if (isSub) {
+ O << markup("<imm:")
+ << "#-" << formatImm(-OffImm)
+ << markup(">");
+ } else {
+ O << markup("<imm:")
+ << "#" << formatImm(OffImm)
+ << markup(">");
+ }
+ O << "]" << markup(">");
}
// so_reg is a 4-operand unit corresponding to register forms of the A5.1
@@ -660,8 +684,8 @@ void ARMInstPrinter::printBitfieldInvMaskImmOperand(const MCInst *MI,
raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
uint32_t v = ~MO.getImm();
- int32_t lsb = CountTrailingZeros_32(v);
- int32_t width = (32 - CountLeadingZeros_32 (v)) - lsb;
+ int32_t lsb = countTrailingZeros(v);
+ int32_t width = (32 - countLeadingZeros (v)) - lsb;
assert(MO.isImm() && "Not a valid bf_inv_mask_imm value!");
O << markup("<imm:") << '#' << lsb << markup(">")
<< ", "
@@ -671,7 +695,13 @@ void ARMInstPrinter::printBitfieldInvMaskImmOperand(const MCInst *MI,
void ARMInstPrinter::printMemBOption(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
unsigned val = MI->getOperand(OpNum).getImm();
- O << ARM_MB::MemBOptToString(val);
+ O << ARM_MB::MemBOptToString(val, (getAvailableFeatures() & ARM::HasV8Ops));
+}
+
+void ARMInstPrinter::printInstSyncBOption(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ unsigned val = MI->getOperand(OpNum).getImm();
+ O << ARM_ISB::InstSyncBOptToString(val);
}
void ARMInstPrinter::printShiftImmOperand(const MCInst *MI, unsigned OpNum,
@@ -889,6 +919,7 @@ void ARMInstPrinter::printPCLabel(const MCInst *MI, unsigned OpNum,
llvm_unreachable("Unhandled PC-relative pseudo-instruction!");
}
+template<unsigned scale>
void ARMInstPrinter::printAdrLabelOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
@@ -898,7 +929,7 @@ void ARMInstPrinter::printAdrLabelOperand(const MCInst *MI, unsigned OpNum,
return;
}
- int32_t OffImm = (int32_t)MO.getImm();
+ int32_t OffImm = (int32_t)MO.getImm() << scale;
O << markup("<imm:");
if (OffImm == INT32_MIN)
@@ -931,7 +962,7 @@ void ARMInstPrinter::printThumbITMask(const MCInst *MI, unsigned OpNum,
unsigned Mask = MI->getOperand(OpNum).getImm();
unsigned Firstcond = MI->getOperand(OpNum-1).getImm();
unsigned CondBit0 = Firstcond & 1;
- unsigned NumTZ = CountTrailingZeros_32(Mask);
+ unsigned NumTZ = countTrailingZeros(Mask);
assert(NumTZ <= 3 && "Invalid IT mask!");
for (unsigned Pos = 3, e = NumTZ; Pos > e; --Pos) {
bool T = ((Mask >> Pos) & 1) == CondBit0;
@@ -1059,6 +1090,7 @@ void ARMInstPrinter::printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum,
O << "]" << markup(">");
}
+template<bool AlwaysPrintImm0>
void ARMInstPrinter::printT2AddrModeImm8Operand(const MCInst *MI,
unsigned OpNum,
raw_ostream &O) {
@@ -1069,22 +1101,25 @@ void ARMInstPrinter::printT2AddrModeImm8Operand(const MCInst *MI,
printRegName(O, MO1.getReg());
int32_t OffImm = (int32_t)MO2.getImm();
+ bool isSub = OffImm < 0;
// Don't print +0.
- if (OffImm != 0)
- O << ", ";
- if (OffImm != 0 && UseMarkup)
- O << "<imm:";
if (OffImm == INT32_MIN)
- O << "#-0";
- else if (OffImm < 0)
- O << "#-" << -OffImm;
- else if (OffImm > 0)
- O << "#" << OffImm;
- if (OffImm != 0 && UseMarkup)
- O << ">";
+ OffImm = 0;
+ if (isSub) {
+ O << ", "
+ << markup("<imm:")
+ << "#-" << -OffImm
+ << markup(">");
+ } else if (AlwaysPrintImm0 || OffImm > 0) {
+ O << ", "
+ << markup("<imm:")
+ << "#" << OffImm
+ << markup(">");
+ }
O << "]" << markup(">");
}
+template<bool AlwaysPrintImm0>
void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI,
unsigned OpNum,
raw_ostream &O) {
@@ -1100,22 +1135,24 @@ void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI,
printRegName(O, MO1.getReg());
int32_t OffImm = (int32_t)MO2.getImm();
+ bool isSub = OffImm < 0;
assert(((OffImm & 0x3) == 0) && "Not a valid immediate!");
// Don't print +0.
- if (OffImm != 0)
- O << ", ";
- if (OffImm != 0 && UseMarkup)
- O << "<imm:";
if (OffImm == INT32_MIN)
- O << "#-0";
- else if (OffImm < 0)
- O << "#-" << -OffImm;
- else if (OffImm > 0)
- O << "#" << OffImm;
- if (OffImm != 0 && UseMarkup)
- O << ">";
+ OffImm = 0;
+ if (isSub) {
+ O << ", "
+ << markup("<imm:")
+ << "#-" << -OffImm
+ << markup(">");
+ } else if (AlwaysPrintImm0 || OffImm > 0) {
+ O << ", "
+ << markup("<imm:")
+ << "#" << OffImm
+ << markup(">");
+ }
O << "]" << markup(">");
}
@@ -1142,7 +1179,9 @@ void ARMInstPrinter::printT2AddrModeImm8OffsetOperand(const MCInst *MI,
const MCOperand &MO1 = MI->getOperand(OpNum);
int32_t OffImm = (int32_t)MO1.getImm();
O << ", " << markup("<imm:");
- if (OffImm < 0)
+ if (OffImm == INT32_MIN)
+ O << "#-0";
+ else if (OffImm < 0)
O << "#-" << -OffImm;
else
O << "#" << OffImm;
@@ -1157,19 +1196,14 @@ void ARMInstPrinter::printT2AddrModeImm8s4OffsetOperand(const MCInst *MI,
assert(((OffImm & 0x3) == 0) && "Not a valid immediate!");
- // Don't print +0.
- if (OffImm != 0)
- O << ", ";
- if (OffImm != 0 && UseMarkup)
- O << "<imm:";
+ O << ", " << markup("<imm:");
if (OffImm == INT32_MIN)
O << "#-0";
else if (OffImm < 0)
O << "#-" << -OffImm;
- else if (OffImm > 0)
+ else
O << "#" << OffImm;
- if (OffImm != 0 && UseMarkup)
- O << ">";
+ O << markup(">");
}
void ARMInstPrinter::printT2AddrModeSoRegOperand(const MCInst *MI,
diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
index 344104e87359..15ae8d1267f7 100644
--- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
+++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
@@ -71,10 +71,12 @@ public:
void printBitfieldInvMaskImmOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
void printMemBOption(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printInstSyncBOption(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printShiftImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printPKHLSLShiftImm(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printPKHASRShiftImm(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ template <unsigned scale>
void printAdrLabelOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printThumbSRImm(const MCInst *MI, unsigned OpNum, raw_ostream &O);
@@ -96,8 +98,10 @@ public:
template<bool AlwaysPrintImm0>
void printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
+ template<bool AlwaysPrintImm0>
void printT2AddrModeImm8Operand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
+ template<bool AlwaysPrintImm0>
void printT2AddrModeImm8s4Operand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
void printT2AddrModeImm0_1020s4Operand(const MCInst *MI, unsigned OpNum,
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h b/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h
index 62473b2bfdee..b6c85c2e9466 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h
@@ -140,7 +140,7 @@ namespace ARM_AM {
if ((Imm & ~255U) == 0) return 0;
// Use CTZ to compute the rotate amount.
- unsigned TZ = CountTrailingZeros_32(Imm);
+ unsigned TZ = countTrailingZeros(Imm);
// Rotate amount must be even. Something like 0x200 must be rotated 8 bits,
// not 9.
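A worked example of the even-rotation rule above: 0x200 has its lowest set bit at position 9, the rotate amount is rounded down to 8, and the function returns (32 - 8) & 31 = 24, i.e. the immediate is encoded as 0x02 rotated right by 24. A tiny standalone check with a local rotate helper (illustrative name, not the in-tree one):

#include <cassert>
#include <cstdint>

static uint32_t rotr32Sketch(uint32_t V, unsigned Amt) {
  Amt &= 31;
  return Amt ? (V >> Amt) | (V << (32 - Amt)) : V;
}

int main() {
  unsigned TZ = 9;                        // countTrailingZeros(0x200)
  unsigned RotAmt = TZ & ~1u;             // rotate amount must be even: 8
  unsigned Encoded = (32 - RotAmt) & 31;  // 24, the ROR amount in the encoding
  assert(rotr32Sketch(0x02, Encoded) == 0x200);
  return 0;
}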
@@ -153,7 +153,7 @@ namespace ARM_AM {
// For values like 0xF000000F, we should ignore the low 6 bits, then
// retry the hunt.
if (Imm & 63U) {
- unsigned TZ2 = CountTrailingZeros_32(Imm & ~63U);
+ unsigned TZ2 = countTrailingZeros(Imm & ~63U);
unsigned RotAmt2 = TZ2 & ~1;
if ((rotr32(Imm, RotAmt2) & ~255U) == 0)
return (32-RotAmt2)&31; // HW rotates right, not left.
@@ -221,7 +221,7 @@ namespace ARM_AM {
if ((Imm & ~255U) == 0) return 0;
// Use CTZ to compute the shift amount.
- return CountTrailingZeros_32(Imm);
+ return countTrailingZeros(Imm);
}
/// isThumbImmShiftedVal - Return true if the specified value can be obtained
@@ -240,7 +240,7 @@ namespace ARM_AM {
if ((Imm & ~65535U) == 0) return 0;
// Use CTZ to compute the shift amount.
- return CountTrailingZeros_32(Imm);
+ return countTrailingZeros(Imm);
}
/// isThumbImm16ShiftedVal - Return true if the specified value can be
@@ -296,7 +296,7 @@ namespace ARM_AM {
/// encoding is possible.
/// See ARM Reference Manual A6.3.2.
static inline int getT2SOImmValRotateVal(unsigned V) {
- unsigned RotAmt = CountLeadingZeros_32(V);
+ unsigned RotAmt = countLeadingZeros(V);
if (RotAmt >= 24)
return -1;
@@ -328,7 +328,7 @@ namespace ARM_AM {
static inline unsigned getT2SOImmValRotate(unsigned V) {
if ((V & ~255U) == 0) return 0;
// Use CTZ to compute the rotate amount.
- unsigned RotAmt = CountTrailingZeros_32(V);
+ unsigned RotAmt = countTrailingZeros(V);
return (32 - RotAmt) & 31;
}
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index e66e98567873..5615b808fc11 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -25,9 +25,9 @@
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
-#include "llvm/Object/MachOFormat.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachO.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -152,7 +152,7 @@ static unsigned getRelaxedOpcode(unsigned Op) {
switch (Op) {
default: return Op;
case ARM::tBcc: return ARM::t2Bcc;
- case ARM::tLDRpciASM: return ARM::t2LDRpci;
+ case ARM::tLDRpci: return ARM::t2LDRpci;
case ARM::tADR: return ARM::t2ADR;
case ARM::tB: return ARM::t2B;
}
@@ -419,7 +419,7 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
uint32_t imm11Bits = (offset & 0x000007FF);
-
+
uint32_t Binary = 0;
uint32_t firstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
uint32_t secondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
@@ -434,8 +434,8 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
// four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
// imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
// where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
- // The value is encoded into disjoint bit positions in the destination
- // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
+ // The value is encoded into disjoint bit positions in the destination
+ // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
// J = either J1 or J2 bit, 0 = zero.
//
// BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
@@ -450,10 +450,10 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
uint32_t imm10LBits = (offset & 0x3FF);
-
+
uint32_t Binary = 0;
uint32_t firstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
- uint32_t secondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
+ uint32_t secondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
((uint16_t)imm10LBits) << 1);
Binary |= secondHalf << 16;
Binary |= firstHalf;
@@ -640,16 +640,16 @@ public:
// FIXME: This should be in a separate file.
class DarwinARMAsmBackend : public ARMAsmBackend {
public:
- const object::mach::CPUSubtypeARM Subtype;
+ const MachO::CPUSubTypeARM Subtype;
DarwinARMAsmBackend(const Target &T, const StringRef TT,
- object::mach::CPUSubtypeARM st)
+ MachO::CPUSubTypeARM st)
: ARMAsmBackend(T, TT), Subtype(st) {
HasDataInCodeSupport = true;
}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
return createARMMachObjectWriter(OS, /*Is64Bit=*/false,
- object::mach::CTM_ARM,
+ MachO::CPU_TYPE_ARM,
Subtype);
}
@@ -660,28 +660,33 @@ public:
} // end anonymous namespace
-MCAsmBackend *llvm::createARMAsmBackend(const Target &T, StringRef TT, StringRef CPU) {
+MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
Triple TheTriple(TT);
if (TheTriple.isOSDarwin()) {
- object::mach::CPUSubtypeARM CS =
- StringSwitch<object::mach::CPUSubtypeARM>(TheTriple.getArchName())
- .Cases("armv4t", "thumbv4t", object::mach::CSARM_V4T)
- .Cases("armv5e", "thumbv5e",object::mach::CSARM_V5TEJ)
- .Cases("armv6", "thumbv6", object::mach::CSARM_V6)
- .Cases("armv6m", "thumbv6m", object::mach::CSARM_V6M)
- .Cases("armv7em", "thumbv7em", object::mach::CSARM_V7EM)
- .Cases("armv7f", "thumbv7f", object::mach::CSARM_V7F)
- .Cases("armv7k", "thumbv7k", object::mach::CSARM_V7K)
- .Cases("armv7m", "thumbv7m", object::mach::CSARM_V7M)
- .Cases("armv7s", "thumbv7s", object::mach::CSARM_V7S)
- .Default(object::mach::CSARM_V7);
+ MachO::CPUSubTypeARM CS =
+ StringSwitch<MachO::CPUSubTypeARM>(TheTriple.getArchName())
+ .Cases("armv4t", "thumbv4t", MachO::CPU_SUBTYPE_ARM_V4T)
+ .Cases("armv5e", "thumbv5e", MachO::CPU_SUBTYPE_ARM_V5TEJ)
+ .Cases("armv6", "thumbv6", MachO::CPU_SUBTYPE_ARM_V6)
+ .Cases("armv6m", "thumbv6m", MachO::CPU_SUBTYPE_ARM_V6M)
+ .Cases("armv7em", "thumbv7em", MachO::CPU_SUBTYPE_ARM_V7EM)
+ .Cases("armv7f", "thumbv7f", MachO::CPU_SUBTYPE_ARM_V7F)
+ .Cases("armv7k", "thumbv7k", MachO::CPU_SUBTYPE_ARM_V7K)
+ .Cases("armv7m", "thumbv7m", MachO::CPU_SUBTYPE_ARM_V7M)
+ .Cases("armv7s", "thumbv7s", MachO::CPU_SUBTYPE_ARM_V7S)
+ .Default(MachO::CPU_SUBTYPE_ARM_V7);
return new DarwinARMAsmBackend(T, TT, CS);
}
- if (TheTriple.isOSWindows())
+#if 0
+ // FIXME: Introduce yet another checker but assert(0).
+ if (TheTriple.isOSBinFormatCOFF())
assert(0 && "Windows not supported on ARM");
+#endif
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
return new ELFARMAsmBackend(T, TT, OSABI);
diff --git a/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
index de48a0e0f30a..af939fc19129 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
@@ -121,46 +121,89 @@ namespace ARM_MB {
// the option field for memory barrier operations.
enum MemBOpt {
RESERVED_0 = 0,
- RESERVED_1 = 1,
+ OSHLD = 1,
OSHST = 2,
OSH = 3,
RESERVED_4 = 4,
- RESERVED_5 = 5,
+ NSHLD = 5,
NSHST = 6,
NSH = 7,
RESERVED_8 = 8,
- RESERVED_9 = 9,
+ ISHLD = 9,
ISHST = 10,
ISH = 11,
RESERVED_12 = 12,
- RESERVED_13 = 13,
+ LD = 13,
ST = 14,
SY = 15
};
- inline static const char *MemBOptToString(unsigned val) {
+ inline static const char *MemBOptToString(unsigned val, bool HasV8) {
switch (val) {
default: llvm_unreachable("Unknown memory operation");
case SY: return "sy";
case ST: return "st";
- case RESERVED_13: return "#0xd";
+ case LD: return HasV8 ? "ld" : "#0xd";
case RESERVED_12: return "#0xc";
case ISH: return "ish";
case ISHST: return "ishst";
- case RESERVED_9: return "#0x9";
+ case ISHLD: return HasV8 ? "ishld" : "#0x9";
case RESERVED_8: return "#0x8";
case NSH: return "nsh";
case NSHST: return "nshst";
- case RESERVED_5: return "#0x5";
+ case NSHLD: return HasV8 ? "nshld" : "#0x5";
case RESERVED_4: return "#0x4";
case OSH: return "osh";
case OSHST: return "oshst";
- case RESERVED_1: return "#0x1";
+ case OSHLD: return HasV8 ? "oshld" : "#0x1";
case RESERVED_0: return "#0x0";
}
}
} // namespace ARM_MB
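For illustration, the HasV8 parameter added above only changes how the four new load barrier options print; everything else keeps its existing mnemonic. A small usage sketch, assuming the patched ARMBaseInfo.h is on the include path and that ARM_MB lives in the llvm namespace:

#include "ARMBaseInfo.h"
#include <cassert>
#include <cstring>

using namespace llvm;

int main() {
  // v8 spells the new load barriers; pre-v8 falls back to the raw hex form.
  assert(std::strcmp(ARM_MB::MemBOptToString(ARM_MB::LD, true), "ld") == 0);
  assert(std::strcmp(ARM_MB::MemBOptToString(ARM_MB::LD, false), "#0xd") == 0);
  assert(std::strcmp(ARM_MB::MemBOptToString(ARM_MB::ISHLD, true), "ishld") == 0);
  assert(std::strcmp(ARM_MB::MemBOptToString(ARM_MB::SY, false), "sy") == 0);
  return 0;
}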
+namespace ARM_ISB {
+ enum InstSyncBOpt {
+ RESERVED_0 = 0,
+ RESERVED_1 = 1,
+ RESERVED_2 = 2,
+ RESERVED_3 = 3,
+ RESERVED_4 = 4,
+ RESERVED_5 = 5,
+ RESERVED_6 = 6,
+ RESERVED_7 = 7,
+ RESERVED_8 = 8,
+ RESERVED_9 = 9,
+ RESERVED_10 = 10,
+ RESERVED_11 = 11,
+ RESERVED_12 = 12,
+ RESERVED_13 = 13,
+ RESERVED_14 = 14,
+ SY = 15
+ };
+
+ inline static const char *InstSyncBOptToString(unsigned val) {
+ switch (val) {
+ default: llvm_unreachable("Unknown memory operation");
+ case RESERVED_0: return "#0x0";
+ case RESERVED_1: return "#0x1";
+ case RESERVED_2: return "#0x2";
+ case RESERVED_3: return "#0x3";
+ case RESERVED_4: return "#0x4";
+ case RESERVED_5: return "#0x5";
+ case RESERVED_6: return "#0x6";
+ case RESERVED_7: return "#0x7";
+ case RESERVED_8: return "#0x8";
+ case RESERVED_9: return "#0x9";
+ case RESERVED_10: return "#0xa";
+ case RESERVED_11: return "#0xb";
+ case RESERVED_12: return "#0xc";
+ case RESERVED_13: return "#0xd";
+ case RESERVED_14: return "#0xe";
+ case SY: return "sy";
+ }
+ }
+} // namespace ARM_ISB
+
/// isARMLowRegister - Returns true if the register is a low register (r0-r7).
///
static inline bool isARMLowRegister(unsigned Reg) {
diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 6c3d2476688c..471897de5c1c 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -13,6 +13,8 @@
//
//===----------------------------------------------------------------------===//
+#include "ARMBuildAttrs.h"
+#include "ARMFPUName.h"
#include "ARMRegisterInfo.h"
#include "ARMUnwindOp.h"
#include "ARMUnwindOpAsm.h"
@@ -27,6 +29,7 @@
#include "llvm/MC/MCELFSymbolFlags.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
@@ -36,7 +39,9 @@
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
+#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
using namespace llvm;
@@ -45,8 +50,218 @@ static std::string GetAEABIUnwindPersonalityName(unsigned Index) {
return (Twine("__aeabi_unwind_cpp_pr") + Twine(Index)).str();
}
+static const char *GetFPUName(unsigned ID) {
+ switch (ID) {
+ default:
+ llvm_unreachable("Unknown FPU kind");
+ break;
+#define ARM_FPU_NAME(NAME, ID) case ARM::ID: return NAME;
+#include "ARMFPUName.def"
+ }
+ return NULL;
+}
+
namespace {
+class ARMELFStreamer;
+
+class ARMTargetAsmStreamer : public ARMTargetStreamer {
+ formatted_raw_ostream &OS;
+ MCInstPrinter &InstPrinter;
+
+ virtual void emitFnStart();
+ virtual void emitFnEnd();
+ virtual void emitCantUnwind();
+ virtual void emitPersonality(const MCSymbol *Personality);
+ virtual void emitHandlerData();
+ virtual void emitSetFP(unsigned FpReg, unsigned SpReg, int64_t Offset = 0);
+ virtual void emitPad(int64_t Offset);
+ virtual void emitRegSave(const SmallVectorImpl<unsigned> &RegList,
+ bool isVector);
+
+ virtual void switchVendor(StringRef Vendor);
+ virtual void emitAttribute(unsigned Attribute, unsigned Value);
+ virtual void emitTextAttribute(unsigned Attribute, StringRef String);
+ virtual void emitFPU(unsigned FPU);
+ virtual void finishAttributeSection();
+
+public:
+ ARMTargetAsmStreamer(formatted_raw_ostream &OS, MCInstPrinter &InstPrinter);
+};
+
+ARMTargetAsmStreamer::ARMTargetAsmStreamer(formatted_raw_ostream &OS,
+ MCInstPrinter &InstPrinter)
+ : OS(OS), InstPrinter(InstPrinter) {}
+void ARMTargetAsmStreamer::emitFnStart() { OS << "\t.fnstart\n"; }
+void ARMTargetAsmStreamer::emitFnEnd() { OS << "\t.fnend\n"; }
+void ARMTargetAsmStreamer::emitCantUnwind() { OS << "\t.cantunwind\n"; }
+void ARMTargetAsmStreamer::emitPersonality(const MCSymbol *Personality) {
+ OS << "\t.personality " << Personality->getName() << '\n';
+}
+void ARMTargetAsmStreamer::emitHandlerData() { OS << "\t.handlerdata\n"; }
+void ARMTargetAsmStreamer::emitSetFP(unsigned FpReg, unsigned SpReg,
+ int64_t Offset) {
+ OS << "\t.setfp\t";
+ InstPrinter.printRegName(OS, FpReg);
+ OS << ", ";
+ InstPrinter.printRegName(OS, SpReg);
+ if (Offset)
+ OS << ", #" << Offset;
+ OS << '\n';
+}
+void ARMTargetAsmStreamer::emitPad(int64_t Offset) {
+ OS << "\t.pad\t#" << Offset << '\n';
+}
+void ARMTargetAsmStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
+ bool isVector) {
+ assert(RegList.size() && "RegList should not be empty");
+ if (isVector)
+ OS << "\t.vsave\t{";
+ else
+ OS << "\t.save\t{";
+
+ InstPrinter.printRegName(OS, RegList[0]);
+
+ for (unsigned i = 1, e = RegList.size(); i != e; ++i) {
+ OS << ", ";
+ InstPrinter.printRegName(OS, RegList[i]);
+ }
+
+ OS << "}\n";
+}
+void ARMTargetAsmStreamer::switchVendor(StringRef Vendor) {
+}
+void ARMTargetAsmStreamer::emitAttribute(unsigned Attribute, unsigned Value) {
+ OS << "\t.eabi_attribute\t" << Attribute << ", " << Twine(Value) << "\n";
+}
+void ARMTargetAsmStreamer::emitTextAttribute(unsigned Attribute,
+ StringRef String) {
+ switch (Attribute) {
+ default: llvm_unreachable("Unsupported Text attribute in ASM Mode");
+ case ARMBuildAttrs::CPU_name:
+ OS << "\t.cpu\t" << String.lower() << "\n";
+ break;
+ }
+}
+void ARMTargetAsmStreamer::emitFPU(unsigned FPU) {
+ OS << "\t.fpu\t" << GetFPUName(FPU) << "\n";
+}
+void ARMTargetAsmStreamer::finishAttributeSection() {
+}
+
+class ARMTargetELFStreamer : public ARMTargetStreamer {
+private:
+ // This structure holds all attributes, accounting for
+ // their string/numeric value, so we can later emit them
+ // in declaration order, keeping all in the same vector
+ struct AttributeItem {
+ enum {
+ HiddenAttribute = 0,
+ NumericAttribute,
+ TextAttribute
+ } Type;
+ unsigned Tag;
+ unsigned IntValue;
+ StringRef StringValue;
+
+ static bool LessTag(const AttributeItem &LHS, const AttributeItem &RHS) {
+ return (LHS.Tag < RHS.Tag);
+ }
+ };
+
+ StringRef CurrentVendor;
+ unsigned FPU;
+ SmallVector<AttributeItem, 64> Contents;
+
+ const MCSection *AttributeSection;
+
+ // FIXME: this should be in a more generic place, but
+ // getULEBSize() is in MCAsmInfo and will be moved to MCDwarf
+ static size_t getULEBSize(int Value) {
+ size_t Size = 0;
+ do {
+ Value >>= 7;
+ Size += sizeof(int8_t); // Is this really necessary?
+ } while (Value);
+ return Size;
+ }
+
+ AttributeItem *getAttributeItem(unsigned Attribute) {
+ for (size_t i = 0; i < Contents.size(); ++i)
+ if (Contents[i].Tag == Attribute)
+ return &Contents[i];
+ return 0;
+ }
+
+ void setAttributeItem(unsigned Attribute, unsigned Value,
+ bool OverwriteExisting) {
+ // Look for existing attribute item
+ if (AttributeItem *Item = getAttributeItem(Attribute)) {
+ if (!OverwriteExisting)
+ return;
+ Item->IntValue = Value;
+ return;
+ }
+
+ // Create new attribute item
+ AttributeItem Item = {
+ AttributeItem::NumericAttribute,
+ Attribute,
+ Value,
+ StringRef("")
+ };
+ Contents.push_back(Item);
+ }
+
+ void setAttributeItem(unsigned Attribute, StringRef Value,
+ bool OverwriteExisting) {
+ // Look for existing attribute item
+ if (AttributeItem *Item = getAttributeItem(Attribute)) {
+ if (!OverwriteExisting)
+ return;
+ Item->StringValue = Value;
+ return;
+ }
+
+ // Create new attribute item
+ AttributeItem Item = {
+ AttributeItem::TextAttribute,
+ Attribute,
+ 0,
+ Value
+ };
+ Contents.push_back(Item);
+ }
+
+ void emitFPUDefaultAttributes();
+
+ ARMELFStreamer &getStreamer();
+
+ virtual void emitFnStart();
+ virtual void emitFnEnd();
+ virtual void emitCantUnwind();
+ virtual void emitPersonality(const MCSymbol *Personality);
+ virtual void emitHandlerData();
+ virtual void emitSetFP(unsigned FpReg, unsigned SpReg, int64_t Offset = 0);
+ virtual void emitPad(int64_t Offset);
+ virtual void emitRegSave(const SmallVectorImpl<unsigned> &RegList,
+ bool isVector);
+
+ virtual void switchVendor(StringRef Vendor);
+ virtual void emitAttribute(unsigned Attribute, unsigned Value);
+ virtual void emitTextAttribute(unsigned Attribute, StringRef String);
+ virtual void emitFPU(unsigned FPU);
+ virtual void finishAttributeSection();
+
+ size_t calculateContentSize() const;
+
+public:
+ ARMTargetELFStreamer()
+ : ARMTargetStreamer(), CurrentVendor("aeabi"), FPU(ARM::INVALID_FPU),
+ AttributeSection(0) {
+ }
+};
+
/// Extend the generic ELFStreamer class so that it can emit mapping symbols at
/// the appropriate points in the object files. These symbols are defined in the
/// ARM ELF ABI: infocenter.arm.com/help/topic/com.arm.../IHI0044D_aaelf.pdf.
@@ -61,27 +276,29 @@ namespace {
/// by MachO. Beware!
class ARMELFStreamer : public MCELFStreamer {
public:
- ARMELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS,
- MCCodeEmitter *Emitter, bool IsThumb)
- : MCELFStreamer(SK_ARMELFStreamer, Context, TAB, OS, Emitter),
+ friend class ARMTargetELFStreamer;
+
+ ARMELFStreamer(MCContext &Context, MCTargetStreamer *TargetStreamer,
+ MCAsmBackend &TAB, raw_ostream &OS, MCCodeEmitter *Emitter,
+ bool IsThumb)
+ : MCELFStreamer(Context, TargetStreamer, TAB, OS, Emitter),
IsThumb(IsThumb), MappingSymbolCounter(0), LastEMS(EMS_None) {
Reset();
}
~ARMELFStreamer() {}
+ virtual void FinishImpl();
+
// ARM exception handling directives
- virtual void EmitFnStart();
- virtual void EmitFnEnd();
- virtual void EmitCantUnwind();
- virtual void EmitPersonality(const MCSymbol *Per);
- virtual void EmitHandlerData();
- virtual void EmitSetFP(unsigned NewFpReg,
- unsigned NewSpReg,
- int64_t Offset = 0);
- virtual void EmitPad(int64_t Offset);
- virtual void EmitRegSave(const SmallVectorImpl<unsigned> &RegList,
- bool isVector);
+ void emitFnStart();
+ void emitFnEnd();
+ void emitCantUnwind();
+ void emitPersonality(const MCSymbol *Per);
+ void emitHandlerData();
+ void emitSetFP(unsigned NewFpReg, unsigned NewSpReg, int64_t Offset = 0);
+ void emitPad(int64_t Offset);
+ void emitRegSave(const SmallVectorImpl<unsigned> &RegList, bool isVector);
virtual void ChangeSection(const MCSection *Section,
const MCExpr *Subsection) {
@@ -109,18 +326,17 @@ public:
/// This is one of the functions used to emit data into an ELF section, so the
/// ARM streamer overrides it to add the appropriate mapping symbol ($d) if
/// necessary.
- virtual void EmitBytes(StringRef Data, unsigned AddrSpace) {
+ virtual void EmitBytes(StringRef Data) {
EmitDataMappingSymbol();
- MCELFStreamer::EmitBytes(Data, AddrSpace);
+ MCELFStreamer::EmitBytes(Data);
}
/// This is one of the functions used to emit data into an ELF section, so the
/// ARM streamer overrides it to add the appropriate mapping symbol ($d) if
/// necessary.
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace) {
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) {
EmitDataMappingSymbol();
- MCELFStreamer::EmitValueImpl(Value, Size, AddrSpace);
+ MCELFStreamer::EmitValueImpl(Value, Size);
}
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) {
@@ -142,10 +358,6 @@ public:
}
}
- static bool classof(const MCStreamer *S) {
- return S->getKind() == SK_ARMELFStreamer;
- }
-
private:
enum ElfMappingSymbol {
EMS_None,
@@ -184,7 +396,7 @@ private:
MCELF::SetType(SD, ELF::STT_NOTYPE);
MCELF::SetBinding(SD, ELF::STB_LOCAL);
SD.setExternal(false);
- Symbol->setSection(*getCurrentSection().first);
+ AssignSection(Symbol, getCurrentSection().first);
const MCExpr *Value = MCSymbolRefExpr::Create(Start, getContext());
Symbol->setVariableValue(Value);
@@ -203,7 +415,8 @@ private:
void Reset();
void EmitPersonalityFixup(StringRef Name);
- void CollectUnwindOpcodes();
+ void FlushPendingOffset();
+ void FlushUnwindOpcodes(bool NoHandlerData);
void SwitchToEHSection(const char *Prefix, unsigned Type, unsigned Flags,
SectionKind Kind, const MCSymbol &Fn);
@@ -220,17 +433,236 @@ private:
MCSymbol *ExTab;
MCSymbol *FnStart;
const MCSymbol *Personality;
- uint32_t VFPRegSave; // Register mask for {d31-d0}
- uint32_t RegSave; // Register mask for {r15-r0}
- int64_t SPOffset;
- uint16_t FPReg;
- int64_t FPOffset;
+ unsigned PersonalityIndex;
+ unsigned FPReg; // Frame pointer register
+ int64_t FPOffset; // Offset: (final frame pointer) - (initial $sp)
+ int64_t SPOffset; // Offset: (final $sp) - (initial $sp)
+ int64_t PendingOffset; // Offset: (final $sp) - (emitted $sp)
bool UsedFP;
bool CantUnwind;
+ SmallVector<uint8_t, 64> Opcodes;
UnwindOpcodeAssembler UnwindOpAsm;
};
} // end anonymous namespace
+ARMELFStreamer &ARMTargetELFStreamer::getStreamer() {
+ ARMELFStreamer *S = static_cast<ARMELFStreamer *>(Streamer);
+ return *S;
+}
+
+void ARMTargetELFStreamer::emitFnStart() { getStreamer().emitFnStart(); }
+void ARMTargetELFStreamer::emitFnEnd() { getStreamer().emitFnEnd(); }
+void ARMTargetELFStreamer::emitCantUnwind() { getStreamer().emitCantUnwind(); }
+void ARMTargetELFStreamer::emitPersonality(const MCSymbol *Personality) {
+ getStreamer().emitPersonality(Personality);
+}
+void ARMTargetELFStreamer::emitHandlerData() {
+ getStreamer().emitHandlerData();
+}
+void ARMTargetELFStreamer::emitSetFP(unsigned FpReg, unsigned SpReg,
+ int64_t Offset) {
+ getStreamer().emitSetFP(FpReg, SpReg, Offset);
+}
+void ARMTargetELFStreamer::emitPad(int64_t Offset) {
+ getStreamer().emitPad(Offset);
+}
+void ARMTargetELFStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
+ bool isVector) {
+ getStreamer().emitRegSave(RegList, isVector);
+}
+void ARMTargetELFStreamer::switchVendor(StringRef Vendor) {
+ assert(!Vendor.empty() && "Vendor cannot be empty.");
+
+ if (CurrentVendor == Vendor)
+ return;
+
+ if (!CurrentVendor.empty())
+ finishAttributeSection();
+
+ assert(Contents.empty() &&
+ ".ARM.attributes should be flushed before changing vendor");
+ CurrentVendor = Vendor;
+
+}
+void ARMTargetELFStreamer::emitAttribute(unsigned Attribute, unsigned Value) {
+ setAttributeItem(Attribute, Value, /* OverwriteExisting= */ true);
+}
+void ARMTargetELFStreamer::emitTextAttribute(unsigned Attribute,
+ StringRef Value) {
+ setAttributeItem(Attribute, Value, /* OverwriteExisting= */ true);
+}
+void ARMTargetELFStreamer::emitFPU(unsigned Value) {
+ FPU = Value;
+}
+void ARMTargetELFStreamer::emitFPUDefaultAttributes() {
+ switch (FPU) {
+ case ARM::VFP:
+ case ARM::VFPV2:
+ setAttributeItem(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPv2,
+ /* OverwriteExisting= */ false);
+ break;
+
+ case ARM::VFPV3:
+ setAttributeItem(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPv3A,
+ /* OverwriteExisting= */ false);
+ break;
+
+ case ARM::VFPV3_D16:
+ setAttributeItem(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPv3B,
+ /* OverwriteExisting= */ false);
+ break;
+
+ case ARM::VFPV4:
+ setAttributeItem(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPv4A,
+ /* OverwriteExisting= */ false);
+ break;
+
+ case ARM::VFPV4_D16:
+ setAttributeItem(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPv4B,
+ /* OverwriteExisting= */ false);
+ break;
+
+ case ARM::FP_ARMV8:
+ setAttributeItem(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPARMv8A,
+ /* OverwriteExisting= */ false);
+ break;
+
+ case ARM::NEON:
+ setAttributeItem(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPv3A,
+ /* OverwriteExisting= */ false);
+ setAttributeItem(ARMBuildAttrs::Advanced_SIMD_arch,
+ ARMBuildAttrs::AllowNeon,
+ /* OverwriteExisting= */ false);
+ break;
+
+ case ARM::NEON_VFPV4:
+ setAttributeItem(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPv4A,
+ /* OverwriteExisting= */ false);
+ setAttributeItem(ARMBuildAttrs::Advanced_SIMD_arch,
+ ARMBuildAttrs::AllowNeon2,
+ /* OverwriteExisting= */ false);
+ break;
+
+ case ARM::NEON_FP_ARMV8:
+ case ARM::CRYPTO_NEON_FP_ARMV8:
+ setAttributeItem(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPARMv8A,
+ /* OverwriteExisting= */ false);
+ setAttributeItem(ARMBuildAttrs::Advanced_SIMD_arch,
+ ARMBuildAttrs::AllowNeonARMv8,
+ /* OverwriteExisting= */ false);
+ break;
+
+ default:
+ report_fatal_error("Unknown FPU: " + Twine(FPU));
+ break;
+ }
+}
+size_t ARMTargetELFStreamer::calculateContentSize() const {
+ size_t Result = 0;
+ for (size_t i = 0; i < Contents.size(); ++i) {
+ AttributeItem item = Contents[i];
+ switch (item.Type) {
+ case AttributeItem::HiddenAttribute:
+ break;
+ case AttributeItem::NumericAttribute:
+ Result += getULEBSize(item.Tag);
+ Result += getULEBSize(item.IntValue);
+ break;
+ case AttributeItem::TextAttribute:
+ Result += getULEBSize(item.Tag);
+ Result += item.StringValue.size() + 1; // string + '\0'
+ break;
+ }
+ }
+ return Result;
+}
+void ARMTargetELFStreamer::finishAttributeSection() {
+ // <format-version>
+ // [ <section-length> "vendor-name"
+ // [ <file-tag> <size> <attribute>*
+ // | <section-tag> <size> <section-number>* 0 <attribute>*
+ // | <symbol-tag> <size> <symbol-number>* 0 <attribute>*
+ // ]+
+ // ]*
+
+ if (FPU != ARM::INVALID_FPU)
+ emitFPUDefaultAttributes();
+
+ if (Contents.empty())
+ return;
+
+ std::sort(Contents.begin(), Contents.end(), AttributeItem::LessTag);
+
+ ARMELFStreamer &Streamer = getStreamer();
+
+ // Switch to .ARM.attributes section
+ if (AttributeSection) {
+ Streamer.SwitchSection(AttributeSection);
+ } else {
+ AttributeSection =
+ Streamer.getContext().getELFSection(".ARM.attributes",
+ ELF::SHT_ARM_ATTRIBUTES,
+ 0,
+ SectionKind::getMetadata());
+ Streamer.SwitchSection(AttributeSection);
+
+ // Format version
+ Streamer.EmitIntValue(0x41, 1);
+ }
+
+ // Vendor size + Vendor name + '\0'
+ const size_t VendorHeaderSize = 4 + CurrentVendor.size() + 1;
+
+ // Tag + Tag Size
+ const size_t TagHeaderSize = 1 + 4;
+
+ const size_t ContentsSize = calculateContentSize();
+
+ Streamer.EmitIntValue(VendorHeaderSize + TagHeaderSize + ContentsSize, 4);
+ Streamer.EmitBytes(CurrentVendor);
+ Streamer.EmitIntValue(0, 1); // '\0'
+
+ Streamer.EmitIntValue(ARMBuildAttrs::File, 1);
+ Streamer.EmitIntValue(TagHeaderSize + ContentsSize, 4);
+
+ // Size should have been accounted for already, now
+ // emit each field as its type (ULEB or String)
+ for (size_t i = 0; i < Contents.size(); ++i) {
+ AttributeItem item = Contents[i];
+ Streamer.EmitULEB128IntValue(item.Tag);
+ switch (item.Type) {
+ default: llvm_unreachable("Invalid attribute type");
+ case AttributeItem::NumericAttribute:
+ Streamer.EmitULEB128IntValue(item.IntValue);
+ break;
+ case AttributeItem::TextAttribute:
+ Streamer.EmitBytes(item.StringValue.upper());
+ Streamer.EmitIntValue(0, 1); // '\0'
+ break;
+ }
+ }
+
+ Contents.clear();
+ FPU = ARM::INVALID_FPU;
+}
+
+void ARMELFStreamer::FinishImpl() {
+ MCTargetStreamer &TS = getTargetStreamer();
+ ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS);
+ ATS.finishAttributeSection();
+
+ MCELFStreamer::FinishImpl();
+}
+
inline void ARMELFStreamer::SwitchToEHSection(const char *Prefix,
unsigned Type,
unsigned Flags,
@@ -279,81 +711,37 @@ inline void ARMELFStreamer::SwitchToExIdxSection(const MCSymbol &FnStart) {
}
void ARMELFStreamer::Reset() {
- const MCRegisterInfo &MRI = getContext().getRegisterInfo();
-
ExTab = NULL;
FnStart = NULL;
Personality = NULL;
- VFPRegSave = 0;
- RegSave = 0;
- FPReg = MRI.getEncodingValue(ARM::SP);
+ PersonalityIndex = NUM_PERSONALITY_INDEX;
+ FPReg = ARM::SP;
FPOffset = 0;
SPOffset = 0;
+ PendingOffset = 0;
UsedFP = false;
CantUnwind = false;
+ Opcodes.clear();
UnwindOpAsm.Reset();
}
-// Add the R_ARM_NONE fixup at the same position
-void ARMELFStreamer::EmitPersonalityFixup(StringRef Name) {
- const MCSymbol *PersonalitySym = getContext().GetOrCreateSymbol(Name);
-
- const MCSymbolRefExpr *PersonalityRef =
- MCSymbolRefExpr::Create(PersonalitySym,
- MCSymbolRefExpr::VK_ARM_NONE,
- getContext());
-
- AddValueSymbols(PersonalityRef);
- MCDataFragment *DF = getOrCreateDataFragment();
- DF->getFixups().push_back(
- MCFixup::Create(DF->getContents().size(), PersonalityRef,
- MCFixup::getKindForSize(4, false)));
-}
-
-void ARMELFStreamer::CollectUnwindOpcodes() {
- if (UsedFP) {
- UnwindOpAsm.EmitSetFP(FPReg);
- UnwindOpAsm.EmitSPOffset(-FPOffset);
- } else {
- UnwindOpAsm.EmitSPOffset(SPOffset);
- }
- UnwindOpAsm.EmitVFPRegSave(VFPRegSave);
- UnwindOpAsm.EmitRegSave(RegSave);
- UnwindOpAsm.Finalize();
-}
-
-void ARMELFStreamer::EmitFnStart() {
+void ARMELFStreamer::emitFnStart() {
assert(FnStart == 0);
FnStart = getContext().CreateTempSymbol();
EmitLabel(FnStart);
}
-void ARMELFStreamer::EmitFnEnd() {
+void ARMELFStreamer::emitFnEnd() {
assert(FnStart && ".fnstart must preceeds .fnend");
// Emit unwind opcodes if there is no .handlerdata directive
- if (!ExTab && !CantUnwind) {
- CollectUnwindOpcodes();
-
- unsigned PersonalityIndex = UnwindOpAsm.getPersonalityIndex();
- if (PersonalityIndex == AEABI_UNWIND_CPP_PR1 ||
- PersonalityIndex == AEABI_UNWIND_CPP_PR2) {
- // For the __aeabi_unwind_cpp_pr1 and __aeabi_unwind_cpp_pr2, we have to
- // emit the unwind opcodes in the corresponding ".ARM.extab" section, and
- // then emit a reference to these unwind opcodes in the second word of
- // the exception index table entry.
- SwitchToExTabSection(*FnStart);
- ExTab = getContext().CreateTempSymbol();
- EmitLabel(ExTab);
- EmitBytes(UnwindOpAsm.data(), 0);
- }
- }
+ if (!ExTab && !CantUnwind)
+ FlushUnwindOpcodes(true);
// Emit the exception index table entry
SwitchToExIdxSection(*FnStart);
- unsigned PersonalityIndex = UnwindOpAsm.getPersonalityIndex();
if (PersonalityIndex < NUM_PERSONALITY_INDEX)
EmitPersonalityFixup(GetAEABIUnwindPersonalityName(PersonalityIndex));
@@ -362,37 +750,80 @@ void ARMELFStreamer::EmitFnEnd() {
MCSymbolRefExpr::VK_ARM_PREL31,
getContext());
- EmitValue(FnStartRef, 4, 0);
+ EmitValue(FnStartRef, 4);
if (CantUnwind) {
- EmitIntValue(EXIDX_CANTUNWIND, 4, 0);
+ EmitIntValue(EXIDX_CANTUNWIND, 4);
} else if (ExTab) {
// Emit a reference to the unwind opcodes in the ".ARM.extab" section.
const MCSymbolRefExpr *ExTabEntryRef =
MCSymbolRefExpr::Create(ExTab,
MCSymbolRefExpr::VK_ARM_PREL31,
getContext());
- EmitValue(ExTabEntryRef, 4, 0);
+ EmitValue(ExTabEntryRef, 4);
} else {
// For the __aeabi_unwind_cpp_pr0, we have to emit the unwind opcodes in
// the second word of exception index table entry. The size of the unwind
// opcodes should always be 4 bytes.
assert(PersonalityIndex == AEABI_UNWIND_CPP_PR0 &&
"Compact model must use __aeabi_cpp_unwind_pr0 as personality");
- assert(UnwindOpAsm.size() == 4u &&
+ assert(Opcodes.size() == 4u &&
"Unwind opcode size for __aeabi_cpp_unwind_pr0 must be equal to 4");
- EmitBytes(UnwindOpAsm.data(), 0);
+ EmitBytes(StringRef(reinterpret_cast<const char*>(Opcodes.data()),
+ Opcodes.size()));
}
+ // Switch to the section containing FnStart
+ SwitchSection(&FnStart->getSection());
+
// Clean exception handling frame information
Reset();
}
-void ARMELFStreamer::EmitCantUnwind() {
- CantUnwind = true;
+void ARMELFStreamer::emitCantUnwind() { CantUnwind = true; }
+
+// Add the R_ARM_NONE fixup at the same position
+void ARMELFStreamer::EmitPersonalityFixup(StringRef Name) {
+ const MCSymbol *PersonalitySym = getContext().GetOrCreateSymbol(Name);
+
+ const MCSymbolRefExpr *PersonalityRef = MCSymbolRefExpr::Create(
+ PersonalitySym, MCSymbolRefExpr::VK_ARM_NONE, getContext());
+
+ AddValueSymbols(PersonalityRef);
+ MCDataFragment *DF = getOrCreateDataFragment();
+ DF->getFixups().push_back(MCFixup::Create(DF->getContents().size(),
+ PersonalityRef,
+ MCFixup::getKindForSize(4, false)));
+}
+
+void ARMELFStreamer::FlushPendingOffset() {
+ if (PendingOffset != 0) {
+ UnwindOpAsm.EmitSPOffset(-PendingOffset);
+ PendingOffset = 0;
+ }
}
-void ARMELFStreamer::EmitHandlerData() {
+void ARMELFStreamer::FlushUnwindOpcodes(bool NoHandlerData) {
+ // Emit the unwind opcode to restore $sp.
+ if (UsedFP) {
+ const MCRegisterInfo *MRI = getContext().getRegisterInfo();
+ int64_t LastRegSaveSPOffset = SPOffset - PendingOffset;
+ UnwindOpAsm.EmitSPOffset(LastRegSaveSPOffset - FPOffset);
+ UnwindOpAsm.EmitSetSP(MRI->getEncodingValue(FPReg));
+ } else {
+ FlushPendingOffset();
+ }
+
+ // Finalize the unwind opcode sequence
+ UnwindOpAsm.Finalize(PersonalityIndex, Opcodes);
+
+ // For compact model 0, we have to emit the unwind opcodes in the .ARM.exidx
+ // section. Thus, we don't have to create an entry in the .ARM.extab
+ // section.
+ if (NoHandlerData && PersonalityIndex == AEABI_UNWIND_CPP_PR0)
+ return;
+
+ // Switch to .ARM.extab section.
SwitchToExTabSection(*FnStart);
// Create .ARM.extab label for offset in .ARM.exidx
@@ -400,73 +831,117 @@ void ARMELFStreamer::EmitHandlerData() {
ExTab = getContext().CreateTempSymbol();
EmitLabel(ExTab);
- // Emit Personality
- assert(Personality && ".personality directive must preceed .handlerdata");
-
- const MCSymbolRefExpr *PersonalityRef =
- MCSymbolRefExpr::Create(Personality,
- MCSymbolRefExpr::VK_ARM_PREL31,
- getContext());
+ // Emit personality
+ if (Personality) {
+ const MCSymbolRefExpr *PersonalityRef =
+ MCSymbolRefExpr::Create(Personality,
+ MCSymbolRefExpr::VK_ARM_PREL31,
+ getContext());
- EmitValue(PersonalityRef, 4, 0);
+ EmitValue(PersonalityRef, 4);
+ }
// Emit unwind opcodes
- CollectUnwindOpcodes();
- EmitBytes(UnwindOpAsm.data(), 0);
+ EmitBytes(StringRef(reinterpret_cast<const char *>(Opcodes.data()),
+ Opcodes.size()));
+
+ // According to ARM EHABI section 9.2, if the __aeabi_unwind_cpp_pr1() or
+ // __aeabi_unwind_cpp_pr2() is used, then the handler data must be emitted
+ // after the unwind opcodes. The handler data consists of several 32-bit
+ // words, and should be terminated by zero.
+ //
+ // If the .handlerdata directive is not specified by the
+ // programmer, we should emit zero to terminate the handler data.
+ if (NoHandlerData && !Personality)
+ EmitIntValue(0, 4);
}
-void ARMELFStreamer::EmitPersonality(const MCSymbol *Per) {
+void ARMELFStreamer::emitHandlerData() { FlushUnwindOpcodes(false); }
+
+void ARMELFStreamer::emitPersonality(const MCSymbol *Per) {
Personality = Per;
UnwindOpAsm.setPersonality(Per);
}
-void ARMELFStreamer::EmitSetFP(unsigned NewFPReg,
- unsigned NewSPReg,
+void ARMELFStreamer::emitSetFP(unsigned NewFPReg, unsigned NewSPReg,
int64_t Offset) {
- assert(SPOffset == 0 &&
- "Current implementation assumes .setfp precedes .pad");
-
- const MCRegisterInfo &MRI = getContext().getRegisterInfo();
-
- uint16_t NewFPRegEncVal = MRI.getEncodingValue(NewFPReg);
-#ifndef NDEBUG
- uint16_t NewSPRegEncVal = MRI.getEncodingValue(NewSPReg);
-#endif
-
- assert((NewSPReg == ARM::SP || NewSPRegEncVal == FPReg) &&
+ assert((NewSPReg == ARM::SP || NewSPReg == FPReg) &&
"the operand of .setfp directive should be either $sp or $fp");
UsedFP = true;
- FPReg = NewFPRegEncVal;
- FPOffset = Offset;
-}
+ FPReg = NewFPReg;
-void ARMELFStreamer::EmitPad(int64_t Offset) {
- SPOffset += Offset;
+ if (NewSPReg == ARM::SP)
+ FPOffset = SPOffset + Offset;
+ else
+ FPOffset += Offset;
}
-void ARMELFStreamer::EmitRegSave(const SmallVectorImpl<unsigned> &RegList,
- bool IsVector) {
- const MCRegisterInfo &MRI = getContext().getRegisterInfo();
+void ARMELFStreamer::emitPad(int64_t Offset) {
+ // Track the change of the $sp offset
+ SPOffset -= Offset;
-#ifndef NDEBUG
- unsigned Max = IsVector ? 32 : 16;
-#endif
- uint32_t &RegMask = IsVector ? VFPRegSave : RegSave;
+ // To squash multiple .pad directives, we should delay the unwind opcode
+ // until the .save, .vsave, .handlerdata, or .fnend directives.
+ PendingOffset -= Offset;
+}
+void ARMELFStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
+ bool IsVector) {
+ // Collect the registers in the register list
+ unsigned Count = 0;
+ uint32_t Mask = 0;
+ const MCRegisterInfo *MRI = getContext().getRegisterInfo();
for (size_t i = 0; i < RegList.size(); ++i) {
- unsigned Reg = MRI.getEncodingValue(RegList[i]);
- assert(Reg < Max && "Register encoded value out of range");
- RegMask |= 1u << Reg;
+ unsigned Reg = MRI->getEncodingValue(RegList[i]);
+ assert(Reg < (IsVector ? 32U : 16U) && "Register out of range");
+ unsigned Bit = (1u << Reg);
+ if ((Mask & Bit) == 0) {
+ Mask |= Bit;
+ ++Count;
+ }
}
+
+ // Track the change of the $sp offset: For the .save directive, the
+ // corresponding push instruction will decrease the $sp by (4 * Count).
+ // For the .vsave directive, the corresponding vpush instruction will
+ // decrease $sp by (8 * Count).
+ SPOffset -= Count * (IsVector ? 8 : 4);
+
+ // Emit the opcode
+ FlushPendingOffset();
+ if (IsVector)
+ UnwindOpAsm.EmitVFPRegSave(Mask);
+ else
+ UnwindOpAsm.EmitRegSave(Mask);
}
namespace llvm {
+
+MCStreamer *createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
+ bool isVerboseAsm, bool useLoc, bool useCFI,
+ bool useDwarfDirectory,
+ MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+ MCAsmBackend *TAB, bool ShowInst) {
+ ARMTargetAsmStreamer *S = new ARMTargetAsmStreamer(OS, *InstPrint);
+
+ return llvm::createAsmStreamer(Ctx, S, OS, isVerboseAsm, useLoc, useCFI,
+ useDwarfDirectory, InstPrint, CE, TAB,
+ ShowInst);
+}
+
MCELFStreamer* createARMELFStreamer(MCContext &Context, MCAsmBackend &TAB,
raw_ostream &OS, MCCodeEmitter *Emitter,
bool RelaxAll, bool NoExecStack,
bool IsThumb) {
- ARMELFStreamer *S = new ARMELFStreamer(Context, TAB, OS, Emitter, IsThumb);
+ ARMTargetELFStreamer *TS = new ARMTargetELFStreamer();
+ ARMELFStreamer *S =
+ new ARMELFStreamer(Context, TS, TAB, OS, Emitter, IsThumb);
+ // FIXME: This should eventually end up somewhere else where more
+ // intelligent flag decisions can be made. For now we are just maintaining
+ // the status quo for ARM and setting EF_ARM_EABI_VER5 as the default.
+ S->getAssembler().setELFHeaderEFlags(ELF::EF_ARM_EABI_VER5);
+
if (RelaxAll)
S->getAssembler().setRelaxAll(true);
if (NoExecStack)
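
The comment block in finishAttributeSection above describes the .ARM.attributes layout: a one-byte format version (0x41, 'A'), a per-vendor subsection introduced by a 4-byte length and a NUL-terminated vendor name, then a file-scope tag (ARMBuildAttrs::File) with its own 4-byte length followed by ULEB128-encoded tag/value pairs (text attributes are NUL-terminated strings). The following standalone sketch reproduces that size bookkeeping; the tag numbers and the attribute list are illustrative, not taken from the patch.

  #include <cstdint>
  #include <cstdio>
  #include <cstring>
  #include <vector>

  static size_t ulebSize(uint64_t V) {          // bytes needed for ULEB128(V)
    size_t N = 0;
    do { V >>= 7; ++N; } while (V);
    return N;
  }

  int main() {
    const char *Vendor = "aeabi";
    struct Attr { unsigned Tag; unsigned Val; };
    std::vector<Attr> Attrs = {{6, 10}, {20, 1}};   // illustrative tag/value pairs

    size_t Contents = 0;
    for (const Attr &A : Attrs)
      Contents += ulebSize(A.Tag) + ulebSize(A.Val);

    size_t TagHeader = 1 + 4;                          // file tag byte + 4-byte size
    size_t VendorHeader = 4 + std::strlen(Vendor) + 1; // 4-byte size + name + '\0'

    // The vendor subsection length covers everything from its own size field
    // to the end of the attributes; the file tag length covers its own header
    // plus the ULEB-encoded contents.
    std::printf("subsection length = %zu\n", VendorHeader + TagHeader + Contents);
    std::printf("file tag length   = %zu\n", TagHeader + Contents);
    return 0;
  }
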
diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.h b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.h
deleted file mode 100644
index 77ae5d23628e..000000000000
--- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.h
+++ /dev/null
@@ -1,27 +0,0 @@
-//===-- ARMELFStreamer.h - ELF Streamer for ARM ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements ELF streamer information for the ARM backend.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARM_ELF_STREAMER_H
-#define ARM_ELF_STREAMER_H
-
-#include "llvm/MC/MCELFStreamer.h"
-
-namespace llvm {
-
- MCELFStreamer* createARMELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack,
- bool IsThumb);
-}
-
-#endif // ARM_ELF_STREAMER_H
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
index c1aab9c72cca..ad796e660e96 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
@@ -49,8 +49,6 @@ ARMELFMCAsmInfo::ARMELFMCAsmInfo() {
Code16Directive = ".code\t16";
Code32Directive = ".code\t32";
- WeakRefDirective = "\t.weak\t";
-
HasLEB128 = true;
SupportsDebugInformation = true;
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
index f0b289c6f3b6..e1f716d936ad 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
@@ -15,6 +15,7 @@
#define LLVM_ARMTARGETASMINFO_H
#include "llvm/MC/MCAsmInfoDarwin.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
@@ -24,7 +25,7 @@ namespace llvm {
explicit ARMMCAsmInfoDarwin();
};
- class ARMELFMCAsmInfo : public MCAsmInfo {
+ class ARMELFMCAsmInfo : public MCAsmInfoELF {
virtual void anchor();
public:
explicit ARMELFMCAsmInfo();
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index 7a59a7dd5055..4382d0d97144 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -58,8 +58,7 @@ public:
}
bool isTargetDarwin() const {
Triple TT(STI.getTargetTriple());
- Triple::OSType OS = TT.getOS();
- return OS == Triple::Darwin || OS == Triple::MacOSX || OS == Triple::IOS;
+ return TT.isOSDarwin();
}
unsigned getMachineSoImmOpValue(unsigned SoImm) const;
@@ -315,6 +314,8 @@ public:
unsigned EncodedValue) const;
unsigned NEONThumb2DupPostEncoder(const MCInst &MI,
unsigned EncodedValue) const;
+ unsigned NEONThumb2V8PostEncoder(const MCInst &MI,
+ unsigned EncodedValue) const;
unsigned VFPThumb2PostEncoder(const MCInst &MI,
unsigned EncodedValue) const;
@@ -389,6 +390,17 @@ unsigned ARMMCCodeEmitter::NEONThumb2DupPostEncoder(const MCInst &MI,
return EncodedValue;
}
+/// Post-process encoded NEON v8 instructions, and rewrite them to Thumb2 form
+/// if we are in Thumb2.
+unsigned ARMMCCodeEmitter::NEONThumb2V8PostEncoder(const MCInst &MI,
+ unsigned EncodedValue) const {
+ if (isThumb2()) {
+ EncodedValue |= 0xC000000; // Set bits 27-26
+ }
+
+ return EncodedValue;
+}
+
/// VFPThumb2PostEncoder - Post-process encoded VFP instructions and rewrite
/// them to their Thumb2 form if we are currently in Thumb2 mode.
unsigned ARMMCCodeEmitter::
@@ -407,7 +419,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
SmallVectorImpl<MCFixup> &Fixups) const {
if (MO.isReg()) {
unsigned Reg = MO.getReg();
- unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg);
+ unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg);
// Q registers are encoded as 2x their register number.
switch (Reg) {
@@ -436,7 +448,7 @@ EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg,
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
- Reg = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
int32_t SImm = MO1.getImm();
bool isAdd = true;
@@ -625,8 +637,14 @@ getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
uint32_t ARMMCCodeEmitter::
getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups) const {
- unsigned Val =
- ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch, Fixups);
+ unsigned Val = 0;
+ const MCOperand MO = MI.getOperand(OpIdx);
+
+ if(MO.isExpr())
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch, Fixups);
+ else
+ Val = MO.getImm() >> 1;
+
bool I = (Val & 0x800000);
bool J1 = (Val & 0x400000);
bool J2 = (Val & 0x200000);
@@ -652,7 +670,7 @@ getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
if (MO.isExpr())
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_adr_pcrel_12,
Fixups);
- int32_t offset = MO.getImm();
+ int64_t offset = MO.getImm();
uint32_t Val = 0x2000;
int SoImmVal;
@@ -724,8 +742,8 @@ getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
// {2-0} = Rn
const MCOperand &MO1 = MI.getOperand(OpIdx);
const MCOperand &MO2 = MI.getOperand(OpIdx + 1);
- unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO1.getReg());
- unsigned Rm = CTX.getRegisterInfo().getEncodingValue(MO2.getReg());
+ unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
+ unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(MO2.getReg());
return (Rm << 3) | Rn;
}
@@ -741,12 +759,12 @@ getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
// If The first operand isn't a register, we have a label reference.
const MCOperand &MO = MI.getOperand(OpIdx);
if (!MO.isReg()) {
- Reg = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC.
+ Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
Imm12 = 0;
- isAdd = false ; // 'U' bit is set as part of the fixup.
if (MO.isExpr()) {
const MCExpr *Expr = MO.getExpr();
+ isAdd = false ; // 'U' bit is set as part of the fixup.
MCFixupKind Kind;
if (isThumb2())
@@ -759,8 +777,10 @@ getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
} else {
Reg = ARM::PC;
int32_t Offset = MO.getImm();
- // FIXME: Handle #-0.
- if (Offset < 0) {
+ if (Offset == INT32_MIN) {
+ Offset = 0;
+ isAdd = false;
+ } else if (Offset < 0) {
Offset *= -1;
isAdd = false;
}
@@ -821,7 +841,7 @@ getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
// If The first operand isn't a register, we have a label reference.
const MCOperand &MO = MI.getOperand(OpIdx);
if (!MO.isReg()) {
- Reg = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC.
+ Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
Imm8 = 0;
isAdd = false ; // 'U' bit is set as part of the fixup.
@@ -857,7 +877,7 @@ getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx,
// {7-0} = imm8
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
- unsigned Reg = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ unsigned Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
unsigned Imm8 = MO1.getImm();
return (Reg << 8) | Imm8;
}
@@ -940,8 +960,8 @@ getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx+1);
const MCOperand &MO2 = MI.getOperand(OpIdx+2);
- unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
- unsigned Rm = CTX.getRegisterInfo().getEncodingValue(MO1.getReg());
+ unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
+ unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm());
bool isAdd = ARM_AM::getAM2Op(MO2.getImm()) == ARM_AM::add;
ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(MO2.getImm());
@@ -975,7 +995,7 @@ getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx,
// {12} isAdd
// {11-0} imm12/Rm
const MCOperand &MO = MI.getOperand(OpIdx);
- unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
uint32_t Binary = getAddrMode2OffsetOpValue(MI, OpIdx + 1, Fixups);
Binary |= Rn << 14;
return Binary;
@@ -998,7 +1018,7 @@ getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(Imm);
Binary <<= 7; // Shift amount is bits [11:7]
Binary |= getShiftOp(ShOp) << 5; // Shift type is bits [6:5]
- Binary |= CTX.getRegisterInfo().getEncodingValue(MO.getReg()); // Rm is bits [3:0]
+ Binary |= CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); // Rm is bits [3:0]
}
return Binary | (isAdd << 12) | (isReg << 13);
}
@@ -1011,7 +1031,7 @@ getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx,
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx+1);
bool isAdd = MO1.getImm() != 0;
- return CTX.getRegisterInfo().getEncodingValue(MO.getReg()) | (isAdd << 4);
+ return CTX.getRegisterInfo()->getEncodingValue(MO.getReg()) | (isAdd << 4);
}
uint32_t ARMMCCodeEmitter::
@@ -1029,7 +1049,7 @@ getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
// if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
if (!isImm)
- Imm8 = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ Imm8 = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
return Imm8 | (isAdd << 8) | (isImm << 9);
}
@@ -1047,7 +1067,7 @@ getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
// If The first operand isn't a register, we have a label reference.
if (!MO.isReg()) {
- unsigned Rn = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC.
+ unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
assert(MO.isExpr() && "Unexpected machine operand type!");
const MCExpr *Expr = MO.getExpr();
@@ -1057,14 +1077,14 @@ getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
++MCNumCPRelocations;
return (Rn << 9) | (1 << 13);
}
- unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
unsigned Imm = MO2.getImm();
bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add;
bool isImm = MO1.getReg() == 0;
uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
// if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
if (!isImm)
- Imm8 = CTX.getRegisterInfo().getEncodingValue(MO1.getReg());
+ Imm8 = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
return (Rn << 9) | Imm8 | (isAdd << 8) | (isImm << 13);
}
@@ -1092,7 +1112,7 @@ getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
// {2-0} = Rn
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
- unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
unsigned Imm5 = MO1.getImm();
return ((Imm5 & 0x1f) << 3) | Rn;
}
@@ -1119,7 +1139,7 @@ getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
// If The first operand isn't a register, we have a label reference.
const MCOperand &MO = MI.getOperand(OpIdx);
if (!MO.isReg()) {
- Reg = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC.
+ Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
Imm8 = 0;
isAdd = false; // 'U' bit is handled as part of the fixup.
@@ -1165,7 +1185,7 @@ getSORegRegOpValue(const MCInst &MI, unsigned OpIdx,
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO2.getImm());
// Encode Rm.
- unsigned Binary = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
// Encode the shift opcode.
unsigned SBits = 0;
@@ -1190,7 +1210,7 @@ getSORegRegOpValue(const MCInst &MI, unsigned OpIdx,
// Encode the shift operation Rs.
// Encode Rs bit[11:8].
assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0);
- return Binary | (CTX.getRegisterInfo().getEncodingValue(Rs) << ARMII::RegRsShift);
+ return Binary | (CTX.getRegisterInfo()->getEncodingValue(Rs) << ARMII::RegRsShift);
}
unsigned ARMMCCodeEmitter::
@@ -1209,7 +1229,7 @@ getSORegImmOpValue(const MCInst &MI, unsigned OpIdx,
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm());
// Encode Rm.
- unsigned Binary = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
// Encode the shift opcode.
unsigned SBits = 0;
@@ -1248,9 +1268,9 @@ getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
// Encoded as [Rn, Rm, imm].
// FIXME: Needs fixup support.
- unsigned Value = CTX.getRegisterInfo().getEncodingValue(MO1.getReg());
+ unsigned Value = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
Value <<= 4;
- Value |= CTX.getRegisterInfo().getEncodingValue(MO2.getReg());
+ Value |= CTX.getRegisterInfo()->getEncodingValue(MO2.getReg());
Value <<= 2;
Value |= MO3.getImm();
@@ -1264,7 +1284,7 @@ getT2AddrModeImm8OpValue(const MCInst &MI, unsigned OpNum,
const MCOperand &MO2 = MI.getOperand(OpNum+1);
// FIXME: Needs fixup support.
- unsigned Value = CTX.getRegisterInfo().getEncodingValue(MO1.getReg());
+ unsigned Value = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
// Even though the immediate is 8 bits long, we need 9 bits in order
// to represent the (inverse of the) sign bit.
@@ -1326,7 +1346,7 @@ getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm());
// Encode Rm.
- unsigned Binary = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
// Encode the shift opcode.
unsigned SBits = 0;
@@ -1359,8 +1379,8 @@ getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
// msb of the mask.
const MCOperand &MO = MI.getOperand(Op);
uint32_t v = ~MO.getImm();
- uint32_t lsb = CountTrailingZeros_32(v);
- uint32_t msb = (32 - CountLeadingZeros_32 (v)) - 1;
+ uint32_t lsb = countTrailingZeros(v);
+ uint32_t msb = (32 - countLeadingZeros (v)) - 1;
assert (v != 0 && lsb < 32 && msb < 32 && "Illegal bitfield mask!");
return lsb | (msb << 5);
}
@@ -1382,7 +1402,7 @@ getRegisterListOpValue(const MCInst &MI, unsigned Op,
if (SPRRegs || DPRRegs) {
// VLDM/VSTM
- unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg);
+ unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg);
unsigned NumRegs = (MI.getNumOperands() - Op) & 0xff;
Binary |= (RegNo & 0x1f) << 8;
if (SPRRegs)
@@ -1391,7 +1411,7 @@ getRegisterListOpValue(const MCInst &MI, unsigned Op,
Binary |= NumRegs * 2;
} else {
for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) {
- unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(MI.getOperand(I).getReg());
+ unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(MI.getOperand(I).getReg());
Binary |= 1 << RegNo;
}
}
@@ -1407,7 +1427,7 @@ getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
const MCOperand &Reg = MI.getOperand(Op);
const MCOperand &Imm = MI.getOperand(Op + 1);
- unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg.getReg());
+ unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg());
unsigned Align = 0;
switch (Imm.getImm()) {
@@ -1430,7 +1450,7 @@ getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op,
const MCOperand &Reg = MI.getOperand(Op);
const MCOperand &Imm = MI.getOperand(Op + 1);
- unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg.getReg());
+ unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg());
unsigned Align = 0;
switch (Imm.getImm()) {
@@ -1456,7 +1476,7 @@ getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
const MCOperand &Reg = MI.getOperand(Op);
const MCOperand &Imm = MI.getOperand(Op + 1);
- unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg.getReg());
+ unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg());
unsigned Align = 0;
switch (Imm.getImm()) {
@@ -1475,7 +1495,7 @@ getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
SmallVectorImpl<MCFixup> &Fixups) const {
const MCOperand &MO = MI.getOperand(Op);
if (MO.getReg() == 0) return 0x0D;
- return CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ return CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
}
unsigned ARMMCCodeEmitter::
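
The getAddrModeImm12OpValue change above distinguishes #-0 from #0: the backend passes INT32_MIN as the marker for a negative-zero offset, so the encoder now clears the U (add) bit while keeping imm12 at zero, instead of leaving the old FIXME. Below is a minimal standalone sketch of that split under the same INT32_MIN convention; encodeImm12 is a hypothetical helper, not LLVM API.

  #include <climits>
  #include <cstdint>
  #include <cstdio>

  static uint32_t encodeImm12(int32_t Offset) {
    bool IsAdd = true;
    if (Offset == INT32_MIN) {        // "#-0": subtract, magnitude 0
      Offset = 0;
      IsAdd = false;
    } else if (Offset < 0) {          // negative: subtract the magnitude
      Offset = -Offset;
      IsAdd = false;
    }
    uint32_t Imm12 = static_cast<uint32_t>(Offset) & 0xfff; // legal range 0-4095
    return (IsAdd ? (1u << 12) : 0) | Imm12;   // {12} = U bit, {11-0} = imm12
  }

  int main() {
    std::printf("#4   -> 0x%04x\n", encodeImm12(4));          // U=1, imm12=4
    std::printf("#-4  -> 0x%04x\n", encodeImm12(-4));         // U=0, imm12=4
    std::printf("#-0  -> 0x%04x\n", encodeImm12(INT32_MIN));  // U=0, imm12=0
    return 0;
  }
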
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index f09fb5a94fd8..a99de0e78230 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -12,30 +12,73 @@
//===----------------------------------------------------------------------===//
#include "ARMBaseInfo.h"
-#include "ARMELFStreamer.h"
#include "ARMMCAsmInfo.h"
#include "ARMMCTargetDesc.h"
#include "InstPrinter/ARMInstPrinter.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_REGINFO_MC_DESC
#include "ARMGenRegisterInfo.inc"
+static bool getMCRDeprecationInfo(MCInst &MI, MCSubtargetInfo &STI,
+ std::string &Info) {
+ if (STI.getFeatureBits() & llvm::ARM::HasV7Ops &&
+ (MI.getOperand(0).isImm() && MI.getOperand(0).getImm() == 15) &&
+ (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) &&
+ // Checks for the deprecated CP15ISB encoding:
+ // mcr p15, #0, rX, c7, c5, #4
+ (MI.getOperand(3).isImm() && MI.getOperand(3).getImm() == 7)) {
+ if ((MI.getOperand(5).isImm() && MI.getOperand(5).getImm() == 4)) {
+ if (MI.getOperand(4).isImm() && MI.getOperand(4).getImm() == 5) {
+ Info = "deprecated since v7, use 'isb'";
+ return true;
+ }
+
+ // Checks for the deprecated CP15DSB encoding:
+ // mcr p15, #0, rX, c7, c10, #4
+ if (MI.getOperand(4).isImm() && MI.getOperand(4).getImm() == 10) {
+ Info = "deprecated since v7, use 'dsb'";
+ return true;
+ }
+ }
+ // Checks for the deprecated CP15DMB encoding:
+ // mcr p15, #0, rX, c7, c10, #5
+ if (MI.getOperand(4).isImm() && MI.getOperand(4).getImm() == 10 &&
+ (MI.getOperand(5).isImm() && MI.getOperand(5).getImm() == 5)) {
+ Info = "deprecated since v7, use 'dmb'";
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool getITDeprecationInfo(MCInst &MI, MCSubtargetInfo &STI,
+ std::string &Info) {
+ if (STI.getFeatureBits() & llvm::ARM::HasV8Ops &&
+ MI.getOperand(1).isImm() && MI.getOperand(1).getImm() != 8) {
+ Info = "applying IT instruction to more than one subsequent instruction is deprecated";
+ return true;
+ }
+
+ return false;
+}
+
#define GET_INSTRINFO_MC_DESC
#include "ARMGenInstrInfo.inc"
#define GET_SUBTARGETINFO_MC_DESC
#include "ARMGenSubtargetInfo.inc"
-using namespace llvm;
std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) {
Triple triple(TT);
@@ -59,8 +102,17 @@ std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) {
std::string ARMArchFeature;
if (Idx) {
unsigned SubVer = TT[Idx];
- if (SubVer >= '7' && SubVer <= '9') {
+ if (SubVer == '8') {
+ if (NoCPU)
+ // v8a: FeatureDB, FeatureFPARMv8, FeatureNEON, FeatureDSPThumb2, FeatureMP,
+ // FeatureHWDiv, FeatureHWDivARM, FeatureTrustZone, FeatureT2XtPk, FeatureCrypto, FeatureCRC
+ ARMArchFeature = "+v8,+db,+fp-armv8,+neon,+t2dsp,+mp,+hwdiv,+hwdiv-arm,+trustzone,+t2xtpk,+crypto,+crc";
+ else
+ // Use CPU to figure out the exact features
+ ARMArchFeature = "+v8";
+ } else if (SubVer == '7') {
if (Len >= Idx+2 && TT[Idx+1] == 'm') {
+ isThumb = true;
if (NoCPU)
// v7m: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureMClass
ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+mclass";
@@ -99,9 +151,10 @@ std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) {
if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == '2')
ARMArchFeature = "+v6t2";
else if (Len >= Idx+2 && TT[Idx+1] == 'm') {
+ isThumb = true;
if (NoCPU)
// v6m: FeatureNoARM, FeatureMClass
- ARMArchFeature = "+v6,+noarm,+mclass";
+ ARMArchFeature = "+v6m,+noarm,+mclass";
else
ARMArchFeature = "+v6";
} else
@@ -159,7 +212,7 @@ static MCRegisterInfo *createARMMCRegisterInfo(StringRef Triple) {
return X;
}
-static MCAsmInfo *createARMMCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createARMMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) {
Triple TheTriple(TT);
if (TheTriple.isOSDarwin())
@@ -212,6 +265,15 @@ static MCInstPrinter *createARMMCInstPrinter(const Target &T,
return 0;
}
+static MCRelocationInfo *createARMMCRelocationInfo(StringRef TT,
+ MCContext &Ctx) {
+ Triple TheTriple(TT);
+ if (TheTriple.isEnvironmentMachO())
+ return createARMMachORelocationInfo(Ctx);
+ // Default to the stock relocation info.
+ return llvm::createMCRelocationInfo(TT, Ctx);
+}
+
namespace {
class ARMMCInstrAnalysis : public MCInstrAnalysis {
@@ -232,15 +294,16 @@ public:
return MCInstrAnalysis::isConditionalBranch(Inst);
}
- uint64_t evaluateBranch(const MCInst &Inst, uint64_t Addr,
- uint64_t Size) const {
+ bool evaluateBranch(const MCInst &Inst, uint64_t Addr,
+ uint64_t Size, uint64_t &Target) const {
// We only handle PCRel branches for now.
if (Info->get(Inst.getOpcode()).OpInfo[0].OperandType!=MCOI::OPERAND_PCREL)
- return -1ULL;
+ return false;
int64_t Imm = Inst.getOperand(0).getImm();
// FIXME: This is not right for thumb.
- return Addr+Imm+8; // In ARM mode the PC is always off by 8 bytes.
+ Target = Addr+Imm+8; // In ARM mode the PC is always off by 8 bytes.
+ return true;
}
};
@@ -292,7 +355,17 @@ extern "C" void LLVMInitializeARMTargetMC() {
TargetRegistry::RegisterMCObjectStreamer(TheARMTarget, createMCStreamer);
TargetRegistry::RegisterMCObjectStreamer(TheThumbTarget, createMCStreamer);
+ // Register the asm streamer.
+ TargetRegistry::RegisterAsmStreamer(TheARMTarget, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheThumbTarget, createMCAsmStreamer);
+
// Register the MCInstPrinter.
TargetRegistry::RegisterMCInstPrinter(TheARMTarget, createARMMCInstPrinter);
TargetRegistry::RegisterMCInstPrinter(TheThumbTarget, createARMMCInstPrinter);
+
+ // Register the MC relocation info.
+ TargetRegistry::RegisterMCRelocationInfo(TheARMTarget,
+ createARMMCRelocationInfo);
+ TargetRegistry::RegisterMCRelocationInfo(TheThumbTarget,
+ createARMMCRelocationInfo);
}
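
getMCRDeprecationInfo above matches the pre-v7 CP15 barrier forms of MCR, whose MCInst operands are laid out as (coproc, opc1, Rt, CRn, CRm, opc2): on v7-and-later cores, mcr p15, #0, rX, c7, c5, #4 (CP15ISB), c7, c10, #4 (CP15DSB) and c7, c10, #5 (CP15DMB) are flagged as deprecated in favour of the dedicated barrier instructions. A standalone sketch of the same operand match follows; the Mcr struct and checkDeprecatedBarrier helper are hypothetical stand-ins, not LLVM API.

  #include <cstdio>

  struct Mcr { unsigned Coproc, Opc1, CRn, CRm, Opc2; };

  static const char *checkDeprecatedBarrier(const Mcr &M, bool HasV7) {
    if (!HasV7 || M.Coproc != 15 || M.Opc1 != 0 || M.CRn != 7)
      return nullptr;
    if (M.CRm == 5  && M.Opc2 == 4) return "deprecated since v7, use 'isb'";
    if (M.CRm == 10 && M.Opc2 == 4) return "deprecated since v7, use 'dsb'";
    if (M.CRm == 10 && M.Opc2 == 5) return "deprecated since v7, use 'dmb'";
    return nullptr;
  }

  int main() {
    Mcr CP15ISB = {15, 0, 7, 5, 4};   // mcr p15, #0, rX, c7, c5, #4
    if (const char *Msg = checkDeprecatedBarrier(CP15ISB, /*HasV7=*/true))
      std::printf("warning: %s\n", Msg);
    return 0;
  }
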
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
index a89981e4f060..959be8b55c3a 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
@@ -18,13 +18,17 @@
#include <string>
namespace llvm {
+class formatted_raw_ostream;
class MCAsmBackend;
class MCCodeEmitter;
class MCContext;
class MCInstrInfo;
+class MCInstPrinter;
class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
+class MCStreamer;
+class MCRelocationInfo;
class StringRef;
class Target;
class raw_ostream;
@@ -41,12 +45,19 @@ namespace ARM_MC {
StringRef FS);
}
+MCStreamer *createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
+ bool isVerboseAsm, bool useLoc, bool useCFI,
+ bool useDwarfDirectory,
+ MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+ MCAsmBackend *TAB, bool ShowInst);
+
MCCodeEmitter *createARMMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx);
-MCAsmBackend *createARMAsmBackend(const Target &T, StringRef TT, StringRef CPU);
+MCAsmBackend *createARMAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
/// createARMELFObjectWriter - Construct an ARM ELF object writer.
MCObjectWriter *createARMELFObjectWriter(raw_ostream &OS,
@@ -58,6 +69,9 @@ MCObjectWriter *createARMMachObjectWriter(raw_ostream &OS,
uint32_t CPUType,
uint32_t CPUSubtype);
+
+/// createARMMachORelocationInfo - Construct ARM Mach-O relocation info.
+MCRelocationInfo *createARMMachORelocationInfo(MCContext &Ctx);
} // End llvm namespace
// Defines symbolic names for ARM registers. This defines a mapping from
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp b/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp
new file mode 100644
index 000000000000..807c9483bc38
--- /dev/null
+++ b/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp
@@ -0,0 +1,43 @@
+//===-- ARMMachORelocationInfo.cpp ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/ARMMCTargetDesc.h"
+#include "ARMMCExpr.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCRelocationInfo.h"
+#include "llvm-c/Disassembler.h"
+
+using namespace llvm;
+using namespace object;
+
+namespace {
+class ARMMachORelocationInfo : public MCRelocationInfo {
+public:
+ ARMMachORelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
+
+ const MCExpr *createExprForCAPIVariantKind(const MCExpr *SubExpr,
+ unsigned VariantKind) {
+ switch(VariantKind) {
+ case LLVMDisassembler_VariantKind_ARM_HI16:
+ return ARMMCExpr::CreateUpper16(SubExpr, Ctx);
+ case LLVMDisassembler_VariantKind_ARM_LO16:
+ return ARMMCExpr::CreateLower16(SubExpr, Ctx);
+ default:
+ return MCRelocationInfo::createExprForCAPIVariantKind(SubExpr,
+ VariantKind);
+ }
+ }
+};
+} // End unnamed namespace
+
+/// createARMMachORelocationInfo - Construct an ARM Mach-O RelocationInfo.
+MCRelocationInfo *llvm::createARMMachORelocationInfo(MCContext &Ctx) {
+ return new ARMMachORelocationInfo(Ctx);
+}
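
The new Mach-O relocation info hooks the C disassembler API's variant kinds so that operands recovered from ARM_RELOC_HALF relocations are wrapped as :upper16:/:lower16: expressions (what ARMMCExpr::CreateUpper16/CreateLower16 print) rather than plain symbol references. A standalone sketch of that mapping follows; the enum and the wrapSymbol helper are illustrative stand-ins, not LLVM API.

  #include <cstdio>
  #include <string>

  enum VariantKind { VK_None, VK_ARM_HI16, VK_ARM_LO16 };

  static std::string wrapSymbol(const std::string &Sym, VariantKind VK) {
    switch (VK) {
    case VK_ARM_HI16: return ":upper16:" + Sym;  // movt-style high half
    case VK_ARM_LO16: return ":lower16:" + Sym;  // movw-style low half
    default:          return Sym;                // plain symbol reference
    }
  }

  int main() {
    std::printf("movw r0, %s\n", wrapSymbol("_foo", VK_ARM_LO16).c_str());
    std::printf("movt r0, %s\n", wrapSymbol("_foo", VK_ARM_HI16).c_str());
    return 0;
  }
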
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index b9efe74b41e5..1f681bac2242 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -20,10 +20,9 @@
#include "llvm/MC/MCMachOSymbolFlags.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCValue.h"
-#include "llvm/Object/MachOFormat.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachO.h"
using namespace llvm;
-using namespace llvm::object;
namespace {
class ARMMachObjectWriter : public MCMachObjectTargetWriter {
@@ -63,7 +62,7 @@ public:
static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
unsigned &Log2Size) {
- RelocType = unsigned(macho::RIT_Vanilla);
+ RelocType = unsigned(MachO::ARM_RELOC_VANILLA);
Log2Size = ~0U;
switch (Kind) {
@@ -92,21 +91,21 @@ static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
case ARM::fixup_arm_uncondbl:
case ARM::fixup_arm_condbl:
case ARM::fixup_arm_blx:
- RelocType = unsigned(macho::RIT_ARM_Branch24Bit);
+ RelocType = unsigned(MachO::ARM_RELOC_BR24);
// Report as 'long', even though that is not quite accurate.
Log2Size = llvm::Log2_32(4);
return true;
// Handle Thumb branches.
case ARM::fixup_arm_thumb_br:
- RelocType = unsigned(macho::RIT_ARM_ThumbBranch22Bit);
+ RelocType = unsigned(MachO::ARM_THUMB_RELOC_BR22);
Log2Size = llvm::Log2_32(2);
return true;
case ARM::fixup_t2_uncondbranch:
case ARM::fixup_arm_thumb_bl:
case ARM::fixup_arm_thumb_blx:
- RelocType = unsigned(macho::RIT_ARM_ThumbBranch22Bit);
+ RelocType = unsigned(MachO::ARM_THUMB_RELOC_BR22);
Log2Size = llvm::Log2_32(4);
return true;
@@ -121,23 +120,23 @@ static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
// 1 - thumb instructions
case ARM::fixup_arm_movt_hi16:
case ARM::fixup_arm_movt_hi16_pcrel:
- RelocType = unsigned(macho::RIT_ARM_Half);
+ RelocType = unsigned(MachO::ARM_RELOC_HALF);
Log2Size = 1;
return true;
case ARM::fixup_t2_movt_hi16:
case ARM::fixup_t2_movt_hi16_pcrel:
- RelocType = unsigned(macho::RIT_ARM_Half);
+ RelocType = unsigned(MachO::ARM_RELOC_HALF);
Log2Size = 3;
return true;
case ARM::fixup_arm_movw_lo16:
case ARM::fixup_arm_movw_lo16_pcrel:
- RelocType = unsigned(macho::RIT_ARM_Half);
+ RelocType = unsigned(MachO::ARM_RELOC_HALF);
Log2Size = 0;
return true;
case ARM::fixup_t2_movw_lo16:
case ARM::fixup_t2_movw_lo16_pcrel:
- RelocType = unsigned(macho::RIT_ARM_Half);
+ RelocType = unsigned(MachO::ARM_RELOC_HALF);
Log2Size = 2;
return true;
}
@@ -153,7 +152,7 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
uint64_t &FixedValue) {
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
- unsigned Type = macho::RIT_ARM_Half;
+ unsigned Type = MachO::ARM_RELOC_HALF;
// See <reloc.h>.
const MCSymbol *A = &Target.getSymA()->getSymbol();
@@ -179,7 +178,7 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
"' can not be undefined in a subtraction expression");
// Select the appropriate difference relocation type.
- Type = macho::RIT_ARM_HalfDifference;
+ Type = MachO::ARM_RELOC_HALF_SECTDIFF;
Value2 = Writer->getSymbolAddress(B_SD, Layout);
FixedValue -= Writer->getSectionAddress(B_SD->getFragment()->getParent());
}
@@ -223,29 +222,29 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
break;
}
- if (Type == macho::RIT_ARM_HalfDifference) {
+ if (Type == MachO::ARM_RELOC_HALF_SECTDIFF) {
uint32_t OtherHalf = MovtBit
? (FixedValue & 0xffff) : ((FixedValue & 0xffff0000) >> 16);
- macho::RelocationEntry MRE;
- MRE.Word0 = ((OtherHalf << 0) |
- (macho::RIT_Pair << 24) |
- (MovtBit << 28) |
- (ThumbBit << 29) |
- (IsPCRel << 30) |
- macho::RF_Scattered);
- MRE.Word1 = Value2;
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = ((OtherHalf << 0) |
+ (MachO::ARM_RELOC_PAIR << 24) |
+ (MovtBit << 28) |
+ (ThumbBit << 29) |
+ (IsPCRel << 30) |
+ MachO::R_SCATTERED);
+ MRE.r_word1 = Value2;
Writer->addRelocation(Fragment->getParent(), MRE);
}
- macho::RelocationEntry MRE;
- MRE.Word0 = ((FixupOffset << 0) |
- (Type << 24) |
- (MovtBit << 28) |
- (ThumbBit << 29) |
- (IsPCRel << 30) |
- macho::RF_Scattered);
- MRE.Word1 = Value;
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = ((FixupOffset << 0) |
+ (Type << 24) |
+ (MovtBit << 28) |
+ (ThumbBit << 29) |
+ (IsPCRel << 30) |
+ MachO::R_SCATTERED);
+ MRE.r_word1 = Value;
Writer->addRelocation(Fragment->getParent(), MRE);
}
@@ -259,7 +258,7 @@ void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer,
uint64_t &FixedValue) {
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
- unsigned Type = macho::RIT_Vanilla;
+ unsigned Type = MachO::ARM_RELOC_VANILLA;
// See <reloc.h>.
const MCSymbol *A = &Target.getSymA()->getSymbol();
@@ -284,31 +283,31 @@ void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer,
"' can not be undefined in a subtraction expression");
// Select the appropriate difference relocation type.
- Type = macho::RIT_Difference;
+ Type = MachO::ARM_RELOC_SECTDIFF;
Value2 = Writer->getSymbolAddress(B_SD, Layout);
FixedValue -= Writer->getSectionAddress(B_SD->getFragment()->getParent());
}
// Relocations are written out in reverse order, so the PAIR comes first.
- if (Type == macho::RIT_Difference ||
- Type == macho::RIT_Generic_LocalDifference) {
- macho::RelocationEntry MRE;
- MRE.Word0 = ((0 << 0) |
- (macho::RIT_Pair << 24) |
- (Log2Size << 28) |
- (IsPCRel << 30) |
- macho::RF_Scattered);
- MRE.Word1 = Value2;
+ if (Type == MachO::ARM_RELOC_SECTDIFF ||
+ Type == MachO::ARM_RELOC_LOCAL_SECTDIFF) {
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = ((0 << 0) |
+ (MachO::ARM_RELOC_PAIR << 24) |
+ (Log2Size << 28) |
+ (IsPCRel << 30) |
+ MachO::R_SCATTERED);
+ MRE.r_word1 = Value2;
Writer->addRelocation(Fragment->getParent(), MRE);
}
- macho::RelocationEntry MRE;
- MRE.Word0 = ((FixupOffset << 0) |
- (Type << 24) |
- (Log2Size << 28) |
- (IsPCRel << 30) |
- macho::RF_Scattered);
- MRE.Word1 = Value;
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = ((FixupOffset << 0) |
+ (Type << 24) |
+ (Log2Size << 28) |
+ (IsPCRel << 30) |
+ MachO::R_SCATTERED);
+ MRE.r_word1 = Value;
Writer->addRelocation(Fragment->getParent(), MRE);
}
@@ -326,13 +325,13 @@ bool ARMMachObjectWriter::requiresExternRelocation(MachObjectWriter *Writer,
switch (RelocType) {
default:
return false;
- case macho::RIT_ARM_Branch24Bit:
+ case MachO::ARM_RELOC_BR24:
// PC pre-adjustment of 8 for these instructions.
Value -= 8;
// ARM BL/BLX has a 25-bit offset.
Range = 0x1ffffff;
break;
- case macho::RIT_ARM_ThumbBranch22Bit:
+ case MachO::ARM_THUMB_RELOC_BR22:
// PC pre-adjustment of 4 for these instructions.
Value -= 4;
// Thumb BL/BLX has a 24-bit offset.
@@ -361,7 +360,7 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
uint64_t &FixedValue) {
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
unsigned Log2Size;
- unsigned RelocType = macho::RIT_Vanilla;
+ unsigned RelocType = MachO::ARM_RELOC_VANILLA;
if (!getARMFixupKindMachOInfo(Fixup.getKind(), RelocType, Log2Size))
// If we failed to get fixup kind info, it's because there's no legal
// relocation type for the fixup kind. This happens when it's a fixup that's
@@ -374,7 +373,7 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
// scattered relocation entry. Differences always require scattered
// relocations.
if (Target.getSymB()) {
- if (RelocType == macho::RIT_ARM_Half)
+ if (RelocType == MachO::ARM_RELOC_HALF)
return RecordARMScatteredHalfRelocation(Writer, Asm, Layout, Fragment,
Fixup, Target, FixedValue);
return RecordARMScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup,
@@ -392,7 +391,7 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
//
// Is this right for ARM?
uint32_t Offset = Target.getConstant();
- if (IsPCRel && RelocType == macho::RIT_Vanilla)
+ if (IsPCRel && RelocType == MachO::ARM_RELOC_VANILLA)
Offset += 1 << Log2Size;
if (Offset && SD && !Writer->doesSymbolRequireExternRelocation(SD))
return RecordARMScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup,
@@ -445,17 +444,17 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
}
// struct relocation_info (8 bytes)
- macho::RelocationEntry MRE;
- MRE.Word0 = FixupOffset;
- MRE.Word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (IsExtern << 27) |
- (Type << 28));
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (IsExtern << 27) |
+ (Type << 28));
// Even when it's not a scattered relocation, movw/movt always uses
// a PAIR relocation.
- if (Type == macho::RIT_ARM_Half) {
+ if (Type == MachO::ARM_RELOC_HALF) {
// The other-half value only gets populated for the movt and movw
// relocation entries.
uint32_t Value = 0;
@@ -474,11 +473,11 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
Value = FixedValue & 0xffff;
break;
}
- macho::RelocationEntry MREPair;
- MREPair.Word0 = Value;
- MREPair.Word1 = ((0xffffff) |
- (Log2Size << 25) |
- (macho::RIT_Pair << 28));
+ MachO::any_relocation_info MREPair;
+ MREPair.r_word0 = Value;
+ MREPair.r_word1 = ((0xffffff << 0) |
+ (Log2Size << 25) |
+ (MachO::ARM_RELOC_PAIR << 28));
Writer->addRelocation(Fragment->getParent(), MREPair);
}
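
As a standalone illustration (not part of the patch): the hunks above pack Mach-O relocation entries by hand, and this C++ sketch mirrors those shifts so the field layout is easier to see. Field positions follow <mach-o/reloc.h>; the ARM_RELOC_HALF variant reuses bits 28 and 29 of the scattered word for MovtBit and ThumbBit instead of Log2Size.

// Standalone sketch (not part of the patch): packing the two 32-bit words of
// a Mach-O relocation entry the same way the hunks above do.
#include <cstdint>

struct RelocWords { uint32_t Word0, Word1; };

// Non-scattered entry: word0 is the fixup offset; word1 packs the symbol
// index, the pc-relative bit, the log2 size, the extern bit and the type.
RelocWords packPlain(uint32_t FixupOffset, uint32_t SymIndex, bool IsPCRel,
                     uint32_t Log2Size, bool IsExtern, uint32_t Type) {
  RelocWords R;
  R.Word0 = FixupOffset;
  R.Word1 = (SymIndex << 0) | (uint32_t(IsPCRel) << 24) | (Log2Size << 25) |
            (uint32_t(IsExtern) << 27) | (Type << 28);
  return R;
}

// Scattered entry: the offset, type and flag bit share word0; word1 holds the
// symbol value.
RelocWords packScattered(uint32_t FixupOffset, uint32_t Type,
                         uint32_t Log2Size, bool IsPCRel, uint32_t Value) {
  const uint32_t R_SCATTERED = 0x80000000u;  // high bit of word0
  RelocWords R;
  R.Word0 = (FixupOffset << 0) | (Type << 24) | (Log2Size << 28) |
            (uint32_t(IsPCRel) << 30) | R_SCATTERED;
  R.Word1 = Value;
  return R;
}
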
diff --git a/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp b/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp
index 191db69fbc2b..c94337081884 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp
@@ -20,6 +20,48 @@
using namespace llvm;
+namespace {
+ /// UnwindOpcodeStreamer - A simple wrapper over SmallVector to emit bytes
+ /// in MSB-to-LSB order within each uint32_t. For example, the first byte will
+ /// be placed in Vec[3], and the following bytes will be placed in 2, 1, 0,
+ /// 7, 6, 5, 4, 11, 10, 9, 8, and so on.
+ class UnwindOpcodeStreamer {
+ private:
+ SmallVectorImpl<uint8_t> &Vec;
+ size_t Pos;
+
+ public:
+ UnwindOpcodeStreamer(SmallVectorImpl<uint8_t> &V) : Vec(V), Pos(3) {
+ }
+
+ /// Emit a byte in MSB-to-LSB order within each uint32_t.
+ inline void EmitByte(uint8_t elem) {
+ Vec[Pos] = elem;
+ Pos = (((Pos ^ 0x3u) + 1) ^ 0x3u);
+ }
+
+ /// Emit the size prefix.
+ inline void EmitSize(size_t Size) {
+ size_t SizeInWords = (Size + 3) / 4;
+ assert(SizeInWords <= 0x100u &&
+ "Only 256 additional words are allowed for unwind opcodes");
+ EmitByte(static_cast<uint8_t>(SizeInWords - 1));
+ }
+
+ /// Emit the personality index prefix.
+ inline void EmitPersonalityIndex(unsigned PI) {
+ assert(PI < NUM_PERSONALITY_INDEX && "Invalid personality prefix");
+ EmitByte(EHT_COMPACT | PI);
+ }
+
+ /// Fill the rest of bytes with FINISH opcode.
+ inline void FillFinishOpcode() {
+ while (Pos < Vec.size())
+ EmitByte(UNWIND_OPCODE_FINISH);
+ }
+ };
+}
+
void UnwindOpcodeAssembler::EmitRegSave(uint32_t RegSave) {
if (RegSave == 0u)
return;
@@ -43,28 +85,22 @@ void UnwindOpcodeAssembler::EmitRegSave(uint32_t RegSave) {
uint32_t UnmaskedReg = RegSave & 0xfff0u & (~Mask);
if (UnmaskedReg == 0u) {
// Pop r[4 : (4 + n)]
- Ops.push_back(UNWIND_OPCODE_POP_REG_RANGE_R4 | Range);
+ EmitInt8(UNWIND_OPCODE_POP_REG_RANGE_R4 | Range);
RegSave &= 0x000fu;
} else if (UnmaskedReg == (1u << 14)) {
// Pop r[14] + r[4 : (4 + n)]
- Ops.push_back(UNWIND_OPCODE_POP_REG_RANGE_R4_R14 | Range);
+ EmitInt8(UNWIND_OPCODE_POP_REG_RANGE_R4_R14 | Range);
RegSave &= 0x000fu;
}
}
// Two bytes opcode to save register r15-r4
- if ((RegSave & 0xfff0u) != 0) {
- uint32_t Op = UNWIND_OPCODE_POP_REG_MASK_R4 | (RegSave >> 4);
- Ops.push_back(static_cast<uint8_t>(Op >> 8));
- Ops.push_back(static_cast<uint8_t>(Op & 0xff));
- }
+ if ((RegSave & 0xfff0u) != 0)
+ EmitInt16(UNWIND_OPCODE_POP_REG_MASK_R4 | (RegSave >> 4));
// Opcode to save register r3-r0
- if ((RegSave & 0x000fu) != 0) {
- uint32_t Op = UNWIND_OPCODE_POP_REG_MASK | (RegSave & 0x000fu);
- Ops.push_back(static_cast<uint8_t>(Op >> 8));
- Ops.push_back(static_cast<uint8_t>(Op & 0xff));
- }
+ if ((RegSave & 0x000fu) != 0)
+ EmitInt16(UNWIND_OPCODE_POP_REG_MASK | (RegSave & 0x000fu));
}
/// Emit unwind opcodes for .vsave directives
@@ -89,10 +125,8 @@ void UnwindOpcodeAssembler::EmitVFPRegSave(uint32_t VFPRegSave) {
Bit >>= 1;
}
- uint32_t Op =
- UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16 | ((i - 16) << 4) | Range;
- Ops.push_back(static_cast<uint8_t>(Op >> 8));
- Ops.push_back(static_cast<uint8_t>(Op & 0xff));
+ EmitInt16(UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16 |
+ ((i - 16) << 4) | Range);
}
while (i > 0) {
@@ -113,86 +147,75 @@ void UnwindOpcodeAssembler::EmitVFPRegSave(uint32_t VFPRegSave) {
Bit >>= 1;
}
- uint32_t Op = UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD | (i << 4) | Range;
- Ops.push_back(static_cast<uint8_t>(Op >> 8));
- Ops.push_back(static_cast<uint8_t>(Op & 0xff));
+ EmitInt16(UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD | (i << 4) | Range);
}
}
-/// Emit unwind opcodes for .setfp directives
-void UnwindOpcodeAssembler::EmitSetFP(uint16_t FPReg) {
- Ops.push_back(UNWIND_OPCODE_SET_VSP | FPReg);
+/// Emit unwind opcodes to copy address from source register to $sp.
+void UnwindOpcodeAssembler::EmitSetSP(uint16_t Reg) {
+ EmitInt8(UNWIND_OPCODE_SET_VSP | Reg);
}
-/// Emit unwind opcodes to update stack pointer
+/// Emit unwind opcodes to add $sp with an offset.
void UnwindOpcodeAssembler::EmitSPOffset(int64_t Offset) {
if (Offset > 0x200) {
- uint8_t Buff[10];
- size_t Size = encodeULEB128((Offset - 0x204) >> 2, Buff);
- Ops.push_back(UNWIND_OPCODE_INC_VSP_ULEB128);
- Ops.append(Buff, Buff + Size);
+ uint8_t Buff[16];
+ Buff[0] = UNWIND_OPCODE_INC_VSP_ULEB128;
+ size_t ULEBSize = encodeULEB128((Offset - 0x204) >> 2, Buff + 1);
+ EmitBytes(Buff, ULEBSize + 1);
} else if (Offset > 0) {
if (Offset > 0x100) {
- Ops.push_back(UNWIND_OPCODE_INC_VSP | 0x3fu);
+ EmitInt8(UNWIND_OPCODE_INC_VSP | 0x3fu);
Offset -= 0x100;
}
- Ops.push_back(UNWIND_OPCODE_INC_VSP |
- static_cast<uint8_t>((Offset - 4) >> 2));
+ EmitInt8(UNWIND_OPCODE_INC_VSP | static_cast<uint8_t>((Offset - 4) >> 2));
} else if (Offset < 0) {
while (Offset < -0x100) {
- Ops.push_back(UNWIND_OPCODE_DEC_VSP | 0x3fu);
+ EmitInt8(UNWIND_OPCODE_DEC_VSP | 0x3fu);
Offset += 0x100;
}
- Ops.push_back(UNWIND_OPCODE_DEC_VSP |
- static_cast<uint8_t>(((-Offset) - 4) >> 2));
+ EmitInt8(UNWIND_OPCODE_DEC_VSP |
+ static_cast<uint8_t>(((-Offset) - 4) >> 2));
}
}
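
A standalone sketch (not part of the patch) of the large-offset path in EmitSPOffset above: per the ARM EHABI, opcode 0xB2 (UNWIND_OPCODE_INC_VSP_ULEB128) means "vsp = vsp + 0x204 + (uleb128 << 2)", so the code writes the opcode byte followed by ULEB128((Offset - 0x204) / 4). The encoder below follows the same scheme as llvm::encodeULEB128.

// Standalone sketch (not part of the patch): emitting a large VSP increment
// as opcode 0xB2 followed by a ULEB128 payload, as EmitSPOffset does above.
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Minimal ULEB128 encoder (same scheme as llvm::encodeULEB128).
static size_t encodeULEB128(uint64_t Value, uint8_t *Out) {
  size_t N = 0;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    if (Value)
      Byte |= 0x80;               // more bytes follow
    Out[N++] = Byte;
  } while (Value);
  return N;
}

int main() {
  int64_t Offset = 0x400;          // stack adjustment larger than 0x200
  uint8_t Buff[16];
  Buff[0] = 0xB2;                  // vsp = vsp + 0x204 + (uleb128 << 2)
  size_t Len = 1 + encodeULEB128((Offset - 0x204) >> 2, Buff + 1);
  for (size_t i = 0; i < Len; ++i)
    std::printf("%02x ", Buff[i]); // prints: b2 7f
  std::printf("\n");
  return 0;
}
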
-void UnwindOpcodeAssembler::AddOpcodeSizePrefix(size_t Pos) {
- size_t SizeInWords = (size() + 3) / 4;
- assert(SizeInWords <= 0x100u &&
- "Only 256 additional words are allowed for unwind opcodes");
- Ops[Pos] = static_cast<uint8_t>(SizeInWords - 1);
-}
+void UnwindOpcodeAssembler::Finalize(unsigned &PersonalityIndex,
+ SmallVectorImpl<uint8_t> &Result) {
-void UnwindOpcodeAssembler::AddPersonalityIndexPrefix(size_t Pos, unsigned PI) {
- assert(PI < NUM_PERSONALITY_INDEX && "Invalid personality prefix");
- Ops[Pos] = EHT_COMPACT | PI;
-}
+ UnwindOpcodeStreamer OpStreamer(Result);
-void UnwindOpcodeAssembler::EmitFinishOpcodes() {
- for (size_t i = (0x4u - (size() & 0x3u)) & 0x3u; i > 0; --i)
- Ops.push_back(UNWIND_OPCODE_FINISH);
-}
-
-void UnwindOpcodeAssembler::Finalize() {
if (HasPersonality) {
- // Personality specified by .personality directive
- Offset = 1;
- AddOpcodeSizePrefix(1);
+ // User-specified personality routine: [ SIZE , OP1 , OP2 , ... ]
+ PersonalityIndex = NUM_PERSONALITY_INDEX;
+ size_t TotalSize = Ops.size() + 1;
+ size_t RoundUpSize = (TotalSize + 3) / 4 * 4;
+ Result.resize(RoundUpSize);
+ OpStreamer.EmitSize(RoundUpSize);
} else {
- if (getOpcodeSize() <= 3) {
+ if (Ops.size() <= 3) {
// __aeabi_unwind_cpp_pr0: [ 0x80 , OP1 , OP2 , OP3 ]
- Offset = 1;
PersonalityIndex = AEABI_UNWIND_CPP_PR0;
- AddPersonalityIndexPrefix(Offset, PersonalityIndex);
+ Result.resize(4);
+ OpStreamer.EmitPersonalityIndex(PersonalityIndex);
} else {
// __aeabi_unwind_cpp_pr1: [ 0x81 , SIZE , OP1 , OP2 , ... ]
- Offset = 0;
PersonalityIndex = AEABI_UNWIND_CPP_PR1;
- AddPersonalityIndexPrefix(Offset, PersonalityIndex);
- AddOpcodeSizePrefix(1);
+ size_t TotalSize = Ops.size() + 2;
+ size_t RoundUpSize = (TotalSize + 3) / 4 * 4;
+ Result.resize(RoundUpSize);
+ OpStreamer.EmitPersonalityIndex(PersonalityIndex);
+ OpStreamer.EmitSize(RoundUpSize);
}
}
- // Emit the padding finish opcodes if the size() is not multiple of 4.
- EmitFinishOpcodes();
+ // Copy the unwind opcodes
+ for (size_t i = OpBegins.size() - 1; i > 0; --i)
+ for (size_t j = OpBegins[i - 1], end = OpBegins[i]; j < end; ++j)
+ OpStreamer.EmitByte(Ops[j]);
- // Swap the byte order
- uint8_t *Ptr = Ops.begin() + Offset;
- assert(size() % 4 == 0 && "Final unwind opcodes should align to 4");
- for (size_t i = 0, n = size(); i < n; i += 4) {
- std::swap(Ptr[i], Ptr[i + 3]);
- std::swap(Ptr[i + 1], Ptr[i + 2]);
- }
+ // Emit the padding finish opcodes if the size is not multiple of 4.
+ OpStreamer.FillFinishOpcode();
+
+ // Reset the assembler state
+ Reset();
}
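
A standalone sketch (not part of the patch) of the index update used by the UnwindOpcodeStreamer added above: starting at 3, EmitByte visits positions 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, ... so bytes land most-significant-first within each little-endian uint32_t of the encoded table.

// Standalone sketch (not part of the patch): the position update used by
// UnwindOpcodeStreamer::EmitByte above.
#include <cstdio>

int main() {
  unsigned Pos = 3;
  for (int i = 0; i < 12; ++i) {
    std::printf("%u ", Pos);
    Pos = ((Pos ^ 0x3u) + 1) ^ 0x3u;   // same update as EmitByte()
  }
  std::printf("\n");  // prints: 3 2 1 0 7 6 5 4 11 10 9 8
  return 0;
}
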
diff --git a/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h b/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h
index f6ecaeb8b29f..ac67c6efabb7 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h
@@ -27,86 +27,61 @@ class MCSymbol;
class UnwindOpcodeAssembler {
private:
- llvm::SmallVector<uint8_t, 8> Ops;
-
- unsigned Offset;
- unsigned PersonalityIndex;
+ llvm::SmallVector<uint8_t, 32> Ops;
+ llvm::SmallVector<unsigned, 8> OpBegins;
bool HasPersonality;
- enum {
- // The number of bytes to be preserved for the size and personality index
- // prefix of unwind opcodes.
- NUM_PRESERVED_PREFIX_BUF = 2
- };
-
public:
UnwindOpcodeAssembler()
- : Ops(NUM_PRESERVED_PREFIX_BUF), Offset(NUM_PRESERVED_PREFIX_BUF),
- PersonalityIndex(NUM_PERSONALITY_INDEX), HasPersonality(0) {
+ : HasPersonality(0) {
+ OpBegins.push_back(0);
}
/// Reset the unwind opcode assembler.
void Reset() {
- Ops.resize(NUM_PRESERVED_PREFIX_BUF);
- Offset = NUM_PRESERVED_PREFIX_BUF;
- PersonalityIndex = NUM_PERSONALITY_INDEX;
+ Ops.clear();
+ OpBegins.clear();
+ OpBegins.push_back(0);
HasPersonality = 0;
}
- /// Get the size of the payload (including the size byte)
- size_t size() const {
- return Ops.size() - Offset;
- }
-
- /// Get the beginning of the payload
- const uint8_t *begin() const {
- return Ops.begin() + Offset;
- }
-
- /// Get the payload
- StringRef data() const {
- return StringRef(reinterpret_cast<const char *>(begin()), size());
- }
-
/// Set the personality index
void setPersonality(const MCSymbol *Per) {
HasPersonality = 1;
}
- /// Get the personality index
- unsigned getPersonalityIndex() const {
- return PersonalityIndex;
- }
-
/// Emit unwind opcodes for .save directives
void EmitRegSave(uint32_t RegSave);
/// Emit unwind opcodes for .vsave directives
void EmitVFPRegSave(uint32_t VFPRegSave);
- /// Emit unwind opcodes for .setfp directives
- void EmitSetFP(uint16_t FPReg);
+ /// Emit unwind opcodes to copy address from source register to $sp.
+ void EmitSetSP(uint16_t Reg);
- /// Emit unwind opcodes to update stack pointer
+ /// Emit unwind opcodes to add $sp with an offset.
void EmitSPOffset(int64_t Offset);
/// Finalize the unwind opcode sequence for EmitBytes()
- void Finalize();
+ void Finalize(unsigned &PersonalityIndex,
+ SmallVectorImpl<uint8_t> &Result);
private:
- /// Get the size of the opcodes in bytes.
- size_t getOpcodeSize() const {
- return Ops.size() - NUM_PRESERVED_PREFIX_BUF;
+ void EmitInt8(unsigned Opcode) {
+ Ops.push_back(Opcode & 0xff);
+ OpBegins.push_back(OpBegins.back() + 1);
}
- /// Add the length prefix to the payload
- void AddOpcodeSizePrefix(size_t Pos);
-
- /// Add personality index prefix in some compact format
- void AddPersonalityIndexPrefix(size_t Pos, unsigned PersonalityIndex);
+ void EmitInt16(unsigned Opcode) {
+ Ops.push_back((Opcode >> 8) & 0xff);
+ Ops.push_back(Opcode & 0xff);
+ OpBegins.push_back(OpBegins.back() + 2);
+ }
- /// Fill the words with finish opcode if it is not aligned
- void EmitFinishOpcodes();
+ void EmitBytes(const uint8_t *Opcode, size_t Size) {
+ Ops.insert(Ops.end(), Opcode, Opcode + Size);
+ OpBegins.push_back(OpBegins.back() + Size);
+ }
};
} // namespace llvm
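
A standalone sketch (not part of the patch) of the Ops/OpBegins bookkeeping declared above: OpBegins records where each emitted opcode starts, so Finalize() can replay whole opcodes in reverse emission order (directives are assembled in prologue order, but the unwinder must undo them starting from the most recent) while keeping each opcode's own bytes in order.

// Standalone sketch (not part of the patch): OpBegins marks where each opcode
// starts, so whole opcodes can be replayed in reverse emission order while
// each opcode's own bytes stay in order.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint8_t> Ops;
  std::vector<unsigned> OpBegins{0};

  auto EmitInt8 = [&](unsigned Opcode) {
    Ops.push_back(Opcode & 0xff);
    OpBegins.push_back(OpBegins.back() + 1);
  };
  auto EmitInt16 = [&](unsigned Opcode) {
    Ops.push_back((Opcode >> 8) & 0xff);
    Ops.push_back(Opcode & 0xff);
    OpBegins.push_back(OpBegins.back() + 2);
  };

  EmitInt16(0x80C0);   // a two-byte opcode, emitted first (e.g. a pop mask)
  EmitInt8(0xB0);      // a one-byte opcode, emitted last (0xB0 is "finish")

  // Same double loop as Finalize() above.
  for (size_t i = OpBegins.size() - 1; i > 0; --i)
    for (size_t j = OpBegins[i - 1], end = OpBegins[i]; j < end; ++j)
      std::printf("%02x ", Ops[j]);
  std::printf("\n");   // prints: b0 80 c0
  return 0;
}
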
diff --git a/lib/Target/ARM/MCTargetDesc/CMakeLists.txt b/lib/Target/ARM/MCTargetDesc/CMakeLists.txt
index a7ac5ca061e6..bab59f41c989 100644
--- a/lib/Target/ARM/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/ARM/MCTargetDesc/CMakeLists.txt
@@ -9,6 +9,7 @@ add_llvm_library(LLVMARMDesc
ARMMachObjectWriter.cpp
ARMELFObjectWriter.cpp
ARMUnwindOpAsm.cpp
+ ARMMachORelocationInfo.cpp
)
add_dependencies(LLVMARMDesc ARMCommonTableGen)
diff --git a/lib/Target/ARM/Thumb1FrameLowering.cpp b/lib/Target/ARM/Thumb1FrameLowering.cpp
index 1e2a8b03e1a0..cfb33f5b8212 100644
--- a/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -88,7 +88,8 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
const Thumb1InstrInfo &TII =
*static_cast<const Thumb1InstrInfo*>(MF.getTarget().getInstrInfo());
- unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize();
+ unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
+ unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
unsigned NumBytes = MFI->getStackSize();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
@@ -126,7 +127,6 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
case ARM::LR:
if (Reg == FramePtr)
FramePtrSpillFI = FI;
- AFI->addGPRCalleeSavedArea1Frame(FI);
GPRCS1Size += 4;
break;
case ARM::R8:
@@ -135,16 +135,12 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
case ARM::R11:
if (Reg == FramePtr)
FramePtrSpillFI = FI;
- if (STI.isTargetIOS()) {
- AFI->addGPRCalleeSavedArea2Frame(FI);
+ if (STI.isTargetIOS())
GPRCS2Size += 4;
- } else {
- AFI->addGPRCalleeSavedArea1Frame(FI);
+ else
GPRCS1Size += 4;
- }
break;
default:
- AFI->addDPRCalleeSavedAreaFrame(FI);
DPRCSSize += 8;
}
}
@@ -168,10 +164,17 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
NumBytes = DPRCSOffset;
+ int FramePtrOffsetInBlock = 0;
+ if (tryFoldSPUpdateIntoPushPop(MF, prior(MBBI), NumBytes)) {
+ FramePtrOffsetInBlock = NumBytes;
+ NumBytes = 0;
+ }
+
// Adjust FP so it point to the stack slot that contains the previous FP.
if (HasFP) {
+ FramePtrOffsetInBlock += MFI->getObjectOffset(FramePtrSpillFI) + GPRCS1Size;
AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
- .addFrameIndex(FramePtrSpillFI).addImm(0)
+ .addReg(ARM::SP).addImm(FramePtrOffsetInBlock / 4)
.setMIFlags(MachineInstr::FrameSetup));
if (NumBytes > 508)
// If offset is > 508 then sp cannot be adjusted in a single instruction,
@@ -212,13 +215,6 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
AFI->setShouldRestoreSPFromFP(true);
}
-static bool isCalleeSavedRegister(unsigned Reg, const uint16_t *CSRegs) {
- for (unsigned i = 0; CSRegs[i]; ++i)
- if (Reg == CSRegs[i])
- return true;
- return false;
-}
-
static bool isCSRestore(MachineInstr *MI, const uint16_t *CSRegs) {
if (MI->getOpcode() == ARM::tLDRspi &&
MI->getOperand(1).isFI() &&
@@ -249,7 +245,8 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
const Thumb1InstrInfo &TII =
*static_cast<const Thumb1InstrInfo*>(MF.getTarget().getInstrInfo());
- unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize();
+ unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
+ unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
int NumBytes = (int)MFI->getStackSize();
const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
@@ -294,8 +291,9 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
&MBB.front() != MBBI &&
prior(MBBI)->getOpcode() == ARM::tPOP) {
MachineBasicBlock::iterator PMBBI = prior(MBBI);
- emitSPUpdate(MBB, PMBBI, TII, dl, *RegInfo, NumBytes);
- } else
+ if (!tryFoldSPUpdateIntoPushPop(MF, PMBBI, NumBytes))
+ emitSPUpdate(MBB, PMBBI, TII, dl, *RegInfo, NumBytes);
+ } else if (!tryFoldSPUpdateIntoPushPop(MF, MBBI, NumBytes))
emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, NumBytes);
}
}
diff --git a/lib/Target/ARM/Thumb1InstrInfo.cpp b/lib/Target/ARM/Thumb1InstrInfo.cpp
index 095736d52a88..22a925e0ffbc 100644
--- a/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -22,7 +22,7 @@
using namespace llvm;
Thumb1InstrInfo::Thumb1InstrInfo(const ARMSubtarget &STI)
- : ARMBaseInstrInfo(STI), RI(*this, STI) {
+ : ARMBaseInstrInfo(STI), RI(STI) {
}
/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
diff --git a/lib/Target/ARM/Thumb1RegisterInfo.cpp b/lib/Target/ARM/Thumb1RegisterInfo.cpp
index 7452fb776ebd..65a7221d5db7 100644
--- a/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ b/lib/Target/ARM/Thumb1RegisterInfo.cpp
@@ -40,9 +40,8 @@ extern cl::opt<bool> ReuseFrameIndexVals;
using namespace llvm;
-Thumb1RegisterInfo::Thumb1RegisterInfo(const ARMBaseInstrInfo &tii,
- const ARMSubtarget &sti)
- : ARMBaseRegisterInfo(tii, sti) {
+Thumb1RegisterInfo::Thumb1RegisterInfo(const ARMSubtarget &sti)
+ : ARMBaseRegisterInfo(sti) {
}
const TargetRegisterClass*
@@ -70,6 +69,7 @@ Thumb1RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned MIFlags) const {
MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
const Constant *C = ConstantInt::get(
Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
@@ -426,7 +426,7 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx,
*this);
} else {
// Translate r0 = add sp, -imm to
- // r0 = -imm (this is then translated into a series of instructons)
+ // r0 = -imm (this is then translated into a series of instructions)
// r0 = add r0, sp
emitThumbConstant(MBB, II, DestReg, Offset, TII, *this, dl);
@@ -488,6 +488,9 @@ void
Thumb1RegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
unsigned BaseReg, int64_t Offset) const {
MachineInstr &MI = *I;
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo*>(
+ MI.getParent()->getParent()->getTarget().getInstrInfo());
int Off = Offset; // ARM doesn't need the general 64-bit offsets
unsigned i = 0;
@@ -513,6 +516,7 @@ Thumb1RegisterInfo::saveScavengerRegister(MachineBasicBlock &MBB,
// off the frame pointer (if, for example, there are alloca() calls in
// the function, the offset will be negative. Use R12 instead since that's
// a call clobbered register that we know won't be used in Thumb1 mode.
+ const TargetInstrInfo &TII = *MBB.getParent()->getTarget().getInstrInfo();
DebugLoc DL;
AddDefaultPred(BuildMI(MBB, I, DL, TII.get(ARM::tMOVr))
.addReg(ARM::R12, RegState::Define)
@@ -558,6 +562,8 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
DebugLoc dl = MI.getDebugLoc();
MachineInstrBuilder MIB(*MBB.getParent(), &MI);
@@ -567,11 +573,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
MF.getFrameInfo()->getStackSize() + SPAdj;
- if (AFI->isGPRCalleeSavedArea1Frame(FrameIndex))
- Offset -= AFI->getGPRCalleeSavedArea1Offset();
- else if (AFI->isGPRCalleeSavedArea2Frame(FrameIndex))
- Offset -= AFI->getGPRCalleeSavedArea2Offset();
- else if (MF.getFrameInfo()->hasVarSizedObjects()) {
+ if (MF.getFrameInfo()->hasVarSizedObjects()) {
assert(SPAdj == 0 && MF.getTarget().getFrameLowering()->hasFP(MF) &&
"Unexpected");
// There are alloca()'s in this function, must reference off the frame
diff --git a/lib/Target/ARM/Thumb1RegisterInfo.h b/lib/Target/ARM/Thumb1RegisterInfo.h
index ebbab36dd7b8..9689b23146a0 100644
--- a/lib/Target/ARM/Thumb1RegisterInfo.h
+++ b/lib/Target/ARM/Thumb1RegisterInfo.h
@@ -25,7 +25,7 @@ namespace llvm {
struct Thumb1RegisterInfo : public ARMBaseRegisterInfo {
public:
- Thumb1RegisterInfo(const ARMBaseInstrInfo &tii, const ARMSubtarget &STI);
+ Thumb1RegisterInfo(const ARMSubtarget &STI);
const TargetRegisterClass*
getLargestLegalSuperClass(const TargetRegisterClass *RC) const;
diff --git a/lib/Target/ARM/Thumb2ITBlockPass.cpp b/lib/Target/ARM/Thumb2ITBlockPass.cpp
index 97c254ce75a5..0b7d3bb7754f 100644
--- a/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -28,6 +28,7 @@ namespace {
static char ID;
Thumb2ITBlockPass() : MachineFunctionPass(ID) {}
+ bool restrictIT;
const Thumb2InstrInfo *TII;
const TargetRegisterInfo *TRI;
ARMFunctionInfo *AFI;
@@ -73,15 +74,15 @@ static void TrackDefUses(MachineInstr *MI,
for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
unsigned Reg = LocalUses[i];
- Uses.insert(Reg);
- for (MCSubRegIterator Subreg(Reg, TRI); Subreg.isValid(); ++Subreg)
+ for (MCSubRegIterator Subreg(Reg, TRI, /*IncludeSelf=*/true);
+ Subreg.isValid(); ++Subreg)
Uses.insert(*Subreg);
}
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Reg = LocalDefs[i];
- Defs.insert(Reg);
- for (MCSubRegIterator Subreg(Reg, TRI); Subreg.isValid(); ++Subreg)
+ for (MCSubRegIterator Subreg(Reg, TRI, /*IncludeSelf=*/true);
+ Subreg.isValid(); ++Subreg)
Defs.insert(*Subreg);
if (Reg == ARM::CPSR)
continue;
@@ -192,37 +193,42 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
// Form IT block.
ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
unsigned Mask = 0, Pos = 3;
- // Branches, including tricky ones like LDM_RET, need to end an IT
- // block so check the instruction we just put in the block.
- for (; MBBI != E && Pos &&
- (!MI->isBranch() && !MI->isReturn()) ; ++MBBI) {
- if (MBBI->isDebugValue())
- continue;
-
- MachineInstr *NMI = &*MBBI;
- MI = NMI;
-
- unsigned NPredReg = 0;
- ARMCC::CondCodes NCC = getITInstrPredicate(NMI, NPredReg);
- if (NCC == CC || NCC == OCC) {
- Mask |= (NCC & 1) << Pos;
- // Add implicit use of ITSTATE.
- NMI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*ifDef*/,
- true/*isImp*/, false/*isKill*/));
- LastITMI = NMI;
- } else {
- if (NCC == ARMCC::AL &&
- MoveCopyOutOfITBlock(NMI, CC, OCC, Defs, Uses)) {
- --MBBI;
- MBB.remove(NMI);
- MBB.insert(InsertPos, NMI);
- ++NumMovedInsts;
+
+ // v8 IT blocks are limited to one conditional op unless -arm-no-restrict-it
+ // is set: skip the loop
+ if (!restrictIT) {
+ // Branches, including tricky ones like LDM_RET, need to end an IT
+ // block so check the instruction we just put in the block.
+ for (; MBBI != E && Pos &&
+ (!MI->isBranch() && !MI->isReturn()) ; ++MBBI) {
+ if (MBBI->isDebugValue())
continue;
+
+ MachineInstr *NMI = &*MBBI;
+ MI = NMI;
+
+ unsigned NPredReg = 0;
+ ARMCC::CondCodes NCC = getITInstrPredicate(NMI, NPredReg);
+ if (NCC == CC || NCC == OCC) {
+ Mask |= (NCC & 1) << Pos;
+ // Add implicit use of ITSTATE.
+ NMI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*ifDef*/,
+ true/*isImp*/, false/*isKill*/));
+ LastITMI = NMI;
+ } else {
+ if (NCC == ARMCC::AL &&
+ MoveCopyOutOfITBlock(NMI, CC, OCC, Defs, Uses)) {
+ --MBBI;
+ MBB.remove(NMI);
+ MBB.insert(InsertPos, NMI);
+ ++NumMovedInsts;
+ continue;
+ }
+ break;
}
- break;
+ TrackDefUses(NMI, Defs, Uses, TRI);
+ --Pos;
}
- TrackDefUses(NMI, Defs, Uses, TRI);
- --Pos;
}
// Finalize IT mask.
@@ -250,6 +256,7 @@ bool Thumb2ITBlockPass::runOnMachineFunction(MachineFunction &Fn) {
AFI = Fn.getInfo<ARMFunctionInfo>();
TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());
TRI = TM.getRegisterInfo();
+ restrictIT = TM.getSubtarget<ARMSubtarget>().restrictIT();
if (!AFI->isThumbFunction())
return false;
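
A standalone sketch (not part of the patch) of the partial IT-block mask accumulated by the loop above: for each following instruction predicated on CC or its opposite OCC, the low bit of that condition is shifted into the next lower mask position; the terminating bit is only added when the mask is finalized, which is not shown in this hunk. With restrictIT set (the ARMv8 behaviour this change introduces), the loop is skipped entirely, so every IT covers a single conditional instruction.

// Standalone sketch (not part of the patch): accumulating the partial IT mask
// exactly as the loop above does (terminating bit added at finalization).
#include <cstdio>
#include <vector>

int main() {
  unsigned CC = 0;                  // ARMCC::EQ has value 0
  unsigned OCC = CC ^ 1;            // its opposite, ARMCC::NE
  std::vector<unsigned> FollowingConds = {CC, OCC, CC};  // T, E, T
  unsigned Mask = 0, Pos = 3;
  for (unsigned NCC : FollowingConds) {
    Mask |= (NCC & 1) << Pos;       // same update as the loop above
    --Pos;
  }
  std::printf("partial mask = 0x%x\n", Mask);  // 0x4 for {EQ, NE, EQ}
  return 0;
}
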
diff --git a/lib/Target/ARM/Thumb2InstrInfo.cpp b/lib/Target/ARM/Thumb2InstrInfo.cpp
index a1b48c226aa8..91788ac4f893 100644
--- a/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -31,12 +31,13 @@ OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
cl::init(false));
Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
- : ARMBaseInstrInfo(STI), RI(*this, STI) {
+ : ARMBaseInstrInfo(STI), RI(STI) {
}
/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
void Thumb2InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
- NopInst.setOpcode(ARM::tNOP);
+ NopInst.setOpcode(ARM::tHINT);
+ NopInst.addOperand(MCOperand::CreateImm(0));
NopInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
NopInst.addOperand(MCOperand::CreateReg(0));
}
@@ -214,6 +215,13 @@ void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
unsigned DestReg, unsigned BaseReg, int NumBytes,
ARMCC::CondCodes Pred, unsigned PredReg,
const ARMBaseInstrInfo &TII, unsigned MIFlags) {
+ if (NumBytes == 0 && DestReg != BaseReg) {
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
+ .addReg(BaseReg, RegState::Kill)
+ .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
+ return;
+ }
+
bool isSub = NumBytes < 0;
if (isSub) NumBytes = -NumBytes;
@@ -285,7 +293,7 @@ void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
NumBytes = 0;
} else {
// FIXME: Move this to ARMAddressingModes.h?
- unsigned RotAmt = CountLeadingZeros_32(ThisVal);
+ unsigned RotAmt = countLeadingZeros(ThisVal);
ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
NumBytes &= ~ThisVal;
assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
@@ -302,7 +310,7 @@ void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
NumBytes = 0;
} else {
// FIXME: Move this to ARMAddressingModes.h?
- unsigned RotAmt = CountLeadingZeros_32(ThisVal);
+ unsigned RotAmt = countLeadingZeros(ThisVal);
ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
NumBytes &= ~ThisVal;
assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
@@ -334,6 +342,7 @@ negativeOffsetOpcode(unsigned opcode)
case ARM::t2STRi12: return ARM::t2STRi8;
case ARM::t2STRBi12: return ARM::t2STRBi8;
case ARM::t2STRHi12: return ARM::t2STRHi8;
+ case ARM::t2PLDi12: return ARM::t2PLDi8;
case ARM::t2LDRi8:
case ARM::t2LDRHi8:
@@ -343,6 +352,7 @@ negativeOffsetOpcode(unsigned opcode)
case ARM::t2STRi8:
case ARM::t2STRBi8:
case ARM::t2STRHi8:
+ case ARM::t2PLDi8:
return opcode;
default:
@@ -364,6 +374,7 @@ positiveOffsetOpcode(unsigned opcode)
case ARM::t2STRi8: return ARM::t2STRi12;
case ARM::t2STRBi8: return ARM::t2STRBi12;
case ARM::t2STRHi8: return ARM::t2STRHi12;
+ case ARM::t2PLDi8: return ARM::t2PLDi12;
case ARM::t2LDRi12:
case ARM::t2LDRHi12:
@@ -373,6 +384,7 @@ positiveOffsetOpcode(unsigned opcode)
case ARM::t2STRi12:
case ARM::t2STRBi12:
case ARM::t2STRHi12:
+ case ARM::t2PLDi12:
return opcode;
default:
@@ -394,6 +406,7 @@ immediateOffsetOpcode(unsigned opcode)
case ARM::t2STRs: return ARM::t2STRi12;
case ARM::t2STRBs: return ARM::t2STRBi12;
case ARM::t2STRHs: return ARM::t2STRHi12;
+ case ARM::t2PLDs: return ARM::t2PLDi12;
case ARM::t2LDRi12:
case ARM::t2LDRHi12:
@@ -403,6 +416,7 @@ immediateOffsetOpcode(unsigned opcode)
case ARM::t2STRi12:
case ARM::t2STRBi12:
case ARM::t2STRHi12:
+ case ARM::t2PLDi12:
case ARM::t2LDRi8:
case ARM::t2LDRHi8:
case ARM::t2LDRBi8:
@@ -411,6 +425,7 @@ immediateOffsetOpcode(unsigned opcode)
case ARM::t2STRi8:
case ARM::t2STRBi8:
case ARM::t2STRHi8:
+ case ARM::t2PLDi8:
return opcode;
default:
@@ -484,7 +499,7 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
// Otherwise, extract 8 adjacent bits from the immediate into this
// t2ADDri/t2SUBri.
- unsigned RotAmt = CountLeadingZeros_32(Offset);
+ unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);
// We will handle these bits from offset, clear them.
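
A standalone sketch (not part of the patch) of the chunking step used by emitT2RegPlusImmediate and rewriteT2FrameIndex above: countLeadingZeros anchors an 8-bit window at the most significant set bit of the remaining offset, so each extracted chunk has the top bit of its window set and is encodable as a Thumb-2 modified immediate, which is what the surrounding asserts on getT2SOImmVal check.

// Standalone sketch (not part of the patch): peeling off 8 adjacent bits per
// iteration, anchored at the most significant set bit of the remaining value.
#include <cstdint>
#include <cstdio>

static uint32_t rotr32(uint32_t Val, unsigned Amt) {
  Amt &= 31;
  return Amt ? ((Val >> Amt) | (Val << (32 - Amt))) : Val;
}

static unsigned clz32(uint32_t Val) {           // portable countLeadingZeros
  unsigned N = 0;
  for (uint32_t Bit = 1u << 31; Bit && !(Val & Bit); Bit >>= 1)
    ++N;
  return N;
}

int main() {
  uint32_t NumBytes = 0x12345;                  // an arbitrary large offset
  while (NumBytes) {
    unsigned RotAmt = clz32(NumBytes);
    uint32_t ThisVal = NumBytes & rotr32(0xff000000u, RotAmt);
    std::printf("add chunk 0x%x\n", ThisVal);   // 0x12200, 0x144, 0x1
    NumBytes &= ~ThisVal;
  }
  return 0;
}
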
diff --git a/lib/Target/ARM/Thumb2RegisterInfo.cpp b/lib/Target/ARM/Thumb2RegisterInfo.cpp
index 1a7a4d450cfe..4cb827f308ef 100644
--- a/lib/Target/ARM/Thumb2RegisterInfo.cpp
+++ b/lib/Target/ARM/Thumb2RegisterInfo.cpp
@@ -24,9 +24,8 @@
#include "llvm/IR/Function.h"
using namespace llvm;
-Thumb2RegisterInfo::Thumb2RegisterInfo(const ARMBaseInstrInfo &tii,
- const ARMSubtarget &sti)
- : ARMBaseRegisterInfo(tii, sti) {
+Thumb2RegisterInfo::Thumb2RegisterInfo(const ARMSubtarget &sti)
+ : ARMBaseRegisterInfo(sti) {
}
/// emitLoadConstPool - Emits a load from constpool to materialize the
@@ -40,6 +39,7 @@ Thumb2RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned MIFlags) const {
MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
const Constant *C = ConstantInt::get(
Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
diff --git a/lib/Target/ARM/Thumb2RegisterInfo.h b/lib/Target/ARM/Thumb2RegisterInfo.h
index 6b397e869683..b1d63fa86d01 100644
--- a/lib/Target/ARM/Thumb2RegisterInfo.h
+++ b/lib/Target/ARM/Thumb2RegisterInfo.h
@@ -20,12 +20,12 @@
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
- class ARMSubtarget;
- class ARMBaseInstrInfo;
+
+class ARMSubtarget;
struct Thumb2RegisterInfo : public ARMBaseRegisterInfo {
public:
- Thumb2RegisterInfo(const ARMBaseInstrInfo &tii, const ARMSubtarget &STI);
+ Thumb2RegisterInfo(const ARMSubtarget &STI);
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp
index 3e69098edcc3..ddc7a66c9f39 100644
--- a/lib/Target/CppBackend/CPPBackend.cpp
+++ b/lib/Target/CppBackend/CPPBackend.cpp
@@ -33,6 +33,7 @@
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include <algorithm>
+#include <cctype>
#include <cstdio>
#include <map>
#include <set>
@@ -291,8 +292,6 @@ void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
Out << "GlobalValue::LinkOnceAnyLinkage "; break;
case GlobalValue::LinkOnceODRLinkage:
Out << "GlobalValue::LinkOnceODRLinkage "; break;
- case GlobalValue::LinkOnceODRAutoHideLinkage:
- Out << "GlobalValue::LinkOnceODRAutoHideLinkage"; break;
case GlobalValue::WeakAnyLinkage:
Out << "GlobalValue::WeakAnyLinkage"; break;
case GlobalValue::WeakODRLinkage:
@@ -497,6 +496,7 @@ void CppWriter::printAttributes(const AttributeSet &PAL,
HANDLE_ATTR(ReadOnly);
HANDLE_ATTR(NoInline);
HANDLE_ATTR(AlwaysInline);
+ HANDLE_ATTR(OptimizeNone);
HANDLE_ATTR(OptimizeForSize);
HANDLE_ATTR(StackProtect);
HANDLE_ATTR(StackProtectReq);
@@ -1139,7 +1139,7 @@ void CppWriter::printInstruction(const Instruction *I,
nl(Out);
for (SwitchInst::ConstCaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i) {
- const IntegersSubset CaseVal = i.getCaseValueEx();
+ const ConstantInt* CaseVal = i.getCaseValue();
const BasicBlock *BB = i.getCaseSuccessor();
Out << iName << "->addCase("
<< getOpName(CaseVal) << ", "
@@ -1160,8 +1160,7 @@ void CppWriter::printInstruction(const Instruction *I,
break;
}
case Instruction::Resume: {
- Out << "ResumeInst::Create(mod->getContext(), " << opNames[0]
- << ", " << bbname << ");";
+ Out << "ResumeInst::Create(" << opNames[0] << ", " << bbname << ");";
break;
}
case Instruction::Invoke: {
@@ -1175,7 +1174,7 @@ void CppWriter::printInstruction(const Instruction *I,
}
// FIXME: This shouldn't use magic numbers -3, -2, and -1.
Out << "InvokeInst *" << iName << " = InvokeInst::Create("
- << getOpName(inv->getCalledFunction()) << ", "
+ << getOpName(inv->getCalledValue()) << ", "
<< getOpName(inv->getNormalDest()) << ", "
<< getOpName(inv->getUnwindDest()) << ", "
<< iName << "_params, \"";
@@ -1589,6 +1588,20 @@ void CppWriter::printInstruction(const Instruction *I,
Out << "\");";
break;
}
+ case Instruction::LandingPad: {
+ const LandingPadInst *lpi = cast<LandingPadInst>(I);
+ Out << "LandingPadInst* " << iName << " = LandingPadInst::Create(";
+ printCppName(lpi->getType());
+ Out << ", " << opNames[0] << ", " << lpi->getNumClauses() << ", \"";
+ printEscapedString(lpi->getName());
+ Out << "\", " << bbname << ");";
+ nl(Out) << iName << "->setCleanup("
+ << (lpi->isCleanup() ? "true" : "false")
+ << ");";
+ for (unsigned i = 0, e = lpi->getNumClauses(); i != e; ++i)
+ nl(Out) << iName << "->addClause(" << opNames[i+1] << ");";
+ break;
+ }
}
DefinedValues.insert(I);
nl(Out);
@@ -1832,7 +1845,7 @@ void CppWriter::printInline(const std::string& fname,
unsigned arg_count = 1;
for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
AI != AE; ++AI) {
- Out << ", Value* arg_" << arg_count;
+ Out << ", Value* arg_" << arg_count++;
}
Out << ") {";
nl(Out);
diff --git a/lib/Target/Hexagon/CMakeLists.txt b/lib/Target/Hexagon/CMakeLists.txt
index b5b887e7c7c8..ae3c9ebc2555 100644
--- a/lib/Target/Hexagon/CMakeLists.txt
+++ b/lib/Target/Hexagon/CMakeLists.txt
@@ -9,8 +9,6 @@ tablegen(LLVM HexagonGenSubtargetInfo.inc -gen-subtarget)
tablegen(LLVM HexagonGenDFAPacketizer.inc -gen-dfa-packetizer)
add_public_tablegen_target(HexagonCommonTableGen)
-set(LLVM_COMMON_DEPENDS intrinsics_gen)
-
add_llvm_target(HexagonCodeGen
HexagonAsmPrinter.cpp
HexagonCallingConvLower.cpp
@@ -19,6 +17,7 @@ add_llvm_target(HexagonCodeGen
HexagonFrameLowering.cpp
HexagonHardwareLoops.cpp
HexagonFixupHwLoops.cpp
+ HexagonMachineFunctionInfo.cpp
HexagonMachineScheduler.cpp
HexagonMCInstLower.cpp
HexagonInstrInfo.cpp
@@ -28,14 +27,18 @@ add_llvm_target(HexagonCodeGen
HexagonRegisterInfo.cpp
HexagonRemoveSZExtArgs.cpp
HexagonSelectionDAGInfo.cpp
+ HexagonSplitConst32AndConst64.cpp
HexagonSplitTFRCondSets.cpp
HexagonSubtarget.cpp
HexagonTargetMachine.cpp
HexagonTargetObjectFile.cpp
HexagonVLIWPacketizer.cpp
HexagonNewValueJump.cpp
+ HexagonCopyToCombine.cpp
)
+add_dependencies(LLVMHexagonCodeGen HexagonCommonTableGen intrinsics_gen)
+
add_subdirectory(TargetInfo)
add_subdirectory(InstPrinter)
add_subdirectory(MCTargetDesc)
diff --git a/lib/Target/Hexagon/Hexagon.h b/lib/Target/Hexagon/Hexagon.h
index a9b00a22ac5d..5467ee361257 100644
--- a/lib/Target/Hexagon/Hexagon.h
+++ b/lib/Target/Hexagon/Hexagon.h
@@ -29,7 +29,7 @@ namespace llvm {
class HexagonTargetMachine;
class raw_ostream;
- FunctionPass *createHexagonISelDag(const HexagonTargetMachine &TM,
+ FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM,
CodeGenOpt::Level OptLevel);
FunctionPass *createHexagonDelaySlotFillerPass(const TargetMachine &TM);
FunctionPass *createHexagonFPMoverPass(const TargetMachine &TM);
@@ -37,11 +37,15 @@ namespace llvm {
FunctionPass *createHexagonCFGOptimizer(const HexagonTargetMachine &TM);
FunctionPass *createHexagonSplitTFRCondSets(const HexagonTargetMachine &TM);
+ FunctionPass *createHexagonSplitConst32AndConst64(
+ const HexagonTargetMachine &TM);
FunctionPass *createHexagonExpandPredSpillCode(
const HexagonTargetMachine &TM);
FunctionPass *createHexagonHardwareLoops();
FunctionPass *createHexagonPeephole();
FunctionPass *createHexagonFixupHwLoops();
+ FunctionPass *createHexagonNewValueJump();
+ FunctionPass *createHexagonCopyToCombine();
FunctionPass *createHexagonPacketizer();
FunctionPass *createHexagonNewValueJump();
diff --git a/lib/Target/Hexagon/Hexagon.td b/lib/Target/Hexagon/Hexagon.td
index 9b3a64316aa0..568798c3a412 100644
--- a/lib/Target/Hexagon/Hexagon.td
+++ b/lib/Target/Hexagon/Hexagon.td
@@ -120,15 +120,39 @@ def getPredNewOpcode : InstrMapping {
}
//===----------------------------------------------------------------------===//
+// Generate mapping table to relate .new predicated instructions with their old
+// format.
+//
+def getPredOldOpcode : InstrMapping {
+ let FilterClass = "PredNewRel";
+ let RowFields = ["BaseOpcode", "PredSense", "isNVStore"];
+ let ColFields = ["PNewValue"];
+ let KeyCol = ["new"];
+ let ValueCols = [[""]];
+}
+
+//===----------------------------------------------------------------------===//
// Generate mapping table to relate store instructions with their new-value
// format.
//
def getNewValueOpcode : InstrMapping {
let FilterClass = "NewValueRel";
let RowFields = ["BaseOpcode", "PredSense", "PNewValue"];
- let ColFields = ["isNVStore"];
- let KeyCol = ["0"];
- let ValueCols = [["1"]];
+ let ColFields = ["NValueST"];
+ let KeyCol = ["false"];
+ let ValueCols = [["true"]];
+}
+
+//===----------------------------------------------------------------------===//
+// Generate mapping table to relate new-value store instructions with their old
+// format.
+//
+def getNonNVStore : InstrMapping {
+ let FilterClass = "NewValueRel";
+ let RowFields = ["BaseOpcode", "PredSense", "PNewValue"];
+ let ColFields = ["NValueST"];
+ let KeyCol = ["true"];
+ let ValueCols = [["false"]];
}
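
A standalone sketch (not part of the patch), with hypothetical opcode constants rather than real Hexagon opcodes: the flavour of lookup table TableGen generates from the InstrMapping definitions above. Each mapping is keyed on one column value (here the ".new" form) and yields the related opcode from the same row (the non-.new form).

// Standalone sketch (not part of the patch), with hypothetical opcodes: key
// on one column value (the ".new" form) and return the row's other column
// (the old form).
#include <unordered_map>

enum HypotheticalOpcode { STORE_NEW = 1, STORE_OLD = 2, NO_OPCODE = -1 };

static int getPredOldOpcodeSketch(int NewOpc) {
  static const std::unordered_map<int, int> NewToOld = {
      {STORE_NEW, STORE_OLD},       // one row of the generated table
  };
  auto It = NewToOld.find(NewOpc);
  return It == NewToOld.end() ? NO_OPCODE : It->second;
}
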
def getBasedWithImmOffset : InstrMapping {
diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/lib/Target/Hexagon/HexagonAsmPrinter.cpp
index 88cd3fbacea0..a2e04baea76a 100644
--- a/lib/Target/Hexagon/HexagonAsmPrinter.cpp
+++ b/lib/Target/Hexagon/HexagonAsmPrinter.cpp
@@ -99,7 +99,7 @@ void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
return;
case MachineOperand::MO_GlobalAddress:
// Computing the address of a global symbol, not calling it.
- O << *Mang->getSymbol(MO.getGlobal());
+ O << *getSymbol(MO.getGlobal());
printOffset(MO.getOffset(), O);
return;
}
@@ -267,7 +267,7 @@ void HexagonAsmPrinter::printGlobalOperand(const MachineInstr *MI, int OpNo,
assert( (MO.getType() == MachineOperand::MO_GlobalAddress) &&
"Expecting global address");
- O << *Mang->getSymbol(MO.getGlobal());
+ O << *getSymbol(MO.getGlobal());
if (MO.getOffset() != 0) {
O << " + ";
O << MO.getOffset();
diff --git a/lib/Target/Hexagon/HexagonCallingConvLower.cpp b/lib/Target/Hexagon/HexagonCallingConvLower.cpp
index 2c93d04f98e6..f5f958c101b1 100644
--- a/lib/Target/Hexagon/HexagonCallingConvLower.cpp
+++ b/lib/Target/Hexagon/HexagonCallingConvLower.cpp
@@ -25,14 +25,13 @@ using namespace llvm;
Hexagon_CCState::Hexagon_CCState(CallingConv::ID CC, bool isVarArg,
const TargetMachine &tm,
- SmallVector<CCValAssign, 16> &locs,
+ SmallVectorImpl<CCValAssign> &locs,
LLVMContext &c)
- : CallingConv(CC), IsVarArg(isVarArg), TM(tm),
- TRI(*TM.getRegisterInfo()), Locs(locs), Context(c) {
+ : CallingConv(CC), IsVarArg(isVarArg), TM(tm), Locs(locs), Context(c) {
// No stack is used.
StackOffset = 0;
- UsedRegs.resize((TRI.getNumRegs()+31)/32);
+ UsedRegs.resize((TM.getRegisterInfo()->getNumRegs()+31)/32);
}
// HandleByVal - Allocate a stack slot large enough to pass an argument by
@@ -56,6 +55,7 @@ void Hexagon_CCState::HandleByVal(unsigned ValNo, EVT ValVT,
/// MarkAllocated - Mark a register and all of its aliases as allocated.
void Hexagon_CCState::MarkAllocated(unsigned Reg) {
+ const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
UsedRegs[*AI/32] |= 1 << (*AI&31);
}
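
A standalone sketch (not part of the patch) of the idiom behind the signature change above: taking SmallVectorImpl<T>& instead of SmallVector<T, 16>& lets callers pass a SmallVector of any inline capacity, because SmallVectorImpl is the size-erased base class.

// Standalone sketch (not part of the patch): SmallVectorImpl<T>& accepts a
// SmallVector of any inline capacity, which is why the signatures above use
// it for the Locs parameter.
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

static void fillLocs(SmallVectorImpl<int> &Out) {  // any inline size works
  for (int i = 0; i < 4; ++i)
    Out.push_back(i);
}

void demo() {
  SmallVector<int, 2> A;
  SmallVector<int, 16> B;
  fillLocs(A);  // both calls compile; with SmallVector<int, 16>& only B would
  fillLocs(B);
}
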
diff --git a/lib/Target/Hexagon/HexagonCallingConvLower.h b/lib/Target/Hexagon/HexagonCallingConvLower.h
index 489b3a3e5985..33c83064f90b 100644
--- a/lib/Target/Hexagon/HexagonCallingConvLower.h
+++ b/lib/Target/Hexagon/HexagonCallingConvLower.h
@@ -48,15 +48,14 @@ class Hexagon_CCState {
CallingConv::ID CallingConv;
bool IsVarArg;
const TargetMachine &TM;
- const TargetRegisterInfo &TRI;
- SmallVector<CCValAssign, 16> &Locs;
+ SmallVectorImpl<CCValAssign> &Locs;
LLVMContext &Context;
unsigned StackOffset;
SmallVector<uint32_t, 16> UsedRegs;
public:
Hexagon_CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &TM,
- SmallVector<CCValAssign, 16> &locs, LLVMContext &c);
+ SmallVectorImpl<CCValAssign> &locs, LLVMContext &c);
void addLoc(const CCValAssign &V) {
Locs.push_back(V);
diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
new file mode 100644
index 000000000000..dc440cb1356c
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -0,0 +1,677 @@
+//===------- HexagonCopyToCombine.cpp - Hexagon Copy-To-Combine Pass ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This pass replaces transfer instructions by combine instructions.
+// We walk along a basic block and look for two combinable instructions and try
+// to move them together. If we can move them next to each other we do so and
+// replace them with a combine instruction.
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "hexagon-copy-combine"
+
+#include "llvm/PassSupport.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "Hexagon.h"
+#include "HexagonInstrInfo.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonMachineFunctionInfo.h"
+
+using namespace llvm;
+
+static
+cl::opt<bool> IsCombinesDisabled("disable-merge-into-combines",
+ cl::Hidden, cl::ZeroOrMore,
+ cl::init(false),
+ cl::desc("Disable merging into combines"));
+static
+cl::opt<unsigned>
+MaxNumOfInstsBetweenNewValueStoreAndTFR("max-num-inst-between-tfr-and-nv-store",
+ cl::Hidden, cl::init(4),
+ cl::desc("Maximum distance between a tfr feeding a store we "
+ "consider the store still to be newifiable"));
+
+namespace llvm {
+ void initializeHexagonCopyToCombinePass(PassRegistry&);
+}
+
+
+namespace {
+
+class HexagonCopyToCombine : public MachineFunctionPass {
+ const HexagonInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ bool ShouldCombineAggressively;
+
+ DenseSet<MachineInstr *> PotentiallyNewifiableTFR;
+public:
+ static char ID;
+
+ HexagonCopyToCombine() : MachineFunctionPass(ID) {
+ initializeHexagonCopyToCombinePass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ const char *getPassName() const {
+ return "Hexagon Copy-To-Combine Pass";
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &Fn);
+
+private:
+ MachineInstr *findPairable(MachineInstr *I1, bool &DoInsertAtI1);
+
+ void findPotentialNewifiableTFRs(MachineBasicBlock &);
+
+ void combine(MachineInstr *I1, MachineInstr *I2,
+ MachineBasicBlock::iterator &MI, bool DoInsertAtI1);
+
+ bool isSafeToMoveTogether(MachineInstr *I1, MachineInstr *I2,
+ unsigned I1DestReg, unsigned I2DestReg,
+ bool &DoInsertAtI1);
+
+ void emitCombineRR(MachineBasicBlock::iterator &Before, unsigned DestReg,
+ MachineOperand &HiOperand, MachineOperand &LoOperand);
+
+ void emitCombineRI(MachineBasicBlock::iterator &Before, unsigned DestReg,
+ MachineOperand &HiOperand, MachineOperand &LoOperand);
+
+ void emitCombineIR(MachineBasicBlock::iterator &Before, unsigned DestReg,
+ MachineOperand &HiOperand, MachineOperand &LoOperand);
+
+ void emitCombineII(MachineBasicBlock::iterator &Before, unsigned DestReg,
+ MachineOperand &HiOperand, MachineOperand &LoOperand);
+};
+
+} // End anonymous namespace.
+
+char HexagonCopyToCombine::ID = 0;
+
+INITIALIZE_PASS(HexagonCopyToCombine, "hexagon-copy-combine",
+ "Hexagon Copy-To-Combine Pass", false, false)
+
+static bool isCombinableInstType(MachineInstr *MI,
+ const HexagonInstrInfo *TII,
+ bool ShouldCombineAggressively) {
+ switch(MI->getOpcode()) {
+ case Hexagon::TFR: {
+ // A COPY instruction can be combined if its arguments are IntRegs (32bit).
+ assert(MI->getOperand(0).isReg() && MI->getOperand(1).isReg());
+
+ unsigned DestReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ return Hexagon::IntRegsRegClass.contains(DestReg) &&
+ Hexagon::IntRegsRegClass.contains(SrcReg);
+ }
+
+ case Hexagon::TFRI: {
+ // A transfer-immediate can be combined if its argument is a signed 8bit
+ // value.
+ assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
+ unsigned DestReg = MI->getOperand(0).getReg();
+
+ // Only combine constant extended TFRI if we are in aggressive mode.
+ return Hexagon::IntRegsRegClass.contains(DestReg) &&
+ (ShouldCombineAggressively || isInt<8>(MI->getOperand(1).getImm()));
+ }
+
+ case Hexagon::TFRI_V4: {
+ if (!ShouldCombineAggressively)
+ return false;
+ assert(MI->getOperand(0).isReg() && MI->getOperand(1).isGlobal());
+
+ // Ensure that TargetFlags are MO_NO_FLAG for a global. This is a
+ // workaround for an ABI bug that prevents GOT relocations on combine
+ // instructions
+ if (MI->getOperand(1).getTargetFlags() != HexagonII::MO_NO_FLAG)
+ return false;
+
+ unsigned DestReg = MI->getOperand(0).getReg();
+ return Hexagon::IntRegsRegClass.contains(DestReg);
+ }
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool isGreaterThan8BitTFRI(MachineInstr *I) {
+ return I->getOpcode() == Hexagon::TFRI &&
+ !isInt<8>(I->getOperand(1).getImm());
+}
+static bool isGreaterThan6BitTFRI(MachineInstr *I) {
+ return I->getOpcode() == Hexagon::TFRI &&
+ !isUInt<6>(I->getOperand(1).getImm());
+}
+
+/// areCombinableOperations - Returns true if the two instructions can be merged
+/// into a combine (ignoring register constraints).
+static bool areCombinableOperations(const TargetRegisterInfo *TRI,
+ MachineInstr *HighRegInst,
+ MachineInstr *LowRegInst) {
+ assert((HighRegInst->getOpcode() == Hexagon::TFR ||
+ HighRegInst->getOpcode() == Hexagon::TFRI ||
+ HighRegInst->getOpcode() == Hexagon::TFRI_V4) &&
+ (LowRegInst->getOpcode() == Hexagon::TFR ||
+ LowRegInst->getOpcode() == Hexagon::TFRI ||
+ LowRegInst->getOpcode() == Hexagon::TFRI_V4) &&
+ "Assume individual instructions are of a combinable type");
+
+ const HexagonRegisterInfo *QRI =
+ static_cast<const HexagonRegisterInfo *>(TRI);
+
+ // V4 added some combine variations (mixed immediate and register source
+ // operands), if we are on < V4 we can only combine 2 register-to-register
+ // moves and 2 immediate-to-register moves. We also don't have
+ // constant-extenders.
+ if (!QRI->Subtarget.hasV4TOps())
+ return HighRegInst->getOpcode() == LowRegInst->getOpcode() &&
+ !isGreaterThan8BitTFRI(HighRegInst) &&
+ !isGreaterThan6BitTFRI(LowRegInst);
+
+ // There is no combine of two constant extended values.
+ if ((HighRegInst->getOpcode() == Hexagon::TFRI_V4 ||
+ isGreaterThan8BitTFRI(HighRegInst)) &&
+ (LowRegInst->getOpcode() == Hexagon::TFRI_V4 ||
+ isGreaterThan6BitTFRI(LowRegInst)))
+ return false;
+
+ return true;
+}
+
+static bool isEvenReg(unsigned Reg) {
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
+ Hexagon::IntRegsRegClass.contains(Reg));
+ return (Reg - Hexagon::R0) % 2 == 0;
+}
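
A standalone sketch (not part of the patch), using plain register numbers counted from R0 in place of MCRegister values: the even/odd pairing rule behind isEvenReg(). A Hexagon combine writes a 64-bit double register D(n) = R(2n+1):R(2n), so two transfer destinations can only be fused if one is an even register and the other is the odd register directly above it.

// Standalone sketch (not part of the patch), with registers as plain numbers
// counted from R0: the pairing rule behind isEvenReg().  D(n) = R(2n+1):R(2n).
#include <cassert>

static bool isEvenRegNo(unsigned RegNo) { return RegNo % 2 == 0; }

// True if HiRegNo:LoRegNo are the two halves of one double register.
static bool formsDoublePair(unsigned HiRegNo, unsigned LoRegNo) {
  return isEvenRegNo(LoRegNo) && HiRegNo == LoRegNo + 1;
}

int main() {
  assert(formsDoublePair(1, 0));    // D0 = R1:R0
  assert(!formsDoublePair(2, 1));   // R2:R1 is not a double register
  return 0;
}
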
+
+static void removeKillInfo(MachineInstr *MI, unsigned RegNotKilled) {
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ MachineOperand &Op = MI->getOperand(I);
+ if (!Op.isReg() || Op.getReg() != RegNotKilled || !Op.isKill())
+ continue;
+ Op.setIsKill(false);
+ }
+}
+
+/// isUnsafeToMoveAcross - Returns true if it is unsafe to move a copy
+/// instruction from \p UseReg to \p DestReg over the instruction \p I.
+static bool isUnsafeToMoveAcross(MachineInstr *I, unsigned UseReg,
+ unsigned DestReg,
+ const TargetRegisterInfo *TRI) {
+ return (UseReg && (I->modifiesRegister(UseReg, TRI))) ||
+ I->modifiesRegister(DestReg, TRI) ||
+ I->readsRegister(DestReg, TRI) ||
+ I->hasUnmodeledSideEffects() ||
+ I->isInlineAsm() || I->isDebugValue();
+}
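
A standalone sketch (not part of the patch), modelling instructions as toy records instead of MachineInstr: the same reordering test as isUnsafeToMoveAcross() above. Moving a copy whose source is UseReg and whose destination is DestReg across an instruction I is unsafe if I writes the source, writes or reads the destination, or is something the pass must not reorder around (unmodelled side effects, inline asm, debug values).

// Standalone sketch (not part of the patch), with toy instruction records in
// place of MachineInstr: the same reordering test as isUnsafeToMoveAcross().
#include <algorithm>
#include <vector>

struct ToyInst {
  std::vector<unsigned> Defs, Uses;
  bool HasSideEffects = false;      // also stands in for inline asm / debug
};

static bool modifies(const ToyInst &I, unsigned Reg) {
  return std::find(I.Defs.begin(), I.Defs.end(), Reg) != I.Defs.end();
}
static bool reads(const ToyInst &I, unsigned Reg) {
  return std::find(I.Uses.begin(), I.Uses.end(), Reg) != I.Uses.end();
}

// Moving "DestReg = copy-of UseReg" across I is unsafe if I writes the source,
// touches the destination, or must not be reordered around.
static bool isUnsafeToMoveAcrossToy(const ToyInst &I, unsigned UseReg,
                                    unsigned DestReg) {
  return (UseReg && modifies(I, UseReg)) || modifies(I, DestReg) ||
         reads(I, DestReg) || I.HasSideEffects;
}
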
+
+/// isSafeToMoveTogether - Returns true if it is safe to move I1 next to I2 such
+/// that the two instructions can be paired in a combine.
+bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
+ MachineInstr *I2,
+ unsigned I1DestReg,
+ unsigned I2DestReg,
+ bool &DoInsertAtI1) {
+
+ bool IsImmUseReg = I2->getOperand(1).isImm() || I2->getOperand(1).isGlobal();
+ unsigned I2UseReg = IsImmUseReg ? 0 : I2->getOperand(1).getReg();
+
+ // It is not safe to move I1 and I2 into one combine if I2 has a true
+ // dependence on I1.
+ if (I2UseReg && I1->modifiesRegister(I2UseReg, TRI))
+ return false;
+
+ bool isSafe = true;
+
+ // First try to move I2 towards I1.
+ {
+ // A reverse_iterator instantiated like below starts before I2, and I1
+ // respectively.
+ // Look at instructions I in between I2 and (excluding) I1.
+ MachineBasicBlock::reverse_iterator I(I2),
+ End = --(MachineBasicBlock::reverse_iterator(I1));
+ // At O3 we got better results (dhrystone) by being more conservative.
+ if (!ShouldCombineAggressively)
+ End = MachineBasicBlock::reverse_iterator(I1);
+ // If I2 kills its operand and we move I2 over an instruction that also
+ // uses I2's use reg we need to modify that (first) instruction to now kill
+ // this reg.
+ unsigned KilledOperand = 0;
+ if (I2->killsRegister(I2UseReg))
+ KilledOperand = I2UseReg;
+ MachineInstr *KillingInstr = 0;
+
+ for (; I != End; ++I) {
+ // If the intervening instruction I:
+ // * modifies I2's use reg
+ // * modifies I2's def reg
+ // * reads I2's def reg
+ // * or has unmodelled side effects
+ // we can't move I2 across it.
+ if (isUnsafeToMoveAcross(&*I, I2UseReg, I2DestReg, TRI)) {
+ isSafe = false;
+ break;
+ }
+
+ // Update first use of the killed operand.
+ if (!KillingInstr && KilledOperand &&
+ I->readsRegister(KilledOperand, TRI))
+ KillingInstr = &*I;
+ }
+ if (isSafe) {
+ // Update the intermediate instruction with the kill flag.
+ if (KillingInstr) {
+ bool Added = KillingInstr->addRegisterKilled(KilledOperand, TRI, true);
+ (void)Added; // suppress compiler warning
+ assert(Added && "Must successfully update kill flag");
+ removeKillInfo(I2, KilledOperand);
+ }
+ DoInsertAtI1 = true;
+ return true;
+ }
+ }
+
+ // Try to move I1 towards I2.
+ {
+ // Look at instructions I in between I1 and (excluding) I2.
+ MachineBasicBlock::iterator I(I1), End(I2);
+ // At O3 we got better results (dhrystone) by being more conservative here.
+ if (!ShouldCombineAggressively)
+ End = llvm::next(MachineBasicBlock::iterator(I2));
+ IsImmUseReg = I1->getOperand(1).isImm() || I1->getOperand(1).isGlobal();
+ unsigned I1UseReg = IsImmUseReg ? 0 : I1->getOperand(1).getReg();
+ // Track killed operands. If we move across an instruction that kills our
+ // operand, we need to update the kill information on the moved I1. It kills
+ // the operand now.
+ MachineInstr *KillingInstr = 0;
+ unsigned KilledOperand = 0;
+
+ while(++I != End) {
+ // If the intervening instruction I:
+ // * modifies I1's use reg
+ // * modifies I1's def reg
+ // * reads I1's def reg
+ // * or has unmodelled side effects
+ // * or has a killed aliased register use of I1's use reg:
+ // %D4<def> = TFRI64 16
+ // %R6<def> = TFR %R9
+ // %R8<def> = KILL %R8, %D4<imp-use,kill>
+ // If we want to move the transfer to R6 across the KILL instruction we
+ // would have to remove the %D4<imp-use,kill> operand. For now, we are
+ // conservative and disallow the move, because llvm has no api to remove
+ // a kill flag for a register (a removeRegisterKilled() analogous to
+ // addRegisterKilled) that handles aliased registers correctly.
+ // then we can't move I1 across it.
+ if (isUnsafeToMoveAcross(I, I1UseReg, I1DestReg, TRI) ||
+ // Check for an aliased register kill. Bail out if we see one.
+ (!I->killsRegister(I1UseReg) && I->killsRegister(I1UseReg, TRI)))
+ return false;
+
+ // Check for an exact kill (registers match).
+ if (I1UseReg && I->killsRegister(I1UseReg)) {
+ assert(KillingInstr == 0 && "Should only see one killing instruction");
+ KilledOperand = I1UseReg;
+ KillingInstr = &*I;
+ }
+ }
+ if (KillingInstr) {
+ removeKillInfo(KillingInstr, KilledOperand);
+ // Update I1 to set the kill flag. This flag will later be picked up by
+ // the new COMBINE instruction.
+ bool Added = I1->addRegisterKilled(KilledOperand, TRI);
+      (void)Added; // suppress compiler warning
+ assert(Added && "Must successfully update kill flag");
+ }
+ DoInsertAtI1 = false;
+ }
+
+ return true;
+}
+
+/// findPotentialNewifiableTFRs - Finds transfers that feed stores that could
+/// be newified. (A use of a 64 bit register define cannot be newified.)
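+/// For illustration (hypothetical, not from the original sources): when a
+/// TFRI result is used by a store that mayBeNewStore() reports as a new-value
+/// candidate and the two are at most
+/// MaxNumOfInstsBetweenNewValueStoreAndTFR instructions apart, the TFRI is
+/// recorded in PotentiallyNewifiableTFR and runOnMachineFunction will not
+/// fold it into a COMBINE when combining aggressively.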
+void
+HexagonCopyToCombine::findPotentialNewifiableTFRs(MachineBasicBlock &BB) {
+ DenseMap<unsigned, MachineInstr *> LastDef;
+ for (MachineBasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
+ MachineInstr *MI = I;
+ // Mark TFRs that feed a potential new value store as such.
+    if (TII->mayBeNewStore(MI)) {
+ // Look for uses of TFR instructions.
+ for (unsigned OpdIdx = 0, OpdE = MI->getNumOperands(); OpdIdx != OpdE;
+ ++OpdIdx) {
+ MachineOperand &Op = MI->getOperand(OpdIdx);
+
+ // Skip over anything except register uses.
+ if (!Op.isReg() || !Op.isUse() || !Op.getReg())
+ continue;
+
+ // Look for the defining instruction.
+ unsigned Reg = Op.getReg();
+ MachineInstr *DefInst = LastDef[Reg];
+ if (!DefInst)
+ continue;
+ if (!isCombinableInstType(DefInst, TII, ShouldCombineAggressively))
+ continue;
+
+        // Only newifiable stores that are close by should influence the
+        // decision.
+ MachineBasicBlock::iterator It(DefInst);
+ unsigned NumInstsToDef = 0;
+ while (&*It++ != MI)
+ ++NumInstsToDef;
+
+ if (NumInstsToDef > MaxNumOfInstsBetweenNewValueStoreAndTFR)
+ continue;
+
+ PotentiallyNewifiableTFR.insert(DefInst);
+ }
+ // Skip to next instruction.
+ continue;
+ }
+
+    // Record in the map the instruction that last defined each integer or
+    // double register.
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ MachineOperand &Op = MI->getOperand(I);
+ if (!Op.isReg() || !Op.isDef() || !Op.getReg())
+ continue;
+ unsigned Reg = Op.getReg();
+ if (Hexagon::DoubleRegsRegClass.contains(Reg)) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ LastDef[*SubRegs] = MI;
+ }
+ } else if (Hexagon::IntRegsRegClass.contains(Reg))
+ LastDef[Reg] = MI;
+ }
+ }
+}
+
+bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {
+
+ if (IsCombinesDisabled) return false;
+
+ bool HasChanged = false;
+
+ // Get target info.
+ TRI = MF.getTarget().getRegisterInfo();
+ TII = static_cast<const HexagonInstrInfo *>(MF.getTarget().getInstrInfo());
+
+ // Combine aggressively (for code size)
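+  // (That is CodeGenOpt::None, Less and Default, i.e. everything below -O3;
+  // at -O3 the pass stays conservative, matching the comments in
+  // isSafeToMoveTogether.)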
+ ShouldCombineAggressively =
+ MF.getTarget().getOptLevel() <= CodeGenOpt::Default;
+
+ // Traverse basic blocks.
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
+ ++BI) {
+ PotentiallyNewifiableTFR.clear();
+ findPotentialNewifiableTFRs(*BI);
+
+ // Traverse instructions in basic block.
+    for (MachineBasicBlock::iterator MI = BI->begin(), End = BI->end();
+         MI != End;) {
+ MachineInstr *I1 = MI++;
+      // Don't combine a TFR whose user could be newified (instructions that
+      // define double registers cannot be newified - Programmer's Ref Manual
+      // 5.4.2 New-value stores).
+ if (ShouldCombineAggressively && PotentiallyNewifiableTFR.count(I1))
+ continue;
+
+ // Ignore instructions that are not combinable.
+ if (!isCombinableInstType(I1, TII, ShouldCombineAggressively))
+ continue;
+
+ // Find a second instruction that can be merged into a combine
+ // instruction.
+ bool DoInsertAtI1 = false;
+ MachineInstr *I2 = findPairable(I1, DoInsertAtI1);
+ if (I2) {
+ HasChanged = true;
+ combine(I1, I2, MI, DoInsertAtI1);
+ }
+ }
+ }
+
+ return HasChanged;
+}
+
+/// findPairable - Returns an instruction that can be merged with \p I1 into a
+/// COMBINE instruction or 0 if no such instruction can be found. Returns true
+/// in \p DoInsertAtI1 if the combine must be inserted at instruction \p I1 and
+/// false if the combine must be inserted at the returned instruction.
+MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr *I1,
+ bool &DoInsertAtI1) {
+ MachineBasicBlock::iterator I2 = llvm::next(MachineBasicBlock::iterator(I1));
+ unsigned I1DestReg = I1->getOperand(0).getReg();
+
+ for (MachineBasicBlock::iterator End = I1->getParent()->end(); I2 != End;
+ ++I2) {
+ // Bail out early if we see a second definition of I1DestReg.
+ if (I2->modifiesRegister(I1DestReg, TRI))
+ break;
+
+ // Ignore non-combinable instructions.
+ if (!isCombinableInstType(I2, TII, ShouldCombineAggressively))
+ continue;
+
+ // Don't combine a TFR whose user could be newified.
+ if (ShouldCombineAggressively && PotentiallyNewifiableTFR.count(I2))
+ continue;
+
+ unsigned I2DestReg = I2->getOperand(0).getReg();
+
+    // Check that the destination registers are adjacent and that the lower of
+    // the two is even.
+ bool IsI1LowReg = (I2DestReg - I1DestReg) == 1;
+ bool IsI2LowReg = (I1DestReg - I2DestReg) == 1;
+ unsigned FirstRegIndex = IsI1LowReg ? I1DestReg : I2DestReg;
+ if ((!IsI1LowReg && !IsI2LowReg) || !isEvenReg(FirstRegIndex))
+ continue;
+
+    // Check that the two instructions are combinable. V4 allows more
+    // instructions to be merged into a combine.
+    // The order matters because in a TFRI we may encode an int8 as the hi reg
+    // operand but only a uint6 as the low reg operand.
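+    // For illustration (hypothetical values): an immediate such as -3 fits
+    // the signed 8-bit hi slot but not the unsigned 6-bit lo slot, so which
+    // of I1 and I2 supplies the low register can decide whether the pair is
+    // combinable.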
+ if ((IsI2LowReg && !areCombinableOperations(TRI, I1, I2)) ||
+ (IsI1LowReg && !areCombinableOperations(TRI, I2, I1)))
+ break;
+
+ if (isSafeToMoveTogether(I1, I2, I1DestReg, I2DestReg,
+ DoInsertAtI1))
+ return I2;
+
+ // Not safe. Stop searching.
+ break;
+ }
+ return 0;
+}
+
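+// Illustrative end-to-end example (hypothetical, not from the original
+// sources): assuming %R0/%R1 form the %D0 pair, combine() below rewrites
+//   %R0<def> = TFRI 0
+//   %R1<def> = TFRI 42
+// into the single instruction
+//   %D0<def> = COMBINE_Ii 42, 0
+// with the odd register supplying the high word and the even register the
+// low word.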
+void HexagonCopyToCombine::combine(MachineInstr *I1, MachineInstr *I2,
+ MachineBasicBlock::iterator &MI,
+ bool DoInsertAtI1) {
+  // We are going to delete I2. If MI points to I2, advance it to the next
+  // instruction.
+ if ((MachineInstr *)MI == I2) ++MI;
+
+ // Figure out whether I1 or I2 goes into the lowreg part.
+ unsigned I1DestReg = I1->getOperand(0).getReg();
+ unsigned I2DestReg = I2->getOperand(0).getReg();
+ bool IsI1Loreg = (I2DestReg - I1DestReg) == 1;
+ unsigned LoRegDef = IsI1Loreg ? I1DestReg : I2DestReg;
+
+ // Get the double word register.
+ unsigned DoubleRegDest =
+ TRI->getMatchingSuperReg(LoRegDef, Hexagon::subreg_loreg,
+ &Hexagon::DoubleRegsRegClass);
+ assert(DoubleRegDest != 0 && "Expect a valid register");
+
+  // Set up source operands.
+ MachineOperand &LoOperand = IsI1Loreg ? I1->getOperand(1) :
+ I2->getOperand(1);
+ MachineOperand &HiOperand = IsI1Loreg ? I2->getOperand(1) :
+ I1->getOperand(1);
+
+ // Figure out which source is a register and which a constant.
+ bool IsHiReg = HiOperand.isReg();
+ bool IsLoReg = LoOperand.isReg();
+
+ MachineBasicBlock::iterator InsertPt(DoInsertAtI1 ? I1 : I2);
+ // Emit combine.
+ if (IsHiReg && IsLoReg)
+ emitCombineRR(InsertPt, DoubleRegDest, HiOperand, LoOperand);
+ else if (IsHiReg)
+ emitCombineRI(InsertPt, DoubleRegDest, HiOperand, LoOperand);
+ else if (IsLoReg)
+ emitCombineIR(InsertPt, DoubleRegDest, HiOperand, LoOperand);
+ else
+ emitCombineII(InsertPt, DoubleRegDest, HiOperand, LoOperand);
+
+ I1->eraseFromParent();
+ I2->eraseFromParent();
+}
+
+void HexagonCopyToCombine::emitCombineII(MachineBasicBlock::iterator &InsertPt,
+ unsigned DoubleDestReg,
+ MachineOperand &HiOperand,
+ MachineOperand &LoOperand) {
+ DebugLoc DL = InsertPt->getDebugLoc();
+ MachineBasicBlock *BB = InsertPt->getParent();
+
+ // Handle globals.
+ if (HiOperand.isGlobal()) {
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_Ii), DoubleDestReg)
+ .addGlobalAddress(HiOperand.getGlobal(), HiOperand.getOffset(),
+ HiOperand.getTargetFlags())
+ .addImm(LoOperand.getImm());
+ return;
+ }
+ if (LoOperand.isGlobal()) {
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_iI_V4), DoubleDestReg)
+ .addImm(HiOperand.getImm())
+ .addGlobalAddress(LoOperand.getGlobal(), LoOperand.getOffset(),
+ LoOperand.getTargetFlags());
+ return;
+ }
+
+ // Handle constant extended immediates.
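+  // (For illustration, hypothetical values: Hi = #200 fails isInt<8> and
+  // takes the first branch below, while a small Hi together with Lo = #100,
+  // which fails isUInt<6>, selects the COMBINE_iI_V4 form instead.)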
+ if (!isInt<8>(HiOperand.getImm())) {
+ assert(isInt<8>(LoOperand.getImm()));
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_Ii), DoubleDestReg)
+ .addImm(HiOperand.getImm())
+ .addImm(LoOperand.getImm());
+ return;
+ }
+
+ if (!isUInt<6>(LoOperand.getImm())) {
+ assert(isInt<8>(HiOperand.getImm()));
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_iI_V4), DoubleDestReg)
+ .addImm(HiOperand.getImm())
+ .addImm(LoOperand.getImm());
+ return;
+ }
+
+ // Insert new combine instruction.
+ // DoubleRegDest = combine #HiImm, #LoImm
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_Ii), DoubleDestReg)
+ .addImm(HiOperand.getImm())
+ .addImm(LoOperand.getImm());
+}
+
+void HexagonCopyToCombine::emitCombineIR(MachineBasicBlock::iterator &InsertPt,
+ unsigned DoubleDestReg,
+ MachineOperand &HiOperand,
+ MachineOperand &LoOperand) {
+ unsigned LoReg = LoOperand.getReg();
+ unsigned LoRegKillFlag = getKillRegState(LoOperand.isKill());
+
+ DebugLoc DL = InsertPt->getDebugLoc();
+ MachineBasicBlock *BB = InsertPt->getParent();
+
+ // Handle global.
+ if (HiOperand.isGlobal()) {
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_Ir_V4), DoubleDestReg)
+ .addGlobalAddress(HiOperand.getGlobal(), HiOperand.getOffset(),
+ HiOperand.getTargetFlags())
+ .addReg(LoReg, LoRegKillFlag);
+ return;
+ }
+ // Insert new combine instruction.
+ // DoubleRegDest = combine #HiImm, LoReg
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_Ir_V4), DoubleDestReg)
+ .addImm(HiOperand.getImm())
+ .addReg(LoReg, LoRegKillFlag);
+}
+
+void HexagonCopyToCombine::emitCombineRI(MachineBasicBlock::iterator &InsertPt,
+ unsigned DoubleDestReg,
+ MachineOperand &HiOperand,
+ MachineOperand &LoOperand) {
+ unsigned HiRegKillFlag = getKillRegState(HiOperand.isKill());
+ unsigned HiReg = HiOperand.getReg();
+
+ DebugLoc DL = InsertPt->getDebugLoc();
+ MachineBasicBlock *BB = InsertPt->getParent();
+
+ // Handle global.
+ if (LoOperand.isGlobal()) {
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_rI_V4), DoubleDestReg)
+ .addReg(HiReg, HiRegKillFlag)
+ .addGlobalAddress(LoOperand.getGlobal(), LoOperand.getOffset(),
+ LoOperand.getTargetFlags());
+ return;
+ }
+
+ // Insert new combine instruction.
+ // DoubleRegDest = combine HiReg, #LoImm
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_rI_V4), DoubleDestReg)
+ .addReg(HiReg, HiRegKillFlag)
+ .addImm(LoOperand.getImm());
+}
+
+void HexagonCopyToCombine::emitCombineRR(MachineBasicBlock::iterator &InsertPt,
+ unsigned DoubleDestReg,
+ MachineOperand &HiOperand,
+ MachineOperand &LoOperand) {
+ unsigned LoRegKillFlag = getKillRegState(LoOperand.isKill());
+ unsigned HiRegKillFlag = getKillRegState(HiOperand.isKill());
+ unsigned LoReg = LoOperand.getReg();
+ unsigned HiReg = HiOperand.getReg();
+
+ DebugLoc DL = InsertPt->getDebugLoc();
+ MachineBasicBlock *BB = InsertPt->getParent();
+
+ // Insert new combine instruction.
+ // DoubleRegDest = combine HiReg, LoReg
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_rr), DoubleDestReg)
+ .addReg(HiReg, HiRegKillFlag)
+ .addReg(LoReg, LoRegKillFlag);
+}
+
+FunctionPass *llvm::createHexagonCopyToCombine() {
+ return new HexagonCopyToCombine();
+}
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index de993ee8751a..2b04f25dd677 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -76,17 +76,12 @@ void HexagonFrameLowering::determineFrameLayout(MachineFunction &MF) const {
void HexagonFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineModuleInfo &MMI = MF.getMMI();
MachineBasicBlock::iterator MBBI = MBB.begin();
const HexagonRegisterInfo *QRI =
static_cast<const HexagonRegisterInfo *>(MF.getTarget().getRegisterInfo());
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
determineFrameLayout(MF);
- // Check if frame moves are needed for EH.
- bool needsFrameMoves = MMI.hasDebugInfo() ||
- !MF.getFunction()->needsUnwindTableEntry();
-
// Get the number of bytes to allocate from the FrameInfo.
int NumBytes = (int) MFI->getStackSize();
@@ -113,28 +108,6 @@ void HexagonFrameLowering::emitPrologue(MachineFunction &MF) const {
MO.setImm(MFI->getMaxCallFrameSize());
}
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
-
- if (needsFrameMoves) {
- // Advance CFA. DW_CFA_def_cfa
- unsigned FPReg = QRI->getFrameRegister();
- unsigned RAReg = QRI->getRARegister();
-
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(FPReg, -8);
- Moves.push_back(MachineMove(0, Dst, Src));
-
- // R31 = (R31 - #4)
- MachineLocation LRDst(RAReg, -4);
- MachineLocation LRSrc(RAReg);
- Moves.push_back(MachineMove(0, LRDst, LRSrc));
-
- // R30 = (R30 - #8)
- MachineLocation SPDst(FPReg, -8);
- MachineLocation SPSrc(FPReg);
- Moves.push_back(MachineMove(0, SPDst, SPSrc));
- }
-
//
// Only insert ALLOCFRAME if we need to.
//
@@ -174,30 +147,55 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock::iterator MBBI = prior(MBB.end());
DebugLoc dl = MBBI->getDebugLoc();
//
- // Only insert deallocframe if we need to.
+ // Only insert deallocframe if we need to. Also at -O0. See comment
+ // in emitPrologue above.
//
- if (hasFP(MF)) {
+ if (hasFP(MF) || MF.getTarget().getOptLevel() == CodeGenOpt::None) {
MachineBasicBlock::iterator MBBI = prior(MBB.end());
MachineBasicBlock::iterator MBBI_end = MBB.end();
- //
- // For Hexagon, we don't need the frame size.
- //
- MachineFrameInfo *MFI = MF.getFrameInfo();
- int NumBytes = (int) MFI->getStackSize();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
-
+ // Handle EH_RETURN.
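+      // (Illustrative note: R29 is the Hexagon stack pointer; the EH return
+      // offset is expected in R28 and is added to R29 after DEALLOCFRAME.)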
+ if (MBBI->getOpcode() == Hexagon::EH_RETURN_JMPR) {
+ assert(MBBI->getOperand(0).isReg() && "Offset should be in register!");
+ BuildMI(MBB, MBBI, dl, TII.get(Hexagon::DEALLOCFRAME));
+ BuildMI(MBB, MBBI, dl, TII.get(Hexagon::ADD_rr),
+ Hexagon::R29).addReg(Hexagon::R29).addReg(Hexagon::R28);
+ return;
+ }
// Replace 'jumpr r31' instruction with dealloc_return for V4 and higher
// versions.
if (STI.hasV4TOps() && MBBI->getOpcode() == Hexagon::JMPret
&& !DisableDeallocRet) {
- // Remove jumpr node.
- MBB.erase(MBBI);
+ // Check for RESTORE_DEALLOC_RET_JMP_V4 call. Don't emit an extra DEALLOC
+ // instruction if we encounter it.
+ MachineBasicBlock::iterator BeforeJMPR =
+ MBB.begin() == MBBI ? MBBI : prior(MBBI);
+ if (BeforeJMPR != MBBI &&
+ BeforeJMPR->getOpcode() == Hexagon::RESTORE_DEALLOC_RET_JMP_V4) {
+ // Remove the JMPR node.
+ MBB.erase(MBBI);
+ return;
+ }
+
// Add dealloc_return.
- BuildMI(MBB, MBBI_end, dl, TII.get(Hexagon::DEALLOC_RET_V4))
- .addImm(NumBytes);
- } else { // Add deallocframe for V2 and V3.
- BuildMI(MBB, MBBI, dl, TII.get(Hexagon::DEALLOCFRAME)).addImm(NumBytes);
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI_end, dl, TII.get(Hexagon::DEALLOC_RET_V4));
+ // Transfer the function live-out registers.
+ MIB->copyImplicitOps(*MBB.getParent(), &*MBBI);
+ // Remove the JUMPR node.
+ MBB.erase(MBBI);
+ } else { // Add deallocframe for V2 and V3, and V4 tail calls.
+ // Check for RESTORE_DEALLOC_BEFORE_TAILCALL_V4. We don't need an extra
+ // DEALLOCFRAME instruction after it.
+ MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
+ MachineBasicBlock::iterator I =
+ Term == MBB.begin() ? MBB.end() : prior(Term);
+ if (I != MBB.end() &&
+ I->getOpcode() == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4)
+ return;
+
+ BuildMI(MBB, MBBI, dl, TII.get(Hexagon::DEALLOCFRAME));
}
}
}
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index d00278811a4a..52d5ab2fee27 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -134,7 +134,7 @@ namespace {
/// has a computable trip count and, if so, return a value that represents
/// the trip count expression.
CountValue *getLoopTripCount(MachineLoop *L,
- SmallVector<MachineInstr*, 2> &OldInsts);
+ SmallVectorImpl<MachineInstr *> &OldInsts);
/// \brief Return the expression that represents the number of times
/// a loop iterates. The function takes the operands that represent the
@@ -164,7 +164,7 @@ namespace {
/// \brief Return true if the instruction is now dead.
bool isDead(const MachineInstr *MI,
- SmallVector<MachineInstr*, 1> &DeadPhis) const;
+ SmallVectorImpl<MachineInstr *> &DeadPhis) const;
/// \brief Remove the instruction if it is now dead.
void removeIfDead(MachineInstr *MI);
@@ -428,7 +428,7 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L,
/// induction variable patterns that are used in the calculation for
/// the number of time the loop is executed.
CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
- SmallVector<MachineInstr*, 2> &OldInsts) {
+ SmallVectorImpl<MachineInstr *> &OldInsts) {
MachineBasicBlock *TopMBB = L->getTopBlock();
MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin();
assert(PI != TopMBB->pred_end() &&
@@ -871,7 +871,7 @@ bool HexagonHardwareLoops::isInvalidLoopOperation(
/// \brief - Return true if the loop contains an instruction that inhibits
/// the use of the hardware loop function.
bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L) const {
- const std::vector<MachineBasicBlock*> Blocks = L->getBlocks();
+ const std::vector<MachineBasicBlock *> &Blocks = L->getBlocks();
for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
MachineBasicBlock *MBB = Blocks[i];
for (MachineBasicBlock::iterator
@@ -890,7 +890,7 @@ bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L) const {
/// for inline asm, physical registers and instructions with side effects
/// removed.
bool HexagonHardwareLoops::isDead(const MachineInstr *MI,
- SmallVector<MachineInstr*, 1> &DeadPhis) const {
+ SmallVectorImpl<MachineInstr *> &DeadPhis) const {
// Examine each operand.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index 54ca2c97ea02..5ae93284269b 100644
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -50,15 +50,13 @@ class HexagonDAGToDAGISel : public SelectionDAGISel {
// Keep a reference to HexagonTargetMachine.
const HexagonTargetMachine& TM;
- const HexagonInstrInfo *TII;
DenseMap<const GlobalValue *, unsigned> GlobalAddressUseCountMap;
public:
- explicit HexagonDAGToDAGISel(const HexagonTargetMachine &targetmachine,
+ explicit HexagonDAGToDAGISel(HexagonTargetMachine &targetmachine,
CodeGenOpt::Level OptLevel)
: SelectionDAGISel(targetmachine, OptLevel),
Subtarget(targetmachine.getSubtarget<HexagonSubtarget>()),
- TM(targetmachine),
- TII(static_cast<const HexagonInstrInfo*>(TM.getInstrInfo())) {
+ TM(targetmachine) {
initializeHexagonDAGToDAGISelPass(*PassRegistry::getPassRegistry());
}
bool hasNumUsesBelowThresGA(SDNode *N) const;
@@ -92,14 +90,14 @@ public:
bool SelectAddr(SDNode *Op, SDValue Addr, SDValue &Base, SDValue &Offset);
SDNode *SelectLoad(SDNode *N);
- SDNode *SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl);
- SDNode *SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl);
+ SDNode *SelectBaseOffsetLoad(LoadSDNode *LD, SDLoc dl);
+ SDNode *SelectIndexedLoad(LoadSDNode *LD, SDLoc dl);
SDNode *SelectIndexedLoadZeroExtend64(LoadSDNode *LD, unsigned Opcode,
- DebugLoc dl);
+ SDLoc dl);
SDNode *SelectIndexedLoadSignExtend64(LoadSDNode *LD, unsigned Opcode,
- DebugLoc dl);
- SDNode *SelectBaseOffsetStore(StoreSDNode *ST, DebugLoc dl);
- SDNode *SelectIndexedStore(StoreSDNode *ST, DebugLoc dl);
+ SDLoc dl);
+ SDNode *SelectBaseOffsetStore(StoreSDNode *ST, SDLoc dl);
+ SDNode *SelectIndexedStore(StoreSDNode *ST, SDLoc dl);
SDNode *SelectStore(SDNode *N);
SDNode *SelectSHL(SDNode *N);
SDNode *SelectSelect(SDNode *N);
@@ -180,7 +178,7 @@ inline SDValue XformUToUM1Imm(unsigned Imm) {
/// createHexagonISelDag - This pass converts a legalized DAG into a
/// Hexagon-specific DAG, ready for instruction scheduling.
///
-FunctionPass *llvm::createHexagonISelDag(const HexagonTargetMachine &TM,
+FunctionPass *llvm::createHexagonISelDag(HexagonTargetMachine &TM,
CodeGenOpt::Level OptLevel) {
return new HexagonDAGToDAGISel(TM, OptLevel);
}
@@ -385,7 +383,7 @@ static bool OffsetFitsS11(EVT MemType, int64_t Offset) {
// lowering for GlobalAddress nodes has already turned it into a
// CONST32.
//
-SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) {
+SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, SDLoc dl) {
SDValue Chain = LD->getChain();
SDNode* Const32 = LD->getBasePtr().getNode();
unsigned Opcode = 0;
@@ -396,7 +394,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) {
EVT LoadedVT = LD->getMemoryVT();
int64_t Offset = cast<GlobalAddressSDNode>(Base)->getOffset();
if (Offset != 0 && OffsetFitsS11(LoadedVT, Offset)) {
- MVT PointerTy = TLI.getPointerTy();
+ MVT PointerTy = getTargetLowering()->getPointerTy();
const GlobalValue* GV =
cast<GlobalAddressSDNode>(Base)->getGlobal();
SDValue TargAddr =
@@ -433,7 +431,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) {
SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
unsigned Opcode,
- DebugLoc dl)
+ SDLoc dl)
{
SDValue Chain = LD->getChain();
EVT LoadedVT = LD->getMemoryVT();
@@ -444,8 +442,11 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
SDValue N1 = LD->getOperand(1);
SDValue CPTmpN1_0;
SDValue CPTmpN1_1;
+
if (SelectADDRriS11_2(N1, CPTmpN1_0, CPTmpN1_1) &&
N1.getNode()->getValueType(0) == MVT::i32) {
+ const HexagonInstrInfo *TII =
+ static_cast<const HexagonInstrInfo*>(TM.getInstrInfo());
if (TII->isValidAutoIncImm(LoadedVT, Val)) {
SDValue TargetConst = CurDAG->getTargetConstant(Val, MVT::i32);
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::i32,
@@ -497,7 +498,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
unsigned Opcode,
- DebugLoc dl)
+ SDLoc dl)
{
SDValue Chain = LD->getChain();
EVT LoadedVT = LD->getMemoryVT();
@@ -508,8 +509,11 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
SDValue N1 = LD->getOperand(1);
SDValue CPTmpN1_0;
SDValue CPTmpN1_1;
+
if (SelectADDRriS11_2(N1, CPTmpN1_0, CPTmpN1_1) &&
N1.getNode()->getValueType(0) == MVT::i32) {
+ const HexagonInstrInfo *TII =
+ static_cast<const HexagonInstrInfo*>(TM.getInstrInfo());
if (TII->isValidAutoIncImm(LoadedVT, Val)) {
SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
@@ -572,7 +576,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
}
-SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl) {
+SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, SDLoc dl) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Offset = LD->getOffset();
@@ -586,6 +590,8 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl) {
bool zextval = (LD->getExtensionType() == ISD::ZEXTLOAD);
// Figure out the opcode.
+ const HexagonInstrInfo *TII =
+ static_cast<const HexagonInstrInfo*>(TM.getInstrInfo());
if (LoadedVT == MVT::i64) {
if (TII->isValidAutoIncImm(LoadedVT, Val))
Opcode = Hexagon::POST_LDrid;
@@ -667,7 +673,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl) {
SDNode *HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
SDNode *result;
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
LoadSDNode *LD = cast<LoadSDNode>(N);
ISD::MemIndexedMode AM = LD->getAddressingMode();
@@ -682,7 +688,7 @@ SDNode *HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
}
-SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) {
+SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, SDLoc dl) {
SDValue Chain = ST->getChain();
SDValue Base = ST->getBasePtr();
SDValue Offset = ST->getOffset();
@@ -694,6 +700,8 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) {
// Offset value must be within representable range
// and must have correct alignment properties.
+ const HexagonInstrInfo *TII =
+ static_cast<const HexagonInstrInfo*>(TM.getInstrInfo());
if (TII->isValidAutoIncImm(StoredVT, Val)) {
SDValue Ops[] = {Base, CurDAG->getTargetConstant(Val, MVT::i32), Value,
Chain};
@@ -751,7 +759,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) {
SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST,
- DebugLoc dl) {
+ SDLoc dl) {
SDValue Chain = ST->getChain();
SDNode* Const32 = ST->getBasePtr().getNode();
SDValue Value = ST->getValue();
@@ -769,7 +777,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST,
EVT StoredVT = ST->getMemoryVT();
int64_t Offset = cast<GlobalAddressSDNode>(Base)->getOffset();
if (Offset != 0 && OffsetFitsS11(StoredVT, Offset)) {
- MVT PointerTy = TLI.getPointerTy();
+ MVT PointerTy = getTargetLowering()->getPointerTy();
const GlobalValue* GV =
cast<GlobalAddressSDNode>(Base)->getGlobal();
SDValue TargAddr =
@@ -805,7 +813,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST,
SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
StoreSDNode *ST = cast<StoreSDNode>(N);
ISD::MemIndexedMode AM = ST->getAddressingMode();
@@ -818,7 +826,7 @@ SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) {
}
SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
//
// %conv.i = sext i32 %tmp1 to i64
@@ -902,7 +910,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
SDNode *HexagonDAGToDAGISel::SelectSelect(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue N0 = N->getOperand(0);
if (N0.getOpcode() == ISD::SETCC) {
SDValue N00 = N0.getOperand(0);
@@ -969,7 +977,7 @@ SDNode *HexagonDAGToDAGISel::SelectSelect(SDNode *N) {
SDNode *HexagonDAGToDAGISel::SelectTruncate(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue Shift = N->getOperand(0);
//
@@ -1082,7 +1090,7 @@ SDNode *HexagonDAGToDAGISel::SelectTruncate(SDNode *N) {
SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
if (N->getValueType(0) == MVT::i32) {
SDValue Shl_0 = N->getOperand(0);
SDValue Shl_1 = N->getOperand(1);
@@ -1158,7 +1166,7 @@ SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
// We want to preserve all the lower 8-bits and, not just 1 LSB bit.
//
SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDNode *IsIntrinsic = N->getOperand(0).getNode();
if ((IsIntrinsic->getOpcode() == ISD::INTRINSIC_WO_CHAIN)) {
unsigned ID =
@@ -1201,7 +1209,7 @@ SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
// and lowering to the actual intrinsic.
//
SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
unsigned ID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
unsigned IntrinsicWithPred = doesIntrinsicContainPredicate(ID);
@@ -1209,6 +1217,8 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
// as at least one of the operands.
if (IntrinsicWithPred) {
SmallVector<SDValue, 8> Ops;
+ const HexagonInstrInfo *TII =
+ static_cast<const HexagonInstrInfo*>(TM.getInstrInfo());
const MCInstrDesc &MCID = TII->get(IntrinsicWithPred);
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
@@ -1251,7 +1261,7 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
// Map floating point constant values.
//
SDNode *HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
APFloat APF = CN->getValueAPF();
if (N->getValueType(0) == MVT::f32) {
@@ -1271,7 +1281,7 @@ SDNode *HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
// Map predicate true (encoded as -1 in LLVM) to a XOR.
//
SDNode *HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
if (N->getValueType(0) == MVT::i1) {
SDNode* Result;
int32_t Val = cast<ConstantSDNode>(N)->getSExtValue();
@@ -1310,7 +1320,7 @@ SDNode *HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
// Map add followed by a asr -> asr +=.
//
SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
if (N->getValueType(0) != MVT::i32) {
return SelectCode(N);
}
@@ -1334,8 +1344,10 @@ SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
SDNode *HexagonDAGToDAGISel::Select(SDNode *N) {
- if (N->isMachineOpcode())
+ if (N->isMachineOpcode()) {
+ N->setNodeId(-1);
return NULL; // Already selected.
+ }
switch (N->getOpcode()) {
@@ -1660,7 +1672,7 @@ bool HexagonDAGToDAGISel::foldGlobalAddressImpl(SDValue &N, SDValue &R,
!hasNumUsesBelowThresGA(GA))
return false;
R = CurDAG->getTargetGlobalAddress(GA->getGlobal(),
- Const->getDebugLoc(),
+ SDLoc(Const),
N.getValueType(),
GA->getOffset() +
(uint64_t)Const->getSExtValue());
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 0e5b8dcbe88b..137417966976 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -39,13 +39,24 @@
using namespace llvm;
-const unsigned Hexagon_MAX_RET_SIZE = 64;
-
static cl::opt<bool>
EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
cl::desc("Control jump table emission on Hexagon target"));
-int NumNamedVarArgParams = -1;
+namespace {
+class HexagonCCState : public CCState {
+ int NumNamedVarArgParams;
+
+public:
+ HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
+ const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
+ LLVMContext &C, int NumNamedVarArgParams)
+ : CCState(CC, isVarArg, MF, TM, locs, C),
+ NumNamedVarArgParams(NumNamedVarArgParams) {}
+
+ int getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
+};
+}
// Implement calling convention for Hexagon.
static bool
@@ -82,12 +93,13 @@ static bool
CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ HexagonCCState &HState = static_cast<HexagonCCState &>(State);
// NumNamedVarArgParams can not be zero for a VarArg function.
- assert ( (NumNamedVarArgParams > 0) &&
- "NumNamedVarArgParams is not bigger than zero.");
+ assert((HState.getNumNamedVarArgParams() > 0) &&
+ "NumNamedVarArgParams is not bigger than zero.");
- if ( (int)ValNo < NumNamedVarArgParams ) {
+ if ((int)ValNo < HState.getNumNamedVarArgParams()) {
// Deal with named arguments.
return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
}
@@ -285,7 +297,7 @@ const {
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
- DebugLoc dl) {
+ SDLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
@@ -302,7 +314,7 @@ HexagonTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+ SDLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to locations.
SmallVector<CCValAssign, 16> RVLocs;
@@ -351,7 +363,7 @@ HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const
SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
const SmallVectorImpl<SDValue> &OutVals,
SDValue Callee) const {
@@ -382,10 +394,10 @@ SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
@@ -394,13 +406,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
-
// Check for varargs.
- NumNamedVarArgParams = -1;
+ int NumNamedVarArgParams = -1;
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee))
{
const Function* CalleeFn = NULL;
@@ -417,6 +424,12 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
}
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ HexagonCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), ArgLocs, *DAG.getContext(),
+ NumNamedVarArgParams);
+
if (NumNamedVarArgParams > 0)
CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
else
@@ -513,7 +526,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (!isTailCall)
Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
- getPointerTy(), true));
+ getPointerTy(), true),
+ dl);
// Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers.
@@ -588,7 +602,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Create the CALLSEQ_END node.
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag);
+ DAG.getIntPtrConstant(0, true), InFlag, dl);
InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we
@@ -730,7 +744,7 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
SDValue Chain = Op.getOperand(0);
SDValue Table = Op.getOperand(1);
SDValue Index = Op.getOperand(2);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
unsigned JTI = JT->getIndex();
MachineFunction &MF = DAG.getMachineFunction();
@@ -766,7 +780,7 @@ HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned SPReg = getStackPointerRegisterToSaveRestore();
@@ -812,7 +826,7 @@ HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
bool isVarArg,
const
SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
@@ -925,7 +939,7 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), Op.getDebugLoc(), Addr,
+ return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr,
Op.getOperand(1), MachinePointerInfo(SV), false,
false, 0);
}
@@ -937,7 +951,7 @@ HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue CC = Op.getOperand(4);
SDValue TrueVal = Op.getOperand(2);
SDValue FalseVal = Op.getOperand(3);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDNode* OpNode = Op.getNode();
EVT SVT = OpNode->getValueType(0);
@@ -948,8 +962,7 @@ HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
EVT ValTy = Op.getValueType();
-
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
SDValue Res;
if (CP->isMachineConstantPoolEntry())
@@ -969,7 +982,7 @@ HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
MFI->setReturnAddressIsTaken(true);
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
@@ -991,7 +1004,7 @@ HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
TRI->getFrameRegister(), VT);
@@ -1004,7 +1017,7 @@ HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
SelectionDAG& DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
}
@@ -1014,7 +1027,7 @@ SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
SDValue Result;
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
const HexagonTargetObjectFile &TLOF =
@@ -1030,7 +1043,7 @@ SDValue
HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD);
}
@@ -1361,7 +1374,6 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
// Increase jump tables cutover to 5, was 4.
setMinimumJumpTableEntries(5);
- setOperationAction(ISD::BR_CC, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
setOperationAction(ISD::BR_CC, MVT::f64, Expand);
setOperationAction(ISD::BR_CC, MVT::i1, Expand);
@@ -1429,11 +1441,6 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
-
setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
if (TM.getSubtargetImpl()->isSubtargetV2()) {
@@ -1510,12 +1517,25 @@ bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32));
}
+bool
+HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
+ // Assuming the caller does not have either a signext or zeroext modifier, and
+ // only one value is accepted, any reasonable truncation is allowed.
+ if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
+ return false;
+
+ // FIXME: in principle up to 64-bit could be made safe, but it would be very
+ // fragile at the moment: any support for multiple value returns would be
+ // liable to disallow tail calls involving i64 -> iN truncation in many cases.
+ return Ty1->getPrimitiveSizeInBits() <= 32;
+}
+
SDValue
HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// Mark function as containing a call to EH_RETURN.
HexagonMachineFunctionInfo *FuncInfo =
@@ -1591,11 +1611,11 @@ const {
std::pair<unsigned, const TargetRegisterClass*>
HexagonTargetLowering::getRegForInlineAsmConstraint(const
std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r': // R0-R31
- switch (VT.getSimpleVT().SimpleTy) {
+ switch (VT.SimpleTy) {
default:
llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
case MVT::i32:
diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h
index bb1acc19e659..73da226a1727 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/lib/Target/Hexagon/HexagonISelLowering.h
@@ -95,6 +95,8 @@ namespace llvm {
virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+ virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
+
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
virtual const char *getTargetNodeName(unsigned Opcode) const;
@@ -106,7 +108,7 @@ namespace llvm {
SDValue LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -117,7 +119,7 @@ namespace llvm {
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
const SmallVectorImpl<SDValue> &OutVals,
SDValue Callee) const;
@@ -131,7 +133,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
virtual MachineBasicBlock
*EmitInstrWithCustomInserter(MachineInstr *MI,
@@ -139,8 +141,11 @@ namespace llvm {
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
- virtual EVT getSetCCResultType(EVT VT) const {
- return MVT::i1;
+ virtual EVT getSetCCResultType(LLVMContext &C, EVT VT) const {
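+      // (E.g. a setcc comparing two v4i32 vectors now yields v4i1 rather
+      // than a scalar i1.)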
+ if (!VT.isVector())
+ return MVT::i1;
+ else
+ return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
}
virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
@@ -150,7 +155,7 @@ namespace llvm {
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
+ MVT VT) const;
// Intrinsics
virtual SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op,
diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td
index 587fa7d7f10e..d25bfa8b0d85 100644
--- a/lib/Target/Hexagon/HexagonInstrFormats.td
+++ b/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -54,6 +54,7 @@ def AbsoluteSet : AddrModeType<2>; // Absolute set addressing mode
def BaseImmOffset : AddrModeType<3>; // Indirect with offset
def BaseLongOffset : AddrModeType<4>; // Indirect with long offset
def BaseRegOffset : AddrModeType<5>; // Indirect with register offset
+def PostInc : AddrModeType<6>; // Post increment addressing mode
class MemAccessSize<bits<3> value> {
bits<3> Value = value;
@@ -62,7 +63,7 @@ class MemAccessSize<bits<3> value> {
def NoMemAccess : MemAccessSize<0>;// Not a memory acces instruction.
def ByteAccess : MemAccessSize<1>;// Byte access instruction (memb).
def HalfWordAccess : MemAccessSize<2>;// Half word access instruction (memh).
-def WordAccess : MemAccessSize<3>;// Word access instrution (memw).
+def WordAccess : MemAccessSize<3>;// Word access instruction (memw).
def DoubleWordAccess : MemAccessSize<4>;// Double word access instruction (memd)
@@ -157,6 +158,7 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
string CextOpcode = "";
string PredSense = "";
string PNewValue = "";
+ string NValueST = ""; // Set to "true" for new-value stores.
string InputType = ""; // Input is "imm" or "reg" type.
string isMEMri = "false"; // Set to "true" for load/store with MEMri operand.
string isFloat = "false"; // Set to "true" for the floating-point load/store.
@@ -165,6 +167,7 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
let PredSense = !if(isPredicated, !if(isPredicatedFalse, "false", "true"),
"");
let PNewValue = !if(isPredicatedNew, "new", "");
+ let NValueST = !if(isNVStore, "true", "false");
// *** Must match MCTargetDesc/HexagonBaseInfo.h ***
}
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index f1141708b3c6..6b97609415a3 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -26,7 +26,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "HexagonGenInstrInfo.inc"
#include "HexagonGenDFAPacketizer.inc"
@@ -55,10 +55,12 @@ const int Hexagon_MEMH_AUTOINC_MIN = -16;
const int Hexagon_MEMB_AUTOINC_MAX = 7;
const int Hexagon_MEMB_AUTOINC_MIN = -8;
+// Pin the vtable to this file.
+void HexagonInstrInfo::anchor() {}
HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
: HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
- RI(ST, *this), Subtarget(ST) {
+ RI(ST), Subtarget(ST) {
}
@@ -558,16 +560,6 @@ MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
return(0);
}
-MachineInstr*
-HexagonInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx, uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const {
- MachineInstrBuilder MIB = BuildMI(MF, DL, get(Hexagon::DBG_VALUE))
- .addImm(0).addImm(Offset).addMetadata(MDPtr);
- return &*MIB;
-}
-
unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
MachineRegisterInfo &RegInfo = MF->getRegInfo();
@@ -630,188 +622,6 @@ bool HexagonInstrInfo::isBranch (const MachineInstr *MI) const {
return MI->getDesc().isBranch();
}
-bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
- default: return false;
- // Store Byte
- case Hexagon::STrib_nv_V4:
- case Hexagon::STrib_indexed_nv_V4:
- case Hexagon::STrib_indexed_shl_nv_V4:
- case Hexagon::STrib_shl_nv_V4:
- case Hexagon::STb_GP_nv_V4:
- case Hexagon::POST_STbri_nv_V4:
- case Hexagon::STrib_cPt_nv_V4:
- case Hexagon::STrib_cdnPt_nv_V4:
- case Hexagon::STrib_cNotPt_nv_V4:
- case Hexagon::STrib_cdnNotPt_nv_V4:
- case Hexagon::STrib_indexed_cPt_nv_V4:
- case Hexagon::STrib_indexed_cdnPt_nv_V4:
- case Hexagon::STrib_indexed_cNotPt_nv_V4:
- case Hexagon::STrib_indexed_cdnNotPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cNotPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
- case Hexagon::POST_STbri_cPt_nv_V4:
- case Hexagon::POST_STbri_cdnPt_nv_V4:
- case Hexagon::POST_STbri_cNotPt_nv_V4:
- case Hexagon::POST_STbri_cdnNotPt_nv_V4:
- case Hexagon::STb_GP_cPt_nv_V4:
- case Hexagon::STb_GP_cNotPt_nv_V4:
- case Hexagon::STb_GP_cdnPt_nv_V4:
- case Hexagon::STb_GP_cdnNotPt_nv_V4:
- case Hexagon::STrib_abs_nv_V4:
- case Hexagon::STrib_abs_cPt_nv_V4:
- case Hexagon::STrib_abs_cdnPt_nv_V4:
- case Hexagon::STrib_abs_cNotPt_nv_V4:
- case Hexagon::STrib_abs_cdnNotPt_nv_V4:
-
- // Store Halfword
- case Hexagon::STrih_nv_V4:
- case Hexagon::STrih_indexed_nv_V4:
- case Hexagon::STrih_indexed_shl_nv_V4:
- case Hexagon::STrih_shl_nv_V4:
- case Hexagon::STh_GP_nv_V4:
- case Hexagon::POST_SThri_nv_V4:
- case Hexagon::STrih_cPt_nv_V4:
- case Hexagon::STrih_cdnPt_nv_V4:
- case Hexagon::STrih_cNotPt_nv_V4:
- case Hexagon::STrih_cdnNotPt_nv_V4:
- case Hexagon::STrih_indexed_cPt_nv_V4:
- case Hexagon::STrih_indexed_cdnPt_nv_V4:
- case Hexagon::STrih_indexed_cNotPt_nv_V4:
- case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
- case Hexagon::STrih_indexed_shl_cPt_nv_V4:
- case Hexagon::STrih_indexed_shl_cdnPt_nv_V4:
- case Hexagon::STrih_indexed_shl_cNotPt_nv_V4:
- case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4:
- case Hexagon::POST_SThri_cPt_nv_V4:
- case Hexagon::POST_SThri_cdnPt_nv_V4:
- case Hexagon::POST_SThri_cNotPt_nv_V4:
- case Hexagon::POST_SThri_cdnNotPt_nv_V4:
- case Hexagon::STh_GP_cPt_nv_V4:
- case Hexagon::STh_GP_cNotPt_nv_V4:
- case Hexagon::STh_GP_cdnPt_nv_V4:
- case Hexagon::STh_GP_cdnNotPt_nv_V4:
- case Hexagon::STrih_abs_nv_V4:
- case Hexagon::STrih_abs_cPt_nv_V4:
- case Hexagon::STrih_abs_cdnPt_nv_V4:
- case Hexagon::STrih_abs_cNotPt_nv_V4:
- case Hexagon::STrih_abs_cdnNotPt_nv_V4:
-
- // Store Word
- case Hexagon::STriw_nv_V4:
- case Hexagon::STriw_indexed_nv_V4:
- case Hexagon::STriw_indexed_shl_nv_V4:
- case Hexagon::STriw_shl_nv_V4:
- case Hexagon::STw_GP_nv_V4:
- case Hexagon::POST_STwri_nv_V4:
- case Hexagon::STriw_cPt_nv_V4:
- case Hexagon::STriw_cdnPt_nv_V4:
- case Hexagon::STriw_cNotPt_nv_V4:
- case Hexagon::STriw_cdnNotPt_nv_V4:
- case Hexagon::STriw_indexed_cPt_nv_V4:
- case Hexagon::STriw_indexed_cdnPt_nv_V4:
- case Hexagon::STriw_indexed_cNotPt_nv_V4:
- case Hexagon::STriw_indexed_cdnNotPt_nv_V4:
- case Hexagon::STriw_indexed_shl_cPt_nv_V4:
- case Hexagon::STriw_indexed_shl_cdnPt_nv_V4:
- case Hexagon::STriw_indexed_shl_cNotPt_nv_V4:
- case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4:
- case Hexagon::POST_STwri_cPt_nv_V4:
- case Hexagon::POST_STwri_cdnPt_nv_V4:
- case Hexagon::POST_STwri_cNotPt_nv_V4:
- case Hexagon::POST_STwri_cdnNotPt_nv_V4:
- case Hexagon::STw_GP_cPt_nv_V4:
- case Hexagon::STw_GP_cNotPt_nv_V4:
- case Hexagon::STw_GP_cdnPt_nv_V4:
- case Hexagon::STw_GP_cdnNotPt_nv_V4:
- case Hexagon::STriw_abs_nv_V4:
- case Hexagon::STriw_abs_cPt_nv_V4:
- case Hexagon::STriw_abs_cdnPt_nv_V4:
- case Hexagon::STriw_abs_cNotPt_nv_V4:
- case Hexagon::STriw_abs_cdnNotPt_nv_V4:
- return true;
- }
-}
-
-bool HexagonInstrInfo::isPostIncrement (const MachineInstr* MI) const {
- switch (MI->getOpcode())
- {
- default: return false;
- // Load Byte
- case Hexagon::POST_LDrib:
- case Hexagon::POST_LDrib_cPt:
- case Hexagon::POST_LDrib_cNotPt:
- case Hexagon::POST_LDrib_cdnPt_V4:
- case Hexagon::POST_LDrib_cdnNotPt_V4:
-
- // Load unsigned byte
- case Hexagon::POST_LDriub:
- case Hexagon::POST_LDriub_cPt:
- case Hexagon::POST_LDriub_cNotPt:
- case Hexagon::POST_LDriub_cdnPt_V4:
- case Hexagon::POST_LDriub_cdnNotPt_V4:
-
- // Load halfword
- case Hexagon::POST_LDrih:
- case Hexagon::POST_LDrih_cPt:
- case Hexagon::POST_LDrih_cNotPt:
- case Hexagon::POST_LDrih_cdnPt_V4:
- case Hexagon::POST_LDrih_cdnNotPt_V4:
-
- // Load unsigned halfword
- case Hexagon::POST_LDriuh:
- case Hexagon::POST_LDriuh_cPt:
- case Hexagon::POST_LDriuh_cNotPt:
- case Hexagon::POST_LDriuh_cdnPt_V4:
- case Hexagon::POST_LDriuh_cdnNotPt_V4:
-
- // Load word
- case Hexagon::POST_LDriw:
- case Hexagon::POST_LDriw_cPt:
- case Hexagon::POST_LDriw_cNotPt:
- case Hexagon::POST_LDriw_cdnPt_V4:
- case Hexagon::POST_LDriw_cdnNotPt_V4:
-
- // Load double word
- case Hexagon::POST_LDrid:
- case Hexagon::POST_LDrid_cPt:
- case Hexagon::POST_LDrid_cNotPt:
- case Hexagon::POST_LDrid_cdnPt_V4:
- case Hexagon::POST_LDrid_cdnNotPt_V4:
-
- // Store byte
- case Hexagon::POST_STbri:
- case Hexagon::POST_STbri_cPt:
- case Hexagon::POST_STbri_cNotPt:
- case Hexagon::POST_STbri_cdnPt_V4:
- case Hexagon::POST_STbri_cdnNotPt_V4:
-
- // Store halfword
- case Hexagon::POST_SThri:
- case Hexagon::POST_SThri_cPt:
- case Hexagon::POST_SThri_cNotPt:
- case Hexagon::POST_SThri_cdnPt_V4:
- case Hexagon::POST_SThri_cdnNotPt_V4:
-
- // Store word
- case Hexagon::POST_STwri:
- case Hexagon::POST_STwri_cPt:
- case Hexagon::POST_STwri_cNotPt:
- case Hexagon::POST_STwri_cdnPt_V4:
- case Hexagon::POST_STwri_cdnNotPt_V4:
-
- // Store double word
- case Hexagon::POST_STdri:
- case Hexagon::POST_STdri_cPt:
- case Hexagon::POST_STdri_cNotPt:
- case Hexagon::POST_STdri_cdnPt_V4:
- case Hexagon::POST_STdri_cdnNotPt_V4:
- return true;
- }
-}
-
bool HexagonInstrInfo::isNewValueInst(const MachineInstr *MI) const {
if (isNewValueJump(MI))
return true;
@@ -917,19 +727,6 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
// cPt ---> cNotPt
// cNotPt ---> cPt
//
-// however, these inversiones are NOT included:
-//
-// cdnPt -X-> cdnNotPt
-// cdnNotPt -X-> cdnPt
-// cPt_nv -X-> cNotPt_nv (new value stores)
-// cNotPt_nv -X-> cPt_nv (new value stores)
-//
-// because only the following transformations are allowed:
-//
-// cNotPt ---> cdnNotPt
-// cPt ---> cdnPt
-// cNotPt ---> cNotPt_nv
-// cPt ---> cPt_nv
unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
int InvPredOpcode;
InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
@@ -939,332 +736,12 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
switch(Opc) {
default: llvm_unreachable("Unexpected predicated instruction");
- case Hexagon::TFR_cPt:
- return Hexagon::TFR_cNotPt;
- case Hexagon::TFR_cNotPt:
- return Hexagon::TFR_cPt;
-
- case Hexagon::TFRI_cPt:
- return Hexagon::TFRI_cNotPt;
- case Hexagon::TFRI_cNotPt:
- return Hexagon::TFRI_cPt;
-
- case Hexagon::JMP_t:
- return Hexagon::JMP_f;
- case Hexagon::JMP_f:
- return Hexagon::JMP_t;
-
- case Hexagon::ADD_ri_cPt:
- return Hexagon::ADD_ri_cNotPt;
- case Hexagon::ADD_ri_cNotPt:
- return Hexagon::ADD_ri_cPt;
-
- case Hexagon::ADD_rr_cPt:
- return Hexagon::ADD_rr_cNotPt;
- case Hexagon::ADD_rr_cNotPt:
- return Hexagon::ADD_rr_cPt;
-
- case Hexagon::XOR_rr_cPt:
- return Hexagon::XOR_rr_cNotPt;
- case Hexagon::XOR_rr_cNotPt:
- return Hexagon::XOR_rr_cPt;
-
- case Hexagon::AND_rr_cPt:
- return Hexagon::AND_rr_cNotPt;
- case Hexagon::AND_rr_cNotPt:
- return Hexagon::AND_rr_cPt;
-
- case Hexagon::OR_rr_cPt:
- return Hexagon::OR_rr_cNotPt;
- case Hexagon::OR_rr_cNotPt:
- return Hexagon::OR_rr_cPt;
-
- case Hexagon::SUB_rr_cPt:
- return Hexagon::SUB_rr_cNotPt;
- case Hexagon::SUB_rr_cNotPt:
- return Hexagon::SUB_rr_cPt;
-
case Hexagon::COMBINE_rr_cPt:
return Hexagon::COMBINE_rr_cNotPt;
case Hexagon::COMBINE_rr_cNotPt:
return Hexagon::COMBINE_rr_cPt;
- case Hexagon::ASLH_cPt_V4:
- return Hexagon::ASLH_cNotPt_V4;
- case Hexagon::ASLH_cNotPt_V4:
- return Hexagon::ASLH_cPt_V4;
-
- case Hexagon::ASRH_cPt_V4:
- return Hexagon::ASRH_cNotPt_V4;
- case Hexagon::ASRH_cNotPt_V4:
- return Hexagon::ASRH_cPt_V4;
-
- case Hexagon::SXTB_cPt_V4:
- return Hexagon::SXTB_cNotPt_V4;
- case Hexagon::SXTB_cNotPt_V4:
- return Hexagon::SXTB_cPt_V4;
-
- case Hexagon::SXTH_cPt_V4:
- return Hexagon::SXTH_cNotPt_V4;
- case Hexagon::SXTH_cNotPt_V4:
- return Hexagon::SXTH_cPt_V4;
-
- case Hexagon::ZXTB_cPt_V4:
- return Hexagon::ZXTB_cNotPt_V4;
- case Hexagon::ZXTB_cNotPt_V4:
- return Hexagon::ZXTB_cPt_V4;
-
- case Hexagon::ZXTH_cPt_V4:
- return Hexagon::ZXTH_cNotPt_V4;
- case Hexagon::ZXTH_cNotPt_V4:
- return Hexagon::ZXTH_cPt_V4;
-
-
- case Hexagon::JMPR_t:
- return Hexagon::JMPR_f;
- case Hexagon::JMPR_f:
- return Hexagon::JMPR_t;
-
- // V4 indexed+scaled load.
- case Hexagon::LDrid_indexed_shl_cPt_V4:
- return Hexagon::LDrid_indexed_shl_cNotPt_V4;
- case Hexagon::LDrid_indexed_shl_cNotPt_V4:
- return Hexagon::LDrid_indexed_shl_cPt_V4;
-
- case Hexagon::LDrib_indexed_shl_cPt_V4:
- return Hexagon::LDrib_indexed_shl_cNotPt_V4;
- case Hexagon::LDrib_indexed_shl_cNotPt_V4:
- return Hexagon::LDrib_indexed_shl_cPt_V4;
-
- case Hexagon::LDriub_indexed_shl_cPt_V4:
- return Hexagon::LDriub_indexed_shl_cNotPt_V4;
- case Hexagon::LDriub_indexed_shl_cNotPt_V4:
- return Hexagon::LDriub_indexed_shl_cPt_V4;
-
- case Hexagon::LDrih_indexed_shl_cPt_V4:
- return Hexagon::LDrih_indexed_shl_cNotPt_V4;
- case Hexagon::LDrih_indexed_shl_cNotPt_V4:
- return Hexagon::LDrih_indexed_shl_cPt_V4;
-
- case Hexagon::LDriuh_indexed_shl_cPt_V4:
- return Hexagon::LDriuh_indexed_shl_cNotPt_V4;
- case Hexagon::LDriuh_indexed_shl_cNotPt_V4:
- return Hexagon::LDriuh_indexed_shl_cPt_V4;
-
- case Hexagon::LDriw_indexed_shl_cPt_V4:
- return Hexagon::LDriw_indexed_shl_cNotPt_V4;
- case Hexagon::LDriw_indexed_shl_cNotPt_V4:
- return Hexagon::LDriw_indexed_shl_cPt_V4;
-
- // Byte.
- case Hexagon::POST_STbri_cPt:
- return Hexagon::POST_STbri_cNotPt;
- case Hexagon::POST_STbri_cNotPt:
- return Hexagon::POST_STbri_cPt;
-
- case Hexagon::STrib_cPt:
- return Hexagon::STrib_cNotPt;
- case Hexagon::STrib_cNotPt:
- return Hexagon::STrib_cPt;
-
- case Hexagon::STrib_indexed_cPt:
- return Hexagon::STrib_indexed_cNotPt;
- case Hexagon::STrib_indexed_cNotPt:
- return Hexagon::STrib_indexed_cPt;
-
- case Hexagon::STrib_imm_cPt_V4:
- return Hexagon::STrib_imm_cNotPt_V4;
- case Hexagon::STrib_imm_cNotPt_V4:
- return Hexagon::STrib_imm_cPt_V4;
-
- case Hexagon::STrib_indexed_shl_cPt_V4:
- return Hexagon::STrib_indexed_shl_cNotPt_V4;
- case Hexagon::STrib_indexed_shl_cNotPt_V4:
- return Hexagon::STrib_indexed_shl_cPt_V4;
-
- // Halfword.
- case Hexagon::POST_SThri_cPt:
- return Hexagon::POST_SThri_cNotPt;
- case Hexagon::POST_SThri_cNotPt:
- return Hexagon::POST_SThri_cPt;
-
- case Hexagon::STrih_cPt:
- return Hexagon::STrih_cNotPt;
- case Hexagon::STrih_cNotPt:
- return Hexagon::STrih_cPt;
-
- case Hexagon::STrih_indexed_cPt:
- return Hexagon::STrih_indexed_cNotPt;
- case Hexagon::STrih_indexed_cNotPt:
- return Hexagon::STrih_indexed_cPt;
-
- case Hexagon::STrih_imm_cPt_V4:
- return Hexagon::STrih_imm_cNotPt_V4;
- case Hexagon::STrih_imm_cNotPt_V4:
- return Hexagon::STrih_imm_cPt_V4;
-
- case Hexagon::STrih_indexed_shl_cPt_V4:
- return Hexagon::STrih_indexed_shl_cNotPt_V4;
- case Hexagon::STrih_indexed_shl_cNotPt_V4:
- return Hexagon::STrih_indexed_shl_cPt_V4;
-
- // Word.
- case Hexagon::POST_STwri_cPt:
- return Hexagon::POST_STwri_cNotPt;
- case Hexagon::POST_STwri_cNotPt:
- return Hexagon::POST_STwri_cPt;
-
- case Hexagon::STriw_cPt:
- return Hexagon::STriw_cNotPt;
- case Hexagon::STriw_cNotPt:
- return Hexagon::STriw_cPt;
-
- case Hexagon::STriw_indexed_cPt:
- return Hexagon::STriw_indexed_cNotPt;
- case Hexagon::STriw_indexed_cNotPt:
- return Hexagon::STriw_indexed_cPt;
-
- case Hexagon::STriw_indexed_shl_cPt_V4:
- return Hexagon::STriw_indexed_shl_cNotPt_V4;
- case Hexagon::STriw_indexed_shl_cNotPt_V4:
- return Hexagon::STriw_indexed_shl_cPt_V4;
-
- case Hexagon::STriw_imm_cPt_V4:
- return Hexagon::STriw_imm_cNotPt_V4;
- case Hexagon::STriw_imm_cNotPt_V4:
- return Hexagon::STriw_imm_cPt_V4;
-
- // Double word.
- case Hexagon::POST_STdri_cPt:
- return Hexagon::POST_STdri_cNotPt;
- case Hexagon::POST_STdri_cNotPt:
- return Hexagon::POST_STdri_cPt;
-
- case Hexagon::STrid_cPt:
- return Hexagon::STrid_cNotPt;
- case Hexagon::STrid_cNotPt:
- return Hexagon::STrid_cPt;
-
- case Hexagon::STrid_indexed_cPt:
- return Hexagon::STrid_indexed_cNotPt;
- case Hexagon::STrid_indexed_cNotPt:
- return Hexagon::STrid_indexed_cPt;
-
- case Hexagon::STrid_indexed_shl_cPt_V4:
- return Hexagon::STrid_indexed_shl_cNotPt_V4;
- case Hexagon::STrid_indexed_shl_cNotPt_V4:
- return Hexagon::STrid_indexed_shl_cPt_V4;
-
- // V4 Store to global address.
- case Hexagon::STd_GP_cPt_V4:
- return Hexagon::STd_GP_cNotPt_V4;
- case Hexagon::STd_GP_cNotPt_V4:
- return Hexagon::STd_GP_cPt_V4;
-
- case Hexagon::STb_GP_cPt_V4:
- return Hexagon::STb_GP_cNotPt_V4;
- case Hexagon::STb_GP_cNotPt_V4:
- return Hexagon::STb_GP_cPt_V4;
-
- case Hexagon::STh_GP_cPt_V4:
- return Hexagon::STh_GP_cNotPt_V4;
- case Hexagon::STh_GP_cNotPt_V4:
- return Hexagon::STh_GP_cPt_V4;
-
- case Hexagon::STw_GP_cPt_V4:
- return Hexagon::STw_GP_cNotPt_V4;
- case Hexagon::STw_GP_cNotPt_V4:
- return Hexagon::STw_GP_cPt_V4;
-
- // Load.
- case Hexagon::LDrid_cPt:
- return Hexagon::LDrid_cNotPt;
- case Hexagon::LDrid_cNotPt:
- return Hexagon::LDrid_cPt;
-
- case Hexagon::LDriw_cPt:
- return Hexagon::LDriw_cNotPt;
- case Hexagon::LDriw_cNotPt:
- return Hexagon::LDriw_cPt;
-
- case Hexagon::LDrih_cPt:
- return Hexagon::LDrih_cNotPt;
- case Hexagon::LDrih_cNotPt:
- return Hexagon::LDrih_cPt;
-
- case Hexagon::LDriuh_cPt:
- return Hexagon::LDriuh_cNotPt;
- case Hexagon::LDriuh_cNotPt:
- return Hexagon::LDriuh_cPt;
-
- case Hexagon::LDrib_cPt:
- return Hexagon::LDrib_cNotPt;
- case Hexagon::LDrib_cNotPt:
- return Hexagon::LDrib_cPt;
-
- case Hexagon::LDriub_cPt:
- return Hexagon::LDriub_cNotPt;
- case Hexagon::LDriub_cNotPt:
- return Hexagon::LDriub_cPt;
-
- // Load Indexed.
- case Hexagon::LDrid_indexed_cPt:
- return Hexagon::LDrid_indexed_cNotPt;
- case Hexagon::LDrid_indexed_cNotPt:
- return Hexagon::LDrid_indexed_cPt;
-
- case Hexagon::LDriw_indexed_cPt:
- return Hexagon::LDriw_indexed_cNotPt;
- case Hexagon::LDriw_indexed_cNotPt:
- return Hexagon::LDriw_indexed_cPt;
-
- case Hexagon::LDrih_indexed_cPt:
- return Hexagon::LDrih_indexed_cNotPt;
- case Hexagon::LDrih_indexed_cNotPt:
- return Hexagon::LDrih_indexed_cPt;
-
- case Hexagon::LDriuh_indexed_cPt:
- return Hexagon::LDriuh_indexed_cNotPt;
- case Hexagon::LDriuh_indexed_cNotPt:
- return Hexagon::LDriuh_indexed_cPt;
-
- case Hexagon::LDrib_indexed_cPt:
- return Hexagon::LDrib_indexed_cNotPt;
- case Hexagon::LDrib_indexed_cNotPt:
- return Hexagon::LDrib_indexed_cPt;
-
- case Hexagon::LDriub_indexed_cPt:
- return Hexagon::LDriub_indexed_cNotPt;
- case Hexagon::LDriub_indexed_cNotPt:
- return Hexagon::LDriub_indexed_cPt;
-
- // Post Inc Load.
- case Hexagon::POST_LDrid_cPt:
- return Hexagon::POST_LDrid_cNotPt;
- case Hexagon::POST_LDriw_cNotPt:
- return Hexagon::POST_LDriw_cPt;
-
- case Hexagon::POST_LDrih_cPt:
- return Hexagon::POST_LDrih_cNotPt;
- case Hexagon::POST_LDrih_cNotPt:
- return Hexagon::POST_LDrih_cPt;
-
- case Hexagon::POST_LDriuh_cPt:
- return Hexagon::POST_LDriuh_cNotPt;
- case Hexagon::POST_LDriuh_cNotPt:
- return Hexagon::POST_LDriuh_cPt;
-
- case Hexagon::POST_LDrib_cPt:
- return Hexagon::POST_LDrib_cNotPt;
- case Hexagon::POST_LDrib_cNotPt:
- return Hexagon::POST_LDrib_cPt;
-
- case Hexagon::POST_LDriub_cPt:
- return Hexagon::POST_LDriub_cNotPt;
- case Hexagon::POST_LDriub_cNotPt:
- return Hexagon::POST_LDriub_cPt;
-
- // Dealloc_return.
+ // Dealloc_return.
case Hexagon::DEALLOC_RET_cPt_V4:
return Hexagon::DEALLOC_RET_cNotPt_V4;
case Hexagon::DEALLOC_RET_cNotPt_V4:
@@ -1272,6 +749,18 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
}
}
+// New Value Store instructions.
+bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
+ const uint64_t F = MI->getDesc().TSFlags;
+
+ return ((F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask);
+}
+
+bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
+ const uint64_t F = get(Opcode).TSFlags;
+
+ return ((F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask);
+}
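+// Editorial sketch, not part of the upstream patch: the query above reads a
+// single-bit field out of TSFlags. Assuming, purely for illustration, that
+// NVStorePos == 5 and NVStoreMask == 1:
+//   uint64_t F  = get(Opcode).TSFlags;  // e.g. F == 0x20 (bit 5 set)
+//   bool IsNVSt = (F >> 5) & 1;         // evaluates to true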
int HexagonInstrInfo::
getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
@@ -1285,218 +774,21 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
// This switch case will be removed once all the instructions have been
// modified to use relation maps.
switch(Opc) {
- case Hexagon::TFR:
- return !invertPredicate ? Hexagon::TFR_cPt :
- Hexagon::TFR_cNotPt;
case Hexagon::TFRI_f:
return !invertPredicate ? Hexagon::TFRI_cPt_f :
Hexagon::TFRI_cNotPt_f;
- case Hexagon::TFRI:
- return !invertPredicate ? Hexagon::TFRI_cPt :
- Hexagon::TFRI_cNotPt;
- case Hexagon::JMP:
- return !invertPredicate ? Hexagon::JMP_t :
- Hexagon::JMP_f;
-
case Hexagon::COMBINE_rr:
return !invertPredicate ? Hexagon::COMBINE_rr_cPt :
Hexagon::COMBINE_rr_cNotPt;
- case Hexagon::ASLH:
- return !invertPredicate ? Hexagon::ASLH_cPt_V4 :
- Hexagon::ASLH_cNotPt_V4;
- case Hexagon::ASRH:
- return !invertPredicate ? Hexagon::ASRH_cPt_V4 :
- Hexagon::ASRH_cNotPt_V4;
- case Hexagon::SXTB:
- return !invertPredicate ? Hexagon::SXTB_cPt_V4 :
- Hexagon::SXTB_cNotPt_V4;
- case Hexagon::SXTH:
- return !invertPredicate ? Hexagon::SXTH_cPt_V4 :
- Hexagon::SXTH_cNotPt_V4;
- case Hexagon::ZXTB:
- return !invertPredicate ? Hexagon::ZXTB_cPt_V4 :
- Hexagon::ZXTB_cNotPt_V4;
- case Hexagon::ZXTH:
- return !invertPredicate ? Hexagon::ZXTH_cPt_V4 :
- Hexagon::ZXTH_cNotPt_V4;
-
- case Hexagon::JMPR:
- return !invertPredicate ? Hexagon::JMPR_t :
- Hexagon::JMPR_f;
-
- // V4 indexed+scaled load.
- case Hexagon::LDrid_indexed_shl_V4:
- return !invertPredicate ? Hexagon::LDrid_indexed_shl_cPt_V4 :
- Hexagon::LDrid_indexed_shl_cNotPt_V4;
- case Hexagon::LDrib_indexed_shl_V4:
- return !invertPredicate ? Hexagon::LDrib_indexed_shl_cPt_V4 :
- Hexagon::LDrib_indexed_shl_cNotPt_V4;
- case Hexagon::LDriub_indexed_shl_V4:
- return !invertPredicate ? Hexagon::LDriub_indexed_shl_cPt_V4 :
- Hexagon::LDriub_indexed_shl_cNotPt_V4;
- case Hexagon::LDrih_indexed_shl_V4:
- return !invertPredicate ? Hexagon::LDrih_indexed_shl_cPt_V4 :
- Hexagon::LDrih_indexed_shl_cNotPt_V4;
- case Hexagon::LDriuh_indexed_shl_V4:
- return !invertPredicate ? Hexagon::LDriuh_indexed_shl_cPt_V4 :
- Hexagon::LDriuh_indexed_shl_cNotPt_V4;
- case Hexagon::LDriw_indexed_shl_V4:
- return !invertPredicate ? Hexagon::LDriw_indexed_shl_cPt_V4 :
- Hexagon::LDriw_indexed_shl_cNotPt_V4;
-
- // V4 Load from global address
- case Hexagon::LDd_GP_V4:
- return !invertPredicate ? Hexagon::LDd_GP_cPt_V4 :
- Hexagon::LDd_GP_cNotPt_V4;
- case Hexagon::LDb_GP_V4:
- return !invertPredicate ? Hexagon::LDb_GP_cPt_V4 :
- Hexagon::LDb_GP_cNotPt_V4;
- case Hexagon::LDub_GP_V4:
- return !invertPredicate ? Hexagon::LDub_GP_cPt_V4 :
- Hexagon::LDub_GP_cNotPt_V4;
- case Hexagon::LDh_GP_V4:
- return !invertPredicate ? Hexagon::LDh_GP_cPt_V4 :
- Hexagon::LDh_GP_cNotPt_V4;
- case Hexagon::LDuh_GP_V4:
- return !invertPredicate ? Hexagon::LDuh_GP_cPt_V4 :
- Hexagon::LDuh_GP_cNotPt_V4;
- case Hexagon::LDw_GP_V4:
- return !invertPredicate ? Hexagon::LDw_GP_cPt_V4 :
- Hexagon::LDw_GP_cNotPt_V4;
-
- // Byte.
- case Hexagon::POST_STbri:
- return !invertPredicate ? Hexagon::POST_STbri_cPt :
- Hexagon::POST_STbri_cNotPt;
- case Hexagon::STrib:
- return !invertPredicate ? Hexagon::STrib_cPt :
- Hexagon::STrib_cNotPt;
- case Hexagon::STrib_indexed:
- return !invertPredicate ? Hexagon::STrib_indexed_cPt :
- Hexagon::STrib_indexed_cNotPt;
- case Hexagon::STrib_imm_V4:
- return !invertPredicate ? Hexagon::STrib_imm_cPt_V4 :
- Hexagon::STrib_imm_cNotPt_V4;
- case Hexagon::STrib_indexed_shl_V4:
- return !invertPredicate ? Hexagon::STrib_indexed_shl_cPt_V4 :
- Hexagon::STrib_indexed_shl_cNotPt_V4;
- // Halfword.
- case Hexagon::POST_SThri:
- return !invertPredicate ? Hexagon::POST_SThri_cPt :
- Hexagon::POST_SThri_cNotPt;
- case Hexagon::STrih:
- return !invertPredicate ? Hexagon::STrih_cPt :
- Hexagon::STrih_cNotPt;
- case Hexagon::STrih_indexed:
- return !invertPredicate ? Hexagon::STrih_indexed_cPt :
- Hexagon::STrih_indexed_cNotPt;
- case Hexagon::STrih_imm_V4:
- return !invertPredicate ? Hexagon::STrih_imm_cPt_V4 :
- Hexagon::STrih_imm_cNotPt_V4;
- case Hexagon::STrih_indexed_shl_V4:
- return !invertPredicate ? Hexagon::STrih_indexed_shl_cPt_V4 :
- Hexagon::STrih_indexed_shl_cNotPt_V4;
+
// Word.
- case Hexagon::POST_STwri:
- return !invertPredicate ? Hexagon::POST_STwri_cPt :
- Hexagon::POST_STwri_cNotPt;
- case Hexagon::STriw:
+ case Hexagon::STriw_f:
return !invertPredicate ? Hexagon::STriw_cPt :
Hexagon::STriw_cNotPt;
- case Hexagon::STriw_indexed:
+ case Hexagon::STriw_indexed_f:
return !invertPredicate ? Hexagon::STriw_indexed_cPt :
Hexagon::STriw_indexed_cNotPt;
- case Hexagon::STriw_indexed_shl_V4:
- return !invertPredicate ? Hexagon::STriw_indexed_shl_cPt_V4 :
- Hexagon::STriw_indexed_shl_cNotPt_V4;
- case Hexagon::STriw_imm_V4:
- return !invertPredicate ? Hexagon::STriw_imm_cPt_V4 :
- Hexagon::STriw_imm_cNotPt_V4;
- // Double word.
- case Hexagon::POST_STdri:
- return !invertPredicate ? Hexagon::POST_STdri_cPt :
- Hexagon::POST_STdri_cNotPt;
- case Hexagon::STrid:
- return !invertPredicate ? Hexagon::STrid_cPt :
- Hexagon::STrid_cNotPt;
- case Hexagon::STrid_indexed:
- return !invertPredicate ? Hexagon::STrid_indexed_cPt :
- Hexagon::STrid_indexed_cNotPt;
- case Hexagon::STrid_indexed_shl_V4:
- return !invertPredicate ? Hexagon::STrid_indexed_shl_cPt_V4 :
- Hexagon::STrid_indexed_shl_cNotPt_V4;
-
- // V4 Store to global address
- case Hexagon::STd_GP_V4:
- return !invertPredicate ? Hexagon::STd_GP_cPt_V4 :
- Hexagon::STd_GP_cNotPt_V4;
- case Hexagon::STb_GP_V4:
- return !invertPredicate ? Hexagon::STb_GP_cPt_V4 :
- Hexagon::STb_GP_cNotPt_V4;
- case Hexagon::STh_GP_V4:
- return !invertPredicate ? Hexagon::STh_GP_cPt_V4 :
- Hexagon::STh_GP_cNotPt_V4;
- case Hexagon::STw_GP_V4:
- return !invertPredicate ? Hexagon::STw_GP_cPt_V4 :
- Hexagon::STw_GP_cNotPt_V4;
-
- // Load.
- case Hexagon::LDrid:
- return !invertPredicate ? Hexagon::LDrid_cPt :
- Hexagon::LDrid_cNotPt;
- case Hexagon::LDriw:
- return !invertPredicate ? Hexagon::LDriw_cPt :
- Hexagon::LDriw_cNotPt;
- case Hexagon::LDrih:
- return !invertPredicate ? Hexagon::LDrih_cPt :
- Hexagon::LDrih_cNotPt;
- case Hexagon::LDriuh:
- return !invertPredicate ? Hexagon::LDriuh_cPt :
- Hexagon::LDriuh_cNotPt;
- case Hexagon::LDrib:
- return !invertPredicate ? Hexagon::LDrib_cPt :
- Hexagon::LDrib_cNotPt;
- case Hexagon::LDriub:
- return !invertPredicate ? Hexagon::LDriub_cPt :
- Hexagon::LDriub_cNotPt;
- // Load Indexed.
- case Hexagon::LDrid_indexed:
- return !invertPredicate ? Hexagon::LDrid_indexed_cPt :
- Hexagon::LDrid_indexed_cNotPt;
- case Hexagon::LDriw_indexed:
- return !invertPredicate ? Hexagon::LDriw_indexed_cPt :
- Hexagon::LDriw_indexed_cNotPt;
- case Hexagon::LDrih_indexed:
- return !invertPredicate ? Hexagon::LDrih_indexed_cPt :
- Hexagon::LDrih_indexed_cNotPt;
- case Hexagon::LDriuh_indexed:
- return !invertPredicate ? Hexagon::LDriuh_indexed_cPt :
- Hexagon::LDriuh_indexed_cNotPt;
- case Hexagon::LDrib_indexed:
- return !invertPredicate ? Hexagon::LDrib_indexed_cPt :
- Hexagon::LDrib_indexed_cNotPt;
- case Hexagon::LDriub_indexed:
- return !invertPredicate ? Hexagon::LDriub_indexed_cPt :
- Hexagon::LDriub_indexed_cNotPt;
- // Post Increment Load.
- case Hexagon::POST_LDrid:
- return !invertPredicate ? Hexagon::POST_LDrid_cPt :
- Hexagon::POST_LDrid_cNotPt;
- case Hexagon::POST_LDriw:
- return !invertPredicate ? Hexagon::POST_LDriw_cPt :
- Hexagon::POST_LDriw_cNotPt;
- case Hexagon::POST_LDrih:
- return !invertPredicate ? Hexagon::POST_LDrih_cPt :
- Hexagon::POST_LDrih_cNotPt;
- case Hexagon::POST_LDriuh:
- return !invertPredicate ? Hexagon::POST_LDriuh_cPt :
- Hexagon::POST_LDriuh_cNotPt;
- case Hexagon::POST_LDrib:
- return !invertPredicate ? Hexagon::POST_LDrib_cPt :
- Hexagon::POST_LDrib_cNotPt;
- case Hexagon::POST_LDriub:
- return !invertPredicate ? Hexagon::POST_LDriub_cPt :
- Hexagon::POST_LDriub_cNotPt;
+
// DEALLOC_RETURN.
case Hexagon::DEALLOC_RET_V4:
return !invertPredicate ? Hexagon::DEALLOC_RET_cPt_V4 :
@@ -1727,6 +1019,16 @@ bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
return ((F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask);
}
+// Returns true, if a ST insn can be promoted to a new-value store.
+bool HexagonInstrInfo::mayBeNewStore(const MachineInstr *MI) const {
+ const HexagonRegisterInfo& QRI = getRegisterInfo();
+ const uint64_t F = MI->getDesc().TSFlags;
+
+ return ((F >> HexagonII::mayNVStorePos) &
+ HexagonII::mayNVStoreMask &
+ QRI.Subtarget.hasV4TOps());
+}
+
bool
HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const {
@@ -1911,6 +1213,8 @@ isValidAutoIncImm(const EVT VT, const int Offset) const {
bool HexagonInstrInfo::
isMemOp(const MachineInstr *MI) const {
+// return MI->getDesc().mayLoad() && MI->getDesc().mayStore();
+
switch (MI->getOpcode())
{
default: return false;
@@ -2202,6 +1506,10 @@ bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
return false;
}
+bool HexagonInstrInfo::isPostIncrement (const MachineInstr* MI) const {
+ return (getAddrMode(MI) == HexagonII::PostInc);
+}
+
bool HexagonInstrInfo::isNewValue(const MachineInstr* MI) const {
const uint64_t F = MI->getDesc().TSFlags;
return ((F >> HexagonII::NewValuePos) & HexagonII::NewValueMask);
@@ -2214,6 +1522,97 @@ bool HexagonInstrInfo::isDotNewInst (const MachineInstr* MI) const {
(isPredicated(MI) && isPredicatedNew(MI)));
}
+// Returns the most basic instruction for the .new predicated instructions and
+// new-value stores.
+// For example, all of the following instructions will be converted back to the
+// same instruction:
+// 1) if (p0.new) memw(R0+#0) = R1.new --->
+// 2) if (p0) memw(R0+#0) = R1.new -------> if (p0) memw(R0+#0) = R1
+// 3) if (p0.new) memw(R0+#0) = R1 --->
+//
+
+int HexagonInstrInfo::GetDotOldOp(const int opc) const {
+ int NewOp = opc;
+ if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
+ NewOp = Hexagon::getPredOldOpcode(NewOp);
+ if (NewOp < 0)
+ assert(0 && "Couldn't change predicate new instruction to its old form.");
+ }
+
+ if (isNewValueStore(NewOp)) { // Convert into non new-value format
+ NewOp = Hexagon::getNonNVStore(NewOp);
+ if (NewOp < 0)
+ assert(0 && "Couldn't change new-value store to its old form.");
+ }
+ return NewOp;
+}
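+// Editorial sketch (illustrative only, not part of the upstream patch): a
+// typical caller demotes an instruction in place once its packet is dissolved:
+//   int OldOpc = QII->GetDotOldOp(MI->getOpcode());
+//   MI->setDesc(QII->get(OldOpc));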
+
+// Return the new value instruction for a given store.
+int HexagonInstrInfo::GetDotNewOp(const MachineInstr* MI) const {
+ int NVOpcode = Hexagon::getNewValueOpcode(MI->getOpcode());
+ if (NVOpcode >= 0) // Valid new-value store instruction.
+ return NVOpcode;
+
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("Unknown .new type");
+ // store new value byte
+ case Hexagon::STrib_shl_V4:
+ return Hexagon::STrib_shl_nv_V4;
+
+ case Hexagon::STrih_shl_V4:
+ return Hexagon::STrih_shl_nv_V4;
+
+ case Hexagon::STriw_f:
+ return Hexagon::STriw_nv_V4;
+
+ case Hexagon::STriw_indexed_f:
+ return Hexagon::STriw_indexed_nv_V4;
+
+ case Hexagon::STriw_shl_V4:
+ return Hexagon::STriw_shl_nv_V4;
+
+ }
+ return 0;
+}
+
+// Return .new predicate version for an instruction.
+int HexagonInstrInfo::GetDotNewPredOp(MachineInstr *MI,
+ const MachineBranchProbabilityInfo
+ *MBPI) const {
+
+ int NewOpcode = Hexagon::getPredNewOpcode(MI->getOpcode());
+ if (NewOpcode >= 0) // Valid predicate new instruction
+ return NewOpcode;
+
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("Unknown .new type");
+  // Conditional Jumps
+ case Hexagon::JMP_t:
+ case Hexagon::JMP_f:
+ return getDotNewPredJumpOp(MI, MBPI);
+
+ case Hexagon::JMPR_t:
+ return Hexagon::JMPR_tnew_tV3;
+
+ case Hexagon::JMPR_f:
+ return Hexagon::JMPR_fnew_tV3;
+
+ case Hexagon::JMPret_t:
+ return Hexagon::JMPret_tnew_tV3;
+
+ case Hexagon::JMPret_f:
+ return Hexagon::JMPret_fnew_tV3;
+
+
+ // Conditional combine
+ case Hexagon::COMBINE_rr_cPt :
+ return Hexagon::COMBINE_rr_cdnPt;
+ case Hexagon::COMBINE_rr_cNotPt :
+ return Hexagon::COMBINE_rr_cdnNotPt;
+ }
+}
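+// Editorial sketch (illustrative only): promotion goes the other way, given a
+// MachineBranchProbabilityInfo *MBPI supplied by the enclosing pass:
+//   int NewOpc = QII->GetDotNewPredOp(MI, MBPI);
+//   MI->setDesc(QII->get(NewOpc));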
+
+
unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const {
const uint64_t F = MI->getDesc().TSFlags;
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index b721da4620c7..3f45b8b2986e 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -26,8 +26,9 @@
namespace llvm {
class HexagonInstrInfo : public HexagonGenInstrInfo {
+ virtual void anchor();
const HexagonRegisterInfo RI;
- const HexagonSubtarget& Subtarget;
+ const HexagonSubtarget &Subtarget;
typedef unsigned Opcode_t;
public:
@@ -148,11 +149,6 @@ public:
isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumCycles,
const BranchProbability &Probability) const;
- virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx,
- uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const;
virtual DFAPacketizer*
CreateTargetScheduleState(const TargetMachine *TM,
const ScheduleDAG *DAG) const;
@@ -185,12 +181,19 @@ public:
bool isNewValueInst(const MachineInstr* MI) const;
bool isNewValue(const MachineInstr* MI) const;
bool isDotNewInst(const MachineInstr* MI) const;
+ int GetDotOldOp(const int opc) const;
+ int GetDotNewOp(const MachineInstr* MI) const;
+ int GetDotNewPredOp(MachineInstr *MI,
+ const MachineBranchProbabilityInfo
+ *MBPI) const;
+ bool mayBeNewStore(const MachineInstr* MI) const;
bool isDeallocRet(const MachineInstr *MI) const;
unsigned getInvertedPredicatedOpcode(const int Opc) const;
bool isExtendable(const MachineInstr* MI) const;
bool isExtended(const MachineInstr* MI) const;
bool isPostIncrement(const MachineInstr* MI) const;
bool isNewValueStore(const MachineInstr* MI) const;
+ bool isNewValueStore(unsigned Opcode) const;
bool isNewValueJump(const MachineInstr* MI) const;
bool isNewValueJumpCandidate(const MachineInstr *MI) const;
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td
index 2a4b17b17368..c96aaca8f8d2 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -384,6 +384,12 @@ def TFCR : CRInst<(outs CRRegs:$dst), (ins IntRegs:$src1),
// ALU32/PERM +
//===----------------------------------------------------------------------===//
+let neverHasSideEffects = 1 in
+def COMBINE_ii : ALU32_ii<(outs DoubleRegs:$dst),
+ (ins s8Imm:$src1, s8Imm:$src2),
+ "$dst = combine(#$src1, #$src2)",
+ []>;
+
// Mux.
def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
DoubleRegs:$src2,
@@ -932,12 +938,21 @@ multiclass LD_MEMri<string mnemonic, string CextOp, RegisterClass RC,
}
let addrMode = BaseImmOffset, isMEMri = "true" in {
- defm LDrib: LD_MEMri < "memb", "LDrib", IntRegs, 11, 6>, AddrModeRel;
- defm LDriub: LD_MEMri < "memub" , "LDriub", IntRegs, 11, 6>, AddrModeRel;
- defm LDrih: LD_MEMri < "memh", "LDrih", IntRegs, 12, 7>, AddrModeRel;
- defm LDriuh: LD_MEMri < "memuh", "LDriuh", IntRegs, 12, 7>, AddrModeRel;
- defm LDriw: LD_MEMri < "memw", "LDriw", IntRegs, 13, 8>, AddrModeRel;
- defm LDrid: LD_MEMri < "memd", "LDrid", DoubleRegs, 14, 9>, AddrModeRel;
+ let accessSize = ByteAccess in {
+ defm LDrib: LD_MEMri < "memb", "LDrib", IntRegs, 11, 6>, AddrModeRel;
+ defm LDriub: LD_MEMri < "memub" , "LDriub", IntRegs, 11, 6>, AddrModeRel;
+ }
+
+ let accessSize = HalfWordAccess in {
+ defm LDrih: LD_MEMri < "memh", "LDrih", IntRegs, 12, 7>, AddrModeRel;
+ defm LDriuh: LD_MEMri < "memuh", "LDriuh", IntRegs, 12, 7>, AddrModeRel;
+ }
+
+ let accessSize = WordAccess in
+ defm LDriw: LD_MEMri < "memw", "LDriw", IntRegs, 13, 8>, AddrModeRel;
+
+ let accessSize = DoubleWordAccess in
+ defm LDrid: LD_MEMri < "memd", "LDrid", DoubleRegs, 14, 9>, AddrModeRel;
}
def : Pat < (i32 (sextloadi8 ADDRriS11_0:$addr)),
@@ -1000,18 +1015,25 @@ multiclass LD_Idxd<string mnemonic, string CextOp, RegisterClass RC,
}
let addrMode = BaseImmOffset in {
- defm LDrib_indexed: LD_Idxd <"memb", "LDrib", IntRegs, s11_0Ext, u6_0Ext,
- 11, 6>, AddrModeRel;
- defm LDriub_indexed: LD_Idxd <"memub" , "LDriub", IntRegs, s11_0Ext, u6_0Ext,
- 11, 6>, AddrModeRel;
- defm LDrih_indexed: LD_Idxd <"memh", "LDrih", IntRegs, s11_1Ext, u6_1Ext,
- 12, 7>, AddrModeRel;
- defm LDriuh_indexed: LD_Idxd <"memuh", "LDriuh", IntRegs, s11_1Ext, u6_1Ext,
- 12, 7>, AddrModeRel;
- defm LDriw_indexed: LD_Idxd <"memw", "LDriw", IntRegs, s11_2Ext, u6_2Ext,
- 13, 8>, AddrModeRel;
- defm LDrid_indexed: LD_Idxd <"memd", "LDrid", DoubleRegs, s11_3Ext, u6_3Ext,
- 14, 9>, AddrModeRel;
+ let accessSize = ByteAccess in {
+ defm LDrib_indexed: LD_Idxd <"memb", "LDrib", IntRegs, s11_0Ext, u6_0Ext,
+ 11, 6>, AddrModeRel;
+ defm LDriub_indexed: LD_Idxd <"memub" , "LDriub", IntRegs, s11_0Ext, u6_0Ext,
+ 11, 6>, AddrModeRel;
+ }
+ let accessSize = HalfWordAccess in {
+ defm LDrih_indexed: LD_Idxd <"memh", "LDrih", IntRegs, s11_1Ext, u6_1Ext,
+ 12, 7>, AddrModeRel;
+ defm LDriuh_indexed: LD_Idxd <"memuh", "LDriuh", IntRegs, s11_1Ext, u6_1Ext,
+ 12, 7>, AddrModeRel;
+ }
+ let accessSize = WordAccess in
+ defm LDriw_indexed: LD_Idxd <"memw", "LDriw", IntRegs, s11_2Ext, u6_2Ext,
+ 13, 8>, AddrModeRel;
+
+ let accessSize = DoubleWordAccess in
+ defm LDrid_indexed: LD_Idxd <"memd", "LDrid", DoubleRegs, s11_3Ext, u6_3Ext,
+ 14, 9>, AddrModeRel;
}
let AddedComplexity = 20 in {
@@ -1036,8 +1058,6 @@ def : Pat < (i64 (load (add IntRegs:$src1, s11_3ExtPred:$offset))),
//===----------------------------------------------------------------------===//
// Post increment load
-// Make sure that in post increment load, the first operand is always the post
-// increment operand.
//===----------------------------------------------------------------------===//
multiclass LD_PostInc_Pbase<string mnemonic, RegisterClass RC, Operand ImmOp,
@@ -1079,7 +1099,7 @@ multiclass LD_PostInc<string mnemonic, string BaseOp, RegisterClass RC,
}
}
-let hasCtrlDep = 1, neverHasSideEffects = 1 in {
+let hasCtrlDep = 1, neverHasSideEffects = 1, addrMode = PostInc in {
defm POST_LDrib : LD_PostInc<"memb", "LDrib", IntRegs, s4_0Imm>,
PredNewRel;
defm POST_LDriub : LD_PostInc<"memub", "LDriub", IntRegs, s4_0Imm>,
@@ -1382,7 +1402,7 @@ multiclass ST_PostInc_Pbase<string mnemonic, RegisterClass RC, Operand ImmOp,
multiclass ST_PostInc_Pred<string mnemonic, RegisterClass RC,
Operand ImmOp, bit PredNot> {
let isPredicatedFalse = PredNot in {
- defm _c#NAME# : ST_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 0>;
+ defm _c#NAME : ST_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 0>;
// Predicate new
let Predicates = [HasV4T], validSubTargets = HasV4SubT in
defm _cdn#NAME#_V4 : ST_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 1>;
@@ -1397,7 +1417,7 @@ multiclass ST_PostInc<string mnemonic, string BaseOp, RegisterClass RC,
let isPredicable = 1 in
def NAME : STInst2PI<(outs IntRegs:$dst),
(ins IntRegs:$src1, ImmOp:$offset, RC:$src2),
- #mnemonic#"($src1++#$offset) = $src2",
+ mnemonic#"($src1++#$offset) = $src2",
[],
"$src1 = $dst">;
@@ -1474,12 +1494,17 @@ multiclass ST_MEMri<string mnemonic, string CextOp, RegisterClass RC,
}
let addrMode = BaseImmOffset, isMEMri = "true" in {
- defm STrib: ST_MEMri < "memb", "STrib", IntRegs, 11, 6>, AddrModeRel;
- defm STrih: ST_MEMri < "memh", "STrih", IntRegs, 12, 7>, AddrModeRel;
- defm STriw: ST_MEMri < "memw", "STriw", IntRegs, 13, 8>, AddrModeRel;
+ let accessSize = ByteAccess in
+ defm STrib: ST_MEMri < "memb", "STrib", IntRegs, 11, 6>, AddrModeRel;
+
+ let accessSize = HalfWordAccess in
+ defm STrih: ST_MEMri < "memh", "STrih", IntRegs, 12, 7>, AddrModeRel;
- let isNVStorable = 0 in
- defm STrid: ST_MEMri < "memd", "STrid", DoubleRegs, 14, 9>, AddrModeRel;
+ let accessSize = WordAccess in
+ defm STriw: ST_MEMri < "memw", "STriw", IntRegs, 13, 8>, AddrModeRel;
+
+ let accessSize = DoubleWordAccess, isNVStorable = 0 in
+ defm STrid: ST_MEMri < "memd", "STrid", DoubleRegs, 14, 9>, AddrModeRel;
}
def : Pat<(truncstorei8 (i32 IntRegs:$src1), ADDRriS11_0:$addr),
@@ -1541,15 +1566,21 @@ multiclass ST_Idxd<string mnemonic, string CextOp, RegisterClass RC,
}
let addrMode = BaseImmOffset, InputType = "reg" in {
- defm STrib_indexed: ST_Idxd < "memb", "STrib", IntRegs, s11_0Ext,
- u6_0Ext, 11, 6>, AddrModeRel, ImmRegRel;
- defm STrih_indexed: ST_Idxd < "memh", "STrih", IntRegs, s11_1Ext,
- u6_1Ext, 12, 7>, AddrModeRel, ImmRegRel;
- defm STriw_indexed: ST_Idxd < "memw", "STriw", IntRegs, s11_2Ext,
- u6_2Ext, 13, 8>, AddrModeRel, ImmRegRel;
- let isNVStorable = 0 in
- defm STrid_indexed: ST_Idxd < "memd", "STrid", DoubleRegs, s11_3Ext,
- u6_3Ext, 14, 9>, AddrModeRel;
+ let accessSize = ByteAccess in
+ defm STrib_indexed: ST_Idxd < "memb", "STrib", IntRegs, s11_0Ext,
+ u6_0Ext, 11, 6>, AddrModeRel, ImmRegRel;
+
+ let accessSize = HalfWordAccess in
+ defm STrih_indexed: ST_Idxd < "memh", "STrih", IntRegs, s11_1Ext,
+ u6_1Ext, 12, 7>, AddrModeRel, ImmRegRel;
+
+ let accessSize = WordAccess in
+ defm STriw_indexed: ST_Idxd < "memw", "STriw", IntRegs, s11_2Ext,
+ u6_2Ext, 13, 8>, AddrModeRel, ImmRegRel;
+
+ let accessSize = DoubleWordAccess, isNVStorable = 0 in
+ defm STrid_indexed: ST_Idxd < "memd", "STrid", DoubleRegs, s11_3Ext,
+ u6_3Ext, 14, 9>, AddrModeRel;
}
let AddedComplexity = 10 in {
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td
index 933239d5c3a6..475c23d98bf7 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV4.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td
@@ -213,7 +213,7 @@ def COMBINE_iI_V4 : ALU32_ii<(outs DoubleRegs:$dst),
// Template class for load instructions with Absolute set addressing mode.
//===----------------------------------------------------------------------===//
let isExtended = 1, opExtendable = 2, neverHasSideEffects = 1,
-validSubTargets = HasV4SubT in
+validSubTargets = HasV4SubT, addrMode = AbsoluteSet in
class T_LD_abs_set<string mnemonic, RegisterClass RC>:
LDInst2<(outs RC:$dst1, IntRegs:$dst2),
(ins u0AlwaysExt:$addr),
@@ -266,12 +266,23 @@ multiclass ld_idxd_shl<string mnemonic, string CextOp, RegisterClass RC> {
}
let addrMode = BaseRegOffset in {
- defm LDrib_indexed_shl: ld_idxd_shl<"memb", "LDrib", IntRegs>, AddrModeRel;
- defm LDriub_indexed_shl: ld_idxd_shl<"memub", "LDriub", IntRegs>, AddrModeRel;
- defm LDrih_indexed_shl: ld_idxd_shl<"memh", "LDrih", IntRegs>, AddrModeRel;
- defm LDriuh_indexed_shl: ld_idxd_shl<"memuh", "LDriuh", IntRegs>, AddrModeRel;
- defm LDriw_indexed_shl: ld_idxd_shl<"memw", "LDriw", IntRegs>, AddrModeRel;
- defm LDrid_indexed_shl: ld_idxd_shl<"memd", "LDrid", DoubleRegs>, AddrModeRel;
+ let accessSize = ByteAccess in {
+ defm LDrib_indexed_shl: ld_idxd_shl<"memb", "LDrib", IntRegs>,
+ AddrModeRel;
+ defm LDriub_indexed_shl: ld_idxd_shl<"memub", "LDriub", IntRegs>,
+ AddrModeRel;
+ }
+ let accessSize = HalfWordAccess in {
+ defm LDrih_indexed_shl: ld_idxd_shl<"memh", "LDrih", IntRegs>, AddrModeRel;
+ defm LDriuh_indexed_shl: ld_idxd_shl<"memuh", "LDriuh", IntRegs>,
+ AddrModeRel;
+ }
+ let accessSize = WordAccess in
+ defm LDriw_indexed_shl: ld_idxd_shl<"memw", "LDriw", IntRegs>, AddrModeRel;
+
+ let accessSize = DoubleWordAccess in
+ defm LDrid_indexed_shl: ld_idxd_shl<"memd", "LDrid", DoubleRegs>,
+ AddrModeRel;
}
// 'def pats' for load instructions with base + register offset and non-zero
@@ -456,7 +467,8 @@ def: Pat <(i64 (extloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))),
//===----------------------------------------------------------------------===//
// Template class for store instructions with Absolute set addressing mode.
//===----------------------------------------------------------------------===//
-let isExtended = 1, opExtendable = 2, validSubTargets = HasV4SubT in
+let isExtended = 1, opExtendable = 2, validSubTargets = HasV4SubT,
+addrMode = AbsoluteSet in
class T_ST_abs_set<string mnemonic, RegisterClass RC>:
STInst2<(outs IntRegs:$dst1),
(ins RC:$src1, u0AlwaysExt:$src2),
@@ -551,17 +563,20 @@ multiclass ST_Idxd_shl_nv<string mnemonic, string CextOp, RegisterClass RC> {
let addrMode = BaseRegOffset, neverHasSideEffects = 1,
validSubTargets = HasV4SubT in {
- defm STrib_indexed_shl: ST_Idxd_shl<"memb", "STrib", IntRegs>,
- ST_Idxd_shl_nv<"memb", "STrib", IntRegs>, AddrModeRel;
+ let accessSize = ByteAccess in
+ defm STrib_indexed_shl: ST_Idxd_shl<"memb", "STrib", IntRegs>,
+ ST_Idxd_shl_nv<"memb", "STrib", IntRegs>, AddrModeRel;
- defm STrih_indexed_shl: ST_Idxd_shl<"memh", "STrih", IntRegs>,
- ST_Idxd_shl_nv<"memh", "STrih", IntRegs>, AddrModeRel;
+ let accessSize = HalfWordAccess in
+ defm STrih_indexed_shl: ST_Idxd_shl<"memh", "STrih", IntRegs>,
+ ST_Idxd_shl_nv<"memh", "STrih", IntRegs>, AddrModeRel;
- defm STriw_indexed_shl: ST_Idxd_shl<"memw", "STriw", IntRegs>,
- ST_Idxd_shl_nv<"memw", "STriw", IntRegs>, AddrModeRel;
+ let accessSize = WordAccess in
+ defm STriw_indexed_shl: ST_Idxd_shl<"memw", "STriw", IntRegs>,
+ ST_Idxd_shl_nv<"memw", "STriw", IntRegs>, AddrModeRel;
- let isNVStorable = 0 in
- defm STrid_indexed_shl: ST_Idxd_shl<"memd", "STrid", DoubleRegs>, AddrModeRel;
+ let isNVStorable = 0, accessSize = DoubleWordAccess in
+ defm STrid_indexed_shl: ST_Idxd_shl<"memd", "STrid", DoubleRegs>, AddrModeRel;
}
let Predicates = [HasV4T], AddedComplexity = 10 in {
@@ -695,10 +710,15 @@ multiclass ST_Imm<string mnemonic, string CextOp, Operand OffsetOp> {
}
let addrMode = BaseImmOffset, InputType = "imm",
- validSubTargets = HasV4SubT in {
- defm STrib_imm : ST_Imm<"memb", "STrib", u6_0Imm>, ImmRegRel, PredNewRel;
- defm STrih_imm : ST_Imm<"memh", "STrih", u6_1Imm>, ImmRegRel, PredNewRel;
- defm STriw_imm : ST_Imm<"memw", "STriw", u6_2Imm>, ImmRegRel, PredNewRel;
+validSubTargets = HasV4SubT in {
+ let accessSize = ByteAccess in
+ defm STrib_imm : ST_Imm<"memb", "STrib", u6_0Imm>, ImmRegRel, PredNewRel;
+
+ let accessSize = HalfWordAccess in
+ defm STrih_imm : ST_Imm<"memh", "STrih", u6_1Imm>, ImmRegRel, PredNewRel;
+
+ let accessSize = WordAccess in
+ defm STriw_imm : ST_Imm<"memw", "STriw", u6_2Imm>, ImmRegRel, PredNewRel;
}
let Predicates = [HasV4T], AddedComplexity = 10 in {
@@ -834,12 +854,17 @@ multiclass ST_Idxd_nv<string mnemonic, string CextOp, RegisterClass RC,
}
let addrMode = BaseImmOffset, validSubTargets = HasV4SubT in {
- defm STrib_indexed: ST_Idxd_nv<"memb", "STrib", IntRegs, s11_0Ext,
- u6_0Ext, 11, 6>, AddrModeRel;
- defm STrih_indexed: ST_Idxd_nv<"memh", "STrih", IntRegs, s11_1Ext,
- u6_1Ext, 12, 7>, AddrModeRel;
- defm STriw_indexed: ST_Idxd_nv<"memw", "STriw", IntRegs, s11_2Ext,
- u6_2Ext, 13, 8>, AddrModeRel;
+ let accessSize = ByteAccess in
+ defm STrib_indexed: ST_Idxd_nv<"memb", "STrib", IntRegs, s11_0Ext,
+ u6_0Ext, 11, 6>, AddrModeRel;
+
+ let accessSize = HalfWordAccess in
+ defm STrih_indexed: ST_Idxd_nv<"memh", "STrih", IntRegs, s11_1Ext,
+ u6_1Ext, 12, 7>, AddrModeRel;
+
+ let accessSize = WordAccess in
+ defm STriw_indexed: ST_Idxd_nv<"memw", "STriw", IntRegs, s11_2Ext,
+ u6_2Ext, 13, 8>, AddrModeRel;
}
// multiclass for new-value store instructions with base + immediate offset.
@@ -887,9 +912,14 @@ multiclass ST_MEMri_nv<string mnemonic, string CextOp, RegisterClass RC,
let addrMode = BaseImmOffset, isMEMri = "true", validSubTargets = HasV4SubT,
mayStore = 1 in {
- defm STrib: ST_MEMri_nv<"memb", "STrib", IntRegs, 11, 6>, AddrModeRel;
- defm STrih: ST_MEMri_nv<"memh", "STrih", IntRegs, 12, 7>, AddrModeRel;
- defm STriw: ST_MEMri_nv<"memw", "STriw", IntRegs, 13, 8>, AddrModeRel;
+ let accessSize = ByteAccess in
+ defm STrib: ST_MEMri_nv<"memb", "STrib", IntRegs, 11, 6>, AddrModeRel;
+
+ let accessSize = HalfWordAccess in
+ defm STrih: ST_MEMri_nv<"memh", "STrih", IntRegs, 12, 7>, AddrModeRel;
+
+ let accessSize = WordAccess in
+ defm STriw: ST_MEMri_nv<"memw", "STriw", IntRegs, 13, 8>, AddrModeRel;
}
//===----------------------------------------------------------------------===//
@@ -939,7 +969,7 @@ multiclass ST_PostInc_nv<string mnemonic, string BaseOp, RegisterClass RC,
}
}
-let validSubTargets = HasV4SubT in {
+let addrMode = PostInc, validSubTargets = HasV4SubT in {
defm POST_STbri: ST_PostInc_nv <"memb", "STrib", IntRegs, s4_0Imm>, AddrModeRel;
defm POST_SThri: ST_PostInc_nv <"memh", "STrih", IntRegs, s4_1Imm>, AddrModeRel;
defm POST_STwri: ST_PostInc_nv <"memw", "STriw", IntRegs, s4_2Imm>, AddrModeRel;
@@ -2534,8 +2564,9 @@ def NTSTBIT_ri : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
//Deallocate frame and return.
// dealloc_return
let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicable = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in {
- def DEALLOC_RET_V4 : NVInst_V4<(outs), (ins i32imm:$amt1),
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1 in {
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_V4 : LD0Inst<(outs), (ins),
"dealloc_return",
[]>,
Requires<[HasV4T]>;
@@ -2544,9 +2575,10 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicable = 1,
// Restore registers and dealloc return function call.
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
Defs = [R29, R30, R31, PC] in {
+let validSubTargets = HasV4SubT in
def RESTORE_DEALLOC_RET_JMP_V4 : JInst<(outs),
(ins calltarget:$dst),
- "jump $dst // Restore_and_dealloc_return",
+ "jump $dst",
[]>,
Requires<[HasV4T]>;
}
@@ -2554,9 +2586,10 @@ let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
// Restore registers and dealloc frame before a tail call.
let isCall = 1, isBarrier = 1,
Defs = [R29, R30, R31, PC] in {
+let validSubTargets = HasV4SubT in
def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : JInst<(outs),
(ins calltarget:$dst),
- "call $dst // Restore_and_dealloc_before_tailcall",
+ "call $dst",
[]>,
Requires<[HasV4T]>;
}
@@ -2573,10 +2606,11 @@ let isCall = 1, isBarrier = 1,
// if (Ps) dealloc_return
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
isPredicated = 1 in {
- def DEALLOC_RET_cPt_V4 : NVInst_V4<(outs),
- (ins PredRegs:$src1, i32imm:$amt1),
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cPt_V4 : LD0Inst<(outs),
+ (ins PredRegs:$src1),
"if ($src1) dealloc_return",
[]>,
Requires<[HasV4T]>;
@@ -2584,10 +2618,10 @@ let isReturn = 1, isTerminator = 1,
// if (!Ps) dealloc_return
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
- isPredicated = 1 in {
- def DEALLOC_RET_cNotPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
- i32imm:$amt1),
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
+ isPredicated = 1, isPredicatedFalse = 1 in {
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cNotPt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
"if (!$src1) dealloc_return",
[]>,
Requires<[HasV4T]>;
@@ -2595,10 +2629,10 @@ let isReturn = 1, isTerminator = 1,
// if (Ps.new) dealloc_return:nt
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
isPredicated = 1 in {
- def DEALLOC_RET_cdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
- i32imm:$amt1),
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cdnPnt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
"if ($src1.new) dealloc_return:nt",
[]>,
Requires<[HasV4T]>;
@@ -2606,10 +2640,10 @@ let isReturn = 1, isTerminator = 1,
// if (!Ps.new) dealloc_return:nt
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
- isPredicated = 1 in {
- def DEALLOC_RET_cNotdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
- i32imm:$amt1),
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
+ isPredicated = 1, isPredicatedFalse = 1 in {
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cNotdnPnt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
"if (!$src1.new) dealloc_return:nt",
[]>,
Requires<[HasV4T]>;
@@ -2617,21 +2651,21 @@ let isReturn = 1, isTerminator = 1,
// if (Ps.new) dealloc_return:t
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
isPredicated = 1 in {
- def DEALLOC_RET_cdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
- i32imm:$amt1),
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cdnPt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
"if ($src1.new) dealloc_return:t",
[]>,
Requires<[HasV4T]>;
}
-// if (!Ps.new) dealloc_return:nt
+// if (!Ps.new) dealloc_return:t
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
- isPredicated = 1 in {
- def DEALLOC_RET_cNotdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
- i32imm:$amt1),
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
+ isPredicated = 1, isPredicatedFalse = 1 in {
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cNotdnPt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
"if (!$src1.new) dealloc_return:t",
[]>,
Requires<[HasV4T]>;
@@ -3033,37 +3067,42 @@ def : Pat<(HexagonCONST32_GP tblockaddress:$src1),
(TFRI_V4 tblockaddress:$src1)>,
Requires<[HasV4T]>;
-let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+let isExtended = 1, opExtendable = 2, AddedComplexity=50,
+neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
def TFRI_cPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, globaladdress:$src2),
- "if($src1) $dst = ##$src2",
+ (ins PredRegs:$src1, s16Ext:$src2),
+ "if($src1) $dst = #$src2",
[]>,
Requires<[HasV4T]>;
-let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+let isExtended = 1, opExtendable = 2, AddedComplexity=50, isPredicatedFalse = 1,
+neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
def TFRI_cNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, globaladdress:$src2),
- "if(!$src1) $dst = ##$src2",
+ (ins PredRegs:$src1, s16Ext:$src2),
+ "if(!$src1) $dst = #$src2",
[]>,
Requires<[HasV4T]>;
-let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+let isExtended = 1, opExtendable = 2, AddedComplexity=50,
+neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
def TFRI_cdnPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, globaladdress:$src2),
- "if($src1.new) $dst = ##$src2",
+ (ins PredRegs:$src1, s16Ext:$src2),
+ "if($src1.new) $dst = #$src2",
[]>,
Requires<[HasV4T]>;
-let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+let isExtended = 1, opExtendable = 2, AddedComplexity=50, isPredicatedFalse = 1,
+neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
def TFRI_cdnNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, globaladdress:$src2),
- "if(!$src1.new) $dst = ##$src2",
+ (ins PredRegs:$src1, s16Ext:$src2),
+ "if(!$src1.new) $dst = #$src2",
[]>,
Requires<[HasV4T]>;
let AddedComplexity = 50, Predicates = [HasV4T] in
def : Pat<(HexagonCONST32_GP tglobaladdr:$src1),
- (TFRI_V4 tglobaladdr:$src1)>;
+ (TFRI_V4 tglobaladdr:$src1)>,
+ Requires<[HasV4T]>;
// Load - Indirect with long offset: These instructions take global address
@@ -3149,6 +3188,93 @@ def STriw_offset_ext_V4 : STInst<(outs),
(add IntRegs:$src1, u6_2ImmPred:$src2))]>,
Requires<[HasV4T]>;
+def : Pat<(i64 (ctlz (i64 DoubleRegs:$src1))),
+ (i64 (COMBINE_Ir_V4 (i32 0), (i32 (CTLZ64_rr DoubleRegs:$src1))))>,
+ Requires<[HasV4T]>;
+
+def : Pat<(i64 (cttz (i64 DoubleRegs:$src1))),
+ (i64 (COMBINE_Ir_V4 (i32 0), (i32 (CTTZ64_rr DoubleRegs:$src1))))>,
+ Requires<[HasV4T]>;
+
+
+// i8 -> i64 loads
+// We need a complexity of 120 here to override the preceding handling of
+// zextloadi8.
+let Predicates = [HasV4T], AddedComplexity = 120 in {
+def: Pat <(i64 (extloadi8 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
+ (i64 (COMBINE_Ir_V4 0, (LDrib_abs_V4 tglobaladdr:$addr)))>;
+
+def: Pat <(i64 (zextloadi8 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
+ (i64 (COMBINE_Ir_V4 0, (LDriub_abs_V4 tglobaladdr:$addr)))>;
+
+def: Pat <(i64 (sextloadi8 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
+ (i64 (SXTW (LDrib_abs_V4 tglobaladdr:$addr)))>;
+
+def: Pat <(i64 (extloadi8 FoldGlobalAddr:$addr)),
+ (i64 (COMBINE_Ir_V4 0, (LDrib_abs_V4 FoldGlobalAddr:$addr)))>;
+
+def: Pat <(i64 (zextloadi8 FoldGlobalAddr:$addr)),
+ (i64 (COMBINE_Ir_V4 0, (LDriub_abs_V4 FoldGlobalAddr:$addr)))>;
+
+def: Pat <(i64 (sextloadi8 FoldGlobalAddr:$addr)),
+ (i64 (SXTW (LDrib_abs_V4 FoldGlobalAddr:$addr)))>;
+}
+// i16 -> i64 loads
+// We need a complexity of 120 here to override the preceding handling of
+// zextloadi16.
+let AddedComplexity = 120 in {
+def: Pat <(i64 (extloadi16 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
+ (i64 (COMBINE_Ir_V4 0, (LDrih_abs_V4 tglobaladdr:$addr)))>,
+ Requires<[HasV4T]>;
+
+def: Pat <(i64 (zextloadi16 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
+ (i64 (COMBINE_Ir_V4 0, (LDriuh_abs_V4 tglobaladdr:$addr)))>,
+ Requires<[HasV4T]>;
+
+def: Pat <(i64 (sextloadi16 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
+ (i64 (SXTW (LDrih_abs_V4 tglobaladdr:$addr)))>,
+ Requires<[HasV4T]>;
+
+def: Pat <(i64 (extloadi16 FoldGlobalAddr:$addr)),
+ (i64 (COMBINE_Ir_V4 0, (LDrih_abs_V4 FoldGlobalAddr:$addr)))>,
+ Requires<[HasV4T]>;
+
+def: Pat <(i64 (zextloadi16 FoldGlobalAddr:$addr)),
+ (i64 (COMBINE_Ir_V4 0, (LDriuh_abs_V4 FoldGlobalAddr:$addr)))>,
+ Requires<[HasV4T]>;
+
+def: Pat <(i64 (sextloadi16 FoldGlobalAddr:$addr)),
+ (i64 (SXTW (LDrih_abs_V4 FoldGlobalAddr:$addr)))>,
+ Requires<[HasV4T]>;
+}
+// i32->i64 loads
+// We need a complexity of 120 here to override the preceding handling of
+// zextloadi32.
+let AddedComplexity = 120 in {
+def: Pat <(i64 (extloadi32 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
+ (i64 (COMBINE_Ir_V4 0, (LDriw_abs_V4 tglobaladdr:$addr)))>,
+ Requires<[HasV4T]>;
+
+def: Pat <(i64 (zextloadi32 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
+ (i64 (COMBINE_Ir_V4 0, (LDriw_abs_V4 tglobaladdr:$addr)))>,
+ Requires<[HasV4T]>;
+
+def: Pat <(i64 (sextloadi32 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
+ (i64 (SXTW (LDriw_abs_V4 tglobaladdr:$addr)))>,
+ Requires<[HasV4T]>;
+
+def: Pat <(i64 (extloadi32 FoldGlobalAddr:$addr)),
+ (i64 (COMBINE_Ir_V4 0, (LDriw_abs_V4 FoldGlobalAddr:$addr)))>,
+ Requires<[HasV4T]>;
+
+def: Pat <(i64 (zextloadi32 FoldGlobalAddr:$addr)),
+ (i64 (COMBINE_Ir_V4 0, (LDriw_abs_V4 FoldGlobalAddr:$addr)))>,
+ Requires<[HasV4T]>;
+
+def: Pat <(i64 (sextloadi32 FoldGlobalAddr:$addr)),
+ (i64 (SXTW (LDriw_abs_V4 FoldGlobalAddr:$addr)))>,
+ Requires<[HasV4T]>;
+}
// Indexed store double word - global address.
// memw(Rs+#u6:2)=#S8
@@ -3264,4 +3390,3 @@ def : Pat<(i32 (load FoldGlobalAddrGP:$addr)),
def : Pat<(atomic_load_32 FoldGlobalAddrGP:$addr),
(i32 (LDriw_abs_V4 FoldGlobalAddrGP:$addr))>,
Requires<[HasV4T]>;
-
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV5.td b/lib/Target/Hexagon/HexagonInstrInfoV5.td
index 92d098cc0446..9da607455811 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV5.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV5.td
@@ -26,22 +26,29 @@ def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1),
// Only works with single precision fp value.
// For double precision, use CONST64_float_real, as 64bit transfer
// can only hold 40-bit values - 32 from const ext + 8 bit immediate.
-let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in
-def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32imm:$src1),
- "$dst = ##$src1",
+// Make sure that complexity is more than the CONST32 pattern in
+// HexagonInstrInfo.td patterns.
+let isExtended = 1, opExtendable = 1, isMoveImm = 1, isReMaterializable = 1,
+isPredicable = 1, AddedComplexity = 30, validSubTargets = HasV5SubT,
+isCodeGenOnly = 1 in
+def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32Ext:$src1),
+ "$dst = #$src1",
[(set IntRegs:$dst, fpimm:$src1)]>,
Requires<[HasV5T]>;
+let isExtended = 1, opExtendable = 2, isPredicated = 1,
+neverHasSideEffects = 1, validSubTargets = HasV5SubT in
def TFRI_cPt_f : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, f32imm:$src2),
- "if ($src1) $dst = ##$src2",
+ (ins PredRegs:$src1, f32Ext:$src2),
+ "if ($src1) $dst = #$src2",
[]>,
Requires<[HasV5T]>;
-let isPredicated = 1 in
+let isExtended = 1, opExtendable = 2, isPredicated = 1, isPredicatedFalse = 1,
+neverHasSideEffects = 1, validSubTargets = HasV5SubT in
def TFRI_cNotPt_f : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, f32imm:$src2),
- "if (!$src1) $dst = ##$src2",
+ (ins PredRegs:$src1, f32Ext:$src2),
+                     "if (!$src1) $dst = #$src2",
[]>,
Requires<[HasV5T]>;
diff --git a/lib/Target/Hexagon/HexagonMCInstLower.cpp b/lib/Target/Hexagon/HexagonMCInstLower.cpp
index f011d51bd61a..bbb2fa4c42de 100644
--- a/lib/Target/Hexagon/HexagonMCInstLower.cpp
+++ b/lib/Target/Hexagon/HexagonMCInstLower.cpp
@@ -73,7 +73,7 @@ void llvm::HexagonLowerToMC(const MachineInstr* MI, HexagonMCInst& MCI,
AP.OutContext));
break;
case MachineOperand::MO_GlobalAddress:
- MCO = GetSymbolRef(MO, AP.Mang->getSymbol(MO.getGlobal()), AP);
+ MCO = GetSymbolRef(MO, AP.getSymbol(MO.getGlobal()), AP);
break;
case MachineOperand::MO_ExternalSymbol:
MCO = GetSymbolRef(MO, AP.GetExternalSymbolSymbol(MO.getSymbolName()),
diff --git a/lib/Target/MBlaze/MBlazeMachineFunction.cpp b/lib/Target/Hexagon/HexagonMachineFunctionInfo.cpp
index 2217b5477d6b..9579c8b6df16 100644
--- a/lib/Target/MBlaze/MBlazeMachineFunction.cpp
+++ b/lib/Target/Hexagon/HexagonMachineFunctionInfo.cpp
@@ -1,4 +1,4 @@
-//===-- MBlazeMachineFunctionInfo.cpp - Private data ----------------------===//
+//= HexagonMachineFunctionInfo.cpp - Hexagon machine function info *- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,8 +7,10 @@
//
//===----------------------------------------------------------------------===//
-#include "MBlazeMachineFunction.h"
+#include "HexagonMachineFunctionInfo.h"
using namespace llvm;
-void MBlazeFunctionInfo::anchor() { }
+// pin vtable to this file
+void HexagonMachineFunctionInfo::anchor() {}
+
diff --git a/lib/Target/Hexagon/HexagonMachineFunctionInfo.h b/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
index bd7b26a61906..a59c8c9dd2a2 100644
--- a/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
+++ b/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
@@ -1,4 +1,4 @@
-//=- HexagonMachineFuctionInfo.h - Hexagon machine function info --*- C++ -*-=//
+//=- HexagonMachineFunctionInfo.h - Hexagon machine function info -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -10,6 +10,7 @@
#ifndef HexagonMACHINEFUNCTIONINFO_H
#define HexagonMACHINEFUNCTIONINFO_H
+#include <map>
#include "llvm/CodeGen/MachineFunction.h"
namespace llvm {
@@ -30,9 +31,8 @@ class HexagonMachineFunctionInfo : public MachineFunctionInfo {
int VarArgsFrameIndex;
bool HasClobberLR;
bool HasEHReturn;
-
std::map<const MachineInstr*, unsigned> PacketInfo;
-
+ virtual void anchor();
public:
HexagonMachineFunctionInfo() : SRetReturnReg(0), HasClobberLR(0),
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 1388ad4f167d..c94f081ab13b 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -195,7 +195,6 @@ void VLIWMachineScheduler::schedule() {
void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
DAG = static_cast<VLIWMachineScheduler*>(dag);
SchedModel = DAG->getSchedModel();
- TRI = DAG->TRI;
Top.init(DAG, SchedModel);
Bot.init(DAG, SchedModel);
@@ -209,6 +208,8 @@ void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ delete Top.ResourceModel;
+ delete Bot.ResourceModel;
Top.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel());
Bot.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel());
@@ -223,7 +224,7 @@ void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) {
for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
- unsigned MinLatency = I->getMinLatency();
+ unsigned MinLatency = I->getLatency();
#ifndef NDEBUG
Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
#endif
@@ -242,7 +243,7 @@ void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) {
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
- unsigned MinLatency = I->getMinLatency();
+ unsigned MinLatency = I->getLatency();
#ifndef NDEBUG
Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
#endif
@@ -406,11 +407,11 @@ SUnit *ConvergingVLIWScheduler::SchedBoundary::pickOnlyChoice() {
#ifndef NDEBUG
void ConvergingVLIWScheduler::traceCandidate(const char *Label,
const ReadyQueue &Q,
- SUnit *SU, PressureElement P) {
+ SUnit *SU, PressureChange P) {
dbgs() << Label << " " << Q.getName() << " ";
if (P.isValid())
- dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
- << " ";
+ dbgs() << DAG->TRI->getRegPressureSetName(P.getPSet()) << ":"
+ << P.getUnitInc() << " ";
else
dbgs() << " ";
SU->dump(DAG);
@@ -456,9 +457,7 @@ static SUnit *getSingleUnscheduledSucc(SUnit *SU) {
// Constants used to denote relative importance of
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;
-static const unsigned PriorityTwo = 100;
-static const unsigned PriorityThree = 50;
-static const unsigned PriorityFour = 20;
+static const unsigned PriorityTwo = 50;
static const unsigned ScaleTwo = 10;
static const unsigned FactorOne = 2;
@@ -516,8 +515,8 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
ResCount += (NumNodesBlocking * ScaleTwo);
// Factor in reg pressure as a heuristic.
- ResCount -= (Delta.Excess.UnitIncrease*PriorityThree);
- ResCount -= (Delta.CriticalMax.UnitIncrease*PriorityThree);
+ ResCount -= (Delta.Excess.getUnitInc()*PriorityTwo);
+ ResCount -= (Delta.CriticalMax.getUnitInc()*PriorityTwo);
DEBUG(if (verbose) dbgs() << " Total(" << ResCount << ")");
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.h b/lib/Target/Hexagon/HexagonMachineScheduler.h
index f68dadf29210..8ac333fa7db3 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.h
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.h
@@ -190,7 +190,6 @@ class ConvergingVLIWScheduler : public MachineSchedStrategy {
VLIWMachineScheduler *DAG;
const TargetSchedModel *SchedModel;
- const TargetRegisterInfo *TRI;
// State of the top and bottom scheduled instruction boundaries.
SchedBoundary Top;
@@ -205,7 +204,7 @@ public:
};
ConvergingVLIWScheduler():
- DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
+ DAG(0), SchedModel(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
virtual void initialize(ScheduleDAGMI *dag);
@@ -234,7 +233,7 @@ protected:
SchedCandidate &Candidate);
#ifndef NDEBUG
void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
- PressureElement P = PressureElement());
+ PressureChange P = PressureChange());
#endif
};
diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp
index 05e696865fbf..f7c45132138e 100644
--- a/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -631,6 +631,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
.addMBB(jmpTarget);
assert(NewMI && "New Value Jump Instruction Not created!");
+ (void)NewMI;
if (cmpInstr->getOperand(0).isReg() &&
cmpInstr->getOperand(0).isKill())
cmpInstr->getOperand(0).setIsKill(false);
diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp
index 89e34068894e..5490ecd6e3e6 100644
--- a/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -29,7 +29,7 @@
//
// Note: The peephole pass makes the instrucstions like
// %vreg170<def> = SXTW %vreg166 or %vreg16<def> = NOT_p %vreg15<kill>
-// redundant and relies on some form of dead removal instrucions, like
+// redundant and relies on some form of dead removal instructions, like
// DCE or DIE to actually eliminate them.
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp
index d8b4e2fcb368..1786e9daa306 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -38,11 +38,9 @@
using namespace llvm;
-HexagonRegisterInfo::HexagonRegisterInfo(HexagonSubtarget &st,
- const HexagonInstrInfo &tii)
+HexagonRegisterInfo::HexagonRegisterInfo(HexagonSubtarget &st)
: HexagonGenRegisterInfo(Hexagon::R31),
- Subtarget(st),
- TII(tii) {
+ Subtarget(st) {
}
const uint16_t* HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction
@@ -130,6 +128,8 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// Addressable stack objects are accessed using neg. offsets from %fp.
MachineFunction &MF = *MI.getParent()->getParent();
+ const HexagonInstrInfo &TII =
+ *static_cast<const HexagonInstrInfo*>(MF.getTarget().getInstrInfo());
int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
MachineFrameInfo &MFI = *MF.getFrameInfo();
@@ -295,23 +295,5 @@ unsigned HexagonRegisterInfo::getStackRegister() const {
return Hexagon::R29;
}
-void HexagonRegisterInfo::getInitialFrameState(std::vector<MachineMove>
- &Moves) const
-{
- // VirtualFP = (R30 + #0).
- unsigned FPReg = getFrameRegister();
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(FPReg, 0);
- Moves.push_back(MachineMove(0, Dst, Src));
-}
-
-unsigned HexagonRegisterInfo::getEHExceptionRegister() const {
- llvm_unreachable("What is the exception register");
-}
-
-unsigned HexagonRegisterInfo::getEHHandlerRegister() const {
- llvm_unreachable("What is the exception handler register");
-}
-
#define GET_REGINFO_TARGET_DESC
#include "HexagonGenRegisterInfo.inc"
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.h b/lib/Target/Hexagon/HexagonRegisterInfo.h
index 8a3f94a3fd12..89af7c38cc6d 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.h
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.h
@@ -44,9 +44,8 @@ class Type;
struct HexagonRegisterInfo : public HexagonGenRegisterInfo {
HexagonSubtarget &Subtarget;
- const HexagonInstrInfo &TII;
- HexagonRegisterInfo(HexagonSubtarget &st, const HexagonInstrInfo &tii);
+ HexagonRegisterInfo(HexagonSubtarget &st);
/// Code Generation virtual methods...
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
@@ -78,12 +77,7 @@ struct HexagonRegisterInfo : public HexagonGenRegisterInfo {
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
unsigned getFrameRegister() const;
- void getInitialFrameState(std::vector<MachineMove> &Moves) const;
unsigned getStackRegister() const;
-
- // Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
};
} // end namespace llvm
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.td b/lib/Target/Hexagon/HexagonRegisterInfo.td
index fe41fc3bc60c..8ea1b7e75db7 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.td
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.td
@@ -57,8 +57,8 @@ let Namespace = "Hexagon" in {
let Aliases = [R];
}
- def subreg_loreg : SubRegIndex;
- def subreg_hireg : SubRegIndex;
+ def subreg_loreg : SubRegIndex<32>;
+ def subreg_hireg : SubRegIndex<32, 32>;
// Integer registers.
def R0 : Ri< 0, "r0">, DwarfRegNum<[0]>;
diff --git a/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp b/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
index a52c604505b8..c37bf9f0e800 100644
--- a/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
+++ b/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
@@ -27,7 +27,7 @@ HexagonSelectionDAGInfo::~HexagonSelectionDAGInfo() {
SDValue
HexagonSelectionDAGInfo::
-EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, SDValue Chain,
+EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
SDValue Dst, SDValue Src, SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
diff --git a/lib/Target/Hexagon/HexagonSelectionDAGInfo.h b/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
index 0673e4d35472..31f278a18574 100644
--- a/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
+++ b/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
@@ -26,7 +26,7 @@ public:
~HexagonSelectionDAGInfo();
virtual
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
diff --git a/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp b/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
new file mode 100644
index 000000000000..5166f8e1748c
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
@@ -0,0 +1,174 @@
+//=== HexagonSplitConst32AndConst64.cpp - split CONST32/Const64 into HI/LO ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// When the compiler is invoked with no small data, for instance, with the -G0
+// command line option, then all CONST32_* opcodes should be broken down into
+// appropriate LO and HI instructions. This splitting is done by this pass.
+// The only reason this is not done in the DAG lowering itself is that there
+// is no simple way of getting the register allocator to allot the same hard
+// register to the result of LO and HI instructions. This pass is always
+// scheduled after register allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "xfer"
+
+#include "HexagonTargetMachine.h"
+#include "HexagonSubtarget.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LatencyPriorityQueue.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MathExtras.h"
+#include <map>
+
+using namespace llvm;
+
+namespace {
+
+class HexagonSplitConst32AndConst64 : public MachineFunctionPass {
+ const HexagonTargetMachine& QTM;
+ const HexagonSubtarget &QST;
+
+ public:
+ static char ID;
+ HexagonSplitConst32AndConst64(const HexagonTargetMachine& TM)
+ : MachineFunctionPass(ID), QTM(TM), QST(*TM.getSubtargetImpl()) {}
+
+ const char *getPassName() const {
+ return "Hexagon Split Const32s and Const64s";
+ }
+ bool runOnMachineFunction(MachineFunction &Fn);
+};
+
+
+char HexagonSplitConst32AndConst64::ID = 0;
+
+
+bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) {
+
+ const TargetInstrInfo *TII = QTM.getInstrInfo();
+
+ // Loop over all of the basic blocks
+ for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
+ MBBb != MBBe; ++MBBb) {
+ MachineBasicBlock* MBB = MBBb;
+ // Traverse the basic block
+ MachineBasicBlock::iterator MII = MBB->begin();
+ MachineBasicBlock::iterator MIE = MBB->end ();
+ while (MII != MIE) {
+ MachineInstr *MI = MII;
+ int Opc = MI->getOpcode();
+ if (Opc == Hexagon::CONST32_set) {
+ int DestReg = MI->getOperand(0).getReg();
+ MachineOperand &Symbol = MI->getOperand (1);
+
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::LO), DestReg).addOperand(Symbol);
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::HI), DestReg).addOperand(Symbol);
+ // MBB->erase returns the iterator to the next instruction, which is the
+ // one we want to process next
+ MII = MBB->erase (MI);
+ continue;
+ }
+ else if (Opc == Hexagon::CONST32_set_jt) {
+ int DestReg = MI->getOperand(0).getReg();
+ MachineOperand &Symbol = MI->getOperand (1);
+
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::LO_jt), DestReg).addOperand(Symbol);
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::HI_jt), DestReg).addOperand(Symbol);
+ // MBB->erase returns the iterator to the next instruction, which is the
+ // one we want to process next
+ MII = MBB->erase (MI);
+ continue;
+ }
+ else if (Opc == Hexagon::CONST32_Label) {
+ int DestReg = MI->getOperand(0).getReg();
+ MachineOperand &Symbol = MI->getOperand (1);
+
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::LO_label), DestReg).addOperand(Symbol);
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::HI_label), DestReg).addOperand(Symbol);
+ // MBB->erase returns the iterator to the next instruction, which is the
+ // one we want to process next
+ MII = MBB->erase (MI);
+ continue;
+ }
+ else if (Opc == Hexagon::CONST32_Int_Real) {
+ int DestReg = MI->getOperand(0).getReg();
+ int64_t ImmValue = MI->getOperand(1).getImm ();
+
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::LOi), DestReg).addImm(ImmValue);
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::HIi), DestReg).addImm(ImmValue);
+ MII = MBB->erase (MI);
+ continue;
+ }
+ else if (Opc == Hexagon::CONST64_Int_Real) {
+ int DestReg = MI->getOperand(0).getReg();
+ int64_t ImmValue = MI->getOperand(1).getImm ();
+ unsigned DestLo =
+ QTM.getRegisterInfo()->getSubReg (DestReg, Hexagon::subreg_loreg);
+ unsigned DestHi =
+ QTM.getRegisterInfo()->getSubReg (DestReg, Hexagon::subreg_hireg);
+
+ int32_t LowWord = (ImmValue & 0xFFFFFFFF);
+ int32_t HighWord = (ImmValue >> 32) & 0xFFFFFFFF;
+
+ // Lower Registers Lower Half
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::LOi), DestLo).addImm(LowWord);
+ // Lower Registers Higher Half
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::HIi), DestLo).addImm(LowWord);
+ // Higher Registers Lower Half
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::LOi), DestHi).addImm(HighWord);
+ // Higher Registers Higher Half.
+ BuildMI (*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::HIi), DestHi).addImm(HighWord);
+ MII = MBB->erase (MI);
+ continue;
+ }
+ ++MII;
+ }
+ }
+
+ return true;
+}
+
+}
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *
+llvm::createHexagonSplitConst32AndConst64(const HexagonTargetMachine &TM) {
+ return new HexagonSplitConst32AndConst64(TM);
+}
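
[editor's note, not part of the patch] The CONST64_Int_Real case in the new pass above only does word-splitting arithmetic before emitting LO/HI pairs into the two 32-bit subregisters. A minimal stand-alone sketch of that arithmetic, in plain C++ (the Hexagon register/opcode names appear only in comments and are taken from the hunk above; the immediate value is an arbitrary example):

#include <cstdint>
#include <cstdio>

int main() {
  // Example CONST64_Int_Real immediate; value chosen only for illustration.
  const int64_t ImmValue = INT64_C(0x0123456789ABCDEF);

  // Same masks and shifts as in HexagonSplitConst32AndConst64 above.
  const int32_t LowWord  = static_cast<int32_t>(ImmValue & 0xFFFFFFFF);
  const int32_t HighWord = static_cast<int32_t>((ImmValue >> 32) & 0xFFFFFFFF);

  // The pass would then emit, for a 64-bit register pair Rhi:Rlo
  // (subreg_hireg : subreg_loreg):
  //   LOi Rlo, #LowWord  ; HIi Rlo, #LowWord   -- materialize the low word
  //   LOi Rhi, #HighWord ; HIi Rhi, #HighWord  -- materialize the high word
  std::printf("low word  = 0x%08x\n", static_cast<uint32_t>(LowWord));
  std::printf("high word = 0x%08x\n", static_cast<uint32_t>(HighWord));
  return 0;
}
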
diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp
index 07d5ce1d8ab0..fca67073ef38 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -86,3 +86,5 @@ HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
ModeIEEERndNear = false;
}
+// Pin the vtable to this file.
+void HexagonSubtarget::anchor() {}
diff --git a/lib/Target/Hexagon/HexagonSubtarget.h b/lib/Target/Hexagon/HexagonSubtarget.h
index 76a8fba195f3..690bef0d7296 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/lib/Target/Hexagon/HexagonSubtarget.h
@@ -27,7 +27,7 @@
namespace llvm {
class HexagonSubtarget : public HexagonGenSubtargetInfo {
-
+ virtual void anchor();
bool UseMemOps;
bool ModeIEEERndNear;
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp
index caa1ba49649f..bb950a0ea75a 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -15,6 +15,7 @@
#include "Hexagon.h"
#include "HexagonISelLowering.h"
#include "HexagonMachineScheduler.h"
+#include "HexagonTargetObjectFile.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Module.h"
#include "llvm/PassManager.h"
@@ -78,6 +79,7 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT,
FrameLowering(Subtarget),
InstrItins(&Subtarget.getInstrItineraryData()) {
setMCUseCFI(false);
+ initAsmInfo();
}
// addPassesForOptimizations - Allow the backend (target) to add Target
@@ -100,17 +102,25 @@ class HexagonPassConfig : public TargetPassConfig {
public:
HexagonPassConfig(HexagonTargetMachine *TM, PassManagerBase &PM)
: TargetPassConfig(TM, PM) {
- // Enable MI scheduler.
- if (!DisableHexagonMISched) {
+ // FIXME: Rather than calling enablePass(&MachineSchedulerID) below, define
+ // HexagonSubtarget::enableMachineScheduler() { return true; }.
+ // That will bypass the SelectionDAG VLIW scheduler, which is probably just
+ // hurting compile time and will be removed eventually anyway.
+ if (DisableHexagonMISched)
+ disablePass(&MachineSchedulerID);
+ else
enablePass(&MachineSchedulerID);
- MachineSchedRegistry::setDefault(createVLIWMachineSched);
- }
}
HexagonTargetMachine &getHexagonTargetMachine() const {
return getTM<HexagonTargetMachine>();
}
+ virtual ScheduleDAGInstrs *
+ createMachineScheduler(MachineSchedContext *C) const {
+ return createVLIWMachineSched(C);
+ }
+
virtual bool addInstSelector();
virtual bool addPreRegAlloc();
virtual bool addPostRegAlloc();
@@ -124,7 +134,7 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) {
}
bool HexagonPassConfig::addInstSelector() {
- const HexagonTargetMachine &TM = getHexagonTargetMachine();
+ HexagonTargetMachine &TM = getHexagonTargetMachine();
bool NoOpt = (getOptLevel() == CodeGenOpt::None);
if (!NoOpt)
@@ -156,9 +166,18 @@ bool HexagonPassConfig::addPostRegAlloc() {
}
bool HexagonPassConfig::addPreSched2() {
+ const HexagonTargetMachine &TM = getHexagonTargetMachine();
+ const HexagonTargetObjectFile &TLOF =
+ (const HexagonTargetObjectFile &)getTargetLowering()->getObjFileLowering();
+
+ addPass(createHexagonCopyToCombine());
if (getOptLevel() != CodeGenOpt::None)
addPass(&IfConverterID);
- return false;
+ if (!TLOF.IsSmallDataEnabled()) {
+ addPass(createHexagonSplitConst32AndConst64(TM));
+ printAndVerify("After hexagon split const32/64 pass");
+ }
+ return true;
}
bool HexagonPassConfig::addPreEmitPass() {
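
[editor's note, not part of the patch] The FIXME added to HexagonPassConfig suggests replacing the enablePass/disablePass calls with a HexagonSubtarget::enableMachineScheduler() override. A minimal sketch of that virtual-hook pattern, using stand-in types rather than the real llvm::TargetSubtargetInfo interface, purely to make the comment's intent concrete:

#include <cstdio>

// Stand-ins for the real LLVM classes; only the override pattern is shown.
struct SubtargetBase {
  virtual ~SubtargetBase() = default;
  // Default: leave the SelectionDAG (VLIW) scheduler in charge.
  virtual bool enableMachineScheduler() const { return false; }
};

struct HexagonSubtargetSketch : SubtargetBase {
  // What the FIXME proposes: opt the target into the MachineScheduler here
  // instead of calling enablePass(&MachineSchedulerID) in the pass config.
  bool enableMachineScheduler() const override { return true; }
};

int main() {
  HexagonSubtargetSketch ST;
  std::printf("MI scheduler enabled: %s\n",
              ST.enableMachineScheduler() ? "yes" : "no");
  return 0;
}
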
diff --git a/lib/Target/Hexagon/HexagonTargetObjectFile.cpp b/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
index 993fcfaed43e..7773cff2d21a 100644
--- a/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
+++ b/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
@@ -25,7 +25,8 @@
using namespace llvm;
static cl::opt<int> SmallDataThreshold("hexagon-small-data-threshold",
- cl::init(8), cl::Hidden);
+ cl::init(8), cl::Hidden,
+ cl::desc("The maximum size of an object in the sdata section"));
void HexagonTargetObjectFile::Initialize(MCContext &Ctx,
const TargetMachine &TM) {
@@ -46,6 +47,11 @@ void HexagonTargetObjectFile::Initialize(MCContext &Ctx,
static bool IsInSmallSection(uint64_t Size) {
return Size > 0 && Size <= (uint64_t)SmallDataThreshold;
}
+
+bool HexagonTargetObjectFile::IsSmallDataEnabled () const {
+ return SmallDataThreshold > 0;
+}
+
/// IsGlobalInSmallSection - Return true if this global value should be
/// placed into small data/bss section.
bool HexagonTargetObjectFile::IsGlobalInSmallSection(const GlobalValue *GV,
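
[editor's note, not part of the patch] The two predicates touched by this hunk are simple: an object is eligible for .sdata when its size is positive and no larger than the -hexagon-small-data-threshold value (default 8), and small data is considered enabled only while that threshold is positive, so -G0 disables it and lets addPreSched2 schedule the const32/64 splitting pass. A self-contained restatement of that logic, not the backend code itself:

#include <cstdint>
#include <cstdio>

// Mirrors the cl::opt default in HexagonTargetObjectFile.cpp.
static int SmallDataThreshold = 8;

static bool IsInSmallSection(uint64_t Size) {
  return Size > 0 && Size <= static_cast<uint64_t>(SmallDataThreshold);
}

static bool IsSmallDataEnabled() {
  // With -G0 the threshold is 0: nothing goes to .sdata, and the
  // CONST32/CONST64 splitting pass runs instead.
  return SmallDataThreshold > 0;
}

int main() {
  for (uint64_t Size : {0u, 4u, 8u, 16u})
    std::printf("size %2llu -> sdata: %s\n", (unsigned long long)Size,
                IsInSmallSection(Size) ? "yes" : "no");
  std::printf("small data enabled: %s\n", IsSmallDataEnabled() ? "yes" : "no");
  return 0;
}
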
diff --git a/lib/Target/Hexagon/HexagonTargetObjectFile.h b/lib/Target/Hexagon/HexagonTargetObjectFile.h
index 693345081ee3..41f6792ca8e5 100644
--- a/lib/Target/Hexagon/HexagonTargetObjectFile.h
+++ b/lib/Target/Hexagon/HexagonTargetObjectFile.h
@@ -29,6 +29,7 @@ namespace llvm {
bool IsGlobalInSmallSection(const GlobalValue *GV,
const TargetMachine &TM) const;
+ bool IsSmallDataEnabled () const;
const MCSection* SelectSectionForGlobal(const GlobalValue *GV,
SectionKind Kind,
Mangler *Mang,
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index 39995e10b96d..41e382dc072a 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -164,7 +164,6 @@ namespace {
unsigned, std::map <MachineInstr*, SUnit*>);
bool isNewifiable(MachineInstr* MI);
bool isCondInst(MachineInstr* MI);
- bool IsNewifyStore (MachineInstr* MI);
bool tryAllocateResourcesForConstExt(MachineInstr* MI);
bool canReserveResourcesForConstExt(MachineInstr *MI);
void reserveResourcesForConstExt(MachineInstr* MI);
@@ -383,104 +382,6 @@ static bool IsControlFlow(MachineInstr* MI) {
return (MI->getDesc().isTerminator() || MI->getDesc().isCall());
}
-// Function returns true if an instruction can be promoted to the new-value
-// store. It will always return false for v2 and v3.
-// It lists all the conditional and unconditional stores that can be promoted
-// to the new-value stores.
-
-bool HexagonPacketizerList::IsNewifyStore (MachineInstr* MI) {
- const HexagonRegisterInfo* QRI =
- (const HexagonRegisterInfo *) TM.getRegisterInfo();
- switch (MI->getOpcode())
- {
- // store byte
- case Hexagon::STrib:
- case Hexagon::STrib_indexed:
- case Hexagon::STrib_indexed_shl_V4:
- case Hexagon::STrib_shl_V4:
- case Hexagon::STb_GP_V4:
- case Hexagon::POST_STbri:
- case Hexagon::STrib_cPt:
- case Hexagon::STrib_cdnPt_V4:
- case Hexagon::STrib_cNotPt:
- case Hexagon::STrib_cdnNotPt_V4:
- case Hexagon::STrib_indexed_cPt:
- case Hexagon::STrib_indexed_cdnPt_V4:
- case Hexagon::STrib_indexed_cNotPt:
- case Hexagon::STrib_indexed_cdnNotPt_V4:
- case Hexagon::STrib_indexed_shl_cPt_V4:
- case Hexagon::STrib_indexed_shl_cdnPt_V4:
- case Hexagon::STrib_indexed_shl_cNotPt_V4:
- case Hexagon::STrib_indexed_shl_cdnNotPt_V4:
- case Hexagon::POST_STbri_cPt:
- case Hexagon::POST_STbri_cdnPt_V4:
- case Hexagon::POST_STbri_cNotPt:
- case Hexagon::POST_STbri_cdnNotPt_V4:
- case Hexagon::STb_GP_cPt_V4:
- case Hexagon::STb_GP_cNotPt_V4:
- case Hexagon::STb_GP_cdnPt_V4:
- case Hexagon::STb_GP_cdnNotPt_V4:
-
- // store halfword
- case Hexagon::STrih:
- case Hexagon::STrih_indexed:
- case Hexagon::STrih_indexed_shl_V4:
- case Hexagon::STrih_shl_V4:
- case Hexagon::STh_GP_V4:
- case Hexagon::POST_SThri:
- case Hexagon::STrih_cPt:
- case Hexagon::STrih_cdnPt_V4:
- case Hexagon::STrih_cNotPt:
- case Hexagon::STrih_cdnNotPt_V4:
- case Hexagon::STrih_indexed_cPt:
- case Hexagon::STrih_indexed_cdnPt_V4:
- case Hexagon::STrih_indexed_cNotPt:
- case Hexagon::STrih_indexed_cdnNotPt_V4:
- case Hexagon::STrih_indexed_shl_cPt_V4:
- case Hexagon::STrih_indexed_shl_cdnPt_V4:
- case Hexagon::STrih_indexed_shl_cNotPt_V4:
- case Hexagon::STrih_indexed_shl_cdnNotPt_V4:
- case Hexagon::POST_SThri_cPt:
- case Hexagon::POST_SThri_cdnPt_V4:
- case Hexagon::POST_SThri_cNotPt:
- case Hexagon::POST_SThri_cdnNotPt_V4:
- case Hexagon::STh_GP_cPt_V4:
- case Hexagon::STh_GP_cNotPt_V4:
- case Hexagon::STh_GP_cdnPt_V4:
- case Hexagon::STh_GP_cdnNotPt_V4:
-
- // store word
- case Hexagon::STriw:
- case Hexagon::STriw_indexed:
- case Hexagon::STriw_indexed_shl_V4:
- case Hexagon::STriw_shl_V4:
- case Hexagon::STw_GP_V4:
- case Hexagon::POST_STwri:
- case Hexagon::STriw_cPt:
- case Hexagon::STriw_cdnPt_V4:
- case Hexagon::STriw_cNotPt:
- case Hexagon::STriw_cdnNotPt_V4:
- case Hexagon::STriw_indexed_cPt:
- case Hexagon::STriw_indexed_cdnPt_V4:
- case Hexagon::STriw_indexed_cNotPt:
- case Hexagon::STriw_indexed_cdnNotPt_V4:
- case Hexagon::STriw_indexed_shl_cPt_V4:
- case Hexagon::STriw_indexed_shl_cdnPt_V4:
- case Hexagon::STriw_indexed_shl_cNotPt_V4:
- case Hexagon::STriw_indexed_shl_cdnNotPt_V4:
- case Hexagon::POST_STwri_cPt:
- case Hexagon::POST_STwri_cdnPt_V4:
- case Hexagon::POST_STwri_cNotPt:
- case Hexagon::POST_STwri_cdnNotPt_V4:
- case Hexagon::STw_GP_cPt_V4:
- case Hexagon::STw_GP_cNotPt_V4:
- case Hexagon::STw_GP_cdnPt_V4:
- case Hexagon::STw_GP_cdnNotPt_V4:
- return QRI->Subtarget.hasV4TOps();
- }
- return false;
-}
-
static bool IsLoopN(MachineInstr *MI) {
return (MI->getOpcode() == Hexagon::LOOP0_i ||
MI->getOpcode() == Hexagon::LOOP0_r);
@@ -498,769 +399,11 @@ static bool DoesModifyCalleeSavedReg(MachineInstr *MI,
return false;
}
-// Return the new value instruction for a given store.
-static int GetDotNewOp(const int opc) {
- switch (opc) {
- default: llvm_unreachable("Unknown .new type");
- // store new value byte
- case Hexagon::STrib:
- return Hexagon::STrib_nv_V4;
-
- case Hexagon::STrib_indexed:
- return Hexagon::STrib_indexed_nv_V4;
-
- case Hexagon::STrib_indexed_shl_V4:
- return Hexagon::STrib_indexed_shl_nv_V4;
-
- case Hexagon::STrib_shl_V4:
- return Hexagon::STrib_shl_nv_V4;
-
- case Hexagon::STb_GP_V4:
- return Hexagon::STb_GP_nv_V4;
-
- case Hexagon::POST_STbri:
- return Hexagon::POST_STbri_nv_V4;
-
- case Hexagon::STrib_cPt:
- return Hexagon::STrib_cPt_nv_V4;
-
- case Hexagon::STrib_cdnPt_V4:
- return Hexagon::STrib_cdnPt_nv_V4;
-
- case Hexagon::STrib_cNotPt:
- return Hexagon::STrib_cNotPt_nv_V4;
-
- case Hexagon::STrib_cdnNotPt_V4:
- return Hexagon::STrib_cdnNotPt_nv_V4;
-
- case Hexagon::STrib_indexed_cPt:
- return Hexagon::STrib_indexed_cPt_nv_V4;
-
- case Hexagon::STrib_indexed_cdnPt_V4:
- return Hexagon::STrib_indexed_cdnPt_nv_V4;
-
- case Hexagon::STrib_indexed_cNotPt:
- return Hexagon::STrib_indexed_cNotPt_nv_V4;
-
- case Hexagon::STrib_indexed_cdnNotPt_V4:
- return Hexagon::STrib_indexed_cdnNotPt_nv_V4;
-
- case Hexagon::STrib_indexed_shl_cPt_V4:
- return Hexagon::STrib_indexed_shl_cPt_nv_V4;
-
- case Hexagon::STrib_indexed_shl_cdnPt_V4:
- return Hexagon::STrib_indexed_shl_cdnPt_nv_V4;
-
- case Hexagon::STrib_indexed_shl_cNotPt_V4:
- return Hexagon::STrib_indexed_shl_cNotPt_nv_V4;
-
- case Hexagon::STrib_indexed_shl_cdnNotPt_V4:
- return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4;
-
- case Hexagon::POST_STbri_cPt:
- return Hexagon::POST_STbri_cPt_nv_V4;
-
- case Hexagon::POST_STbri_cdnPt_V4:
- return Hexagon::POST_STbri_cdnPt_nv_V4;
-
- case Hexagon::POST_STbri_cNotPt:
- return Hexagon::POST_STbri_cNotPt_nv_V4;
-
- case Hexagon::POST_STbri_cdnNotPt_V4:
- return Hexagon::POST_STbri_cdnNotPt_nv_V4;
-
- case Hexagon::STb_GP_cPt_V4:
- return Hexagon::STb_GP_cPt_nv_V4;
-
- case Hexagon::STb_GP_cNotPt_V4:
- return Hexagon::STb_GP_cNotPt_nv_V4;
-
- case Hexagon::STb_GP_cdnPt_V4:
- return Hexagon::STb_GP_cdnPt_nv_V4;
-
- case Hexagon::STb_GP_cdnNotPt_V4:
- return Hexagon::STb_GP_cdnNotPt_nv_V4;
-
- // store new value halfword
- case Hexagon::STrih:
- return Hexagon::STrih_nv_V4;
-
- case Hexagon::STrih_indexed:
- return Hexagon::STrih_indexed_nv_V4;
-
- case Hexagon::STrih_indexed_shl_V4:
- return Hexagon::STrih_indexed_shl_nv_V4;
-
- case Hexagon::STrih_shl_V4:
- return Hexagon::STrih_shl_nv_V4;
-
- case Hexagon::STh_GP_V4:
- return Hexagon::STh_GP_nv_V4;
-
- case Hexagon::POST_SThri:
- return Hexagon::POST_SThri_nv_V4;
-
- case Hexagon::STrih_cPt:
- return Hexagon::STrih_cPt_nv_V4;
-
- case Hexagon::STrih_cdnPt_V4:
- return Hexagon::STrih_cdnPt_nv_V4;
-
- case Hexagon::STrih_cNotPt:
- return Hexagon::STrih_cNotPt_nv_V4;
-
- case Hexagon::STrih_cdnNotPt_V4:
- return Hexagon::STrih_cdnNotPt_nv_V4;
-
- case Hexagon::STrih_indexed_cPt:
- return Hexagon::STrih_indexed_cPt_nv_V4;
-
- case Hexagon::STrih_indexed_cdnPt_V4:
- return Hexagon::STrih_indexed_cdnPt_nv_V4;
-
- case Hexagon::STrih_indexed_cNotPt:
- return Hexagon::STrih_indexed_cNotPt_nv_V4;
-
- case Hexagon::STrih_indexed_cdnNotPt_V4:
- return Hexagon::STrih_indexed_cdnNotPt_nv_V4;
-
- case Hexagon::STrih_indexed_shl_cPt_V4:
- return Hexagon::STrih_indexed_shl_cPt_nv_V4;
-
- case Hexagon::STrih_indexed_shl_cdnPt_V4:
- return Hexagon::STrih_indexed_shl_cdnPt_nv_V4;
-
- case Hexagon::STrih_indexed_shl_cNotPt_V4:
- return Hexagon::STrih_indexed_shl_cNotPt_nv_V4;
-
- case Hexagon::STrih_indexed_shl_cdnNotPt_V4:
- return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4;
-
- case Hexagon::POST_SThri_cPt:
- return Hexagon::POST_SThri_cPt_nv_V4;
-
- case Hexagon::POST_SThri_cdnPt_V4:
- return Hexagon::POST_SThri_cdnPt_nv_V4;
-
- case Hexagon::POST_SThri_cNotPt:
- return Hexagon::POST_SThri_cNotPt_nv_V4;
-
- case Hexagon::POST_SThri_cdnNotPt_V4:
- return Hexagon::POST_SThri_cdnNotPt_nv_V4;
-
- case Hexagon::STh_GP_cPt_V4:
- return Hexagon::STh_GP_cPt_nv_V4;
-
- case Hexagon::STh_GP_cNotPt_V4:
- return Hexagon::STh_GP_cNotPt_nv_V4;
-
- case Hexagon::STh_GP_cdnPt_V4:
- return Hexagon::STh_GP_cdnPt_nv_V4;
-
- case Hexagon::STh_GP_cdnNotPt_V4:
- return Hexagon::STh_GP_cdnNotPt_nv_V4;
-
- // store new value word
- case Hexagon::STriw:
- return Hexagon::STriw_nv_V4;
-
- case Hexagon::STriw_indexed:
- return Hexagon::STriw_indexed_nv_V4;
-
- case Hexagon::STriw_indexed_shl_V4:
- return Hexagon::STriw_indexed_shl_nv_V4;
-
- case Hexagon::STriw_shl_V4:
- return Hexagon::STriw_shl_nv_V4;
-
- case Hexagon::STw_GP_V4:
- return Hexagon::STw_GP_nv_V4;
-
- case Hexagon::POST_STwri:
- return Hexagon::POST_STwri_nv_V4;
-
- case Hexagon::STriw_cPt:
- return Hexagon::STriw_cPt_nv_V4;
-
- case Hexagon::STriw_cdnPt_V4:
- return Hexagon::STriw_cdnPt_nv_V4;
-
- case Hexagon::STriw_cNotPt:
- return Hexagon::STriw_cNotPt_nv_V4;
-
- case Hexagon::STriw_cdnNotPt_V4:
- return Hexagon::STriw_cdnNotPt_nv_V4;
-
- case Hexagon::STriw_indexed_cPt:
- return Hexagon::STriw_indexed_cPt_nv_V4;
-
- case Hexagon::STriw_indexed_cdnPt_V4:
- return Hexagon::STriw_indexed_cdnPt_nv_V4;
-
- case Hexagon::STriw_indexed_cNotPt:
- return Hexagon::STriw_indexed_cNotPt_nv_V4;
-
- case Hexagon::STriw_indexed_cdnNotPt_V4:
- return Hexagon::STriw_indexed_cdnNotPt_nv_V4;
-
- case Hexagon::STriw_indexed_shl_cPt_V4:
- return Hexagon::STriw_indexed_shl_cPt_nv_V4;
-
- case Hexagon::STriw_indexed_shl_cdnPt_V4:
- return Hexagon::STriw_indexed_shl_cdnPt_nv_V4;
-
- case Hexagon::STriw_indexed_shl_cNotPt_V4:
- return Hexagon::STriw_indexed_shl_cNotPt_nv_V4;
-
- case Hexagon::STriw_indexed_shl_cdnNotPt_V4:
- return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4;
-
- case Hexagon::POST_STwri_cPt:
- return Hexagon::POST_STwri_cPt_nv_V4;
-
- case Hexagon::POST_STwri_cdnPt_V4:
- return Hexagon::POST_STwri_cdnPt_nv_V4;
-
- case Hexagon::POST_STwri_cNotPt:
- return Hexagon::POST_STwri_cNotPt_nv_V4;
-
- case Hexagon::POST_STwri_cdnNotPt_V4:
- return Hexagon::POST_STwri_cdnNotPt_nv_V4;
-
- case Hexagon::STw_GP_cPt_V4:
- return Hexagon::STw_GP_cPt_nv_V4;
-
- case Hexagon::STw_GP_cNotPt_V4:
- return Hexagon::STw_GP_cNotPt_nv_V4;
-
- case Hexagon::STw_GP_cdnPt_V4:
- return Hexagon::STw_GP_cdnPt_nv_V4;
-
- case Hexagon::STw_GP_cdnNotPt_V4:
- return Hexagon::STw_GP_cdnNotPt_nv_V4;
-
- }
-}
-
-// Return .new predicate version for an instruction
-static int GetDotNewPredOp(MachineInstr *MI,
- const MachineBranchProbabilityInfo *MBPI,
- const HexagonInstrInfo *QII) {
- switch (MI->getOpcode()) {
- default: llvm_unreachable("Unknown .new type");
- // Conditional stores
- // Store byte conditionally
- case Hexagon::STrib_cPt :
- return Hexagon::STrib_cdnPt_V4;
-
- case Hexagon::STrib_cNotPt :
- return Hexagon::STrib_cdnNotPt_V4;
-
- case Hexagon::STrib_indexed_cPt :
- return Hexagon::STrib_indexed_cdnPt_V4;
-
- case Hexagon::STrib_indexed_cNotPt :
- return Hexagon::STrib_indexed_cdnNotPt_V4;
-
- case Hexagon::STrib_imm_cPt_V4 :
- return Hexagon::STrib_imm_cdnPt_V4;
-
- case Hexagon::STrib_imm_cNotPt_V4 :
- return Hexagon::STrib_imm_cdnNotPt_V4;
-
- case Hexagon::POST_STbri_cPt :
- return Hexagon::POST_STbri_cdnPt_V4;
-
- case Hexagon::POST_STbri_cNotPt :
- return Hexagon::POST_STbri_cdnNotPt_V4;
-
- case Hexagon::STrib_indexed_shl_cPt_V4 :
- return Hexagon::STrib_indexed_shl_cdnPt_V4;
-
- case Hexagon::STrib_indexed_shl_cNotPt_V4 :
- return Hexagon::STrib_indexed_shl_cdnNotPt_V4;
-
- case Hexagon::STb_GP_cPt_V4 :
- return Hexagon::STb_GP_cdnPt_V4;
-
- case Hexagon::STb_GP_cNotPt_V4 :
- return Hexagon::STb_GP_cdnNotPt_V4;
-
- // Store doubleword conditionally
- case Hexagon::STrid_cPt :
- return Hexagon::STrid_cdnPt_V4;
-
- case Hexagon::STrid_cNotPt :
- return Hexagon::STrid_cdnNotPt_V4;
-
- case Hexagon::STrid_indexed_cPt :
- return Hexagon::STrid_indexed_cdnPt_V4;
-
- case Hexagon::STrid_indexed_cNotPt :
- return Hexagon::STrid_indexed_cdnNotPt_V4;
-
- case Hexagon::STrid_indexed_shl_cPt_V4 :
- return Hexagon::STrid_indexed_shl_cdnPt_V4;
-
- case Hexagon::STrid_indexed_shl_cNotPt_V4 :
- return Hexagon::STrid_indexed_shl_cdnNotPt_V4;
-
- case Hexagon::POST_STdri_cPt :
- return Hexagon::POST_STdri_cdnPt_V4;
-
- case Hexagon::POST_STdri_cNotPt :
- return Hexagon::POST_STdri_cdnNotPt_V4;
-
- case Hexagon::STd_GP_cPt_V4 :
- return Hexagon::STd_GP_cdnPt_V4;
-
- case Hexagon::STd_GP_cNotPt_V4 :
- return Hexagon::STd_GP_cdnNotPt_V4;
-
- // Store halfword conditionally
- case Hexagon::STrih_cPt :
- return Hexagon::STrih_cdnPt_V4;
-
- case Hexagon::STrih_cNotPt :
- return Hexagon::STrih_cdnNotPt_V4;
-
- case Hexagon::STrih_indexed_cPt :
- return Hexagon::STrih_indexed_cdnPt_V4;
-
- case Hexagon::STrih_indexed_cNotPt :
- return Hexagon::STrih_indexed_cdnNotPt_V4;
-
- case Hexagon::STrih_imm_cPt_V4 :
- return Hexagon::STrih_imm_cdnPt_V4;
-
- case Hexagon::STrih_imm_cNotPt_V4 :
- return Hexagon::STrih_imm_cdnNotPt_V4;
-
- case Hexagon::STrih_indexed_shl_cPt_V4 :
- return Hexagon::STrih_indexed_shl_cdnPt_V4;
-
- case Hexagon::STrih_indexed_shl_cNotPt_V4 :
- return Hexagon::STrih_indexed_shl_cdnNotPt_V4;
-
- case Hexagon::POST_SThri_cPt :
- return Hexagon::POST_SThri_cdnPt_V4;
-
- case Hexagon::POST_SThri_cNotPt :
- return Hexagon::POST_SThri_cdnNotPt_V4;
-
- case Hexagon::STh_GP_cPt_V4 :
- return Hexagon::STh_GP_cdnPt_V4;
-
- case Hexagon::STh_GP_cNotPt_V4 :
- return Hexagon::STh_GP_cdnNotPt_V4;
-
- // Store word conditionally
- case Hexagon::STriw_cPt :
- return Hexagon::STriw_cdnPt_V4;
-
- case Hexagon::STriw_cNotPt :
- return Hexagon::STriw_cdnNotPt_V4;
-
- case Hexagon::STriw_indexed_cPt :
- return Hexagon::STriw_indexed_cdnPt_V4;
-
- case Hexagon::STriw_indexed_cNotPt :
- return Hexagon::STriw_indexed_cdnNotPt_V4;
-
- case Hexagon::STriw_imm_cPt_V4 :
- return Hexagon::STriw_imm_cdnPt_V4;
-
- case Hexagon::STriw_imm_cNotPt_V4 :
- return Hexagon::STriw_imm_cdnNotPt_V4;
-
- case Hexagon::STriw_indexed_shl_cPt_V4 :
- return Hexagon::STriw_indexed_shl_cdnPt_V4;
-
- case Hexagon::STriw_indexed_shl_cNotPt_V4 :
- return Hexagon::STriw_indexed_shl_cdnNotPt_V4;
-
- case Hexagon::POST_STwri_cPt :
- return Hexagon::POST_STwri_cdnPt_V4;
-
- case Hexagon::POST_STwri_cNotPt :
- return Hexagon::POST_STwri_cdnNotPt_V4;
-
- case Hexagon::STw_GP_cPt_V4 :
- return Hexagon::STw_GP_cdnPt_V4;
-
- case Hexagon::STw_GP_cNotPt_V4 :
- return Hexagon::STw_GP_cdnNotPt_V4;
-
- // Condtional Jumps
- case Hexagon::JMP_t:
- case Hexagon::JMP_f:
- return QII->getDotNewPredJumpOp(MI, MBPI);
-
- case Hexagon::JMPR_t:
- return Hexagon::JMPR_tnew_tV3;
-
- case Hexagon::JMPR_f:
- return Hexagon::JMPR_fnew_tV3;
-
- // Conditional Transfers
- case Hexagon::TFR_cPt:
- return Hexagon::TFR_cdnPt;
-
- case Hexagon::TFR_cNotPt:
- return Hexagon::TFR_cdnNotPt;
-
- case Hexagon::TFRI_cPt:
- return Hexagon::TFRI_cdnPt;
-
- case Hexagon::TFRI_cNotPt:
- return Hexagon::TFRI_cdnNotPt;
-
- // Load double word
- case Hexagon::LDrid_cPt :
- return Hexagon::LDrid_cdnPt;
-
- case Hexagon::LDrid_cNotPt :
- return Hexagon::LDrid_cdnNotPt;
-
- case Hexagon::LDrid_indexed_cPt :
- return Hexagon::LDrid_indexed_cdnPt;
-
- case Hexagon::LDrid_indexed_cNotPt :
- return Hexagon::LDrid_indexed_cdnNotPt;
-
- case Hexagon::POST_LDrid_cPt :
- return Hexagon::POST_LDrid_cdnPt_V4;
-
- case Hexagon::POST_LDrid_cNotPt :
- return Hexagon::POST_LDrid_cdnNotPt_V4;
-
- // Load word
- case Hexagon::LDriw_cPt :
- return Hexagon::LDriw_cdnPt;
-
- case Hexagon::LDriw_cNotPt :
- return Hexagon::LDriw_cdnNotPt;
-
- case Hexagon::LDriw_indexed_cPt :
- return Hexagon::LDriw_indexed_cdnPt;
-
- case Hexagon::LDriw_indexed_cNotPt :
- return Hexagon::LDriw_indexed_cdnNotPt;
-
- case Hexagon::POST_LDriw_cPt :
- return Hexagon::POST_LDriw_cdnPt_V4;
-
- case Hexagon::POST_LDriw_cNotPt :
- return Hexagon::POST_LDriw_cdnNotPt_V4;
-
- // Load halfword
- case Hexagon::LDrih_cPt :
- return Hexagon::LDrih_cdnPt;
-
- case Hexagon::LDrih_cNotPt :
- return Hexagon::LDrih_cdnNotPt;
-
- case Hexagon::LDrih_indexed_cPt :
- return Hexagon::LDrih_indexed_cdnPt;
-
- case Hexagon::LDrih_indexed_cNotPt :
- return Hexagon::LDrih_indexed_cdnNotPt;
-
- case Hexagon::POST_LDrih_cPt :
- return Hexagon::POST_LDrih_cdnPt_V4;
-
- case Hexagon::POST_LDrih_cNotPt :
- return Hexagon::POST_LDrih_cdnNotPt_V4;
-
- // Load byte
- case Hexagon::LDrib_cPt :
- return Hexagon::LDrib_cdnPt;
-
- case Hexagon::LDrib_cNotPt :
- return Hexagon::LDrib_cdnNotPt;
-
- case Hexagon::LDrib_indexed_cPt :
- return Hexagon::LDrib_indexed_cdnPt;
-
- case Hexagon::LDrib_indexed_cNotPt :
- return Hexagon::LDrib_indexed_cdnNotPt;
-
- case Hexagon::POST_LDrib_cPt :
- return Hexagon::POST_LDrib_cdnPt_V4;
-
- case Hexagon::POST_LDrib_cNotPt :
- return Hexagon::POST_LDrib_cdnNotPt_V4;
-
- // Load unsigned halfword
- case Hexagon::LDriuh_cPt :
- return Hexagon::LDriuh_cdnPt;
-
- case Hexagon::LDriuh_cNotPt :
- return Hexagon::LDriuh_cdnNotPt;
-
- case Hexagon::LDriuh_indexed_cPt :
- return Hexagon::LDriuh_indexed_cdnPt;
-
- case Hexagon::LDriuh_indexed_cNotPt :
- return Hexagon::LDriuh_indexed_cdnNotPt;
-
- case Hexagon::POST_LDriuh_cPt :
- return Hexagon::POST_LDriuh_cdnPt_V4;
-
- case Hexagon::POST_LDriuh_cNotPt :
- return Hexagon::POST_LDriuh_cdnNotPt_V4;
-
- // Load unsigned byte
- case Hexagon::LDriub_cPt :
- return Hexagon::LDriub_cdnPt;
-
- case Hexagon::LDriub_cNotPt :
- return Hexagon::LDriub_cdnNotPt;
-
- case Hexagon::LDriub_indexed_cPt :
- return Hexagon::LDriub_indexed_cdnPt;
-
- case Hexagon::LDriub_indexed_cNotPt :
- return Hexagon::LDriub_indexed_cdnNotPt;
-
- case Hexagon::POST_LDriub_cPt :
- return Hexagon::POST_LDriub_cdnPt_V4;
-
- case Hexagon::POST_LDriub_cNotPt :
- return Hexagon::POST_LDriub_cdnNotPt_V4;
-
- // V4 indexed+scaled load
-
- case Hexagon::LDrid_indexed_shl_cPt_V4 :
- return Hexagon::LDrid_indexed_shl_cdnPt_V4;
-
- case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
- return Hexagon::LDrid_indexed_shl_cdnNotPt_V4;
-
- case Hexagon::LDrib_indexed_shl_cPt_V4 :
- return Hexagon::LDrib_indexed_shl_cdnPt_V4;
-
- case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
- return Hexagon::LDrib_indexed_shl_cdnNotPt_V4;
-
- case Hexagon::LDriub_indexed_shl_cPt_V4 :
- return Hexagon::LDriub_indexed_shl_cdnPt_V4;
-
- case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
- return Hexagon::LDriub_indexed_shl_cdnNotPt_V4;
-
- case Hexagon::LDrih_indexed_shl_cPt_V4 :
- return Hexagon::LDrih_indexed_shl_cdnPt_V4;
-
- case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
- return Hexagon::LDrih_indexed_shl_cdnNotPt_V4;
-
- case Hexagon::LDriuh_indexed_shl_cPt_V4 :
- return Hexagon::LDriuh_indexed_shl_cdnPt_V4;
-
- case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
- return Hexagon::LDriuh_indexed_shl_cdnNotPt_V4;
-
- case Hexagon::LDriw_indexed_shl_cPt_V4 :
- return Hexagon::LDriw_indexed_shl_cdnPt_V4;
-
- case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
- return Hexagon::LDriw_indexed_shl_cdnNotPt_V4;
-
- // V4 global address load
-
- case Hexagon::LDd_GP_cPt_V4:
- return Hexagon::LDd_GP_cdnPt_V4;
-
- case Hexagon::LDd_GP_cNotPt_V4:
- return Hexagon::LDd_GP_cdnNotPt_V4;
-
- case Hexagon::LDb_GP_cPt_V4:
- return Hexagon::LDb_GP_cdnPt_V4;
-
- case Hexagon::LDb_GP_cNotPt_V4:
- return Hexagon::LDb_GP_cdnNotPt_V4;
-
- case Hexagon::LDub_GP_cPt_V4:
- return Hexagon::LDub_GP_cdnPt_V4;
-
- case Hexagon::LDub_GP_cNotPt_V4:
- return Hexagon::LDub_GP_cdnNotPt_V4;
-
- case Hexagon::LDh_GP_cPt_V4:
- return Hexagon::LDh_GP_cdnPt_V4;
-
- case Hexagon::LDh_GP_cNotPt_V4:
- return Hexagon::LDh_GP_cdnNotPt_V4;
-
- case Hexagon::LDuh_GP_cPt_V4:
- return Hexagon::LDuh_GP_cdnPt_V4;
-
- case Hexagon::LDuh_GP_cNotPt_V4:
- return Hexagon::LDuh_GP_cdnNotPt_V4;
-
- case Hexagon::LDw_GP_cPt_V4:
- return Hexagon::LDw_GP_cdnPt_V4;
-
- case Hexagon::LDw_GP_cNotPt_V4:
- return Hexagon::LDw_GP_cdnNotPt_V4;
-
- // Conditional store new-value byte
- case Hexagon::STrib_cPt_nv_V4 :
- return Hexagon::STrib_cdnPt_nv_V4;
- case Hexagon::STrib_cNotPt_nv_V4 :
- return Hexagon::STrib_cdnNotPt_nv_V4;
-
- case Hexagon::STrib_indexed_cPt_nv_V4 :
- return Hexagon::STrib_indexed_cdnPt_nv_V4;
- case Hexagon::STrib_indexed_cNotPt_nv_V4 :
- return Hexagon::STrib_indexed_cdnNotPt_nv_V4;
-
- case Hexagon::STrib_indexed_shl_cPt_nv_V4 :
- return Hexagon::STrib_indexed_shl_cdnPt_nv_V4;
- case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 :
- return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4;
-
- case Hexagon::POST_STbri_cPt_nv_V4 :
- return Hexagon::POST_STbri_cdnPt_nv_V4;
- case Hexagon::POST_STbri_cNotPt_nv_V4 :
- return Hexagon::POST_STbri_cdnNotPt_nv_V4;
-
- case Hexagon::STb_GP_cPt_nv_V4 :
- return Hexagon::STb_GP_cdnPt_nv_V4;
-
- case Hexagon::STb_GP_cNotPt_nv_V4 :
- return Hexagon::STb_GP_cdnNotPt_nv_V4;
-
- // Conditional store new-value halfword
- case Hexagon::STrih_cPt_nv_V4 :
- return Hexagon::STrih_cdnPt_nv_V4;
- case Hexagon::STrih_cNotPt_nv_V4 :
- return Hexagon::STrih_cdnNotPt_nv_V4;
-
- case Hexagon::STrih_indexed_cPt_nv_V4 :
- return Hexagon::STrih_indexed_cdnPt_nv_V4;
- case Hexagon::STrih_indexed_cNotPt_nv_V4 :
- return Hexagon::STrih_indexed_cdnNotPt_nv_V4;
-
- case Hexagon::STrih_indexed_shl_cPt_nv_V4 :
- return Hexagon::STrih_indexed_shl_cdnPt_nv_V4;
- case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 :
- return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4;
-
- case Hexagon::POST_SThri_cPt_nv_V4 :
- return Hexagon::POST_SThri_cdnPt_nv_V4;
- case Hexagon::POST_SThri_cNotPt_nv_V4 :
- return Hexagon::POST_SThri_cdnNotPt_nv_V4;
-
- case Hexagon::STh_GP_cPt_nv_V4 :
- return Hexagon::STh_GP_cdnPt_nv_V4;
-
- case Hexagon::STh_GP_cNotPt_nv_V4 :
- return Hexagon::STh_GP_cdnNotPt_nv_V4;
-
- // Conditional store new-value word
- case Hexagon::STriw_cPt_nv_V4 :
- return Hexagon::STriw_cdnPt_nv_V4;
- case Hexagon::STriw_cNotPt_nv_V4 :
- return Hexagon::STriw_cdnNotPt_nv_V4;
-
- case Hexagon::STriw_indexed_cPt_nv_V4 :
- return Hexagon::STriw_indexed_cdnPt_nv_V4;
- case Hexagon::STriw_indexed_cNotPt_nv_V4 :
- return Hexagon::STriw_indexed_cdnNotPt_nv_V4;
-
- case Hexagon::STriw_indexed_shl_cPt_nv_V4 :
- return Hexagon::STriw_indexed_shl_cdnPt_nv_V4;
- case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 :
- return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4;
-
- case Hexagon::POST_STwri_cPt_nv_V4 :
- return Hexagon::POST_STwri_cdnPt_nv_V4;
- case Hexagon::POST_STwri_cNotPt_nv_V4:
- return Hexagon::POST_STwri_cdnNotPt_nv_V4;
-
- case Hexagon::STw_GP_cPt_nv_V4 :
- return Hexagon::STw_GP_cdnPt_nv_V4;
-
- case Hexagon::STw_GP_cNotPt_nv_V4 :
- return Hexagon::STw_GP_cdnNotPt_nv_V4;
-
- // Conditional add
- case Hexagon::ADD_ri_cPt :
- return Hexagon::ADD_ri_cdnPt;
- case Hexagon::ADD_ri_cNotPt :
- return Hexagon::ADD_ri_cdnNotPt;
-
- case Hexagon::ADD_rr_cPt :
- return Hexagon::ADD_rr_cdnPt;
- case Hexagon::ADD_rr_cNotPt :
- return Hexagon::ADD_rr_cdnNotPt;
-
- // Conditional logical Operations
- case Hexagon::XOR_rr_cPt :
- return Hexagon::XOR_rr_cdnPt;
- case Hexagon::XOR_rr_cNotPt :
- return Hexagon::XOR_rr_cdnNotPt;
-
- case Hexagon::AND_rr_cPt :
- return Hexagon::AND_rr_cdnPt;
- case Hexagon::AND_rr_cNotPt :
- return Hexagon::AND_rr_cdnNotPt;
-
- case Hexagon::OR_rr_cPt :
- return Hexagon::OR_rr_cdnPt;
- case Hexagon::OR_rr_cNotPt :
- return Hexagon::OR_rr_cdnNotPt;
-
- // Conditional Subtract
- case Hexagon::SUB_rr_cPt :
- return Hexagon::SUB_rr_cdnPt;
- case Hexagon::SUB_rr_cNotPt :
- return Hexagon::SUB_rr_cdnNotPt;
-
- // Conditional combine
- case Hexagon::COMBINE_rr_cPt :
- return Hexagon::COMBINE_rr_cdnPt;
- case Hexagon::COMBINE_rr_cNotPt :
- return Hexagon::COMBINE_rr_cdnNotPt;
-
- case Hexagon::ASLH_cPt_V4 :
- return Hexagon::ASLH_cdnPt_V4;
- case Hexagon::ASLH_cNotPt_V4 :
- return Hexagon::ASLH_cdnNotPt_V4;
-
- case Hexagon::ASRH_cPt_V4 :
- return Hexagon::ASRH_cdnPt_V4;
- case Hexagon::ASRH_cNotPt_V4 :
- return Hexagon::ASRH_cdnNotPt_V4;
-
- case Hexagon::SXTB_cPt_V4 :
- return Hexagon::SXTB_cdnPt_V4;
- case Hexagon::SXTB_cNotPt_V4 :
- return Hexagon::SXTB_cdnNotPt_V4;
-
- case Hexagon::SXTH_cPt_V4 :
- return Hexagon::SXTH_cdnPt_V4;
- case Hexagon::SXTH_cNotPt_V4 :
- return Hexagon::SXTH_cdnNotPt_V4;
-
- case Hexagon::ZXTB_cPt_V4 :
- return Hexagon::ZXTB_cdnPt_V4;
- case Hexagon::ZXTB_cNotPt_V4 :
- return Hexagon::ZXTB_cdnNotPt_V4;
-
- case Hexagon::ZXTH_cPt_V4 :
- return Hexagon::ZXTH_cdnPt_V4;
- case Hexagon::ZXTH_cNotPt_V4 :
- return Hexagon::ZXTH_cdnNotPt_V4;
- }
-}
-
// Returns true if an instruction can be promoted to .new predicate
// or new-value store.
bool HexagonPacketizerList::isNewifiable(MachineInstr* MI) {
- if ( isCondInst(MI) || IsNewifyStore(MI))
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ if ( isCondInst(MI) || QII->mayBeNewStore(MI))
return true;
else
return false;
@@ -1294,896 +437,38 @@ bool HexagonPacketizerList::PromoteToDotNew(MachineInstr* MI,
int NewOpcode;
if (RC == &Hexagon::PredRegsRegClass)
- NewOpcode = GetDotNewPredOp(MI, MBPI, QII);
+ NewOpcode = QII->GetDotNewPredOp(MI, MBPI);
else
- NewOpcode = GetDotNewOp(MI->getOpcode());
+ NewOpcode = QII->GetDotNewOp(MI);
MI->setDesc(QII->get(NewOpcode));
return true;
}
-// Returns the most basic instruction for the .new predicated instructions and
-// new-value stores.
-// For example, all of the following instructions will be converted back to the
-// same instruction:
-// 1) if (p0.new) memw(R0+#0) = R1.new --->
-// 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1
-// 3) if (p0.new) memw(R0+#0) = R1 --->
-//
-// To understand the translation of instruction 1 to its original form, consider
-// a packet with 3 instructions.
-// { p0 = cmp.eq(R0,R1)
-// if (p0.new) R2 = add(R3, R4)
-// R5 = add (R3, R1)
-// }
-// if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
-//
-// This instruction can be part of the previous packet only if both p0 and R2
-// are promoted to .new values. This promotion happens in steps, first
-// predicate register is promoted to .new and in the next iteration R2 is
-// promoted. Therefore, in case of dependence check failure (due to R5) during
-// next iteration, it should be converted back to its most basic form.
-
-static int GetDotOldOp(const int opc) {
- switch (opc) {
- default: llvm_unreachable("Unknown .old type");
- case Hexagon::TFR_cdnPt:
- return Hexagon::TFR_cPt;
-
- case Hexagon::TFR_cdnNotPt:
- return Hexagon::TFR_cNotPt;
-
- case Hexagon::TFRI_cdnPt:
- return Hexagon::TFRI_cPt;
-
- case Hexagon::TFRI_cdnNotPt:
- return Hexagon::TFRI_cNotPt;
-
- case Hexagon::JMP_tnew_t:
- return Hexagon::JMP_t;
-
- case Hexagon::JMP_fnew_t:
- return Hexagon::JMP_f;
-
- case Hexagon::JMPR_tnew_tV3:
- return Hexagon::JMPR_t;
-
- case Hexagon::JMPR_fnew_tV3:
- return Hexagon::JMPR_f;
-
- // Load double word
-
- case Hexagon::LDrid_cdnPt :
- return Hexagon::LDrid_cPt;
-
- case Hexagon::LDrid_cdnNotPt :
- return Hexagon::LDrid_cNotPt;
-
- case Hexagon::LDrid_indexed_cdnPt :
- return Hexagon::LDrid_indexed_cPt;
-
- case Hexagon::LDrid_indexed_cdnNotPt :
- return Hexagon::LDrid_indexed_cNotPt;
-
- case Hexagon::POST_LDrid_cdnPt_V4 :
- return Hexagon::POST_LDrid_cPt;
-
- case Hexagon::POST_LDrid_cdnNotPt_V4 :
- return Hexagon::POST_LDrid_cNotPt;
-
- // Load word
-
- case Hexagon::LDriw_cdnPt :
- return Hexagon::LDriw_cPt;
-
- case Hexagon::LDriw_cdnNotPt :
- return Hexagon::LDriw_cNotPt;
-
- case Hexagon::LDriw_indexed_cdnPt :
- return Hexagon::LDriw_indexed_cPt;
-
- case Hexagon::LDriw_indexed_cdnNotPt :
- return Hexagon::LDriw_indexed_cNotPt;
-
- case Hexagon::POST_LDriw_cdnPt_V4 :
- return Hexagon::POST_LDriw_cPt;
-
- case Hexagon::POST_LDriw_cdnNotPt_V4 :
- return Hexagon::POST_LDriw_cNotPt;
-
- // Load half
-
- case Hexagon::LDrih_cdnPt :
- return Hexagon::LDrih_cPt;
-
- case Hexagon::LDrih_cdnNotPt :
- return Hexagon::LDrih_cNotPt;
-
- case Hexagon::LDrih_indexed_cdnPt :
- return Hexagon::LDrih_indexed_cPt;
-
- case Hexagon::LDrih_indexed_cdnNotPt :
- return Hexagon::LDrih_indexed_cNotPt;
-
- case Hexagon::POST_LDrih_cdnPt_V4 :
- return Hexagon::POST_LDrih_cPt;
-
- case Hexagon::POST_LDrih_cdnNotPt_V4 :
- return Hexagon::POST_LDrih_cNotPt;
-
- // Load byte
-
- case Hexagon::LDrib_cdnPt :
- return Hexagon::LDrib_cPt;
-
- case Hexagon::LDrib_cdnNotPt :
- return Hexagon::LDrib_cNotPt;
-
- case Hexagon::LDrib_indexed_cdnPt :
- return Hexagon::LDrib_indexed_cPt;
-
- case Hexagon::LDrib_indexed_cdnNotPt :
- return Hexagon::LDrib_indexed_cNotPt;
-
- case Hexagon::POST_LDrib_cdnPt_V4 :
- return Hexagon::POST_LDrib_cPt;
-
- case Hexagon::POST_LDrib_cdnNotPt_V4 :
- return Hexagon::POST_LDrib_cNotPt;
-
- // Load unsigned half
-
- case Hexagon::LDriuh_cdnPt :
- return Hexagon::LDriuh_cPt;
-
- case Hexagon::LDriuh_cdnNotPt :
- return Hexagon::LDriuh_cNotPt;
-
- case Hexagon::LDriuh_indexed_cdnPt :
- return Hexagon::LDriuh_indexed_cPt;
-
- case Hexagon::LDriuh_indexed_cdnNotPt :
- return Hexagon::LDriuh_indexed_cNotPt;
-
- case Hexagon::POST_LDriuh_cdnPt_V4 :
- return Hexagon::POST_LDriuh_cPt;
-
- case Hexagon::POST_LDriuh_cdnNotPt_V4 :
- return Hexagon::POST_LDriuh_cNotPt;
-
- // Load unsigned byte
- case Hexagon::LDriub_cdnPt :
- return Hexagon::LDriub_cPt;
-
- case Hexagon::LDriub_cdnNotPt :
- return Hexagon::LDriub_cNotPt;
-
- case Hexagon::LDriub_indexed_cdnPt :
- return Hexagon::LDriub_indexed_cPt;
-
- case Hexagon::LDriub_indexed_cdnNotPt :
- return Hexagon::LDriub_indexed_cNotPt;
-
- case Hexagon::POST_LDriub_cdnPt_V4 :
- return Hexagon::POST_LDriub_cPt;
-
- case Hexagon::POST_LDriub_cdnNotPt_V4 :
- return Hexagon::POST_LDriub_cNotPt;
-
- // V4 indexed+scaled Load
-
- case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
- return Hexagon::LDrid_indexed_shl_cPt_V4;
-
- case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDrid_indexed_shl_cNotPt_V4;
-
- case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
- return Hexagon::LDrib_indexed_shl_cPt_V4;
-
- case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDrib_indexed_shl_cNotPt_V4;
-
- case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
- return Hexagon::LDriub_indexed_shl_cPt_V4;
-
- case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDriub_indexed_shl_cNotPt_V4;
-
- case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
- return Hexagon::LDrih_indexed_shl_cPt_V4;
-
- case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDrih_indexed_shl_cNotPt_V4;
-
- case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
- return Hexagon::LDriuh_indexed_shl_cPt_V4;
-
- case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDriuh_indexed_shl_cNotPt_V4;
-
- case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
- return Hexagon::LDriw_indexed_shl_cPt_V4;
-
- case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDriw_indexed_shl_cNotPt_V4;
-
- // V4 global address load
-
- case Hexagon::LDd_GP_cdnPt_V4:
- return Hexagon::LDd_GP_cPt_V4;
-
- case Hexagon::LDd_GP_cdnNotPt_V4:
- return Hexagon::LDd_GP_cNotPt_V4;
-
- case Hexagon::LDb_GP_cdnPt_V4:
- return Hexagon::LDb_GP_cPt_V4;
-
- case Hexagon::LDb_GP_cdnNotPt_V4:
- return Hexagon::LDb_GP_cNotPt_V4;
-
- case Hexagon::LDub_GP_cdnPt_V4:
- return Hexagon::LDub_GP_cPt_V4;
-
- case Hexagon::LDub_GP_cdnNotPt_V4:
- return Hexagon::LDub_GP_cNotPt_V4;
-
- case Hexagon::LDh_GP_cdnPt_V4:
- return Hexagon::LDh_GP_cPt_V4;
-
- case Hexagon::LDh_GP_cdnNotPt_V4:
- return Hexagon::LDh_GP_cNotPt_V4;
-
- case Hexagon::LDuh_GP_cdnPt_V4:
- return Hexagon::LDuh_GP_cPt_V4;
-
- case Hexagon::LDuh_GP_cdnNotPt_V4:
- return Hexagon::LDuh_GP_cNotPt_V4;
-
- case Hexagon::LDw_GP_cdnPt_V4:
- return Hexagon::LDw_GP_cPt_V4;
-
- case Hexagon::LDw_GP_cdnNotPt_V4:
- return Hexagon::LDw_GP_cNotPt_V4;
-
- // Conditional add
-
- case Hexagon::ADD_ri_cdnPt :
- return Hexagon::ADD_ri_cPt;
- case Hexagon::ADD_ri_cdnNotPt :
- return Hexagon::ADD_ri_cNotPt;
-
- case Hexagon::ADD_rr_cdnPt :
- return Hexagon::ADD_rr_cPt;
- case Hexagon::ADD_rr_cdnNotPt:
- return Hexagon::ADD_rr_cNotPt;
-
- // Conditional logical Operations
-
- case Hexagon::XOR_rr_cdnPt :
- return Hexagon::XOR_rr_cPt;
- case Hexagon::XOR_rr_cdnNotPt :
- return Hexagon::XOR_rr_cNotPt;
-
- case Hexagon::AND_rr_cdnPt :
- return Hexagon::AND_rr_cPt;
- case Hexagon::AND_rr_cdnNotPt :
- return Hexagon::AND_rr_cNotPt;
-
- case Hexagon::OR_rr_cdnPt :
- return Hexagon::OR_rr_cPt;
- case Hexagon::OR_rr_cdnNotPt :
- return Hexagon::OR_rr_cNotPt;
-
- // Conditional Subtract
-
- case Hexagon::SUB_rr_cdnPt :
- return Hexagon::SUB_rr_cPt;
- case Hexagon::SUB_rr_cdnNotPt :
- return Hexagon::SUB_rr_cNotPt;
-
- // Conditional combine
-
- case Hexagon::COMBINE_rr_cdnPt :
- return Hexagon::COMBINE_rr_cPt;
- case Hexagon::COMBINE_rr_cdnNotPt :
- return Hexagon::COMBINE_rr_cNotPt;
-
-// Conditional shift operations
-
- case Hexagon::ASLH_cdnPt_V4 :
- return Hexagon::ASLH_cPt_V4;
- case Hexagon::ASLH_cdnNotPt_V4 :
- return Hexagon::ASLH_cNotPt_V4;
-
- case Hexagon::ASRH_cdnPt_V4 :
- return Hexagon::ASRH_cPt_V4;
- case Hexagon::ASRH_cdnNotPt_V4 :
- return Hexagon::ASRH_cNotPt_V4;
-
- case Hexagon::SXTB_cdnPt_V4 :
- return Hexagon::SXTB_cPt_V4;
- case Hexagon::SXTB_cdnNotPt_V4 :
- return Hexagon::SXTB_cNotPt_V4;
-
- case Hexagon::SXTH_cdnPt_V4 :
- return Hexagon::SXTH_cPt_V4;
- case Hexagon::SXTH_cdnNotPt_V4 :
- return Hexagon::SXTH_cNotPt_V4;
-
- case Hexagon::ZXTB_cdnPt_V4 :
- return Hexagon::ZXTB_cPt_V4;
- case Hexagon::ZXTB_cdnNotPt_V4 :
- return Hexagon::ZXTB_cNotPt_V4;
-
- case Hexagon::ZXTH_cdnPt_V4 :
- return Hexagon::ZXTH_cPt_V4;
- case Hexagon::ZXTH_cdnNotPt_V4 :
- return Hexagon::ZXTH_cNotPt_V4;
-
- // Store byte
-
- case Hexagon::STrib_imm_cdnPt_V4 :
- return Hexagon::STrib_imm_cPt_V4;
-
- case Hexagon::STrib_imm_cdnNotPt_V4 :
- return Hexagon::STrib_imm_cNotPt_V4;
-
- case Hexagon::STrib_cdnPt_nv_V4 :
- case Hexagon::STrib_cPt_nv_V4 :
- case Hexagon::STrib_cdnPt_V4 :
- return Hexagon::STrib_cPt;
-
- case Hexagon::STrib_cdnNotPt_nv_V4 :
- case Hexagon::STrib_cNotPt_nv_V4 :
- case Hexagon::STrib_cdnNotPt_V4 :
- return Hexagon::STrib_cNotPt;
-
- case Hexagon::STrib_indexed_cdnPt_V4 :
- case Hexagon::STrib_indexed_cPt_nv_V4 :
- case Hexagon::STrib_indexed_cdnPt_nv_V4 :
- return Hexagon::STrib_indexed_cPt;
-
- case Hexagon::STrib_indexed_cdnNotPt_V4 :
- case Hexagon::STrib_indexed_cNotPt_nv_V4 :
- case Hexagon::STrib_indexed_cdnNotPt_nv_V4 :
- return Hexagon::STrib_indexed_cNotPt;
-
- case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cPt_nv_V4 :
- case Hexagon::STrib_indexed_shl_cdnPt_V4 :
- return Hexagon::STrib_indexed_shl_cPt_V4;
-
- case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 :
- case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
- return Hexagon::STrib_indexed_shl_cNotPt_V4;
-
- case Hexagon::POST_STbri_cdnPt_nv_V4 :
- case Hexagon::POST_STbri_cPt_nv_V4 :
- case Hexagon::POST_STbri_cdnPt_V4 :
- return Hexagon::POST_STbri_cPt;
-
- case Hexagon::POST_STbri_cdnNotPt_nv_V4 :
- case Hexagon::POST_STbri_cNotPt_nv_V4:
- case Hexagon::POST_STbri_cdnNotPt_V4 :
- return Hexagon::POST_STbri_cNotPt;
-
- case Hexagon::STb_GP_cdnPt_nv_V4:
- case Hexagon::STb_GP_cdnPt_V4:
- case Hexagon::STb_GP_cPt_nv_V4:
- return Hexagon::STb_GP_cPt_V4;
-
- case Hexagon::STb_GP_cdnNotPt_nv_V4:
- case Hexagon::STb_GP_cdnNotPt_V4:
- case Hexagon::STb_GP_cNotPt_nv_V4:
- return Hexagon::STb_GP_cNotPt_V4;
-
- // Store new-value byte - unconditional
- case Hexagon::STrib_nv_V4:
- return Hexagon::STrib;
-
- case Hexagon::STrib_indexed_nv_V4:
- return Hexagon::STrib_indexed;
-
- case Hexagon::STrib_indexed_shl_nv_V4:
- return Hexagon::STrib_indexed_shl_V4;
-
- case Hexagon::STrib_shl_nv_V4:
- return Hexagon::STrib_shl_V4;
-
- case Hexagon::STb_GP_nv_V4:
- return Hexagon::STb_GP_V4;
-
- case Hexagon::POST_STbri_nv_V4:
- return Hexagon::POST_STbri;
-
- // Store halfword
- case Hexagon::STrih_imm_cdnPt_V4 :
- return Hexagon::STrih_imm_cPt_V4;
-
- case Hexagon::STrih_imm_cdnNotPt_V4 :
- return Hexagon::STrih_imm_cNotPt_V4;
-
- case Hexagon::STrih_cdnPt_nv_V4 :
- case Hexagon::STrih_cPt_nv_V4 :
- case Hexagon::STrih_cdnPt_V4 :
- return Hexagon::STrih_cPt;
-
- case Hexagon::STrih_cdnNotPt_nv_V4 :
- case Hexagon::STrih_cNotPt_nv_V4 :
- case Hexagon::STrih_cdnNotPt_V4 :
- return Hexagon::STrih_cNotPt;
-
- case Hexagon::STrih_indexed_cdnPt_nv_V4:
- case Hexagon::STrih_indexed_cPt_nv_V4 :
- case Hexagon::STrih_indexed_cdnPt_V4 :
- return Hexagon::STrih_indexed_cPt;
-
- case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
- case Hexagon::STrih_indexed_cNotPt_nv_V4 :
- case Hexagon::STrih_indexed_cdnNotPt_V4 :
- return Hexagon::STrih_indexed_cNotPt;
-
- case Hexagon::STrih_indexed_shl_cdnPt_nv_V4 :
- case Hexagon::STrih_indexed_shl_cPt_nv_V4 :
- case Hexagon::STrih_indexed_shl_cdnPt_V4 :
- return Hexagon::STrih_indexed_shl_cPt_V4;
-
- case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4 :
- case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 :
- case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
- return Hexagon::STrih_indexed_shl_cNotPt_V4;
-
- case Hexagon::POST_SThri_cdnPt_nv_V4 :
- case Hexagon::POST_SThri_cPt_nv_V4 :
- case Hexagon::POST_SThri_cdnPt_V4 :
- return Hexagon::POST_SThri_cPt;
-
- case Hexagon::POST_SThri_cdnNotPt_nv_V4 :
- case Hexagon::POST_SThri_cNotPt_nv_V4 :
- case Hexagon::POST_SThri_cdnNotPt_V4 :
- return Hexagon::POST_SThri_cNotPt;
-
- case Hexagon::STh_GP_cdnPt_nv_V4:
- case Hexagon::STh_GP_cdnPt_V4:
- case Hexagon::STh_GP_cPt_nv_V4:
- return Hexagon::STh_GP_cPt_V4;
-
- case Hexagon::STh_GP_cdnNotPt_nv_V4:
- case Hexagon::STh_GP_cdnNotPt_V4:
- case Hexagon::STh_GP_cNotPt_nv_V4:
- return Hexagon::STh_GP_cNotPt_V4;
-
- // Store new-value halfword - unconditional
-
- case Hexagon::STrih_nv_V4:
- return Hexagon::STrih;
-
- case Hexagon::STrih_indexed_nv_V4:
- return Hexagon::STrih_indexed;
-
- case Hexagon::STrih_indexed_shl_nv_V4:
- return Hexagon::STrih_indexed_shl_V4;
-
- case Hexagon::STrih_shl_nv_V4:
- return Hexagon::STrih_shl_V4;
-
- case Hexagon::STh_GP_nv_V4:
- return Hexagon::STh_GP_V4;
-
- case Hexagon::POST_SThri_nv_V4:
- return Hexagon::POST_SThri;
-
- // Store word
-
- case Hexagon::STriw_imm_cdnPt_V4 :
- return Hexagon::STriw_imm_cPt_V4;
-
- case Hexagon::STriw_imm_cdnNotPt_V4 :
- return Hexagon::STriw_imm_cNotPt_V4;
-
- case Hexagon::STriw_cdnPt_nv_V4 :
- case Hexagon::STriw_cPt_nv_V4 :
- case Hexagon::STriw_cdnPt_V4 :
- return Hexagon::STriw_cPt;
-
- case Hexagon::STriw_cdnNotPt_nv_V4 :
- case Hexagon::STriw_cNotPt_nv_V4 :
- case Hexagon::STriw_cdnNotPt_V4 :
- return Hexagon::STriw_cNotPt;
-
- case Hexagon::STriw_indexed_cdnPt_nv_V4 :
- case Hexagon::STriw_indexed_cPt_nv_V4 :
- case Hexagon::STriw_indexed_cdnPt_V4 :
- return Hexagon::STriw_indexed_cPt;
-
- case Hexagon::STriw_indexed_cdnNotPt_nv_V4 :
- case Hexagon::STriw_indexed_cNotPt_nv_V4 :
- case Hexagon::STriw_indexed_cdnNotPt_V4 :
- return Hexagon::STriw_indexed_cNotPt;
-
- case Hexagon::STriw_indexed_shl_cdnPt_nv_V4 :
- case Hexagon::STriw_indexed_shl_cPt_nv_V4 :
- case Hexagon::STriw_indexed_shl_cdnPt_V4 :
- return Hexagon::STriw_indexed_shl_cPt_V4;
-
- case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4 :
- case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 :
- case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
- return Hexagon::STriw_indexed_shl_cNotPt_V4;
-
- case Hexagon::POST_STwri_cdnPt_nv_V4 :
- case Hexagon::POST_STwri_cPt_nv_V4 :
- case Hexagon::POST_STwri_cdnPt_V4 :
- return Hexagon::POST_STwri_cPt;
-
- case Hexagon::POST_STwri_cdnNotPt_nv_V4 :
- case Hexagon::POST_STwri_cNotPt_nv_V4 :
- case Hexagon::POST_STwri_cdnNotPt_V4 :
- return Hexagon::POST_STwri_cNotPt;
-
- case Hexagon::STw_GP_cdnPt_nv_V4:
- case Hexagon::STw_GP_cdnPt_V4:
- case Hexagon::STw_GP_cPt_nv_V4:
- return Hexagon::STw_GP_cPt_V4;
-
- case Hexagon::STw_GP_cdnNotPt_nv_V4:
- case Hexagon::STw_GP_cdnNotPt_V4:
- case Hexagon::STw_GP_cNotPt_nv_V4:
- return Hexagon::STw_GP_cNotPt_V4;
-
- // Store new-value word - unconditional
-
- case Hexagon::STriw_nv_V4:
- return Hexagon::STriw;
-
- case Hexagon::STriw_indexed_nv_V4:
- return Hexagon::STriw_indexed;
-
- case Hexagon::STriw_indexed_shl_nv_V4:
- return Hexagon::STriw_indexed_shl_V4;
-
- case Hexagon::STriw_shl_nv_V4:
- return Hexagon::STriw_shl_V4;
-
- case Hexagon::STw_GP_nv_V4:
- return Hexagon::STw_GP_V4;
-
- case Hexagon::POST_STwri_nv_V4:
- return Hexagon::POST_STwri;
-
- // Store doubleword
-
- case Hexagon::STrid_cdnPt_V4 :
- return Hexagon::STrid_cPt;
-
- case Hexagon::STrid_cdnNotPt_V4 :
- return Hexagon::STrid_cNotPt;
-
- case Hexagon::STrid_indexed_cdnPt_V4 :
- return Hexagon::STrid_indexed_cPt;
-
- case Hexagon::STrid_indexed_cdnNotPt_V4 :
- return Hexagon::STrid_indexed_cNotPt;
-
- case Hexagon::STrid_indexed_shl_cdnPt_V4 :
- return Hexagon::STrid_indexed_shl_cPt_V4;
-
- case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
- return Hexagon::STrid_indexed_shl_cNotPt_V4;
-
- case Hexagon::POST_STdri_cdnPt_V4 :
- return Hexagon::POST_STdri_cPt;
-
- case Hexagon::POST_STdri_cdnNotPt_V4 :
- return Hexagon::POST_STdri_cNotPt;
-
- case Hexagon::STd_GP_cdnPt_V4 :
- return Hexagon::STd_GP_cPt_V4;
-
- case Hexagon::STd_GP_cdnNotPt_V4 :
- return Hexagon::STd_GP_cNotPt_V4;
-
- }
-}
-
bool HexagonPacketizerList::DemoteToDotOld(MachineInstr* MI) {
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
- int NewOpcode = GetDotOldOp(MI->getOpcode());
+ int NewOpcode = QII->GetDotOldOp(MI->getOpcode());
MI->setDesc(QII->get(NewOpcode));
return true;
}
-// Returns true if an instruction is predicated on p0 and false if it's
-// predicated on !p0.
+enum PredicateKind {
+ PK_False,
+ PK_True,
+ PK_Unknown
+};
-static bool GetPredicateSense(MachineInstr* MI,
- const HexagonInstrInfo *QII) {
+/// Returns true if an instruction is predicated on p0 and false if it's
+/// predicated on !p0.
+static PredicateKind getPredicateSense(MachineInstr* MI,
+ const HexagonInstrInfo *QII) {
+ if (!QII->isPredicated(MI))
+ return PK_Unknown;
- switch (MI->getOpcode()) {
- default: llvm_unreachable("Unknown predicate sense of the instruction");
- case Hexagon::TFR_cPt:
- case Hexagon::TFR_cdnPt:
- case Hexagon::TFRI_cPt:
- case Hexagon::TFRI_cdnPt:
- case Hexagon::STrib_cPt :
- case Hexagon::STrib_cdnPt_V4 :
- case Hexagon::STrib_indexed_cPt :
- case Hexagon::STrib_indexed_cdnPt_V4 :
- case Hexagon::STrib_indexed_shl_cPt_V4 :
- case Hexagon::STrib_indexed_shl_cdnPt_V4 :
- case Hexagon::POST_STbri_cPt :
- case Hexagon::POST_STbri_cdnPt_V4 :
- case Hexagon::STrih_cPt :
- case Hexagon::STrih_cdnPt_V4 :
- case Hexagon::STrih_indexed_cPt :
- case Hexagon::STrih_indexed_cdnPt_V4 :
- case Hexagon::STrih_indexed_shl_cPt_V4 :
- case Hexagon::STrih_indexed_shl_cdnPt_V4 :
- case Hexagon::POST_SThri_cPt :
- case Hexagon::POST_SThri_cdnPt_V4 :
- case Hexagon::STriw_cPt :
- case Hexagon::STriw_cdnPt_V4 :
- case Hexagon::STriw_indexed_cPt :
- case Hexagon::STriw_indexed_cdnPt_V4 :
- case Hexagon::STriw_indexed_shl_cPt_V4 :
- case Hexagon::STriw_indexed_shl_cdnPt_V4 :
- case Hexagon::POST_STwri_cPt :
- case Hexagon::POST_STwri_cdnPt_V4 :
- case Hexagon::STrib_imm_cPt_V4 :
- case Hexagon::STrib_imm_cdnPt_V4 :
- case Hexagon::STrid_cPt :
- case Hexagon::STrid_cdnPt_V4 :
- case Hexagon::STrid_indexed_cPt :
- case Hexagon::STrid_indexed_cdnPt_V4 :
- case Hexagon::STrid_indexed_shl_cPt_V4 :
- case Hexagon::STrid_indexed_shl_cdnPt_V4 :
- case Hexagon::POST_STdri_cPt :
- case Hexagon::POST_STdri_cdnPt_V4 :
- case Hexagon::STrih_imm_cPt_V4 :
- case Hexagon::STrih_imm_cdnPt_V4 :
- case Hexagon::STriw_imm_cPt_V4 :
- case Hexagon::STriw_imm_cdnPt_V4 :
- case Hexagon::JMP_tnew_t :
- case Hexagon::LDrid_cPt :
- case Hexagon::LDrid_cdnPt :
- case Hexagon::LDrid_indexed_cPt :
- case Hexagon::LDrid_indexed_cdnPt :
- case Hexagon::POST_LDrid_cPt :
- case Hexagon::POST_LDrid_cdnPt_V4 :
- case Hexagon::LDriw_cPt :
- case Hexagon::LDriw_cdnPt :
- case Hexagon::LDriw_indexed_cPt :
- case Hexagon::LDriw_indexed_cdnPt :
- case Hexagon::POST_LDriw_cPt :
- case Hexagon::POST_LDriw_cdnPt_V4 :
- case Hexagon::LDrih_cPt :
- case Hexagon::LDrih_cdnPt :
- case Hexagon::LDrih_indexed_cPt :
- case Hexagon::LDrih_indexed_cdnPt :
- case Hexagon::POST_LDrih_cPt :
- case Hexagon::POST_LDrih_cdnPt_V4 :
- case Hexagon::LDrib_cPt :
- case Hexagon::LDrib_cdnPt :
- case Hexagon::LDrib_indexed_cPt :
- case Hexagon::LDrib_indexed_cdnPt :
- case Hexagon::POST_LDrib_cPt :
- case Hexagon::POST_LDrib_cdnPt_V4 :
- case Hexagon::LDriuh_cPt :
- case Hexagon::LDriuh_cdnPt :
- case Hexagon::LDriuh_indexed_cPt :
- case Hexagon::LDriuh_indexed_cdnPt :
- case Hexagon::POST_LDriuh_cPt :
- case Hexagon::POST_LDriuh_cdnPt_V4 :
- case Hexagon::LDriub_cPt :
- case Hexagon::LDriub_cdnPt :
- case Hexagon::LDriub_indexed_cPt :
- case Hexagon::LDriub_indexed_cdnPt :
- case Hexagon::POST_LDriub_cPt :
- case Hexagon::POST_LDriub_cdnPt_V4 :
- case Hexagon::LDrid_indexed_shl_cPt_V4 :
- case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
- case Hexagon::LDrib_indexed_shl_cPt_V4 :
- case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
- case Hexagon::LDriub_indexed_shl_cPt_V4 :
- case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
- case Hexagon::LDrih_indexed_shl_cPt_V4 :
- case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
- case Hexagon::LDriuh_indexed_shl_cPt_V4 :
- case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
- case Hexagon::LDriw_indexed_shl_cPt_V4 :
- case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
- case Hexagon::ADD_ri_cPt :
- case Hexagon::ADD_ri_cdnPt :
- case Hexagon::ADD_rr_cPt :
- case Hexagon::ADD_rr_cdnPt :
- case Hexagon::XOR_rr_cPt :
- case Hexagon::XOR_rr_cdnPt :
- case Hexagon::AND_rr_cPt :
- case Hexagon::AND_rr_cdnPt :
- case Hexagon::OR_rr_cPt :
- case Hexagon::OR_rr_cdnPt :
- case Hexagon::SUB_rr_cPt :
- case Hexagon::SUB_rr_cdnPt :
- case Hexagon::COMBINE_rr_cPt :
- case Hexagon::COMBINE_rr_cdnPt :
- case Hexagon::ASLH_cPt_V4 :
- case Hexagon::ASLH_cdnPt_V4 :
- case Hexagon::ASRH_cPt_V4 :
- case Hexagon::ASRH_cdnPt_V4 :
- case Hexagon::SXTB_cPt_V4 :
- case Hexagon::SXTB_cdnPt_V4 :
- case Hexagon::SXTH_cPt_V4 :
- case Hexagon::SXTH_cdnPt_V4 :
- case Hexagon::ZXTB_cPt_V4 :
- case Hexagon::ZXTB_cdnPt_V4 :
- case Hexagon::ZXTH_cPt_V4 :
- case Hexagon::ZXTH_cdnPt_V4 :
- case Hexagon::LDd_GP_cPt_V4 :
- case Hexagon::LDb_GP_cPt_V4 :
- case Hexagon::LDub_GP_cPt_V4 :
- case Hexagon::LDh_GP_cPt_V4 :
- case Hexagon::LDuh_GP_cPt_V4 :
- case Hexagon::LDw_GP_cPt_V4 :
- case Hexagon::STd_GP_cPt_V4 :
- case Hexagon::STb_GP_cPt_V4 :
- case Hexagon::STh_GP_cPt_V4 :
- case Hexagon::STw_GP_cPt_V4 :
- case Hexagon::LDd_GP_cdnPt_V4 :
- case Hexagon::LDb_GP_cdnPt_V4 :
- case Hexagon::LDub_GP_cdnPt_V4 :
- case Hexagon::LDh_GP_cdnPt_V4 :
- case Hexagon::LDuh_GP_cdnPt_V4 :
- case Hexagon::LDw_GP_cdnPt_V4 :
- case Hexagon::STd_GP_cdnPt_V4 :
- case Hexagon::STb_GP_cdnPt_V4 :
- case Hexagon::STh_GP_cdnPt_V4 :
- case Hexagon::STw_GP_cdnPt_V4 :
- return true;
+ if (QII->isPredicatedTrue(MI))
+ return PK_True;
- case Hexagon::TFR_cNotPt:
- case Hexagon::TFR_cdnNotPt:
- case Hexagon::TFRI_cNotPt:
- case Hexagon::TFRI_cdnNotPt:
- case Hexagon::STrib_cNotPt :
- case Hexagon::STrib_cdnNotPt_V4 :
- case Hexagon::STrib_indexed_cNotPt :
- case Hexagon::STrib_indexed_cdnNotPt_V4 :
- case Hexagon::STrib_indexed_shl_cNotPt_V4 :
- case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
- case Hexagon::POST_STbri_cNotPt :
- case Hexagon::POST_STbri_cdnNotPt_V4 :
- case Hexagon::STrih_cNotPt :
- case Hexagon::STrih_cdnNotPt_V4 :
- case Hexagon::STrih_indexed_cNotPt :
- case Hexagon::STrih_indexed_cdnNotPt_V4 :
- case Hexagon::STrih_indexed_shl_cNotPt_V4 :
- case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
- case Hexagon::POST_SThri_cNotPt :
- case Hexagon::POST_SThri_cdnNotPt_V4 :
- case Hexagon::STriw_cNotPt :
- case Hexagon::STriw_cdnNotPt_V4 :
- case Hexagon::STriw_indexed_cNotPt :
- case Hexagon::STriw_indexed_cdnNotPt_V4 :
- case Hexagon::STriw_indexed_shl_cNotPt_V4 :
- case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
- case Hexagon::POST_STwri_cNotPt :
- case Hexagon::POST_STwri_cdnNotPt_V4 :
- case Hexagon::STrib_imm_cNotPt_V4 :
- case Hexagon::STrib_imm_cdnNotPt_V4 :
- case Hexagon::STrid_cNotPt :
- case Hexagon::STrid_cdnNotPt_V4 :
- case Hexagon::STrid_indexed_cdnNotPt_V4 :
- case Hexagon::STrid_indexed_cNotPt :
- case Hexagon::STrid_indexed_shl_cNotPt_V4 :
- case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
- case Hexagon::POST_STdri_cNotPt :
- case Hexagon::POST_STdri_cdnNotPt_V4 :
- case Hexagon::STrih_imm_cNotPt_V4 :
- case Hexagon::STrih_imm_cdnNotPt_V4 :
- case Hexagon::STriw_imm_cNotPt_V4 :
- case Hexagon::STriw_imm_cdnNotPt_V4 :
- case Hexagon::JMP_fnew_t :
- case Hexagon::LDrid_cNotPt :
- case Hexagon::LDrid_cdnNotPt :
- case Hexagon::LDrid_indexed_cNotPt :
- case Hexagon::LDrid_indexed_cdnNotPt :
- case Hexagon::POST_LDrid_cNotPt :
- case Hexagon::POST_LDrid_cdnNotPt_V4 :
- case Hexagon::LDriw_cNotPt :
- case Hexagon::LDriw_cdnNotPt :
- case Hexagon::LDriw_indexed_cNotPt :
- case Hexagon::LDriw_indexed_cdnNotPt :
- case Hexagon::POST_LDriw_cNotPt :
- case Hexagon::POST_LDriw_cdnNotPt_V4 :
- case Hexagon::LDrih_cNotPt :
- case Hexagon::LDrih_cdnNotPt :
- case Hexagon::LDrih_indexed_cNotPt :
- case Hexagon::LDrih_indexed_cdnNotPt :
- case Hexagon::POST_LDrih_cNotPt :
- case Hexagon::POST_LDrih_cdnNotPt_V4 :
- case Hexagon::LDrib_cNotPt :
- case Hexagon::LDrib_cdnNotPt :
- case Hexagon::LDrib_indexed_cNotPt :
- case Hexagon::LDrib_indexed_cdnNotPt :
- case Hexagon::POST_LDrib_cNotPt :
- case Hexagon::POST_LDrib_cdnNotPt_V4 :
- case Hexagon::LDriuh_cNotPt :
- case Hexagon::LDriuh_cdnNotPt :
- case Hexagon::LDriuh_indexed_cNotPt :
- case Hexagon::LDriuh_indexed_cdnNotPt :
- case Hexagon::POST_LDriuh_cNotPt :
- case Hexagon::POST_LDriuh_cdnNotPt_V4 :
- case Hexagon::LDriub_cNotPt :
- case Hexagon::LDriub_cdnNotPt :
- case Hexagon::LDriub_indexed_cNotPt :
- case Hexagon::LDriub_indexed_cdnNotPt :
- case Hexagon::POST_LDriub_cNotPt :
- case Hexagon::POST_LDriub_cdnNotPt_V4 :
- case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
- case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
- case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
- case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
- case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
- case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
- case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
- case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
- case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
- case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
- case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
- case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
- case Hexagon::ADD_ri_cNotPt :
- case Hexagon::ADD_ri_cdnNotPt :
- case Hexagon::ADD_rr_cNotPt :
- case Hexagon::ADD_rr_cdnNotPt :
- case Hexagon::XOR_rr_cNotPt :
- case Hexagon::XOR_rr_cdnNotPt :
- case Hexagon::AND_rr_cNotPt :
- case Hexagon::AND_rr_cdnNotPt :
- case Hexagon::OR_rr_cNotPt :
- case Hexagon::OR_rr_cdnNotPt :
- case Hexagon::SUB_rr_cNotPt :
- case Hexagon::SUB_rr_cdnNotPt :
- case Hexagon::COMBINE_rr_cNotPt :
- case Hexagon::COMBINE_rr_cdnNotPt :
- case Hexagon::ASLH_cNotPt_V4 :
- case Hexagon::ASLH_cdnNotPt_V4 :
- case Hexagon::ASRH_cNotPt_V4 :
- case Hexagon::ASRH_cdnNotPt_V4 :
- case Hexagon::SXTB_cNotPt_V4 :
- case Hexagon::SXTB_cdnNotPt_V4 :
- case Hexagon::SXTH_cNotPt_V4 :
- case Hexagon::SXTH_cdnNotPt_V4 :
- case Hexagon::ZXTB_cNotPt_V4 :
- case Hexagon::ZXTB_cdnNotPt_V4 :
- case Hexagon::ZXTH_cNotPt_V4 :
- case Hexagon::ZXTH_cdnNotPt_V4 :
-
- case Hexagon::LDd_GP_cNotPt_V4 :
- case Hexagon::LDb_GP_cNotPt_V4 :
- case Hexagon::LDub_GP_cNotPt_V4 :
- case Hexagon::LDh_GP_cNotPt_V4 :
- case Hexagon::LDuh_GP_cNotPt_V4 :
- case Hexagon::LDw_GP_cNotPt_V4 :
- case Hexagon::STd_GP_cNotPt_V4 :
- case Hexagon::STb_GP_cNotPt_V4 :
- case Hexagon::STh_GP_cNotPt_V4 :
- case Hexagon::STw_GP_cNotPt_V4 :
- case Hexagon::LDd_GP_cdnNotPt_V4 :
- case Hexagon::LDb_GP_cdnNotPt_V4 :
- case Hexagon::LDub_GP_cdnNotPt_V4 :
- case Hexagon::LDh_GP_cdnNotPt_V4 :
- case Hexagon::LDuh_GP_cdnNotPt_V4 :
- case Hexagon::LDw_GP_cdnNotPt_V4 :
- case Hexagon::STd_GP_cdnNotPt_V4 :
- case Hexagon::STb_GP_cdnNotPt_V4 :
- case Hexagon::STh_GP_cdnNotPt_V4 :
- case Hexagon::STw_GP_cdnNotPt_V4 :
- return false;
- }
- // return *some value* to avoid compiler warning
- return false;
+ return PK_False;
}
static MachineOperand& GetPostIncrementOperand(MachineInstr *MI,
@@ -2252,10 +537,10 @@ static MachineOperand& GetStoreValueOperand(MachineInstr *MI) {
// Arch Spec: 3.4.4.2
bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
MachineInstr *PacketMI, unsigned DepReg,
- std::map <MachineInstr*, SUnit*> MIToSUnit)
-{
- // Make sure we are looking at the store
- if (!IsNewifyStore(MI))
+ std::map <MachineInstr*, SUnit*> MIToSUnit) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ // Make sure we are looking at a store that can be promoted.
+ if (!QII->mayBeNewStore(MI))
return false;
// Make sure there is dependency and can be new value'ed
@@ -2263,12 +548,11 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
GetStoreValueOperand(MI).getReg() != DepReg)
return false;
- const HexagonRegisterInfo* QRI =
+ const HexagonRegisterInfo* QRI =
(const HexagonRegisterInfo *) TM.getRegisterInfo();
const MCInstrDesc& MCID = PacketMI->getDesc();
// first operand is always the result
- const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
const TargetRegisterClass* PacketRC = QII->getRegClass(MCID, 0, QRI, MF);
// If there is already a store in the packet, we cannot do a new-value store.
@@ -2311,7 +595,7 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
}
// If the source that feeds the store is predicated, new value store must
- // also be also predicated.
+ // also be predicated.
if (QII->isPredicated(PacketMI)) {
if (!QII->isPredicated(MI))
return false;
@@ -2357,7 +641,7 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
if (( predRegNumDst != predRegNumSrc) ||
QII->isDotNewInst(PacketMI) != QII->isDotNewInst(MI) ||
- GetPredicateSense(MI, QII) != GetPredicateSense(PacketMI, QII)) {
+ getPredicateSense(MI, QII) != getPredicateSense(PacketMI, QII)) {
return false;
}
}
@@ -2438,10 +722,11 @@ bool HexagonPacketizerList::CanPromoteToNewValue( MachineInstr *MI,
MachineBasicBlock::iterator &MII)
{
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
const HexagonRegisterInfo* QRI =
(const HexagonRegisterInfo *) TM.getRegisterInfo();
if (!QRI->Subtarget.hasV4TOps() ||
- !IsNewifyStore(MI))
+ !QII->mayBeNewStore(MI))
return false;
MachineInstr *PacketMI = PacketSU->getInstr();
@@ -2468,7 +753,7 @@ bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI,
{
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
// Already a dot new instruction.
- if (QII->isDotNewInst(MI) && !IsNewifyStore(MI))
+ if (QII->isDotNewInst(MI) && !QII->mayBeNewStore(MI))
return false;
if (!isNewifiable(MI))
@@ -2478,12 +763,12 @@ bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI,
if (RC == &Hexagon::PredRegsRegClass && isCondInst(MI))
return true;
else if (RC != &Hexagon::PredRegsRegClass &&
- !IsNewifyStore(MI)) // MI is not a new-value store
+ !QII->mayBeNewStore(MI)) // MI is not a new-value store
return false;
else {
// Create a dot new machine instruction to see if resources can be
// allocated. If not, bail out now.
- int NewOpcode = GetDotNewOp(MI->getOpcode());
+ int NewOpcode = QII->GetDotNewOp(MI);
const MCInstrDesc &desc = QII->get(NewOpcode);
DebugLoc dl;
MachineInstr *NewMI =
@@ -2552,16 +837,39 @@ bool HexagonPacketizerList::RestrictingDepExistInPacket (MachineInstr* MI,
}
+/// Gets the predicate register of a predicated instruction.
+static unsigned getPredicatedRegister(MachineInstr *MI,
+ const HexagonInstrInfo *QII) {
+ /// We use the following rule: The first predicate register that is a use is
+ /// the predicate register of a predicated instruction.
+
+ assert(QII->isPredicated(MI) && "Must be predicated instruction");
+
+ for (MachineInstr::mop_iterator OI = MI->operands_begin(),
+ OE = MI->operands_end(); OI != OE; ++OI) {
+ MachineOperand &Op = *OI;
+ if (Op.isReg() && Op.getReg() && Op.isUse() &&
+ Hexagon::PredRegsRegClass.contains(Op.getReg()))
+ return Op.getReg();
+ }
+
+ llvm_unreachable("Unknown instruction operand layout");
+
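+ // Not reached; return a value anyway to avoid a compiler warning about a
+ // missing return after llvm_unreachable.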
+ return 0;
+}
+
// Given two predicated instructions, this function detects whether
// the predicates are complements
bool HexagonPacketizerList::ArePredicatesComplements (MachineInstr* MI1,
MachineInstr* MI2, std::map <MachineInstr*, SUnit*> MIToSUnit) {
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
- // Currently can only reason about conditional transfers
- if (!QII->isConditionalTransfer(MI1) || !QII->isConditionalTransfer(MI2)) {
+
+ // If we don't know the predicate sense of the instructions, bail out early;
+ // we need it later.
+ if (getPredicateSense(MI1, QII) == PK_Unknown ||
+ getPredicateSense(MI2, QII) == PK_Unknown)
return false;
- }
// Scheduling unit for candidate
SUnit* SU = MIToSUnit[MI1];
@@ -2600,9 +908,9 @@ bool HexagonPacketizerList::ArePredicatesComplements (MachineInstr* MI1,
// there already exist anti dep on the same pred in
// the packet.
if (PacketSU->Succs[i].getSUnit() == SU &&
+ PacketSU->Succs[i].getKind() == SDep::Data &&
Hexagon::PredRegsRegClass.contains(
PacketSU->Succs[i].getReg()) &&
- PacketSU->Succs[i].getKind() == SDep::Data &&
// Here I know that *VIN is predicate setting instruction
// with true data dep to candidate on the register
// we care about - c) in the above example.
@@ -2623,8 +931,12 @@ bool HexagonPacketizerList::ArePredicatesComplements (MachineInstr* MI1,
// that the predicate sense is different
// We also need to differentiate .old vs. .new:
// !p0 is not complementary to p0.new
- return ((MI1->getOperand(1).getReg() == MI2->getOperand(1).getReg()) &&
- (GetPredicateSense(MI1, QII) != GetPredicateSense(MI2, QII)) &&
+ unsigned PReg1 = getPredicatedRegister(MI1, QII);
+ unsigned PReg2 = getPredicatedRegister(MI2, QII);
+ return ((PReg1 == PReg2) &&
+ Hexagon::PredRegsRegClass.contains(PReg1) &&
+ Hexagon::PredRegsRegClass.contains(PReg2) &&
+ (getPredicateSense(MI1, QII) != getPredicateSense(MI2, QII)) &&
(QII->isDotNewInst(MI1) == QII->isDotNewInst(MI2)));
}
@@ -2722,24 +1034,21 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
}
// A LoopN instruction cannot appear in the same packet as a jump or call.
- if (IsLoopN(I) && ( IsDirectJump(J)
- || MCIDJ.isCall()
- || QII->isDeallocRet(J))) {
+ if (IsLoopN(I) &&
+ (IsDirectJump(J) || MCIDJ.isCall() || QII->isDeallocRet(J))) {
Dependence = true;
return false;
}
- if (IsLoopN(J) && ( IsDirectJump(I)
- || MCIDI.isCall()
- || QII->isDeallocRet(I))) {
+ if (IsLoopN(J) &&
+ (IsDirectJump(I) || MCIDI.isCall() || QII->isDeallocRet(I))) {
Dependence = true;
return false;
}
// dealloc_return cannot appear in the same packet as a conditional or
// unconditional jump.
- if (QII->isDeallocRet(I) && ( MCIDJ.isBranch()
- || MCIDJ.isCall()
- || MCIDJ.isBarrier())) {
+ if (QII->isDeallocRet(I) &&
+ (MCIDJ.isBranch() || MCIDJ.isCall() || MCIDJ.isBarrier())) {
Dependence = true;
return false;
}
@@ -2764,7 +1073,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
}
//if dealloc_return
- if (MCIDJ.mayStore() && QII->isDeallocRet(I)){
+ if (MCIDJ.mayStore() && QII->isDeallocRet(I)) {
Dependence = true;
return false;
}
diff --git a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
index 36da6dfcc3d0..7c41507ede74 100644
--- a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
+++ b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
@@ -21,7 +21,6 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/raw_ostream.h"
-#include <cstdio>
using namespace llvm;
@@ -179,7 +178,7 @@ void HexagonInstPrinter::printBranchOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
// Branches can take an immediate operand. This is used by the branch
// selection pass to print $+8, an eight byte displacement from the PC.
- assert("Unknown branch operand.");
+ llvm_unreachable("Unknown branch operand.");
}
void HexagonInstPrinter::printCallOperand(const MCInst *MI, unsigned OpNo,
@@ -196,15 +195,9 @@ void HexagonInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
void HexagonInstPrinter::printSymbol(const MCInst *MI, unsigned OpNo,
raw_ostream &O, bool hi) const {
- const MCOperand& MO = MI->getOperand(OpNo);
+ assert(MI->getOperand(OpNo).isImm() && "Unknown symbol operand");
- O << '#' << (hi? "HI": "LO") << '(';
- if (MO.isImm()) {
- O << '#';
- printOperand(MI, OpNo, O);
- } else {
- assert("Unknown symbol operand");
- printOperand(MI, OpNo, O);
- }
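+ // Emits "#HI(#imm)" or "#LO(#imm)" for the requested half of the symbol value.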
+ O << '#' << (hi ? "HI" : "LO") << "(#";
+ printOperand(MI, OpNo, O);
O << ')';
}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index d4a93b5c87a4..8519cf314e6f 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -65,14 +65,15 @@ namespace HexagonII {
AbsoluteSet = 2, // Absolute set addressing mode
BaseImmOffset = 3, // Indirect with offset
BaseLongOffset = 4, // Indirect with long offset
- BaseRegOffset = 5 // Indirect with register offset
+ BaseRegOffset = 5, // Indirect with register offset
+ PostInc = 6 // Post increment addressing mode
};
enum MemAccessSize {
NoMemAccess = 0, // Not a memory access instruction.
ByteAccess = 1, // Byte access instruction (memb).
HalfWordAccess = 2, // Half word access instruction (memh).
- WordAccess = 3, // Word access instrution (memw).
+ WordAccess = 3, // Word access instruction (memw).
DoubleWordAccess = 4 // Double word access instruction (memd)
};
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
index 3deb8d1deb42..3f9415b94df9 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
@@ -15,7 +15,10 @@
using namespace llvm;
-HexagonMCAsmInfo::HexagonMCAsmInfo(const Target &T, StringRef TT) {
+// Pin the vtable to this file.
+void HexagonMCAsmInfo::anchor() {}
+
+HexagonMCAsmInfo::HexagonMCAsmInfo(StringRef TT) {
Data16bitsDirective = "\t.half\t";
Data32bitsDirective = "\t.word\t";
Data64bitsDirective = 0; // .xword is only supported by V9.
@@ -29,7 +32,6 @@ HexagonMCAsmInfo::HexagonMCAsmInfo(const Target &T, StringRef TT) {
InlineAsmEnd = "# InlineAsm End";
ZeroDirective = "\t.space\t";
AscizDirective = "\t.string\t";
- WeakRefDirective = "\t.weak\t";
SupportsDebugInformation = true;
UsesELFSectionDirectiveForBSS = true;
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
index d336cd5be917..bd8cb7637af7 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
@@ -15,14 +15,13 @@
#define HexagonMCASMINFO_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
- class Target;
-
- class HexagonMCAsmInfo : public MCAsmInfo {
+ class HexagonMCAsmInfo : public MCAsmInfoELF {
+ virtual void anchor();
public:
- explicit HexagonMCAsmInfo(const Target &T, StringRef TT);
+ explicit HexagonMCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
index 6b1d2d161958..2f93a5299c91 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -54,13 +54,14 @@ static MCSubtargetInfo *createHexagonMCSubtargetInfo(StringRef TT,
return X;
}
-static MCAsmInfo *createHexagonMCAsmInfo(const Target &T, StringRef TT) {
- MCAsmInfo *MAI = new HexagonMCAsmInfo(T, TT);
+static MCAsmInfo *createHexagonMCAsmInfo(const MCRegisterInfo &MRI,
+ StringRef TT) {
+ MCAsmInfo *MAI = new HexagonMCAsmInfo(TT);
// VirtualFP = (R30 + #0).
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(Hexagon::R30, 0);
- MAI->addInitialFrameState(0, Dst, Src);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(
+ 0, Hexagon::R30, 0);
+ MAI->addInitialFrameState(Inst);
return MAI;
}
diff --git a/lib/Target/LLVMBuild.txt b/lib/Target/LLVMBuild.txt
index 1022ae908897..98d26bcac8af 100644
--- a/lib/Target/LLVMBuild.txt
+++ b/lib/Target/LLVMBuild.txt
@@ -16,7 +16,7 @@
;===------------------------------------------------------------------------===;
[common]
-subdirectories = AArch64 ARM CppBackend Hexagon MBlaze MSP430 NVPTX Mips PowerPC R600 Sparc SystemZ X86 XCore
+subdirectories = AArch64 ARM CppBackend Hexagon MSP430 NVPTX Mips PowerPC R600 Sparc SystemZ X86 XCore
; This is a special group whose required libraries are extended (by llvm-build)
; with the best execution engine (the native JIT, if available, or the
diff --git a/lib/Target/MBlaze/AsmParser/CMakeLists.txt b/lib/Target/MBlaze/AsmParser/CMakeLists.txt
deleted file mode 100644
index 4a7d8e8d8887..000000000000
--- a/lib/Target/MBlaze/AsmParser/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/..
- ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMMBlazeAsmParser
- MBlazeAsmParser.cpp
- )
-
-add_dependencies(LLVMMBlazeAsmParser MBlazeCommonTableGen)
diff --git a/lib/Target/MBlaze/AsmParser/LLVMBuild.txt b/lib/Target/MBlaze/AsmParser/LLVMBuild.txt
deleted file mode 100644
index b10189a9dd97..000000000000
--- a/lib/Target/MBlaze/AsmParser/LLVMBuild.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-;===- ./lib/Target/MBlaze/AsmParser/LLVMBuild.txt --------------*- Conf -*--===;
-;
-; The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-; http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = MBlazeAsmParser
-parent = MBlaze
-required_libraries = MBlazeInfo MC MCParser Support
-add_to_library_groups = MBlaze
diff --git a/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp b/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp
deleted file mode 100644
index dda6e247ac4f..000000000000
--- a/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp
+++ /dev/null
@@ -1,572 +0,0 @@
-//===-- MBlazeAsmParser.cpp - Parse MBlaze asm to MCInst instructions -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/MBlazeBaseInfo.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCParser/MCAsmLexer.h"
-#include "llvm/MC/MCParser/MCAsmParser.h"
-#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCTargetAsmParser.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace {
-struct MBlazeOperand;
-
-class MBlazeAsmParser : public MCTargetAsmParser {
- MCAsmParser &Parser;
-
- MCAsmParser &getParser() const { return Parser; }
- MCAsmLexer &getLexer() const { return Parser.getLexer(); }
-
- void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
- bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
-
- MBlazeOperand *ParseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
- MBlazeOperand *ParseRegister();
- MBlazeOperand *ParseRegister(SMLoc &StartLoc, SMLoc &EndLoc);
- MBlazeOperand *ParseImmediate();
- MBlazeOperand *ParseFsl();
- MBlazeOperand* ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
-
- bool ParseDirectiveWord(unsigned Size, SMLoc L);
-
- bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm);
-
- /// @name Auto-generated Match Functions
- /// {
-
-#define GET_ASSEMBLER_HEADER
-#include "MBlazeGenAsmMatcher.inc"
-
- /// }
-
-public:
- MBlazeAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
- : MCTargetAsmParser(), Parser(_Parser) {}
-
- virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- virtual bool ParseDirective(AsmToken DirectiveID);
-};
-
-/// MBlazeOperand - Instances of this class represent a parsed MBlaze machine
-/// instruction.
-struct MBlazeOperand : public MCParsedAsmOperand {
- enum KindTy {
- Token,
- Immediate,
- Register,
- Memory,
- Fsl
- } Kind;
-
- SMLoc StartLoc, EndLoc;
-
- struct TokOp {
- const char *Data;
- unsigned Length;
- };
-
- struct RegOp {
- unsigned RegNum;
- };
-
- struct ImmOp {
- const MCExpr *Val;
- };
-
- struct MemOp {
- unsigned Base;
- unsigned OffReg;
- const MCExpr *Off;
- };
-
- struct FslImmOp {
- const MCExpr *Val;
- };
-
- union {
- struct TokOp Tok;
- struct RegOp Reg;
- struct ImmOp Imm;
- struct MemOp Mem;
- struct FslImmOp FslImm;
- };
-
- MBlazeOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
-public:
- MBlazeOperand(const MBlazeOperand &o) : MCParsedAsmOperand() {
- Kind = o.Kind;
- StartLoc = o.StartLoc;
- EndLoc = o.EndLoc;
- switch (Kind) {
- case Register:
- Reg = o.Reg;
- break;
- case Immediate:
- Imm = o.Imm;
- break;
- case Token:
- Tok = o.Tok;
- break;
- case Memory:
- Mem = o.Mem;
- break;
- case Fsl:
- FslImm = o.FslImm;
- break;
- }
- }
-
- /// getStartLoc - Get the location of the first token of this operand.
- SMLoc getStartLoc() const { return StartLoc; }
-
- /// getEndLoc - Get the location of the last token of this operand.
- SMLoc getEndLoc() const { return EndLoc; }
-
- unsigned getReg() const {
- assert(Kind == Register && "Invalid access!");
- return Reg.RegNum;
- }
-
- const MCExpr *getImm() const {
- assert(Kind == Immediate && "Invalid access!");
- return Imm.Val;
- }
-
- const MCExpr *getFslImm() const {
- assert(Kind == Fsl && "Invalid access!");
- return FslImm.Val;
- }
-
- unsigned getMemBase() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.Base;
- }
-
- const MCExpr* getMemOff() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.Off;
- }
-
- unsigned getMemOffReg() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.OffReg;
- }
-
- bool isToken() const { return Kind == Token; }
- bool isImm() const { return Kind == Immediate; }
- bool isMem() const { return Kind == Memory; }
- bool isFsl() const { return Kind == Fsl; }
- bool isReg() const { return Kind == Register; }
-
- void addExpr(MCInst &Inst, const MCExpr *Expr) const {
- // Add as immediates when possible. Null MCExpr = 0.
- if (Expr == 0)
- Inst.addOperand(MCOperand::CreateImm(0));
- else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
- Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
- else
- Inst.addOperand(MCOperand::CreateExpr(Expr));
- }
-
- void addRegOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getReg()));
- }
-
- void addImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addFslOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getFslImm());
- }
-
- void addMemOperands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && "Invalid number of operands!");
-
- Inst.addOperand(MCOperand::CreateReg(getMemBase()));
-
- unsigned RegOff = getMemOffReg();
- if (RegOff)
- Inst.addOperand(MCOperand::CreateReg(RegOff));
- else
- addExpr(Inst, getMemOff());
- }
-
- StringRef getToken() const {
- assert(Kind == Token && "Invalid access!");
- return StringRef(Tok.Data, Tok.Length);
- }
-
- virtual void print(raw_ostream &OS) const;
-
- static MBlazeOperand *CreateToken(StringRef Str, SMLoc S) {
- MBlazeOperand *Op = new MBlazeOperand(Token);
- Op->Tok.Data = Str.data();
- Op->Tok.Length = Str.size();
- Op->StartLoc = S;
- Op->EndLoc = S;
- return Op;
- }
-
- static MBlazeOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
- MBlazeOperand *Op = new MBlazeOperand(Register);
- Op->Reg.RegNum = RegNum;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-
- static MBlazeOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
- MBlazeOperand *Op = new MBlazeOperand(Immediate);
- Op->Imm.Val = Val;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-
- static MBlazeOperand *CreateFslImm(const MCExpr *Val, SMLoc S, SMLoc E) {
- MBlazeOperand *Op = new MBlazeOperand(Fsl);
- Op->Imm.Val = Val;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-
- static MBlazeOperand *CreateMem(unsigned Base, const MCExpr *Off, SMLoc S,
- SMLoc E) {
- MBlazeOperand *Op = new MBlazeOperand(Memory);
- Op->Mem.Base = Base;
- Op->Mem.Off = Off;
- Op->Mem.OffReg = 0;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-
- static MBlazeOperand *CreateMem(unsigned Base, unsigned Off, SMLoc S,
- SMLoc E) {
- MBlazeOperand *Op = new MBlazeOperand(Memory);
- Op->Mem.Base = Base;
- Op->Mem.OffReg = Off;
- Op->Mem.Off = 0;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-};
-
-} // end anonymous namespace.
-
-void MBlazeOperand::print(raw_ostream &OS) const {
- switch (Kind) {
- case Immediate:
- getImm()->print(OS);
- break;
- case Register:
- OS << "<register R";
- OS << getMBlazeRegisterNumbering(getReg()) << ">";
- break;
- case Token:
- OS << "'" << getToken() << "'";
- break;
- case Memory: {
- OS << "<memory R";
- OS << getMBlazeRegisterNumbering(getMemBase());
- OS << ", ";
-
- unsigned RegOff = getMemOffReg();
- if (RegOff)
- OS << "R" << getMBlazeRegisterNumbering(RegOff);
- else
- OS << getMemOff();
- OS << ">";
- }
- break;
- case Fsl:
- getFslImm()->print(OS);
- break;
- }
-}
-
-/// @name Auto-generated Match Functions
-/// {
-
-static unsigned MatchRegisterName(StringRef Name);
-
-/// }
-//
-bool MBlazeAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
- MCInst Inst;
- switch (MatchInstructionImpl(Operands, Inst, ErrorInfo,
- MatchingInlineAsm)) {
- default: break;
- case Match_Success:
- Out.EmitInstruction(Inst);
- return false;
- case Match_MissingFeature:
- return Error(IDLoc, "instruction use requires an option to be enabled");
- case Match_MnemonicFail:
- return Error(IDLoc, "unrecognized instruction mnemonic");
- case Match_InvalidOperand: {
- SMLoc ErrorLoc = IDLoc;
- if (ErrorInfo != ~0U) {
- if (ErrorInfo >= Operands.size())
- return Error(IDLoc, "too few operands for instruction");
-
- ErrorLoc = ((MBlazeOperand*)Operands[ErrorInfo])->getStartLoc();
- if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
- }
-
- return Error(ErrorLoc, "invalid operand for instruction");
- }
- }
-
- llvm_unreachable("Implement any new match types added!");
-}
-
-MBlazeOperand *MBlazeAsmParser::
-ParseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- if (Operands.size() != 4)
- return 0;
-
- MBlazeOperand &Base = *(MBlazeOperand*)Operands[2];
- MBlazeOperand &Offset = *(MBlazeOperand*)Operands[3];
-
- SMLoc S = Base.getStartLoc();
- SMLoc O = Offset.getStartLoc();
- SMLoc E = Offset.getEndLoc();
-
- if (!Base.isReg()) {
- Error(S, "base address must be a register");
- return 0;
- }
-
- if (!Offset.isReg() && !Offset.isImm()) {
- Error(O, "offset must be a register or immediate");
- return 0;
- }
-
- MBlazeOperand *Op;
- if (Offset.isReg())
- Op = MBlazeOperand::CreateMem(Base.getReg(), Offset.getReg(), S, E);
- else
- Op = MBlazeOperand::CreateMem(Base.getReg(), Offset.getImm(), S, E);
-
- delete Operands.pop_back_val();
- delete Operands.pop_back_val();
- Operands.push_back(Op);
-
- return Op;
-}
-
-bool MBlazeAsmParser::ParseRegister(unsigned &RegNo,
- SMLoc &StartLoc, SMLoc &EndLoc) {
- MBlazeOperand *Reg = ParseRegister(StartLoc, EndLoc);
- if (!Reg)
- return true;
- RegNo = Reg->getReg();
- return false;
-}
-
-MBlazeOperand *MBlazeAsmParser::ParseRegister() {
- SMLoc S, E;
- return ParseRegister(S, E);
-}
-
-MBlazeOperand *MBlazeAsmParser::ParseRegister(SMLoc &StartLoc, SMLoc &EndLoc) {
- StartLoc = Parser.getTok().getLoc();
- EndLoc = Parser.getTok().getEndLoc();
-
- if (getLexer().getKind() != AsmToken::Identifier)
- return 0;
-
- unsigned RegNo = MatchRegisterName(getLexer().getTok().getIdentifier());
- if (RegNo == 0)
- return 0;
-
- getLexer().Lex();
- return MBlazeOperand::CreateReg(RegNo, StartLoc, EndLoc);
-}
-
-static unsigned MatchFslRegister(StringRef String) {
- if (!String.startswith("rfsl"))
- return -1;
-
- unsigned regNum;
- if (String.substr(4).getAsInteger(10,regNum))
- return -1;
-
- return regNum;
-}
-
-MBlazeOperand *MBlazeAsmParser::ParseFsl() {
- SMLoc S = Parser.getTok().getLoc();
- SMLoc E = Parser.getTok().getEndLoc();
-
- switch (getLexer().getKind()) {
- default: return 0;
- case AsmToken::Identifier:
- unsigned reg = MatchFslRegister(getLexer().getTok().getIdentifier());
- if (reg >= 16)
- return 0;
-
- getLexer().Lex();
- const MCExpr *EVal = MCConstantExpr::Create(reg,getContext());
- return MBlazeOperand::CreateFslImm(EVal,S,E);
- }
-}
-
-MBlazeOperand *MBlazeAsmParser::ParseImmediate() {
- SMLoc S = Parser.getTok().getLoc();
- SMLoc E = Parser.getTok().getEndLoc();
-
- const MCExpr *EVal;
- switch (getLexer().getKind()) {
- default: return 0;
- case AsmToken::LParen:
- case AsmToken::Plus:
- case AsmToken::Minus:
- case AsmToken::Integer:
- case AsmToken::Identifier:
- if (getParser().parseExpression(EVal))
- return 0;
-
- return MBlazeOperand::CreateImm(EVal, S, E);
- }
-}
-
-MBlazeOperand *MBlazeAsmParser::
-ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- MBlazeOperand *Op;
-
- // Attempt to parse the next token as a register name
- Op = ParseRegister();
-
- // Attempt to parse the next token as an FSL immediate
- if (!Op)
- Op = ParseFsl();
-
- // Attempt to parse the next token as an immediate
- if (!Op)
- Op = ParseImmediate();
-
- // If the token could not be parsed then fail
- if (!Op) {
- Error(Parser.getTok().getLoc(), "unknown operand");
- return 0;
- }
-
- // Push the parsed operand into the list of operands
- Operands.push_back(Op);
- return Op;
-}
-
-/// Parse an mblaze instruction mnemonic followed by its operands.
-bool MBlazeAsmParser::
-ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // The first operands is the token for the instruction name
- size_t dotLoc = Name.find('.');
- Operands.push_back(MBlazeOperand::CreateToken(Name.substr(0,dotLoc),NameLoc));
- if (dotLoc < Name.size())
- Operands.push_back(MBlazeOperand::CreateToken(Name.substr(dotLoc),NameLoc));
-
- // If there are no more operands then finish
- if (getLexer().is(AsmToken::EndOfStatement))
- return false;
-
- // Parse the first operand
- if (!ParseOperand(Operands))
- return true;
-
- while (getLexer().isNot(AsmToken::EndOfStatement) &&
- getLexer().is(AsmToken::Comma)) {
- // Consume the comma token
- getLexer().Lex();
-
- // Parse the next operand
- if (!ParseOperand(Operands))
- return true;
- }
-
- // If the instruction requires a memory operand then we need to
- // replace the last two operands (base+offset) with a single
- // memory operand.
- if (Name.startswith("lw") || Name.startswith("sw") ||
- Name.startswith("lh") || Name.startswith("sh") ||
- Name.startswith("lb") || Name.startswith("sb"))
- return (ParseMemory(Operands) == NULL);
-
- return false;
-}
-
-/// ParseDirective parses the MBlaze specific directives
-bool MBlazeAsmParser::ParseDirective(AsmToken DirectiveID) {
- StringRef IDVal = DirectiveID.getIdentifier();
- if (IDVal == ".word")
- return ParseDirectiveWord(2, DirectiveID.getLoc());
- return true;
-}
-
-/// ParseDirectiveWord
-/// ::= .word [ expression (, expression)* ]
-bool MBlazeAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- for (;;) {
- const MCExpr *Value;
- if (getParser().parseExpression(Value))
- return true;
-
- getParser().getStreamer().EmitValue(Value, Size);
-
- if (getLexer().is(AsmToken::EndOfStatement))
- break;
-
- // FIXME: Improve diagnostic.
- if (getLexer().isNot(AsmToken::Comma))
- return Error(L, "unexpected token in directive");
- Parser.Lex();
- }
- }
-
- Parser.Lex();
- return false;
-}
-
-/// Force static initialization.
-extern "C" void LLVMInitializeMBlazeAsmParser() {
- RegisterMCAsmParser<MBlazeAsmParser> X(TheMBlazeTarget);
-}
-
-#define GET_REGISTER_MATCHER
-#define GET_MATCHER_IMPLEMENTATION
-#include "MBlazeGenAsmMatcher.inc"
diff --git a/lib/Target/MBlaze/AsmParser/Makefile b/lib/Target/MBlaze/AsmParser/Makefile
deleted file mode 100644
index 1e68766a084f..000000000000
--- a/lib/Target/MBlaze/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/MBlaze/AsmParser/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMBlazeAsmParser
-
-# Hack: we need to include 'main' MBlaze target directory for private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/MBlaze/CMakeLists.txt b/lib/Target/MBlaze/CMakeLists.txt
deleted file mode 100644
index 91a41f39b5d8..000000000000
--- a/lib/Target/MBlaze/CMakeLists.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-set(LLVM_TARGET_DEFINITIONS MBlaze.td)
-
-tablegen(LLVM MBlazeGenRegisterInfo.inc -gen-register-info)
-tablegen(LLVM MBlazeGenInstrInfo.inc -gen-instr-info)
-tablegen(LLVM MBlazeGenCodeEmitter.inc -gen-emitter)
-tablegen(LLVM MBlazeGenAsmWriter.inc -gen-asm-writer)
-tablegen(LLVM MBlazeGenAsmMatcher.inc -gen-asm-matcher)
-tablegen(LLVM MBlazeGenDAGISel.inc -gen-dag-isel)
-tablegen(LLVM MBlazeGenCallingConv.inc -gen-callingconv)
-tablegen(LLVM MBlazeGenSubtargetInfo.inc -gen-subtarget)
-tablegen(LLVM MBlazeGenIntrinsics.inc -gen-tgt-intrinsic)
-add_public_tablegen_target(MBlazeCommonTableGen)
-
-add_llvm_target(MBlazeCodeGen
- MBlazeDelaySlotFiller.cpp
- MBlazeInstrInfo.cpp
- MBlazeISelDAGToDAG.cpp
- MBlazeISelLowering.cpp
- MBlazeFrameLowering.cpp
- MBlazeMachineFunction.cpp
- MBlazeRegisterInfo.cpp
- MBlazeSubtarget.cpp
- MBlazeTargetMachine.cpp
- MBlazeTargetObjectFile.cpp
- MBlazeIntrinsicInfo.cpp
- MBlazeSelectionDAGInfo.cpp
- MBlazeAsmPrinter.cpp
- MBlazeMCInstLower.cpp
- )
-
-add_dependencies(LLVMMBlazeCodeGen intrinsics_gen)
-
-add_subdirectory(AsmParser)
-add_subdirectory(Disassembler)
-add_subdirectory(InstPrinter)
-add_subdirectory(TargetInfo)
-add_subdirectory(MCTargetDesc)
diff --git a/lib/Target/MBlaze/Disassembler/CMakeLists.txt b/lib/Target/MBlaze/Disassembler/CMakeLists.txt
deleted file mode 100644
index be2dce156d56..000000000000
--- a/lib/Target/MBlaze/Disassembler/CMakeLists.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/..
- ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMMBlazeDisassembler
- MBlazeDisassembler.cpp
- )
-
-# workaround for hanging compilation on MSVC9 and 10
-if( MSVC_VERSION EQUAL 1500 OR MSVC_VERSION EQUAL 1600 )
-set_property(
- SOURCE MBlazeDisassembler.cpp
- PROPERTY COMPILE_FLAGS "/Od"
- )
-endif()
-
-add_dependencies(LLVMMBlazeDisassembler MBlazeCommonTableGen)
diff --git a/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp b/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
deleted file mode 100644
index c03ab3803b60..000000000000
--- a/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
+++ /dev/null
@@ -1,719 +0,0 @@
-//===-- MBlazeDisassembler.cpp - Disassembler for MicroBlaze -------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is part of the MBlaze Disassembler. It contains code to translate
-// the data produced by the decoder into MCInsts.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeDisassembler.h"
-#include "MBlaze.h"
-#include "llvm/MC/MCDisassembler.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstrDesc.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/MemoryObject.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-
-// #include "MBlazeGenDecoderTables.inc"
-// #include "MBlazeGenRegisterNames.inc"
-
-namespace llvm {
-extern const MCInstrDesc MBlazeInsts[];
-}
-
-using namespace llvm;
-
-const uint16_t UNSUPPORTED = -1;
-
-static const uint16_t mblazeBinary2Opcode[] = {
- MBlaze::ADD, MBlaze::RSUB, MBlaze::ADDC, MBlaze::RSUBC, //00,01,02,03
- MBlaze::ADDK, MBlaze::RSUBK, MBlaze::ADDKC, MBlaze::RSUBKC, //04,05,06,07
- MBlaze::ADDI, MBlaze::RSUBI, MBlaze::ADDIC, MBlaze::RSUBIC, //08,09,0A,0B
- MBlaze::ADDIK, MBlaze::RSUBIK, MBlaze::ADDIKC, MBlaze::RSUBIKC, //0C,0D,0E,0F
-
- MBlaze::MUL, MBlaze::BSRL, MBlaze::IDIV, MBlaze::GETD, //10,11,12,13
- UNSUPPORTED, UNSUPPORTED, MBlaze::FADD, UNSUPPORTED, //14,15,16,17
- MBlaze::MULI, MBlaze::BSRLI, UNSUPPORTED, MBlaze::GET, //18,19,1A,1B
- UNSUPPORTED, UNSUPPORTED, UNSUPPORTED, UNSUPPORTED, //1C,1D,1E,1F
-
- MBlaze::OR, MBlaze::AND, MBlaze::XOR, MBlaze::ANDN, //20,21,22,23
- MBlaze::SEXT8, MBlaze::MFS, MBlaze::BR, MBlaze::BEQ, //24,25,26,27
- MBlaze::ORI, MBlaze::ANDI, MBlaze::XORI, MBlaze::ANDNI, //28,29,2A,2B
- MBlaze::IMM, MBlaze::RTSD, MBlaze::BRI, MBlaze::BEQI, //2C,2D,2E,2F
-
- MBlaze::LBU, MBlaze::LHU, MBlaze::LW, UNSUPPORTED, //30,31,32,33
- MBlaze::SB, MBlaze::SH, MBlaze::SW, UNSUPPORTED, //34,35,36,37
- MBlaze::LBUI, MBlaze::LHUI, MBlaze::LWI, UNSUPPORTED, //38,39,3A,3B
- MBlaze::SBI, MBlaze::SHI, MBlaze::SWI, UNSUPPORTED, //3C,3D,3E,3F
-};
-
-static unsigned getRD(uint32_t insn) {
- if (!isMBlazeRegister((insn>>21)&0x1F))
- return UNSUPPORTED;
- return getMBlazeRegisterFromNumbering((insn>>21)&0x1F);
-}
-
-static unsigned getRA(uint32_t insn) {
- if (!getMBlazeRegisterFromNumbering((insn>>16)&0x1F))
- return UNSUPPORTED;
- return getMBlazeRegisterFromNumbering((insn>>16)&0x1F);
-}
-
-static unsigned getRB(uint32_t insn) {
- if (!getMBlazeRegisterFromNumbering((insn>>11)&0x1F))
- return UNSUPPORTED;
- return getMBlazeRegisterFromNumbering((insn>>11)&0x1F);
-}
-
-static int64_t getRS(uint32_t insn) {
- if (!isSpecialMBlazeRegister(insn&0x3FFF))
- return UNSUPPORTED;
- return getSpecialMBlazeRegisterFromNumbering(insn&0x3FFF);
-}
-
-static int64_t getIMM(uint32_t insn) {
- int16_t val = (insn & 0xFFFF);
- return val;
-}
-
-static int64_t getSHT(uint32_t insn) {
- int16_t val = (insn & 0x1F);
- return val;
-}
-
-static unsigned getFLAGS(int32_t insn) {
- return (insn & 0x7FF);
-}
-
-static int64_t getFSL(uint32_t insn) {
- int16_t val = (insn & 0xF);
- return val;
-}
-
-static unsigned decodeMUL(uint32_t insn) {
- switch (getFLAGS(insn)) {
- default: return UNSUPPORTED;
- case 0: return MBlaze::MUL;
- case 1: return MBlaze::MULH;
- case 2: return MBlaze::MULHSU;
- case 3: return MBlaze::MULHU;
- }
-}
-
-static unsigned decodeSEXT(uint32_t insn) {
- switch (insn&0x7FF) {
- default: return UNSUPPORTED;
- case 0x60: return MBlaze::SEXT8;
- case 0x68: return MBlaze::WIC;
- case 0x64: return MBlaze::WDC;
- case 0x66: return MBlaze::WDCC;
- case 0x74: return MBlaze::WDCF;
- case 0x61: return MBlaze::SEXT16;
- case 0x41: return MBlaze::SRL;
- case 0x21: return MBlaze::SRC;
- case 0x01: return MBlaze::SRA;
- case 0xE0: return MBlaze::CLZ;
- }
-}
-
-static unsigned decodeBEQ(uint32_t insn) {
- switch ((insn>>21)&0x1F) {
- default: return UNSUPPORTED;
- case 0x00: return MBlaze::BEQ;
- case 0x10: return MBlaze::BEQD;
- case 0x05: return MBlaze::BGE;
- case 0x15: return MBlaze::BGED;
- case 0x04: return MBlaze::BGT;
- case 0x14: return MBlaze::BGTD;
- case 0x03: return MBlaze::BLE;
- case 0x13: return MBlaze::BLED;
- case 0x02: return MBlaze::BLT;
- case 0x12: return MBlaze::BLTD;
- case 0x01: return MBlaze::BNE;
- case 0x11: return MBlaze::BNED;
- }
-}
-
-static unsigned decodeBEQI(uint32_t insn) {
- switch ((insn>>21)&0x1F) {
- default: return UNSUPPORTED;
- case 0x00: return MBlaze::BEQI;
- case 0x10: return MBlaze::BEQID;
- case 0x05: return MBlaze::BGEI;
- case 0x15: return MBlaze::BGEID;
- case 0x04: return MBlaze::BGTI;
- case 0x14: return MBlaze::BGTID;
- case 0x03: return MBlaze::BLEI;
- case 0x13: return MBlaze::BLEID;
- case 0x02: return MBlaze::BLTI;
- case 0x12: return MBlaze::BLTID;
- case 0x01: return MBlaze::BNEI;
- case 0x11: return MBlaze::BNEID;
- }
-}
-
-static unsigned decodeBR(uint32_t insn) {
- switch ((insn>>16)&0x1F) {
- default: return UNSUPPORTED;
- case 0x00: return MBlaze::BR;
- case 0x08: return MBlaze::BRA;
- case 0x0C: return MBlaze::BRK;
- case 0x10: return MBlaze::BRD;
- case 0x14: return MBlaze::BRLD;
- case 0x18: return MBlaze::BRAD;
- case 0x1C: return MBlaze::BRALD;
- }
-}
-
-static unsigned decodeBRI(uint32_t insn) {
- switch (insn&0x3FFFFFF) {
- default: break;
- case 0x0020004: return MBlaze::IDMEMBAR;
- case 0x0220004: return MBlaze::DMEMBAR;
- case 0x0420004: return MBlaze::IMEMBAR;
- }
-
- switch ((insn>>16)&0x1F) {
- default: return UNSUPPORTED;
- case 0x00: return MBlaze::BRI;
- case 0x08: return MBlaze::BRAI;
- case 0x0C: return MBlaze::BRKI;
- case 0x10: return MBlaze::BRID;
- case 0x14: return MBlaze::BRLID;
- case 0x18: return MBlaze::BRAID;
- case 0x1C: return MBlaze::BRALID;
- }
-}
-
-static unsigned decodeBSRL(uint32_t insn) {
- switch ((insn>>9)&0x3) {
- default: return UNSUPPORTED;
- case 0x2: return MBlaze::BSLL;
- case 0x1: return MBlaze::BSRA;
- case 0x0: return MBlaze::BSRL;
- }
-}
-
-static unsigned decodeBSRLI(uint32_t insn) {
- switch ((insn>>9)&0x3) {
- default: return UNSUPPORTED;
- case 0x2: return MBlaze::BSLLI;
- case 0x1: return MBlaze::BSRAI;
- case 0x0: return MBlaze::BSRLI;
- }
-}
-
-static unsigned decodeRSUBK(uint32_t insn) {
- switch (getFLAGS(insn)) {
- default: return UNSUPPORTED;
- case 0x0: return MBlaze::RSUBK;
- case 0x1: return MBlaze::CMP;
- case 0x3: return MBlaze::CMPU;
- }
-}
-
-static unsigned decodeFADD(uint32_t insn) {
- switch (getFLAGS(insn)) {
- default: return UNSUPPORTED;
- case 0x000: return MBlaze::FADD;
- case 0x080: return MBlaze::FRSUB;
- case 0x100: return MBlaze::FMUL;
- case 0x180: return MBlaze::FDIV;
- case 0x200: return MBlaze::FCMP_UN;
- case 0x210: return MBlaze::FCMP_LT;
- case 0x220: return MBlaze::FCMP_EQ;
- case 0x230: return MBlaze::FCMP_LE;
- case 0x240: return MBlaze::FCMP_GT;
- case 0x250: return MBlaze::FCMP_NE;
- case 0x260: return MBlaze::FCMP_GE;
- case 0x280: return MBlaze::FLT;
- case 0x300: return MBlaze::FINT;
- case 0x380: return MBlaze::FSQRT;
- }
-}
-
-static unsigned decodeGET(uint32_t insn) {
- switch ((insn>>10)&0x3F) {
- default: return UNSUPPORTED;
- case 0x00: return MBlaze::GET;
- case 0x01: return MBlaze::EGET;
- case 0x02: return MBlaze::AGET;
- case 0x03: return MBlaze::EAGET;
- case 0x04: return MBlaze::TGET;
- case 0x05: return MBlaze::TEGET;
- case 0x06: return MBlaze::TAGET;
- case 0x07: return MBlaze::TEAGET;
- case 0x08: return MBlaze::CGET;
- case 0x09: return MBlaze::ECGET;
- case 0x0A: return MBlaze::CAGET;
- case 0x0B: return MBlaze::ECAGET;
- case 0x0C: return MBlaze::TCGET;
- case 0x0D: return MBlaze::TECGET;
- case 0x0E: return MBlaze::TCAGET;
- case 0x0F: return MBlaze::TECAGET;
- case 0x10: return MBlaze::NGET;
- case 0x11: return MBlaze::NEGET;
- case 0x12: return MBlaze::NAGET;
- case 0x13: return MBlaze::NEAGET;
- case 0x14: return MBlaze::TNGET;
- case 0x15: return MBlaze::TNEGET;
- case 0x16: return MBlaze::TNAGET;
- case 0x17: return MBlaze::TNEAGET;
- case 0x18: return MBlaze::NCGET;
- case 0x19: return MBlaze::NECGET;
- case 0x1A: return MBlaze::NCAGET;
- case 0x1B: return MBlaze::NECAGET;
- case 0x1C: return MBlaze::TNCGET;
- case 0x1D: return MBlaze::TNECGET;
- case 0x1E: return MBlaze::TNCAGET;
- case 0x1F: return MBlaze::TNECAGET;
- case 0x20: return MBlaze::PUT;
- case 0x22: return MBlaze::APUT;
- case 0x24: return MBlaze::TPUT;
- case 0x26: return MBlaze::TAPUT;
- case 0x28: return MBlaze::CPUT;
- case 0x2A: return MBlaze::CAPUT;
- case 0x2C: return MBlaze::TCPUT;
- case 0x2E: return MBlaze::TCAPUT;
- case 0x30: return MBlaze::NPUT;
- case 0x32: return MBlaze::NAPUT;
- case 0x34: return MBlaze::TNPUT;
- case 0x36: return MBlaze::TNAPUT;
- case 0x38: return MBlaze::NCPUT;
- case 0x3A: return MBlaze::NCAPUT;
- case 0x3C: return MBlaze::TNCPUT;
- case 0x3E: return MBlaze::TNCAPUT;
- }
-}
-
-static unsigned decodeGETD(uint32_t insn) {
- switch ((insn>>5)&0x3F) {
- default: return UNSUPPORTED;
- case 0x00: return MBlaze::GETD;
- case 0x01: return MBlaze::EGETD;
- case 0x02: return MBlaze::AGETD;
- case 0x03: return MBlaze::EAGETD;
- case 0x04: return MBlaze::TGETD;
- case 0x05: return MBlaze::TEGETD;
- case 0x06: return MBlaze::TAGETD;
- case 0x07: return MBlaze::TEAGETD;
- case 0x08: return MBlaze::CGETD;
- case 0x09: return MBlaze::ECGETD;
- case 0x0A: return MBlaze::CAGETD;
- case 0x0B: return MBlaze::ECAGETD;
- case 0x0C: return MBlaze::TCGETD;
- case 0x0D: return MBlaze::TECGETD;
- case 0x0E: return MBlaze::TCAGETD;
- case 0x0F: return MBlaze::TECAGETD;
- case 0x10: return MBlaze::NGETD;
- case 0x11: return MBlaze::NEGETD;
- case 0x12: return MBlaze::NAGETD;
- case 0x13: return MBlaze::NEAGETD;
- case 0x14: return MBlaze::TNGETD;
- case 0x15: return MBlaze::TNEGETD;
- case 0x16: return MBlaze::TNAGETD;
- case 0x17: return MBlaze::TNEAGETD;
- case 0x18: return MBlaze::NCGETD;
- case 0x19: return MBlaze::NECGETD;
- case 0x1A: return MBlaze::NCAGETD;
- case 0x1B: return MBlaze::NECAGETD;
- case 0x1C: return MBlaze::TNCGETD;
- case 0x1D: return MBlaze::TNECGETD;
- case 0x1E: return MBlaze::TNCAGETD;
- case 0x1F: return MBlaze::TNECAGETD;
- case 0x20: return MBlaze::PUTD;
- case 0x22: return MBlaze::APUTD;
- case 0x24: return MBlaze::TPUTD;
- case 0x26: return MBlaze::TAPUTD;
- case 0x28: return MBlaze::CPUTD;
- case 0x2A: return MBlaze::CAPUTD;
- case 0x2C: return MBlaze::TCPUTD;
- case 0x2E: return MBlaze::TCAPUTD;
- case 0x30: return MBlaze::NPUTD;
- case 0x32: return MBlaze::NAPUTD;
- case 0x34: return MBlaze::TNPUTD;
- case 0x36: return MBlaze::TNAPUTD;
- case 0x38: return MBlaze::NCPUTD;
- case 0x3A: return MBlaze::NCAPUTD;
- case 0x3C: return MBlaze::TNCPUTD;
- case 0x3E: return MBlaze::TNCAPUTD;
- }
-}
-
-static unsigned decodeIDIV(uint32_t insn) {
- switch (insn&0x3) {
- default: return UNSUPPORTED;
- case 0x0: return MBlaze::IDIV;
- case 0x2: return MBlaze::IDIVU;
- }
-}
-
-static unsigned decodeLBU(uint32_t insn) {
- switch ((insn>>9)&0x1) {
- default: return UNSUPPORTED;
- case 0x0: return MBlaze::LBU;
- case 0x1: return MBlaze::LBUR;
- }
-}
-
-static unsigned decodeLHU(uint32_t insn) {
- switch ((insn>>9)&0x1) {
- default: return UNSUPPORTED;
- case 0x0: return MBlaze::LHU;
- case 0x1: return MBlaze::LHUR;
- }
-}
-
-static unsigned decodeLW(uint32_t insn) {
- switch ((insn>>9)&0x3) {
- default: return UNSUPPORTED;
- case 0x0: return MBlaze::LW;
- case 0x1: return MBlaze::LWR;
- case 0x2: return MBlaze::LWX;
- }
-}
-
-static unsigned decodeSB(uint32_t insn) {
- switch ((insn>>9)&0x1) {
- default: return UNSUPPORTED;
- case 0x0: return MBlaze::SB;
- case 0x1: return MBlaze::SBR;
- }
-}
-
-static unsigned decodeSH(uint32_t insn) {
- switch ((insn>>9)&0x1) {
- default: return UNSUPPORTED;
- case 0x0: return MBlaze::SH;
- case 0x1: return MBlaze::SHR;
- }
-}
-
-static unsigned decodeSW(uint32_t insn) {
- switch ((insn>>9)&0x3) {
- default: return UNSUPPORTED;
- case 0x0: return MBlaze::SW;
- case 0x1: return MBlaze::SWR;
- case 0x2: return MBlaze::SWX;
- }
-}
-
-static unsigned decodeMFS(uint32_t insn) {
- switch ((insn>>15)&0x1) {
- default: return UNSUPPORTED;
- case 0x0:
- switch ((insn>>16)&0x1) {
- default: return UNSUPPORTED;
- case 0x0: return MBlaze::MSRSET;
- case 0x1: return MBlaze::MSRCLR;
- }
- case 0x1:
- switch ((insn>>14)&0x1) {
- default: return UNSUPPORTED;
- case 0x0: return MBlaze::MFS;
- case 0x1: return MBlaze::MTS;
- }
- }
-}
-
-static unsigned decodeOR(uint32_t insn) {
- switch (getFLAGS(insn)) {
- default: return UNSUPPORTED;
- case 0x000: return MBlaze::OR;
- case 0x400: return MBlaze::PCMPBF;
- }
-}
-
-static unsigned decodeXOR(uint32_t insn) {
- switch (getFLAGS(insn)) {
- default: return UNSUPPORTED;
- case 0x000: return MBlaze::XOR;
- case 0x400: return MBlaze::PCMPEQ;
- }
-}
-
-static unsigned decodeANDN(uint32_t insn) {
- switch (getFLAGS(insn)) {
- default: return UNSUPPORTED;
- case 0x000: return MBlaze::ANDN;
- case 0x400: return MBlaze::PCMPNE;
- }
-}
-
-static unsigned decodeRTSD(uint32_t insn) {
- switch ((insn>>21)&0x1F) {
- default: return UNSUPPORTED;
- case 0x10: return MBlaze::RTSD;
- case 0x11: return MBlaze::RTID;
- case 0x12: return MBlaze::RTBD;
- case 0x14: return MBlaze::RTED;
- }
-}
-
-static unsigned getOPCODE(uint32_t insn) {
- unsigned opcode = mblazeBinary2Opcode[ (insn>>26)&0x3F ];
- switch (opcode) {
- case MBlaze::MUL: return decodeMUL(insn);
- case MBlaze::SEXT8: return decodeSEXT(insn);
- case MBlaze::BEQ: return decodeBEQ(insn);
- case MBlaze::BEQI: return decodeBEQI(insn);
- case MBlaze::BR: return decodeBR(insn);
- case MBlaze::BRI: return decodeBRI(insn);
- case MBlaze::BSRL: return decodeBSRL(insn);
- case MBlaze::BSRLI: return decodeBSRLI(insn);
- case MBlaze::RSUBK: return decodeRSUBK(insn);
- case MBlaze::FADD: return decodeFADD(insn);
- case MBlaze::GET: return decodeGET(insn);
- case MBlaze::GETD: return decodeGETD(insn);
- case MBlaze::IDIV: return decodeIDIV(insn);
- case MBlaze::LBU: return decodeLBU(insn);
- case MBlaze::LHU: return decodeLHU(insn);
- case MBlaze::LW: return decodeLW(insn);
- case MBlaze::SB: return decodeSB(insn);
- case MBlaze::SH: return decodeSH(insn);
- case MBlaze::SW: return decodeSW(insn);
- case MBlaze::MFS: return decodeMFS(insn);
- case MBlaze::OR: return decodeOR(insn);
- case MBlaze::XOR: return decodeXOR(insn);
- case MBlaze::ANDN: return decodeANDN(insn);
- case MBlaze::RTSD: return decodeRTSD(insn);
- default: return opcode;
- }
-}
-
-//
-// Public interface for the disassembler
-//
-
-MCDisassembler::DecodeStatus MBlazeDisassembler::getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream,
- raw_ostream &cStream) const {
- // The machine instruction.
- uint32_t insn;
- uint64_t read;
- uint8_t bytes[4];
-
- // By default we consume 1 byte on failure
- size = 1;
-
- // We want to read exactly 4 bytes of data.
- if (region.readBytes(address, 4, (uint8_t*)bytes, &read) == -1 || read < 4)
- return Fail;
-
- // Encoded as a big-endian 32-bit word in the stream.
- insn = (bytes[0]<<24) | (bytes[1]<<16) | (bytes[2]<< 8) | (bytes[3]<<0);
-
- // Get the MCInst opcode from the binary instruction and make sure
- // that it is a valid instruction.
- unsigned opcode = getOPCODE(insn);
- if (opcode == UNSUPPORTED)
- return Fail;
-
- instr.setOpcode(opcode);
-
- unsigned RD = getRD(insn);
- unsigned RA = getRA(insn);
- unsigned RB = getRB(insn);
- unsigned RS = getRS(insn);
-
- uint64_t tsFlags = MBlazeInsts[opcode].TSFlags;
- switch ((tsFlags & MBlazeII::FormMask)) {
- default:
- return Fail;
-
- case MBlazeII::FC:
- break;
-
- case MBlazeII::FRRRR:
- if (RD == UNSUPPORTED || RA == UNSUPPORTED || RB == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateReg(RB));
- instr.addOperand(MCOperand::CreateReg(RA));
- break;
-
- case MBlazeII::FRRR:
- if (RD == UNSUPPORTED || RA == UNSUPPORTED || RB == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateReg(RA));
- instr.addOperand(MCOperand::CreateReg(RB));
- break;
-
- case MBlazeII::FRR:
- if (RD == UNSUPPORTED || RA == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateReg(RA));
- break;
-
- case MBlazeII::FRI:
- switch (opcode) {
- default:
- return Fail;
- case MBlaze::MFS:
- if (RD == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateImm(insn&0x3FFF));
- break;
- case MBlaze::MTS:
- if (RA == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateImm(insn&0x3FFF));
- instr.addOperand(MCOperand::CreateReg(RA));
- break;
- case MBlaze::MSRSET:
- case MBlaze::MSRCLR:
- if (RD == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateImm(insn&0x7FFF));
- break;
- }
- break;
-
- case MBlazeII::FRRI:
- if (RD == UNSUPPORTED || RA == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateReg(RA));
- switch (opcode) {
- default:
- instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
- break;
- case MBlaze::BSRLI:
- case MBlaze::BSRAI:
- case MBlaze::BSLLI:
- instr.addOperand(MCOperand::CreateImm(insn&0x1F));
- break;
- }
- break;
-
- case MBlazeII::FCRR:
- if (RA == UNSUPPORTED || RB == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RA));
- instr.addOperand(MCOperand::CreateReg(RB));
- break;
-
- case MBlazeII::FCRI:
- if (RA == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RA));
- instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
- break;
-
- case MBlazeII::FRCR:
- if (RD == UNSUPPORTED || RB == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateReg(RB));
- break;
-
- case MBlazeII::FRCI:
- if (RD == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
- break;
-
- case MBlazeII::FCCR:
- if (RB == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RB));
- break;
-
- case MBlazeII::FCCI:
- instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
- break;
-
- case MBlazeII::FRRCI:
- if (RD == UNSUPPORTED || RA == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateReg(RA));
- instr.addOperand(MCOperand::CreateImm(getSHT(insn)));
- break;
-
- case MBlazeII::FRRC:
- if (RD == UNSUPPORTED || RA == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateReg(RA));
- break;
-
- case MBlazeII::FRCX:
- if (RD == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateImm(getFSL(insn)));
- break;
-
- case MBlazeII::FRCS:
- if (RD == UNSUPPORTED || RS == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateReg(RS));
- break;
-
- case MBlazeII::FCRCS:
- if (RS == UNSUPPORTED || RA == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RS));
- instr.addOperand(MCOperand::CreateReg(RA));
- break;
-
- case MBlazeII::FCRCX:
- if (RA == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RA));
- instr.addOperand(MCOperand::CreateImm(getFSL(insn)));
- break;
-
- case MBlazeII::FCX:
- instr.addOperand(MCOperand::CreateImm(getFSL(insn)));
- break;
-
- case MBlazeII::FCR:
- if (RB == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RB));
- break;
-
- case MBlazeII::FRIR:
- if (RD == UNSUPPORTED || RA == UNSUPPORTED)
- return Fail;
- instr.addOperand(MCOperand::CreateReg(RD));
- instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
- instr.addOperand(MCOperand::CreateReg(RA));
- break;
- }
-
- // We always consume 4 bytes of data on success
- size = 4;
-
- return Success;
-}
-
-static MCDisassembler *createMBlazeDisassembler(const Target &T,
- const MCSubtargetInfo &STI) {
- return new MBlazeDisassembler(STI);
-}
-
-extern "C" void LLVMInitializeMBlazeDisassembler() {
- // Register the disassembler.
- TargetRegistry::RegisterMCDisassembler(TheMBlazeTarget,
- createMBlazeDisassembler);
-}
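As a side note on the decode path above: the instruction word is assembled from the stream in big-endian order and the individual fields are then extracted with fixed shifts and masks. A minimal, self-contained sketch of that bit manipulation (the helper names here are illustrative only; the shift amounts are the ones used in getInstruction and getOPCODE above):

#include <cstdint>

// Assemble four stream bytes into a big-endian 32-bit instruction word,
// as done at the top of MBlazeDisassembler::getInstruction().
static uint32_t assembleWord(const uint8_t b[4]) {
  return (uint32_t(b[0]) << 24) | (uint32_t(b[1]) << 16) |
         (uint32_t(b[2]) << 8)  |  uint32_t(b[3]);
}

// The major opcode lives in bits 31..26 and indexes mblazeBinary2Opcode;
// decodeRTSD() further inspects bits 25..21.
static unsigned majorOpcode(uint32_t insn) { return (insn >> 26) & 0x3F; }
static unsigned rtsdField(uint32_t insn)   { return (insn >> 21) & 0x1F; }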
diff --git a/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h b/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h
deleted file mode 100644
index b8ff8f607265..000000000000
--- a/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===-- MBlazeDisassembler.h - Disassembler for MicroBlaze -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is part of the MBlaze Disassembler. It is the header for
-// MBlazeDisassembler, a subclass of MCDisassembler.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZEDISASSEMBLER_H
-#define MBLAZEDISASSEMBLER_H
-
-#include "llvm/MC/MCDisassembler.h"
-
-namespace llvm {
-
-class MCInst;
-class MemoryObject;
-class raw_ostream;
-
-/// MBlazeDisassembler - Disassembler for all MBlaze platforms.
-class MBlazeDisassembler : public MCDisassembler {
-public:
- /// Constructor - Initializes the disassembler.
- ///
- MBlazeDisassembler(const MCSubtargetInfo &STI) :
- MCDisassembler(STI) {
- }
-
- ~MBlazeDisassembler() {
- }
-
- /// getInstruction - See MCDisassembler.
- MCDisassembler::DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream,
- raw_ostream &cStream) const;
-};
-
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/MBlaze/Disassembler/Makefile b/lib/Target/MBlaze/Disassembler/Makefile
deleted file mode 100644
index 0530b3286bc4..000000000000
--- a/lib/Target/MBlaze/Disassembler/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/MBlaze/Disassembler/Makefile -------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMBlazeDisassembler
-
-# Hack: we need to include 'main' MBlaze target directory to grab headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/MBlaze/InstPrinter/CMakeLists.txt b/lib/Target/MBlaze/InstPrinter/CMakeLists.txt
deleted file mode 100644
index 586e2d3eefc3..000000000000
--- a/lib/Target/MBlaze/InstPrinter/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/..
- ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMMBlazeAsmPrinter
- MBlazeInstPrinter.cpp
- )
-
-add_dependencies(LLVMMBlazeAsmPrinter MBlazeCommonTableGen)
diff --git a/lib/Target/MBlaze/InstPrinter/LLVMBuild.txt b/lib/Target/MBlaze/InstPrinter/LLVMBuild.txt
deleted file mode 100644
index 3a21a0560aef..000000000000
--- a/lib/Target/MBlaze/InstPrinter/LLVMBuild.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-;===- ./lib/Target/MBlaze/InstPrinter/LLVMBuild.txt ------------*- Conf -*--===;
-;
-; The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-; http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = MBlazeAsmPrinter
-parent = MBlaze
-required_libraries = MC Support
-add_to_library_groups = MBlaze
diff --git a/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp b/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp
deleted file mode 100644
index fc2b3d51b44c..000000000000
--- a/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-//===-- MBlazeInstPrinter.cpp - Convert MBlaze MCInst to assembly syntax --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints an MBlaze MCInst to a .s file.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asm-printer"
-#include "MBlazeInstPrinter.h"
-#include "MBlaze.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FormattedStream.h"
-using namespace llvm;
-
-
-// Include the auto-generated portion of the assembly writer.
-#include "MBlazeGenAsmWriter.inc"
-
-void MBlazeInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
- StringRef Annot) {
- printInstruction(MI, O);
- printAnnotation(O, Annot);
-}
-
-void MBlazeInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O, const char *Modifier) {
- assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
- const MCOperand &Op = MI->getOperand(OpNo);
- if (Op.isReg()) {
- O << getRegisterName(Op.getReg());
- } else if (Op.isImm()) {
- O << (int32_t)Op.getImm();
- } else {
- assert(Op.isExpr() && "unknown operand kind in printOperand");
- O << *Op.getExpr();
- }
-}
-
-void MBlazeInstPrinter::printFSLImm(const MCInst *MI, int OpNo,
- raw_ostream &O) {
- const MCOperand &MO = MI->getOperand(OpNo);
- if (MO.isImm())
- O << "rfsl" << MO.getImm();
- else
- printOperand(MI, OpNo, O, NULL);
-}
-
-void MBlazeInstPrinter::printUnsignedImm(const MCInst *MI, int OpNo,
- raw_ostream &O) {
- const MCOperand &MO = MI->getOperand(OpNo);
- if (MO.isImm())
- O << (uint32_t)MO.getImm();
- else
- printOperand(MI, OpNo, O, NULL);
-}
-
-void MBlazeInstPrinter::printMemOperand(const MCInst *MI, int OpNo,
- raw_ostream &O, const char *Modifier) {
- printOperand(MI, OpNo, O, NULL);
- O << ", ";
- printOperand(MI, OpNo+1, O, NULL);
-}
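For reference, the printers above format an FSL port immediate as "rfsl<N>" and unsigned immediates as plain 32-bit decimals. A small stand-alone sketch of the same formatting (names are illustrative; the real printers write to a raw_ostream):

#include <cstdint>
#include <sstream>
#include <string>

// Mirrors printFSLImm(): an FSL port immediate prints as "rfsl<N>".
static std::string formatFSL(unsigned port) {
  std::ostringstream os;
  os << "rfsl" << port;
  return os.str();
}

// Mirrors printUnsignedImm(): the value is printed as an unsigned
// 32-bit number, so -1 comes out as 4294967295.
static std::string formatUnsignedImm(int64_t imm) {
  std::ostringstream os;
  os << static_cast<uint32_t>(imm);
  return os.str();
}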
diff --git a/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h b/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h
deleted file mode 100644
index 51ba7c359a1b..000000000000
--- a/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//= MBlazeInstPrinter.h - Convert MBlaze MCInst to assembly syntax -*- C++ -*-//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints an MBlaze MCInst to a .s file.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZEINSTPRINTER_H
-#define MBLAZEINSTPRINTER_H
-
-#include "llvm/MC/MCInstPrinter.h"
-
-namespace llvm {
- class MCOperand;
-
- class MBlazeInstPrinter : public MCInstPrinter {
- public:
- MBlazeInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
- const MCRegisterInfo &MRI)
- : MCInstPrinter(MAI, MII, MRI) {}
-
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
-
- // Autogenerated by tblgen.
- void printInstruction(const MCInst *MI, raw_ostream &O);
- static const char *getRegisterName(unsigned RegNo);
-
- void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O,
- const char *Modifier = 0);
- void printFSLImm(const MCInst *MI, int OpNo, raw_ostream &O);
- void printUnsignedImm(const MCInst *MI, int OpNo, raw_ostream &O);
- void printMemOperand(const MCInst *MI, int OpNo,raw_ostream &O,
- const char *Modifier = 0);
- };
-}
-
-#endif
diff --git a/lib/Target/MBlaze/InstPrinter/Makefile b/lib/Target/MBlaze/InstPrinter/Makefile
deleted file mode 100644
index 9fb6e869d945..000000000000
--- a/lib/Target/MBlaze/InstPrinter/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/MBlaze/InstPrinter/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMBlazeAsmPrinter
-
-# Hack: we need to include 'main' MBlaze target directory to grab
-# private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/MBlaze/LLVMBuild.txt b/lib/Target/MBlaze/LLVMBuild.txt
deleted file mode 100644
index 0b290076a4e9..000000000000
--- a/lib/Target/MBlaze/LLVMBuild.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-;===- ./lib/Target/MBlaze/LLVMBuild.txt ------------------------*- Conf -*--===;
-;
-; The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-; http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[common]
-subdirectories = AsmParser Disassembler InstPrinter MCTargetDesc TargetInfo
-
-[component_0]
-type = TargetGroup
-name = MBlaze
-parent = Target
-has_asmparser = 1
-has_asmprinter = 1
-has_disassembler = 1
-
-[component_1]
-type = Library
-name = MBlazeCodeGen
-parent = MBlaze
-required_libraries = AsmPrinter CodeGen Core MBlazeAsmPrinter MBlazeDesc MBlazeInfo MC SelectionDAG Support Target
-add_to_library_groups = MBlaze
diff --git a/lib/Target/MBlaze/MBlaze.h b/lib/Target/MBlaze/MBlaze.h
deleted file mode 100644
index 1399b85d34d2..000000000000
--- a/lib/Target/MBlaze/MBlaze.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//===-- MBlaze.h - Top-level interface for MBlaze ---------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the entry points for global functions defined in
-// the LLVM MBlaze back-end.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef TARGET_MBLAZE_H
-#define TARGET_MBLAZE_H
-
-#include "MCTargetDesc/MBlazeBaseInfo.h"
-#include "MCTargetDesc/MBlazeMCTargetDesc.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
- class MBlazeTargetMachine;
- class FunctionPass;
- class MachineCodeEmitter;
-
- FunctionPass *createMBlazeISelDag(MBlazeTargetMachine &TM);
- FunctionPass *createMBlazeDelaySlotFillerPass(MBlazeTargetMachine &TM);
-
-} // end namespace llvm;
-
-#endif
diff --git a/lib/Target/MBlaze/MBlaze.td b/lib/Target/MBlaze/MBlaze.td
deleted file mode 100644
index c2888553c5e3..000000000000
--- a/lib/Target/MBlaze/MBlaze.td
+++ /dev/null
@@ -1,73 +0,0 @@
-//===-- MBlaze.td - Describe the MBlaze Target Machine -----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This is the top level entry point for the MBlaze target.
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Target-independent interfaces
-//===----------------------------------------------------------------------===//
-
-include "llvm/Target/Target.td"
-
-//===----------------------------------------------------------------------===//
-// Register File, Calling Conv, Instruction Descriptions
-//===----------------------------------------------------------------------===//
-
-include "MBlazeRegisterInfo.td"
-include "MBlazeSchedule.td"
-include "MBlazeIntrinsics.td"
-include "MBlazeInstrInfo.td"
-include "MBlazeCallingConv.td"
-
-def MBlazeInstrInfo : InstrInfo;
-
-//===----------------------------------------------------------------------===//
-// MicroBlaze Subtarget features
-//===----------------------------------------------------------------------===//
-
-def FeatureBarrel : SubtargetFeature<"barrel", "HasBarrel", "true",
- "Implements barrel shifter">;
-def FeatureDiv : SubtargetFeature<"div", "HasDiv", "true",
- "Implements hardware divider">;
-def FeatureMul : SubtargetFeature<"mul", "HasMul", "true",
- "Implements hardware multiplier">;
-def FeaturePatCmp : SubtargetFeature<"patcmp", "HasPatCmp", "true",
- "Implements pattern compare instruction">;
-def FeatureFPU : SubtargetFeature<"fpu", "HasFPU", "true",
- "Implements floating point unit">;
-def FeatureMul64 : SubtargetFeature<"mul64", "HasMul64", "true",
- "Implements multiplier with 64-bit result">;
-def FeatureSqrt : SubtargetFeature<"sqrt", "HasSqrt", "true",
- "Implements sqrt and floating point convert">;
-
-//===----------------------------------------------------------------------===//
-// MBlaze processors supported.
-//===----------------------------------------------------------------------===//
-
-def : Processor<"mblaze", NoItineraries, []>;
-def : Processor<"mblaze3", MBlazePipe3Itineraries, []>;
-def : Processor<"mblaze5", MBlazePipe5Itineraries, []>;
-
-//===----------------------------------------------------------------------===//
-// Instruction Descriptions
-//===----------------------------------------------------------------------===//
-
-def MBlazeAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// Target Declaration
-//===----------------------------------------------------------------------===//
-
-def MBlaze : Target {
- let InstructionSet = MBlazeInstrInfo;
- let AssemblyWriters = [MBlazeAsmWriter];
-}
diff --git a/lib/Target/MBlaze/MBlazeAsmPrinter.cpp b/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
deleted file mode 100644
index 7dafaef0af08..000000000000
--- a/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
+++ /dev/null
@@ -1,326 +0,0 @@
-//===-- MBlazeAsmPrinter.cpp - MBlaze LLVM assembly writer ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to GAS-format MBlaze assembly language.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "mblaze-asm-printer"
-
-#include "MBlaze.h"
-#include "InstPrinter/MBlazeInstPrinter.h"
-#include "MBlazeInstrInfo.h"
-#include "MBlazeMCInstLower.h"
-#include "MBlazeMachineFunction.h"
-#include "MBlazeSubtarget.h"
-#include "MBlazeTargetMachine.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Module.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include <cctype>
-
-using namespace llvm;
-
-namespace {
- class MBlazeAsmPrinter : public AsmPrinter {
- const MBlazeSubtarget *Subtarget;
- public:
- explicit MBlazeAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {
- Subtarget = &TM.getSubtarget<MBlazeSubtarget>();
- }
-
- virtual const char *getPassName() const {
- return "MBlaze Assembly Printer";
- }
-
- void printSavedRegsBitmask();
- void emitFrameDirective();
- virtual void EmitFunctionBodyStart();
- virtual void EmitFunctionBodyEnd();
- virtual void EmitFunctionEntryLabel();
-
- virtual bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB)
- const;
-
- bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
- void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
- void printUnsignedImm(const MachineInstr *MI, int opNum, raw_ostream &O);
- void printFSLImm(const MachineInstr *MI, int opNum, raw_ostream &O);
- void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
- const char *Modifier = 0);
-
- void EmitInstruction(const MachineInstr *MI);
- };
-} // end of anonymous namespace
-
-// #include "MBlazeGenAsmWriter.inc"
-
-//===----------------------------------------------------------------------===//
-//
-// MBlaze Asm Directives
-//
-// -- Frame directive "frame Stackpointer, Stacksize, RARegister"
-// Describe the stack frame.
-//
-// -- Mask directives "mask bitmask, offset"
-// Tells the assembler which registers are saved and where.
-// bitmask - contains a little endian bitset indicating which registers are
-// saved in the function prologue (e.g. with a 0x80000000 mask, the
-// assembler knows that register 31 (RA) is saved in the prologue).
-// offset - the position before the stack pointer subtraction indicating
-// where the first register saved in the prologue is located.
-//
-// Consider the following function prologue:
-//
-// .frame R19,48,R15
-// .mask 0xc0000000,-8
-// addiu R1, R1, -48
-// sw R15, 40(R1)
-// sw R19, 36(R1)
-//
-// With a 0xc0000000 mask, the assembler knows that registers 15 (R15) and
-// 19 (R19) are saved in the prologue. As the save order in the prologue is
-// from left to right, R15 is saved first. A -8 offset means that after the
-// stack pointer subtraction, the first register in the mask (R15) will be
-// saved at address 48-8=40.
-//
-//===----------------------------------------------------------------------===//
-
-// Print a 32-bit hex number, zero-padded to eight hex digits.
-static void printHex32(unsigned int Value, raw_ostream &O) {
- O << "0x";
- for (int i = 7; i >= 0; i--)
- O.write_hex((Value & (0xF << (i*4))) >> (i*4));
-}
-
-// Create a bitmask with all callee saved CPU registers. The return address
-// and frame pointer registers are also included in the mask if necessary.
-void MBlazeAsmPrinter::printSavedRegsBitmask() {
- const TargetFrameLowering *TFI = TM.getFrameLowering();
- const TargetRegisterInfo &RI = *TM.getRegisterInfo();
-
- // CPU Saved Registers Bitmasks
- unsigned int CPUBitmask = 0;
-
- // Set the CPU Bitmasks
- const MachineFrameInfo *MFI = MF->getFrameInfo();
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- unsigned RegNum = getMBlazeRegisterNumbering(Reg);
- if (MBlaze::GPRRegClass.contains(Reg))
- CPUBitmask |= (1 << RegNum);
- }
-
- // Return Address and Frame registers must also be set in CPUBitmask.
- if (TFI->hasFP(*MF))
- CPUBitmask |= (1 << getMBlazeRegisterNumbering(RI.getFrameRegister(*MF)));
-
- if (MFI->adjustsStack())
- CPUBitmask |= (1 << getMBlazeRegisterNumbering(RI.getRARegister()));
-
- // Print CPUBitmask
- OutStreamer.EmitRawText("\t.mask\t0x" + Twine::utohexstr(CPUBitmask));
-}
-
-/// Frame Directive
-void MBlazeAsmPrinter::emitFrameDirective() {
- if (!OutStreamer.hasRawTextSupport())
- return;
-
- const TargetRegisterInfo &RI = *TM.getRegisterInfo();
- unsigned stkReg = RI.getFrameRegister(*MF);
- unsigned retReg = RI.getRARegister();
- unsigned stkSze = MF->getFrameInfo()->getStackSize();
-
- OutStreamer.EmitRawText("\t.frame\t" +
- Twine(MBlazeInstPrinter::getRegisterName(stkReg)) +
- "," + Twine(stkSze) + "," +
- Twine(MBlazeInstPrinter::getRegisterName(retReg)));
-}
-
-void MBlazeAsmPrinter::EmitFunctionEntryLabel() {
- if (OutStreamer.hasRawTextSupport())
- OutStreamer.EmitRawText("\t.ent\t" + Twine(CurrentFnSym->getName()));
- AsmPrinter::EmitFunctionEntryLabel();
-}
-
-void MBlazeAsmPrinter::EmitFunctionBodyStart() {
- if (!OutStreamer.hasRawTextSupport())
- return;
-
- emitFrameDirective();
- printSavedRegsBitmask();
-}
-
-void MBlazeAsmPrinter::EmitFunctionBodyEnd() {
- if (OutStreamer.hasRawTextSupport())
- OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName()));
-}
-
-//===----------------------------------------------------------------------===//
-void MBlazeAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- MBlazeMCInstLower MCInstLowering(OutContext, *this);
-
- MCInst TmpInst;
- MCInstLowering.Lower(MI, TmpInst);
- OutStreamer.EmitInstruction(TmpInst);
-}
-
-// Print out an operand for an inline asm expression.
-bool MBlazeAsmPrinter::
-PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,const char *ExtraCode, raw_ostream &O) {
- // Does this asm operand have a single letter operand modifier?
- if (ExtraCode && ExtraCode[0]) {
- if (ExtraCode[1] != 0) return true; // Unknown modifier.
-
- switch (ExtraCode[0]) {
- default:
- // See if this is a generic print operand
- return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
- }
- }
-
- printOperand(MI, OpNo, O);
- return false;
-}
-
-void MBlazeAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(opNum);
-
- switch (MO.getType()) {
- case MachineOperand::MO_Register:
- O << MBlazeInstPrinter::getRegisterName(MO.getReg());
- break;
-
- case MachineOperand::MO_Immediate:
- O << (int32_t)MO.getImm();
- break;
-
- case MachineOperand::MO_FPImmediate: {
- const ConstantFP *fp = MO.getFPImm();
- printHex32(fp->getValueAPF().bitcastToAPInt().getZExtValue(), O);
- O << ";\t# immediate = " << *fp;
- break;
- }
-
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol();
- return;
-
- case MachineOperand::MO_GlobalAddress:
- O << *Mang->getSymbol(MO.getGlobal());
- break;
-
- case MachineOperand::MO_ExternalSymbol:
- O << *GetExternalSymbolSymbol(MO.getSymbolName());
- break;
-
- case MachineOperand::MO_JumpTableIndex:
- O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
- << '_' << MO.getIndex();
- break;
-
- case MachineOperand::MO_ConstantPoolIndex:
- O << MAI->getPrivateGlobalPrefix() << "CPI"
- << getFunctionNumber() << "_" << MO.getIndex();
- if (MO.getOffset())
- O << "+" << MO.getOffset();
- break;
-
- default:
- llvm_unreachable("<unknown operand type>");
- }
-}
-
-void MBlazeAsmPrinter::printUnsignedImm(const MachineInstr *MI, int opNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(opNum);
- if (MO.isImm())
- O << (uint32_t)MO.getImm();
- else
- printOperand(MI, opNum, O);
-}
-
-void MBlazeAsmPrinter::printFSLImm(const MachineInstr *MI, int opNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(opNum);
- if (MO.isImm())
- O << "rfsl" << (unsigned int)MO.getImm();
- else
- printOperand(MI, opNum, O);
-}
-
-void MBlazeAsmPrinter::
-printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
- const char *Modifier) {
- printOperand(MI, opNum, O);
- O << ", ";
- printOperand(MI, opNum+1, O);
-}
-
-/// isBlockOnlyReachableByFallthrough - Return true if the basic block has
-/// exactly one predecessor and the control transfer mechanism between
-/// the predecessor and this block is a fall-through.
-bool MBlazeAsmPrinter::
-isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
- // If this is a landing pad, it isn't a fall through. If it has no preds,
- // then nothing falls through to it.
- if (MBB->isLandingPad() || MBB->pred_empty())
- return false;
-
- // If there isn't exactly one predecessor, it can't be a fall through.
- MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), PI2 = PI;
- ++PI2;
- if (PI2 != MBB->pred_end())
- return false;
-
- // The predecessor has to be immediately before this block.
- const MachineBasicBlock *Pred = *PI;
-
- if (!Pred->isLayoutSuccessor(MBB))
- return false;
-
- // If the block is completely empty, then it definitely does fall through.
- if (Pred->empty())
- return true;
-
- // Check if the last terminator is an unconditional branch.
- MachineBasicBlock::const_iterator I = Pred->end();
- while (I != Pred->begin() && !(--I)->isTerminator())
- ; // Noop
- return I == Pred->end() || !I->isBarrier();
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializeMBlazeAsmPrinter() {
- RegisterAsmPrinter<MBlazeAsmPrinter> X(TheMBlazeTarget);
-}
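A note on the .mask computation above: printSavedRegsBitmask() sets one bit per callee-saved general-purpose register, using the register's hardware number (via getMBlazeRegisterNumbering) as the bit index. A minimal sketch of that computation (the register numbers here are illustrative):

#include <cstdint>
#include <initializer_list>

// One bit per saved register, indexed by hardware register number, as in
// printSavedRegsBitmask() above.
static uint32_t savedRegsMask(std::initializer_list<unsigned> regNums) {
  uint32_t mask = 0;
  for (unsigned r : regNums)
    mask |= (1u << r);
  return mask;
}

// Example: a function that saves only register 31 produces the mask
// 0x80000000, matching the .mask example in the comment block above.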
diff --git a/lib/Target/MBlaze/MBlazeCallingConv.td b/lib/Target/MBlaze/MBlazeCallingConv.td
deleted file mode 100644
index 00a4219d047b..000000000000
--- a/lib/Target/MBlaze/MBlazeCallingConv.td
+++ /dev/null
@@ -1,24 +0,0 @@
-//===- MBlazeCallingConv.td - Calling Conventions for MBlaze -*- tablegen -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This describes the calling conventions for MBlaze architecture.
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// MBlaze ABI Calling Convention
-//===----------------------------------------------------------------------===//
-
-def RetCC_MBlaze : CallingConv<[
- // i32 are returned in registers R3, R4
- CCIfType<[i32,f32], CCAssignToReg<[R3, R4]>>
-]>;
-
-def CC_MBlaze : CallingConv<[
- CCIfType<[i32,f32], CCCustom<"CC_MBlaze_AssignReg">>,
- CCIfType<[i32,f32], CCAssignToStack<4, 4>>
-]>;
diff --git a/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp b/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
deleted file mode 100644
index 3d0d1cecd1f1..000000000000
--- a/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
+++ /dev/null
@@ -1,254 +0,0 @@
-//===-- MBlazeDelaySlotFiller.cpp - MBlaze delay slot filler --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// A pass that attempts to fill delay slots with useful instructions. If no
-// instructions can be moved into the delay slot then a NOP is placed there.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "delay-slot-filler"
-
-#include "MBlaze.h"
-#include "MBlazeTargetMachine.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-using namespace llvm;
-
-STATISTIC(FilledSlots, "Number of delay slots filled");
-
-static cl::opt<bool> MBDisableDelaySlotFiller(
- "disable-mblaze-delay-filler",
- cl::init(false),
- cl::desc("Disable the MBlaze delay slot filter."),
- cl::Hidden);
-
-namespace {
- struct Filler : public MachineFunctionPass {
-
- TargetMachine &TM;
- const TargetInstrInfo *TII;
-
- static char ID;
- Filler(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { }
-
- virtual const char *getPassName() const {
- return "MBlaze Delay Slot Filler";
- }
-
- bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
- bool runOnMachineFunction(MachineFunction &F) {
- bool Changed = false;
- for (MachineFunction::iterator FI = F.begin(), FE = F.end();
- FI != FE; ++FI)
- Changed |= runOnMachineBasicBlock(*FI);
- return Changed;
- }
-
- };
- char Filler::ID = 0;
-} // end of anonymous namespace
-
-static bool hasImmInstruction(MachineBasicBlock::iterator &candidate) {
- // Any instruction with an immediate operand that needs more than
- // 16 bits requires an implicit IMM instruction.
- unsigned numOper = candidate->getNumOperands();
- for (unsigned op = 0; op < numOper; ++op) {
- MachineOperand &mop = candidate->getOperand(op);
-
- // The operand requires more than 16 bits to represent.
- if (mop.isImm() && (mop.getImm() < -0x8000 || mop.getImm() > 0x7fff))
- return true;
-
- // We must assume that unknown immediate values require more than
- // 16 bits to represent.
- if (mop.isGlobal() || mop.isSymbol() || mop.isJTI() || mop.isCPI())
- return true;
-
- // FIXME: we could probably check to see if the FP value happens
- // to not need an IMM instruction. For now we just always
- // assume that FP values do.
- if (mop.isFPImm())
- return true;
- }
-
- return false;
-}
-
-static unsigned getLastRealOperand(MachineBasicBlock::iterator &instr) {
- switch (instr->getOpcode()) {
- default: return instr->getNumOperands();
-
- // These instructions have a variable number of operands but the first two
- // are the "real" operands that we care about during hazard detection.
- case MBlaze::BRLID:
- case MBlaze::BRALID:
- case MBlaze::BRLD:
- case MBlaze::BRALD:
- return 2;
- }
-}
-
-static bool delayHasHazard(MachineBasicBlock::iterator &candidate,
- MachineBasicBlock::iterator &slot) {
- // Hazard check
- MachineBasicBlock::iterator a = candidate;
- MachineBasicBlock::iterator b = slot;
-
- // MBB layout:-
- // candidate := a0 = operation(a1, a2)
- // ...middle bit...
- // slot := b0 = operation(b1, b2)
-
- // Possible hazards:-
- // 1. a1 or a2 was written during the middle bit
- // 2. a0 was read or written during the middle bit
- // 3. a0 is one or more of {b0, b1, b2}
- // 4. b0 is one or more of {a1, a2}
- // 5. a accesses memory, and the middle bit
- // contains a store operation.
- bool a_is_memory = candidate->mayLoad() || candidate->mayStore();
-
- // Determine the number of operands in the slot instruction and in the
- // candidate instruction.
- const unsigned aend = getLastRealOperand(a);
- const unsigned bend = getLastRealOperand(b);
-
- // Check hazards type 1, 2 and 5 by scanning the middle bit
- MachineBasicBlock::iterator m = a;
- for (++m; m != b; ++m) {
- for (unsigned aop = 0; aop<aend; ++aop) {
- bool aop_is_reg = a->getOperand(aop).isReg();
- if (!aop_is_reg) continue;
-
- bool aop_is_def = a->getOperand(aop).isDef();
- unsigned aop_reg = a->getOperand(aop).getReg();
-
- const unsigned mend = getLastRealOperand(m);
- for (unsigned mop = 0; mop<mend; ++mop) {
- bool mop_is_reg = m->getOperand(mop).isReg();
- if (!mop_is_reg) continue;
-
- bool mop_is_def = m->getOperand(mop).isDef();
- unsigned mop_reg = m->getOperand(mop).getReg();
-
- if (aop_is_def && (mop_reg == aop_reg))
- return true; // Hazard type 2, because aop = a0
- else if (mop_is_def && (mop_reg == aop_reg))
- return true; // Hazard type 1, because aop in {a1, a2}
- }
- }
-
- // Check hazard type 5
- if (a_is_memory && m->mayStore())
- return true;
- }
-
- // Check hazard type 3 & 4
- for (unsigned aop = 0; aop<aend; ++aop) {
- if (a->getOperand(aop).isReg()) {
- unsigned aop_reg = a->getOperand(aop).getReg();
-
- for (unsigned bop = 0; bop<bend; ++bop) {
- if (b->getOperand(bop).isReg() && !b->getOperand(bop).isImplicit()) {
- unsigned bop_reg = b->getOperand(bop).getReg();
- if (aop_reg == bop_reg)
- return true;
- }
- }
- }
- }
-
- return false;
-}
-
-static bool isDelayFiller(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator candidate) {
- if (candidate == MBB.begin())
- return false;
-
- --candidate;
- return (candidate->hasDelaySlot());
-}
-
-static bool hasUnknownSideEffects(MachineBasicBlock::iterator &I) {
- if (!I->hasUnmodeledSideEffects())
- return false;
-
- unsigned op = I->getOpcode();
- if (op == MBlaze::ADDK || op == MBlaze::ADDIK ||
- op == MBlaze::ADDC || op == MBlaze::ADDIC ||
- op == MBlaze::ADDKC || op == MBlaze::ADDIKC ||
- op == MBlaze::RSUBK || op == MBlaze::RSUBIK ||
- op == MBlaze::RSUBC || op == MBlaze::RSUBIC ||
- op == MBlaze::RSUBKC || op == MBlaze::RSUBIKC)
- return false;
-
- return true;
-}
-
-static MachineBasicBlock::iterator
-findDelayInstr(MachineBasicBlock &MBB,MachineBasicBlock::iterator slot) {
- MachineBasicBlock::iterator I = slot;
- while (true) {
- if (I == MBB.begin())
- break;
-
- --I;
- if (I->hasDelaySlot() || I->isBranch() || isDelayFiller(MBB,I) ||
- I->isCall() || I->isReturn() || I->isBarrier() ||
- hasUnknownSideEffects(I))
- break;
-
- if (hasImmInstruction(I) || delayHasHazard(I,slot))
- continue;
-
- return I;
- }
-
- return MBB.end();
-}
-
-/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
-/// A delay slot is filled with a useful independent instruction when one can
-/// be found, and with a NOP otherwise. We assume there is only one delay
-/// slot per delayed instruction.
-bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
- bool Changed = false;
- for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
- if (I->hasDelaySlot()) {
- MachineBasicBlock::iterator D = MBB.end();
- MachineBasicBlock::iterator J = I;
-
- if (!MBDisableDelaySlotFiller)
- D = findDelayInstr(MBB,I);
-
- ++FilledSlots;
- Changed = true;
-
- if (D == MBB.end())
- BuildMI(MBB, ++J, I->getDebugLoc(), TII->get(MBlaze::NOP));
- else
- MBB.splice(++J, &MBB, D);
- }
- return Changed;
-}
-
-/// createMBlazeDelaySlotFillerPass - Returns a pass that fills in delay
-/// slots in MBlaze MachineFunctions
-FunctionPass *llvm::createMBlazeDelaySlotFillerPass(MBlazeTargetMachine &tm) {
- return new Filler(tm);
-}
-
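A note on the IMM-prefix test used by hasImmInstruction() above: any immediate that does not fit in a signed 16-bit field forces an implicit IMM instruction, which is why such candidates are rejected for the delay slot. A minimal sketch of that range test:

#include <cstdint>

// Mirrors the range check in hasImmInstruction(): values outside the
// signed 16-bit range need an implicit IMM prefix.
static bool needsImmPrefix(int64_t value) {
  return value < -0x8000 || value > 0x7fff;
}

// needsImmPrefix(0x7fff)  -> false (fits in 16 bits)
// needsImmPrefix(0x8000)  -> true
// needsImmPrefix(-0x8001) -> true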
diff --git a/lib/Target/MBlaze/MBlazeFrameLowering.cpp b/lib/Target/MBlaze/MBlazeFrameLowering.cpp
deleted file mode 100644
index 172304bd5b45..000000000000
--- a/lib/Target/MBlaze/MBlazeFrameLowering.cpp
+++ /dev/null
@@ -1,488 +0,0 @@
-//===-- MBlazeFrameLowering.cpp - MBlaze Frame Information ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the MBlaze implementation of TargetFrameLowering class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "mblaze-frame-lowering"
-
-#include "MBlazeFrameLowering.h"
-#include "InstPrinter/MBlazeInstPrinter.h"
-#include "MBlazeInstrInfo.h"
-#include "MBlazeMachineFunction.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Function.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetOptions.h"
-
-using namespace llvm;
-
-static cl::opt<bool> MBDisableStackAdjust(
- "disable-mblaze-stack-adjust",
- cl::init(false),
- cl::desc("Disable MBlaze stack layout adjustment."),
- cl::Hidden);
-
-static void replaceFrameIndexes(MachineFunction &MF,
- SmallVector<std::pair<int,int64_t>, 16> &FR) {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- const SmallVector<std::pair<int,int64_t>, 16>::iterator FRB = FR.begin();
- const SmallVector<std::pair<int,int64_t>, 16>::iterator FRE = FR.end();
-
- SmallVector<std::pair<int,int64_t>, 16>::iterator FRI = FRB;
- for (; FRI != FRE; ++FRI) {
- MFI->RemoveStackObject(FRI->first);
- int NFI = MFI->CreateFixedObject(4, FRI->second, true);
- MBlazeFI->recordReplacement(FRI->first, NFI);
-
- for (MachineFunction::iterator MB=MF.begin(), ME=MF.end(); MB!=ME; ++MB) {
- MachineBasicBlock::iterator MBB = MB->begin();
- const MachineBasicBlock::iterator MBE = MB->end();
-
- for (; MBB != MBE; ++MBB) {
- MachineInstr::mop_iterator MIB = MBB->operands_begin();
- const MachineInstr::mop_iterator MIE = MBB->operands_end();
-
- for (MachineInstr::mop_iterator MII = MIB; MII != MIE; ++MII) {
- if (!MII->isFI() || MII->getIndex() != FRI->first) continue;
- DEBUG(dbgs() << "FOUND FI#" << MII->getIndex() << "\n");
- MII->setIndex(NFI);
- }
- }
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-//
-// Stack Frame Processing methods
-// +----------------------------+
-//
-// The stack is allocated by decrementing the stack pointer in
-// the first instruction of a function prologue. Once decremented,
-// all stack references are done through a positive offset
-// from the stack/frame pointer, so the stack is considered
-// to grow up.
-//
-//===----------------------------------------------------------------------===//
-
-static void analyzeFrameIndexes(MachineFunction &MF) {
- if (MBDisableStackAdjust) return;
-
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- const MachineRegisterInfo &MRI = MF.getRegInfo();
-
- MachineRegisterInfo::livein_iterator LII = MRI.livein_begin();
- MachineRegisterInfo::livein_iterator LIE = MRI.livein_end();
- const SmallVector<int, 16> &LiveInFI = MBlazeFI->getLiveIn();
- SmallVector<MachineInstr*, 16> EraseInstr;
- SmallVector<std::pair<int,int64_t>, 16> FrameRelocate;
-
- MachineBasicBlock *MBB = MF.getBlockNumbered(0);
- MachineBasicBlock::iterator MIB = MBB->begin();
- MachineBasicBlock::iterator MIE = MBB->end();
-
- int StackAdjust = 0;
- int StackOffset = -28;
-
- // In this loop we are searching for frame indexes that correspond to incoming
- // arguments that are already in the stack. We look for instruction sequences
- // like the following:
- //
- // LWI REG, FI1, 0
- // ...
- // SWI REG, FI2, 0
- //
- // As long as there are no defs of REG in the ... part, we can eliminate
- // the SWI instruction because the value has already been stored to the
- // stack by the caller. All we need to do is locate FI at the correct
- // stack location according to the calling conventions.
- //
- // Additionally, if the SWI operation kills the def of REG then we don't
- // need the LWI operation so we can erase it as well.
- for (unsigned i = 0, e = LiveInFI.size(); i < e; ++i) {
- for (MachineBasicBlock::iterator I=MIB; I != MIE; ++I) {
- if (I->getOpcode() != MBlaze::LWI || I->getNumOperands() != 3 ||
- !I->getOperand(1).isFI() || !I->getOperand(0).isReg() ||
- I->getOperand(1).getIndex() != LiveInFI[i]) continue;
-
- unsigned FIReg = I->getOperand(0).getReg();
- MachineBasicBlock::iterator SI = I;
- for (SI++; SI != MIE; ++SI) {
- if (!SI->getOperand(0).isReg() ||
- !SI->getOperand(1).isFI() ||
- SI->getOpcode() != MBlaze::SWI) continue;
-
- int FI = SI->getOperand(1).getIndex();
- if (SI->getOperand(0).getReg() != FIReg ||
- MFI->isFixedObjectIndex(FI) ||
- MFI->getObjectSize(FI) != 4) continue;
-
- if (SI->getOperand(0).isDef()) break;
-
- if (SI->getOperand(0).isKill()) {
- DEBUG(dbgs() << "LWI for FI#" << I->getOperand(1).getIndex()
- << " removed\n");
- EraseInstr.push_back(I);
- }
-
- EraseInstr.push_back(SI);
- DEBUG(dbgs() << "SWI for FI#" << FI << " removed\n");
-
- FrameRelocate.push_back(std::make_pair(FI,StackOffset));
- DEBUG(dbgs() << "FI#" << FI << " relocated to " << StackOffset << "\n");
-
- StackOffset -= 4;
- StackAdjust += 4;
- break;
- }
- }
- }
-
- // In this loop we are searching for frame indexes that correspond to
- // incoming arguments that are in registers. We look for instruction
- // sequences like the following:
- //
- // ... SWI REG, FI, 0
- //
- // As long as the ... part does not define REG and if REG is an incoming
- // parameter register then we know that, according to ABI conventions, the
- // caller has allocated stack space for it already. Instead of allocating
- // stack space on our frame, we record the correct location in the caller's
- // frame.
- for (MachineRegisterInfo::livein_iterator LI = LII; LI != LIE; ++LI) {
- for (MachineBasicBlock::iterator I=MIB; I != MIE; ++I) {
- if (I->definesRegister(LI->first))
- break;
-
- if (I->getOpcode() != MBlaze::SWI || I->getNumOperands() != 3 ||
- !I->getOperand(1).isFI() || !I->getOperand(0).isReg() ||
- I->getOperand(1).getIndex() < 0) continue;
-
- if (I->getOperand(0).getReg() == LI->first) {
- int FI = I->getOperand(1).getIndex();
- MBlazeFI->recordLiveIn(FI);
-
- int FILoc = 0;
- switch (LI->first) {
- default: llvm_unreachable("invalid incoming parameter!");
- case MBlaze::R5: FILoc = -4; break;
- case MBlaze::R6: FILoc = -8; break;
- case MBlaze::R7: FILoc = -12; break;
- case MBlaze::R8: FILoc = -16; break;
- case MBlaze::R9: FILoc = -20; break;
- case MBlaze::R10: FILoc = -24; break;
- }
-
- StackAdjust += 4;
- FrameRelocate.push_back(std::make_pair(FI,FILoc));
- DEBUG(dbgs() << "FI#" << FI << " relocated to " << FILoc << "\n");
- break;
- }
- }
- }
-
- // Go ahead and erase all of the instructions that we determined were
- // no longer needed.
- for (int i = 0, e = EraseInstr.size(); i < e; ++i)
- MBB->erase(EraseInstr[i]);
-
- // Replace all of the frame indexes that we have relocated with new
- // fixed object frame indexes.
- replaceFrameIndexes(MF, FrameRelocate);
-}
-
-static void interruptFrameLayout(MachineFunction &MF) {
- const Function *F = MF.getFunction();
- CallingConv::ID CallConv = F->getCallingConv();
-
- // If this function is not using either the interrupt_handler
- // calling convention or the save_volatiles calling convention
- // then we don't need to do any additional frame layout.
- if (CallConv != CallingConv::MBLAZE_INTR &&
- CallConv != CallingConv::MBLAZE_SVOL)
- return;
-
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- const MBlazeInstrInfo &TII =
- *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo());
-
- // Determine if the calling convention is the interrupt_handler
- // calling convention. Some pieces of the prologue and epilogue
- // only need to be emitted if we are lowering an interrupt handler.
- bool isIntr = CallConv == CallingConv::MBLAZE_INTR;
-
- // Determine where to put prologue and epilogue additions
- MachineBasicBlock &MENT = MF.front();
- MachineBasicBlock &MEXT = MF.back();
-
- MachineBasicBlock::iterator MENTI = MENT.begin();
- MachineBasicBlock::iterator MEXTI = prior(MEXT.end());
-
- DebugLoc ENTDL = MENTI != MENT.end() ? MENTI->getDebugLoc() : DebugLoc();
- DebugLoc EXTDL = MEXTI != MEXT.end() ? MEXTI->getDebugLoc() : DebugLoc();
-
- // Store the frame indexes generated during prologue additions for use
- // when we are generating the epilogue additions.
- SmallVector<int, 10> VFI;
-
- // Build the prologue SWI for R3 - R12 if needed. Note that R11 must
- // always have a SWI because it is used when processing RMSR.
- for (unsigned r = MBlaze::R3; r <= MBlaze::R12; ++r) {
- if (!MRI.isPhysRegUsed(r) && !(isIntr && r == MBlaze::R11)) continue;
-
- int FI = MFI->CreateStackObject(4,4,false,false);
- VFI.push_back(FI);
-
- BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), r)
- .addFrameIndex(FI).addImm(0);
- }
-
- // Build the prologue SWI for R17, R18
- int R17FI = MFI->CreateStackObject(4,4,false,false);
- int R18FI = MFI->CreateStackObject(4,4,false,false);
-
- BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), MBlaze::R17)
- .addFrameIndex(R17FI).addImm(0);
-
- BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), MBlaze::R18)
- .addFrameIndex(R18FI).addImm(0);
-
- // Build the prologue SWI and the epilogue LWI for RMSR if needed
- if (isIntr) {
- int MSRFI = MFI->CreateStackObject(4,4,false,false);
- BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::MFS), MBlaze::R11)
- .addReg(MBlaze::RMSR);
- BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), MBlaze::R11)
- .addFrameIndex(MSRFI).addImm(0);
-
- BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), MBlaze::R11)
- .addFrameIndex(MSRFI).addImm(0);
- BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::MTS), MBlaze::RMSR)
- .addReg(MBlaze::R11);
- }
-
- // Build the epilogue LWI for R17, R18
- BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), MBlaze::R18)
- .addFrameIndex(R18FI).addImm(0);
-
- BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), MBlaze::R17)
- .addFrameIndex(R17FI).addImm(0);
-
- // Build the epilogue LWI for R3 - R12 if needed
- for (unsigned r = MBlaze::R12, i = VFI.size(); r >= MBlaze::R3; --r) {
- if (!MRI.isPhysRegUsed(r)) continue;
- BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), r)
- .addFrameIndex(VFI[--i]).addImm(0);
- }
-}
-
-static void determineFrameLayout(MachineFunction &MF) {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
-
- // Replace the dummy '0' SPOffset with the negative offsets, as explained
- // in LowerFORMAL_ARGUMENTS. Leaving '0' in place for a while is necessary
- // to avoid the layout that calculateFrameObjectOffsets would otherwise
- // apply to the stack frame.
- MBlazeFI->adjustLoadArgsFI(MFI);
- MBlazeFI->adjustStoreVarArgsFI(MFI);
-
- // Get the number of bytes to allocate from the FrameInfo
- unsigned FrameSize = MFI->getStackSize();
- DEBUG(dbgs() << "Original Frame Size: " << FrameSize << "\n" );
-
- // Get the alignments provided by the target, and the maximum alignment
- // (if any) of the fixed frame objects.
- // unsigned MaxAlign = MFI->getMaxAlignment();
- unsigned TargetAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
- unsigned AlignMask = TargetAlign - 1;
-
- // Make sure the frame is aligned.
- FrameSize = (FrameSize + AlignMask) & ~AlignMask;
- MFI->setStackSize(FrameSize);
- DEBUG(dbgs() << "Aligned Frame Size: " << FrameSize << "\n" );
-}
-
-int MBlazeFrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI)
- const {
- const MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- if (MBlazeFI->hasReplacement(FI))
- FI = MBlazeFI->getReplacement(FI);
- return TargetFrameLowering::getFrameIndexOffset(MF,FI);
-}
-
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-bool MBlazeFrameLowering::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return MF.getTarget().Options.DisableFramePointerElim(MF) ||
- MFI->hasVarSizedObjects();
-}
-
-void MBlazeFrameLowering::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const MBlazeInstrInfo &TII =
- *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo());
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
- bool requiresRA = CallConv == CallingConv::MBLAZE_INTR;
-
- // Determine the correct frame layout
- determineFrameLayout(MF);
-
- // Get the number of bytes to allocate from the FrameInfo.
- unsigned StackSize = MFI->getStackSize();
-
- // No need to allocate space on the stack.
- if (StackSize == 0 && !MFI->adjustsStack() && !requiresRA) return;
-
- int FPOffset = MBlazeFI->getFPStackOffset();
- int RAOffset = MBlazeFI->getRAStackOffset();
-
- // Adjust stack : addi R1, R1, -imm
- BuildMI(MBB, MBBI, DL, TII.get(MBlaze::ADDIK), MBlaze::R1)
- .addReg(MBlaze::R1).addImm(-StackSize);
-
- // swi R15, R1, stack_loc
- if (MFI->adjustsStack() || requiresRA) {
- BuildMI(MBB, MBBI, DL, TII.get(MBlaze::SWI))
- .addReg(MBlaze::R15).addReg(MBlaze::R1).addImm(RAOffset);
- }
-
- if (hasFP(MF)) {
- // swi R19, R1, stack_loc
- BuildMI(MBB, MBBI, DL, TII.get(MBlaze::SWI))
- .addReg(MBlaze::R19).addReg(MBlaze::R1).addImm(FPOffset);
-
- // add R19, R1, R0
- BuildMI(MBB, MBBI, DL, TII.get(MBlaze::ADD), MBlaze::R19)
- .addReg(MBlaze::R1).addReg(MBlaze::R0);
- }
-}
-
-void MBlazeFrameLowering::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- const MBlazeInstrInfo &TII =
- *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo());
-
- DebugLoc dl = MBBI->getDebugLoc();
-
- CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
- bool requiresRA = CallConv == CallingConv::MBLAZE_INTR;
-
- // Get the FI's where RA and FP are saved.
- int FPOffset = MBlazeFI->getFPStackOffset();
- int RAOffset = MBlazeFI->getRAStackOffset();
-
- if (hasFP(MF)) {
- // add R1, R19, R0
- BuildMI(MBB, MBBI, dl, TII.get(MBlaze::ADD), MBlaze::R1)
- .addReg(MBlaze::R19).addReg(MBlaze::R0);
-
- // lwi R19, R1, stack_loc
- BuildMI(MBB, MBBI, dl, TII.get(MBlaze::LWI), MBlaze::R19)
- .addReg(MBlaze::R1).addImm(FPOffset);
- }
-
- // lwi R15, R1, stack_loc
- if (MFI->adjustsStack() || requiresRA) {
- BuildMI(MBB, MBBI, dl, TII.get(MBlaze::LWI), MBlaze::R15)
- .addReg(MBlaze::R1).addImm(RAOffset);
- }
-
- // Get the number of bytes from FrameInfo
- int StackSize = (int) MFI->getStackSize();
-
- // addi R1, R1, imm
- if (StackSize) {
- BuildMI(MBB, MBBI, dl, TII.get(MBlaze::ADDIK), MBlaze::R1)
- .addReg(MBlaze::R1).addImm(StackSize);
- }
-}
-
-// Eliminate ADJCALLSTACKDOWN/ADJCALLSTACKUP pseudo instructions
-void MBlazeFrameLowering::
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
- const MBlazeInstrInfo &TII =
- *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo());
- if (!hasReservedCallFrame(MF)) {
- // If we have a frame pointer, turn the adjcallstackdown instruction into
- // 'addik r1, r1, -<amt>' and the adjcallstackup instruction into
- // 'addik r1, r1, <amt>'
- MachineInstr *Old = I;
- int Amount = Old->getOperand(0).getImm() + 4;
- if (Amount != 0) {
- // We need to keep the stack aligned properly. To do this, we round the
- // amount of space needed for the outgoing arguments up to the next
- // alignment boundary.
- unsigned Align = getStackAlignment();
- Amount = (Amount+Align-1)/Align*Align;
-
- MachineInstr *New;
- if (Old->getOpcode() == MBlaze::ADJCALLSTACKDOWN) {
- New = BuildMI(MF,Old->getDebugLoc(), TII.get(MBlaze::ADDIK),MBlaze::R1)
- .addReg(MBlaze::R1).addImm(-Amount);
- } else {
- assert(Old->getOpcode() == MBlaze::ADJCALLSTACKUP);
- New = BuildMI(MF,Old->getDebugLoc(), TII.get(MBlaze::ADDIK),MBlaze::R1)
- .addReg(MBlaze::R1).addImm(Amount);
- }
-
- // Replace the pseudo instruction with a new instruction...
- MBB.insert(I, New);
- }
- }
-
- // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
- MBB.erase(I);
-}
-
-
-void MBlazeFrameLowering::
-processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
- bool requiresRA = CallConv == CallingConv::MBLAZE_INTR;
-
- if (MFI->adjustsStack() || requiresRA) {
- MBlazeFI->setRAStackOffset(0);
- MFI->CreateFixedObject(4,0,true);
- }
-
- if (hasFP(MF)) {
- MBlazeFI->setFPStackOffset(4);
- MFI->CreateFixedObject(4,4,true);
- }
-
- interruptFrameLayout(MF);
- analyzeFrameIndexes(MF);
-}
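A note on the alignment rounding used above: determineFrameLayout() rounds the frame size up to the target stack alignment with a mask, and eliminateCallFramePseudoInstr() does the same for the call-frame amount with a divide/multiply. A minimal sketch of the two equivalent forms (the mask form assumes a power-of-two alignment):

#include <cstdint>

// Mask form, as in determineFrameLayout(); requires power-of-two alignment.
static uint64_t alignUpMask(uint64_t size, uint64_t align) {
  uint64_t mask = align - 1;
  return (size + mask) & ~mask;
}

// Divide/multiply form, as in eliminateCallFramePseudoInstr(); works for
// any non-zero alignment.
static uint64_t alignUpDiv(uint64_t size, uint64_t align) {
  return (size + align - 1) / align * align;
}

// Example: alignUpMask(13, 8) == alignUpDiv(13, 8) == 16.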
diff --git a/lib/Target/MBlaze/MBlazeFrameLowering.h b/lib/Target/MBlaze/MBlazeFrameLowering.h
deleted file mode 100644
index f4228c5f0890..000000000000
--- a/lib/Target/MBlaze/MBlazeFrameLowering.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//=- MBlazeFrameLowering.h - Define frame lowering for MicroBlaze -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the MBlaze implementation of the TargetFrameLowering
-// class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZE_FRAMEINFO_H
-#define MBLAZE_FRAMEINFO_H
-
-#include "MBlaze.h"
-#include "llvm/Target/TargetFrameLowering.h"
-
-namespace llvm {
-class MBlazeSubtarget;
-
-class MBlazeFrameLowering : public TargetFrameLowering {
-protected:
- const MBlazeSubtarget &STI;
-
-public:
- explicit MBlazeFrameLowering(const MBlazeSubtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsUp, 4, 0), STI(sti) {
- }
-
- /// targetHandlesStackFrameRounding - Returns true if the target is
- /// responsible for rounding up the stack frame (probably at emitPrologue
- /// time).
- bool targetHandlesStackFrameRounding() const { return true; }
-
- /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
- /// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
-
- bool hasFP(const MachineFunction &MF) const;
-
- int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
-
- virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const;
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp b/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp
deleted file mode 100644
index 34e33fdcfcc0..000000000000
--- a/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp
+++ /dev/null
@@ -1,277 +0,0 @@
-//===-- MBlazeISelDAGToDAG.cpp - A dag to dag inst selector for MBlaze ----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines an instruction selector for the MBlaze target.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "mblaze-isel"
-#include "MBlaze.h"
-#include "MBlazeMachineFunction.h"
-#include "MBlazeRegisterInfo.h"
-#include "MBlazeSubtarget.h"
-#include "MBlazeTargetMachine.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Intrinsics.h"
-#include "llvm/IR/Type.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Instruction Selector Implementation
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// MBlazeDAGToDAGISel - MBlaze specific code to select MBlaze machine
-// instructions for SelectionDAG operations.
-//===----------------------------------------------------------------------===//
-namespace {
-
-class MBlazeDAGToDAGISel : public SelectionDAGISel {
-
- /// TM - Keep a reference to MBlazeTargetMachine.
- MBlazeTargetMachine &TM;
-
- /// Subtarget - Keep a pointer to the MBlazeSubtarget around so that we can
- /// make the right decision when generating code for different targets.
- const MBlazeSubtarget &Subtarget;
-
-public:
- explicit MBlazeDAGToDAGISel(MBlazeTargetMachine &tm) :
- SelectionDAGISel(tm),
- TM(tm), Subtarget(tm.getSubtarget<MBlazeSubtarget>()) {}
-
- // Pass Name
- virtual const char *getPassName() const {
- return "MBlaze DAG->DAG Pattern Instruction Selection";
- }
-private:
- // Include the pieces autogenerated from the target description.
- #include "MBlazeGenDAGISel.inc"
-
- /// getTargetMachine - Return a reference to the TargetMachine, casted
- /// to the target-specific type.
- const MBlazeTargetMachine &getTargetMachine() {
- return static_cast<const MBlazeTargetMachine &>(TM);
- }
-
- /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
- /// to the target-specific type.
- const MBlazeInstrInfo *getInstrInfo() {
- return getTargetMachine().getInstrInfo();
- }
-
- SDNode *getGlobalBaseReg();
- SDNode *Select(SDNode *N);
-
- // Address Selection
- bool SelectAddrRegReg(SDValue N, SDValue &Base, SDValue &Index);
- bool SelectAddrRegImm(SDValue N, SDValue &Disp, SDValue &Base);
-
- // getI32Imm - Return a target constant with the specified value, of type i32.
- inline SDValue getI32Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
- }
-};
-
-}
-
-/// isIntS32Immediate - This method tests to see if the node is either a 32-bit
-/// or 64-bit immediate, and if the value can be accurately represented as a
-/// sign extension from a 32-bit value. If so, this returns true and the
-/// immediate.
-static bool isIntS32Immediate(SDNode *N, int32_t &Imm) {
- unsigned Opc = N->getOpcode();
- if (Opc != ISD::Constant)
- return false;
-
- Imm = (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
- if (N->getValueType(0) == MVT::i32)
- return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
- else
- return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
-}
-
-static bool isIntS32Immediate(SDValue Op, int32_t &Imm) {
- return isIntS32Immediate(Op.getNode(), Imm);
-}
-
-
-/// SelectAddrRegReg - Given the specified address, check to see if it
-/// can be represented as an indexed [r+r] operation. Returns false if it
-/// can be more efficiently represented with [r+imm].
-bool MBlazeDAGToDAGISel::
-SelectAddrRegReg(SDValue N, SDValue &Base, SDValue &Index) {
- if (N.getOpcode() == ISD::FrameIndex) return false;
- if (N.getOpcode() == ISD::TargetExternalSymbol ||
- N.getOpcode() == ISD::TargetGlobalAddress)
- return false; // direct calls.
-
- int32_t imm = 0;
- if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
- if (isIntS32Immediate(N.getOperand(1), imm))
- return false; // r+i
-
- if (N.getOperand(0).getOpcode() == ISD::TargetJumpTable ||
- N.getOperand(1).getOpcode() == ISD::TargetJumpTable)
- return false; // jump tables.
-
- Base = N.getOperand(0);
- Index = N.getOperand(1);
- return true;
- }
-
- return false;
-}
-
-/// Returns true if the address N can be represented by a base register plus
-/// a signed 32-bit displacement [r+imm], and if it is not better
-/// represented as reg+reg.
-bool MBlazeDAGToDAGISel::
-SelectAddrRegImm(SDValue N, SDValue &Base, SDValue &Disp) {
- // If this can be more profitably realized as r+r, fail.
- if (SelectAddrRegReg(N, Base, Disp))
- return false;
-
- if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
- int32_t imm = 0;
- if (isIntS32Immediate(N.getOperand(1), imm)) {
- Disp = CurDAG->getTargetConstant(imm, MVT::i32);
- if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
- Base = CurDAG->getTargetFrameIndex(FI->getIndex(), N.getValueType());
- } else {
- Base = N.getOperand(0);
- }
- return true; // [r+i]
- }
- } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
- // Loading from a constant address.
- uint32_t Imm = CN->getZExtValue();
- Disp = CurDAG->getTargetConstant(Imm, CN->getValueType(0));
- Base = CurDAG->getRegister(MBlaze::R0, CN->getValueType(0));
- return true;
- }
-
- Disp = CurDAG->getTargetConstant(0, TM.getTargetLowering()->getPointerTy());
- if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
- Base = CurDAG->getTargetFrameIndex(FI->getIndex(), N.getValueType());
- else
- Base = N;
- return true; // [r+0]
-}
-
-/// getGlobalBaseReg - Output the instructions required to put the
-/// GOT address into a register.
-SDNode *MBlazeDAGToDAGISel::getGlobalBaseReg() {
- unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
- return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
-}
-
-/// Select instructions that are not custom lowered. Used for
-/// expanded, promoted and normal instructions.
-SDNode* MBlazeDAGToDAGISel::Select(SDNode *Node) {
- unsigned Opcode = Node->getOpcode();
- DebugLoc dl = Node->getDebugLoc();
-
- // If we have a custom node, we already have selected!
- if (Node->isMachineOpcode())
- return NULL;
-
- ///
- // Instruction Selection not handled by the auto-generated
- // tablegen selection should be handled here.
- ///
- switch (Opcode) {
- default: break;
-
- // Get target GOT address.
- case ISD::GLOBAL_OFFSET_TABLE:
- return getGlobalBaseReg();
-
- case ISD::FrameIndex: {
- SDValue imm = CurDAG->getTargetConstant(0, MVT::i32);
- int FI = dyn_cast<FrameIndexSDNode>(Node)->getIndex();
- EVT VT = Node->getValueType(0);
- SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
- unsigned Opc = MBlaze::ADDIK;
- if (Node->hasOneUse())
- return CurDAG->SelectNodeTo(Node, Opc, VT, TFI, imm);
- return CurDAG->getMachineNode(Opc, dl, VT, TFI, imm);
- }
-
-
- /// Handle direct and indirect calls when using PIC. With PIC, when the
- /// GOT is smaller than about 64k (small code model) the GA target is
- /// loaded with only one instruction; otherwise the GA target must
- /// be loaded with three instructions.
- case MBlazeISD::JmpLink: {
- if (TM.getRelocationModel() == Reloc::PIC_) {
- SDValue Chain = Node->getOperand(0);
- SDValue Callee = Node->getOperand(1);
- SDValue R20Reg = CurDAG->getRegister(MBlaze::R20, MVT::i32);
- SDValue InFlag(0, 0);
-
- if ((isa<GlobalAddressSDNode>(Callee)) ||
- (isa<ExternalSymbolSDNode>(Callee)))
- {
- /// Direct call for global addresses and external symbols
- SDValue GPReg = CurDAG->getRegister(MBlaze::R15, MVT::i32);
-
- // Use load to get GOT target
- SDValue Ops[] = { Callee, GPReg, Chain };
- SDValue Load = SDValue(CurDAG->getMachineNode(MBlaze::LW, dl,
- MVT::i32, MVT::Other, Ops), 0);
- Chain = Load.getValue(1);
-
- // The call target must be in R20
- Chain = CurDAG->getCopyToReg(Chain, dl, R20Reg, Load, InFlag);
- } else
- /// Indirect call
- Chain = CurDAG->getCopyToReg(Chain, dl, R20Reg, Callee, InFlag);
-
- // Emit Jump and Link Register
- SDNode *ResNode = CurDAG->getMachineNode(MBlaze::BRLID, dl, MVT::Other,
- MVT::Glue, R20Reg, Chain);
- Chain = SDValue(ResNode, 0);
- InFlag = SDValue(ResNode, 1);
- ReplaceUses(SDValue(Node, 0), Chain);
- ReplaceUses(SDValue(Node, 1), InFlag);
- return ResNode;
- }
- }
- }
-
- // Select the default instruction
- SDNode *ResNode = SelectCode(Node);
-
- DEBUG(errs() << "=> ");
- if (ResNode == NULL || ResNode == Node)
- DEBUG(Node->dump(CurDAG));
- else
- DEBUG(ResNode->dump(CurDAG));
- DEBUG(errs() << "\n");
- return ResNode;
-}
-
-/// createMBlazeISelDag - This pass converts a legalized DAG into a
-/// MBlaze-specific DAG, ready for instruction scheduling.
-FunctionPass *llvm::createMBlazeISelDag(MBlazeTargetMachine &TM) {
- return new MBlazeDAGToDAGISel(TM);
-}
diff --git a/lib/Target/MBlaze/MBlazeISelLowering.cpp b/lib/Target/MBlaze/MBlazeISelLowering.cpp
deleted file mode 100644
index d4f943297acb..000000000000
--- a/lib/Target/MBlaze/MBlazeISelLowering.cpp
+++ /dev/null
@@ -1,1154 +0,0 @@
-//===-- MBlazeISelLowering.cpp - MBlaze DAG Lowering Implementation -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that MBlaze uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "mblaze-lower"
-#include "MBlazeISelLowering.h"
-#include "MBlazeMachineFunction.h"
-#include "MBlazeSubtarget.h"
-#include "MBlazeTargetMachine.h"
-#include "MBlazeTargetObjectFile.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/IR/CallingConv.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/Intrinsics.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-static bool CC_MBlaze_AssignReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-
-const char *MBlazeTargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch (Opcode) {
- case MBlazeISD::JmpLink : return "MBlazeISD::JmpLink";
- case MBlazeISD::GPRel : return "MBlazeISD::GPRel";
- case MBlazeISD::Wrap : return "MBlazeISD::Wrap";
- case MBlazeISD::ICmp : return "MBlazeISD::ICmp";
- case MBlazeISD::Ret : return "MBlazeISD::Ret";
- case MBlazeISD::Select_CC : return "MBlazeISD::Select_CC";
- default : return NULL;
- }
-}
-
-MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM)
- : TargetLowering(TM, new MBlazeTargetObjectFile()) {
- Subtarget = &TM.getSubtarget<MBlazeSubtarget>();
-
- // MBlaze does not have an i1 type, so use i32 for
- // setcc operation results (slt, sgt, ...).
- setBooleanContents(ZeroOrOneBooleanContent);
- setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
-
- // Set up the register classes
- addRegisterClass(MVT::i32, &MBlaze::GPRRegClass);
- if (Subtarget->hasFPU()) {
- addRegisterClass(MVT::f32, &MBlaze::GPRRegClass);
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
- }
-
- // Floating point operations which are not supported
- setOperationAction(ISD::FREM, MVT::f32, Expand);
- setOperationAction(ISD::FMA, MVT::f32, Expand);
- setOperationAction(ISD::UINT_TO_FP, MVT::i8, Expand);
- setOperationAction(ISD::UINT_TO_FP, MVT::i16, Expand);
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
- setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
- setOperationAction(ISD::FP_ROUND, MVT::f32, Expand);
- setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
- setOperationAction(ISD::FSIN, MVT::f32, Expand);
- setOperationAction(ISD::FCOS, MVT::f32, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
- setOperationAction(ISD::FPOWI, MVT::f32, Expand);
- setOperationAction(ISD::FPOW, MVT::f32, Expand);
- setOperationAction(ISD::FLOG, MVT::f32, Expand);
- setOperationAction(ISD::FLOG2, MVT::f32, Expand);
- setOperationAction(ISD::FLOG10, MVT::f32, Expand);
- setOperationAction(ISD::FEXP, MVT::f32, Expand);
-
- // Load extended operations for i1 types must be promoted
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
-
- // Sign extended loads must be expanded
- setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
-
- // MBlaze has no REM or DIVREM operations.
- setOperationAction(ISD::UREM, MVT::i32, Expand);
- setOperationAction(ISD::SREM, MVT::i32, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
-
- // If the processor doesn't support multiply then expand it
- if (!Subtarget->hasMul()) {
- setOperationAction(ISD::MUL, MVT::i32, Expand);
- }
-
- // If the processor doesn't support 64-bit multiply then expand
- if (!Subtarget->hasMul() || !Subtarget->hasMul64()) {
- setOperationAction(ISD::MULHS, MVT::i32, Expand);
- setOperationAction(ISD::MULHS, MVT::i64, Expand);
- setOperationAction(ISD::MULHU, MVT::i32, Expand);
- setOperationAction(ISD::MULHU, MVT::i64, Expand);
- }
-
- // If the processor doesn't support division then expand
- if (!Subtarget->hasDiv()) {
- setOperationAction(ISD::UDIV, MVT::i32, Expand);
- setOperationAction(ISD::SDIV, MVT::i32, Expand);
- }
-
- // Expand unsupported conversions
- setOperationAction(ISD::BITCAST, MVT::f32, Expand);
- setOperationAction(ISD::BITCAST, MVT::i32, Expand);
-
- // Expand SELECT_CC
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
-
- // MBlaze doesn't have MUL_LOHI
- setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
- setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
-
- // Used by legalize types to correctly generate the setcc result.
- // Without this, every float setcc comes with an AND/OR with the result;
- // we don't want this, since the fpcmp result goes to a flag register,
- // which is used implicitly by brcond and select operations.
- AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
- AddPromotedToType(ISD::SELECT, MVT::i1, MVT::i32);
- AddPromotedToType(ISD::SELECT_CC, MVT::i1, MVT::i32);
-
- // MBlaze Custom Operations
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
- setOperationAction(ISD::JumpTable, MVT::i32, Custom);
- setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
-
- // Variable Argument support
- setOperationAction(ISD::VASTART, MVT::Other, Custom);
- setOperationAction(ISD::VAEND, MVT::Other, Expand);
- setOperationAction(ISD::VAARG, MVT::Other, Expand);
- setOperationAction(ISD::VACOPY, MVT::Other, Expand);
-
-
- // Operations not directly supported by MBlaze.
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::BR_CC, MVT::f32, Expand);
- setOperationAction(ISD::BR_CC, MVT::i32, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
- setOperationAction(ISD::ROTL, MVT::i32, Expand);
- setOperationAction(ISD::ROTR, MVT::i32, Expand);
- setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
- setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
- setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
- setOperationAction(ISD::CTLZ, MVT::i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTPOP, MVT::i32, Expand);
- setOperationAction(ISD::BSWAP, MVT::i32, Expand);
-
- // We don't have line number support yet.
- setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
-
- // Use the default for now
- setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
- setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
-
- // MBlaze doesn't have extending float->double load/store
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-
- setMinFunctionAlignment(2);
-
- setStackPointerRegisterToSaveRestore(MBlaze::R1);
- computeRegisterProperties();
-}
-
-EVT MBlazeTargetLowering::getSetCCResultType(EVT VT) const {
- return MVT::i32;
-}
-
-SDValue MBlazeTargetLowering::LowerOperation(SDValue Op,
- SelectionDAG &DAG) const {
- switch (Op.getOpcode())
- {
- case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
- case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
- case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
- case ISD::JumpTable: return LowerJumpTable(Op, DAG);
- case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
- case ISD::VASTART: return LowerVASTART(Op, DAG);
- }
- return SDValue();
-}
-
-//===----------------------------------------------------------------------===//
-// Lower helper functions
-//===----------------------------------------------------------------------===//
-MachineBasicBlock*
-MBlazeTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB)
- const {
- switch (MI->getOpcode()) {
- default: llvm_unreachable("Unexpected instr type to insert");
-
- case MBlaze::ShiftRL:
- case MBlaze::ShiftRA:
- case MBlaze::ShiftL:
- return EmitCustomShift(MI, MBB);
-
- case MBlaze::Select_FCC:
- case MBlaze::Select_CC:
- return EmitCustomSelect(MI, MBB);
-
- case MBlaze::CAS32:
- case MBlaze::SWP32:
- case MBlaze::LAA32:
- case MBlaze::LAS32:
- case MBlaze::LAD32:
- case MBlaze::LAO32:
- case MBlaze::LAX32:
- case MBlaze::LAN32:
- return EmitCustomAtomic(MI, MBB);
-
- case MBlaze::MEMBARRIER:
- // The Microblaze does not need memory barriers. Just delete the pseudo
- // instruction and finish.
- MI->eraseFromParent();
- return MBB;
- }
-}
-
-MachineBasicBlock*
-MBlazeTargetLowering::EmitCustomShift(MachineInstr *MI,
- MachineBasicBlock *MBB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
-
- // To "insert" a shift left instruction, we actually have to insert a
- // simple loop. The incoming instruction knows the destination vreg to
- // set, the source vreg to operate over and the shift amount.
- const BasicBlock *LLVM_BB = MBB->getBasicBlock();
- MachineFunction::iterator It = MBB;
- ++It;
-
- // start:
- // andi samt, samt, 31
- // beqid samt, finish
- // add dst, src, r0
- // loop:
- // addik samt, samt, -1
- // sra dst, dst
- // bneid samt, loop
- // nop
- // finish:
- MachineFunction *F = MBB->getParent();
- MachineRegisterInfo &R = F->getRegInfo();
- MachineBasicBlock *loop = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *finish = F->CreateMachineBasicBlock(LLVM_BB);
- F->insert(It, loop);
- F->insert(It, finish);
-
- // Update machine-CFG edges by transferring all successors and
- // remaining instructions from the current block to the new block which
- // will contain the PHI node for the result.
- finish->splice(finish->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- MBB->end());
- finish->transferSuccessorsAndUpdatePHIs(MBB);
-
- // Add the loop and finish blocks as successors of the current block.
- MBB->addSuccessor(loop);
- MBB->addSuccessor(finish);
-
- // Next, add the finish block as a successor of the loop block
- loop->addSuccessor(finish);
- loop->addSuccessor(loop);
-
- unsigned IAMT = R.createVirtualRegister(&MBlaze::GPRRegClass);
- BuildMI(MBB, dl, TII->get(MBlaze::ANDI), IAMT)
- .addReg(MI->getOperand(2).getReg())
- .addImm(31);
-
- unsigned IVAL = R.createVirtualRegister(&MBlaze::GPRRegClass);
- BuildMI(MBB, dl, TII->get(MBlaze::ADDIK), IVAL)
- .addReg(MI->getOperand(1).getReg())
- .addImm(0);
-
- BuildMI(MBB, dl, TII->get(MBlaze::BEQID))
- .addReg(IAMT)
- .addMBB(finish);
-
- unsigned DST = R.createVirtualRegister(&MBlaze::GPRRegClass);
- unsigned NDST = R.createVirtualRegister(&MBlaze::GPRRegClass);
- BuildMI(loop, dl, TII->get(MBlaze::PHI), DST)
- .addReg(IVAL).addMBB(MBB)
- .addReg(NDST).addMBB(loop);
-
- unsigned SAMT = R.createVirtualRegister(&MBlaze::GPRRegClass);
- unsigned NAMT = R.createVirtualRegister(&MBlaze::GPRRegClass);
- BuildMI(loop, dl, TII->get(MBlaze::PHI), SAMT)
- .addReg(IAMT).addMBB(MBB)
- .addReg(NAMT).addMBB(loop);
-
- if (MI->getOpcode() == MBlaze::ShiftL)
- BuildMI(loop, dl, TII->get(MBlaze::ADD), NDST).addReg(DST).addReg(DST);
- else if (MI->getOpcode() == MBlaze::ShiftRA)
- BuildMI(loop, dl, TII->get(MBlaze::SRA), NDST).addReg(DST);
- else if (MI->getOpcode() == MBlaze::ShiftRL)
- BuildMI(loop, dl, TII->get(MBlaze::SRL), NDST).addReg(DST);
- else
- llvm_unreachable("Cannot lower unknown shift instruction");
-
- BuildMI(loop, dl, TII->get(MBlaze::ADDIK), NAMT)
- .addReg(SAMT)
- .addImm(-1);
-
- BuildMI(loop, dl, TII->get(MBlaze::BNEID))
- .addReg(NAMT)
- .addMBB(loop);
-
- BuildMI(*finish, finish->begin(), dl,
- TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
- .addReg(IVAL).addMBB(MBB)
- .addReg(NDST).addMBB(loop);
-
- // The pseudo instruction is no longer needed so remove it
- MI->eraseFromParent();
- return finish;
-}
-
-MachineBasicBlock*
-MBlazeTargetLowering::EmitCustomSelect(MachineInstr *MI,
- MachineBasicBlock *MBB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
-
- // To "insert" a SELECT_CC instruction, we actually have to insert the
- // diamond control-flow pattern. The incoming instruction knows the
- // destination vreg to set, the condition code register to branch on, the
- // true/false values to select between, and a branch opcode to use.
- const BasicBlock *LLVM_BB = MBB->getBasicBlock();
- MachineFunction::iterator It = MBB;
- ++It;
-
- // thisMBB:
- // ...
- // TrueVal = ...
- // setcc r1, r2, r3
- // bNE r1, r0, copy1MBB
- // fallthrough --> copy0MBB
- MachineFunction *F = MBB->getParent();
- MachineBasicBlock *flsBB = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *dneBB = F->CreateMachineBasicBlock(LLVM_BB);
-
- unsigned Opc;
- switch (MI->getOperand(4).getImm()) {
- default: llvm_unreachable("Unknown branch condition");
- case MBlazeCC::EQ: Opc = MBlaze::BEQID; break;
- case MBlazeCC::NE: Opc = MBlaze::BNEID; break;
- case MBlazeCC::GT: Opc = MBlaze::BGTID; break;
- case MBlazeCC::LT: Opc = MBlaze::BLTID; break;
- case MBlazeCC::GE: Opc = MBlaze::BGEID; break;
- case MBlazeCC::LE: Opc = MBlaze::BLEID; break;
- }
-
- F->insert(It, flsBB);
- F->insert(It, dneBB);
-
- // Transfer the remainder of MBB and its successor edges to dneBB.
- dneBB->splice(dneBB->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- MBB->end());
- dneBB->transferSuccessorsAndUpdatePHIs(MBB);
-
- MBB->addSuccessor(flsBB);
- MBB->addSuccessor(dneBB);
- flsBB->addSuccessor(dneBB);
-
- BuildMI(MBB, dl, TII->get(Opc))
- .addReg(MI->getOperand(3).getReg())
- .addMBB(dneBB);
-
- // sinkMBB:
- // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
- // ...
- //BuildMI(dneBB, dl, TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
- // .addReg(MI->getOperand(1).getReg()).addMBB(flsBB)
- // .addReg(MI->getOperand(2).getReg()).addMBB(BB);
-
- BuildMI(*dneBB, dneBB->begin(), dl,
- TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(2).getReg()).addMBB(flsBB)
- .addReg(MI->getOperand(1).getReg()).addMBB(MBB);
-
- MI->eraseFromParent(); // The pseudo instruction is gone now.
- return dneBB;
-}
-
-MachineBasicBlock*
-MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI,
- MachineBasicBlock *MBB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
-
- // All atomic instructions on the Microblaze are implemented using the
- // load-linked / store-conditional style atomic instruction sequences.
- // Thus, all operations will look something like the following:
- //
- // start:
- // lwx RV, RP, 0
- // <do stuff>
- // swx RV, RP, 0
- // addic RC, R0, 0
- // bneid RC, start
- //
- // exit:
- //
- // To lower an atomic pseudo instruction we insert this loop structure.
- // The incoming instruction knows the destination vreg to set, the
- // pointer vreg to operate on and, for binary operations, the value operand.
- const BasicBlock *LLVM_BB = MBB->getBasicBlock();
- MachineFunction::iterator It = MBB;
- ++It;
-
- MachineFunction *F = MBB->getParent();
- MachineRegisterInfo &R = F->getRegInfo();
-
- // Create the start and exit basic blocks for the atomic operation
- MachineBasicBlock *start = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exit = F->CreateMachineBasicBlock(LLVM_BB);
- F->insert(It, start);
- F->insert(It, exit);
-
- // Update machine-CFG edges by transferring all successors and
- // remaining instructions from the current block to the new exit block.
- exit->splice(exit->begin(), MBB, llvm::next(MachineBasicBlock::iterator(MI)),
- MBB->end());
- exit->transferSuccessorsAndUpdatePHIs(MBB);
-
- // Add the start block as the fallthrough successor of the current block.
- MBB->addSuccessor(start);
-
- BuildMI(start, dl, TII->get(MBlaze::LWX), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg())
- .addReg(MBlaze::R0);
-
- MachineBasicBlock *final = start;
- unsigned finalReg = 0;
-
- switch (MI->getOpcode()) {
- default: llvm_unreachable("Cannot lower unknown atomic instruction!");
-
- case MBlaze::SWP32:
- finalReg = MI->getOperand(2).getReg();
- start->addSuccessor(exit);
- start->addSuccessor(start);
- break;
-
- case MBlaze::LAN32:
- case MBlaze::LAX32:
- case MBlaze::LAO32:
- case MBlaze::LAD32:
- case MBlaze::LAS32:
- case MBlaze::LAA32: {
- unsigned opcode = 0;
- switch (MI->getOpcode()) {
- default: llvm_unreachable("Cannot lower unknown atomic load!");
- case MBlaze::LAA32: opcode = MBlaze::ADDIK; break;
- case MBlaze::LAS32: opcode = MBlaze::RSUBIK; break;
- case MBlaze::LAD32: opcode = MBlaze::AND; break;
- case MBlaze::LAO32: opcode = MBlaze::OR; break;
- case MBlaze::LAX32: opcode = MBlaze::XOR; break;
- case MBlaze::LAN32: opcode = MBlaze::AND; break;
- }
-
- finalReg = R.createVirtualRegister(&MBlaze::GPRRegClass);
- start->addSuccessor(exit);
- start->addSuccessor(start);
-
- BuildMI(start, dl, TII->get(opcode), finalReg)
- .addReg(MI->getOperand(0).getReg())
- .addReg(MI->getOperand(2).getReg());
-
- if (MI->getOpcode() == MBlaze::LAN32) {
- unsigned tmp = finalReg;
- finalReg = R.createVirtualRegister(&MBlaze::GPRRegClass);
- BuildMI(start, dl, TII->get(MBlaze::XORI), finalReg)
- .addReg(tmp)
- .addImm(-1);
- }
- break;
- }
-
- case MBlaze::CAS32: {
- finalReg = MI->getOperand(3).getReg();
- final = F->CreateMachineBasicBlock(LLVM_BB);
-
- F->insert(It, final);
- start->addSuccessor(exit);
- start->addSuccessor(final);
- final->addSuccessor(exit);
- final->addSuccessor(start);
-
- unsigned CMP = R.createVirtualRegister(&MBlaze::GPRRegClass);
- BuildMI(start, dl, TII->get(MBlaze::CMP), CMP)
- .addReg(MI->getOperand(0).getReg())
- .addReg(MI->getOperand(2).getReg());
-
- BuildMI(start, dl, TII->get(MBlaze::BNEID))
- .addReg(CMP)
- .addMBB(exit);
-
- final->moveAfter(start);
- exit->moveAfter(final);
- break;
- }
- }
-
- unsigned CHK = R.createVirtualRegister(&MBlaze::GPRRegClass);
- BuildMI(final, dl, TII->get(MBlaze::SWX))
- .addReg(finalReg)
- .addReg(MI->getOperand(1).getReg())
- .addReg(MBlaze::R0);
-
- BuildMI(final, dl, TII->get(MBlaze::ADDIC), CHK)
- .addReg(MBlaze::R0)
- .addImm(0);
-
- BuildMI(final, dl, TII->get(MBlaze::BNEID))
- .addReg(CHK)
- .addMBB(start);
-
- // The pseudo instruction is no longer needed so remove it
- MI->eraseFromParent();
- return exit;
-}
-
-//===----------------------------------------------------------------------===//
-// Misc Lower Operation implementation
-//===----------------------------------------------------------------------===//
-//
-
-SDValue MBlazeTargetLowering::LowerSELECT_CC(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- SDValue TrueVal = Op.getOperand(2);
- SDValue FalseVal = Op.getOperand(3);
- DebugLoc dl = Op.getDebugLoc();
- unsigned Opc;
-
- SDValue CompareFlag;
- if (LHS.getValueType() == MVT::i32) {
- Opc = MBlazeISD::Select_CC;
- CompareFlag = DAG.getNode(MBlazeISD::ICmp, dl, MVT::i32, LHS, RHS)
- .getValue(1);
- } else {
- llvm_unreachable("Cannot lower select_cc with unknown type");
- }
-
- return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
- CompareFlag);
-}
-
-SDValue MBlazeTargetLowering::
-LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
- // FIXME there isn't actually debug info here
- DebugLoc dl = Op.getDebugLoc();
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
-
- return DAG.getNode(MBlazeISD::Wrap, dl, MVT::i32, GA);
-}
-
-SDValue MBlazeTargetLowering::
-LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
- llvm_unreachable("TLS not implemented for MicroBlaze.");
-}
-
-SDValue MBlazeTargetLowering::
-LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
- SDValue ResNode;
- SDValue HiPart;
- // FIXME there isn't actually debug info here
- DebugLoc dl = Op.getDebugLoc();
-
- EVT PtrVT = Op.getValueType();
- JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
-
- SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 0);
- return DAG.getNode(MBlazeISD::Wrap, dl, MVT::i32, JTI);
-}
-
-SDValue MBlazeTargetLowering::
-LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
- SDValue ResNode;
- ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
- const Constant *C = N->getConstVal();
- DebugLoc dl = Op.getDebugLoc();
-
- SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
- N->getOffset(), 0);
- return DAG.getNode(MBlazeISD::Wrap, dl, MVT::i32, CP);
-}
-
-SDValue MBlazeTargetLowering::LowerVASTART(SDValue Op,
- SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- MBlazeFunctionInfo *FuncInfo = MF.getInfo<MBlazeFunctionInfo>();
-
- DebugLoc dl = Op.getDebugLoc();
- SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
- getPointerTy());
-
- // vastart just stores the address of the VarArgsFrameIndex slot into the
- // memory location argument.
- const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
- MachinePointerInfo(SV),
- false, false, 0);
-}
-
-//===----------------------------------------------------------------------===//
-// Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeGenCallingConv.inc"
-
-static bool CC_MBlaze_AssignReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
- static const uint16_t ArgRegs[] = {
- MBlaze::R5, MBlaze::R6, MBlaze::R7,
- MBlaze::R8, MBlaze::R9, MBlaze::R10
- };
-
- const unsigned NumArgRegs = array_lengthof(ArgRegs);
- unsigned Reg = State.AllocateReg(ArgRegs, NumArgRegs);
- if (!Reg) return false;
-
- unsigned SizeInBytes = ValVT.getSizeInBits() >> 3;
- State.AllocateStack(SizeInBytes, SizeInBytes);
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
-
- return true;
-}
-
-//===----------------------------------------------------------------------===//
-// Call Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-/// LowerCall - function arguments are copied from virtual regs to
-/// (physical regs)/(stack frame), and CALLSEQ_START and CALLSEQ_END are
-/// emitted. TODO: isVarArg, isTailCall.
-SDValue MBlazeTargetLowering::
-LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const {
- SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
- SDValue Chain = CLI.Chain;
- SDValue Callee = CLI.Callee;
- bool &isTailCall = CLI.IsTailCall;
- CallingConv::ID CallConv = CLI.CallConv;
- bool isVarArg = CLI.IsVarArg;
-
- // MBlaze does not yet support tail call optimization
- isTailCall = false;
-
- // The MBlaze requires stack slots for arguments passed to var arg
- // functions even if they are passed in registers.
- bool needsRegArgSlots = isVarArg;
-
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();
-
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeCallOperands(Outs, CC_MBlaze);
-
- // Get a count of how many bytes are to be pushed on the stack.
- unsigned NumBytes = CCInfo.getNextStackOffset();
-
- // Variable argument function calls require a minimum of 24 bytes of stack
- if (isVarArg && NumBytes < 24) NumBytes = 24;
-
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
-
- SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
- SmallVector<SDValue, 8> MemOpChains;
-
- // Walk the register/memloc assignments, inserting copies/loads.
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
- MVT RegVT = VA.getLocVT();
- SDValue Arg = OutVals[i];
-
- // Promote the value if needed.
- switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::SExt:
- Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
- break;
- case CCValAssign::ZExt:
- Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
- break;
- case CCValAssign::AExt:
- Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
- break;
- }
-
- // Arguments that can be passed in a register must be kept in the
- // RegsToPass vector.
- if (VA.isRegLoc()) {
- RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
- } else {
- // A register argument can't get to this point...
- assert(VA.isMemLoc());
-
- // Since we are already passing values on the stack we don't
- // need to worry about creating additional slots for the
- // values passed via registers.
- needsRegArgSlots = false;
-
- // Create the frame index object for this incoming parameter
- unsigned ArgSize = VA.getValVT().getSizeInBits()/8;
- unsigned StackLoc = VA.getLocMemOffset() + 4;
- int FI = MFI->CreateFixedObject(ArgSize, StackLoc, true);
-
- SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy());
-
- // Emit an ISD::STORE which stores the
- // parameter value to a stack location.
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
- }
- }
-
- // If we need to reserve stack space for the arguments passed via registers
- // then create a fixed stack object at the beginning of the stack.
- if (needsRegArgSlots && TFI.hasReservedCallFrame(MF))
- MFI->CreateFixedObject(28,0,true);
-
- // Transform all store nodes into one single node because all store
- // nodes are independent of each other.
- if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
-
- // Build a sequence of copy-to-reg nodes chained together with token
- // chain and flag operands which copy the outgoing args into registers.
- // The InFlag is necessary since all emitted instructions must be
- // stuck together.
- SDValue InFlag;
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
- }
-
- // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
- // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
- // node so that legalize doesn't hack it.
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
- getPointerTy(), 0, 0);
- else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
- Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
- getPointerTy(), 0);
-
- // MBlazeJmpLink = #chain, #target_address, #opt_in_flags...
- // = Chain, Callee, Reg#1, Reg#2, ...
- //
- // Returns a chain & a flag for retval copy to use.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- SmallVector<SDValue, 8> Ops;
- Ops.push_back(Chain);
- Ops.push_back(Callee);
-
- // Add argument registers to the end of the list so that they are
- // known live into the call.
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Ops.push_back(DAG.getRegister(RegsToPass[i].first,
- RegsToPass[i].second.getValueType()));
- }
-
- if (InFlag.getNode())
- Ops.push_back(InFlag);
-
- Chain = DAG.getNode(MBlazeISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size());
- InFlag = Chain.getValue(1);
-
- // Create the CALLSEQ_END node.
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag);
- if (!Ins.empty())
- InFlag = Chain.getValue(1);
-
- // Handle result values, copying them out of physregs into vregs that we
- // return.
- return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
- Ins, dl, DAG, InVals);
-}
-
-/// LowerCallResult - Lower the result values of a call into the
-/// appropriate copies out of appropriate physical registers.
-SDValue MBlazeTargetLowering::
-LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv,
- bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- // Assign locations to each value returned by this call.
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
-
- CCInfo.AnalyzeCallResult(Ins, RetCC_MBlaze);
-
- // Copy all of the result registers out of their specified physreg.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
- RVLocs[i].getValVT(), InFlag).getValue(1);
- InFlag = Chain.getValue(2);
- InVals.push_back(Chain.getValue(0));
- }
-
- return Chain;
-}
-
-//===----------------------------------------------------------------------===//
-// Formal Arguments Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-/// LowerFormalArguments - transform physical registers into
-/// virtual registers and generate load operations for
-/// arguments placed on the stack.
-SDValue MBlazeTargetLowering::
-LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
-
- unsigned StackReg = MF.getTarget().getRegisterInfo()->getFrameRegister(MF);
- MBlazeFI->setVarArgsFrameIndex(0);
-
- // Used with varargs to accumulate store chains.
- std::vector<SDValue> OutChains;
-
- // Keep track of the last register used for arguments
- unsigned ArgRegEnd = 0;
-
- // Assign locations to all of the incoming arguments.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
-
- CCInfo.AnalyzeFormalArguments(Ins, CC_MBlaze);
- SDValue StackPtr;
-
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
-
- // Arguments stored on registers
- if (VA.isRegLoc()) {
- MVT RegVT = VA.getLocVT();
- ArgRegEnd = VA.getLocReg();
- const TargetRegisterClass *RC;
-
- if (RegVT == MVT::i32)
- RC = &MBlaze::GPRRegClass;
- else if (RegVT == MVT::f32)
- RC = &MBlaze::GPRRegClass;
- else
- llvm_unreachable("RegVT not supported by LowerFormalArguments");
-
- // Transform the arguments stored on
- // physical registers into virtual ones
- unsigned Reg = MF.addLiveIn(ArgRegEnd, RC);
- SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
-
- // If this is an 8 or 16-bit value, it has been passed promoted
- // to 32 bits. Insert an assert[sz]ext to capture this, then
- // truncate to the right size. If it is a floating point value
- // then convert to the correct type.
- if (VA.getLocInfo() != CCValAssign::Full) {
- unsigned Opcode = 0;
- if (VA.getLocInfo() == CCValAssign::SExt)
- Opcode = ISD::AssertSext;
- else if (VA.getLocInfo() == CCValAssign::ZExt)
- Opcode = ISD::AssertZext;
- if (Opcode)
- ArgValue = DAG.getNode(Opcode, dl, RegVT, ArgValue,
- DAG.getValueType(VA.getValVT()));
- ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
- }
-
- InVals.push_back(ArgValue);
- } else { // !VA.isRegLoc()
- // sanity check
- assert(VA.isMemLoc());
-
- // The last argument is not a register
- ArgRegEnd = 0;
-
- // The stack pointer offset is relative to the caller stack frame.
- // Since the real stack size is unknown here, a negative SPOffset
- // is used so there's a way to adjust these offsets when the stack
- // size gets known (in eliminateFrameIndex). A dummy SPOffset is
- // used instead of a direct negative address (which is recorded to
- // be used in emitPrologue) to avoid miscalculating the first stack
- // offset in PEI::calculateFrameObjectOffsets.
- // Arguments are always 32-bit.
- unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
- unsigned StackLoc = VA.getLocMemOffset() + 4;
- int FI = MFI->CreateFixedObject(ArgSize, 0, true);
- MBlazeFI->recordLoadArgsFI(FI, -StackLoc);
- MBlazeFI->recordLiveIn(FI);
-
- // Create load nodes to retrieve arguments from the stack
- SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
- InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(FI),
- false, false, false, 0));
- }
- }
-
- // To meet the ABI, when VARARGS are passed in registers, the registers
- // must have their values written to the caller stack frame. If the last
- // argument was placed on the stack, there's no need to save any register.
- if ((isVarArg) && ArgRegEnd) {
- if (StackPtr.getNode() == 0)
- StackPtr = DAG.getRegister(StackReg, getPointerTy());
-
- // The last register argument that must be saved is MBlaze::R10
- const TargetRegisterClass *RC = &MBlaze::GPRRegClass;
-
- unsigned Begin = getMBlazeRegisterNumbering(MBlaze::R5);
- unsigned Start = getMBlazeRegisterNumbering(ArgRegEnd+1);
- unsigned End = getMBlazeRegisterNumbering(MBlaze::R10);
- unsigned StackLoc = Start - Begin + 1;
-
- for (; Start <= End; ++Start, ++StackLoc) {
- unsigned Reg = getMBlazeRegisterFromNumbering(Start);
- unsigned LiveReg = MF.addLiveIn(Reg, RC);
- SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, LiveReg, MVT::i32);
-
- int FI = MFI->CreateFixedObject(4, 0, true);
- MBlazeFI->recordStoreVarArgsFI(FI, -(StackLoc*4));
- SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy());
- OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
-
- // Record the frame index of the first variable argument
- // which is a value necessary to VASTART.
- if (!MBlazeFI->getVarArgsFrameIndex())
- MBlazeFI->setVarArgsFrameIndex(FI);
- }
- }
-
- // All stores are grouped in one node to allow the matching between
- // the size of Ins and InVals. This only happens on vararg functions.
- if (!OutChains.empty()) {
- OutChains.push_back(Chain);
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], OutChains.size());
- }
-
- return Chain;
-}
-
-//===----------------------------------------------------------------------===//
-// Return Value Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-SDValue MBlazeTargetLowering::
-LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
- // CCValAssign - represent the assignment of
- // the return value to a location
- SmallVector<CCValAssign, 16> RVLocs;
-
- // CCState - Info about the registers and stack slot.
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
-
- // Analyze return values.
- CCInfo.AnalyzeReturn(Outs, RetCC_MBlaze);
-
- SDValue Flag;
- SmallVector<SDValue, 4> RetOps(1, Chain);
-
- // If this function is using the interrupt_handler calling convention
- // then use "rtid r14, 0" otherwise use "rtsd r15, 8"
- unsigned Ret = (CallConv == CallingConv::MBLAZE_INTR) ? MBlazeISD::IRet
- : MBlazeISD::Ret;
- unsigned Reg = (CallConv == CallingConv::MBLAZE_INTR) ? MBlaze::R14
- : MBlaze::R15;
- RetOps.push_back(DAG.getRegister(Reg, MVT::i32));
-
-
- // Copy the result values into the output registers.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- CCValAssign &VA = RVLocs[i];
- assert(VA.isRegLoc() && "Can only return in registers!");
-
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- OutVals[i], Flag);
-
- // Guarantee that all emitted copies are
- // stuck together, so nothing is scheduled in between.
- Flag = Chain.getValue(1);
- RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
- }
-
- RetOps[0] = Chain; // Update chain.
-
- // Add the flag if we have it.
- if (Flag.getNode())
- RetOps.push_back(Flag);
-
- return DAG.getNode(Ret, dl, MVT::Other, &RetOps[0], RetOps.size());
-}
-
-//===----------------------------------------------------------------------===//
-// MBlaze Inline Assembly Support
-//===----------------------------------------------------------------------===//
-
-/// getConstraintType - Given a constraint letter, return the type of
-/// constraint it is for this target.
-MBlazeTargetLowering::ConstraintType MBlazeTargetLowering::
-getConstraintType(const std::string &Constraint) const
-{
- // MBlaze-specific constraints:
- //
- // 'd' : An address register. Equivalent to r.
- // 'y' : Equivalent to r; retained for
- // backwards compatibility.
- // 'f' : Floating Point registers.
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- default : break;
- case 'd':
- case 'y':
- case 'f':
- return C_RegisterClass;
- }
- }
- return TargetLowering::getConstraintType(Constraint);
-}
-
-/// Examine constraint type and operand type and determine a weight value.
-/// This object must already have been set up with the operand type
-/// and the current alternative constraint selected.
-TargetLowering::ConstraintWeight
-MBlazeTargetLowering::getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const {
- ConstraintWeight weight = CW_Invalid;
- Value *CallOperandVal = info.CallOperandVal;
- // If we don't have a value, we can't do a match,
- // but allow it at the lowest weight.
- if (CallOperandVal == NULL)
- return CW_Default;
- Type *type = CallOperandVal->getType();
- // Look at the constraint type.
- switch (*constraint) {
- default:
- weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
- break;
- case 'd':
- case 'y':
- if (type->isIntegerTy())
- weight = CW_Register;
- break;
- case 'f':
- if (type->isFloatTy())
- weight = CW_Register;
- break;
- }
- return weight;
-}
-
-/// Given a register class constraint, like 'r', if this corresponds directly
-/// to an LLVM register class, return a register of 0 and the register class
-/// pointer.
-std::pair<unsigned, const TargetRegisterClass*> MBlazeTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- case 'r':
- return std::make_pair(0U, &MBlaze::GPRRegClass);
- // TODO: These can't possibly be right, but match what was in
- // getRegClassForInlineAsmConstraint.
- case 'd':
- case 'y':
- case 'f':
- if (VT == MVT::f32)
- return std::make_pair(0U, &MBlaze::GPRRegClass);
- }
- }
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
-}
-
-bool MBlazeTargetLowering::
-isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
- // The MBlaze target isn't yet aware of offsets.
- return false;
-}
-
-bool MBlazeTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
- return VT != MVT::f32;
-}
diff --git a/lib/Target/MBlaze/MBlazeISelLowering.h b/lib/Target/MBlaze/MBlazeISelLowering.h
deleted file mode 100644
index f6b4095a93dc..000000000000
--- a/lib/Target/MBlaze/MBlazeISelLowering.h
+++ /dev/null
@@ -1,179 +0,0 @@
-//===-- MBlazeISelLowering.h - MBlaze DAG Lowering Interface ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that MBlaze uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBlazeISELLOWERING_H
-#define MBlazeISELLOWERING_H
-
-#include "MBlaze.h"
-#include "MBlazeSubtarget.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetLowering.h"
-
-namespace llvm {
- namespace MBlazeCC {
- enum CC {
- FIRST = 0,
- EQ,
- NE,
- GT,
- LT,
- GE,
- LE
- };
-
- inline static CC getOppositeCondition(CC cc) {
- switch (cc) {
- default: llvm_unreachable("Unknown condition code");
- case EQ: return NE;
- case NE: return EQ;
- case GT: return LE;
- case LT: return GE;
- case GE: return LT;
- case LE: return GT;
- }
- }
-
- inline static const char *MBlazeCCToString(CC cc) {
- switch (cc) {
- default: llvm_unreachable("Unknown condition code");
- case EQ: return "eq";
- case NE: return "ne";
- case GT: return "gt";
- case LT: return "lt";
- case GE: return "ge";
- case LE: return "le";
- }
- }
- }
-
- namespace MBlazeISD {
- enum NodeType {
- // Start the numbering from where ISD NodeType finishes.
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
-
- // Jump and link (call)
- JmpLink,
-
- // Handle gp_rel (small data/bss sections) relocation.
- GPRel,
-
- // Select CC Pseudo Instruction
- Select_CC,
-
- // Wrap up multiple types of instructions
- Wrap,
-
- // Integer Compare
- ICmp,
-
- // Return from subroutine
- Ret,
-
- // Return from interrupt
- IRet
- };
- }
-
- //===--------------------------------------------------------------------===//
- // TargetLowering Implementation
- //===--------------------------------------------------------------------===//
-
- class MBlazeTargetLowering : public TargetLowering {
- public:
- explicit MBlazeTargetLowering(MBlazeTargetMachine &TM);
-
- /// LowerOperation - Provide custom lowering hooks for some operations.
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
-
- /// getTargetNodeName - This method returns the name of a target specific
- /// DAG node.
- virtual const char *getTargetNodeName(unsigned Opcode) const;
-
- /// getSetCCResultType - get the ISD::SETCC result ValueType
- EVT getSetCCResultType(EVT VT) const;
-
- private:
- // Subtarget Info
- const MBlazeSubtarget *Subtarget;
-
-
- // Lower Operand helpers
- SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- // Lower Operand specifics
- SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
-
- virtual SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
-
- virtual MachineBasicBlock*
- EmitCustomShift(MachineInstr *MI, MachineBasicBlock *MBB) const;
-
- virtual MachineBasicBlock*
- EmitCustomSelect(MachineInstr *MI, MachineBasicBlock *MBB) const;
-
- virtual MachineBasicBlock*
- EmitCustomAtomic(MachineInstr *MI, MachineBasicBlock *MBB) const;
-
- virtual MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const;
-
- // Inline asm support
- ConstraintType getConstraintType(const std::string &Constraint) const;
-
- /// Examine constraint string and operand type and determine a weight value.
- /// The operand object must already have been set up with the operand type.
- ConstraintWeight getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const;
-
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
-
- virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
-
- /// isFPImmLegal - Returns true if the target can instruction select the
- /// specified FP immediate natively. If false, the legalizer will
- /// materialize the FP immediate as a load from a constant pool.
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
- };
-}
-
-#endif // MBlazeISELLOWERING_H
diff --git a/lib/Target/MBlaze/MBlazeInstrFPU.td b/lib/Target/MBlaze/MBlazeInstrFPU.td
deleted file mode 100644
index 3f145938728c..000000000000
--- a/lib/Target/MBlaze/MBlazeInstrFPU.td
+++ /dev/null
@@ -1,219 +0,0 @@
-//===-- MBlazeInstrFPU.td - MBlaze FPU Instruction defs ----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// MBlaze profiles and nodes
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// MBlaze Operand, Complex Patterns and Transformations Definitions.
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Memory Access Instructions
-//===----------------------------------------------------------------------===//
-class LoadFM<bits<6> op, string instr_asm, PatFrag OpNode> :
- TA<op, 0x000, (outs GPR:$dst), (ins memrr:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [(set (f32 GPR:$dst), (OpNode xaddr:$addr))], IIC_MEMl>;
-
-class LoadFMI<bits<6> op, string instr_asm, PatFrag OpNode> :
- TB<op, (outs GPR:$dst), (ins memri:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [(set (f32 GPR:$dst), (OpNode iaddr:$addr))], IIC_MEMl>;
-
-class StoreFM<bits<6> op, string instr_asm, PatFrag OpNode> :
- TA<op, 0x000, (outs), (ins GPR:$dst, memrr:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [(OpNode (f32 GPR:$dst), xaddr:$addr)], IIC_MEMs>;
-
-class StoreFMI<bits<6> op, string instr_asm, PatFrag OpNode> :
- TB<op, (outs), (ins GPR:$dst, memrr:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [(OpNode (f32 GPR:$dst), iaddr:$addr)], IIC_MEMs>;
-
-class ArithF<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode,
- InstrItinClass itin> :
- TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>;
-
-class CmpFN<bits<6> op, bits<11> flags, string instr_asm,
- InstrItinClass itin> :
- TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], itin>;
-
-class ArithFR<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode,
- InstrItinClass itin> :
- TAR<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
- !strconcat(instr_asm, " $dst, $c, $b"),
- [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>;
-
-class LogicFI<bits<6> op, string instr_asm> :
- TB<op, (outs GPR:$dst), (ins GPR:$b, fimm:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], IIC_ALU>;
-
-let rb=0 in {
- class ArithF2<bits<6> op, bits<11> flags, string instr_asm,
- InstrItinClass itin> :
- TA<op, flags, (outs GPR:$dst), (ins GPR:$b),
- !strconcat(instr_asm, " $dst, $b"),
- [], itin>;
-
- class ArithIF<bits<6> op, bits<11> flags, string instr_asm,
- InstrItinClass itin> :
- TA<op, flags, (outs GPR:$dst), (ins GPR:$b),
- !strconcat(instr_asm, " $dst, $b"),
- [], itin>;
-
- class ArithFI<bits<6> op, bits<11> flags, string instr_asm,
- InstrItinClass itin> :
- TA<op, flags, (outs GPR:$dst), (ins GPR:$b),
- !strconcat(instr_asm, " $dst, $b"),
- [], itin>;
-}
-
-//===----------------------------------------------------------------------===//
-// Pseudo instructions
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// FPU Arithmetic Instructions
-//===----------------------------------------------------------------------===//
-let Predicates=[HasFPU] in {
- def FORI : LogicFI<0x28, "ori ">;
- def FADD : ArithF<0x16, 0x000, "fadd ", fadd, IIC_FPU>;
- def FRSUB : ArithFR<0x16, 0x080, "frsub ", fsub, IIC_FPU>;
- def FMUL : ArithF<0x16, 0x100, "fmul ", fmul, IIC_FPU>;
- def FDIV : ArithF<0x16, 0x180, "fdiv ", fdiv, IIC_FPUd>;
-}
-
-let Predicates=[HasFPU], isCodeGenOnly=1 in {
- def LWF : LoadFM<0x32, "lw ", load>;
- def LWFI : LoadFMI<0x3A, "lwi ", load>;
-
- def SWF : StoreFM<0x36, "sw ", store>;
- def SWFI : StoreFMI<0x3E, "swi ", store>;
-}
-
-let Predicates=[HasFPU,HasSqrt] in {
- def FLT : ArithIF<0x16, 0x280, "flt ", IIC_FPUf>;
- def FINT : ArithFI<0x16, 0x300, "fint ", IIC_FPUi>;
- def FSQRT : ArithF2<0x16, 0x380, "fsqrt ", IIC_FPUs>;
-}
-
-let isAsCheapAsAMove = 1 in {
- def FCMP_UN : CmpFN<0x16, 0x200, "fcmp.un", IIC_FPUc>;
- def FCMP_LT : CmpFN<0x16, 0x210, "fcmp.lt", IIC_FPUc>;
- def FCMP_EQ : CmpFN<0x16, 0x220, "fcmp.eq", IIC_FPUc>;
- def FCMP_LE : CmpFN<0x16, 0x230, "fcmp.le", IIC_FPUc>;
- def FCMP_GT : CmpFN<0x16, 0x240, "fcmp.gt", IIC_FPUc>;
- def FCMP_NE : CmpFN<0x16, 0x250, "fcmp.ne", IIC_FPUc>;
- def FCMP_GE : CmpFN<0x16, 0x260, "fcmp.ge", IIC_FPUc>;
-}
-
-
-let usesCustomInserter = 1 in {
- def Select_FCC : MBlazePseudo<(outs GPR:$dst),
- (ins GPR:$T, GPR:$F, GPR:$CMP, i32imm:$CC),
- "; SELECT_FCC PSEUDO!",
- []>;
-}
-
-// Floating point conversions
-let Predicates=[HasFPU] in {
- def : Pat<(sint_to_fp GPR:$V), (FLT GPR:$V)>;
- def : Pat<(fp_to_sint GPR:$V), (FINT GPR:$V)>;
- def : Pat<(fsqrt GPR:$V), (FSQRT GPR:$V)>;
-}
-
-// SET_CC operations
-let Predicates=[HasFPU] in {
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETEQ),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_EQ GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETNE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_EQ GPR:$L, GPR:$R), 1)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOEQ),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_EQ GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETONE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (XOR (FCMP_UN GPR:$L, GPR:$R),
- (FCMP_EQ GPR:$L, GPR:$R)), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETONE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (OR (FCMP_UN GPR:$L, GPR:$R),
- (FCMP_EQ GPR:$L, GPR:$R)), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETGT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_GT GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETLT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_LT GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETGE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_GE GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETLE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_LE GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOGT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_GT GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOLT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_LT GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOGE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_GE GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOLE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_LE GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUEQ),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (OR (FCMP_UN GPR:$L, GPR:$R),
- (FCMP_EQ GPR:$L, GPR:$R)), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUNE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_NE GPR:$L, GPR:$R), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUGT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (OR (FCMP_UN GPR:$L, GPR:$R),
- (FCMP_GT GPR:$L, GPR:$R)), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETULT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (OR (FCMP_UN GPR:$L, GPR:$R),
- (FCMP_LT GPR:$L, GPR:$R)), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUGE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (OR (FCMP_UN GPR:$L, GPR:$R),
- (FCMP_GE GPR:$L, GPR:$R)), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETULE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (OR (FCMP_UN GPR:$L, GPR:$R),
- (FCMP_LE GPR:$L, GPR:$R)), 2)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETO),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_UN GPR:$L, GPR:$R), 1)>;
- def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUO),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (FCMP_UN GPR:$L, GPR:$R), 2)>;
-}
-
-// SELECT operations
-def : Pat<(select (i32 GPR:$C), (f32 GPR:$T), (f32 GPR:$F)),
- (Select_FCC GPR:$T, GPR:$F, GPR:$C, 2)>;
-
-//===----------------------------------------------------------------------===//
-// Patterns for Floating Point Instructions
-//===----------------------------------------------------------------------===//
-def : Pat<(f32 fpimm:$imm), (FORI (i32 R0), fpimm:$imm)>;
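The SET_CC patterns in the deleted MBlazeInstrFPU.td above build every unordered float comparison from the ordered FCMP results plus FCMP_UN: SETUEQ becomes OR(FCMP_UN, FCMP_EQ), SETULT becomes OR(FCMP_UN, FCMP_LT), and so on. A minimal standalone C++ sketch of that relation (a host-side model only, with illustrative names; not part of the deleted sources):

    #include <cmath>

    // FCMP_UN is true when either operand is NaN; the ordered compares are
    // false in that case, mirroring fcmp.un / fcmp.eq / fcmp.lt above.
    static bool fcmp_un(float l, float r) { return std::isnan(l) || std::isnan(r); }
    static bool fcmp_eq(float l, float r) { return l == r; }
    static bool fcmp_lt(float l, float r) { return l <  r; }

    // SETUEQ (unordered or equal) == OR(FCMP_UN, FCMP_EQ), as in the pattern above.
    static bool setueq(float l, float r) { return fcmp_un(l, r) || fcmp_eq(l, r); }
    // SETULT (unordered or less-than) == OR(FCMP_UN, FCMP_LT).
    static bool setult(float l, float r) { return fcmp_un(l, r) || fcmp_lt(l, r); }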
diff --git a/lib/Target/MBlaze/MBlazeInstrFSL.td b/lib/Target/MBlaze/MBlazeInstrFSL.td
deleted file mode 100644
index 91b69de05102..000000000000
--- a/lib/Target/MBlaze/MBlazeInstrFSL.td
+++ /dev/null
@@ -1,229 +0,0 @@
-//===-- MBlazeInstrFSL.td - MBlaze FSL Instruction defs ----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// FSL Instruction Formats
-//===----------------------------------------------------------------------===//
-class FSLGet<bits<6> op, bits<5> flags, string instr_asm, Intrinsic OpNode> :
- MBlazeInst<op, FRCX, (outs GPR:$dst), (ins fslimm:$b),
- !strconcat(instr_asm, " $dst, $b"),
- [(set GPR:$dst, (OpNode immZExt4:$b))],IIC_FSLg>
-{
- bits<5> rd;
- bits<4> fslno;
-
- let Inst{6-10} = rd;
- let Inst{11-15} = 0x0;
- let Inst{16} = 0x0;
- let Inst{17-21} = flags; // NCTAE
- let Inst{22-27} = 0x0;
- let Inst{28-31} = fslno;
-}
-
-class FSLGetD<bits<6> op, bits<5> flags, string instr_asm, Intrinsic OpNode> :
- MBlazeInst<op, FRCR, (outs GPR:$dst), (ins GPR:$b),
- !strconcat(instr_asm, " $dst, $b"),
- [(set GPR:$dst, (OpNode GPR:$b))], IIC_FSLg>
-{
- bits<5> rd;
- bits<5> rb;
-
- let Inst{6-10} = rd;
- let Inst{11-15} = 0x0;
- let Inst{16-20} = rb;
- let Inst{21} = 0x0;
- let Inst{22-26} = flags; // NCTAE
- let Inst{27-31} = 0x0;
-}
-
-class FSLPut<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> :
- MBlazeInst<op, FCRCX, (outs), (ins GPR:$v, fslimm:$b),
- !strconcat(instr_asm, " $v, $b"),
- [(OpNode GPR:$v, immZExt4:$b)], IIC_FSLp>
-{
- bits<5> ra;
- bits<4> fslno;
-
- let Inst{6-10} = 0x0;
- let Inst{11-15} = ra;
- let Inst{16} = 0x1;
- let Inst{17-20} = flags; // NCTA
- let Inst{21-27} = 0x0;
- let Inst{28-31} = fslno;
-}
-
-class FSLPutD<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> :
- MBlazeInst<op, FCRR, (outs), (ins GPR:$v, GPR:$b),
- !strconcat(instr_asm, " $v, $b"),
- [(OpNode GPR:$v, GPR:$b)], IIC_FSLp>
-{
- bits<5> ra;
- bits<5> rb;
-
- let Inst{6-10} = 0x0;
- let Inst{11-15} = ra;
- let Inst{16-20} = rb;
- let Inst{21} = 0x1;
- let Inst{22-25} = flags; // NCTA
- let Inst{26-31} = 0x0;
-}
-
-class FSLPutT<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> :
- MBlazeInst<op, FCX, (outs), (ins fslimm:$b),
- !strconcat(instr_asm, " $b"),
- [(OpNode immZExt4:$b)], IIC_FSLp>
-{
- bits<4> fslno;
-
- let Inst{6-10} = 0x0;
- let Inst{11-15} = 0x0;
- let Inst{16} = 0x1;
- let Inst{17-20} = flags; // NCTA
- let Inst{21-27} = 0x0;
- let Inst{28-31} = fslno;
-}
-
-class FSLPutTD<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> :
- MBlazeInst<op, FCR, (outs), (ins GPR:$b),
- !strconcat(instr_asm, " $b"),
- [(OpNode GPR:$b)], IIC_FSLp>
-{
- bits<5> rb;
-
- let Inst{6-10} = 0x0;
- let Inst{11-15} = 0x0;
- let Inst{16-20} = rb;
- let Inst{21} = 0x1;
- let Inst{22-25} = flags; // NCTA
- let Inst{26-31} = 0x0;
-}
-
-//===----------------------------------------------------------------------===//
-// FSL Get Instructions
-//===----------------------------------------------------------------------===//
-def GET : FSLGet<0x1B, 0x00, "get ", int_mblaze_fsl_get>;
-def AGET : FSLGet<0x1B, 0x02, "aget ", int_mblaze_fsl_aget>;
-def CGET : FSLGet<0x1B, 0x08, "cget ", int_mblaze_fsl_cget>;
-def CAGET : FSLGet<0x1B, 0x0A, "caget ", int_mblaze_fsl_caget>;
-def EGET : FSLGet<0x1B, 0x01, "eget ", int_mblaze_fsl_eget>;
-def EAGET : FSLGet<0x1B, 0x03, "eaget ", int_mblaze_fsl_eaget>;
-def ECGET : FSLGet<0x1B, 0x09, "ecget ", int_mblaze_fsl_ecget>;
-def ECAGET : FSLGet<0x1B, 0x0B, "ecaget ", int_mblaze_fsl_ecaget>;
-def TGET : FSLGet<0x1B, 0x04, "tget ", int_mblaze_fsl_tget>;
-def TAGET : FSLGet<0x1B, 0x06, "taget ", int_mblaze_fsl_taget>;
-def TCGET : FSLGet<0x1B, 0x0C, "tcget ", int_mblaze_fsl_tcget>;
-def TCAGET : FSLGet<0x1B, 0x0E, "tcaget ", int_mblaze_fsl_tcaget>;
-def TEGET : FSLGet<0x1B, 0x05, "teget ", int_mblaze_fsl_teget>;
-def TEAGET : FSLGet<0x1B, 0x07, "teaget ", int_mblaze_fsl_teaget>;
-def TECGET : FSLGet<0x1B, 0x0D, "tecget ", int_mblaze_fsl_tecget>;
-def TECAGET : FSLGet<0x1B, 0x0F, "tecaget ", int_mblaze_fsl_tecaget>;
-
-let Defs = [CARRY] in {
- def NGET : FSLGet<0x1B, 0x10, "nget ", int_mblaze_fsl_nget>;
- def NAGET : FSLGet<0x1B, 0x12, "naget ", int_mblaze_fsl_naget>;
- def NCGET : FSLGet<0x1B, 0x18, "ncget ", int_mblaze_fsl_ncget>;
- def NCAGET : FSLGet<0x1B, 0x1A, "ncaget ", int_mblaze_fsl_ncaget>;
- def NEGET : FSLGet<0x1B, 0x11, "neget ", int_mblaze_fsl_neget>;
- def NEAGET : FSLGet<0x1B, 0x13, "neaget ", int_mblaze_fsl_neaget>;
- def NECGET : FSLGet<0x1B, 0x19, "necget ", int_mblaze_fsl_necget>;
- def NECAGET : FSLGet<0x1B, 0x1B, "necaget ", int_mblaze_fsl_necaget>;
- def TNGET : FSLGet<0x1B, 0x14, "tnget ", int_mblaze_fsl_tnget>;
- def TNAGET : FSLGet<0x1B, 0x16, "tnaget ", int_mblaze_fsl_tnaget>;
- def TNCGET : FSLGet<0x1B, 0x1C, "tncget ", int_mblaze_fsl_tncget>;
- def TNCAGET : FSLGet<0x1B, 0x1E, "tncaget ", int_mblaze_fsl_tncaget>;
- def TNEGET : FSLGet<0x1B, 0x15, "tneget ", int_mblaze_fsl_tneget>;
- def TNEAGET : FSLGet<0x1B, 0x17, "tneaget ", int_mblaze_fsl_tneaget>;
- def TNECGET : FSLGet<0x1B, 0x1D, "tnecget ", int_mblaze_fsl_tnecget>;
- def TNECAGET : FSLGet<0x1B, 0x1F, "tnecaget ", int_mblaze_fsl_tnecaget>;
-}
-
-//===----------------------------------------------------------------------===//
-// FSL Dynamic Get Instructions
-//===----------------------------------------------------------------------===//
-def GETD : FSLGetD<0x13, 0x00, "getd ", int_mblaze_fsl_get>;
-def AGETD : FSLGetD<0x13, 0x02, "agetd ", int_mblaze_fsl_aget>;
-def CGETD : FSLGetD<0x13, 0x08, "cgetd ", int_mblaze_fsl_cget>;
-def CAGETD : FSLGetD<0x13, 0x0A, "cagetd ", int_mblaze_fsl_caget>;
-def EGETD : FSLGetD<0x13, 0x01, "egetd ", int_mblaze_fsl_eget>;
-def EAGETD : FSLGetD<0x13, 0x03, "eagetd ", int_mblaze_fsl_eaget>;
-def ECGETD : FSLGetD<0x13, 0x09, "ecgetd ", int_mblaze_fsl_ecget>;
-def ECAGETD : FSLGetD<0x13, 0x0B, "ecagetd ", int_mblaze_fsl_ecaget>;
-def TGETD : FSLGetD<0x13, 0x04, "tgetd ", int_mblaze_fsl_tget>;
-def TAGETD : FSLGetD<0x13, 0x06, "tagetd ", int_mblaze_fsl_taget>;
-def TCGETD : FSLGetD<0x13, 0x0C, "tcgetd ", int_mblaze_fsl_tcget>;
-def TCAGETD : FSLGetD<0x13, 0x0E, "tcagetd ", int_mblaze_fsl_tcaget>;
-def TEGETD : FSLGetD<0x13, 0x05, "tegetd ", int_mblaze_fsl_teget>;
-def TEAGETD : FSLGetD<0x13, 0x07, "teagetd ", int_mblaze_fsl_teaget>;
-def TECGETD : FSLGetD<0x13, 0x0D, "tecgetd ", int_mblaze_fsl_tecget>;
-def TECAGETD : FSLGetD<0x13, 0x0F, "tecagetd ", int_mblaze_fsl_tecaget>;
-
-let Defs = [CARRY] in {
- def NGETD : FSLGetD<0x13, 0x10, "ngetd ", int_mblaze_fsl_nget>;
- def NAGETD : FSLGetD<0x13, 0x12, "nagetd ", int_mblaze_fsl_naget>;
- def NCGETD : FSLGetD<0x13, 0x18, "ncgetd ", int_mblaze_fsl_ncget>;
- def NCAGETD : FSLGetD<0x13, 0x1A, "ncagetd ", int_mblaze_fsl_ncaget>;
- def NEGETD : FSLGetD<0x13, 0x11, "negetd ", int_mblaze_fsl_neget>;
- def NEAGETD : FSLGetD<0x13, 0x13, "neagetd ", int_mblaze_fsl_neaget>;
- def NECGETD : FSLGetD<0x13, 0x19, "necgetd ", int_mblaze_fsl_necget>;
- def NECAGETD : FSLGetD<0x13, 0x1B, "necagetd ", int_mblaze_fsl_necaget>;
- def TNGETD : FSLGetD<0x13, 0x14, "tngetd ", int_mblaze_fsl_tnget>;
- def TNAGETD : FSLGetD<0x13, 0x16, "tnagetd ", int_mblaze_fsl_tnaget>;
- def TNCGETD : FSLGetD<0x13, 0x1C, "tncgetd ", int_mblaze_fsl_tncget>;
- def TNCAGETD : FSLGetD<0x13, 0x1E, "tncagetd ", int_mblaze_fsl_tncaget>;
- def TNEGETD : FSLGetD<0x13, 0x15, "tnegetd ", int_mblaze_fsl_tneget>;
- def TNEAGETD : FSLGetD<0x13, 0x17, "tneagetd ", int_mblaze_fsl_tneaget>;
- def TNECGETD : FSLGetD<0x13, 0x1D, "tnecgetd ", int_mblaze_fsl_tnecget>;
- def TNECAGETD : FSLGetD<0x13, 0x1F, "tnecagetd", int_mblaze_fsl_tnecaget>;
-}
-
-//===----------------------------------------------------------------------===//
-// FSL Put Instructions
-//===----------------------------------------------------------------------===//
-def PUT : FSLPut<0x1B, 0x0, "put ", int_mblaze_fsl_put>;
-def APUT : FSLPut<0x1B, 0x1, "aput ", int_mblaze_fsl_aput>;
-def CPUT : FSLPut<0x1B, 0x4, "cput ", int_mblaze_fsl_cput>;
-def CAPUT : FSLPut<0x1B, 0x5, "caput ", int_mblaze_fsl_caput>;
-def TPUT : FSLPutT<0x1B, 0x2, "tput ", int_mblaze_fsl_tput>;
-def TAPUT : FSLPutT<0x1B, 0x3, "taput ", int_mblaze_fsl_taput>;
-def TCPUT : FSLPutT<0x1B, 0x6, "tcput ", int_mblaze_fsl_tcput>;
-def TCAPUT : FSLPutT<0x1B, 0x7, "tcaput ", int_mblaze_fsl_tcaput>;
-
-let Defs = [CARRY] in {
- def NPUT : FSLPut<0x1B, 0x8, "nput ", int_mblaze_fsl_nput>;
- def NAPUT : FSLPut<0x1B, 0x9, "naput ", int_mblaze_fsl_naput>;
- def NCPUT : FSLPut<0x1B, 0xC, "ncput ", int_mblaze_fsl_ncput>;
- def NCAPUT : FSLPut<0x1B, 0xD, "ncaput ", int_mblaze_fsl_ncaput>;
- def TNPUT : FSLPutT<0x1B, 0xA, "tnput ", int_mblaze_fsl_tnput>;
- def TNAPUT : FSLPutT<0x1B, 0xB, "tnaput ", int_mblaze_fsl_tnaput>;
- def TNCPUT : FSLPutT<0x1B, 0xE, "tncput ", int_mblaze_fsl_tncput>;
- def TNCAPUT : FSLPutT<0x1B, 0xF, "tncaput ", int_mblaze_fsl_tncaput>;
-}
-
-//===----------------------------------------------------------------------===//
-// FSL Dynamic Put Instructions
-//===----------------------------------------------------------------------===//
-def PUTD : FSLPutD<0x13, 0x0, "putd ", int_mblaze_fsl_put>;
-def APUTD : FSLPutD<0x13, 0x1, "aputd ", int_mblaze_fsl_aput>;
-def CPUTD : FSLPutD<0x13, 0x4, "cputd ", int_mblaze_fsl_cput>;
-def CAPUTD : FSLPutD<0x13, 0x5, "caputd ", int_mblaze_fsl_caput>;
-def TPUTD : FSLPutTD<0x13, 0x2, "tputd ", int_mblaze_fsl_tput>;
-def TAPUTD : FSLPutTD<0x13, 0x3, "taputd ", int_mblaze_fsl_taput>;
-def TCPUTD : FSLPutTD<0x13, 0x6, "tcputd ", int_mblaze_fsl_tcput>;
-def TCAPUTD : FSLPutTD<0x13, 0x7, "tcaputd ", int_mblaze_fsl_tcaput>;
-
-let Defs = [CARRY] in {
- def NPUTD : FSLPutD<0x13, 0x8, "nputd ", int_mblaze_fsl_nput>;
- def NAPUTD : FSLPutD<0x13, 0x9, "naputd ", int_mblaze_fsl_naput>;
- def NCPUTD : FSLPutD<0x13, 0xC, "ncputd ", int_mblaze_fsl_ncput>;
- def NCAPUTD : FSLPutD<0x13, 0xD, "ncaputd ", int_mblaze_fsl_ncaput>;
- def TNPUTD : FSLPutTD<0x13, 0xA, "tnputd ", int_mblaze_fsl_tnput>;
- def TNAPUTD : FSLPutTD<0x13, 0xB, "tnaputd ", int_mblaze_fsl_tnaput>;
- def TNCPUTD : FSLPutTD<0x13, 0xE, "tncputd ", int_mblaze_fsl_tncput>;
- def TNCAPUTD : FSLPutTD<0x13, 0xF, "tncaputd ", int_mblaze_fsl_tncaput>;
-}
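Reading the flag constants off the FSL defs in the deleted file above, the 5-bit get flags form a bitmask in NCTAE order: E=0x01, A=0x02, T=0x04, C=0x08, N=0x10 (the 4-bit put flags drop E, giving A=0x1, T=0x2, C=0x4, N=0x8). A small C++ sketch checking that reading against two of the defs above (illustrative names, not part of the deleted sources; the usual MicroBlaze expansion of the letters is assumed: n=non-blocking, c=control, t=test, a=atomic, e=exception):

    // Flag bits of the FSL "get" forms, matching the NCTAE comment above.
    enum FslGetFlag {
      FSL_E = 0x01,   // exception
      FSL_A = 0x02,   // atomic
      FSL_T = 0x04,   // test (non-destructive)
      FSL_C = 0x08,   // control
      FSL_N = 0x10    // non-blocking
    };

    // NECAGET is defined above with flags 0x1B, i.e. N|E|C|A.
    static_assert((FSL_N | FSL_E | FSL_C | FSL_A) == 0x1B, "NECAGET flags");
    // TNECAGET carries every modifier: 0x1F.
    static_assert((FSL_N | FSL_E | FSL_C | FSL_A | FSL_T) == 0x1F, "TNECAGET flags");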
diff --git a/lib/Target/MBlaze/MBlazeInstrFormats.td b/lib/Target/MBlaze/MBlazeInstrFormats.td
deleted file mode 100644
index e40432a1b9a9..000000000000
--- a/lib/Target/MBlaze/MBlazeInstrFormats.td
+++ /dev/null
@@ -1,228 +0,0 @@
-//===-- MBlazeInstrFormats.td - MB Instruction defs --------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-// Format specifies the encoding used by the instruction. This is part of the
-// ad-hoc solution used to emit machine instruction encodings by our machine
-// code emitter.
-class Format<bits<6> val> {
- bits<6> Value = val;
-}
-
-def FPseudo : Format<0>;
-def FRRR : Format<1>; // ADD, OR, etc.
-def FRRI : Format<2>; // ADDI, ORI, etc.
-def FCRR : Format<3>; // PUTD, WDC, WIC, BEQ, BNE, BGE, etc.
-def FCRI : Format<4>; // RTID, RTED, RTSD, BEQI, BNEI, BGEI, etc.
-def FRCR : Format<5>; // BRLD, BRALD, GETD
-def FRCI : Format<6>; // BRLID, BRALID, MSRCLR, MSRSET
-def FCCR : Format<7>; // BR, BRA, BRD, etc.
-def FCCI : Format<8>; // IMM, BRI, BRAI, BRID, etc.
-def FRRCI : Format<9>; // BSRLI, BSRAI, BSLLI
-def FRRC : Format<10>; // SEXT8, SEXT16, SRA, SRC, SRL, FLT, FINT, FSQRT
-def FRCX : Format<11>; // GET
-def FRCS : Format<12>; // MFS
-def FCRCS : Format<13>; // MTS
-def FCRCX : Format<14>; // PUT
-def FCX : Format<15>; // TPUT
-def FCR : Format<16>; // TPUTD
-def FRIR : Format<17>; // RSUBI
-def FRRRR : Format<18>; // RSUB, FRSUB
-def FRI : Format<19>; // RSUB, FRSUB
-def FC : Format<20>; // NOP
-def FRR : Format<21>; // CLZ
-
-//===----------------------------------------------------------------------===//
-// Describe MBlaze instructions format
-//
-// CPU INSTRUCTION FORMATS
-//
-// opcode - operation code.
-// rd - dst reg.
-// ra - first src. reg.
-// rb - second src. reg.
-// imm16 - 16-bit immediate value.
-//
-//===----------------------------------------------------------------------===//
-
-// Generic MBlaze Format
-class MBlazeInst<bits<6> op, Format form, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> : Instruction {
- let Namespace = "MBlaze";
- field bits<32> Inst;
-
- bits<6> opcode = op;
- Format Form = form;
- bits<6> FormBits = Form.Value;
-
- // Top 6 bits are the 'opcode' field
- let Inst{0-5} = opcode;
-
- // If the instruction is marked as a pseudo, set isCodeGenOnly so that the
- // assembler and disassembler ignore it.
- let isCodeGenOnly = !eq(!cast<string>(form), "FPseudo");
-
- dag OutOperandList = outs;
- dag InOperandList = ins;
-
- let AsmString = asmstr;
- let Pattern = pattern;
- let Itinerary = itin;
-
- // TSFlags layout should be kept in sync with MBlazeInstrInfo.h.
- let TSFlags{5-0} = FormBits;
-}
-
-//===----------------------------------------------------------------------===//
-// Pseudo instruction class
-//===----------------------------------------------------------------------===//
-class MBlazePseudo<dag outs, dag ins, string asmstr, list<dag> pattern>:
- MBlazeInst<0x0, FPseudo, outs, ins, asmstr, pattern, IIC_Pseudo>;
-
-//===----------------------------------------------------------------------===//
-// Type A instruction class in MBlaze : <|opcode|rd|ra|rb|flags|>
-//===----------------------------------------------------------------------===//
-
-class TA<bits<6> op, bits<11> flags, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<op,FRRR,outs, ins, asmstr, pattern, itin>
-{
- bits<5> rd;
- bits<5> ra;
- bits<5> rb;
-
- let Inst{6-10} = rd;
- let Inst{11-15} = ra;
- let Inst{16-20} = rb;
- let Inst{21-31} = flags;
-}
-
-//===----------------------------------------------------------------------===//
-// Type B instruction class in MBlaze : <|opcode|rd|ra|immediate|>
-//===----------------------------------------------------------------------===//
-
-class TB<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
- InstrItinClass itin> :
- MBlazeInst<op, FRRI, outs, ins, asmstr, pattern, itin>
-{
- bits<5> rd;
- bits<5> ra;
- bits<16> imm16;
-
- let Inst{6-10} = rd;
- let Inst{11-15} = ra;
- let Inst{16-31} = imm16;
-}
-
-//===----------------------------------------------------------------------===//
-// Type A instruction class in MBlaze but with the operands reversed
-// in the LLVM DAG : <|opcode|rd|ra|rb|flags|>
-//===----------------------------------------------------------------------===//
-
-class TAR<bits<6> op, bits<11> flags, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- TA<op, flags, outs, ins, asmstr, pattern, itin>
-{
- bits<5> rrd;
- bits<5> rrb;
- bits<5> rra;
-
- let Form = FRRRR;
-
- let rd = rrd;
- let ra = rra;
- let rb = rrb;
-}
-
-//===----------------------------------------------------------------------===//
-// Type B instruction class in MBlaze but with the operands reversed in
-// the LLVM DAG : <|opcode|rd|ra|immediate|>
-//===----------------------------------------------------------------------===//
-class TBR<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
- InstrItinClass itin> :
- TB<op, outs, ins, asmstr, pattern, itin> {
- bits<5> rrd;
- bits<16> rimm16;
- bits<5> rra;
-
- let Form = FRIR;
-
- let rd = rrd;
- let ra = rra;
- let imm16 = rimm16;
-}
-
-//===----------------------------------------------------------------------===//
-// Shift immediate instruction class in MBlaze : <|opcode|rd|ra|immediate|>
-//===----------------------------------------------------------------------===//
-class SHT<bits<6> op, bits<2> flags, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<op, FRRI, outs, ins, asmstr, pattern, itin> {
- bits<5> rd;
- bits<5> ra;
- bits<5> imm5;
-
- let Inst{6-10} = rd;
- let Inst{11-15} = ra;
- let Inst{16-20} = 0x0;
- let Inst{21-22} = flags;
- let Inst{23-26} = 0x0;
- let Inst{27-31} = imm5;
-}
-
-//===----------------------------------------------------------------------===//
-// Special instruction class in MBlaze : <|opcode|rd|imm14|>
-//===----------------------------------------------------------------------===//
-class SPC<bits<6> op, bits<2> flags, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<op, FRI, outs, ins, asmstr, pattern, itin> {
- bits<5> rd;
- bits<14> imm14;
-
- let Inst{6-10} = rd;
- let Inst{11-15} = 0x0;
- let Inst{16-17} = flags;
- let Inst{18-31} = imm14;
-}
-
-//===----------------------------------------------------------------------===//
-// MSR instruction class in MBlaze : <|opcode|rd|imm15|>
-//===----------------------------------------------------------------------===//
-class MSR<bits<6> op, bits<6> flags, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<op, FRI, outs, ins, asmstr, pattern, itin> {
- bits<5> rd;
- bits<15> imm15;
-
- let Inst{6-10} = rd;
- let Inst{11-16} = flags;
- let Inst{17-31} = imm15;
-}
-
-//===----------------------------------------------------------------------===//
-// TCLZ instruction class in MBlaze : <|opcode|rd|ra|flags|>
-//===----------------------------------------------------------------------===//
-class TCLZ<bits<6> op, bits<16> flags, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<op, FRR, outs, ins, asmstr, pattern, itin> {
- bits<5> rd;
- bits<5> ra;
-
- let Inst{6-10} = rd;
- let Inst{11-15} = ra;
- let Inst{16-31} = flags;
-}
-
-//===----------------------------------------------------------------------===//
-// MBAR instruction class in MBlaze : <|opcode|flags|>
-//===----------------------------------------------------------------------===//
-class MBAR<bits<6> op, bits<26> flags, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<op, FC, outs, ins, asmstr, pattern, itin> {
- let Inst{6-31} = flags;
-}
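The TA ("type A") class in the deleted MBlazeInstrFormats.td above lays the 32-bit word out as <|opcode:6|rd:5|ra:5|rb:5|flags:11|>. A minimal C++ encoder sketch of that packing, assuming the MicroBlaze convention that Inst{0} is the most-significant bit (illustrative only, not part of the deleted sources):

    #include <cstdint>

    // Pack a type-A instruction word; field positions follow the
    // "let Inst{...}" assignments in the TA class above.
    static uint32_t encodeTA(unsigned opcode, unsigned rd, unsigned ra,
                             unsigned rb, unsigned flags) {
      return (uint32_t(opcode & 0x3F) << 26) |   // Inst{0-5}
             (uint32_t(rd     & 0x1F) << 21) |   // Inst{6-10}
             (uint32_t(ra     & 0x1F) << 16) |   // Inst{11-15}
             (uint32_t(rb     & 0x1F) << 11) |   // Inst{16-20}
              uint32_t(flags & 0x7FF);           // Inst{21-31}
    }

    // e.g. ADDK, defined as a TA with op 0x04 and flags 0x000 in
    // MBlazeInstrInfo.td below, would be encodeTA(0x04, rd, ra, rb, 0x000).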
diff --git a/lib/Target/MBlaze/MBlazeInstrInfo.cpp b/lib/Target/MBlaze/MBlazeInstrInfo.cpp
deleted file mode 100644
index 79449f73f74e..000000000000
--- a/lib/Target/MBlaze/MBlazeInstrInfo.cpp
+++ /dev/null
@@ -1,297 +0,0 @@
-//===-- MBlazeInstrInfo.cpp - MBlaze Instruction Information --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the MBlaze implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeInstrInfo.h"
-#include "MBlazeMachineFunction.h"
-#include "MBlazeTargetMachine.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_INSTRINFO_CTOR
-#include "MBlazeGenInstrInfo.inc"
-
-using namespace llvm;
-
-MBlazeInstrInfo::MBlazeInstrInfo(MBlazeTargetMachine &tm)
- : MBlazeGenInstrInfo(MBlaze::ADJCALLSTACKDOWN, MBlaze::ADJCALLSTACKUP),
- TM(tm), RI(*TM.getSubtargetImpl(), *this) {}
-
-static bool isZeroImm(const MachineOperand &op) {
- return op.isImm() && op.getImm() == 0;
-}
-
-/// isLoadFromStackSlot - If the specified machine instruction is a direct
-/// load from a stack slot, return the virtual or physical register number of
-/// the destination along with the FrameIndex of the loaded stack slot. If
-/// not, return 0. This predicate must return 0 if the instruction has
-/// any side effects other than loading from the stack slot.
-unsigned MBlazeInstrInfo::
-isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const {
- if (MI->getOpcode() == MBlaze::LWI) {
- if ((MI->getOperand(1).isFI()) && // is a stack slot
- (MI->getOperand(2).isImm()) && // the imm is zero
- (isZeroImm(MI->getOperand(2)))) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- }
-
- return 0;
-}
-
-/// isStoreToStackSlot - If the specified machine instruction is a direct
-/// store to a stack slot, return the virtual or physical register number of
-/// the source reg along with the FrameIndex of the stored stack slot. If
-/// not, return 0. This predicate must return 0 if the instruction has
-/// any side effects other than storing to the stack slot.
-unsigned MBlazeInstrInfo::
-isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const {
- if (MI->getOpcode() == MBlaze::SWI) {
- if ((MI->getOperand(1).isFI()) && // is a stack slot
- (MI->getOperand(2).isImm()) && // the imm is zero
- (isZeroImm(MI->getOperand(2)))) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- }
- return 0;
-}
-
-/// insertNoop - If a data hazard condition is found, insert the target nop
-/// instruction.
-void MBlazeInstrInfo::
-insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const {
- DebugLoc DL;
- BuildMI(MBB, MI, DL, get(MBlaze::NOP));
-}
-
-void MBlazeInstrInfo::
-copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
- llvm::BuildMI(MBB, I, DL, get(MBlaze::ADDK), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc)).addReg(MBlaze::R0);
-}
-
-void MBlazeInstrInfo::
-storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned SrcReg, bool isKill, int FI,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- DebugLoc DL;
- BuildMI(MBB, I, DL, get(MBlaze::SWI)).addReg(SrcReg,getKillRegState(isKill))
- .addFrameIndex(FI).addImm(0); //.addFrameIndex(FI);
-}
-
-void MBlazeInstrInfo::
-loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, int FI,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- DebugLoc DL;
- BuildMI(MBB, I, DL, get(MBlaze::LWI), DestReg)
- .addFrameIndex(FI).addImm(0); //.addFrameIndex(FI);
-}
-
-//===----------------------------------------------------------------------===//
-// Branch Analysis
-//===----------------------------------------------------------------------===//
-bool MBlazeInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
- // If the block has no terminators, it just falls into the block after it.
- MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin())
- return false;
- --I;
- while (I->isDebugValue()) {
- if (I == MBB.begin())
- return false;
- --I;
- }
- if (!isUnpredicatedTerminator(I))
- return false;
-
- // Get the last instruction in the block.
- MachineInstr *LastInst = I;
-
- // If there is only one terminator instruction, process it.
- unsigned LastOpc = LastInst->getOpcode();
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
- if (MBlaze::isUncondBranchOpcode(LastOpc)) {
- TBB = LastInst->getOperand(0).getMBB();
- return false;
- }
- if (MBlaze::isCondBranchOpcode(LastOpc)) {
- // Block ends with fall-through condbranch.
- TBB = LastInst->getOperand(1).getMBB();
- Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
- Cond.push_back(LastInst->getOperand(0));
- return false;
- }
- // Otherwise, don't know what this is.
- return true;
- }
-
- // Get the instruction before it if it's a terminator.
- MachineInstr *SecondLastInst = I;
-
- // If there are three terminators, we don't know what sort of block this is.
- if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
- return true;
-
- // If the block ends with something like BEQID then BRID, handle it.
- if (MBlaze::isCondBranchOpcode(SecondLastInst->getOpcode()) &&
- MBlaze::isUncondBranchOpcode(LastInst->getOpcode())) {
- TBB = SecondLastInst->getOperand(1).getMBB();
- Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
- Cond.push_back(SecondLastInst->getOperand(0));
- FBB = LastInst->getOperand(0).getMBB();
- return false;
- }
-
- // If the block ends with two unconditional branches, handle it.
- // The second one is not executed, so remove it.
- if (MBlaze::isUncondBranchOpcode(SecondLastInst->getOpcode()) &&
- MBlaze::isUncondBranchOpcode(LastInst->getOpcode())) {
- TBB = SecondLastInst->getOperand(0).getMBB();
- I = LastInst;
- if (AllowModify)
- I->eraseFromParent();
- return false;
- }
-
- // Otherwise, can't handle this.
- return true;
-}
-
-unsigned MBlazeInstrInfo::
-InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const {
- // Shouldn't be a fall through.
- assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- assert((Cond.size() == 2 || Cond.size() == 0) &&
- "MBlaze branch conditions have two components!");
-
- unsigned Opc = MBlaze::BRID;
- if (!Cond.empty())
- Opc = (unsigned)Cond[0].getImm();
-
- if (FBB == 0) {
- if (Cond.empty()) // Unconditional branch
- BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
- else // Conditional branch
- BuildMI(&MBB, DL, get(Opc)).addReg(Cond[1].getReg()).addMBB(TBB);
- return 1;
- }
-
- BuildMI(&MBB, DL, get(Opc)).addReg(Cond[1].getReg()).addMBB(TBB);
- BuildMI(&MBB, DL, get(MBlaze::BRID)).addMBB(FBB);
- return 2;
-}
-
-unsigned MBlazeInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin()) return 0;
- --I;
- while (I->isDebugValue()) {
- if (I == MBB.begin())
- return 0;
- --I;
- }
-
- if (!MBlaze::isUncondBranchOpcode(I->getOpcode()) &&
- !MBlaze::isCondBranchOpcode(I->getOpcode()))
- return 0;
-
- // Remove the branch.
- I->eraseFromParent();
-
- I = MBB.end();
-
- if (I == MBB.begin()) return 1;
- --I;
- if (!MBlaze::isCondBranchOpcode(I->getOpcode()))
- return 1;
-
- // Remove the branch.
- I->eraseFromParent();
- return 2;
-}
-
-bool MBlazeInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand>
- &Cond) const {
- assert(Cond.size() == 2 && "Invalid MBlaze branch opcode!");
- switch (Cond[0].getImm()) {
- default: return true;
- case MBlaze::BEQ: Cond[0].setImm(MBlaze::BNE); return false;
- case MBlaze::BNE: Cond[0].setImm(MBlaze::BEQ); return false;
- case MBlaze::BGT: Cond[0].setImm(MBlaze::BLE); return false;
- case MBlaze::BGE: Cond[0].setImm(MBlaze::BLT); return false;
- case MBlaze::BLT: Cond[0].setImm(MBlaze::BGE); return false;
- case MBlaze::BLE: Cond[0].setImm(MBlaze::BGT); return false;
- case MBlaze::BEQI: Cond[0].setImm(MBlaze::BNEI); return false;
- case MBlaze::BNEI: Cond[0].setImm(MBlaze::BEQI); return false;
- case MBlaze::BGTI: Cond[0].setImm(MBlaze::BLEI); return false;
- case MBlaze::BGEI: Cond[0].setImm(MBlaze::BLTI); return false;
- case MBlaze::BLTI: Cond[0].setImm(MBlaze::BGEI); return false;
- case MBlaze::BLEI: Cond[0].setImm(MBlaze::BGTI); return false;
- case MBlaze::BEQD: Cond[0].setImm(MBlaze::BNED); return false;
- case MBlaze::BNED: Cond[0].setImm(MBlaze::BEQD); return false;
- case MBlaze::BGTD: Cond[0].setImm(MBlaze::BLED); return false;
- case MBlaze::BGED: Cond[0].setImm(MBlaze::BLTD); return false;
- case MBlaze::BLTD: Cond[0].setImm(MBlaze::BGED); return false;
- case MBlaze::BLED: Cond[0].setImm(MBlaze::BGTD); return false;
- case MBlaze::BEQID: Cond[0].setImm(MBlaze::BNEID); return false;
- case MBlaze::BNEID: Cond[0].setImm(MBlaze::BEQID); return false;
- case MBlaze::BGTID: Cond[0].setImm(MBlaze::BLEID); return false;
- case MBlaze::BGEID: Cond[0].setImm(MBlaze::BLTID); return false;
- case MBlaze::BLTID: Cond[0].setImm(MBlaze::BGEID); return false;
- case MBlaze::BLEID: Cond[0].setImm(MBlaze::BGTID); return false;
- }
-}
-
-/// getGlobalBaseReg - Return a virtual register initialized with the
-/// global base register value. Output instructions required to
-/// initialize the register in the function entry block, if necessary.
-///
-unsigned MBlazeInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
- MBlazeFunctionInfo *MBlazeFI = MF->getInfo<MBlazeFunctionInfo>();
- unsigned GlobalBaseReg = MBlazeFI->getGlobalBaseReg();
- if (GlobalBaseReg != 0)
- return GlobalBaseReg;
-
- // Insert the set of GlobalBaseReg into the first MBB of the function
- MachineBasicBlock &FirstMBB = MF->front();
- MachineBasicBlock::iterator MBBI = FirstMBB.begin();
- MachineRegisterInfo &RegInfo = MF->getRegInfo();
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
-
- GlobalBaseReg = RegInfo.createVirtualRegister(&MBlaze::GPRRegClass);
- BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
- GlobalBaseReg).addReg(MBlaze::R20);
- RegInfo.addLiveIn(MBlaze::R20);
-
- MBlazeFI->setGlobalBaseReg(GlobalBaseReg);
- return GlobalBaseReg;
-}
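AnalyzeBranch above encodes the branch condition as a two-operand vector, { branch opcode as an immediate, condition register }, which is exactly what InsertBranch consumes and what ReverseBranchCondition rewrites in place. A hedged sketch of how a generic pass of this LLVM 3.x vintage would use the three hooks together to flip a two-way conditional block (assumes LLVM headers of that era; not part of the deleted sources):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    // Invert the conditional branch terminating MBB so the taken and
    // fall-through destinations swap.  Returns false if the block does not
    // end in an analyzable two-way branch.
    static bool flipBranch(MachineBasicBlock &MBB, const TargetInstrInfo *TII) {
      MachineBasicBlock *TBB = 0, *FBB = 0;
      SmallVector<MachineOperand, 2> Cond;   // { opcode imm, condition reg }
      if (TII->AnalyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false))
        return false;
      if (!FBB || Cond.empty() || TII->ReverseBranchCondition(Cond))
        return false;
      TII->RemoveBranch(MBB);
      TII->InsertBranch(MBB, FBB, TBB, Cond, DebugLoc());
      return true;
    }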
diff --git a/lib/Target/MBlaze/MBlazeInstrInfo.h b/lib/Target/MBlaze/MBlazeInstrInfo.h
deleted file mode 100644
index 5252147b48e6..000000000000
--- a/lib/Target/MBlaze/MBlazeInstrInfo.h
+++ /dev/null
@@ -1,240 +0,0 @@
-//===-- MBlazeInstrInfo.h - MBlaze Instruction Information ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the MBlaze implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZEINSTRUCTIONINFO_H
-#define MBLAZEINSTRUCTIONINFO_H
-
-#include "MBlaze.h"
-#include "MBlazeRegisterInfo.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-#define GET_INSTRINFO_HEADER
-#include "MBlazeGenInstrInfo.inc"
-
-namespace llvm {
-
-namespace MBlaze {
-
- // MBlaze Branch Codes
- enum FPBranchCode {
- BRANCH_F,
- BRANCH_T,
- BRANCH_FL,
- BRANCH_TL,
- BRANCH_INVALID
- };
-
- // MBlaze Condition Codes
- enum CondCode {
- // To be used with float branch True
- FCOND_F,
- FCOND_UN,
- FCOND_EQ,
- FCOND_UEQ,
- FCOND_OLT,
- FCOND_ULT,
- FCOND_OLE,
- FCOND_ULE,
- FCOND_SF,
- FCOND_NGLE,
- FCOND_SEQ,
- FCOND_NGL,
- FCOND_LT,
- FCOND_NGE,
- FCOND_LE,
- FCOND_NGT,
-
- // To be used with float branch False
- // These conditions have the same mnemonics as the
- // above ones, but are used with a branch False.
- FCOND_T,
- FCOND_OR,
- FCOND_NEQ,
- FCOND_OGL,
- FCOND_UGE,
- FCOND_OGE,
- FCOND_UGT,
- FCOND_OGT,
- FCOND_ST,
- FCOND_GLE,
- FCOND_SNE,
- FCOND_GL,
- FCOND_NLT,
- FCOND_GE,
- FCOND_NLE,
- FCOND_GT,
-
- // Only integer conditions
- COND_EQ,
- COND_GT,
- COND_GE,
- COND_LT,
- COND_LE,
- COND_NE,
- COND_INVALID
- };
-
- // Turn condition code into conditional branch opcode.
- inline static unsigned GetCondBranchFromCond(CondCode CC) {
- switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case COND_EQ: return MBlaze::BEQID;
- case COND_NE: return MBlaze::BNEID;
- case COND_GT: return MBlaze::BGTID;
- case COND_GE: return MBlaze::BGEID;
- case COND_LT: return MBlaze::BLTID;
- case COND_LE: return MBlaze::BLEID;
- }
- }
-
- /// GetOppositeBranchCondition - Return the inverse of the specified cond,
- /// e.g. turning COND_E to COND_NE.
- // CondCode GetOppositeBranchCondition(MBlaze::CondCode CC);
-
- /// MBlazeFCCToString - Map each FP condition code to its string
- inline static const char *MBlazeFCCToString(MBlaze::CondCode CC) {
- switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case FCOND_F:
- case FCOND_T: return "f";
- case FCOND_UN:
- case FCOND_OR: return "un";
- case FCOND_EQ:
- case FCOND_NEQ: return "eq";
- case FCOND_UEQ:
- case FCOND_OGL: return "ueq";
- case FCOND_OLT:
- case FCOND_UGE: return "olt";
- case FCOND_ULT:
- case FCOND_OGE: return "ult";
- case FCOND_OLE:
- case FCOND_UGT: return "ole";
- case FCOND_ULE:
- case FCOND_OGT: return "ule";
- case FCOND_SF:
- case FCOND_ST: return "sf";
- case FCOND_NGLE:
- case FCOND_GLE: return "ngle";
- case FCOND_SEQ:
- case FCOND_SNE: return "seq";
- case FCOND_NGL:
- case FCOND_GL: return "ngl";
- case FCOND_LT:
- case FCOND_NLT: return "lt";
- case FCOND_NGE:
- case FCOND_GE: return "ge";
- case FCOND_LE:
- case FCOND_NLE: return "nle";
- case FCOND_NGT:
- case FCOND_GT: return "gt";
- }
- }
-
- inline static bool isUncondBranchOpcode(int Opc) {
- switch (Opc) {
- default: return false;
- case MBlaze::BRI:
- case MBlaze::BRAI:
- case MBlaze::BRID:
- case MBlaze::BRAID:
- return true;
- }
- }
-
- inline static bool isCondBranchOpcode(int Opc) {
- switch (Opc) {
- default: return false;
- case MBlaze::BEQI: case MBlaze::BEQID:
- case MBlaze::BNEI: case MBlaze::BNEID:
- case MBlaze::BGTI: case MBlaze::BGTID:
- case MBlaze::BGEI: case MBlaze::BGEID:
- case MBlaze::BLTI: case MBlaze::BLTID:
- case MBlaze::BLEI: case MBlaze::BLEID:
- return true;
- }
- }
-}
-
-class MBlazeInstrInfo : public MBlazeGenInstrInfo {
- MBlazeTargetMachine &TM;
- const MBlazeRegisterInfo RI;
-public:
- explicit MBlazeInstrInfo(MBlazeTargetMachine &TM);
-
- /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
- /// such, whenever a client has an instance of instruction info, it should
- /// always be able to get register info as well (through this method).
- ///
- virtual const MBlazeRegisterInfo &getRegisterInfo() const { return RI; }
-
- /// isLoadFromStackSlot - If the specified machine instruction is a direct
- /// load from a stack slot, return the virtual or physical register number of
- /// the destination along with the FrameIndex of the loaded stack slot. If
- /// not, return 0. This predicate must return 0 if the instruction has
- /// any side effects other than loading from the stack slot.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- /// isStoreToStackSlot - If the specified machine instruction is a direct
- /// store to a stack slot, return the virtual or physical register number of
- /// the source reg along with the FrameIndex of the stored stack slot. If
- /// not, return 0. This predicate must return 0 if the instruction has
- /// any side effects other than storing to the stack slot.
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- /// Branch Analysis
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-
- virtual bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
- const;
-
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- /// Insert a nop instruction when a hazard condition is found
- virtual void insertNoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const;
-
- /// getGlobalBaseReg - Return a virtual register initialized with the
- /// global base register value. Output instructions required to
- /// initialize the register in the function entry block, if necessary.
- ///
- unsigned getGlobalBaseReg(MachineFunction *MF) const;
-};
-
-}
-
-#endif
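In the deleted header above, GetCondBranchFromCond always selects the immediate, delay-slot ("...ID") branch forms, and the integer condition codes invert pairwise (EQ/NE, GT/LE, GE/LT), the same pairing ReverseBranchCondition applies to the opcodes in MBlazeInstrInfo.cpp. A small standalone C++ model of that mapping (illustrative names only, not part of the deleted sources):

    enum Cond { EQ, GT, GE, LT, LE, NE };

    // Pairwise inversion, mirroring ReverseBranchCondition above.
    static Cond invert(Cond C) {
      switch (C) {
      case EQ: return NE;  case NE: return EQ;
      case GT: return LE;  case LE: return GT;
      case GE: return LT;  case LT: return GE;
      }
      return C;  // unreachable for the enumerators above
    }

    // Assembler mnemonics chosen by GetCondBranchFromCond (BEQID, BNEID, ...).
    static const char *branchFor(Cond C) {
      switch (C) {
      case EQ: return "beqid";  case NE: return "bneid";
      case GT: return "bgtid";  case GE: return "bgeid";
      case LT: return "bltid";  case LE: return "bleid";
      }
      return "?";
    }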
diff --git a/lib/Target/MBlaze/MBlazeInstrInfo.td b/lib/Target/MBlaze/MBlazeInstrInfo.td
deleted file mode 100644
index d27cd39a6b92..000000000000
--- a/lib/Target/MBlaze/MBlazeInstrInfo.td
+++ /dev/null
@@ -1,1051 +0,0 @@
-//===-- MBlazeInstrInfo.td - MBlaze Instruction defs -------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Instruction format superclass
-//===----------------------------------------------------------------------===//
-include "MBlazeInstrFormats.td"
-
-//===----------------------------------------------------------------------===//
-// MBlaze type profiles
-//===----------------------------------------------------------------------===//
-
-// def SDTMBlazeSelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>]>;
-def SDT_MBlazeRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDT_MBlazeIRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDT_MBlazeJmpLink : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
-def SDT_MBCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
-def SDT_MBCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-
-//===----------------------------------------------------------------------===//
-// MBlaze specific nodes
-//===----------------------------------------------------------------------===//
-
-def MBlazeRet : SDNode<"MBlazeISD::Ret", SDT_MBlazeRet,
- [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
-def MBlazeIRet : SDNode<"MBlazeISD::IRet", SDT_MBlazeIRet,
- [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
-
-def MBlazeJmpLink : SDNode<"MBlazeISD::JmpLink",SDT_MBlazeJmpLink,
- [SDNPHasChain,SDNPOptInGlue,SDNPOutGlue,
- SDNPVariadic]>;
-
-def MBWrapper : SDNode<"MBlazeISD::Wrap", SDTIntUnaryOp>;
-
-def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MBCallSeqStart,
- [SDNPHasChain, SDNPOutGlue]>;
-
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MBCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-
-//===----------------------------------------------------------------------===//
-// MBlaze Instruction Predicate Definitions.
-//===----------------------------------------------------------------------===//
-// def HasPipe3 : Predicate<"Subtarget.hasPipe3()">;
-def HasBarrel : Predicate<"Subtarget.hasBarrel()">;
-// def NoBarrel : Predicate<"!Subtarget.hasBarrel()">;
-def HasDiv : Predicate<"Subtarget.hasDiv()">;
-def HasMul : Predicate<"Subtarget.hasMul()">;
-// def HasFSL : Predicate<"Subtarget.hasFSL()">;
-// def HasEFSL : Predicate<"Subtarget.hasEFSL()">;
-// def HasMSRSet : Predicate<"Subtarget.hasMSRSet()">;
-// def HasException : Predicate<"Subtarget.hasException()">;
-def HasPatCmp : Predicate<"Subtarget.hasPatCmp()">;
-def HasFPU : Predicate<"Subtarget.hasFPU()">;
-// def HasESR : Predicate<"Subtarget.hasESR()">;
-// def HasPVR : Predicate<"Subtarget.hasPVR()">;
-def HasMul64 : Predicate<"Subtarget.hasMul64()">;
-def HasSqrt : Predicate<"Subtarget.hasSqrt()">;
-// def HasMMU : Predicate<"Subtarget.hasMMU()">;
-
-//===----------------------------------------------------------------------===//
-// MBlaze Operand, Complex Patterns and Transformations Definitions.
-//===----------------------------------------------------------------------===//
-
-def MBlazeMemAsmOperand : AsmOperandClass {
- let Name = "Mem";
- let SuperClasses = [];
-}
-
-def MBlazeFslAsmOperand : AsmOperandClass {
- let Name = "Fsl";
- let SuperClasses = [];
-}
-
-// Instruction operand types
-def brtarget : Operand<OtherVT>;
-def calltarget : Operand<i32>;
-def simm16 : Operand<i32>;
-def uimm5 : Operand<i32>;
-def uimm15 : Operand<i32>;
-def fimm : Operand<f32>;
-
-// Unsigned Operand
-def uimm16 : Operand<i32> {
- let PrintMethod = "printUnsignedImm";
-}
-
-// FSL Operand
-def fslimm : Operand<i32> {
- let PrintMethod = "printFSLImm";
- let ParserMatchClass = MBlazeFslAsmOperand;
-}
-
-// Address operand
-def memri : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops GPR, simm16);
- let ParserMatchClass = MBlazeMemAsmOperand;
-}
-
-def memrr : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops GPR, GPR);
- let ParserMatchClass = MBlazeMemAsmOperand;
-}
-
-// Node immediate fits as 16-bit sign extended on target immediate.
-def immSExt16 : PatLeaf<(imm), [{
- return (N->getZExtValue() >> 16) == 0;
-}]>;
-
-// Node immediate fits as 16-bit zero extended on target immediate.
-// The LO16 param means that only the lower 16 bits of the node
-// immediate are caught.
-// e.g. addiu, sltiu
-def immZExt16 : PatLeaf<(imm), [{
- return (N->getZExtValue() >> 16) == 0;
-}]>;
-
-// FSL immediate field must fit in 4 bits.
-def immZExt4 : PatLeaf<(imm), [{
- return N->getZExtValue() == ((N->getZExtValue()) & 0xf) ;
-}]>;
-
-// shamt field must fit in 5 bits.
-def immZExt5 : PatLeaf<(imm), [{
- return N->getZExtValue() == ((N->getZExtValue()) & 0x1f) ;
-}]>;
-
-// MBlaze Address Mode. SDNode frameindex could possibly be a match
-// since load and store instructions to and from the stack use it.
-def iaddr : ComplexPattern<i32, 2, "SelectAddrRegImm", [frameindex], []>;
-def xaddr : ComplexPattern<i32, 2, "SelectAddrRegReg", [], []>;
-
-//===----------------------------------------------------------------------===//
-// Pseudo instructions
-//===----------------------------------------------------------------------===//
-
-// As stack alignment is always done with addik, we need a 16-bit immediate
-let Defs = [R1], Uses = [R1] in {
-def ADJCALLSTACKDOWN : MBlazePseudo<(outs), (ins simm16:$amt),
- "#ADJCALLSTACKDOWN $amt",
- [(callseq_start timm:$amt)]>;
-def ADJCALLSTACKUP : MBlazePseudo<(outs),
- (ins uimm16:$amt1, simm16:$amt2),
- "#ADJCALLSTACKUP $amt1",
- [(callseq_end timm:$amt1, timm:$amt2)]>;
-}
-
-//===----------------------------------------------------------------------===//
-// Instructions specific format
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Arithmetic Instructions
-//===----------------------------------------------------------------------===//
-class Arith<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode,
- InstrItinClass itin> :
- TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>;
-
-class ArithI<bits<6> op, string instr_asm, SDNode OpNode,
- Operand Od, PatLeaf imm_type> :
- TB<op, (outs GPR:$dst), (ins GPR:$b, Od:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [(set GPR:$dst, (OpNode GPR:$b, imm_type:$c))], IIC_ALU>;
-
-class ArithI32<bits<6> op, string instr_asm,Operand Od, PatLeaf imm_type> :
- TB<op, (outs GPR:$dst), (ins GPR:$b, Od:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], IIC_ALU>;
-
-class ShiftI<bits<6> op, bits<2> flags, string instr_asm, SDNode OpNode,
- Operand Od, PatLeaf imm_type> :
- SHT<op, flags, (outs GPR:$dst), (ins GPR:$b, Od:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [(set GPR:$dst, (OpNode GPR:$b, imm_type:$c))], IIC_SHT>;
-
-class ArithR<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode,
- InstrItinClass itin> :
- TAR<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
- !strconcat(instr_asm, " $dst, $c, $b"),
- [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>;
-
-class ArithRI<bits<6> op, string instr_asm, SDNode OpNode,
- Operand Od, PatLeaf imm_type> :
- TBR<op, (outs GPR:$dst), (ins Od:$b, GPR:$c),
- !strconcat(instr_asm, " $dst, $c, $b"),
- [(set GPR:$dst, (OpNode imm_type:$b, GPR:$c))], IIC_ALU>;
-
-class ArithN<bits<6> op, bits<11> flags, string instr_asm,
- InstrItinClass itin> :
- TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], itin>;
-
-class ArithNI<bits<6> op, string instr_asm,Operand Od, PatLeaf imm_type> :
- TB<op, (outs GPR:$dst), (ins GPR:$b, Od:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], IIC_ALU>;
-
-class ArithRN<bits<6> op, bits<11> flags, string instr_asm,
- InstrItinClass itin> :
- TAR<op, flags, (outs GPR:$dst), (ins GPR:$c, GPR:$b),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], itin>;
-
-class ArithRNI<bits<6> op, string instr_asm,Operand Od, PatLeaf imm_type> :
- TBR<op, (outs GPR:$dst), (ins Od:$c, GPR:$b),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], IIC_ALU>;
-
-//===----------------------------------------------------------------------===//
-// Misc Arithmetic Instructions
-//===----------------------------------------------------------------------===//
-
-class Logic<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode> :
- TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], IIC_ALU>;
-
-class LogicI<bits<6> op, string instr_asm, SDNode OpNode> :
- TB<op, (outs GPR:$dst), (ins GPR:$b, uimm16:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [(set GPR:$dst, (OpNode GPR:$b, immZExt16:$c))],
- IIC_ALU>;
-
-class LogicI32<bits<6> op, string instr_asm> :
- TB<op, (outs GPR:$dst), (ins GPR:$b, uimm16:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], IIC_ALU>;
-
-class PatCmp<bits<6> op, bits<11> flags, string instr_asm> :
- TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], IIC_ALU>;
-
-//===----------------------------------------------------------------------===//
-// Memory Access Instructions
-//===----------------------------------------------------------------------===//
-
-let mayLoad = 1 in {
-class LoadM<bits<6> op, bits<11> flags, string instr_asm> :
- TA<op, flags, (outs GPR:$dst), (ins memrr:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [], IIC_MEMl>;
-}
-
-class LoadMI<bits<6> op, string instr_asm, PatFrag OpNode> :
- TB<op, (outs GPR:$dst), (ins memri:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [(set (i32 GPR:$dst), (OpNode iaddr:$addr))], IIC_MEMl>;
-
-let mayStore = 1 in {
-class StoreM<bits<6> op, bits<11> flags, string instr_asm> :
- TA<op, flags, (outs), (ins GPR:$dst, memrr:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [], IIC_MEMs>;
-}
-
-class StoreMI<bits<6> op, string instr_asm, PatFrag OpNode> :
- TB<op, (outs), (ins GPR:$dst, memri:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [(OpNode (i32 GPR:$dst), iaddr:$addr)], IIC_MEMs>;
-
-//===----------------------------------------------------------------------===//
-// Branch Instructions
-//===----------------------------------------------------------------------===//
-class Branch<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
- TA<op, flags, (outs), (ins GPR:$target),
- !strconcat(instr_asm, " $target"),
- [], IIC_BR> {
- let rd = 0x0;
- let ra = br;
- let Form = FCCR;
-}
-
-class BranchI<bits<6> op, bits<5> br, string instr_asm> :
- TB<op, (outs), (ins brtarget:$target),
- !strconcat(instr_asm, " $target"),
- [], IIC_BR> {
- let rd = 0;
- let ra = br;
- let Form = FCCI;
-}
-
-//===----------------------------------------------------------------------===//
-// Branch and Link Instructions
-//===----------------------------------------------------------------------===//
-class BranchL<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
- TA<op, flags, (outs), (ins GPR:$link, GPR:$target),
- !strconcat(instr_asm, " $link, $target"),
- [], IIC_BRl> {
- let ra = br;
- let Form = FRCR;
-}
-
-class BranchLI<bits<6> op, bits<5> br, string instr_asm> :
- TB<op, (outs), (ins GPR:$link, calltarget:$target),
- !strconcat(instr_asm, " $link, $target"),
- [], IIC_BRl> {
- let ra = br;
- let Form = FRCI;
-}
-
-//===----------------------------------------------------------------------===//
-// Conditional Branch Instructions
-//===----------------------------------------------------------------------===//
-class BranchC<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
- TA<op, flags, (outs),
- (ins GPR:$a, GPR:$b),
- !strconcat(instr_asm, " $a, $b"),
- [], IIC_BRc> {
- let rd = br;
- let Form = FCRR;
-}
-
-class BranchCI<bits<6> op, bits<5> br, string instr_asm> :
- TB<op, (outs), (ins GPR:$a, brtarget:$offset),
- !strconcat(instr_asm, " $a, $offset"),
- [], IIC_BRc> {
- let rd = br;
- let Form = FCRI;
-}
-
-//===----------------------------------------------------------------------===//
-// MBlaze arithmetic instructions
-//===----------------------------------------------------------------------===//
-
-let isCommutable = 1, isAsCheapAsAMove = 1 in {
- def ADDK : Arith<0x04, 0x000, "addk ", add, IIC_ALU>;
- def AND : Logic<0x21, 0x000, "and ", and>;
- def OR : Logic<0x20, 0x000, "or ", or>;
- def XOR : Logic<0x22, 0x000, "xor ", xor>;
-
- let Predicates=[HasPatCmp] in {
- def PCMPBF : PatCmp<0x20, 0x400, "pcmpbf ">;
- def PCMPEQ : PatCmp<0x22, 0x400, "pcmpeq ">;
- def PCMPNE : PatCmp<0x23, 0x400, "pcmpne ">;
- }
-
- let Defs = [CARRY] in {
- def ADD : Arith<0x00, 0x000, "add ", addc, IIC_ALU>;
-
- let Uses = [CARRY] in {
- def ADDC : Arith<0x02, 0x000, "addc ", adde, IIC_ALU>;
- }
- }
-
- let Uses = [CARRY] in {
- def ADDKC : ArithN<0x06, 0x000, "addkc ", IIC_ALU>;
- }
-}
-
-let isAsCheapAsAMove = 1 in {
- def ANDN : ArithN<0x23, 0x000, "andn ", IIC_ALU>;
- def CMP : ArithN<0x05, 0x001, "cmp ", IIC_ALU>;
- def CMPU : ArithN<0x05, 0x003, "cmpu ", IIC_ALU>;
- def RSUBK : ArithR<0x05, 0x000, "rsubk ", sub, IIC_ALU>;
-
- let Defs = [CARRY] in {
- def RSUB : ArithR<0x01, 0x000, "rsub ", subc, IIC_ALU>;
-
- let Uses = [CARRY] in {
- def RSUBC : ArithR<0x03, 0x000, "rsubc ", sube, IIC_ALU>;
- }
- }
-
- let Uses = [CARRY] in {
- def RSUBKC : ArithRN<0x07, 0x000, "rsubkc ", IIC_ALU>;
- }
-}
-
-let isCommutable = 1, Predicates=[HasMul] in {
- def MUL : Arith<0x10, 0x000, "mul ", mul, IIC_ALUm>;
-}
-
-let isCommutable = 1, Predicates=[HasMul,HasMul64] in {
- def MULH : Arith<0x10, 0x001, "mulh ", mulhs, IIC_ALUm>;
- def MULHU : Arith<0x10, 0x003, "mulhu ", mulhu, IIC_ALUm>;
-}
-
-let Predicates=[HasMul,HasMul64] in {
- def MULHSU : ArithN<0x10, 0x002, "mulhsu ", IIC_ALUm>;
-}
-
-let Predicates=[HasBarrel] in {
- def BSRL : Arith<0x11, 0x000, "bsrl ", srl, IIC_SHT>;
- def BSRA : Arith<0x11, 0x200, "bsra ", sra, IIC_SHT>;
- def BSLL : Arith<0x11, 0x400, "bsll ", shl, IIC_SHT>;
- def BSRLI : ShiftI<0x19, 0x0, "bsrli ", srl, uimm5, immZExt5>;
- def BSRAI : ShiftI<0x19, 0x1, "bsrai ", sra, uimm5, immZExt5>;
- def BSLLI : ShiftI<0x19, 0x2, "bslli ", shl, uimm5, immZExt5>;
-}
-
-let Predicates=[HasDiv] in {
- def IDIV : ArithR<0x12, 0x000, "idiv ", sdiv, IIC_ALUd>;
- def IDIVU : ArithR<0x12, 0x002, "idivu ", udiv, IIC_ALUd>;
-}
-
-//===----------------------------------------------------------------------===//
-// MBlaze immediate mode arithmetic instructions
-//===----------------------------------------------------------------------===//
-
-let isAsCheapAsAMove = 1 in {
- def ADDIK : ArithI<0x0C, "addik ", add, simm16, immSExt16>;
- def RSUBIK : ArithRI<0x0D, "rsubik ", sub, simm16, immSExt16>;
- def ANDNI : ArithNI<0x2B, "andni ", uimm16, immZExt16>;
- def ANDI : LogicI<0x29, "andi ", and>;
- def ORI : LogicI<0x28, "ori ", or>;
- def XORI : LogicI<0x2A, "xori ", xor>;
-
- let Defs = [CARRY] in {
- def ADDI : ArithI<0x08, "addi ", addc, simm16, immSExt16>;
- def RSUBI : ArithRI<0x09, "rsubi ", subc, simm16, immSExt16>;
-
- let Uses = [CARRY] in {
- def ADDIC : ArithI<0x0A, "addic ", adde, simm16, immSExt16>;
- def RSUBIC : ArithRI<0x0B, "rsubic ", sube, simm16, immSExt16>;
- }
- }
-
- let Uses = [CARRY] in {
- def ADDIKC : ArithNI<0x0E, "addikc ", simm16, immSExt16>;
- def RSUBIKC : ArithRNI<0x0F, "rsubikc", simm16, immSExt16>;
- }
-}
-
-let Predicates=[HasMul] in {
- def MULI : ArithI<0x18, "muli ", mul, simm16, immSExt16>;
-}
-
-//===----------------------------------------------------------------------===//
-// MBlaze memory access instructions
-//===----------------------------------------------------------------------===//
-
-let canFoldAsLoad = 1, isReMaterializable = 1 in {
- let neverHasSideEffects = 1 in {
- def LBU : LoadM<0x30, 0x000, "lbu ">;
- def LBUR : LoadM<0x30, 0x200, "lbur ">;
-
- def LHU : LoadM<0x31, 0x000, "lhu ">;
- def LHUR : LoadM<0x31, 0x200, "lhur ">;
-
- def LW : LoadM<0x32, 0x000, "lw ">;
- def LWR : LoadM<0x32, 0x200, "lwr ">;
-
- let Defs = [CARRY] in {
- def LWX : LoadM<0x32, 0x400, "lwx ">;
- }
- }
-
- def LBUI : LoadMI<0x38, "lbui ", zextloadi8>;
- def LHUI : LoadMI<0x39, "lhui ", zextloadi16>;
- def LWI : LoadMI<0x3A, "lwi ", load>;
-}
-
-def SB : StoreM<0x34, 0x000, "sb ">;
-def SBR : StoreM<0x34, 0x200, "sbr ">;
-
-def SH : StoreM<0x35, 0x000, "sh ">;
-def SHR : StoreM<0x35, 0x200, "shr ">;
-
-def SW : StoreM<0x36, 0x000, "sw ">;
-def SWR : StoreM<0x36, 0x200, "swr ">;
-
-let Defs = [CARRY] in {
- def SWX : StoreM<0x36, 0x400, "swx ">;
-}
-
-def SBI : StoreMI<0x3C, "sbi ", truncstorei8>;
-def SHI : StoreMI<0x3D, "shi ", truncstorei16>;
-def SWI : StoreMI<0x3E, "swi ", store>;
-
-//===----------------------------------------------------------------------===//
-// MBlaze branch instructions
-//===----------------------------------------------------------------------===//
-
-let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
- def BRI : BranchI<0x2E, 0x00, "bri ">;
- def BRAI : BranchI<0x2E, 0x08, "brai ">;
-}
-
-let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in {
- def BEQI : BranchCI<0x2F, 0x00, "beqi ">;
- def BNEI : BranchCI<0x2F, 0x01, "bnei ">;
- def BLTI : BranchCI<0x2F, 0x02, "blti ">;
- def BLEI : BranchCI<0x2F, 0x03, "blei ">;
- def BGTI : BranchCI<0x2F, 0x04, "bgti ">;
- def BGEI : BranchCI<0x2F, 0x05, "bgei ">;
-}
-
-let isBranch = 1, isIndirectBranch = 1, isTerminator = 1, hasCtrlDep = 1,
- isBarrier = 1 in {
- def BR : Branch<0x26, 0x00, 0x000, "br ">;
- def BRA : Branch<0x26, 0x08, 0x000, "bra ">;
-}
-
-let isBranch = 1, isIndirectBranch = 1, isTerminator = 1, hasCtrlDep = 1 in {
- def BEQ : BranchC<0x27, 0x00, 0x000, "beq ">;
- def BNE : BranchC<0x27, 0x01, 0x000, "bne ">;
- def BLT : BranchC<0x27, 0x02, 0x000, "blt ">;
- def BLE : BranchC<0x27, 0x03, 0x000, "ble ">;
- def BGT : BranchC<0x27, 0x04, 0x000, "bgt ">;
- def BGE : BranchC<0x27, 0x05, 0x000, "bge ">;
-}
-
-let isBranch = 1, isTerminator = 1, hasDelaySlot = 1, hasCtrlDep = 1,
- isBarrier = 1 in {
- def BRID : BranchI<0x2E, 0x10, "brid ">;
- def BRAID : BranchI<0x2E, 0x18, "braid ">;
-}
-
-let isBranch = 1, isTerminator = 1, hasDelaySlot = 1, hasCtrlDep = 1 in {
- def BEQID : BranchCI<0x2F, 0x10, "beqid ">;
- def BNEID : BranchCI<0x2F, 0x11, "bneid ">;
- def BLTID : BranchCI<0x2F, 0x12, "bltid ">;
- def BLEID : BranchCI<0x2F, 0x13, "bleid ">;
- def BGTID : BranchCI<0x2F, 0x14, "bgtid ">;
- def BGEID : BranchCI<0x2F, 0x15, "bgeid ">;
-}
-
-let isBranch = 1, isIndirectBranch = 1, isTerminator = 1,
- hasDelaySlot = 1, hasCtrlDep = 1, isBarrier = 1 in {
- def BRD : Branch<0x26, 0x10, 0x000, "brd ">;
- def BRAD : Branch<0x26, 0x18, 0x000, "brad ">;
-}
-
-let isBranch = 1, isIndirectBranch = 1, isTerminator = 1,
- hasDelaySlot = 1, hasCtrlDep = 1 in {
- def BEQD : BranchC<0x27, 0x10, 0x000, "beqd ">;
- def BNED : BranchC<0x27, 0x11, 0x000, "bned ">;
- def BLTD : BranchC<0x27, 0x12, 0x000, "bltd ">;
- def BLED : BranchC<0x27, 0x13, 0x000, "bled ">;
- def BGTD : BranchC<0x27, 0x14, 0x000, "bgtd ">;
- def BGED : BranchC<0x27, 0x15, 0x000, "bged ">;
-}
-
-let isCall = 1, hasDelaySlot = 1,
- Defs = [R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,CARRY],
- Uses = [R1] in {
- def BRLID : BranchLI<0x2E, 0x14, "brlid ">;
- def BRALID : BranchLI<0x2E, 0x1C, "bralid ">;
-}
-
-let isCall = 1, hasDelaySlot = 1,
- Defs = [R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,CARRY],
- Uses = [R1] in {
- def BRLD : BranchL<0x26, 0x14, 0x000, "brld ">;
- def BRALD : BranchL<0x26, 0x1C, 0x000, "brald ">;
-}
-
-let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1,
- rd=0x10, Form=FCRI in {
- def RTSD : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm),
- "rtsd $target, $imm",
- [],
- IIC_BR>;
-}
-
-let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1,
- rd=0x11, Form=FCRI in {
- def RTID : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm),
- "rtid $target, $imm",
- [],
- IIC_BR>;
-}
-
-let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1,
- rd=0x12, Form=FCRI in {
- def RTBD : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm),
- "rtbd $target, $imm",
- [],
- IIC_BR>;
-}
-
-let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1,
- rd=0x14, Form=FCRI in {
- def RTED : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm),
- "rted $target, $imm",
- [],
- IIC_BR>;
-}
-
-//===----------------------------------------------------------------------===//
-// MBlaze misc instructions
-//===----------------------------------------------------------------------===//
-
-let neverHasSideEffects = 1 in {
- def NOP : MBlazeInst<0x20, FC, (outs), (ins), "nop ", [], IIC_ALU>;
-}
-
-let Predicates=[HasPatCmp] in {
- def CLZ : TCLZ<0x24, 0x00E0, (outs GPR:$dst), (ins GPR:$src),
- "clz $dst, $src", [], IIC_ALU>;
-}
-
-def IMEMBAR : MBAR<0x2E, 0x0420004, (outs), (ins), "mbar 2", [], IIC_ALU>;
-def DMEMBAR : MBAR<0x2E, 0x0220004, (outs), (ins), "mbar 1", [], IIC_ALU>;
-def IDMEMBAR : MBAR<0x2E, 0x0020004, (outs), (ins), "mbar 0", [], IIC_ALU>;
-
-let usesCustomInserter = 1 in {
- def Select_CC : MBlazePseudo<(outs GPR:$dst),
- (ins GPR:$T, GPR:$F, GPR:$CMP, i32imm:$CC), // F T reversed
- "; SELECT_CC PSEUDO!",
- []>;
-
- def ShiftL : MBlazePseudo<(outs GPR:$dst),
- (ins GPR:$L, GPR:$R),
- "; ShiftL PSEUDO!",
- []>;
-
- def ShiftRA : MBlazePseudo<(outs GPR:$dst),
- (ins GPR:$L, GPR:$R),
- "; ShiftRA PSEUDO!",
- []>;
-
- def ShiftRL : MBlazePseudo<(outs GPR:$dst),
- (ins GPR:$L, GPR:$R),
- "; ShiftRL PSEUDO!",
- []>;
-}
-
-let rb = 0 in {
- def SEXT16 : TA<0x24, 0x061, (outs GPR:$dst), (ins GPR:$src),
- "sext16 $dst, $src", [], IIC_ALU>;
- def SEXT8 : TA<0x24, 0x060, (outs GPR:$dst), (ins GPR:$src),
- "sext8 $dst, $src", [], IIC_ALU>;
- let Defs = [CARRY] in {
- def SRL : TA<0x24, 0x041, (outs GPR:$dst), (ins GPR:$src),
- "srl $dst, $src", [], IIC_ALU>;
- def SRA : TA<0x24, 0x001, (outs GPR:$dst), (ins GPR:$src),
- "sra $dst, $src", [], IIC_ALU>;
- let Uses = [CARRY] in {
- def SRC : TA<0x24, 0x021, (outs GPR:$dst), (ins GPR:$src),
- "src $dst, $src", [], IIC_ALU>;
- }
- }
-}
-
-let isCodeGenOnly=1 in {
- def ADDIK32 : ArithI32<0x08, "addik ", simm16, immSExt16>;
- def ORI32 : LogicI32<0x28, "ori ">;
- def BRLID32 : BranchLI<0x2E, 0x14, "brlid ">;
-}
-
-//===----------------------------------------------------------------------===//
-// Misc. instructions
-//===----------------------------------------------------------------------===//
-let Form=FRCS in {
- def MFS : SPC<0x25, 0x2, (outs GPR:$dst), (ins SPR:$src),
- "mfs $dst, $src", [], IIC_ALU>;
-}
-
-let Form=FCRCS in {
- def MTS : SPC<0x25, 0x3, (outs SPR:$dst), (ins GPR:$src),
- "mts $dst, $src", [], IIC_ALU>;
-}
-
-def MSRSET : MSR<0x25, 0x20, (outs GPR:$dst), (ins uimm15:$set),
- "msrset $dst, $set", [], IIC_ALU>;
-
-def MSRCLR : MSR<0x25, 0x22, (outs GPR:$dst), (ins uimm15:$clr),
- "msrclr $dst, $clr", [], IIC_ALU>;
-
-let rd=0x0, Form=FCRR in {
- def WDC : TA<0x24, 0x64, (outs), (ins GPR:$a, GPR:$b),
- "wdc $a, $b", [], IIC_WDC>;
- def WDCF : TA<0x24, 0x74, (outs), (ins GPR:$a, GPR:$b),
- "wdc.flush $a, $b", [], IIC_WDC>;
- def WDCC : TA<0x24, 0x66, (outs), (ins GPR:$a, GPR:$b),
- "wdc.clear $a, $b", [], IIC_WDC>;
- def WIC : TA<0x24, 0x68, (outs), (ins GPR:$a, GPR:$b),
- "wic $a, $b", [], IIC_WDC>;
-}
-
-def BRK : BranchL<0x26, 0x0C, 0x000, "brk ">;
-def BRKI : BranchLI<0x2E, 0x0C, "brki ">;
-
-def IMM : MBlazeInst<0x2C, FCCI, (outs), (ins simm16:$imm),
- "imm $imm", [], IIC_ALU>;
-
-//===----------------------------------------------------------------------===//
-// Pseudo instructions for atomic operations
-//===----------------------------------------------------------------------===//
-let usesCustomInserter=1 in {
- def CAS32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$cmp, GPR:$swp),
- "# atomic compare and swap",
- [(set GPR:$dst, (atomic_cmp_swap_32 GPR:$ptr, GPR:$cmp, GPR:$swp))]>;
-
- def SWP32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$swp),
- "# atomic swap",
- [(set GPR:$dst, (atomic_swap_32 GPR:$ptr, GPR:$swp))]>;
-
- def LAA32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
- "# atomic load and add",
- [(set GPR:$dst, (atomic_load_add_32 GPR:$ptr, GPR:$val))]>;
-
- def LAS32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
- "# atomic load and sub",
- [(set GPR:$dst, (atomic_load_sub_32 GPR:$ptr, GPR:$val))]>;
-
- def LAD32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
- "# atomic load and and",
- [(set GPR:$dst, (atomic_load_and_32 GPR:$ptr, GPR:$val))]>;
-
- def LAO32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
- "# atomic load and or",
- [(set GPR:$dst, (atomic_load_or_32 GPR:$ptr, GPR:$val))]>;
-
- def LAX32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
- "# atomic load and xor",
- [(set GPR:$dst, (atomic_load_xor_32 GPR:$ptr, GPR:$val))]>;
-
- def LAN32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
- "# atomic load and nand",
- [(set GPR:$dst, (atomic_load_nand_32 GPR:$ptr, GPR:$val))]>;
-
- def MEMBARRIER : MBlazePseudo<(outs), (ins),
- "# memory barrier", []>;
-}
-
-//===----------------------------------------------------------------------===//
-// Arbitrary patterns that map to one or more instructions
-//===----------------------------------------------------------------------===//
-
-// Small immediates
-def : Pat<(i32 0), (ADDK (i32 R0), (i32 R0))>;
-def : Pat<(i32 immSExt16:$imm), (ADDIK (i32 R0), imm:$imm)>;
-def : Pat<(i32 immZExt16:$imm), (ORI (i32 R0), imm:$imm)>;
-
-// Arbitrary immediates
-def : Pat<(i32 imm:$imm), (ADDIK (i32 R0), imm:$imm)>;
-
-// In-register sign extension
-def : Pat<(sext_inreg GPR:$src, i16), (SEXT16 GPR:$src)>;
-def : Pat<(sext_inreg GPR:$src, i8), (SEXT8 GPR:$src)>;
-
-// Call
-def : Pat<(MBlazeJmpLink (i32 tglobaladdr:$dst)),
- (BRLID (i32 R15), tglobaladdr:$dst)>;
-
-def : Pat<(MBlazeJmpLink (i32 texternalsym:$dst)),
- (BRLID (i32 R15), texternalsym:$dst)>;
-
-def : Pat<(MBlazeJmpLink GPR:$dst),
- (BRALD (i32 R15), GPR:$dst)>;
-
-// Shift Instructions
-def : Pat<(shl GPR:$L, GPR:$R), (ShiftL GPR:$L, GPR:$R)>;
-def : Pat<(sra GPR:$L, GPR:$R), (ShiftRA GPR:$L, GPR:$R)>;
-def : Pat<(srl GPR:$L, GPR:$R), (ShiftRL GPR:$L, GPR:$R)>;
-
-// SET_CC operations
-def : Pat<(setcc (i32 GPR:$L), (i32 0), SETEQ),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 1)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 0), SETNE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 2)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 0), SETGT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 3)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 0), SETLT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 4)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 0), SETGE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 5)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 0), SETLE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 6)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 0), SETUGT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU (i32 R0), GPR:$L), 3)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 0), SETULT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU (i32 R0), GPR:$L), 4)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 0), SETUGE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU (i32 R0), GPR:$L), 5)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 0), SETULE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU (i32 R0), GPR:$L), 6)>;
-
-def : Pat<(setcc (i32 0), (i32 GPR:$R), SETEQ),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 1)>;
-def : Pat<(setcc (i32 0), (i32 GPR:$R), SETNE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 2)>;
-def : Pat<(setcc (i32 0), (i32 GPR:$R), SETGT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 3)>;
-def : Pat<(setcc (i32 0), (i32 GPR:$R), SETLT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 4)>;
-def : Pat<(setcc (i32 0), (i32 GPR:$R), SETGE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 5)>;
-def : Pat<(setcc (i32 0), (i32 GPR:$R), SETLE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 6)>;
-def : Pat<(setcc (i32 0), (i32 GPR:$R), SETUGT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU GPR:$R, (i32 R0)), 3)>;
-def : Pat<(setcc (i32 0), (i32 GPR:$R), SETULT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU GPR:$R, (i32 R0)), 4)>;
-def : Pat<(setcc (i32 0), (i32 GPR:$R), SETUGE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU GPR:$R, (i32 R0)), 5)>;
-def : Pat<(setcc (i32 0), (i32 GPR:$R), SETULE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU GPR:$R, (i32 R0)), 6)>;
-
-def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETEQ),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMP GPR:$R, GPR:$L), 1)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETNE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMP GPR:$R, GPR:$L), 2)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETGT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMP GPR:$R, GPR:$L), 3)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETLT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMP GPR:$R, GPR:$L), 4)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETGE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMP GPR:$R, GPR:$L), 5)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETLE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMP GPR:$R, GPR:$L), 6)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETUGT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU GPR:$R, GPR:$L), 3)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETULT),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU GPR:$R, GPR:$L), 4)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETUGE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU GPR:$R, GPR:$L), 5)>;
-def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETULE),
- (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
- (CMPU GPR:$R, GPR:$L), 6)>;
-
-// SELECT operations
-def : Pat<(select (i32 GPR:$C), (i32 GPR:$T), (i32 GPR:$F)),
- (Select_CC GPR:$T, GPR:$F, GPR:$C, 2)>;
-
-// SELECT_CC
-def : Pat<(selectcc (i32 GPR:$L), (i32 0),
- (i32 GPR:$T), (i32 GPR:$F), SETEQ),
- (Select_CC GPR:$T, GPR:$F, GPR:$L, 1)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 0),
- (i32 GPR:$T), (i32 GPR:$F), SETNE),
- (Select_CC GPR:$T, GPR:$F, GPR:$L, 2)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 0),
- (i32 GPR:$T), (i32 GPR:$F), SETGT),
- (Select_CC GPR:$T, GPR:$F, GPR:$L, 3)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 0),
- (i32 GPR:$T), (i32 GPR:$F), SETLT),
- (Select_CC GPR:$T, GPR:$F, GPR:$L, 4)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 0),
- (i32 GPR:$T), (i32 GPR:$F), SETGE),
- (Select_CC GPR:$T, GPR:$F, GPR:$L, 5)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 0),
- (i32 GPR:$T), (i32 GPR:$F), SETLE),
- (Select_CC GPR:$T, GPR:$F, GPR:$L, 6)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 0),
- (i32 GPR:$T), (i32 GPR:$F), SETUGT),
- (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 3)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 0),
- (i32 GPR:$T), (i32 GPR:$F), SETULT),
- (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 4)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 0),
- (i32 GPR:$T), (i32 GPR:$F), SETUGE),
- (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 5)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 0),
- (i32 GPR:$T), (i32 GPR:$F), SETULE),
- (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 6)>;
-
-def : Pat<(selectcc (i32 0), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETEQ),
- (Select_CC GPR:$T, GPR:$F, GPR:$R, 1)>;
-def : Pat<(selectcc (i32 0), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETNE),
- (Select_CC GPR:$T, GPR:$F, GPR:$R, 2)>;
-def : Pat<(selectcc (i32 0), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETGT),
- (Select_CC GPR:$T, GPR:$F, GPR:$R, 3)>;
-def : Pat<(selectcc (i32 0), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETLT),
- (Select_CC GPR:$T, GPR:$F, GPR:$R, 4)>;
-def : Pat<(selectcc (i32 0), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETGE),
- (Select_CC GPR:$T, GPR:$F, GPR:$R, 5)>;
-def : Pat<(selectcc (i32 0), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETLE),
- (Select_CC GPR:$T, GPR:$F, GPR:$R, 6)>;
-def : Pat<(selectcc (i32 0), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETUGT),
- (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 3)>;
-def : Pat<(selectcc (i32 0), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETULT),
- (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 4)>;
-def : Pat<(selectcc (i32 0), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETUGE),
- (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 5)>;
-def : Pat<(selectcc (i32 0), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETULE),
- (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 6)>;
-
-def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETEQ),
- (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 1)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETNE),
- (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 2)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETGT),
- (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 3)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETLT),
- (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 4)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETGE),
- (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 5)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETLE),
- (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 6)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETUGT),
- (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 3)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETULT),
- (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 4)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETUGE),
- (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 5)>;
-def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
- (i32 GPR:$T), (i32 GPR:$F), SETULE),
- (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 6)>;
-
-// Ret instructions
-def : Pat<(MBlazeRet GPR:$target), (RTSD GPR:$target, 0x8)>;
-def : Pat<(MBlazeIRet GPR:$target), (RTID GPR:$target, 0x0)>;
-
-// BR instructions
-def : Pat<(br bb:$T), (BRID bb:$T)>;
-def : Pat<(brind GPR:$T), (BRAD GPR:$T)>;
-
-// BRCOND instructions
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETEQ), bb:$T),
- (BEQID GPR:$L, bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETNE), bb:$T),
- (BNEID GPR:$L, bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETGT), bb:$T),
- (BGTID GPR:$L, bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETLT), bb:$T),
- (BLTID GPR:$L, bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETGE), bb:$T),
- (BGEID GPR:$L, bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETLE), bb:$T),
- (BLEID GPR:$L, bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETUGT), bb:$T),
- (BGTID (CMPU (i32 R0), GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETULT), bb:$T),
- (BLTID (CMPU (i32 R0), GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETUGE), bb:$T),
- (BGEID (CMPU (i32 R0), GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETULE), bb:$T),
- (BLEID (CMPU (i32 R0), GPR:$L), bb:$T)>;
-
-def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETEQ), bb:$T),
- (BEQID GPR:$R, bb:$T)>;
-def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETNE), bb:$T),
- (BNEID GPR:$R, bb:$T)>;
-def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETGT), bb:$T),
- (BGTID GPR:$R, bb:$T)>;
-def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETLT), bb:$T),
- (BLTID GPR:$R, bb:$T)>;
-def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETGE), bb:$T),
- (BGEID GPR:$R, bb:$T)>;
-def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETLE), bb:$T),
- (BLEID GPR:$R, bb:$T)>;
-def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETUGT), bb:$T),
- (BGTID (CMPU GPR:$R, (i32 R0)), bb:$T)>;
-def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETULT), bb:$T),
- (BLTID (CMPU GPR:$R, (i32 R0)), bb:$T)>;
-def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETUGE), bb:$T),
- (BGEID (CMPU GPR:$R, (i32 R0)), bb:$T)>;
-def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETULE), bb:$T),
- (BLEID (CMPU GPR:$R, (i32 R0)), bb:$T)>;
-
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETEQ), bb:$T),
- (BEQID (CMP GPR:$R, GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETNE), bb:$T),
- (BNEID (CMP GPR:$R, GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETGT), bb:$T),
- (BGTID (CMP GPR:$R, GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETLT), bb:$T),
- (BLTID (CMP GPR:$R, GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETGE), bb:$T),
- (BGEID (CMP GPR:$R, GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETLE), bb:$T),
- (BLEID (CMP GPR:$R, GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETUGT), bb:$T),
- (BGTID (CMPU GPR:$R, GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETULT), bb:$T),
- (BLTID (CMPU GPR:$R, GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETUGE), bb:$T),
- (BGEID (CMPU GPR:$R, GPR:$L), bb:$T)>;
-def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETULE), bb:$T),
- (BLEID (CMPU GPR:$R, GPR:$L), bb:$T)>;
-def : Pat<(brcond (i32 GPR:$C), bb:$T),
- (BNEID GPR:$C, bb:$T)>;
-
-// Jump tables, global addresses, and constant pools
-def : Pat<(MBWrapper tglobaladdr:$in), (ORI (i32 R0), tglobaladdr:$in)>;
-def : Pat<(MBWrapper tjumptable:$in), (ORI (i32 R0), tjumptable:$in)>;
-def : Pat<(MBWrapper tconstpool:$in), (ORI (i32 R0), tconstpool:$in)>;
-
-// Misc instructions
-def : Pat<(and (i32 GPR:$lh), (not (i32 GPR:$rh))),(ANDN GPR:$lh, GPR:$rh)>;
-
-// Convert anyext loads into zero-extend loads
-def : Pat<(extloadi8 iaddr:$src), (i32 (LBUI iaddr:$src))>;
-def : Pat<(extloadi16 iaddr:$src), (i32 (LHUI iaddr:$src))>;
-def : Pat<(extloadi8 xaddr:$src), (i32 (LBU xaddr:$src))>;
-def : Pat<(extloadi16 xaddr:$src), (i32 (LHU xaddr:$src))>;
-
-// 32-bit load and store
-def : Pat<(store (i32 GPR:$dst), xaddr:$addr), (SW GPR:$dst, xaddr:$addr)>;
-def : Pat<(load xaddr:$addr), (i32 (LW xaddr:$addr))>;
-
-// 16-bit load and store
-def : Pat<(truncstorei16 (i32 GPR:$dst), xaddr:$ad), (SH GPR:$dst, xaddr:$ad)>;
-def : Pat<(zextloadi16 xaddr:$addr), (i32 (LHU xaddr:$addr))>;
-
-// 8-bit load and store
-def : Pat<(truncstorei8 (i32 GPR:$dst), xaddr:$ad), (SB GPR:$dst, xaddr:$ad)>;
-def : Pat<(zextloadi8 xaddr:$addr), (i32 (LBU xaddr:$addr))>;
-
-// Peepholes
-def : Pat<(store (i32 0), iaddr:$dst), (SWI (i32 R0), iaddr:$dst)>;
-
-// Atomic fence
-def : Pat<(atomic_fence (imm), (imm)), (MEMBARRIER)>;
-
-//===----------------------------------------------------------------------===//
-// Floating Point Support
-//===----------------------------------------------------------------------===//
-include "MBlazeInstrFSL.td"
-include "MBlazeInstrFPU.td"
diff --git a/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp b/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
deleted file mode 100644
index 8d262a01e706..000000000000
--- a/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-//===-- MBlazeIntrinsicInfo.cpp - Intrinsic Information -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the MBlaze implementation of TargetIntrinsicInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeIntrinsicInfo.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/Intrinsics.h"
-#include "llvm/IR/Module.h"
-#include "llvm/IR/Type.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cstring>
-
-using namespace llvm;
-
-namespace mblazeIntrinsic {
-
- enum ID {
- last_non_mblaze_intrinsic = Intrinsic::num_intrinsics-1,
-#define GET_INTRINSIC_ENUM_VALUES
-#include "MBlazeGenIntrinsics.inc"
-#undef GET_INTRINSIC_ENUM_VALUES
- , num_mblaze_intrinsics
- };
-
-#define GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
-#include "MBlazeGenIntrinsics.inc"
-#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
-}
-
-std::string MBlazeIntrinsicInfo::getName(unsigned IntrID, Type **Tys,
- unsigned numTys) const {
- static const char *const names[] = {
-#define GET_INTRINSIC_NAME_TABLE
-#include "MBlazeGenIntrinsics.inc"
-#undef GET_INTRINSIC_NAME_TABLE
- };
-
- assert(!isOverloaded(IntrID) && "MBlaze intrinsics are not overloaded");
- if (IntrID < Intrinsic::num_intrinsics)
- return "";
- assert(IntrID < mblazeIntrinsic::num_mblaze_intrinsics &&
- "Invalid intrinsic ID");
-
- std::string Result(names[IntrID - Intrinsic::num_intrinsics]);
- return Result;
-}
-
-unsigned MBlazeIntrinsicInfo::
-lookupName(const char *Name, unsigned Len) const {
- if (Len < 5 || Name[4] != '.' || Name[0] != 'l' || Name[1] != 'l'
- || Name[2] != 'v' || Name[3] != 'm')
- return 0; // All intrinsics start with 'llvm.'
-
-#define GET_FUNCTION_RECOGNIZER
-#include "MBlazeGenIntrinsics.inc"
-#undef GET_FUNCTION_RECOGNIZER
- return 0;
-}
-
-unsigned MBlazeIntrinsicInfo::
-lookupGCCName(const char *Name) const {
- return mblazeIntrinsic::getIntrinsicForGCCBuiltin("mblaze",Name);
-}
-
-bool MBlazeIntrinsicInfo::isOverloaded(unsigned IntrID) const {
- if (IntrID == 0)
- return false;
-
- unsigned id = IntrID - Intrinsic::num_intrinsics + 1;
-#define GET_INTRINSIC_OVERLOAD_TABLE
-#include "MBlazeGenIntrinsics.inc"
-#undef GET_INTRINSIC_OVERLOAD_TABLE
-}
-
-/// This defines the "getAttributes(LLVMContext &C, ID id)" method.
-#define GET_INTRINSIC_ATTRIBUTES
-#include "MBlazeGenIntrinsics.inc"
-#undef GET_INTRINSIC_ATTRIBUTES
-
-static FunctionType *getType(LLVMContext &Context, unsigned id) {
- Type *ResultTy = NULL;
- SmallVector<Type*, 8> ArgTys;
- bool IsVarArg = false;
-
-#define GET_INTRINSIC_GENERATOR
-#include "MBlazeGenIntrinsics.inc"
-#undef GET_INTRINSIC_GENERATOR
-
- return FunctionType::get(ResultTy, ArgTys, IsVarArg);
-}
-
-Function *MBlazeIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
- Type **Tys,
- unsigned numTy) const {
- assert(!isOverloaded(IntrID) && "MBlaze intrinsics are not overloaded");
- AttributeSet AList = getAttributes(M->getContext(),
- (mblazeIntrinsic::ID) IntrID);
- return cast<Function>(M->getOrInsertFunction(getName(IntrID),
- getType(M->getContext(), IntrID),
- AList));
-}
diff --git a/lib/Target/MBlaze/MBlazeIntrinsicInfo.h b/lib/Target/MBlaze/MBlazeIntrinsicInfo.h
deleted file mode 100644
index 34f379230def..000000000000
--- a/lib/Target/MBlaze/MBlazeIntrinsicInfo.h
+++ /dev/null
@@ -1,33 +0,0 @@
-//===-- MBlazeIntrinsicInfo.h - MBlaze Intrinsic Information ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the MBlaze implementation of TargetIntrinsicInfo.
-//
-//===----------------------------------------------------------------------===//
-#ifndef MBLAZEINTRINSICS_H
-#define MBLAZEINTRINSICS_H
-
-#include "llvm/Target/TargetIntrinsicInfo.h"
-
-namespace llvm {
-
- class MBlazeIntrinsicInfo : public TargetIntrinsicInfo {
- public:
- std::string getName(unsigned IntrID, Type **Tys = 0,
- unsigned numTys = 0) const;
- unsigned lookupName(const char *Name, unsigned Len) const;
- unsigned lookupGCCName(const char *Name) const;
- bool isOverloaded(unsigned IID) const;
- Function *getDeclaration(Module *M, unsigned ID, Type **Tys = 0,
- unsigned numTys = 0) const;
- };
-
-}
-
-#endif
diff --git a/lib/Target/MBlaze/MBlazeIntrinsics.td b/lib/Target/MBlaze/MBlazeIntrinsics.td
deleted file mode 100644
index b5dc59547bbf..000000000000
--- a/lib/Target/MBlaze/MBlazeIntrinsics.td
+++ /dev/null
@@ -1,131 +0,0 @@
-//===-- IntrinsicsMBlaze.td - Defines MBlaze intrinsics ----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines all of the MicroBlaze-specific intrinsics.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Definitions for all MBlaze intrinsics.
-//
-
-// MBlaze intrinsic classes.
-let TargetPrefix = "mblaze", isTarget = 1 in {
- class MBFSL_Get_Intrinsic : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
-
- class MBFSL_Put_Intrinsic : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>;
-
- class MBFSL_PutT_Intrinsic : Intrinsic<[], [llvm_i32_ty], []>;
-}
-
-//===----------------------------------------------------------------------===//
-// MicroBlaze FSL Get Intrinsic Definitions.
-//
-
-def int_mblaze_fsl_get : GCCBuiltin<"__builtin_mblaze_fsl_get">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_aget : GCCBuiltin<"__builtin_mblaze_fsl_aget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_cget : GCCBuiltin<"__builtin_mblaze_fsl_cget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_caget : GCCBuiltin<"__builtin_mblaze_fsl_caget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_eget : GCCBuiltin<"__builtin_mblaze_fsl_eget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_eaget : GCCBuiltin<"__builtin_mblaze_fsl_eaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_ecget : GCCBuiltin<"__builtin_mblaze_fsl_ecget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_ecaget : GCCBuiltin<"__builtin_mblaze_fsl_ecaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_nget : GCCBuiltin<"__builtin_mblaze_fsl_nget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_naget : GCCBuiltin<"__builtin_mblaze_fsl_naget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_ncget : GCCBuiltin<"__builtin_mblaze_fsl_ncget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_ncaget : GCCBuiltin<"__builtin_mblaze_fsl_ncaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_neget : GCCBuiltin<"__builtin_mblaze_fsl_neget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_neaget : GCCBuiltin<"__builtin_mblaze_fsl_neaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_necget : GCCBuiltin<"__builtin_mblaze_fsl_necget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_necaget : GCCBuiltin<"__builtin_mblaze_fsl_necaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tget : GCCBuiltin<"__builtin_mblaze_fsl_tget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_taget : GCCBuiltin<"__builtin_mblaze_fsl_taget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tcget : GCCBuiltin<"__builtin_mblaze_fsl_tcget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tcaget : GCCBuiltin<"__builtin_mblaze_fsl_tcaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_teget : GCCBuiltin<"__builtin_mblaze_fsl_teget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_teaget : GCCBuiltin<"__builtin_mblaze_fsl_teaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tecget : GCCBuiltin<"__builtin_mblaze_fsl_tecget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tecaget : GCCBuiltin<"__builtin_mblaze_fsl_tecaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tnget : GCCBuiltin<"__builtin_mblaze_fsl_tnget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tnaget : GCCBuiltin<"__builtin_mblaze_fsl_tnaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tncget : GCCBuiltin<"__builtin_mblaze_fsl_tncget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tncaget : GCCBuiltin<"__builtin_mblaze_fsl_tncaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tneget : GCCBuiltin<"__builtin_mblaze_fsl_tneget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tneaget : GCCBuiltin<"__builtin_mblaze_fsl_tneaget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tnecget : GCCBuiltin<"__builtin_mblaze_fsl_tnecget">,
- MBFSL_Get_Intrinsic;
-def int_mblaze_fsl_tnecaget : GCCBuiltin<"__builtin_mblaze_fsl_tnecaget">,
- MBFSL_Get_Intrinsic;
-
-//===----------------------------------------------------------------------===//
-// MicroBlaze FSL Put Intrinsic Definitions.
-//
-
-def int_mblaze_fsl_put : GCCBuiltin<"__builtin_mblaze_fsl_put">,
- MBFSL_Put_Intrinsic;
-def int_mblaze_fsl_aput : GCCBuiltin<"__builtin_mblaze_fsl_aput">,
- MBFSL_Put_Intrinsic;
-def int_mblaze_fsl_cput : GCCBuiltin<"__builtin_mblaze_fsl_cput">,
- MBFSL_Put_Intrinsic;
-def int_mblaze_fsl_caput : GCCBuiltin<"__builtin_mblaze_fsl_caput">,
- MBFSL_Put_Intrinsic;
-def int_mblaze_fsl_nput : GCCBuiltin<"__builtin_mblaze_fsl_nput">,
- MBFSL_Put_Intrinsic;
-def int_mblaze_fsl_naput : GCCBuiltin<"__builtin_mblaze_fsl_naput">,
- MBFSL_Put_Intrinsic;
-def int_mblaze_fsl_ncput : GCCBuiltin<"__builtin_mblaze_fsl_ncput">,
- MBFSL_Put_Intrinsic;
-def int_mblaze_fsl_ncaput : GCCBuiltin<"__builtin_mblaze_fsl_ncaput">,
- MBFSL_Put_Intrinsic;
-def int_mblaze_fsl_tput : GCCBuiltin<"__builtin_mblaze_fsl_tput">,
- MBFSL_PutT_Intrinsic;
-def int_mblaze_fsl_taput : GCCBuiltin<"__builtin_mblaze_fsl_taput">,
- MBFSL_PutT_Intrinsic;
-def int_mblaze_fsl_tcput : GCCBuiltin<"__builtin_mblaze_fsl_tcput">,
- MBFSL_PutT_Intrinsic;
-def int_mblaze_fsl_tcaput : GCCBuiltin<"__builtin_mblaze_fsl_tcaput">,
- MBFSL_PutT_Intrinsic;
-def int_mblaze_fsl_tnput : GCCBuiltin<"__builtin_mblaze_fsl_tnput">,
- MBFSL_PutT_Intrinsic;
-def int_mblaze_fsl_tnaput : GCCBuiltin<"__builtin_mblaze_fsl_tnaput">,
- MBFSL_PutT_Intrinsic;
-def int_mblaze_fsl_tncput : GCCBuiltin<"__builtin_mblaze_fsl_tncput">,
- MBFSL_PutT_Intrinsic;
-def int_mblaze_fsl_tncaput : GCCBuiltin<"__builtin_mblaze_fsl_tncaput">,
- MBFSL_PutT_Intrinsic;
diff --git a/lib/Target/MBlaze/MBlazeMCInstLower.cpp b/lib/Target/MBlaze/MBlazeMCInstLower.cpp
deleted file mode 100644
index ad414ac40fd7..000000000000
--- a/lib/Target/MBlaze/MBlazeMCInstLower.cpp
+++ /dev/null
@@ -1,167 +0,0 @@
-//===-- MBlazeMCInstLower.cpp - Convert MBlaze MachineInstr to an MCInst---===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains code to lower MBlaze MachineInstrs to their corresponding
-// MCInst records.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeMCInstLower.h"
-#include "MBlazeInstrInfo.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
-using namespace llvm;
-
-MCSymbol *MBlazeMCInstLower::
-GetGlobalAddressSymbol(const MachineOperand &MO) const {
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
- case 0: break;
- }
-
- return Printer.Mang->getSymbol(MO.getGlobal());
-}
-
-MCSymbol *MBlazeMCInstLower::
-GetExternalSymbolSymbol(const MachineOperand &MO) const {
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
- case 0: break;
- }
-
- return Printer.GetExternalSymbolSymbol(MO.getSymbolName());
-}
-
-MCSymbol *MBlazeMCInstLower::
-GetJumpTableSymbol(const MachineOperand &MO) const {
- SmallString<256> Name;
- raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "JTI"
- << Printer.getFunctionNumber() << '_'
- << MO.getIndex();
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
- case 0: break;
- }
-
- // Create a symbol for the name.
- return Ctx.GetOrCreateSymbol(Name.str());
-}
-
-MCSymbol *MBlazeMCInstLower::
-GetConstantPoolIndexSymbol(const MachineOperand &MO) const {
- SmallString<256> Name;
- raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "CPI"
- << Printer.getFunctionNumber() << '_'
- << MO.getIndex();
-
- switch (MO.getTargetFlags()) {
- default:
- llvm_unreachable("Unknown target flag on GV operand");
-
- case 0: break;
- }
-
- // Create a symbol for the name.
- return Ctx.GetOrCreateSymbol(Name.str());
-}
-
-MCSymbol *MBlazeMCInstLower::
-GetBlockAddressSymbol(const MachineOperand &MO) const {
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
- case 0: break;
- }
-
- return Printer.GetBlockAddressSymbol(MO.getBlockAddress());
-}
-
-MCOperand MBlazeMCInstLower::
-LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const {
- // FIXME: We would like an efficient form for this, so we don't have to do a
- // lot of extra uniquing.
- const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, Ctx);
-
- switch (MO.getTargetFlags()) {
- default:
- llvm_unreachable("Unknown target flag on GV operand");
-
- case 0: break;
- }
-
- if (!MO.isJTI() && MO.getOffset())
- Expr = MCBinaryExpr::CreateAdd(Expr,
- MCConstantExpr::Create(MO.getOffset(), Ctx),
- Ctx);
- return MCOperand::CreateExpr(Expr);
-}
-
-void MBlazeMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
- OutMI.setOpcode(MI->getOpcode());
-
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
-
- MCOperand MCOp;
- switch (MO.getType()) {
- default: llvm_unreachable("unknown operand type");
- case MachineOperand::MO_Register:
- // Ignore all implicit register operands.
- if (MO.isImplicit()) continue;
- MCOp = MCOperand::CreateReg(MO.getReg());
- break;
- case MachineOperand::MO_Immediate:
- MCOp = MCOperand::CreateImm(MO.getImm());
- break;
- case MachineOperand::MO_MachineBasicBlock:
- MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
- MO.getMBB()->getSymbol(), Ctx));
- break;
- case MachineOperand::MO_GlobalAddress:
- MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
- break;
- case MachineOperand::MO_ExternalSymbol:
- MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
- break;
- case MachineOperand::MO_JumpTableIndex:
- MCOp = LowerSymbolOperand(MO, GetJumpTableSymbol(MO));
- break;
- case MachineOperand::MO_ConstantPoolIndex:
- MCOp = LowerSymbolOperand(MO, GetConstantPoolIndexSymbol(MO));
- break;
- case MachineOperand::MO_BlockAddress:
- MCOp = LowerSymbolOperand(MO, GetBlockAddressSymbol(MO));
- break;
- case MachineOperand::MO_FPImmediate: {
- bool ignored;
- APFloat FVal = MO.getFPImm()->getValueAPF();
- FVal.convert(APFloat::IEEEsingle, APFloat::rmTowardZero, &ignored);
-
- APInt IVal = FVal.bitcastToAPInt();
- uint64_t Val = *IVal.getRawData();
- MCOp = MCOperand::CreateImm(Val);
- break;
- }
- case MachineOperand::MO_RegisterMask:
- continue;
- }
-
- OutMI.addOperand(MCOp);
- }
-}
diff --git a/lib/Target/MBlaze/MBlazeMCInstLower.h b/lib/Target/MBlaze/MBlazeMCInstLower.h
deleted file mode 100644
index 8ab2c9a3a633..000000000000
--- a/lib/Target/MBlaze/MBlazeMCInstLower.h
+++ /dev/null
@@ -1,47 +0,0 @@
-//===-- MBlazeMCInstLower.h - Lower MachineInstr to MCInst ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZE_MCINSTLOWER_H
-#define MBLAZE_MCINSTLOWER_H
-
-#include "llvm/Support/Compiler.h"
-
-namespace llvm {
- class AsmPrinter;
- class MCContext;
- class MCInst;
- class MCOperand;
- class MCSymbol;
- class MachineInstr;
- class MachineModuleInfoMachO;
- class MachineOperand;
-
- /// MBlazeMCInstLower - This class is used to lower a MachineInstr
- /// into an MCInst.
-class LLVM_LIBRARY_VISIBILITY MBlazeMCInstLower {
- MCContext &Ctx;
-
- AsmPrinter &Printer;
-public:
- MBlazeMCInstLower(MCContext &ctx, AsmPrinter &printer)
- : Ctx(ctx), Printer(printer) {}
- void Lower(const MachineInstr *MI, MCInst &OutMI) const;
-
- MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
-
- MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
- MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const;
- MCSymbol *GetJumpTableSymbol(const MachineOperand &MO) const;
- MCSymbol *GetConstantPoolIndexSymbol(const MachineOperand &MO) const;
- MCSymbol *GetBlockAddressSymbol(const MachineOperand &MO) const;
-};
-
-}
-
-#endif
diff --git a/lib/Target/MBlaze/MBlazeMachineFunction.h b/lib/Target/MBlaze/MBlazeMachineFunction.h
deleted file mode 100644
index 10d507f37bbc..000000000000
--- a/lib/Target/MBlaze/MBlazeMachineFunction.h
+++ /dev/null
@@ -1,169 +0,0 @@
-//===-- MBlazeMachineFunctionInfo.h - Private data --------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the MBlaze specific subclass of MachineFunctionInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZE_MACHINE_FUNCTION_INFO_H
-#define MBLAZE_MACHINE_FUNCTION_INFO_H
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-
-namespace llvm {
-
-/// MBlazeFunctionInfo - This class is derived from MachineFunctionInfo and
-/// contains private MBlaze target-specific information for each MachineFunction.
-class MBlazeFunctionInfo : public MachineFunctionInfo {
- virtual void anchor();
-
- /// Holds, for each function, where on the stack the Frame Pointer must be
- /// saved. This is used in the prologue and epilogue to emit FP save/restore.
- int FPStackOffset;
-
- /// Holds, for each function, where on the stack the Return Address must be
- /// saved. This is used in the prologue and epilogue to emit RA save/restore.
- int RAStackOffset;
-
- /// MBlazeFIHolder - Holds a FrameIndex and its Stack Pointer Offset
- struct MBlazeFIHolder {
-
- int FI;
- int SPOffset;
-
- MBlazeFIHolder(int FrameIndex, int StackPointerOffset)
- : FI(FrameIndex), SPOffset(StackPointerOffset) {}
- };
-
- /// When PIC is used the GP must be saved on the stack in the function
- /// prologue and must be reloaded from this stack location after every
- /// call. A reference to its stack location and frame index must be kept
- /// to be used in emitPrologue and processFunctionBeforeFrameFinalized.
- MBlazeFIHolder GPHolder;
-
- /// In LowerFormalArguments the stack size is unknown, so the Stack
- /// Pointer Offset calculation for arguments not passed in registers must be
- /// postponed to emitPrologue.
- SmallVector<MBlazeFIHolder, 16> FnLoadArgs;
- bool HasLoadArgs;
-
- // For VarArgs we must write the registers back to the caller's stack,
- // preserving the register arguments. Since the stack size is unknown in
- // LowerFormalArguments, the Stack Pointer Offset calculation must be
- // postponed to emitPrologue.
- SmallVector<MBlazeFIHolder, 4> FnStoreVarArgs;
- bool HasStoreVarArgs;
-
- // When determining the final stack layout some of the frame indexes may
- // be replaced by new frame indexes that reside in the caller's stack
- // frame. The replacements are recorded in this structure.
- DenseMap<int,int> FIReplacements;
-
- /// SRetReturnReg - Some subtargets require that sret lowering includes
- /// returning the value of the returned struct in a register. This field
- /// holds the virtual register into which the sret argument is passed.
- unsigned SRetReturnReg;
-
- /// GlobalBaseReg - keeps track of the virtual register initialized for
- /// use as the global base register. This is used in some PIC
- /// relocation models.
- unsigned GlobalBaseReg;
-
- // VarArgsFrameIndex - FrameIndex for start of varargs area.
- int VarArgsFrameIndex;
-
- /// LiveInFI - keeps track of the frame indexes in a caller's stack
- /// frame that are live into a function.
- SmallVector<int, 16> LiveInFI;
-
-public:
- MBlazeFunctionInfo(MachineFunction& MF)
- : FPStackOffset(0), RAStackOffset(0), GPHolder(-1,-1), HasLoadArgs(false),
- HasStoreVarArgs(false), SRetReturnReg(0), GlobalBaseReg(0),
- VarArgsFrameIndex(0), LiveInFI()
- {}
-
- int getFPStackOffset() const { return FPStackOffset; }
- void setFPStackOffset(int Off) { FPStackOffset = Off; }
-
- int getRAStackOffset() const { return RAStackOffset; }
- void setRAStackOffset(int Off) { RAStackOffset = Off; }
-
- int getGPStackOffset() const { return GPHolder.SPOffset; }
- int getGPFI() const { return GPHolder.FI; }
- void setGPStackOffset(int Off) { GPHolder.SPOffset = Off; }
- void setGPFI(int FI) { GPHolder.FI = FI; }
- bool needGPSaveRestore() const { return GPHolder.SPOffset != -1; }
-
- bool hasLoadArgs() const { return HasLoadArgs; }
- bool hasStoreVarArgs() const { return HasStoreVarArgs; }
-
- void recordLiveIn(int FI) {
- LiveInFI.push_back(FI);
- }
-
- bool isLiveIn(int FI) {
- for (unsigned i = 0, e = LiveInFI.size(); i < e; ++i)
- if (FI == LiveInFI[i]) return true;
-
- return false;
- }
-
- const SmallVector<int, 16>& getLiveIn() const { return LiveInFI; }
-
- void recordReplacement(int OFI, int NFI) {
- FIReplacements.insert(std::make_pair(OFI,NFI));
- }
-
- bool hasReplacement(int OFI) const {
- return FIReplacements.find(OFI) != FIReplacements.end();
- }
-
- int getReplacement(int OFI) const {
- return FIReplacements.lookup(OFI);
- }
-
- void recordLoadArgsFI(int FI, int SPOffset) {
- if (!HasLoadArgs) HasLoadArgs=true;
- FnLoadArgs.push_back(MBlazeFIHolder(FI, SPOffset));
- }
-
- void recordStoreVarArgsFI(int FI, int SPOffset) {
- if (!HasStoreVarArgs) HasStoreVarArgs=true;
- FnStoreVarArgs.push_back(MBlazeFIHolder(FI, SPOffset));
- }
-
- void adjustLoadArgsFI(MachineFrameInfo *MFI) const {
- if (!hasLoadArgs()) return;
- for (unsigned i = 0, e = FnLoadArgs.size(); i != e; ++i)
- MFI->setObjectOffset(FnLoadArgs[i].FI, FnLoadArgs[i].SPOffset);
- }
-
- void adjustStoreVarArgsFI(MachineFrameInfo *MFI) const {
- if (!hasStoreVarArgs()) return;
- for (unsigned i = 0, e = FnStoreVarArgs.size(); i != e; ++i)
- MFI->setObjectOffset(FnStoreVarArgs[i].FI, FnStoreVarArgs[i].SPOffset);
- }
-
- unsigned getSRetReturnReg() const { return SRetReturnReg; }
- void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
-
- unsigned getGlobalBaseReg() const { return GlobalBaseReg; }
- void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; }
-
- int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
- void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
-};
-
-} // end of namespace llvm
-
-#endif // MBLAZE_MACHINE_FUNCTION_INFO_H
diff --git a/lib/Target/MBlaze/MBlazeRegisterInfo.cpp b/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
deleted file mode 100644
index bd83afc1cc83..000000000000
--- a/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
+++ /dev/null
@@ -1,145 +0,0 @@
-//===-- MBlazeRegisterInfo.cpp - MBlaze Register Information --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the MBlaze implementation of the TargetRegisterInfo
-// class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "mblaze-frame-info"
-
-#include "MBlazeRegisterInfo.h"
-#include "MBlaze.h"
-#include "MBlazeMachineFunction.h"
-#include "MBlazeSubtarget.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/Type.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-
-#define GET_REGINFO_TARGET_DESC
-#include "MBlazeGenRegisterInfo.inc"
-
-using namespace llvm;
-
-MBlazeRegisterInfo::
-MBlazeRegisterInfo(const MBlazeSubtarget &ST, const TargetInstrInfo &tii)
- : MBlazeGenRegisterInfo(MBlaze::R15), Subtarget(ST), TII(tii) {}
-
-unsigned MBlazeRegisterInfo::getPICCallReg() {
- return MBlaze::R20;
-}
-
-//===----------------------------------------------------------------------===//
-// Callee Saved Registers methods
-//===----------------------------------------------------------------------===//
-
-/// MBlaze Callee Saved Registers
-const uint16_t* MBlazeRegisterInfo::
-getCalleeSavedRegs(const MachineFunction *MF) const {
- // MBlaze callee-save register range is R20 - R31
- static const uint16_t CalleeSavedRegs[] = {
- MBlaze::R20, MBlaze::R21, MBlaze::R22, MBlaze::R23,
- MBlaze::R24, MBlaze::R25, MBlaze::R26, MBlaze::R27,
- MBlaze::R28, MBlaze::R29, MBlaze::R30, MBlaze::R31,
- 0
- };
-
- return CalleeSavedRegs;
-}
-
-BitVector MBlazeRegisterInfo::
-getReservedRegs(const MachineFunction &MF) const {
- BitVector Reserved(getNumRegs());
- Reserved.set(MBlaze::R0);
- Reserved.set(MBlaze::R1);
- Reserved.set(MBlaze::R2);
- Reserved.set(MBlaze::R13);
- Reserved.set(MBlaze::R14);
- Reserved.set(MBlaze::R15);
- Reserved.set(MBlaze::R16);
- Reserved.set(MBlaze::R17);
- Reserved.set(MBlaze::R18);
- Reserved.set(MBlaze::R19);
- return Reserved;
-}
-
-// FrameIndexes represent objects inside an abstract stack.
-// We must replace each FrameIndex with a direct stack/frame pointer
-// reference.
-void MBlazeRegisterInfo::
-eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
- unsigned FIOperandNum, RegScavenger *RS) const {
- MachineInstr &MI = *II;
- MachineFunction &MF = *MI.getParent()->getParent();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- unsigned OFIOperandNum = FIOperandNum == 2 ? 1 : 2;
-
- DEBUG(dbgs() << "\nFunction : " << MF.getName() << "\n";
- dbgs() << "<--------->\n" << MI);
-
- int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
- int stackSize = MFI->getStackSize();
- int spOffset = MFI->getObjectOffset(FrameIndex);
-
- DEBUG(MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- dbgs() << "FrameIndex : " << FrameIndex << "\n"
- << "spOffset : " << spOffset << "\n"
- << "stackSize : " << stackSize << "\n"
- << "isFixed : " << MFI->isFixedObjectIndex(FrameIndex) << "\n"
- << "isLiveIn : " << MBlazeFI->isLiveIn(FrameIndex) << "\n"
- << "isSpill : " << MFI->isSpillSlotObjectIndex(FrameIndex)
- << "\n" );
-
- // As explained in LowerFormalArguments, detect negative offsets
- // and adjust SPOffsets considering the final stack size.
- int Offset = (spOffset < 0) ? (stackSize - spOffset) : spOffset;
- Offset += MI.getOperand(OFIOperandNum).getImm();
-
- DEBUG(dbgs() << "Offset : " << Offset << "\n" << "<--------->\n");
-
- MI.getOperand(OFIOperandNum).ChangeToImmediate(Offset);
- MI.getOperand(FIOperandNum).ChangeToRegister(getFrameRegister(MF), false);
-}
-
-void MBlazeRegisterInfo::
-processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *) const {
- // Set the stack offset where GP must be saved/loaded from.
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- if (MBlazeFI->needGPSaveRestore())
- MFI->setObjectOffset(MBlazeFI->getGPFI(), MBlazeFI->getGPStackOffset());
-}
-
-unsigned MBlazeRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- return TFI->hasFP(MF) ? MBlaze::R19 : MBlaze::R1;
-}
-
-unsigned MBlazeRegisterInfo::getEHExceptionRegister() const {
- llvm_unreachable("What is the exception register");
-}
-
-unsigned MBlazeRegisterInfo::getEHHandlerRegister() const {
- llvm_unreachable("What is the exception handler register");
-}
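
Editorial aside, not part of the imported diff: the offset arithmetic in the deleted eliminateFrameIndex above is the subtle part of this file, so here is a minimal standalone C++ sketch of just that computation. The stack size, object offset, and immediate are made-up example values; only the arithmetic mirrors the removed code.

// Sketch of the MBlaze frame-index offset computation (illustrative values).
#include <cstdio>

int main() {
  int stackSize = 32;  // final stack size of the function, in bytes
  int spOffset  = -4;  // negative offsets denote fixed (incoming) stack slots
  int imm       = 0;   // immediate already present on the memory operand

  // Negative offsets are rebased against the final stack size, as the
  // deleted eliminateFrameIndex did; non-negative offsets pass through.
  int Offset = (spOffset < 0) ? (stackSize - spOffset) : spOffset;
  Offset += imm;

  std::printf("Offset = %d\n", Offset);  // 32 - (-4) + 0 = 36
  return 0;
}
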
diff --git a/lib/Target/MBlaze/MBlazeRegisterInfo.h b/lib/Target/MBlaze/MBlazeRegisterInfo.h
deleted file mode 100644
index 497f3866c9ca..000000000000
--- a/lib/Target/MBlaze/MBlazeRegisterInfo.h
+++ /dev/null
@@ -1,71 +0,0 @@
-//===-- MBlazeRegisterInfo.h - MBlaze Register Information Impl -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the MBlaze implementation of the TargetRegisterInfo
-// class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZEREGISTERINFO_H
-#define MBLAZEREGISTERINFO_H
-
-#include "MBlaze.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-
-#define GET_REGINFO_HEADER
-#include "MBlazeGenRegisterInfo.inc"
-
-namespace llvm {
-class MBlazeSubtarget;
-class TargetInstrInfo;
-class Type;
-
-namespace MBlaze {
-  /// SubregIndex - The index of variously sized subregister classes. Note that
- /// these indices must be kept in sync with the class indices in the
- /// MBlazeRegisterInfo.td file.
- enum SubregIndex {
- SUBREG_FPEVEN = 1, SUBREG_FPODD = 2
- };
-}
-
-struct MBlazeRegisterInfo : public MBlazeGenRegisterInfo {
- const MBlazeSubtarget &Subtarget;
- const TargetInstrInfo &TII;
-
- MBlazeRegisterInfo(const MBlazeSubtarget &Subtarget,
- const TargetInstrInfo &tii);
-
- /// Get PIC indirect call register
- static unsigned getPICCallReg();
-
- /// Code Generation virtual methods...
- const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
-
- BitVector getReservedRegs(const MachineFunction &MF) const;
-
- /// Stack Frame Processing Methods
- void eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
-
- void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
-
- /// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const;
-
- /// Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/lib/Target/MBlaze/MBlazeRegisterInfo.td b/lib/Target/MBlaze/MBlazeRegisterInfo.td
deleted file mode 100644
index 64cae5cff85d..000000000000
--- a/lib/Target/MBlaze/MBlazeRegisterInfo.td
+++ /dev/null
@@ -1,148 +0,0 @@
-//===-- MBlazeRegisterInfo.td - MBlaze Register defs -------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Declarations that describe the MicroBlaze register file
-//===----------------------------------------------------------------------===//
-
-// We have banks of 32 registers each.
-class MBlazeReg<string n> : Register<n> {
- field bits<5> Num;
- let Namespace = "MBlaze";
-}
-
-// Special purpose registers have 15-bit values
-class MBlazeSReg<string n> : Register<n> {
- field bits<15> Num;
- let Namespace = "MBlaze";
-}
-
-// MBlaze general purpose registers
-class MBlazeGPRReg<bits<5> num, string n> : MBlazeReg<n> {
- let Num = num;
-}
-
-// MBlaze special purpose registers
-class MBlazeSPRReg<bits<15> num, string n> : MBlazeSReg<n> {
- let Num = num;
-}
-
-//===----------------------------------------------------------------------===//
-// Registers
-//===----------------------------------------------------------------------===//
-
-let Namespace = "MBlaze" in {
- // General Purpose Registers
- def R0 : MBlazeGPRReg< 0, "r0">, DwarfRegNum<[0]>;
- def R1 : MBlazeGPRReg< 1, "r1">, DwarfRegNum<[1]>;
- def R2 : MBlazeGPRReg< 2, "r2">, DwarfRegNum<[2]>;
- def R3 : MBlazeGPRReg< 3, "r3">, DwarfRegNum<[3]>;
- def R4 : MBlazeGPRReg< 4, "r4">, DwarfRegNum<[4]>;
- def R5 : MBlazeGPRReg< 5, "r5">, DwarfRegNum<[5]>;
- def R6 : MBlazeGPRReg< 6, "r6">, DwarfRegNum<[6]>;
- def R7 : MBlazeGPRReg< 7, "r7">, DwarfRegNum<[7]>;
- def R8 : MBlazeGPRReg< 8, "r8">, DwarfRegNum<[8]>;
- def R9 : MBlazeGPRReg< 9, "r9">, DwarfRegNum<[9]>;
- def R10 : MBlazeGPRReg< 10, "r10">, DwarfRegNum<[10]>;
- def R11 : MBlazeGPRReg< 11, "r11">, DwarfRegNum<[11]>;
- def R12 : MBlazeGPRReg< 12, "r12">, DwarfRegNum<[12]>;
- def R13 : MBlazeGPRReg< 13, "r13">, DwarfRegNum<[13]>;
- def R14 : MBlazeGPRReg< 14, "r14">, DwarfRegNum<[14]>;
- def R15 : MBlazeGPRReg< 15, "r15">, DwarfRegNum<[15]>;
- def R16 : MBlazeGPRReg< 16, "r16">, DwarfRegNum<[16]>;
- def R17 : MBlazeGPRReg< 17, "r17">, DwarfRegNum<[17]>;
- def R18 : MBlazeGPRReg< 18, "r18">, DwarfRegNum<[18]>;
- def R19 : MBlazeGPRReg< 19, "r19">, DwarfRegNum<[19]>;
- def R20 : MBlazeGPRReg< 20, "r20">, DwarfRegNum<[20]>;
- def R21 : MBlazeGPRReg< 21, "r21">, DwarfRegNum<[21]>;
- def R22 : MBlazeGPRReg< 22, "r22">, DwarfRegNum<[22]>;
- def R23 : MBlazeGPRReg< 23, "r23">, DwarfRegNum<[23]>;
- def R24 : MBlazeGPRReg< 24, "r24">, DwarfRegNum<[24]>;
- def R25 : MBlazeGPRReg< 25, "r25">, DwarfRegNum<[25]>;
- def R26 : MBlazeGPRReg< 26, "r26">, DwarfRegNum<[26]>;
- def R27 : MBlazeGPRReg< 27, "r27">, DwarfRegNum<[27]>;
- def R28 : MBlazeGPRReg< 28, "r28">, DwarfRegNum<[28]>;
- def R29 : MBlazeGPRReg< 29, "r29">, DwarfRegNum<[29]>;
- def R30 : MBlazeGPRReg< 30, "r30">, DwarfRegNum<[30]>;
- def R31 : MBlazeGPRReg< 31, "r31">, DwarfRegNum<[31]>;
-
- // Special Purpose Registers
- def RPC : MBlazeSPRReg<0x0000, "rpc">, DwarfRegNum<[32]>;
- def RMSR : MBlazeSPRReg<0x0001, "rmsr">, DwarfRegNum<[33]>;
- def REAR : MBlazeSPRReg<0x0003, "rear">, DwarfRegNum<[34]>;
- def RESR : MBlazeSPRReg<0x0005, "resr">, DwarfRegNum<[35]>;
- def RFSR : MBlazeSPRReg<0x0007, "rfsr">, DwarfRegNum<[36]>;
- def RBTR : MBlazeSPRReg<0x000B, "rbtr">, DwarfRegNum<[37]>;
- def REDR : MBlazeSPRReg<0x000D, "redr">, DwarfRegNum<[38]>;
- def RPID : MBlazeSPRReg<0x1000, "rpid">, DwarfRegNum<[39]>;
- def RZPR : MBlazeSPRReg<0x1001, "rzpr">, DwarfRegNum<[40]>;
- def RTLBX : MBlazeSPRReg<0x1002, "rtlbx">, DwarfRegNum<[41]>;
- def RTLBLO : MBlazeSPRReg<0x1003, "rtlblo">, DwarfRegNum<[42]>;
- def RTLBHI : MBlazeSPRReg<0x1004, "rtlbhi">, DwarfRegNum<[43]>;
- def RTLBSX : MBlazeSPRReg<0x1004, "rtlbsx">, DwarfRegNum<[44]>;
- def RPVR0 : MBlazeSPRReg<0x2000, "rpvr0">, DwarfRegNum<[45]>;
- def RPVR1 : MBlazeSPRReg<0x2001, "rpvr1">, DwarfRegNum<[46]>;
- def RPVR2 : MBlazeSPRReg<0x2002, "rpvr2">, DwarfRegNum<[47]>;
- def RPVR3 : MBlazeSPRReg<0x2003, "rpvr3">, DwarfRegNum<[48]>;
- def RPVR4 : MBlazeSPRReg<0x2004, "rpvr4">, DwarfRegNum<[49]>;
- def RPVR5 : MBlazeSPRReg<0x2005, "rpvr5">, DwarfRegNum<[50]>;
- def RPVR6 : MBlazeSPRReg<0x2006, "rpvr6">, DwarfRegNum<[51]>;
- def RPVR7 : MBlazeSPRReg<0x2007, "rpvr7">, DwarfRegNum<[52]>;
- def RPVR8 : MBlazeSPRReg<0x2008, "rpvr8">, DwarfRegNum<[53]>;
- def RPVR9 : MBlazeSPRReg<0x2009, "rpvr9">, DwarfRegNum<[54]>;
- def RPVR10 : MBlazeSPRReg<0x200A, "rpvr10">, DwarfRegNum<[55]>;
- def RPVR11 : MBlazeSPRReg<0x200B, "rpvr11">, DwarfRegNum<[56]>;
-
-  // The carry bit. In the MicroBlaze this is really bit 29 of the
- // MSR register but this is the only bit of that register that we
- // are interested in modeling.
- def CARRY : MBlazeSPRReg<0x0000, "rmsr[c]">;
-}
-
-//===----------------------------------------------------------------------===//
-// Register Classes
-//===----------------------------------------------------------------------===//
-
-def GPR : RegisterClass<"MBlaze", [i32,f32], 32, (sequence "R%u", 0, 31)>;
-
-def SPR : RegisterClass<"MBlaze", [i32], 32, (add
- // Reserved
- RPC,
- RMSR,
- REAR,
- RESR,
- RFSR,
- RBTR,
- REDR,
- RPID,
- RZPR,
- RTLBX,
- RTLBLO,
- RTLBHI,
- RPVR0,
- RPVR1,
- RPVR2,
- RPVR3,
- RPVR4,
- RPVR5,
- RPVR6,
- RPVR7,
- RPVR8,
- RPVR9,
- RPVR10,
- RPVR11
- )>
-{
- // None of the special purpose registers are allocatable.
- let isAllocatable = 0;
-}
-
-def CRC : RegisterClass<"MBlaze", [i32], 32, (add CARRY)> {
- let CopyCost = -1;
-}
diff --git a/lib/Target/MBlaze/MBlazeRelocations.h b/lib/Target/MBlaze/MBlazeRelocations.h
deleted file mode 100644
index 6387ee23ec9b..000000000000
--- a/lib/Target/MBlaze/MBlazeRelocations.h
+++ /dev/null
@@ -1,47 +0,0 @@
-//===-- MBlazeRelocations.h - MBlaze Code Relocations -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the MBlaze target-specific relocation types.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZERELOCATIONS_H
-#define MBLAZERELOCATIONS_H
-
-#include "llvm/CodeGen/MachineRelocation.h"
-
-namespace llvm {
- namespace MBlaze {
- enum RelocationType {
- /// reloc_pcrel_word - PC relative relocation, add the relocated value to
- /// the value already in memory, after we adjust it for where the PC is.
- reloc_pcrel_word = 0,
-
- /// reloc_picrel_word - PIC base relative relocation, add the relocated
- /// value to the value already in memory, after we adjust it for where the
- /// PIC base is.
- reloc_picrel_word = 1,
-
- /// reloc_absolute_word - absolute relocation, just add the relocated
- /// value to the value already in memory.
- reloc_absolute_word = 2,
-
- /// reloc_absolute_word_sext - absolute relocation, just add the relocated
- /// value to the value already in memory. In object files, it represents a
- /// value which must be sign-extended when resolving the relocation.
- reloc_absolute_word_sext = 3,
-
- /// reloc_absolute_dword - absolute relocation, just add the relocated
- /// value to the value already in memory.
- reloc_absolute_dword = 4
- };
- }
-}
-
-#endif
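
Editorial aside, not part of the imported diff: the relocation kinds above are documented only in prose, so here is a small standalone C++ sketch restating the two word-sized cases as arithmetic. The helper names and the particular PC adjustment are illustrative assumptions, not code taken from the MBlaze JIT.

// Illustrative restatement of two of the deleted relocation descriptions.
#include <cstdint>
#include <cstdio>

// reloc_absolute_word: add the relocated value to the value already in memory.
static uint32_t applyAbsoluteWord(uint32_t inMemory, uint32_t relocated) {
  return inMemory + relocated;
}

// reloc_pcrel_word: as above, but the relocated value is first adjusted for
// where the PC is (subtracting the PC here is an assumption for illustration).
static uint32_t applyPCRelWord(uint32_t inMemory, uint32_t relocated,
                               uint32_t pc) {
  return inMemory + (relocated - pc);
}

int main() {
  std::printf("abs  : 0x%x\n", applyAbsoluteWord(0, 0x1000));       // 0x1000
  std::printf("pcrel: 0x%x\n", applyPCRelWord(0, 0x1000, 0x0400));  // 0xc00
  return 0;
}
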
diff --git a/lib/Target/MBlaze/MBlazeSchedule.td b/lib/Target/MBlaze/MBlazeSchedule.td
deleted file mode 100644
index cd5691ce644f..000000000000
--- a/lib/Target/MBlaze/MBlazeSchedule.td
+++ /dev/null
@@ -1,50 +0,0 @@
-//===-- MBlazeSchedule.td - MBlaze Scheduling Definitions --*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// MBlaze functional units.
-//===----------------------------------------------------------------------===//
-def IF : FuncUnit;
-def ID : FuncUnit;
-def EX : FuncUnit;
-def MA : FuncUnit;
-def WB : FuncUnit;
-
-//===----------------------------------------------------------------------===//
-// Instruction Itinerary classes used for MBlaze
-//===----------------------------------------------------------------------===//
-def IIC_ALU : InstrItinClass;
-def IIC_ALUm : InstrItinClass;
-def IIC_ALUd : InstrItinClass;
-def IIC_SHT : InstrItinClass;
-def IIC_FSLg : InstrItinClass;
-def IIC_FSLp : InstrItinClass;
-def IIC_MEMs : InstrItinClass;
-def IIC_MEMl : InstrItinClass;
-def IIC_FPU : InstrItinClass;
-def IIC_FPUd : InstrItinClass;
-def IIC_FPUf : InstrItinClass;
-def IIC_FPUi : InstrItinClass;
-def IIC_FPUs : InstrItinClass;
-def IIC_FPUc : InstrItinClass;
-def IIC_BR : InstrItinClass;
-def IIC_BRc : InstrItinClass;
-def IIC_BRl : InstrItinClass;
-def IIC_WDC : InstrItinClass;
-def IIC_Pseudo : InstrItinClass;
-
-//===----------------------------------------------------------------------===//
-// MBlaze instruction itineraries for three stage pipeline.
-//===----------------------------------------------------------------------===//
-include "MBlazeSchedule3.td"
-
-//===----------------------------------------------------------------------===//
-// MBlaze instruction itineraries for five stage pipeline.
-//===----------------------------------------------------------------------===//
-include "MBlazeSchedule5.td"
diff --git a/lib/Target/MBlaze/MBlazeSchedule3.td b/lib/Target/MBlaze/MBlazeSchedule3.td
deleted file mode 100644
index 20257a60a0fa..000000000000
--- a/lib/Target/MBlaze/MBlazeSchedule3.td
+++ /dev/null
@@ -1,236 +0,0 @@
-//===-- MBlazeSchedule3.td - MBlaze Scheduling Definitions -*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// MBlaze instruction itineraries for the three stage pipeline.
-//===----------------------------------------------------------------------===//
-def MBlazePipe3Itineraries : ProcessorItineraries<
- [IF,ID,EX], [], [
-
- // ALU instruction with one destination register and either two register
- // source operands or one register source operand and one immediate operand.
- // The instruction takes one cycle to execute in each of the stages. The
- // two source operands are read during the decode stage and the result is
- // ready after the execute stage.
- InstrItinData< IIC_ALU,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]>], // one cycle in execute stage
- [ 2 // result ready after two cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // ALU multiply instruction with one destination register and either two
- // register source operands or one register source operand and one immediate
- // operand. The instruction takes one cycle to execute in each of the
- // pipeline stages except the execute stage, which takes three cycles. The
- // two source operands are read during the decode stage and the result is
- // ready after the execute stage.
- InstrItinData< IIC_ALUm,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<3,[EX]>], // three cycles in execute stage
- [ 4 // result ready after four cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
-  // ALU divide instruction with one destination register and two register source
-  // operands. The instruction takes one cycle to execute in each of the pipeline
- // stages except the execute stage, which takes 34 cycles. The two
- // source operands are read during the decode stage and the result is ready
- // after the execute stage.
- InstrItinData< IIC_ALUd,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<34,[EX]>], // 34 cycles in execute stage
- [ 35 // result ready after 35 cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Shift instruction with one destination register and either two register
- // source operands or one register source operand and one immediate operand.
- // The instruction takes one cycle to execute in each of the pipeline stages
- // except the execute stage, which takes two cycles. The two source operands
- // are read during the decode stage and the result is ready after the execute
- // stage.
- InstrItinData< IIC_SHT,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<2,[EX]>], // two cycles in execute stage
- [ 3 // result ready after three cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Branch instruction with one source operand register. The instruction takes
- // one cycle to execute in each of the pipeline stages. The source operand is
- // read during the decode stage.
- InstrItinData< IIC_BR,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]>], // one cycle in execute stage
- [ 1 ]>, // first operand read after one cycle
-
- // Conditional branch instruction with two source operand registers. The
- // instruction takes one cycle to execute in each of the pipeline stages. The
- // two source operands are read during the decode stage.
- InstrItinData< IIC_BRc,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]>], // one cycle in execute stage
- [ 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Branch and link instruction with one destination register and one source
- // operand register. The instruction takes one cycle to execute in each of
- // the pipeline stages. The source operand is read during the decode stage
- // and the destination register is ready after the execute stage.
- InstrItinData< IIC_BRl,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]>], // one cycle in execute stage
- [ 2 // result ready after two cycles
- , 1 ]>, // first operand read after one cycle
-
- // Cache control instruction with two source operand registers. The
- // instruction takes one cycle to execute in each of the pipeline stages
-  // except the execute stage, which takes two cycles. The source
- // operands are read during the decode stage.
- InstrItinData< IIC_WDC,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<2,[EX]>], // two cycles in execute stage
- [ 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Floating point instruction with one destination register and two source
- // operand registers. The instruction takes one cycle to execute in each of
- // the pipeline stages except the execute stage, which takes six cycles. The
- // source operands are read during the decode stage and the results are ready
- // after the execute stage.
- InstrItinData< IIC_FPU,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<6,[EX]>], // six cycles in execute stage
- [ 7 // result ready after seven cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Floating point divide instruction with one destination register and two
- // source operand registers. The instruction takes one cycle to execute in
- // each of the pipeline stages except the execute stage, which takes 30
- // cycles. The source operands are read during the decode stage and the
- // results are ready after the execute stage.
- InstrItinData< IIC_FPUd,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
-               , InstrStage<30,[EX]>], // 30 cycles in execute stage
- [ 31 // result ready after 31 cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Convert floating point to integer instruction with one destination
- // register and one source operand register. The instruction takes one cycle
- // to execute in each of the pipeline stages except the execute stage,
- // which takes seven cycles. The source operands are read during the decode
- // stage and the results are ready after the execute stage.
- InstrItinData< IIC_FPUi,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<7,[EX]>], // seven cycles in execute stage
- [ 8 // result ready after eight cycles
- , 1 ]>, // first operand read after one cycle
-
- // Convert integer to floating point instruction with one destination
- // register and one source operand register. The instruction takes one cycle
- // to execute in each of the pipeline stages except the execute stage,
- // which takes six cycles. The source operands are read during the decode
- // stage and the results are ready after the execute stage.
- InstrItinData< IIC_FPUf,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<6,[EX]>], // six cycles in execute stage
- [ 7 // result ready after seven cycles
- , 1 ]>, // first operand read after one cycle
-
- // Floating point square root instruction with one destination register and
- // one source operand register. The instruction takes one cycle to execute in
- // each of the pipeline stages except the execute stage, which takes 29
- // cycles. The source operands are read during the decode stage and the
- // results are ready after the execute stage.
- InstrItinData< IIC_FPUs,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<29,[EX]>], // 29 cycles in execute stage
- [ 30 // result ready after 30 cycles
- , 1 ]>, // first operand read after one cycle
-
- // Floating point comparison instruction with one destination register and
- // two source operand registers. The instruction takes one cycle to execute
- // in each of the pipeline stages except the execute stage, which takes three
- // cycles. The source operands are read during the decode stage and the
- // results are ready after the execute stage.
- InstrItinData< IIC_FPUc,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<3,[EX]>], // three cycles in execute stage
- [ 4 // result ready after four cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // FSL get instruction with one register or immediate source operand and one
- // destination register. The instruction takes one cycle to execute in each
- // of the pipeline stages except the execute stage, which takes two cycles.
- // The one source operand is read during the decode stage and the result is
- // ready after the execute stage.
- InstrItinData< IIC_FSLg,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<2,[EX]>], // two cycles in execute stage
-               [ 3 // result ready after three cycles
- , 1 ]>, // first operand read after one cycle
-
- // FSL put instruction with either two register source operands or one
- // register source operand and one immediate operand. There is no result
- // produced by the instruction. The instruction takes one cycle to execute in
- // each of the pipeline stages except the execute stage, which takes two
- // cycles. The two source operands are read during the decode stage.
- InstrItinData< IIC_FSLp,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<2,[EX]>], // two cycles in execute stage
- [ 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Memory store instruction with either three register source operands or two
- // register source operands and one immediate operand. There is no result
- // produced by the instruction. The instruction takes one cycle to execute in
- // each of the pipeline stages except the execute stage, which takes two
- // cycles. All of the source operands are read during the decode stage.
- InstrItinData< IIC_MEMs,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<2,[EX]>], // two cycles in execute stage
- [ 1 // first operand read after one cycle
- , 1 // second operand read after one cycle
- , 1 ]>, // third operand read after one cycle
-
- // Memory load instruction with one destination register and either two
- // register source operands or one register source operand and one immediate
- // operand. The instruction takes one cycle to execute in each of the
- // pipeline stages except the execute stage, which takes two cycles. All of
- // the source operands are read during the decode stage and the result is
- // ready after the execute stage.
- InstrItinData< IIC_MEMl,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<2,[EX]>], // two cycles in execute stage
-               [ 3 // result ready after three cycles
- , 1 // second operand read after one cycle
- , 1 ]> // third operand read after one cycle
-]>;
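
Editorial aside, not part of the imported diff: a reading aid for the itinerary entries above. In this three-stage file the first number in each latency list (the cycle at which the result is ready) is consistently the EX cycle count plus one, while source operands are read one cycle in, during decode. That "+1" relationship is an observation about the deleted data, not something the TableGen itinerary format mandates; the sketch below merely checks it against a few entries.

// Checks the result-latency pattern observed in the deleted MBlazeSchedule3.td.
#include <cassert>

static int resultLatency(int exCycles) {
  return exCycles + 1;  // observed convention: result ready one cycle after EX
}

int main() {
  assert(resultLatency(1)  == 2);   // IIC_ALU : 1 cycle in EX   -> latency 2
  assert(resultLatency(34) == 35);  // IIC_ALUd: 34 cycles in EX -> latency 35
  assert(resultLatency(6)  == 7);   // IIC_FPU : 6 cycles in EX  -> latency 7
  return 0;
}
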
diff --git a/lib/Target/MBlaze/MBlazeSchedule5.td b/lib/Target/MBlaze/MBlazeSchedule5.td
deleted file mode 100644
index ab53b424ded3..000000000000
--- a/lib/Target/MBlaze/MBlazeSchedule5.td
+++ /dev/null
@@ -1,267 +0,0 @@
-//===-- MBlazeSchedule5.td - MBlaze Scheduling Definitions -*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// MBlaze instruction itineraries for the five stage pipeline.
-//===----------------------------------------------------------------------===//
-def MBlazePipe5Itineraries : ProcessorItineraries<
- [IF,ID,EX,MA,WB], [], [
-
- // ALU instruction with one destination register and either two register
- // source operands or one register source operand and one immediate operand.
- // The instruction takes one cycle to execute in each of the stages. The
- // two source operands are read during the decode stage and the result is
- // ready after the execute stage.
- InstrItinData< IIC_ALU,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 2 // result ready after two cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // ALU multiply instruction with one destination register and either two
- // register source operands or one register source operand and one immediate
- // operand. The instruction takes one cycle to execute in each of the
- // pipeline stages. The two source operands are read during the decode stage
- // and the result is ready after the execute stage.
- InstrItinData< IIC_ALUm,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 2 // result ready after two cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
-  // ALU divide instruction with one destination register and two register source
-  // operands. The instruction takes one cycle to execute in each of the pipeline
- // stages except the memory access stage, which takes 31 cycles. The two
- // source operands are read during the decode stage and the result is ready
- // after the memory access stage.
- InstrItinData< IIC_ALUd,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<31,[MA]> // 31 cycles in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 33 // result ready after 33 cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Shift instruction with one destination register and either two register
- // source operands or one register source operand and one immediate operand.
- // The instruction takes one cycle to execute in each of the pipeline stages.
- // The two source operands are read during the decode stage and the result is
- // ready after the memory access stage.
- InstrItinData< IIC_SHT,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 3 // result ready after three cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Branch instruction with one source operand register. The instruction takes
- // one cycle to execute in each of the pipeline stages. The source operand is
- // read during the decode stage.
- InstrItinData< IIC_BR,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 1 ]>, // first operand read after one cycle
-
- // Conditional branch instruction with two source operand registers. The
- // instruction takes one cycle to execute in each of the pipeline stages. The
- // two source operands are read during the decode stage.
- InstrItinData< IIC_BRc,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Branch and link instruction with one destination register and one source
- // operand register. The instruction takes one cycle to execute in each of
- // the pipeline stages. The source operand is read during the decode stage
- // and the destination register is ready after the writeback stage.
- InstrItinData< IIC_BRl,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 4 // result ready after four cycles
- , 1 ]>, // first operand read after one cycle
-
- // Cache control instruction with two source operand registers. The
- // instruction takes one cycle to execute in each of the pipeline stages
- // except the memory access stage, which takes two cycles. The source
- // operands are read during the decode stage.
- InstrItinData< IIC_WDC,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<2,[MA]> // two cycles in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Floating point instruction with one destination register and two source
- // operand registers. The instruction takes one cycle to execute in each of
- // the pipeline stages except the memory access stage, which takes two
- // cycles. The source operands are read during the decode stage and the
- // results are ready after the writeback stage.
- InstrItinData< IIC_FPU,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<2,[MA]> // two cycles in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 5 // result ready after five cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Floating point divide instruction with one destination register and two
- // source operand registers. The instruction takes one cycle to execute in
- // each of the pipeline stages except the memory access stage, which takes 26
- // cycles. The source operands are read during the decode stage and the
- // results are ready after the writeback stage.
- InstrItinData< IIC_FPUd,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<26,[MA]> // 26 cycles in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 29 // result ready after 29 cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Convert floating point to integer instruction with one destination
- // register and one source operand register. The instruction takes one cycle
- // to execute in each of the pipeline stages except the memory access stage,
- // which takes three cycles. The source operands are read during the decode
- // stage and the results are ready after the writeback stage.
- InstrItinData< IIC_FPUi,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<3,[MA]> // three cycles in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 6 // result ready after six cycles
- , 1 ]>, // first operand read after one cycle
-
- // Convert integer to floating point instruction with one destination
- // register and one source operand register. The instruction takes one cycle
- // to execute in each of the pipeline stages except the memory access stage,
- // which takes two cycles. The source operands are read during the decode
- // stage and the results are ready after the writeback stage.
- InstrItinData< IIC_FPUf,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<2,[MA]> // two cycles in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 5 // result ready after five cycles
- , 1 ]>, // first operand read after one cycle
-
- // Floating point square root instruction with one destination register and
- // one source operand register. The instruction takes one cycle to execute in
- // each of the pipeline stages except the memory access stage, which takes 25
- // cycles. The source operands are read during the decode stage and the
- // results are ready after the writeback stage.
- InstrItinData< IIC_FPUs,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<25,[MA]> // 25 cycles in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 28 // result ready after 28 cycles
- , 1 ]>, // first operand read after one cycle
-
- // Floating point comparison instruction with one destination register and
- // two source operand registers. The instruction takes one cycle to execute
- // in each of the pipeline stages. The source operands are read during the
- // decode stage and the results are ready after the execute stage.
- InstrItinData< IIC_FPUc,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 2 // result ready after two cycles
- , 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // FSL get instruction with one register or immediate source operand and one
- // destination register. The instruction takes one cycle to execute in each
- // of the pipeline stages. The one source operand is read during the decode
- // stage and the result is ready after the execute stage.
- InstrItinData< IIC_FSLg,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 2 // result ready after two cycles
- , 1 ]>, // first operand read after one cycle
-
- // FSL put instruction with either two register source operands or one
- // register source operand and one immediate operand. There is no result
- // produced by the instruction. The instruction takes one cycle to execute in
- // each of the pipeline stages. The two source operands are read during the
- // decode stage.
- InstrItinData< IIC_FSLp,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 1 // first operand read after one cycle
- , 1 ]>, // second operand read after one cycle
-
- // Memory store instruction with either three register source operands or two
- // register source operands and one immediate operand. There is no result
- // produced by the instruction. The instruction takes one cycle to execute in
- // each of the pipeline stages. All of the source operands are read during
- // the decode stage.
- InstrItinData< IIC_MEMs,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 1 // first operand read after one cycle
- , 1 // second operand read after one cycle
- , 1 ]>, // third operand read after one cycle
-
- // Memory load instruction with one destination register and either two
- // register source operands or one register source operand and one immediate
- // operand. The instruction takes one cycle to execute in each of the
- // pipeline stages. All of the source operands are read during the decode
- // stage and the result is ready after the writeback stage.
- InstrItinData< IIC_MEMl,
- [ InstrStage<1,[IF]> // one cycle in fetch stage
- , InstrStage<1,[ID]> // one cycle in decode stage
- , InstrStage<1,[EX]> // one cycle in execute stage
- , InstrStage<1,[MA]> // one cycle in memory access stage
- , InstrStage<1,[WB]>], // one cycle in write back stage
- [ 4 // result ready after four cycles
- , 1 // second operand read after one cycle
- , 1 ]> // third operand read after one cycle
-]>;
diff --git a/lib/Target/MBlaze/MBlazeSelectionDAGInfo.h b/lib/Target/MBlaze/MBlazeSelectionDAGInfo.h
deleted file mode 100644
index 9f8e2aad4b20..000000000000
--- a/lib/Target/MBlaze/MBlazeSelectionDAGInfo.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===-- MBlazeSelectionDAGInfo.h - MBlaze SelectionDAG Info -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the MBlaze subclass for TargetSelectionDAGInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZESELECTIONDAGINFO_H
-#define MBLAZESELECTIONDAGINFO_H
-
-#include "llvm/Target/TargetSelectionDAGInfo.h"
-
-namespace llvm {
-
-class MBlazeTargetMachine;
-
-class MBlazeSelectionDAGInfo : public TargetSelectionDAGInfo {
-public:
- explicit MBlazeSelectionDAGInfo(const MBlazeTargetMachine &TM);
- ~MBlazeSelectionDAGInfo();
-};
-
-}
-
-#endif
diff --git a/lib/Target/MBlaze/MBlazeSubtarget.cpp b/lib/Target/MBlaze/MBlazeSubtarget.cpp
deleted file mode 100644
index dc2ad29be2a2..000000000000
--- a/lib/Target/MBlaze/MBlazeSubtarget.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- MBlazeSubtarget.cpp - MBlaze Subtarget Information ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the MBlaze specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeSubtarget.h"
-#include "MBlaze.h"
-#include "MBlazeRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_SUBTARGETINFO_TARGET_DESC
-#define GET_SUBTARGETINFO_CTOR
-#include "MBlazeGenSubtargetInfo.inc"
-
-using namespace llvm;
-
-MBlazeSubtarget::MBlazeSubtarget(const std::string &TT,
- const std::string &CPU,
- const std::string &FS):
- MBlazeGenSubtargetInfo(TT, CPU, FS),
- HasBarrel(false), HasDiv(false), HasMul(false), HasPatCmp(false),
- HasFPU(false), HasMul64(false), HasSqrt(false)
-{
- // Parse features string.
- std::string CPUName = CPU;
- if (CPUName.empty())
- CPUName = "mblaze";
- ParseSubtargetFeatures(CPUName, FS);
-
- // Only use instruction scheduling if the selected CPU has an instruction
- // itinerary (the default CPU is the only one that doesn't).
- HasItin = CPUName != "mblaze";
- DEBUG(dbgs() << "CPU " << CPUName << "(" << HasItin << ")\n");
-
- // Initialize scheduling itinerary for the specified CPU.
- InstrItins = getInstrItineraryForCPU(CPUName);
-}
-
-bool MBlazeSubtarget::
-enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtargetInfo::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const {
- Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
- CriticalPathRCs.clear();
- CriticalPathRCs.push_back(&MBlaze::GPRRegClass);
- return HasItin && OptLevel >= CodeGenOpt::Default;
-}
diff --git a/lib/Target/MBlaze/MBlazeSubtarget.h b/lib/Target/MBlaze/MBlazeSubtarget.h
deleted file mode 100644
index ed43d21f30c5..000000000000
--- a/lib/Target/MBlaze/MBlazeSubtarget.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//===-- MBlazeSubtarget.h - Define Subtarget for the MBlaze ----*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the MBlaze specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZESUBTARGET_H
-#define MBLAZESUBTARGET_H
-
-#include "llvm/MC/MCInstrItineraries.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-#include <string>
-
-#define GET_SUBTARGETINFO_HEADER
-#include "MBlazeGenSubtargetInfo.inc"
-
-namespace llvm {
-class StringRef;
-
-class MBlazeSubtarget : public MBlazeGenSubtargetInfo {
-
-protected:
- bool HasBarrel;
- bool HasDiv;
- bool HasMul;
- bool HasPatCmp;
- bool HasFPU;
- bool HasMul64;
- bool HasSqrt;
- bool HasItin;
-
- InstrItineraryData InstrItins;
-
-public:
-
- /// This constructor initializes the data members to match that
- /// of the specified triple.
- MBlazeSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS);
-
- /// ParseSubtargetFeatures - Parses features string setting specified
- /// subtarget options. Definition of function is auto generated by tblgen.
- void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
-
-  /// Compute the maximum number of issues per cycle for the
- /// MBlaze scheduling itineraries.
- void computeIssueWidth();
-
- /// enablePostRAScheduler - True at 'More' optimization.
- bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtargetInfo::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const;
-
-  /// getInstrItineraryData - Return the instruction itineraries for the subtarget.
- const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
-
- bool hasItin() const { return HasItin; }
- bool hasPCMP() const { return HasPatCmp; }
- bool hasFPU() const { return HasFPU; }
- bool hasSqrt() const { return HasSqrt; }
- bool hasMul() const { return HasMul; }
- bool hasMul64() const { return HasMul64; }
- bool hasDiv() const { return HasDiv; }
- bool hasBarrel() const { return HasBarrel; }
-};
-} // End llvm namespace
-
-#endif
diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.cpp b/lib/Target/MBlaze/MBlazeTargetMachine.cpp
deleted file mode 100644
index bcdd32fed947..000000000000
--- a/lib/Target/MBlaze/MBlazeTargetMachine.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//===-- MBlazeTargetMachine.cpp - Define TargetMachine for MBlaze ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Implements the info about MBlaze target spec.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeTargetMachine.h"
-#include "MBlaze.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/PassManager.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Target/TargetOptions.h"
-using namespace llvm;
-
-extern "C" void LLVMInitializeMBlazeTarget() {
- // Register the target.
- RegisterTargetMachine<MBlazeTargetMachine> X(TheMBlazeTarget);
-}
-
-// DataLayout --> big-endian, 32-bit pointer/ABI/alignment.
-// The stack is always 8-byte aligned.
-// In the function prologue the stack is created by decrementing the stack
-// pointer. Once decremented, all references use positive offsets from the
-// stack/frame pointer, so using StackGrowsUp makes the handling easier.
-MBlazeTargetMachine::
-MBlazeTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS),
- DL("E-p:32:32:32-i8:8:8-i16:16:16"),
- InstrInfo(*this),
- FrameLowering(Subtarget),
- TLInfo(*this), TSInfo(*this),
- InstrItins(Subtarget.getInstrItineraryData()) {
-}
-
-namespace {
-/// MBlaze Code Generator Pass Configuration Options.
-class MBlazePassConfig : public TargetPassConfig {
-public:
- MBlazePassConfig(MBlazeTargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {}
-
- MBlazeTargetMachine &getMBlazeTargetMachine() const {
- return getTM<MBlazeTargetMachine>();
- }
-
- virtual bool addInstSelector();
- virtual bool addPreEmitPass();
-};
-} // namespace
-
-TargetPassConfig *MBlazeTargetMachine::createPassConfig(PassManagerBase &PM) {
- return new MBlazePassConfig(this, PM);
-}
-
-// Install an instruction selector pass, using the
-// ISelDag to generate MBlaze code.
-bool MBlazePassConfig::addInstSelector() {
- addPass(createMBlazeISelDag(getMBlazeTargetMachine()));
- return false;
-}
-
-// Implemented by targets that want to run passes immediately before
-// machine code is emitted. Return true if -print-machineinstrs should
-// print out the code after the passes.
-bool MBlazePassConfig::addPreEmitPass() {
- addPass(createMBlazeDelaySlotFillerPass(getMBlazeTargetMachine()));
- return true;
-}
diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.h b/lib/Target/MBlaze/MBlazeTargetMachine.h
deleted file mode 100644
index 956794dddaf9..000000000000
--- a/lib/Target/MBlaze/MBlazeTargetMachine.h
+++ /dev/null
@@ -1,80 +0,0 @@
-//===-- MBlazeTargetMachine.h - Define TargetMachine for MBlaze -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the MBlaze specific subclass of TargetMachine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZE_TARGETMACHINE_H
-#define MBLAZE_TARGETMACHINE_H
-
-#include "MBlazeFrameLowering.h"
-#include "MBlazeISelLowering.h"
-#include "MBlazeInstrInfo.h"
-#include "MBlazeIntrinsicInfo.h"
-#include "MBlazeSelectionDAGInfo.h"
-#include "MBlazeSubtarget.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
- class formatted_raw_ostream;
-
- class MBlazeTargetMachine : public LLVMTargetMachine {
- MBlazeSubtarget Subtarget;
- const DataLayout DL; // Calculates type size & alignment
- MBlazeInstrInfo InstrInfo;
- MBlazeFrameLowering FrameLowering;
- MBlazeTargetLowering TLInfo;
- MBlazeSelectionDAGInfo TSInfo;
- MBlazeIntrinsicInfo IntrinsicInfo;
- InstrItineraryData InstrItins;
-
- public:
- MBlazeTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
-
- virtual const MBlazeInstrInfo *getInstrInfo() const
- { return &InstrInfo; }
-
- virtual const InstrItineraryData *getInstrItineraryData() const
- { return &InstrItins; }
-
- virtual const TargetFrameLowering *getFrameLowering() const
- { return &FrameLowering; }
-
- virtual const MBlazeSubtarget *getSubtargetImpl() const
- { return &Subtarget; }
-
- virtual const DataLayout *getDataLayout() const
- { return &DL;}
-
- virtual const MBlazeRegisterInfo *getRegisterInfo() const
- { return &InstrInfo.getRegisterInfo(); }
-
- virtual const MBlazeTargetLowering *getTargetLowering() const
- { return &TLInfo; }
-
- virtual const MBlazeSelectionDAGInfo* getSelectionDAGInfo() const
- { return &TSInfo; }
-
- const TargetIntrinsicInfo *getIntrinsicInfo() const
- { return &IntrinsicInfo; }
-
- // Pass Pipeline Configuration
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
- };
-} // End llvm namespace
-
-#endif
diff --git a/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp b/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
deleted file mode 100644
index a7a0a68b1612..000000000000
--- a/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-//===-- MBlazeTargetObjectFile.cpp - MBlaze object files ------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeTargetObjectFile.h"
-#include "MBlazeSubtarget.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/GlobalVariable.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSectionELF.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Target/TargetMachine.h"
-using namespace llvm;
-
-void MBlazeTargetObjectFile::
-Initialize(MCContext &Ctx, const TargetMachine &TM) {
- TargetLoweringObjectFileELF::Initialize(Ctx, TM);
-
- SmallDataSection =
- getContext().getELFSection(".sdata", ELF::SHT_PROGBITS,
- ELF::SHF_WRITE |ELF::SHF_ALLOC,
- SectionKind::getDataRel());
-
- SmallBSSSection =
- getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
- ELF::SHF_WRITE |ELF::SHF_ALLOC,
- SectionKind::getBSS());
-
-}
-
-// An address must be loaded from a small section if its size does not exceed
-// the small-section size threshold. Data in this section must be addressed
-// using the gp_rel operator.
-static bool IsInSmallSection(uint64_t Size) {
- return Size > 0 && Size <= 8;
-}
-
-bool MBlazeTargetObjectFile::
-IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM) const {
- if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
- return false;
-
- return IsGlobalInSmallSection(GV, TM, getKindForGlobal(GV, TM));
-}
-
-/// IsGlobalInSmallSection - Return true if this global address should be
-/// placed into small data/bss section.
-bool MBlazeTargetObjectFile::
-IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
- SectionKind Kind) const {
- // Only global variables, not functions.
- const GlobalVariable *GVA = dyn_cast<GlobalVariable>(GV);
- if (!GVA)
- return false;
-
- // We can only do this for datarel or BSS objects for now.
- if (!Kind.isBSS() && !Kind.isDataRel())
- return false;
-
-  // If this is an internal constant string, there is a special
- // section for it, but not in small data/bss.
- if (Kind.isMergeable1ByteCString())
- return false;
-
- Type *Ty = GV->getType()->getElementType();
- return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty));
-}
-
-const MCSection *MBlazeTargetObjectFile::
-SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
-  // TODO: Could also support "weak" symbols with ".gnu.linkonce.s.*"
- // sections?
-
- // Handle Small Section classification here.
- if (Kind.isBSS() && IsGlobalInSmallSection(GV, TM, Kind))
- return SmallBSSSection;
- if (Kind.isDataNoRel() && IsGlobalInSmallSection(GV, TM, Kind))
- return SmallDataSection;
-
- // Otherwise, we work the same as ELF.
- return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind, Mang,TM);
-}
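
Editorial aside, not part of the imported diff: a minimal standalone C++ sketch of the small-data classification removed above. The predicate is the same size test the deleted IsInSmallSection helper used; the global sizes are hypothetical examples.

// Sketch of the MBlaze small-section size test (illustrative sizes).
#include <cstdint>
#include <cstdio>

static bool isInSmallSection(uint64_t Size) {
  // Non-empty objects of at most 8 bytes go to .sdata/.sbss and are
  // addressed via the gp_rel operator.
  return Size > 0 && Size <= 8;
}

int main() {
  std::printf("i32 global (4 bytes) : %d\n", isInSmallSection(4));   // 1
  std::printf("16-byte array        : %d\n", isInSmallSection(16));  // 0
  return 0;
}
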
diff --git a/lib/Target/MBlaze/MBlazeTargetObjectFile.h b/lib/Target/MBlaze/MBlazeTargetObjectFile.h
deleted file mode 100644
index c313722427db..000000000000
--- a/lib/Target/MBlaze/MBlazeTargetObjectFile.h
+++ /dev/null
@@ -1,40 +0,0 @@
-//===-- llvm/Target/MBlazeTargetObjectFile.h - MBlaze Obj. Info -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_MBLAZE_TARGETOBJECTFILE_H
-#define LLVM_TARGET_MBLAZE_TARGETOBJECTFILE_H
-
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-
-namespace llvm {
-
- class MBlazeTargetObjectFile : public TargetLoweringObjectFileELF {
- const MCSection *SmallDataSection;
- const MCSection *SmallBSSSection;
- public:
-
- void Initialize(MCContext &Ctx, const TargetMachine &TM);
-
- /// IsGlobalInSmallSection - Return true if this global address should be
- /// placed into small data/bss section.
- bool IsGlobalInSmallSection(const GlobalValue *GV,
- const TargetMachine &TM,
- SectionKind Kind) const;
-
- bool IsGlobalInSmallSection(const GlobalValue *GV,
- const TargetMachine &TM) const;
-
- const MCSection *SelectSectionForGlobal(const GlobalValue *GV,
- SectionKind Kind,
- Mangler *Mang,
- const TargetMachine &TM) const;
- };
-} // end namespace llvm
-
-#endif
diff --git a/lib/Target/MBlaze/MCTargetDesc/CMakeLists.txt b/lib/Target/MBlaze/MCTargetDesc/CMakeLists.txt
deleted file mode 100644
index 36134a69387c..000000000000
--- a/lib/Target/MBlaze/MCTargetDesc/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-add_llvm_library(LLVMMBlazeDesc
- MBlazeAsmBackend.cpp
- MBlazeMCAsmInfo.cpp
- MBlazeMCCodeEmitter.cpp
- MBlazeMCTargetDesc.cpp
- MBlazeELFObjectWriter.cpp
- )
-
-add_dependencies(LLVMMBlazeDesc MBlazeCommonTableGen)
diff --git a/lib/Target/MBlaze/MCTargetDesc/LLVMBuild.txt b/lib/Target/MBlaze/MCTargetDesc/LLVMBuild.txt
deleted file mode 100644
index 4982f0f17218..000000000000
--- a/lib/Target/MBlaze/MCTargetDesc/LLVMBuild.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-;===- ./lib/Target/MBlaze/MCTargetDesc/LLVMBuild.txt -----------*- Conf -*--===;
-;
-; The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-; http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = MBlazeDesc
-parent = MBlaze
-required_libraries = MBlazeAsmPrinter MBlazeInfo MC Support
-add_to_library_groups = MBlaze
diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp
deleted file mode 100644
index 6f9752c42951..000000000000
--- a/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp
+++ /dev/null
@@ -1,171 +0,0 @@
-//===-- MBlazeAsmBackend.cpp - MBlaze Assembler Backend -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/MBlazeMCTargetDesc.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/MC/MCAsmBackend.h"
-#include "llvm/MC/MCAsmLayout.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCELFObjectWriter.h"
-#include "llvm/MC/MCELFSymbolFlags.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/MC/MCSectionELF.h"
-#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/MC/MCValue.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-static unsigned getFixupKindSize(unsigned Kind) {
- switch (Kind) {
- default: llvm_unreachable("invalid fixup kind!");
- case FK_Data_1: return 1;
- case FK_PCRel_2:
- case FK_Data_2: return 2;
- case FK_PCRel_4:
- case FK_Data_4: return 4;
- case FK_Data_8: return 8;
- }
-}
-
-
-namespace {
-
-class MBlazeAsmBackend : public MCAsmBackend {
-public:
- MBlazeAsmBackend(const Target &T)
- : MCAsmBackend() {
- }
-
- unsigned getNumFixupKinds() const {
- return 2;
- }
-
- bool mayNeedRelaxation(const MCInst &Inst) const;
-
- bool fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const;
-
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const;
-
- bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
-
- unsigned getPointerSize() const {
- return 4;
- }
-};
-
-static unsigned getRelaxedOpcode(unsigned Op) {
- switch (Op) {
- default: return Op;
- case MBlaze::ADDIK: return MBlaze::ADDIK32;
- case MBlaze::ORI: return MBlaze::ORI32;
- case MBlaze::BRLID: return MBlaze::BRLID32;
- }
-}
-
-bool MBlazeAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
- if (getRelaxedOpcode(Inst.getOpcode()) == Inst.getOpcode())
- return false;
-
- bool hasExprOrImm = false;
- for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
- hasExprOrImm |= Inst.getOperand(i).isExpr();
-
- return hasExprOrImm;
-}
-
-bool MBlazeAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const {
- // FIXME: Is this right? It's what the "generic" code was doing before,
- // but is X86 specific. Is it actually true for MBlaze also, or was it
- // just close enough to not be a big deal?
- //
- // Relax if the value is too big for a (signed) i8.
- return int64_t(Value) != int64_t(int8_t(Value));
-}
-
-void MBlazeAsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
- Res = Inst;
- Res.setOpcode(getRelaxedOpcode(Inst.getOpcode()));
-}
-
-bool MBlazeAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
- if ((Count % 4) != 0)
- return false;
-
- for (uint64_t i = 0; i < Count; i += 4)
- OW->Write32(0x00000000);
-
- return true;
-}
-} // end anonymous namespace
-
-namespace {
-class ELFMBlazeAsmBackend : public MBlazeAsmBackend {
-public:
- uint8_t OSABI;
- ELFMBlazeAsmBackend(const Target &T, uint8_t _OSABI)
- : MBlazeAsmBackend(T), OSABI(_OSABI) { }
-
- void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value) const;
-
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createMBlazeELFObjectWriter(OS, OSABI);
- }
-};
-
-void ELFMBlazeAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
- unsigned DataSize, uint64_t Value) const {
- unsigned Size = getFixupKindSize(Fixup.getKind());
-
- assert(Fixup.getOffset() + Size <= DataSize &&
- "Invalid fixup offset!");
-
- char *data = Data + Fixup.getOffset();
- switch (Size) {
- default: llvm_unreachable("Cannot fixup unknown value.");
- case 1: llvm_unreachable("Cannot fixup 1 byte value.");
- case 8: llvm_unreachable("Cannot fixup 8 byte value.");
-
- case 4:
- *(data+7) = uint8_t(Value);
- *(data+6) = uint8_t(Value >> 8);
- *(data+3) = uint8_t(Value >> 16);
- *(data+2) = uint8_t(Value >> 24);
- break;
-
- case 2:
- *(data+3) = uint8_t(Value >> 0);
- *(data+2) = uint8_t(Value >> 8);
- }
-}
-} // end anonymous namespace
-
-MCAsmBackend *llvm::createMBlazeAsmBackend(const Target &T, StringRef TT,
- StringRef CPU) {
- Triple TheTriple(TT);
-
- if (TheTriple.isOSDarwin())
- assert(0 && "Mac not supported on MBlaze");
-
- if (TheTriple.isOSWindows())
- assert(0 && "Windows not supported on MBlaze");
-
- uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
- return new ELFMBlazeAsmBackend(T, OSABI);
-}
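
Two details of the removed backend above are easy to miss: fixupNeedsRelaxation keeps the short instruction form only while the fixup value still fits in a signed 8-bit immediate (the FIXME notes the test was inherited from X86), and applyFixup scatters a 32-bit value across offsets 7, 6, 3 and 2, apparently because the upper half is patched into the IMM prefix instruction and the lower half into the instruction that follows it. A minimal standalone sketch of both checks, using plain buffers instead of LLVM's MC classes:

    #include <cassert>
    #include <cstdint>

    // The relaxation test used above: keep the short instruction form only
    // while the value is representable as a signed 8-bit immediate.
    static bool needsRelaxation(uint64_t Value) {
      return static_cast<int64_t>(Value) !=
             static_cast<int64_t>(static_cast<int8_t>(Value));
    }

    // Byte placement used by applyFixup for a 4-byte fixup: the low half of
    // the value patches bytes 6..7, the high half patches bytes 2..3 of the
    // 8-byte region covered by the fixup.
    static void applyFixup32(uint8_t *Data, uint32_t Value) {
      Data[7] = static_cast<uint8_t>(Value);
      Data[6] = static_cast<uint8_t>(Value >> 8);
      Data[3] = static_cast<uint8_t>(Value >> 16);
      Data[2] = static_cast<uint8_t>(Value >> 24);
    }

    int main() {
      assert(!needsRelaxation(127));                          // fits in i8
      assert(!needsRelaxation(static_cast<uint64_t>(-128)));  // fits in i8
      assert(needsRelaxation(128));                           // needs the long form

      uint8_t Buf[8] = {0};
      applyFixup32(Buf, 0x11223344);
      assert(Buf[2] == 0x11 && Buf[3] == 0x22 && Buf[6] == 0x33 && Buf[7] == 0x44);
      return 0;
    }
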
diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h b/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h
deleted file mode 100644
index 437026e7bbc0..000000000000
--- a/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h
+++ /dev/null
@@ -1,237 +0,0 @@
-//===-- MBlazeBaseInfo.h - Top level definitions for MBlaze -- --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains small standalone helper functions and enum definitions for
-// the MBlaze target useful for the compiler back-end and the MC libraries.
-// As such, it deliberately does not include references to LLVM core
-// code gen types, passes, etc..
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBlazeBASEINFO_H
-#define MBlazeBASEINFO_H
-
-#include "MBlazeMCTargetDesc.h"
-#include "llvm/Support/ErrorHandling.h"
-
-namespace llvm {
-
-/// MBlazeII - This namespace holds all of the target specific flags that
-/// instruction info tracks.
-///
-namespace MBlazeII {
- enum {
- // PseudoFrm - This represents an instruction that is a pseudo instruction
- // or one that has not been implemented yet. It is illegal to code generate
- // it, but tolerated for intermediate implementation stages.
- FPseudo = 0,
- FRRR,
- FRRI,
- FCRR,
- FCRI,
- FRCR,
- FRCI,
- FCCR,
- FCCI,
- FRRCI,
- FRRC,
- FRCX,
- FRCS,
- FCRCS,
- FCRCX,
- FCX,
- FCR,
- FRIR,
- FRRRR,
- FRI,
- FC,
- FRR,
- FormMask = 63
-
- //===------------------------------------------------------------------===//
- // MBlaze Specific MachineOperand flags.
- // MO_NO_FLAG,
-
- /// MO_GOT - Represents the offset into the global offset table at which
- /// the address the relocation entry symbol resides during execution.
- // MO_GOT,
-
- /// MO_GOT_CALL - Represents the offset into the global offset table at
- /// which the address of a call site relocation entry symbol resides
- /// during execution. This is different from the above since this flag
- /// can only be present in call instructions.
- // MO_GOT_CALL,
-
- /// MO_GPREL - Represents the offset from the current gp value to be used
- /// for the relocatable object file being produced.
- // MO_GPREL,
-
- /// MO_ABS_HILO - Represents the hi or low part of an absolute symbol
- /// address.
- // MO_ABS_HILO
-
- };
-}
-
-static inline bool isMBlazeRegister(unsigned Reg) {
- return Reg <= 31;
-}
-
-static inline bool isSpecialMBlazeRegister(unsigned Reg) {
- switch (Reg) {
- case 0x0000 : case 0x0001 : case 0x0003 : case 0x0005 :
- case 0x0007 : case 0x000B : case 0x000D : case 0x1000 :
- case 0x1001 : case 0x1002 : case 0x1003 : case 0x1004 :
- case 0x2000 : case 0x2001 : case 0x2002 : case 0x2003 :
- case 0x2004 : case 0x2005 : case 0x2006 : case 0x2007 :
- case 0x2008 : case 0x2009 : case 0x200A : case 0x200B :
- return true;
-
- default:
- return false;
- }
-}
-
-/// getMBlazeRegisterNumbering - Given the enum value for some register, e.g.
-/// MBlaze::R0, return the number that it corresponds to (e.g. 0).
-static inline unsigned getMBlazeRegisterNumbering(unsigned RegEnum) {
- switch (RegEnum) {
- case MBlaze::R0 : return 0;
- case MBlaze::R1 : return 1;
- case MBlaze::R2 : return 2;
- case MBlaze::R3 : return 3;
- case MBlaze::R4 : return 4;
- case MBlaze::R5 : return 5;
- case MBlaze::R6 : return 6;
- case MBlaze::R7 : return 7;
- case MBlaze::R8 : return 8;
- case MBlaze::R9 : return 9;
- case MBlaze::R10 : return 10;
- case MBlaze::R11 : return 11;
- case MBlaze::R12 : return 12;
- case MBlaze::R13 : return 13;
- case MBlaze::R14 : return 14;
- case MBlaze::R15 : return 15;
- case MBlaze::R16 : return 16;
- case MBlaze::R17 : return 17;
- case MBlaze::R18 : return 18;
- case MBlaze::R19 : return 19;
- case MBlaze::R20 : return 20;
- case MBlaze::R21 : return 21;
- case MBlaze::R22 : return 22;
- case MBlaze::R23 : return 23;
- case MBlaze::R24 : return 24;
- case MBlaze::R25 : return 25;
- case MBlaze::R26 : return 26;
- case MBlaze::R27 : return 27;
- case MBlaze::R28 : return 28;
- case MBlaze::R29 : return 29;
- case MBlaze::R30 : return 30;
- case MBlaze::R31 : return 31;
- case MBlaze::RPC : return 0x0000;
- case MBlaze::RMSR : return 0x0001;
- case MBlaze::REAR : return 0x0003;
- case MBlaze::RESR : return 0x0005;
- case MBlaze::RFSR : return 0x0007;
- case MBlaze::RBTR : return 0x000B;
- case MBlaze::REDR : return 0x000D;
- case MBlaze::RPID : return 0x1000;
- case MBlaze::RZPR : return 0x1001;
- case MBlaze::RTLBX : return 0x1002;
- case MBlaze::RTLBLO : return 0x1003;
- case MBlaze::RTLBHI : return 0x1004;
- case MBlaze::RPVR0 : return 0x2000;
- case MBlaze::RPVR1 : return 0x2001;
- case MBlaze::RPVR2 : return 0x2002;
- case MBlaze::RPVR3 : return 0x2003;
- case MBlaze::RPVR4 : return 0x2004;
- case MBlaze::RPVR5 : return 0x2005;
- case MBlaze::RPVR6 : return 0x2006;
- case MBlaze::RPVR7 : return 0x2007;
- case MBlaze::RPVR8 : return 0x2008;
- case MBlaze::RPVR9 : return 0x2009;
- case MBlaze::RPVR10 : return 0x200A;
- case MBlaze::RPVR11 : return 0x200B;
- default: llvm_unreachable("Unknown register number!");
- }
-}
-
-/// getRegisterFromNumbering - Given the enum value for some register, e.g.
-/// MBlaze::R0, return the number that it corresponds to (e.g. 0).
-static inline unsigned getMBlazeRegisterFromNumbering(unsigned Reg) {
- switch (Reg) {
- case 0 : return MBlaze::R0;
- case 1 : return MBlaze::R1;
- case 2 : return MBlaze::R2;
- case 3 : return MBlaze::R3;
- case 4 : return MBlaze::R4;
- case 5 : return MBlaze::R5;
- case 6 : return MBlaze::R6;
- case 7 : return MBlaze::R7;
- case 8 : return MBlaze::R8;
- case 9 : return MBlaze::R9;
- case 10 : return MBlaze::R10;
- case 11 : return MBlaze::R11;
- case 12 : return MBlaze::R12;
- case 13 : return MBlaze::R13;
- case 14 : return MBlaze::R14;
- case 15 : return MBlaze::R15;
- case 16 : return MBlaze::R16;
- case 17 : return MBlaze::R17;
- case 18 : return MBlaze::R18;
- case 19 : return MBlaze::R19;
- case 20 : return MBlaze::R20;
- case 21 : return MBlaze::R21;
- case 22 : return MBlaze::R22;
- case 23 : return MBlaze::R23;
- case 24 : return MBlaze::R24;
- case 25 : return MBlaze::R25;
- case 26 : return MBlaze::R26;
- case 27 : return MBlaze::R27;
- case 28 : return MBlaze::R28;
- case 29 : return MBlaze::R29;
- case 30 : return MBlaze::R30;
- case 31 : return MBlaze::R31;
- default: llvm_unreachable("Unknown register number!");
- }
-}
-
-static inline unsigned getSpecialMBlazeRegisterFromNumbering(unsigned Reg) {
- switch (Reg) {
- case 0x0000 : return MBlaze::RPC;
- case 0x0001 : return MBlaze::RMSR;
- case 0x0003 : return MBlaze::REAR;
- case 0x0005 : return MBlaze::RESR;
- case 0x0007 : return MBlaze::RFSR;
- case 0x000B : return MBlaze::RBTR;
- case 0x000D : return MBlaze::REDR;
- case 0x1000 : return MBlaze::RPID;
- case 0x1001 : return MBlaze::RZPR;
- case 0x1002 : return MBlaze::RTLBX;
- case 0x1003 : return MBlaze::RTLBLO;
- case 0x1004 : return MBlaze::RTLBHI;
- case 0x2000 : return MBlaze::RPVR0;
- case 0x2001 : return MBlaze::RPVR1;
- case 0x2002 : return MBlaze::RPVR2;
- case 0x2003 : return MBlaze::RPVR3;
- case 0x2004 : return MBlaze::RPVR4;
- case 0x2005 : return MBlaze::RPVR5;
- case 0x2006 : return MBlaze::RPVR6;
- case 0x2007 : return MBlaze::RPVR7;
- case 0x2008 : return MBlaze::RPVR8;
- case 0x2009 : return MBlaze::RPVR9;
- case 0x200A : return MBlaze::RPVR10;
- case 0x200B : return MBlaze::RPVR11;
- default: llvm_unreachable("Unknown register number!");
- }
-}
-
-} // end namespace llvm;
-
-#endif
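
The two large switches above map general registers to their dense 0-31 encodings and special registers to sparse values in the 0x0000, 0x1000 and 0x2000 ranges, in both directions. The same two-way mapping can be expressed table-driven; a simplified sketch covering only a handful of special registers, with a local enum standing in for the generated MBlaze::* constants:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-ins for the TableGen-generated register enum; only a
    // few of the special registers from the header above are listed.
    enum SpecialReg { RPC, RMSR, REAR, RESR, RFSR, NumSpecialRegs };

    // Encoding column equivalent to the first cases of the switches above.
    static const uint16_t Encoding[NumSpecialRegs] = {
      0x0000, // RPC
      0x0001, // RMSR
      0x0003, // REAR
      0x0005, // RESR
      0x0007, // RFSR
    };

    static uint16_t encode(SpecialReg R) { return Encoding[R]; }

    static SpecialReg decode(uint16_t Enc) {
      for (unsigned i = 0; i != NumSpecialRegs; ++i)
        if (Encoding[i] == Enc)
          return static_cast<SpecialReg>(i);
      assert(false && "unknown special register encoding");
      return NumSpecialRegs;
    }

    int main() {
      for (unsigned i = 0; i != NumSpecialRegs; ++i)
        assert(decode(encode(static_cast<SpecialReg>(i))) == i);
      return 0;
    }
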
diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp
deleted file mode 100644
index 2824b3c35cf1..000000000000
--- a/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-//===-- MBlazeELFObjectWriter.cpp - MBlaze ELF Writer ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/MBlazeMCTargetDesc.h"
-#include "llvm/MC/MCELFObjectWriter.h"
-#include "llvm/MC/MCFixup.h"
-#include "llvm/Support/ErrorHandling.h"
-
-using namespace llvm;
-
-namespace {
- class MBlazeELFObjectWriter : public MCELFObjectTargetWriter {
- public:
- MBlazeELFObjectWriter(uint8_t OSABI);
-
- virtual ~MBlazeELFObjectWriter();
- protected:
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const;
- };
-}
-
-MBlazeELFObjectWriter::MBlazeELFObjectWriter(uint8_t OSABI)
- : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_MBLAZE,
- /*HasRelocationAddend*/ false) {}
-
-MBlazeELFObjectWriter::~MBlazeELFObjectWriter() {
-}
-
-unsigned MBlazeELFObjectWriter::GetRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) const {
- // determine the type of the relocation
- unsigned Type;
- if (IsPCRel) {
- switch ((unsigned)Fixup.getKind()) {
- default:
- llvm_unreachable("Unimplemented");
- case FK_PCRel_4:
- Type = ELF::R_MICROBLAZE_64_PCREL;
- break;
- case FK_PCRel_2:
- Type = ELF::R_MICROBLAZE_32_PCREL;
- break;
- }
- } else {
- switch ((unsigned)Fixup.getKind()) {
- default: llvm_unreachable("invalid fixup kind!");
- case FK_Data_4:
- Type = ((IsRelocWithSymbol || Addend !=0)
- ? ELF::R_MICROBLAZE_32
- : ELF::R_MICROBLAZE_64);
- break;
- case FK_Data_2:
- Type = ELF::R_MICROBLAZE_32;
- break;
- }
- }
- return Type;
-}
-
-
-
-MCObjectWriter *llvm::createMBlazeELFObjectWriter(raw_ostream &OS,
- uint8_t OSABI) {
- MCELFObjectTargetWriter *MOTW = new MBlazeELFObjectWriter(OSABI);
- return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/ false);
-}
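
GetRelocType above chooses a MicroBlaze relocation from only three inputs: the fixup kind, the PC-relative flag, and whether the fixup references a symbol or carries an addend; the naming mismatch between LLVM's fixup kinds and the Xilinx _32/_64 relocation names is the first item in the TODO file removed later in this diff. A standalone restatement of the decision table, with local enums standing in for the LLVM and ELF constants:

    #include <cassert>
    #include <cstdint>

    // Local stand-ins for the LLVM fixup kinds and ELF relocation types used
    // by the writer above.
    enum FixupKind { FK_Data_2, FK_Data_4, FK_PCRel_2, FK_PCRel_4 };
    enum RelocType {
      R_MICROBLAZE_32, R_MICROBLAZE_64,
      R_MICROBLAZE_32_PCREL, R_MICROBLAZE_64_PCREL,
      R_INVALID
    };

    // Mirrors MBlazeELFObjectWriter::GetRelocType.
    static RelocType getRelocType(FixupKind Kind, bool IsPCRel,
                                  bool IsRelocWithSymbol, int64_t Addend) {
      if (IsPCRel)
        return Kind == FK_PCRel_4   ? R_MICROBLAZE_64_PCREL
               : Kind == FK_PCRel_2 ? R_MICROBLAZE_32_PCREL
                                    : R_INVALID;
      if (Kind == FK_Data_4)
        return (IsRelocWithSymbol || Addend != 0) ? R_MICROBLAZE_32
                                                  : R_MICROBLAZE_64;
      return Kind == FK_Data_2 ? R_MICROBLAZE_32 : R_INVALID;
    }

    int main() {
      // An absolute 4-byte fixup against a symbol (or with an addend) stays a
      // plain 32-bit relocation; otherwise it becomes R_MICROBLAZE_64.
      assert(getRelocType(FK_Data_4, false, true, 0) == R_MICROBLAZE_32);
      assert(getRelocType(FK_Data_4, false, false, 0) == R_MICROBLAZE_64);
      assert(getRelocType(FK_PCRel_2, true, false, 0) == R_MICROBLAZE_32_PCREL);
      return 0;
    }
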
diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp
deleted file mode 100644
index 8231f07dfa80..000000000000
--- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-//===-- MBlazeMCAsmInfo.cpp - MBlaze asm properties -----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declarations of the MBlazeMCAsmInfo properties.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeMCAsmInfo.h"
-using namespace llvm;
-
-void MBlazeMCAsmInfo::anchor() { }
-
-MBlazeMCAsmInfo::MBlazeMCAsmInfo() {
- IsLittleEndian = false;
- StackGrowsUp = false;
- SupportsDebugInformation = true;
- AlignmentIsInBytes = false;
- PrivateGlobalPrefix = "$";
- GPRel32Directive = "\t.gpword\t";
-}
diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h
deleted file mode 100644
index 977f9a6866d7..000000000000
--- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h
+++ /dev/null
@@ -1,30 +0,0 @@
-//===-- MBlazeMCAsmInfo.h - MBlaze asm properties --------------*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declaration of the MBlazeMCAsmInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZETARGETASMINFO_H
-#define MBLAZETARGETASMINFO_H
-
-#include "llvm/MC/MCAsmInfo.h"
-
-namespace llvm {
- class Target;
-
- class MBlazeMCAsmInfo : public MCAsmInfo {
- virtual void anchor();
- public:
- explicit MBlazeMCAsmInfo();
- };
-
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
deleted file mode 100644
index 8faff6ade441..000000000000
--- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
+++ /dev/null
@@ -1,222 +0,0 @@
-//===-- MBlazeMCCodeEmitter.cpp - Convert MBlaze code to machine code -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the MBlazeMCCodeEmitter class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "mccodeemitter"
-#include "MCTargetDesc/MBlazeMCTargetDesc.h"
-#include "MCTargetDesc/MBlazeBaseInfo.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/MC/MCCodeEmitter.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCFixup.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
-
-namespace {
-class MBlazeMCCodeEmitter : public MCCodeEmitter {
- MBlazeMCCodeEmitter(const MBlazeMCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const MBlazeMCCodeEmitter &) LLVM_DELETED_FUNCTION;
- const MCInstrInfo &MCII;
-
-public:
- MBlazeMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
- MCContext &ctx)
- : MCII(mcii) {
- }
-
- ~MBlazeMCCodeEmitter() {}
-
- // getBinaryCodeForInstr - TableGen'erated function for getting the
- // binary encoding for an instruction.
- uint64_t getBinaryCodeForInstr(const MCInst &MI) const;
-
- /// getMachineOpValue - Return binary encoding of operand. If the machine
- /// operand requires relocation, record the relocation and return zero.
- unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO) const;
- unsigned getMachineOpValue(const MCInst &MI, unsigned OpIdx) const {
- return getMachineOpValue(MI, MI.getOperand(OpIdx));
- }
-
- static unsigned GetMBlazeRegNum(const MCOperand &MO) {
- // FIXME: getMBlazeRegisterNumbering() is sufficient?
- llvm_unreachable("MBlazeMCCodeEmitter::GetMBlazeRegNum() not yet "
- "implemented.");
- }
-
- void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
- // The MicroBlaze uses a bit reversed format so we need to reverse the
- // order of the bits. Taken from:
- // http://graphics.stanford.edu/~seander/bithacks.html
- C = ((C * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32;
-
- OS << (char)C;
- ++CurByte;
- }
-
- void EmitRawByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
- OS << (char)C;
- ++CurByte;
- }
-
- void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
- raw_ostream &OS) const {
- assert(Size <= 8 && "size too big in emit constant");
-
- for (unsigned i = 0; i != Size; ++i) {
- EmitByte(Val & 255, CurByte, OS);
- Val >>= 8;
- }
- }
-
- void EmitIMM(const MCOperand &imm, unsigned &CurByte, raw_ostream &OS) const;
- void EmitIMM(const MCInst &MI, unsigned &CurByte, raw_ostream &OS) const;
-
- void EmitImmediate(const MCInst &MI, unsigned opNo, bool pcrel,
- unsigned &CurByte, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const;
-};
-
-} // end anonymous namespace
-
-
-MCCodeEmitter *llvm::createMBlazeMCCodeEmitter(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI,
- MCContext &Ctx) {
- return new MBlazeMCCodeEmitter(MCII, STI, Ctx);
-}
-
-/// getMachineOpValue - Return binary encoding of operand. If the machine
-/// operand requires relocation, record the relocation and return zero.
-unsigned MBlazeMCCodeEmitter::getMachineOpValue(const MCInst &MI,
- const MCOperand &MO) const {
- if (MO.isReg())
- return getMBlazeRegisterNumbering(MO.getReg());
- if (MO.isImm())
- return static_cast<unsigned>(MO.getImm());
- if (MO.isExpr())
- return 0; // The relocation has already been recorded at this point.
-#ifndef NDEBUG
- errs() << MO;
-#endif
- llvm_unreachable(0);
-}
-
-void MBlazeMCCodeEmitter::
-EmitIMM(const MCOperand &imm, unsigned &CurByte, raw_ostream &OS) const {
- int32_t val = (int32_t)imm.getImm();
- if (val > 32767 || val < -32768) {
- EmitByte(0x0D, CurByte, OS);
- EmitByte(0x00, CurByte, OS);
- EmitRawByte((val >> 24) & 0xFF, CurByte, OS);
- EmitRawByte((val >> 16) & 0xFF, CurByte, OS);
- }
-}
-
-void MBlazeMCCodeEmitter::
-EmitIMM(const MCInst &MI, unsigned &CurByte,raw_ostream &OS) const {
- switch (MI.getOpcode()) {
- default: break;
-
- case MBlaze::ADDIK32:
- case MBlaze::ORI32:
- case MBlaze::BRLID32:
- EmitByte(0x0D, CurByte, OS);
- EmitByte(0x00, CurByte, OS);
- EmitRawByte(0, CurByte, OS);
- EmitRawByte(0, CurByte, OS);
- }
-}
-
-void MBlazeMCCodeEmitter::
-EmitImmediate(const MCInst &MI, unsigned opNo, bool pcrel, unsigned &CurByte,
- raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups) const {
- assert(MI.getNumOperands()>opNo && "Not enought operands for instruction");
-
- MCOperand oper = MI.getOperand(opNo);
-
- if (oper.isImm()) {
- EmitIMM(oper, CurByte, OS);
- } else if (oper.isExpr()) {
- MCFixupKind FixupKind;
- switch (MI.getOpcode()) {
- default:
- FixupKind = pcrel ? FK_PCRel_2 : FK_Data_2;
- Fixups.push_back(MCFixup::Create(0,oper.getExpr(),FixupKind));
- break;
- case MBlaze::ORI32:
- case MBlaze::ADDIK32:
- case MBlaze::BRLID32:
- FixupKind = pcrel ? FK_PCRel_4 : FK_Data_4;
- Fixups.push_back(MCFixup::Create(0,oper.getExpr(),FixupKind));
- break;
- }
- }
-}
-
-
-
-void MBlazeMCCodeEmitter::
-EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
- unsigned Opcode = MI.getOpcode();
- const MCInstrDesc &Desc = MCII.get(Opcode);
- uint64_t TSFlags = Desc.TSFlags;
- // Keep track of the current byte being emitted.
- unsigned CurByte = 0;
-
- // Emit an IMM instruction if the instruction we are encoding requires it
- EmitIMM(MI,CurByte,OS);
-
- switch ((TSFlags & MBlazeII::FormMask)) {
- default: break;
- case MBlazeII::FPseudo:
- // Pseudo instructions don't get encoded.
- return;
- case MBlazeII::FRRI:
- EmitImmediate(MI, 2, false, CurByte, OS, Fixups);
- break;
- case MBlazeII::FRIR:
- EmitImmediate(MI, 1, false, CurByte, OS, Fixups);
- break;
- case MBlazeII::FCRI:
- EmitImmediate(MI, 1, true, CurByte, OS, Fixups);
- break;
- case MBlazeII::FRCI:
- EmitImmediate(MI, 1, true, CurByte, OS, Fixups);
- case MBlazeII::FCCI:
- EmitImmediate(MI, 0, true, CurByte, OS, Fixups);
- break;
- }
-
- ++MCNumEmitted; // Keep track of the # of mi's emitted
- unsigned Value = getBinaryCodeForInstr(MI);
- EmitConstant(Value, 4, CurByte, OS);
-}
-
-// FIXME: These #defines shouldn't be necessary. Instead, tblgen should
-// be able to generate code emitter helpers for either variant, like it
-// does for the AsmWriter.
-#define MBlazeCodeEmitter MBlazeMCCodeEmitter
-#define MachineInstr MCInst
-#include "MBlazeGenCodeEmitter.inc"
-#undef MBlazeCodeEmitter
-#undef MachineInstr
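
EmitByte in the removed code emitter above bit-reverses every byte before writing it, since the comment notes MicroBlaze object code uses a bit-reversed format; the one-liner is the 64-bit multiply-and-mask trick from the bit-twiddling page cited there. A standalone sketch that checks the trick against a naive loop over all 256 byte values:

    #include <cassert>
    #include <cstdint>

    // Multiply-and-mask bit reversal of a byte, the same trick EmitByte uses
    // above (from the Stanford bit-twiddling hacks page it cites).
    static uint8_t reverseByte(uint8_t C) {
      return static_cast<uint8_t>(
          (((C * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL) >> 32);
    }

    // Straightforward reference implementation for comparison.
    static uint8_t reverseByteNaive(uint8_t C) {
      uint8_t R = 0;
      for (int i = 0; i < 8; ++i)
        if (C & (1u << i))
          R |= static_cast<uint8_t>(1u << (7 - i));
      return R;
    }

    int main() {
      for (unsigned V = 0; V < 256; ++V)
        assert(reverseByte(static_cast<uint8_t>(V)) ==
               reverseByteNaive(static_cast<uint8_t>(V)));
      return 0;
    }
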
diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp
deleted file mode 100644
index 380750d50f4c..000000000000
--- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-//===-- MBlazeMCTargetDesc.cpp - MBlaze Target Descriptions ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides MBlaze specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeMCTargetDesc.h"
-#include "InstPrinter/MBlazeInstPrinter.h"
-#include "MBlazeMCAsmInfo.h"
-#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_INSTRINFO_MC_DESC
-#include "MBlazeGenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_MC_DESC
-#include "MBlazeGenSubtargetInfo.inc"
-
-#define GET_REGINFO_MC_DESC
-#include "MBlazeGenRegisterInfo.inc"
-
-using namespace llvm;
-
-
-static MCInstrInfo *createMBlazeMCInstrInfo() {
- MCInstrInfo *X = new MCInstrInfo();
- InitMBlazeMCInstrInfo(X);
- return X;
-}
-
-static MCRegisterInfo *createMBlazeMCRegisterInfo(StringRef TT) {
- MCRegisterInfo *X = new MCRegisterInfo();
- InitMBlazeMCRegisterInfo(X, MBlaze::R15);
- return X;
-}
-
-static MCSubtargetInfo *createMBlazeMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
- MCSubtargetInfo *X = new MCSubtargetInfo();
- InitMBlazeMCSubtargetInfo(X, TT, CPU, FS);
- return X;
-}
-
-static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
- Triple TheTriple(TT);
- switch (TheTriple.getOS()) {
- default:
- return new MBlazeMCAsmInfo();
- }
-}
-
-static MCCodeGenInfo *createMBlazeMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- if (RM == Reloc::Default)
- RM = Reloc::Static;
- if (CM == CodeModel::Default)
- CM = CodeModel::Small;
- X->InitMCCodeGenInfo(RM, CM, OL);
- return X;
-}
-
-static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
- MCContext &Ctx, MCAsmBackend &MAB,
- raw_ostream &_OS,
- MCCodeEmitter *_Emitter,
- bool RelaxAll,
- bool NoExecStack) {
- Triple TheTriple(TT);
-
- if (TheTriple.isOSDarwin()) {
- llvm_unreachable("MBlaze does not support Darwin MACH-O format");
- }
-
- if (TheTriple.isOSWindows()) {
- llvm_unreachable("MBlaze does not support Windows COFF format");
- }
-
- return createELFStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll, NoExecStack);
-}
-
-static MCInstPrinter *createMBlazeMCInstPrinter(const Target &T,
- unsigned SyntaxVariant,
- const MCAsmInfo &MAI,
- const MCInstrInfo &MII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI) {
- if (SyntaxVariant == 0)
- return new MBlazeInstPrinter(MAI, MII, MRI);
- return 0;
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializeMBlazeTargetMC() {
- // Register the MC asm info.
- RegisterMCAsmInfoFn X(TheMBlazeTarget, createMCAsmInfo);
-
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(TheMBlazeTarget,
- createMBlazeMCCodeGenInfo);
-
- // Register the MC instruction info.
- TargetRegistry::RegisterMCInstrInfo(TheMBlazeTarget, createMBlazeMCInstrInfo);
-
- // Register the MC register info.
- TargetRegistry::RegisterMCRegInfo(TheMBlazeTarget,
- createMBlazeMCRegisterInfo);
-
- // Register the MC subtarget info.
- TargetRegistry::RegisterMCSubtargetInfo(TheMBlazeTarget,
- createMBlazeMCSubtargetInfo);
-
- // Register the MC code emitter
- TargetRegistry::RegisterMCCodeEmitter(TheMBlazeTarget,
- llvm::createMBlazeMCCodeEmitter);
-
- // Register the asm backend
- TargetRegistry::RegisterMCAsmBackend(TheMBlazeTarget,
- createMBlazeAsmBackend);
-
- // Register the object streamer
- TargetRegistry::RegisterMCObjectStreamer(TheMBlazeTarget,
- createMCStreamer);
-
- // Register the MCInstPrinter.
- TargetRegistry::RegisterMCInstPrinter(TheMBlazeTarget,
- createMBlazeMCInstPrinter);
-}
diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h
deleted file mode 100644
index 7bc7d8f20724..000000000000
--- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- MBlazeMCTargetDesc.h - MBlaze Target Descriptions -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides MBlaze specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZEMCTARGETDESC_H
-#define MBLAZEMCTARGETDESC_H
-
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
-class MCAsmBackend;
-class MCContext;
-class MCCodeEmitter;
-class MCInstrInfo;
-class MCObjectWriter;
-class MCRegisterInfo;
-class MCSubtargetInfo;
-class Target;
-class StringRef;
-class raw_ostream;
-
-extern Target TheMBlazeTarget;
-
-MCCodeEmitter *createMBlazeMCCodeEmitter(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI,
- MCContext &Ctx);
-
-MCAsmBackend *createMBlazeAsmBackend(const Target &T, StringRef TT,
- StringRef CPU);
-
-MCObjectWriter *createMBlazeELFObjectWriter(raw_ostream &OS, uint8_t OSABI);
-} // End llvm namespace
-
-// Defines symbolic names for MBlaze registers. This defines a mapping from
-// register name to register number.
-#define GET_REGINFO_ENUM
-#include "MBlazeGenRegisterInfo.inc"
-
-// Defines symbolic names for the MBlaze instructions.
-#define GET_INSTRINFO_ENUM
-#include "MBlazeGenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_ENUM
-#include "MBlazeGenSubtargetInfo.inc"
-
-#endif
diff --git a/lib/Target/MBlaze/Makefile b/lib/Target/MBlaze/Makefile
deleted file mode 100644
index 512ce9a08103..000000000000
--- a/lib/Target/MBlaze/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- lib/Target/MBlaze/Makefile --------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../..
-LIBRARYNAME = LLVMMBlazeCodeGen
-TARGET = MBlaze
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = MBlazeGenRegisterInfo.inc MBlazeGenInstrInfo.inc \
- MBlazeGenAsmWriter.inc \
- MBlazeGenDAGISel.inc MBlazeGenAsmMatcher.inc \
- MBlazeGenCodeEmitter.inc MBlazeGenCallingConv.inc \
- MBlazeGenSubtargetInfo.inc MBlazeGenIntrinsics.inc
-
-DIRS = InstPrinter AsmParser Disassembler TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common
-
diff --git a/lib/Target/MBlaze/TODO b/lib/Target/MBlaze/TODO
deleted file mode 100644
index 317d7c0a0b14..000000000000
--- a/lib/Target/MBlaze/TODO
+++ /dev/null
@@ -1,21 +0,0 @@
-* Writing out ELF files is close to working but the following needs to
- be examined more closely:
- - Relocations use 2-byte / 4-byte to terminology in reference to
- the size of the immediate value being changed. The Xilinx
- terminology seems to be (???) 4-byte / 8-byte in reference
- to the number of bytes of instructions that are being changed.
-
-* Code generation seems to work relatively well now but the following
- needs to be examined more closely:
- - The stack layout needs to be examined to make sure it meets
- the standard, especially in regards to var arg functions.
- - Look at the MBlazeGenFastISel.inc stuff and make use of it
- if appropriate.
-
-* A basic assembly parser is present now and seems to parse most things.
- There are a few things that need to be looked at:
- - There are some instructions that are not generated by the backend
- and have not been tested as far as the parser is concerned.
- - The assembly parser does not use many MicroBlaze specific directives.
- I should investigate if there are MicroBlaze specific directive and,
- if there are, add them.
diff --git a/lib/Target/MBlaze/TargetInfo/CMakeLists.txt b/lib/Target/MBlaze/TargetInfo/CMakeLists.txt
deleted file mode 100644
index b554d9b15e45..000000000000
--- a/lib/Target/MBlaze/TargetInfo/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/..
- ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMMBlazeInfo
- MBlazeTargetInfo.cpp
- )
-
-add_dependencies(LLVMMBlazeInfo MBlazeCommonTableGen)
diff --git a/lib/Target/MBlaze/TargetInfo/LLVMBuild.txt b/lib/Target/MBlaze/TargetInfo/LLVMBuild.txt
deleted file mode 100644
index ba7ee5d69188..000000000000
--- a/lib/Target/MBlaze/TargetInfo/LLVMBuild.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-;===- ./lib/Target/MBlaze/TargetInfo/LLVMBuild.txt -------------*- Conf -*--===;
-;
-; The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-; http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = MBlazeInfo
-parent = MBlaze
-required_libraries = MC Support Target
-add_to_library_groups = MBlaze
diff --git a/lib/Target/MBlaze/TargetInfo/MBlazeTargetInfo.cpp b/lib/Target/MBlaze/TargetInfo/MBlazeTargetInfo.cpp
deleted file mode 100644
index 323a7f647d56..000000000000
--- a/lib/Target/MBlaze/TargetInfo/MBlazeTargetInfo.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-//===-- MBlazeTargetInfo.cpp - MBlaze Target Implementation ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlaze.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Support/TargetRegistry.h"
-using namespace llvm;
-
-Target llvm::TheMBlazeTarget;
-
-extern "C" void LLVMInitializeMBlazeTargetInfo() {
- RegisterTarget<Triple::mblaze> X(TheMBlazeTarget, "mblaze", "MBlaze");
-}
diff --git a/lib/Target/MBlaze/TargetInfo/Makefile b/lib/Target/MBlaze/TargetInfo/Makefile
deleted file mode 100644
index fb7ea118a6b5..000000000000
--- a/lib/Target/MBlaze/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/MBlaze/TargetInfo/Makefile ---------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMBlazeInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/MSP430/CMakeLists.txt b/lib/Target/MSP430/CMakeLists.txt
index f9ecaed83a6f..c9b3c3d0c8e3 100644
--- a/lib/Target/MSP430/CMakeLists.txt
+++ b/lib/Target/MSP430/CMakeLists.txt
@@ -23,7 +23,7 @@ add_llvm_target(MSP430CodeGen
MSP430MCInstLower.cpp
)
-add_dependencies(LLVMMSP430CodeGen intrinsics_gen)
+add_dependencies(LLVMMSP430CodeGen MSP430CommonTableGen intrinsics_gen)
add_subdirectory(InstPrinter)
add_subdirectory(TargetInfo)
diff --git a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
index 3c9576056946..acf2ab869d59 100644
--- a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
+++ b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
@@ -17,15 +17,12 @@ using namespace llvm;
void MSP430MCAsmInfo::anchor() { }
-MSP430MCAsmInfo::MSP430MCAsmInfo(const Target &T, StringRef TT) {
+MSP430MCAsmInfo::MSP430MCAsmInfo(StringRef TT) {
PointerSize = CalleeSaveStackSlotSize = 2;
PrivateGlobalPrefix = ".L";
- WeakRefDirective ="\t.weak\t";
- PCSymbol=".";
CommentString = ";";
AlignmentIsInBytes = false;
- AllowNameToStartWithDigit = true;
UsesELFSectionDirectiveForBSS = true;
}
diff --git a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
index e5c2fc283b17..a7e0e5894184 100644
--- a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
+++ b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
@@ -14,16 +14,15 @@
#ifndef MSP430TARGETASMINFO_H
#define MSP430TARGETASMINFO_H
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
class StringRef;
- class Target;
- class MSP430MCAsmInfo : public MCAsmInfo {
+ class MSP430MCAsmInfo : public MCAsmInfoELF {
virtual void anchor();
public:
- explicit MSP430MCAsmInfo(const Target &T, StringRef TT);
+ explicit MSP430MCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/MSP430/MSP430AsmPrinter.cpp b/lib/Target/MSP430/MSP430AsmPrinter.cpp
index 0a04e5ddb75d..18311c3f5522 100644
--- a/lib/Target/MSP430/MSP430AsmPrinter.cpp
+++ b/lib/Target/MSP430/MSP430AsmPrinter.cpp
@@ -92,7 +92,7 @@ void MSP430AsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
if (Offset)
O << '(' << Offset << '+';
- O << *Mang->getSymbol(MO.getGlobal());
+ O << *getSymbol(MO.getGlobal());
if (Offset)
O << ')';
diff --git a/lib/Target/MSP430/MSP430CallingConv.td b/lib/Target/MSP430/MSP430CallingConv.td
index b448cc4ed9b8..8a69d1ecb287 100644
--- a/lib/Target/MSP430/MSP430CallingConv.td
+++ b/lib/Target/MSP430/MSP430CallingConv.td
@@ -23,18 +23,15 @@ def RetCC_MSP430 : CallingConv<[
//===----------------------------------------------------------------------===//
// MSP430 Argument Calling Conventions
//===----------------------------------------------------------------------===//
-def CC_MSP430 : CallingConv<[
+def CC_MSP430_AssignStack : CallingConv<[
// Pass by value if the byval attribute is given
CCIfByVal<CCPassByVal<2, 2>>,
// Promote i8 arguments to i16.
CCIfType<[i8], CCPromoteToType<i16>>,
- // The first 4 integer arguments of non-varargs functions are passed in
- // integer registers.
- CCIfNotVarArg<CCIfType<[i16], CCAssignToReg<[R15W, R14W, R13W, R12W]>>>,
-
// Integer values get stored in stack slots that are 2 bytes in
// size and 2-byte aligned.
CCIfType<[i16], CCAssignToStack<2, 2>>
]>;
+
diff --git a/lib/Target/MSP430/MSP430FrameLowering.h b/lib/Target/MSP430/MSP430FrameLowering.h
index c673f59b5efc..8370714a594b 100644
--- a/lib/Target/MSP430/MSP430FrameLowering.h
+++ b/lib/Target/MSP430/MSP430FrameLowering.h
@@ -27,8 +27,8 @@ protected:
public:
explicit MSP430FrameLowering(const MSP430Subtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 2, -2), STI(sti) {
- }
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 2, -2, 2),
+ STI(sti) {}
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 1566c096037e..4152829b60da 100644
--- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -259,11 +259,12 @@ bool MSP430DAGToDAGISel::SelectAddr(SDValue N,
}
Base = (AM.BaseType == MSP430ISelAddressMode::FrameIndexBase) ?
- CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
+ CurDAG->getTargetFrameIndex(AM.Base.FrameIndex,
+ getTargetLowering()->getPointerTy()) :
AM.Base.Reg;
if (AM.GV)
- Disp = CurDAG->getTargetGlobalAddress(AM.GV, N->getDebugLoc(),
+ Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(N),
MVT::i16, AM.Disp,
0/*AM.SymbolFlags*/);
else if (AM.CP)
@@ -345,7 +346,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedLoad(SDNode *N) {
return NULL;
}
- return CurDAG->getMachineNode(Opcode, N->getDebugLoc(),
+ return CurDAG->getMachineNode(Opcode, SDLoc(N),
VT, MVT::i16, MVT::Other,
LD->getBasePtr(), LD->getChain());
}
@@ -382,7 +383,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedBinOp(SDNode *Op,
SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) {
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
// Dump information about the Node being selected
DEBUG(errs() << "Selecting: ");
@@ -394,6 +395,7 @@ SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) {
DEBUG(errs() << "== ";
Node->dump(CurDAG);
errs() << "\n");
+ Node->setNodeId(-1);
return NULL;
}
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index 09cdf3268553..745cdf5bc792 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -45,7 +45,7 @@ typedef enum {
} HWMultUseMode;
static cl::opt<HWMultUseMode>
-HWMultMode("msp430-hwmult-mode",
+HWMultMode("msp430-hwmult-mode", cl::Hidden,
cl::desc("Hardware multiplier use mode"),
cl::init(HWMultNoIntr),
cl::values(
@@ -169,6 +169,7 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
setOperationAction(ISD::VAARG, MVT::Other, Expand);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+ setOperationAction(ISD::JumpTable, MVT::i16, Custom);
// Libcalls names.
if (HWMultMode == HWMultIntr) {
@@ -199,6 +200,7 @@ SDValue MSP430TargetLowering::LowerOperation(SDValue Op,
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
+ case ISD::JumpTable: return LowerJumpTable(Op, DAG);
default:
llvm_unreachable("unimplemented operand");
}
@@ -226,7 +228,7 @@ MSP430TargetLowering::getConstraintType(const std::string &Constraint) const {
std::pair<unsigned, const TargetRegisterClass*>
MSP430TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
// GCC Constraint Letters
switch (Constraint[0]) {
@@ -248,13 +250,130 @@ getRegForInlineAsmConstraint(const std::string &Constraint,
#include "MSP430GenCallingConv.inc"
+/// For each argument in a function store the number of pieces it is composed
+/// of.
+template<typename ArgT>
+static void ParseFunctionArgs(const SmallVectorImpl<ArgT> &Args,
+ SmallVectorImpl<unsigned> &Out) {
+ unsigned CurrentArgIndex = ~0U;
+ for (unsigned i = 0, e = Args.size(); i != e; i++) {
+ if (CurrentArgIndex == Args[i].OrigArgIndex) {
+ Out.back()++;
+ } else {
+ Out.push_back(1);
+ CurrentArgIndex++;
+ }
+ }
+}
+
+static void AnalyzeVarArgs(CCState &State,
+ const SmallVectorImpl<ISD::OutputArg> &Outs) {
+ State.AnalyzeCallOperands(Outs, CC_MSP430_AssignStack);
+}
+
+static void AnalyzeVarArgs(CCState &State,
+ const SmallVectorImpl<ISD::InputArg> &Ins) {
+ State.AnalyzeFormalArguments(Ins, CC_MSP430_AssignStack);
+}
+
+/// Analyze incoming and outgoing function arguments. We need custom C++ code
+/// to handle special constraints in the ABI like reversing the order of the
+/// pieces of splitted arguments. In addition, all pieces of a certain argument
+/// have to be passed either using registers or the stack but never mixing both.
+template<typename ArgT>
+static void AnalyzeArguments(CCState &State,
+ SmallVectorImpl<CCValAssign> &ArgLocs,
+ const SmallVectorImpl<ArgT> &Args) {
+ static const uint16_t RegList[] = {
+ MSP430::R15W, MSP430::R14W, MSP430::R13W, MSP430::R12W
+ };
+ static const unsigned NbRegs = array_lengthof(RegList);
+
+ if (State.isVarArg()) {
+ AnalyzeVarArgs(State, Args);
+ return;
+ }
+
+ SmallVector<unsigned, 4> ArgsParts;
+ ParseFunctionArgs(Args, ArgsParts);
+
+ unsigned RegsLeft = NbRegs;
+ bool UseStack = false;
+ unsigned ValNo = 0;
+
+ for (unsigned i = 0, e = ArgsParts.size(); i != e; i++) {
+ MVT ArgVT = Args[ValNo].VT;
+ ISD::ArgFlagsTy ArgFlags = Args[ValNo].Flags;
+ MVT LocVT = ArgVT;
+ CCValAssign::LocInfo LocInfo = CCValAssign::Full;
+
+ // Promote i8 to i16
+ if (LocVT == MVT::i8) {
+ LocVT = MVT::i16;
+ if (ArgFlags.isSExt())
+ LocInfo = CCValAssign::SExt;
+ else if (ArgFlags.isZExt())
+ LocInfo = CCValAssign::ZExt;
+ else
+ LocInfo = CCValAssign::AExt;
+ }
+
+ // Handle byval arguments
+ if (ArgFlags.isByVal()) {
+ State.HandleByVal(ValNo++, ArgVT, LocVT, LocInfo, 2, 2, ArgFlags);
+ continue;
+ }
+
+ unsigned Parts = ArgsParts[i];
+
+ if (!UseStack && Parts <= RegsLeft) {
+ unsigned FirstVal = ValNo;
+ for (unsigned j = 0; j < Parts; j++) {
+ unsigned Reg = State.AllocateReg(RegList, NbRegs);
+ State.addLoc(CCValAssign::getReg(ValNo++, ArgVT, Reg, LocVT, LocInfo));
+ RegsLeft--;
+ }
+
+ // Reverse the order of the pieces to agree with the "big endian" format
+ // required in the calling convention ABI.
+ SmallVectorImpl<CCValAssign>::iterator B = ArgLocs.begin() + FirstVal;
+ std::reverse(B, B + Parts);
+ } else {
+ UseStack = true;
+ for (unsigned j = 0; j < Parts; j++)
+ CC_MSP430_AssignStack(ValNo++, ArgVT, LocVT, LocInfo, ArgFlags, State);
+ }
+ }
+}
+
+static void AnalyzeRetResult(CCState &State,
+ const SmallVectorImpl<ISD::InputArg> &Ins) {
+ State.AnalyzeCallResult(Ins, RetCC_MSP430);
+}
+
+static void AnalyzeRetResult(CCState &State,
+ const SmallVectorImpl<ISD::OutputArg> &Outs) {
+ State.AnalyzeReturn(Outs, RetCC_MSP430);
+}
+
+template<typename ArgT>
+static void AnalyzeReturnValues(CCState &State,
+ SmallVectorImpl<CCValAssign> &RVLocs,
+ const SmallVectorImpl<ArgT> &Args) {
+ AnalyzeRetResult(State, Args);
+
+ // Reverse splitted return values to get the "big endian" format required
+ // to agree with the calling convention ABI.
+ std::reverse(RVLocs.begin(), RVLocs.end());
+}
+
SDValue
MSP430TargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
- DebugLoc dl,
+ SDLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
@@ -276,10 +395,10 @@ SDValue
MSP430TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
@@ -310,7 +429,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
- DebugLoc dl,
+ SDLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
@@ -323,7 +442,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeFormalArguments(Ins, CC_MSP430);
+ AnalyzeArguments(CCInfo, ArgLocs, Ins);
// Create frame index for the start of the first vararg value
if (isVarArg) {
@@ -407,7 +526,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+ SDLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location
SmallVector<CCValAssign, 16> RVLocs;
@@ -421,7 +540,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
getTargetMachine(), RVLocs, *DAG.getContext());
// Analize return values.
- CCInfo.AnalyzeReturn(Outs, RetCC_MSP430);
+ AnalyzeReturnValues(CCInfo, RVLocs, Outs);
SDValue Flag;
SmallVector<SDValue, 4> RetOps(1, Chain);
@@ -454,7 +573,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
/// LowerCCCCallTo - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
-/// TODO: sret.
+// TODO: sret.
SDValue
MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
@@ -463,20 +582,20 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
&Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
-
- CCInfo.AnalyzeCallOperands(Outs, CC_MSP430);
+ AnalyzeArguments(CCInfo, ArgLocs, Outs);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
Chain = DAG.getCALLSEQ_START(Chain ,DAG.getConstant(NumBytes,
- getPointerTy(), true));
+ getPointerTy(), true),
+ dl);
SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
SmallVector<SDValue, 12> MemOpChains;
@@ -583,7 +702,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Chain = DAG.getCALLSEQ_END(Chain,
DAG.getConstant(NumBytes, getPointerTy(), true),
DAG.getConstant(0, getPointerTy(), true),
- InFlag);
+ InFlag, dl);
InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we
@@ -599,7 +718,7 @@ SDValue
MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
@@ -607,7 +726,7 @@ MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), RVLocs, *DAG.getContext());
- CCInfo.AnalyzeCallResult(Ins, RetCC_MSP430);
+ AnalyzeReturnValues(CCInfo, RVLocs, Ins);
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
@@ -625,7 +744,7 @@ SDValue MSP430TargetLowering::LowerShifts(SDValue Op,
unsigned Opc = Op.getOpcode();
SDNode* N = Op.getNode();
EVT VT = Op.getValueType();
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
// Expand non-constant shifts to loops:
if (!isa<ConstantSDNode>(N->getOperand(1)))
@@ -669,15 +788,15 @@ SDValue MSP430TargetLowering::LowerGlobalAddress(SDValue Op,
int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
// Create the TargetGlobalAddress node, folding in the constant offset.
- SDValue Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
+ SDValue Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
getPointerTy(), Offset);
- return DAG.getNode(MSP430ISD::Wrapper, Op.getDebugLoc(),
+ return DAG.getNode(MSP430ISD::Wrapper, SDLoc(Op),
getPointerTy(), Result);
}
SDValue MSP430TargetLowering::LowerExternalSymbol(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
@@ -686,7 +805,7 @@ SDValue MSP430TargetLowering::LowerExternalSymbol(SDValue Op,
SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());
@@ -695,7 +814,7 @@ SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op,
static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
ISD::CondCode CC,
- DebugLoc dl, SelectionDAG &DAG) {
+ SDLoc dl, SelectionDAG &DAG) {
// FIXME: Handle bittests someday
assert(!LHS.getValueType().isFloatingPoint() && "We don't handle FP yet");
@@ -782,7 +901,7 @@ SDValue MSP430TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue LHS = Op.getOperand(2);
SDValue RHS = Op.getOperand(3);
SDValue Dest = Op.getOperand(4);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl (Op);
SDValue TargetCC;
SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG);
@@ -794,7 +913,7 @@ SDValue MSP430TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl (Op);
// If we are doing an AND and testing against zero, then the CMP
// will not be generated. The AND (or BIT) will generate the condition codes,
@@ -878,7 +997,7 @@ SDValue MSP430TargetLowering::LowerSELECT_CC(SDValue Op,
SDValue TrueV = Op.getOperand(2);
SDValue FalseV = Op.getOperand(3);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl (Op);
SDValue TargetCC;
SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG);
@@ -897,7 +1016,7 @@ SDValue MSP430TargetLowering::LowerSIGN_EXTEND(SDValue Op,
SelectionDAG &DAG) const {
SDValue Val = Op.getOperand(0);
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
assert(VT == MVT::i16 && "Only support i16 for now!");
@@ -929,7 +1048,7 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
MFI->setReturnAddressIsTaken(true);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
@@ -953,7 +1072,7 @@ SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op,
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful
+ SDLoc dl(Op); // FIXME probably not meaningful
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
MSP430::FPW, VT);
@@ -975,11 +1094,19 @@ SDValue MSP430TargetLowering::LowerVASTART(SDValue Op,
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
// Create a store of the frame index to the location operand
- return DAG.getStore(Op.getOperand(0), Op.getDebugLoc(), FrameIndex,
+ return DAG.getStore(Op.getOperand(0), SDLoc(Op), FrameIndex,
Op.getOperand(1), MachinePointerInfo(SV),
false, false, 0);
}
+SDValue MSP430TargetLowering::LowerJumpTable(SDValue Op,
+ SelectionDAG &DAG) const {
+ JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+ SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
+ return DAG.getNode(MSP430ISD::Wrapper, SDLoc(JT),
+ getPointerTy(), Result);
+}
+
/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
diff --git a/lib/Target/MSP430/MSP430ISelLowering.h b/lib/Target/MSP430/MSP430ISelLowering.h
index e0ed870f5653..85a861ebecbb 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/lib/Target/MSP430/MSP430ISelLowering.h
@@ -93,12 +93,13 @@ namespace llvm {
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
TargetLowering::ConstraintType
getConstraintType(const std::string &Constraint) const;
std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
+ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
/// isTruncateFree - Return true if it's free to truncate a value of type
/// Ty1 to type Ty2. e.g. On msp430 it's free to truncate a i16 value in
@@ -130,28 +131,28 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCCCArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl,
+ SDLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
@@ -162,7 +163,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
SDValue &Base,
diff --git a/lib/Target/MSP430/MSP430InstrInfo.cpp b/lib/Target/MSP430/MSP430InstrInfo.cpp
index a6b5f2f6d0bd..7a0b00ae366d 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -22,14 +22,17 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "MSP430GenInstrInfo.inc"
using namespace llvm;
+// Pin the vtable to this file.
+void MSP430InstrInfo::anchor() {}
+
MSP430InstrInfo::MSP430InstrInfo(MSP430TargetMachine &tm)
: MSP430GenInstrInfo(MSP430::ADJCALLSTACKDOWN, MSP430::ADJCALLSTACKUP),
- RI(tm, *this) {}
+ RI(tm) {}
void MSP430InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
diff --git a/lib/Target/MSP430/MSP430InstrInfo.h b/lib/Target/MSP430/MSP430InstrInfo.h
index d79f99245e71..ad2b8cc7cd45 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.h
+++ b/lib/Target/MSP430/MSP430InstrInfo.h
@@ -42,6 +42,7 @@ namespace MSP430II {
class MSP430InstrInfo : public MSP430GenInstrInfo {
const MSP430RegisterInfo RI;
+ virtual void anchor();
public:
explicit MSP430InstrInfo(MSP430TargetMachine &TM);
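
Aside: the anchor() additions above follow LLVM's usual vtable-anchoring (key function) idiom: giving the class one out-of-line virtual member pins its vtable and type info to a single object file instead of leaving a weak copy in every translation unit that includes the header. A minimal standalone sketch of the idiom (Widget and the file name are illustrative, not part of this patch):

// vtable_anchor.cpp - standalone sketch of the "anchor" idiom used above.
// The first non-inline, non-pure virtual member is the class's key function,
// so the vtable (and RTTI) are emitted only in the TU that defines it.
#include <cstdio>

struct Widget {
  virtual ~Widget() = default;
  virtual void anchor();            // intentionally not defined in the header
  int value() const { return 42; }
};

// In LLVM this single definition lives in exactly one .cpp file; every other
// user of the header then references that one vtable instead of emitting its
// own weak copy.
void Widget::anchor() {}

int main() {
  Widget w;
  std::printf("%d\n", w.value());
  return 0;
}
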
diff --git a/lib/Target/MSP430/MSP430InstrInfo.td b/lib/Target/MSP430/MSP430InstrInfo.td
index e45780d05803..50e3fdad1a9f 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.td
+++ b/lib/Target/MSP430/MSP430InstrInfo.td
@@ -183,10 +183,10 @@ let isBarrier = 1 in {
"br\t$brdst",
[(brind tblockaddress:$brdst)]>;
def Br : I16rr<0, (outs), (ins GR16:$brdst),
- "mov.w\t{$brdst, pc}",
+ "br\t$brdst",
[(brind GR16:$brdst)]>;
def Bm : I16rm<0, (outs), (ins memsrc:$brdst),
- "mov.w\t{$brdst, pc}",
+ "br\t$brdst",
[(brind (load addr:$brdst))]>;
}
}
diff --git a/lib/Target/MSP430/MSP430MCInstLower.cpp b/lib/Target/MSP430/MSP430MCInstLower.cpp
index 043e5becadbb..52f9ee57e2be 100644
--- a/lib/Target/MSP430/MSP430MCInstLower.cpp
+++ b/lib/Target/MSP430/MSP430MCInstLower.cpp
@@ -33,7 +33,7 @@ GetGlobalAddressSymbol(const MachineOperand &MO) const {
case 0: break;
}
- return Printer.Mang->getSymbol(MO.getGlobal());
+ return Printer.getSymbol(MO.getGlobal());
}
MCSymbol *MSP430MCInstLower::
diff --git a/lib/Target/MSP430/MSP430RegisterInfo.cpp b/lib/Target/MSP430/MSP430RegisterInfo.cpp
index 0b3e9e259649..1a5e31240efc 100644
--- a/lib/Target/MSP430/MSP430RegisterInfo.cpp
+++ b/lib/Target/MSP430/MSP430RegisterInfo.cpp
@@ -32,9 +32,8 @@
using namespace llvm;
// FIXME: Provide proper call frame setup / destroy opcodes.
-MSP430RegisterInfo::MSP430RegisterInfo(MSP430TargetMachine &tm,
- const TargetInstrInfo &tii)
- : MSP430GenRegisterInfo(MSP430::PCW), TM(tm), TII(tii) {
+MSP430RegisterInfo::MSP430RegisterInfo(MSP430TargetMachine &tm)
+ : MSP430GenRegisterInfo(MSP430::PCW), TM(tm) {
StackAlign = TM.getFrameLowering()->getStackAlignment();
}
@@ -132,6 +131,7 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// This is actually "load effective address" of the stack slot
// instruction. We have only two-address instructions, thus we need to
// expand it into mov + add
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
MI.setDesc(TII.get(MSP430::MOV16rr));
MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
diff --git a/lib/Target/MSP430/MSP430RegisterInfo.h b/lib/Target/MSP430/MSP430RegisterInfo.h
index 69cccb275259..78047cc6e6f0 100644
--- a/lib/Target/MSP430/MSP430RegisterInfo.h
+++ b/lib/Target/MSP430/MSP430RegisterInfo.h
@@ -27,13 +27,12 @@ class MSP430TargetMachine;
struct MSP430RegisterInfo : public MSP430GenRegisterInfo {
private:
MSP430TargetMachine &TM;
- const TargetInstrInfo &TII;
/// StackAlign - Default stack alignment.
///
unsigned StackAlign;
public:
- MSP430RegisterInfo(MSP430TargetMachine &tm, const TargetInstrInfo &tii);
+ MSP430RegisterInfo(MSP430TargetMachine &tm);
/// Code Generation virtual methods...
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
diff --git a/lib/Target/MSP430/MSP430RegisterInfo.td b/lib/Target/MSP430/MSP430RegisterInfo.td
index 07619d0675b8..4010781a8608 100644
--- a/lib/Target/MSP430/MSP430RegisterInfo.td
+++ b/lib/Target/MSP430/MSP430RegisterInfo.td
@@ -43,7 +43,7 @@ def R13B : MSP430Reg<13, "r13">;
def R14B : MSP430Reg<14, "r14">;
def R15B : MSP430Reg<15, "r15">;
-def subreg_8bit : SubRegIndex { let Namespace = "MSP430"; }
+def subreg_8bit : SubRegIndex<8> { let Namespace = "MSP430"; }
let SubRegIndices = [subreg_8bit] in {
def PCW : MSP430RegWithSubregs<0, "r0", [PCB]>;
diff --git a/lib/Target/MSP430/MSP430TargetMachine.cpp b/lib/Target/MSP430/MSP430TargetMachine.cpp
index 164e351df952..6710a097075b 100644
--- a/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -36,7 +36,9 @@ MSP430TargetMachine::MSP430TargetMachine(const Target &T,
// FIXME: Check DataLayout string.
DL("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"),
InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameLowering(Subtarget) { }
+ FrameLowering(Subtarget) {
+ initAsmInfo();
+}
namespace {
/// MSP430 Code Generator Pass Configuration Options.
diff --git a/lib/Target/Mangler.cpp b/lib/Target/Mangler.cpp
index d31efa86b34d..38be25c330ab 100644
--- a/lib/Target/Mangler.cpp
+++ b/lib/Target/Mangler.cpp
@@ -19,138 +19,48 @@
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-static bool isAcceptableChar(char C, bool AllowPeriod, bool AllowUTF8) {
- if ((C < 'a' || C > 'z') &&
- (C < 'A' || C > 'Z') &&
- (C < '0' || C > '9') &&
- C != '_' && C != '$' && C != '@' &&
- !(AllowPeriod && C == '.') &&
- !(AllowUTF8 && (C & 0x80)))
- return false;
- return true;
-}
-
-static char HexDigit(int V) {
- return V < 10 ? V+'0' : V+'A'-10;
-}
-
-static void MangleLetter(SmallVectorImpl<char> &OutName, unsigned char C) {
- OutName.push_back('_');
- OutName.push_back(HexDigit(C >> 4));
- OutName.push_back(HexDigit(C & 15));
- OutName.push_back('_');
-}
-
-/// NameNeedsEscaping - Return true if the identifier \p Str needs quotes
-/// for this assembler.
-static bool NameNeedsEscaping(StringRef Str, const MCAsmInfo &MAI) {
- assert(!Str.empty() && "Cannot create an empty MCSymbol");
-
- // If the first character is a number and the target does not allow this, we
- // need quotes.
- if (!MAI.doesAllowNameToStartWithDigit() && Str[0] >= '0' && Str[0] <= '9')
- return true;
-
- // If any of the characters in the string is an unacceptable character, force
- // quotes.
- bool AllowPeriod = MAI.doesAllowPeriodsInName();
- bool AllowUTF8 = MAI.doesAllowUTF8();
- for (unsigned i = 0, e = Str.size(); i != e; ++i)
- if (!isAcceptableChar(Str[i], AllowPeriod, AllowUTF8))
- return true;
- return false;
-}
-
-/// appendMangledName - Add the specified string in mangled form if it uses
-/// any unusual characters.
-static void appendMangledName(SmallVectorImpl<char> &OutName, StringRef Str,
- const MCAsmInfo &MAI) {
- // The first character is not allowed to be a number unless the target
- // explicitly allows it.
- if (!MAI.doesAllowNameToStartWithDigit() && Str[0] >= '0' && Str[0] <= '9') {
- MangleLetter(OutName, Str[0]);
- Str = Str.substr(1);
- }
-
- bool AllowPeriod = MAI.doesAllowPeriodsInName();
- bool AllowUTF8 = MAI.doesAllowUTF8();
- for (unsigned i = 0, e = Str.size(); i != e; ++i) {
- if (!isAcceptableChar(Str[i], AllowPeriod, AllowUTF8))
- MangleLetter(OutName, Str[i]);
- else
- OutName.push_back(Str[i]);
- }
-}
-
-
-/// appendMangledQuotedName - On systems that support quoted symbols, we still
-/// have to escape some (obscure) characters like " and \n which would break the
-/// assembler's lexing.
-static void appendMangledQuotedName(SmallVectorImpl<char> &OutName,
- StringRef Str) {
- for (unsigned i = 0, e = Str.size(); i != e; ++i) {
- if (Str[i] == '"' || Str[i] == '\n')
- MangleLetter(OutName, Str[i]);
- else
- OutName.push_back(Str[i]);
- }
-}
-
-
/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
/// and the specified name as the global variable name. GVName must not be
/// empty.
void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
- const Twine &GVName, ManglerPrefixTy PrefixTy) {
+ const Twine &GVName, ManglerPrefixTy PrefixTy,
+ bool UseGlobalPrefix) {
SmallString<256> TmpData;
StringRef Name = GVName.toStringRef(TmpData);
assert(!Name.empty() && "getNameWithPrefix requires non-empty name");
- const MCAsmInfo &MAI = Context.getAsmInfo();
+ const MCAsmInfo *MAI = TM->getMCAsmInfo();
// If the global name is not led with \1, add the appropriate prefixes.
if (Name[0] == '\1') {
Name = Name.substr(1);
} else {
if (PrefixTy == Mangler::Private) {
- const char *Prefix = MAI.getPrivateGlobalPrefix();
+ const char *Prefix = MAI->getPrivateGlobalPrefix();
OutName.append(Prefix, Prefix+strlen(Prefix));
} else if (PrefixTy == Mangler::LinkerPrivate) {
- const char *Prefix = MAI.getLinkerPrivateGlobalPrefix();
+ const char *Prefix = MAI->getLinkerPrivateGlobalPrefix();
OutName.append(Prefix, Prefix+strlen(Prefix));
}
- const char *Prefix = MAI.getGlobalPrefix();
- if (Prefix[0] == 0)
- ; // Common noop, no prefix.
- else if (Prefix[1] == 0)
- OutName.push_back(Prefix[0]); // Common, one character prefix.
- else
- OutName.append(Prefix, Prefix+strlen(Prefix)); // Arbitrary length prefix.
+ if (UseGlobalPrefix) {
+ const char *Prefix = MAI->getGlobalPrefix();
+ if (Prefix[0] == 0)
+ ; // Common noop, no prefix.
+ else if (Prefix[1] == 0)
+ OutName.push_back(Prefix[0]); // Common, one character prefix.
+ else
+ // Arbitrary length prefix.
+ OutName.append(Prefix, Prefix+strlen(Prefix));
+ }
}
-
+
// If this is a simple string that doesn't need escaping, just append it.
- if (!NameNeedsEscaping(Name, MAI) ||
- // If quotes are supported, they can be used unless the string contains
- // a quote or newline.
- (MAI.doesAllowQuotesInName() &&
- Name.find_first_of("\n\"") == StringRef::npos)) {
- OutName.append(Name.begin(), Name.end());
- return;
- }
-
- // On systems that do not allow quoted names, we need to mangle most
- // strange characters.
- if (!MAI.doesAllowQuotesInName())
- return appendMangledName(OutName, Name, MAI);
-
- // Okay, the system allows quoted strings. We can quote most anything, the
- // only characters that need escaping are " and \n.
- assert(Name.find_first_of("\n\"") != StringRef::npos);
- return appendMangledQuotedName(OutName, Name);
+ OutName.append(Name.begin(), Name.end());
}
/// AddFastCallStdCallSuffix - Microsoft fastcall and stdcall functions require
@@ -178,8 +88,8 @@ static void AddFastCallStdCallSuffix(SmallVectorImpl<char> &OutName,
/// and the specified global variable's name. If the global variable doesn't
/// have a name, this fills in a unique name for the global.
void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
- const GlobalValue *GV,
- bool isImplicitlyPrivate) {
+ const GlobalValue *GV, bool isImplicitlyPrivate,
+ bool UseGlobalPrefix) {
ManglerPrefixTy PrefixTy = Mangler::Default;
if (GV->hasPrivateLinkage() || isImplicitlyPrivate)
PrefixTy = Mangler::Private;
@@ -189,7 +99,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
// If this global has a name, handle it simply.
if (GV->hasName()) {
StringRef Name = GV->getName();
- getNameWithPrefix(OutName, Name, PrefixTy);
+ getNameWithPrefix(OutName, Name, PrefixTy, UseGlobalPrefix);
// No need to do anything else if the global has the special "do not mangle"
// flag in the name.
if (Name[0] == 1)
@@ -201,12 +111,13 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
if (ID == 0) ID = NextAnonGlobalID++;
// Must mangle the global into a unique ID.
- getNameWithPrefix(OutName, "__unnamed_" + Twine(ID), PrefixTy);
+ getNameWithPrefix(OutName, "__unnamed_" + Twine(ID), PrefixTy,
+ UseGlobalPrefix);
}
// If we are supposed to add a microsoft-style suffix for stdcall/fastcall,
// add it.
- if (Context.getAsmInfo().hasMicrosoftFastStdCallMangling()) {
+ if (TM->getMCAsmInfo()->hasMicrosoftFastStdCallMangling()) {
if (const Function *F = dyn_cast<Function>(GV)) {
CallingConv::ID CC = F->getCallingConv();
@@ -226,17 +137,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
// "Pure" variadic functions do not receive @0 suffix.
(!FT->isVarArg() || FT->getNumParams() == 0 ||
(FT->getNumParams() == 1 && F->hasStructRetAttr())))
- AddFastCallStdCallSuffix(OutName, F, TD);
+ AddFastCallStdCallSuffix(OutName, F, *TM->getDataLayout());
}
}
}
-
-/// getSymbol - Return the MCSymbol for the specified global value. This
-/// symbol is the main label that is the address of the global.
-MCSymbol *Mangler::getSymbol(const GlobalValue *GV) {
- SmallString<60> NameStr;
- getNameWithPrefix(NameStr, GV, false);
- return Context.GetOrCreateSymbol(NameStr.str());
-}
-
-
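
Aside: the net effect of the Mangler changes above is that names are no longer escaped or quoted here, and prefix handling reduces to prepending the private or linker-private prefix and, optionally, the global prefix. A rough standalone model of that remaining logic (the prefix strings and the function shape are illustrative stand-ins, not the real MCAsmInfo API):

// mangler_prefix_sketch.cpp - illustrative model only; the real code works on
// SmallVectorImpl<char> and reads the prefixes from MCAsmInfo via TargetMachine.
#include <iostream>
#include <string>

enum class PrefixTy { Default, Private, LinkerPrivate };

std::string getNameWithPrefix(std::string Name, PrefixTy Kind,
                              bool UseGlobalPrefix) {
  // A leading '\1' means "do not mangle": strip the marker and add no prefix.
  if (!Name.empty() && Name[0] == '\1')
    return Name.substr(1);

  std::string Out;
  if (Kind == PrefixTy::Private)
    Out += "L";          // stand-in for MAI->getPrivateGlobalPrefix()
  else if (Kind == PrefixTy::LinkerPrivate)
    Out += "l";          // stand-in for MAI->getLinkerPrivateGlobalPrefix()
  if (UseGlobalPrefix)
    Out += "_";          // stand-in for MAI->getGlobalPrefix()
  return Out + Name;     // the name itself is now appended without escaping
}

int main() {
  std::cout << getNameWithPrefix("foo", PrefixTy::Default, true) << '\n';   // _foo
  std::cout << getNameWithPrefix("bar", PrefixTy::Private, true) << '\n';   // L_bar
  std::cout << getNameWithPrefix(std::string("\1baz"), PrefixTy::Default, true)
            << '\n';                                                        // baz
}
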
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 0795cb963b35..cdae6c2f37e5 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -9,6 +9,7 @@
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "MipsRegisterInfo.h"
+#include "MipsTargetStreamer.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
@@ -20,26 +21,29 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/ADT/APInt.h"
using namespace llvm;
+namespace llvm {
+class MCInstrInfo;
+}
+
namespace {
class MipsAssemblerOptions {
public:
- MipsAssemblerOptions():
- aTReg(1), reorder(true), macro(true) {
- }
+ MipsAssemblerOptions() : aTReg(1), reorder(true), macro(true) {}
- unsigned getATRegNum() {return aTReg;}
+ unsigned getATRegNum() { return aTReg; }
bool setATReg(unsigned Reg);
- bool isReorder() {return reorder;}
- void setReorder() {reorder = true;}
- void setNoreorder() {reorder = false;}
+ bool isReorder() { return reorder; }
+ void setReorder() { reorder = true; }
+ void setNoreorder() { reorder = false; }
- bool isMacro() {return macro;}
- void setMacro() {macro = true;}
- void setNomacro() {macro = false;}
+ bool isMacro() { return macro; }
+ void setMacro() { macro = true; }
+ void setNomacro() { macro = false; }
private:
unsigned aTReg;
@@ -51,23 +55,21 @@ private:
namespace {
class MipsAsmParser : public MCTargetAsmParser {
- enum FpFormatTy {
- FP_FORMAT_NONE = -1,
- FP_FORMAT_S,
- FP_FORMAT_D,
- FP_FORMAT_L,
- FP_FORMAT_W
- } FpFormat;
+ MipsTargetStreamer &getTargetStreamer() {
+ MCTargetStreamer &TS = Parser.getStreamer().getTargetStreamer();
+ return static_cast<MipsTargetStreamer &>(TS);
+ }
MCSubtargetInfo &STI;
MCAsmParser &Parser;
MipsAssemblerOptions Options;
+ bool hasConsumedDollar;
#define GET_ASSEMBLER_HEADER
#include "MipsGenAsmMatcher.inc"
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands,
MCStreamer &Out, unsigned &ErrorInfo,
bool MatchingInlineAsm);
@@ -75,40 +77,98 @@ class MipsAsmParser : public MCTargetAsmParser {
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- bool parseMathOperation(StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands);
bool ParseDirective(AsmToken DirectiveID);
MipsAsmParser::OperandMatchResultTy
- parseMemOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands, int RegKind);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSARegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands, int RegKind);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSACtrlRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMemOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ bool parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind);
+
+ MipsAsmParser::OperandMatchResultTy
+ parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseGPR32(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseGPR64(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseHWRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseAFGR64Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseFGR64Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseFGR32Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseFGRH32Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
MipsAsmParser::OperandMatchResultTy
- parseCPURegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseFCCRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
MipsAsmParser::OperandMatchResultTy
- parseCPU64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseACC64DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
MipsAsmParser::OperandMatchResultTy
- parseHWRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseLO32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
MipsAsmParser::OperandMatchResultTy
- parseHW64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseHI32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
MipsAsmParser::OperandMatchResultTy
- parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseCOP2(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSA128BRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSA128HRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSA128WRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSA128DRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSA128CtrlRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseInvNum(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseLSAImm(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
bool searchSymbolAlias(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- unsigned RegisterClass);
+ unsigned RegKind);
- bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &,
+ bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &,
StringRef Mnemonic);
int tryParseRegister(bool is64BitReg);
- bool tryParseRegisterOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ bool tryParseRegisterOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
bool is64BitReg);
bool needsExpansion(MCInst &Inst);
@@ -122,17 +182,19 @@ class MipsAsmParser : public MCTargetAsmParser {
void expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
void expandMemInst(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions,
- bool isLoad,bool isImmOpnd);
+ SmallVectorImpl<MCInst> &Instructions, bool isLoad,
+ bool isImmOpnd);
bool reportParseError(StringRef ErrorMsg);
bool parseMemOffset(const MCExpr *&Res, bool isParenExpr);
bool parseRelocOperand(const MCExpr *&Res);
- const MCExpr* evaluateRelocExpr(const MCExpr *Expr, StringRef RelocStr);
+ const MCExpr *evaluateRelocExpr(const MCExpr *Expr, StringRef RelocStr);
bool isEvaluated(const MCExpr *Expr);
bool parseDirectiveSet();
+ bool parseDirectiveMipsHackStocg();
+ bool parseDirectiveMipsHackELFFlags();
bool parseSetAtDirective();
bool parseSetNoAtDirective();
@@ -144,6 +206,7 @@ class MipsAsmParser : public MCTargetAsmParser {
bool parseSetAssignment();
bool parseDirectiveWord(unsigned Size, SMLoc L);
+ bool parseDirectiveGpWord();
MCSymbolRefExpr::VariantKind getVariantKind(StringRef Symbol);
@@ -155,40 +218,49 @@ class MipsAsmParser : public MCTargetAsmParser {
return (STI.getFeatureBits() & Mips::FeatureFP64Bit) != 0;
}
+ bool isN64() const { return STI.getFeatureBits() & Mips::FeatureN64; }
+
int matchRegisterName(StringRef Symbol, bool is64BitReg);
int matchCPURegisterName(StringRef Symbol);
int matchRegisterByNumber(unsigned RegNum, unsigned RegClass);
- void setFpFormat(FpFormatTy Format) {
- FpFormat = Format;
- }
+ int matchFPURegisterName(StringRef Name);
- void setDefaultFpFormat();
+ int matchFCCRegisterName(StringRef Name);
- void setFpFormat(StringRef Format);
+ int matchACRegisterName(StringRef Name);
- FpFormatTy getFpFormat() {return FpFormat;}
+ int matchMSA128RegisterName(StringRef Name);
- bool requestsDoubleOperand(StringRef Mnemonic);
+ int matchMSA128CtrlRegisterName(StringRef Name);
+
+ int regKindToRegClass(int RegKind);
unsigned getReg(int RC, int RegNo);
int getATReg();
bool processInstruction(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ SmallVectorImpl<MCInst> &Instructions);
+
+ // Helper function that checks if the value of a vector index is within the
+ // boundaries of accepted values for each RegisterKind
+ // Example: INSERT.B $w0[n], $1 => 16 > n >= 0
+ bool validateMSAIndex(int Val, int RegKind);
+
public:
- MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
- : MCTargetAsmParser(), STI(sti), Parser(parser) {
+ MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
+ const MCInstrInfo &MII)
+ : MCTargetAsmParser(), STI(sti), Parser(parser),
+ hasConsumedDollar(false) {
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
}
MCAsmParser &getParser() const { return Parser; }
MCAsmLexer &getLexer() const { return Parser.getLexer(); }
-
};
}
@@ -201,14 +273,24 @@ class MipsOperand : public MCParsedAsmOperand {
public:
enum RegisterKind {
Kind_None,
- Kind_CPURegs,
- Kind_CPU64Regs,
+ Kind_GPR32,
+ Kind_GPR64,
Kind_HWRegs,
- Kind_HW64Regs,
Kind_FGR32Regs,
+ Kind_FGRH32Regs,
Kind_FGR64Regs,
Kind_AFGR64Regs,
- Kind_CCRRegs
+ Kind_CCRRegs,
+ Kind_FCCRegs,
+ Kind_ACC64DSP,
+ Kind_LO32DSP,
+ Kind_HI32DSP,
+ Kind_COP2,
+ Kind_MSA128BRegs,
+ Kind_MSA128HRegs,
+ Kind_MSA128WRegs,
+ Kind_MSA128DRegs,
+ Kind_MSA128CtrlRegs
};
private:
@@ -219,7 +301,9 @@ private:
k_Memory,
k_PostIndexRegister,
k_Register,
- k_Token
+ k_PtrReg,
+ k_Token,
+ k_LSAImm
} Kind;
MipsOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
@@ -258,7 +342,12 @@ public:
Inst.addOperand(MCOperand::CreateReg(getReg()));
}
- void addExpr(MCInst &Inst, const MCExpr *Expr) const{
+ void addPtrRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getPtrReg()));
+ }
+
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediate when possible. Null MCExpr = 0.
if (Expr == 0)
Inst.addOperand(MCOperand::CreateImm(0));
@@ -287,6 +376,9 @@ public:
bool isImm() const { return Kind == k_Immediate; }
bool isToken() const { return Kind == k_Token; }
bool isMem() const { return Kind == k_Memory; }
+ bool isPtrReg() const { return Kind == k_PtrReg; }
+ bool isInvNum() const { return Kind == k_Immediate; }
+ bool isLSAImm() const { return Kind == k_LSAImm; }
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
@@ -298,13 +390,18 @@ public:
return Reg.RegNum;
}
+ unsigned getPtrReg() const {
+ assert((Kind == k_PtrReg) && "Invalid access!");
+ return Reg.RegNum;
+ }
+
void setRegKind(RegisterKind RegKind) {
- assert((Kind == k_Register) && "Invalid access!");
+ assert((Kind == k_Register || Kind == k_PtrReg) && "Invalid access!");
Reg.Kind = RegKind;
}
const MCExpr *getImm() const {
- assert((Kind == k_Immediate) && "Invalid access!");
+ assert((Kind == k_Immediate || Kind == k_LSAImm) && "Invalid access!");
return Imm.Val;
}
@@ -335,6 +432,14 @@ public:
return Op;
}
+ static MipsOperand *CreatePtrReg(unsigned RegNum, SMLoc S, SMLoc E) {
+ MipsOperand *Op = new MipsOperand(k_PtrReg);
+ Op->Reg.RegNum = RegNum;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
static MipsOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
MipsOperand *Op = new MipsOperand(k_Immediate);
Op->Imm.Val = Val;
@@ -343,8 +448,16 @@ public:
return Op;
}
+ static MipsOperand *CreateLSAImm(const MCExpr *Val, SMLoc S, SMLoc E) {
+ MipsOperand *Op = new MipsOperand(k_LSAImm);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
static MipsOperand *CreateMem(unsigned Base, const MCExpr *Off,
- SMLoc S, SMLoc E) {
+ SMLoc S, SMLoc E) {
MipsOperand *Op = new MipsOperand(k_Memory);
Op->Mem.Base = Base;
Op->Mem.Off = Off;
@@ -353,59 +466,91 @@ public:
return Op;
}
- bool isCPURegsAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_CPURegs;
+ bool isGPR32Asm() const {
+ return Kind == k_Register && Reg.Kind == Kind_GPR32;
}
- void addCPURegsAsmOperands(MCInst &Inst, unsigned N) const {
+ void addRegAsmOperands(MCInst &Inst, unsigned N) const {
Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
}
- bool isCPU64RegsAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_CPU64Regs;
- }
- void addCPU64RegsAsmOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
+ bool isGPR64Asm() const {
+ return Kind == k_Register && Reg.Kind == Kind_GPR64;
}
bool isHWRegsAsm() const {
assert((Kind == k_Register) && "Invalid access!");
return Reg.Kind == Kind_HWRegs;
}
- void addHWRegsAsmOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
- }
- bool isHW64RegsAsm() const {
+ bool isCCRAsm() const {
assert((Kind == k_Register) && "Invalid access!");
- return Reg.Kind == Kind_HW64Regs;
+ return Reg.Kind == Kind_CCRRegs;
}
- void addHW64RegsAsmOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
+
+ bool isAFGR64Asm() const {
+ return Kind == k_Register && Reg.Kind == Kind_AFGR64Regs;
}
- void addCCRAsmOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
+ bool isFGR64Asm() const {
+ return Kind == k_Register && Reg.Kind == Kind_FGR64Regs;
}
- bool isCCRAsm() const {
- assert((Kind == k_Register) && "Invalid access!");
- return Reg.Kind == Kind_CCRRegs;
+ bool isFGR32Asm() const {
+ return (Kind == k_Register) && Reg.Kind == Kind_FGR32Regs;
}
- /// getStartLoc - Get the location of the first token of this operand.
- SMLoc getStartLoc() const {
- return StartLoc;
+ bool isFGRH32Asm() const {
+ return (Kind == k_Register) && Reg.Kind == Kind_FGRH32Regs;
}
- /// getEndLoc - Get the location of the last token of this operand.
- SMLoc getEndLoc() const {
- return EndLoc;
+
+ bool isFCCRegsAsm() const {
+ return (Kind == k_Register) && Reg.Kind == Kind_FCCRegs;
+ }
+
+ bool isACC64DSPAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_ACC64DSP;
+ }
+
+ bool isLO32DSPAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_LO32DSP;
+ }
+
+ bool isHI32DSPAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_HI32DSP;
+ }
+
+ bool isCOP2Asm() const { return Kind == k_Register && Reg.Kind == Kind_COP2; }
+
+ bool isMSA128BAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_MSA128BRegs;
+ }
+
+ bool isMSA128HAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_MSA128HRegs;
+ }
+
+ bool isMSA128WAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_MSA128WRegs;
+ }
+
+ bool isMSA128DAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_MSA128DRegs;
+ }
+
+ bool isMSA128CRAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_MSA128CtrlRegs;
}
+ /// getStartLoc - Get the location of the first token of this operand.
+ SMLoc getStartLoc() const { return StartLoc; }
+ /// getEndLoc - Get the location of the last token of this operand.
+ SMLoc getEndLoc() const { return EndLoc; }
+
virtual void print(raw_ostream &OS) const {
llvm_unreachable("unimplemented!");
}
}; // class MipsOperand
-} // namespace
+} // namespace
namespace llvm {
extern const MCInstrDesc MipsInsts[];
@@ -436,8 +581,8 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
// reference or immediate we may have to expand instructions.
for (unsigned i = 0; i < MCID.getNumOperands(); i++) {
const MCOperandInfo &OpInfo = MCID.OpInfo[i];
- if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY)
- || (OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) {
+ if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) ||
+ (OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) {
MCOperand &Op = Inst.getOperand(i);
if (Op.isImm()) {
int MemOffset = Op.getImm();
@@ -450,7 +595,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
const MCExpr *Expr = Op.getExpr();
if (Expr->getKind() == MCExpr::SymbolRef) {
const MCSymbolRefExpr *SR =
- static_cast<const MCSymbolRefExpr*>(Expr);
+ static_cast<const MCSymbolRefExpr *>(Expr);
if (SR->getKind() == MCSymbolRefExpr::VK_None) {
// Expand symbol.
expandMemInst(Inst, IDLoc, Instructions, MCID.mayLoad(), false);
@@ -463,7 +608,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
}
}
} // for
- } // if load/store
+ } // if load/store
if (needsExpansion(Inst))
expandInstruction(Inst, IDLoc, Instructions);
@@ -486,7 +631,7 @@ bool MipsAsmParser::needsExpansion(MCInst &Inst) {
}
void MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+ SmallVectorImpl<MCInst> &Instructions) {
switch (Inst.getOpcode()) {
case Mips::LoadImm32Reg:
return expandLoadImm(Inst, IDLoc, Instructions);
@@ -541,8 +686,9 @@ void MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc,
}
}
-void MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+void
+MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
MCInst tmpInst;
const MCOperand &ImmOp = Inst.getOperand(2);
assert(ImmOp.isImm() && "expected immediate operand kind");
@@ -583,8 +729,9 @@ void MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
}
}
-void MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+void
+MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
MCInst tmpInst;
const MCOperand &ImmOp = Inst.getOperand(1);
assert(ImmOp.isImm() && "expected immediate operand kind");
@@ -617,14 +764,15 @@ void MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
}
void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions, bool isLoad, bool isImmOpnd) {
+ SmallVectorImpl<MCInst> &Instructions,
+ bool isLoad, bool isImmOpnd) {
const MCSymbolRefExpr *SR;
MCInst TempInst;
unsigned ImmOffset, HiOffset, LoOffset;
const MCExpr *ExprOffset;
unsigned TmpRegNum;
- unsigned AtRegNum = getReg((isMips64()) ? Mips::CPU64RegsRegClassID
- : Mips::CPURegsRegClassID, getATReg());
+ unsigned AtRegNum = getReg(
+ (isMips64()) ? Mips::GPR64RegClassID : Mips::GPR32RegClassID, getATReg());
// 1st operand is either the source or destination register.
assert(Inst.getOperand(0).isReg() && "expected register operand kind");
unsigned RegOpNum = Inst.getOperand(0).getReg();
@@ -654,7 +802,7 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
TempInst.addOperand(MCOperand::CreateImm(HiOffset));
else {
if (ExprOffset->getKind() == MCExpr::SymbolRef) {
- SR = static_cast<const MCSymbolRefExpr*>(ExprOffset);
+ SR = static_cast<const MCSymbolRefExpr *>(ExprOffset);
const MCSymbolRefExpr *HiExpr = MCSymbolRefExpr::Create(
SR->getSymbol().getName(), MCSymbolRefExpr::VK_Mips_ABS_HI,
getContext());
@@ -697,15 +845,14 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
TempInst.clear();
}
-bool MipsAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
+bool MipsAsmParser::MatchAndEmitInstruction(
+ SMLoc IDLoc, unsigned &Opcode,
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo, bool MatchingInlineAsm) {
MCInst Inst;
SmallVector<MCInst, 8> Instructions;
- unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
- MatchingInlineAsm);
+ unsigned MatchResult =
+ MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
switch (MatchResult) {
default:
@@ -726,7 +873,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
- ErrorLoc = ((MipsOperand*) Operands[ErrorInfo])->getStartLoc();
+ ErrorLoc = ((MipsOperand *)Operands[ErrorInfo])->getStartLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
}
@@ -740,44 +887,44 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
}
int MipsAsmParser::matchCPURegisterName(StringRef Name) {
- int CC;
+ int CC;
if (Name == "at")
return getATReg();
- CC = StringSwitch<unsigned>(Name)
- .Case("zero", 0)
- .Case("a0", 4)
- .Case("a1", 5)
- .Case("a2", 6)
- .Case("a3", 7)
- .Case("v0", 2)
- .Case("v1", 3)
- .Case("s0", 16)
- .Case("s1", 17)
- .Case("s2", 18)
- .Case("s3", 19)
- .Case("s4", 20)
- .Case("s5", 21)
- .Case("s6", 22)
- .Case("s7", 23)
- .Case("k0", 26)
- .Case("k1", 27)
- .Case("sp", 29)
- .Case("fp", 30)
- .Case("gp", 28)
- .Case("ra", 31)
- .Case("t0", 8)
- .Case("t1", 9)
- .Case("t2", 10)
- .Case("t3", 11)
- .Case("t4", 12)
- .Case("t5", 13)
- .Case("t6", 14)
- .Case("t7", 15)
- .Case("t8", 24)
- .Case("t9", 25)
- .Default(-1);
+ CC = StringSwitch<unsigned>(Name)
+ .Case("zero", 0)
+ .Case("a0", 4)
+ .Case("a1", 5)
+ .Case("a2", 6)
+ .Case("a3", 7)
+ .Case("v0", 2)
+ .Case("v1", 3)
+ .Case("s0", 16)
+ .Case("s1", 17)
+ .Case("s2", 18)
+ .Case("s3", 19)
+ .Case("s4", 20)
+ .Case("s5", 21)
+ .Case("s6", 22)
+ .Case("s7", 23)
+ .Case("k0", 26)
+ .Case("k1", 27)
+ .Case("sp", 29)
+ .Case("fp", 30)
+ .Case("gp", 28)
+ .Case("ra", 31)
+ .Case("t0", 8)
+ .Case("t1", 9)
+ .Case("t2", 10)
+ .Case("t3", 11)
+ .Case("t4", 12)
+ .Case("t5", 13)
+ .Case("t6", 14)
+ .Case("t7", 15)
+ .Case("t8", 24)
+ .Case("t9", 25)
+ .Default(-1);
// Although SGI documentation just cuts out t0-t3 for n32/n64,
// GNU pushes the values of t0-t3 to override the o32/o64 values for t4-t7
@@ -787,83 +934,140 @@ int MipsAsmParser::matchCPURegisterName(StringRef Name) {
if (CC == -1 && isMips64())
CC = StringSwitch<unsigned>(Name)
- .Case("a4", 8)
- .Case("a5", 9)
- .Case("a6", 10)
- .Case("a7", 11)
- .Case("kt0", 26)
- .Case("kt1", 27)
- .Case("s8", 30)
- .Default(-1);
+ .Case("a4", 8)
+ .Case("a5", 9)
+ .Case("a6", 10)
+ .Case("a7", 11)
+ .Case("kt0", 26)
+ .Case("kt1", 27)
+ .Case("s8", 30)
+ .Default(-1);
return CC;
}
-int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) {
-
- if (Name.equals("fcc0"))
- return Mips::FCC0;
-
- int CC;
- CC = matchCPURegisterName(Name);
- if (CC != -1)
- return matchRegisterByNumber(CC, is64BitReg ? Mips::CPU64RegsRegClassID
- : Mips::CPURegsRegClassID);
+int MipsAsmParser::matchFPURegisterName(StringRef Name) {
if (Name[0] == 'f') {
StringRef NumString = Name.substr(1);
unsigned IntVal;
if (NumString.getAsInteger(10, IntVal))
- return -1; // This is not an integer.
- if (IntVal > 31)
+ return -1; // This is not an integer.
+ if (IntVal > 31) // Maximum index for fpu register.
return -1;
+ return IntVal;
+ }
+ return -1;
+}
- FpFormatTy Format = getFpFormat();
+int MipsAsmParser::matchFCCRegisterName(StringRef Name) {
- if (Format == FP_FORMAT_S || Format == FP_FORMAT_W)
- return getReg(Mips::FGR32RegClassID, IntVal);
- if (Format == FP_FORMAT_D) {
- if (isFP64()) {
- return getReg(Mips::FGR64RegClassID, IntVal);
- }
- // Only even numbers available as register pairs.
- if ((IntVal > 31) || (IntVal % 2 != 0))
- return -1;
- return getReg(Mips::AFGR64RegClassID, IntVal / 2);
- }
+ if (Name.startswith("fcc")) {
+ StringRef NumString = Name.substr(3);
+ unsigned IntVal;
+ if (NumString.getAsInteger(10, IntVal))
+ return -1; // This is not an integer.
+ if (IntVal > 7) // There are only 8 fcc registers.
+ return -1;
+ return IntVal;
}
+ return -1;
+}
+int MipsAsmParser::matchACRegisterName(StringRef Name) {
+
+ if (Name.startswith("ac")) {
+ StringRef NumString = Name.substr(2);
+ unsigned IntVal;
+ if (NumString.getAsInteger(10, IntVal))
+ return -1; // This is not an integer.
+    if (IntVal > 3) // There are only 4 acc registers (ac0-ac3).
+ return -1;
+ return IntVal;
+ }
return -1;
}
-void MipsAsmParser::setDefaultFpFormat() {
+int MipsAsmParser::matchMSA128RegisterName(StringRef Name) {
+ unsigned IntVal;
- if (isMips64() || isFP64())
- FpFormat = FP_FORMAT_D;
- else
- FpFormat = FP_FORMAT_S;
+ if (Name.front() != 'w' || Name.drop_front(1).getAsInteger(10, IntVal))
+ return -1;
+
+ if (IntVal > 31)
+ return -1;
+
+ return IntVal;
}
-bool MipsAsmParser::requestsDoubleOperand(StringRef Mnemonic){
+int MipsAsmParser::matchMSA128CtrlRegisterName(StringRef Name) {
+ int CC;
- bool IsDouble = StringSwitch<bool>(Mnemonic.lower())
- .Case("ldxc1", true)
- .Case("ldc1", true)
- .Case("sdxc1", true)
- .Case("sdc1", true)
- .Default(false);
+ CC = StringSwitch<unsigned>(Name)
+ .Case("msair", 0)
+ .Case("msacsr", 1)
+ .Case("msaaccess", 2)
+ .Case("msasave", 3)
+ .Case("msamodify", 4)
+ .Case("msarequest", 5)
+ .Case("msamap", 6)
+ .Case("msaunmap", 7)
+ .Default(-1);
- return IsDouble;
+ return CC;
}
-void MipsAsmParser::setFpFormat(StringRef Format) {
+int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) {
- FpFormat = StringSwitch<FpFormatTy>(Format.lower())
- .Case(".s", FP_FORMAT_S)
- .Case(".d", FP_FORMAT_D)
- .Case(".l", FP_FORMAT_L)
- .Case(".w", FP_FORMAT_W)
- .Default(FP_FORMAT_NONE);
+ int CC;
+ CC = matchCPURegisterName(Name);
+ if (CC != -1)
+ return matchRegisterByNumber(CC, is64BitReg ? Mips::GPR64RegClassID
+ : Mips::GPR32RegClassID);
+ CC = matchFPURegisterName(Name);
+ // TODO: decide about fpu register class
+ if (CC != -1)
+ return matchRegisterByNumber(CC, isFP64() ? Mips::FGR64RegClassID
+ : Mips::FGR32RegClassID);
+ return matchMSA128RegisterName(Name);
+}
+
+int MipsAsmParser::regKindToRegClass(int RegKind) {
+
+ switch (RegKind) {
+ case MipsOperand::Kind_GPR32:
+ return Mips::GPR32RegClassID;
+ case MipsOperand::Kind_GPR64:
+ return Mips::GPR64RegClassID;
+ case MipsOperand::Kind_HWRegs:
+ return Mips::HWRegsRegClassID;
+ case MipsOperand::Kind_FGR32Regs:
+ return Mips::FGR32RegClassID;
+ case MipsOperand::Kind_FGRH32Regs:
+ return Mips::FGRH32RegClassID;
+ case MipsOperand::Kind_FGR64Regs:
+ return Mips::FGR64RegClassID;
+ case MipsOperand::Kind_AFGR64Regs:
+ return Mips::AFGR64RegClassID;
+ case MipsOperand::Kind_CCRRegs:
+ return Mips::CCRRegClassID;
+ case MipsOperand::Kind_ACC64DSP:
+ return Mips::ACC64DSPRegClassID;
+ case MipsOperand::Kind_FCCRegs:
+ return Mips::FCCRegClassID;
+ case MipsOperand::Kind_MSA128BRegs:
+ return Mips::MSA128BRegClassID;
+ case MipsOperand::Kind_MSA128HRegs:
+ return Mips::MSA128HRegClassID;
+ case MipsOperand::Kind_MSA128WRegs:
+ return Mips::MSA128WRegClassID;
+ case MipsOperand::Kind_MSA128DRegs:
+ return Mips::MSA128DRegClassID;
+ case MipsOperand::Kind_MSA128CtrlRegs:
+ return Mips::MSACtrlRegClassID;
+ default:
+ return -1;
+ }
}
bool MipsAssemblerOptions::setATReg(unsigned Reg) {
@@ -874,17 +1078,15 @@ bool MipsAssemblerOptions::setATReg(unsigned Reg) {
return true;
}
-int MipsAsmParser::getATReg() {
- return Options.getATRegNum();
-}
+int MipsAsmParser::getATReg() { return Options.getATRegNum(); }
unsigned MipsAsmParser::getReg(int RC, int RegNo) {
- return *(getContext().getRegisterInfo().getRegClass(RC).begin() + RegNo);
+ return *(getContext().getRegisterInfo()->getRegClass(RC).begin() + RegNo);
}
int MipsAsmParser::matchRegisterByNumber(unsigned RegNum, unsigned RegClass) {
-
- if (RegNum > 31)
+ if (RegNum >
+ getContext().getRegisterInfo()->getRegClass(RegClass).getNumRegs())
return -1;
return getReg(RegClass, RegNum);
@@ -899,12 +1101,13 @@ int MipsAsmParser::tryParseRegister(bool is64BitReg) {
RegNum = matchRegisterName(lowerCase, is64BitReg);
} else if (Tok.is(AsmToken::Integer))
RegNum = matchRegisterByNumber(static_cast<unsigned>(Tok.getIntVal()),
- is64BitReg ? Mips::CPU64RegsRegClassID : Mips::CPURegsRegClassID);
+ is64BitReg ? Mips::GPR64RegClassID
+ : Mips::GPR32RegClassID);
return RegNum;
}
bool MipsAsmParser::tryParseRegisterOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands, bool is64BitReg) {
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands, bool is64BitReg) {
SMLoc S = Parser.getTok().getLoc();
int RegNo = -1;
@@ -913,14 +1116,15 @@ bool MipsAsmParser::tryParseRegisterOperand(
if (RegNo == -1)
return true;
- Operands.push_back(MipsOperand::CreateReg(RegNo, S,
- Parser.getTok().getLoc()));
+ Operands.push_back(
+ MipsOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
Parser.Lex(); // Eat register token.
return false;
}
-bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands,
- StringRef Mnemonic) {
+bool
+MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ StringRef Mnemonic) {
// Check if the current operand has a custom associated parser, if so, try to
// custom parse the operand, or fallback to the general approach.
OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
@@ -968,22 +1172,39 @@ bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands,
return true;
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
-
MCSymbol *Sym = getContext().GetOrCreateSymbol("$" + Identifier);
-
// Otherwise create a symbol reference.
- const MCExpr *Res = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None,
- getContext());
+ const MCExpr *Res =
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, getContext());
Operands.push_back(MipsOperand::CreateImm(Res, S, E));
return false;
}
case AsmToken::Identifier:
+    // For instruction aliases like "bc1f $Label", the dedicated parser will
+    // eat the '$' sign before failing, so before looking for the label we
+    // must check whether we have already consumed the '$'.
+ if (hasConsumedDollar) {
+ hasConsumedDollar = false;
+ SMLoc S = Parser.getTok().getLoc();
+ StringRef Identifier;
+ if (Parser.parseIdentifier(Identifier))
+ return true;
+ SMLoc E =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ MCSymbol *Sym = getContext().GetOrCreateSymbol("$" + Identifier);
+ // Create a symbol reference.
+ const MCExpr *Res =
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, getContext());
+
+ Operands.push_back(MipsOperand::CreateImm(Res, S, E));
+ return false;
+ }
      // Look for the existing symbol; we should check if
      // we need to assign the proper RegisterKind.
if (searchSymbolAlias(Operands, MipsOperand::Kind_None))
return false;
- // Else drop to expression parsing.
+ // Else drop to expression parsing.
case AsmToken::LParen:
case AsmToken::Minus:
case AsmToken::Plus:
@@ -1014,7 +1235,7 @@ bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands,
return true;
}
-const MCExpr* MipsAsmParser::evaluateRelocExpr(const MCExpr *Expr,
+const MCExpr *MipsAsmParser::evaluateRelocExpr(const MCExpr *Expr,
StringRef RelocStr) {
const MCExpr *Res;
// Check the type of the expression.
@@ -1084,7 +1305,7 @@ bool MipsAsmParser::isEvaluated(const MCExpr *Expr) {
}
bool MipsAsmParser::parseRelocOperand(const MCExpr *&Res) {
- Parser.Lex(); // Eat the % token.
+ Parser.Lex(); // Eat the % token.
const AsmToken &Tok = Parser.getTok(); // Get next token, operation.
if (Tok.isNot(AsmToken::Identifier))
return true;
@@ -1130,7 +1351,7 @@ bool MipsAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
StartLoc = Parser.getTok().getLoc();
RegNo = tryParseRegister(isMips64());
EndLoc = Parser.getTok().getLoc();
- return (RegNo == (unsigned) -1);
+ return (RegNo == (unsigned)-1);
}
bool MipsAsmParser::parseMemOffset(const MCExpr *&Res, bool isParenExpr) {
@@ -1162,11 +1383,12 @@ bool MipsAsmParser::parseMemOffset(const MCExpr *&Res, bool isParenExpr) {
}
MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
- SmallVectorImpl<MCParsedAsmOperand*>&Operands) {
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
const MCExpr *IdVal = 0;
SMLoc S;
bool isParenExpr = false;
+ MipsAsmParser::OperandMatchResultTy Res = MatchOperand_NoMatch;
// First operand is the offset.
S = Parser.getTok().getLoc();
@@ -1181,21 +1403,20 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
const AsmToken &Tok = Parser.getTok(); // Get the next token.
if (Tok.isNot(AsmToken::LParen)) {
- MipsOperand *Mnemonic = static_cast<MipsOperand*>(Operands[0]);
+ MipsOperand *Mnemonic = static_cast<MipsOperand *>(Operands[0]);
if (Mnemonic->getToken() == "la") {
- SMLoc E = SMLoc::getFromPointer(
- Parser.getTok().getLoc().getPointer() - 1);
+ SMLoc E =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(MipsOperand::CreateImm(IdVal, S, E));
return MatchOperand_Success;
}
if (Tok.is(AsmToken::EndOfStatement)) {
- SMLoc E = SMLoc::getFromPointer(
- Parser.getTok().getLoc().getPointer() - 1);
+ SMLoc E =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
// Zero register assumed, add a memory operand with ZERO as its base.
- Operands.push_back(MipsOperand::CreateMem(isMips64() ? Mips::ZERO_64
- : Mips::ZERO,
- IdVal, S, E));
+ Operands.push_back(MipsOperand::CreateMem(
+ isMips64() ? Mips::ZERO_64 : Mips::ZERO, IdVal, S, E));
return MatchOperand_Success;
}
Error(Parser.getTok().getLoc(), "'(' expected");
@@ -1205,21 +1426,12 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
Parser.Lex(); // Eat the '(' token.
}
- const AsmToken &Tok1 = Parser.getTok(); // Get next token
- if (Tok1.is(AsmToken::Dollar)) {
- Parser.Lex(); // Eat the '$' token.
- if (tryParseRegisterOperand(Operands, isMips64())) {
- Error(Parser.getTok().getLoc(), "unexpected token in operand");
- return MatchOperand_ParseFail;
- }
-
- } else {
- Error(Parser.getTok().getLoc(), "unexpected token in operand");
- return MatchOperand_ParseFail;
- }
+ Res = parseRegs(Operands, isMips64() ? (int)MipsOperand::Kind_GPR64
+ : (int)MipsOperand::Kind_GPR32);
+ if (Res != MatchOperand_Success)
+ return Res;
- const AsmToken &Tok2 = Parser.getTok(); // Get next token.
- if (Tok2.isNot(AsmToken::RParen)) {
+ if (Parser.getTok().isNot(AsmToken::RParen)) {
Error(Parser.getTok().getLoc(), "')' expected");
return MatchOperand_ParseFail;
}
@@ -1232,7 +1444,7 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
IdVal = MCConstantExpr::Create(0, getContext());
// Replace the register operand with the memory operand.
- MipsOperand* op = static_cast<MipsOperand*>(Operands.back());
+ MipsOperand *op = static_cast<MipsOperand *>(Operands.back());
int RegNo = op->getReg();
// Remove the register from the operands.
Operands.pop_back();
@@ -1251,336 +1463,694 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
return MatchOperand_Success;
}
+bool MipsAsmParser::parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind) {
+ // If the first token is not '$' we have an error.
+ if (Parser.getTok().isNot(AsmToken::Dollar))
+ return false;
+
+ SMLoc S = Parser.getTok().getLoc();
+ Parser.Lex();
+ AsmToken::TokenKind TkKind = getLexer().getKind();
+ int Reg;
+
+ if (TkKind == AsmToken::Integer) {
+ Reg = matchRegisterByNumber(Parser.getTok().getIntVal(),
+ regKindToRegClass(RegKind));
+ if (Reg == -1)
+ return false;
+ } else if (TkKind == AsmToken::Identifier) {
+ if ((Reg = matchCPURegisterName(Parser.getTok().getString().lower())) == -1)
+ return false;
+ Reg = getReg(regKindToRegClass(RegKind), Reg);
+ } else {
+ return false;
+ }
+
+ MipsOperand *Op = MipsOperand::CreatePtrReg(Reg, S, Parser.getTok().getLoc());
+ Op->setRegKind((MipsOperand::RegisterKind)RegKind);
+ Operands.push_back(Op);
+ Parser.Lex();
+ return true;
+}
+
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseCPU64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+MipsAsmParser::parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ MipsOperand::RegisterKind RegKind =
+ isN64() ? MipsOperand::Kind_GPR64 : MipsOperand::Kind_GPR32;
- if (!isMips64())
+ // Parse index register.
+ if (!parsePtrReg(Operands, RegKind))
return MatchOperand_NoMatch;
- if (getLexer().getKind() == AsmToken::Identifier) {
- if (searchSymbolAlias(Operands, MipsOperand::Kind_CPU64Regs))
+
+ // Parse '('.
+ if (Parser.getTok().isNot(AsmToken::LParen))
+ return MatchOperand_NoMatch;
+
+ Operands.push_back(MipsOperand::CreateToken("(", getLexer().getLoc()));
+ Parser.Lex();
+
+ // Parse base register.
+ if (!parsePtrReg(Operands, RegKind))
+ return MatchOperand_NoMatch;
+
+ // Parse ')'.
+ if (Parser.getTok().isNot(AsmToken::RParen))
+ return MatchOperand_NoMatch;
+
+ Operands.push_back(MipsOperand::CreateToken(")", getLexer().getLoc()));
+ Parser.Lex();
+
+ return MatchOperand_Success;
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind) {
+ MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
+ if (getLexer().getKind() == AsmToken::Identifier && !hasConsumedDollar) {
+ if (searchSymbolAlias(Operands, Kind))
return MatchOperand_Success;
return MatchOperand_NoMatch;
}
+ SMLoc S = Parser.getTok().getLoc();
// If the first token is not '$', we have an error.
- if (Parser.getTok().isNot(AsmToken::Dollar))
+ if (Parser.getTok().isNot(AsmToken::Dollar) && !hasConsumedDollar)
return MatchOperand_NoMatch;
-
- Parser.Lex(); // Eat $
- if (!tryParseRegisterOperand(Operands, true)) {
- // Set the proper register kind.
- MipsOperand* op = static_cast<MipsOperand*>(Operands.back());
- op->setRegKind(MipsOperand::Kind_CPU64Regs);
+ if (!hasConsumedDollar) {
+ Parser.Lex(); // Eat the '$'
+ hasConsumedDollar = true;
+ }
+ if (getLexer().getKind() == AsmToken::Identifier) {
+ int RegNum = -1;
+ std::string RegName = Parser.getTok().getString().lower();
+ // Match register by name
+ switch (RegKind) {
+ case MipsOperand::Kind_GPR32:
+ case MipsOperand::Kind_GPR64:
+ RegNum = matchCPURegisterName(RegName);
+ break;
+ case MipsOperand::Kind_AFGR64Regs:
+ case MipsOperand::Kind_FGR64Regs:
+ case MipsOperand::Kind_FGR32Regs:
+ case MipsOperand::Kind_FGRH32Regs:
+ RegNum = matchFPURegisterName(RegName);
+ if (RegKind == MipsOperand::Kind_AFGR64Regs)
+ RegNum /= 2;
+ else if (RegKind == MipsOperand::Kind_FGRH32Regs && !isFP64())
+ if (RegNum != -1 && RegNum % 2 != 0)
+ Warning(S, "Float register should be even.");
+ break;
+ case MipsOperand::Kind_FCCRegs:
+ RegNum = matchFCCRegisterName(RegName);
+ break;
+ case MipsOperand::Kind_ACC64DSP:
+ RegNum = matchACRegisterName(RegName);
+ break;
+ default:
+ break; // No match, value is set to -1.
+ }
+    // No match found; return _NoMatch to give other parsers a chance.
+ if (RegNum < 0)
+ return MatchOperand_NoMatch;
+
+ int RegVal = getReg(regKindToRegClass(Kind), RegNum);
+ if (RegVal == -1)
+ return MatchOperand_NoMatch;
+
+ MipsOperand *Op =
+ MipsOperand::CreateReg(RegVal, S, Parser.getTok().getLoc());
+ Op->setRegKind(Kind);
+ Operands.push_back(Op);
+ hasConsumedDollar = false;
+ Parser.Lex(); // Eat the register name.
+ return MatchOperand_Success;
+ } else if (getLexer().getKind() == AsmToken::Integer) {
+ unsigned RegNum = Parser.getTok().getIntVal();
+ if (Kind == MipsOperand::Kind_HWRegs) {
+ if (RegNum != 29)
+ return MatchOperand_NoMatch;
+ // Only hwreg 29 is supported, found at index 0.
+ RegNum = 0;
+ }
+ int Reg = matchRegisterByNumber(RegNum, regKindToRegClass(Kind));
+ if (Reg == -1)
+ return MatchOperand_NoMatch;
+ MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
+ Op->setRegKind(Kind);
+ Operands.push_back(Op);
+ hasConsumedDollar = false;
+ Parser.Lex(); // Eat the register number.
+ if ((RegKind == MipsOperand::Kind_GPR32) &&
+ (getLexer().is(AsmToken::LParen))) {
+ // Check if it is indexed addressing operand.
+ Operands.push_back(MipsOperand::CreateToken("(", getLexer().getLoc()));
+ Parser.Lex(); // Eat the parenthesis.
+ if (parseRegs(Operands, RegKind) != MatchOperand_Success)
+ return MatchOperand_NoMatch;
+ if (getLexer().isNot(AsmToken::RParen))
+ return MatchOperand_NoMatch;
+ Operands.push_back(MipsOperand::CreateToken(")", getLexer().getLoc()));
+ Parser.Lex();
+ }
return MatchOperand_Success;
}
return MatchOperand_NoMatch;
}
-bool MipsAsmParser::searchSymbolAlias(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands, unsigned RegisterKind) {
+bool MipsAsmParser::validateMSAIndex(int Val, int RegKind) {
+ MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
- MCSymbol *Sym = getContext().LookupSymbol(Parser.getTok().getIdentifier());
- if (Sym) {
- SMLoc S = Parser.getTok().getLoc();
- const MCExpr *Expr;
- if (Sym->isVariable())
- Expr = Sym->getVariableValue();
- else
- return false;
- if (Expr->getKind() == MCExpr::SymbolRef) {
- const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
- const StringRef DefSymbol = Ref->getSymbol().getName();
- if (DefSymbol.startswith("$")) {
- // Lookup for the register with the corresponding name.
- int RegNum = matchRegisterName(DefSymbol.substr(1), isMips64());
- if (RegNum > -1) {
- Parser.Lex();
- MipsOperand *op = MipsOperand::CreateReg(RegNum, S,
- Parser.getTok().getLoc());
- op->setRegKind((MipsOperand::RegisterKind) RegisterKind);
- Operands.push_back(op);
- return true;
- }
- }
- } else if (Expr->getKind() == MCExpr::Constant) {
- Parser.Lex();
- const MCConstantExpr *Const = static_cast<const MCConstantExpr*>(Expr);
- MipsOperand *op = MipsOperand::CreateImm(Const, S,
- Parser.getTok().getLoc());
- Operands.push_back(op);
- return true;
- }
+ if (Val < 0)
+ return false;
+
+ switch (Kind) {
+ default:
+ return false;
+ case MipsOperand::Kind_MSA128BRegs:
+ return Val < 16;
+ case MipsOperand::Kind_MSA128HRegs:
+ return Val < 8;
+ case MipsOperand::Kind_MSA128WRegs:
+ return Val < 4;
+ case MipsOperand::Kind_MSA128DRegs:
+ return Val < 2;
}
- return false;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseCPURegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+MipsAsmParser::parseMSARegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind) {
+ MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
+ SMLoc S = Parser.getTok().getLoc();
+ std::string RegName;
- if (getLexer().getKind() == AsmToken::Identifier) {
- if (searchSymbolAlias(Operands, MipsOperand::Kind_CPURegs))
- return MatchOperand_Success;
- return MatchOperand_NoMatch;
- }
- // If the first token is not '$' we have an error.
if (Parser.getTok().isNot(AsmToken::Dollar))
return MatchOperand_NoMatch;
- Parser.Lex(); // Eat $
- if (!tryParseRegisterOperand(Operands, false)) {
- // Set the proper register kind.
- MipsOperand* op = static_cast<MipsOperand*>(Operands.back());
- op->setRegKind(MipsOperand::Kind_CPURegs);
+ switch (RegKind) {
+ default:
+ return MatchOperand_ParseFail;
+ case MipsOperand::Kind_MSA128BRegs:
+ case MipsOperand::Kind_MSA128HRegs:
+ case MipsOperand::Kind_MSA128WRegs:
+ case MipsOperand::Kind_MSA128DRegs:
+ break;
+ }
+
+ Parser.Lex(); // Eat the '$'.
+ if (getLexer().getKind() == AsmToken::Identifier)
+ RegName = Parser.getTok().getString().lower();
+ else
+ return MatchOperand_ParseFail;
+
+ int RegNum = matchMSA128RegisterName(RegName);
+
+ if (RegNum < 0 || RegNum > 31)
+ return MatchOperand_ParseFail;
+
+ int RegVal = getReg(regKindToRegClass(Kind), RegNum);
+ if (RegVal == -1)
+ return MatchOperand_ParseFail;
+
+ MipsOperand *Op = MipsOperand::CreateReg(RegVal, S, Parser.getTok().getLoc());
+ Op->setRegKind(Kind);
+ Operands.push_back(Op);
+
+ Parser.Lex(); // Eat the register identifier.
+
+ // MSA registers may be suffixed with an index in the form of:
+ // 1) Immediate expression.
+ // 2) General Purpose Register.
+ // Examples:
+ // 1) copy_s.b $29,$w0[0]
+ // 2) sld.b $w0,$w1[$1]
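// For "copy_s.b $29, $w0[0]" the resulting operand list is roughly
// { "copy_s.b", $29, $w0, "[", 0, "]" }, with the index parsed below either
// as a constant expression or as a GPR.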
+
+ if (Parser.getTok().isNot(AsmToken::LBrac))
+ return MatchOperand_Success;
+
+ MipsOperand *Mnemonic = static_cast<MipsOperand *>(Operands[0]);
+
+ Operands.push_back(MipsOperand::CreateToken("[", Parser.getTok().getLoc()));
+ Parser.Lex(); // Eat the '[' token.
+
+ if (Parser.getTok().is(AsmToken::Dollar)) {
+ // This must be a GPR.
+ MipsOperand *RegOp;
+ SMLoc VIdx = Parser.getTok().getLoc();
+ Parser.Lex(); // Eat the '$' token.
+
+ // GPRs have aliases, and we must account for that. Example: $30 == $fp
+ if (getLexer().getKind() == AsmToken::Integer) {
+ unsigned RegNum = Parser.getTok().getIntVal();
+ int Reg = matchRegisterByNumber(
+ RegNum, regKindToRegClass(MipsOperand::Kind_GPR32));
+ if (Reg == -1) {
+ Error(VIdx, "invalid general purpose register");
+ return MatchOperand_ParseFail;
+ }
+
+ RegOp = MipsOperand::CreateReg(Reg, VIdx, Parser.getTok().getLoc());
+ } else if (getLexer().getKind() == AsmToken::Identifier) {
+ int RegNum = -1;
+ std::string RegName = Parser.getTok().getString().lower();
+
+ RegNum = matchCPURegisterName(RegName);
+ if (RegNum == -1) {
+ Error(VIdx, "general purpose register expected");
+ return MatchOperand_ParseFail;
+ }
+ RegNum = getReg(regKindToRegClass(MipsOperand::Kind_GPR32), RegNum);
+ RegOp = MipsOperand::CreateReg(RegNum, VIdx, Parser.getTok().getLoc());
+ } else
+ return MatchOperand_ParseFail;
+
+ RegOp->setRegKind(MipsOperand::Kind_GPR32);
+ Operands.push_back(RegOp);
+ Parser.Lex(); // Eat the register identifier.
+
+ if (Parser.getTok().isNot(AsmToken::RBrac))
+ return MatchOperand_ParseFail;
+
+ Operands.push_back(MipsOperand::CreateToken("]", Parser.getTok().getLoc()));
+ Parser.Lex(); // Eat the ']' token.
+
return MatchOperand_Success;
}
- return MatchOperand_NoMatch;
+
+ // Otherwise, the index must be a constant expression.
+ SMLoc VIdx = Parser.getTok().getLoc();
+ const MCExpr *ImmVal;
+
+ if (getParser().parseExpression(ImmVal))
+ return MatchOperand_ParseFail;
+
+ const MCConstantExpr *expr = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!expr || !validateMSAIndex((int)expr->getValue(), Kind)) {
+ Error(VIdx, "invalid immediate value");
+ return MatchOperand_ParseFail;
+ }
+
+ SMLoc E = Parser.getTok().getEndLoc();
+
+ if (Parser.getTok().isNot(AsmToken::RBrac))
+ return MatchOperand_ParseFail;
+
+ bool insve =
+ Mnemonic->getToken() == "insve.b" || Mnemonic->getToken() == "insve.h" ||
+ Mnemonic->getToken() == "insve.w" || Mnemonic->getToken() == "insve.d";
+
+ // The second vector index of insve instructions is always 0.
+ if (insve && Operands.size() > 6) {
+ if (expr->getValue() != 0) {
+ Error(VIdx, "immediate value must be 0");
+ return MatchOperand_ParseFail;
+ }
+ Operands.push_back(MipsOperand::CreateToken("0", VIdx));
+ } else
+ Operands.push_back(MipsOperand::CreateImm(expr, VIdx, E));
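// For example, in "insve.w $w0[1], $w1[0]" the bracketed index parsed here is
// the trailing "[0]"; Operands.size() > 6 identifies it as the second index,
// so it must be 0 and is pushed as a literal "0" token.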
+
+ Operands.push_back(MipsOperand::CreateToken("]", Parser.getTok().getLoc()));
+
+ Parser.Lex(); // Eat the ']' token.
+
+ return MatchOperand_Success;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseHWRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+MipsAsmParser::parseMSACtrlRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind) {
+ MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
- if (isMips64())
+ if (Kind != MipsOperand::Kind_MSA128CtrlRegs)
return MatchOperand_NoMatch;
- // If the first token is not '$' we have error.
if (Parser.getTok().isNot(AsmToken::Dollar))
- return MatchOperand_NoMatch;
+ return MatchOperand_ParseFail;
+
SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat the '$'.
- const AsmToken &Tok = Parser.getTok(); // Get the next token.
- if (Tok.isNot(AsmToken::Integer))
- return MatchOperand_NoMatch;
+ Parser.Lex(); // Eat the '$' symbol.
- unsigned RegNum = Tok.getIntVal();
- // At the moment only hwreg29 is supported.
- if (RegNum != 29)
+ int RegNum = -1;
+ if (getLexer().getKind() == AsmToken::Identifier)
+ RegNum = matchMSA128CtrlRegisterName(Parser.getTok().getString().lower());
+ else if (getLexer().getKind() == AsmToken::Integer)
+ RegNum = Parser.getTok().getIntVal();
+ else
return MatchOperand_ParseFail;
- MipsOperand *op = MipsOperand::CreateReg(Mips::HWR29, S,
- Parser.getTok().getLoc());
- op->setRegKind(MipsOperand::Kind_HWRegs);
- Operands.push_back(op);
+ if (RegNum < 0 || RegNum > 7)
+ return MatchOperand_ParseFail;
+
+ int RegVal = getReg(regKindToRegClass(Kind), RegNum);
+ if (RegVal == -1)
+ return MatchOperand_ParseFail;
+
+ MipsOperand *RegOp =
+ MipsOperand::CreateReg(RegVal, S, Parser.getTok().getLoc());
+ RegOp->setRegKind(MipsOperand::Kind_MSA128CtrlRegs);
+ Operands.push_back(RegOp);
+ Parser.Lex(); // Eat the register identifier.
- Parser.Lex(); // Eat the register number.
return MatchOperand_Success;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseHW64Regs(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+MipsAsmParser::parseGPR64(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
if (!isMips64())
return MatchOperand_NoMatch;
+ return parseRegs(Operands, (int)MipsOperand::Kind_GPR64);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseGPR32(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_GPR32);
+}
+
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseAFGR64Regs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+
+ if (isFP64())
+ return MatchOperand_NoMatch;
+ return parseRegs(Operands, (int)MipsOperand::Kind_AFGR64Regs);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseFGR64Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ if (!isFP64())
+ return MatchOperand_NoMatch;
+ return parseRegs(Operands, (int)MipsOperand::Kind_FGR64Regs);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseFGR32Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_FGR32Regs);
+}
+
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseFGRH32Regs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_FGRH32Regs);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseFCCRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_FCCRegs);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseACC64DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_ACC64DSP);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseLO32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
// If the first token is not '$' we have an error.
if (Parser.getTok().isNot(AsmToken::Dollar))
return MatchOperand_NoMatch;
+
SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat $
+ Parser.Lex(); // Eat the '$'
- const AsmToken &Tok = Parser.getTok(); // Get the next token.
- if (Tok.isNot(AsmToken::Integer))
+ const AsmToken &Tok = Parser.getTok(); // Get next token.
+
+ if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
- unsigned RegNum = Tok.getIntVal();
- // At the moment only hwreg29 is supported.
- if (RegNum != 29)
- return MatchOperand_ParseFail;
+ if (!Tok.getIdentifier().startswith("ac"))
+ return MatchOperand_NoMatch;
+
+ StringRef NumString = Tok.getIdentifier().substr(2);
+
+ unsigned IntVal;
+ if (NumString.getAsInteger(10, IntVal))
+ return MatchOperand_NoMatch;
- MipsOperand *op = MipsOperand::CreateReg(Mips::HWR29_64, S,
- Parser.getTok().getLoc());
- op->setRegKind(MipsOperand::Kind_HW64Regs);
- Operands.push_back(op);
+ unsigned Reg = matchRegisterByNumber(IntVal, Mips::LO32DSPRegClassID);
+
+ MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
+ Op->setRegKind(MipsOperand::Kind_LO32DSP);
+ Operands.push_back(Op);
Parser.Lex(); // Eat the register token.
return MatchOperand_Success;
}
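// Both this parser and parseHI32DSP() below accept the DSP accumulator
// syntax "$ac0".."$ac3"; the numeric suffix selects the corresponding
// LO or HI half of the accumulator.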
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- unsigned RegNum;
+MipsAsmParser::parseHI32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
// If the first token is not '$' we have an error.
if (Parser.getTok().isNot(AsmToken::Dollar))
return MatchOperand_NoMatch;
+
SMLoc S = Parser.getTok().getLoc();
Parser.Lex(); // Eat the '$'
const AsmToken &Tok = Parser.getTok(); // Get next token.
- if (Tok.is(AsmToken::Integer)) {
- RegNum = Tok.getIntVal();
- // At the moment only fcc0 is supported.
- if (RegNum != 0)
- return MatchOperand_ParseFail;
- } else if (Tok.is(AsmToken::Identifier)) {
- // At the moment only fcc0 is supported.
- if (Tok.getIdentifier() != "fcc0")
- return MatchOperand_ParseFail;
- } else
+
+ if (Tok.isNot(AsmToken::Identifier))
+ return MatchOperand_NoMatch;
+
+ if (!Tok.getIdentifier().startswith("ac"))
+ return MatchOperand_NoMatch;
+
+ StringRef NumString = Tok.getIdentifier().substr(2);
+
+ unsigned IntVal;
+ if (NumString.getAsInteger(10, IntVal))
return MatchOperand_NoMatch;
- MipsOperand *op = MipsOperand::CreateReg(Mips::FCC0, S,
- Parser.getTok().getLoc());
- op->setRegKind(MipsOperand::Kind_CCRRegs);
- Operands.push_back(op);
+ unsigned Reg = matchRegisterByNumber(IntVal, Mips::HI32DSPRegClassID);
+
+ MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
+ Op->setRegKind(MipsOperand::Kind_HI32DSP);
+ Operands.push_back(Op);
Parser.Lex(); // Eat the register token.
return MatchOperand_Success;
}
-MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) {
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseCOP2(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ // If the first token is not '$' we have an error.
+ if (Parser.getTok().isNot(AsmToken::Dollar))
+ return MatchOperand_NoMatch;
- MCSymbolRefExpr::VariantKind VK
- = StringSwitch<MCSymbolRefExpr::VariantKind>(Symbol)
- .Case("hi", MCSymbolRefExpr::VK_Mips_ABS_HI)
- .Case("lo", MCSymbolRefExpr::VK_Mips_ABS_LO)
- .Case("gp_rel", MCSymbolRefExpr::VK_Mips_GPREL)
- .Case("call16", MCSymbolRefExpr::VK_Mips_GOT_CALL)
- .Case("got", MCSymbolRefExpr::VK_Mips_GOT)
- .Case("tlsgd", MCSymbolRefExpr::VK_Mips_TLSGD)
- .Case("tlsldm", MCSymbolRefExpr::VK_Mips_TLSLDM)
- .Case("dtprel_hi", MCSymbolRefExpr::VK_Mips_DTPREL_HI)
- .Case("dtprel_lo", MCSymbolRefExpr::VK_Mips_DTPREL_LO)
- .Case("gottprel", MCSymbolRefExpr::VK_Mips_GOTTPREL)
- .Case("tprel_hi", MCSymbolRefExpr::VK_Mips_TPREL_HI)
- .Case("tprel_lo", MCSymbolRefExpr::VK_Mips_TPREL_LO)
- .Case("got_disp", MCSymbolRefExpr::VK_Mips_GOT_DISP)
- .Case("got_page", MCSymbolRefExpr::VK_Mips_GOT_PAGE)
- .Case("got_ofst", MCSymbolRefExpr::VK_Mips_GOT_OFST)
- .Case("hi(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_HI)
- .Case("lo(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_LO)
- .Default(MCSymbolRefExpr::VK_None);
+ SMLoc S = Parser.getTok().getLoc();
+ Parser.Lex(); // Eat the '$'
- return VK;
+ const AsmToken &Tok = Parser.getTok(); // Get next token.
+
+ if (Tok.isNot(AsmToken::Integer))
+ return MatchOperand_NoMatch;
+
+ unsigned IntVal = Tok.getIntVal();
+
+ unsigned Reg = matchRegisterByNumber(IntVal, Mips::COP2RegClassID);
+
+ MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
+ Op->setRegKind(MipsOperand::Kind_COP2);
+ Operands.push_back(Op);
+
+ Parser.Lex(); // Eat the register number.
+ return MatchOperand_Success;
}
-static int ConvertCcString(StringRef CondString) {
- int CC = StringSwitch<unsigned>(CondString)
- .Case(".f", 0)
- .Case(".un", 1)
- .Case(".eq", 2)
- .Case(".ueq", 3)
- .Case(".olt", 4)
- .Case(".ult", 5)
- .Case(".ole", 6)
- .Case(".ule", 7)
- .Case(".sf", 8)
- .Case(".ngle", 9)
- .Case(".seq", 10)
- .Case(".ngl", 11)
- .Case(".lt", 12)
- .Case(".nge", 13)
- .Case(".le", 14)
- .Case(".ngt", 15)
- .Default(-1);
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128BRegs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128BRegs);
+}
- return CC;
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128HRegs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128HRegs);
}
-bool MipsAsmParser::
-parseMathOperation(StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Split the format.
- size_t Start = Name.find('.'), Next = Name.rfind('.');
- StringRef Format1 = Name.slice(Start, Next);
- // Add the first format to the operands.
- Operands.push_back(MipsOperand::CreateToken(Format1, NameLoc));
- // Now for the second format.
- StringRef Format2 = Name.slice(Next, StringRef::npos);
- Operands.push_back(MipsOperand::CreateToken(Format2, NameLoc));
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128WRegs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128WRegs);
+}
- // Set the format for the first register.
- setFpFormat(Format1);
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128DRegs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128DRegs);
+}
- // Read the remaining operands.
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- // Read the first operand.
- if (ParseOperand(Operands, Name)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "unexpected token in argument list");
- }
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128CtrlRegs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseMSACtrlRegs(Operands, (int)MipsOperand::Kind_MSA128CtrlRegs);
+}
- if (getLexer().isNot(AsmToken::Comma)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "unexpected token in argument list");
+bool MipsAsmParser::searchSymbolAlias(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands, unsigned RegKind) {
+
+ MCSymbol *Sym = getContext().LookupSymbol(Parser.getTok().getIdentifier());
+ if (Sym) {
+ SMLoc S = Parser.getTok().getLoc();
+ const MCExpr *Expr;
+ if (Sym->isVariable())
+ Expr = Sym->getVariableValue();
+ else
+ return false;
+ if (Expr->getKind() == MCExpr::SymbolRef) {
+ MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
+ const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
+ const StringRef DefSymbol = Ref->getSymbol().getName();
+ if (DefSymbol.startswith("$")) {
+ int RegNum = -1;
+ APInt IntVal(32, -1);
+ if (!DefSymbol.substr(1).getAsInteger(10, IntVal))
+ RegNum = matchRegisterByNumber(IntVal.getZExtValue(),
+ isMips64() ? Mips::GPR64RegClassID
+ : Mips::GPR32RegClassID);
+ else {
+ // Lookup for the register with the corresponding name.
+ switch (Kind) {
+ case MipsOperand::Kind_AFGR64Regs:
+ case MipsOperand::Kind_FGR64Regs:
+ RegNum = matchFPURegisterName(DefSymbol.substr(1));
+ break;
+ case MipsOperand::Kind_FGR32Regs:
+ RegNum = matchFPURegisterName(DefSymbol.substr(1));
+ break;
+ case MipsOperand::Kind_GPR64:
+ case MipsOperand::Kind_GPR32:
+ default:
+ RegNum = matchCPURegisterName(DefSymbol.substr(1));
+ break;
+ }
+ if (RegNum > -1)
+ RegNum = getReg(regKindToRegClass(Kind), RegNum);
+ }
+ if (RegNum > -1) {
+ Parser.Lex();
+ MipsOperand *op =
+ MipsOperand::CreateReg(RegNum, S, Parser.getTok().getLoc());
+ op->setRegKind(Kind);
+ Operands.push_back(op);
+ return true;
+ }
+ }
+ } else if (Expr->getKind() == MCExpr::Constant) {
+ Parser.Lex();
+ const MCConstantExpr *Const = static_cast<const MCConstantExpr *>(Expr);
+ MipsOperand *op =
+ MipsOperand::CreateImm(Const, S, Parser.getTok().getLoc());
+ Operands.push_back(op);
+ return true;
}
- Parser.Lex(); // Eat the comma.
+ }
+ return false;
+}
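// This resolves aliases created with ".set", e.g. after ".set reg_alias, $4"
// the identifier "reg_alias" parses as GPR $4; see the '$'-prefixed value
// handling in parseSetAssignment() further down.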
- // Set the format for the first register
- setFpFormat(Format2);
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseHWRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_HWRegs);
+}
- // Parse and remember the operand.
- if (ParseOperand(Operands, Name)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "unexpected token in argument list");
- }
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_CCRRegs);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseInvNum(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ const MCExpr *IdVal;
+ // If the first token is '$' we may have a register operand.
+ if (Parser.getTok().is(AsmToken::Dollar))
+ return MatchOperand_NoMatch;
+ SMLoc S = Parser.getTok().getLoc();
+ if (getParser().parseExpression(IdVal))
+ return MatchOperand_ParseFail;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(IdVal);
+ assert(MCE && "Unexpected MCExpr type.");
+ int64_t Val = MCE->getValue();
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ Operands.push_back(MipsOperand::CreateImm(
+ MCConstantExpr::Create(0 - Val, getContext()), S, E));
+ return MatchOperand_Success;
+}
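// E.g. an operand written as "1" is stored as the immediate -1, i.e. the
// matcher sees the negated ("inverted") value.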
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseLSAImm(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ switch (getLexer().getKind()) {
+ default:
+ return MatchOperand_NoMatch;
+ case AsmToken::LParen:
+ case AsmToken::Plus:
+ case AsmToken::Minus:
+ case AsmToken::Integer:
+ break;
}
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "unexpected token in argument list");
+ const MCExpr *Expr;
+ SMLoc S = Parser.getTok().getLoc();
+
+ if (getParser().parseExpression(Expr))
+ return MatchOperand_ParseFail;
+
+ int64_t Val;
+ if (!Expr->EvaluateAsAbsolute(Val)) {
+ Error(S, "expected immediate value");
+ return MatchOperand_ParseFail;
}
- Parser.Lex(); // Consume the EndOfStatement.
- return false;
+ // The LSA instruction has a 2-bit unsigned immediate field, and the CPU
+ // always adds one to its value, so the accepted range is 1..4. We only
+ // check the range here; the +1/-1 adjustment is handled when the
+ // instruction is actually encoded or decoded.
+ if (Val < 1 || Val > 4) {
+ Error(S, "immediate not in range (1..4)");
+ return MatchOperand_ParseFail;
+ }
+
+ Operands.push_back(MipsOperand::CreateLSAImm(Expr, S,
+ Parser.getTok().getLoc()));
+ return MatchOperand_Success;
}
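// Illustrative example (operand values chosen arbitrarily): "lsa $2, $3, $4, 4"
// is accepted here with Val == 4; the encoder later stores 3 in the 2-bit
// field and DecodeLSAImm() in the disassembler adds the 1 back.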
-bool MipsAsmParser::
-ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- StringRef Mnemonic;
- // Floating point instructions: Should the register be treated as a double?
- if (requestsDoubleOperand(Name)) {
- setFpFormat(FP_FORMAT_D);
- Operands.push_back(MipsOperand::CreateToken(Name, NameLoc));
- Mnemonic = Name;
- } else {
- setDefaultFpFormat();
- // Create the leading tokens for the mnemonic, split by '.' characters.
- size_t Start = 0, Next = Name.find('.');
- Mnemonic = Name.slice(Start, Next);
-
- Operands.push_back(MipsOperand::CreateToken(Mnemonic, NameLoc));
-
- if (Next != StringRef::npos) {
- // There is a format token in mnemonic.
- size_t Dot = Name.find('.', Next + 1);
- StringRef Format = Name.slice(Next, Dot);
- if (Dot == StringRef::npos) // Only one '.' in a string, it's a format.
- Operands.push_back(MipsOperand::CreateToken(Format, NameLoc));
- else {
- if (Name.startswith("c.")) {
- // Floating point compare, add '.' and immediate represent for cc.
- Operands.push_back(MipsOperand::CreateToken(".", NameLoc));
- int Cc = ConvertCcString(Format);
- if (Cc == -1) {
- return Error(NameLoc, "Invalid conditional code");
- }
- SMLoc E = SMLoc::getFromPointer(
- Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(
- MipsOperand::CreateImm(MCConstantExpr::Create(Cc, getContext()),
- NameLoc, E));
- } else {
- // trunc, ceil, floor ...
- return parseMathOperation(Name, NameLoc, Operands);
- }
+MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) {
- // The rest is a format.
- Format = Name.slice(Dot, StringRef::npos);
- Operands.push_back(MipsOperand::CreateToken(Format, NameLoc));
- }
+ MCSymbolRefExpr::VariantKind VK =
+ StringSwitch<MCSymbolRefExpr::VariantKind>(Symbol)
+ .Case("hi", MCSymbolRefExpr::VK_Mips_ABS_HI)
+ .Case("lo", MCSymbolRefExpr::VK_Mips_ABS_LO)
+ .Case("gp_rel", MCSymbolRefExpr::VK_Mips_GPREL)
+ .Case("call16", MCSymbolRefExpr::VK_Mips_GOT_CALL)
+ .Case("got", MCSymbolRefExpr::VK_Mips_GOT)
+ .Case("tlsgd", MCSymbolRefExpr::VK_Mips_TLSGD)
+ .Case("tlsldm", MCSymbolRefExpr::VK_Mips_TLSLDM)
+ .Case("dtprel_hi", MCSymbolRefExpr::VK_Mips_DTPREL_HI)
+ .Case("dtprel_lo", MCSymbolRefExpr::VK_Mips_DTPREL_LO)
+ .Case("gottprel", MCSymbolRefExpr::VK_Mips_GOTTPREL)
+ .Case("tprel_hi", MCSymbolRefExpr::VK_Mips_TPREL_HI)
+ .Case("tprel_lo", MCSymbolRefExpr::VK_Mips_TPREL_LO)
+ .Case("got_disp", MCSymbolRefExpr::VK_Mips_GOT_DISP)
+ .Case("got_page", MCSymbolRefExpr::VK_Mips_GOT_PAGE)
+ .Case("got_ofst", MCSymbolRefExpr::VK_Mips_GOT_OFST)
+ .Case("hi(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_HI)
+ .Case("lo(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_LO)
+ .Default(MCSymbolRefExpr::VK_None);
- setFpFormat(Format);
- }
+ return VK;
+}
+
+bool MipsAsmParser::ParseInstruction(
+ ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ // Check if we have a valid mnemonic.
+ if (!mnemonicIsValid(Name, 0)) {
+ Parser.eatToEndOfStatement();
+ return Error(NameLoc, "Unknown instruction");
}
+ // The first operand in the MCInst is the instruction mnemonic.
+ Operands.push_back(MipsOperand::CreateToken(Name, NameLoc));
// Read the remaining operands.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
// Read the first operand.
- if (ParseOperand(Operands, Mnemonic)) {
+ if (ParseOperand(Operands, Name)) {
SMLoc Loc = getLexer().getLoc();
Parser.eatToEndOfStatement();
return Error(Loc, "unexpected token in argument list");
@@ -1588,7 +2158,6 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
while (getLexer().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the comma.
-
// Parse and remember the operand.
if (ParseOperand(Operands, Name)) {
SMLoc Loc = getLexer().getLoc();
@@ -1597,13 +2166,11 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
}
}
}
-
if (getLexer().isNot(AsmToken::EndOfStatement)) {
SMLoc Loc = getLexer().getLoc();
Parser.eatToEndOfStatement();
return Error(Loc, "unexpected token in argument list");
}
-
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
@@ -1741,8 +2308,23 @@ bool MipsAsmParser::parseSetAssignment() {
return reportParseError("unexpected token in .set directive");
Lex(); // Eat comma
- if (Parser.parseExpression(Value))
- reportParseError("expected valid expression after comma");
+ if (getLexer().is(AsmToken::Dollar)) {
+ MCSymbol *Symbol;
+ SMLoc DollarLoc = getLexer().getLoc();
+ // Consume the dollar sign, and check for a following identifier.
+ Parser.Lex();
+ // We have a '$' followed by something; make sure they are adjacent.
+ if (DollarLoc.getPointer() + 1 != getTok().getLoc().getPointer())
+ return true;
+ StringRef Res =
+ StringRef(DollarLoc.getPointer(),
+ getTok().getEndLoc().getPointer() - DollarLoc.getPointer());
+ Symbol = getContext().GetOrCreateSymbol(Res);
+ Parser.Lex();
+ Value =
+ MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None, getContext());
+ } else if (Parser.parseExpression(Value))
+ return reportParseError("expected valid expression after comma");
// Check if the Name already exists as a symbol.
MCSymbol *Sym = getContext().LookupSymbol(Name);
@@ -1788,6 +2370,34 @@ bool MipsAsmParser::parseDirectiveSet() {
return true;
}
+bool MipsAsmParser::parseDirectiveMipsHackStocg() {
+ MCAsmParser &Parser = getParser();
+ StringRef Name;
+ if (Parser.parseIdentifier(Name))
+ reportParseError("expected identifier");
+
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token");
+ Lex();
+
+ int64_t Flags = 0;
+ if (Parser.parseAbsoluteExpression(Flags))
+ return TokError("unexpected token");
+
+ getTargetStreamer().emitMipsHackSTOCG(Sym, Flags);
+ return false;
+}
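// Illustrative usage (symbol name and flag value are arbitrary):
//   .mips_hack_stocg foo, 8
// which forwards the symbol "foo" and the flags value to emitMipsHackSTOCG().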
+
+bool MipsAsmParser::parseDirectiveMipsHackELFFlags() {
+ int64_t Flags = 0;
+ if (Parser.parseAbsoluteExpression(Flags))
+ return TokError("unexpected token");
+
+ getTargetStreamer().emitMipsHackELFFlags(Flags);
+ return false;
+}
+
/// parseDirectiveWord
/// ::= .word [ expression (, expression)* ]
bool MipsAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
@@ -1813,6 +2423,22 @@ bool MipsAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
return false;
}
+/// parseDirectiveGpWord
+/// ::= .gpword local_sym
+bool MipsAsmParser::parseDirectiveGpWord() {
+ const MCExpr *Value;
+ // EmitGPRel32Value requires an expression, so we use the base class
+ // method to evaluate the expression.
+ if (getParser().parseExpression(Value))
+ return true;
+ getParser().getStreamer().EmitGPRel32Value(Value);
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return Error(getLexer().getLoc(), "unexpected token in directive");
+ Parser.Lex(); // Eat EndOfStatement token.
+ return false;
+}
+
bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getString();
@@ -1853,7 +2479,7 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
if (IDVal == ".gpword") {
// Ignore this directive for now.
- Parser.eatToEndOfStatement();
+ parseDirectiveGpWord();
return false;
}
@@ -1862,6 +2488,12 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
return false;
}
+ if (IDVal == ".mips_hack_stocg")
+ return parseDirectiveMipsHackStocg();
+
+ if (IDVal == ".mips_hack_elf_flags")
+ return parseDirectiveMipsHackELFFlags();
+
return true;
}
diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt
index 78a9f70c6681..6acc9a88c06e 100644
--- a/lib/Target/Mips/CMakeLists.txt
+++ b/lib/Target/Mips/CMakeLists.txt
@@ -15,6 +15,7 @@ add_public_tablegen_target(MipsCommonTableGen)
add_llvm_target(MipsCodeGen
Mips16FrameLowering.cpp
+ Mips16HardFloat.cpp
Mips16InstrInfo.cpp
Mips16ISelDAGToDAG.cpp
Mips16ISelLowering.cpp
@@ -46,10 +47,11 @@ add_llvm_target(MipsCodeGen
MipsSelectionDAGInfo.cpp
)
-add_dependencies(LLVMMipsCodeGen intrinsics_gen)
+add_dependencies(LLVMMipsCodeGen MipsCommonTableGen intrinsics_gen)
add_subdirectory(InstPrinter)
add_subdirectory(Disassembler)
add_subdirectory(TargetInfo)
add_subdirectory(MCTargetDesc)
add_subdirectory(AsmParser)
+
diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
index 0dba33a2767a..60508a8c4fcb 100644
--- a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
+++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -35,26 +35,33 @@ public:
///
MipsDisassemblerBase(const MCSubtargetInfo &STI, const MCRegisterInfo *Info,
bool bigEndian) :
- MCDisassembler(STI), RegInfo(Info), isBigEndian(bigEndian) {}
+ MCDisassembler(STI), RegInfo(Info),
+ IsN64(STI.getFeatureBits() & Mips::FeatureN64), isBigEndian(bigEndian) {}
virtual ~MipsDisassemblerBase() {}
- const MCRegisterInfo *getRegInfo() const { return RegInfo; }
+ const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); }
+
+ bool isN64() const { return IsN64; }
private:
- const MCRegisterInfo *RegInfo;
+ OwningPtr<const MCRegisterInfo> RegInfo;
+ bool IsN64;
protected:
bool isBigEndian;
};
/// MipsDisassembler - a disassembler class for Mips32.
class MipsDisassembler : public MipsDisassemblerBase {
+ bool IsMicroMips;
public:
/// Constructor - Initializes the disassembler.
///
MipsDisassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info,
bool bigEndian) :
- MipsDisassemblerBase(STI, Info, bigEndian) {}
+ MipsDisassemblerBase(STI, Info, bigEndian) {
+ IsMicroMips = STI.getFeatureBits() & Mips::FeatureMicroMips;
+ }
/// getInstruction - See MCDisassembler.
virtual DecodeStatus getInstruction(MCInst &instr,
@@ -88,25 +95,30 @@ public:
// Forward declare these because the autogenerated code will reference them.
// Definitions are further down.
-static DecodeStatus DecodeCPU64RegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeGPR64RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeCPU16RegsRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
-static DecodeStatus DecodeDSPRegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodePtrRegisterClass(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeDSPRRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst,
unsigned RegNo,
@@ -118,11 +130,21 @@ static DecodeStatus DecodeFGR32RegisterClass(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeFGRH32RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeFCCRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeHWRegsRegisterClass(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -133,47 +155,88 @@ static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst,
- unsigned Insn,
+static DecodeStatus DecodeACC64DSPRegisterClass(MCInst &Inst,
+ unsigned RegNo,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeACRegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeHI32DSPRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
-static DecodeStatus DecodeHIRegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeLO32DSPRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
-static DecodeStatus DecodeLORegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeMSA128BRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMSA128HRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMSA128WRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMSA128DRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMSACtrlRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeBranchTarget(MCInst &Inst,
unsigned Offset,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeBC1(MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder);
-
-
static DecodeStatus DecodeJumpTarget(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder);
+// DecodeBranchTargetMM - Decode microMIPS branch offset, which is
+// shifted left by 1 bit.
+static DecodeStatus DecodeBranchTargetMM(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder);
+
+// DecodeJumpTargetMM - Decode microMIPS jump target, which is
+// shifted left by 1 bit.
+static DecodeStatus DecodeJumpTargetMM(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeMem(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMemMMImm16(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeFMem(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
@@ -183,10 +246,12 @@ static DecodeStatus DecodeSimm16(MCInst &Inst,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeCondCode(MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder);
+// Decode the immediate field of an LSA instruction which
+// is off by one.
+static DecodeStatus DecodeLSAImm(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeInsSize(MCInst &Inst,
unsigned Insn,
@@ -248,11 +313,12 @@ static DecodeStatus readInstruction32(const MemoryObject &region,
uint64_t address,
uint64_t &size,
uint32_t &insn,
- bool isBigEndian) {
+ bool isBigEndian,
+ bool IsMicroMips) {
uint8_t Bytes[4];
// We want to read exactly 4 Bytes of data.
- if (region.readBytes(address, 4, (uint8_t*)Bytes, NULL) == -1) {
+ if (region.readBytes(address, 4, Bytes) == -1) {
size = 0;
return MCDisassembler::Fail;
}
@@ -266,10 +332,20 @@ static DecodeStatus readInstruction32(const MemoryObject &region,
}
else {
// Encoded as a little-endian 32-bit word in the stream.
- insn = (Bytes[0] << 0) |
- (Bytes[1] << 8) |
- (Bytes[2] << 16) |
- (Bytes[3] << 24);
+ // Little-endian byte ordering:
+ // mips32r2: 4 | 3 | 2 | 1
+ // microMIPS: 2 | 1 | 4 | 3
+ if (IsMicroMips) {
+ insn = (Bytes[2] << 0) |
+ (Bytes[3] << 8) |
+ (Bytes[0] << 16) |
+ (Bytes[1] << 24);
+ } else {
+ insn = (Bytes[0] << 0) |
+ (Bytes[1] << 8) |
+ (Bytes[2] << 16) |
+ (Bytes[3] << 24);
+ }
}
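// For stream bytes 0x11 0x22 0x33 0x44 this yields 0x44332211 for mips32r2
// and 0x22114433 for microMIPS.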
return MCDisassembler::Success;
@@ -285,10 +361,21 @@ MipsDisassembler::getInstruction(MCInst &instr,
uint32_t Insn;
DecodeStatus Result = readInstruction32(Region, Address, Size,
- Insn, isBigEndian);
+ Insn, isBigEndian, IsMicroMips);
if (Result == MCDisassembler::Fail)
return MCDisassembler::Fail;
+ if (IsMicroMips) {
+ // Calling the auto-generated decoder function.
+ Result = decodeInstruction(DecoderTableMicroMips32, instr, Insn, Address,
+ this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+ return MCDisassembler::Fail;
+ }
+
// Calling the auto-generated decoder function.
Result = decodeInstruction(DecoderTableMips32, instr, Insn, Address,
this, STI);
@@ -310,7 +397,7 @@ Mips64Disassembler::getInstruction(MCInst &instr,
uint32_t Insn;
DecodeStatus Result = readInstruction32(Region, Address, Size,
- Insn, isBigEndian);
+ Insn, isBigEndian, false);
if (Result == MCDisassembler::Fail)
return MCDisassembler::Fail;
@@ -346,35 +433,45 @@ static DecodeStatus DecodeCPU16RegsRegisterClass(MCInst &Inst,
}
-static DecodeStatus DecodeCPU64RegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
+static DecodeStatus DecodeGPR64RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
if (RegNo > 31)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, Mips::CPU64RegsRegClassID, RegNo);
+ unsigned Reg = getReg(Decoder, Mips::GPR64RegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
+static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
if (RegNo > 31)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, Mips::CPURegsRegClassID, RegNo);
+ unsigned Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeDSPRegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
- return DecodeCPURegsRegisterClass(Inst, RegNo, Address, Decoder);
+static DecodeStatus DecodePtrRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (static_cast<const MipsDisassembler *>(Decoder)->isN64())
+ return DecodeGPR64RegisterClass(Inst, RegNo, Address, Decoder);
+
+ return DecodeGPR32RegisterClass(Inst, RegNo, Address, Decoder);
+}
+
+static DecodeStatus DecodeDSPRRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return DecodeGPR32RegisterClass(Inst, RegNo, Address, Decoder);
}
static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst,
@@ -401,11 +498,37 @@ static DecodeStatus DecodeFGR32RegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeFGRH32RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::FGRH32RegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder) {
- Inst.addOperand(MCOperand::CreateReg(RegNo));
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+ unsigned Reg = getReg(Decoder, Mips::CCRRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeFCCRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 7)
+ return MCDisassembler::Fail;
+ unsigned Reg = getReg(Decoder, Mips::FCCRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
@@ -417,8 +540,8 @@ static DecodeStatus DecodeMem(MCInst &Inst,
unsigned Reg = fieldFromInstruction(Insn, 16, 5);
unsigned Base = fieldFromInstruction(Insn, 21, 5);
- Reg = getReg(Decoder, Mips::CPURegsRegClassID, Reg);
- Base = getReg(Decoder, Mips::CPURegsRegClassID, Base);
+ Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
if(Inst.getOpcode() == Mips::SC){
Inst.addOperand(MCOperand::CreateReg(Reg));
@@ -431,6 +554,58 @@ static DecodeStatus DecodeMem(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ int Offset = SignExtend32<10>(fieldFromInstruction(Insn, 16, 10));
+ unsigned Reg = fieldFromInstruction(Insn, 6, 5);
+ unsigned Base = fieldFromInstruction(Insn, 11, 5);
+
+ Reg = getReg(Decoder, Mips::MSA128BRegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = SignExtend32<12>(Insn & 0x0fff);
+ unsigned Reg = fieldFromInstruction(Insn, 21, 5);
+ unsigned Base = fieldFromInstruction(Insn, 16, 5);
+
+ Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMemMMImm16(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = SignExtend32<16>(Insn & 0xffff);
+ unsigned Reg = fieldFromInstruction(Insn, 21, 5);
+ unsigned Base = fieldFromInstruction(Insn, 16, 5);
+
+ Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeFMem(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -440,7 +615,7 @@ static DecodeStatus DecodeFMem(MCInst &Inst,
unsigned Base = fieldFromInstruction(Insn, 21, 5);
Reg = getReg(Decoder, Mips::FGR64RegClassID, Reg);
- Base = getReg(Decoder, Mips::CPURegsRegClassID, Base);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
Inst.addOperand(MCOperand::CreateReg(Reg));
Inst.addOperand(MCOperand::CreateReg(Base));
@@ -461,15 +636,6 @@ static DecodeStatus DecodeHWRegsRegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeCondCode(MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder) {
- int CondCode = Insn & 0xf;
- Inst.addOperand(MCOperand::CreateImm(CondCode));
- return MCDisassembler::Success;
-}
-
static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -483,49 +649,98 @@ static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst,
+static DecodeStatus DecodeACC64DSPRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder) {
- //Currently only hardware register 29 is supported
- if (RegNo != 29)
- return MCDisassembler::Fail;
- Inst.addOperand(MCOperand::CreateReg(Mips::HWR29_64));
+ if (RegNo >= 4)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::ACC64DSPRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeACRegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
+static DecodeStatus DecodeHI32DSPRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
if (RegNo >= 4)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, Mips::ACRegsDSPRegClassID, RegNo);
+ unsigned Reg = getReg(Decoder, Mips::HI32DSPRegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeHIRegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
+static DecodeStatus DecodeLO32DSPRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
if (RegNo >= 4)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, Mips::HIRegsDSPRegClassID, RegNo);
+ unsigned Reg = getReg(Decoder, Mips::LO32DSPRegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeLORegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
- if (RegNo >= 4)
+static DecodeStatus DecodeMSA128BRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::MSA128BRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMSA128HRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::MSA128HRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMSA128WRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::MSA128WRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMSA128DRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::MSA128DRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMSACtrlRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 7)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, Mips::LORegsDSPRegClassID, RegNo);
+ unsigned Reg = getReg(Decoder, Mips::MSACtrlRegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
@@ -540,16 +755,6 @@ static DecodeStatus DecodeBranchTarget(MCInst &Inst,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeBC1(MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder) {
- unsigned BranchOffset = Insn & 0xffff;
- BranchOffset = SignExtend32<18>(BranchOffset << 2) + 4;
- Inst.addOperand(MCOperand::CreateImm(BranchOffset));
- return MCDisassembler::Success;
-}
-
static DecodeStatus DecodeJumpTarget(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -560,6 +765,24 @@ static DecodeStatus DecodeJumpTarget(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeBranchTargetMM(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned BranchOffset = Offset & 0xffff;
+ BranchOffset = SignExtend32<18>(BranchOffset << 1);
+ Inst.addOperand(MCOperand::CreateImm(BranchOffset));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeJumpTargetMM(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned JumpOffset = fieldFromInstruction(Insn, 0, 26) << 1;
+ Inst.addOperand(MCOperand::CreateImm(JumpOffset));
+ return MCDisassembler::Success;
+}
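// Both decoders above scale the encoded field by 2 because microMIPS
// instructions are halfword-aligned; e.g. a branch offset field of 4 decodes
// to a byte offset of 8.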
static DecodeStatus DecodeSimm16(MCInst &Inst,
unsigned Insn,
@@ -569,6 +792,15 @@ static DecodeStatus DecodeSimm16(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeLSAImm(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // We add one to the immediate field as it was encoded as 'imm - 1'.
+ Inst.addOperand(MCOperand::CreateImm(Insn + 1));
+ return MCDisassembler::Success;
+}
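// E.g. an encoded field value of 3 becomes an immediate operand of 4,
// matching the 1..4 range accepted by the assembler's parseLSAImm().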
+
static DecodeStatus DecodeInsSize(MCInst &Inst,
unsigned Insn,
uint64_t Address,
diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
index fc23cd380352..78845898997c 100644
--- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
+++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
@@ -26,6 +26,12 @@ using namespace llvm;
#define PRINT_ALIAS_INSTR
#include "MipsGenAsmWriter.inc"
+template<unsigned R>
+static bool isReg(const MCInst &MI, unsigned OpNo) {
+ assert(MI.getOperand(OpNo).isReg() && "Register operand expected.");
+ return MI.getOperand(OpNo).getReg() == R;
+}
+
const char* Mips::MipsFCCToString(Mips::CondCode CC) {
switch (CC) {
case FCOND_F:
@@ -80,7 +86,7 @@ void MipsInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
}
// Try to print any aliases first.
- if (!printAliasInstr(MI, O))
+ if (!printAliasInstr(MI, O) && !printAlias(*MI, O))
printInstruction(MI, O);
printAnnotation(O, Annot);
@@ -152,11 +158,6 @@ static void printExpr(const MCExpr *Expr, raw_ostream &OS) {
OS << ')';
}
-void MipsInstPrinter::printCPURegs(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
- printRegName(O, MI->getOperand(OpNo).getReg());
-}
-
void MipsInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
@@ -183,6 +184,15 @@ void MipsInstPrinter::printUnsignedImm(const MCInst *MI, int opNum,
printOperand(MI, opNum, O);
}
+void MipsInstPrinter::printUnsignedImm8(const MCInst *MI, int opNum,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(opNum);
+ if (MO.isImm())
+ O << (unsigned short int)(unsigned char)MO.getImm();
+ else
+ printOperand(MI, opNum, O);
+}
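// The double cast truncates the immediate to its low 8 bits and prints it as
// an unsigned value, so e.g. -1 is printed as 255.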
+
void MipsInstPrinter::
printMemOperand(const MCInst *MI, int opNum, raw_ostream &O) {
// Load/Store memory operands -- imm($reg)
@@ -209,3 +219,70 @@ printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O) {
const MCOperand& MO = MI->getOperand(opNum);
O << MipsFCCToString((Mips::CondCode)MO.getImm());
}
+
+void MipsInstPrinter::
+printSHFMask(const MCInst *MI, int opNum, raw_ostream &O) {
+ llvm_unreachable("TODO");
+}
+
+bool MipsInstPrinter::printAlias(const char *Str, const MCInst &MI,
+ unsigned OpNo, raw_ostream &OS) {
+ OS << "\t" << Str << "\t";
+ printOperand(&MI, OpNo, OS);
+ return true;
+}
+
+bool MipsInstPrinter::printAlias(const char *Str, const MCInst &MI,
+ unsigned OpNo0, unsigned OpNo1,
+ raw_ostream &OS) {
+ printAlias(Str, MI, OpNo0, OS);
+ OS << ", ";
+ printOperand(&MI, OpNo1, OS);
+ return true;
+}
+
+bool MipsInstPrinter::printAlias(const MCInst &MI, raw_ostream &OS) {
+ switch (MI.getOpcode()) {
+ case Mips::BEQ:
+ // beq $zero, $zero, $L2 => b $L2
+ // beq $r0, $zero, $L2 => beqz $r0, $L2
+ return (isReg<Mips::ZERO>(MI, 0) && isReg<Mips::ZERO>(MI, 1) &&
+ printAlias("b", MI, 2, OS)) ||
+ (isReg<Mips::ZERO>(MI, 1) && printAlias("beqz", MI, 0, 2, OS));
+ case Mips::BEQ64:
+ // beq $r0, $zero, $L2 => beqz $r0, $L2
+ return isReg<Mips::ZERO_64>(MI, 1) && printAlias("beqz", MI, 0, 2, OS);
+ case Mips::BNE:
+ // bne $r0, $zero, $L2 => bnez $r0, $L2
+ return isReg<Mips::ZERO>(MI, 1) && printAlias("bnez", MI, 0, 2, OS);
+ case Mips::BNE64:
+ // bne $r0, $zero, $L2 => bnez $r0, $L2
+ return isReg<Mips::ZERO_64>(MI, 1) && printAlias("bnez", MI, 0, 2, OS);
+ case Mips::BGEZAL:
+ // bgezal $zero, $L1 => bal $L1
+ return isReg<Mips::ZERO>(MI, 0) && printAlias("bal", MI, 1, OS);
+ case Mips::BC1T:
+ // bc1t $fcc0, $L1 => bc1t $L1
+ return isReg<Mips::FCC0>(MI, 0) && printAlias("bc1t", MI, 1, OS);
+ case Mips::BC1F:
+ // bc1f $fcc0, $L1 => bc1f $L1
+ return isReg<Mips::FCC0>(MI, 0) && printAlias("bc1f", MI, 1, OS);
+ case Mips::JALR:
+ // jalr $ra, $r1 => jalr $r1
+ return isReg<Mips::RA>(MI, 0) && printAlias("jalr", MI, 1, OS);
+ case Mips::JALR64:
+ // jalr $ra, $r1 => jalr $r1
+ return isReg<Mips::RA_64>(MI, 0) && printAlias("jalr", MI, 1, OS);
+ case Mips::NOR:
+ case Mips::NOR_MM:
+ // nor $r0, $r1, $zero => not $r0, $r1
+ return isReg<Mips::ZERO>(MI, 2) && printAlias("not", MI, 0, 1, OS);
+ case Mips::NOR64:
+ // nor $r0, $r1, $zero => not $r0, $r1
+ return isReg<Mips::ZERO_64>(MI, 2) && printAlias("not", MI, 0, 1, OS);
+ case Mips::OR:
+ // or $r0, $r1, $zero => move $r0, $r1
+ return isReg<Mips::ZERO>(MI, 2) && printAlias("move", MI, 0, 1, OS);
+ default: return false;
+ }
+}
diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.h b/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
index d1b561f9764e..f75ae249c3ee 100644
--- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
+++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
@@ -87,16 +87,23 @@ public:
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
- void printCPURegs(const MCInst *MI, unsigned OpNo, raw_ostream &O);
bool printAliasInstr(const MCInst *MI, raw_ostream &OS);
private:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printUnsignedImm(const MCInst *MI, int opNum, raw_ostream &O);
+ void printUnsignedImm8(const MCInst *MI, int opNum, raw_ostream &O);
void printMemOperand(const MCInst *MI, int opNum, raw_ostream &O);
void printMemOperandEA(const MCInst *MI, int opNum, raw_ostream &O);
void printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O);
+ void printSHFMask(const MCInst *MI, int opNum, raw_ostream &O);
+
+ bool printAlias(const char *Str, const MCInst &MI, unsigned OpNo,
+ raw_ostream &OS);
+ bool printAlias(const char *Str, const MCInst &MI, unsigned OpNo0,
+ unsigned OpNo1, raw_ostream &OS);
+ bool printAlias(const MCInst &MI, raw_ostream &OS);
};
} // end namespace llvm
diff --git a/lib/Target/Mips/MCTargetDesc/CMakeLists.txt b/lib/Target/Mips/MCTargetDesc/CMakeLists.txt
index 4212c94a5578..911674890c73 100644
--- a/lib/Target/Mips/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/Mips/MCTargetDesc/CMakeLists.txt
@@ -1,12 +1,11 @@
add_llvm_library(LLVMMipsDesc
MipsAsmBackend.cpp
- MipsDirectObjLower.cpp
MipsMCAsmInfo.cpp
MipsMCCodeEmitter.cpp
MipsMCTargetDesc.cpp
MipsELFObjectWriter.cpp
MipsReginfo.cpp
- MipsELFStreamer.cpp
+ MipsTargetStreamer.cpp
)
add_dependencies(LLVMMipsDesc MipsCommonTableGen)
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index 0b13607a572d..3e70b23dccc6 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -45,6 +45,10 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case Mips::fixup_Mips_GOT_DISP:
case Mips::fixup_Mips_GOT_LO16:
case Mips::fixup_Mips_CALL_LO16:
+ case Mips::fixup_MICROMIPS_LO16:
+ case Mips::fixup_MICROMIPS_GOT_PAGE:
+ case Mips::fixup_MICROMIPS_GOT_OFST:
+ case Mips::fixup_MICROMIPS_GOT_DISP:
break;
case Mips::fixup_Mips_PC16:
// So far we are only using this type for branches.
@@ -65,6 +69,7 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case Mips::fixup_Mips_GOT_Local:
case Mips::fixup_Mips_GOT_HI16:
case Mips::fixup_Mips_CALL_HI16:
+ case Mips::fixup_MICROMIPS_HI16:
// Get the 2nd 16-bits. Also add 1 if bit 15 is 1.
Value = ((Value + 0x8000) >> 16) & 0xffff;
break;
@@ -76,6 +81,13 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
// Get the 4th 16-bits.
Value = ((Value + 0x800080008000LL) >> 48) & 0xffff;
break;
+ case Mips::fixup_MICROMIPS_26_S1:
+ Value >>= 1;
+ break;
+ case Mips::fixup_MICROMIPS_PC16_S1:
+ Value -= 4;
+ Value >>= 1;
+ break;
}
return Value;
@@ -188,7 +200,20 @@ public:
{ "fixup_Mips_GOT_HI16", 0, 16, 0 },
{ "fixup_Mips_GOT_LO16", 0, 16, 0 },
{ "fixup_Mips_CALL_HI16", 0, 16, 0 },
- { "fixup_Mips_CALL_LO16", 0, 16, 0 }
+ { "fixup_Mips_CALL_LO16", 0, 16, 0 },
+ { "fixup_MICROMIPS_26_S1", 0, 26, 0 },
+ { "fixup_MICROMIPS_HI16", 0, 16, 0 },
+ { "fixup_MICROMIPS_LO16", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT16", 0, 16, 0 },
+ { "fixup_MICROMIPS_PC16_S1", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_CALL16", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT_DISP", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT_PAGE", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT_OFST", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_DTPREL_HI16", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_DTPREL_LO16", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_TPREL_HI16", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_TPREL_LO16", 0, 16, 0 }
};
if (Kind < FirstTargetFixupKind)
@@ -253,25 +278,33 @@ public:
} // namespace
// MCAsmBackend
-MCAsmBackend *llvm::createMipsAsmBackendEL32(const Target &T, StringRef TT,
+MCAsmBackend *llvm::createMipsAsmBackendEL32(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
StringRef CPU) {
return new MipsAsmBackend(T, Triple(TT).getOS(),
/*IsLittle*/true, /*Is64Bit*/false);
}
-MCAsmBackend *llvm::createMipsAsmBackendEB32(const Target &T, StringRef TT,
+MCAsmBackend *llvm::createMipsAsmBackendEB32(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
StringRef CPU) {
return new MipsAsmBackend(T, Triple(TT).getOS(),
/*IsLittle*/false, /*Is64Bit*/false);
}
-MCAsmBackend *llvm::createMipsAsmBackendEL64(const Target &T, StringRef TT,
+MCAsmBackend *llvm::createMipsAsmBackendEL64(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
StringRef CPU) {
return new MipsAsmBackend(T, Triple(TT).getOS(),
/*IsLittle*/true, /*Is64Bit*/true);
}
-MCAsmBackend *llvm::createMipsAsmBackendEB64(const Target &T, StringRef TT,
+MCAsmBackend *llvm::createMipsAsmBackendEB64(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
StringRef CPU) {
return new MipsAsmBackend(T, Triple(TT).getOS(),
/*IsLittle*/false, /*Is64Bit*/true);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp b/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp
deleted file mode 100644
index 15c4282030db..000000000000
--- a/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//===-- MipsDirectObjLower.cpp - Mips LLVM direct object lowering -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains code to lower Mips MCInst records that are normally
-// left to the assembler to lower such as large shifts.
-//
-//===----------------------------------------------------------------------===//
-#include "MipsInstrInfo.h"
-#include "MCTargetDesc/MipsDirectObjLower.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCStreamer.h"
-
-using namespace llvm;
-
-// If the D<shift> instruction has a shift amount that is greater
-// than 31 (checked in calling routine), lower it to a D<shift>32 instruction
-void Mips::LowerLargeShift(MCInst& Inst) {
-
- assert(Inst.getNumOperands() == 3 && "Invalid no. of operands for shift!");
- assert(Inst.getOperand(2).isImm());
-
- int64_t Shift = Inst.getOperand(2).getImm();
- if (Shift <= 31)
- return; // Do nothing
- Shift -= 32;
-
- // saminus32
- Inst.getOperand(2).setImm(Shift);
-
- switch (Inst.getOpcode()) {
- default:
- // Calling function is not synchronized
- llvm_unreachable("Unexpected shift instruction");
- case Mips::DSLL:
- Inst.setOpcode(Mips::DSLL32);
- return;
- case Mips::DSRL:
- Inst.setOpcode(Mips::DSRL32);
- return;
- case Mips::DSRA:
- Inst.setOpcode(Mips::DSRA32);
- return;
- }
-}
-
-// Pick a DEXT or DINS instruction variant based on the pos and size operands
-void Mips::LowerDextDins(MCInst& InstIn) {
- int Opcode = InstIn.getOpcode();
-
- if (Opcode == Mips::DEXT)
- assert(InstIn.getNumOperands() == 4 &&
- "Invalid no. of machine operands for DEXT!");
- else // Only DEXT and DINS are possible
- assert(InstIn.getNumOperands() == 5 &&
- "Invalid no. of machine operands for DINS!");
-
- assert(InstIn.getOperand(2).isImm());
- int64_t pos = InstIn.getOperand(2).getImm();
- assert(InstIn.getOperand(3).isImm());
- int64_t size = InstIn.getOperand(3).getImm();
-
- if (size <= 32) {
- if (pos < 32) // DEXT/DINS, do nothing
- return;
- // DEXTU/DINSU
- InstIn.getOperand(2).setImm(pos - 32);
- InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTU : Mips::DINSU);
- return;
- }
- // DEXTM/DINSM
- assert(pos < 32 && "DEXT/DINS cannot have both size and pos > 32");
- InstIn.getOperand(3).setImm(size - 32);
- InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTM : Mips::DINSM);
- return;
-}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h b/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h
deleted file mode 100644
index 8813cc9ac7a4..000000000000
--- a/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h
+++ /dev/null
@@ -1,28 +0,0 @@
-//===-- MipsDirectObjLower.h - Mips LLVM direct object lowering *- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MIPSDIRECTOBJLOWER_H
-#define MIPSDIRECTOBJLOWER_H
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Compiler.h"
-
-namespace llvm {
- class MCInst;
- class MCStreamer;
-
- namespace Mips {
- /// MipsDirectObjLower - This name space is used to lower MCInstr in cases
- // where the assembler usually finishes the lowering
- // such as large shifts.
- void LowerLargeShift(MCInst &Inst);
- void LowerDextDins(MCInst &Inst);
- }
-}
-
-#endif
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 6471b51583ce..83c7d4bcc3c6 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -183,6 +183,45 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
case Mips::fixup_Mips_CALL_LO16:
Type = ELF::R_MIPS_CALL_LO16;
break;
+ case Mips::fixup_MICROMIPS_26_S1:
+ Type = ELF::R_MICROMIPS_26_S1;
+ break;
+ case Mips::fixup_MICROMIPS_HI16:
+ Type = ELF::R_MICROMIPS_HI16;
+ break;
+ case Mips::fixup_MICROMIPS_LO16:
+ Type = ELF::R_MICROMIPS_LO16;
+ break;
+ case Mips::fixup_MICROMIPS_GOT16:
+ Type = ELF::R_MICROMIPS_GOT16;
+ break;
+ case Mips::fixup_MICROMIPS_PC16_S1:
+ Type = ELF::R_MICROMIPS_PC16_S1;
+ break;
+ case Mips::fixup_MICROMIPS_CALL16:
+ Type = ELF::R_MICROMIPS_CALL16;
+ break;
+ case Mips::fixup_MICROMIPS_GOT_DISP:
+ Type = ELF::R_MICROMIPS_GOT_DISP;
+ break;
+ case Mips::fixup_MICROMIPS_GOT_PAGE:
+ Type = ELF::R_MICROMIPS_GOT_PAGE;
+ break;
+ case Mips::fixup_MICROMIPS_GOT_OFST:
+ Type = ELF::R_MICROMIPS_GOT_OFST;
+ break;
+ case Mips::fixup_MICROMIPS_TLS_DTPREL_HI16:
+ Type = ELF::R_MICROMIPS_TLS_DTPREL_HI16;
+ break;
+ case Mips::fixup_MICROMIPS_TLS_DTPREL_LO16:
+ Type = ELF::R_MICROMIPS_TLS_DTPREL_LO16;
+ break;
+ case Mips::fixup_MICROMIPS_TLS_TPREL_HI16:
+ Type = ELF::R_MICROMIPS_TLS_TPREL_HI16;
+ break;
+ case Mips::fixup_MICROMIPS_TLS_TPREL_LO16:
+ Type = ELF::R_MICROMIPS_TLS_TPREL_LO16;
+ break;
}
return Type;
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp
deleted file mode 100644
index c33bc9ae3034..000000000000
--- a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-//===-- MipsELFStreamer.cpp - MipsELFStreamer ---------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===-------------------------------------------------------------------===//
-#include "MCTargetDesc/MipsELFStreamer.h"
-#include "MipsSubtarget.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCELF.h"
-#include "llvm/MC/MCELFSymbolFlags.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Support/ErrorHandling.h"
-
-namespace llvm {
-
- MCELFStreamer* createMipsELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack) {
- MipsELFStreamer *S = new MipsELFStreamer(Context, TAB, OS, Emitter,
- RelaxAll, NoExecStack);
- return S;
- }
-
- // For llc. Set a group of ELF header flags
- void
- MipsELFStreamer::emitELFHeaderFlagsCG(const MipsSubtarget &Subtarget) {
-
- if (hasRawTextSupport())
- return;
-
- // Update e_header flags
- MCAssembler& MCA = getAssembler();
- unsigned EFlags = MCA.getELFHeaderEFlags();
-
- if (Subtarget.inMips16Mode())
- EFlags |= ELF::EF_MIPS_ARCH_ASE_M16;
- else
- EFlags |= ELF::EF_MIPS_NOREORDER;
-
- // Architecture
- if (Subtarget.hasMips64r2())
- EFlags |= ELF::EF_MIPS_ARCH_64R2;
- else if (Subtarget.hasMips64())
- EFlags |= ELF::EF_MIPS_ARCH_64;
- else if (Subtarget.hasMips32r2())
- EFlags |= ELF::EF_MIPS_ARCH_32R2;
- else
- EFlags |= ELF::EF_MIPS_ARCH_32;
-
- if (Subtarget.inMicroMipsMode())
- EFlags |= ELF::EF_MIPS_MICROMIPS;
-
- // ABI
- if (Subtarget.isABI_O32())
- EFlags |= ELF::EF_MIPS_ABI_O32;
-
- // Relocation Model
- Reloc::Model RM = Subtarget.getRelocationModel();
- if (RM == Reloc::PIC_ || RM == Reloc::Default)
- EFlags |= ELF::EF_MIPS_PIC;
- else if (RM == Reloc::Static)
- ; // Do nothing for Reloc::Static
- else
- llvm_unreachable("Unsupported relocation model for e_flags");
-
- MCA.setELFHeaderEFlags(EFlags);
- }
-
- // For llc. Set a symbol's STO flags
- void
- MipsELFStreamer::emitMipsSTOCG(const MipsSubtarget &Subtarget,
- MCSymbol *Sym,
- unsigned Val) {
-
- if (hasRawTextSupport())
- return;
-
- MCSymbolData &Data = getOrCreateSymbolData(Sym);
- // The "other" values are stored in the last 6 bits of the second byte
- // The traditional defines for STO values assume the full byte and thus
- // the shift to pack it.
- MCELF::setOther(Data, Val >> 2);
- }
-
-} // namespace llvm
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h
deleted file mode 100644
index b10ccc78e665..000000000000
--- a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//=== MipsELFStreamer.h - MipsELFStreamer ------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENCE.TXT for details.
-//
-//===-------------------------------------------------------------------===//
-#ifndef MIPSELFSTREAMER_H_
-#define MIPSELFSTREAMER_H_
-
-#include "llvm/MC/MCELFStreamer.h"
-
-namespace llvm {
-class MipsAsmPrinter;
-class MipsSubtarget;
-class MCSymbol;
-
-class MipsELFStreamer : public MCELFStreamer {
-public:
- MipsELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack)
- : MCELFStreamer(SK_MipsELFStreamer, Context, TAB, OS, Emitter) {
- }
-
- ~MipsELFStreamer() {}
- void emitELFHeaderFlagsCG(const MipsSubtarget &Subtarget);
- void emitMipsSTOCG(const MipsSubtarget &Subtarget,
- MCSymbol *Sym,
- unsigned Val);
-
- static bool classof(const MCStreamer *S) {
- return S->getKind() == SK_MipsELFStreamer;
- }
-};
-
- MCELFStreamer* createMipsELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack);
-}
-
-#endif /* MIPSELFSTREAMER_H_ */
diff --git a/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
index f96390043a3b..6ed44b74cc4b 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
@@ -128,6 +128,45 @@ namespace Mips {
// resulting in - R_MIPS_CALL_LO16
fixup_Mips_CALL_LO16,
+ // resulting in - R_MICROMIPS_26_S1
+ fixup_MICROMIPS_26_S1,
+
+ // resulting in - R_MICROMIPS_HI16
+ fixup_MICROMIPS_HI16,
+
+ // resulting in - R_MICROMIPS_LO16
+ fixup_MICROMIPS_LO16,
+
+ // resulting in - R_MICROMIPS_GOT16
+ fixup_MICROMIPS_GOT16,
+
+ // resulting in - R_MICROMIPS_PC16_S1
+ fixup_MICROMIPS_PC16_S1,
+
+ // resulting in - R_MICROMIPS_CALL16
+ fixup_MICROMIPS_CALL16,
+
+ // resulting in - R_MICROMIPS_GOT_DISP
+ fixup_MICROMIPS_GOT_DISP,
+
+ // resulting in - R_MICROMIPS_GOT_PAGE
+ fixup_MICROMIPS_GOT_PAGE,
+
+ // resulting in - R_MICROMIPS_GOT_OFST
+ fixup_MICROMIPS_GOT_OFST,
+
+ // resulting in - R_MICROMIPS_TLS_DTPREL_HI16
+ fixup_MICROMIPS_TLS_DTPREL_HI16,
+
+ // resulting in - R_MICROMIPS_TLS_DTPREL_LO16
+ fixup_MICROMIPS_TLS_DTPREL_LO16,
+
+ // resulting in - R_MICROMIPS_TLS_TPREL_HI16
+ fixup_MICROMIPS_TLS_TPREL_HI16,
+
+ // resulting in - R_MICROMIPS_TLS_TPREL_LO16
+ fixup_MICROMIPS_TLS_TPREL_LO16,
+
// Marker
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
index 5d4b32d30578..6aa3c762d9db 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
@@ -18,7 +18,7 @@ using namespace llvm;
void MipsMCAsmInfo::anchor() { }
-MipsMCAsmInfo::MipsMCAsmInfo(const Target &T, StringRef TT) {
+MipsMCAsmInfo::MipsMCAsmInfo(StringRef TT) {
Triple TheTriple(TT);
if ((TheTriple.getArch() == Triple::mips) ||
(TheTriple.getArch() == Triple::mips64))
@@ -38,7 +38,6 @@ MipsMCAsmInfo::MipsMCAsmInfo(const Target &T, StringRef TT) {
ZeroDirective = "\t.space\t";
GPRel32Directive = "\t.gpword\t";
GPRel64Directive = "\t.gpdword\t";
- WeakRefDirective = "\t.weak\t";
DebugLabelSuffix = "=.";
SupportsDebugInformation = true;
ExceptionsType = ExceptionHandling::DwarfCFI;
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
index e1d878936f31..1000113351b4 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
@@ -14,16 +14,15 @@
#ifndef MIPSTARGETASMINFO_H
#define MIPSTARGETASMINFO_H
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
class StringRef;
- class Target;
- class MipsMCAsmInfo : public MCAsmInfo {
+ class MipsMCAsmInfo : public MCAsmInfoELF {
virtual void anchor();
public:
- explicit MipsMCAsmInfo(const Target &T, StringRef TT);
+ explicit MipsMCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index 9460731c1914..66428bdfa747 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -13,7 +13,6 @@
//
#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/MipsBaseInfo.h"
-#include "MCTargetDesc/MipsDirectObjLower.h"
#include "MCTargetDesc/MipsFixupKinds.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/ADT/APFloat.h"
@@ -40,11 +39,14 @@ class MipsMCCodeEmitter : public MCCodeEmitter {
MCContext &Ctx;
const MCSubtargetInfo &STI;
bool IsLittleEndian;
+ bool IsMicroMips;
public:
MipsMCCodeEmitter(const MCInstrInfo &mcii, MCContext &Ctx_,
const MCSubtargetInfo &sti, bool IsLittle) :
- MCII(mcii), Ctx(Ctx_), STI (sti), IsLittleEndian(IsLittle) {}
+ MCII(mcii), Ctx(Ctx_), STI (sti), IsLittleEndian(IsLittle) {
+ IsMicroMips = STI.getFeatureBits() & Mips::FeatureMicroMips;
+ }
~MipsMCCodeEmitter() {}
@@ -54,9 +56,17 @@ public:
void EmitInstruction(uint64_t Val, unsigned Size, raw_ostream &OS) const {
// Output the instruction encoding in little endian byte order.
- for (unsigned i = 0; i < Size; ++i) {
- unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
- EmitByte((Val >> Shift) & 0xff, OS);
+ // Little-endian byte ordering:
+ // mips32r2: 4 | 3 | 2 | 1
+ // microMIPS: 2 | 1 | 4 | 3
+ if (IsLittleEndian && Size == 4 && IsMicroMips) {
+ EmitInstruction(Val>>16, 2, OS);
+ EmitInstruction(Val, 2, OS);
+ } else {
+ for (unsigned i = 0; i < Size; ++i) {
+ unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
+ EmitByte((Val >> Shift) & 0xff, OS);
+ }
}
}
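
A standalone sketch of the ordering implemented above, illustrative rather than part of the patch: a 32-bit little-endian microMIPS word is emitted as two little-endian half-words, most significant half first, which gives the 2 | 1 | 4 | 3 byte order from the comment.

#include <cstdint>
#include <cstdio>

// Emit a 32-bit microMIPS word for a little-endian target: two half-words,
// high half first, each half in little-endian byte order.
static void emitMicroMipsLE32(uint32_t Val) {
  uint16_t Halves[2] = { static_cast<uint16_t>(Val >> 16),
                         static_cast<uint16_t>(Val & 0xffff) };
  for (uint16_t H : Halves)
    std::printf("%02x %02x ", static_cast<unsigned>(H & 0xff),
                static_cast<unsigned>((H >> 8) & 0xff));
  std::printf("\n");
}

int main() {
  emitMicroMipsLE32(0x01020304); // prints "02 01 04 03"
  return 0;
}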
@@ -74,12 +84,24 @@ public:
unsigned getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
+  // getJumpTargetOpValueMM - Return binary encoding of the microMIPS jump
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getJumpTargetOpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
// getBranchTargetOpValue - Return binary encoding of the branch
// target operand. If the machine operand requires relocation,
// record the relocation and return zero.
unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
+  // getBranchTargetOpValueMM - Return binary encoding of the microMIPS branch
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTargetOpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
  // getMachineOpValue - Return binary encoding of operand. If the machine
// operand requires relocation, record the relocation and return zero.
unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
@@ -87,11 +109,17 @@ public:
unsigned getMemEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
unsigned getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
unsigned getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
+ // getLSAImmEncoding - Return binary encoding of LSA immediate.
+ unsigned getLSAImmEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
unsigned
getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const;
@@ -114,8 +142,74 @@ MCCodeEmitter *llvm::createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
return new MipsMCCodeEmitter(MCII, Ctx, STI, true);
}
+
+// If the D<shift> instruction has a shift amount that is greater
+// than 31 (checked in calling routine), lower it to a D<shift>32 instruction
+static void LowerLargeShift(MCInst& Inst) {
+
+ assert(Inst.getNumOperands() == 3 && "Invalid no. of operands for shift!");
+ assert(Inst.getOperand(2).isImm());
+
+ int64_t Shift = Inst.getOperand(2).getImm();
+ if (Shift <= 31)
+ return; // Do nothing
+ Shift -= 32;
+
+  // Shift amount minus 32, for the D<shift>32 form selected below
+ Inst.getOperand(2).setImm(Shift);
+
+ switch (Inst.getOpcode()) {
+ default:
+ // Calling function is not synchronized
+ llvm_unreachable("Unexpected shift instruction");
+ case Mips::DSLL:
+ Inst.setOpcode(Mips::DSLL32);
+ return;
+ case Mips::DSRL:
+ Inst.setOpcode(Mips::DSRL32);
+ return;
+ case Mips::DSRA:
+ Inst.setOpcode(Mips::DSRA32);
+ return;
+ case Mips::DROTR:
+ Inst.setOpcode(Mips::DROTR32);
+ return;
+ }
+}
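
The rule implemented by LowerLargeShift, shown as a self-contained sketch rather than the MCInst-based code: shift amounts of 32 to 63 select the corresponding "...32" opcode with the amount reduced by 32, so for example dsll with shamt 33 becomes dsll32 with shamt 1.

#include <cassert>
#include <string>
#include <utility>

// Sketch of LowerLargeShift's opcode/amount rewrite (names are illustrative).
static std::pair<std::string, int> lowerLargeShift(const std::string &Op,
                                                   int Shamt) {
  if (Shamt <= 31)
    return {Op, Shamt};           // encodable as-is, no rewrite
  return {Op + "32", Shamt - 32}; // e.g. DSLL -> DSLL32, DROTR -> DROTR32
}

int main() {
  assert(lowerLargeShift("DSLL", 33) ==
         std::make_pair(std::string("DSLL32"), 1));
  assert(lowerLargeShift("DSRA", 7) ==
         std::make_pair(std::string("DSRA"), 7));
  return 0;
}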
+
+// Pick a DEXT or DINS instruction variant based on the pos and size operands
+static void LowerDextDins(MCInst& InstIn) {
+ int Opcode = InstIn.getOpcode();
+
+ if (Opcode == Mips::DEXT)
+ assert(InstIn.getNumOperands() == 4 &&
+ "Invalid no. of machine operands for DEXT!");
+ else // Only DEXT and DINS are possible
+ assert(InstIn.getNumOperands() == 5 &&
+ "Invalid no. of machine operands for DINS!");
+
+ assert(InstIn.getOperand(2).isImm());
+ int64_t pos = InstIn.getOperand(2).getImm();
+ assert(InstIn.getOperand(3).isImm());
+ int64_t size = InstIn.getOperand(3).getImm();
+
+ if (size <= 32) {
+ if (pos < 32) // DEXT/DINS, do nothing
+ return;
+ // DEXTU/DINSU
+ InstIn.getOperand(2).setImm(pos - 32);
+ InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTU : Mips::DINSU);
+ return;
+ }
+ // DEXTM/DINSM
+ assert(pos < 32 && "DEXT/DINS cannot have both size and pos > 32");
+ InstIn.getOperand(3).setImm(size - 32);
+ InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTM : Mips::DINSM);
+ return;
+}
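
LowerDextDins, likewise moved into this file, picks the instruction variant from the pos and size immediates. A reduced sketch of that decision for DEXT; the helper is hypothetical and only illustrates the branching:

#include <cassert>
#include <string>

// Variant selection mirroring LowerDextDins for DEXT: DEXTU also rewrites
// pos to pos - 32, and DEXTM rewrites size to size - 32.
static std::string pickDextVariant(int Pos, int Size) {
  if (Size <= 32)
    return Pos < 32 ? "DEXT" : "DEXTU";
  return "DEXTM"; // asserts in the real code require pos < 32 here
}

int main() {
  assert(pickDextVariant(4, 16) == "DEXT");
  assert(pickDextVariant(40, 8) == "DEXTU");
  assert(pickDextVariant(4, 40) == "DEXTM");
  return 0;
}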
+
/// EncodeInstruction - Emit the instruction.
-/// Size the instruction (currently only 4 bytes
+/// Size of the instruction is obtained from Desc.getSize().
void MipsMCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const
@@ -131,14 +225,16 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case Mips::DSLL:
case Mips::DSRL:
case Mips::DSRA:
- Mips::LowerLargeShift(TmpInst);
+ case Mips::DROTR:
+ LowerLargeShift(TmpInst);
break;
// Double extract instruction is chosen by pos and size operands
case Mips::DEXT:
case Mips::DINS:
- Mips::LowerDextDins(TmpInst);
+ LowerDextDins(TmpInst);
}
+ unsigned long N = Fixups.size();
uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups);
// Check for unimplemented opcodes.
@@ -151,6 +247,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
if (STI.getFeatureBits() & Mips::FeatureMicroMips) {
int NewOpcode = Mips::Std2MicroMips (Opcode, Mips::Arch_micromips);
if (NewOpcode != -1) {
+ if (Fixups.size() > N)
+ Fixups.pop_back();
Opcode = NewOpcode;
TmpInst.setOpcode (NewOpcode);
Binary = getBinaryCodeForInstr(TmpInst, Fixups);
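
The N / pop_back bookkeeping above keeps the fixup list consistent when an instruction is re-encoded as its microMIPS variant: whatever fixup the first (standard) encoding pass recorded is discarded so that only the fixup from the second, microMIPS encoding pass survives. A toy illustration with stand-in integers instead of real MCFixup objects:

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<int> Fixups;
  std::size_t N = Fixups.size();     // fixup count before encoding

  Fixups.push_back(1);               // stand-in: fixup from standard encoding
  bool HasMicroMipsVariant = true;   // assume a microMIPS opcode exists
  if (HasMicroMipsVariant) {
    if (Fixups.size() > N)
      Fixups.pop_back();             // drop the stale standard fixup
    Fixups.push_back(2);             // stand-in: fixup from microMIPS encoding
  }

  assert(Fixups.size() == 1 && Fixups.back() == 2);
  return 0;
}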
@@ -188,6 +286,28 @@ getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
return 0;
}
+/// getBranchTargetOpValueMM - Return binary encoding of the microMIPS branch
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTargetOpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ // If the destination is an immediate, divide by 2.
+ if (MO.isImm()) return MO.getImm() >> 1;
+
+ assert(MO.isExpr() &&
+ "getBranchTargetOpValueMM expects only expressions or immediates");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::
+ fixup_MICROMIPS_PC16_S1)));
+ return 0;
+}
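
Both microMIPS target encoders here halve an immediate destination because microMIPS instructions are half-word aligned; non-immediate operands instead get a microMIPS fixup recorded. A trivial illustration with a made-up helper name:

#include <cassert>
#include <cstdint>

// microMIPS branch/jump target immediates are expressed in half-words.
static unsigned encodeMicroMipsTarget(int64_t ByteOffset) {
  return static_cast<unsigned>(ByteOffset >> 1);
}

int main() {
  assert(encodeMicroMipsTarget(8) == 4);
  assert(encodeMicroMipsTarget(0x100) == 0x80);
  return 0;
}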
+
/// getJumpTargetOpValue - Return binary encoding of the jump
/// target operand. If the machine operand requires relocation,
/// record the relocation and return zero.
@@ -209,6 +329,23 @@ getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
}
unsigned MipsMCCodeEmitter::
+getJumpTargetOpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ // If the destination is an immediate, divide by 2.
+ if (MO.isImm()) return MO.getImm() >> 1;
+
+ assert(MO.isExpr() &&
+ "getJumpTargetOpValueMM expects only expressions or an immediate");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_MICROMIPS_26_S1)));
+ return 0;
+}
+
+unsigned MipsMCCodeEmitter::
getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const {
int64_t Res;
@@ -238,31 +375,39 @@ getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const {
FixupKind = Mips::fixup_Mips_GPOFF_LO;
break;
case MCSymbolRefExpr::VK_Mips_GOT_PAGE :
- FixupKind = Mips::fixup_Mips_GOT_PAGE;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT_PAGE
+ : Mips::fixup_Mips_GOT_PAGE;
break;
case MCSymbolRefExpr::VK_Mips_GOT_OFST :
- FixupKind = Mips::fixup_Mips_GOT_OFST;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT_OFST
+ : Mips::fixup_Mips_GOT_OFST;
break;
case MCSymbolRefExpr::VK_Mips_GOT_DISP :
- FixupKind = Mips::fixup_Mips_GOT_DISP;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT_DISP
+ : Mips::fixup_Mips_GOT_DISP;
break;
case MCSymbolRefExpr::VK_Mips_GPREL:
FixupKind = Mips::fixup_Mips_GPREL16;
break;
case MCSymbolRefExpr::VK_Mips_GOT_CALL:
- FixupKind = Mips::fixup_Mips_CALL16;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_CALL16
+ : Mips::fixup_Mips_CALL16;
break;
case MCSymbolRefExpr::VK_Mips_GOT16:
- FixupKind = Mips::fixup_Mips_GOT_Global;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT16
+ : Mips::fixup_Mips_GOT_Global;
break;
case MCSymbolRefExpr::VK_Mips_GOT:
- FixupKind = Mips::fixup_Mips_GOT_Local;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT16
+ : Mips::fixup_Mips_GOT_Local;
break;
case MCSymbolRefExpr::VK_Mips_ABS_HI:
- FixupKind = Mips::fixup_Mips_HI16;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_HI16
+ : Mips::fixup_Mips_HI16;
break;
case MCSymbolRefExpr::VK_Mips_ABS_LO:
- FixupKind = Mips::fixup_Mips_LO16;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_LO16
+ : Mips::fixup_Mips_LO16;
break;
case MCSymbolRefExpr::VK_Mips_TLSGD:
FixupKind = Mips::fixup_Mips_TLSGD;
@@ -271,19 +416,23 @@ getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const {
FixupKind = Mips::fixup_Mips_TLSLDM;
break;
case MCSymbolRefExpr::VK_Mips_DTPREL_HI:
- FixupKind = Mips::fixup_Mips_DTPREL_HI;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_DTPREL_HI16
+ : Mips::fixup_Mips_DTPREL_HI;
break;
case MCSymbolRefExpr::VK_Mips_DTPREL_LO:
- FixupKind = Mips::fixup_Mips_DTPREL_LO;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_DTPREL_LO16
+ : Mips::fixup_Mips_DTPREL_LO;
break;
case MCSymbolRefExpr::VK_Mips_GOTTPREL:
FixupKind = Mips::fixup_Mips_GOTTPREL;
break;
case MCSymbolRefExpr::VK_Mips_TPREL_HI:
- FixupKind = Mips::fixup_Mips_TPREL_HI;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_TPREL_HI16
+ : Mips::fixup_Mips_TPREL_HI;
break;
case MCSymbolRefExpr::VK_Mips_TPREL_LO:
- FixupKind = Mips::fixup_Mips_TPREL_LO;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_TPREL_LO16
+ : Mips::fixup_Mips_TPREL_LO;
break;
case MCSymbolRefExpr::VK_Mips_HIGHER:
FixupKind = Mips::fixup_Mips_HIGHER;
@@ -318,7 +467,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
SmallVectorImpl<MCFixup> &Fixups) const {
if (MO.isReg()) {
unsigned Reg = MO.getReg();
- unsigned RegNo = Ctx.getRegisterInfo().getEncodingValue(Reg);
+ unsigned RegNo = Ctx.getRegisterInfo()->getEncodingValue(Reg);
return RegNo;
} else if (MO.isImm()) {
return static_cast<unsigned>(MO.getImm());
@@ -344,6 +493,17 @@ MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo,
return (OffBits & 0xFFFF) | RegBits;
}
+unsigned MipsMCCodeEmitter::
+getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // Base register is encoded in bits 20-16, offset is encoded in bits 11-0.
+ assert(MI.getOperand(OpNo).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups) << 16;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups);
+
+ return (OffBits & 0x0FFF) | RegBits;
+}
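
A worked example of the packing done by getMemEncodingMMImm12, using an illustrative standalone helper: with the base register encoding in bits 20-16 and the low 12 bits of the offset in bits 11-0, a base encoding of 29 and an offset of 8 pack to 0x1D0008.

#include <cassert>
#include <cstdint>

// Pack a microMIPS 12-bit memory operand: reg encoding << 16 | (offset & 0xFFF).
static uint32_t packMemMMImm12(unsigned BaseRegEnc, int32_t Offset) {
  return (static_cast<uint32_t>(Offset) & 0x0FFF) | (BaseRegEnc << 16);
}

int main() {
  assert(packMemMMImm12(/*BaseRegEnc=*/29, /*Offset=*/8) == 0x1D0008u);
  return 0;
}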
+
unsigned
MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const {
@@ -365,5 +525,13 @@ MipsMCCodeEmitter::getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
return Position + Size - 1;
}
+unsigned
+MipsMCCodeEmitter::getLSAImmEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ assert(MI.getOperand(OpNo).isImm());
+ // The immediate is encoded as 'immediate - 1'.
+ return getMachineOpValue(MI, MI.getOperand(OpNo), Fixups) - 1;
+}
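
The LSA shift amount operand is biased by one, so the architectural range 1..4 fits a 2-bit field as 0..3. A one-line illustration:

#include <cassert>

// The LSA immediate operand (1..4) is stored in the encoding as value - 1.
static unsigned encodeLSAImm(unsigned Imm) { return Imm - 1; }

int main() {
  assert(encodeLSAImm(1) == 0);
  assert(encodeLSAImm(4) == 3);
  return 0;
}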
+
#include "MipsGenMCCodeEmitter.inc"
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index be83b54b6124..5548aaa9a6d8 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -11,17 +11,21 @@
//
//===----------------------------------------------------------------------===//
-#include "MCTargetDesc/MipsELFStreamer.h"
#include "MipsMCTargetDesc.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MipsMCAsmInfo.h"
+#include "MipsTargetStreamer.h"
#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCELF.h"
+#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MachineLocation.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_INSTRINFO_MC_DESC
@@ -93,12 +97,12 @@ static MCSubtargetInfo *createMipsMCSubtargetInfo(StringRef TT, StringRef CPU,
return X;
}
-static MCAsmInfo *createMipsMCAsmInfo(const Target &T, StringRef TT) {
- MCAsmInfo *MAI = new MipsMCAsmInfo(T, TT);
+static MCAsmInfo *createMipsMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) {
+ MCAsmInfo *MAI = new MipsMCAsmInfo(TT);
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(Mips::SP, 0);
- MAI->addInitialFrameState(0, Dst, Src);
+ unsigned SP = MRI.getDwarfRegNum(Mips::SP, true);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, SP, 0);
+ MAI->addInitialFrameState(Inst);
return MAI;
}
@@ -125,14 +129,23 @@ static MCInstPrinter *createMipsMCInstPrinter(const Target &T,
}
static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
- MCContext &Ctx, MCAsmBackend &MAB,
- raw_ostream &_OS,
- MCCodeEmitter *_Emitter,
- bool RelaxAll,
- bool NoExecStack) {
- Triple TheTriple(TT);
-
- return createMipsELFStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll, NoExecStack);
+ MCContext &Context, MCAsmBackend &MAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ bool RelaxAll, bool NoExecStack) {
+ MipsTargetELFStreamer *S = new MipsTargetELFStreamer();
+ return createELFStreamer(Context, S, MAB, OS, Emitter, RelaxAll, NoExecStack);
+}
+
+static MCStreamer *
+createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
+ bool isVerboseAsm, bool useLoc, bool useCFI,
+ bool useDwarfDirectory, MCInstPrinter *InstPrint,
+ MCCodeEmitter *CE, MCAsmBackend *TAB, bool ShowInst) {
+ MipsTargetAsmStreamer *S = new MipsTargetAsmStreamer(OS);
+
+ return llvm::createAsmStreamer(Ctx, S, OS, isVerboseAsm, useLoc, useCFI,
+ useDwarfDirectory, InstPrint, CE, TAB,
+ ShowInst);
}
extern "C" void LLVMInitializeMipsTargetMC() {
@@ -183,6 +196,12 @@ extern "C" void LLVMInitializeMipsTargetMC() {
TargetRegistry::RegisterMCObjectStreamer(TheMips64elTarget,
createMCStreamer);
+ // Register the asm streamer.
+ TargetRegistry::RegisterAsmStreamer(TheMipsTarget, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheMipselTarget, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheMips64Target, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheMips64elTarget, createMCAsmStreamer);
+
// Register the asm backend.
TargetRegistry::RegisterMCAsmBackend(TheMipsTarget,
createMipsAsmBackendEB32);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
index 71954a4bd862..eabebfe1349e 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
@@ -42,14 +42,14 @@ MCCodeEmitter *createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
MCContext &Ctx);
-MCAsmBackend *createMipsAsmBackendEB32(const Target &T, StringRef TT,
- StringRef CPU);
-MCAsmBackend *createMipsAsmBackendEL32(const Target &T, StringRef TT,
- StringRef CPU);
-MCAsmBackend *createMipsAsmBackendEB64(const Target &T, StringRef TT,
- StringRef CPU);
-MCAsmBackend *createMipsAsmBackendEL64(const Target &T, StringRef TT,
- StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEB32(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEL32(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEB64(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEL64(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
MCObjectWriter *createMipsELFObjectWriter(raw_ostream &OS,
uint8_t OSABI,
diff --git a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
new file mode 100644
index 000000000000..5e90bbc635a5
--- /dev/null
+++ b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -0,0 +1,67 @@
+//===-- MipsTargetStreamer.cpp - Mips Target Streamer Methods -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Mips specific target streamer methods.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsTargetStreamer.h"
+#include "llvm/MC/MCELF.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+
+using namespace llvm;
+
+static cl::opt<bool> PrintHackDirectives("print-hack-directives",
+ cl::init(false), cl::Hidden);
+
+// Pin the vtable to this file.
+void MipsTargetStreamer::anchor() {}
+
+MipsTargetAsmStreamer::MipsTargetAsmStreamer(formatted_raw_ostream &OS)
+ : OS(OS) {}
+
+void MipsTargetAsmStreamer::emitMipsHackELFFlags(unsigned Flags) {
+ if (!PrintHackDirectives)
+ return;
+
+ OS << "\t.mips_hack_elf_flags 0x";
+ OS.write_hex(Flags);
+ OS << '\n';
+}
+void MipsTargetAsmStreamer::emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val) {
+ if (!PrintHackDirectives)
+ return;
+
+ OS << "\t.mips_hack_stocg ";
+ OS << Sym->getName();
+ OS << ", ";
+ OS << Val;
+ OS << '\n';
+}
+
+MCELFStreamer &MipsTargetELFStreamer::getStreamer() {
+ return static_cast<MCELFStreamer &>(*Streamer);
+}
+
+void MipsTargetELFStreamer::emitMipsHackELFFlags(unsigned Flags) {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ MCA.setELFHeaderEFlags(Flags);
+}
+
+// Set a symbol's STO flags
+void MipsTargetELFStreamer::emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val) {
+ MCSymbolData &Data = getStreamer().getOrCreateSymbolData(Sym);
+ // The "other" values are stored in the last 6 bits of the second byte
+ // The traditional defines for STO values assume the full byte and thus
+ // the shift to pack it.
+ MCELF::setOther(Data, Val >> 2);
+}
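
The shift in emitMipsHackSTOCG exists because the STO_MIPS_* constants are defined against the whole st_other byte, while the streamer's 6-bit "other" field drops the low two bits; hence Val >> 2. A small illustration, using the conventional value of STO_MIPS_MICROMIPS only as an example:

#include <cassert>

// Pack a full-byte STO_MIPS_* value into the 6-bit "other" field.
static unsigned packSTO(unsigned StoByte) { return StoByte >> 2; }

int main() {
  const unsigned STO_MIPS_MICROMIPS = 0x80; // conventional ELF value, for illustration
  assert(packSTO(STO_MIPS_MICROMIPS) == 0x20);
  return 0;
}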
diff --git a/lib/Target/Mips/MSA.txt b/lib/Target/Mips/MSA.txt
new file mode 100644
index 000000000000..d1c41932fcb5
--- /dev/null
+++ b/lib/Target/Mips/MSA.txt
@@ -0,0 +1,78 @@
+Code Generation Notes for MSA
+=============================
+
+Intrinsics are lowered to SelectionDAG nodes where possible in order to enable
+optimisation, reduce the size of the ISel matcher, and reduce repetition in
+the implementation. In a small number of cases, this can cause different
+(semantically equivalent) instructions to be used in place of the requested
+instruction, even when no optimisation has taken place.
+
+Instructions
+============
+
+This section describes any quirks of instruction selection for MSA. For
+example, two instructions might be equally valid for some given IR and one is
+chosen in preference to the other.
+
+bclri.b:
+ It is not possible to emit bclri.b since andi.b covers exactly the
+ same cases. andi.b should use fractionally less power than bclri.b in
+        most hardware implementations so it is used in preference to bclri.b.
+        (A scalar sketch of this equivalence follows these notes.)
+
+vshf.w:
+ It is not possible to emit vshf.w when the shuffle description is
+ constant since shf.w covers exactly the same cases. shf.w is used
+ instead. It is also impossible for the shuffle description to be
+ unknown at compile-time due to the definition of shufflevector in
+ LLVM IR.
+
+vshf.[bhwd]:
+ When the shuffle description describes a splat operation, splat.[bhwd]
+ instructions will be selected instead of vshf.[bhwd]. Unlike the ilv*,
+ and pck* instructions, this is matched from MipsISD::VSHF instead of
+ a special-case MipsISD node.
+
+ilvl.d, pckev.d:
+ It is not possible to emit ilvl.d, or pckev.d since ilvev.d covers the
+ same shuffle. ilvev.d will be emitted instead.
+
+ilvr.d, ilvod.d, pckod.d:
+ It is not possible to emit ilvr.d, or pckod.d since ilvod.d covers the
+ same shuffle. ilvod.d will be emitted instead.
+
+splat.[bhwd]:
+ The intrinsic will work as expected. However, unlike other intrinsics
+ it lowers directly to MipsISD::VSHF instead of using common IR.
+
+splati.w:
+ It is not possible to emit splati.w since shf.w covers the same cases.
+ shf.w will be emitted instead.
+
+copy_s.w:
+ On MIPS32, the copy_u.d intrinsic will emit this instruction instead of
+ copy_u.w. This is semantically equivalent since the general-purpose
+ register file is 32-bits wide.
+
+binsri.[bhwd], binsli.[bhwd]:
+ These two operations are equivalent to each other with the operands
+ swapped and condition inverted. The compiler may use either one as
+ appropriate.
+ Furthermore, the compiler may use bsel.[bhwd] for some masks that do
+ not survive the legalization process (this is a bug and will be fixed).
+
+bmnz.v, bmz.v, bsel.v:
+ These three operations differ only in the operand that is tied to the
+ result.
+ It is (currently) not possible to emit bmz.v, or bsel.v since bmnz.v is
+ the same operation and will be emitted instead.
+ In future, the compiler may choose between these three instructions
+ according to register allocation.
+
+bmnzi.b, bmzi.b:
+ Like their non-immediate counterparts, bmnzi.v and bmzi.v are the same
+ operation with the operands swapped. bmnzi.v will (currently) be emitted
+ for both cases.
+
+bseli.v:
+ Unlike the non-immediate versions, bseli.v is distinguishable from
+ bmnzi.b and bmzi.b and can be emitted.
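
As promised in the bclri.b note above, here is a scalar sketch (plain C++, not MSA code) of why andi.b covers every bclri.b case: clearing bit N of each byte is identical to AND-ing each byte with the complementary 8-bit mask.

#include <cassert>
#include <cstdint>

// Clearing bit N in a byte equals AND-ing with the mask that has bit N clear.
static uint8_t bclri(uint8_t Byte, unsigned N) {
  return static_cast<uint8_t>(Byte & ~(1u << N));
}
static uint8_t andi(uint8_t Byte, uint8_t Mask) { return Byte & Mask; }

int main() {
  for (unsigned V = 0; V < 256; ++V)
    assert(bclri(static_cast<uint8_t>(V), 3) ==
           andi(static_cast<uint8_t>(V), 0xF7));
  return 0;
}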
diff --git a/lib/Target/Mips/MicroMipsInstrFormats.td b/lib/Target/Mips/MicroMipsInstrFormats.td
index 665b4d2d8b41..c12a32e3d803 100644
--- a/lib/Target/Mips/MicroMipsInstrFormats.td
+++ b/lib/Target/Mips/MicroMipsInstrFormats.td
@@ -39,8 +39,8 @@ class SLTI_FM_MM<bits<6> op> : MMArch {
bits<32> Inst;
let Inst{31-26} = op;
- let Inst{25-21} = rs;
- let Inst{20-16} = rt;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
let Inst{15-0} = imm16;
}
@@ -110,3 +110,195 @@ class LW_FM_MM<bits<6> op> : MMArch {
let Inst{20-16} = addr{20-16};
let Inst{15-0} = addr{15-0};
}
+
+class LWL_FM_MM<bits<4> funct> {
+ bits<5> rt;
+ bits<21> addr;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x18;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = addr{20-16};
+ let Inst{15-12} = funct;
+ let Inst{11-0} = addr{11-0};
+}
+
+class CMov_F_I_FM_MM<bits<7> func> : MMArch {
+ bits<5> rd;
+ bits<5> rs;
+ bits<3> fcc;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = rd;
+ let Inst{20-16} = rs;
+ let Inst{15-13} = fcc;
+ let Inst{12-6} = func;
+ let Inst{5-0} = 0x3b;
+}
+
+class MTLO_FM_MM<bits<10> funct> : MMArch {
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = 0x00;
+ let Inst{20-16} = rs;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class MFLO_FM_MM<bits<10> funct> : MMArch {
+ bits<5> rd;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = 0x00;
+ let Inst{20-16} = rd;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class CLO_FM_MM<bits<10> funct> : MMArch {
+ bits<5> rd;
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = rd;
+ let Inst{20-16} = rs;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class SEB_FM_MM<bits<10> funct> : MMArch {
+ bits<5> rd;
+ bits<5> rt;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = rd;
+ let Inst{20-16} = rt;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class EXT_FM_MM<bits<6> funct> : MMArch {
+ bits<5> rt;
+ bits<5> rs;
+ bits<5> pos;
+ bits<5> size;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-11} = size;
+ let Inst{10-6} = pos;
+ let Inst{5-0} = funct;
+}
+
+class J_FM_MM<bits<6> op> : MMArch {
+ bits<26> target;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = op;
+ let Inst{25-0} = target;
+}
+
+class JR_FM_MM<bits<8> funct> : MMArch {
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-21} = 0x00;
+ let Inst{20-16} = rs;
+ let Inst{15-14} = 0x0;
+ let Inst{13-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class JALR_FM_MM<bits<10> funct> : MMArch {
+ bits<5> rs;
+ bits<5> rd;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = rd;
+ let Inst{20-16} = rs;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class BEQ_FM_MM<bits<6> op> : MMArch {
+ bits<5> rs;
+ bits<5> rt;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = op;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = offset;
+}
+
+class BGEZ_FM_MM<bits<5> funct> : MMArch {
+ bits<5> rs;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x10;
+ let Inst{25-21} = funct;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = offset;
+}
+
+class BGEZAL_FM_MM<bits<5> funct> : MMArch {
+ bits<5> rs;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x10;
+ let Inst{25-21} = funct;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = offset;
+}
+
+class TEQ_FM_MM<bits<6> funct> : MMArch {
+ bits<5> rs;
+ bits<5> rt;
+ bits<4> code_;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-12} = code_;
+ let Inst{11-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class TEQI_FM_MM<bits<5> funct> : MMArch {
+ bits<5> rs;
+ bits<16> imm16;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x10;
+ let Inst{25-21} = funct;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = imm16;
+}
diff --git a/lib/Target/Mips/MicroMipsInstrInfo.td b/lib/Target/Mips/MicroMipsInstrInfo.td
index 74cdccd3ed57..d9507fa88ebc 100644
--- a/lib/Target/Mips/MicroMipsInstrInfo.td
+++ b/lib/Target/Mips/MicroMipsInstrInfo.td
@@ -1,67 +1,219 @@
-let isCodeGenOnly = 1 in {
+def addrimm12 : ComplexPattern<iPTR, 2, "selectIntAddrMM", [frameindex]>;
+
+def simm12 : Operand<i32> {
+ let DecoderMethod = "DecodeSimm12";
+}
+
+def mem_mm_12 : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops GPR32, simm12);
+ let EncoderMethod = "getMemEncodingMMImm12";
+ let ParserMatchClass = MipsMemAsmOperand;
+ let OperandType = "OPERAND_MEMORY";
+}
+
+def jmptarget_mm : Operand<OtherVT> {
+ let EncoderMethod = "getJumpTargetOpValueMM";
+}
+
+def calltarget_mm : Operand<iPTR> {
+ let EncoderMethod = "getJumpTargetOpValueMM";
+}
+
+def brtarget_mm : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValueMM";
+ let OperandType = "OPERAND_PCREL";
+ let DecoderMethod = "DecodeBranchTargetMM";
+}
+
+let canFoldAsLoad = 1 in
+class LoadLeftRightMM<string opstr, SDNode OpNode, RegisterOperand RO,
+ Operand MemOpnd> :
+ InstSE<(outs RO:$rt), (ins MemOpnd:$addr, RO:$src),
+ !strconcat(opstr, "\t$rt, $addr"),
+ [(set RO:$rt, (OpNode addrimm12:$addr, RO:$src))],
+ NoItinerary, FrmI> {
+ let DecoderMethod = "DecodeMemMMImm12";
+ string Constraints = "$src = $rt";
+}
+
+class StoreLeftRightMM<string opstr, SDNode OpNode, RegisterOperand RO,
+ Operand MemOpnd>:
+ InstSE<(outs), (ins RO:$rt, MemOpnd:$addr),
+ !strconcat(opstr, "\t$rt, $addr"),
+ [(OpNode RO:$rt, addrimm12:$addr)], NoItinerary, FrmI> {
+ let DecoderMethod = "DecodeMemMMImm12";
+}
+
+let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
/// Arithmetic Instructions (ALU Immediate)
- def ADDiu_MM : MMRel, ArithLogicI<"addiu", simm16, CPURegsOpnd>,
+ def ADDiu_MM : MMRel, ArithLogicI<"addiu", simm16, GPR32Opnd>,
ADDI_FM_MM<0xc>;
- def ADDi_MM : MMRel, ArithLogicI<"addi", simm16, CPURegsOpnd>,
+ def ADDi_MM : MMRel, ArithLogicI<"addi", simm16, GPR32Opnd>,
ADDI_FM_MM<0x4>;
- def SLTi_MM : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>,
+ def SLTi_MM : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, GPR32Opnd>,
SLTI_FM_MM<0x24>;
- def SLTiu_MM : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>,
+ def SLTiu_MM : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, GPR32Opnd>,
SLTI_FM_MM<0x2c>;
- def ANDi_MM : MMRel, ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16, and>,
+ def ANDi_MM : MMRel, ArithLogicI<"andi", uimm16, GPR32Opnd>,
ADDI_FM_MM<0x34>;
- def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>,
+ def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, GPR32Opnd>,
ADDI_FM_MM<0x14>;
- def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16, xor>,
+ def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, GPR32Opnd>,
ADDI_FM_MM<0x1c>;
- def LUi_MM : MMRel, LoadUpper<"lui", CPURegs, uimm16>, LUI_FM_MM;
+ def LUi_MM : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16>, LUI_FM_MM;
/// Arithmetic Instructions (3-Operand, R-Type)
- def ADDu_MM : MMRel, ArithLogicR<"addu", CPURegsOpnd>, ADD_FM_MM<0, 0x150>;
- def SUBu_MM : MMRel, ArithLogicR<"subu", CPURegsOpnd>, ADD_FM_MM<0, 0x1d0>;
- def MUL_MM : MMRel, ArithLogicR<"mul", CPURegsOpnd>, ADD_FM_MM<0, 0x210>;
- def ADD_MM : MMRel, ArithLogicR<"add", CPURegsOpnd>, ADD_FM_MM<0, 0x110>;
- def SUB_MM : MMRel, ArithLogicR<"sub", CPURegsOpnd>, ADD_FM_MM<0, 0x190>;
- def SLT_MM : MMRel, SetCC_R<"slt", setlt, CPURegs>, ADD_FM_MM<0, 0x350>;
- def SLTu_MM : MMRel, SetCC_R<"sltu", setult, CPURegs>,
+ def ADDu_MM : MMRel, ArithLogicR<"addu", GPR32Opnd>, ADD_FM_MM<0, 0x150>;
+ def SUBu_MM : MMRel, ArithLogicR<"subu", GPR32Opnd>, ADD_FM_MM<0, 0x1d0>;
+ def MUL_MM : MMRel, ArithLogicR<"mul", GPR32Opnd>, ADD_FM_MM<0, 0x210>;
+ def ADD_MM : MMRel, ArithLogicR<"add", GPR32Opnd>, ADD_FM_MM<0, 0x110>;
+ def SUB_MM : MMRel, ArithLogicR<"sub", GPR32Opnd>, ADD_FM_MM<0, 0x190>;
+ def SLT_MM : MMRel, SetCC_R<"slt", setlt, GPR32Opnd>, ADD_FM_MM<0, 0x350>;
+ def SLTu_MM : MMRel, SetCC_R<"sltu", setult, GPR32Opnd>,
ADD_FM_MM<0, 0x390>;
- def AND_MM : MMRel, ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>,
+ def AND_MM : MMRel, ArithLogicR<"and", GPR32Opnd, 1, IIAlu, and>,
ADD_FM_MM<0, 0x250>;
- def OR_MM : MMRel, ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>,
+ def OR_MM : MMRel, ArithLogicR<"or", GPR32Opnd, 1, IIAlu, or>,
ADD_FM_MM<0, 0x290>;
- def XOR_MM : MMRel, ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>,
+ def XOR_MM : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, IIAlu, xor>,
ADD_FM_MM<0, 0x310>;
- def NOR_MM : MMRel, LogicNOR<"nor", CPURegsOpnd>, ADD_FM_MM<0, 0x2d0>;
- def MULT_MM : MMRel, Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>,
+ def NOR_MM : MMRel, LogicNOR<"nor", GPR32Opnd>, ADD_FM_MM<0, 0x2d0>;
+ def MULT_MM : MMRel, Mult<"mult", IIImul, GPR32Opnd, [HI0, LO0]>,
MULT_FM_MM<0x22c>;
- def MULTu_MM : MMRel, Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>,
+ def MULTu_MM : MMRel, Mult<"multu", IIImul, GPR32Opnd, [HI0, LO0]>,
MULT_FM_MM<0x26c>;
+ def SDIV_MM : MMRel, Div<"div", IIIdiv, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM_MM<0x2ac>;
+ def UDIV_MM : MMRel, Div<"divu", IIIdiv, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM_MM<0x2ec>;
/// Shift Instructions
- def SLL_MM : MMRel, shift_rotate_imm<"sll", shamt, CPURegsOpnd>,
+ def SLL_MM : MMRel, shift_rotate_imm<"sll", uimm5, GPR32Opnd>,
SRA_FM_MM<0, 0>;
- def SRL_MM : MMRel, shift_rotate_imm<"srl", shamt, CPURegsOpnd>,
+ def SRL_MM : MMRel, shift_rotate_imm<"srl", uimm5, GPR32Opnd>,
SRA_FM_MM<0x40, 0>;
- def SRA_MM : MMRel, shift_rotate_imm<"sra", shamt, CPURegsOpnd>,
+ def SRA_MM : MMRel, shift_rotate_imm<"sra", uimm5, GPR32Opnd>,
SRA_FM_MM<0x80, 0>;
- def SLLV_MM : MMRel, shift_rotate_reg<"sllv", CPURegsOpnd>,
+ def SLLV_MM : MMRel, shift_rotate_reg<"sllv", GPR32Opnd>,
SRLV_FM_MM<0x10, 0>;
- def SRLV_MM : MMRel, shift_rotate_reg<"srlv", CPURegsOpnd>,
+ def SRLV_MM : MMRel, shift_rotate_reg<"srlv", GPR32Opnd>,
SRLV_FM_MM<0x50, 0>;
- def SRAV_MM : MMRel, shift_rotate_reg<"srav", CPURegsOpnd>,
+ def SRAV_MM : MMRel, shift_rotate_reg<"srav", GPR32Opnd>,
SRLV_FM_MM<0x90, 0>;
- def ROTR_MM : MMRel, shift_rotate_imm<"rotr", shamt, CPURegsOpnd>,
+ def ROTR_MM : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd>,
SRA_FM_MM<0xc0, 0>;
- def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", CPURegsOpnd>,
+ def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd>,
SRLV_FM_MM<0xd0, 0>;
/// Load and Store Instructions - aligned
- defm LB_MM : LoadM<"lb", CPURegs, sextloadi8>, MMRel, LW_FM_MM<0x7>;
- defm LBu_MM : LoadM<"lbu", CPURegs, zextloadi8>, MMRel, LW_FM_MM<0x5>;
- defm LH_MM : LoadM<"lh", CPURegs, sextloadi16>, MMRel, LW_FM_MM<0xf>;
- defm LHu_MM : LoadM<"lhu", CPURegs, zextloadi16>, MMRel, LW_FM_MM<0xd>;
- defm LW_MM : LoadM<"lw", CPURegs>, MMRel, LW_FM_MM<0x3f>;
- defm SB_MM : StoreM<"sb", CPURegs, truncstorei8>, MMRel, LW_FM_MM<0x6>;
- defm SH_MM : StoreM<"sh", CPURegs, truncstorei16>, MMRel, LW_FM_MM<0xe>;
- defm SW_MM : StoreM<"sw", CPURegs>, MMRel, LW_FM_MM<0x3e>;
+ let DecoderMethod = "DecodeMemMMImm16" in {
+ def LB_MM : Load<"lb", GPR32Opnd>, MMRel, LW_FM_MM<0x7>;
+ def LBu_MM : Load<"lbu", GPR32Opnd>, MMRel, LW_FM_MM<0x5>;
+ def LH_MM : Load<"lh", GPR32Opnd>, MMRel, LW_FM_MM<0xf>;
+ def LHu_MM : Load<"lhu", GPR32Opnd>, MMRel, LW_FM_MM<0xd>;
+ def LW_MM : Load<"lw", GPR32Opnd>, MMRel, LW_FM_MM<0x3f>;
+ def SB_MM : Store<"sb", GPR32Opnd>, MMRel, LW_FM_MM<0x6>;
+ def SH_MM : Store<"sh", GPR32Opnd>, MMRel, LW_FM_MM<0xe>;
+ def SW_MM : Store<"sw", GPR32Opnd>, MMRel, LW_FM_MM<0x3e>;
+ }
+
+ /// Load and Store Instructions - unaligned
+ def LWL_MM : LoadLeftRightMM<"lwl", MipsLWL, GPR32Opnd, mem_mm_12>,
+ LWL_FM_MM<0x0>;
+ def LWR_MM : LoadLeftRightMM<"lwr", MipsLWR, GPR32Opnd, mem_mm_12>,
+ LWL_FM_MM<0x1>;
+ def SWL_MM : StoreLeftRightMM<"swl", MipsSWL, GPR32Opnd, mem_mm_12>,
+ LWL_FM_MM<0x8>;
+ def SWR_MM : StoreLeftRightMM<"swr", MipsSWR, GPR32Opnd, mem_mm_12>,
+ LWL_FM_MM<0x9>;
+
+ /// Move Conditional
+ def MOVZ_I_MM : MMRel, CMov_I_I_FT<"movz", GPR32Opnd, GPR32Opnd,
+ NoItinerary>, ADD_FM_MM<0, 0x58>;
+ def MOVN_I_MM : MMRel, CMov_I_I_FT<"movn", GPR32Opnd, GPR32Opnd,
+ NoItinerary>, ADD_FM_MM<0, 0x18>;
+ def MOVT_I_MM : MMRel, CMov_F_I_FT<"movt", GPR32Opnd, IIAlu>,
+ CMov_F_I_FM_MM<0x25>;
+ def MOVF_I_MM : MMRel, CMov_F_I_FT<"movf", GPR32Opnd, IIAlu>,
+ CMov_F_I_FM_MM<0x5>;
+
+ /// Move to/from HI/LO
+ def MTHI_MM : MMRel, MoveToLOHI<"mthi", GPR32Opnd, [HI0]>,
+ MTLO_FM_MM<0x0b5>;
+ def MTLO_MM : MMRel, MoveToLOHI<"mtlo", GPR32Opnd, [LO0]>,
+ MTLO_FM_MM<0x0f5>;
+ def MFHI_MM : MMRel, MoveFromLOHI<"mfhi", GPR32Opnd, AC0>,
+ MFLO_FM_MM<0x035>;
+ def MFLO_MM : MMRel, MoveFromLOHI<"mflo", GPR32Opnd, AC0>,
+ MFLO_FM_MM<0x075>;
+
+ /// Multiply Add/Sub Instructions
+ def MADD_MM : MMRel, MArithR<"madd", 1>, MULT_FM_MM<0x32c>;
+ def MADDU_MM : MMRel, MArithR<"maddu", 1>, MULT_FM_MM<0x36c>;
+ def MSUB_MM : MMRel, MArithR<"msub">, MULT_FM_MM<0x3ac>;
+ def MSUBU_MM : MMRel, MArithR<"msubu">, MULT_FM_MM<0x3ec>;
+
+ /// Count Leading
+ def CLZ_MM : MMRel, CountLeading0<"clz", GPR32Opnd>, CLO_FM_MM<0x16c>;
+ def CLO_MM : MMRel, CountLeading1<"clo", GPR32Opnd>, CLO_FM_MM<0x12c>;
+
+ /// Sign Ext In Register Instructions.
+ def SEB_MM : MMRel, SignExtInReg<"seb", i8, GPR32Opnd>, SEB_FM_MM<0x0ac>;
+ def SEH_MM : MMRel, SignExtInReg<"seh", i16, GPR32Opnd>, SEB_FM_MM<0x0ec>;
+
+ /// Word Swap Bytes Within Halfwords
+ def WSBH_MM : MMRel, SubwordSwap<"wsbh", GPR32Opnd>, SEB_FM_MM<0x1ec>;
+
+ def EXT_MM : MMRel, ExtBase<"ext", GPR32Opnd, uimm5, MipsExt>,
+ EXT_FM_MM<0x2c>;
+ def INS_MM : MMRel, InsBase<"ins", GPR32Opnd, uimm5, MipsIns>,
+ EXT_FM_MM<0x0c>;
+
+ /// Jump Instructions
+ let DecoderMethod = "DecodeJumpTargetMM" in {
+ def J_MM : MMRel, JumpFJ<jmptarget_mm, "j", br, bb, "j">,
+ J_FM_MM<0x35>;
+ def JAL_MM : MMRel, JumpLink<"jal", calltarget_mm>, J_FM_MM<0x3d>;
+ def TAILCALL_MM : MMRel, JumpFJ<calltarget_mm, "j", MipsTailCall, imm,
+ "tcall">, J_FM_MM<0x3d>, IsTailCall;
+ }
+ def JR_MM : MMRel, IndirectBranch<"jr", GPR32Opnd>, JR_FM_MM<0x3c>;
+ def JALR_MM : MMRel, JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM_MM<0x03c>;
+ def TAILCALL_R_MM : MMRel, JumpFR<"tcallr", GPR32Opnd, MipsTailCall>,
+ JR_FM_MM<0x3c>, IsTailCall;
+ def RET_MM : MMRel, RetBase<"ret", GPR32Opnd>, JR_FM_MM<0x3c>;
+
+ /// Branch Instructions
+ def BEQ_MM : MMRel, CBranch<"beq", brtarget_mm, seteq, GPR32Opnd>,
+ BEQ_FM_MM<0x25>;
+ def BNE_MM : MMRel, CBranch<"bne", brtarget_mm, setne, GPR32Opnd>,
+ BEQ_FM_MM<0x2d>;
+ def BGEZ_MM : MMRel, CBranchZero<"bgez", brtarget_mm, setge, GPR32Opnd>,
+ BGEZ_FM_MM<0x2>;
+ def BGTZ_MM : MMRel, CBranchZero<"bgtz", brtarget_mm, setgt, GPR32Opnd>,
+ BGEZ_FM_MM<0x6>;
+ def BLEZ_MM : MMRel, CBranchZero<"blez", brtarget_mm, setle, GPR32Opnd>,
+ BGEZ_FM_MM<0x4>;
+ def BLTZ_MM : MMRel, CBranchZero<"bltz", brtarget_mm, setlt, GPR32Opnd>,
+ BGEZ_FM_MM<0x0>;
+ def BGEZAL_MM : MMRel, BGEZAL_FT<"bgezal", brtarget_mm, GPR32Opnd>,
+ BGEZAL_FM_MM<0x03>;
+ def BLTZAL_MM : MMRel, BGEZAL_FT<"bltzal", brtarget_mm, GPR32Opnd>,
+ BGEZAL_FM_MM<0x01>;
+
+ /// Trap Instructions
+ def TEQ_MM : MMRel, TEQ_FT<"teq", GPR32Opnd>, TEQ_FM_MM<0x0>;
+ def TGE_MM : MMRel, TEQ_FT<"tge", GPR32Opnd>, TEQ_FM_MM<0x08>;
+ def TGEU_MM : MMRel, TEQ_FT<"tgeu", GPR32Opnd>, TEQ_FM_MM<0x10>;
+ def TLT_MM : MMRel, TEQ_FT<"tlt", GPR32Opnd>, TEQ_FM_MM<0x20>;
+ def TLTU_MM : MMRel, TEQ_FT<"tltu", GPR32Opnd>, TEQ_FM_MM<0x28>;
+ def TNE_MM : MMRel, TEQ_FT<"tne", GPR32Opnd>, TEQ_FM_MM<0x30>;
+
+ def TEQI_MM : MMRel, TEQI_FT<"teqi", GPR32Opnd>, TEQI_FM_MM<0x0e>;
+ def TGEI_MM : MMRel, TEQI_FT<"tgei", GPR32Opnd>, TEQI_FM_MM<0x09>;
+ def TGEIU_MM : MMRel, TEQI_FT<"tgeiu", GPR32Opnd>, TEQI_FM_MM<0x0b>;
+ def TLTI_MM : MMRel, TEQI_FT<"tlti", GPR32Opnd>, TEQI_FM_MM<0x08>;
+ def TLTIU_MM : MMRel, TEQI_FT<"tltiu", GPR32Opnd>, TEQI_FM_MM<0x0a>;
+ def TNEI_MM : MMRel, TEQI_FT<"tnei", GPR32Opnd>, TEQI_FM_MM<0x0c>;
}
diff --git a/lib/Target/Mips/Mips.h b/lib/Target/Mips/Mips.h
index 8c65bb4020b5..e796debd79b6 100644
--- a/lib/Target/Mips/Mips.h
+++ b/lib/Target/Mips/Mips.h
@@ -28,7 +28,6 @@ namespace llvm {
FunctionPass *createMipsJITCodeEmitterPass(MipsTargetMachine &TM,
JITCodeEmitter &JCE);
FunctionPass *createMipsConstantIslandPass(MipsTargetMachine &tm);
-
} // end namespace llvm;
#endif
diff --git a/lib/Target/Mips/Mips.td b/lib/Target/Mips/Mips.td
index eefb02a494ca..b8e3f39256da 100644
--- a/lib/Target/Mips/Mips.td
+++ b/lib/Target/Mips/Mips.td
@@ -78,6 +78,8 @@ def FeatureDSP : SubtargetFeature<"dsp", "HasDSP", "true", "Mips DSP ASE">;
def FeatureDSPR2 : SubtargetFeature<"dspr2", "HasDSPR2", "true",
"Mips DSP-R2 ASE", [FeatureDSP]>;
+def FeatureMSA : SubtargetFeature<"msa", "HasMSA", "true", "Mips MSA ASE">;
+
def FeatureMicroMips : SubtargetFeature<"micromips", "InMicroMipsMode", "true",
"microMips mode">;
@@ -101,6 +103,7 @@ def MipsAsmWriter : AsmWriter {
def MipsAsmParser : AsmParser {
let ShouldEmitMatchRegisterName = 0;
+ let MnemonicContainsDot = 1;
}
def MipsAsmParserVariant : AsmParserVariant {
diff --git a/lib/Target/Mips/Mips16FrameLowering.cpp b/lib/Target/Mips/Mips16FrameLowering.cpp
index 1bb6fe46295b..6655ff98e033 100644
--- a/lib/Target/Mips/Mips16FrameLowering.cpp
+++ b/lib/Target/Mips/Mips16FrameLowering.cpp
@@ -40,7 +40,7 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
if (StackSize == 0 && !MFI->adjustsStack()) return;
MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
MachineLocation DstML, SrcML;
// Adjust stack.
@@ -50,24 +50,23 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel);
- DstML = MachineLocation(MachineLocation::VirtualFP);
- SrcML = MachineLocation(MachineLocation::VirtualFP, -StackSize);
- Moves.push_back(MachineMove(AdjustSPLabel, DstML, SrcML));
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(AdjustSPLabel, -StackSize));
MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel);
- DstML = MachineLocation(MachineLocation::VirtualFP, -8);
- SrcML = MachineLocation(Mips::S1);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ unsigned S2 = MRI->getDwarfRegNum(Mips::S2, true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S2, -8));
- DstML = MachineLocation(MachineLocation::VirtualFP, -12);
- SrcML = MachineLocation(Mips::S0);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ unsigned S1 = MRI->getDwarfRegNum(Mips::S1, true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S1, -12));
- DstML = MachineLocation(MachineLocation::VirtualFP, -4);
- SrcML = MachineLocation(Mips::RA);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ unsigned S0 = MRI->getDwarfRegNum(Mips::S0, true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S0, -16));
+
+ unsigned RA = MRI->getDwarfRegNum(Mips::RA, true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, RA, -4));
if (hasFP(MF))
BuildMI(MBB, MBBI, dl, TII.get(Mips::MoveR3216), Mips::S0)
@@ -172,6 +171,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
MF.getRegInfo().setPhysRegUsed(Mips::RA);
MF.getRegInfo().setPhysRegUsed(Mips::S0);
MF.getRegInfo().setPhysRegUsed(Mips::S1);
+ MF.getRegInfo().setPhysRegUsed(Mips::S2);
}
const MipsFrameLowering *
diff --git a/lib/Target/Mips/Mips16FrameLowering.h b/lib/Target/Mips/Mips16FrameLowering.h
index 54fdb7871466..8ce2ceda7c74 100644
--- a/lib/Target/Mips/Mips16FrameLowering.h
+++ b/lib/Target/Mips/Mips16FrameLowering.h
@@ -20,7 +20,7 @@ namespace llvm {
class Mips16FrameLowering : public MipsFrameLowering {
public:
explicit Mips16FrameLowering(const MipsSubtarget &STI)
- : MipsFrameLowering(STI, 8) {}
+ : MipsFrameLowering(STI, STI.stackAlignment()) {}
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp
new file mode 100644
index 000000000000..81bf18cd09d9
--- /dev/null
+++ b/lib/Target/Mips/Mips16HardFloat.cpp
@@ -0,0 +1,517 @@
+//===---- Mips16HardFloat.cpp for Mips16 Hard Float --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a pass needed for Mips16 Hard Float
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mips16-hard-float"
+#include "Mips16HardFloat.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <string>
+
+static void inlineAsmOut
+ (LLVMContext &C, StringRef AsmString, BasicBlock *BB ) {
+ std::vector<llvm::Type *> AsmArgTypes;
+ std::vector<llvm::Value*> AsmArgs;
+ llvm::FunctionType *AsmFTy =
+ llvm::FunctionType::get(Type::getVoidTy(C),
+ AsmArgTypes, false);
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(AsmFTy, AsmString, "", true,
+ /* IsAlignStack */ false,
+ llvm::InlineAsm::AD_ATT);
+ CallInst::Create(IA, AsmArgs, "", BB);
+}
+
+namespace {
+
+class InlineAsmHelper {
+ LLVMContext &C;
+ BasicBlock *BB;
+public:
+ InlineAsmHelper(LLVMContext &C_, BasicBlock *BB_) :
+ C(C_), BB(BB_) {
+ }
+
+ void Out(StringRef AsmString) {
+ inlineAsmOut(C, AsmString, BB);
+ }
+
+};
+}
+//
+// Return types that matter for hard float are:
+// float, double, complex float, and complex double
+//
+enum FPReturnVariant {
+ FRet, DRet, CFRet, CDRet, NoFPRet
+};
+
+//
+// Determine which FP return type this function has
+//
+static FPReturnVariant whichFPReturnVariant(Type *T) {
+ switch (T->getTypeID()) {
+ case Type::FloatTyID:
+ return FRet;
+ case Type::DoubleTyID:
+ return DRet;
+ case Type::StructTyID:
+ if (T->getStructNumElements() != 2)
+ break;
+ if ((T->getContainedType(0)->isFloatTy()) &&
+ (T->getContainedType(1)->isFloatTy()))
+ return CFRet;
+ if ((T->getContainedType(0)->isDoubleTy()) &&
+ (T->getContainedType(1)->isDoubleTy()))
+ return CDRet;
+ break;
+ default:
+ break;
+ }
+ return NoFPRet;
+}
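+// For example (illustrative only): a return type of float maps to FRet and
+// double to DRet, while the two-element structs { float, float } and
+// { double, double } (how complex float/double appear in IR here) map to
+// CFRet and CDRet; any other type is NoFPRet.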
+
+//
+// Parameter types that matter are float, (float, float), (float, double),
+// double, (double, double), (double, float).
+//
+enum FPParamVariant {
+ FSig, FFSig, FDSig,
+ DSig, DDSig, DFSig, NoSig
+};
+
+// which floating point parameter signature variant we are dealing with
+//
+typedef Type::TypeID TypeID;
+const Type::TypeID FloatTyID = Type::FloatTyID;
+const Type::TypeID DoubleTyID = Type::DoubleTyID;
+
+static FPParamVariant whichFPParamVariantNeeded(Function &F) {
+ switch (F.arg_size()) {
+ case 0:
+ return NoSig;
+ case 1:{
+ TypeID ArgTypeID = F.getFunctionType()->getParamType(0)->getTypeID();
+ switch (ArgTypeID) {
+ case FloatTyID:
+ return FSig;
+ case DoubleTyID:
+ return DSig;
+ default:
+ return NoSig;
+ }
+ }
+ default: {
+ TypeID ArgTypeID0 = F.getFunctionType()->getParamType(0)->getTypeID();
+ TypeID ArgTypeID1 = F.getFunctionType()->getParamType(1)->getTypeID();
+ switch(ArgTypeID0) {
+ case FloatTyID: {
+ switch (ArgTypeID1) {
+ case FloatTyID:
+ return FFSig;
+ case DoubleTyID:
+ return FDSig;
+ default:
+ return FSig;
+ }
+ }
+ case DoubleTyID: {
+ switch (ArgTypeID1) {
+ case FloatTyID:
+ return DFSig;
+ case DoubleTyID:
+ return DDSig;
+ default:
+ return DSig;
+ }
+ }
+ default:
+ return NoSig;
+ }
+ }
+ }
+ llvm_unreachable("can't get here");
+}
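+// For example (illustrative only): void f(float) yields FSig and
+// void f(double, float) yields DFSig; only the first two parameters are
+// inspected, so void f(double, double, int) still yields DDSig.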
+
+// Figure out if we need floating point based on the function parameters.
+// We need to move variables in and/or out of floating point
+// registers because of the ABI.
+//
+static bool needsFPStubFromParams(Function &F) {
+ if (F.arg_size() >=1) {
+ Type *ArgType = F.getFunctionType()->getParamType(0);
+ switch (ArgType->getTypeID()) {
+ case Type::FloatTyID:
+ case Type::DoubleTyID:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+static bool needsFPReturnHelper(Function &F) {
+ Type* RetType = F.getReturnType();
+ return whichFPReturnVariant(RetType) != NoFPRet;
+}
+
+static bool needsFPHelperFromSig(Function &F) {
+ return needsFPStubFromParams(F) || needsFPReturnHelper(F);
+}
+
+//
+// We swap between FP and integer registers to allow Mips16 and Mips32 to
+// interoperate.
+//
+
+static void swapFPIntParams
+ (FPParamVariant PV, Module *M, InlineAsmHelper &IAH,
+ bool LE, bool ToFP) {
+ //LLVMContext &Context = M->getContext();
+ std::string MI = ToFP? "mtc1 ": "mfc1 ";
+ switch (PV) {
+ case FSig:
+ IAH.Out(MI + "$$4,$$f12");
+ break;
+ case FFSig:
+ IAH.Out(MI +"$$4,$$f12");
+ IAH.Out(MI + "$$5,$$f14");
+ break;
+ case FDSig:
+ IAH.Out(MI + "$$4,$$f12");
+ if (LE) {
+ IAH.Out(MI + "$$6,$$f14");
+ IAH.Out(MI + "$$7,$$f15");
+ } else {
+ IAH.Out(MI + "$$7,$$f14");
+ IAH.Out(MI + "$$6,$$f15");
+ }
+ break;
+ case DSig:
+ if (LE) {
+ IAH.Out(MI + "$$4,$$f12");
+ IAH.Out(MI + "$$5,$$f13");
+ } else {
+ IAH.Out(MI + "$$5,$$f12");
+ IAH.Out(MI + "$$4,$$f13");
+ }
+ break;
+ case DDSig:
+ if (LE) {
+ IAH.Out(MI + "$$4,$$f12");
+ IAH.Out(MI + "$$5,$$f13");
+ IAH.Out(MI + "$$6,$$f14");
+ IAH.Out(MI + "$$7,$$f15");
+ } else {
+ IAH.Out(MI + "$$5,$$f12");
+ IAH.Out(MI + "$$4,$$f13");
+ IAH.Out(MI + "$$7,$$f14");
+ IAH.Out(MI + "$$6,$$f15");
+ }
+ break;
+ case DFSig:
+ if (LE) {
+ IAH.Out(MI + "$$4,$$f12");
+ IAH.Out(MI + "$$5,$$f13");
+ } else {
+ IAH.Out(MI + "$$5,$$f12");
+ IAH.Out(MI + "$$4,$$f13");
+ }
+ IAH.Out(MI + "$$6,$$f14");
+ break;
+ case NoSig:
+ return;
+ }
+}
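+// For example (illustrative only): for FSig with ToFP == true this emits
+// "mtc1 $4,$f12" (the doubled '$$' above is inline asm escaping), moving the
+// integer argument register into the FP argument register; with
+// ToFP == false the direction is reversed via mfc1.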
+//
+// Create a call stub for this function if one does not already exist.
+// The caller must already have determined, by calling needsFPHelperFromSig,
+// that a stub is needed.
+//
+static void assureFPCallStub(Function &F, Module *M,
+ const MipsSubtarget &Subtarget){
+ // for now we only need them for static relocation
+ if (Subtarget.getRelocationModel() == Reloc::PIC_)
+ return;
+ LLVMContext &Context = M->getContext();
+ bool LE = Subtarget.isLittle();
+ std::string Name = F.getName();
+ std::string SectionName = ".mips16.call.fp." + Name;
+ std::string StubName = "__call_stub_fp_" + Name;
+ //
+ // see if we already have the stub
+ //
+ Function *FStub = M->getFunction(StubName);
+ if (FStub && !FStub->isDeclaration()) return;
+ FStub = Function::Create(F.getFunctionType(),
+ Function::InternalLinkage, StubName, M);
+ FStub->addFnAttr("mips16_fp_stub");
+ FStub->addFnAttr(llvm::Attribute::Naked);
+ FStub->addFnAttr(llvm::Attribute::NoInline);
+ FStub->addFnAttr(llvm::Attribute::NoUnwind);
+ FStub->addFnAttr("nomips16");
+ FStub->setSection(SectionName);
+ BasicBlock *BB = BasicBlock::Create(Context, "entry", FStub);
+ InlineAsmHelper IAH(Context, BB);
+ IAH.Out(".set reorder");
+ FPReturnVariant RV = whichFPReturnVariant(FStub->getReturnType());
+ FPParamVariant PV = whichFPParamVariantNeeded(F);
+ swapFPIntParams(PV, M, IAH, LE, true);
+ if (RV != NoFPRet) {
+ IAH.Out("move $$18, $$31");
+ IAH.Out("jal " + Name);
+ } else {
+ IAH.Out("lui $$25,%hi(" + Name + ")");
+ IAH.Out("addiu $$25,$$25,%lo(" + Name + ")" );
+ }
+ switch (RV) {
+ case FRet:
+ IAH.Out("mfc1 $$2,$$f0");
+ break;
+ case DRet:
+ if (LE) {
+ IAH.Out("mfc1 $$2,$$f0");
+ IAH.Out("mfc1 $$3,$$f1");
+ } else {
+ IAH.Out("mfc1 $$3,$$f0");
+ IAH.Out("mfc1 $$2,$$f1");
+ }
+ break;
+ case CFRet:
+ if (LE) {
+ IAH.Out("mfc1 $$2,$$f0");
+ IAH.Out("mfc1 $$3,$$f2");
+ } else {
+ IAH.Out("mfc1 $$3,$$f0");
+      IAH.Out("mfc1 $$2,$$f2");
+ }
+ break;
+ case CDRet:
+ if (LE) {
+ IAH.Out("mfc1 $$4,$$f2");
+ IAH.Out("mfc1 $$5,$$f3");
+ IAH.Out("mfc1 $$2,$$f0");
+ IAH.Out("mfc1 $$3,$$f1");
+
+ } else {
+ IAH.Out("mfc1 $$5,$$f2");
+ IAH.Out("mfc1 $$4,$$f3");
+ IAH.Out("mfc1 $$3,$$f0");
+ IAH.Out("mfc1 $$2,$$f1");
+ }
+ break;
+ case NoFPRet:
+ break;
+ }
+ if (RV != NoFPRet)
+ IAH.Out("jr $$18");
+ else
+ IAH.Out("jr $$25");
+ new UnreachableInst(Context, BB);
+}
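+// For example (illustrative only): for an extern "float foo(float)" called
+// from mips16 code under static relocation, the stub __call_stub_fp_foo is
+// placed in section .mips16.call.fp.foo; it moves $4 into $f12, saves $31 in
+// $18, calls foo, moves the $f0 result back into $2, and returns with
+// "jr $18".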
+
+//
+// Functions that are llvm intrinsics and don't need helpers.
+//
+static const char *IntrinsicInline[] =
+ {"fabs",
+ "fabsf",
+ "llvm.ceil.f32", "llvm.ceil.f64",
+ "llvm.copysign.f32", "llvm.copysign.f64",
+ "llvm.cos.f32", "llvm.cos.f64",
+ "llvm.exp.f32", "llvm.exp.f64",
+ "llvm.exp2.f32", "llvm.exp2.f64",
+ "llvm.fabs.f32", "llvm.fabs.f64",
+ "llvm.floor.f32", "llvm.floor.f64",
+ "llvm.fma.f32", "llvm.fma.f64",
+ "llvm.log.f32", "llvm.log.f64",
+ "llvm.log10.f32", "llvm.log10.f64",
+ "llvm.nearbyint.f32", "llvm.nearbyint.f64",
+ "llvm.pow.f32", "llvm.pow.f64",
+ "llvm.powi.f32", "llvm.powi.f64",
+ "llvm.rint.f32", "llvm.rint.f64",
+ "llvm.round.f32", "llvm.round.f64",
+ "llvm.sin.f32", "llvm.sin.f64",
+ "llvm.sqrt.f32", "llvm.sqrt.f64",
+ "llvm.trunc.f32", "llvm.trunc.f64",
+ };
+
+static bool isIntrinsicInline(Function *F) {
+ return std::binary_search(
+ IntrinsicInline, array_endof(IntrinsicInline),
+ F->getName());
+}
+//
+// Returns of float, double and complex need to be handled with a helper
+// function.
+//
+static bool fixupFPReturnAndCall
+ (Function &F, Module *M, const MipsSubtarget &Subtarget) {
+ bool Modified = false;
+ LLVMContext &C = M->getContext();
+ Type *MyVoid = Type::getVoidTy(C);
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end();
+ I != E; ++I) {
+ Instruction &Inst = *I;
+ if (const ReturnInst *RI = dyn_cast<ReturnInst>(I)) {
+ Value *RVal = RI->getReturnValue();
+ if (!RVal) continue;
+ //
+ // If there is a return value and it needs a helper function,
+ // figure out which one and add a call before the actual
+ // return to this helper. The purpose of the helper is to move
+ // floating point values from their soft float return mapping to
+ // where they would have been mapped to in floating point registers.
+ //
+ Type *T = RVal->getType();
+ FPReturnVariant RV = whichFPReturnVariant(T);
+ if (RV == NoFPRet) continue;
+ static const char* Helper[NoFPRet] =
+ {"__mips16_ret_sf", "__mips16_ret_df", "__mips16_ret_sc",
+ "__mips16_ret_dc"};
+ const char *Name = Helper[RV];
+ AttributeSet A;
+ Value *Params[] = {RVal};
+ Modified = true;
+ //
+        // These helper functions have a different calling ABI, so the
+        // __Mips16RetHelper attribute marks them; later, during call setup,
+        // the proper call lowering to the helper functions will take place.
+ //
+ A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ "__Mips16RetHelper");
+ A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ Attribute::ReadNone);
+ A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ Attribute::NoInline);
+ Value *F = (M->getOrInsertFunction(Name, A, MyVoid, T, NULL));
+ CallInst::Create(F, Params, "", &Inst );
+ } else if (const CallInst *CI = dyn_cast<CallInst>(I)) {
+ // pic mode calls are handled by already defined
+ // helper functions
+ if (Subtarget.getRelocationModel() != Reloc::PIC_ ) {
+ Function *F_ = CI->getCalledFunction();
+ if (F_ && !isIntrinsicInline(F_) && needsFPHelperFromSig(*F_)) {
+ assureFPCallStub(*F_, M, Subtarget);
+ Modified=true;
+ }
+ }
+ }
+ }
+ return Modified;
+}
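+// For example (illustrative only): a mips16 function returning double gets a
+// call to __mips16_ret_df(retval) inserted immediately before each return,
+// so the helper can move the soft-float return value into the FP return
+// registers.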
+
+static void createFPFnStub(Function *F, Module *M, FPParamVariant PV,
+ const MipsSubtarget &Subtarget ) {
+ bool PicMode = Subtarget.getRelocationModel() == Reloc::PIC_;
+ bool LE = Subtarget.isLittle();
+ LLVMContext &Context = M->getContext();
+ std::string Name = F->getName();
+ std::string SectionName = ".mips16.fn." + Name;
+ std::string StubName = "__fn_stub_" + Name;
+ std::string LocalName = "$$__fn_local_" + Name;
+ Function *FStub = Function::Create
+ (F->getFunctionType(),
+ Function::InternalLinkage, StubName, M);
+ FStub->addFnAttr("mips16_fp_stub");
+ FStub->addFnAttr(llvm::Attribute::Naked);
+ FStub->addFnAttr(llvm::Attribute::NoUnwind);
+ FStub->addFnAttr(llvm::Attribute::NoInline);
+ FStub->addFnAttr("nomips16");
+ FStub->setSection(SectionName);
+ BasicBlock *BB = BasicBlock::Create(Context, "entry", FStub);
+ InlineAsmHelper IAH(Context, BB);
+ IAH.Out(" .set macro");
+ if (PicMode) {
+ IAH.Out(".set noreorder");
+ IAH.Out(".cpload $$25");
+ IAH.Out(".set reorder");
+ IAH.Out(".reloc 0,R_MIPS_NONE," + Name);
+ IAH.Out("la $$25," + LocalName);
+ }
+ else {
+ IAH.Out(".set reorder");
+ IAH.Out("la $$25," + Name);
+ }
+ swapFPIntParams(PV, M, IAH, LE, false);
+ IAH.Out("jr $$25");
+ IAH.Out(LocalName + " = " + Name);
+ new UnreachableInst(FStub->getContext(), BB);
+}
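+// For example (illustrative only): for a mips16 function "void bar(double)"
+// built with static relocation, the stub __fn_stub_bar goes in section
+// .mips16.fn.bar; it loads the address of bar into $25, moves the incoming
+// $f12/$f13 argument into $4/$5 via mfc1 (little-endian order shown), and
+// jumps to bar with "jr $25".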
+
+//
+// remove the use-soft-float attribute
+//
+static void removeUseSoftFloat(Function &F) {
+ AttributeSet A;
+ DEBUG(errs() << "removing -use-soft-float\n");
+ A = A.addAttribute(F.getContext(), AttributeSet::FunctionIndex,
+ "use-soft-float", "false");
+ F.removeAttributes(AttributeSet::FunctionIndex, A);
+ if (F.hasFnAttribute("use-soft-float")) {
+ DEBUG(errs() << "still has -use-soft-float\n");
+ }
+ F.addAttributes(AttributeSet::FunctionIndex, A);
+}
+
+namespace llvm {
+
+//
+// This pass only makes sense when the underlying chip has floating point but
+// we are compiling as mips16.
+// For all mips16 functions (that are not stubs we have already generated, or
+// declared via attributes as nomips16), we must:
+// 1) fix up all returns of float, double, single and double complex
+//    by calling a helper function before the actual return.
+// 2) generate helper functions (stubs) that can be called by mips32 functions
+//    to move parameters that would normally be passed in floating point
+//    registers into their soft float equivalents.
+// 3) in the case of static relocation, generate helper functions so that
+//    mips16 functions can call extern functions of unknown type (mips16 or
+//    mips32).
+// 4) TBD. For pic, calls to extern functions of unknown type are handled by
+//    predefined helper functions in libc; this work is currently done during
+//    call lowering, but it should be moved here in the future.
+//
+bool Mips16HardFloat::runOnModule(Module &M) {
+ DEBUG(errs() << "Run on Module Mips16HardFloat\n");
+ bool Modified = false;
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->hasFnAttribute("nomips16") &&
+ F->hasFnAttribute("use-soft-float")) {
+ removeUseSoftFloat(*F);
+ continue;
+ }
+ if (F->isDeclaration() || F->hasFnAttribute("mips16_fp_stub") ||
+ F->hasFnAttribute("nomips16")) continue;
+ Modified |= fixupFPReturnAndCall(*F, &M, Subtarget);
+ FPParamVariant V = whichFPParamVariantNeeded(*F);
+ if (V != NoSig) {
+ Modified = true;
+ createFPFnStub(F, &M, V, Subtarget);
+ }
+ }
+ return Modified;
+}
+
+char Mips16HardFloat::ID = 0;
+
+}
+
+ModulePass *llvm::createMips16HardFloat(MipsTargetMachine &TM) {
+ return new Mips16HardFloat(TM);
+}
+
diff --git a/lib/Target/Mips/Mips16HardFloat.h b/lib/Target/Mips/Mips16HardFloat.h
new file mode 100644
index 000000000000..b7f712af5b28
--- /dev/null
+++ b/lib/Target/Mips/Mips16HardFloat.h
@@ -0,0 +1,54 @@
+//===---- Mips16HardFloat.h for Mips16 Hard Float --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a phase which implements part of the floating point
+// interoperability between Mips16 and Mips32 code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "MipsTargetMachine.h"
+#include "llvm/Pass.h"
+#include "llvm/Target/TargetMachine.h"
+
+
+#ifndef MIPS16HARDFLOAT_H
+#define MIPS16HARDFLOAT_H
+
+using namespace llvm;
+
+namespace llvm {
+
+class Mips16HardFloat : public ModulePass {
+
+public:
+ static char ID;
+
+ Mips16HardFloat(MipsTargetMachine &TM_) : ModulePass(ID),
+ TM(TM_), Subtarget(TM.getSubtarget<MipsSubtarget>()) {
+ }
+
+ virtual const char *getPassName() const {
+ return "MIPS16 Hard Float Pass";
+ }
+
+ virtual bool runOnModule(Module &M);
+
+protected:
+ /// Keep a pointer to the MipsSubtarget around so that we can make the right
+ /// decision when generating code for different targets.
+ const TargetMachine &TM;
+ const MipsSubtarget &Subtarget;
+
+};
+
+ModulePass *createMips16HardFloat(MipsTargetMachine &TM);
+
+}
+#endif
diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
index c1c635cb9f4f..4948f40734c8 100644
--- a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
+++ b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
@@ -42,7 +42,7 @@ bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
}
/// Select multiply instructions.
std::pair<SDNode*, SDNode*>
-Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, DebugLoc DL, EVT Ty,
+Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, SDLoc DL, EVT Ty,
bool HasLo, bool HasHi) {
SDNode *Lo = 0, *Hi = 0;
SDNode *Mul = CurDAG->getMachineNode(Opc, DL, MVT::Glue, N->getOperand(0),
@@ -80,10 +80,11 @@ void Mips16DAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
V1 = RegInfo.createVirtualRegister(RC);
V2 = RegInfo.createVirtualRegister(RC);
- BuildMI(MBB, I, DL, TII.get(Mips::LiRxImmX16), V0)
- .addExternalSymbol("_gp_disp", MipsII::MO_ABS_HI);
- BuildMI(MBB, I, DL, TII.get(Mips::AddiuRxPcImmX16), V1)
- .addExternalSymbol("_gp_disp", MipsII::MO_ABS_LO);
+ BuildMI(MBB, I, DL, TII.get(Mips::GotPrologue16), V0).
+ addReg(V1, RegState::Define).
+ addExternalSymbol("_gp_disp", MipsII::MO_ABS_HI).
+ addExternalSymbol("_gp_disp", MipsII::MO_ABS_LO);
+
BuildMI(MBB, I, DL, TII.get(Mips::SllX16), V2).addReg(V0).addImm(16);
BuildMI(MBB, I, DL, TII.get(Mips::AdduRxRyRz16), GlobalBaseReg)
.addReg(V1).addReg(V2);
@@ -118,11 +119,13 @@ void Mips16DAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
SDValue Mips16DAGToDAGISel::getMips16SPAliasReg() {
unsigned Mips16SPAliasReg =
MF->getInfo<MipsFunctionInfo>()->getMips16SPAliasReg();
- return CurDAG->getRegister(Mips16SPAliasReg, TLI.getPointerTy());
+ return CurDAG->getRegister(Mips16SPAliasReg,
+ getTargetLowering()->getPointerTy());
}
void Mips16DAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) {
- SDValue AliasFPReg = CurDAG->getRegister(Mips::S0, TLI.getPointerTy());
+ SDValue AliasFPReg = CurDAG->getRegister(Mips::S0,
+ getTargetLowering()->getPointerTy());
if (Parent) {
switch (Parent->getOpcode()) {
case ISD::LOAD: {
@@ -149,7 +152,7 @@ void Mips16DAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) {
}
}
}
- AliasReg = CurDAG->getRegister(Mips::SP, TLI.getPointerTy());
+ AliasReg = CurDAG->getRegister(Mips::SP, getTargetLowering()->getPointerTy());
return;
}
@@ -235,7 +238,7 @@ bool Mips16DAGToDAGISel::selectAddr16(
/// expanded, promoted and normal instructions
std::pair<bool, SDNode*> Mips16DAGToDAGISel::selectNode(SDNode *Node) {
unsigned Opcode = Node->getOpcode();
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
///
// Instruction Selection not handled by the auto-generated
diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.h b/lib/Target/Mips/Mips16ISelDAGToDAG.h
index f05f9b766df8..49dc6e587b62 100644
--- a/lib/Target/Mips/Mips16ISelDAGToDAG.h
+++ b/lib/Target/Mips/Mips16ISelDAGToDAG.h
@@ -23,7 +23,7 @@ public:
explicit Mips16DAGToDAGISel(MipsTargetMachine &TM) : MipsDAGToDAGISel(TM) {}
private:
- std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, DebugLoc DL,
+ std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, SDLoc DL,
EVT Ty, bool HasLo, bool HasHi);
SDValue getMips16SPAliasReg();
diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp
index f63318f1e6de..61d8bb8e5582 100644
--- a/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -13,19 +13,14 @@
#define DEBUG_TYPE "mips-lower"
#include "Mips16ISelLowering.h"
#include "MipsRegisterInfo.h"
+#include "MipsTargetMachine.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include <set>
using namespace llvm;
-static cl::opt<bool>
-Mips16HardFloat("mips16-hard-float", cl::NotHidden,
- cl::desc("MIPS: mips16 hard float enable."),
- cl::init(false));
-
static cl::opt<bool> DontExpandCondPseudos16(
"mips16-dont-expand-cond-pseudo",
cl::init(false),
@@ -34,15 +29,98 @@ static cl::opt<bool> DontExpandCondPseudos16(
cl::Hidden);
namespace {
- std::set<const char*, MipsTargetLowering::LTStr> NoHelperNeeded;
+struct Mips16Libcall {
+ RTLIB::Libcall Libcall;
+ const char *Name;
+
+ bool operator<(const Mips16Libcall &RHS) const {
+ return std::strcmp(Name, RHS.Name) < 0;
+ }
+};
+
+struct Mips16IntrinsicHelperType{
+ const char* Name;
+ const char* Helper;
+
+ bool operator<(const Mips16IntrinsicHelperType &RHS) const {
+ return std::strcmp(Name, RHS.Name) < 0;
+ }
+ bool operator==(const Mips16IntrinsicHelperType &RHS) const {
+ return std::strcmp(Name, RHS.Name) == 0;
+ }
+};
}
+// Libcalls for which no helper is generated. Sorted by name for binary search.
+static const Mips16Libcall HardFloatLibCalls[] = {
+ { RTLIB::ADD_F64, "__mips16_adddf3" },
+ { RTLIB::ADD_F32, "__mips16_addsf3" },
+ { RTLIB::DIV_F64, "__mips16_divdf3" },
+ { RTLIB::DIV_F32, "__mips16_divsf3" },
+ { RTLIB::OEQ_F64, "__mips16_eqdf2" },
+ { RTLIB::OEQ_F32, "__mips16_eqsf2" },
+ { RTLIB::FPEXT_F32_F64, "__mips16_extendsfdf2" },
+ { RTLIB::FPTOSINT_F64_I32, "__mips16_fix_truncdfsi" },
+ { RTLIB::FPTOSINT_F32_I32, "__mips16_fix_truncsfsi" },
+ { RTLIB::SINTTOFP_I32_F64, "__mips16_floatsidf" },
+ { RTLIB::SINTTOFP_I32_F32, "__mips16_floatsisf" },
+ { RTLIB::UINTTOFP_I32_F64, "__mips16_floatunsidf" },
+ { RTLIB::UINTTOFP_I32_F32, "__mips16_floatunsisf" },
+ { RTLIB::OGE_F64, "__mips16_gedf2" },
+ { RTLIB::OGE_F32, "__mips16_gesf2" },
+ { RTLIB::OGT_F64, "__mips16_gtdf2" },
+ { RTLIB::OGT_F32, "__mips16_gtsf2" },
+ { RTLIB::OLE_F64, "__mips16_ledf2" },
+ { RTLIB::OLE_F32, "__mips16_lesf2" },
+ { RTLIB::OLT_F64, "__mips16_ltdf2" },
+ { RTLIB::OLT_F32, "__mips16_ltsf2" },
+ { RTLIB::MUL_F64, "__mips16_muldf3" },
+ { RTLIB::MUL_F32, "__mips16_mulsf3" },
+ { RTLIB::UNE_F64, "__mips16_nedf2" },
+ { RTLIB::UNE_F32, "__mips16_nesf2" },
+ { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_dc" }, // No associated libcall.
+ { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_df" }, // No associated libcall.
+ { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_sc" }, // No associated libcall.
+ { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_sf" }, // No associated libcall.
+ { RTLIB::SUB_F64, "__mips16_subdf3" },
+ { RTLIB::SUB_F32, "__mips16_subsf3" },
+ { RTLIB::FPROUND_F64_F32, "__mips16_truncdfsf2" },
+ { RTLIB::UO_F64, "__mips16_unorddf2" },
+ { RTLIB::UO_F32, "__mips16_unordsf2" }
+};
+
+static const Mips16IntrinsicHelperType Mips16IntrinsicHelper[] = {
+ {"__fixunsdfsi", "__mips16_call_stub_2" },
+ {"ceil", "__mips16_call_stub_df_2"},
+ {"ceilf", "__mips16_call_stub_sf_1"},
+ {"copysign", "__mips16_call_stub_df_10"},
+ {"copysignf", "__mips16_call_stub_sf_5"},
+ {"cos", "__mips16_call_stub_df_2"},
+ {"cosf", "__mips16_call_stub_sf_1"},
+ {"exp2", "__mips16_call_stub_df_2"},
+ {"exp2f", "__mips16_call_stub_sf_1"},
+ {"floor", "__mips16_call_stub_df_2"},
+ {"floorf", "__mips16_call_stub_sf_1"},
+ {"log2", "__mips16_call_stub_df_2"},
+ {"log2f", "__mips16_call_stub_sf_1"},
+ {"nearbyint", "__mips16_call_stub_df_2"},
+ {"nearbyintf", "__mips16_call_stub_sf_1"},
+ {"rint", "__mips16_call_stub_df_2"},
+ {"rintf", "__mips16_call_stub_sf_1"},
+ {"sin", "__mips16_call_stub_df_2"},
+ {"sinf", "__mips16_call_stub_sf_1"},
+ {"sqrt", "__mips16_call_stub_df_2"},
+ {"sqrtf", "__mips16_call_stub_sf_1"},
+ {"trunc", "__mips16_call_stub_df_2"},
+ {"truncf", "__mips16_call_stub_sf_1"},
+};
+
Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM)
: MipsTargetLowering(TM) {
//
// set up as if mips32 and then revert so we can test the mechanism
// for switching
- addRegisterClass(MVT::i32, &Mips::CPURegsRegClass);
+ addRegisterClass(MVT::i32, &Mips::GPR32RegClass);
addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
computeRegisterProperties();
clearRegisterClasses();
@@ -50,7 +128,7 @@ Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM)
// Set up the register classes
addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass);
- if (Mips16HardFloat)
+ if (Subtarget->inMips16HardFloat())
setMips16HardFloatLibCalls();
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
@@ -67,6 +145,11 @@ Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i64, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+
computeRegisterProperties();
}
@@ -91,57 +174,57 @@ Mips16TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case Mips::SelBneZ:
return emitSel16(Mips::BnezRxImm16, MI, BB);
case Mips::SelTBteqZCmpi:
- return emitSeliT16(Mips::BteqzX16, Mips::CmpiRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Bteqz16, Mips::CmpiRxImmX16, MI, BB);
case Mips::SelTBteqZSlti:
- return emitSeliT16(Mips::BteqzX16, Mips::SltiRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Bteqz16, Mips::SltiRxImmX16, MI, BB);
case Mips::SelTBteqZSltiu:
- return emitSeliT16(Mips::BteqzX16, Mips::SltiuRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Bteqz16, Mips::SltiuRxImmX16, MI, BB);
case Mips::SelTBtneZCmpi:
- return emitSeliT16(Mips::BtnezX16, Mips::CmpiRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Btnez16, Mips::CmpiRxImmX16, MI, BB);
case Mips::SelTBtneZSlti:
- return emitSeliT16(Mips::BtnezX16, Mips::SltiRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Btnez16, Mips::SltiRxImmX16, MI, BB);
case Mips::SelTBtneZSltiu:
- return emitSeliT16(Mips::BtnezX16, Mips::SltiuRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Btnez16, Mips::SltiuRxImmX16, MI, BB);
case Mips::SelTBteqZCmp:
- return emitSelT16(Mips::BteqzX16, Mips::CmpRxRy16, MI, BB);
+ return emitSelT16(Mips::Bteqz16, Mips::CmpRxRy16, MI, BB);
case Mips::SelTBteqZSlt:
- return emitSelT16(Mips::BteqzX16, Mips::SltRxRy16, MI, BB);
+ return emitSelT16(Mips::Bteqz16, Mips::SltRxRy16, MI, BB);
case Mips::SelTBteqZSltu:
- return emitSelT16(Mips::BteqzX16, Mips::SltuRxRy16, MI, BB);
+ return emitSelT16(Mips::Bteqz16, Mips::SltuRxRy16, MI, BB);
case Mips::SelTBtneZCmp:
- return emitSelT16(Mips::BtnezX16, Mips::CmpRxRy16, MI, BB);
+ return emitSelT16(Mips::Btnez16, Mips::CmpRxRy16, MI, BB);
case Mips::SelTBtneZSlt:
- return emitSelT16(Mips::BtnezX16, Mips::SltRxRy16, MI, BB);
+ return emitSelT16(Mips::Btnez16, Mips::SltRxRy16, MI, BB);
case Mips::SelTBtneZSltu:
- return emitSelT16(Mips::BtnezX16, Mips::SltuRxRy16, MI, BB);
+ return emitSelT16(Mips::Btnez16, Mips::SltuRxRy16, MI, BB);
case Mips::BteqzT8CmpX16:
- return emitFEXT_T8I816_ins(Mips::BteqzX16, Mips::CmpRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Bteqz16, Mips::CmpRxRy16, MI, BB);
case Mips::BteqzT8SltX16:
- return emitFEXT_T8I816_ins(Mips::BteqzX16, Mips::SltRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Bteqz16, Mips::SltRxRy16, MI, BB);
case Mips::BteqzT8SltuX16:
// TBD: figure out a way to get this or remove the instruction
// altogether.
- return emitFEXT_T8I816_ins(Mips::BteqzX16, Mips::SltuRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Bteqz16, Mips::SltuRxRy16, MI, BB);
case Mips::BtnezT8CmpX16:
- return emitFEXT_T8I816_ins(Mips::BtnezX16, Mips::CmpRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Btnez16, Mips::CmpRxRy16, MI, BB);
case Mips::BtnezT8SltX16:
- return emitFEXT_T8I816_ins(Mips::BtnezX16, Mips::SltRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Btnez16, Mips::SltRxRy16, MI, BB);
case Mips::BtnezT8SltuX16:
// TBD: figure out a way to get this or remove the instruction
// altogether.
- return emitFEXT_T8I816_ins(Mips::BtnezX16, Mips::SltuRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Btnez16, Mips::SltuRxRy16, MI, BB);
case Mips::BteqzT8CmpiX16: return emitFEXT_T8I8I16_ins(
- Mips::BteqzX16, Mips::CmpiRxImm16, Mips::CmpiRxImmX16, MI, BB);
+ Mips::Bteqz16, Mips::CmpiRxImm16, Mips::CmpiRxImmX16, false, MI, BB);
case Mips::BteqzT8SltiX16: return emitFEXT_T8I8I16_ins(
- Mips::BteqzX16, Mips::SltiRxImm16, Mips::SltiRxImmX16, MI, BB);
+ Mips::Bteqz16, Mips::SltiRxImm16, Mips::SltiRxImmX16, true, MI, BB);
case Mips::BteqzT8SltiuX16: return emitFEXT_T8I8I16_ins(
- Mips::BteqzX16, Mips::SltiuRxImm16, Mips::SltiuRxImmX16, MI, BB);
+ Mips::Bteqz16, Mips::SltiuRxImm16, Mips::SltiuRxImmX16, false, MI, BB);
case Mips::BtnezT8CmpiX16: return emitFEXT_T8I8I16_ins(
- Mips::BtnezX16, Mips::CmpiRxImm16, Mips::CmpiRxImmX16, MI, BB);
+ Mips::Btnez16, Mips::CmpiRxImm16, Mips::CmpiRxImmX16, false, MI, BB);
case Mips::BtnezT8SltiX16: return emitFEXT_T8I8I16_ins(
- Mips::BtnezX16, Mips::SltiRxImm16, Mips::SltiRxImmX16, MI, BB);
+ Mips::Btnez16, Mips::SltiRxImm16, Mips::SltiRxImmX16, true, MI, BB);
case Mips::BtnezT8SltiuX16: return emitFEXT_T8I8I16_ins(
- Mips::BtnezX16, Mips::SltiuRxImm16, Mips::SltiuRxImmX16, MI, BB);
+ Mips::Btnez16, Mips::SltiuRxImm16, Mips::SltiuRxImmX16, false, MI, BB);
break;
case Mips::SltCCRxRy16:
return emitFEXT_CCRX16_ins(Mips::SltRxRy16, MI, BB);
@@ -166,47 +249,17 @@ isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
return false;
}
-void Mips16TargetLowering::setMips16LibcallName
- (RTLIB::Libcall L, const char *Name) {
- setLibcallName(L, Name);
- NoHelperNeeded.insert(Name);
-}
-
void Mips16TargetLowering::setMips16HardFloatLibCalls() {
- setMips16LibcallName(RTLIB::ADD_F32, "__mips16_addsf3");
- setMips16LibcallName(RTLIB::ADD_F64, "__mips16_adddf3");
- setMips16LibcallName(RTLIB::SUB_F32, "__mips16_subsf3");
- setMips16LibcallName(RTLIB::SUB_F64, "__mips16_subdf3");
- setMips16LibcallName(RTLIB::MUL_F32, "__mips16_mulsf3");
- setMips16LibcallName(RTLIB::MUL_F64, "__mips16_muldf3");
- setMips16LibcallName(RTLIB::DIV_F32, "__mips16_divsf3");
- setMips16LibcallName(RTLIB::DIV_F64, "__mips16_divdf3");
- setMips16LibcallName(RTLIB::FPEXT_F32_F64, "__mips16_extendsfdf2");
- setMips16LibcallName(RTLIB::FPROUND_F64_F32, "__mips16_truncdfsf2");
- setMips16LibcallName(RTLIB::FPTOSINT_F32_I32, "__mips16_fix_truncsfsi");
- setMips16LibcallName(RTLIB::FPTOSINT_F64_I32, "__mips16_fix_truncdfsi");
- setMips16LibcallName(RTLIB::SINTTOFP_I32_F32, "__mips16_floatsisf");
- setMips16LibcallName(RTLIB::SINTTOFP_I32_F64, "__mips16_floatsidf");
- setMips16LibcallName(RTLIB::UINTTOFP_I32_F32, "__mips16_floatunsisf");
- setMips16LibcallName(RTLIB::UINTTOFP_I32_F64, "__mips16_floatunsidf");
- setMips16LibcallName(RTLIB::OEQ_F32, "__mips16_eqsf2");
- setMips16LibcallName(RTLIB::OEQ_F64, "__mips16_eqdf2");
- setMips16LibcallName(RTLIB::UNE_F32, "__mips16_nesf2");
- setMips16LibcallName(RTLIB::UNE_F64, "__mips16_nedf2");
- setMips16LibcallName(RTLIB::OGE_F32, "__mips16_gesf2");
- setMips16LibcallName(RTLIB::OGE_F64, "__mips16_gedf2");
- setMips16LibcallName(RTLIB::OLT_F32, "__mips16_ltsf2");
- setMips16LibcallName(RTLIB::OLT_F64, "__mips16_ltdf2");
- setMips16LibcallName(RTLIB::OLE_F32, "__mips16_lesf2");
- setMips16LibcallName(RTLIB::OLE_F64, "__mips16_ledf2");
- setMips16LibcallName(RTLIB::OGT_F32, "__mips16_gtsf2");
- setMips16LibcallName(RTLIB::OGT_F64, "__mips16_gtdf2");
- setMips16LibcallName(RTLIB::UO_F32, "__mips16_unordsf2");
- setMips16LibcallName(RTLIB::UO_F64, "__mips16_unorddf2");
- setMips16LibcallName(RTLIB::O_F32, "__mips16_unordsf2");
- setMips16LibcallName(RTLIB::O_F64, "__mips16_unorddf2");
-}
+ for (unsigned I = 0; I != array_lengthof(HardFloatLibCalls); ++I) {
+ assert((I == 0 || HardFloatLibCalls[I - 1] < HardFloatLibCalls[I]) &&
+ "Array not sorted!");
+ if (HardFloatLibCalls[I].Libcall != RTLIB::UNKNOWN_LIBCALL)
+ setLibcallName(HardFloatLibCalls[I].Libcall, HardFloatLibCalls[I].Name);
+ }
+ setLibcallName(RTLIB::O_F64, "__mips16_unorddf2");
+ setLibcallName(RTLIB::O_F32, "__mips16_unordsf2");
+}
//
// The Mips16 hard float is a crazy quilt inherited from gcc. I have a much
@@ -371,10 +424,13 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
SelectionDAG &DAG = CLI.DAG;
+ MachineFunction &MF = DAG.getMachineFunction();
+ MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
const char* Mips16HelperFunction = 0;
bool NeedMips16Helper = false;
- if (getTargetMachine().Options.UseSoftFloat && Mips16HardFloat) {
+ if (getTargetMachine().Options.UseSoftFloat &&
+ Subtarget->inMips16HardFloat()) {
//
// currently we don't have symbols tagged with the mips16 or mips32
// qualifier so we will assume that we don't know what kind it is.
@@ -382,9 +438,34 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
//
bool LookupHelper = true;
if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(CLI.Callee)) {
- if (NoHelperNeeded.find(S->getSymbol()) != NoHelperNeeded.end()) {
+ Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL, S->getSymbol() };
+
+ if (std::binary_search(HardFloatLibCalls, array_endof(HardFloatLibCalls),
+ Find))
LookupHelper = false;
+ else {
+ Mips16IntrinsicHelperType IntrinsicFind = {S->getSymbol(), ""};
+ // one more look at list of intrinsics
+ if (std::binary_search(Mips16IntrinsicHelper,
+ array_endof(Mips16IntrinsicHelper),
+ IntrinsicFind)) {
+ const Mips16IntrinsicHelperType *h =(std::find(Mips16IntrinsicHelper,
+ array_endof(Mips16IntrinsicHelper),
+ IntrinsicFind));
+ Mips16HelperFunction = h->Helper;
+ NeedMips16Helper = true;
+ LookupHelper = false;
+ }
+
}
+ } else if (GlobalAddressSDNode *G =
+ dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
+ Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL,
+ G->getGlobal()->getName().data() };
+
+ if (std::binary_search(HardFloatLibCalls, array_endof(HardFloatLibCalls),
+ Find))
+ LookupHelper = false;
}
if (LookupHelper) Mips16HelperFunction =
getMips16HelperFunction(CLI.RetTy, CLI.Args, NeedMips16Helper);
@@ -400,7 +481,10 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
if (NeedMips16Helper) {
RegsToPass.push_front(std::make_pair(V0Reg, Callee));
JumpTarget = DAG.getExternalSymbol(Mips16HelperFunction, getPointerTy());
- JumpTarget = getAddrGlobal(JumpTarget, DAG, MipsII::MO_GOT);
+ ExternalSymbolSDNode *S = cast<ExternalSymbolSDNode>(JumpTarget);
+ JumpTarget = getAddrGlobal(S, JumpTarget.getValueType(), DAG,
+ MipsII::MO_GOT, Chain,
+ FuncInfo->callPtrInfo(S->getSymbol()));
} else
RegsToPass.push_front(std::make_pair((unsigned)Mips::T9, Callee));
}
@@ -621,7 +705,7 @@ MachineBasicBlock
}
MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins(
- unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc,
+ unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc, bool ImmSigned,
MachineInstr *MI, MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
@@ -632,7 +716,8 @@ MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins(
unsigned CmpOpc;
if (isUInt<8>(imm))
CmpOpc = CmpiOpc;
- else if (isUInt<16>(imm))
+ else if ((!ImmSigned && isUInt<16>(imm)) ||
+ (ImmSigned && isInt<16>(imm)))
CmpOpc = CmpiXOpc;
else
llvm_unreachable("immediate field not usable");
diff --git a/lib/Target/Mips/Mips16ISelLowering.h b/lib/Target/Mips/Mips16ISelLowering.h
index b23e2a1f37db..33b953f6ff39 100644
--- a/lib/Target/Mips/Mips16ISelLowering.h
+++ b/lib/Target/Mips/Mips16ISelLowering.h
@@ -32,8 +32,6 @@ namespace llvm {
unsigned NextStackOffset,
const MipsFunctionInfo& FI) const;
- void setMips16LibcallName(RTLIB::Libcall, const char *Name);
-
void setMips16HardFloatLibCalls();
unsigned int
@@ -64,7 +62,7 @@ namespace llvm {
MachineBasicBlock *BB) const;
MachineBasicBlock *emitFEXT_T8I8I16_ins(
- unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc,
+ unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc, bool ImmSigned,
MachineInstr *MI, MachineBasicBlock *BB) const;
MachineBasicBlock *emitFEXT_CCRX16_ins(
diff --git a/lib/Target/Mips/Mips16InstrFormats.td b/lib/Target/Mips/Mips16InstrFormats.td
index 4ff62ef3b6f9..da3a1f114af3 100644
--- a/lib/Target/Mips/Mips16InstrFormats.td
+++ b/lib/Target/Mips/Mips16InstrFormats.td
@@ -61,7 +61,7 @@ class MipsInst16<dag outs, dag ins, string asmstr, list<dag> pattern,
// Top 5 bits are the 'opcode' field
let Inst{15-11} = Opcode;
-
+
let Size=2;
field bits<16> SoftFail = 0;
}
@@ -74,7 +74,7 @@ class MipsInst16_32<dag outs, dag ins, string asmstr, list<dag> pattern,
MipsInst16_Base<outs, ins, asmstr, pattern, itin>
{
field bits<32> Inst;
-
+
let Size=4;
field bits<32> SoftFail = 0;
}
@@ -148,6 +148,20 @@ class FRR16<bits<5> _funct, dag outs, dag ins, string asmstr,
let Inst{4-0} = funct;
}
+class FRRBreak16<dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin>
+{
+ bits<6> Code;
+ bits<5> funct;
+
+ let Opcode = 0b11101;
+ let funct = 0b00101;
+
+ let Inst{10-5} = Code;
+ let Inst{4-0} = funct;
+}
+
//
// For conversion functions.
//
diff --git a/lib/Target/Mips/Mips16InstrInfo.cpp b/lib/Target/Mips/Mips16InstrInfo.cpp
index 17dd2c07967a..000ea2897f43 100644
--- a/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -10,7 +10,6 @@
// This file contains the Mips16 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
-
#include "Mips16InstrInfo.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
@@ -20,10 +19,12 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
+#include <cctype>
using namespace llvm;
@@ -36,8 +37,8 @@ static cl::opt<bool> NeverUseSaveRestore(
Mips16InstrInfo::Mips16InstrInfo(MipsTargetMachine &tm)
- : MipsInstrInfo(tm, Mips::BimmX16),
- RI(*tm.getSubtargetImpl(), *this) {}
+ : MipsInstrInfo(tm, Mips::Bimm16),
+ RI(*tm.getSubtargetImpl()) {}
const MipsRegisterInfo &Mips16InstrInfo::getRegisterInfo() const {
return RI;
@@ -72,16 +73,16 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned Opc = 0;
if (Mips::CPU16RegsRegClass.contains(DestReg) &&
- Mips::CPURegsRegClass.contains(SrcReg))
+ Mips::GPR32RegClass.contains(SrcReg))
Opc = Mips::MoveR3216;
- else if (Mips::CPURegsRegClass.contains(DestReg) &&
+ else if (Mips::GPR32RegClass.contains(DestReg) &&
Mips::CPU16RegsRegClass.contains(SrcReg))
Opc = Mips::Move32R16;
- else if ((SrcReg == Mips::HI) &&
+ else if ((SrcReg == Mips::HI0) &&
(Mips::CPU16RegsRegClass.contains(DestReg)))
Opc = Mips::Mfhi16, SrcReg = 0;
- else if ((SrcReg == Mips::LO) &&
+ else if ((SrcReg == Mips::LO0) &&
(Mips::CPU16RegsRegClass.contains(DestReg)))
Opc = Mips::Mflo16, SrcReg = 0;
@@ -109,8 +110,9 @@ storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
if (Mips::CPU16RegsRegClass.hasSubClassEq(RC))
Opc = Mips::SwRxSpImmX16;
assert(Opc && "Register class not handled!");
- BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
+ BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill)).
+ addFrameIndex(FI).addImm(Offset)
+ .addMemOperand(MMO);
}
void Mips16InstrInfo::
@@ -145,18 +147,22 @@ bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
/// GetOppositeBranchOpc - Return the inverse of the specified
/// opcode, e.g. turning BEQ to BNE.
-unsigned Mips16InstrInfo::GetOppositeBranchOpc(unsigned Opc) const {
+unsigned Mips16InstrInfo::getOppositeBranchOpc(unsigned Opc) const {
switch (Opc) {
default: llvm_unreachable("Illegal opcode!");
case Mips::BeqzRxImmX16: return Mips::BnezRxImmX16;
case Mips::BnezRxImmX16: return Mips::BeqzRxImmX16;
+ case Mips::BeqzRxImm16: return Mips::BnezRxImm16;
+ case Mips::BnezRxImm16: return Mips::BeqzRxImm16;
case Mips::BteqzT8CmpX16: return Mips::BtnezT8CmpX16;
case Mips::BteqzT8SltX16: return Mips::BtnezT8SltX16;
case Mips::BteqzT8SltiX16: return Mips::BtnezT8SltiX16;
+ case Mips::Btnez16: return Mips::Bteqz16;
case Mips::BtnezX16: return Mips::BteqzX16;
case Mips::BtnezT8CmpiX16: return Mips::BteqzT8CmpiX16;
case Mips::BtnezT8SltuX16: return Mips::BteqzT8SltuX16;
case Mips::BtnezT8SltiuX16: return Mips::BteqzT8SltiuX16;
+ case Mips::Bteqz16: return Mips::Btnez16;
case Mips::BteqzX16: return Mips::BtnezX16;
case Mips::BteqzT8CmpiX16: return Mips::BtnezT8CmpiX16;
case Mips::BteqzT8SltuX16: return Mips::BtnezT8SltuX16;
@@ -323,48 +329,88 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
//
RegScavenger rs;
int32_t lo = Imm & 0xFFFF;
- int32_t hi = ((Imm >> 16) + (lo >> 15)) & 0xFFFF;
NewImm = lo;
- unsigned Reg =0;
- unsigned SpReg = 0;
+ int Reg =0;
+ int SpReg = 0;
+
rs.enterBasicBlock(&MBB);
rs.forward(II);
//
+ // We need to know which registers can be used, in the case where there
+ // are not enough free registers. We exclude all registers that
+ // are used in the instruction that we are helping.
+  // Consider all allocatable registers in the register class initially.
+ BitVector Candidates =
+ RI.getAllocatableSet
+ (*II->getParent()->getParent(), &Mips::CPU16RegsRegClass);
+ // Exclude all the registers being used by the instruction.
+ for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = II->getOperand(i);
+ if (MO.isReg() && MO.getReg() != 0 && !MO.isDef() &&
+ !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ Candidates.reset(MO.getReg());
+ }
+ //
+ // If the same register was used and defined in an instruction, then
+ // it will not be in the list of candidates.
+ //
+  // We need to analyze the instruction that we are helping: if it defines
+  // a register that is not also read by the instruction, then that register
+  // is not live before the instruction, so we don't need to save it in case
+  // there are no free registers.
+ //
+ int DefReg = 0;
+ for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = II->getOperand(i);
+ if (MO.isReg() && MO.isDef()) {
+ DefReg = MO.getReg();
+ break;
+ }
+ }
+ //
+ BitVector Available = rs.getRegsAvailable(&Mips::CPU16RegsRegClass);
+
+ Available &= Candidates;
+ //
// we use T0 for the first register, if we need to save something away.
// we use T1 for the second register, if we need to save something away.
//
unsigned FirstRegSaved =0, SecondRegSaved=0;
unsigned FirstRegSavedTo = 0, SecondRegSavedTo = 0;
- Reg = rs.FindUnusedReg(&Mips::CPU16RegsRegClass);
- if (Reg == 0) {
- FirstRegSaved = Reg = Mips::V0;
- FirstRegSavedTo = Mips::T0;
- copyPhysReg(MBB, II, DL, FirstRegSavedTo, FirstRegSaved, true);
+
+ Reg = Available.find_first();
+
+ if (Reg == -1) {
+ Reg = Candidates.find_first();
+ Candidates.reset(Reg);
+ if (DefReg != Reg) {
+ FirstRegSaved = Reg;
+ FirstRegSavedTo = Mips::T0;
+ copyPhysReg(MBB, II, DL, FirstRegSavedTo, FirstRegSaved, true);
+ }
}
else
- rs.setUsed(Reg);
- BuildMI(MBB, II, DL, get(Mips::LiRxImmX16), Reg).addImm(hi);
- BuildMI(MBB, II, DL, get(Mips::SllX16), Reg).addReg(Reg).
- addImm(16);
+ Available.reset(Reg);
+ BuildMI(MBB, II, DL, get(Mips::LwConstant32), Reg).addImm(Imm);
+ NewImm = 0;
if (FrameReg == Mips::SP) {
- SpReg = rs.FindUnusedReg(&Mips::CPU16RegsRegClass);
- if (SpReg == 0) {
- if (Reg != Mips::V1) {
- SecondRegSaved = SpReg = Mips::V1;
+ SpReg = Available.find_first();
+ if (SpReg == -1) {
+ SpReg = Candidates.find_first();
+ // Candidates.reset(SpReg); // not really needed
+ if (DefReg!= SpReg) {
+ SecondRegSaved = SpReg;
SecondRegSavedTo = Mips::T1;
}
- else {
- SecondRegSaved = SpReg = Mips::V0;
- SecondRegSavedTo = Mips::T0;
- }
- copyPhysReg(MBB, II, DL, SecondRegSavedTo, SecondRegSaved, true);
+ if (SecondRegSaved)
+ copyPhysReg(MBB, II, DL, SecondRegSavedTo, SecondRegSaved, true);
}
- else
- rs.setUsed(SpReg);
-
+ else
+ Available.reset(SpReg);
copyPhysReg(MBB, II, DL, SpReg, Mips::SP, false);
- BuildMI(MBB, II, DL, get(Mips:: AdduRxRyRz16), Reg).addReg(SpReg)
+ BuildMI(MBB, II, DL, get(Mips:: AdduRxRyRz16), Reg).addReg(SpReg, RegState::Kill)
.addReg(Reg);
}
else
@@ -380,8 +426,27 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
return Reg;
}
-unsigned Mips16InstrInfo::GetAnalyzableBrOpc(unsigned Opc) const {
+/// This function generates the basic sequence of instructions needed to
+/// load the 32-bit immediate IMM into a register.
+unsigned
+Mips16InstrInfo::basicLoadImmediate(
+ unsigned FrameReg,
+ int64_t Imm, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II, DebugLoc DL,
+ unsigned &NewImm) const {
+ const TargetRegisterClass *RC = &Mips::CPU16RegsRegClass;
+ MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
+ unsigned Reg = RegInfo.createVirtualRegister(RC);
+ BuildMI(MBB, II, DL, get(Mips::LwConstant32), Reg).addImm(Imm);
+ NewImm = 0;
+ return Reg;
+}
+
+unsigned Mips16InstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
return (Opc == Mips::BeqzRxImmX16 || Opc == Mips::BimmX16 ||
+ Opc == Mips::Bimm16 ||
+ Opc == Mips::Bteqz16 || Opc == Mips::Btnez16 ||
+ Opc == Mips::BeqzRxImm16 || Opc == Mips::BnezRxImm16 ||
Opc == Mips::BnezRxImmX16 || Opc == Mips::BteqzX16 ||
Opc == Mips::BteqzT8CmpX16 || Opc == Mips::BteqzT8CmpiX16 ||
Opc == Mips::BteqzT8SltX16 || Opc == Mips::BteqzT8SltuX16 ||
@@ -415,3 +480,69 @@ void Mips16InstrInfo::BuildAddiuSpImm
const MipsInstrInfo *llvm::createMips16InstrInfo(MipsTargetMachine &TM) {
return new Mips16InstrInfo(TM);
}
+
+bool Mips16InstrInfo::validImmediate(unsigned Opcode, unsigned Reg,
+ int64_t Amount) {
+ switch (Opcode) {
+ case Mips::LbRxRyOffMemX16:
+ case Mips::LbuRxRyOffMemX16:
+ case Mips::LhRxRyOffMemX16:
+ case Mips::LhuRxRyOffMemX16:
+ case Mips::SbRxRyOffMemX16:
+ case Mips::ShRxRyOffMemX16:
+ case Mips::LwRxRyOffMemX16:
+ case Mips::SwRxRyOffMemX16:
+ case Mips::SwRxSpImmX16:
+ case Mips::LwRxSpImmX16:
+ return isInt<16>(Amount);
+ case Mips::AddiuRxRyOffMemX16:
+ if ((Reg == Mips::PC) || (Reg == Mips::SP))
+ return isInt<16>(Amount);
+ return isInt<15>(Amount);
+ }
+ llvm_unreachable("unexpected Opcode in validImmediate");
+}
+
+/// Measure the specified inline asm to determine an approximation of its
+/// length.
+/// Comments (which run till the next SeparatorString or newline) do not
+/// count as an instruction.
+/// Any other non-whitespace text is considered an instruction, with
+/// multiple instructions separated by SeparatorString or newlines.
+/// Variable-length instructions are not handled here; this function
+/// may be overloaded in the target code to do that.
+/// We implement the special case of the .space directive taking only an
+/// integer argument, which is the size in bytes. This is used for creating
+/// inline code spacing for testing purposes using inline assembly.
+///
+unsigned Mips16InstrInfo::getInlineAsmLength(const char *Str,
+ const MCAsmInfo &MAI) const {
+
+
+ // Count the number of instructions in the asm.
+ bool atInsnStart = true;
+ unsigned Length = 0;
+ for (; *Str; ++Str) {
+ if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
+ strlen(MAI.getSeparatorString())) == 0)
+ atInsnStart = true;
+ if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
+ if (strncmp(Str, ".space", 6)==0) {
+ char *EStr; int Sz;
+ Sz = strtol(Str+6, &EStr, 10);
+ while (isspace(*EStr)) ++EStr;
+ if (*EStr=='\0') {
+ DEBUG(dbgs() << "parsed .space " << Sz << '\n');
+ return Sz;
+ }
+ }
+ Length += MAI.getMaxInstLength();
+ atInsnStart = false;
+ }
+ if (atInsnStart && strncmp(Str, MAI.getCommentString(),
+ strlen(MAI.getCommentString())) == 0)
+ atInsnStart = false;
+ }
+
+ return Length;
+}
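+// For example (illustrative only): an inline asm string consisting of just
+// ".space 40" makes getInlineAsmLength return 40, while "nop\n\tnop" counts
+// two instructions and returns 2 * MAI.getMaxInstLength().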
diff --git a/lib/Target/Mips/Mips16InstrInfo.h b/lib/Target/Mips/Mips16InstrInfo.h
index a77a9043bb17..d9a594b537a2 100644
--- a/lib/Target/Mips/Mips16InstrInfo.h
+++ b/lib/Target/Mips/Mips16InstrInfo.h
@@ -64,11 +64,11 @@ public:
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
- virtual unsigned GetOppositeBranchOpc(unsigned Opc) const;
+ virtual unsigned getOppositeBranchOpc(unsigned Opc) const;
// Adjust SP by FrameSize bytes. Save RA, S0, S1
void makeFrame(unsigned SP, int64_t FrameSize, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ MachineBasicBlock::iterator I) const;
// Adjust SP by FrameSize bytes. Restore RA, S0, S1
void restoreFrame(unsigned SP, int64_t FrameSize, MachineBasicBlock &MBB,
@@ -88,6 +88,13 @@ public:
MachineBasicBlock::iterator II, DebugLoc DL,
unsigned &NewImm) const;
+ unsigned basicLoadImmediate(unsigned FrameReg,
+ int64_t Imm, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II, DebugLoc DL,
+ unsigned &NewImm) const;
+
+ static bool validImmediate(unsigned Opcode, unsigned Reg, int64_t Amount);
+
static bool validSpImm8(int offset) {
return ((offset & 7) == 0) && isInt<11>(offset);
}
@@ -101,8 +108,10 @@ public:
void BuildAddiuSpImm
(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, int64_t Imm) const;
+ unsigned getInlineAsmLength(const char *Str,
+ const MCAsmInfo &MAI) const;
private:
- virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const;
+ virtual unsigned getAnalyzableBrOpc(unsigned Opc) const;
void ExpandRetRA16(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Opc) const;
diff --git a/lib/Target/Mips/Mips16InstrInfo.td b/lib/Target/Mips/Mips16InstrInfo.td
index aa51aaf46565..7441c78a0330 100644
--- a/lib/Target/Mips/Mips16InstrInfo.td
+++ b/lib/Target/Mips/Mips16InstrInfo.td
@@ -21,17 +21,27 @@ def addr16 :
// Address operand
def mem16 : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops CPU16Regs, simm16, CPU16Regs);
+ let MIOperandInfo = (ops CPU16Regs, simm16, CPU16RegsPlusSP);
let EncoderMethod = "getMemEncoding";
}
def mem16_ea : Operand<i32> {
let PrintMethod = "printMemOperandEA";
- let MIOperandInfo = (ops CPU16Regs, simm16);
+ let MIOperandInfo = (ops CPU16RegsPlusSP, simm16);
let EncoderMethod = "getMemEncoding";
}
//
+// I-type instruction format
+//
+// this is only used by bimm. the actual assembly value is a 12 bit signed
+// number
+//
+class FI16_ins<bits<5> op, string asmstr, InstrItinClass itin>:
+ FI16<op, (outs), (ins brtarget:$imm16),
+ !strconcat(asmstr, "\t$imm16 # 16 bit inst"), [], itin>;
+
+//
//
// I8 instruction format
//
@@ -41,7 +51,10 @@ class FI816_ins_base<bits<3> _func, string asmstr,
FI816<_func, (outs), (ins simm16:$imm), !strconcat(asmstr, asmstr2),
[], itin>;
-
+class FI816_ins<bits<3> _func, string asmstr,
+ InstrItinClass itin>:
+ FI816_ins_base<_func, asmstr, "\t$imm # 16 bit inst", itin>;
+
class FI816_SP_ins<bits<3> _func, string asmstr,
InstrItinClass itin>:
FI816_ins_base<_func, asmstr, "\t$$sp, $imm # 16 bit inst", itin>;
@@ -60,6 +73,11 @@ class FRI16_ins<bits<5> op, string asmstr,
InstrItinClass itin>:
FRI16_ins_base<op, asmstr, "\t$rx, $imm \t# 16 bit inst", itin>;
+class FRI16_TCP_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FRI16<_op, (outs CPU16Regs:$rx), (ins pcrel16:$imm, i32imm:$size),
+ !strconcat(asmstr, "\t$rx, $imm\t# 16 bit inst"), [], itin>;
+
class FRI16R_ins_base<bits<5> op, string asmstr, string asmstr2,
InstrItinClass itin>:
FRI16<op, (outs), (ins CPU16Regs:$rx, simm16:$imm),
@@ -172,6 +190,11 @@ class FEXT_RI16_B_ins<bits<5> _op, string asmstr,
FEXT_RI16<_op, (outs), (ins CPU16Regs:$rx, brtarget:$imm),
!strconcat(asmstr, "\t$rx, $imm"), [], itin>;
+class FEXT_RI16_TCP_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins pcrel16:$imm, i32imm:$size),
+ !strconcat(asmstr, "\t$rx, $imm"), [], itin>;
+
class FEXT_2RI16_ins<bits<5> _op, string asmstr,
InstrItinClass itin>:
FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins CPU16Regs:$rx_, simm16:$imm),
@@ -187,6 +210,11 @@ class FEXT_RI16_SP_explicit_ins<bits<5> _op, string asmstr,
FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins CPUSPReg:$ry, simm16:$imm),
!strconcat(asmstr, "\t$rx, $imm ( $ry ); "), [], itin>;
+class FEXT_RI16_SP_Store_explicit_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FEXT_RI16<_op, (outs), (ins CPU16Regs:$rx, CPUSPReg:$ry, simm16:$imm),
+ !strconcat(asmstr, "\t$rx, $imm ( $ry ); "), [], itin>;
+
//
// EXT-RRI instruction format
//
@@ -215,7 +243,7 @@ class FEXT_RRI_A16_mem_ins<bits<1> op, string asmstr, Operand MemOpnd,
// EXT-SHIFT instruction format
//
class FEXT_SHIFT16_ins<bits<2> _f, string asmstr, InstrItinClass itin>:
- FEXT_SHIFT16<_f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry, shamt:$sa),
+ FEXT_SHIFT16<_f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry, uimm5:$sa),
!strconcat(asmstr, "\t$rx, $ry, $sa"), [], itin>;
//
@@ -248,7 +276,7 @@ class FEXT_T8I8I16_ins<string asmstr, string asmstr2>:
// I8_MOVR32 instruction format (used only by the MOVR32 instruction)
//
class FI8_MOVR3216_ins<string asmstr, InstrItinClass itin>:
- FI8_MOVR3216<(outs CPU16Regs:$rz), (ins CPURegs:$r32),
+ FI8_MOVR3216<(outs CPU16Regs:$rz), (ins GPR32:$r32),
!strconcat(asmstr, "\t$rz, $r32"), [], itin>;
//
@@ -256,7 +284,7 @@ class FI8_MOVR3216_ins<string asmstr, InstrItinClass itin>:
//
class FI8_MOV32R16_ins<string asmstr, InstrItinClass itin>:
- FI8_MOV32R16<(outs CPURegs:$r32), (ins CPU16Regs:$rz),
+ FI8_MOV32R16<(outs GPR32:$r32), (ins CPU16Regs:$rz),
!strconcat(asmstr, "\t$r32, $rz"), [], itin>;
//
@@ -287,6 +315,11 @@ class FRR16_ins<bits<5> f, string asmstr, InstrItinClass itin> :
!strconcat(asmstr, "\t$rx, $ry"), [], itin> {
}
+class FRRBreakNull16_ins<string asmstr, InstrItinClass itin> :
+ FRRBreak16<(outs), (ins), asmstr, [], itin> {
+ let Code=0;
+}
+
class FRR16R_ins<bits<5> f, string asmstr, InstrItinClass itin> :
FRR16<f, (outs), (ins CPU16Regs:$rx, CPU16Regs:$ry),
!strconcat(asmstr, "\t$rx, $ry"), [], itin> {
@@ -333,6 +366,14 @@ class FRR16_JALRC_ins<bits<1> nd, bits<1> l, bits<1> ra,
FRR16_JALRC<nd, l, ra, (outs), (ins CPU16Regs:$rx),
!strconcat(asmstr, "\t $rx"), [], itin> ;
+class FRR_SF16_ins
+ <bits<5> _funct, bits<3> _subfunc,
+ string asmstr, InstrItinClass itin>:
+ FRR_SF16<_funct, _subfunc, (outs CPU16Regs:$rx), (ins CPU16Regs:$rx_),
+ !strconcat(asmstr, "\t $rx"),
+ [], itin> {
+ let Constraints = "$rx_ = $rx";
+ }
//
// RRR-type instruction format
//
@@ -437,7 +478,7 @@ def Constant32:
MipsPseudo16<(outs), (ins imm32:$imm), "\t.word $imm", []>;
def LwConstant32:
- MipsPseudo16<(outs), (ins CPU16Regs:$rx, imm32:$imm),
+ MipsPseudo16<(outs CPU16Regs:$rx), (ins imm32:$imm, imm32:$constid),
"lw\t$rx, 1f\n\tb\t2f\n\t.align\t2\n1: \t.word\t$imm\n2:", []>;
@@ -549,6 +590,14 @@ def BeqzRxImm16: FRI16_B_ins<0b00100, "beqz", IIAlu>, cbranch16;
//
def BeqzRxImmX16: FEXT_RI16_B_ins<0b00100, "beqz", IIAlu>, cbranch16;
+//
+// Format: B offset MIPS16e
+// Purpose: Unconditional Branch (Extended)
+// To do an unconditional PC-relative branch.
+//
+
+def Bimm16: FI16_ins<0b00010, "b", IIAlu>, branch16;
+
// Format: B offset MIPS16e
// Purpose: Unconditional Branch
// To do an unconditional PC-relative branch.
@@ -569,11 +618,22 @@ def BnezRxImm16: FRI16_B_ins<0b00101, "bnez", IIAlu>, cbranch16;
//
def BnezRxImmX16: FEXT_RI16_B_ins<0b00101, "bnez", IIAlu>, cbranch16;
+
+//
+//Format: BREAK immediate
+// Purpose: Breakpoint
+// To cause a Breakpoint exception.
+
+def Break16: FRRBreakNull16_ins<"break 0", NoItinerary>;
//
// Format: BTEQZ offset MIPS16e
// Purpose: Branch on T Equal to Zero (Extended)
// To test special register T then do a PC-relative conditional branch.
//
+def Bteqz16: FI816_ins<0b000, "bteqz", IIAlu>, cbranch16 {
+ let Uses = [T8];
+}
+
def BteqzX16: FEXT_I816_ins<0b000, "bteqz", IIAlu>, cbranch16 {
let Uses = [T8];
}
@@ -597,6 +657,11 @@ def BteqzT8SltiuX16: FEXT_T8I8I16_ins<"bteqz", "sltiu">,
// Purpose: Branch on T Not Equal to Zero (Extended)
// To test special register T then do a PC-relative conditional branch.
//
+
+def Btnez16: FI816_ins<0b001, "btnez", IIAlu>, cbranch16 {
+ let Uses = [T8];
+}
+
def BtnezX16: FEXT_I816_ins<0b001, "btnez", IIAlu> ,cbranch16 {
let Uses = [T8];
}
@@ -648,7 +713,7 @@ def CmpiRxImmX16: FEXT_RI16R_ins<0b01110, "cmpi", IIAlu> {
// To divide 32-bit signed integers.
//
def DivRxRy16: FRR16_div_ins<0b11010, "div", IIAlu> {
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
//
@@ -657,7 +722,7 @@ def DivRxRy16: FRR16_div_ins<0b11010, "div", IIAlu> {
// To divide 32-bit unsigned integers.
//
def DivuRxRy16: FRR16_div_ins<0b11011, "divu", IIAlu> {
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
//
// Format: JAL target MIPS16e
@@ -667,10 +732,8 @@ def DivuRxRy16: FRR16_div_ins<0b11011, "divu", IIAlu> {
//
def Jal16 : FJAL16_ins<0b0, "jal", IIAlu> {
- let isBranch = 1;
let hasDelaySlot = 0; // not true, but we add the nop for now
- let isTerminator=1;
- let isBarrier=1;
+ let isCall=1;
}
//
@@ -753,6 +816,10 @@ def LiRxImm16: FRI16_ins<0b01101, "li", IIAlu>;
//
def LiRxImmX16: FEXT_RI16_ins<0b01101, "li", IIAlu>;
+def LiRxImmAlignX16: FEXT_RI16_ins<0b01101, ".align 2\n\tli", IIAlu> {
+ let isCodeGenOnly = 1;
+}
+
//
// Format: LW ry, offset(rx) MIPS16e
// Purpose: Load Word (Extended)
@@ -766,10 +833,13 @@ def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, IILoad>, MayLoad{
// Purpose: Load Word (SP-Relative, Extended)
// To load an SP-relative word from memory as a signed value.
//
-def LwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b10110, "lw", IILoad>, MayLoad{
+def LwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b10010, "lw", IILoad>, MayLoad{
let Uses = [SP];
}
+def LwRxPcTcp16: FRI16_TCP_ins<0b10110, "lw", IILoad>, MayLoad;
+
+def LwRxPcTcpX16: FEXT_RI16_TCP_ins<0b10110, "lw", IILoad>, MayLoad;
//
// Format: MOVE r32, rz MIPS16e
// Purpose: Move
@@ -790,7 +860,7 @@ def MoveR3216: FI8_MOVR3216_ins<"move", IIAlu>;
// To copy the special purpose HI register to a GPR.
//
def Mfhi16: FRR16_M_ins<0b10000, "mfhi", IIAlu> {
- let Uses = [HI];
+ let Uses = [HI0];
let neverHasSideEffects = 1;
}
@@ -800,7 +870,7 @@ def Mfhi16: FRR16_M_ins<0b10000, "mfhi", IIAlu> {
// To copy the special purpose LO register to a GPR.
//
def Mflo16: FRR16_M_ins<0b10010, "mflo", IIAlu> {
- let Uses = [LO];
+ let Uses = [LO0];
let neverHasSideEffects = 1;
}
@@ -810,13 +880,13 @@ def Mflo16: FRR16_M_ins<0b10010, "mflo", IIAlu> {
def MultRxRy16: FMULT16_ins<"mult", IIAlu> {
let isCommutable = 1;
let neverHasSideEffects = 1;
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
def MultuRxRy16: FMULT16_ins<"multu", IIAlu> {
let isCommutable = 1;
let neverHasSideEffects = 1;
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
//
@@ -827,7 +897,7 @@ def MultuRxRy16: FMULT16_ins<"multu", IIAlu> {
def MultRxRyRz16: FMULT16_LO_ins<"mult", IIAlu> {
let isCommutable = 1;
let neverHasSideEffects = 1;
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
//
@@ -838,7 +908,7 @@ def MultRxRyRz16: FMULT16_LO_ins<"mult", IIAlu> {
def MultuRxRyRz16: FMULT16_LO_ins<"multu", IIAlu> {
let isCommutable = 1;
let neverHasSideEffects = 1;
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
//
@@ -878,9 +948,9 @@ def OrRxRxRy16: FRxRxRy16_ins<0b01101, "or", IIAlu>, ArithLogic16Defs<1>;
let ra=1, s=0,s0=1,s1=1 in
def RestoreRaF16:
FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
- "restore\t$$ra, $$s0, $$s1, $frame_size", [], IILoad >, MayLoad {
+ "restore\t$$ra, $$s0, $$s1, $$s2, $frame_size", [], IILoad >, MayLoad {
let isCodeGenOnly = 1;
- let Defs = [S0, S1, RA, SP];
+ let Defs = [S0, S1, S2, RA, SP];
let Uses = [SP];
}
@@ -906,9 +976,9 @@ def RestoreIncSpF16:
let ra=1, s=1,s0=1,s1=1 in
def SaveRaF16:
FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
- "save\t$$ra, $$s0, $$s1, $frame_size", [], IIStore >, MayStore {
+ "save\t$$ra, $$s0, $$s1, $$s2, $frame_size", [], IIStore >, MayStore {
let isCodeGenOnly = 1;
- let Uses = [RA, SP, S0, S1];
+ let Uses = [RA, SP, S0, S1, S2];
let Defs = [SP];
}
@@ -933,6 +1003,22 @@ def SbRxRyOffMemX16:
FEXT_RRI16_mem2_ins<0b11000, "sb", mem16, IIStore>, MayStore;
//
+// Format: SEB rx MIPS16e
+// Purpose: Sign-Extend Byte
+// Sign-extend least significant byte in register rx.
+//
+def SebRx16
+ : FRR_SF16_ins<0b10001, 0b100, "seb", IIAlu>;
+
+//
+// Format: SEH rx MIPS16e
+// Purpose: Sign-Extend Halfword
+// Sign-extend least significant halfword in register rx.
+//
+def SehRx16
+ : FRR_SF16_ins<0b10001, 0b101, "seh", IIAlu>;
+
+//
// The Sel(T) instructions are pseudos
// T means that they use T8 implicitly.
//
@@ -1057,7 +1143,7 @@ def ShRxRyOffMemX16:
//
// Format: SLL rx, ry, sa MIPS16e
// Purpose: Shift Word Left Logical (Extended)
-// To execute a left-shift of a word by a fixed number of bits—0 to 31 bits.
+// To execute a left-shift of a word by a fixed number of bits-0 to 31 bits.
//
def SllX16: FEXT_SHIFT16_ins<0b00, "sll", IIAlu>;
@@ -1153,7 +1239,7 @@ def SravRxRy16: FRxRxRy16_ins<0b00111, "srav", IIAlu>;
// Format: SRA rx, ry, sa MIPS16e
// Purpose: Shift Word Right Arithmetic (Extended)
// To execute an arithmetic right-shift of a word by a fixed
-// number of bits—1 to 8 bits.
+// number of bits-1 to 8 bits.
//
def SraX16: FEXT_SHIFT16_ins<0b11, "sra", IIAlu>;
@@ -1171,7 +1257,7 @@ def SrlvRxRy16: FRxRxRy16_ins<0b00110, "srlv", IIAlu>;
// Format: SRL rx, ry, sa MIPS16e
// Purpose: Shift Word Right Logical (Extended)
// To execute a logical right-shift of a word by a fixed
-// number of bits—1 to 31 bits.
+// number of bits-1 to 31 bits.
//
def SrlX16: FEXT_SHIFT16_ins<0b10, "srl", IIAlu>;
@@ -1195,7 +1281,8 @@ def SwRxRyOffMemX16:
// Purpose: Store Word rx (SP-Relative)
// To store an SP-relative word to memory.
//
-def SwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b11010, "sw", IIStore>, MayStore;
+def SwRxSpImmX16: FEXT_RI16_SP_Store_explicit_ins
+ <0b11010, "sw", IIStore>, MayStore;
//
//
@@ -1311,9 +1398,7 @@ def: Mips16Pat<(i32 addr16:$addr),
// Large (>16 bit) immediate loads
-def : Mips16Pat<(i32 imm:$imm),
- (OrRxRxRy16 (SllX16 (LiRxImmX16 (HI16 imm:$imm)), 16),
- (LiRxImmX16 (LO16 imm:$imm)))>;
+def : Mips16Pat<(i32 imm:$imm), (LwConstant32 imm:$imm, -1)>;
// Carry MipsPatterns
def : Mips16Pat<(subc CPU16Regs:$lhs, CPU16Regs:$rhs),
@@ -1354,7 +1439,7 @@ def: Mips16Pat
def: Mips16Pat
<(brcond (i32 (seteq CPU16Regs:$rx, 0)), bb:$targ16),
- (BeqzRxImmX16 CPU16Regs:$rx, bb:$targ16)
+ (BeqzRxImm16 CPU16Regs:$rx, bb:$targ16)
>;
//
@@ -1416,7 +1501,7 @@ def: Mips16Pat
def: Mips16Pat
<(brcond (i32 (setne CPU16Regs:$rx, 0)), bb:$targ16),
- (BnezRxImmX16 CPU16Regs:$rx, bb:$targ16)
+ (BnezRxImm16 CPU16Regs:$rx, bb:$targ16)
>;
//
@@ -1424,7 +1509,7 @@ def: Mips16Pat
//
def: Mips16Pat
<(brcond CPU16Regs:$rx, bb:$targ16),
- (BnezRxImmX16 CPU16Regs:$rx, bb:$targ16)
+ (BnezRxImm16 CPU16Regs:$rx, bb:$targ16)
>;
//
@@ -1454,7 +1539,7 @@ def: Mips16Pat
// (BtnezT8SltuX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16)
// >;
-def: UncondBranch16_pat<br, BimmX16>;
+def: UncondBranch16_pat<br, Bimm16>;
// Small immediates
def: Mips16Pat<(i32 immSExt16:$in),
@@ -1768,7 +1853,8 @@ def: Mips16Pat<(add CPU16Regs:$hi, (MipsLo tglobaladdr:$lo)),
(AddiuRxRxImmX16 CPU16Regs:$hi, tglobaladdr:$lo)>;
// hi/lo relocs
-
+def : Mips16Pat<(MipsHi tblockaddress:$in),
+ (SllX16 (LiRxImmX16 tblockaddress:$in), 16)>;
def : Mips16Pat<(MipsHi tglobaladdr:$in),
(SllX16 (LiRxImmX16 tglobaladdr:$in), 16)>;
def : Mips16Pat<(MipsHi tjumptable:$in),
@@ -1776,6 +1862,8 @@ def : Mips16Pat<(MipsHi tjumptable:$in),
def : Mips16Pat<(MipsHi tglobaltlsaddr:$in),
(SllX16 (LiRxImmX16 tglobaltlsaddr:$in), 16)>;
+def : Mips16Pat<(MipsLo tblockaddress:$in), (LiRxImmX16 tblockaddress:$in)>;
+
// wrapper_pic
class Wrapper16Pat<SDNode node, Instruction ADDiuOp, RegisterClass RC>:
Mips16Pat<(MipsWrapper RC:$gp, node:$in),
@@ -1789,3 +1877,33 @@ def : Mips16Pat<(i32 (extloadi8 addr16:$src)),
(LbuRxRyOffMemX16 addr16:$src)>;
def : Mips16Pat<(i32 (extloadi16 addr16:$src)),
(LhuRxRyOffMemX16 addr16:$src)>;
+
+def: Mips16Pat<(trap), (Break16)>;
+
+def : Mips16Pat<(sext_inreg CPU16Regs:$val, i8),
+ (SebRx16 CPU16Regs:$val)>;
+
+def : Mips16Pat<(sext_inreg CPU16Regs:$val, i16),
+ (SehRx16 CPU16Regs:$val)>;
+
+def GotPrologue16:
+ MipsPseudo16<
+ (outs CPU16Regs:$rh, CPU16Regs:$rl),
+ (ins simm16:$immHi, simm16:$immLo),
+ ".align 2\n\tli\t$rh, $immHi\n\taddiu\t$rl, $$pc, $immLo\n ",[]> ;
+
+// An operand for the CONSTPOOL_ENTRY pseudo-instruction.
+def cpinst_operand : Operand<i32> {
+ // let PrintMethod = "printCPInstOperand";
+}
+
+// CONSTPOOL_ENTRY - This instruction represents a floating constant pool in
+// the function. The first operand is the ID# for this instruction, the second
+// is the index of this entry in the MachineConstantPool, and the third is the
+// size in bytes of this constant pool entry.
+//
+let neverHasSideEffects = 1, isNotDuplicable = 1 in
+def CONSTPOOL_ENTRY :
+MipsPseudo16<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
+ i32imm:$size), "foo", []>;
+
diff --git a/lib/Target/Mips/Mips16RegisterInfo.cpp b/lib/Target/Mips/Mips16RegisterInfo.cpp
index 7ad18f2b4d98..9d0f2c927e4d 100644
--- a/lib/Target/Mips/Mips16RegisterInfo.cpp
+++ b/lib/Target/Mips/Mips16RegisterInfo.cpp
@@ -41,17 +41,16 @@
using namespace llvm;
-Mips16RegisterInfo::Mips16RegisterInfo(const MipsSubtarget &ST,
- const Mips16InstrInfo &I)
- : MipsRegisterInfo(ST), TII(I) {}
+Mips16RegisterInfo::Mips16RegisterInfo(const MipsSubtarget &ST)
+ : MipsRegisterInfo(ST) {}
bool Mips16RegisterInfo::requiresRegisterScavenging
(const MachineFunction &MF) const {
- return true;
+ return false;
}
bool Mips16RegisterInfo::requiresFrameIndexScavenging
(const MachineFunction &MF) const {
- return true;
+ return false;
}
bool Mips16RegisterInfo::useFPForScavengingIndex
@@ -66,6 +65,7 @@ bool Mips16RegisterInfo::saveScavengerRegister
const TargetRegisterClass *RC,
unsigned Reg) const {
DebugLoc DL;
+ const TargetInstrInfo &TII = *MBB.getParent()->getTarget().getInstrInfo();
TII.copyPhysReg(MBB, I, DL, Mips::T0, Reg, true);
TII.copyPhysReg(MBB, UseMI, DL, Reg, Mips::T0, true);
return true;
@@ -134,11 +134,14 @@ void Mips16RegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
- if (!MI.isDebugValue() && ( ((FrameReg != Mips::SP) && !isInt<16>(Offset)) ||
- ((FrameReg == Mips::SP) && !isInt<15>(Offset)) )) {
+ if (!MI.isDebugValue() &&
+ !Mips16InstrInfo::validImmediate(MI.getOpcode(), FrameReg, Offset)) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = II->getDebugLoc();
unsigned NewImm;
+ const Mips16InstrInfo &TII =
+ *static_cast<const Mips16InstrInfo*>(
+ MBB.getParent()->getTarget().getInstrInfo());
FrameReg = TII.loadImmediate(FrameReg, Offset, MBB, II, DL, NewImm);
Offset = SignExtend64<16>(NewImm);
IsKill = true;
diff --git a/lib/Target/Mips/Mips16RegisterInfo.h b/lib/Target/Mips/Mips16RegisterInfo.h
index 2b3d2b1a4ecb..13e82a3ffba9 100644
--- a/lib/Target/Mips/Mips16RegisterInfo.h
+++ b/lib/Target/Mips/Mips16RegisterInfo.h
@@ -20,10 +20,8 @@ namespace llvm {
class Mips16InstrInfo;
class Mips16RegisterInfo : public MipsRegisterInfo {
- const Mips16InstrInfo &TII;
public:
- Mips16RegisterInfo(const MipsSubtarget &Subtarget,
- const Mips16InstrInfo &TII);
+ Mips16RegisterInfo(const MipsSubtarget &Subtarget);
bool requiresRegisterScavenging(const MachineFunction &MF) const;
diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td
index fc533fb03f63..15ef654555d6 100644
--- a/lib/Target/Mips/Mips64InstrInfo.td
+++ b/lib/Target/Mips/Mips64InstrInfo.td
@@ -15,9 +15,6 @@
// Mips Operand, Complex Patterns and Transformations Definitions.
//===----------------------------------------------------------------------===//
-// Instruction operand types
-def shamt_64 : Operand<i64>;
-
// Unsigned Operand
def uimm16_64 : Operand<i64> {
let PrintMethod = "printUnsignedImm";
@@ -34,42 +31,21 @@ def immZExt6 : ImmLeaf<i32, [{return Imm == (Imm & 0x3f);}]>;
//===----------------------------------------------------------------------===//
// Instructions specific format
//===----------------------------------------------------------------------===//
-let DecoderNamespace = "Mips64" in {
-
-multiclass Atomic2Ops64<PatFrag Op> {
- def NAME : Atomic2Ops<Op, CPU64Regs, CPURegs>,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : Atomic2Ops<Op, CPU64Regs, CPU64Regs>,
- Requires<[IsN64, HasStdEnc]> {
- let isCodeGenOnly = 1;
- }
-}
-
-multiclass AtomicCmpSwap64<PatFrag Op> {
- def NAME : AtomicCmpSwap<Op, CPU64Regs, CPURegs>,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : AtomicCmpSwap<Op, CPU64Regs, CPU64Regs>,
- Requires<[IsN64, HasStdEnc]> {
- let isCodeGenOnly = 1;
- }
-}
-}
-let usesCustomInserter = 1, Predicates = [HasStdEnc],
- DecoderNamespace = "Mips64" in {
- defm ATOMIC_LOAD_ADD_I64 : Atomic2Ops64<atomic_load_add_64>;
- defm ATOMIC_LOAD_SUB_I64 : Atomic2Ops64<atomic_load_sub_64>;
- defm ATOMIC_LOAD_AND_I64 : Atomic2Ops64<atomic_load_and_64>;
- defm ATOMIC_LOAD_OR_I64 : Atomic2Ops64<atomic_load_or_64>;
- defm ATOMIC_LOAD_XOR_I64 : Atomic2Ops64<atomic_load_xor_64>;
- defm ATOMIC_LOAD_NAND_I64 : Atomic2Ops64<atomic_load_nand_64>;
- defm ATOMIC_SWAP_I64 : Atomic2Ops64<atomic_swap_64>;
- defm ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap64<atomic_cmp_swap_64>;
+let usesCustomInserter = 1 in {
+ def ATOMIC_LOAD_ADD_I64 : Atomic2Ops<atomic_load_add_64, GPR64>;
+ def ATOMIC_LOAD_SUB_I64 : Atomic2Ops<atomic_load_sub_64, GPR64>;
+ def ATOMIC_LOAD_AND_I64 : Atomic2Ops<atomic_load_and_64, GPR64>;
+ def ATOMIC_LOAD_OR_I64 : Atomic2Ops<atomic_load_or_64, GPR64>;
+ def ATOMIC_LOAD_XOR_I64 : Atomic2Ops<atomic_load_xor_64, GPR64>;
+ def ATOMIC_LOAD_NAND_I64 : Atomic2Ops<atomic_load_nand_64, GPR64>;
+ def ATOMIC_SWAP_I64 : Atomic2Ops<atomic_swap_64, GPR64>;
+ def ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap<atomic_cmp_swap_64, GPR64>;
}
/// Pseudo instructions for loading and storing accumulator registers.
-let isPseudo = 1 in {
- defm LOAD_AC128 : LoadM<"load_ac128", ACRegs128>;
- defm STORE_AC128 : StoreM<"store_ac128", ACRegs128>;
+let isPseudo = 1, isCodeGenOnly = 1 in {
+ def LOAD_ACC128 : Load<"", ACC128>;
+ def STORE_ACC128 : Store<"", ACC128>;
}
//===----------------------------------------------------------------------===//
@@ -77,166 +53,174 @@ let isPseudo = 1 in {
//===----------------------------------------------------------------------===//
let DecoderNamespace = "Mips64" in {
/// Arithmetic Instructions (ALU Immediate)
-def DADDi : ArithLogicI<"daddi", simm16_64, CPU64RegsOpnd>, ADDI_FM<0x18>;
-def DADDiu : ArithLogicI<"daddiu", simm16_64, CPU64RegsOpnd, immSExt16, add>,
+def DADDi : ArithLogicI<"daddi", simm16_64, GPR64Opnd>, ADDI_FM<0x18>;
+def DADDiu : ArithLogicI<"daddiu", simm16_64, GPR64Opnd, IIArith,
+ immSExt16, add>,
ADDI_FM<0x19>, IsAsCheapAsAMove;
-def DANDi : ArithLogicI<"andi", uimm16_64, CPU64RegsOpnd, immZExt16, and>,
- ADDI_FM<0xc>;
-def SLTi64 : SetCC_I<"slti", setlt, simm16_64, immSExt16, CPU64Regs>,
+
+let isCodeGenOnly = 1 in {
+def SLTi64 : SetCC_I<"slti", setlt, simm16_64, immSExt16, GPR64Opnd>,
SLTI_FM<0xa>;
-def SLTiu64 : SetCC_I<"sltiu", setult, simm16_64, immSExt16, CPU64Regs>,
+def SLTiu64 : SetCC_I<"sltiu", setult, simm16_64, immSExt16, GPR64Opnd>,
SLTI_FM<0xb>;
-def ORi64 : ArithLogicI<"ori", uimm16_64, CPU64RegsOpnd, immZExt16, or>,
+def ANDi64 : ArithLogicI<"andi", uimm16_64, GPR64Opnd, IILogic, immZExt16,
+ and>,
+ ADDI_FM<0xc>;
+def ORi64 : ArithLogicI<"ori", uimm16_64, GPR64Opnd, IILogic, immZExt16,
+ or>,
ADDI_FM<0xd>;
-def XORi64 : ArithLogicI<"xori", uimm16_64, CPU64RegsOpnd, immZExt16, xor>,
+def XORi64 : ArithLogicI<"xori", uimm16_64, GPR64Opnd, IILogic, immZExt16,
+ xor>,
ADDI_FM<0xe>;
-def LUi64 : LoadUpper<"lui", CPU64Regs, uimm16_64>, LUI_FM;
+def LUi64 : LoadUpper<"lui", GPR64Opnd, uimm16_64>, LUI_FM;
+}
/// Arithmetic Instructions (3-Operand, R-Type)
-def DADD : ArithLogicR<"dadd", CPU64RegsOpnd>, ADD_FM<0, 0x2c>;
-def DADDu : ArithLogicR<"daddu", CPU64RegsOpnd, 1, IIAlu, add>,
+def DADD : ArithLogicR<"dadd", GPR64Opnd>, ADD_FM<0, 0x2c>;
+def DADDu : ArithLogicR<"daddu", GPR64Opnd, 1, IIArith, add>,
ADD_FM<0, 0x2d>;
-def DSUBu : ArithLogicR<"dsubu", CPU64RegsOpnd, 0, IIAlu, sub>,
+def DSUBu : ArithLogicR<"dsubu", GPR64Opnd, 0, IIArith, sub>,
ADD_FM<0, 0x2f>;
-def SLT64 : SetCC_R<"slt", setlt, CPU64Regs>, ADD_FM<0, 0x2a>;
-def SLTu64 : SetCC_R<"sltu", setult, CPU64Regs>, ADD_FM<0, 0x2b>;
-def AND64 : ArithLogicR<"and", CPU64RegsOpnd, 1, IIAlu, and>, ADD_FM<0, 0x24>;
-def OR64 : ArithLogicR<"or", CPU64RegsOpnd, 1, IIAlu, or>, ADD_FM<0, 0x25>;
-def XOR64 : ArithLogicR<"xor", CPU64RegsOpnd, 1, IIAlu, xor>, ADD_FM<0, 0x26>;
-def NOR64 : LogicNOR<"nor", CPU64RegsOpnd>, ADD_FM<0, 0x27>;
+
+let isCodeGenOnly = 1 in {
+def SLT64 : SetCC_R<"slt", setlt, GPR64Opnd>, ADD_FM<0, 0x2a>;
+def SLTu64 : SetCC_R<"sltu", setult, GPR64Opnd>, ADD_FM<0, 0x2b>;
+def AND64 : ArithLogicR<"and", GPR64Opnd, 1, IIArith, and>, ADD_FM<0, 0x24>;
+def OR64 : ArithLogicR<"or", GPR64Opnd, 1, IIArith, or>, ADD_FM<0, 0x25>;
+def XOR64 : ArithLogicR<"xor", GPR64Opnd, 1, IIArith, xor>, ADD_FM<0, 0x26>;
+def NOR64 : LogicNOR<"nor", GPR64Opnd>, ADD_FM<0, 0x27>;
+}
/// Shift Instructions
-def DSLL : shift_rotate_imm<"dsll", shamt, CPU64RegsOpnd, shl, immZExt6>,
+def DSLL : shift_rotate_imm<"dsll", uimm6, GPR64Opnd, shl, immZExt6>,
SRA_FM<0x38, 0>;
-def DSRL : shift_rotate_imm<"dsrl", shamt, CPU64RegsOpnd, srl, immZExt6>,
+def DSRL : shift_rotate_imm<"dsrl", uimm6, GPR64Opnd, srl, immZExt6>,
SRA_FM<0x3a, 0>;
-def DSRA : shift_rotate_imm<"dsra", shamt, CPU64RegsOpnd, sra, immZExt6>,
+def DSRA : shift_rotate_imm<"dsra", uimm6, GPR64Opnd, sra, immZExt6>,
SRA_FM<0x3b, 0>;
-def DSLLV : shift_rotate_reg<"dsllv", CPU64RegsOpnd, shl>, SRLV_FM<0x14, 0>;
-def DSRLV : shift_rotate_reg<"dsrlv", CPU64RegsOpnd, srl>, SRLV_FM<0x16, 0>;
-def DSRAV : shift_rotate_reg<"dsrav", CPU64RegsOpnd, sra>, SRLV_FM<0x17, 0>;
-def DSLL32 : shift_rotate_imm<"dsll32", shamt, CPU64RegsOpnd>, SRA_FM<0x3c, 0>;
-def DSRL32 : shift_rotate_imm<"dsrl32", shamt, CPU64RegsOpnd>, SRA_FM<0x3e, 0>;
-def DSRA32 : shift_rotate_imm<"dsra32", shamt, CPU64RegsOpnd>, SRA_FM<0x3f, 0>;
-}
+def DSLLV : shift_rotate_reg<"dsllv", GPR64Opnd, shl>, SRLV_FM<0x14, 0>;
+def DSRLV : shift_rotate_reg<"dsrlv", GPR64Opnd, srl>, SRLV_FM<0x16, 0>;
+def DSRAV : shift_rotate_reg<"dsrav", GPR64Opnd, sra>, SRLV_FM<0x17, 0>;
+def DSLL32 : shift_rotate_imm<"dsll32", uimm5, GPR64Opnd>, SRA_FM<0x3c, 0>;
+def DSRL32 : shift_rotate_imm<"dsrl32", uimm5, GPR64Opnd>, SRA_FM<0x3e, 0>;
+def DSRA32 : shift_rotate_imm<"dsra32", uimm5, GPR64Opnd>, SRA_FM<0x3f, 0>;
+
// Rotate Instructions
-let Predicates = [HasMips64r2, HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def DROTR : shift_rotate_imm<"drotr", shamt, CPU64RegsOpnd, rotr, immZExt6>,
+let Predicates = [HasMips64r2, HasStdEnc] in {
+ def DROTR : shift_rotate_imm<"drotr", uimm6, GPR64Opnd, rotr, immZExt6>,
SRA_FM<0x3a, 1>;
- def DROTRV : shift_rotate_reg<"drotrv", CPU64RegsOpnd, rotr>,
+ def DROTRV : shift_rotate_reg<"drotrv", GPR64Opnd, rotr>,
SRLV_FM<0x16, 1>;
+ def DROTR32 : shift_rotate_imm<"drotr32", uimm5, GPR64Opnd>, SRA_FM<0x3e, 1>;
}
-let DecoderNamespace = "Mips64" in {
/// Load and Store Instructions
/// aligned
-defm LB64 : LoadM<"lb", CPU64Regs, sextloadi8>, LW_FM<0x20>;
-defm LBu64 : LoadM<"lbu", CPU64Regs, zextloadi8>, LW_FM<0x24>;
-defm LH64 : LoadM<"lh", CPU64Regs, sextloadi16>, LW_FM<0x21>;
-defm LHu64 : LoadM<"lhu", CPU64Regs, zextloadi16>, LW_FM<0x25>;
-defm LW64 : LoadM<"lw", CPU64Regs, sextloadi32>, LW_FM<0x23>;
-defm LWu64 : LoadM<"lwu", CPU64Regs, zextloadi32>, LW_FM<0x27>;
-defm SB64 : StoreM<"sb", CPU64Regs, truncstorei8>, LW_FM<0x28>;
-defm SH64 : StoreM<"sh", CPU64Regs, truncstorei16>, LW_FM<0x29>;
-defm SW64 : StoreM<"sw", CPU64Regs, truncstorei32>, LW_FM<0x2b>;
-defm LD : LoadM<"ld", CPU64Regs, load>, LW_FM<0x37>;
-defm SD : StoreM<"sd", CPU64Regs, store>, LW_FM<0x3f>;
+let isCodeGenOnly = 1 in {
+def LB64 : Load<"lb", GPR64Opnd, sextloadi8, IILoad>, LW_FM<0x20>;
+def LBu64 : Load<"lbu", GPR64Opnd, zextloadi8, IILoad>, LW_FM<0x24>;
+def LH64 : Load<"lh", GPR64Opnd, sextloadi16, IILoad>, LW_FM<0x21>;
+def LHu64 : Load<"lhu", GPR64Opnd, zextloadi16, IILoad>, LW_FM<0x25>;
+def LW64 : Load<"lw", GPR64Opnd, sextloadi32, IILoad>, LW_FM<0x23>;
+def SB64 : Store<"sb", GPR64Opnd, truncstorei8, IIStore>, LW_FM<0x28>;
+def SH64 : Store<"sh", GPR64Opnd, truncstorei16, IIStore>, LW_FM<0x29>;
+def SW64 : Store<"sw", GPR64Opnd, truncstorei32, IIStore>, LW_FM<0x2b>;
+}
+
+def LWu : Load<"lwu", GPR64Opnd, zextloadi32, IILoad>, LW_FM<0x27>;
+def LD : Load<"ld", GPR64Opnd, load, IILoad>, LW_FM<0x37>;
+def SD : Store<"sd", GPR64Opnd, store, IIStore>, LW_FM<0x3f>;
/// load/store left/right
-defm LWL64 : LoadLeftRightM<"lwl", MipsLWL, CPU64Regs>, LW_FM<0x22>;
-defm LWR64 : LoadLeftRightM<"lwr", MipsLWR, CPU64Regs>, LW_FM<0x26>;
-defm SWL64 : StoreLeftRightM<"swl", MipsSWL, CPU64Regs>, LW_FM<0x2a>;
-defm SWR64 : StoreLeftRightM<"swr", MipsSWR, CPU64Regs>, LW_FM<0x2e>;
+let isCodeGenOnly = 1 in {
+def LWL64 : LoadLeftRight<"lwl", MipsLWL, GPR64Opnd, IILoad>, LW_FM<0x22>;
+def LWR64 : LoadLeftRight<"lwr", MipsLWR, GPR64Opnd, IILoad>, LW_FM<0x26>;
+def SWL64 : StoreLeftRight<"swl", MipsSWL, GPR64Opnd, IIStore>, LW_FM<0x2a>;
+def SWR64 : StoreLeftRight<"swr", MipsSWR, GPR64Opnd, IIStore>, LW_FM<0x2e>;
+}
-defm LDL : LoadLeftRightM<"ldl", MipsLDL, CPU64Regs>, LW_FM<0x1a>;
-defm LDR : LoadLeftRightM<"ldr", MipsLDR, CPU64Regs>, LW_FM<0x1b>;
-defm SDL : StoreLeftRightM<"sdl", MipsSDL, CPU64Regs>, LW_FM<0x2c>;
-defm SDR : StoreLeftRightM<"sdr", MipsSDR, CPU64Regs>, LW_FM<0x2d>;
+def LDL : LoadLeftRight<"ldl", MipsLDL, GPR64Opnd, IILoad>, LW_FM<0x1a>;
+def LDR : LoadLeftRight<"ldr", MipsLDR, GPR64Opnd, IILoad>, LW_FM<0x1b>;
+def SDL : StoreLeftRight<"sdl", MipsSDL, GPR64Opnd, IIStore>, LW_FM<0x2c>;
+def SDR : StoreLeftRight<"sdr", MipsSDR, GPR64Opnd, IIStore>, LW_FM<0x2d>;
/// Load-linked, Store-conditional
-let Predicates = [NotN64, HasStdEnc] in {
- def LLD : LLBase<"lld", CPU64RegsOpnd, mem>, LW_FM<0x34>;
- def SCD : SCBase<"scd", CPU64RegsOpnd, mem>, LW_FM<0x3c>;
-}
-
-let Predicates = [IsN64, HasStdEnc], isCodeGenOnly = 1 in {
- def LLD_P8 : LLBase<"lld", CPU64RegsOpnd, mem64>, LW_FM<0x34>;
- def SCD_P8 : SCBase<"scd", CPU64RegsOpnd, mem64>, LW_FM<0x3c>;
-}
+def LLD : LLBase<"lld", GPR64Opnd>, LW_FM<0x34>;
+def SCD : SCBase<"scd", GPR64Opnd>, LW_FM<0x3c>;
/// Jump and Branch Instructions
-def JR64 : IndirectBranch<CPU64Regs>, MTLO_FM<8>;
-def BEQ64 : CBranch<"beq", seteq, CPU64Regs>, BEQ_FM<4>;
-def BNE64 : CBranch<"bne", setne, CPU64Regs>, BEQ_FM<5>;
-def BGEZ64 : CBranchZero<"bgez", setge, CPU64Regs>, BGEZ_FM<1, 1>;
-def BGTZ64 : CBranchZero<"bgtz", setgt, CPU64Regs>, BGEZ_FM<7, 0>;
-def BLEZ64 : CBranchZero<"blez", setle, CPU64Regs>, BGEZ_FM<6, 0>;
-def BLTZ64 : CBranchZero<"bltz", setlt, CPU64Regs>, BGEZ_FM<1, 0>;
+let isCodeGenOnly = 1 in {
+def JR64 : IndirectBranch<"jr", GPR64Opnd>, MTLO_FM<8>;
+def BEQ64 : CBranch<"beq", brtarget, seteq, GPR64Opnd>, BEQ_FM<4>;
+def BNE64 : CBranch<"bne", brtarget, setne, GPR64Opnd>, BEQ_FM<5>;
+def BGEZ64 : CBranchZero<"bgez", brtarget, setge, GPR64Opnd>, BGEZ_FM<1, 1>;
+def BGTZ64 : CBranchZero<"bgtz", brtarget, setgt, GPR64Opnd>, BGEZ_FM<7, 0>;
+def BLEZ64 : CBranchZero<"blez", brtarget, setle, GPR64Opnd>, BGEZ_FM<6, 0>;
+def BLTZ64 : CBranchZero<"bltz", brtarget, setlt, GPR64Opnd>, BGEZ_FM<1, 0>;
+def JALR64 : JumpLinkReg<"jalr", GPR64Opnd>, JALR_FM;
+def JALR64Pseudo : JumpLinkRegPseudo<GPR64Opnd, JALR, RA, GPR32Opnd>;
+def TAILCALL64_R : JumpFR<"tcallr", GPR64Opnd, MipsTailCall>,
+ MTLO_FM<8>, IsTailCall;
}
-let DecoderNamespace = "Mips64" in
-def JALR64 : JumpLinkReg<"jalr", CPU64Regs>, JALR_FM;
-def JALR64Pseudo : JumpLinkRegPseudo<CPU64Regs, JALR64, RA_64>;
-def TAILCALL64_R : JumpFR<CPU64Regs, MipsTailCall>, MTLO_FM<8>, IsTailCall;
-let DecoderNamespace = "Mips64" in {
/// Multiply and Divide Instructions.
-def DMULT : Mult<"dmult", IIImul, CPU64RegsOpnd, [HI64, LO64]>,
+def DMULT : Mult<"dmult", IIImult, GPR64Opnd, [HI0_64, LO0_64]>,
MULT_FM<0, 0x1c>;
-def DMULTu : Mult<"dmultu", IIImul, CPU64RegsOpnd, [HI64, LO64]>,
+def DMULTu : Mult<"dmultu", IIImult, GPR64Opnd, [HI0_64, LO0_64]>,
MULT_FM<0, 0x1d>;
-def PseudoDMULT : MultDivPseudo<DMULT, ACRegs128, CPU64RegsOpnd, MipsMult,
- IIImul>;
-def PseudoDMULTu : MultDivPseudo<DMULTu, ACRegs128, CPU64RegsOpnd, MipsMultu,
- IIImul>;
-def DSDIV : Div<"ddiv", IIIdiv, CPU64RegsOpnd, [HI64, LO64]>, MULT_FM<0, 0x1e>;
-def DUDIV : Div<"ddivu", IIIdiv, CPU64RegsOpnd, [HI64, LO64]>, MULT_FM<0, 0x1f>;
-def PseudoDSDIV : MultDivPseudo<DSDIV, ACRegs128, CPU64RegsOpnd, MipsDivRem,
- IIIdiv, 0>;
-def PseudoDUDIV : MultDivPseudo<DUDIV, ACRegs128, CPU64RegsOpnd, MipsDivRemU,
- IIIdiv, 0>;
-
-def MTHI64 : MoveToLOHI<"mthi", CPU64Regs, [HI64]>, MTLO_FM<0x11>;
-def MTLO64 : MoveToLOHI<"mtlo", CPU64Regs, [LO64]>, MTLO_FM<0x13>;
-def MFHI64 : MoveFromLOHI<"mfhi", CPU64Regs, [HI64]>, MFLO_FM<0x10>;
-def MFLO64 : MoveFromLOHI<"mflo", CPU64Regs, [LO64]>, MFLO_FM<0x12>;
+def PseudoDMULT : MultDivPseudo<DMULT, ACC128, GPR64Opnd, MipsMult,
+ IIImult>;
+def PseudoDMULTu : MultDivPseudo<DMULTu, ACC128, GPR64Opnd, MipsMultu,
+ IIImult>;
+def DSDIV : Div<"ddiv", IIIdiv, GPR64Opnd, [HI0_64, LO0_64]>, MULT_FM<0, 0x1e>;
+def DUDIV : Div<"ddivu", IIIdiv, GPR64Opnd, [HI0_64, LO0_64]>, MULT_FM<0, 0x1f>;
+def PseudoDSDIV : MultDivPseudo<DSDIV, ACC128, GPR64Opnd, MipsDivRem,
+ IIIdiv, 0, 1, 1>;
+def PseudoDUDIV : MultDivPseudo<DUDIV, ACC128, GPR64Opnd, MipsDivRemU,
+ IIIdiv, 0, 1, 1>;
+
+let isCodeGenOnly = 1 in {
+def MTHI64 : MoveToLOHI<"mthi", GPR64Opnd, [HI0_64]>, MTLO_FM<0x11>;
+def MTLO64 : MoveToLOHI<"mtlo", GPR64Opnd, [LO0_64]>, MTLO_FM<0x13>;
+def MFHI64 : MoveFromLOHI<"mfhi", GPR64Opnd, AC0_64>, MFLO_FM<0x10>;
+def MFLO64 : MoveFromLOHI<"mflo", GPR64Opnd, AC0_64>, MFLO_FM<0x12>;
+def PseudoMFHI64 : PseudoMFLOHI<GPR64, ACC128, MipsMFHI>;
+def PseudoMFLO64 : PseudoMFLOHI<GPR64, ACC128, MipsMFLO>;
+def PseudoMTLOHI64 : PseudoMTLOHI<ACC128, GPR64>;
/// Sign Ext In Register Instructions.
-def SEB64 : SignExtInReg<"seb", i8, CPU64Regs>, SEB_FM<0x10, 0x20>;
-def SEH64 : SignExtInReg<"seh", i16, CPU64Regs>, SEB_FM<0x18, 0x20>;
+def SEB64 : SignExtInReg<"seb", i8, GPR64Opnd>, SEB_FM<0x10, 0x20>;
+def SEH64 : SignExtInReg<"seh", i16, GPR64Opnd>, SEB_FM<0x18, 0x20>;
+}
/// Count Leading
-def DCLZ : CountLeading0<"dclz", CPU64RegsOpnd>, CLO_FM<0x24>;
-def DCLO : CountLeading1<"dclo", CPU64RegsOpnd>, CLO_FM<0x25>;
+def DCLZ : CountLeading0<"dclz", GPR64Opnd>, CLO_FM<0x24>;
+def DCLO : CountLeading1<"dclo", GPR64Opnd>, CLO_FM<0x25>;
/// Double Word Swap Bytes/HalfWords
-def DSBH : SubwordSwap<"dsbh", CPU64RegsOpnd>, SEB_FM<2, 0x24>;
-def DSHD : SubwordSwap<"dshd", CPU64RegsOpnd>, SEB_FM<5, 0x24>;
+def DSBH : SubwordSwap<"dsbh", GPR64Opnd>, SEB_FM<2, 0x24>;
+def DSHD : SubwordSwap<"dshd", GPR64Opnd>, SEB_FM<5, 0x24>;
-def LEA_ADDiu64 : EffectiveAddress<"daddiu", CPU64Regs, mem_ea_64>, LW_FM<0x19>;
+def LEA_ADDiu64 : EffectiveAddress<"daddiu", GPR64Opnd>, LW_FM<0x19>;
-}
-let DecoderNamespace = "Mips64" in {
-def RDHWR64 : ReadHardware<CPU64Regs, HW64RegsOpnd>, RDHWR_FM;
+let isCodeGenOnly = 1 in
+def RDHWR64 : ReadHardware<GPR64Opnd, HWRegsOpnd>, RDHWR_FM;
-def DEXT : ExtBase<"dext", CPU64RegsOpnd>, EXT_FM<3>;
-let Pattern = []<dag> in {
- def DEXTU : ExtBase<"dextu", CPU64RegsOpnd>, EXT_FM<2>;
- def DEXTM : ExtBase<"dextm", CPU64RegsOpnd>, EXT_FM<1>;
-}
-def DINS : InsBase<"dins", CPU64RegsOpnd>, EXT_FM<7>;
-let Pattern = []<dag> in {
- def DINSU : InsBase<"dinsu", CPU64RegsOpnd>, EXT_FM<6>;
- def DINSM : InsBase<"dinsm", CPU64RegsOpnd>, EXT_FM<5>;
-}
+def DEXT : ExtBase<"dext", GPR64Opnd, uimm6, MipsExt>, EXT_FM<3>;
+def DEXTU : ExtBase<"dextu", GPR64Opnd, uimm6>, EXT_FM<2>;
+def DEXTM : ExtBase<"dextm", GPR64Opnd, uimm5>, EXT_FM<1>;
+
+def DINS : InsBase<"dins", GPR64Opnd, uimm6, MipsIns>, EXT_FM<7>;
+def DINSU : InsBase<"dinsu", GPR64Opnd, uimm6>, EXT_FM<6>;
+def DINSM : InsBase<"dinsm", GPR64Opnd, uimm5>, EXT_FM<5>;
let isCodeGenOnly = 1, rs = 0, shamt = 0 in {
- def DSLL64_32 : FR<0x00, 0x3c, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
- "dsll\t$rd, $rt, 32", [], IIAlu>;
- def SLL64_32 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
- "sll\t$rd, $rt, 0", [], IIAlu>;
- def SLL64_64 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPU64Regs:$rt),
- "sll\t$rd, $rt, 0", [], IIAlu>;
+ def DSLL64_32 : FR<0x00, 0x3c, (outs GPR64:$rd), (ins GPR32:$rt),
+ "dsll\t$rd, $rt, 32", [], IIArith>;
+ def SLL64_32 : FR<0x0, 0x00, (outs GPR64:$rd), (ins GPR32:$rt),
+ "sll\t$rd, $rt, 0", [], IIArith>;
+ def SLL64_64 : FR<0x0, 0x00, (outs GPR64:$rd), (ins GPR64:$rt),
+ "sll\t$rd, $rt, 0", [], IIArith>;
}
}
//===----------------------------------------------------------------------===//
@@ -244,18 +228,12 @@ let isCodeGenOnly = 1, rs = 0, shamt = 0 in {
//===----------------------------------------------------------------------===//
// extended loads
-let Predicates = [NotN64, HasStdEnc] in {
+let Predicates = [HasStdEnc] in {
def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>;
def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>;
def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64 addr:$src)>;
def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64 addr:$src)>;
}
-let Predicates = [IsN64, HasStdEnc] in {
- def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64_P8 addr:$src)>;
- def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64_P8 addr:$src)>;
- def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64_P8 addr:$src)>;
- def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64_P8 addr:$src)>;
-}
// hi/lo relocs
def : MipsPat<(MipsHi tglobaladdr:$in), (LUi64 tglobaladdr:$in)>;
@@ -273,118 +251,80 @@ def : MipsPat<(MipsLo tglobaltlsaddr:$in),
(DADDiu ZERO_64, tglobaltlsaddr:$in)>;
def : MipsPat<(MipsLo texternalsym:$in), (DADDiu ZERO_64, texternalsym:$in)>;
-def : MipsPat<(add CPU64Regs:$hi, (MipsLo tglobaladdr:$lo)),
- (DADDiu CPU64Regs:$hi, tglobaladdr:$lo)>;
-def : MipsPat<(add CPU64Regs:$hi, (MipsLo tblockaddress:$lo)),
- (DADDiu CPU64Regs:$hi, tblockaddress:$lo)>;
-def : MipsPat<(add CPU64Regs:$hi, (MipsLo tjumptable:$lo)),
- (DADDiu CPU64Regs:$hi, tjumptable:$lo)>;
-def : MipsPat<(add CPU64Regs:$hi, (MipsLo tconstpool:$lo)),
- (DADDiu CPU64Regs:$hi, tconstpool:$lo)>;
-def : MipsPat<(add CPU64Regs:$hi, (MipsLo tglobaltlsaddr:$lo)),
- (DADDiu CPU64Regs:$hi, tglobaltlsaddr:$lo)>;
-
-def : WrapperPat<tglobaladdr, DADDiu, CPU64Regs>;
-def : WrapperPat<tconstpool, DADDiu, CPU64Regs>;
-def : WrapperPat<texternalsym, DADDiu, CPU64Regs>;
-def : WrapperPat<tblockaddress, DADDiu, CPU64Regs>;
-def : WrapperPat<tjumptable, DADDiu, CPU64Regs>;
-def : WrapperPat<tglobaltlsaddr, DADDiu, CPU64Regs>;
-
-defm : BrcondPats<CPU64Regs, BEQ64, BNE64, SLT64, SLTu64, SLTi64, SLTiu64,
+def : MipsPat<(add GPR64:$hi, (MipsLo tglobaladdr:$lo)),
+ (DADDiu GPR64:$hi, tglobaladdr:$lo)>;
+def : MipsPat<(add GPR64:$hi, (MipsLo tblockaddress:$lo)),
+ (DADDiu GPR64:$hi, tblockaddress:$lo)>;
+def : MipsPat<(add GPR64:$hi, (MipsLo tjumptable:$lo)),
+ (DADDiu GPR64:$hi, tjumptable:$lo)>;
+def : MipsPat<(add GPR64:$hi, (MipsLo tconstpool:$lo)),
+ (DADDiu GPR64:$hi, tconstpool:$lo)>;
+def : MipsPat<(add GPR64:$hi, (MipsLo tglobaltlsaddr:$lo)),
+ (DADDiu GPR64:$hi, tglobaltlsaddr:$lo)>;
+
+def : WrapperPat<tglobaladdr, DADDiu, GPR64>;
+def : WrapperPat<tconstpool, DADDiu, GPR64>;
+def : WrapperPat<texternalsym, DADDiu, GPR64>;
+def : WrapperPat<tblockaddress, DADDiu, GPR64>;
+def : WrapperPat<tjumptable, DADDiu, GPR64>;
+def : WrapperPat<tglobaltlsaddr, DADDiu, GPR64>;
+
+defm : BrcondPats<GPR64, BEQ64, BNE64, SLT64, SLTu64, SLTi64, SLTiu64,
ZERO_64>;
+def : MipsPat<(brcond (i32 (setlt i64:$lhs, 1)), bb:$dst),
+ (BLEZ64 i64:$lhs, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setgt i64:$lhs, -1)), bb:$dst),
+ (BGEZ64 i64:$lhs, bb:$dst)>;
+
// setcc patterns
-defm : SeteqPats<CPU64Regs, SLTiu64, XOR64, SLTu64, ZERO_64>;
-defm : SetlePats<CPU64Regs, SLT64, SLTu64>;
-defm : SetgtPats<CPU64Regs, SLT64, SLTu64>;
-defm : SetgePats<CPU64Regs, SLT64, SLTu64>;
-defm : SetgeImmPats<CPU64Regs, SLTi64, SLTiu64>;
+defm : SeteqPats<GPR64, SLTiu64, XOR64, SLTu64, ZERO_64>;
+defm : SetlePats<GPR64, SLT64, SLTu64>;
+defm : SetgtPats<GPR64, SLT64, SLTu64>;
+defm : SetgePats<GPR64, SLT64, SLTu64>;
+defm : SetgeImmPats<GPR64, SLTi64, SLTiu64>;
// truncate
-def : MipsPat<(i32 (trunc CPU64Regs:$src)),
- (SLL (EXTRACT_SUBREG CPU64Regs:$src, sub_32), 0)>,
- Requires<[IsN64, HasStdEnc]>;
+def : MipsPat<(i32 (trunc GPR64:$src)),
+ (SLL (EXTRACT_SUBREG GPR64:$src, sub_32), 0)>,
+ Requires<[HasStdEnc]>;
// 32-to-64-bit extension
-def : MipsPat<(i64 (anyext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
-def : MipsPat<(i64 (zext CPURegs:$src)), (DSRL (DSLL64_32 CPURegs:$src), 32)>;
-def : MipsPat<(i64 (sext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
+def : MipsPat<(i64 (anyext GPR32:$src)), (SLL64_32 GPR32:$src)>;
+def : MipsPat<(i64 (zext GPR32:$src)), (DSRL (DSLL64_32 GPR32:$src), 32)>;
+def : MipsPat<(i64 (sext GPR32:$src)), (SLL64_32 GPR32:$src)>;
// Sign extend in register
-def : MipsPat<(i64 (sext_inreg CPU64Regs:$src, i32)),
- (SLL64_64 CPU64Regs:$src)>;
+def : MipsPat<(i64 (sext_inreg GPR64:$src, i32)),
+ (SLL64_64 GPR64:$src)>;
// bswap MipsPattern
-def : MipsPat<(bswap CPU64Regs:$rt), (DSHD (DSBH CPU64Regs:$rt))>;
-
-// mflo/hi patterns.
-def : MipsPat<(i64 (ExtractLOHI ACRegs128:$ac, imm:$lohi_idx)),
- (EXTRACT_SUBREG ACRegs128:$ac, imm:$lohi_idx)>;
+def : MipsPat<(bswap GPR64:$rt), (DSHD (DSBH GPR64:$rt))>;
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//
def : InstAlias<"move $dst, $src",
- (DADDu CPU64RegsOpnd:$dst, CPU64RegsOpnd:$src, ZERO_64), 1>,
+ (DADDu GPR64Opnd:$dst, GPR64Opnd:$src, ZERO_64), 1>,
Requires<[HasMips64]>;
-def : InstAlias<"move $dst, $src",
- (OR64 CPU64RegsOpnd:$dst, CPU64RegsOpnd:$src, ZERO_64), 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"and $rs, $rt, $imm",
- (DANDi CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, uimm16_64:$imm),
- 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"slt $rs, $rt, $imm",
- (SLTi64 CPURegsOpnd:$rs, CPU64Regs:$rt, simm16_64:$imm), 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"xor $rs, $rt, $imm",
- (XORi64 CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, uimm16_64:$imm),
- 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"not $rt, $rs",
- (NOR64 CPU64RegsOpnd:$rt, CPU64RegsOpnd:$rs, ZERO_64), 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"j $rs", (JR64 CPU64Regs:$rs), 0>, Requires<[HasMips64]>;
-def : InstAlias<"jalr $rs", (JALR64 RA_64, CPU64Regs:$rs)>,
- Requires<[HasMips64]>;
-def : InstAlias<"jal $rs", (JALR64 RA_64, CPU64Regs:$rs), 0>,
- Requires<[HasMips64]>;
-def : InstAlias<"jal $rd,$rs", (JALR64 CPU64Regs:$rd, CPU64Regs:$rs), 0>,
- Requires<[HasMips64]>;
def : InstAlias<"daddu $rs, $rt, $imm",
- (DADDiu CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, simm16_64:$imm),
- 1>;
+ (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
+ 0>;
def : InstAlias<"dadd $rs, $rt, $imm",
- (DADDi CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, simm16_64:$imm),
- 1>;
-def : InstAlias<"or $rs, $rt, $imm",
- (ORi64 CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, uimm16_64:$imm),
- 1>, Requires<[HasMips64]>;
-/// Move between CPU and coprocessor registers
+ (DADDi GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
+ 0>;
-let DecoderNamespace = "Mips64" in {
-def DMFC0_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rt),
- (ins CPU64RegsOpnd:$rd, uimm16:$sel),
- "dmfc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 1>;
-def DMTC0_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rd, uimm16:$sel),
- (ins CPU64RegsOpnd:$rt),
- "dmtc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 5>;
-def DMFC2_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rt),
- (ins CPU64RegsOpnd:$rd, uimm16:$sel),
- "dmfc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 1>;
-def DMTC2_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rd, uimm16:$sel),
- (ins CPU64RegsOpnd:$rt),
- "dmtc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 5>;
+/// Move between CPU and coprocessor registers
+let DecoderNamespace = "Mips64", Predicates = [HasMips64] in {
+def DMFC0 : MFC3OP<"dmfc0", GPR64Opnd>, MFC3OP_FM<0x10, 1>;
+def DMTC0 : MFC3OP<"dmtc0", GPR64Opnd>, MFC3OP_FM<0x10, 5>;
+def DMFC2 : MFC3OP<"dmfc2", GPR64Opnd>, MFC3OP_FM<0x12, 1>;
+def DMTC2 : MFC3OP<"dmtc2", GPR64Opnd>, MFC3OP_FM<0x12, 5>;
}
// Two operand (implicit 0 selector) versions:
-def : InstAlias<"dmfc0 $rt, $rd",
- (DMFC0_3OP64 CPU64RegsOpnd:$rt, CPU64RegsOpnd:$rd, 0), 0>;
-def : InstAlias<"dmtc0 $rt, $rd",
- (DMTC0_3OP64 CPU64RegsOpnd:$rd, 0, CPU64RegsOpnd:$rt), 0>;
-def : InstAlias<"dmfc2 $rt, $rd",
- (DMFC2_3OP64 CPU64RegsOpnd:$rt, CPU64RegsOpnd:$rd, 0), 0>;
-def : InstAlias<"dmtc2 $rt, $rd",
- (DMTC2_3OP64 CPU64RegsOpnd:$rd, 0, CPU64RegsOpnd:$rt), 0>;
+def : InstAlias<"dmfc0 $rt, $rd", (DMFC0 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
+def : InstAlias<"dmtc0 $rt, $rd", (DMTC0 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
+def : InstAlias<"dmfc2 $rt, $rd", (DMFC2 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
+def : InstAlias<"dmtc2 $rt, $rd", (DMTC2 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
diff --git a/lib/Target/Mips/MipsAnalyzeImmediate.cpp b/lib/Target/Mips/MipsAnalyzeImmediate.cpp
index 99b163ec33ac..31a9b7d63983 100644
--- a/lib/Target/Mips/MipsAnalyzeImmediate.cpp
+++ b/lib/Target/Mips/MipsAnalyzeImmediate.cpp
@@ -40,7 +40,7 @@ void MipsAnalyzeImmediate::GetInstSeqLsORi(uint64_t Imm, unsigned RemSize,
void MipsAnalyzeImmediate::GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize,
InstSeqLs &SeqLs) {
- unsigned Shamt = CountTrailingZeros_64(Imm);
+ unsigned Shamt = countTrailingZeros(Imm);
GetInstSeqLs(Imm >> Shamt, RemSize - Shamt, SeqLs);
AddInstr(SeqLs, Inst(SLL, Shamt));
}
diff --git a/lib/Target/Mips/MipsAnalyzeImmediate.h b/lib/Target/Mips/MipsAnalyzeImmediate.h
index a094ddae45de..cc09034a9c39 100644
--- a/lib/Target/Mips/MipsAnalyzeImmediate.h
+++ b/lib/Target/Mips/MipsAnalyzeImmediate.h
@@ -22,7 +22,7 @@ namespace llvm {
};
typedef SmallVector<Inst, 7 > InstSeq;
- /// Analyze - Get an instrucion sequence to load immediate Imm. The last
+ /// Analyze - Get an instruction sequence to load immediate Imm. The last
/// instruction in the sequence must be an ADDiu if LastInstrIsADDiu is
/// true;
const InstSeq &Analyze(uint64_t Imm, unsigned Size, bool LastInstrIsADDiu);
@@ -32,19 +32,19 @@ namespace llvm {
/// AddInstr - Add I to all instruction sequences in SeqLs.
void AddInstr(InstSeqLs &SeqLs, const Inst &I);
- /// GetInstSeqLsADDiu - Get instrucion sequences which end with an ADDiu to
+ /// GetInstSeqLsADDiu - Get instruction sequences which end with an ADDiu to
/// load immediate Imm
void GetInstSeqLsADDiu(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
- /// GetInstSeqLsORi - Get instrucion sequences which end with an ORi to
+ /// GetInstSeqLsORi - Get instruction sequences which end with an ORi to
/// load immediate Imm
void GetInstSeqLsORi(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
- /// GetInstSeqLsSLL - Get instrucion sequences which end with a SLL to
+ /// GetInstSeqLsSLL - Get instruction sequences which end with a SLL to
/// load immediate Imm
void GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
- /// GetInstSeqLs - Get instrucion sequences to load immediate Imm.
+ /// GetInstSeqLs - Get instruction sequences to load immediate Imm.
void GetInstSeqLs(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
/// ReplaceADDiuSLLWithLUi - Replace an ADDiu & SLL pair with a LUi.
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index 6e4feda4f531..45c439826422 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -15,11 +15,11 @@
#define DEBUG_TYPE "mips-asm-printer"
#include "InstPrinter/MipsInstPrinter.h"
#include "MCTargetDesc/MipsBaseInfo.h"
-#include "MCTargetDesc/MipsELFStreamer.h"
#include "Mips.h"
#include "MipsAsmPrinter.h"
#include "MipsInstrInfo.h"
#include "MipsMCInstLower.h"
+#include "MipsTargetStreamer.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
@@ -33,8 +33,8 @@
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/TargetRegistry.h"
@@ -45,12 +45,17 @@
using namespace llvm;
+MipsTargetStreamer &MipsAsmPrinter::getTargetStreamer() {
+ return static_cast<MipsTargetStreamer &>(OutStreamer.getTargetStreamer());
+}
+
bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Initialize TargetLoweringObjectFile.
if (Subtarget->allowMixed16_32())
const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
.Initialize(OutContext, TM);
MipsFI = MF.getInfo<MipsFunctionInfo>();
+ MCP = MF.getConstantPool();
AsmPrinter::runOnMachineFunction(MF);
return true;
}
@@ -71,6 +76,39 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
return;
}
+ // If we just ended a constant pool, mark it as such.
+ if (InConstantPool && MI->getOpcode() != Mips::CONSTPOOL_ENTRY) {
+ OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
+ InConstantPool = false;
+ }
+ if (MI->getOpcode() == Mips::CONSTPOOL_ENTRY) {
+ // CONSTPOOL_ENTRY - This instruction represents a floating
+ // constant pool in the function. The first operand is the ID#
+ // for this instruction, the second is the index of this entry
+ // in the MachineConstantPool, and the third is the size in
+ // bytes of this constant pool entry.
+ // The required alignment is specified on the basic block holding this MI.
+ //
+ unsigned LabelId = (unsigned)MI->getOperand(0).getImm();
+ unsigned CPIdx = (unsigned)MI->getOperand(1).getIndex();
+
+ // If this is the first entry of the pool, mark it.
+ if (!InConstantPool) {
+ OutStreamer.EmitDataRegion(MCDR_DataRegion);
+ InConstantPool = true;
+ }
+
+ OutStreamer.EmitLabel(GetCPISymbol(LabelId));
+
+ const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPIdx];
+ if (MCPE.isMachineConstantPoolEntry())
+ EmitMachineConstantPoolValue(MCPE.Val.MachineCPVal);
+ else
+ EmitGlobalConstant(MCPE.Val.ConstVal);
+ return;
+ }
+
+
MachineBasicBlock::const_instr_iterator I = MI;
MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
@@ -141,7 +179,7 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
const MachineFrameInfo *MFI = MF->getFrameInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
// size of stack area to which FP callee-saved regs are saved.
- unsigned CPURegSize = Mips::CPURegsRegClass.getSize();
+ unsigned CPURegSize = Mips::GPR32RegClass.getSize();
unsigned FGR32RegSize = Mips::FGR32RegClass.getSize();
unsigned AFGR64RegSize = Mips::AFGR64RegClass.getSize();
bool HasAFGR64Reg = false;
@@ -151,7 +189,7 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
// Set FPU Bitmask.
for (i = 0; i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- if (Mips::CPURegsRegClass.contains(Reg))
+ if (Mips::GPR32RegClass.contains(Reg))
break;
unsigned RegNum = TM.getRegisterInfo()->getEncodingValue(Reg);
@@ -238,16 +276,15 @@ void MipsAsmPrinter::EmitFunctionEntryLabel() {
}
if (Subtarget->inMicroMipsMode())
- if (MipsELFStreamer *MES = dyn_cast<MipsELFStreamer>(&OutStreamer))
- MES->emitMipsSTOCG(*Subtarget, CurrentFnSym,
- (unsigned)ELF::STO_MIPS_MICROMIPS);
+ getTargetStreamer().emitMipsHackSTOCG(CurrentFnSym,
+ (unsigned)ELF::STO_MIPS_MICROMIPS);
OutStreamer.EmitLabel(CurrentFnSym);
}
/// EmitFunctionBodyStart - Targets can override this to emit stuff before
/// the first basic block in the function.
void MipsAsmPrinter::EmitFunctionBodyStart() {
- MCInstLowering.Initialize(Mang, &MF->getContext());
+ MCInstLowering.Initialize(&MF->getContext());
bool IsNakedFunction =
MF->getFunction()->
@@ -284,6 +321,12 @@ void MipsAsmPrinter::EmitFunctionBodyEnd() {
}
OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName()));
}
+ // Make sure to terminate any constant pools that were at the end
+ // of the function.
+ if (!InConstantPool)
+ return;
+ InConstantPool = false;
+ OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
}
/// isBlockOnlyReachableByFallthough - Return true if the basic block has
@@ -418,6 +461,11 @@ bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
return false;
}
}
+ case 'w':
+ // Print MSA registers for the 'f' constraint
+ // In LLVM, the 'w' modifier doesn't need to do anything.
+ // We can just call printOperand as normal.
+ break;
}
}
@@ -485,7 +533,7 @@ void MipsAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
return;
case MachineOperand::MO_GlobalAddress:
- O << *Mang->getSymbol(MO.getGlobal());
+ O << *getSymbol(MO.getGlobal());
break;
case MachineOperand::MO_BlockAddress: {
@@ -526,6 +574,15 @@ void MipsAsmPrinter::printUnsignedImm(const MachineInstr *MI, int opNum,
printOperand(MI, opNum, O);
}
+void MipsAsmPrinter::printUnsignedImm8(const MachineInstr *MI, int opNum,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(opNum);
+ if (MO.isImm())
+ O << (unsigned short int)(unsigned char)MO.getImm();
+ else
+ printOperand(MI, opNum, O);
+}
+
void MipsAsmPrinter::
printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O) {
// Load/Store memory operands -- imm($reg)
@@ -557,6 +614,15 @@ printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
// FIXME: Use SwitchSection.
+ // TODO: Need to add -mabicalls and -mno-abicalls flags.
+ // Currently we assume that -mabicalls is the default.
+ if (OutStreamer.hasRawTextSupport()) {
+ OutStreamer.EmitRawText(StringRef("\t.abicalls"));
+ Reloc::Model RM = Subtarget->getRelocationModel();
+ if (RM == Reloc::Static && !Subtarget->hasMips64())
+ OutStreamer.EmitRawText(StringRef("\t.option\tpic0"));
+ }
+
// Tell the assembler which ABI we are using
if (OutStreamer.hasRawTextSupport())
OutStreamer.EmitRawText("\t.section .mdebug." +
@@ -578,25 +644,54 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
}
-void MipsAsmPrinter::EmitEndOfAsmFile(Module &M) {
+static void emitELFHeaderFlagsCG(MipsTargetStreamer &TargetStreamer,
+ const MipsSubtarget &Subtarget) {
+ // Update e_header flags
+ unsigned EFlags = 0;
+
+ // TODO: Need to add -mabicalls and -mno-abicalls flags.
+ // Currently we assume that -mabicalls is the default.
+ EFlags |= ELF::EF_MIPS_CPIC;
+
+ if (Subtarget.inMips16Mode())
+ EFlags |= ELF::EF_MIPS_ARCH_ASE_M16;
+ else
+ EFlags |= ELF::EF_MIPS_NOREORDER;
+
+ // Architecture
+ if (Subtarget.hasMips64r2())
+ EFlags |= ELF::EF_MIPS_ARCH_64R2;
+ else if (Subtarget.hasMips64())
+ EFlags |= ELF::EF_MIPS_ARCH_64;
+ else if (Subtarget.hasMips32r2())
+ EFlags |= ELF::EF_MIPS_ARCH_32R2;
+ else
+ EFlags |= ELF::EF_MIPS_ARCH_32;
+
+ if (Subtarget.inMicroMipsMode())
+ EFlags |= ELF::EF_MIPS_MICROMIPS;
- if (OutStreamer.hasRawTextSupport()) return;
+ // ABI
+ if (Subtarget.isABI_O32())
+ EFlags |= ELF::EF_MIPS_ABI_O32;
+ // Relocation Model
+ Reloc::Model RM = Subtarget.getRelocationModel();
+ if (RM == Reloc::PIC_ || RM == Reloc::Default)
+ EFlags |= ELF::EF_MIPS_PIC;
+ else if (RM == Reloc::Static)
+ ; // Do nothing for Reloc::Static
+ else
+ llvm_unreachable("Unsupported relocation model for e_flags");
+
+ TargetStreamer.emitMipsHackELFFlags(EFlags);
+}
+
+void MipsAsmPrinter::EmitEndOfAsmFile(Module &M) {
// Emit Mips ELF register info
Subtarget->getMReginfo().emitMipsReginfoSectionCG(
OutStreamer, getObjFileLowering(), *Subtarget);
- if (MipsELFStreamer *MES = dyn_cast<MipsELFStreamer>(&OutStreamer))
- MES->emitELFHeaderFlagsCG(*Subtarget);
-}
-
-MachineLocation
-MipsAsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
- // Handles frame addresses emitted in MipsInstrInfo::emitFrameIndexDebugValue.
- assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm() &&
- "Unexpected MachineOperand types");
- return MachineLocation(MI->getOperand(0).getReg(),
- MI->getOperand(1).getImm());
+ emitELFHeaderFlagsCG(getTargetStreamer(), *Subtarget);
}
void MipsAsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
diff --git a/lib/Target/Mips/MipsAsmPrinter.h b/lib/Target/Mips/MipsAsmPrinter.h
index dbdaf266b75f..11c6acd208d1 100644
--- a/lib/Target/Mips/MipsAsmPrinter.h
+++ b/lib/Target/Mips/MipsAsmPrinter.h
@@ -25,10 +25,12 @@ namespace llvm {
class MCStreamer;
class MachineInstr;
class MachineBasicBlock;
+class MipsTargetStreamer;
class Module;
class raw_ostream;
class LLVM_LIBRARY_VISIBILITY MipsAsmPrinter : public AsmPrinter {
+ MipsTargetStreamer &getTargetStreamer();
void EmitInstrWithMacroNoAT(const MachineInstr *MI);
@@ -40,6 +42,16 @@ private:
// lowerOperand - Convert a MachineOperand into the equivalent MCOperand.
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp);
+ /// MCP - Keep a pointer to constantpool entries of the current
+ /// MachineFunction.
+ const MachineConstantPool *MCP;
+
+ /// InConstantPool - Maintain state when emitting a sequence of constant
+ /// pool entries so we can properly mark them as data regions.
+ bool InConstantPool;
+
+ bool UsingConstantPools;
+
public:
const MipsSubtarget *Subtarget;
@@ -47,8 +59,11 @@ public:
MipsMCInstLower MCInstLowering;
explicit MipsAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), MCInstLowering(*this) {
+ : AsmPrinter(TM, Streamer), MCP(0), InConstantPool(false),
+ MCInstLowering(*this) {
Subtarget = &TM.getSubtarget<MipsSubtarget>();
+ UsingConstantPools =
+ (Subtarget->inMips16Mode() && Subtarget->useConstantIslands());
}
virtual const char *getPassName() const {
@@ -57,6 +72,12 @@ public:
virtual bool runOnMachineFunction(MachineFunction &MF);
+ virtual void EmitConstantPool() LLVM_OVERRIDE {
+ if (!UsingConstantPools)
+ AsmPrinter::EmitConstantPool();
+ // We emit constant pools in a custom way.
+ }
+
void EmitInstruction(const MachineInstr *MI);
void printSavedRegsBitmask(raw_ostream &O);
void printHex32(unsigned int Value, raw_ostream &O);
@@ -75,13 +96,13 @@ public:
raw_ostream &O);
void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
void printUnsignedImm(const MachineInstr *MI, int opNum, raw_ostream &O);
+ void printUnsignedImm8(const MachineInstr *MI, int opNum, raw_ostream &O);
void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
void printMemOperandEA(const MachineInstr *MI, int opNum, raw_ostream &O);
void printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
const char *Modifier = 0);
void EmitStartOfAsmFile(Module &M);
void EmitEndOfAsmFile(Module &M);
- virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
};
}
diff --git a/lib/Target/Mips/MipsCallingConv.td b/lib/Target/Mips/MipsCallingConv.td
index 462def76cc80..66391cb9cb1e 100644
--- a/lib/Target/Mips/MipsCallingConv.td
+++ b/lib/Target/Mips/MipsCallingConv.td
@@ -26,8 +26,10 @@ def RetCC_MipsO32 : CallingConv<[
// f32 are returned in registers F0, F2
CCIfType<[f32], CCAssignToReg<[F0, F2]>>,
- // f64 are returned in register D0, D1
- CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()", CCAssignToReg<[D0, D1]>>>
+ // f64 values are returned in D0_64 and D1_64 in FP64bit mode or
+ // in D0 and D1 in FP32bit mode.
+ CCIfType<[f64], CCIfSubtarget<"isFP64bit()", CCAssignToReg<[D0_64, D1_64]>>>,
+ CCIfType<[f64], CCIfSubtarget<"isNotFP64bit()", CCAssignToReg<[D0, D1]>>>
]>;
//===----------------------------------------------------------------------===//
@@ -149,7 +151,16 @@ def RetCC_MipsEABI : CallingConv<[
//===----------------------------------------------------------------------===//
def CC_MipsO32_FastCC : CallingConv<[
// f64 arguments are passed in double-precision floating-point registers.
- CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7, D8, D9]>>,
+ CCIfType<[f64], CCIfSubtarget<"isNotFP64bit()",
+ CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7,
+ D8, D9]>>>,
+ CCIfType<[f64], CCIfSubtarget<"isFP64bit()",
+ CCAssignToReg<[D0_64, D1_64, D2_64, D3_64,
+ D4_64, D5_64, D6_64, D7_64,
+ D8_64, D9_64, D10_64, D11_64,
+ D12_64, D13_64, D14_64, D15_64,
+ D16_64, D17_64, D18_64,
+ D19_64]>>>,
// Stack parameter slots for f64 are 64-bit doublewords and 8-byte aligned.
CCIfType<[f64], CCAssignToStack<8, 8>>
@@ -196,6 +207,13 @@ def CC_Mips_FastCC : CallingConv<[
CCDelegateTo<CC_MipsN_FastCC>
]>;
+//==
+
+def CC_Mips16RetHelper : CallingConv<[
+ // Integer arguments are passed in integer registers.
+ CCIfType<[i32], CCAssignToReg<[V0, V1, A0, A1]>>
+]>;
+
//===----------------------------------------------------------------------===//
// Mips Calling Convention Dispatch
//===----------------------------------------------------------------------===//
@@ -217,9 +235,15 @@ def CSR_SingleFloatOnly : CalleeSavedRegs<(add (sequence "F%u", 31, 20), RA, FP,
def CSR_O32 : CalleeSavedRegs<(add (sequence "D%u", 15, 10), RA, FP,
(sequence "S%u", 7, 0))>;
+def CSR_O32_FP64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 20), RA, FP,
+ (sequence "S%u", 7, 0))>;
+
def CSR_N32 : CalleeSavedRegs<(add D31_64, D29_64, D27_64, D25_64, D24_64,
D23_64, D22_64, D21_64, RA_64, FP_64, GP_64,
(sequence "S%u_64", 7, 0))>;
def CSR_N64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 24), RA_64, FP_64,
GP_64, (sequence "S%u_64", 7, 0))>;
+
+def CSR_Mips16RetHelper :
+ CalleeSavedRegs<(add V0, V1, (sequence "A%u", 3, 0), S0, S1)>;
diff --git a/lib/Target/Mips/MipsCodeEmitter.cpp b/lib/Target/Mips/MipsCodeEmitter.cpp
index 3fc402ba6423..ca4163d4e58c 100644
--- a/lib/Target/Mips/MipsCodeEmitter.cpp
+++ b/lib/Target/Mips/MipsCodeEmitter.cpp
@@ -65,8 +65,7 @@ class MipsCodeEmitter : public MachineFunctionPass {
public:
MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
- : MachineFunctionPass(ID), JTI(0),
- II((const MipsInstrInfo *) tm.getInstrInfo()), TD(tm.getDataLayout()),
+ : MachineFunctionPass(ID), JTI(0), II(0), TD(0),
TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
@@ -106,11 +105,16 @@ private:
const MachineOperand &MO) const;
unsigned getJumpTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getJumpTargetOpValueMM(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getBranchTargetOpValueMM(const MachineInstr &MI,
+ unsigned OpNo) const;
unsigned getBranchTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;
unsigned getMemEncoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getMemEncodingMMImm12(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeExtEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeInsEncoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getLSAImmEncoding(const MachineInstr &MI, unsigned OpNo) const;
void emitGlobalAddressUnaligned(const GlobalValue *GV, unsigned Reloc,
int Offset) const;
@@ -187,6 +191,18 @@ unsigned MipsCodeEmitter::getJumpTargetOpValue(const MachineInstr &MI,
return 0;
}
+unsigned MipsCodeEmitter::getJumpTargetOpValueMM(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
+unsigned MipsCodeEmitter::getBranchTargetOpValueMM(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
unsigned MipsCodeEmitter::getBranchTargetOpValue(const MachineInstr &MI,
unsigned OpNo) const {
MachineOperand MO = MI.getOperand(OpNo);
@@ -202,6 +218,12 @@ unsigned MipsCodeEmitter::getMemEncoding(const MachineInstr &MI,
return (getMachineOpValue(MI, MI.getOperand(OpNo+1)) & 0xFFFF) | RegBits;
}
+unsigned MipsCodeEmitter::getMemEncodingMMImm12(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
unsigned MipsCodeEmitter::getSizeExtEncoding(const MachineInstr &MI,
unsigned OpNo) const {
// size is encoded as size-1.
@@ -215,6 +237,12 @@ unsigned MipsCodeEmitter::getSizeInsEncoding(const MachineInstr &MI,
getMachineOpValue(MI, MI.getOperand(OpNo)) - 1;
}
+unsigned MipsCodeEmitter::getLSAImmEncoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
@@ -317,6 +345,14 @@ bool MipsCodeEmitter::expandPseudos(MachineBasicBlock::instr_iterator &MI,
BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Mips::SLL), Mips::ZERO)
.addReg(Mips::ZERO).addImm(0);
break;
+ case Mips::B:
+ BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Mips::BEQ)).addReg(Mips::ZERO)
+ .addReg(Mips::ZERO).addOperand(MI->getOperand(0));
+ break;
+ case Mips::TRAP:
+ BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Mips::BREAK)).addImm(0)
+ .addImm(0);
+ break;
case Mips::JALRPseudo:
BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Mips::JALR), Mips::RA)
.addReg(MI->getOperand(0).getReg());
diff --git a/lib/Target/Mips/MipsCondMov.td b/lib/Target/Mips/MipsCondMov.td
index 42e4c99f05d6..2de1430a395f 100644
--- a/lib/Target/Mips/MipsCondMov.td
+++ b/lib/Target/Mips/MipsCondMov.td
@@ -16,15 +16,15 @@
// MipsISelLowering::EmitInstrWithCustomInserter if target does not have
// conditional move instructions.
// cond:int, data:int
-class CMov_I_I_FT<string opstr, RegisterClass CRC, RegisterClass DRC,
+class CMov_I_I_FT<string opstr, RegisterOperand CRC, RegisterOperand DRC,
InstrItinClass Itin> :
InstSE<(outs DRC:$rd), (ins DRC:$rs, CRC:$rt, DRC:$F),
- !strconcat(opstr, "\t$rd, $rs, $rt"), [], Itin, FrmFR> {
+ !strconcat(opstr, "\t$rd, $rs, $rt"), [], Itin, FrmFR, opstr> {
let Constraints = "$F = $rd";
}
// cond:int, data:float
-class CMov_I_F_FT<string opstr, RegisterClass CRC, RegisterClass DRC,
+class CMov_I_F_FT<string opstr, RegisterOperand CRC, RegisterOperand DRC,
InstrItinClass Itin> :
InstSE<(outs DRC:$fd), (ins DRC:$fs, CRC:$rt, DRC:$F),
!strconcat(opstr, "\t$fd, $fs, $rt"), [], Itin, FrmFR> {
@@ -32,22 +32,22 @@ class CMov_I_F_FT<string opstr, RegisterClass CRC, RegisterClass DRC,
}
// cond:float, data:int
-class CMov_F_I_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
+class CMov_F_I_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
- InstSE<(outs RC:$rd), (ins RC:$rs, RC:$F),
- !strconcat(opstr, "\t$rd, $rs, $$fcc0"),
- [(set RC:$rd, (OpNode RC:$rs, RC:$F))], Itin, FrmFR> {
- let Uses = [FCR31];
+ InstSE<(outs RC:$rd), (ins RC:$rs, FCCRegsOpnd:$fcc, RC:$F),
+ !strconcat(opstr, "\t$rd, $rs, $fcc"),
+ [(set RC:$rd, (OpNode RC:$rs, FCCRegsOpnd:$fcc, RC:$F))],
+ Itin, FrmFR, opstr> {
let Constraints = "$F = $rd";
}
// cond:float, data:float
-class CMov_F_F_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
+class CMov_F_F_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
- InstSE<(outs RC:$fd), (ins RC:$fs, RC:$F),
- !strconcat(opstr, "\t$fd, $fs, $$fcc0"),
- [(set RC:$fd, (OpNode RC:$fs, RC:$F))], Itin, FrmFR> {
- let Uses = [FCR31];
+ InstSE<(outs RC:$fd), (ins RC:$fs, FCCRegsOpnd:$fcc, RC:$F),
+ !strconcat(opstr, "\t$fd, $fs, $fcc"),
+ [(set RC:$fd, (OpNode RC:$fs, FCCRegsOpnd:$fcc, RC:$F))],
+ Itin, FrmFR> {
let Constraints = "$F = $fd";
}
@@ -103,151 +103,143 @@ multiclass MovnPats<RegisterClass CRC, RegisterClass DRC, Instruction MOVNInst,
}
// Instantiation of instructions.
-def MOVZ_I_I : CMov_I_I_FT<"movz", CPURegs, CPURegs, NoItinerary>,
+def MOVZ_I_I : MMRel, CMov_I_I_FT<"movz", GPR32Opnd, GPR32Opnd, IIArith>,
ADD_FM<0, 0xa>;
-let Predicates = [HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def MOVZ_I_I64 : CMov_I_I_FT<"movz", CPURegs, CPU64Regs, NoItinerary>,
+
+let Predicates = [HasStdEnc], isCodeGenOnly = 1 in {
+ def MOVZ_I_I64 : CMov_I_I_FT<"movz", GPR32Opnd, GPR64Opnd, IIArith>,
+ ADD_FM<0, 0xa>;
+ def MOVZ_I64_I : CMov_I_I_FT<"movz", GPR64Opnd, GPR32Opnd, IIArith>,
+ ADD_FM<0, 0xa>;
+ def MOVZ_I64_I64 : CMov_I_I_FT<"movz", GPR64Opnd, GPR64Opnd, IIArith>,
ADD_FM<0, 0xa>;
- def MOVZ_I64_I : CMov_I_I_FT<"movz", CPU64Regs, CPURegs, NoItinerary>,
- ADD_FM<0, 0xa> {
- let isCodeGenOnly = 1;
- }
- def MOVZ_I64_I64 : CMov_I_I_FT<"movz", CPU64Regs, CPU64Regs, NoItinerary>,
- ADD_FM<0, 0xa> {
- let isCodeGenOnly = 1;
- }
}
-def MOVN_I_I : CMov_I_I_FT<"movn", CPURegs, CPURegs, NoItinerary>,
+def MOVN_I_I : MMRel, CMov_I_I_FT<"movn", GPR32Opnd, GPR32Opnd, IIArith>,
ADD_FM<0, 0xb>;
-let Predicates = [HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def MOVN_I_I64 : CMov_I_I_FT<"movn", CPURegs, CPU64Regs, NoItinerary>,
+
+let Predicates = [HasStdEnc], isCodeGenOnly = 1 in {
+ def MOVN_I_I64 : CMov_I_I_FT<"movn", GPR32Opnd, GPR64Opnd, IIArith>,
+ ADD_FM<0, 0xb>;
+ def MOVN_I64_I : CMov_I_I_FT<"movn", GPR64Opnd, GPR32Opnd, IIArith>,
+ ADD_FM<0, 0xb>;
+ def MOVN_I64_I64 : CMov_I_I_FT<"movn", GPR64Opnd, GPR64Opnd, IIArith>,
ADD_FM<0, 0xb>;
- def MOVN_I64_I : CMov_I_I_FT<"movn", CPU64Regs, CPURegs, NoItinerary>,
- ADD_FM<0, 0xb> {
- let isCodeGenOnly = 1;
- }
- def MOVN_I64_I64 : CMov_I_I_FT<"movn", CPU64Regs, CPU64Regs, NoItinerary>,
- ADD_FM<0, 0xb> {
- let isCodeGenOnly = 1;
- }
}
-def MOVZ_I_S : CMov_I_F_FT<"movz.s", CPURegs, FGR32, IIFmove>,
+def MOVZ_I_S : CMov_I_F_FT<"movz.s", GPR32Opnd, FGR32Opnd, IIFmove>,
CMov_I_F_FM<18, 16>;
-def MOVZ_I64_S : CMov_I_F_FT<"movz.s", CPU64Regs, FGR32, IIFmove>,
- CMov_I_F_FM<18, 16>, Requires<[HasMips64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
-}
-def MOVN_I_S : CMov_I_F_FT<"movn.s", CPURegs, FGR32, IIFmove>,
+let isCodeGenOnly = 1 in
+def MOVZ_I64_S : CMov_I_F_FT<"movz.s", GPR64Opnd, FGR32Opnd, IIFmove>,
+ CMov_I_F_FM<18, 16>, Requires<[HasMips64, HasStdEnc]>;
+
+def MOVN_I_S : CMov_I_F_FT<"movn.s", GPR32Opnd, FGR32Opnd, IIFmove>,
CMov_I_F_FM<19, 16>;
-def MOVN_I64_S : CMov_I_F_FT<"movn.s", CPU64Regs, FGR32, IIFmove>,
- CMov_I_F_FM<19, 16>, Requires<[HasMips64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
-}
+
+let isCodeGenOnly = 1 in
+def MOVN_I64_S : CMov_I_F_FT<"movn.s", GPR64Opnd, FGR32Opnd, IIFmove>,
+ CMov_I_F_FM<19, 16>, Requires<[HasMips64, HasStdEnc]>;
let Predicates = [NotFP64bit, HasStdEnc] in {
- def MOVZ_I_D32 : CMov_I_F_FT<"movz.d", CPURegs, AFGR64, IIFmove>,
+ def MOVZ_I_D32 : CMov_I_F_FT<"movz.d", GPR32Opnd, AFGR64Opnd, IIFmove>,
CMov_I_F_FM<18, 17>;
- def MOVN_I_D32 : CMov_I_F_FT<"movn.d", CPURegs, AFGR64, IIFmove>,
+ def MOVN_I_D32 : CMov_I_F_FT<"movn.d", GPR32Opnd, AFGR64Opnd, IIFmove>,
CMov_I_F_FM<19, 17>;
}
-let Predicates = [IsFP64bit, HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def MOVZ_I_D64 : CMov_I_F_FT<"movz.d", CPURegs, FGR64, IIFmove>,
+
+let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
+ def MOVZ_I_D64 : CMov_I_F_FT<"movz.d", GPR32Opnd, FGR64Opnd, IIFmove>,
CMov_I_F_FM<18, 17>;
- def MOVZ_I64_D64 : CMov_I_F_FT<"movz.d", CPU64Regs, FGR64, IIFmove>,
- CMov_I_F_FM<18, 17> {
- let isCodeGenOnly = 1;
- }
- def MOVN_I_D64 : CMov_I_F_FT<"movn.d", CPURegs, FGR64, IIFmove>,
+ def MOVN_I_D64 : CMov_I_F_FT<"movn.d", GPR32Opnd, FGR64Opnd, IIFmove>,
CMov_I_F_FM<19, 17>;
- def MOVN_I64_D64 : CMov_I_F_FT<"movn.d", CPU64Regs, FGR64, IIFmove>,
- CMov_I_F_FM<19, 17> {
- let isCodeGenOnly = 1;
+ let isCodeGenOnly = 1 in {
+ def MOVZ_I64_D64 : CMov_I_F_FT<"movz.d", GPR64Opnd, FGR64Opnd,
+ IIFmove>, CMov_I_F_FM<18, 17>;
+ def MOVN_I64_D64 : CMov_I_F_FT<"movn.d", GPR64Opnd, FGR64Opnd,
+ IIFmove>, CMov_I_F_FM<19, 17>;
}
}
-def MOVT_I : CMov_F_I_FT<"movt", CPURegs, IIAlu, MipsCMovFP_T>, CMov_F_I_FM<1>;
-def MOVT_I64 : CMov_F_I_FT<"movt", CPU64Regs, IIAlu, MipsCMovFP_T>,
- CMov_F_I_FM<1>, Requires<[HasMips64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
-}
+def MOVT_I : MMRel, CMov_F_I_FT<"movt", GPR32Opnd, IIArith, MipsCMovFP_T>,
+ CMov_F_I_FM<1>;
-def MOVF_I : CMov_F_I_FT<"movf", CPURegs, IIAlu, MipsCMovFP_F>, CMov_F_I_FM<0>;
-def MOVF_I64 : CMov_F_I_FT<"movf", CPU64Regs, IIAlu, MipsCMovFP_F>,
- CMov_F_I_FM<0>, Requires<[HasMips64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
-}
+let isCodeGenOnly = 1 in
+def MOVT_I64 : CMov_F_I_FT<"movt", GPR64Opnd, IIArith, MipsCMovFP_T>,
+ CMov_F_I_FM<1>, Requires<[HasMips64, HasStdEnc]>;
+
+def MOVF_I : MMRel, CMov_F_I_FT<"movf", GPR32Opnd, IIArith, MipsCMovFP_F>,
+ CMov_F_I_FM<0>;
-def MOVT_S : CMov_F_F_FT<"movt.s", FGR32, IIFmove, MipsCMovFP_T>,
+let isCodeGenOnly = 1 in
+def MOVF_I64 : CMov_F_I_FT<"movf", GPR64Opnd, IIArith, MipsCMovFP_F>,
+ CMov_F_I_FM<0>, Requires<[HasMips64, HasStdEnc]>;
+
+def MOVT_S : CMov_F_F_FT<"movt.s", FGR32Opnd, IIFmove, MipsCMovFP_T>,
CMov_F_F_FM<16, 1>;
-def MOVF_S : CMov_F_F_FT<"movf.s", FGR32, IIFmove, MipsCMovFP_F>,
+def MOVF_S : CMov_F_F_FT<"movf.s", FGR32Opnd, IIFmove, MipsCMovFP_F>,
CMov_F_F_FM<16, 0>;
let Predicates = [NotFP64bit, HasStdEnc] in {
- def MOVT_D32 : CMov_F_F_FT<"movt.d", AFGR64, IIFmove, MipsCMovFP_T>,
+ def MOVT_D32 : CMov_F_F_FT<"movt.d", AFGR64Opnd, IIFmove, MipsCMovFP_T>,
CMov_F_F_FM<17, 1>;
- def MOVF_D32 : CMov_F_F_FT<"movf.d", AFGR64, IIFmove, MipsCMovFP_F>,
+ def MOVF_D32 : CMov_F_F_FT<"movf.d", AFGR64Opnd, IIFmove, MipsCMovFP_F>,
CMov_F_F_FM<17, 0>;
}
-let Predicates = [IsFP64bit, HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def MOVT_D64 : CMov_F_F_FT<"movt.d", FGR64, IIFmove, MipsCMovFP_T>,
+
+let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
+ def MOVT_D64 : CMov_F_F_FT<"movt.d", FGR64Opnd, IIFmove, MipsCMovFP_T>,
CMov_F_F_FM<17, 1>;
- def MOVF_D64 : CMov_F_F_FT<"movf.d", FGR64, IIFmove, MipsCMovFP_F>,
+ def MOVF_D64 : CMov_F_F_FT<"movf.d", FGR64Opnd, IIFmove, MipsCMovFP_F>,
CMov_F_F_FM<17, 0>;
}
// Instantiation of conditional move patterns.
-defm : MovzPats0<CPURegs, CPURegs, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>;
-defm : MovzPats1<CPURegs, CPURegs, MOVZ_I_I, XOR>;
-defm : MovzPats2<CPURegs, CPURegs, MOVZ_I_I, XORi>;
+defm : MovzPats0<GPR32, GPR32, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>;
+defm : MovzPats1<GPR32, GPR32, MOVZ_I_I, XOR>;
+defm : MovzPats2<GPR32, GPR32, MOVZ_I_I, XORi>;
let Predicates = [HasMips64, HasStdEnc] in {
- defm : MovzPats0<CPURegs, CPU64Regs, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>;
- defm : MovzPats0<CPU64Regs, CPURegs, MOVZ_I_I, SLT64, SLTu64, SLTi64,
+ defm : MovzPats0<GPR32, GPR64, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>;
+ defm : MovzPats0<GPR64, GPR32, MOVZ_I_I, SLT64, SLTu64, SLTi64,
SLTiu64>;
- defm : MovzPats0<CPU64Regs, CPU64Regs, MOVZ_I_I64, SLT64, SLTu64, SLTi64,
+ defm : MovzPats0<GPR64, GPR64, MOVZ_I_I64, SLT64, SLTu64, SLTi64,
SLTiu64>;
- defm : MovzPats1<CPURegs, CPU64Regs, MOVZ_I_I64, XOR>;
- defm : MovzPats1<CPU64Regs, CPURegs, MOVZ_I64_I, XOR64>;
- defm : MovzPats1<CPU64Regs, CPU64Regs, MOVZ_I64_I64, XOR64>;
- defm : MovzPats2<CPURegs, CPU64Regs, MOVZ_I_I64, XORi>;
- defm : MovzPats2<CPU64Regs, CPURegs, MOVZ_I64_I, XORi64>;
- defm : MovzPats2<CPU64Regs, CPU64Regs, MOVZ_I64_I64, XORi64>;
+ defm : MovzPats1<GPR32, GPR64, MOVZ_I_I64, XOR>;
+ defm : MovzPats1<GPR64, GPR32, MOVZ_I64_I, XOR64>;
+ defm : MovzPats1<GPR64, GPR64, MOVZ_I64_I64, XOR64>;
+ defm : MovzPats2<GPR32, GPR64, MOVZ_I_I64, XORi>;
+ defm : MovzPats2<GPR64, GPR32, MOVZ_I64_I, XORi64>;
+ defm : MovzPats2<GPR64, GPR64, MOVZ_I64_I64, XORi64>;
}
-defm : MovnPats<CPURegs, CPURegs, MOVN_I_I, XOR>;
+defm : MovnPats<GPR32, GPR32, MOVN_I_I, XOR>;
let Predicates = [HasMips64, HasStdEnc] in {
- defm : MovnPats<CPURegs, CPU64Regs, MOVN_I_I64, XOR>;
- defm : MovnPats<CPU64Regs, CPURegs, MOVN_I64_I, XOR64>;
- defm : MovnPats<CPU64Regs, CPU64Regs, MOVN_I64_I64, XOR64>;
+ defm : MovnPats<GPR32, GPR64, MOVN_I_I64, XOR>;
+ defm : MovnPats<GPR64, GPR32, MOVN_I64_I, XOR64>;
+ defm : MovnPats<GPR64, GPR64, MOVN_I64_I64, XOR64>;
}
-defm : MovzPats0<CPURegs, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>;
-defm : MovzPats1<CPURegs, FGR32, MOVZ_I_S, XOR>;
-defm : MovnPats<CPURegs, FGR32, MOVN_I_S, XOR>;
+defm : MovzPats0<GPR32, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>;
+defm : MovzPats1<GPR32, FGR32, MOVZ_I_S, XOR>;
+defm : MovnPats<GPR32, FGR32, MOVN_I_S, XOR>;
let Predicates = [HasMips64, HasStdEnc] in {
- defm : MovzPats0<CPU64Regs, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64,
+ defm : MovzPats0<GPR64, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64,
SLTiu64>;
- defm : MovzPats1<CPU64Regs, FGR32, MOVZ_I64_S, XOR64>;
- defm : MovnPats<CPU64Regs, FGR32, MOVN_I64_S, XOR64>;
+ defm : MovzPats1<GPR64, FGR32, MOVZ_I64_S, XOR64>;
+ defm : MovnPats<GPR64, FGR32, MOVN_I64_S, XOR64>;
}
let Predicates = [NotFP64bit, HasStdEnc] in {
- defm : MovzPats0<CPURegs, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>;
- defm : MovzPats1<CPURegs, AFGR64, MOVZ_I_D32, XOR>;
- defm : MovnPats<CPURegs, AFGR64, MOVN_I_D32, XOR>;
+ defm : MovzPats0<GPR32, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>;
+ defm : MovzPats1<GPR32, AFGR64, MOVZ_I_D32, XOR>;
+ defm : MovnPats<GPR32, AFGR64, MOVN_I_D32, XOR>;
}
let Predicates = [IsFP64bit, HasStdEnc] in {
- defm : MovzPats0<CPURegs, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>;
- defm : MovzPats0<CPU64Regs, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64,
+ defm : MovzPats0<GPR32, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>;
+ defm : MovzPats0<GPR64, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64,
SLTiu64>;
- defm : MovzPats1<CPURegs, FGR64, MOVZ_I_D64, XOR>;
- defm : MovzPats1<CPU64Regs, FGR64, MOVZ_I64_D64, XOR64>;
- defm : MovnPats<CPURegs, FGR64, MOVN_I_D64, XOR>;
- defm : MovnPats<CPU64Regs, FGR64, MOVN_I64_D64, XOR64>;
+ defm : MovzPats1<GPR32, FGR64, MOVZ_I_D64, XOR>;
+ defm : MovzPats1<GPR64, FGR64, MOVZ_I64_D64, XOR64>;
+ defm : MovnPats<GPR32, FGR64, MOVN_I_D64, XOR>;
+ defm : MovnPats<GPR64, FGR64, MOVN_I64_D64, XOR64>;
}
diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp
index 1951324cf1a1..c46bbacf6585 100644
--- a/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -9,9 +9,7 @@
//
//
// This pass is used to make Pc relative loads of constants.
-// For now, only Mips16 will use this. While it has the same name and
-// uses many ideas from the LLVM ARM Constant Island Pass, it's not intended
-// to reuse any of the code from the ARM version.
+// For now, only Mips16 will use this.
//
// Loading constants inline is expensive on Mips16 and it's in general better
// to place the constant nearby in code space and then it can be loaded with a
@@ -27,32 +25,244 @@
#include "Mips.h"
#include "MCTargetDesc/MipsBaseInfo.h"
+#include "Mips16InstrInfo.h"
+#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
using namespace llvm;
+STATISTIC(NumCPEs, "Number of constpool entries");
+STATISTIC(NumSplit, "Number of uncond branches inserted");
+STATISTIC(NumCBrFixed, "Number of cond branches fixed");
+STATISTIC(NumUBrFixed, "Number of uncond branches fixed");
+
+// FIXME: This option should be removed once it has received sufficient testing.
+static cl::opt<bool>
+AlignConstantIslands("mips-align-constant-islands", cl::Hidden, cl::init(true),
+ cl::desc("Align constant islands in code"));
+
+
+// Rather than run "make check" tests with huge amounts of code, we force
+// the test to use this amount.
+//
+static cl::opt<int> ConstantIslandsSmallOffset(
+ "mips-constant-islands-small-offset",
+ cl::init(0),
+ cl::desc("Make small offsets be this amount for testing purposes"),
+ cl::Hidden);
+
+//
+// For testing purposes we tell it to not use relaxed load forms so that it
+// will split blocks.
+//
+static cl::opt<bool> NoLoadRelaxation(
+ "mips-constant-islands-no-load-relaxation",
+ cl::init(false),
+ cl::desc("Don't relax loads to long loads - for testing purposes"),
+ cl::Hidden);
+
+
namespace {
+
+
typedef MachineBasicBlock::iterator Iter;
typedef MachineBasicBlock::reverse_iterator ReverseIter;
+ /// MipsConstantIslands - Due to limited PC-relative displacements, Mips
+ /// requires constant pool entries to be scattered among the instructions
+ /// inside a function. To do this, it completely ignores the normal LLVM
+ /// constant pool; instead, it places constants wherever it feels like with
+ /// special instructions.
+ ///
+ /// The terminology used in this pass includes:
+ /// Islands - Clumps of constants placed in the function.
+ /// Water - Potential places where an island could be formed.
+ /// CPE - A constant pool entry that has been placed somewhere, which
+ /// tracks a list of users.
+
class MipsConstantIslands : public MachineFunctionPass {
+ /// BasicBlockInfo - Information about the offset and size of a single
+ /// basic block.
+ struct BasicBlockInfo {
+ /// Offset - Distance from the beginning of the function to the beginning
+ /// of this basic block.
+ ///
+ /// Offsets are computed assuming worst case padding before an aligned
+ /// block. This means that subtracting basic block offsets always gives a
+ /// conservative estimate of the real distance which may be smaller.
+ ///
+ /// Because worst case padding is used, the computed offset of an aligned
+ /// block may not actually be aligned.
+ unsigned Offset;
+
+ /// Size - Size of the basic block in bytes. If the block contains
+ /// inline assembly, this is a worst case estimate.
+ ///
+ /// The size does not include any alignment padding whether from the
+ /// beginning of the block, or from an aligned jump table at the end.
+ unsigned Size;
+
+ // FIXME: ignore LogAlign for this patch
+ //
+ unsigned postOffset(unsigned LogAlign = 0) const {
+ unsigned PO = Offset + Size;
+ return PO;
+ }
+
+ BasicBlockInfo() : Offset(0), Size(0) {}
+
+ };
+
+ std::vector<BasicBlockInfo> BBInfo;
+
+ /// WaterList - A sorted list of basic blocks where islands could be placed
+ /// (i.e. blocks that don't fall through to the following block, due
+ /// to a return, unreachable, or unconditional branch).
+ std::vector<MachineBasicBlock*> WaterList;
+
+ /// NewWaterList - The subset of WaterList that was created since the
+ /// previous iteration by inserting unconditional branches.
+ SmallSet<MachineBasicBlock*, 4> NewWaterList;
+
+ typedef std::vector<MachineBasicBlock*>::iterator water_iterator;
+
+ /// CPUser - One user of a constant pool, keeping the machine instruction
+ /// pointer, the constant pool being referenced, and the max displacement
+ /// allowed from the instruction to the CP. The HighWaterMark records the
+ /// highest basic block where a new CPEntry can be placed. To ensure this
+ /// pass terminates, the CP entries are initially placed at the end of the
+ /// function and then move monotonically to lower addresses. The
+ /// exception to this rule is when the current CP entry for a particular
+ /// CPUser is out of range, but there is another CP entry for the same
+ /// constant value in range. We want to use the existing in-range CP
+ /// entry, but if it later moves out of range, the search for new water
+ /// should resume where it left off. The HighWaterMark is used to record
+ /// that point.
+ struct CPUser {
+ MachineInstr *MI;
+ MachineInstr *CPEMI;
+ MachineBasicBlock *HighWaterMark;
+ private:
+ unsigned MaxDisp;
+ unsigned LongFormMaxDisp; // mips16 has 16/32 bit instructions
+ // with different displacements
+ unsigned LongFormOpcode;
+ public:
+ bool NegOk;
+ CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
+ bool neg,
+ unsigned longformmaxdisp, unsigned longformopcode)
+ : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp),
+ LongFormMaxDisp(longformmaxdisp), LongFormOpcode(longformopcode),
+ NegOk(neg){
+ HighWaterMark = CPEMI->getParent();
+ }
+ /// getMaxDisp - Returns the maximum displacement supported by MI.
+ unsigned getMaxDisp() const {
+ unsigned xMaxDisp = ConstantIslandsSmallOffset?
+ ConstantIslandsSmallOffset: MaxDisp;
+ return xMaxDisp;
+ }
+ void setMaxDisp(unsigned val) {
+ MaxDisp = val;
+ }
+ unsigned getLongFormMaxDisp() const {
+ return LongFormMaxDisp;
+ }
+ unsigned getLongFormOpcode() const {
+ return LongFormOpcode;
+ }
+ };
+
+ /// CPUsers - Keep track of all of the machine instructions that use various
+ /// constant pools and their max displacement.
+ std::vector<CPUser> CPUsers;
+
+ /// CPEntry - One per constant pool entry, keeping the machine instruction
+ /// pointer, the constpool index, and the number of CPUser's which
+ /// reference this entry.
+ struct CPEntry {
+ MachineInstr *CPEMI;
+ unsigned CPI;
+ unsigned RefCount;
+ CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
+ : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
+ };
+
+ /// CPEntries - Keep track of all of the constant pool entry machine
+ /// instructions. For each original constpool index (i.e. those that
+ /// existed upon entry to this pass), it keeps a vector of entries.
+ /// Original elements are cloned as we go along; the clones are
+ /// put in the vector of the original element, but have distinct CPIs.
+ std::vector<std::vector<CPEntry> > CPEntries;
+
+ /// ImmBranch - One per immediate branch, keeping the machine instruction
+ /// pointer, conditional or unconditional, the max displacement,
+ /// and (if isCond is true) the corresponding unconditional branch
+ /// opcode.
+ struct ImmBranch {
+ MachineInstr *MI;
+ unsigned MaxDisp : 31;
+ bool isCond : 1;
+ int UncondBr;
+ ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, int ubr)
+ : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
+ };
+
+ /// ImmBranches - Keep track of all the immediate branch instructions.
+ ///
+ std::vector<ImmBranch> ImmBranches;
+
+ /// HasFarJump - True if any far jump instruction has been emitted during
+ /// the branch fix up pass.
+ bool HasFarJump;
+
+ const TargetMachine &TM;
+ bool IsPIC;
+ unsigned ABI;
+ const MipsSubtarget *STI;
+ const Mips16InstrInfo *TII;
+ MipsFunctionInfo *MFI;
+ MachineFunction *MF;
+ MachineConstantPool *MCP;
+
+ unsigned PICLabelUId;
+ bool PrescannedForConstants;
+
+ void initPICLabelUId(unsigned UId) {
+ PICLabelUId = UId;
+ }
+
+
+ unsigned createPICLabelUId() {
+ return PICLabelUId++;
+ }
+
public:
static char ID;
MipsConstantIslands(TargetMachine &tm)
: MachineFunctionPass(ID), TM(tm),
- TII(static_cast<const MipsInstrInfo*>(tm.getInstrInfo())),
IsPIC(TM.getRelocationModel() == Reloc::PIC_),
- ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()) {}
+ ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()),
+ STI(&TM.getSubtarget<MipsSubtarget>()), MF(0), MCP(0),
+ PrescannedForConstants(false){}
virtual const char *getPassName() const {
return "Mips Constant Islands";
@@ -60,30 +270,1264 @@ namespace {
bool runOnMachineFunction(MachineFunction &F);
- private:
+ void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
+ CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
+ unsigned getCPELogAlign(const MachineInstr *CPEMI);
+ void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
+ unsigned getOffsetOf(MachineInstr *MI) const;
+ unsigned getUserOffset(CPUser&) const;
+ void dumpBBs();
+ void verify();
+
+ bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
+ unsigned Disp, bool NegativeOK);
+ bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
+ const CPUser &U);
+
+ bool isLongFormOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
+ const CPUser &U);
+ void computeBlockSize(MachineBasicBlock *MBB);
+ MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
+ void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
+ void adjustBBOffsetsAfter(MachineBasicBlock *BB);
+ bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
+ int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
+ int findLongFormInRangeCPEntry(CPUser& U, unsigned UserOffset);
+ bool findAvailableWater(CPUser&U, unsigned UserOffset,
+ water_iterator &WaterIter);
+ void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
+ MachineBasicBlock *&NewMBB);
+ bool handleConstantPoolUser(unsigned CPUserIndex);
+ void removeDeadCPEMI(MachineInstr *CPEMI);
+ bool removeUnusedCPEntries();
+ bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
+ MachineInstr *CPEMI, unsigned Disp, bool NegOk,
+ bool DoDump = false);
+ bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
+ CPUser &U, unsigned &Growth);
+ bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
+ bool fixupImmediateBr(ImmBranch &Br);
+ bool fixupConditionalBr(ImmBranch &Br);
+ bool fixupUnconditionalBr(ImmBranch &Br);
- const TargetMachine &TM;
- const MipsInstrInfo *TII;
- bool IsPIC;
- unsigned ABI;
+ void prescanForConstants();
+
+ private:
};
char MipsConstantIslands::ID = 0;
} // end of anonymous namespace
+
+bool MipsConstantIslands::isLongFormOffsetInRange
+ (unsigned UserOffset, unsigned TrialOffset,
+ const CPUser &U) {
+ return isOffsetInRange(UserOffset, TrialOffset,
+ U.getLongFormMaxDisp(), U.NegOk);
+}
+
+bool MipsConstantIslands::isOffsetInRange
+ (unsigned UserOffset, unsigned TrialOffset,
+ const CPUser &U) {
+ return isOffsetInRange(UserOffset, TrialOffset,
+ U.getMaxDisp(), U.NegOk);
+}
+/// print block size and offset information - debugging
+void MipsConstantIslands::dumpBBs() {
+ DEBUG({
+ for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
+ const BasicBlockInfo &BBI = BBInfo[J];
+ dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
+ << format(" size=%#x\n", BBInfo[J].Size);
+ }
+ });
+}
/// createMipsConstantIslandPass - Returns a pass that places constants
/// into constant islands for Mips16 functions.
FunctionPass *llvm::createMipsConstantIslandPass(MipsTargetMachine &tm) {
return new MipsConstantIslands(tm);
}
-bool MipsConstantIslands::runOnMachineFunction(MachineFunction &F) {
+bool MipsConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// The intention is for this to be a mips16 only pass for now
// FIXME:
- // if (!TM.getSubtarget<MipsSubtarget>().inMips16Mode())
- // return false;
+ MF = &mf;
+ MCP = mf.getConstantPool();
+ DEBUG(dbgs() << "constant island machine function " << "\n");
+ if (!TM.getSubtarget<MipsSubtarget>().inMips16Mode() ||
+ !MipsSubtarget::useConstantIslands()) {
+ return false;
+ }
+ TII = (const Mips16InstrInfo*)MF->getTarget().getInstrInfo();
+ MFI = MF->getInfo<MipsFunctionInfo>();
+ DEBUG(dbgs() << "constant island processing " << "\n");
+ //
+ // We will need to make a predetermination of whether there are any
+ // constants we need to put in constant islands. TBD.
+ //
+ if (!PrescannedForConstants) prescanForConstants();
+
+ HasFarJump = false;
+ // This pass invalidates liveness information when it splits basic blocks.
+ MF->getRegInfo().invalidateLiveness();
+
+ // Renumber all of the machine basic blocks in the function, guaranteeing that
+ // the numbers agree with the position of the block in the function.
+ MF->RenumberBlocks();
+
+ bool MadeChange = false;
+
+ // Perform the initial placement of the constant pool entries. To start with,
+ // we put them all at the end of the function.
+ std::vector<MachineInstr*> CPEMIs;
+ if (!MCP->isEmpty())
+ doInitialPlacement(CPEMIs);
+
+ /// The next UID to take is the first unused one.
+ initPICLabelUId(CPEMIs.size());
+
+ // Do the initial scan of the function, building up information about the
+ // sizes of each block, the location of all the water, and finding all of the
+ // constant pool users.
+ initializeFunctionInfo(CPEMIs);
+ CPEMIs.clear();
+ DEBUG(dumpBBs());
+
+ /// Remove dead constant pool entries.
+ MadeChange |= removeUnusedCPEntries();
+
+ // Iteratively place constant pool entries and fix up branches until there
+ // is no change.
+ unsigned NoCPIters = 0, NoBRIters = 0;
+ (void)NoBRIters;
+ while (true) {
+ DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
+ bool CPChange = false;
+ for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
+ CPChange |= handleConstantPoolUser(i);
+ if (CPChange && ++NoCPIters > 30)
+ report_fatal_error("Constant Island pass failed to converge!");
+ DEBUG(dumpBBs());
+
+ // Clear NewWaterList now. If we split a block for branches, it should
+ // appear as "new water" for the next iteration of constant pool placement.
+ NewWaterList.clear();
+
+ DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
+ bool BRChange = false;
+ for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
+ BRChange |= fixupImmediateBr(ImmBranches[i]);
+ if (BRChange && ++NoBRIters > 30)
+ report_fatal_error("Branch Fix Up pass failed to converge!");
+ DEBUG(dumpBBs());
+ if (!CPChange && !BRChange)
+ break;
+ MadeChange = true;
+ }
+
+ DEBUG(dbgs() << '\n'; dumpBBs());
+
+ BBInfo.clear();
+ WaterList.clear();
+ CPUsers.clear();
+ CPEntries.clear();
+ ImmBranches.clear();
+ return MadeChange;
+}
+
+/// doInitialPlacement - Perform the initial placement of the constant pool
+/// entries. To start with, we put them all at the end of the function.
+void
+MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
+ // Create the basic block to hold the CPE's.
+ MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
+ MF->push_back(BB);
+
+
+ // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
+ unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
+
+ // Mark the basic block as required by the const-pool.
+ // If AlignConstantIslands isn't set, use 4-byte alignment for everything.
+ BB->setAlignment(AlignConstantIslands ? MaxAlign : 2);
+
+ // The function needs to be as aligned as the basic blocks. The linker may
+ // move functions around based on their alignment.
+ MF->ensureAlignment(BB->getAlignment());
+
+ // Order the entries in BB by descending alignment. That ensures correct
+ // alignment of all entries as long as BB is sufficiently aligned. Keep
+ // track of the insertion point for each alignment. We are going to bucket
+ // sort the entries as they are created.
+ SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
+
+ // Add all of the constants from the constant pool to the end block, use an
+ // identity mapping of CPI's to CPE's.
+ const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
+
+ const DataLayout &TD = *MF->getTarget().getDataLayout();
+ for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
+ unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
+ assert(Size >= 4 && "Too small constant pool entry");
+ unsigned Align = CPs[i].getAlignment();
+ assert(isPowerOf2_32(Align) && "Invalid alignment");
+ // Verify that all constant pool entries are a multiple of their alignment.
+ // If not, we would have to pad them out so that instructions stay aligned.
+ assert((Size % Align) == 0 && "CP Entry not multiple of 4 bytes!");
+
+ // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
+ unsigned LogAlign = Log2_32(Align);
+ MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
+
+ MachineInstr *CPEMI =
+ BuildMI(*BB, InsAt, DebugLoc(), TII->get(Mips::CONSTPOOL_ENTRY))
+ .addImm(i).addConstantPoolIndex(i).addImm(Size);
+
+ CPEMIs.push_back(CPEMI);
+
+ // Ensure that future entries with higher alignment get inserted before
+ // CPEMI. This is bucket sort with iterators.
+ for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
+ if (InsPoint[a] == InsAt)
+ InsPoint[a] = CPEMI;
+ // Add a new CPEntry, but no corresponding CPUser yet.
+ std::vector<CPEntry> CPEs;
+ CPEs.push_back(CPEntry(CPEMI, i));
+ CPEntries.push_back(CPEs);
+ ++NumCPEs;
+ DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
+ << Size << ", align = " << Align <<'\n');
+ }
+ DEBUG(BB->dump());
+}
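
The insertion-point bookkeeping above amounts to a bucket sort keyed on alignment: each entry is inserted before anything less aligned than it, and every more-aligned insertion point that coincided with the chosen spot is pulled forward to the new entry. A minimal standalone sketch of the same idea, assuming a hypothetical Entry type and placeByDescendingAlign helper that are not part of this patch:

    #include <list>
    #include <vector>

    // Only the alignment of an entry matters for the ordering.
    struct Entry { unsigned LogAlign; unsigned Index; };

    // Insert every entry of Input into Block so that Block ends up ordered by
    // non-increasing LogAlign, preserving input order within each alignment.
    static void placeByDescendingAlign(std::list<Entry> &Block,
                                       const std::vector<Entry> &Input,
                                       unsigned MaxLogAlign) {
      // One insertion point per alignment, all starting at the end of Block.
      std::vector<std::list<Entry>::iterator> InsPoint(MaxLogAlign + 1,
                                                       Block.end());
      for (unsigned i = 0, e = Input.size(); i != e; ++i) {
        const Entry &E = Input[i];
        std::list<Entry>::iterator At = InsPoint[E.LogAlign];
        // Goes before every entry with a smaller alignment.
        std::list<Entry>::iterator It = Block.insert(At, E);
        // Future entries with a higher alignment must be placed before It.
        for (unsigned A = E.LogAlign + 1; A <= MaxLogAlign; ++A)
          if (InsPoint[A] == At)
            InsPoint[A] = It;
      }
    }

Keeping the entries sorted by descending alignment is what lets a sufficiently aligned island block guarantee the alignment of every entry it holds, which is the invariant doInitialPlacement relies on.
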
+
+/// BBHasFallthrough - Return true if the specified basic block can fallthrough
+/// into the block immediately after it.
+static bool BBHasFallthrough(MachineBasicBlock *MBB) {
+ // Get the next machine basic block in the function.
+ MachineFunction::iterator MBBI = MBB;
+ // Can't fall off end of function.
+ if (llvm::next(MBBI) == MBB->getParent()->end())
+ return false;
+
+ MachineBasicBlock *NextBB = llvm::next(MBBI);
+ for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
+ E = MBB->succ_end(); I != E; ++I)
+ if (*I == NextBB)
+ return true;
+
+ return false;
+}
+
+/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
+/// look up the corresponding CPEntry.
+MipsConstantIslands::CPEntry
+*MipsConstantIslands::findConstPoolEntry(unsigned CPI,
+ const MachineInstr *CPEMI) {
+ std::vector<CPEntry> &CPEs = CPEntries[CPI];
+ // Number of entries per constpool index should be small, just do a
+ // linear search.
+ for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
+ if (CPEs[i].CPEMI == CPEMI)
+ return &CPEs[i];
+ }
+ return NULL;
+}
+
+/// getCPELogAlign - Returns the required alignment of the constant pool entry
+/// represented by CPEMI. Alignment is measured in log2(bytes) units.
+unsigned MipsConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
+ assert(CPEMI && CPEMI->getOpcode() == Mips::CONSTPOOL_ENTRY);
+
+ // Everything is 4-byte aligned unless AlignConstantIslands is set.
+ if (!AlignConstantIslands)
+ return 2;
+
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
+ unsigned Align = MCP->getConstants()[CPI].getAlignment();
+ assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
+ return Log2_32(Align);
+}
+
+/// initializeFunctionInfo - Do the initial scan of the function, building up
+/// information about the sizes of each block, the location of all the water,
+/// and finding all of the constant pool users.
+void MipsConstantIslands::
+initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
+ BBInfo.clear();
+ BBInfo.resize(MF->getNumBlockIDs());
+
+ // First thing, compute the size of all basic blocks, and see if the function
+ // has any inline assembly in it. If so, we have to be conservative about
+ // alignment assumptions, as we don't know for sure the size of any
+ // instructions in the inline assembly.
+ for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I)
+ computeBlockSize(I);
+
+
+ // Compute block offsets.
+ adjustBBOffsetsAfter(MF->begin());
+
+ // Now go back through the instructions and build up our data structures.
+ for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
+ MBBI != E; ++MBBI) {
+ MachineBasicBlock &MBB = *MBBI;
+
+ // If this block doesn't fall through into the next MBB, then this is
+ // 'water' where a constant pool island could be placed.
+ if (!BBHasFallthrough(&MBB))
+ WaterList.push_back(&MBB);
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ I != E; ++I) {
+ if (I->isDebugValue())
+ continue;
+
+ int Opc = I->getOpcode();
+ if (I->isBranch()) {
+ bool isCond = false;
+ unsigned Bits = 0;
+ unsigned Scale = 1;
+ int UOpc = Opc;
+ switch (Opc) {
+ default:
+ continue; // Ignore other branches for now
+ case Mips::Bimm16:
+ Bits = 11;
+ Scale = 2;
+ isCond = false;
+ break;
+ case Mips::BimmX16:
+ Bits = 16;
+ Scale = 2;
+ isCond = false;
+ }
+ // Record this immediate branch.
+ unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
+ ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
+ }
+
+ if (Opc == Mips::CONSTPOOL_ENTRY)
+ continue;
+
+
+ // Scan the instructions for constant pool operands.
+ for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
+ if (I->getOperand(op).isCPI()) {
+
+ // We found one. The addressing mode tells us the max displacement
+ // from the PC that this instruction permits.
+
+ // Basic size info comes from the TSFlags field.
+ unsigned Bits = 0;
+ unsigned Scale = 1;
+ bool NegOk = false;
+ unsigned LongFormBits = 0;
+ unsigned LongFormScale = 0;
+ unsigned LongFormOpcode = 0;
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unknown addressing mode for CP reference!");
+ case Mips::LwRxPcTcp16:
+ Bits = 8;
+ Scale = 4;
+ LongFormOpcode = Mips::LwRxPcTcpX16;
+ LongFormBits = 16;
+ LongFormScale = 1;
+ break;
+ case Mips::LwRxPcTcpX16:
+ Bits = 16;
+ Scale = 1;
+ NegOk = true;
+ break;
+ }
+ // Remember that this is a user of a CP entry.
+ unsigned CPI = I->getOperand(op).getIndex();
+ MachineInstr *CPEMI = CPEMIs[CPI];
+ unsigned MaxOffs = ((1 << Bits)-1) * Scale;
+ unsigned LongFormMaxOffs = ((1 << LongFormBits)-1) * LongFormScale;
+ CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk,
+ LongFormMaxOffs, LongFormOpcode));
+
+ // Increment corresponding CPEntry reference count.
+ CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
+ assert(CPE && "Cannot find a corresponding CPEntry!");
+ CPE->RefCount++;
+
+ // Instructions can only use one CP entry, don't bother scanning the
+ // rest of the operands.
+ break;
+
+ }
+
+ }
+ }
+
+}
+
+/// computeBlockSize - Compute the size and some alignment information for MBB.
+/// This function updates BBInfo directly.
+void MipsConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
+ BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
+ BBI.Size = 0;
+
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
+ ++I)
+ BBI.Size += TII->GetInstSizeInBytes(I);
+
+}
+
+/// getOffsetOf - Return the current offset of the specified machine instruction
+/// from the start of the function. This offset changes as stuff is moved
+/// around inside the function.
+unsigned MipsConstantIslands::getOffsetOf(MachineInstr *MI) const {
+ MachineBasicBlock *MBB = MI->getParent();
+
+ // The offset is composed of two things: the sum of the sizes of all MBB's
+ // before this instruction's block, and the offset from the start of the block
+ // it is in.
+ unsigned Offset = BBInfo[MBB->getNumber()].Offset;
+
+ // Sum instructions before MI in MBB.
+ for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
+ assert(I != MBB->end() && "Didn't find MI in its own basic block?");
+ Offset += TII->GetInstSizeInBytes(I);
+ }
+ return Offset;
+}
+
+/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
+/// ID.
+static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
+ const MachineBasicBlock *RHS) {
+ return LHS->getNumber() < RHS->getNumber();
+}
+
+/// updateForInsertedWaterBlock - When a block is newly inserted into the
+/// machine function, it upsets all of the block numbers. Renumber the blocks
+/// and update the arrays that parallel this numbering.
+void MipsConstantIslands::updateForInsertedWaterBlock
+ (MachineBasicBlock *NewBB) {
+ // Renumber the MBB's to keep them consecutive.
+ NewBB->getParent()->RenumberBlocks(NewBB);
+
+ // Insert an entry into BBInfo to align it properly with the (newly
+ // renumbered) block numbers.
+ BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
+
+ // Next, update WaterList. Specifically, we need to add NewMBB as having
+ // available water after it.
+ water_iterator IP =
+ std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
+ CompareMBBNumbers);
+ WaterList.insert(IP, NewBB);
+}
+
+unsigned MipsConstantIslands::getUserOffset(CPUser &U) const {
+ return getOffsetOf(U.MI);
+}
+
+/// Split the basic block containing MI into two blocks, which are joined by
+/// an unconditional branch. Update data structures and renumber blocks to
+/// account for this change and returns the newly created block.
+MachineBasicBlock *MipsConstantIslands::splitBlockBeforeInstr
+ (MachineInstr *MI) {
+ MachineBasicBlock *OrigBB = MI->getParent();
+
+ // Create a new MBB for the code after the OrigBB.
+ MachineBasicBlock *NewBB =
+ MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
+ MachineFunction::iterator MBBI = OrigBB; ++MBBI;
+ MF->insert(MBBI, NewBB);
+
+ // Splice the instructions starting with MI over to NewBB.
+ NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
+
+ // Add an unconditional branch from OrigBB to NewBB.
+ // Note the new unconditional branch is not being recorded.
+ // There doesn't seem to be meaningful DebugInfo available; this doesn't
+ // correspond to anything in the source.
+ BuildMI(OrigBB, DebugLoc(), TII->get(Mips::Bimm16)).addMBB(NewBB);
+ ++NumSplit;
+
+ // Update the CFG. All succs of OrigBB are now succs of NewBB.
+ NewBB->transferSuccessors(OrigBB);
+
+ // OrigBB branches to NewBB.
+ OrigBB->addSuccessor(NewBB);
+
+ // Update internal data structures to account for the newly inserted MBB.
+ // This is almost the same as updateForInsertedWaterBlock, except that
+ // the Water goes after OrigBB, not NewBB.
+ MF->RenumberBlocks(NewBB);
+
+ // Insert an entry into BBInfo to align it properly with the (newly
+ // renumbered) block numbers.
+ BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
+
+ // Next, update WaterList. Specifically, we need to add OrigMBB as having
+ // available water after it (but not if it's already there, which happens
+ // when splitting before a conditional branch that is followed by an
+ // unconditional branch - in that case we want to insert NewBB).
+ water_iterator IP =
+ std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
+ CompareMBBNumbers);
+ MachineBasicBlock* WaterBB = *IP;
+ if (WaterBB == OrigBB)
+ WaterList.insert(llvm::next(IP), NewBB);
+ else
+ WaterList.insert(IP, OrigBB);
+ NewWaterList.insert(OrigBB);
+
+ // Figure out how large the OrigBB is. As the first half of the original
+ // block, it cannot contain a tablejump. The size includes
+ // the new jump we added. (It should be possible to do this without
+ // recounting everything, but it's very confusing, and this is rarely
+ // executed.)
+ computeBlockSize(OrigBB);
+
+ // Figure out how large the NewMBB is. As the second half of the original
+ // block, it may contain a tablejump.
+ computeBlockSize(NewBB);
+
+ // All BBOffsets following these blocks must be modified.
+ adjustBBOffsetsAfter(OrigBB);
+
+ return NewBB;
+}
+
+
+
+/// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
+/// reference) is within MaxDisp of TrialOffset (a proposed location of a
+/// constant pool entry).
+bool MipsConstantIslands::isOffsetInRange(unsigned UserOffset,
+ unsigned TrialOffset, unsigned MaxDisp,
+ bool NegativeOK) {
+ if (UserOffset <= TrialOffset) {
+ // User before the Trial.
+ if (TrialOffset - UserOffset <= MaxDisp)
+ return true;
+ } else if (NegativeOK) {
+ if (UserOffset - TrialOffset <= MaxDisp)
+ return true;
+ }
+ return false;
+}
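
In words: a constant placed after its user only needs to be within MaxDisp bytes, while one placed before the user is acceptable only when the instruction can encode a negative offset. A small self-checking restatement of the same predicate, using made-up offsets and an offsetInRange name that are not from this patch:

    #include <cassert>

    // Mirrors the check above: forward displacements are always allowed,
    // backward ones only when the encoding permits negative offsets.
    static bool offsetInRange(unsigned User, unsigned Trial, unsigned MaxDisp,
                              bool NegOk) {
      if (User <= Trial)
        return Trial - User <= MaxDisp;
      return NegOk && User - Trial <= MaxDisp;
    }

    int main() {
      assert(offsetInRange(0x100, 0x180, 0x400, false));  // forward, fits
      assert(!offsetInRange(0x100, 0x080, 0x400, false)); // backward, no NegOk
      assert(offsetInRange(0x100, 0x080, 0x400, true));   // backward, NegOk
      return 0;
    }
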
+
+/// isWaterInRange - Returns true if a CPE placed after the specified
+/// Water (a basic block) will be in range for the specific MI.
+///
+/// Compute how much the function will grow by inserting a CPE after Water.
+bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
+ MachineBasicBlock* Water, CPUser &U,
+ unsigned &Growth) {
+ unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
+ unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
+ unsigned NextBlockOffset, NextBlockAlignment;
+ MachineFunction::const_iterator NextBlock = Water;
+ if (++NextBlock == MF->end()) {
+ NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
+ NextBlockAlignment = 0;
+ } else {
+ NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
+ NextBlockAlignment = NextBlock->getAlignment();
+ }
+ unsigned Size = U.CPEMI->getOperand(2).getImm();
+ unsigned CPEEnd = CPEOffset + Size;
+
+ // The CPE may be able to hide in the alignment padding before the next
+ // block. It may also cause more padding to be required if it is more aligned
+ // than the next block.
+ if (CPEEnd > NextBlockOffset) {
+ Growth = CPEEnd - NextBlockOffset;
+ // Compute the padding that would go at the end of the CPE to align the next
+ // block.
+ Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);
+
+ // If the CPE is to be inserted before the instruction, that will raise
+ // the offset of the instruction. Also account for unknown alignment padding
+ // in blocks between CPE and the user.
+ if (CPEOffset < UserOffset)
+ UserOffset += Growth;
+ } else
+ // CPE fits in existing padding.
+ Growth = 0;
+
+ return isOffsetInRange(UserOffset, CPEOffset, U);
+}
+
+/// isCPEntryInRange - Returns true if the distance between specific MI and
+/// specific ConstPool entry instruction can fit in MI's displacement field.
+bool MipsConstantIslands::isCPEntryInRange
+ (MachineInstr *MI, unsigned UserOffset,
+ MachineInstr *CPEMI, unsigned MaxDisp,
+ bool NegOk, bool DoDump) {
+ unsigned CPEOffset = getOffsetOf(CPEMI);
+
+ if (DoDump) {
+ DEBUG({
+ unsigned Block = MI->getParent()->getNumber();
+ const BasicBlockInfo &BBI = BBInfo[Block];
+ dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
+ << " max delta=" << MaxDisp
+ << format(" insn address=%#x", UserOffset)
+ << " in BB#" << Block << ": "
+ << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
+ << format("CPE address=%#x offset=%+d: ", CPEOffset,
+ int(CPEOffset-UserOffset));
+ });
+ }
+
+ return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
+}
+
+#ifndef NDEBUG
+/// BBIsJumpedOver - Return true if the specified basic block's only predecessor
+/// unconditionally branches to its only successor.
+static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
+ if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
+ return false;
+ MachineBasicBlock *Succ = *MBB->succ_begin();
+ MachineBasicBlock *Pred = *MBB->pred_begin();
+ MachineInstr *PredMI = &Pred->back();
+ if (PredMI->getOpcode() == Mips::Bimm16)
+ return PredMI->getOperand(0).getMBB() == Succ;
+ return false;
+}
+#endif
+
+void MipsConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
+ unsigned BBNum = BB->getNumber();
+ for(unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
+ // Get the offset and known bits at the end of the layout predecessor.
+ // Include the alignment of the current block.
+ unsigned Offset = BBInfo[i - 1].Offset + BBInfo[i - 1].Size;
+ BBInfo[i].Offset = Offset;
+ }
+}
+
+/// decrementCPEReferenceCount - find the constant pool entry with index CPI
+/// and instruction CPEMI, and decrement its refcount. If the refcount
+/// becomes 0 remove the entry and instruction. Returns true if we removed
+/// the entry, false if we didn't.
+
+bool MipsConstantIslands::decrementCPEReferenceCount(unsigned CPI,
+ MachineInstr *CPEMI) {
+ // Find the old entry. Eliminate it if it is no longer used.
+ CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
+ assert(CPE && "Unexpected!");
+ if (--CPE->RefCount == 0) {
+ removeDeadCPEMI(CPEMI);
+ CPE->CPEMI = NULL;
+ --NumCPEs;
+ return true;
+ }
+ return false;
+}
+
+/// findInRangeCPEntry - see if the currently referenced CPE is in range;
+/// if not, see if an in-range clone of the CPE is in range, and if so,
+/// change the data structures so the user references the clone. Returns:
+/// 0 = no existing entry found
+/// 1 = entry found, and there were no code insertions or deletions
+/// 2 = entry found, and there were code insertions or deletions
+int MipsConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
+{
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+
+ // Check to see if the CPE is already in-range.
+ if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
+ true)) {
+ DEBUG(dbgs() << "In range\n");
+ return 1;
+ }
+
+ // No. Look for previously created clones of the CPE that are in range.
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ std::vector<CPEntry> &CPEs = CPEntries[CPI];
+ for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
+ // We already tried this one
+ if (CPEs[i].CPEMI == CPEMI)
+ continue;
+ // Removing CPEs can leave empty entries, skip
+ if (CPEs[i].CPEMI == NULL)
+ continue;
+ if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
+ U.NegOk)) {
+ DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
+ << CPEs[i].CPI << "\n");
+ // Point the CPUser node to the replacement
+ U.CPEMI = CPEs[i].CPEMI;
+ // Change the CPI in the instruction operand to refer to the clone.
+ for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
+ if (UserMI->getOperand(j).isCPI()) {
+ UserMI->getOperand(j).setIndex(CPEs[i].CPI);
+ break;
+ }
+ // Adjust the refcount of the clone...
+ CPEs[i].RefCount++;
+ // ...and the original. If we didn't remove the old entry, none of the
+ // addresses changed, so we don't need another pass.
+ return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
+ }
+ }
+ return 0;
+}
+
+/// findLongFormInRangeCPEntry - see if the currently referenced CPE is in
+/// range; this version checks if the longer form of the instruction can be
+/// used to satisfy things.
+/// if not, see if an in-range clone of the CPE is in range, and if so,
+/// change the data structures so the user references the clone. Returns:
+/// 0 = no existing entry found
+/// 1 = entry found, and there were no code insertions or deletions
+/// 2 = entry found, and there were code insertions or deletions
+int MipsConstantIslands::findLongFormInRangeCPEntry
+ (CPUser& U, unsigned UserOffset)
+{
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+
+ // Check to see if the CPE is already in-range.
+ if (isCPEntryInRange(UserMI, UserOffset, CPEMI,
+ U.getLongFormMaxDisp(), U.NegOk,
+ true)) {
+ DEBUG(dbgs() << "In range\n");
+ UserMI->setDesc(TII->get(U.getLongFormOpcode()));
+ U.setMaxDisp(U.getLongFormMaxDisp());
+ return 2; // instruction is longer length now
+ }
+
+ // No. Look for previously created clones of the CPE that are in range.
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ std::vector<CPEntry> &CPEs = CPEntries[CPI];
+ for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
+ // We already tried this one
+ if (CPEs[i].CPEMI == CPEMI)
+ continue;
+ // Removing CPEs can leave empty entries, skip
+ if (CPEs[i].CPEMI == NULL)
+ continue;
+ if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI,
+ U.getLongFormMaxDisp(), U.NegOk)) {
+ DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
+ << CPEs[i].CPI << "\n");
+ // Point the CPUser node to the replacement
+ U.CPEMI = CPEs[i].CPEMI;
+ // Change the CPI in the instruction operand to refer to the clone.
+ for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
+ if (UserMI->getOperand(j).isCPI()) {
+ UserMI->getOperand(j).setIndex(CPEs[i].CPI);
+ break;
+ }
+ // Adjust the refcount of the clone...
+ CPEs[i].RefCount++;
+ // ...and the original. If we didn't remove the old entry, none of the
+ // addresses changed, so we don't need another pass.
+ return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
+ }
+ }
+ return 0;
+}
+
+/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
+/// the specific unconditional branch instruction.
+static inline unsigned getUnconditionalBrDisp(int Opc) {
+ switch (Opc) {
+ case Mips::Bimm16:
+ return ((1<<10)-1)*2;
+ case Mips::BimmX16:
+ return ((1<<16)-1)*2;
+ default:
+ break;
+ }
+ return ((1<<16)-1)*2;
+}
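
The returned values are instances of the general formula for a PC-relative branch whose signed offset field has B bits and is scaled by S bytes: the largest forward displacement is ((1 << (B - 1)) - 1) * S. A small illustration, assuming a hypothetical maxForwardDisp helper that is not part of this patch:

    #include <cstdio>

    // Largest forward displacement, in bytes, reachable through a signed
    // offset field of 'bits' bits scaled by 'scale' bytes per unit.
    static unsigned maxForwardDisp(unsigned bits, unsigned scale) {
      return ((1u << (bits - 1)) - 1) * scale;
    }

    int main() {
      // Bimm16: an 11-bit field scaled by 2 reaches ((1 << 10) - 1) * 2 = 2046
      // bytes, which matches the Bimm16 case above and the branch table built
      // in initializeFunctionInfo.
      std::printf("Bimm16 max forward displacement: %u bytes\n",
                  maxForwardDisp(11, 2));
      return 0;
    }
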
+
+/// findAvailableWater - Look for an existing entry in the WaterList in which
+/// we can place the CPE referenced from U so it's within range of U's MI.
+/// Returns true if found, false if not. If it returns true, WaterIter
+/// is set to the WaterList entry.
+/// To ensure that this pass
+/// terminates, the CPE location for a particular CPUser is only allowed to
+/// move to a lower address, so search backward from the end of the list and
+/// prefer the first water that is in range.
+bool MipsConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
+ water_iterator &WaterIter) {
+ if (WaterList.empty())
+ return false;
+
+ unsigned BestGrowth = ~0u;
+ for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();;
+ --IP) {
+ MachineBasicBlock* WaterBB = *IP;
+ // Check if water is in range and is either at a lower address than the
+ // current "high water mark" or a new water block that was created since
+ // the previous iteration by inserting an unconditional branch. In the
+ // latter case, we want to allow resetting the high water mark back to
+ // this new water since we haven't seen it before. Inserting branches
+ // should be relatively uncommon and when it does happen, we want to be
+ // sure to take advantage of it for all the CPEs near that block, so that
+ // we don't insert more branches than necessary.
+ unsigned Growth;
+ if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
+ (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
+ NewWaterList.count(WaterBB)) && Growth < BestGrowth) {
+ // This is the least amount of required padding seen so far.
+ BestGrowth = Growth;
+ WaterIter = IP;
+ DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber()
+ << " Growth=" << Growth << '\n');
+
+ // Keep looking unless it is perfect.
+ if (BestGrowth == 0)
+ return true;
+ }
+ if (IP == B)
+ break;
+ }
+ return BestGrowth != ~0u;
+}
+
+/// createNewWater - No existing WaterList entry will work for
+/// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
+/// block is used if in range, and the conditional branch munged so control
+/// flow is correct. Otherwise the block is split to create a hole with an
+/// unconditional branch around it. In either case NewMBB is set to a
+/// block following which the new island can be inserted (the WaterList
+/// is not adjusted).
+void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
+ unsigned UserOffset,
+ MachineBasicBlock *&NewMBB) {
+ CPUser &U = CPUsers[CPUserIndex];
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+ unsigned CPELogAlign = getCPELogAlign(CPEMI);
+ MachineBasicBlock *UserMBB = UserMI->getParent();
+ const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
+
+ // If the block does not end in an unconditional branch already, and if the
+ // end of the block is within range, make new water there.
+ if (BBHasFallthrough(UserMBB)) {
+ // Size of branch to insert.
+ unsigned Delta = 2;
+ // Compute the offset where the CPE will begin.
+ unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
+
+ if (isOffsetInRange(UserOffset, CPEOffset, U)) {
+ DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
+ << format(", expected CPE offset %#x\n", CPEOffset));
+ NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
+ // Add an unconditional branch from UserMBB to fallthrough block. Record
+ // it for branch lengthening; this new branch will not get out of range,
+ // but if the preceding conditional branch is out of range, the targets
+ // will be exchanged, and the altered branch may be out of range, so the
+ // machinery has to know about it.
+ int UncondBr = Mips::Bimm16;
+ BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
+ unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
+ ImmBranches.push_back(ImmBranch(&UserMBB->back(),
+ MaxDisp, false, UncondBr));
+ BBInfo[UserMBB->getNumber()].Size += Delta;
+ adjustBBOffsetsAfter(UserMBB);
+ return;
+ }
+ }
+
+ // What a big block. Find a place within the block to split it.
+
+ // Try to split the block so it's fully aligned. Compute the latest split
+ // point where we can add a 4-byte branch instruction, and then align to
+ // LogAlign which is the largest possible alignment in the function.
+ unsigned LogAlign = MF->getAlignment();
+ assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
+ unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
+ DEBUG(dbgs() << format("Split in middle of big block before %#x",
+ BaseInsertOffset));
+
+ // The 4 in the following is for the unconditional branch we'll be inserting.
+ // Alignment of the island is handled inside isOffsetInRange.
+ BaseInsertOffset -= 4;
+
+ DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
+ << " la=" << LogAlign << '\n');
+
+ // This could point off the end of the block if we've already got constant
+ // pool entries following this block; only the last one is in the water list.
+ // Back past any possible branches (allow for a conditional and a maximally
+ // long unconditional).
+ if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
+ BaseInsertOffset = UserBBI.postOffset() - 8;
+ DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
+ }
+ unsigned EndInsertOffset = BaseInsertOffset + 4 +
+ CPEMI->getOperand(2).getImm();
+ MachineBasicBlock::iterator MI = UserMI;
+ ++MI;
+ unsigned CPUIndex = CPUserIndex+1;
+ unsigned NumCPUsers = CPUsers.size();
+ //MachineInstr *LastIT = 0;
+ for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
+ Offset < BaseInsertOffset;
+ Offset += TII->GetInstSizeInBytes(MI),
+ MI = llvm::next(MI)) {
+ assert(MI != UserMBB->end() && "Fell off end of block");
+ if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
+ CPUser &U = CPUsers[CPUIndex];
+ if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
+ // Shift insertion point by one unit of alignment so it is within reach.
+ BaseInsertOffset -= 1u << LogAlign;
+ EndInsertOffset -= 1u << LogAlign;
+ }
+ // This is overly conservative, as we don't account for CPEMIs being
+ // reused within the block, but it doesn't matter much. Also assume CPEs
+ // are added in order with alignment padding. We may eventually be able
+ // to pack the aligned CPEs better.
+ EndInsertOffset += U.CPEMI->getOperand(2).getImm();
+ CPUIndex++;
+ }
+ }
+
+ --MI;
+ NewMBB = splitBlockBeforeInstr(MI);
+}
+
+/// handleConstantPoolUser - Analyze the specified user, checking to see if it
+/// is out-of-range. If so, pick up the constant pool value and move it some
+/// place in-range. Return true if we changed any addresses (thus must run
+/// another pass of branch lengthening), false otherwise.
+bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
+ CPUser &U = CPUsers[CPUserIndex];
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ unsigned Size = CPEMI->getOperand(2).getImm();
+ // Compute this only once, it's expensive.
+ unsigned UserOffset = getUserOffset(U);
+
+ // See if the current entry is within range, or there is a clone of it
+ // in range.
+ int result = findInRangeCPEntry(U, UserOffset);
+ if (result==1) return false;
+ else if (result==2) return true;
+
+
+ // Look for water where we can place this CPE.
+ MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
+ MachineBasicBlock *NewMBB;
+ water_iterator IP;
+ if (findAvailableWater(U, UserOffset, IP)) {
+ DEBUG(dbgs() << "Found water in range\n");
+ MachineBasicBlock *WaterBB = *IP;
+
+ // If the original WaterList entry was "new water" on this iteration,
+ // propagate that to the new island. This is just keeping NewWaterList
+ // updated to match the WaterList, which will be updated below.
+ if (NewWaterList.erase(WaterBB))
+ NewWaterList.insert(NewIsland);
+
+ // The new CPE goes before the following block (NewMBB).
+ NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
+
+ } else {
+ // No water found.
+ // We first see if a longer form of the instruction could have reached
+ // the constant; in that case we won't bother to split.
+ if (!NoLoadRelaxation) {
+ result = findLongFormInRangeCPEntry(U, UserOffset);
+ if (result != 0) return true;
+ }
+ DEBUG(dbgs() << "No water found\n");
+ createNewWater(CPUserIndex, UserOffset, NewMBB);
+
+ // splitBlockBeforeInstr adds to WaterList, which is important when it is
+ // called while handling branches so that the water will be seen on the
+ // next iteration for constant pools, but in this context, we don't want
+ // it. Check for this so it will be removed from the WaterList.
+ // Also remove any entry from NewWaterList.
+ MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB));
+ IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
+ if (IP != WaterList.end())
+ NewWaterList.erase(WaterBB);
+
+ // We are adding new water. Update NewWaterList.
+ NewWaterList.insert(NewIsland);
+ }
+
+ // Remove the original WaterList entry; we want subsequent insertions in
+ // this vicinity to go after the one we're about to insert. This
+ // considerably reduces the number of times we have to move the same CPE
+ // more than once and is also important to ensure the algorithm terminates.
+ if (IP != WaterList.end())
+ WaterList.erase(IP);
+
+ // Okay, we know we can put an island before NewMBB now, do it!
+ MF->insert(NewMBB, NewIsland);
+
+ // Update internal data structures to account for the newly inserted MBB.
+ updateForInsertedWaterBlock(NewIsland);
+
+ // Decrement the old entry, and remove it if refcount becomes 0.
+ decrementCPEReferenceCount(CPI, CPEMI);
+
+  // No existing clone of this CPE is within range.
+  // We will be generating a new clone. Get a UID for it.
+  unsigned ID = createPICLabelUId();
+
+  // Now that we have an island to add the CPE to, clone the original CPE and
+  // add it to the island.
+  U.HighWaterMark = NewIsland;
+  U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(Mips::CONSTPOOL_ENTRY))
+                .addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
+  CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
+  ++NumCPEs;
+
+  // Mark the basic block as aligned as required by the const-pool entry.
+  NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
+
+  // Increase the size of the island block to account for the new entry.
+  BBInfo[NewIsland->getNumber()].Size += Size;
+  adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland)));
+
+ // Finally, change the CPI in the instruction operand to be ID.
+ for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
+ if (UserMI->getOperand(i).isCPI()) {
+ UserMI->getOperand(i).setIndex(ID);
+ break;
+ }
+
+ DEBUG(dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
+ << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
+
+ return true;
+}
+
+/// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
+/// sizes and offsets of impacted basic blocks.
+void MipsConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
+ MachineBasicBlock *CPEBB = CPEMI->getParent();
+ unsigned Size = CPEMI->getOperand(2).getImm();
+ CPEMI->eraseFromParent();
+ BBInfo[CPEBB->getNumber()].Size -= Size;
+ // All succeeding offsets have the current size value added in, fix this.
+ if (CPEBB->empty()) {
+ BBInfo[CPEBB->getNumber()].Size = 0;
+
+ // This block no longer needs to be aligned.
+ CPEBB->setAlignment(0);
+ } else
+ // Entries are sorted by descending alignment, so realign from the front.
+ CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));
+
+ adjustBBOffsetsAfter(CPEBB);
+ // An island has only one predecessor BB and one successor BB. Check if
+ // this BB's predecessor jumps directly to this BB's successor. This
+ // shouldn't happen currently.
+ assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
+ // FIXME: remove the empty blocks after all the work is done?
+}
+
+/// removeUnusedCPEntries - Remove constant pool entries whose refcounts
+/// are zero.
+bool MipsConstantIslands::removeUnusedCPEntries() {
+  bool MadeChange = false;
+ for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
+ std::vector<CPEntry> &CPEs = CPEntries[i];
+ for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
+ if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
+ removeDeadCPEMI(CPEs[j].CPEMI);
+ CPEs[j].CPEMI = NULL;
+ MadeChange = true;
+ }
+ }
+ }
+ return MadeChange;
+}
+
+/// isBBInRange - Returns true if the distance between the specified MI and
+/// the specified BB can fit in MI's displacement field.
+bool MipsConstantIslands::isBBInRange(MachineInstr *MI,
+                                      MachineBasicBlock *DestBB,
+                                      unsigned MaxDisp) {
+  unsigned PCAdj = 4;
+
+  unsigned BrOffset = getOffsetOf(MI) + PCAdj;
+ unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
+
+ DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
+ << " from BB#" << MI->getParent()->getNumber()
+ << " max delta=" << MaxDisp
+ << " from " << getOffsetOf(MI) << " to " << DestOffset
+ << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
+
+ if (BrOffset <= DestOffset) {
+ // Branch before the Dest.
+ if (DestOffset-BrOffset <= MaxDisp)
+ return true;
+ } else {
+ if (BrOffset-DestOffset <= MaxDisp)
+ return true;
+ }
return false;
}
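The check above reduces to a symmetric range test around the PC-adjusted branch offset. A minimal self-contained sketch, assuming offsets are byte offsets from the start of the function (the helper name fitsInDisplacement is purely illustrative):

    static bool fitsInDisplacement(unsigned BrOffset, unsigned DestOffset,
                                   unsigned MaxDisp) {
      if (BrOffset <= DestOffset)
        return DestOffset - BrOffset <= MaxDisp;  // forward branch
      return BrOffset - DestOffset <= MaxDisp;    // backward branch
    }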
+/// fixupImmediateBr - Fix up an immediate branch whose destination is too far
+/// away to fit in its displacement field.
+bool MipsConstantIslands::fixupImmediateBr(ImmBranch &Br) {
+ MachineInstr *MI = Br.MI;
+ MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
+
+ // Check to see if the DestBB is already in-range.
+ if (isBBInRange(MI, DestBB, Br.MaxDisp))
+ return false;
+
+ if (!Br.isCond)
+ return fixupUnconditionalBr(Br);
+ return fixupConditionalBr(Br);
+}
+
+/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
+/// too far away to fit in its displacement field. The branch is converted to
+/// its extended (BimmX16) form, which has a larger displacement range.
+bool
+MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
+ MachineInstr *MI = Br.MI;
+ MachineBasicBlock *MBB = MI->getParent();
+  // Use the extended branch (BimmX16) to implement the far jump.
+ Br.MaxDisp = ((1 << 16)-1) * 2;
+ MI->setDesc(TII->get(Mips::BimmX16));
+ BBInfo[MBB->getNumber()].Size += 2;
+ adjustBBOffsetsAfter(MBB);
+ HasFarJump = true;
+ ++NumUBrFixed;
+
+ DEBUG(dbgs() << " Changed B to long jump " << *MI);
+
+ return true;
+}
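As a quick sanity check on the reach set above, assuming the BimmX16 offset field is 16 bits counted in 2-byte (halfword) units:

    MaxDisp = ((1 << 16) - 1) * 2 = 65535 * 2 = 131070 bytes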
+
+/// fixupConditionalBr - Fix up a conditional branch whose destination is too
+/// far away to fit in its displacement field. It is converted to an inverse
+/// conditional branch + an unconditional branch to the destination.
+bool
+MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
+ MachineInstr *MI = Br.MI;
+ MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
+
+ // Add an unconditional branch to the destination and invert the branch
+ // condition to jump over it:
+ // blt L1
+ // =>
+ // bge L2
+ // b L1
+ // L2:
+ unsigned CCReg = 0; // FIXME
+  unsigned CC = 0;    // FIXME
+
+ // If the branch is at the end of its MBB and that has a fall-through block,
+ // direct the updated conditional branch to the fall-through block. Otherwise,
+ // split the MBB before the next instruction.
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineInstr *BMI = &MBB->back();
+ bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
+
+ ++NumCBrFixed;
+ if (BMI != MI) {
+ if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
+ BMI->getOpcode() == Br.UncondBr) {
+ // Last MI in the BB is an unconditional branch. Can we simply invert the
+ // condition and swap destinations:
+ // beq L1
+ // b L2
+ // =>
+ // bne L2
+ // b L1
+ MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
+ if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
+ DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
+ << *BMI);
+ BMI->getOperand(0).setMBB(DestBB);
+ MI->getOperand(0).setMBB(NewDest);
+ return true;
+ }
+ }
+ }
+
+ if (NeedSplit) {
+ splitBlockBeforeInstr(MI);
+ // No need for the branch to the next block. We're adding an unconditional
+ // branch to the destination.
+ int delta = TII->GetInstSizeInBytes(&MBB->back());
+ BBInfo[MBB->getNumber()].Size -= delta;
+ MBB->back().eraseFromParent();
+ // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
+ }
+ MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
+
+ DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
+ << " also invert condition and change dest. to BB#"
+ << NextBB->getNumber() << "\n");
+
+ // Insert a new conditional branch and a new unconditional branch.
+ // Also update the ImmBranch as well as adding a new entry for the new branch.
+ BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
+ .addMBB(NextBB).addImm(CC).addReg(CCReg);
+ Br.MI = &MBB->back();
+ BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
+ BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
+ ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
+
+ // Remove the old conditional branch. It may or may not still be in MBB.
+ BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
+ MI->eraseFromParent();
+ adjustBBOffsetsAfter(MBB);
+ return true;
+}
+
+
+void MipsConstantIslands::prescanForConstants() {
+ unsigned J = 0;
+ (void)J;
+ PrescannedForConstants = true;
+ for (MachineFunction::iterator B =
+ MF->begin(), E = MF->end(); B != E; ++B) {
+ for (MachineBasicBlock::instr_iterator I =
+ B->instr_begin(), EB = B->instr_end(); I != EB; ++I) {
+ switch(I->getDesc().getOpcode()) {
+ case Mips::LwConstant32: {
+ DEBUG(dbgs() << "constant island constant " << *I << "\n");
+ J = I->getNumOperands();
+ DEBUG(dbgs() << "num operands " << J << "\n");
+ MachineOperand& Literal = I->getOperand(1);
+ if (Literal.isImm()) {
+ int64_t V = Literal.getImm();
+ DEBUG(dbgs() << "literal " << V << "\n");
+ Type *Int32Ty =
+ Type::getInt32Ty(MF->getFunction()->getContext());
+ const Constant *C = ConstantInt::get(Int32Ty, V);
+ unsigned index = MCP->getConstantPoolIndex(C, 4);
+ I->getOperand(2).ChangeToImmediate(index);
+ DEBUG(dbgs() << "constant island constant " << *I << "\n");
+ I->setDesc(TII->get(Mips::LwRxPcTcp16));
+ I->RemoveOperand(1);
+ I->RemoveOperand(1);
+ I->addOperand(MachineOperand::CreateCPI(index, 0));
+ I->addOperand(MachineOperand::CreateImm(4));
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+}
+
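In effect, prescanForConstants turns each LwConstant32 pseudo into a pc-relative load from a constant-pool entry. A condensed sketch of the rewrite, reusing the names from the loop above (the operand shapes shown are illustrative, not the exact pseudo definitions):

    // before: LwConstant32 $dst, <imm32>, <constid>   -- literal carried inline
    // after:  LwRxPcTcp16  $dst, <cpi>, 4             -- literal moved to the pool
    const Constant *C = ConstantInt::get(Int32Ty, V);             // V is <imm32>
    unsigned CPI = MCP->getConstantPoolIndex(C, /*Alignment=*/4); // <cpi>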
diff --git a/lib/Target/Mips/MipsDSPInstrInfo.td b/lib/Target/Mips/MipsDSPInstrInfo.td
index c12878a95209..d26838404451 100644
--- a/lib/Target/Mips/MipsDSPInstrInfo.td
+++ b/lib/Target/Mips/MipsDSPInstrInfo.td
@@ -256,236 +256,235 @@ class PREPEND_ENC : APPEND_FMT<0b00001>;
// Instruction desc.
class ADDU_QB_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCD,
- RegisterClass RCS, RegisterClass RCT = RCS> {
- dag OutOperandList = (outs RCD:$rd);
- dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ InstrItinClass itin, RegisterOperand ROD,
+ RegisterOperand ROS, RegisterOperand ROT = ROS> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROS:$rs, ROT:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
- list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode ROS:$rs, ROT:$rt))];
InstrItinClass Itinerary = itin;
}
class RADDU_W_QB_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCD,
- RegisterClass RCS = RCD> {
- dag OutOperandList = (outs RCD:$rd);
- dag InOperandList = (ins RCS:$rs);
+ InstrItinClass itin, RegisterOperand ROD,
+ RegisterOperand ROS = ROD> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROS:$rs);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs");
- list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode ROS:$rs))];
InstrItinClass Itinerary = itin;
}
class CMP_EQ_QB_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCS,
- RegisterClass RCT = RCS> {
+ InstrItinClass itin, RegisterOperand ROS,
+ RegisterOperand ROT = ROS> {
dag OutOperandList = (outs);
- dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ dag InOperandList = (ins ROS:$rs, ROT:$rt);
string AsmString = !strconcat(instr_asm, "\t$rs, $rt");
- list<dag> Pattern = [(OpNode RCS:$rs, RCT:$rt)];
+ list<dag> Pattern = [(OpNode ROS:$rs, ROT:$rt)];
InstrItinClass Itinerary = itin;
}
class CMP_EQ_QB_R3_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCD,
- RegisterClass RCS, RegisterClass RCT = RCS> {
- dag OutOperandList = (outs RCD:$rd);
- dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ InstrItinClass itin, RegisterOperand ROD,
+ RegisterOperand ROS, RegisterOperand ROT = ROS> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROS:$rs, ROT:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
- list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode ROS:$rs, ROT:$rt))];
InstrItinClass Itinerary = itin;
}
class PRECR_SRA_PH_W_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCT,
- RegisterClass RCS = RCT> {
- dag OutOperandList = (outs RCT:$rt);
- dag InOperandList = (ins RCS:$rs, shamt:$sa, RCS:$src);
+ InstrItinClass itin, RegisterOperand ROT,
+ RegisterOperand ROS = ROT> {
+ dag OutOperandList = (outs ROT:$rt);
+ dag InOperandList = (ins ROS:$rs, uimm5:$sa, ROS:$src);
string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa");
- list<dag> Pattern = [(set RCT:$rt, (OpNode RCS:$src, RCS:$rs, immZExt5:$sa))];
+ list<dag> Pattern = [(set ROT:$rt, (OpNode ROS:$src, ROS:$rs, immZExt5:$sa))];
InstrItinClass Itinerary = itin;
string Constraints = "$src = $rt";
}
class ABSQ_S_PH_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCD,
- RegisterClass RCT = RCD> {
- dag OutOperandList = (outs RCD:$rd);
- dag InOperandList = (ins RCT:$rt);
+ InstrItinClass itin, RegisterOperand ROD,
+ RegisterOperand ROT = ROD> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROT:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rt");
- list<dag> Pattern = [(set RCD:$rd, (OpNode RCT:$rt))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode ROT:$rt))];
InstrItinClass Itinerary = itin;
}
class REPL_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- ImmLeaf immPat, InstrItinClass itin, RegisterClass RC> {
- dag OutOperandList = (outs RC:$rd);
+ ImmLeaf immPat, InstrItinClass itin, RegisterOperand RO> {
+ dag OutOperandList = (outs RO:$rd);
dag InOperandList = (ins uimm16:$imm);
string AsmString = !strconcat(instr_asm, "\t$rd, $imm");
- list<dag> Pattern = [(set RC:$rd, (OpNode immPat:$imm))];
+ list<dag> Pattern = [(set RO:$rd, (OpNode immPat:$imm))];
InstrItinClass Itinerary = itin;
}
class SHLL_QB_R3_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RC> {
- dag OutOperandList = (outs RC:$rd);
- dag InOperandList = (ins RC:$rt, CPURegs:$rs_sa);
+ InstrItinClass itin, RegisterOperand RO> {
+ dag OutOperandList = (outs RO:$rd);
+ dag InOperandList = (ins RO:$rt, GPR32Opnd:$rs_sa);
string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs_sa");
- list<dag> Pattern = [(set RC:$rd, (OpNode RC:$rt, CPURegs:$rs_sa))];
+ list<dag> Pattern = [(set RO:$rd, (OpNode RO:$rt, GPR32Opnd:$rs_sa))];
InstrItinClass Itinerary = itin;
}
class SHLL_QB_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
SDPatternOperator ImmPat, InstrItinClass itin,
- RegisterClass RC> {
- dag OutOperandList = (outs RC:$rd);
- dag InOperandList = (ins RC:$rt, uimm16:$rs_sa);
+ RegisterOperand RO> {
+ dag OutOperandList = (outs RO:$rd);
+ dag InOperandList = (ins RO:$rt, uimm16:$rs_sa);
string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs_sa");
- list<dag> Pattern = [(set RC:$rd, (OpNode RC:$rt, ImmPat:$rs_sa))];
+ list<dag> Pattern = [(set RO:$rd, (OpNode RO:$rt, ImmPat:$rs_sa))];
InstrItinClass Itinerary = itin;
bit hasSideEffects = 1;
}
class LX_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rd);
- dag InOperandList = (ins CPURegs:$base, CPURegs:$index);
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins PtrRC:$base, PtrRC:$index);
string AsmString = !strconcat(instr_asm, "\t$rd, ${index}(${base})");
- list<dag> Pattern = [(set CPURegs:$rd,
- (OpNode CPURegs:$base, CPURegs:$index))];
+ list<dag> Pattern = [(set GPR32Opnd:$rd, (OpNode iPTR:$base, iPTR:$index))];
InstrItinClass Itinerary = itin;
bit mayLoad = 1;
}
class ADDUH_QB_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCD,
- RegisterClass RCS = RCD, RegisterClass RCT = RCD> {
- dag OutOperandList = (outs RCD:$rd);
- dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ InstrItinClass itin, RegisterOperand ROD,
+ RegisterOperand ROS = ROD, RegisterOperand ROT = ROD> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROS:$rs, ROT:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
- list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode ROS:$rs, ROT:$rt))];
InstrItinClass Itinerary = itin;
}
class APPEND_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
SDPatternOperator ImmOp, InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rt);
- dag InOperandList = (ins CPURegs:$rs, shamt:$sa, CPURegs:$src);
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins GPR32Opnd:$rs, uimm5:$sa, GPR32Opnd:$src);
string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa");
- list<dag> Pattern = [(set CPURegs:$rt,
- (OpNode CPURegs:$src, CPURegs:$rs, ImmOp:$sa))];
+ list<dag> Pattern = [(set GPR32Opnd:$rt,
+ (OpNode GPR32Opnd:$src, GPR32Opnd:$rs, ImmOp:$sa))];
InstrItinClass Itinerary = itin;
string Constraints = "$src = $rt";
}
class EXTR_W_TY1_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rt);
- dag InOperandList = (ins ACRegsDSP:$ac, CPURegs:$shift_rs);
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins ACC64DSPOpnd:$ac, GPR32Opnd:$shift_rs);
string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs");
InstrItinClass Itinerary = itin;
}
class EXTR_W_TY1_R1_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rt);
- dag InOperandList = (ins ACRegsDSP:$ac, uimm16:$shift_rs);
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins ACC64DSPOpnd:$ac, uimm16:$shift_rs);
string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs");
InstrItinClass Itinerary = itin;
}
class SHILO_R1_DESC_BASE<string instr_asm, SDPatternOperator OpNode> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins simm16:$shift, ACRegsDSP:$acin);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins simm16:$shift, ACC64DSPOpnd:$acin);
string AsmString = !strconcat(instr_asm, "\t$ac, $shift");
- list<dag> Pattern = [(set ACRegsDSP:$ac,
- (OpNode immSExt6:$shift, ACRegsDSP:$acin))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac,
+ (OpNode immSExt6:$shift, ACC64DSPOpnd:$acin))];
string Constraints = "$acin = $ac";
}
class SHILO_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins CPURegs:$rs, ACRegsDSP:$acin);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs, ACC64DSPOpnd:$acin);
string AsmString = !strconcat(instr_asm, "\t$ac, $rs");
- list<dag> Pattern = [(set ACRegsDSP:$ac,
- (OpNode CPURegs:$rs, ACRegsDSP:$acin))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac,
+ (OpNode GPR32Opnd:$rs, ACC64DSPOpnd:$acin))];
string Constraints = "$acin = $ac";
}
class MTHLIP_DESC_BASE<string instr_asm, SDPatternOperator OpNode> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins CPURegs:$rs, ACRegsDSP:$acin);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs, ACC64DSPOpnd:$acin);
string AsmString = !strconcat(instr_asm, "\t$rs, $ac");
- list<dag> Pattern = [(set ACRegsDSP:$ac,
- (OpNode CPURegs:$rs, ACRegsDSP:$acin))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac,
+ (OpNode GPR32Opnd:$rs, ACC64DSPOpnd:$acin))];
string Constraints = "$acin = $ac";
}
class RDDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rd);
+ dag OutOperandList = (outs GPR32Opnd:$rd);
dag InOperandList = (ins uimm16:$mask);
string AsmString = !strconcat(instr_asm, "\t$rd, $mask");
- list<dag> Pattern = [(set CPURegs:$rd, (OpNode immZExt10:$mask))];
+ list<dag> Pattern = [(set GPR32Opnd:$rd, (OpNode immZExt10:$mask))];
InstrItinClass Itinerary = itin;
}
class WRDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
dag OutOperandList = (outs);
- dag InOperandList = (ins CPURegs:$rs, uimm16:$mask);
+ dag InOperandList = (ins GPR32Opnd:$rs, uimm16:$mask);
string AsmString = !strconcat(instr_asm, "\t$rs, $mask");
- list<dag> Pattern = [(OpNode CPURegs:$rs, immZExt10:$mask)];
+ list<dag> Pattern = [(OpNode GPR32Opnd:$rs, immZExt10:$mask)];
InstrItinClass Itinerary = itin;
}
class DPA_W_PH_DESC_BASE<string instr_asm, SDPatternOperator OpNode> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64DSPOpnd:$acin);
string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt");
- list<dag> Pattern = [(set ACRegsDSP:$ac,
- (OpNode CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac,
+ (OpNode GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64DSPOpnd:$acin))];
string Constraints = "$acin = $ac";
}
class MULT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs, GPR32Opnd:$rt);
string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt");
- list<dag> Pattern = [(set ACRegsDSP:$ac, (OpNode CPURegs:$rs, CPURegs:$rt))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac, (OpNode GPR32Opnd:$rs, GPR32Opnd:$rt))];
InstrItinClass Itinerary = itin;
- int AddedComplexity = 20;
bit isCommutable = 1;
}
class MADD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64DSPOpnd:$acin);
string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt");
- list<dag> Pattern = [(set ACRegsDSP:$ac,
- (OpNode CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac,
+ (OpNode GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64DSPOpnd:$acin))];
InstrItinClass Itinerary = itin;
- int AddedComplexity = 20;
string Constraints = "$acin = $ac";
}
-class MFHI_DESC_BASE<string instr_asm, RegisterClass RC, InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rd);
- dag InOperandList = (ins RC:$ac);
+class MFHI_DESC_BASE<string instr_asm, RegisterOperand RO, SDNode OpNode,
+ InstrItinClass itin> {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins RO:$ac);
string AsmString = !strconcat(instr_asm, "\t$rd, $ac");
+ list<dag> Pattern = [(set GPR32Opnd:$rd, (OpNode RO:$ac))];
InstrItinClass Itinerary = itin;
}
-class MTHI_DESC_BASE<string instr_asm, RegisterClass RC, InstrItinClass itin> {
- dag OutOperandList = (outs RC:$ac);
- dag InOperandList = (ins CPURegs:$rs);
+class MTHI_DESC_BASE<string instr_asm, RegisterOperand RO, InstrItinClass itin> {
+ dag OutOperandList = (outs RO:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs);
string AsmString = !strconcat(instr_asm, "\t$rs, $ac");
InstrItinClass Itinerary = itin;
}
class BPOSGE32_PSEUDO_DESC_BASE<SDPatternOperator OpNode, InstrItinClass itin> :
- MipsPseudo<(outs CPURegs:$dst), (ins), [(set CPURegs:$dst, (OpNode))]> {
+ MipsPseudo<(outs GPR32Opnd:$dst), (ins), [(set GPR32Opnd:$dst, (OpNode))]> {
bit usesCustomInserter = 1;
}
@@ -501,10 +500,10 @@ class BPOSGE32_DESC_BASE<string instr_asm, InstrItinClass itin> {
class INSV_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rt);
- dag InOperandList = (ins CPURegs:$src, CPURegs:$rs);
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins GPR32Opnd:$src, GPR32Opnd:$rs);
string AsmString = !strconcat(instr_asm, "\t$rt, $rs");
- list<dag> Pattern = [(set CPURegs:$rt, (OpNode CPURegs:$src, CPURegs:$rs))];
+ list<dag> Pattern = [(set GPR32Opnd:$rt, (OpNode GPR32Opnd:$src, GPR32Opnd:$rs))];
InstrItinClass Itinerary = itin;
string Constraints = "$src = $rt";
}
@@ -515,209 +514,209 @@ class INSV_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
// Addition/subtraction
class ADDU_QB_DESC : ADDU_QB_DESC_BASE<"addu.qb", null_frag, NoItinerary,
- DSPRegs, DSPRegs>, IsCommutable,
+ DSPROpnd, DSPROpnd>, IsCommutable,
Defs<[DSPOutFlag20]>;
class ADDU_S_QB_DESC : ADDU_QB_DESC_BASE<"addu_s.qb", int_mips_addu_s_qb,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag20]>;
class SUBU_QB_DESC : ADDU_QB_DESC_BASE<"subu.qb", null_frag, NoItinerary,
- DSPRegs, DSPRegs>,
+ DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class SUBU_S_QB_DESC : ADDU_QB_DESC_BASE<"subu_s.qb", int_mips_subu_s_qb,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class ADDQ_PH_DESC : ADDU_QB_DESC_BASE<"addq.ph", null_frag, NoItinerary,
- DSPRegs, DSPRegs>, IsCommutable,
+ DSPROpnd, DSPROpnd>, IsCommutable,
Defs<[DSPOutFlag20]>;
class ADDQ_S_PH_DESC : ADDU_QB_DESC_BASE<"addq_s.ph", int_mips_addq_s_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag20]>;
class SUBQ_PH_DESC : ADDU_QB_DESC_BASE<"subq.ph", null_frag, NoItinerary,
- DSPRegs, DSPRegs>,
+ DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class SUBQ_S_PH_DESC : ADDU_QB_DESC_BASE<"subq_s.ph", int_mips_subq_s_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class ADDQ_S_W_DESC : ADDU_QB_DESC_BASE<"addq_s.w", int_mips_addq_s_w,
- NoItinerary, CPURegs, CPURegs>,
+ NoItinerary, GPR32Opnd, GPR32Opnd>,
IsCommutable, Defs<[DSPOutFlag20]>;
class SUBQ_S_W_DESC : ADDU_QB_DESC_BASE<"subq_s.w", int_mips_subq_s_w,
- NoItinerary, CPURegs, CPURegs>,
+ NoItinerary, GPR32Opnd, GPR32Opnd>,
Defs<[DSPOutFlag20]>;
class ADDSC_DESC : ADDU_QB_DESC_BASE<"addsc", null_frag, NoItinerary,
- CPURegs, CPURegs>, IsCommutable,
+ GPR32Opnd, GPR32Opnd>, IsCommutable,
Defs<[DSPCarry]>;
class ADDWC_DESC : ADDU_QB_DESC_BASE<"addwc", null_frag, NoItinerary,
- CPURegs, CPURegs>,
+ GPR32Opnd, GPR32Opnd>,
IsCommutable, Uses<[DSPCarry]>, Defs<[DSPOutFlag20]>;
class MODSUB_DESC : ADDU_QB_DESC_BASE<"modsub", int_mips_modsub, NoItinerary,
- CPURegs, CPURegs>;
+ GPR32Opnd, GPR32Opnd>;
class RADDU_W_QB_DESC : RADDU_W_QB_DESC_BASE<"raddu.w.qb", int_mips_raddu_w_qb,
- NoItinerary, CPURegs, DSPRegs>;
+ NoItinerary, GPR32Opnd, DSPROpnd>;
// Absolute value
class ABSQ_S_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.ph", int_mips_absq_s_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class ABSQ_S_W_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.w", int_mips_absq_s_w,
- NoItinerary, CPURegs>,
+ NoItinerary, GPR32Opnd>,
Defs<[DSPOutFlag20]>;
// Precision reduce/expand
class PRECRQ_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq.qb.ph",
int_mips_precrq_qb_ph,
- NoItinerary, DSPRegs, DSPRegs>;
+ NoItinerary, DSPROpnd, DSPROpnd>;
class PRECRQ_PH_W_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq.ph.w",
int_mips_precrq_ph_w,
- NoItinerary, DSPRegs, CPURegs>;
+ NoItinerary, DSPROpnd, GPR32Opnd>;
class PRECRQ_RS_PH_W_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq_rs.ph.w",
int_mips_precrq_rs_ph_w,
- NoItinerary, DSPRegs,
- CPURegs>,
+ NoItinerary, DSPROpnd,
+ GPR32Opnd>,
Defs<[DSPOutFlag22]>;
class PRECRQU_S_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrqu_s.qb.ph",
int_mips_precrqu_s_qb_ph,
- NoItinerary, DSPRegs,
- DSPRegs>,
+ NoItinerary, DSPROpnd,
+ DSPROpnd>,
Defs<[DSPOutFlag22]>;
class PRECEQ_W_PHL_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceq.w.phl",
int_mips_preceq_w_phl,
- NoItinerary, CPURegs, DSPRegs>;
+ NoItinerary, GPR32Opnd, DSPROpnd>;
class PRECEQ_W_PHR_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceq.w.phr",
int_mips_preceq_w_phr,
- NoItinerary, CPURegs, DSPRegs>;
+ NoItinerary, GPR32Opnd, DSPROpnd>;
class PRECEQU_PH_QBL_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbl",
int_mips_precequ_ph_qbl,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEQU_PH_QBR_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbr",
int_mips_precequ_ph_qbr,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEQU_PH_QBLA_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbla",
int_mips_precequ_ph_qbla,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEQU_PH_QBRA_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbra",
int_mips_precequ_ph_qbra,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEU_PH_QBL_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbl",
int_mips_preceu_ph_qbl,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEU_PH_QBR_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbr",
int_mips_preceu_ph_qbr,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEU_PH_QBLA_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbla",
int_mips_preceu_ph_qbla,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEU_PH_QBRA_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbra",
int_mips_preceu_ph_qbra,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
// Shift
class SHLL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shll.qb", null_frag, immZExt3,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHLLV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shllv.qb", int_mips_shll_qb,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHRL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shrl.qb", null_frag, immZExt3,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRLV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.qb", int_mips_shrl_qb,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHLL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll.ph", null_frag, immZExt4,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHLLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shllv.ph", int_mips_shll_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHLL_S_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll_s.ph", int_mips_shll_s_ph,
- immZExt4, NoItinerary, DSPRegs>,
+ immZExt4, NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHLLV_S_PH_DESC : SHLL_QB_R3_DESC_BASE<"shllv_s.ph", int_mips_shll_s_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHRA_PH_DESC : SHLL_QB_R2_DESC_BASE<"shra.ph", null_frag, immZExt4,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRAV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrav.ph", int_mips_shra_ph,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRA_R_PH_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.ph", int_mips_shra_r_ph,
- immZExt4, NoItinerary, DSPRegs>;
+ immZExt4, NoItinerary, DSPROpnd>;
class SHRAV_R_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.ph", int_mips_shra_r_ph,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHLL_S_W_DESC : SHLL_QB_R2_DESC_BASE<"shll_s.w", int_mips_shll_s_w,
- immZExt5, NoItinerary, CPURegs>,
+ immZExt5, NoItinerary, GPR32Opnd>,
Defs<[DSPOutFlag22]>;
class SHLLV_S_W_DESC : SHLL_QB_R3_DESC_BASE<"shllv_s.w", int_mips_shll_s_w,
- NoItinerary, CPURegs>,
+ NoItinerary, GPR32Opnd>,
Defs<[DSPOutFlag22]>;
class SHRA_R_W_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.w", int_mips_shra_r_w,
- immZExt5, NoItinerary, CPURegs>;
+ immZExt5, NoItinerary, GPR32Opnd>;
class SHRAV_R_W_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.w", int_mips_shra_r_w,
- NoItinerary, CPURegs>;
+ NoItinerary, GPR32Opnd>;
// Multiplication
class MULEU_S_PH_QBL_DESC : ADDU_QB_DESC_BASE<"muleu_s.ph.qbl",
int_mips_muleu_s_ph_qbl,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag21]>;
class MULEU_S_PH_QBR_DESC : ADDU_QB_DESC_BASE<"muleu_s.ph.qbr",
int_mips_muleu_s_ph_qbr,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag21]>;
class MULEQ_S_W_PHL_DESC : ADDU_QB_DESC_BASE<"muleq_s.w.phl",
int_mips_muleq_s_w_phl,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag21]>;
class MULEQ_S_W_PHR_DESC : ADDU_QB_DESC_BASE<"muleq_s.w.phr",
int_mips_muleq_s_w_phr,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag21]>;
class MULQ_RS_PH_DESC : ADDU_QB_DESC_BASE<"mulq_rs.ph", int_mips_mulq_rs_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag21]>;
class MULSAQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsaq_s.w.ph",
@@ -737,10 +736,10 @@ class MAQ_SA_W_PHR_DESC : DPA_W_PH_DESC_BASE<"maq_sa.w.phr", MipsMAQ_SA_W_PHR>,
Defs<[DSPOutFlag16_19]>;
// Move from/to hi/lo.
-class MFHI_DESC : MFHI_DESC_BASE<"mfhi", HIRegsDSP, NoItinerary>;
-class MFLO_DESC : MFHI_DESC_BASE<"mflo", LORegsDSP, NoItinerary>;
-class MTHI_DESC : MTHI_DESC_BASE<"mthi", HIRegsDSP, NoItinerary>;
-class MTLO_DESC : MTHI_DESC_BASE<"mtlo", LORegsDSP, NoItinerary>;
+class MFHI_DESC : MFHI_DESC_BASE<"mfhi", ACC64DSPOpnd, MipsMFHI, NoItinerary>;
+class MFLO_DESC : MFHI_DESC_BASE<"mflo", ACC64DSPOpnd, MipsMFLO, NoItinerary>;
+class MTHI_DESC : MTHI_DESC_BASE<"mthi", HI32DSPOpnd, NoItinerary>;
+class MTLO_DESC : MTHI_DESC_BASE<"mtlo", LO32DSPOpnd, NoItinerary>;
// Dot product with accumulate/subtract
class DPAU_H_QBL_DESC : DPA_W_PH_DESC_BASE<"dpau.h.qbl", MipsDPAU_H_QBL>;
@@ -773,67 +772,67 @@ class MSUBU_DSP_DESC : MADD_DESC_BASE<"msubu", MipsMSubu, NoItinerary>;
// Comparison
class CMPU_EQ_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.eq.qb",
int_mips_cmpu_eq_qb, NoItinerary,
- DSPRegs>,
+ DSPROpnd>,
IsCommutable, Defs<[DSPCCond]>;
class CMPU_LT_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.lt.qb",
int_mips_cmpu_lt_qb, NoItinerary,
- DSPRegs>, Defs<[DSPCCond]>;
+ DSPROpnd>, Defs<[DSPCCond]>;
class CMPU_LE_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.le.qb",
int_mips_cmpu_le_qb, NoItinerary,
- DSPRegs>, Defs<[DSPCCond]>;
+ DSPROpnd>, Defs<[DSPCCond]>;
class CMPGU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.eq.qb",
int_mips_cmpgu_eq_qb,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
IsCommutable;
class CMPGU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.lt.qb",
int_mips_cmpgu_lt_qb,
- NoItinerary, CPURegs, DSPRegs>;
+ NoItinerary, GPR32Opnd, DSPROpnd>;
class CMPGU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.le.qb",
int_mips_cmpgu_le_qb,
- NoItinerary, CPURegs, DSPRegs>;
+ NoItinerary, GPR32Opnd, DSPROpnd>;
class CMP_EQ_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.eq.ph", int_mips_cmp_eq_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
IsCommutable, Defs<[DSPCCond]>;
class CMP_LT_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.lt.ph", int_mips_cmp_lt_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPCCond]>;
class CMP_LE_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.le.ph", int_mips_cmp_le_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPCCond]>;
// Misc
class BITREV_DESC : ABSQ_S_PH_R2_DESC_BASE<"bitrev", int_mips_bitrev,
- NoItinerary, CPURegs>;
+ NoItinerary, GPR32Opnd>;
class PACKRL_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"packrl.ph", int_mips_packrl_ph,
- NoItinerary, DSPRegs, DSPRegs>;
+ NoItinerary, DSPROpnd, DSPROpnd>;
class REPL_QB_DESC : REPL_DESC_BASE<"repl.qb", int_mips_repl_qb, immZExt8,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class REPL_PH_DESC : REPL_DESC_BASE<"repl.ph", int_mips_repl_ph, immZExt10,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class REPLV_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.qb", int_mips_repl_qb,
- NoItinerary, DSPRegs, CPURegs>;
+ NoItinerary, DSPROpnd, GPR32Opnd>;
class REPLV_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.ph", int_mips_repl_ph,
- NoItinerary, DSPRegs, CPURegs>;
+ NoItinerary, DSPROpnd, GPR32Opnd>;
class PICK_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"pick.qb", int_mips_pick_qb,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Uses<[DSPCCond]>;
class PICK_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"pick.ph", int_mips_pick_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Uses<[DSPCCond]>;
class LWX_DESC : LX_DESC_BASE<"lwx", int_mips_lwx, NoItinerary>;
@@ -905,97 +904,97 @@ class INSV_DESC : INSV_DESC_BASE<"insv", int_mips_insv, NoItinerary>,
// MIPS DSP Rev 2
// Addition/subtraction
class ADDU_PH_DESC : ADDU_QB_DESC_BASE<"addu.ph", int_mips_addu_ph, NoItinerary,
- DSPRegs, DSPRegs>, IsCommutable,
+ DSPROpnd, DSPROpnd>, IsCommutable,
Defs<[DSPOutFlag20]>;
class ADDU_S_PH_DESC : ADDU_QB_DESC_BASE<"addu_s.ph", int_mips_addu_s_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag20]>;
class SUBU_PH_DESC : ADDU_QB_DESC_BASE<"subu.ph", int_mips_subu_ph, NoItinerary,
- DSPRegs, DSPRegs>,
+ DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class SUBU_S_PH_DESC : ADDU_QB_DESC_BASE<"subu_s.ph", int_mips_subu_s_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class ADDUH_QB_DESC : ADDUH_QB_DESC_BASE<"adduh.qb", int_mips_adduh_qb,
- NoItinerary, DSPRegs>, IsCommutable;
+ NoItinerary, DSPROpnd>, IsCommutable;
class ADDUH_R_QB_DESC : ADDUH_QB_DESC_BASE<"adduh_r.qb", int_mips_adduh_r_qb,
- NoItinerary, DSPRegs>, IsCommutable;
+ NoItinerary, DSPROpnd>, IsCommutable;
class SUBUH_QB_DESC : ADDUH_QB_DESC_BASE<"subuh.qb", int_mips_subuh_qb,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SUBUH_R_QB_DESC : ADDUH_QB_DESC_BASE<"subuh_r.qb", int_mips_subuh_r_qb,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class ADDQH_PH_DESC : ADDUH_QB_DESC_BASE<"addqh.ph", int_mips_addqh_ph,
- NoItinerary, DSPRegs>, IsCommutable;
+ NoItinerary, DSPROpnd>, IsCommutable;
class ADDQH_R_PH_DESC : ADDUH_QB_DESC_BASE<"addqh_r.ph", int_mips_addqh_r_ph,
- NoItinerary, DSPRegs>, IsCommutable;
+ NoItinerary, DSPROpnd>, IsCommutable;
class SUBQH_PH_DESC : ADDUH_QB_DESC_BASE<"subqh.ph", int_mips_subqh_ph,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SUBQH_R_PH_DESC : ADDUH_QB_DESC_BASE<"subqh_r.ph", int_mips_subqh_r_ph,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class ADDQH_W_DESC : ADDUH_QB_DESC_BASE<"addqh.w", int_mips_addqh_w,
- NoItinerary, CPURegs>, IsCommutable;
+ NoItinerary, GPR32Opnd>, IsCommutable;
class ADDQH_R_W_DESC : ADDUH_QB_DESC_BASE<"addqh_r.w", int_mips_addqh_r_w,
- NoItinerary, CPURegs>, IsCommutable;
+ NoItinerary, GPR32Opnd>, IsCommutable;
class SUBQH_W_DESC : ADDUH_QB_DESC_BASE<"subqh.w", int_mips_subqh_w,
- NoItinerary, CPURegs>;
+ NoItinerary, GPR32Opnd>;
class SUBQH_R_W_DESC : ADDUH_QB_DESC_BASE<"subqh_r.w", int_mips_subqh_r_w,
- NoItinerary, CPURegs>;
+ NoItinerary, GPR32Opnd>;
// Comparison
class CMPGDU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.eq.qb",
int_mips_cmpgdu_eq_qb,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
IsCommutable, Defs<[DSPCCond]>;
class CMPGDU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.lt.qb",
int_mips_cmpgdu_lt_qb,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
Defs<[DSPCCond]>;
class CMPGDU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.le.qb",
int_mips_cmpgdu_le_qb,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
Defs<[DSPCCond]>;
// Absolute
class ABSQ_S_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.qb", int_mips_absq_s_qb,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag20]>;
// Multiplication
class MUL_PH_DESC : ADDUH_QB_DESC_BASE<"mul.ph", null_frag, NoItinerary,
- DSPRegs>, IsCommutable,
+ DSPROpnd>, IsCommutable,
Defs<[DSPOutFlag21]>;
class MUL_S_PH_DESC : ADDUH_QB_DESC_BASE<"mul_s.ph", int_mips_mul_s_ph,
- NoItinerary, DSPRegs>, IsCommutable,
+ NoItinerary, DSPROpnd>, IsCommutable,
Defs<[DSPOutFlag21]>;
class MULQ_S_W_DESC : ADDUH_QB_DESC_BASE<"mulq_s.w", int_mips_mulq_s_w,
- NoItinerary, CPURegs>, IsCommutable,
+ NoItinerary, GPR32Opnd>, IsCommutable,
Defs<[DSPOutFlag21]>;
class MULQ_RS_W_DESC : ADDUH_QB_DESC_BASE<"mulq_rs.w", int_mips_mulq_rs_w,
- NoItinerary, CPURegs>, IsCommutable,
+ NoItinerary, GPR32Opnd>, IsCommutable,
Defs<[DSPOutFlag21]>;
class MULQ_S_PH_DESC : ADDU_QB_DESC_BASE<"mulq_s.ph", int_mips_mulq_s_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag21]>;
// Dot product with accumulate/subtract
@@ -1026,36 +1025,36 @@ class MULSA_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsa.w.ph", MipsMULSA_W_PH>;
// Precision reduce/expand
class PRECR_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precr.qb.ph",
int_mips_precr_qb_ph,
- NoItinerary, DSPRegs, DSPRegs>;
+ NoItinerary, DSPROpnd, DSPROpnd>;
class PRECR_SRA_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra.ph.w",
int_mips_precr_sra_ph_w,
- NoItinerary, DSPRegs,
- CPURegs>;
+ NoItinerary, DSPROpnd,
+ GPR32Opnd>;
class PRECR_SRA_R_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra_r.ph.w",
int_mips_precr_sra_r_ph_w,
- NoItinerary, DSPRegs,
- CPURegs>;
+ NoItinerary, DSPROpnd,
+ GPR32Opnd>;
// Shift
class SHRA_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra.qb", null_frag, immZExt3,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRAV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrav.qb", int_mips_shra_qb,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRA_R_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.qb", int_mips_shra_r_qb,
- immZExt3, NoItinerary, DSPRegs>;
+ immZExt3, NoItinerary, DSPROpnd>;
class SHRAV_R_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.qb", int_mips_shra_r_qb,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shrl.ph", null_frag, immZExt4,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.ph", int_mips_shrl_ph,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
// Misc
class APPEND_DESC : APPEND_DESC_BASE<"append", int_mips_append, immZExt5,
@@ -1240,24 +1239,24 @@ def PREPEND : PREPEND_ENC, PREPEND_DESC;
}
// Pseudos.
-let isPseudo = 1 in {
+let isPseudo = 1, isCodeGenOnly = 1 in {
// Pseudo instructions for loading and storing accumulator registers.
- defm LOAD_AC_DSP : LoadM<"load_ac_dsp", ACRegsDSP>;
- defm STORE_AC_DSP : StoreM<"store_ac_dsp", ACRegsDSP>;
+ def LOAD_ACC64DSP : Load<"", ACC64DSPOpnd>;
+ def STORE_ACC64DSP : Store<"", ACC64DSPOpnd>;
// Pseudos for loading and storing ccond field of DSP control register.
- defm LOAD_CCOND_DSP : LoadM<"load_ccond_dsp", DSPCC>;
- defm STORE_CCOND_DSP : StoreM<"store_ccond_dsp", DSPCC>;
+ def LOAD_CCOND_DSP : Load<"load_ccond_dsp", DSPCC>;
+ def STORE_CCOND_DSP : Store<"store_ccond_dsp", DSPCC>;
}
// Pseudo CMP and PICK instructions.
class PseudoCMP<Instruction RealInst> :
- PseudoDSP<(outs DSPCC:$cmp), (ins DSPRegs:$rs, DSPRegs:$rt), []>,
- PseudoInstExpansion<(RealInst DSPRegs:$rs, DSPRegs:$rt)>, NeverHasSideEffects;
+ PseudoDSP<(outs DSPCC:$cmp), (ins DSPROpnd:$rs, DSPROpnd:$rt), []>,
+ PseudoInstExpansion<(RealInst DSPROpnd:$rs, DSPROpnd:$rt)>, NeverHasSideEffects;
class PseudoPICK<Instruction RealInst> :
- PseudoDSP<(outs DSPRegs:$rd), (ins DSPCC:$cmp, DSPRegs:$rs, DSPRegs:$rt), []>,
- PseudoInstExpansion<(RealInst DSPRegs:$rd, DSPRegs:$rs, DSPRegs:$rt)>,
+ PseudoDSP<(outs DSPROpnd:$rd), (ins DSPCC:$cmp, DSPROpnd:$rs, DSPROpnd:$rt), []>,
+ PseudoInstExpansion<(RealInst DSPROpnd:$rd, DSPROpnd:$rs, DSPROpnd:$rt)>,
NeverHasSideEffects;
def PseudoCMP_EQ_PH : PseudoCMP<CMP_EQ_PH>;
@@ -1270,6 +1269,8 @@ def PseudoCMPU_LE_QB : PseudoCMP<CMPU_LE_QB>;
def PseudoPICK_PH : PseudoPICK<PICK_PH>;
def PseudoPICK_QB : PseudoPICK<PICK_QB>;
+def PseudoMTLOHI_DSP : PseudoMTLOHI<ACC64DSP, GPR32>;
+
// Patterns.
class DSPPat<dag pattern, dag result, Predicate pred = HasDSP> :
Pat<pattern, result>, Requires<[pred]>;
@@ -1279,19 +1280,19 @@ class BitconvertPat<ValueType DstVT, ValueType SrcVT, RegisterClass DstRC,
DSPPat<(DstVT (bitconvert (SrcVT SrcRC:$src))),
(COPY_TO_REGCLASS SrcRC:$src, DstRC)>;
-def : BitconvertPat<i32, v2i16, CPURegs, DSPRegs>;
-def : BitconvertPat<i32, v4i8, CPURegs, DSPRegs>;
-def : BitconvertPat<v2i16, i32, DSPRegs, CPURegs>;
-def : BitconvertPat<v4i8, i32, DSPRegs, CPURegs>;
+def : BitconvertPat<i32, v2i16, GPR32, DSPR>;
+def : BitconvertPat<i32, v4i8, GPR32, DSPR>;
+def : BitconvertPat<v2i16, i32, DSPR, GPR32>;
+def : BitconvertPat<v4i8, i32, DSPR, GPR32>;
def : DSPPat<(v2i16 (load addr:$a)),
- (v2i16 (COPY_TO_REGCLASS (LW addr:$a), DSPRegs))>;
+ (v2i16 (COPY_TO_REGCLASS (LW addr:$a), DSPR))>;
def : DSPPat<(v4i8 (load addr:$a)),
- (v4i8 (COPY_TO_REGCLASS (LW addr:$a), DSPRegs))>;
-def : DSPPat<(store (v2i16 DSPRegs:$val), addr:$a),
- (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>;
-def : DSPPat<(store (v4i8 DSPRegs:$val), addr:$a),
- (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>;
+ (v4i8 (COPY_TO_REGCLASS (LW addr:$a), DSPR))>;
+def : DSPPat<(store (v2i16 DSPR:$val), addr:$a),
+ (SW (COPY_TO_REGCLASS DSPR:$val, GPR32), addr:$a)>;
+def : DSPPat<(store (v4i8 DSPR:$val), addr:$a),
+ (SW (COPY_TO_REGCLASS DSPR:$val, GPR32), addr:$a)>;
// Binary operations.
class DSPBinPat<Instruction Inst, ValueType ValTy, SDPatternOperator Node,
@@ -1336,7 +1337,7 @@ class DSPSetCCPat<Instruction Cmp, Instruction Pick, ValueType ValTy,
CondCode CC> :
DSPPat<(ValTy (MipsSETCC_DSP ValTy:$a, ValTy:$b, CC)),
(ValTy (Pick (ValTy (Cmp ValTy:$a, ValTy:$b)),
- (ValTy (COPY_TO_REGCLASS (ADDiu ZERO, -1), DSPRegs)),
+ (ValTy (COPY_TO_REGCLASS (ADDiu ZERO, -1), DSPR)),
(ValTy ZERO)))>;
class DSPSetCCPatInv<Instruction Cmp, Instruction Pick, ValueType ValTy,
@@ -1344,7 +1345,7 @@ class DSPSetCCPatInv<Instruction Cmp, Instruction Pick, ValueType ValTy,
DSPPat<(ValTy (MipsSETCC_DSP ValTy:$a, ValTy:$b, CC)),
(ValTy (Pick (ValTy (Cmp ValTy:$a, ValTy:$b)),
(ValTy ZERO),
- (ValTy (COPY_TO_REGCLASS (ADDiu ZERO, -1), DSPRegs))))>;
+ (ValTy (COPY_TO_REGCLASS (ADDiu ZERO, -1), DSPR))))>;
class DSPSelectCCPat<Instruction Cmp, Instruction Pick, ValueType ValTy,
CondCode CC> :
@@ -1384,12 +1385,12 @@ def : DSPSelectCCPatInv<PseudoCMPU_LE_QB, PseudoPICK_QB, v4i8, SETUGT>;
// Extr patterns.
class EXTR_W_TY1_R2_Pat<SDPatternOperator OpNode, Instruction Instr> :
- DSPPat<(i32 (OpNode CPURegs:$rs, ACRegsDSP:$ac)),
- (Instr ACRegsDSP:$ac, CPURegs:$rs)>;
+ DSPPat<(i32 (OpNode GPR32:$rs, ACC64DSP:$ac)),
+ (Instr ACC64DSP:$ac, GPR32:$rs)>;
class EXTR_W_TY1_R1_Pat<SDPatternOperator OpNode, Instruction Instr> :
- DSPPat<(i32 (OpNode immZExt5:$shift, ACRegsDSP:$ac)),
- (Instr ACRegsDSP:$ac, immZExt5:$shift)>;
+ DSPPat<(i32 (OpNode immZExt5:$shift, ACC64DSP:$ac)),
+ (Instr ACC64DSP:$ac, immZExt5:$shift)>;
def : EXTR_W_TY1_R1_Pat<MipsEXTP, EXTP>;
def : EXTR_W_TY1_R2_Pat<MipsEXTP, EXTPV>;
@@ -1404,11 +1405,6 @@ def : EXTR_W_TY1_R2_Pat<MipsEXTR_RS_W, EXTRV_RS_W>;
def : EXTR_W_TY1_R1_Pat<MipsEXTR_S_H, EXTR_S_H>;
def : EXTR_W_TY1_R2_Pat<MipsEXTR_S_H, EXTRV_S_H>;
-// mflo/hi patterns.
-let AddedComplexity = 20 in
-def : DSPPat<(i32 (ExtractLOHI ACRegsDSP:$ac, imm:$lohi_idx)),
- (EXTRACT_SUBREG ACRegsDSP:$ac, imm:$lohi_idx)>;
-
// Indexed load patterns.
class IndexedLoadPat<SDPatternOperator LoadNode, Instruction Instr> :
DSPPat<(i32 (LoadNode (add i32:$base, i32:$index))),
diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp
index d07a595af38a..ffbd83b1bbbf 100644
--- a/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -177,7 +177,7 @@ namespace {
class Filler : public MachineFunctionPass {
public:
Filler(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { }
+ : MachineFunctionPass(ID), TM(tm) { }
virtual const char *getPassName() const {
return "Mips Delay Slot Filler";
@@ -243,7 +243,6 @@ namespace {
bool terminateSearch(const MachineInstr &Candidate) const;
TargetMachine &TM;
- const TargetInstrInfo *TII;
static char ID;
};
@@ -422,8 +421,7 @@ bool LoadFromStackOrConst::hasHazard_(const MachineInstr &MI) {
return false;
if (const PseudoSourceValue *PSV = dyn_cast<const PseudoSourceValue>(V))
- return !PSV->PseudoSourceValue::isConstant(0) &&
- (V != PseudoSourceValue::getStack());
+ return !PSV->isConstant(0) && V != PseudoSourceValue::getStack();
return true;
}
@@ -438,7 +436,7 @@ bool MemDefsUses::hasHazard_(const MachineInstr &MI) {
// Check underlying object list.
if (getUnderlyingObjects(MI, Objs)) {
- for (SmallVector<const Value *, 4>::const_iterator I = Objs.begin();
+ for (SmallVectorImpl<const Value *>::const_iterator I = Objs.begin();
I != Objs.end(); ++I)
HasHazard |= updateDefsUses(*I, MI.mayStore());
@@ -474,7 +472,7 @@ getUnderlyingObjects(const MachineInstr &MI,
SmallVector<Value *, 4> Objs;
GetUnderlyingObjects(const_cast<Value *>(V), Objs);
- for (SmallVector<Value*, 4>::iterator I = Objs.begin(), E = Objs.end();
+ for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), E = Objs.end();
I != E; ++I) {
if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(*I)) {
if (PSV->isAliased(MFI))
@@ -514,6 +512,8 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
}
// Bundle the NOP to the instruction with the delay slot.
+ const MipsInstrInfo *TII =
+ static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
BuildMI(MBB, llvm::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
MIBundleBuilder(MBB, I, llvm::next(llvm::next(I)));
}
@@ -562,14 +562,13 @@ bool Filler::searchBackward(MachineBasicBlock &MBB, Iter Slot) const {
RegDU.init(*Slot);
- if (searchRange(MBB, ReverseIter(Slot), MBB.rend(), RegDU, MemDU, Filler)) {
- MBB.splice(llvm::next(Slot), &MBB, llvm::next(Filler).base());
- MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
- ++UsefulSlots;
- return true;
- }
+ if (!searchRange(MBB, ReverseIter(Slot), MBB.rend(), RegDU, MemDU, Filler))
+ return false;
- return false;
+ MBB.splice(llvm::next(Slot), &MBB, llvm::next(Filler).base());
+ MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
+ ++UsefulSlots;
+ return true;
}
bool Filler::searchForward(MachineBasicBlock &MBB, Iter Slot) const {
@@ -583,14 +582,13 @@ bool Filler::searchForward(MachineBasicBlock &MBB, Iter Slot) const {
RegDU.setCallerSaved(*Slot);
- if (searchRange(MBB, llvm::next(Slot), MBB.end(), RegDU, NM, Filler)) {
- MBB.splice(llvm::next(Slot), &MBB, Filler);
- MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
- ++UsefulSlots;
- return true;
- }
+ if (!searchRange(MBB, llvm::next(Slot), MBB.end(), RegDU, NM, Filler))
+ return false;
- return false;
+ MBB.splice(llvm::next(Slot), &MBB, Filler);
+ MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
+ ++UsefulSlots;
+ return true;
}
bool Filler::searchSuccBBs(MachineBasicBlock &MBB, Iter Slot) const {
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp
index 968e5364842f..c417bd593413 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -57,7 +57,8 @@ bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
/// GOT address into a register.
SDNode *MipsDAGToDAGISel::getGlobalBaseReg() {
unsigned GlobalBaseReg = MF->getInfo<MipsFunctionInfo>()->getGlobalBaseReg();
- return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
+ return CurDAG->getRegister(GlobalBaseReg,
+ getTargetLowering()->getPointerTy()).getNode();
}
/// ComplexPattern used on MipsInstrInfo
@@ -68,6 +69,12 @@ bool MipsDAGToDAGISel::selectAddrRegImm(SDValue Addr, SDValue &Base,
return false;
}
+bool MipsDAGToDAGISel::selectAddrRegReg(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
bool MipsDAGToDAGISel::selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
llvm_unreachable("Unimplemented function.");
@@ -80,12 +87,83 @@ bool MipsDAGToDAGISel::selectIntAddr(SDValue Addr, SDValue &Base,
return false;
}
+bool MipsDAGToDAGISel::selectIntAddrMM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
bool MipsDAGToDAGISel::selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
SDValue &Offset, SDValue &Alias) {
llvm_unreachable("Unimplemented function.");
return false;
}
+bool MipsDAGToDAGISel::selectVSplat(SDNode *N, APInt &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm1(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm2(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm3(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm4(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm6(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm8(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimmPow2(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatMaskL(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatMaskR(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
/// Select instructions not customized! Used for
/// expanded, promoted and normal instructions
SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
@@ -97,6 +175,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ Node->setNodeId(-1);
return NULL;
}
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.h b/lib/Target/Mips/MipsISelDAGToDAG.h
index cf0f9c58aa9c..a4d9da532b2e 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsISelDAGToDAG.h
@@ -57,6 +57,11 @@ private:
virtual bool selectAddrRegImm(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ // Complex Pattern.
+ /// (reg + reg).
+ virtual bool selectAddrRegReg(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
/// Fall back on this function if all else fails.
virtual bool selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
@@ -65,9 +70,42 @@ private:
virtual bool selectIntAddr(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ virtual bool selectIntAddrMM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
virtual bool selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
SDValue &Offset, SDValue &Alias);
+ /// \brief Select constant vector splats.
+ virtual bool selectVSplat(SDNode *N, APInt &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm1.
+ virtual bool selectVSplatUimm1(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm2.
+ virtual bool selectVSplatUimm2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm3.
+ virtual bool selectVSplatUimm3(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm4.
+ virtual bool selectVSplatUimm4(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm5.
+ virtual bool selectVSplatUimm5(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm6.
+ virtual bool selectVSplatUimm6(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm8.
+ virtual bool selectVSplatUimm8(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a simm5.
+ virtual bool selectVSplatSimm5(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a power of 2.
+ virtual bool selectVSplatUimmPow2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is the inverse of a
+ /// power of 2.
+ virtual bool selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a run of set bits
+ /// ending at the most significant bit
+ virtual bool selectVSplatMaskL(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a run of set bits
+ /// starting at bit zero.
+ virtual bool selectVSplatMaskR(SDValue N, SDValue &Imm) const;
+
virtual SDNode *Select(SDNode *N);
virtual std::pair<bool, SDNode*> selectNode(SDNode *Node) = 0;
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 4d76181f9215..1e8250c847fe 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -20,6 +20,7 @@
#include "MipsTargetMachine.h"
#include "MipsTargetObjectFile.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -34,6 +35,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include <cctype>
using namespace llvm;
@@ -43,6 +45,11 @@ static cl::opt<bool>
LargeGOT("mxgot", cl::Hidden,
cl::desc("MIPS: Enable GOT larger than 64k."), cl::init(false));
+static cl::opt<bool>
+NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
+ cl::desc("MIPS: Don't trap on integer division by zero."),
+ cl::init(false));
+
static const uint16_t O32IntRegs[4] = {
Mips::A0, Mips::A1, Mips::A2, Mips::A3
};
@@ -62,10 +69,10 @@ static const uint16_t Mips64DPRegs[8] = {
// For example, if I is 0x003ff800, (Pos, Size) = (11, 11).
static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
if (!isShiftedMask_64(I))
- return false;
+ return false;
Size = CountPopulation_64(I);
- Pos = CountTrailingZeros_64(I);
+ Pos = countTrailingZeros(I);
return true;
}
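The hunk above only swaps the deprecated CountTrailingZeros_64 for countTrailingZeros; the helper's contract is unchanged. A minimal standalone equivalent, kept to the example in the comment (illustrative only, not the LLVM helper):

#include <cassert>
#include <cstdint>

// Returns true if I is a contiguous run of set bits; on success, Pos is the
// index of the lowest set bit and Size the length of the run.
// Example from the comment above: 0x003ff800 -> (Pos, Size) = (11, 11).
static bool isShiftedMaskSketch(uint64_t I, uint64_t &Pos, uint64_t &Size) {
  if (I == 0)
    return false;
  uint64_t P = 0;
  while (!(I & 1)) { I >>= 1; ++P; }   // count trailing zeros
  uint64_t S = 0;
  while (I & 1) { I >>= 1; ++S; }      // count the run of ones
  if (I != 0)                          // more set bits follow: not contiguous
    return false;
  Pos = P;
  Size = S;
  return true;
}

int main() {
  uint64_t Pos, Size;
  assert(isShiftedMaskSketch(0x003ff800, Pos, Size) && Pos == 11 && Size == 11);
  assert(!isShiftedMaskSketch(0x5, Pos, Size));
  return 0;
}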
@@ -74,72 +81,35 @@ SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
return DAG.getRegister(FI->getGlobalBaseReg(), Ty);
}
-static SDValue getTargetNode(SDValue Op, SelectionDAG &DAG, unsigned Flag) {
- EVT Ty = Op.getValueType();
+SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
+}
- if (GlobalAddressSDNode *N = dyn_cast<GlobalAddressSDNode>(Op))
- return DAG.getTargetGlobalAddress(N->getGlobal(), Op.getDebugLoc(), Ty, 0,
- Flag);
- if (ExternalSymbolSDNode *N = dyn_cast<ExternalSymbolSDNode>(Op))
- return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
- if (BlockAddressSDNode *N = dyn_cast<BlockAddressSDNode>(Op))
- return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
- if (JumpTableSDNode *N = dyn_cast<JumpTableSDNode>(Op))
- return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
- if (ConstantPoolSDNode *N = dyn_cast<ConstantPoolSDNode>(Op))
- return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
- N->getOffset(), Flag);
-
- llvm_unreachable("Unexpected node type.");
- return SDValue();
+SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
}
-static SDValue getAddrNonPIC(SDValue Op, SelectionDAG &DAG) {
- DebugLoc DL = Op.getDebugLoc();
- EVT Ty = Op.getValueType();
- SDValue Hi = getTargetNode(Op, DAG, MipsII::MO_ABS_HI);
- SDValue Lo = getTargetNode(Op, DAG, MipsII::MO_ABS_LO);
- return DAG.getNode(ISD::ADD, DL, Ty,
- DAG.getNode(MipsISD::Hi, DL, Ty, Hi),
- DAG.getNode(MipsISD::Lo, DL, Ty, Lo));
+SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}
-SDValue MipsTargetLowering::getAddrLocal(SDValue Op, SelectionDAG &DAG,
- bool HasMips64) const {
- DebugLoc DL = Op.getDebugLoc();
- EVT Ty = Op.getValueType();
- unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
- SDValue GOT = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
- getTargetNode(Op, DAG, GOTFlag));
- SDValue Load = DAG.getLoad(Ty, DL, DAG.getEntryNode(), GOT,
- MachinePointerInfo::getGOT(), false, false, false,
- 0);
- unsigned LoFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
- SDValue Lo = DAG.getNode(MipsISD::Lo, DL, Ty, getTargetNode(Op, DAG, LoFlag));
- return DAG.getNode(ISD::ADD, DL, Ty, Load, Lo);
-}
-
-SDValue MipsTargetLowering::getAddrGlobal(SDValue Op, SelectionDAG &DAG,
+SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
unsigned Flag) const {
- DebugLoc DL = Op.getDebugLoc();
- EVT Ty = Op.getValueType();
- SDValue Tgt = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
- getTargetNode(Op, DAG, Flag));
- return DAG.getLoad(Ty, DL, DAG.getEntryNode(), Tgt,
- MachinePointerInfo::getGOT(), false, false, false, 0);
+ return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}
-SDValue MipsTargetLowering::getAddrGlobalLargeGOT(SDValue Op, SelectionDAG &DAG,
- unsigned HiFlag,
- unsigned LoFlag) const {
- DebugLoc DL = Op.getDebugLoc();
- EVT Ty = Op.getValueType();
- SDValue Hi = DAG.getNode(MipsISD::Hi, DL, Ty, getTargetNode(Op, DAG, HiFlag));
- Hi = DAG.getNode(ISD::ADD, DL, Ty, Hi, getGlobalReg(DAG, Ty));
- SDValue Wrapper = DAG.getNode(MipsISD::Wrapper, DL, Ty, Hi,
- getTargetNode(Op, DAG, LoFlag));
- return DAG.getLoad(Ty, DL, DAG.getEntryNode(), Wrapper,
- MachinePointerInfo::getGOT(), false, false, false, 0);
+SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
+ N->getOffset(), Flag);
}
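The refactoring above replaces one getTargetNode that dispatched on the dynamic node kind through a dyn_cast chain with one overload per address-node type, so the getAddr* helpers pick the right construction at compile time and subclasses can add node kinds. A generic sketch of the pattern, with stand-in types rather than the SelectionDAG API:

#include <iostream>

// Stand-ins for the different address node kinds.
struct GlobalAddrNode  { const char *Name; };
struct ExternalSymNode { const char *Sym; };
struct JumpTableNode   { int Index; };

// One overload per node kind instead of a runtime dyn_cast<> chain: the
// caller's static type selects the right "target node" construction.
void emitTargetNode(const GlobalAddrNode &N)  { std::cout << "global "  << N.Name  << "\n"; }
void emitTargetNode(const ExternalSymNode &N) { std::cout << "symbol "  << N.Sym   << "\n"; }
void emitTargetNode(const JumpTableNode &N)   { std::cout << "jumptab " << N.Index << "\n"; }

int main() {
  emitTargetNode(GlobalAddrNode{"foo"});
  emitTargetNode(ExternalSymNode{"memcpy"});
  emitTargetNode(JumpTableNode{3});
  return 0;
}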
const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -156,9 +126,10 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::FPCmp: return "MipsISD::FPCmp";
case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
- case MipsISD::FPRound: return "MipsISD::FPRound";
- case MipsISD::ExtractLOHI: return "MipsISD::ExtractLOHI";
- case MipsISD::InsertLOHI: return "MipsISD::InsertLOHI";
+ case MipsISD::TruncIntFP: return "MipsISD::TruncIntFP";
+ case MipsISD::MFHI: return "MipsISD::MFHI";
+ case MipsISD::MFLO: return "MipsISD::MFLO";
+ case MipsISD::MTLOHI: return "MipsISD::MTLOHI";
case MipsISD::Mult: return "MipsISD::Mult";
case MipsISD::Multu: return "MipsISD::Multu";
case MipsISD::MAdd: return "MipsISD::MAdd";
@@ -202,6 +173,30 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP";
case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP";
case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP";
+ case MipsISD::VALL_ZERO: return "MipsISD::VALL_ZERO";
+ case MipsISD::VANY_ZERO: return "MipsISD::VANY_ZERO";
+ case MipsISD::VALL_NONZERO: return "MipsISD::VALL_NONZERO";
+ case MipsISD::VANY_NONZERO: return "MipsISD::VANY_NONZERO";
+ case MipsISD::VCEQ: return "MipsISD::VCEQ";
+ case MipsISD::VCLE_S: return "MipsISD::VCLE_S";
+ case MipsISD::VCLE_U: return "MipsISD::VCLE_U";
+ case MipsISD::VCLT_S: return "MipsISD::VCLT_S";
+ case MipsISD::VCLT_U: return "MipsISD::VCLT_U";
+ case MipsISD::VSMAX: return "MipsISD::VSMAX";
+ case MipsISD::VSMIN: return "MipsISD::VSMIN";
+ case MipsISD::VUMAX: return "MipsISD::VUMAX";
+ case MipsISD::VUMIN: return "MipsISD::VUMIN";
+ case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
+ case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
+ case MipsISD::VNOR: return "MipsISD::VNOR";
+ case MipsISD::VSHF: return "MipsISD::VSHF";
+ case MipsISD::SHF: return "MipsISD::SHF";
+ case MipsISD::ILVEV: return "MipsISD::ILVEV";
+ case MipsISD::ILVOD: return "MipsISD::ILVOD";
+ case MipsISD::ILVL: return "MipsISD::ILVL";
+ case MipsISD::ILVR: return "MipsISD::ILVR";
+ case MipsISD::PCKEV: return "MipsISD::PCKEV";
+ case MipsISD::PCKOD: return "MipsISD::PCKOD";
default: return NULL;
}
}
@@ -250,6 +245,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
if (!TM.Options.NoNaNsFPMath) {
setOperationAction(ISD::FABS, MVT::f32, Custom);
@@ -265,6 +261,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::SELECT, MVT::i64, Custom);
setOperationAction(ISD::LOAD, MVT::i64, Custom);
setOperationAction(ISD::STORE, MVT::i64, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
}
if (!HasMips64) {
@@ -339,11 +336,6 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::FNEG, MVT::f64, Expand);
}
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
-
setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
setOperationAction(ISD::VAARG, MVT::Other, Expand);
@@ -383,6 +375,8 @@ MipsTargetLowering(MipsTargetMachine &TM)
setTruncStoreAction(MVT::i64, MVT::i32, Custom);
}
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+
setTargetDAGCombine(ISD::SDIVREM);
setTargetDAGCombine(ISD::UDIVREM);
setTargetDAGCombine(ISD::SELECT);
@@ -407,7 +401,7 @@ const MipsTargetLowering *MipsTargetLowering::create(MipsTargetMachine &TM) {
return llvm::createMipsSETargetLowering(TM);
}
-EVT MipsTargetLowering::getSetCCResultType(EVT VT) const {
+EVT MipsTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
if (!VT.isVector())
return MVT::i32;
return VT.changeVectorElementTypeToInteger();
@@ -420,11 +414,11 @@ static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
EVT Ty = N->getValueType(0);
- unsigned LO = (Ty == MVT::i32) ? Mips::LO : Mips::LO64;
- unsigned HI = (Ty == MVT::i32) ? Mips::HI : Mips::HI64;
+ unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
+ unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 :
MipsISD::DivRemU16;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
N->getOperand(0), N->getOperand(1));
@@ -502,7 +496,7 @@ static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
return Op;
SDValue RHS = Op.getOperand(1);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
// node if necessary.
@@ -514,12 +508,13 @@ static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
// Creates and returns a CMovFPT/F node.
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
- SDValue False, DebugLoc DL) {
+ SDValue False, SDLoc DL) {
ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
+ SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
- True.getValueType(), True, False, Cond);
+ True.getValueType(), True, FCC0, False, Cond);
}
static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
@@ -545,7 +540,7 @@ static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (!CN || CN->getZExtValue())
return SDValue();
- const DebugLoc DL = N->getDebugLoc();
+ const SDLoc DL(N);
ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
SDValue True = N->getOperand(1);
@@ -561,7 +556,7 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
// Pattern match EXT.
// $dst = and ((sra or srl) $src , pos), (2**size - 1)
// => ext $dst, $src, size, pos
- if (DCI.isBeforeLegalizeOps() || !Subtarget->hasMips32r2())
+ if (DCI.isBeforeLegalizeOps() || !Subtarget->hasExtractInsert())
return SDValue();
SDValue ShiftRight = N->getOperand(0), Mask = N->getOperand(1);
@@ -590,7 +585,7 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
return SDValue();
- return DAG.getNode(MipsISD::Ext, N->getDebugLoc(), ValTy,
+ return DAG.getNode(MipsISD::Ext, SDLoc(N), ValTy,
ShiftRight.getOperand(0), DAG.getConstant(Pos, MVT::i32),
DAG.getConstant(SMSize, MVT::i32));
}
@@ -602,7 +597,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
// $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
// where mask1 = (2**size - 1) << pos, mask0 = ~mask1
// => ins $dst, $src, size, pos, $src1
- if (DCI.isBeforeLegalizeOps() || !Subtarget->hasMips32r2())
+ if (DCI.isBeforeLegalizeOps() || !Subtarget->hasExtractInsert())
return SDValue();
SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
@@ -644,7 +639,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
return SDValue();
- return DAG.getNode(MipsISD::Ins, N->getDebugLoc(), ValTy, Shl.getOperand(0),
+ return DAG.getNode(MipsISD::Ins, SDLoc(N), ValTy, Shl.getOperand(0),
DAG.getConstant(SMPos0, MVT::i32),
DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0));
}
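Both combines above now key on hasExtractInsert() instead of hasMips32r2(), but the patterns they recognize are unchanged: an AND of a shift by a power-of-two-minus-one mask becomes a bitfield extract (ext), and an OR of two complementary masked values becomes a bitfield insert (ins). The scalar equivalents, as a standalone sketch of what those instructions compute (assumes 0 < Size < 32):

#include <cassert>
#include <cstdint>

// What `ext $dst, $src, pos, size` computes:
//   $dst = and (srl $src, pos), (2**size - 1)
static uint32_t bitfieldExtract(uint32_t Src, unsigned Pos, unsigned Size) {
  return (Src >> Pos) & ((1u << Size) - 1);
}

// What `ins $dst, $src, pos, size` computes, with $dst initially holding Dst:
//   mask1 = (2**size - 1) << pos, mask0 = ~mask1
//   $dst = or (and Dst, mask0), (and (shl Src, pos), mask1)
static uint32_t bitfieldInsert(uint32_t Dst, uint32_t Src, unsigned Pos,
                               unsigned Size) {
  uint32_t Mask1 = ((1u << Size) - 1) << Pos;
  return (Dst & ~Mask1) | ((Src << Pos) & Mask1);
}

int main() {
  assert(bitfieldExtract(0x12345678, 8, 8) == 0x56);
  assert(bitfieldInsert(0xAAAAAAAA, 0xFF, 8, 8) == 0xAAAAFFAA);
  return 0;
}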
@@ -669,7 +664,7 @@ static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
EVT ValTy = N->getValueType(0);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
Add.getOperand(0));
@@ -744,6 +739,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
case ISD::LOAD: return lowerLOAD(Op, DAG);
case ISD::STORE: return lowerSTORE(Op, DAG);
case ISD::ADD: return lowerADD(Op, DAG);
+ case ISD::FP_TO_SINT: return lowerFP_TO_SINT(Op, DAG);
}
return SDValue();
}
@@ -763,6 +759,30 @@ addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
return VReg;
}
+static MachineBasicBlock *expandPseudoDIV(MachineInstr *MI,
+ MachineBasicBlock &MBB,
+ const TargetInstrInfo &TII,
+ bool Is64Bit) {
+ if (NoZeroDivCheck)
+ return &MBB;
+
+ // Insert instruction "teq $divisor_reg, $zero, 7".
+ MachineBasicBlock::iterator I(MI);
+ MachineInstrBuilder MIB;
+ MachineOperand &Divisor = MI->getOperand(2);
+ MIB = BuildMI(MBB, llvm::next(I), MI->getDebugLoc(), TII.get(Mips::TEQ))
+ .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
+ .addReg(Mips::ZERO).addImm(7);
+
+ // Use the 32-bit sub-register if this is a 64-bit division.
+ if (Is64Bit)
+ MIB->getOperand(0).setSubReg(Mips::sub_32);
+
+ // Clear Divisor's kill flag.
+ Divisor.setIsKill(false);
+ return &MBB;
+}
+
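expandPseudoDIV above (skipped when -mno-check-zero-division is given) inserts `teq $divisor, $zero, 7` after the divide, so a zero divisor raises a trap instead of silently producing an undefined result. A hedged C-level picture of the behaviour the guard provides; the real check is a hardware trap instruction, not a library call:

#include <csignal>
#include <cstdint>

// Semantics of the emitted sequence, roughly:
//   div  $zero, $rs, $rt   ; quotient/remainder land in LO/HI
//   teq  $rt, $zero, 7     ; trap (code 7, divide-by-zero) if $rt == 0
static int32_t checkedDiv(int32_t Num, int32_t Den) {
  if (Den == 0)
    std::raise(SIGFPE);     // analogous to the conditional trap
  return Num / Den;
}

int main() {
  return checkedDiv(42, 7) == 6 ? 0 : 1;
}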
MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
@@ -770,108 +790,82 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
default:
llvm_unreachable("Unexpected instr type to insert");
case Mips::ATOMIC_LOAD_ADD_I8:
- case Mips::ATOMIC_LOAD_ADD_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I16:
- case Mips::ATOMIC_LOAD_ADD_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I32:
- case Mips::ATOMIC_LOAD_ADD_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I64:
- case Mips::ATOMIC_LOAD_ADD_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::DADDu);
case Mips::ATOMIC_LOAD_AND_I8:
- case Mips::ATOMIC_LOAD_AND_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I16:
- case Mips::ATOMIC_LOAD_AND_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I32:
- case Mips::ATOMIC_LOAD_AND_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I64:
- case Mips::ATOMIC_LOAD_AND_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::AND64);
case Mips::ATOMIC_LOAD_OR_I8:
- case Mips::ATOMIC_LOAD_OR_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I16:
- case Mips::ATOMIC_LOAD_OR_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I32:
- case Mips::ATOMIC_LOAD_OR_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I64:
- case Mips::ATOMIC_LOAD_OR_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::OR64);
case Mips::ATOMIC_LOAD_XOR_I8:
- case Mips::ATOMIC_LOAD_XOR_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I16:
- case Mips::ATOMIC_LOAD_XOR_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I32:
- case Mips::ATOMIC_LOAD_XOR_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I64:
- case Mips::ATOMIC_LOAD_XOR_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::XOR64);
case Mips::ATOMIC_LOAD_NAND_I8:
- case Mips::ATOMIC_LOAD_NAND_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, 0, true);
case Mips::ATOMIC_LOAD_NAND_I16:
- case Mips::ATOMIC_LOAD_NAND_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, 0, true);
case Mips::ATOMIC_LOAD_NAND_I32:
- case Mips::ATOMIC_LOAD_NAND_I32_P8:
return emitAtomicBinary(MI, BB, 4, 0, true);
case Mips::ATOMIC_LOAD_NAND_I64:
- case Mips::ATOMIC_LOAD_NAND_I64_P8:
return emitAtomicBinary(MI, BB, 8, 0, true);
case Mips::ATOMIC_LOAD_SUB_I8:
- case Mips::ATOMIC_LOAD_SUB_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I16:
- case Mips::ATOMIC_LOAD_SUB_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I32:
- case Mips::ATOMIC_LOAD_SUB_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I64:
- case Mips::ATOMIC_LOAD_SUB_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::DSUBu);
case Mips::ATOMIC_SWAP_I8:
- case Mips::ATOMIC_SWAP_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, 0);
case Mips::ATOMIC_SWAP_I16:
- case Mips::ATOMIC_SWAP_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, 0);
case Mips::ATOMIC_SWAP_I32:
- case Mips::ATOMIC_SWAP_I32_P8:
return emitAtomicBinary(MI, BB, 4, 0);
case Mips::ATOMIC_SWAP_I64:
- case Mips::ATOMIC_SWAP_I64_P8:
return emitAtomicBinary(MI, BB, 8, 0);
case Mips::ATOMIC_CMP_SWAP_I8:
- case Mips::ATOMIC_CMP_SWAP_I8_P8:
return emitAtomicCmpSwapPartword(MI, BB, 1);
case Mips::ATOMIC_CMP_SWAP_I16:
- case Mips::ATOMIC_CMP_SWAP_I16_P8:
return emitAtomicCmpSwapPartword(MI, BB, 2);
case Mips::ATOMIC_CMP_SWAP_I32:
- case Mips::ATOMIC_CMP_SWAP_I32_P8:
return emitAtomicCmpSwap(MI, BB, 4);
case Mips::ATOMIC_CMP_SWAP_I64:
- case Mips::ATOMIC_CMP_SWAP_I64_P8:
return emitAtomicCmpSwap(MI, BB, 8);
+ case Mips::PseudoSDIV:
+ case Mips::PseudoUDIV:
+ return expandPseudoDIV(MI, *BB, *getTargetMachine().getInstrInfo(), false);
+ case Mips::PseudoDSDIV:
+ case Mips::PseudoDUDIV:
+ return expandPseudoDIV(MI, *BB, *getTargetMachine().getInstrInfo(), true);
}
}
@@ -891,16 +885,16 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned LL, SC, AND, NOR, ZERO, BEQ;
if (Size == 4) {
- LL = IsN64 ? Mips::LL_P8 : Mips::LL;
- SC = IsN64 ? Mips::SC_P8 : Mips::SC;
+ LL = Mips::LL;
+ SC = Mips::SC;
AND = Mips::AND;
NOR = Mips::NOR;
ZERO = Mips::ZERO;
BEQ = Mips::BEQ;
}
else {
- LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
- SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
+ LL = Mips::LLD;
+ SC = Mips::SCD;
AND = Mips::AND64;
NOR = Mips::NOR64;
ZERO = Mips::ZERO_64;
@@ -926,8 +920,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
@@ -958,7 +951,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
BuildMI(BB, DL, TII->get(SC), Success).addReg(StoreVal).addReg(Ptr).addImm(0);
BuildMI(BB, DL, TII->get(BEQ)).addReg(Success).addReg(ZERO).addMBB(loopMBB);
- MI->eraseFromParent(); // The instruction is gone now.
+ MI->eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
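The loop built here (ll, apply the binary op, sc, branch back on failure) is the classic load-linked/store-conditional retry loop. As a portable analogy only, the same shape is what a compare-exchange retry loop expresses in C++; this is not what the backend emits, just the structure of the algorithm:

#include <atomic>
#include <cassert>

// Retry loop with the same shape as the ll/sc sequence above: load the old
// value, compute old + incr, and retry until the store succeeds.
static int atomicFetchAdd(std::atomic<int> &Mem, int Incr) {
  int Old = Mem.load(std::memory_order_relaxed);
  while (!Mem.compare_exchange_weak(Old, Old + Incr,
                                    std::memory_order_seq_cst,
                                    std::memory_order_relaxed)) {
    // Old is reloaded with the current value; loop and try again, just as
    // `beq success, $0, loopMBB` re-runs the ll/sc body.
  }
  return Old;
}

int main() {
  std::atomic<int> V(5);
  assert(atomicFetchAdd(V, 3) == 5 && V.load() == 8);
  return 0;
}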
@@ -969,15 +962,13 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
unsigned Size, unsigned BinOpcode,
bool Nand) const {
assert((Size == 1 || Size == 2) &&
- "Unsupported size for EmitAtomicBinaryPartial.");
+ "Unsupported size for EmitAtomicBinaryPartial.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
- unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
- unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1039,13 +1030,20 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
.addReg(Ptr).addReg(MaskLSB2);
BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
- BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
+ if (Subtarget->isLittle()) {
+ BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
+ } else {
+ unsigned Off = RegInfo.createVirtualRegister(RC);
+ BuildMI(BB, DL, TII->get(Mips::XORi), Off)
+ .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
+ BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
+ }
BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
.addReg(Mips::ZERO).addImm(MaskImm);
BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
- .addReg(ShiftAmt).addReg(MaskUpper);
+ .addReg(MaskUpper).addReg(ShiftAmt);
BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
- BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(ShiftAmt).addReg(Incr);
+ BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);
// atomic.load.binop
// loopMBB:
@@ -1067,7 +1065,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
// beq success,$0,loopMBB
BB = loopMBB;
- BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
if (Nand) {
// and andres, oldval, incr2
// nor binopres, $0, andres
@@ -1081,7 +1079,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
// and newval, binopres, mask
BuildMI(BB, DL, TII->get(BinOpcode), BinOpRes).addReg(OldVal).addReg(Incr2);
BuildMI(BB, DL, TII->get(Mips::AND), NewVal).addReg(BinOpRes).addReg(Mask);
- } else {// atomic.swap
+ } else { // atomic.swap
// and newval, incr2, mask
BuildMI(BB, DL, TII->get(Mips::AND), NewVal).addReg(Incr2).addReg(Mask);
}
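The hunks above change how ShiftAmt is formed for the sub-word atomics: on little-endian targets the 1- or 2-byte lane starts at bit (ptr & 3) * 8, while on big-endian targets the lowest address holds the most significant byte, so the byte offset is XORed with 3 (or with 2 for halfwords) before scaling. The SLLV/SRLV operands are also swapped into the value-then-shift-amount order the instructions expect. A standalone sketch of the lane computation only:

#include <cassert>

// Bit position of a 1- or 2-byte lane inside its aligned 32-bit word.
// Size is the access size in bytes (1 or 2); PtrLSB2 is the low two address bits.
static unsigned laneShift(unsigned PtrLSB2, unsigned Size, bool LittleEndian) {
  if (LittleEndian)
    return PtrLSB2 * 8;                 // sll shiftamt, ptrlsb2, 3
  unsigned Xor = (Size == 1) ? 3 : 2;   // xori off, ptrlsb2, 3 (or 2)
  return (PtrLSB2 ^ Xor) * 8;           // sll shiftamt, off, 3
}

int main() {
  // Byte at offset 0: bit 0 on little-endian, bit 24 on big-endian.
  assert(laneShift(0, 1, true) == 0 && laneShift(0, 1, false) == 24);
  // Halfword at offset 2: bit 16 on little-endian, bit 0 on big-endian.
  assert(laneShift(2, 2, true) == 16 && laneShift(2, 2, false) == 0);
  return 0;
}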
@@ -1090,7 +1088,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal0).addReg(NewVal);
- BuildMI(BB, DL, TII->get(SC), Success)
+ BuildMI(BB, DL, TII->get(Mips::SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
@@ -1106,21 +1104,20 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal1)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes)
- .addReg(ShiftAmt).addReg(MaskedOldVal1);
+ .addReg(MaskedOldVal1).addReg(ShiftAmt);
BuildMI(BB, DL, TII->get(Mips::SLL), SllRes)
.addReg(SrlRes).addImm(ShiftImm);
BuildMI(BB, DL, TII->get(Mips::SRA), Dest)
.addReg(SllRes).addImm(ShiftImm);
- MI->eraseFromParent(); // The instruction is gone now.
+ MI->eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
-MachineBasicBlock *
-MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size) const {
+MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned Size) const {
assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicCmpSwap.");
MachineFunction *MF = BB->getParent();
@@ -1131,15 +1128,14 @@ MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
unsigned LL, SC, ZERO, BNE, BEQ;
if (Size == 4) {
- LL = IsN64 ? Mips::LL_P8 : Mips::LL;
- SC = IsN64 ? Mips::SC_P8 : Mips::SC;
+ LL = Mips::LL;
+ SC = Mips::SC;
ZERO = Mips::ZERO;
BNE = Mips::BNE;
BEQ = Mips::BEQ;
- }
- else {
- LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
- SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
+ } else {
+ LL = Mips::LLD;
+ SC = Mips::SCD;
ZERO = Mips::ZERO_64;
BNE = Mips::BNE64;
BEQ = Mips::BEQ64;
@@ -1194,7 +1190,7 @@ MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
BuildMI(BB, DL, TII->get(BEQ))
.addReg(Success).addReg(ZERO).addMBB(loop1MBB);
- MI->eraseFromParent(); // The instruction is gone now.
+ MI->eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
@@ -1211,8 +1207,6 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
- unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
- unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1282,27 +1276,34 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
.addReg(Ptr).addReg(MaskLSB2);
BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
- BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
+ if (Subtarget->isLittle()) {
+ BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
+ } else {
+ unsigned Off = RegInfo.createVirtualRegister(RC);
+ BuildMI(BB, DL, TII->get(Mips::XORi), Off)
+ .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
+ BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
+ }
BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
.addReg(Mips::ZERO).addImm(MaskImm);
BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
- .addReg(ShiftAmt).addReg(MaskUpper);
+ .addReg(MaskUpper).addReg(ShiftAmt);
BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
.addReg(CmpVal).addImm(MaskImm);
BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
- .addReg(ShiftAmt).addReg(MaskedCmpVal);
+ .addReg(MaskedCmpVal).addReg(ShiftAmt);
BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
.addReg(NewVal).addImm(MaskImm);
BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
- .addReg(ShiftAmt).addReg(MaskedNewVal);
+ .addReg(MaskedNewVal).addReg(ShiftAmt);
// loop1MBB:
  // ll oldval,0(alignedaddr)
// and maskedoldval0,oldval,mask
// bne maskedoldval0,shiftedcmpval,sinkMBB
BB = loop1MBB;
- BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, DL, TII->get(Mips::BNE))
@@ -1318,7 +1319,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal1).addReg(ShiftedNewVal);
- BuildMI(BB, DL, TII->get(SC), Success)
+ BuildMI(BB, DL, TII->get(Mips::SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
@@ -1331,7 +1332,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
int64_t ShiftImm = (Size == 1) ? 24 : 16;
BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes)
- .addReg(ShiftAmt).addReg(MaskedOldVal0);
+ .addReg(MaskedOldVal0).addReg(ShiftAmt);
BuildMI(BB, DL, TII->get(Mips::SLL), SllRes)
.addReg(SrlRes).addImm(ShiftImm);
BuildMI(BB, DL, TII->get(Mips::SRA), Dest)
@@ -1349,7 +1350,7 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Table = Op.getOperand(1);
SDValue Index = Op.getOperand(2);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT PTy = getPointerTy();
unsigned EntrySize =
DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(*getDataLayout());
@@ -1375,14 +1376,12 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::BRIND, DL, MVT::Other, Chain, Addr);
}
-SDValue MipsTargetLowering::
-lowerBRCOND(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
// The first operand is the chain, the second is the condition, the third is
// the block to branch to if the condition is true.
SDValue Chain = Op.getOperand(0);
SDValue Dest = Op.getOperand(2);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));
@@ -1395,8 +1394,9 @@ lowerBRCOND(SDValue Op, SelectionDAG &DAG) const
(Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
SDValue BrCode = DAG.getConstant(Opc, MVT::i32);
+ SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
- Dest, CondRes);
+ FCC0, Dest, CondRes);
}
SDValue MipsTargetLowering::
@@ -1409,15 +1409,16 @@ lowerSELECT(SDValue Op, SelectionDAG &DAG) const
return Op;
return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
- Op.getDebugLoc());
+ SDLoc(Op));
}
SDValue MipsTargetLowering::
lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT Ty = Op.getOperand(0).getValueType();
- SDValue Cond = DAG.getNode(ISD::SETCC, DL, getSetCCResultType(Ty),
+ SDValue Cond = DAG.getNode(ISD::SETCC, DL,
+ getSetCCResultType(*DAG.getContext(), Ty),
Op.getOperand(0), Op.getOperand(1),
Op.getOperand(4));
@@ -1434,14 +1435,16 @@ SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue True = DAG.getConstant(1, MVT::i32);
SDValue False = DAG.getConstant(0, MVT::i32);
- return createCMovFP(DAG, Cond, True, False, Op.getDebugLoc());
+ return createCMovFP(DAG, Cond, True, False, SDLoc(Op));
}
SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
// FIXME there isn't actually debug info here
- DebugLoc DL = Op.getDebugLoc();
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ SDLoc DL(Op);
+ EVT Ty = Op.getValueType();
+ GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
+ const GlobalValue *GV = N->getGlobal();
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
const MipsTargetObjectFile &TLOF =
@@ -1458,26 +1461,31 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
}
// %hi/%lo relocation
- return getAddrNonPIC(Op, DAG);
+ return getAddrNonPIC(N, Ty, DAG);
}
if (GV->hasInternalLinkage() || (GV->hasLocalLinkage() && !isa<Function>(GV)))
- return getAddrLocal(Op, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG, HasMips64);
if (LargeGOT)
- return getAddrGlobalLargeGOT(Op, DAG, MipsII::MO_GOT_HI16,
- MipsII::MO_GOT_LO16);
+ return getAddrGlobalLargeGOT(N, Ty, DAG, MipsII::MO_GOT_HI16,
+ MipsII::MO_GOT_LO16, DAG.getEntryNode(),
+ MachinePointerInfo::getGOT());
- return getAddrGlobal(Op, DAG,
- HasMips64 ? MipsII::MO_GOT_DISP : MipsII::MO_GOT16);
+ return getAddrGlobal(N, Ty, DAG,
+ HasMips64 ? MipsII::MO_GOT_DISP : MipsII::MO_GOT16,
+ DAG.getEntryNode(), MachinePointerInfo::getGOT());
}
SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
SelectionDAG &DAG) const {
+ BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
+ EVT Ty = Op.getValueType();
+
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64)
- return getAddrNonPIC(Op, DAG);
+ return getAddrNonPIC(N, Ty, DAG);
- return getAddrLocal(Op, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG, HasMips64);
}
SDValue MipsTargetLowering::
@@ -1488,7 +1496,7 @@ lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
// Local Exec TLS Model.
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
- DebugLoc DL = GA->getDebugLoc();
+ SDLoc DL(GA);
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy();
@@ -1564,10 +1572,13 @@ lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
SDValue MipsTargetLowering::
lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
{
+ JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
+ EVT Ty = Op.getValueType();
+
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64)
- return getAddrNonPIC(Op, DAG);
+ return getAddrNonPIC(N, Ty, DAG);
- return getAddrLocal(Op, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG, HasMips64);
}
SDValue MipsTargetLowering::
@@ -1582,18 +1593,20 @@ lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
// SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
// SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
// ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);
+ ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
+ EVT Ty = Op.getValueType();
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64)
- return getAddrNonPIC(Op, DAG);
+ return getAddrNonPIC(N, Ty, DAG);
- return getAddrLocal(Op, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG, HasMips64);
}
SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
getPointerTy());
@@ -1604,12 +1617,13 @@ SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo(SV), false, false, 0);
}
-static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
+ bool HasExtractInsert) {
EVT TyX = Op.getOperand(0).getValueType();
EVT TyY = Op.getOperand(1).getValueType();
SDValue Const1 = DAG.getConstant(1, MVT::i32);
SDValue Const31 = DAG.getConstant(31, MVT::i32);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Res;
// If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
@@ -1623,7 +1637,7 @@ static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
Const1);
- if (HasR2) {
+ if (HasExtractInsert) {
// ext E, Y, 31, 1 ; extract bit31 of Y
// ins X, E, 31, 1 ; insert extracted bit at bit31 of X
SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
@@ -1649,18 +1663,19 @@ static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
-static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
+ bool HasExtractInsert) {
unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
SDValue Const1 = DAG.getConstant(1, MVT::i32);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// Bitcast to integer nodes.
SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));
- if (HasR2) {
+ if (HasExtractInsert) {
// ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y
// ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X
SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
@@ -1700,14 +1715,15 @@ static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
SDValue
MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
if (Subtarget->hasMips64())
- return lowerFCOPYSIGN64(Op, DAG, Subtarget->hasMips32r2());
+ return lowerFCOPYSIGN64(Op, DAG, Subtarget->hasExtractInsert());
- return lowerFCOPYSIGN32(Op, DAG, Subtarget->hasMips32r2());
+ return lowerFCOPYSIGN32(Op, DAG, Subtarget->hasExtractInsert());
}
-static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG,
+ bool HasExtractInsert) {
SDValue Res, Const1 = DAG.getConstant(1, MVT::i32);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
// to i32.
@@ -1717,7 +1733,7 @@ static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
Const1);
// Clear MSB.
- if (HasR2)
+ if (HasExtractInsert)
Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
DAG.getRegister(Mips::ZERO, MVT::i32),
DAG.getConstant(31, MVT::i32), Const1, X);
@@ -1734,15 +1750,16 @@ static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
-static SDValue lowerFABS64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+static SDValue lowerFABS64(SDValue Op, SelectionDAG &DAG,
+ bool HasExtractInsert) {
SDValue Res, Const1 = DAG.getConstant(1, MVT::i32);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// Bitcast to integer node.
SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
// Clear MSB.
- if (HasR2)
+ if (HasExtractInsert)
Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
DAG.getRegister(Mips::ZERO_64, MVT::i64),
DAG.getConstant(63, MVT::i32), Const1, X);
@@ -1757,9 +1774,9 @@ static SDValue lowerFABS64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
SDValue
MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
if (Subtarget->hasMips64() && (Op.getValueType() == MVT::f64))
- return lowerFABS64(Op, DAG, Subtarget->hasMips32r2());
+ return lowerFABS64(Op, DAG, Subtarget->hasExtractInsert());
- return lowerFABS32(Op, DAG, Subtarget->hasMips32r2());
+ return lowerFABS32(Op, DAG, Subtarget->hasExtractInsert());
}
SDValue MipsTargetLowering::
@@ -1771,7 +1788,7 @@ lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
IsN64 ? Mips::FP_64 : Mips::FP, VT);
return FrameAddr;
@@ -1791,7 +1808,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
// Return RA, which contains the return address. Mark it an implicit live-in.
unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT));
- return DAG.getCopyFromReg(DAG.getEntryNode(), Op.getDebugLoc(), Reg, VT);
+ return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
}
// An EH_RETURN is the result of lowering llvm.eh.return which in turn is
@@ -1807,7 +1824,7 @@ SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
SDValue Chain = Op.getOperand(0);
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT Ty = IsN64 ? MVT::i64 : MVT::i32;
// Store stack offset in V1, store jump target in V0. Glue CopyToReg and
@@ -1827,14 +1844,14 @@ SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
// FIXME: Need pseudo-fence for 'singlethread' fences
// FIXME: Set SType for weaker fences where supported/appropriate.
unsigned SType = 0;
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
DAG.getConstant(SType, MVT::i32));
}
SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
SDValue Shamt = Op.getOperand(2);
@@ -1865,7 +1882,7 @@ SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
bool IsSRA) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
SDValue Shamt = Op.getOperand(2);
@@ -1909,7 +1926,7 @@ static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
SDValue Ptr = LD->getBasePtr();
EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
EVT BasePtrVT = Ptr.getValueType();
- DebugLoc DL = LD->getDebugLoc();
+ SDLoc DL(LD);
SDVTList VTList = DAG.getVTList(VT, MVT::Other);
if (Offset)
@@ -1975,7 +1992,7 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// (set tmp1, (lwr baseptr, tmp0))
// (set tmp2, (shl tmp1, 32))
// (set dst, (srl tmp2, 32))
- DebugLoc DL = LD->getDebugLoc();
+ SDLoc DL(LD);
SDValue Const32 = DAG.getConstant(32, MVT::i32);
SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
@@ -1987,7 +2004,7 @@ static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
SDValue Chain, unsigned Offset) {
SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
- DebugLoc DL = SD->getDebugLoc();
+ SDLoc DL(SD);
SDVTList VTList = DAG.getVTList(MVT::Other);
if (Offset)
@@ -2000,16 +2017,8 @@ static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
}
// Expand an unaligned 32 or 64-bit integer store node.
-SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- StoreSDNode *SD = cast<StoreSDNode>(Op);
- EVT MemVT = SD->getMemoryVT();
-
- // Return if store is aligned or if MemVT is neither i32 nor i64.
- if ((SD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
- ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
- return SDValue();
-
- bool IsLittle = Subtarget->isLittle();
+static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG,
+ bool IsLittle) {
SDValue Value = SD->getValue(), Chain = SD->getChain();
EVT VT = Value.getValueType();
@@ -2036,6 +2045,34 @@ SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
}
+// Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
+static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG) {
+ SDValue Val = SD->getValue();
+
+ if (Val.getOpcode() != ISD::FP_TO_SINT)
+ return SDValue();
+
+ EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits());
+ SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
+ Val.getOperand(0));
+
+ return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
+ SD->getPointerInfo(), SD->isVolatile(),
+ SD->isNonTemporal(), SD->getAlignment());
+}
+
+SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ StoreSDNode *SD = cast<StoreSDNode>(Op);
+ EVT MemVT = SD->getMemoryVT();
+
+ // Lower unaligned integer stores.
+ if ((SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
+ ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
+ return lowerUnalignedIntStore(SD, DAG, Subtarget->isLittle());
+
+ return lowerFP_TO_SINT_STORE(SD, DAG);
+}
+
SDValue MipsTargetLowering::lowerADD(SDValue Op, SelectionDAG &DAG) const {
if (Op->getOperand(0).getOpcode() != ISD::FRAMEADDR
|| cast<ConstantSDNode>
@@ -2053,10 +2090,18 @@ SDValue MipsTargetLowering::lowerADD(SDValue Op, SelectionDAG &DAG) const {
EVT ValTy = Op->getValueType(0);
int FI = MFI->CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
SDValue InArgsAddr = DAG.getFrameIndex(FI, ValTy);
- return DAG.getNode(ISD::ADD, Op->getDebugLoc(), ValTy, InArgsAddr,
+ return DAG.getNode(ISD::ADD, SDLoc(Op), ValTy, InArgsAddr,
DAG.getConstant(0, ValTy));
}
+SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
+ SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
+ Op.getOperand(0));
+ return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
+}
+
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -2076,21 +2121,14 @@ SDValue MipsTargetLowering::lowerADD(SDValue Op, SelectionDAG &DAG) const {
// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
//===----------------------------------------------------------------------===//
-static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
- MVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
+static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+ CCState &State, const uint16_t *F64Regs) {
- static const unsigned IntRegsSize=4, FloatRegsSize=2;
+ static const unsigned IntRegsSize = 4, FloatRegsSize = 2;
- static const uint16_t IntRegs[] = {
- Mips::A0, Mips::A1, Mips::A2, Mips::A3
- };
- static const uint16_t F32Regs[] = {
- Mips::F12, Mips::F14
- };
- static const uint16_t F64Regs[] = {
- Mips::D6, Mips::D7
- };
+ static const uint16_t IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
+ static const uint16_t F32Regs[] = { Mips::F12, Mips::F14 };
// Do not process byval args here.
if (ArgFlags.isByVal())
@@ -2159,14 +2197,28 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
return false;
}
+static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ static const uint16_t F64Regs[] = { Mips::D6, Mips::D7 };
+
+ return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
+}
+
+static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ static const uint16_t F64Regs[] = { Mips::D12_64, Mips::D14_64 };
+
+ return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
+}
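CC_MipsO32 is now parameterized by the f64 argument register table, and the two thin wrappers above select either D6/D7 (FR=0, paired 32-bit FPRs) or D12_64/D14_64 (FR=1, true 64-bit FPRs). A generic sketch of the table-plus-wrapper pattern; the register numbers below are stand-ins, not the real encodings:

#include <cstddef>
#include <iostream>

// Shared assignment routine, parameterized by the available register table.
static int assignF64Arg(unsigned ArgNo, const unsigned *F64Regs, size_t N) {
  return ArgNo < N ? static_cast<int>(F64Regs[ArgNo])  // passed in a register
                   : -1;                                // otherwise on the stack
}

// FR=0 variant: doubles go in paired 32-bit FPRs (stand-ins for D6, D7).
static int assignF64_FP32(unsigned ArgNo) {
  static const unsigned Regs[] = { 6, 7 };
  return assignF64Arg(ArgNo, Regs, 2);
}

// FR=1 variant: doubles go in 64-bit FPRs (stand-ins for D12_64, D14_64).
static int assignF64_FP64(unsigned ArgNo) {
  static const unsigned Regs[] = { 12, 14 };
  return assignF64Arg(ArgNo, Regs, 2);
}

int main() {
  std::cout << assignF64_FP32(0) << ' ' << assignF64_FP64(1) << ' '
            << assignF64_FP32(2) << '\n';   // prints: 6 14 -1
  return 0;
}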
+
#include "MipsGenCallingConv.inc"
//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//
-static const unsigned O32IntRegsSize = 4;
-
// Return next O32 integer argument register.
static unsigned getNextIntArgReg(unsigned Reg) {
assert((Reg == Mips::A0) || (Reg == Mips::A2));
@@ -2175,7 +2227,7 @@ static unsigned getNextIntArgReg(unsigned Reg) {
SDValue
MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
- SDValue Chain, SDValue Arg, DebugLoc DL,
+ SDValue Chain, SDValue Arg, SDLoc DL,
bool IsTailCall, SelectionDAG &DAG) const {
if (!IsTailCall) {
SDValue PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
@@ -2229,6 +2281,15 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CLI.CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
+ if (Subtarget->inMips16HardFloat()) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
+ llvm::StringRef Sym = G->getGlobal()->getName();
+ Function *F = G->getGlobal()->getParent()->getFunction(Sym);
+ if (F->hasFnAttribute("__Mips16RetHelper")) {
+ Mask = MipsRegisterInfo::getMips16RetHelperMask();
+ }
+ }
+ }
Ops.push_back(CLI.DAG.getRegisterMask(Mask));
if (InFlag.getNode())
@@ -2241,10 +2302,10 @@ SDValue
MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &DL = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc DL = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &IsTailCall = CLI.IsTailCall;
@@ -2254,16 +2315,20 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetFrameLowering *TFL = MF.getTarget().getFrameLowering();
+ MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, CCInfo);
+ MipsCC::SpecialCallingConvType SpecialCallingConv =
+ getSpecialCallingConv(Callee);
+ MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo,
+ SpecialCallingConv);
MipsCCInfo.analyzeCallOperands(Outs, IsVarArg,
- getTargetMachine().Options.UseSoftFloat,
+ Subtarget->mipsSEUsesSoftFloat(),
Callee.getNode(), CLI.Args);
// Get a count of how many bytes are to be pushed on the stack.
@@ -2286,7 +2351,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true);
if (!IsTailCall)
- Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal);
+ Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal, DL);
SDValue StackPtr = DAG.getCopyFromReg(Chain, DL,
IsN64 ? Mips::SP_64 : Mips::SP,
@@ -2380,32 +2445,40 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool IsPICCall = (IsN64 || IsPIC); // true if calls are translated to jalr $25
bool GlobalOrExternal = false, InternalLinkage = false;
SDValue CalleeLo;
+ EVT Ty = Callee.getValueType();
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
if (IsPICCall) {
- InternalLinkage = G->getGlobal()->hasInternalLinkage();
+ const GlobalValue *Val = G->getGlobal();
+ InternalLinkage = Val->hasInternalLinkage();
if (InternalLinkage)
- Callee = getAddrLocal(Callee, DAG, HasMips64);
+ Callee = getAddrLocal(G, Ty, DAG, HasMips64);
else if (LargeGOT)
- Callee = getAddrGlobalLargeGOT(Callee, DAG, MipsII::MO_CALL_HI16,
- MipsII::MO_CALL_LO16);
+ Callee = getAddrGlobalLargeGOT(G, Ty, DAG, MipsII::MO_CALL_HI16,
+ MipsII::MO_CALL_LO16, Chain,
+ FuncInfo->callPtrInfo(Val));
else
- Callee = getAddrGlobal(Callee, DAG, MipsII::MO_GOT_CALL);
+ Callee = getAddrGlobal(G, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
+ FuncInfo->callPtrInfo(Val));
} else
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy(), 0,
MipsII::MO_NO_FLAG);
GlobalOrExternal = true;
}
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ const char *Sym = S->getSymbol();
+
if (!IsN64 && !IsPIC) // !N64 && static
- Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
+ Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(),
MipsII::MO_NO_FLAG);
else if (LargeGOT)
- Callee = getAddrGlobalLargeGOT(Callee, DAG, MipsII::MO_CALL_HI16,
- MipsII::MO_CALL_LO16);
+ Callee = getAddrGlobalLargeGOT(S, Ty, DAG, MipsII::MO_CALL_HI16,
+ MipsII::MO_CALL_LO16, Chain,
+ FuncInfo->callPtrInfo(Sym));
else // N64 || PIC
- Callee = getAddrGlobal(Callee, DAG, MipsII::MO_GOT_CALL);
+ Callee = getAddrGlobal(S, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
+ FuncInfo->callPtrInfo(Sym));
GlobalOrExternal = true;
}
@@ -2424,7 +2497,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Create the CALLSEQ_END node.
Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal,
- DAG.getIntPtrConstant(0, true), InFlag);
+ DAG.getIntPtrConstant(0, true), InFlag, DL);
InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we
@@ -2439,7 +2512,7 @@ SDValue
MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
const SDNode *CallNode,
const Type *RetTy) const {
@@ -2447,9 +2520,9 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
getTargetMachine(), RVLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, CCInfo);
+ MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo);
- MipsCCInfo.analyzeCallResult(Ins, getTargetMachine().Options.UseSoftFloat,
+ MipsCCInfo.analyzeCallResult(Ins, Subtarget->mipsSEUsesSoftFloat(),
CallNode, RetTy);
// Copy all of the result registers out of their specified physreg.
@@ -2478,7 +2551,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
MachineFunction &MF = DAG.getMachineFunction();
@@ -2494,10 +2567,10 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, CCInfo);
+ MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo);
Function::const_arg_iterator FuncArg =
DAG.getMachineFunction().getFunction()->arg_begin();
- bool UseSoftFloat = getTargetMachine().Options.UseSoftFloat;
+ bool UseSoftFloat = Subtarget->mipsSEUsesSoftFloat();
MipsCCInfo.analyzeFormalArguments(Ins, UseSoftFloat, FuncArg);
MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(),
@@ -2526,21 +2599,9 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// Arguments stored on registers
if (IsRegLoc) {
- EVT RegVT = VA.getLocVT();
+ MVT RegVT = VA.getLocVT();
unsigned ArgReg = VA.getLocReg();
- const TargetRegisterClass *RC;
-
- if (RegVT == MVT::i32)
- RC = Subtarget->inMips16Mode()? &Mips::CPU16RegsRegClass :
- &Mips::CPURegsRegClass;
- else if (RegVT == MVT::i64)
- RC = &Mips::CPU64RegsRegClass;
- else if (RegVT == MVT::f32)
- RC = &Mips::FGR32RegClass;
- else if (RegVT == MVT::f64)
- RC = HasMips64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
- else
- llvm_unreachable("RegVT not supported by FormalArguments Lowering");
+ const TargetRegisterClass *RC = getRegClassFor(RegVT);
// Transform the arguments stored on
// physical registers into virtual ones
@@ -2590,9 +2651,11 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// Create load nodes to retrieve arguments from the stack
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
- InVals.push_back(DAG.getLoad(ValVT, DL, Chain, FIN,
- MachinePointerInfo::getFixedStack(FI),
- false, false, false, 0));
+ SDValue Load = DAG.getLoad(ValVT, DL, Chain, FIN,
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, false, 0);
+ InVals.push_back(Load);
+ OutChains.push_back(Load.getValue(1));
}
}
@@ -2644,7 +2707,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const {
+ SDLoc DL, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of
// the return value to a location
SmallVector<CCValAssign, 16> RVLocs;
@@ -2653,10 +2716,10 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(), RVLocs,
*DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, CCInfo);
+ MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo);
// Analyze return values.
- MipsCCInfo.analyzeReturn(Outs, getTargetMachine().Options.UseSoftFloat,
+ MipsCCInfo.analyzeReturn(Outs, Subtarget->mipsSEUsesSoftFloat(),
MF.getFunction()->getReturnType());
SDValue Flag;
@@ -2715,7 +2778,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
MipsTargetLowering::ConstraintType MipsTargetLowering::
getConstraintType(const std::string &Constraint) const
{
- // Mips specific constrainy
+ // Mips specific constraints
// GCC config/mips/constraints.md
//
// 'd' : An address register. Equivalent to r
@@ -2766,16 +2829,19 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
if (type->isIntegerTy())
weight = CW_Register;
break;
- case 'f':
- if (type->isFloatTy())
+ case 'f': // FPU or MSA register
+ if (Subtarget->hasMSA() && type->isVectorTy() &&
+ cast<VectorType>(type)->getBitWidth() == 128)
+ weight = CW_Register;
+ else if (type->isFloatTy())
weight = CW_Register;
break;
case 'c': // $25 for indirect jumps
case 'l': // lo register
case 'x': // hilo register pair
- if (type->isIntegerTy())
+ if (type->isIntegerTy())
weight = CW_SpecificReg;
- break;
+ break;
case 'I': // signed 16 bit immediate
case 'J': // integer zero
case 'K': // unsigned 16 bit immediate
@@ -2793,11 +2859,109 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
return weight;
}
+/// This is a helper function to parse a physical register string and split it
+/// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
+/// that is returned indicates whether parsing was successful. The second flag
+/// is true if the numeric part exists.
+static std::pair<bool, bool>
+parsePhysicalReg(const StringRef &C, std::string &Prefix,
+ unsigned long long &Reg) {
+ if (C.front() != '{' || C.back() != '}')
+ return std::make_pair(false, false);
+
+ // Search for the first numeric character.
+ StringRef::const_iterator I, B = C.begin() + 1, E = C.end() - 1;
+ I = std::find_if(B, E, std::ptr_fun(isdigit));
+
+ Prefix.assign(B, I - B);
+
+ // The second flag is set to false if no numeric characters were found.
+ if (I == E)
+ return std::make_pair(true, false);
+
+ // Parse the numeric characters.
+ return std::make_pair(!getAsUnsignedInteger(StringRef(I, E - I), 10, Reg),
+ true);
+}
+
+std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
+parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const {
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterClass *RC;
+ std::string Prefix;
+ unsigned long long Reg;
+
+ std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);
+
+ if (!R.first)
+ return std::make_pair((unsigned)0, (const TargetRegisterClass*)0);
+
+ if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
+ // No numeric characters follow "hi" or "lo".
+ if (R.second)
+ return std::make_pair((unsigned)0, (const TargetRegisterClass*)0);
+
+ RC = TRI->getRegClass(Prefix == "hi" ?
+ Mips::HI32RegClassID : Mips::LO32RegClassID);
+ return std::make_pair(*(RC->begin()), RC);
+ } else if (Prefix.compare(0, 4, "$msa") == 0) {
+ // Parse $msa(ir|csr|access|save|modify|request|map|unmap)
+
+ // No numeric characters follow the name.
+ if (R.second)
+ return std::make_pair((unsigned)0, (const TargetRegisterClass *)0);
+
+ Reg = StringSwitch<unsigned long long>(Prefix)
+ .Case("$msair", Mips::MSAIR)
+ .Case("$msacsr", Mips::MSACSR)
+ .Case("$msaaccess", Mips::MSAAccess)
+ .Case("$msasave", Mips::MSASave)
+ .Case("$msamodify", Mips::MSAModify)
+ .Case("$msarequest", Mips::MSARequest)
+ .Case("$msamap", Mips::MSAMap)
+ .Case("$msaunmap", Mips::MSAUnmap)
+ .Default(0);
+
+ if (!Reg)
+ return std::make_pair((unsigned)0, (const TargetRegisterClass *)0);
+
+ RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
+ return std::make_pair(Reg, RC);
+ }
+
+ if (!R.second)
+ return std::make_pair((unsigned)0, (const TargetRegisterClass*)0);
+
+ if (Prefix == "$f") { // Parse $f0-$f31.
+ // If the size of FP registers is 64-bit or Reg is an even number, select
+ // the 64-bit register class. Otherwise, select the 32-bit register class.
+ if (VT == MVT::Other)
+ VT = (Subtarget->isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
+
+ RC = getRegClassFor(VT);
+
+ if (RC == &Mips::AFGR64RegClass) {
+ assert(Reg % 2 == 0);
+ Reg >>= 1;
+ }
+ } else if (Prefix == "$fcc") // Parse $fcc0-$fcc7.
+ RC = TRI->getRegClass(Mips::FCCRegClassID);
+ else if (Prefix == "$w") { // Parse $w0-$w31.
+ RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);
+ } else { // Parse $0-$31.
+ assert(Prefix == "$");
+ RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);
+ }
+
+ assert(Reg < RC->getNumRegs());
+ return std::make_pair(*(RC->begin() + Reg), RC);
+}
+
/// Given a register class constraint, like 'r', if this corresponds directly
/// to an LLVM register class, return a register of 0 and the register class
/// pointer.
std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
+getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
{
if (Constraint.size() == 1) {
switch (Constraint[0]) {
@@ -2807,18 +2971,26 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
if (Subtarget->inMips16Mode())
return std::make_pair(0U, &Mips::CPU16RegsRegClass);
- return std::make_pair(0U, &Mips::CPURegsRegClass);
+ return std::make_pair(0U, &Mips::GPR32RegClass);
}
if (VT == MVT::i64 && !HasMips64)
- return std::make_pair(0U, &Mips::CPURegsRegClass);
+ return std::make_pair(0U, &Mips::GPR32RegClass);
if (VT == MVT::i64 && HasMips64)
- return std::make_pair(0U, &Mips::CPU64RegsRegClass);
+ return std::make_pair(0U, &Mips::GPR64RegClass);
// This will generate an error message
return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
- case 'f':
- if (VT == MVT::f32)
+ case 'f': // FPU or MSA register
+ if (VT == MVT::v16i8)
+ return std::make_pair(0U, &Mips::MSA128BRegClass);
+ else if (VT == MVT::v8i16 || VT == MVT::v8f16)
+ return std::make_pair(0U, &Mips::MSA128HRegClass);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return std::make_pair(0U, &Mips::MSA128WRegClass);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return std::make_pair(0U, &Mips::MSA128DRegClass);
+ else if (VT == MVT::f32)
return std::make_pair(0U, &Mips::FGR32RegClass);
- if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
+ else if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
if (Subtarget->isFP64bit())
return std::make_pair(0U, &Mips::FGR64RegClass);
return std::make_pair(0U, &Mips::AFGR64RegClass);
@@ -2826,19 +2998,26 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
break;
case 'c': // register suitable for indirect jump
if (VT == MVT::i32)
- return std::make_pair((unsigned)Mips::T9, &Mips::CPURegsRegClass);
+ return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
assert(VT == MVT::i64 && "Unexpected type.");
- return std::make_pair((unsigned)Mips::T9_64, &Mips::CPU64RegsRegClass);
+ return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
case 'l': // register suitable for indirect jump
if (VT == MVT::i32)
- return std::make_pair((unsigned)Mips::LO, &Mips::LORegsRegClass);
- return std::make_pair((unsigned)Mips::LO64, &Mips::LORegs64RegClass);
+ return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
+ return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
case 'x': // register suitable for indirect jump
// Fixme: Not triggering the use of both hi and low
// This will generate an error message
return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
}
}
+
+ std::pair<unsigned, const TargetRegisterClass *> R;
+ R = parseRegForInlineAsmConstraint(Constraint, VT);
+
+ if (R.second)
+ return R;
+
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
@@ -2937,8 +3116,8 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
-bool
-MipsTargetLowering::isLegalAddressingMode(const AddrMode &AM, Type *Ty) const {
+bool MipsTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
// No global is ever allowed as a base.
if (AM.BaseGV)
return false;
@@ -3002,13 +3181,13 @@ static bool isF128SoftLibCall(const char *CallSym) {
"log10l", "log2l", "logl", "nearbyintl", "powl", "rintl", "sinl", "sqrtl",
"truncl"};
- const char * const *End = LibCalls + array_lengthof(LibCalls);
+ const char *const *End = LibCalls + array_lengthof(LibCalls);
// Check that LibCalls is sorted alphabetically.
MipsTargetLowering::LTStr Comp;
#ifndef NDEBUG
- for (const char * const *I = LibCalls; I < End - 1; ++I)
+ for (const char *const *I = LibCalls; I < End - 1; ++I)
assert(Comp(*I, *(I + 1)));
#endif
@@ -3029,13 +3208,32 @@ static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) {
return (ES && Ty->isIntegerTy(128) && isF128SoftLibCall(ES->getSymbol()));
}
-MipsTargetLowering::MipsCC::MipsCC(CallingConv::ID CC, bool IsO32_,
- CCState &Info)
- : CCInfo(Info), CallConv(CC), IsO32(IsO32_) {
+MipsTargetLowering::MipsCC::SpecialCallingConvType
+ MipsTargetLowering::getSpecialCallingConv(SDValue Callee) const {
+ MipsCC::SpecialCallingConvType SpecialCallingConv =
+ MipsCC::NoSpecialCallingConv;;
+ if (Subtarget->inMips16HardFloat()) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ llvm::StringRef Sym = G->getGlobal()->getName();
+ Function *F = G->getGlobal()->getParent()->getFunction(Sym);
+ if (F->hasFnAttribute("__Mips16RetHelper")) {
+ SpecialCallingConv = MipsCC::Mips16RetHelperConv;
+ }
+ }
+ }
+ return SpecialCallingConv;
+}
+
+MipsTargetLowering::MipsCC::MipsCC(
+ CallingConv::ID CC, bool IsO32_, bool IsFP64_, CCState &Info,
+ MipsCC::SpecialCallingConvType SpecialCallingConv_)
+ : CCInfo(Info), CallConv(CC), IsO32(IsO32_), IsFP64(IsFP64_),
+ SpecialCallingConv(SpecialCallingConv_){
// Pre-allocate reserved argument area.
CCInfo.AllocateStack(reservedArgArea(), 1);
}
+
void MipsTargetLowering::MipsCC::
analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Args,
bool IsVarArg, bool IsSoftFloat, const SDNode *CallNode,
@@ -3143,11 +3341,10 @@ analyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsSoftFloat,
analyzeReturn(Outs, IsSoftFloat, 0, RetTy);
}
-void
-MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT,
- MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags) {
+void MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT,
+ MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags) {
assert(ArgFlags.getByValSize() && "Byval argument's size shouldn't be 0.");
struct ByValArgInfo ByVal;
@@ -3183,11 +3380,13 @@ llvm::CCAssignFn *MipsTargetLowering::MipsCC::fixedArgFn() const {
if (CallConv == CallingConv::Fast)
return CC_Mips_FastCC;
- return IsO32 ? CC_MipsO32 : CC_MipsN;
+ if (SpecialCallingConv == Mips16RetHelperConv)
+ return CC_Mips16RetHelper;
+ return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN;
}
llvm::CCAssignFn *MipsTargetLowering::MipsCC::varArgFn() const {
- return IsO32 ? CC_MipsO32 : CC_MipsN_VarArg;
+ return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN_VarArg;
}
const uint16_t *MipsTargetLowering::MipsCC::shadowRegs() const {
@@ -3233,7 +3432,7 @@ MVT MipsTargetLowering::MipsCC::getRegVT(MVT VT, const Type *OrigTy,
}
void MipsTargetLowering::
-copyByValRegs(SDValue Chain, DebugLoc DL, std::vector<SDValue> &OutChains,
+copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
const MipsCC &CC, const ByValArgInfo &ByVal) const {
@@ -3277,9 +3476,9 @@ copyByValRegs(SDValue Chain, DebugLoc DL, std::vector<SDValue> &OutChains,
// Copy byVal arg to registers and stack.
void MipsTargetLowering::
-passByValArg(SDValue Chain, DebugLoc DL,
+passByValArg(SDValue Chain, SDLoc DL,
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
- SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
+ SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
const MipsCC &CC, const ByValArgInfo &ByVal,
const ISD::ArgFlagsTy &Flags, bool isLittle) const {
@@ -3365,17 +3564,15 @@ passByValArg(SDValue Chain, DebugLoc DL,
DAG.getConstant(Offset, PtrTy));
SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
DAG.getIntPtrConstant(ByVal.Address));
- Chain = DAG.getMemcpy(Chain, DL, Dst, Src,
- DAG.getConstant(MemCpySize, PtrTy), Alignment,
- /*isVolatile=*/false, /*AlwaysInline=*/false,
+ Chain = DAG.getMemcpy(Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, PtrTy),
+ Alignment, /*isVolatile=*/false, /*AlwaysInline=*/false,
MachinePointerInfo(0), MachinePointerInfo(0));
MemOpChains.push_back(Chain);
}
-void
-MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
- const MipsCC &CC, SDValue Chain,
- DebugLoc DL, SelectionDAG &DAG) const {
+void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
+ const MipsCC &CC, SDValue Chain,
+ SDLoc DL, SelectionDAG &DAG) const {
unsigned NumRegs = CC.numIntArgRegs();
const uint16_t *ArgRegs = CC.intArgRegs();
const CCState &CCInfo = CC.getCCInfo();
@@ -3393,8 +3590,7 @@ MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
if (NumRegs == Idx)
VaArgOffset = RoundUpToAlignment(CCInfo.getNextStackOffset(), RegSize);
else
- VaArgOffset =
- (int)CC.reservedArgArea() - (int)(RegSize * (NumRegs - Idx));
+ VaArgOffset = (int)CC.reservedArgArea() - (int)(RegSize * (NumRegs - Idx));
// Record the frame index of the first variable argument
// which is a value necessary to VASTART.
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 5587e8f58147..65f68f04315d 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -17,6 +17,7 @@
#include "Mips.h"
#include "MipsSubtarget.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Function.h"
@@ -60,8 +61,8 @@ namespace llvm {
CMovFP_T,
CMovFP_F,
- // Floating Point Rounding
- FPRound,
+ // FP-to-int truncation node.
+ TruncIntFP,
// Return
Ret,
@@ -69,10 +70,11 @@ namespace llvm {
EH_RETURN,
// Node used to extract integer from accumulator.
- ExtractLOHI,
+ MFHI,
+ MFLO,
// Node used to insert integers to accumulator.
- InsertLOHI,
+ MTLOHI,
// Mult nodes.
Mult,
@@ -152,6 +154,43 @@ namespace llvm {
SETCC_DSP,
SELECT_CC_DSP,
+ // Vector comparisons.
+ // These take a vector and return a boolean.
+ VALL_ZERO,
+ VANY_ZERO,
+ VALL_NONZERO,
+ VANY_NONZERO,
+
+ // These take a vector and return a vector bitmask.
+ VCEQ,
+ VCLE_S,
+ VCLE_U,
+ VCLT_S,
+ VCLT_U,
+
+ // Element-wise vector max/min.
+ VSMAX,
+ VSMIN,
+ VUMAX,
+ VUMIN,
+
+ // Vector Shuffle with mask as an operand
+ VSHF, // Generic shuffle
+ SHF, // 4-element set shuffle.
+ ILVEV, // Interleave even elements
+ ILVOD, // Interleave odd elements
+ ILVL, // Interleave left elements
+ ILVR, // Interleave right elements
+ PCKEV, // Pack even elements
+ PCKOD, // Pack odd elements
+
+ // Combined (XOR (OR $a, $b), -1)
+ VNOR,
+
+ // Extended vector element extraction
+ VEXTRACT_SEXT_ELT,
+ VEXTRACT_ZEXT_ELT,
+
// Load/Store Left/Right nodes.
LWL = ISD::FIRST_TARGET_MEMORY_OPCODE,
LWR,
@@ -195,7 +234,7 @@ namespace llvm {
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - get the ISD::SETCC result ValueType
- EVT getSetCCResultType(EVT VT) const;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
@@ -211,12 +250,72 @@ namespace llvm {
protected:
SDValue getGlobalReg(SelectionDAG &DAG, EVT Ty) const;
- SDValue getAddrLocal(SDValue Op, SelectionDAG &DAG, bool HasMips64) const;
-
- SDValue getAddrGlobal(SDValue Op, SelectionDAG &DAG, unsigned Flag) const;
-
- SDValue getAddrGlobalLargeGOT(SDValue Op, SelectionDAG &DAG,
- unsigned HiFlag, unsigned LoFlag) const;
+ // This method creates the following nodes, which are necessary for
+ // computing a local symbol's address:
+ //
+ // (add (load (wrapper $gp, %got(sym)), %lo(sym))
+ template<class NodeTy>
+ SDValue getAddrLocal(NodeTy *N, EVT Ty, SelectionDAG &DAG,
+ bool HasMips64) const {
+ SDLoc DL(N);
+ unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
+ SDValue GOT = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
+ getTargetNode(N, Ty, DAG, GOTFlag));
+ SDValue Load = DAG.getLoad(Ty, DL, DAG.getEntryNode(), GOT,
+ MachinePointerInfo::getGOT(), false, false,
+ false, 0);
+ unsigned LoFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
+ SDValue Lo = DAG.getNode(MipsISD::Lo, DL, Ty,
+ getTargetNode(N, Ty, DAG, LoFlag));
+ return DAG.getNode(ISD::ADD, DL, Ty, Load, Lo);
+ }
+
+ // This method creates the following nodes, which are necessary for
+ // computing a global symbol's address:
+ //
+ // (load (wrapper $gp, %got(sym)))
+ template<class NodeTy>
+ SDValue getAddrGlobal(NodeTy *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag, SDValue Chain,
+ const MachinePointerInfo &PtrInfo) const {
+ SDLoc DL(N);
+ SDValue Tgt = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
+ getTargetNode(N, Ty, DAG, Flag));
+ return DAG.getLoad(Ty, DL, Chain, Tgt, PtrInfo, false, false, false, 0);
+ }
+
+ // This method creates the following nodes, which are necessary for
+ // computing a global symbol's address in large-GOT mode:
+ //
+ // (load (wrapper (add %hi(sym), $gp), %lo(sym)))
+ template<class NodeTy>
+ SDValue getAddrGlobalLargeGOT(NodeTy *N, EVT Ty, SelectionDAG &DAG,
+ unsigned HiFlag, unsigned LoFlag,
+ SDValue Chain,
+ const MachinePointerInfo &PtrInfo) const {
+ SDLoc DL(N);
+ SDValue Hi = DAG.getNode(MipsISD::Hi, DL, Ty,
+ getTargetNode(N, Ty, DAG, HiFlag));
+ Hi = DAG.getNode(ISD::ADD, DL, Ty, Hi, getGlobalReg(DAG, Ty));
+ SDValue Wrapper = DAG.getNode(MipsISD::Wrapper, DL, Ty, Hi,
+ getTargetNode(N, Ty, DAG, LoFlag));
+ return DAG.getLoad(Ty, DL, Chain, Wrapper, PtrInfo, false, false, false,
+ 0);
+ }
+
+ // This method creates the following nodes, which are necessary for
+ // computing a symbol's address in non-PIC mode:
+ //
+ // (add %hi(sym), %lo(sym))
+ template<class NodeTy>
+ SDValue getAddrNonPIC(NodeTy *N, EVT Ty, SelectionDAG &DAG) const {
+ SDLoc DL(N);
+ SDValue Hi = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_HI);
+ SDValue Lo = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_LO);
+ return DAG.getNode(ISD::ADD, DL, Ty,
+ DAG.getNode(MipsISD::Hi, DL, Ty, Hi),
+ DAG.getNode(MipsISD::Lo, DL, Ty, Lo));
+ }
/// This function fills Ops, which is the list of operands that will later
/// be used when a function call node is created. It also generates
@@ -240,7 +339,13 @@ namespace llvm {
/// arguments and inquire about calling convention information.
class MipsCC {
public:
- MipsCC(CallingConv::ID CallConv, bool IsO32, CCState &Info);
+ enum SpecialCallingConvType {
+ Mips16RetHelperConv, NoSpecialCallingConv
+ };
+
+ MipsCC(CallingConv::ID CallConv, bool IsO32, bool IsFP64, CCState &Info,
+ SpecialCallingConvType SpecialCallingConv = NoSpecialCallingConv);
+
void analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
bool IsVarArg, bool IsSoftFloat,
@@ -275,7 +380,7 @@ namespace llvm {
/// Return pointer to array of integer argument registers.
const uint16_t *intArgRegs() const;
- typedef SmallVector<ByValArgInfo, 2>::const_iterator byval_iterator;
+ typedef SmallVectorImpl<ByValArgInfo>::const_iterator byval_iterator;
byval_iterator byval_begin() const { return ByValArgs.begin(); }
byval_iterator byval_end() const { return ByValArgs.end(); }
@@ -312,9 +417,13 @@ namespace llvm {
CCState &CCInfo;
CallingConv::ID CallConv;
- bool IsO32;
+ bool IsO32, IsFP64;
+ SpecialCallingConvType SpecialCallingConv;
SmallVector<ByValArgInfo, 2> ByValArgs;
};
+ protected:
+ SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const;
// Subtarget Info
const MipsSubtarget *Subtarget;
@@ -322,11 +431,32 @@ namespace llvm {
bool HasMips64, IsN64, IsO32;
private:
+ // Create a TargetGlobalAddress node.
+ SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag) const;
+
+ // Create a TargetExternalSymbol node.
+ SDValue getTargetNode(ExternalSymbolSDNode *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag) const;
+
+ // Create a TargetBlockAddress node.
+ SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag) const;
+
+ // Create a TargetJumpTable node.
+ SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag) const;
+
+ // Create a TargetConstantPool node.
+ SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag) const;
+
+ MipsCC::SpecialCallingConvType getSpecialCallingConv(SDValue Callee) const;
// Lower Operand helpers
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
const SDNode *CallNode, const Type *RetTy) const;
@@ -351,9 +481,8 @@ namespace llvm {
SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG& DAG) const;
SDValue lowerShiftRightParts(SDValue Op, SelectionDAG& DAG,
bool IsSRA) const;
- SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const;
- SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerADD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
/// isEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
@@ -365,7 +494,7 @@ namespace llvm {
/// copyByValArg - Copy argument registers which were used to pass a byval
/// argument to the stack. Create a stack frame object for the byval
/// argument.
- void copyByValRegs(SDValue Chain, DebugLoc DL,
+ void copyByValRegs(SDValue Chain, SDLoc DL,
std::vector<SDValue> &OutChains, SelectionDAG &DAG,
const ISD::ArgFlagsTy &Flags,
SmallVectorImpl<SDValue> &InVals,
@@ -373,9 +502,9 @@ namespace llvm {
const MipsCC &CC, const ByValArgInfo &ByVal) const;
/// passByValArg - Pass a byval argument in registers or on stack.
- void passByValArg(SDValue Chain, DebugLoc DL,
+ void passByValArg(SDValue Chain, SDLoc DL,
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
- SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
+ SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
const MipsCC &CC, const ByValArgInfo &ByVal,
const ISD::ArgFlagsTy &Flags, bool isLittle) const;
@@ -384,17 +513,17 @@ namespace llvm {
/// to the stack. Also create a stack frame object for the first variable
/// argument.
void writeVarArgRegs(std::vector<SDValue> &OutChains, const MipsCC &CC,
- SDValue Chain, DebugLoc DL, SelectionDAG &DAG) const;
+ SDValue Chain, SDLoc DL, SelectionDAG &DAG) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue passArgOnStack(SDValue StackPtr, unsigned Offset, SDValue Chain,
- SDValue Arg, DebugLoc DL, bool IsTailCall,
+ SDValue Arg, SDLoc DL, bool IsTailCall,
SelectionDAG &DAG) const;
virtual SDValue
@@ -412,7 +541,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
// Inline asm support
ConstraintType getConstraintType(const std::string &Constraint) const;
@@ -422,9 +551,14 @@ namespace llvm {
ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const;
+ /// This function parses registers that appear in inline-asm constraints.
+ /// It returns pair (0, 0) on failure.
+ std::pair<unsigned, const TargetRegisterClass *>
+ parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const;
+
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
+ MVT VT) const;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td
index 6b23057c9cdb..9f7ce9aa72b0 100644
--- a/lib/Target/Mips/MipsInstrFPU.td
+++ b/lib/Target/Mips/MipsInstrFPU.td
@@ -24,12 +24,14 @@
//===----------------------------------------------------------------------===//
// Floating Point Compare and Branch
-def SDT_MipsFPBrcond : SDTypeProfile<0, 2, [SDTCisInt<0>,
- SDTCisVT<1, OtherVT>]>;
+def SDT_MipsFPBrcond : SDTypeProfile<0, 3, [SDTCisInt<0>,
+ SDTCisVT<1, i32>,
+ SDTCisVT<2, OtherVT>]>;
def SDT_MipsFPCmp : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>, SDTCisFP<1>,
SDTCisVT<2, i32>]>;
-def SDT_MipsCMovFP : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
- SDTCisSameAs<1, 2>]>;
+def SDT_MipsCMovFP : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisVT<2, i32>,
+ SDTCisSameAs<1, 3>]>;
+def SDT_MipsTruncIntFP : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>;
def SDT_MipsBuildPairF64 : SDTypeProfile<1, 2, [SDTCisVT<0, f64>,
SDTCisVT<1, i32>,
SDTCisSameAs<1, 2>]>;
@@ -42,6 +44,7 @@ def MipsCMovFP_T : SDNode<"MipsISD::CMovFP_T", SDT_MipsCMovFP, [SDNPInGlue]>;
def MipsCMovFP_F : SDNode<"MipsISD::CMovFP_F", SDT_MipsCMovFP, [SDNPInGlue]>;
def MipsFPBrcond : SDNode<"MipsISD::FPBrcond", SDT_MipsFPBrcond,
[SDNPHasChain, SDNPOptInGlue]>;
+def MipsTruncIntFP : SDNode<"MipsISD::TruncIntFP", SDT_MipsTruncIntFP>;
def MipsBuildPairF64 : SDNode<"MipsISD::BuildPairF64", SDT_MipsBuildPairF64>;
def MipsExtractElementF64 : SDNode<"MipsISD::ExtractElementF64",
SDT_MipsExtractElementF64>;
@@ -86,7 +89,7 @@ def fpimm0neg : PatLeaf<(fpimm), [{
// Only S32 and D32 are supported right now.
//===----------------------------------------------------------------------===//
-class ADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin, bit IsComm,
+class ADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin, bit IsComm,
SDPatternOperator OpNode= null_frag> :
InstSE<(outs RC:$fd), (ins RC:$fs, RC:$ft),
!strconcat(opstr, "\t$fd, $fs, $ft"),
@@ -96,15 +99,15 @@ class ADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin, bit IsComm,
multiclass ADDS_M<string opstr, InstrItinClass Itin, bit IsComm,
SDPatternOperator OpNode = null_frag> {
- def _D32 : ADDS_FT<opstr, AFGR64, Itin, IsComm, OpNode>,
+ def _D32 : ADDS_FT<opstr, AFGR64Opnd, Itin, IsComm, OpNode>,
Requires<[NotFP64bit, HasStdEnc]>;
- def _D64 : ADDS_FT<opstr, FGR64, Itin, IsComm, OpNode>,
+ def _D64 : ADDS_FT<opstr, FGR64Opnd, Itin, IsComm, OpNode>,
Requires<[IsFP64bit, HasStdEnc]> {
string DecoderNamespace = "Mips64";
}
}
-class ABSS_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC,
+class ABSS_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
InstSE<(outs DstRC:$fd), (ins SrcRC:$fs), !strconcat(opstr, "\t$fd, $fs"),
[(set DstRC:$fd, (OpNode SrcRC:$fs))], Itin, FrmFR>,
@@ -112,95 +115,87 @@ class ABSS_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC,
multiclass ABSS_M<string opstr, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> {
- def _D32 : ABSS_FT<opstr, AFGR64, AFGR64, Itin, OpNode>,
+ def _D32 : ABSS_FT<opstr, AFGR64Opnd, AFGR64Opnd, Itin, OpNode>,
Requires<[NotFP64bit, HasStdEnc]>;
- def _D64 : ABSS_FT<opstr, FGR64, FGR64, Itin, OpNode>,
+ def _D64 : ABSS_FT<opstr, FGR64Opnd, FGR64Opnd, Itin, OpNode>,
Requires<[IsFP64bit, HasStdEnc]> {
string DecoderNamespace = "Mips64";
}
}
multiclass ROUND_M<string opstr, InstrItinClass Itin> {
- def _D32 : ABSS_FT<opstr, FGR32, AFGR64, Itin>,
+ def _D32 : ABSS_FT<opstr, FGR32Opnd, AFGR64Opnd, Itin>,
Requires<[NotFP64bit, HasStdEnc]>;
- def _D64 : ABSS_FT<opstr, FGR32, FGR64, Itin>,
+ def _D64 : ABSS_FT<opstr, FGR32Opnd, FGR64Opnd, Itin>,
Requires<[IsFP64bit, HasStdEnc]> {
let DecoderNamespace = "Mips64";
}
}
-class MFC1_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC,
+class MFC1_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
InstSE<(outs DstRC:$rt), (ins SrcRC:$fs), !strconcat(opstr, "\t$rt, $fs"),
[(set DstRC:$rt, (OpNode SrcRC:$fs))], Itin, FrmFR>;
-class MTC1_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC,
+class MTC1_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
InstSE<(outs DstRC:$fs), (ins SrcRC:$rt), !strconcat(opstr, "\t$rt, $fs"),
[(set DstRC:$fs, (OpNode SrcRC:$rt))], Itin, FrmFR>;
-class MFC1_FT_CCR<string opstr, RegisterClass DstRC, RegisterOperand SrcRC,
- InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
- InstSE<(outs DstRC:$rt), (ins SrcRC:$fs), !strconcat(opstr, "\t$rt, $fs"),
- [(set DstRC:$rt, (OpNode SrcRC:$fs))], Itin, FrmFR>;
-
-class MTC1_FT_CCR<string opstr, RegisterOperand DstRC, RegisterClass SrcRC,
- InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
- InstSE<(outs DstRC:$fs), (ins SrcRC:$rt), !strconcat(opstr, "\t$rt, $fs"),
- [(set DstRC:$fs, (OpNode SrcRC:$rt))], Itin, FrmFR>;
-
-class LW_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
- Operand MemOpnd, SDPatternOperator OpNode= null_frag> :
- InstSE<(outs RC:$rt), (ins MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
+class LW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
+ SDPatternOperator OpNode= null_frag> :
+ InstSE<(outs RC:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(set RC:$rt, (OpNode addrDefault:$addr))], Itin, FrmFI> {
let DecoderMethod = "DecodeFMem";
+ let mayLoad = 1;
}
-class SW_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
- Operand MemOpnd, SDPatternOperator OpNode= null_frag> :
- InstSE<(outs), (ins RC:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
+class SW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
+ SDPatternOperator OpNode= null_frag> :
+ InstSE<(outs), (ins RC:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(OpNode RC:$rt, addrDefault:$addr)], Itin, FrmFI> {
let DecoderMethod = "DecodeFMem";
+ let mayStore = 1;
}
-class MADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
+class MADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
!strconcat(opstr, "\t$fd, $fr, $fs, $ft"),
[(set RC:$fd, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr))], Itin, FrmFR>;
-class NMADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
+class NMADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
!strconcat(opstr, "\t$fd, $fr, $fs, $ft"),
[(set RC:$fd, (fsub fpimm0, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr)))],
Itin, FrmFR>;
-class LWXC1_FT<string opstr, RegisterClass DRC, RegisterClass PRC,
+class LWXC1_FT<string opstr, RegisterOperand DRC,
InstrItinClass Itin, SDPatternOperator OpNode = null_frag> :
- InstSE<(outs DRC:$fd), (ins PRC:$base, PRC:$index),
+ InstSE<(outs DRC:$fd), (ins PtrRC:$base, PtrRC:$index),
!strconcat(opstr, "\t$fd, ${index}(${base})"),
- [(set DRC:$fd, (OpNode (add PRC:$base, PRC:$index)))], Itin, FrmFI> {
+ [(set DRC:$fd, (OpNode (add iPTR:$base, iPTR:$index)))], Itin, FrmFI> {
let AddedComplexity = 20;
}
-class SWXC1_FT<string opstr, RegisterClass DRC, RegisterClass PRC,
+class SWXC1_FT<string opstr, RegisterOperand DRC,
InstrItinClass Itin, SDPatternOperator OpNode = null_frag> :
- InstSE<(outs), (ins DRC:$fs, PRC:$base, PRC:$index),
+ InstSE<(outs), (ins DRC:$fs, PtrRC:$base, PtrRC:$index),
!strconcat(opstr, "\t$fs, ${index}(${base})"),
- [(OpNode DRC:$fs, (add PRC:$base, PRC:$index))], Itin, FrmFI> {
+ [(OpNode DRC:$fs, (add iPTR:$base, iPTR:$index))], Itin, FrmFI> {
let AddedComplexity = 20;
}
class BC1F_FT<string opstr, InstrItinClass Itin,
SDPatternOperator Op = null_frag> :
- InstSE<(outs), (ins brtarget:$offset), !strconcat(opstr, "\t$offset"),
- [(MipsFPBrcond Op, bb:$offset)], Itin, FrmFI> {
+ InstSE<(outs), (ins FCCRegsOpnd:$fcc, brtarget:$offset),
+ !strconcat(opstr, "\t$fcc, $offset"),
+ [(MipsFPBrcond Op, FCCRegsOpnd:$fcc, bb:$offset)], Itin, FrmFI> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
let Defs = [AT];
- let Uses = [FCR31];
}
class CEQS_FT<string typestr, RegisterClass RC, InstrItinClass Itin,
@@ -208,17 +203,53 @@ class CEQS_FT<string typestr, RegisterClass RC, InstrItinClass Itin,
InstSE<(outs), (ins RC:$fs, RC:$ft, condcode:$cond),
!strconcat("c.$cond.", typestr, "\t$fs, $ft"),
[(OpNode RC:$fs, RC:$ft, imm:$cond)], Itin, FrmFR> {
- let Defs = [FCR31];
-}
+ let Defs = [FCC0];
+ let isCodeGenOnly = 1;
+}
+
+class C_COND_FT<string CondStr, string Typestr, RegisterOperand RC> :
+ InstSE<(outs), (ins RC:$fs, RC:$ft),
+ !strconcat("c.", CondStr, ".", Typestr, "\t$fs, $ft"), [], IIFcmp,
+ FrmFR>;
+
+multiclass C_COND_M<string TypeStr, RegisterOperand RC, bits<5> fmt> {
+ def C_F_#NAME : C_COND_FT<"f", TypeStr, RC>, C_COND_FM<fmt, 0>;
+ def C_UN_#NAME : C_COND_FT<"un", TypeStr, RC>, C_COND_FM<fmt, 1>;
+ def C_EQ_#NAME : C_COND_FT<"eq", TypeStr, RC>, C_COND_FM<fmt, 2>;
+ def C_UEQ_#NAME : C_COND_FT<"ueq", TypeStr, RC>, C_COND_FM<fmt, 3>;
+ def C_OLT_#NAME : C_COND_FT<"olt", TypeStr, RC>, C_COND_FM<fmt, 4>;
+ def C_ULT_#NAME : C_COND_FT<"ult", TypeStr, RC>, C_COND_FM<fmt, 5>;
+ def C_OLE_#NAME : C_COND_FT<"ole", TypeStr, RC>, C_COND_FM<fmt, 6>;
+ def C_ULE_#NAME : C_COND_FT<"ule", TypeStr, RC>, C_COND_FM<fmt, 7>;
+ def C_SF_#NAME : C_COND_FT<"sf", TypeStr, RC>, C_COND_FM<fmt, 8>;
+ def C_NGLE_#NAME : C_COND_FT<"ngle", TypeStr, RC>, C_COND_FM<fmt, 9>;
+ def C_SEQ_#NAME : C_COND_FT<"seq", TypeStr, RC>, C_COND_FM<fmt, 10>;
+ def C_NGL_#NAME : C_COND_FT<"ngl", TypeStr, RC>, C_COND_FM<fmt, 11>;
+ def C_LT_#NAME : C_COND_FT<"lt", TypeStr, RC>, C_COND_FM<fmt, 12>;
+ def C_NGE_#NAME : C_COND_FT<"nge", TypeStr, RC>, C_COND_FM<fmt, 13>;
+ def C_LE_#NAME : C_COND_FT<"le", TypeStr, RC>, C_COND_FM<fmt, 14>;
+ def C_NGT_#NAME : C_COND_FT<"ngt", TypeStr, RC>, C_COND_FM<fmt, 15>;
+}
+
+defm S : C_COND_M<"s", FGR32Opnd, 16>;
+defm D32 : C_COND_M<"d", AFGR64Opnd, 17>,
+ Requires<[NotFP64bit, HasStdEnc]>;
+let DecoderNamespace = "Mips64" in
+defm D64 : C_COND_M<"d", FGR64Opnd, 17>, Requires<[IsFP64bit, HasStdEnc]>;
//===----------------------------------------------------------------------===//
// Floating Point Instructions
//===----------------------------------------------------------------------===//
-def ROUND_W_S : ABSS_FT<"round.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xc, 16>;
-def TRUNC_W_S : ABSS_FT<"trunc.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xd, 16>;
-def CEIL_W_S : ABSS_FT<"ceil.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xe, 16>;
-def FLOOR_W_S : ABSS_FT<"floor.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xf, 16>;
-def CVT_W_S : ABSS_FT<"cvt.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0x24, 16>;
+def ROUND_W_S : ABSS_FT<"round.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xc, 16>;
+def TRUNC_W_S : ABSS_FT<"trunc.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xd, 16>;
+def CEIL_W_S : ABSS_FT<"ceil.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xe, 16>;
+def FLOOR_W_S : ABSS_FT<"floor.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xf, 16>;
+def CVT_W_S : ABSS_FT<"cvt.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x24, 16>;
defm ROUND_W : ROUND_M<"round.w.d", IIFcvt>, ABSS_FM<0xc, 17>;
defm TRUNC_W : ROUND_M<"trunc.w.d", IIFcvt>, ABSS_FM<0xd, 17>;
@@ -227,46 +258,72 @@ defm FLOOR_W : ROUND_M<"floor.w.d", IIFcvt>, ABSS_FM<0xf, 17>;
defm CVT_W : ROUND_M<"cvt.w.d", IIFcvt>, ABSS_FM<0x24, 17>;
let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
- def ROUND_L_S : ABSS_FT<"round.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x8, 16>;
- def ROUND_L_D64 : ABSS_FT<"round.l.d", FGR64, FGR64, IIFcvt>,
+ def ROUND_L_S : ABSS_FT<"round.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x8, 16>;
+ def ROUND_L_D64 : ABSS_FT<"round.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
ABSS_FM<0x8, 17>;
- def TRUNC_L_S : ABSS_FT<"trunc.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x9, 16>;
- def TRUNC_L_D64 : ABSS_FT<"trunc.l.d", FGR64, FGR64, IIFcvt>,
+ def TRUNC_L_S : ABSS_FT<"trunc.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x9, 16>;
+ def TRUNC_L_D64 : ABSS_FT<"trunc.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
ABSS_FM<0x9, 17>;
- def CEIL_L_S : ABSS_FT<"ceil.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0xa, 16>;
- def CEIL_L_D64 : ABSS_FT<"ceil.l.d", FGR64, FGR64, IIFcvt>, ABSS_FM<0xa, 17>;
- def FLOOR_L_S : ABSS_FT<"floor.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0xb, 16>;
- def FLOOR_L_D64 : ABSS_FT<"floor.l.d", FGR64, FGR64, IIFcvt>,
+ def CEIL_L_S : ABSS_FT<"ceil.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xa, 16>;
+ def CEIL_L_D64 : ABSS_FT<"ceil.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
+ ABSS_FM<0xa, 17>;
+ def FLOOR_L_S : ABSS_FT<"floor.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xb, 16>;
+ def FLOOR_L_D64 : ABSS_FT<"floor.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
ABSS_FM<0xb, 17>;
}
-def CVT_S_W : ABSS_FT<"cvt.s.w", FGR32, FGR32, IIFcvt>, ABSS_FM<0x20, 20>;
-def CVT_L_S : ABSS_FT<"cvt.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x25, 16>;
-def CVT_L_D64: ABSS_FT<"cvt.l.d", FGR64, FGR64, IIFcvt>, ABSS_FM<0x25, 17>;
+def CVT_S_W : ABSS_FT<"cvt.s.w", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x20, 20>;
+def CVT_L_S : ABSS_FT<"cvt.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x25, 16>;
+def CVT_L_D64: ABSS_FT<"cvt.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
+ ABSS_FM<0x25, 17>;
let Predicates = [NotFP64bit, HasStdEnc] in {
- def CVT_S_D32 : ABSS_FT<"cvt.s.d", FGR32, AFGR64, IIFcvt>, ABSS_FM<0x20, 17>;
- def CVT_D32_W : ABSS_FT<"cvt.d.w", AFGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 20>;
- def CVT_D32_S : ABSS_FT<"cvt.d.s", AFGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 16>;
+ def CVT_S_D32 : ABSS_FT<"cvt.s.d", FGR32Opnd, AFGR64Opnd, IIFcvt>,
+ ABSS_FM<0x20, 17>;
+ def CVT_D32_W : ABSS_FT<"cvt.d.w", AFGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x21, 20>;
+ def CVT_D32_S : ABSS_FT<"cvt.d.s", AFGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x21, 16>;
}
let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
- def CVT_S_D64 : ABSS_FT<"cvt.s.d", FGR32, FGR64, IIFcvt>, ABSS_FM<0x20, 17>;
- def CVT_S_L : ABSS_FT<"cvt.s.l", FGR32, FGR64, IIFcvt>, ABSS_FM<0x20, 21>;
- def CVT_D64_W : ABSS_FT<"cvt.d.w", FGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 20>;
- def CVT_D64_S : ABSS_FT<"cvt.d.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 16>;
- def CVT_D64_L : ABSS_FT<"cvt.d.l", FGR64, FGR64, IIFcvt>, ABSS_FM<0x21, 21>;
+ def CVT_S_D64 : ABSS_FT<"cvt.s.d", FGR32Opnd, FGR64Opnd, IIFcvt>,
+ ABSS_FM<0x20, 17>;
+ def CVT_S_L : ABSS_FT<"cvt.s.l", FGR32Opnd, FGR64Opnd, IIFcvt>,
+ ABSS_FM<0x20, 21>;
+ def CVT_D64_W : ABSS_FT<"cvt.d.w", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x21, 20>;
+ def CVT_D64_S : ABSS_FT<"cvt.d.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x21, 16>;
+ def CVT_D64_L : ABSS_FT<"cvt.d.l", FGR64Opnd, FGR64Opnd, IIFcvt>,
+ ABSS_FM<0x21, 21>;
+}
+
+let isPseudo = 1, isCodeGenOnly = 1 in {
+ def PseudoCVT_S_W : ABSS_FT<"", FGR32Opnd, GPR32Opnd, IIFcvt>;
+ def PseudoCVT_D32_W : ABSS_FT<"", AFGR64Opnd, GPR32Opnd, IIFcvt>;
+ def PseudoCVT_S_L : ABSS_FT<"", FGR64Opnd, GPR64Opnd, IIFcvt>;
+ def PseudoCVT_D64_W : ABSS_FT<"", FGR64Opnd, GPR32Opnd, IIFcvt>;
+ def PseudoCVT_D64_L : ABSS_FT<"", FGR64Opnd, GPR64Opnd, IIFcvt>;
}
let Predicates = [NoNaNsFPMath, HasStdEnc] in {
- def FABS_S : ABSS_FT<"abs.s", FGR32, FGR32, IIFcvt, fabs>, ABSS_FM<0x5, 16>;
- def FNEG_S : ABSS_FT<"neg.s", FGR32, FGR32, IIFcvt, fneg>, ABSS_FM<0x7, 16>;
+ def FABS_S : ABSS_FT<"abs.s", FGR32Opnd, FGR32Opnd, IIFcvt, fabs>,
+ ABSS_FM<0x5, 16>;
+ def FNEG_S : ABSS_FT<"neg.s", FGR32Opnd, FGR32Opnd, IIFcvt, fneg>,
+ ABSS_FM<0x7, 16>;
defm FABS : ABSS_M<"abs.d", IIFcvt, fabs>, ABSS_FM<0x5, 17>;
defm FNEG : ABSS_M<"neg.d", IIFcvt, fneg>, ABSS_FM<0x7, 17>;
}
-def FSQRT_S : ABSS_FT<"sqrt.s", FGR32, FGR32, IIFsqrtSingle, fsqrt>,
- ABSS_FM<0x4, 16>;
+def FSQRT_S : ABSS_FT<"sqrt.s", FGR32Opnd, FGR32Opnd, IIFsqrtSingle,
+ fsqrt>, ABSS_FM<0x4, 16>;
defm FSQRT : ABSS_M<"sqrt.d", IIFsqrtDouble, fsqrt>, ABSS_FM<0x4, 17>;
// The odd-numbered registers are only referenced when doing loads,
@@ -275,130 +332,136 @@ defm FSQRT : ABSS_M<"sqrt.d", IIFsqrtDouble, fsqrt>, ABSS_FM<0x4, 17>;
// regardless of register aliasing.
/// Move Control Registers From/To CPU Registers
-def CFC1 : MFC1_FT_CCR<"cfc1", CPURegs, CCROpnd, IIFmove>, MFC1_FM<2>;
-def CTC1 : MTC1_FT_CCR<"ctc1", CCROpnd, CPURegs, IIFmove>, MFC1_FM<6>;
-def MFC1 : MFC1_FT<"mfc1", CPURegs, FGR32, IIFmove, bitconvert>, MFC1_FM<0>;
-def MTC1 : MTC1_FT<"mtc1", FGR32, CPURegs, IIFmove, bitconvert>, MFC1_FM<4>;
-def DMFC1 : MFC1_FT<"dmfc1", CPU64Regs, FGR64, IIFmove, bitconvert>, MFC1_FM<1>;
-def DMTC1 : MTC1_FT<"dmtc1", FGR64, CPU64Regs, IIFmove, bitconvert>, MFC1_FM<5>;
-
-def FMOV_S : ABSS_FT<"mov.s", FGR32, FGR32, IIFmove>, ABSS_FM<0x6, 16>;
-def FMOV_D32 : ABSS_FT<"mov.d", AFGR64, AFGR64, IIFmove>, ABSS_FM<0x6, 17>,
- Requires<[NotFP64bit, HasStdEnc]>;
-def FMOV_D64 : ABSS_FT<"mov.d", FGR64, FGR64, IIFmove>, ABSS_FM<0x6, 17>,
- Requires<[IsFP64bit, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
+def CFC1 : MFC1_FT<"cfc1", GPR32Opnd, CCROpnd, IIFmove>, MFC1_FM<2>;
+def CTC1 : MTC1_FT<"ctc1", CCROpnd, GPR32Opnd, IIFmove>, MFC1_FM<6>;
+def MFC1 : MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd, IIFmoveC1, bitconvert>,
+ MFC1_FM<0>;
+def MTC1 : MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd, IIFmoveC1, bitconvert>,
+ MFC1_FM<4>;
+def MFHC1 : MFC1_FT<"mfhc1", GPR32Opnd, FGRH32Opnd, IIFmoveC1>,
+ MFC1_FM<3>;
+def MTHC1 : MTC1_FT<"mthc1", FGRH32Opnd, GPR32Opnd, IIFmoveC1>,
+ MFC1_FM<7>;
+def DMFC1 : MFC1_FT<"dmfc1", GPR64Opnd, FGR64Opnd, IIFmoveC1,
+ bitconvert>, MFC1_FM<1>;
+def DMTC1 : MTC1_FT<"dmtc1", FGR64Opnd, GPR64Opnd, IIFmoveC1,
+ bitconvert>, MFC1_FM<5>;
+
+def FMOV_S : ABSS_FT<"mov.s", FGR32Opnd, FGR32Opnd, IIFmove>,
+ ABSS_FM<0x6, 16>;
+def FMOV_D32 : ABSS_FT<"mov.d", AFGR64Opnd, AFGR64Opnd, IIFmove>,
+ ABSS_FM<0x6, 17>, Requires<[NotFP64bit, HasStdEnc]>;
+def FMOV_D64 : ABSS_FT<"mov.d", FGR64Opnd, FGR64Opnd, IIFmove>,
+ ABSS_FM<0x6, 17>, Requires<[IsFP64bit, HasStdEnc]> {
+ let DecoderNamespace = "Mips64";
}
/// Floating Point Memory Instructions
-let Predicates = [IsN64, HasStdEnc], DecoderNamespace = "Mips64" in {
- def LWC1_P8 : LW_FT<"lwc1", FGR32, IILoad, mem64, load>, LW_FM<0x31>;
- def SWC1_P8 : SW_FT<"swc1", FGR32, IIStore, mem64, store>, LW_FM<0x39>;
- def LDC164_P8 : LW_FT<"ldc1", FGR64, IILoad, mem64, load>, LW_FM<0x35> {
- let isCodeGenOnly =1;
- }
- def SDC164_P8 : SW_FT<"sdc1", FGR64, IIStore, mem64, store>, LW_FM<0x3d> {
- let isCodeGenOnly =1;
- }
+let Predicates = [HasStdEnc] in {
+ def LWC1 : LW_FT<"lwc1", FGR32Opnd, IIFLoad, load>, LW_FM<0x31>;
+ def SWC1 : SW_FT<"swc1", FGR32Opnd, IIFStore, store>, LW_FM<0x39>;
}
-let Predicates = [NotN64, HasStdEnc] in {
- def LWC1 : LW_FT<"lwc1", FGR32, IILoad, mem, load>, LW_FM<0x31>;
- def SWC1 : SW_FT<"swc1", FGR32, IIStore, mem, store>, LW_FM<0x39>;
+let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
+ def LDC164 : LW_FT<"ldc1", FGR64Opnd, IIFLoad, load>, LW_FM<0x35>;
+ def SDC164 : SW_FT<"sdc1", FGR64Opnd, IIFStore, store>, LW_FM<0x3d>;
}
-let Predicates = [NotN64, HasMips64, HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def LDC164 : LW_FT<"ldc1", FGR64, IILoad, mem, load>, LW_FM<0x35>;
- def SDC164 : SW_FT<"sdc1", FGR64, IIStore, mem, store>, LW_FM<0x3d>;
+let Predicates = [NotFP64bit, HasStdEnc] in {
+ def LDC1 : LW_FT<"ldc1", AFGR64Opnd, IIFLoad, load>, LW_FM<0x35>;
+ def SDC1 : SW_FT<"sdc1", AFGR64Opnd, IIFStore, store>, LW_FM<0x3d>;
}
-let Predicates = [NotN64, NotMips64, HasStdEnc] in {
- def LDC1 : LW_FT<"ldc1", AFGR64, IILoad, mem, load>, LW_FM<0x35>;
- def SDC1 : SW_FT<"sdc1", AFGR64, IIStore, mem, store>, LW_FM<0x3d>;
+/// Cop2 Memory Instructions
+let Predicates = [HasStdEnc] in {
+ def LWC2 : LW_FT<"lwc2", COP2Opnd, NoItinerary, load>, LW_FM<0x32>;
+ def SWC2 : SW_FT<"swc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3a>;
+ def LDC2 : LW_FT<"ldc2", COP2Opnd, NoItinerary, load>, LW_FM<0x36>;
+ def SDC2 : SW_FT<"sdc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3e>;
}
// Indexed loads and stores.
let Predicates = [HasFPIdx, HasStdEnc] in {
- def LWXC1 : LWXC1_FT<"lwxc1", FGR32, CPURegs, IILoad, load>, LWXC1_FM<0>;
- def SWXC1 : SWXC1_FT<"swxc1", FGR32, CPURegs, IIStore, store>, SWXC1_FM<8>;
-}
-
-let Predicates = [HasMips32r2, NotMips64, HasStdEnc] in {
- def LDXC1 : LWXC1_FT<"ldxc1", AFGR64, CPURegs, IILoad, load>, LWXC1_FM<1>;
- def SDXC1 : SWXC1_FT<"sdxc1", AFGR64, CPURegs, IIStore, store>, SWXC1_FM<9>;
+ def LWXC1 : LWXC1_FT<"lwxc1", FGR32Opnd, IIFLoad, load>, LWXC1_FM<0>;
+ def SWXC1 : SWXC1_FT<"swxc1", FGR32Opnd, IIFStore, store>, SWXC1_FM<8>;
}
-let Predicates = [HasMips64, NotN64, HasStdEnc], DecoderNamespace="Mips64" in {
- def LDXC164 : LWXC1_FT<"ldxc1", FGR64, CPURegs, IILoad, load>, LWXC1_FM<1>;
- def SDXC164 : SWXC1_FT<"sdxc1", FGR64, CPURegs, IIStore, store>, SWXC1_FM<9>;
+let Predicates = [HasFPIdx, NotFP64bit, HasStdEnc] in {
+ def LDXC1 : LWXC1_FT<"ldxc1", AFGR64Opnd, IIFLoad, load>, LWXC1_FM<1>;
+ def SDXC1 : SWXC1_FT<"sdxc1", AFGR64Opnd, IIFStore, store>, SWXC1_FM<9>;
}
-// n64
-let Predicates = [IsN64, HasStdEnc], isCodeGenOnly=1 in {
- def LWXC1_P8 : LWXC1_FT<"lwxc1", FGR32, CPU64Regs, IILoad, load>, LWXC1_FM<0>;
- def LDXC164_P8 : LWXC1_FT<"ldxc1", FGR64, CPU64Regs, IILoad, load>,
- LWXC1_FM<1>;
- def SWXC1_P8 : SWXC1_FT<"swxc1", FGR32, CPU64Regs, IIStore, store>,
- SWXC1_FM<8>;
- def SDXC164_P8 : SWXC1_FT<"sdxc1", FGR64, CPU64Regs, IIStore, store>,
- SWXC1_FM<9>;
+let Predicates = [HasFPIdx, IsFP64bit, HasStdEnc],
+ DecoderNamespace="Mips64" in {
+ def LDXC164 : LWXC1_FT<"ldxc1", FGR64Opnd, IIFLoad, load>, LWXC1_FM<1>;
+ def SDXC164 : SWXC1_FT<"sdxc1", FGR64Opnd, IIFStore, store>, SWXC1_FM<9>;
}
// Load/store doubleword indexed unaligned.
-let Predicates = [NotMips64, HasStdEnc] in {
- def LUXC1 : LWXC1_FT<"luxc1", AFGR64, CPURegs, IILoad>, LWXC1_FM<0x5>;
- def SUXC1 : SWXC1_FT<"suxc1", AFGR64, CPURegs, IIStore>, SWXC1_FM<0xd>;
+let Predicates = [NotFP64bit, HasStdEnc] in {
+ def LUXC1 : LWXC1_FT<"luxc1", AFGR64Opnd, IIFLoad>, LWXC1_FM<0x5>;
+ def SUXC1 : SWXC1_FT<"suxc1", AFGR64Opnd, IIFStore>, SWXC1_FM<0xd>;
}
-let Predicates = [HasMips64, HasStdEnc],
- DecoderNamespace="Mips64" in {
- def LUXC164 : LWXC1_FT<"luxc1", FGR64, CPURegs, IILoad>, LWXC1_FM<0x5>;
- def SUXC164 : SWXC1_FT<"suxc1", FGR64, CPURegs, IIStore>, SWXC1_FM<0xd>;
+let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace="Mips64" in {
+ def LUXC164 : LWXC1_FT<"luxc1", FGR64Opnd, IIFLoad>, LWXC1_FM<0x5>;
+ def SUXC164 : SWXC1_FT<"suxc1", FGR64Opnd, IIFStore>, SWXC1_FM<0xd>;
}
/// Floating-point Aritmetic
-def FADD_S : ADDS_FT<"add.s", FGR32, IIFadd, 1, fadd>, ADDS_FM<0x00, 16>;
-defm FADD : ADDS_M<"add.d", IIFadd, 1, fadd>, ADDS_FM<0x00, 17>;
-def FDIV_S : ADDS_FT<"div.s", FGR32, IIFdivSingle, 0, fdiv>, ADDS_FM<0x03, 16>;
-defm FDIV : ADDS_M<"div.d", IIFdivDouble, 0, fdiv>, ADDS_FM<0x03, 17>;
-def FMUL_S : ADDS_FT<"mul.s", FGR32, IIFmulSingle, 1, fmul>, ADDS_FM<0x02, 16>;
-defm FMUL : ADDS_M<"mul.d", IIFmulDouble, 1, fmul>, ADDS_FM<0x02, 17>;
-def FSUB_S : ADDS_FT<"sub.s", FGR32, IIFadd, 0, fsub>, ADDS_FM<0x01, 16>;
-defm FSUB : ADDS_M<"sub.d", IIFadd, 0, fsub>, ADDS_FM<0x01, 17>;
+def FADD_S : ADDS_FT<"add.s", FGR32Opnd, IIFadd, 1, fadd>,
+ ADDS_FM<0x00, 16>;
+defm FADD : ADDS_M<"add.d", IIFadd, 1, fadd>, ADDS_FM<0x00, 17>;
+def FDIV_S : ADDS_FT<"div.s", FGR32Opnd, IIFdivSingle, 0, fdiv>,
+ ADDS_FM<0x03, 16>;
+defm FDIV : ADDS_M<"div.d", IIFdivDouble, 0, fdiv>, ADDS_FM<0x03, 17>;
+def FMUL_S : ADDS_FT<"mul.s", FGR32Opnd, IIFmulSingle, 1, fmul>,
+ ADDS_FM<0x02, 16>;
+defm FMUL : ADDS_M<"mul.d", IIFmulDouble, 1, fmul>, ADDS_FM<0x02, 17>;
+def FSUB_S : ADDS_FT<"sub.s", FGR32Opnd, IIFadd, 0, fsub>,
+ ADDS_FM<0x01, 16>;
+defm FSUB : ADDS_M<"sub.d", IIFadd, 0, fsub>, ADDS_FM<0x01, 17>;
let Predicates = [HasMips32r2, HasStdEnc] in {
- def MADD_S : MADDS_FT<"madd.s", FGR32, IIFmulSingle, fadd>, MADDS_FM<4, 0>;
- def MSUB_S : MADDS_FT<"msub.s", FGR32, IIFmulSingle, fsub>, MADDS_FM<5, 0>;
+ def MADD_S : MADDS_FT<"madd.s", FGR32Opnd, IIFmulSingle, fadd>,
+ MADDS_FM<4, 0>;
+ def MSUB_S : MADDS_FT<"msub.s", FGR32Opnd, IIFmulSingle, fsub>,
+ MADDS_FM<5, 0>;
}
let Predicates = [HasMips32r2, NoNaNsFPMath, HasStdEnc] in {
- def NMADD_S : NMADDS_FT<"nmadd.s", FGR32, IIFmulSingle, fadd>, MADDS_FM<6, 0>;
- def NMSUB_S : NMADDS_FT<"nmsub.s", FGR32, IIFmulSingle, fsub>, MADDS_FM<7, 0>;
+ def NMADD_S : NMADDS_FT<"nmadd.s", FGR32Opnd, IIFmulSingle, fadd>,
+ MADDS_FM<6, 0>;
+ def NMSUB_S : NMADDS_FT<"nmsub.s", FGR32Opnd, IIFmulSingle, fsub>,
+ MADDS_FM<7, 0>;
}
let Predicates = [HasMips32r2, NotFP64bit, HasStdEnc] in {
- def MADD_D32 : MADDS_FT<"madd.d", AFGR64, IIFmulDouble, fadd>, MADDS_FM<4, 1>;
- def MSUB_D32 : MADDS_FT<"msub.d", AFGR64, IIFmulDouble, fsub>, MADDS_FM<5, 1>;
+ def MADD_D32 : MADDS_FT<"madd.d", AFGR64Opnd, IIFmulDouble, fadd>,
+ MADDS_FM<4, 1>;
+ def MSUB_D32 : MADDS_FT<"msub.d", AFGR64Opnd, IIFmulDouble, fsub>,
+ MADDS_FM<5, 1>;
}
let Predicates = [HasMips32r2, NotFP64bit, NoNaNsFPMath, HasStdEnc] in {
- def NMADD_D32 : NMADDS_FT<"nmadd.d", AFGR64, IIFmulDouble, fadd>,
+ def NMADD_D32 : NMADDS_FT<"nmadd.d", AFGR64Opnd, IIFmulDouble, fadd>,
MADDS_FM<6, 1>;
- def NMSUB_D32 : NMADDS_FT<"nmsub.d", AFGR64, IIFmulDouble, fsub>,
+ def NMSUB_D32 : NMADDS_FT<"nmsub.d", AFGR64Opnd, IIFmulDouble, fsub>,
MADDS_FM<7, 1>;
}
let Predicates = [HasMips32r2, IsFP64bit, HasStdEnc], isCodeGenOnly=1 in {
- def MADD_D64 : MADDS_FT<"madd.d", FGR64, IIFmulDouble, fadd>, MADDS_FM<4, 1>;
- def MSUB_D64 : MADDS_FT<"msub.d", FGR64, IIFmulDouble, fsub>, MADDS_FM<5, 1>;
+ def MADD_D64 : MADDS_FT<"madd.d", FGR64Opnd, IIFmulDouble, fadd>,
+ MADDS_FM<4, 1>;
+ def MSUB_D64 : MADDS_FT<"msub.d", FGR64Opnd, IIFmulDouble, fsub>,
+ MADDS_FM<5, 1>;
}
let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath, HasStdEnc],
isCodeGenOnly=1 in {
- def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64, IIFmulDouble, fadd>,
+ def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64Opnd, IIFmulDouble, fadd>,
MADDS_FM<6, 1>;
- def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64, IIFmulDouble, fsub>,
+ def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64Opnd, IIFmulDouble, fsub>,
MADDS_FM<7, 1>;
}
@@ -410,10 +473,9 @@ let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath, HasStdEnc],
def MIPS_BRANCH_F : PatLeaf<(i32 0)>;
def MIPS_BRANCH_T : PatLeaf<(i32 1)>;
-let DecoderMethod = "DecodeBC1" in {
def BC1F : BC1F_FT<"bc1f", IIBranch, MIPS_BRANCH_F>, BC1F_FM<0, 0>;
def BC1T : BC1F_FT<"bc1t", IIBranch, MIPS_BRANCH_T>, BC1F_FM<0, 1>;
-}
+
//===----------------------------------------------------------------------===//
// Floating Point Flag Conditions
//===----------------------------------------------------------------------===//
@@ -447,22 +509,36 @@ def FCMP_D64 : CEQS_FT<"d", FGR64, IIFcmp, MipsFPCmp>, CEQS_FM<17>,
//===----------------------------------------------------------------------===//
// Floating Point Pseudo-Instructions
//===----------------------------------------------------------------------===//
-def MOVCCRToCCR : PseudoSE<(outs CCR:$dst), (ins CCROpnd:$src), []>;
// This pseudo instr gets expanded into 2 mtc1 instrs after register
// allocation.
-def BuildPairF64 :
- PseudoSE<(outs AFGR64:$dst),
- (ins CPURegs:$lo, CPURegs:$hi),
- [(set AFGR64:$dst, (MipsBuildPairF64 CPURegs:$lo, CPURegs:$hi))]>;
+class BuildPairF64Base<RegisterOperand RO> :
+ PseudoSE<(outs RO:$dst), (ins GPR32Opnd:$lo, GPR32Opnd:$hi),
+ [(set RO:$dst, (MipsBuildPairF64 GPR32Opnd:$lo, GPR32Opnd:$hi))]>;
+
+def BuildPairF64 : BuildPairF64Base<AFGR64Opnd>,
+ Requires<[NotFP64bit, HasStdEnc]>;
+def BuildPairF64_64 : BuildPairF64Base<FGR64Opnd>,
+ Requires<[IsFP64bit, HasStdEnc]>;
// This pseudo instr gets expanded into 2 mfc1 instrs after register
// allocation.
// if n is 0, lower part of src is extracted.
// if n is 1, higher part of src is extracted.
-def ExtractElementF64 :
- PseudoSE<(outs CPURegs:$dst), (ins AFGR64:$src, i32imm:$n),
- [(set CPURegs:$dst, (MipsExtractElementF64 AFGR64:$src, imm:$n))]>;
+class ExtractElementF64Base<RegisterOperand RO> :
+ PseudoSE<(outs GPR32Opnd:$dst), (ins RO:$src, i32imm:$n),
+ [(set GPR32Opnd:$dst, (MipsExtractElementF64 RO:$src, imm:$n))]>;
+
+def ExtractElementF64 : ExtractElementF64Base<AFGR64Opnd>,
+ Requires<[NotFP64bit, HasStdEnc]>;
+def ExtractElementF64_64 : ExtractElementF64Base<FGR64Opnd>,
+ Requires<[IsFP64bit, HasStdEnc]>;
+
+//===----------------------------------------------------------------------===//
+// InstAliases.
+//===----------------------------------------------------------------------===//
+def : InstAlias<"bc1t $offset", (BC1T FCC0, brtarget:$offset)>;
+def : InstAlias<"bc1f $offset", (BC1F FCC0, brtarget:$offset)>;
//===----------------------------------------------------------------------===//
// Floating Point Patterns
@@ -470,59 +546,59 @@ def ExtractElementF64 :
def : MipsPat<(f32 fpimm0), (MTC1 ZERO)>;
def : MipsPat<(f32 fpimm0neg), (FNEG_S (MTC1 ZERO))>;
-def : MipsPat<(f32 (sint_to_fp CPURegs:$src)), (CVT_S_W (MTC1 CPURegs:$src))>;
-def : MipsPat<(i32 (fp_to_sint FGR32:$src)), (MFC1 (TRUNC_W_S FGR32:$src))>;
+def : MipsPat<(f32 (sint_to_fp GPR32Opnd:$src)),
+ (PseudoCVT_S_W GPR32Opnd:$src)>;
+def : MipsPat<(MipsTruncIntFP FGR32Opnd:$src),
+ (TRUNC_W_S FGR32Opnd:$src)>;
let Predicates = [NotFP64bit, HasStdEnc] in {
- def : MipsPat<(f64 (sint_to_fp CPURegs:$src)),
- (CVT_D32_W (MTC1 CPURegs:$src))>;
- def : MipsPat<(i32 (fp_to_sint AFGR64:$src)),
- (MFC1 (TRUNC_W_D32 AFGR64:$src))>;
- def : MipsPat<(f32 (fround AFGR64:$src)), (CVT_S_D32 AFGR64:$src)>;
- def : MipsPat<(f64 (fextend FGR32:$src)), (CVT_D32_S FGR32:$src)>;
+ def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)),
+ (PseudoCVT_D32_W GPR32Opnd:$src)>;
+ def : MipsPat<(MipsTruncIntFP AFGR64Opnd:$src),
+ (TRUNC_W_D32 AFGR64Opnd:$src)>;
+ def : MipsPat<(f32 (fround AFGR64Opnd:$src)),
+ (CVT_S_D32 AFGR64Opnd:$src)>;
+ def : MipsPat<(f64 (fextend FGR32Opnd:$src)),
+ (CVT_D32_S FGR32Opnd:$src)>;
}
let Predicates = [IsFP64bit, HasStdEnc] in {
def : MipsPat<(f64 fpimm0), (DMTC1 ZERO_64)>;
def : MipsPat<(f64 fpimm0neg), (FNEG_D64 (DMTC1 ZERO_64))>;
- def : MipsPat<(f64 (sint_to_fp CPURegs:$src)),
- (CVT_D64_W (MTC1 CPURegs:$src))>;
- def : MipsPat<(f32 (sint_to_fp CPU64Regs:$src)),
- (CVT_S_L (DMTC1 CPU64Regs:$src))>;
- def : MipsPat<(f64 (sint_to_fp CPU64Regs:$src)),
- (CVT_D64_L (DMTC1 CPU64Regs:$src))>;
+ def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)),
+ (PseudoCVT_D64_W GPR32Opnd:$src)>;
+ def : MipsPat<(f32 (sint_to_fp GPR64Opnd:$src)),
+ (EXTRACT_SUBREG (PseudoCVT_S_L GPR64Opnd:$src), sub_lo)>;
+ def : MipsPat<(f64 (sint_to_fp GPR64Opnd:$src)),
+ (PseudoCVT_D64_L GPR64Opnd:$src)>;
- def : MipsPat<(i32 (fp_to_sint FGR64:$src)),
- (MFC1 (TRUNC_W_D64 FGR64:$src))>;
- def : MipsPat<(i64 (fp_to_sint FGR32:$src)), (DMFC1 (TRUNC_L_S FGR32:$src))>;
- def : MipsPat<(i64 (fp_to_sint FGR64:$src)),
- (DMFC1 (TRUNC_L_D64 FGR64:$src))>;
+ def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
+ (TRUNC_W_D64 FGR64Opnd:$src)>;
+ def : MipsPat<(MipsTruncIntFP FGR32Opnd:$src),
+ (TRUNC_L_S FGR32Opnd:$src)>;
+ def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
+ (TRUNC_L_D64 FGR64Opnd:$src)>;
- def : MipsPat<(f32 (fround FGR64:$src)), (CVT_S_D64 FGR64:$src)>;
- def : MipsPat<(f64 (fextend FGR32:$src)), (CVT_D64_S FGR32:$src)>;
+ def : MipsPat<(f32 (fround FGR64Opnd:$src)),
+ (CVT_S_D64 FGR64Opnd:$src)>;
+ def : MipsPat<(f64 (fextend FGR32Opnd:$src)),
+ (CVT_D64_S FGR32Opnd:$src)>;
}
// Patterns for loads/stores with a reg+imm operand.
let AddedComplexity = 40 in {
- let Predicates = [IsN64, HasStdEnc] in {
- def : LoadRegImmPat<LWC1_P8, f32, load>;
- def : StoreRegImmPat<SWC1_P8, f32>;
- def : LoadRegImmPat<LDC164_P8, f64, load>;
- def : StoreRegImmPat<SDC164_P8, f64>;
- }
-
- let Predicates = [NotN64, HasStdEnc] in {
+ let Predicates = [HasStdEnc] in {
def : LoadRegImmPat<LWC1, f32, load>;
def : StoreRegImmPat<SWC1, f32>;
}
- let Predicates = [NotN64, HasMips64, HasStdEnc] in {
+ let Predicates = [IsFP64bit, HasStdEnc] in {
def : LoadRegImmPat<LDC164, f64, load>;
def : StoreRegImmPat<SDC164, f64>;
}
- let Predicates = [NotN64, NotMips64, HasStdEnc] in {
+ let Predicates = [NotFP64bit, HasStdEnc] in {
def : LoadRegImmPat<LDC1, f64, load>;
def : StoreRegImmPat<SDC1, f64>;
}
diff --git a/lib/Target/Mips/MipsInstrFormats.td b/lib/Target/Mips/MipsInstrFormats.td
index ea0737222125..737a018c67af 100644
--- a/lib/Target/Mips/MipsInstrFormats.td
+++ b/lib/Target/Mips/MipsInstrFormats.td
@@ -183,7 +183,7 @@ class BranchBase<bits<6> op, dag outs, dag ins, string asmstr,
// Format J instruction class in Mips : <|opcode|address|>
//===----------------------------------------------------------------------===//
-class FJ<bits<6> op>
+class FJ<bits<6> op> : StdArch
{
bits<26> target;
@@ -272,7 +272,7 @@ class SRLV_FM<bits<6> funct, bit rotate> : StdArch {
let Inst{5-0} = funct;
}
-class BEQ_FM<bits<6> op> {
+class BEQ_FM<bits<6> op> : StdArch {
bits<5> rs;
bits<5> rt;
bits<16> offset;
@@ -285,7 +285,7 @@ class BEQ_FM<bits<6> op> {
let Inst{15-0} = offset;
}
-class BGEZ_FM<bits<6> op, bits<5> funct> {
+class BGEZ_FM<bits<6> op, bits<5> funct> : StdArch {
bits<5> rs;
bits<16> offset;
@@ -297,17 +297,6 @@ class BGEZ_FM<bits<6> op, bits<5> funct> {
let Inst{15-0} = offset;
}
-class B_FM {
- bits<16> offset;
-
- bits<32> Inst;
-
- let Inst{31-26} = 4;
- let Inst{25-21} = 0;
- let Inst{20-16} = 0;
- let Inst{15-0} = offset;
-}
-
class SLTI_FM<bits<6> op> : StdArch {
bits<5> rt;
bits<5> rs;
@@ -321,7 +310,7 @@ class SLTI_FM<bits<6> op> : StdArch {
let Inst{15-0} = imm16;
}
-class MFLO_FM<bits<6> funct> {
+class MFLO_FM<bits<6> funct> : StdArch {
bits<5> rd;
bits<32> Inst;
@@ -333,7 +322,7 @@ class MFLO_FM<bits<6> funct> {
let Inst{5-0} = funct;
}
-class MTLO_FM<bits<6> funct> {
+class MTLO_FM<bits<6> funct> : StdArch {
bits<5> rs;
bits<32> Inst;
@@ -344,7 +333,7 @@ class MTLO_FM<bits<6> funct> {
let Inst{5-0} = funct;
}
-class SEB_FM<bits<5> funct, bits<6> funct2> {
+class SEB_FM<bits<5> funct, bits<6> funct2> : StdArch {
bits<5> rd;
bits<5> rt;
@@ -358,7 +347,7 @@ class SEB_FM<bits<5> funct, bits<6> funct2> {
let Inst{5-0} = funct2;
}
-class CLO_FM<bits<6> funct> {
+class CLO_FM<bits<6> funct> : StdArch {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@@ -374,7 +363,7 @@ class CLO_FM<bits<6> funct> {
let rt = rd;
}
-class LUI_FM {
+class LUI_FM : StdArch {
bits<5> rt;
bits<16> imm16;
@@ -386,7 +375,7 @@ class LUI_FM {
let Inst{15-0} = imm16;
}
-class JALR_FM {
+class JALR_FM : StdArch {
bits<5> rd;
bits<5> rs;
@@ -400,18 +389,7 @@ class JALR_FM {
let Inst{5-0} = 9;
}
-class BAL_FM {
- bits<16> offset;
-
- bits<32> Inst;
-
- let Inst{31-26} = 1;
- let Inst{25-21} = 0;
- let Inst{20-16} = 0x11;
- let Inst{15-0} = offset;
-}
-
-class BGEZAL_FM<bits<5> funct> {
+class BGEZAL_FM<bits<5> funct> : StdArch {
bits<5> rs;
bits<16> offset;
@@ -446,7 +424,7 @@ class MULT_FM<bits<6> op, bits<6> funct> : StdArch {
let Inst{5-0} = funct;
}
-class EXT_FM<bits<6> funct> {
+class EXT_FM<bits<6> funct> : StdArch {
bits<5> rt;
bits<5> rs;
bits<5> pos;
@@ -476,6 +454,90 @@ class RDHWR_FM {
let Inst{5-0} = 0x3b;
}
+class TEQ_FM<bits<6> funct> : StdArch {
+ bits<5> rs;
+ bits<5> rt;
+ bits<10> code_;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-6} = code_;
+ let Inst{5-0} = funct;
+}
+
+class TEQI_FM<bits<5> funct> : StdArch {
+ bits<5> rs;
+ bits<16> imm16;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 1;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = funct;
+ let Inst{15-0} = imm16;
+}
+//===----------------------------------------------------------------------===//
+// System calls format <op|code_|funct>
+//===----------------------------------------------------------------------===//
+
+class SYS_FM<bits<6> funct>
+{
+ bits<20> code_;
+ bits<32> Inst;
+ let Inst{31-26} = 0x0;
+ let Inst{25-6} = code_;
+ let Inst{5-0} = funct;
+}
+
+//===----------------------------------------------------------------------===//
+// Break instruction format <op|code_1|funct>
+//===----------------------------------------------------------------------===//
+
+class BRK_FM<bits<6> funct>
+{
+ bits<10> code_1;
+ bits<10> code_2;
+ bits<32> Inst;
+ let Inst{31-26} = 0x0;
+ let Inst{25-16} = code_1;
+ let Inst{15-6} = code_2;
+ let Inst{5-0} = funct;
+}
+
+//===----------------------------------------------------------------------===//
+// Exception return format <Cop0|1|0|funct>
+//===----------------------------------------------------------------------===//
+
+class ER_FM<bits<6> funct>
+{
+ bits<32> Inst;
+ let Inst{31-26} = 0x10;
+ let Inst{25} = 1;
+ let Inst{24-6} = 0;
+ let Inst{5-0} = funct;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Enable/disable interrupt instruction format <Cop0|MFMC0|rt|12|0|sc|0|0>
+//===----------------------------------------------------------------------===//
+
+class EI_FM<bits<1> sc>
+{
+ bits<32> Inst;
+ bits<5> rt;
+ let Inst{31-26} = 0x10;
+ let Inst{25-21} = 0xb;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = 0xc;
+ let Inst{10-6} = 0;
+ let Inst{5} = sc;
+ let Inst{4-0} = 0;
+}
+
//===----------------------------------------------------------------------===//
//
// FLOATING POINT INSTRUCTION FORMATS
@@ -609,13 +671,14 @@ class SWXC1_FM<bits<6> funct> {
}
class BC1F_FM<bit nd, bit tf> {
+ bits<3> fcc;
bits<16> offset;
bits<32> Inst;
let Inst{31-26} = 0x11;
let Inst{25-21} = 0x8;
- let Inst{20-18} = 0; // cc
+ let Inst{20-18} = fcc;
let Inst{17} = nd;
let Inst{16} = tf;
let Inst{15-0} = offset;
@@ -637,6 +700,10 @@ class CEQS_FM<bits<5> fmt> {
let Inst{3-0} = cond;
}
+class C_COND_FM<bits<5> fmt, bits<4> c> : CEQS_FM<fmt> {
+ let cond = c;
+}
+
class CMov_I_F_FM<bits<6> funct, bits<5> fmt> {
bits<5> fd;
bits<5> fs;
@@ -652,15 +719,16 @@ class CMov_I_F_FM<bits<6> funct, bits<5> fmt> {
let Inst{5-0} = funct;
}
-class CMov_F_I_FM<bit tf> {
+class CMov_F_I_FM<bit tf> : StdArch {
bits<5> rd;
bits<5> rs;
+ bits<3> fcc;
bits<32> Inst;
let Inst{31-26} = 0;
let Inst{25-21} = rs;
- let Inst{20-18} = 0; // cc
+ let Inst{20-18} = fcc;
let Inst{17} = 0;
let Inst{16} = tf;
let Inst{15-11} = rd;
@@ -671,12 +739,13 @@ class CMov_F_I_FM<bit tf> {
class CMov_F_F_FM<bits<5> fmt, bit tf> {
bits<5> fd;
bits<5> fs;
+ bits<3> fcc;
bits<32> Inst;
let Inst{31-26} = 0x11;
let Inst{25-21} = fmt;
- let Inst{20-18} = 0; // cc
+ let Inst{20-18} = fcc;
let Inst{17} = 0;
let Inst{16} = tf;
let Inst{15-11} = fs;
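
The format classes introduced in this file (TEQ_FM, TEQI_FM, SYS_FM, BRK_FM, ER_FM, EI_FM) only pin down how operand fields are packed into the 32-bit instruction word; the layout is exactly the Inst{...} assignments shown above. As a standalone illustration of that packing, the following C++ sketch reproduces the TEQ_FM and SYS_FM layouts. The helper names and the sample register numbers are made up for the example; the funct values 0x34 and 0xc match the TEQ_FM<0x34> and SYS_FM<0xc> instantiations in MipsInstrInfo.td further down.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative only, not an LLVM API: packs a register-register trap the way
// TEQ_FM lays it out above (opcode 0 | rs | rt | code_ | funct).
static uint32_t encodeTrapRR(unsigned rs, unsigned rt, unsigned code,
                             unsigned funct) {
  assert(rs < 32 && rt < 32 && code < 1024 && funct < 64);
  return (0u << 26) | (rs << 21) | (rt << 16) | (code << 6) | funct;
}

// SYS_FM: opcode 0 | 20-bit code_ | funct (0xc for syscall, 0xd for break).
static uint32_t encodeSys(unsigned code, unsigned funct) {
  assert(code < (1u << 20) && funct < 64);
  return (0u << 26) | (code << 6) | funct;
}

int main() {
  // teq $4, $5 with code 0 uses funct 0x34, as in TEQ_FM<0x34>.
  std::printf("teq     : 0x%08x\n", (unsigned)encodeTrapRR(4, 5, 0, 0x34));
  // syscall 0 uses funct 0xc, as in SYS_FM<0xc>.
  std::printf("syscall : 0x%08x\n", (unsigned)encodeSys(0, 0xc));
  return 0;
}
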
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index ad92d41209e9..0ebad0584757 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -22,11 +22,14 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "MipsGenInstrInfo.inc"
using namespace llvm;
+// Pin the vtable to this file.
+void MipsInstrInfo::anchor() {}
+
MipsInstrInfo::MipsInstrInfo(MipsTargetMachine &tm, unsigned UncondBr)
: MipsGenInstrInfo(Mips::ADJCALLSTACKDOWN, Mips::ADJCALLSTACKUP),
TM(tm), UncondBrOpc(UncondBr) {}
@@ -61,15 +64,6 @@ MachineMemOperand *MipsInstrInfo::GetMemOperand(MachineBasicBlock &MBB, int FI,
MFI.getObjectSize(FI), Align);
}
-MachineInstr*
-MipsInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
- uint64_t Offset, const MDNode *MDPtr,
- DebugLoc DL) const {
- MachineInstrBuilder MIB = BuildMI(MF, DL, get(Mips::DBG_VALUE))
- .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
- return &*MIB;
-}
-
//===----------------------------------------------------------------------===//
// Branch Analysis
//===----------------------------------------------------------------------===//
@@ -77,7 +71,7 @@ MipsInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
void MipsInstrInfo::AnalyzeCondBr(const MachineInstr *Inst, unsigned Opc,
MachineBasicBlock *&BB,
SmallVectorImpl<MachineOperand> &Cond) const {
- assert(GetAnalyzableBrOpc(Opc) && "Not an analyzable branch");
+ assert(getAnalyzableBrOpc(Opc) && "Not an analyzable branch");
int NumOp = Inst->getNumExplicitOperands();
// for both int and fp branches, the last explicit operand is the
@@ -167,7 +161,7 @@ RemoveBranch(MachineBasicBlock &MBB) const
// Up to 2 branches are removed.
// Note that indirect branches are not removed.
for(removed = 0; I != REnd && removed < 2; ++I, ++removed)
- if (!GetAnalyzableBrOpc(I->getOpcode()))
+ if (!getAnalyzableBrOpc(I->getOpcode()))
break;
MBB.erase(I.base(), FirstBr.base());
@@ -182,7 +176,7 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
{
assert( (Cond.size() && Cond.size() <= 3) &&
"Invalid Mips branch condition!");
- Cond[0].setImm(GetOppositeBranchOpc(Cond[0].getImm()));
+ Cond[0].setImm(getOppositeBranchOpc(Cond[0].getImm()));
return false;
}
@@ -210,7 +204,7 @@ AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
BranchInstrs.push_back(LastInst);
// Not an analyzable branch (e.g., indirect jump).
- if (!GetAnalyzableBrOpc(LastOpc))
+ if (!getAnalyzableBrOpc(LastOpc))
return LastInst->isIndirectBranch() ? BT_Indirect : BT_None;
// Get the second to last instruction in the block.
@@ -219,7 +213,7 @@ AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
if (++I != REnd) {
SecondLastInst = &*I;
- SecondLastOpc = GetAnalyzableBrOpc(SecondLastInst->getOpcode());
+ SecondLastOpc = getAnalyzableBrOpc(SecondLastInst->getOpcode());
// Not an analyzable branch (must be an indirect jump).
if (isUnpredicatedTerminator(SecondLastInst) && !SecondLastOpc)
@@ -228,7 +222,7 @@ AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
// If there is only one terminator instruction, process it.
if (!SecondLastOpc) {
- // Unconditional branch
+ // Unconditional branch.
if (LastOpc == UncondBrOpc) {
TBB = LastInst->getOperand(0).getMBB();
return BT_Uncond;
@@ -280,5 +274,22 @@ unsigned MipsInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const char *AsmStr = MI->getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
}
+ case Mips::CONSTPOOL_ENTRY:
+ // If this machine instr is a constant pool entry, its size is recorded as
+ // operand #2.
+ return MI->getOperand(2).getImm();
}
}
+
+MachineInstrBuilder
+MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
+ MachineBasicBlock::iterator I) const {
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));
+
+ for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J)
+ MIB.addOperand(I->getOperand(J));
+
+ MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end());
+ return MIB;
+}
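
MipsInstrInfo::genInstrWithNewOpc, added above, builds a fresh instruction with the requested opcode, copies every operand of the original over unchanged, and preserves its memory operands. A simplified standalone model of that behaviour is sketched below; the Instr type, the rebuildWithNewOpcode helper, and the sample opcodes are invented for illustration and are not LLVM API.

#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-in for a machine instruction: an opcode plus operands.
// Illustrative only; this is not the LLVM MachineInstr representation.
struct Instr {
  unsigned Opcode;
  std::vector<int64_t> Operands;
};

// Mirrors the idea of genInstrWithNewOpc: same operands, new opcode.
static Instr rebuildWithNewOpcode(const Instr &Old, unsigned NewOpc) {
  Instr New;
  New.Opcode = NewOpc;
  New.Operands = Old.Operands; // operand-for-operand copy, as in the loop above
  return New;
}

int main() {
  // Sample values only: a load-like instruction with rt, base and offset.
  Instr Load{/*Opcode=*/0x23, {/*rt=*/4, /*base=*/29, /*offset=*/16}};
  Instr Rebuilt = rebuildWithNewOpcode(Load, /*NewOpc=*/0x37);
  std::printf("new opcode 0x%x, %zu operands kept\n",
              Rebuilt.Opcode, Rebuilt.Operands.size());
  return 0;
}
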
diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h
index 8c05d97beac2..d9ac961cd33c 100644
--- a/lib/Target/Mips/MipsInstrInfo.h
+++ b/lib/Target/Mips/MipsInstrInfo.h
@@ -17,6 +17,7 @@
#include "Mips.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsRegisterInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -26,6 +27,7 @@
namespace llvm {
class MipsInstrInfo : public MipsGenInstrInfo {
+ virtual void anchor();
protected:
MipsTargetMachine &TM;
unsigned UncondBrOpc;
@@ -66,11 +68,6 @@ public:
bool AllowModify,
SmallVectorImpl<MachineInstr*> &BranchInstrs) const;
- virtual MachineInstr* emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx, uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const;
-
/// Insert nop instruction when hazard condition is found
virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
@@ -81,7 +78,7 @@ public:
///
virtual const MipsRegisterInfo &getRegisterInfo() const = 0;
- virtual unsigned GetOppositeBranchOpc(unsigned Opc) const = 0;
+ virtual unsigned getOppositeBranchOpc(unsigned Opc) const = 0;
/// Return the number of bytes of code the specified instruction may be.
unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
@@ -116,6 +113,11 @@ public:
const TargetRegisterInfo *TRI,
int64_t Offset) const = 0;
+ /// Create an instruction which has the same operands and memory operands
+ /// as MI but has a new opcode.
+ MachineInstrBuilder genInstrWithNewOpc(unsigned NewOpc,
+ MachineBasicBlock::iterator I) const;
+
protected:
bool isZeroImm(const MachineOperand &op) const;
@@ -123,7 +125,7 @@ protected:
unsigned Flag) const;
private:
- virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const = 0;
+ virtual unsigned getAnalyzableBrOpc(unsigned Opc) const = 0;
void AnalyzeCondBr(const MachineInstr *Inst, unsigned Opc,
MachineBasicBlock *&BB,
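
The anchor() declaration added to MipsInstrInfo here (with its empty out-of-line definition in MipsInstrInfo.cpp above, "Pin the vtable to this file") is the usual key-function idiom: giving the class one non-inline virtual member pins its vtable to a single object file instead of letting every translation unit that includes the header emit a weak copy. A minimal sketch of the pattern, using a made-up Widget class rather than the Mips types:

#include <cstdio>

// "widget.h": Widget is a made-up example class. Every virtual function is
// inline except anchor(), which is only declared here.
struct Widget {
  virtual ~Widget() {}
  virtual int size() const { return 42; }
private:
  virtual void anchor(); // the "key function"
};

// "widget.cpp": the single out-of-line virtual definition makes this
// translation unit the home of Widget's vtable and RTTI, so other users of
// the header do not each emit their own weak copy.
void Widget::anchor() {}

int main() {
  Widget W;
  std::printf("%d\n", W.size());
  return 0;
}
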
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index 86ec72982b33..ebdbaa416fcc 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -23,10 +23,9 @@ def SDT_MipsCMov : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
SDTCisInt<4>]>;
def SDT_MipsCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
def SDT_MipsCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-def SDT_ExtractLOHI : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVT<1, untyped>,
- SDTCisVT<2, i32>]>;
-def SDT_InsertLOHI : SDTypeProfile<1, 2, [SDTCisVT<0, untyped>,
- SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>;
+def SDT_MFLOHI : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVT<1, untyped>]>;
+def SDT_MTLOHI : SDTypeProfile<1, 2, [SDTCisVT<0, untyped>,
+ SDTCisInt<1>, SDTCisSameAs<1, 2>]>;
def SDT_MipsMultDiv : SDTypeProfile<1, 2, [SDTCisVT<0, untyped>, SDTCisInt<1>,
SDTCisSameAs<1, 2>]>;
def SDT_MipsMAddMSub : SDTypeProfile<1, 3,
@@ -85,11 +84,12 @@ def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd,
[SDNPHasChain, SDNPSideEffect,
SDNPOptInGlue, SDNPOutGlue]>;
-// Node used to extract integer from LO/HI register.
-def ExtractLOHI : SDNode<"MipsISD::ExtractLOHI", SDT_ExtractLOHI>;
+// Nodes used to extract LO/HI registers.
+def MipsMFHI : SDNode<"MipsISD::MFHI", SDT_MFLOHI>;
+def MipsMFLO : SDNode<"MipsISD::MFLO", SDT_MFLOHI>;
// Node used to insert 32-bit integers to LOHI register pair.
-def InsertLOHI : SDNode<"MipsISD::InsertLOHI", SDT_InsertLOHI>;
+def MipsMTLOHI : SDNode<"MipsISD::MTLOHI", SDT_MTLOHI>;
// Mult nodes.
def MipsMult : SDNode<"MipsISD::Mult", SDT_MipsMultDiv>;
@@ -104,7 +104,8 @@ def MipsMSubu : SDNode<"MipsISD::MSubu", SDT_MipsMAddMSub>;
// DivRem(u) nodes
def MipsDivRem : SDNode<"MipsISD::DivRem", SDT_MipsMultDiv>;
def MipsDivRemU : SDNode<"MipsISD::DivRemU", SDT_MipsMultDiv>;
-def MipsDivRem16 : SDNode<"MipsISD::DivRem16", SDT_MipsDivRem16, [SDNPOutGlue]>;
+def MipsDivRem16 : SDNode<"MipsISD::DivRem16", SDT_MipsDivRem16,
+ [SDNPOutGlue]>;
def MipsDivRemU16 : SDNode<"MipsISD::DivRemU16", SDT_MipsDivRem16,
[SDNPOutGlue]>;
@@ -113,7 +114,7 @@ def MipsDivRemU16 : SDNode<"MipsISD::DivRemU16", SDT_MipsDivRem16,
// Wrapper node patterns give the instruction selector a chance to replace
// target constant nodes that would otherwise remain unchanged with ADDiu
// nodes. Without these wrapper node patterns, the following conditional move
-// instrucion is emitted when function cmov2 in test/CodeGen/Mips/cmov.ll is
+// instruction is emitted when function cmov2 in test/CodeGen/Mips/cmov.ll is
// compiled:
// movn %got(d)($gp), %got(c)($gp), $4
// This instruction is illegal since movn can take only register operands.
@@ -180,6 +181,12 @@ def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">,
def HasStdEnc : Predicate<"Subtarget.hasStandardEncoding()">,
AssemblerPredicate<"!FeatureMips16">;
def NotDSP : Predicate<"!Subtarget.hasDSP()">;
+def InMicroMips : Predicate<"Subtarget.inMicroMipsMode()">,
+ AssemblerPredicate<"FeatureMicroMips">;
+def NotInMicroMips : Predicate<"!Subtarget.inMicroMipsMode()">,
+ AssemblerPredicate<"!FeatureMicroMips">;
+def IsLE : Predicate<"Subtarget.isLittle()">;
+def IsBE : Predicate<"!Subtarget.isLittle()">;
class MipsPat<dag pattern, dag result> : Pat<pattern, result> {
let Predicates = [HasStdEnc];
@@ -240,7 +247,7 @@ def brtarget : Operand<OtherVT> {
def calltarget : Operand<iPTR> {
let EncoderMethod = "getJumpTargetOpValue";
}
-def calltarget64: Operand<i64>;
+
def simm16 : Operand<i32> {
let DecoderMethod= "DecodeSimm16";
}
@@ -248,48 +255,73 @@ def simm16 : Operand<i32> {
def simm20 : Operand<i32> {
}
-def simm16_64 : Operand<i64>;
-def shamt : Operand<i32>;
+def uimm20 : Operand<i32> {
+}
+
+def uimm10 : Operand<i32> {
+}
+
+def simm16_64 : Operand<i64> {
+ let DecoderMethod = "DecodeSimm16";
+}
// Unsigned Operand
+def uimm5 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+}
+
+def uimm6 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+}
+
def uimm16 : Operand<i32> {
let PrintMethod = "printUnsignedImm";
}
+def pcrel16 : Operand<i32> {
+}
+
def MipsMemAsmOperand : AsmOperandClass {
let Name = "Mem";
let ParserMethod = "parseMemOperand";
}
-// Address operand
-def mem : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops CPURegs, simm16);
- let EncoderMethod = "getMemEncoding";
- let ParserMatchClass = MipsMemAsmOperand;
- let OperandType = "OPERAND_MEMORY";
+def MipsInvertedImmoperand : AsmOperandClass {
+ let Name = "InvNum";
+ let RenderMethod = "addImmOperands";
+ let ParserMethod = "parseInvNum";
+}
+
+def PtrRegAsmOperand : AsmOperandClass {
+ let Name = "PtrReg";
+ let ParserMethod = "parsePtrReg";
}
-def mem64 : Operand<i64> {
+
+def InvertedImOperand : Operand<i32> {
+ let ParserMatchClass = MipsInvertedImmoperand;
+}
+
+// Address operand
+def mem : Operand<iPTR> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops CPU64Regs, simm16_64);
+ let MIOperandInfo = (ops ptr_rc, simm16);
let EncoderMethod = "getMemEncoding";
let ParserMatchClass = MipsMemAsmOperand;
let OperandType = "OPERAND_MEMORY";
}
-def mem_ea : Operand<i32> {
+def mem_ea : Operand<iPTR> {
let PrintMethod = "printMemOperandEA";
- let MIOperandInfo = (ops CPURegs, simm16);
+ let MIOperandInfo = (ops ptr_rc, simm16);
let EncoderMethod = "getMemEncoding";
let OperandType = "OPERAND_MEMORY";
}
-def mem_ea_64 : Operand<i64> {
- let PrintMethod = "printMemOperandEA";
- let MIOperandInfo = (ops CPU64Regs, simm16_64);
- let EncoderMethod = "getMemEncoding";
- let OperandType = "OPERAND_MEMORY";
+def PtrRC : Operand<iPTR> {
+ let MIOperandInfo = (ops ptr_rc);
+ let DecoderMethod = "DecodePtrRegisterClass";
+ let ParserMatchClass = PtrRegAsmOperand;
}
// size operand of ext instruction
@@ -362,6 +394,9 @@ def addr :
def addrRegImm :
ComplexPattern<iPTR, 2, "selectAddrRegImm", [frameindex]>;
+def addrRegReg :
+ ComplexPattern<iPTR, 2, "selectAddrRegReg", [frameindex]>;
+
def addrDefault :
ComplexPattern<iPTR, 2, "selectAddrDefault", [frameindex]>;
@@ -382,160 +417,111 @@ class ArithLogicR<string opstr, RegisterOperand RO, bit isComm = 0,
// Arithmetic and logical instructions with 2 register operands.
class ArithLogicI<string opstr, Operand Od, RegisterOperand RO,
+ InstrItinClass Itin = NoItinerary,
SDPatternOperator imm_type = null_frag,
SDPatternOperator OpNode = null_frag> :
InstSE<(outs RO:$rt), (ins RO:$rs, Od:$imm16),
!strconcat(opstr, "\t$rt, $rs, $imm16"),
[(set RO:$rt, (OpNode RO:$rs, imm_type:$imm16))],
- IIAlu, FrmI, opstr> {
+ Itin, FrmI, opstr> {
let isReMaterializable = 1;
+ let TwoOperandAliasConstraint = "$rs = $rt";
}
// Arithmetic Multiply ADD/SUB
class MArithR<string opstr, bit isComm = 0> :
- InstSE<(outs), (ins CPURegsOpnd:$rs, CPURegsOpnd:$rt),
- !strconcat(opstr, "\t$rs, $rt"), [], IIImul, FrmR> {
- let Defs = [HI, LO];
- let Uses = [HI, LO];
+ InstSE<(outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
+ !strconcat(opstr, "\t$rs, $rt"), [], IIImult, FrmR, opstr> {
+ let Defs = [HI0, LO0];
+ let Uses = [HI0, LO0];
let isCommutable = isComm;
}
// Logical
-class LogicNOR<string opstr, RegisterOperand RC>:
- InstSE<(outs RC:$rd), (ins RC:$rs, RC:$rt),
+class LogicNOR<string opstr, RegisterOperand RO>:
+ InstSE<(outs RO:$rd), (ins RO:$rs, RO:$rt),
!strconcat(opstr, "\t$rd, $rs, $rt"),
- [(set RC:$rd, (not (or RC:$rs, RC:$rt)))], IIAlu, FrmR, opstr> {
+ [(set RO:$rd, (not (or RO:$rs, RO:$rt)))], IIArith, FrmR, opstr> {
let isCommutable = 1;
}
// Shifts
class shift_rotate_imm<string opstr, Operand ImmOpnd,
- RegisterOperand RC, SDPatternOperator OpNode = null_frag,
+ RegisterOperand RO, SDPatternOperator OpNode = null_frag,
SDPatternOperator PF = null_frag> :
- InstSE<(outs RC:$rd), (ins RC:$rt, ImmOpnd:$shamt),
+ InstSE<(outs RO:$rd), (ins RO:$rt, ImmOpnd:$shamt),
!strconcat(opstr, "\t$rd, $rt, $shamt"),
- [(set RC:$rd, (OpNode RC:$rt, PF:$shamt))], IIAlu, FrmR, opstr>;
+ [(set RO:$rd, (OpNode RO:$rt, PF:$shamt))], IIArith, FrmR, opstr>;
-class shift_rotate_reg<string opstr, RegisterOperand RC,
+class shift_rotate_reg<string opstr, RegisterOperand RO,
SDPatternOperator OpNode = null_frag>:
- InstSE<(outs RC:$rd), (ins CPURegsOpnd:$rs, RC:$rt),
+ InstSE<(outs RO:$rd), (ins RO:$rt, GPR32Opnd:$rs),
!strconcat(opstr, "\t$rd, $rt, $rs"),
- [(set RC:$rd, (OpNode RC:$rt, CPURegsOpnd:$rs))], IIAlu, FrmR, opstr>;
+ [(set RO:$rd, (OpNode RO:$rt, GPR32Opnd:$rs))], IIArith, FrmR, opstr>;
// Load Upper Imediate
-class LoadUpper<string opstr, RegisterClass RC, Operand Imm>:
- InstSE<(outs RC:$rt), (ins Imm:$imm16), !strconcat(opstr, "\t$rt, $imm16"),
- [], IIAlu, FrmI>, IsAsCheapAsAMove {
+class LoadUpper<string opstr, RegisterOperand RO, Operand Imm>:
+ InstSE<(outs RO:$rt), (ins Imm:$imm16), !strconcat(opstr, "\t$rt, $imm16"),
+ [], IIArith, FrmI, opstr>, IsAsCheapAsAMove {
let neverHasSideEffects = 1;
let isReMaterializable = 1;
}
-class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
- InstrItinClass itin>: FFI<op, outs, ins, asmstr, pattern> {
- bits<21> addr;
- let Inst{25-21} = addr{20-16};
- let Inst{15-0} = addr{15-0};
- let DecoderMethod = "DecodeMem";
-}
-
// Memory Load/Store
-class Load<string opstr, SDPatternOperator OpNode, RegisterClass RC,
- Operand MemOpnd, ComplexPattern Addr, string ofsuffix> :
- InstSE<(outs RC:$rt), (ins MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(set RC:$rt, (OpNode Addr:$addr))], NoItinerary, FrmI,
- !strconcat(opstr, ofsuffix)> {
+class Load<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
+ InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
+ InstSE<(outs RO:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ [(set RO:$rt, (OpNode Addr:$addr))], Itin, FrmI, opstr> {
let DecoderMethod = "DecodeMem";
let canFoldAsLoad = 1;
let mayLoad = 1;
}
-class Store<string opstr, SDPatternOperator OpNode, RegisterClass RC,
- Operand MemOpnd, ComplexPattern Addr, string ofsuffix> :
- InstSE<(outs), (ins RC:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(OpNode RC:$rt, Addr:$addr)], NoItinerary, FrmI,
- !strconcat(opstr, ofsuffix)> {
+class Store<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
+ InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
+ InstSE<(outs), (ins RO:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ [(OpNode RO:$rt, Addr:$addr)], Itin, FrmI, opstr> {
let DecoderMethod = "DecodeMem";
let mayStore = 1;
}
-multiclass LoadM<string opstr, RegisterClass RC,
- SDPatternOperator OpNode = null_frag,
- ComplexPattern Addr = addr> {
- def NAME : Load<opstr, OpNode, RC, mem, Addr, "">,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : Load<opstr, OpNode, RC, mem64, Addr, "_p8">,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- let isCodeGenOnly = 1;
- }
-}
-
-multiclass StoreM<string opstr, RegisterClass RC,
- SDPatternOperator OpNode = null_frag,
- ComplexPattern Addr = addr> {
- def NAME : Store<opstr, OpNode, RC, mem, Addr, "">,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : Store<opstr, OpNode, RC, mem64, Addr, "_p8">,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- let isCodeGenOnly = 1;
- }
-}
-
// Load/Store Left/Right
let canFoldAsLoad = 1 in
-class LoadLeftRight<string opstr, SDNode OpNode, RegisterClass RC,
- Operand MemOpnd> :
- InstSE<(outs RC:$rt), (ins MemOpnd:$addr, RC:$src),
+class LoadLeftRight<string opstr, SDNode OpNode, RegisterOperand RO,
+ InstrItinClass Itin> :
+ InstSE<(outs RO:$rt), (ins mem:$addr, RO:$src),
!strconcat(opstr, "\t$rt, $addr"),
- [(set RC:$rt, (OpNode addr:$addr, RC:$src))], NoItinerary, FrmI> {
+ [(set RO:$rt, (OpNode addr:$addr, RO:$src))], Itin, FrmI> {
let DecoderMethod = "DecodeMem";
string Constraints = "$src = $rt";
}
-class StoreLeftRight<string opstr, SDNode OpNode, RegisterClass RC,
- Operand MemOpnd>:
- InstSE<(outs), (ins RC:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(OpNode RC:$rt, addr:$addr)], NoItinerary, FrmI> {
+class StoreLeftRight<string opstr, SDNode OpNode, RegisterOperand RO,
+ InstrItinClass Itin> :
+ InstSE<(outs), (ins RO:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ [(OpNode RO:$rt, addr:$addr)], Itin, FrmI> {
let DecoderMethod = "DecodeMem";
}
-multiclass LoadLeftRightM<string opstr, SDNode OpNode, RegisterClass RC> {
- def NAME : LoadLeftRight<opstr, OpNode, RC, mem>,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : LoadLeftRight<opstr, OpNode, RC, mem64>,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- let isCodeGenOnly = 1;
- }
-}
-
-multiclass StoreLeftRightM<string opstr, SDNode OpNode, RegisterClass RC> {
- def NAME : StoreLeftRight<opstr, OpNode, RC, mem>,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : StoreLeftRight<opstr, OpNode, RC, mem64>,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- let isCodeGenOnly = 1;
- }
-}
-
// Conditional Branch
-class CBranch<string opstr, PatFrag cond_op, RegisterClass RC> :
- InstSE<(outs), (ins RC:$rs, RC:$rt, brtarget:$offset),
+class CBranch<string opstr, DAGOperand opnd, PatFrag cond_op,
+ RegisterOperand RO> :
+ InstSE<(outs), (ins RO:$rs, RO:$rt, opnd:$offset),
!strconcat(opstr, "\t$rs, $rt, $offset"),
- [(brcond (i32 (cond_op RC:$rs, RC:$rt)), bb:$offset)], IIBranch,
- FrmI> {
+ [(brcond (i32 (cond_op RO:$rs, RO:$rt)), bb:$offset)], IIBranch,
+ FrmI, opstr> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
let Defs = [AT];
}
-class CBranchZero<string opstr, PatFrag cond_op, RegisterClass RC> :
- InstSE<(outs), (ins RC:$rs, brtarget:$offset),
+class CBranchZero<string opstr, DAGOperand opnd, PatFrag cond_op,
+ RegisterOperand RO> :
+ InstSE<(outs), (ins RO:$rs, opnd:$offset),
!strconcat(opstr, "\t$rs, $offset"),
- [(brcond (i32 (cond_op RC:$rs, 0)), bb:$offset)], IIBranch, FrmI> {
+ [(brcond (i32 (cond_op RO:$rs, 0)), bb:$offset)], IIBranch,
+ FrmI, opstr> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
@@ -543,24 +529,24 @@ class CBranchZero<string opstr, PatFrag cond_op, RegisterClass RC> :
}
// SetCC
-class SetCC_R<string opstr, PatFrag cond_op, RegisterClass RC> :
- InstSE<(outs CPURegsOpnd:$rd), (ins RC:$rs, RC:$rt),
+class SetCC_R<string opstr, PatFrag cond_op, RegisterOperand RO> :
+ InstSE<(outs GPR32Opnd:$rd), (ins RO:$rs, RO:$rt),
!strconcat(opstr, "\t$rd, $rs, $rt"),
- [(set CPURegsOpnd:$rd, (cond_op RC:$rs, RC:$rt))],
- IIAlu, FrmR, opstr>;
+ [(set GPR32Opnd:$rd, (cond_op RO:$rs, RO:$rt))],
+ IIslt, FrmR, opstr>;
class SetCC_I<string opstr, PatFrag cond_op, Operand Od, PatLeaf imm_type,
- RegisterClass RC>:
- InstSE<(outs CPURegsOpnd:$rt), (ins RC:$rs, Od:$imm16),
+ RegisterOperand RO>:
+ InstSE<(outs GPR32Opnd:$rt), (ins RO:$rs, Od:$imm16),
!strconcat(opstr, "\t$rt, $rs, $imm16"),
- [(set CPURegsOpnd:$rt, (cond_op RC:$rs, imm_type:$imm16))],
- IIAlu, FrmI, opstr>;
+ [(set GPR32Opnd:$rt, (cond_op RO:$rs, imm_type:$imm16))],
+ IIslt, FrmI, opstr>;
// Jump
class JumpFJ<DAGOperand opnd, string opstr, SDPatternOperator operator,
- SDPatternOperator targetoperator> :
+ SDPatternOperator targetoperator, string bopstr> :
InstSE<(outs), (ins opnd:$target), !strconcat(opstr, "\t$target"),
- [(operator targetoperator:$target)], IIBranch, FrmJ> {
+ [(operator targetoperator:$target)], IIBranch, FrmJ, bopstr> {
let isTerminator=1;
let isBarrier=1;
let hasDelaySlot = 1;
@@ -569,9 +555,9 @@ class JumpFJ<DAGOperand opnd, string opstr, SDPatternOperator operator,
}
// Unconditional branch
-class UncondBranch<string opstr> :
- InstSE<(outs), (ins brtarget:$offset), !strconcat(opstr, "\t$offset"),
- [(br bb:$offset)], IIBranch, FrmI> {
+class UncondBranch<Instruction BEQInst> :
+ PseudoSE<(outs), (ins brtarget:$offset), [(br bb:$offset)], IIBranch>,
+ PseudoInstExpansion<(BEQInst ZERO, ZERO, brtarget:$offset)> {
let isBranch = 1;
let isTerminator = 1;
let isBarrier = 1;
@@ -582,17 +568,20 @@ class UncondBranch<string opstr> :
// Base class for indirect branch and return instruction classes.
let isTerminator=1, isBarrier=1, hasDelaySlot = 1 in
-class JumpFR<RegisterClass RC, SDPatternOperator operator = null_frag>:
- InstSE<(outs), (ins RC:$rs), "jr\t$rs", [(operator RC:$rs)], IIBranch, FrmR>;
+class JumpFR<string opstr, RegisterOperand RO,
+ SDPatternOperator operator = null_frag>:
+ InstSE<(outs), (ins RO:$rs), "jr\t$rs", [(operator RO:$rs)], IIBranch,
+ FrmR, opstr>;
// Indirect branch
-class IndirectBranch<RegisterClass RC>: JumpFR<RC, brind> {
+class IndirectBranch<string opstr, RegisterOperand RO> :
+ JumpFR<opstr, RO, brind> {
let isBranch = 1;
let isIndirectBranch = 1;
}
// Return instruction
-class RetBase<RegisterClass RC>: JumpFR<RC> {
+class RetBase<string opstr, RegisterOperand RO>: JumpFR<opstr, RO> {
let isReturn = 1;
let isCodeGenOnly = 1;
let hasCtrlDep = 1;
@@ -601,29 +590,30 @@ class RetBase<RegisterClass RC>: JumpFR<RC> {
// Jump and Link (Call)
let isCall=1, hasDelaySlot=1, Defs = [RA] in {
- class JumpLink<string opstr> :
- InstSE<(outs), (ins calltarget:$target), !strconcat(opstr, "\t$target"),
- [(MipsJmpLink imm:$target)], IIBranch, FrmJ> {
+ class JumpLink<string opstr, DAGOperand opnd> :
+ InstSE<(outs), (ins opnd:$target), !strconcat(opstr, "\t$target"),
+ [(MipsJmpLink imm:$target)], IIBranch, FrmJ, opstr> {
let DecoderMethod = "DecodeJumpTarget";
}
- class JumpLinkRegPseudo<RegisterClass RC, Instruction JALRInst,
- Register RetReg>:
- PseudoSE<(outs), (ins RC:$rs), [(MipsJmpLink RC:$rs)], IIBranch>,
- PseudoInstExpansion<(JALRInst RetReg, RC:$rs)>;
+ class JumpLinkRegPseudo<RegisterOperand RO, Instruction JALRInst,
+ Register RetReg, RegisterOperand ResRO = RO>:
+ PseudoSE<(outs), (ins RO:$rs), [(MipsJmpLink RO:$rs)], IIBranch>,
+ PseudoInstExpansion<(JALRInst RetReg, ResRO:$rs)>;
- class JumpLinkReg<string opstr, RegisterClass RC>:
- InstSE<(outs RC:$rd), (ins RC:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [], IIBranch, FrmR>;
+ class JumpLinkReg<string opstr, RegisterOperand RO>:
+ InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
+ [], IIBranch, FrmR, opstr>;
- class BGEZAL_FT<string opstr, RegisterOperand RO> :
- InstSE<(outs), (ins RO:$rs, brtarget:$offset),
- !strconcat(opstr, "\t$rs, $offset"), [], IIBranch, FrmI>;
+ class BGEZAL_FT<string opstr, DAGOperand opnd, RegisterOperand RO> :
+ InstSE<(outs), (ins RO:$rs, opnd:$offset),
+ !strconcat(opstr, "\t$rs, $offset"), [], IIBranch, FrmI, opstr>;
}
-class BAL_FT :
- InstSE<(outs), (ins brtarget:$offset), "bal\t$offset", [], IIBranch, FrmI> {
+class BAL_BR_Pseudo<Instruction RealInst> :
+ PseudoSE<(outs), (ins brtarget:$offset), [], IIBranch>,
+ PseudoInstExpansion<(RealInst ZERO, brtarget:$offset)> {
let isBranch = 1;
let isTerminator = 1;
let isBarrier = 1;
@@ -631,12 +621,49 @@ class BAL_FT :
let Defs = [RA];
}
+// Syscall
+class SYS_FT<string opstr> :
+ InstSE<(outs), (ins uimm20:$code_),
+ !strconcat(opstr, "\t$code_"), [], NoItinerary, FrmI>;
+// Break
+class BRK_FT<string opstr> :
+ InstSE<(outs), (ins uimm10:$code_1, uimm10:$code_2),
+ !strconcat(opstr, "\t$code_1, $code_2"), [], NoItinerary, FrmOther>;
+
+// (D)Eret
+class ER_FT<string opstr> :
+ InstSE<(outs), (ins),
+ opstr, [], NoItinerary, FrmOther>;
+
+// Interrupts
+class DEI_FT<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$rt), (ins),
+ !strconcat(opstr, "\t$rt"), [], NoItinerary, FrmOther>;
+
+// Wait
+class WAIT_FT<string opstr> :
+ InstSE<(outs), (ins), opstr, [], NoItinerary, FrmOther> {
+ let Inst{31-26} = 0x10;
+ let Inst{25} = 1;
+ let Inst{24-6} = 0;
+ let Inst{5-0} = 0x20;
+}
+
// Sync
let hasSideEffects = 1 in
class SYNC_FT :
InstSE<(outs), (ins i32imm:$stype), "sync $stype", [(MipsSync imm:$stype)],
NoItinerary, FrmOther>;
+let hasSideEffects = 1 in
+class TEQ_FT<string opstr, RegisterOperand RO> :
+ InstSE<(outs), (ins RO:$rs, RO:$rt, uimm16:$code_),
+ !strconcat(opstr, "\t$rs, $rt, $code_"), [], NoItinerary,
+ FrmI, opstr>;
+
+class TEQI_FT<string opstr, RegisterOperand RO> :
+ InstSE<(outs), (ins RO:$rs, uimm16:$imm16),
+ !strconcat(opstr, "\t$rs, $imm16"), [], NoItinerary, FrmOther, opstr>;
// Mul, Div
class Mult<string opstr, InstrItinClass itin, RegisterOperand RO,
list<Register> DefRegs> :
@@ -651,49 +678,61 @@ class Mult<string opstr, InstrItinClass itin, RegisterOperand RO,
// operands.
class MultDivPseudo<Instruction RealInst, RegisterClass R0, RegisterOperand R1,
SDPatternOperator OpNode, InstrItinClass Itin,
- bit IsComm = 1, bit HasSideEffects = 0> :
+ bit IsComm = 1, bit HasSideEffects = 0,
+ bit UsesCustomInserter = 0> :
PseudoSE<(outs R0:$ac), (ins R1:$rs, R1:$rt),
[(set R0:$ac, (OpNode R1:$rs, R1:$rt))], Itin>,
PseudoInstExpansion<(RealInst R1:$rs, R1:$rt)> {
let isCommutable = IsComm;
let hasSideEffects = HasSideEffects;
+ let usesCustomInserter = UsesCustomInserter;
}
// Pseudo multiply add/sub instruction with explicit accumulator register
// operands.
class MAddSubPseudo<Instruction RealInst, SDPatternOperator OpNode>
- : PseudoSE<(outs ACRegs:$ac),
- (ins CPURegsOpnd:$rs, CPURegsOpnd:$rt, ACRegs:$acin),
- [(set ACRegs:$ac,
- (OpNode CPURegsOpnd:$rs, CPURegsOpnd:$rt, ACRegs:$acin))],
- IIImul>,
- PseudoInstExpansion<(RealInst CPURegsOpnd:$rs, CPURegsOpnd:$rt)> {
+ : PseudoSE<(outs ACC64:$ac),
+ (ins GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64:$acin),
+ [(set ACC64:$ac,
+ (OpNode GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64:$acin))],
+ IIImult>,
+ PseudoInstExpansion<(RealInst GPR32Opnd:$rs, GPR32Opnd:$rt)> {
string Constraints = "$acin = $ac";
}
class Div<string opstr, InstrItinClass itin, RegisterOperand RO,
list<Register> DefRegs> :
InstSE<(outs), (ins RO:$rs, RO:$rt), !strconcat(opstr, "\t$$zero, $rs, $rt"),
- [], itin, FrmR> {
+ [], itin, FrmR, opstr> {
let Defs = DefRegs;
}
// Move from Hi/Lo
-class MoveFromLOHI<string opstr, RegisterClass RC, list<Register> UseRegs>:
- InstSE<(outs RC:$rd), (ins), !strconcat(opstr, "\t$rd"), [], IIHiLo, FrmR> {
- let Uses = UseRegs;
+class PseudoMFLOHI<RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode>
+ : PseudoSE<(outs DstRC:$rd), (ins SrcRC:$hilo),
+ [(set DstRC:$rd, (OpNode SrcRC:$hilo))], IIHiLo>;
+
+class MoveFromLOHI<string opstr, RegisterOperand RO, Register UseReg>:
+ InstSE<(outs RO:$rd), (ins), !strconcat(opstr, "\t$rd"), [], IIHiLo, FrmR,
+ opstr> {
+ let Uses = [UseReg];
let neverHasSideEffects = 1;
}
-class MoveToLOHI<string opstr, RegisterClass RC, list<Register> DefRegs>:
- InstSE<(outs), (ins RC:$rs), !strconcat(opstr, "\t$rs"), [], IIHiLo, FrmR> {
+class PseudoMTLOHI<RegisterClass DstRC, RegisterClass SrcRC>
+ : PseudoSE<(outs DstRC:$lohi), (ins SrcRC:$lo, SrcRC:$hi),
+ [(set DstRC:$lohi, (MipsMTLOHI SrcRC:$lo, SrcRC:$hi))], IIHiLo>;
+
+class MoveToLOHI<string opstr, RegisterOperand RO, list<Register> DefRegs>:
+ InstSE<(outs), (ins RO:$rs), !strconcat(opstr, "\t$rs"), [], IIHiLo,
+ FrmR, opstr> {
let Defs = DefRegs;
let neverHasSideEffects = 1;
}
-class EffectiveAddress<string opstr, RegisterClass RC, Operand Mem> :
- InstSE<(outs RC:$rt), (ins Mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(set RC:$rt, addr:$addr)], NoItinerary, FrmI> {
+class EffectiveAddress<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$rt), (ins mem_ea:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ [(set RO:$rt, addr:$addr)], NoItinerary, FrmI> {
let isCodeGenOnly = 1;
let DecoderMethod = "DecodeMem";
}
@@ -701,97 +740,91 @@ class EffectiveAddress<string opstr, RegisterClass RC, Operand Mem> :
// Count Leading Ones/Zeros in Word
class CountLeading0<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [(set RO:$rd, (ctlz RO:$rs))], IIAlu, FrmR>,
+ [(set RO:$rd, (ctlz RO:$rs))], IIArith, FrmR, opstr>,
Requires<[HasBitCount, HasStdEnc]>;
class CountLeading1<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [(set RO:$rd, (ctlz (not RO:$rs)))], IIAlu, FrmR>,
+ [(set RO:$rd, (ctlz (not RO:$rs)))], IIArith, FrmR, opstr>,
Requires<[HasBitCount, HasStdEnc]>;
// Sign Extend in Register.
-class SignExtInReg<string opstr, ValueType vt, RegisterClass RC> :
- InstSE<(outs RC:$rd), (ins RC:$rt), !strconcat(opstr, "\t$rd, $rt"),
- [(set RC:$rd, (sext_inreg RC:$rt, vt))], NoItinerary, FrmR> {
+class SignExtInReg<string opstr, ValueType vt, RegisterOperand RO> :
+ InstSE<(outs RO:$rd), (ins RO:$rt), !strconcat(opstr, "\t$rd, $rt"),
+ [(set RO:$rd, (sext_inreg RO:$rt, vt))], IIseb, FrmR, opstr> {
let Predicates = [HasSEInReg, HasStdEnc];
}
// Subword Swap
class SubwordSwap<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rt), !strconcat(opstr, "\t$rd, $rt"), [],
- NoItinerary, FrmR> {
+ NoItinerary, FrmR, opstr> {
let Predicates = [HasSwap, HasStdEnc];
let neverHasSideEffects = 1;
}
// Read Hardware
-class ReadHardware<RegisterClass CPURegClass, RegisterOperand RO> :
- InstSE<(outs CPURegClass:$rt), (ins RO:$rd), "rdhwr\t$rt, $rd", [],
- IIAlu, FrmR>;
+class ReadHardware<RegisterOperand CPURegOperand, RegisterOperand RO> :
+ InstSE<(outs CPURegOperand:$rt), (ins RO:$rd), "rdhwr\t$rt, $rd", [],
+ IIArith, FrmR>;
// Ext and Ins
-class ExtBase<string opstr, RegisterOperand RO>:
- InstSE<(outs RO:$rt), (ins RO:$rs, uimm16:$pos, size_ext:$size),
+class ExtBase<string opstr, RegisterOperand RO, Operand PosOpnd,
+ SDPatternOperator Op = null_frag>:
+ InstSE<(outs RO:$rt), (ins RO:$rs, PosOpnd:$pos, size_ext:$size),
!strconcat(opstr, " $rt, $rs, $pos, $size"),
- [(set RO:$rt, (MipsExt RO:$rs, imm:$pos, imm:$size))], NoItinerary,
- FrmR> {
+ [(set RO:$rt, (Op RO:$rs, imm:$pos, imm:$size))], NoItinerary,
+ FrmR, opstr> {
let Predicates = [HasMips32r2, HasStdEnc];
}
-class InsBase<string opstr, RegisterOperand RO>:
- InstSE<(outs RO:$rt), (ins RO:$rs, uimm16:$pos, size_ins:$size, RO:$src),
+class InsBase<string opstr, RegisterOperand RO, Operand PosOpnd,
+ SDPatternOperator Op = null_frag>:
+ InstSE<(outs RO:$rt), (ins RO:$rs, PosOpnd:$pos, size_ins:$size, RO:$src),
!strconcat(opstr, " $rt, $rs, $pos, $size"),
- [(set RO:$rt, (MipsIns RO:$rs, imm:$pos, imm:$size, RO:$src))],
- NoItinerary, FrmR> {
+ [(set RO:$rt, (Op RO:$rs, imm:$pos, imm:$size, RO:$src))],
+ NoItinerary, FrmR, opstr> {
let Predicates = [HasMips32r2, HasStdEnc];
let Constraints = "$src = $rt";
}
// Atomic instructions with 2 source operands (ATOMIC_SWAP & ATOMIC_LOAD_*).
-class Atomic2Ops<PatFrag Op, RegisterClass DRC, RegisterClass PRC> :
- PseudoSE<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr),
- [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>;
-
-multiclass Atomic2Ops32<PatFrag Op> {
- def NAME : Atomic2Ops<Op, CPURegs, CPURegs>, Requires<[NotN64, HasStdEnc]>;
- def _P8 : Atomic2Ops<Op, CPURegs, CPU64Regs>,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- }
-}
+class Atomic2Ops<PatFrag Op, RegisterClass DRC> :
+ PseudoSE<(outs DRC:$dst), (ins PtrRC:$ptr, DRC:$incr),
+ [(set DRC:$dst, (Op iPTR:$ptr, DRC:$incr))]>;
// Atomic Compare & Swap.
-class AtomicCmpSwap<PatFrag Op, RegisterClass DRC, RegisterClass PRC> :
- PseudoSE<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap),
- [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>;
-
-multiclass AtomicCmpSwap32<PatFrag Op> {
- def NAME : AtomicCmpSwap<Op, CPURegs, CPURegs>,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : AtomicCmpSwap<Op, CPURegs, CPU64Regs>,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- }
-}
+class AtomicCmpSwap<PatFrag Op, RegisterClass DRC> :
+ PseudoSE<(outs DRC:$dst), (ins PtrRC:$ptr, DRC:$cmp, DRC:$swap),
+ [(set DRC:$dst, (Op iPTR:$ptr, DRC:$cmp, DRC:$swap))]>;
-class LLBase<string opstr, RegisterOperand RO, Operand Mem> :
- InstSE<(outs RO:$rt), (ins Mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+class LLBase<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[], NoItinerary, FrmI> {
let DecoderMethod = "DecodeMem";
let mayLoad = 1;
}
-class SCBase<string opstr, RegisterOperand RO, Operand Mem> :
- InstSE<(outs RO:$dst), (ins RO:$rt, Mem:$addr),
+class SCBase<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$dst), (ins RO:$rt, mem:$addr),
!strconcat(opstr, "\t$rt, $addr"), [], NoItinerary, FrmI> {
let DecoderMethod = "DecodeMem";
let mayStore = 1;
let Constraints = "$rt = $dst";
}
-class MFC3OP<dag outs, dag ins, string asmstr> :
- InstSE<outs, ins, asmstr, [], NoItinerary, FrmFR>;
+class MFC3OP<string asmstr, RegisterOperand RO> :
+ InstSE<(outs RO:$rt, RO:$rd, uimm16:$sel), (ins),
+ !strconcat(asmstr, "\t$rt, $rd, $sel"), [], NoItinerary, FrmFR>;
+
+class TrapBase<Instruction RealInst>
+ : PseudoSE<(outs), (ins), [(trap)], NoItinerary>,
+ PseudoInstExpansion<(RealInst 0, 0)> {
+ let isBarrier = 1;
+ let isTerminator = 1;
+ let isCodeGenOnly = 1;
+}
//===----------------------------------------------------------------------===//
// Pseudo instructions
@@ -809,38 +842,38 @@ def ADJCALLSTACKUP : MipsPseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
}
let usesCustomInserter = 1 in {
- defm ATOMIC_LOAD_ADD_I8 : Atomic2Ops32<atomic_load_add_8>;
- defm ATOMIC_LOAD_ADD_I16 : Atomic2Ops32<atomic_load_add_16>;
- defm ATOMIC_LOAD_ADD_I32 : Atomic2Ops32<atomic_load_add_32>;
- defm ATOMIC_LOAD_SUB_I8 : Atomic2Ops32<atomic_load_sub_8>;
- defm ATOMIC_LOAD_SUB_I16 : Atomic2Ops32<atomic_load_sub_16>;
- defm ATOMIC_LOAD_SUB_I32 : Atomic2Ops32<atomic_load_sub_32>;
- defm ATOMIC_LOAD_AND_I8 : Atomic2Ops32<atomic_load_and_8>;
- defm ATOMIC_LOAD_AND_I16 : Atomic2Ops32<atomic_load_and_16>;
- defm ATOMIC_LOAD_AND_I32 : Atomic2Ops32<atomic_load_and_32>;
- defm ATOMIC_LOAD_OR_I8 : Atomic2Ops32<atomic_load_or_8>;
- defm ATOMIC_LOAD_OR_I16 : Atomic2Ops32<atomic_load_or_16>;
- defm ATOMIC_LOAD_OR_I32 : Atomic2Ops32<atomic_load_or_32>;
- defm ATOMIC_LOAD_XOR_I8 : Atomic2Ops32<atomic_load_xor_8>;
- defm ATOMIC_LOAD_XOR_I16 : Atomic2Ops32<atomic_load_xor_16>;
- defm ATOMIC_LOAD_XOR_I32 : Atomic2Ops32<atomic_load_xor_32>;
- defm ATOMIC_LOAD_NAND_I8 : Atomic2Ops32<atomic_load_nand_8>;
- defm ATOMIC_LOAD_NAND_I16 : Atomic2Ops32<atomic_load_nand_16>;
- defm ATOMIC_LOAD_NAND_I32 : Atomic2Ops32<atomic_load_nand_32>;
-
- defm ATOMIC_SWAP_I8 : Atomic2Ops32<atomic_swap_8>;
- defm ATOMIC_SWAP_I16 : Atomic2Ops32<atomic_swap_16>;
- defm ATOMIC_SWAP_I32 : Atomic2Ops32<atomic_swap_32>;
-
- defm ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap32<atomic_cmp_swap_8>;
- defm ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap32<atomic_cmp_swap_16>;
- defm ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap32<atomic_cmp_swap_32>;
+ def ATOMIC_LOAD_ADD_I8 : Atomic2Ops<atomic_load_add_8, GPR32>;
+ def ATOMIC_LOAD_ADD_I16 : Atomic2Ops<atomic_load_add_16, GPR32>;
+ def ATOMIC_LOAD_ADD_I32 : Atomic2Ops<atomic_load_add_32, GPR32>;
+ def ATOMIC_LOAD_SUB_I8 : Atomic2Ops<atomic_load_sub_8, GPR32>;
+ def ATOMIC_LOAD_SUB_I16 : Atomic2Ops<atomic_load_sub_16, GPR32>;
+ def ATOMIC_LOAD_SUB_I32 : Atomic2Ops<atomic_load_sub_32, GPR32>;
+ def ATOMIC_LOAD_AND_I8 : Atomic2Ops<atomic_load_and_8, GPR32>;
+ def ATOMIC_LOAD_AND_I16 : Atomic2Ops<atomic_load_and_16, GPR32>;
+ def ATOMIC_LOAD_AND_I32 : Atomic2Ops<atomic_load_and_32, GPR32>;
+ def ATOMIC_LOAD_OR_I8 : Atomic2Ops<atomic_load_or_8, GPR32>;
+ def ATOMIC_LOAD_OR_I16 : Atomic2Ops<atomic_load_or_16, GPR32>;
+ def ATOMIC_LOAD_OR_I32 : Atomic2Ops<atomic_load_or_32, GPR32>;
+ def ATOMIC_LOAD_XOR_I8 : Atomic2Ops<atomic_load_xor_8, GPR32>;
+ def ATOMIC_LOAD_XOR_I16 : Atomic2Ops<atomic_load_xor_16, GPR32>;
+ def ATOMIC_LOAD_XOR_I32 : Atomic2Ops<atomic_load_xor_32, GPR32>;
+ def ATOMIC_LOAD_NAND_I8 : Atomic2Ops<atomic_load_nand_8, GPR32>;
+ def ATOMIC_LOAD_NAND_I16 : Atomic2Ops<atomic_load_nand_16, GPR32>;
+ def ATOMIC_LOAD_NAND_I32 : Atomic2Ops<atomic_load_nand_32, GPR32>;
+
+ def ATOMIC_SWAP_I8 : Atomic2Ops<atomic_swap_8, GPR32>;
+ def ATOMIC_SWAP_I16 : Atomic2Ops<atomic_swap_16, GPR32>;
+ def ATOMIC_SWAP_I32 : Atomic2Ops<atomic_swap_32, GPR32>;
+
+ def ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap<atomic_cmp_swap_8, GPR32>;
+ def ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap<atomic_cmp_swap_16, GPR32>;
+ def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<atomic_cmp_swap_32, GPR32>;
}
/// Pseudo instructions for loading and storing accumulator registers.
-let isPseudo = 1 in {
- defm LOAD_AC64 : LoadM<"load_ac64", ACRegs>;
- defm STORE_AC64 : StoreM<"store_ac64", ACRegs>;
+let isPseudo = 1, isCodeGenOnly = 1 in {
+ def LOAD_ACC64 : Load<"", ACC64>;
+ def STORE_ACC64 : Store<"", ACC64>;
}
//===----------------------------------------------------------------------===//
@@ -851,113 +884,146 @@ let isPseudo = 1 in {
//===----------------------------------------------------------------------===//
/// Arithmetic Instructions (ALU Immediate)
-def ADDiu : MMRel, ArithLogicI<"addiu", simm16, CPURegsOpnd, immSExt16, add>,
+def ADDiu : MMRel, ArithLogicI<"addiu", simm16, GPR32Opnd, IIArith, immSExt16,
+ add>,
ADDI_FM<0x9>, IsAsCheapAsAMove;
-def ADDi : MMRel, ArithLogicI<"addi", simm16, CPURegsOpnd>, ADDI_FM<0x8>;
-def SLTi : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>,
+def ADDi : MMRel, ArithLogicI<"addi", simm16, GPR32Opnd>, ADDI_FM<0x8>;
+def SLTi : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, GPR32Opnd>,
SLTI_FM<0xa>;
-def SLTiu : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>,
+def SLTiu : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, GPR32Opnd>,
SLTI_FM<0xb>;
-def ANDi : MMRel, ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16, and>,
+def ANDi : MMRel, ArithLogicI<"andi", uimm16, GPR32Opnd, IILogic, immZExt16,
+ and>,
ADDI_FM<0xc>;
-def ORi : MMRel, ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>,
+def ORi : MMRel, ArithLogicI<"ori", uimm16, GPR32Opnd, IILogic, immZExt16,
+ or>,
ADDI_FM<0xd>;
-def XORi : MMRel, ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16, xor>,
+def XORi : MMRel, ArithLogicI<"xori", uimm16, GPR32Opnd, IILogic, immZExt16,
+ xor>,
ADDI_FM<0xe>;
-def LUi : MMRel, LoadUpper<"lui", CPURegs, uimm16>, LUI_FM;
+def LUi : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16>, LUI_FM;
/// Arithmetic Instructions (3-Operand, R-Type)
-def ADDu : MMRel, ArithLogicR<"addu", CPURegsOpnd, 1, IIAlu, add>,
+def ADDu : MMRel, ArithLogicR<"addu", GPR32Opnd, 1, IIArith, add>,
ADD_FM<0, 0x21>;
-def SUBu : MMRel, ArithLogicR<"subu", CPURegsOpnd, 0, IIAlu, sub>,
+def SUBu : MMRel, ArithLogicR<"subu", GPR32Opnd, 0, IIArith, sub>,
ADD_FM<0, 0x23>;
-def MUL : MMRel, ArithLogicR<"mul", CPURegsOpnd, 1, IIImul, mul>,
+let Defs = [HI0, LO0] in
+def MUL : MMRel, ArithLogicR<"mul", GPR32Opnd, 1, IIImul, mul>,
ADD_FM<0x1c, 2>;
-def ADD : MMRel, ArithLogicR<"add", CPURegsOpnd>, ADD_FM<0, 0x20>;
-def SUB : MMRel, ArithLogicR<"sub", CPURegsOpnd>, ADD_FM<0, 0x22>;
-def SLT : MMRel, SetCC_R<"slt", setlt, CPURegs>, ADD_FM<0, 0x2a>;
-def SLTu : MMRel, SetCC_R<"sltu", setult, CPURegs>, ADD_FM<0, 0x2b>;
-def AND : MMRel, ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>,
+def ADD : MMRel, ArithLogicR<"add", GPR32Opnd>, ADD_FM<0, 0x20>;
+def SUB : MMRel, ArithLogicR<"sub", GPR32Opnd>, ADD_FM<0, 0x22>;
+def SLT : MMRel, SetCC_R<"slt", setlt, GPR32Opnd>, ADD_FM<0, 0x2a>;
+def SLTu : MMRel, SetCC_R<"sltu", setult, GPR32Opnd>, ADD_FM<0, 0x2b>;
+def AND : MMRel, ArithLogicR<"and", GPR32Opnd, 1, IILogic, and>,
ADD_FM<0, 0x24>;
-def OR : MMRel, ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>,
+def OR : MMRel, ArithLogicR<"or", GPR32Opnd, 1, IILogic, or>,
ADD_FM<0, 0x25>;
-def XOR : MMRel, ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>,
+def XOR : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, IILogic, xor>,
ADD_FM<0, 0x26>;
-def NOR : MMRel, LogicNOR<"nor", CPURegsOpnd>, ADD_FM<0, 0x27>;
+def NOR : MMRel, LogicNOR<"nor", GPR32Opnd>, ADD_FM<0, 0x27>;
/// Shift Instructions
-def SLL : MMRel, shift_rotate_imm<"sll", shamt, CPURegsOpnd, shl, immZExt5>,
+def SLL : MMRel, shift_rotate_imm<"sll", uimm5, GPR32Opnd, shl, immZExt5>,
SRA_FM<0, 0>;
-def SRL : MMRel, shift_rotate_imm<"srl", shamt, CPURegsOpnd, srl, immZExt5>,
+def SRL : MMRel, shift_rotate_imm<"srl", uimm5, GPR32Opnd, srl, immZExt5>,
SRA_FM<2, 0>;
-def SRA : MMRel, shift_rotate_imm<"sra", shamt, CPURegsOpnd, sra, immZExt5>,
+def SRA : MMRel, shift_rotate_imm<"sra", uimm5, GPR32Opnd, sra, immZExt5>,
SRA_FM<3, 0>;
-def SLLV : MMRel, shift_rotate_reg<"sllv", CPURegsOpnd, shl>, SRLV_FM<4, 0>;
-def SRLV : MMRel, shift_rotate_reg<"srlv", CPURegsOpnd, srl>, SRLV_FM<6, 0>;
-def SRAV : MMRel, shift_rotate_reg<"srav", CPURegsOpnd, sra>, SRLV_FM<7, 0>;
+def SLLV : MMRel, shift_rotate_reg<"sllv", GPR32Opnd, shl>, SRLV_FM<4, 0>;
+def SRLV : MMRel, shift_rotate_reg<"srlv", GPR32Opnd, srl>, SRLV_FM<6, 0>;
+def SRAV : MMRel, shift_rotate_reg<"srav", GPR32Opnd, sra>, SRLV_FM<7, 0>;
// Rotate Instructions
let Predicates = [HasMips32r2, HasStdEnc] in {
- def ROTR : MMRel, shift_rotate_imm<"rotr", shamt, CPURegsOpnd, rotr,
+ def ROTR : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd, rotr,
immZExt5>,
SRA_FM<2, 1>;
- def ROTRV : MMRel, shift_rotate_reg<"rotrv", CPURegsOpnd, rotr>,
+ def ROTRV : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd, rotr>,
SRLV_FM<6, 1>;
}
/// Load and Store Instructions
/// aligned
-defm LB : LoadM<"lb", CPURegs, sextloadi8>, MMRel, LW_FM<0x20>;
-defm LBu : LoadM<"lbu", CPURegs, zextloadi8, addrDefault>, MMRel, LW_FM<0x24>;
-defm LH : LoadM<"lh", CPURegs, sextloadi16, addrDefault>, MMRel, LW_FM<0x21>;
-defm LHu : LoadM<"lhu", CPURegs, zextloadi16>, MMRel, LW_FM<0x25>;
-defm LW : LoadM<"lw", CPURegs, load, addrDefault>, MMRel, LW_FM<0x23>;
-defm SB : StoreM<"sb", CPURegs, truncstorei8>, MMRel, LW_FM<0x28>;
-defm SH : StoreM<"sh", CPURegs, truncstorei16>, MMRel, LW_FM<0x29>;
-defm SW : StoreM<"sw", CPURegs, store>, MMRel, LW_FM<0x2b>;
+def LB : Load<"lb", GPR32Opnd, sextloadi8, IILoad>, MMRel, LW_FM<0x20>;
+def LBu : Load<"lbu", GPR32Opnd, zextloadi8, IILoad, addrDefault>, MMRel,
+ LW_FM<0x24>;
+def LH : Load<"lh", GPR32Opnd, sextloadi16, IILoad, addrDefault>, MMRel,
+ LW_FM<0x21>;
+def LHu : Load<"lhu", GPR32Opnd, zextloadi16, IILoad>, MMRel, LW_FM<0x25>;
+def LW : Load<"lw", GPR32Opnd, load, IILoad, addrDefault>, MMRel,
+ LW_FM<0x23>;
+def SB : Store<"sb", GPR32Opnd, truncstorei8, IIStore>, MMRel, LW_FM<0x28>;
+def SH : Store<"sh", GPR32Opnd, truncstorei16, IIStore>, MMRel, LW_FM<0x29>;
+def SW : Store<"sw", GPR32Opnd, store, IIStore>, MMRel, LW_FM<0x2b>;
/// load/store left/right
-defm LWL : LoadLeftRightM<"lwl", MipsLWL, CPURegs>, LW_FM<0x22>;
-defm LWR : LoadLeftRightM<"lwr", MipsLWR, CPURegs>, LW_FM<0x26>;
-defm SWL : StoreLeftRightM<"swl", MipsSWL, CPURegs>, LW_FM<0x2a>;
-defm SWR : StoreLeftRightM<"swr", MipsSWR, CPURegs>, LW_FM<0x2e>;
+let Predicates = [NotInMicroMips] in {
+def LWL : LoadLeftRight<"lwl", MipsLWL, GPR32Opnd, IILoad>, LW_FM<0x22>;
+def LWR : LoadLeftRight<"lwr", MipsLWR, GPR32Opnd, IILoad>, LW_FM<0x26>;
+def SWL : StoreLeftRight<"swl", MipsSWL, GPR32Opnd, IIStore>, LW_FM<0x2a>;
+def SWR : StoreLeftRight<"swr", MipsSWR, GPR32Opnd, IIStore>, LW_FM<0x2e>;
+}
def SYNC : SYNC_FT, SYNC_FM;
+def TEQ : MMRel, TEQ_FT<"teq", GPR32Opnd>, TEQ_FM<0x34>;
+def TGE : MMRel, TEQ_FT<"tge", GPR32Opnd>, TEQ_FM<0x30>;
+def TGEU : MMRel, TEQ_FT<"tgeu", GPR32Opnd>, TEQ_FM<0x31>;
+def TLT : MMRel, TEQ_FT<"tlt", GPR32Opnd>, TEQ_FM<0x32>;
+def TLTU : MMRel, TEQ_FT<"tltu", GPR32Opnd>, TEQ_FM<0x33>;
+def TNE : MMRel, TEQ_FT<"tne", GPR32Opnd>, TEQ_FM<0x36>;
-/// Load-linked, Store-conditional
-let Predicates = [NotN64, HasStdEnc] in {
- def LL : LLBase<"ll", CPURegsOpnd, mem>, LW_FM<0x30>;
- def SC : SCBase<"sc", CPURegsOpnd, mem>, LW_FM<0x38>;
-}
+def TEQI : MMRel, TEQI_FT<"teqi", GPR32Opnd>, TEQI_FM<0xc>;
+def TGEI : MMRel, TEQI_FT<"tgei", GPR32Opnd>, TEQI_FM<0x8>;
+def TGEIU : MMRel, TEQI_FT<"tgeiu", GPR32Opnd>, TEQI_FM<0x9>;
+def TLTI : MMRel, TEQI_FT<"tlti", GPR32Opnd>, TEQI_FM<0xa>;
+def TTLTIU : MMRel, TEQI_FT<"tltiu", GPR32Opnd>, TEQI_FM<0xb>;
+def TNEI : MMRel, TEQI_FT<"tnei", GPR32Opnd>, TEQI_FM<0xe>;
-let Predicates = [IsN64, HasStdEnc], DecoderNamespace = "Mips64" in {
- def LL_P8 : LLBase<"ll", CPURegsOpnd, mem64>, LW_FM<0x30>;
- def SC_P8 : SCBase<"sc", CPURegsOpnd, mem64>, LW_FM<0x38>;
-}
+def BREAK : BRK_FT<"break">, BRK_FM<0xd>;
+def SYSCALL : SYS_FT<"syscall">, SYS_FM<0xc>;
+def TRAP : TrapBase<BREAK>;
+
+def ERET : ER_FT<"eret">, ER_FM<0x18>;
+def DERET : ER_FT<"deret">, ER_FM<0x1f>;
+
+def EI : DEI_FT<"ei", GPR32Opnd>, EI_FM<1>;
+def DI : DEI_FT<"di", GPR32Opnd>, EI_FM<0>;
+
+def WAIT : WAIT_FT<"wait">;
+
+/// Load-linked, Store-conditional
+def LL : LLBase<"ll", GPR32Opnd>, LW_FM<0x30>;
+def SC : SCBase<"sc", GPR32Opnd>, LW_FM<0x38>;
/// Jump and Branch Instructions
-def J : JumpFJ<jmptarget, "j", br, bb>, FJ<2>,
+def J : MMRel, JumpFJ<jmptarget, "j", br, bb, "j">, FJ<2>,
Requires<[RelocStatic, HasStdEnc]>, IsBranch;
-def JR : IndirectBranch<CPURegs>, MTLO_FM<8>;
-def B : UncondBranch<"b">, B_FM;
-def BEQ : CBranch<"beq", seteq, CPURegs>, BEQ_FM<4>;
-def BNE : CBranch<"bne", setne, CPURegs>, BEQ_FM<5>;
-def BGEZ : CBranchZero<"bgez", setge, CPURegs>, BGEZ_FM<1, 1>;
-def BGTZ : CBranchZero<"bgtz", setgt, CPURegs>, BGEZ_FM<7, 0>;
-def BLEZ : CBranchZero<"blez", setle, CPURegs>, BGEZ_FM<6, 0>;
-def BLTZ : CBranchZero<"bltz", setlt, CPURegs>, BGEZ_FM<1, 0>;
-
-def BAL_BR: BAL_FT, BAL_FM;
-
-def JAL : JumpLink<"jal">, FJ<3>;
-def JALR : JumpLinkReg<"jalr", CPURegs>, JALR_FM;
-def JALRPseudo : JumpLinkRegPseudo<CPURegs, JALR, RA>;
-def BGEZAL : BGEZAL_FT<"bgezal", CPURegsOpnd>, BGEZAL_FM<0x11>;
-def BLTZAL : BGEZAL_FT<"bltzal", CPURegsOpnd>, BGEZAL_FM<0x10>;
-def TAILCALL : JumpFJ<calltarget, "j", MipsTailCall, imm>, FJ<2>, IsTailCall;
-def TAILCALL_R : JumpFR<CPURegs, MipsTailCall>, MTLO_FM<8>, IsTailCall;
-
-def RET : RetBase<CPURegs>, MTLO_FM<8>;
+def JR : MMRel, IndirectBranch<"jr", GPR32Opnd>, MTLO_FM<8>;
+def BEQ : MMRel, CBranch<"beq", brtarget, seteq, GPR32Opnd>, BEQ_FM<4>;
+def BNE : MMRel, CBranch<"bne", brtarget, setne, GPR32Opnd>, BEQ_FM<5>;
+def BGEZ : MMRel, CBranchZero<"bgez", brtarget, setge, GPR32Opnd>,
+ BGEZ_FM<1, 1>;
+def BGTZ : MMRel, CBranchZero<"bgtz", brtarget, setgt, GPR32Opnd>,
+ BGEZ_FM<7, 0>;
+def BLEZ : MMRel, CBranchZero<"blez", brtarget, setle, GPR32Opnd>,
+ BGEZ_FM<6, 0>;
+def BLTZ : MMRel, CBranchZero<"bltz", brtarget, setlt, GPR32Opnd>,
+ BGEZ_FM<1, 0>;
+def B : UncondBranch<BEQ>;
+
+def JAL : MMRel, JumpLink<"jal", calltarget>, FJ<3>;
+def JALR : MMRel, JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM;
+def JALRPseudo : JumpLinkRegPseudo<GPR32Opnd, JALR, RA>;
+def BGEZAL : MMRel, BGEZAL_FT<"bgezal", brtarget, GPR32Opnd>, BGEZAL_FM<0x11>;
+def BLTZAL : MMRel, BGEZAL_FT<"bltzal", brtarget, GPR32Opnd>, BGEZAL_FM<0x10>;
+def BAL_BR : BAL_BR_Pseudo<BGEZAL>;
+def TAILCALL : MMRel, JumpFJ<calltarget, "j", MipsTailCall, imm, "tcall">,
+ FJ<2>, IsTailCall;
+def TAILCALL_R : MMRel, JumpFR<"tcallr", GPR32Opnd, MipsTailCall>, MTLO_FM<8>,
+ IsTailCall;
+
+def RET : MMRel, RetBase<"ret", GPR32Opnd>, MTLO_FM<8>;
// Exception handling related node and instructions.
// The conversion sequence is:
@@ -973,41 +1039,38 @@ def MIPSehret : SDNode<"MipsISD::EH_RETURN", SDT_MipsEHRET,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
let Uses = [V0, V1], isTerminator = 1, isReturn = 1, isBarrier = 1 in {
- def MIPSeh_return32 : MipsPseudo<(outs), (ins CPURegs:$spoff, CPURegs:$dst),
- [(MIPSehret CPURegs:$spoff, CPURegs:$dst)]>;
- def MIPSeh_return64 : MipsPseudo<(outs), (ins CPU64Regs:$spoff,
- CPU64Regs:$dst),
- [(MIPSehret CPU64Regs:$spoff, CPU64Regs:$dst)]>;
+ def MIPSeh_return32 : MipsPseudo<(outs), (ins GPR32:$spoff, GPR32:$dst),
+ [(MIPSehret GPR32:$spoff, GPR32:$dst)]>;
+ def MIPSeh_return64 : MipsPseudo<(outs), (ins GPR64:$spoff,
+ GPR64:$dst),
+ [(MIPSehret GPR64:$spoff, GPR64:$dst)]>;
}
/// Multiply and Divide Instructions.
-def MULT : MMRel, Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>,
+def MULT : MMRel, Mult<"mult", IIImult, GPR32Opnd, [HI0, LO0]>,
MULT_FM<0, 0x18>;
-def MULTu : MMRel, Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>,
+def MULTu : MMRel, Mult<"multu", IIImult, GPR32Opnd, [HI0, LO0]>,
MULT_FM<0, 0x19>;
-def PseudoMULT : MultDivPseudo<MULT, ACRegs, CPURegsOpnd, MipsMult, IIImul>;
-def PseudoMULTu : MultDivPseudo<MULTu, ACRegs, CPURegsOpnd, MipsMultu, IIImul>;
-def SDIV : Div<"div", IIIdiv, CPURegsOpnd, [HI, LO]>, MULT_FM<0, 0x1a>;
-def UDIV : Div<"divu", IIIdiv, CPURegsOpnd, [HI, LO]>, MULT_FM<0, 0x1b>;
-def PseudoSDIV : MultDivPseudo<SDIV, ACRegs, CPURegsOpnd, MipsDivRem, IIIdiv, 0>;
-def PseudoUDIV : MultDivPseudo<UDIV, ACRegs, CPURegsOpnd, MipsDivRemU, IIIdiv,
- 0>;
-
-def MTHI : MoveToLOHI<"mthi", CPURegs, [HI]>, MTLO_FM<0x11>;
-def MTLO : MoveToLOHI<"mtlo", CPURegs, [LO]>, MTLO_FM<0x13>;
-def MFHI : MoveFromLOHI<"mfhi", CPURegs, [HI]>, MFLO_FM<0x10>;
-def MFLO : MoveFromLOHI<"mflo", CPURegs, [LO]>, MFLO_FM<0x12>;
+def SDIV : MMRel, Div<"div", IIIdiv, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM<0, 0x1a>;
+def UDIV : MMRel, Div<"divu", IIIdiv, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM<0, 0x1b>;
+
+def MTHI : MMRel, MoveToLOHI<"mthi", GPR32Opnd, [HI0]>, MTLO_FM<0x11>;
+def MTLO : MMRel, MoveToLOHI<"mtlo", GPR32Opnd, [LO0]>, MTLO_FM<0x13>;
+def MFHI : MMRel, MoveFromLOHI<"mfhi", GPR32Opnd, AC0>, MFLO_FM<0x10>;
+def MFLO : MMRel, MoveFromLOHI<"mflo", GPR32Opnd, AC0>, MFLO_FM<0x12>;
/// Sign Ext In Register Instructions.
-def SEB : SignExtInReg<"seb", i8, CPURegs>, SEB_FM<0x10, 0x20>;
-def SEH : SignExtInReg<"seh", i16, CPURegs>, SEB_FM<0x18, 0x20>;
+def SEB : MMRel, SignExtInReg<"seb", i8, GPR32Opnd>, SEB_FM<0x10, 0x20>;
+def SEH : MMRel, SignExtInReg<"seh", i16, GPR32Opnd>, SEB_FM<0x18, 0x20>;
/// Count Leading
-def CLZ : CountLeading0<"clz", CPURegsOpnd>, CLO_FM<0x20>;
-def CLO : CountLeading1<"clo", CPURegsOpnd>, CLO_FM<0x21>;
+def CLZ : MMRel, CountLeading0<"clz", GPR32Opnd>, CLO_FM<0x20>;
+def CLO : MMRel, CountLeading1<"clo", GPR32Opnd>, CLO_FM<0x21>;
/// Word Swap Bytes Within Halfwords
-def WSBH : SubwordSwap<"wsbh", CPURegsOpnd>, SEB_FM<2, 0x20>;
+def WSBH : MMRel, SubwordSwap<"wsbh", GPR32Opnd>, SEB_FM<2, 0x20>;
/// No operation.
def NOP : PseudoSE<(outs), (ins), []>, PseudoInstExpansion<(SLL ZERO, ZERO, 0)>;
@@ -1016,85 +1079,98 @@ def NOP : PseudoSE<(outs), (ins), []>, PseudoInstExpansion<(SLL ZERO, ZERO, 0)>;
// instructions. The same not happens for stack address copies, so an
// add op with mem ComplexPattern is used and the stack address copy
// can be matched. It's similar to Sparc LEA_ADDRi
-def LEA_ADDiu : EffectiveAddress<"addiu", CPURegs, mem_ea>, LW_FM<9>;
+def LEA_ADDiu : EffectiveAddress<"addiu", GPR32Opnd>, LW_FM<9>;
// MADD*/MSUB*
-def MADD : MArithR<"madd", 1>, MULT_FM<0x1c, 0>;
-def MADDU : MArithR<"maddu", 1>, MULT_FM<0x1c, 1>;
-def MSUB : MArithR<"msub">, MULT_FM<0x1c, 4>;
-def MSUBU : MArithR<"msubu">, MULT_FM<0x1c, 5>;
+def MADD : MMRel, MArithR<"madd", 1>, MULT_FM<0x1c, 0>;
+def MADDU : MMRel, MArithR<"maddu", 1>, MULT_FM<0x1c, 1>;
+def MSUB : MMRel, MArithR<"msub">, MULT_FM<0x1c, 4>;
+def MSUBU : MMRel, MArithR<"msubu">, MULT_FM<0x1c, 5>;
+
+let Predicates = [HasStdEnc, NotDSP] in {
+def PseudoMULT : MultDivPseudo<MULT, ACC64, GPR32Opnd, MipsMult, IIImult>;
+def PseudoMULTu : MultDivPseudo<MULTu, ACC64, GPR32Opnd, MipsMultu, IIImult>;
+def PseudoMFHI : PseudoMFLOHI<GPR32, ACC64, MipsMFHI>;
+def PseudoMFLO : PseudoMFLOHI<GPR32, ACC64, MipsMFLO>;
+def PseudoMTLOHI : PseudoMTLOHI<ACC64, GPR32>;
def PseudoMADD : MAddSubPseudo<MADD, MipsMAdd>;
def PseudoMADDU : MAddSubPseudo<MADDU, MipsMAddu>;
def PseudoMSUB : MAddSubPseudo<MSUB, MipsMSub>;
def PseudoMSUBU : MAddSubPseudo<MSUBU, MipsMSubu>;
+}
-def RDHWR : ReadHardware<CPURegs, HWRegsOpnd>, RDHWR_FM;
-
-def EXT : ExtBase<"ext", CPURegsOpnd>, EXT_FM<0>;
-def INS : InsBase<"ins", CPURegsOpnd>, EXT_FM<4>;
-
-/// Move Control Registers From/To CPU Registers
-def MFC0_3OP : MFC3OP<(outs CPURegsOpnd:$rt),
- (ins CPURegsOpnd:$rd, uimm16:$sel),
- "mfc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 0>;
+def PseudoSDIV : MultDivPseudo<SDIV, ACC64, GPR32Opnd, MipsDivRem, IIIdiv,
+ 0, 1, 1>;
+def PseudoUDIV : MultDivPseudo<UDIV, ACC64, GPR32Opnd, MipsDivRemU, IIIdiv,
+ 0, 1, 1>;
-def MTC0_3OP : MFC3OP<(outs CPURegsOpnd:$rd, uimm16:$sel),
- (ins CPURegsOpnd:$rt),
- "mtc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 4>;
+def RDHWR : ReadHardware<GPR32Opnd, HWRegsOpnd>, RDHWR_FM;
-def MFC2_3OP : MFC3OP<(outs CPURegsOpnd:$rt),
- (ins CPURegsOpnd:$rd, uimm16:$sel),
- "mfc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 0>;
+def EXT : MMRel, ExtBase<"ext", GPR32Opnd, uimm5, MipsExt>, EXT_FM<0>;
+def INS : MMRel, InsBase<"ins", GPR32Opnd, uimm5, MipsIns>, EXT_FM<4>;
-def MTC2_3OP : MFC3OP<(outs CPURegsOpnd:$rd, uimm16:$sel),
- (ins CPURegsOpnd:$rt),
- "mtc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 4>;
+/// Move Control Registers From/To CPU Registers
+def MFC0 : MFC3OP<"mfc0", GPR32Opnd>, MFC3OP_FM<0x10, 0>;
+def MTC0 : MFC3OP<"mtc0", GPR32Opnd>, MFC3OP_FM<0x10, 4>;
+def MFC2 : MFC3OP<"mfc2", GPR32Opnd>, MFC3OP_FM<0x12, 0>;
+def MTC2 : MFC3OP<"mtc2", GPR32Opnd>, MFC3OP_FM<0x12, 4>;
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//
def : InstAlias<"move $dst, $src",
- (ADDu CPURegsOpnd:$dst, CPURegsOpnd:$src,ZERO), 1>,
- Requires<[NotMips64]>;
-def : InstAlias<"move $dst, $src",
- (OR CPURegsOpnd:$dst, CPURegsOpnd:$src,ZERO), 1>,
+ (ADDu GPR32Opnd:$dst, GPR32Opnd:$src,ZERO), 1>,
Requires<[NotMips64]>;
-def : InstAlias<"bal $offset", (BGEZAL RA, brtarget:$offset), 1>;
+def : InstAlias<"bal $offset", (BGEZAL ZERO, brtarget:$offset), 0>;
def : InstAlias<"addu $rs, $rt, $imm",
- (ADDiu CPURegsOpnd:$rs, CPURegsOpnd:$rt, simm16:$imm), 0>;
+ (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
def : InstAlias<"add $rs, $rt, $imm",
- (ADDi CPURegsOpnd:$rs, CPURegsOpnd:$rt, simm16:$imm), 0>;
+ (ADDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
def : InstAlias<"and $rs, $rt, $imm",
- (ANDi CPURegsOpnd:$rs, CPURegsOpnd:$rt, simm16:$imm), 0>;
-def : InstAlias<"j $rs", (JR CPURegs:$rs), 0>,
- Requires<[NotMips64]>;
-def : InstAlias<"jalr $rs", (JALR RA, CPURegs:$rs)>, Requires<[NotMips64]>;
-def : InstAlias<"jal $rs", (JALR RA, CPURegs:$rs), 0>, Requires<[NotMips64]>;
-def : InstAlias<"jal $rd,$rs", (JALR CPURegs:$rd, CPURegs:$rs), 0>,
- Requires<[NotMips64]>;
+ (ANDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
+def : InstAlias<"j $rs", (JR GPR32Opnd:$rs), 0>;
+def : InstAlias<"jalr $rs", (JALR RA, GPR32Opnd:$rs), 0>;
+def : InstAlias<"jal $rs", (JALR RA, GPR32Opnd:$rs), 0>;
+def : InstAlias<"jal $rd,$rs", (JALR GPR32Opnd:$rd, GPR32Opnd:$rs), 0>;
def : InstAlias<"not $rt, $rs",
- (NOR CPURegsOpnd:$rt, CPURegsOpnd:$rs, ZERO), 1>;
+ (NOR GPR32Opnd:$rt, GPR32Opnd:$rs, ZERO), 0>;
def : InstAlias<"neg $rt, $rs",
- (SUB CPURegsOpnd:$rt, ZERO, CPURegsOpnd:$rs), 1>;
+ (SUB GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>;
def : InstAlias<"negu $rt, $rs",
- (SUBu CPURegsOpnd:$rt, ZERO, CPURegsOpnd:$rs), 1>;
+ (SUBu GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>;
def : InstAlias<"slt $rs, $rt, $imm",
- (SLTi CPURegsOpnd:$rs, CPURegs:$rt, simm16:$imm), 0>;
+ (SLTi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
def : InstAlias<"xor $rs, $rt, $imm",
- (XORi CPURegsOpnd:$rs, CPURegsOpnd:$rt, uimm16:$imm), 1>,
- Requires<[NotMips64]>;
+ (XORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
def : InstAlias<"or $rs, $rt, $imm",
- (ORi CPURegsOpnd:$rs, CPURegsOpnd:$rt, uimm16:$imm), 1>,
- Requires<[NotMips64]>;
+ (ORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
def : InstAlias<"nop", (SLL ZERO, ZERO, 0), 1>;
-def : InstAlias<"mfc0 $rt, $rd",
- (MFC0_3OP CPURegsOpnd:$rt, CPURegsOpnd:$rd, 0), 0>;
-def : InstAlias<"mtc0 $rt, $rd",
- (MTC0_3OP CPURegsOpnd:$rd, 0, CPURegsOpnd:$rt), 0>;
-def : InstAlias<"mfc2 $rt, $rd",
- (MFC2_3OP CPURegsOpnd:$rt, CPURegsOpnd:$rd, 0), 0>;
-def : InstAlias<"mtc2 $rt, $rd",
- (MTC2_3OP CPURegsOpnd:$rd, 0, CPURegsOpnd:$rt), 0>;
+def : InstAlias<"mfc0 $rt, $rd", (MFC0 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : InstAlias<"mtc0 $rt, $rd", (MTC0 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : InstAlias<"mfc2 $rt, $rd", (MFC2 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : InstAlias<"mtc2 $rt, $rd", (MTC2 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : InstAlias<"b $offset", (BEQ ZERO, ZERO, brtarget:$offset), 0>;
+def : InstAlias<"bnez $rs,$offset",
+ (BNE GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
+def : InstAlias<"beqz $rs,$offset",
+ (BEQ GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
+def : InstAlias<"syscall", (SYSCALL 0), 1>;
+
+def : InstAlias<"break $imm", (BREAK uimm10:$imm, 0), 1>;
+def : InstAlias<"break", (BREAK 0, 0), 1>;
+def : InstAlias<"ei", (EI ZERO), 1>;
+def : InstAlias<"di", (DI ZERO), 1>;
+
+def : InstAlias<"teq $rs, $rt", (TEQ GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"tge $rs, $rt", (TGE GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"tgeu $rs, $rt", (TGEU GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"tlt $rs, $rt", (TLT GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"tltu $rs, $rt", (TLTU GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"tne $rs, $rt", (TNE GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"sub, $rd, $rs, $imm",
+ (ADDi GPR32Opnd:$rd, GPR32Opnd:$rs, InvertedImOperand:$imm)>;
+def : InstAlias<"subu, $rd, $rs, $imm",
+ (ADDiu GPR32Opnd:$rd, GPR32Opnd:$rs, InvertedImOperand:$imm)>;
//===----------------------------------------------------------------------===//
// Assembler Pseudo Instructions
@@ -1103,19 +1179,17 @@ def : InstAlias<"mtc2 $rt, $rd",
class LoadImm32< string instr_asm, Operand Od, RegisterOperand RO> :
MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm32),
!strconcat(instr_asm, "\t$rt, $imm32")> ;
-def LoadImm32Reg : LoadImm32<"li", shamt,CPURegsOpnd>;
+def LoadImm32Reg : LoadImm32<"li", uimm5, GPR32Opnd>;
class LoadAddress<string instr_asm, Operand MemOpnd, RegisterOperand RO> :
MipsAsmPseudoInst<(outs RO:$rt), (ins MemOpnd:$addr),
!strconcat(instr_asm, "\t$rt, $addr")> ;
-def LoadAddr32Reg : LoadAddress<"la", mem, CPURegsOpnd>;
+def LoadAddr32Reg : LoadAddress<"la", mem, GPR32Opnd>;
class LoadAddressImm<string instr_asm, Operand Od, RegisterOperand RO> :
MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm32),
!strconcat(instr_asm, "\t$rt, $imm32")> ;
-def LoadAddr32Imm : LoadAddressImm<"la", shamt,CPURegsOpnd>;
-
-
+def LoadAddr32Imm : LoadAddressImm<"la", uimm5, GPR32Opnd>;
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
@@ -1141,13 +1215,13 @@ def : MipsPat<(i32 imm:$imm),
(ORi (LUi (HI16 imm:$imm)), (LO16 imm:$imm))>;
// Carry MipsPatterns
-def : MipsPat<(subc CPURegs:$lhs, CPURegs:$rhs),
- (SUBu CPURegs:$lhs, CPURegs:$rhs)>;
+def : MipsPat<(subc GPR32:$lhs, GPR32:$rhs),
+ (SUBu GPR32:$lhs, GPR32:$rhs)>;
let Predicates = [HasStdEnc, NotDSP] in {
- def : MipsPat<(addc CPURegs:$lhs, CPURegs:$rhs),
- (ADDu CPURegs:$lhs, CPURegs:$rhs)>;
- def : MipsPat<(addc CPURegs:$src, immSExt16:$imm),
- (ADDiu CPURegs:$src, imm:$imm)>;
+ def : MipsPat<(addc GPR32:$lhs, GPR32:$rhs),
+ (ADDu GPR32:$lhs, GPR32:$rhs)>;
+ def : MipsPat<(addc GPR32:$src, immSExt16:$imm),
+ (ADDiu GPR32:$src, imm:$imm)>;
}
// Call
@@ -1155,8 +1229,8 @@ def : MipsPat<(MipsJmpLink (i32 tglobaladdr:$dst)),
(JAL tglobaladdr:$dst)>;
def : MipsPat<(MipsJmpLink (i32 texternalsym:$dst)),
(JAL texternalsym:$dst)>;
-//def : MipsPat<(MipsJmpLink CPURegs:$dst),
-// (JALR CPURegs:$dst)>;
+//def : MipsPat<(MipsJmpLink GPR32:$dst),
+// (JALR GPR32:$dst)>;
// Tail call
def : MipsPat<(MipsTailCall (iPTR tglobaladdr:$dst)),
@@ -1178,58 +1252,49 @@ def : MipsPat<(MipsLo tconstpool:$in), (ADDiu ZERO, tconstpool:$in)>;
def : MipsPat<(MipsLo tglobaltlsaddr:$in), (ADDiu ZERO, tglobaltlsaddr:$in)>;
def : MipsPat<(MipsLo texternalsym:$in), (ADDiu ZERO, texternalsym:$in)>;
-def : MipsPat<(add CPURegs:$hi, (MipsLo tglobaladdr:$lo)),
- (ADDiu CPURegs:$hi, tglobaladdr:$lo)>;
-def : MipsPat<(add CPURegs:$hi, (MipsLo tblockaddress:$lo)),
- (ADDiu CPURegs:$hi, tblockaddress:$lo)>;
-def : MipsPat<(add CPURegs:$hi, (MipsLo tjumptable:$lo)),
- (ADDiu CPURegs:$hi, tjumptable:$lo)>;
-def : MipsPat<(add CPURegs:$hi, (MipsLo tconstpool:$lo)),
- (ADDiu CPURegs:$hi, tconstpool:$lo)>;
-def : MipsPat<(add CPURegs:$hi, (MipsLo tglobaltlsaddr:$lo)),
- (ADDiu CPURegs:$hi, tglobaltlsaddr:$lo)>;
+def : MipsPat<(add GPR32:$hi, (MipsLo tglobaladdr:$lo)),
+ (ADDiu GPR32:$hi, tglobaladdr:$lo)>;
+def : MipsPat<(add GPR32:$hi, (MipsLo tblockaddress:$lo)),
+ (ADDiu GPR32:$hi, tblockaddress:$lo)>;
+def : MipsPat<(add GPR32:$hi, (MipsLo tjumptable:$lo)),
+ (ADDiu GPR32:$hi, tjumptable:$lo)>;
+def : MipsPat<(add GPR32:$hi, (MipsLo tconstpool:$lo)),
+ (ADDiu GPR32:$hi, tconstpool:$lo)>;
+def : MipsPat<(add GPR32:$hi, (MipsLo tglobaltlsaddr:$lo)),
+ (ADDiu GPR32:$hi, tglobaltlsaddr:$lo)>;
// gp_rel relocs
-def : MipsPat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)),
- (ADDiu CPURegs:$gp, tglobaladdr:$in)>;
-def : MipsPat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)),
- (ADDiu CPURegs:$gp, tconstpool:$in)>;
+def : MipsPat<(add GPR32:$gp, (MipsGPRel tglobaladdr:$in)),
+ (ADDiu GPR32:$gp, tglobaladdr:$in)>;
+def : MipsPat<(add GPR32:$gp, (MipsGPRel tconstpool:$in)),
+ (ADDiu GPR32:$gp, tconstpool:$in)>;
// wrapper_pic
class WrapperPat<SDNode node, Instruction ADDiuOp, RegisterClass RC>:
MipsPat<(MipsWrapper RC:$gp, node:$in),
(ADDiuOp RC:$gp, node:$in)>;
-def : WrapperPat<tglobaladdr, ADDiu, CPURegs>;
-def : WrapperPat<tconstpool, ADDiu, CPURegs>;
-def : WrapperPat<texternalsym, ADDiu, CPURegs>;
-def : WrapperPat<tblockaddress, ADDiu, CPURegs>;
-def : WrapperPat<tjumptable, ADDiu, CPURegs>;
-def : WrapperPat<tglobaltlsaddr, ADDiu, CPURegs>;
+def : WrapperPat<tglobaladdr, ADDiu, GPR32>;
+def : WrapperPat<tconstpool, ADDiu, GPR32>;
+def : WrapperPat<texternalsym, ADDiu, GPR32>;
+def : WrapperPat<tblockaddress, ADDiu, GPR32>;
+def : WrapperPat<tjumptable, ADDiu, GPR32>;
+def : WrapperPat<tglobaltlsaddr, ADDiu, GPR32>;
// Mips does not have "not", so we expand our way
-def : MipsPat<(not CPURegs:$in),
- (NOR CPURegsOpnd:$in, ZERO)>;
+def : MipsPat<(not GPR32:$in),
+ (NOR GPR32Opnd:$in, ZERO)>;
// extended loads
-let Predicates = [NotN64, HasStdEnc] in {
+let Predicates = [HasStdEnc] in {
def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
}
-let Predicates = [IsN64, HasStdEnc] in {
- def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>;
- def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>;
- def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu_P8 addr:$src)>;
-}
// peepholes
-let Predicates = [NotN64, HasStdEnc] in {
- def : MipsPat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
-}
-let Predicates = [IsN64, HasStdEnc] in {
- def : MipsPat<(store (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>;
-}
+let Predicates = [HasStdEnc] in
+def : MipsPat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
// brcond patterns
multiclass BrcondPats<RegisterClass RC, Instruction BEQOp, Instruction BNEOp,
@@ -1248,6 +1313,10 @@ def : MipsPat<(brcond (i32 (setge RC:$lhs, immSExt16:$rhs)), bb:$dst),
(BEQ (SLTiOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
def : MipsPat<(brcond (i32 (setuge RC:$lhs, immSExt16:$rhs)), bb:$dst),
(BEQ (SLTiuOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setgt RC:$lhs, immSExt16Plus1:$rhs)), bb:$dst),
+ (BEQ (SLTiOp RC:$lhs, (Plus1 imm:$rhs)), ZERO, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setugt RC:$lhs, immSExt16Plus1:$rhs)), bb:$dst),
+ (BEQ (SLTiuOp RC:$lhs, (Plus1 imm:$rhs)), ZERO, bb:$dst)>;
def : MipsPat<(brcond (i32 (setle RC:$lhs, RC:$rhs)), bb:$dst),
(BEQ (SLTOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>;
@@ -1258,11 +1327,20 @@ def : MipsPat<(brcond RC:$cond, bb:$dst),
(BNEOp RC:$cond, ZEROReg, bb:$dst)>;
}
-defm : BrcondPats<CPURegs, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>;
+defm : BrcondPats<GPR32, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>;
+
+def : MipsPat<(brcond (i32 (setlt i32:$lhs, 1)), bb:$dst),
+ (BLEZ i32:$lhs, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setgt i32:$lhs, -1)), bb:$dst),
+ (BGEZ i32:$lhs, bb:$dst)>;
// setcc patterns
multiclass SeteqPats<RegisterClass RC, Instruction SLTiuOp, Instruction XOROp,
Instruction SLTuOp, Register ZEROReg> {
+ def : MipsPat<(seteq RC:$lhs, 0),
+ (SLTiuOp RC:$lhs, 1)>;
+ def : MipsPat<(setne RC:$lhs, 0),
+ (SLTuOp ZEROReg, RC:$lhs)>;
def : MipsPat<(seteq RC:$lhs, RC:$rhs),
(SLTiuOp (XOROp RC:$lhs, RC:$rhs), 1)>;
def : MipsPat<(setne RC:$lhs, RC:$rhs),
@@ -1298,31 +1376,22 @@ multiclass SetgeImmPats<RegisterClass RC, Instruction SLTiOp,
(XORi (SLTiuOp RC:$lhs, immSExt16:$rhs), 1)>;
}
-defm : SeteqPats<CPURegs, SLTiu, XOR, SLTu, ZERO>;
-defm : SetlePats<CPURegs, SLT, SLTu>;
-defm : SetgtPats<CPURegs, SLT, SLTu>;
-defm : SetgePats<CPURegs, SLT, SLTu>;
-defm : SetgeImmPats<CPURegs, SLTi, SLTiu>;
+defm : SeteqPats<GPR32, SLTiu, XOR, SLTu, ZERO>;
+defm : SetlePats<GPR32, SLT, SLTu>;
+defm : SetgtPats<GPR32, SLT, SLTu>;
+defm : SetgePats<GPR32, SLT, SLTu>;
+defm : SetgeImmPats<GPR32, SLTi, SLTiu>;
// bswap pattern
-def : MipsPat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>;
-
-// mflo/hi patterns.
-def : MipsPat<(i32 (ExtractLOHI ACRegs:$ac, imm:$lohi_idx)),
- (EXTRACT_SUBREG ACRegs:$ac, imm:$lohi_idx)>;
+def : MipsPat<(bswap GPR32:$rt), (ROTR (WSBH GPR32:$rt), 16)>;
// Load halfword/word patterns.
let AddedComplexity = 40 in {
- let Predicates = [NotN64, HasStdEnc] in {
+ let Predicates = [HasStdEnc] in {
def : LoadRegImmPat<LBu, i32, zextloadi8>;
def : LoadRegImmPat<LH, i32, sextloadi16>;
def : LoadRegImmPat<LW, i32, load>;
}
- let Predicates = [IsN64, HasStdEnc] in {
- def : LoadRegImmPat<LBu_P8, i32, zextloadi8>;
- def : LoadRegImmPat<LH_P8, i32, sextloadi16>;
- def : LoadRegImmPat<LW_P8, i32, load>;
- }
}
//===----------------------------------------------------------------------===//
@@ -1343,6 +1412,10 @@ include "Mips16InstrInfo.td"
include "MipsDSPInstrFormats.td"
include "MipsDSPInstrInfo.td"
+// MSA
+include "MipsMSAInstrFormats.td"
+include "MipsMSAInstrInfo.td"
+
// Micromips
include "MicroMipsInstrFormats.td"
include "MicroMipsInstrInfo.td"
diff --git a/lib/Target/Mips/MipsJITInfo.cpp b/lib/Target/Mips/MipsJITInfo.cpp
index 1b2a325d3ce6..d76cb1da2ddc 100644
--- a/lib/Target/Mips/MipsJITInfo.cpp
+++ b/lib/Target/Mips/MipsJITInfo.cpp
@@ -218,9 +218,9 @@ void *MipsJITInfo::emitFunctionStub(const Function *F, void *Fn,
Hi++;
int Lo = (int)(EmittedAddr & 0xffff);
- // lui t9, %hi(EmittedAddr)
- // addiu t9, t9, %lo(EmittedAddr)
- // jalr t8, t9
+ // lui $t9, %hi(EmittedAddr)
+ // addiu $t9, $t9, %lo(EmittedAddr)
+ // jalr $t8, $t9
// nop
if (IsLittleEndian) {
JCE.emitWordLE(0xf << 26 | 25 << 16 | Hi);
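[Editorial aside, not part of the patch] A minimal sketch of how the first stub word above is assembled: MIPS lui places opcode 0x0f in bits 31-26, leaves rs as zero, puts rt in bits 20-16 (25 is $t9) and the 16-bit immediate in bits 15-0, which is what the 0xf << 26 | 25 << 16 | Hi expression builds. The helper name is hypothetical.

  #include <cassert>
  #include <cstdint>

  // Illustrative only; assumes the standard MIPS I-type layout described above.
  static uint32_t encodeLui(uint32_t Rt, uint16_t Imm) {
    return (0x0fu << 26) | (Rt << 16) | Imm;   // opcode | rt | imm16, rs = 0
  }

  int main() {
    assert(encodeLui(25, 0x1234) == ((0xfu << 26) | (25u << 16) | 0x1234u)); // lui $t9, 0x1234
    return 0;
  }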
diff --git a/lib/Target/Mips/MipsLongBranch.cpp b/lib/Target/Mips/MipsLongBranch.cpp
index bf5ad3703119..2efe57847add 100644
--- a/lib/Target/Mips/MipsLongBranch.cpp
+++ b/lib/Target/Mips/MipsLongBranch.cpp
@@ -65,7 +65,6 @@ namespace {
static char ID;
MipsLongBranch(TargetMachine &tm)
: MachineFunctionPass(ID), TM(tm),
- TII(static_cast<const MipsInstrInfo*>(tm.getInstrInfo())),
IsPIC(TM.getRelocationModel() == Reloc::PIC_),
ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()),
LongBranchSeqSize(!IsPIC ? 2 : (ABI == MipsSubtarget::N64 ? 13 : 9)) {}
@@ -85,7 +84,6 @@ namespace {
void expandToLongBranch(MBBInfo &Info);
const TargetMachine &TM;
- const MipsInstrInfo *TII;
MachineFunction *MF;
SmallVector<MBBInfo, 16> MBBInfos;
bool IsPIC;
@@ -172,6 +170,8 @@ void MipsLongBranch::initMBBInfo() {
MBBInfos.clear();
MBBInfos.resize(MF->size());
+ const MipsInstrInfo *TII =
+ static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
for (unsigned I = 0, E = MBBInfos.size(); I < E; ++I) {
MachineBasicBlock *MBB = MF->getBlockNumbered(I);
@@ -217,7 +217,9 @@ int64_t MipsLongBranch::computeOffset(const MachineInstr *Br) {
// MachineBasicBlock operand MBBOpnd.
void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br,
DebugLoc DL, MachineBasicBlock *MBBOpnd) {
- unsigned NewOpc = TII->GetOppositeBranchOpc(Br->getOpcode());
+ const MipsInstrInfo *TII =
+ static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
+ unsigned NewOpc = TII->getOppositeBranchOpc(Br->getOpcode());
const MCInstrDesc &NewDesc = TII->get(NewOpc);
MachineInstrBuilder MIB = BuildMI(MBB, Br, DL, NewDesc);
@@ -235,6 +237,11 @@ void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br,
MIB.addMBB(MBBOpnd);
+ // Bundle the instruction in the delay slot to the newly created branch
+ // and erase the original branch.
+ assert(Br->isBundledWithSucc());
+ MachineBasicBlock::instr_iterator II(Br);
+ MIBundleBuilder(&*MIB).append((++II)->removeFromBundle());
Br->eraseFromParent();
}
@@ -247,6 +254,9 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
MachineFunction::iterator FallThroughMBB = ++MachineFunction::iterator(MBB);
MachineBasicBlock *LongBrMBB = MF->CreateMachineBasicBlock(BB);
+ const MipsInstrInfo *TII =
+ static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
+
MF->insert(FallThroughMBB, LongBrMBB);
MBB->removeSuccessor(TgtMBB);
MBB->addSuccessor(LongBrMBB);
@@ -399,6 +409,9 @@ static void emitGPDisp(MachineFunction &F, const MipsInstrInfo *TII) {
}
bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
+ const MipsInstrInfo *TII =
+ static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
+
if (TM.getSubtarget<MipsSubtarget>().inMips16Mode())
return false;
if ((TM.getRelocationModel() == Reloc::PIC_) &&
@@ -412,7 +425,7 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
MF = &F;
initMBBInfo();
- SmallVector<MBBInfo, 16>::iterator I, E = MBBInfos.end();
+ SmallVectorImpl<MBBInfo>::iterator I, E = MBBInfos.end();
bool EverMadeChange = false, MadeChange = true;
while (MadeChange) {
@@ -424,8 +437,10 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
if (!I->Br || I->HasLongBranch)
continue;
+ int ShVal = TM.getSubtarget<MipsSubtarget>().inMicroMipsMode() ? 2 : 4;
+
// Check if offset fits into 16-bit immediate field of branches.
- if (!ForceLongBranch && isInt<16>(computeOffset(I->Br) / 4))
+ if (!ForceLongBranch && isInt<16>(computeOffset(I->Br) / ShVal))
continue;
I->HasLongBranch = true;
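[Editorial aside, not part of the patch] A minimal sketch of the range check this hunk changes: the 16-bit branch displacement counts instruction-size units, so the byte offset computed by computeOffset() is divided by 2 in microMIPS mode and by 4 otherwise before the signed 16-bit test that isInt<16>() performs. The helper name below is hypothetical.

  #include <cstdint>

  // Scale the byte offset by the instruction-size unit, then test the signed
  // 16-bit range, mirroring the ShVal logic above.
  static bool fitsShortBranch(int64_t ByteOffset, bool InMicroMipsMode) {
    const int64_t ShVal = InMicroMipsMode ? 2 : 4;
    const int64_t Units = ByteOffset / ShVal;
    return Units >= INT16_MIN && Units <= INT16_MAX;
  }
  // Example: a forward offset of 0x1FFFC bytes is 32767 units at scale 4 and
  // still fits, but at the microMIPS scale of 2 it is 65534 units and forces
  // the long-branch expansion.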
diff --git a/lib/Target/Mips/MipsMCInstLower.cpp b/lib/Target/Mips/MipsMCInstLower.cpp
index d836975eb7d2..b6dfadce14e9 100644
--- a/lib/Target/Mips/MipsMCInstLower.cpp
+++ b/lib/Target/Mips/MipsMCInstLower.cpp
@@ -28,8 +28,7 @@ using namespace llvm;
MipsMCInstLower::MipsMCInstLower(MipsAsmPrinter &asmprinter)
: AsmPrinter(asmprinter) {}
-void MipsMCInstLower::Initialize(Mangler *M, MCContext *C) {
- Mang = M;
+void MipsMCInstLower::Initialize(MCContext *C) {
Ctx = C;
}
@@ -74,7 +73,7 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
break;
case MachineOperand::MO_GlobalAddress:
- Symbol = Mang->getSymbol(MO.getGlobal());
+ Symbol = AsmPrinter.getSymbol(MO.getGlobal());
Offset += MO.getOffset();
break;
diff --git a/lib/Target/Mips/MipsMCInstLower.h b/lib/Target/Mips/MipsMCInstLower.h
index c4a6016105b2..4570bd90997f 100644
--- a/lib/Target/Mips/MipsMCInstLower.h
+++ b/lib/Target/Mips/MipsMCInstLower.h
@@ -19,7 +19,6 @@ namespace llvm {
class MCOperand;
class MachineInstr;
class MachineFunction;
- class Mangler;
class MipsAsmPrinter;
/// MipsMCInstLower - This class is used to lower an MachineInstr into an
@@ -27,11 +26,10 @@ namespace llvm {
class LLVM_LIBRARY_VISIBILITY MipsMCInstLower {
typedef MachineOperand::MachineOperandType MachineOperandType;
MCContext *Ctx;
- Mangler *Mang;
MipsAsmPrinter &AsmPrinter;
public:
MipsMCInstLower(MipsAsmPrinter &asmprinter);
- void Initialize(Mangler *mang, MCContext *C);
+ void Initialize(MCContext *C);
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
MCOperand LowerOperand(const MachineOperand& MO, unsigned offset = 0) const;
diff --git a/lib/Target/Mips/MipsMSAInstrFormats.td b/lib/Target/Mips/MipsMSAInstrFormats.td
new file mode 100644
index 000000000000..875dc0b4034d
--- /dev/null
+++ b/lib/Target/Mips/MipsMSAInstrFormats.td
@@ -0,0 +1,406 @@
+//===- MipsMSAInstrFormats.td - Mips Instruction Formats ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def HasMSA : Predicate<"Subtarget.hasMSA()">,
+ AssemblerPredicate<"FeatureMSA">;
+
+class MSAInst : MipsInst<(outs), (ins), "", [], NoItinerary, FrmOther> {
+ let Predicates = [HasMSA];
+ let Inst{31-26} = 0b011110;
+}
+
+class MSACBranch : MSAInst {
+ let Inst{31-26} = 0b010001;
+}
+
+class MSASpecial : MSAInst {
+ let Inst{31-26} = 0b000000;
+}
+
+class PseudoMSA<dag outs, dag ins, list<dag> pattern,
+ InstrItinClass itin = IIPseudo>:
+ MipsPseudo<outs, ins, pattern, itin> {
+ let Predicates = [HasMSA];
+}
+
+class MSA_BIT_B_FMT<bits<3> major, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+ bits<3> m;
+
+ let Inst{25-23} = major;
+ let Inst{22-19} = 0b1110;
+ let Inst{18-16} = m;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_BIT_H_FMT<bits<3> major, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+ bits<4> m;
+
+ let Inst{25-23} = major;
+ let Inst{22-20} = 0b110;
+ let Inst{19-16} = m;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_BIT_W_FMT<bits<3> major, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+ bits<5> m;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = 0b10;
+ let Inst{20-16} = m;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_BIT_D_FMT<bits<3> major, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+ bits<6> m;
+
+ let Inst{25-23} = major;
+ let Inst{22} = 0b0;
+ let Inst{21-16} = m;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_2R_FILL_FMT<bits<8> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<5> rs;
+ bits<5> wd;
+
+ let Inst{25-18} = major;
+ let Inst{17-16} = df;
+ let Inst{15-11} = rs;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_2R_FMT<bits<8> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-18} = major;
+ let Inst{17-16} = df;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_2RF_FMT<bits<9> major, bits<1> df, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-17} = major;
+ let Inst{16} = df;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_3R_FMT<bits<3> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<5> wt;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = df;
+ let Inst{20-16} = wt;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_3RF_FMT<bits<4> major, bits<1> df, bits<6> minor>: MSAInst {
+ bits<5> wt;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21} = df;
+ let Inst{20-16} = wt;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_3R_INDEX_FMT<bits<3> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<5> rt;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = df;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_FMT<bits<10> major, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-16} = major;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_CFCMSA_FMT<bits<10> major, bits<6> minor>: MSAInst {
+ bits<5> rd;
+ bits<5> cs;
+
+ let Inst{25-16} = major;
+ let Inst{15-11} = cs;
+ let Inst{10-6} = rd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_CTCMSA_FMT<bits<10> major, bits<6> minor>: MSAInst {
+ bits<5> rs;
+ bits<5> cd;
+
+ let Inst{25-16} = major;
+ let Inst{15-11} = rs;
+ let Inst{10-6} = cd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_B_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-20} = 0b00;
+ let Inst{19-16} = n{3-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_H_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-19} = 0b100;
+ let Inst{18-16} = n{2-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_W_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-18} = 0b1100;
+ let Inst{17-16} = n{1-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_D_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-17} = 0b11100;
+ let Inst{16} = n{0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_COPY_B_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> rd;
+
+ let Inst{25-22} = major;
+ let Inst{21-20} = 0b00;
+ let Inst{19-16} = n{3-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = rd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_COPY_H_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> rd;
+
+ let Inst{25-22} = major;
+ let Inst{21-19} = 0b100;
+ let Inst{18-16} = n{2-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = rd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_COPY_W_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> rd;
+
+ let Inst{25-22} = major;
+ let Inst{21-18} = 0b1100;
+ let Inst{17-16} = n{1-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = rd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_INSERT_B_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<6> n;
+ bits<5> rs;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-20} = 0b00;
+ let Inst{19-16} = n{3-0};
+ let Inst{15-11} = rs;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_INSERT_H_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<6> n;
+ bits<5> rs;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-19} = 0b100;
+ let Inst{18-16} = n{2-0};
+ let Inst{15-11} = rs;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_INSERT_W_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<6> n;
+ bits<5> rs;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-18} = 0b1100;
+ let Inst{17-16} = n{1-0};
+ let Inst{15-11} = rs;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_I5_FMT<bits<3> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<5> imm;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = df;
+ let Inst{20-16} = imm;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_I8_FMT<bits<2> major, bits<6> minor>: MSAInst {
+ bits<8> u8;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-24} = major;
+ let Inst{23-16} = u8;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_I10_FMT<bits<3> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<10> s10;
+ bits<5> wd;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = df;
+ let Inst{20-11} = s10;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_MI10_FMT<bits<2> df, bits<4> minor>: MSAInst {
+ bits<21> addr;
+ bits<5> wd;
+
+ let Inst{25-16} = addr{9-0};
+ let Inst{15-11} = addr{20-16};
+ let Inst{10-6} = wd;
+ let Inst{5-2} = minor;
+ let Inst{1-0} = df;
+}
+
+class MSA_VEC_FMT<bits<5> major, bits<6> minor>: MSAInst {
+ bits<5> wt;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-21} = major;
+ let Inst{20-16} = wt;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_CBRANCH_FMT<bits<3> major, bits<2> df>: MSACBranch {
+ bits<16> offset;
+ bits<5> wt;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = df;
+ let Inst{20-16} = wt;
+ let Inst{15-0} = offset;
+}
+
+class MSA_CBRANCH_V_FMT<bits<5> major>: MSACBranch {
+ bits<16> offset;
+ bits<5> wt;
+
+ let Inst{25-21} = major;
+ let Inst{20-16} = wt;
+ let Inst{15-0} = offset;
+}
+
+class SPECIAL_LSA_FMT<bits<6> minor>: MSASpecial {
+ bits<5> rs;
+ bits<5> rt;
+ bits<5> rd;
+ bits<2> sa;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-8} = 0b000;
+ let Inst{7-6} = sa;
+ let Inst{5-0} = minor;
+}
diff --git a/lib/Target/Mips/MipsMSAInstrInfo.td b/lib/Target/Mips/MipsMSAInstrInfo.td
new file mode 100644
index 000000000000..82c51a6473da
--- /dev/null
+++ b/lib/Target/Mips/MipsMSAInstrInfo.td
@@ -0,0 +1,3694 @@
+//===- MipsMSAInstrInfo.td - MSA ASE instructions -*- tablegen ------------*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes Mips MSA ASE instructions.
+//
+//===----------------------------------------------------------------------===//
+
+def SDT_MipsVecCond : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>]>;
+def SDT_VSetCC : SDTypeProfile<1, 3, [SDTCisInt<0>,
+ SDTCisInt<1>,
+ SDTCisSameAs<1, 2>,
+ SDTCisVT<3, OtherVT>]>;
+def SDT_VFSetCC : SDTypeProfile<1, 3, [SDTCisInt<0>,
+ SDTCisFP<1>,
+ SDTCisSameAs<1, 2>,
+ SDTCisVT<3, OtherVT>]>;
+def SDT_VSHF : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>,
+ SDTCisInt<1>, SDTCisVec<1>,
+ SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>]>;
+def SDT_SHF : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVec<0>,
+ SDTCisVT<1, i32>, SDTCisSameAs<0, 2>]>;
+def SDT_ILV : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVec<0>,
+ SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>]>;
+
+def MipsVAllNonZero : SDNode<"MipsISD::VALL_NONZERO", SDT_MipsVecCond>;
+def MipsVAnyNonZero : SDNode<"MipsISD::VANY_NONZERO", SDT_MipsVecCond>;
+def MipsVAllZero : SDNode<"MipsISD::VALL_ZERO", SDT_MipsVecCond>;
+def MipsVAnyZero : SDNode<"MipsISD::VANY_ZERO", SDT_MipsVecCond>;
+def MipsVSMax : SDNode<"MipsISD::VSMAX", SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def MipsVSMin : SDNode<"MipsISD::VSMIN", SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def MipsVUMax : SDNode<"MipsISD::VUMAX", SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def MipsVUMin : SDNode<"MipsISD::VUMIN", SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def MipsVNOR : SDNode<"MipsISD::VNOR", SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def MipsVSHF : SDNode<"MipsISD::VSHF", SDT_VSHF>;
+def MipsSHF : SDNode<"MipsISD::SHF", SDT_SHF>;
+def MipsILVEV : SDNode<"MipsISD::ILVEV", SDT_ILV>;
+def MipsILVOD : SDNode<"MipsISD::ILVOD", SDT_ILV>;
+def MipsILVL : SDNode<"MipsISD::ILVL", SDT_ILV>;
+def MipsILVR : SDNode<"MipsISD::ILVR", SDT_ILV>;
+def MipsPCKEV : SDNode<"MipsISD::PCKEV", SDT_ILV>;
+def MipsPCKOD : SDNode<"MipsISD::PCKOD", SDT_ILV>;
+
+def vsetcc : SDNode<"ISD::SETCC", SDT_VSetCC>;
+def vfsetcc : SDNode<"ISD::SETCC", SDT_VFSetCC>;
+
+def MipsVExtractSExt : SDNode<"MipsISD::VEXTRACT_SEXT_ELT",
+ SDTypeProfile<1, 3, [SDTCisPtrTy<2>]>, []>;
+def MipsVExtractZExt : SDNode<"MipsISD::VEXTRACT_ZEXT_ELT",
+ SDTypeProfile<1, 3, [SDTCisPtrTy<2>]>, []>;
+
+// Operands
+
+def uimm2 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+}
+
+// The immediate of an LSA instruction needs special handling
+// as the encoded value should be subtracted by one.
+def uimm2LSAAsmOperand : AsmOperandClass {
+ let Name = "LSAImm";
+ let ParserMethod = "parseLSAImm";
+ let RenderMethod = "addImmOperands";
+}
+
+def LSAImm : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+ let EncoderMethod = "getLSAImmEncoding";
+ let DecoderMethod = "DecodeLSAImm";
+ let ParserMatchClass = uimm2LSAAsmOperand;
+}
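[Editorial aside, not part of the patch] A minimal sketch of the adjustment the comment above describes: the assembly-level LSA shift amount 1..4 is stored minus one in the 2-bit sa field (see SPECIAL_LSA_FMT in MipsMSAInstrFormats.td and the immZExt2Lsa leaf below). The helper names are hypothetical stand-ins for what the getLSAImmEncoding and DecodeLSAImm hooks named above are expected to do.

  #include <cassert>
  #include <cstdint>

  static uint32_t encodeLSAImm(uint32_t AsmValue) { // written as 1..4 in assembly
    assert(AsmValue >= 1 && AsmValue <= 4);
    return AsmValue - 1;                            // stored as 0..3 in the sa field
  }

  static uint32_t decodeLSAImm(uint32_t Field) {    // 2-bit field from the word
    return Field + 1;                               // printed shift amount
  }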
+
+def uimm3 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def uimm4 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def uimm8 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def simm5 : Operand<i32>;
+
+def simm10 : Operand<i32>;
+
+def vsplat_uimm1 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm2 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm3 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm4 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm5 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm6 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm8 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_simm5 : Operand<vAny>;
+
+def vsplat_simm10 : Operand<vAny>;
+
+def immZExt2Lsa : ImmLeaf<i32, [{return isUInt<2>(Imm - 1);}]>;
+
+// Pattern fragments
+def vextract_sext_i8 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractSExt node:$vec, node:$idx, i8)>;
+def vextract_sext_i16 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractSExt node:$vec, node:$idx, i16)>;
+def vextract_sext_i32 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractSExt node:$vec, node:$idx, i32)>;
+
+def vextract_zext_i8 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractZExt node:$vec, node:$idx, i8)>;
+def vextract_zext_i16 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractZExt node:$vec, node:$idx, i16)>;
+def vextract_zext_i32 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractZExt node:$vec, node:$idx, i32)>;
+
+def vinsert_v16i8 : PatFrag<(ops node:$vec, node:$val, node:$idx),
+ (v16i8 (vector_insert node:$vec, node:$val, node:$idx))>;
+def vinsert_v8i16 : PatFrag<(ops node:$vec, node:$val, node:$idx),
+ (v8i16 (vector_insert node:$vec, node:$val, node:$idx))>;
+def vinsert_v4i32 : PatFrag<(ops node:$vec, node:$val, node:$idx),
+ (v4i32 (vector_insert node:$vec, node:$val, node:$idx))>;
+
+class vfsetcc_type<ValueType ResTy, ValueType OpTy, CondCode CC> :
+ PatFrag<(ops node:$lhs, node:$rhs),
+ (ResTy (vfsetcc (OpTy node:$lhs), (OpTy node:$rhs), CC))>;
+
+// ISD::SETFALSE cannot occur
+def vfsetoeq_v4f32 : vfsetcc_type<v4i32, v4f32, SETOEQ>;
+def vfsetoeq_v2f64 : vfsetcc_type<v2i64, v2f64, SETOEQ>;
+def vfsetoge_v4f32 : vfsetcc_type<v4i32, v4f32, SETOGE>;
+def vfsetoge_v2f64 : vfsetcc_type<v2i64, v2f64, SETOGE>;
+def vfsetogt_v4f32 : vfsetcc_type<v4i32, v4f32, SETOGT>;
+def vfsetogt_v2f64 : vfsetcc_type<v2i64, v2f64, SETOGT>;
+def vfsetole_v4f32 : vfsetcc_type<v4i32, v4f32, SETOLE>;
+def vfsetole_v2f64 : vfsetcc_type<v2i64, v2f64, SETOLE>;
+def vfsetolt_v4f32 : vfsetcc_type<v4i32, v4f32, SETOLT>;
+def vfsetolt_v2f64 : vfsetcc_type<v2i64, v2f64, SETOLT>;
+def vfsetone_v4f32 : vfsetcc_type<v4i32, v4f32, SETONE>;
+def vfsetone_v2f64 : vfsetcc_type<v2i64, v2f64, SETONE>;
+def vfsetord_v4f32 : vfsetcc_type<v4i32, v4f32, SETO>;
+def vfsetord_v2f64 : vfsetcc_type<v2i64, v2f64, SETO>;
+def vfsetun_v4f32 : vfsetcc_type<v4i32, v4f32, SETUO>;
+def vfsetun_v2f64 : vfsetcc_type<v2i64, v2f64, SETUO>;
+def vfsetueq_v4f32 : vfsetcc_type<v4i32, v4f32, SETUEQ>;
+def vfsetueq_v2f64 : vfsetcc_type<v2i64, v2f64, SETUEQ>;
+def vfsetuge_v4f32 : vfsetcc_type<v4i32, v4f32, SETUGE>;
+def vfsetuge_v2f64 : vfsetcc_type<v2i64, v2f64, SETUGE>;
+def vfsetugt_v4f32 : vfsetcc_type<v4i32, v4f32, SETUGT>;
+def vfsetugt_v2f64 : vfsetcc_type<v2i64, v2f64, SETUGT>;
+def vfsetule_v4f32 : vfsetcc_type<v4i32, v4f32, SETULE>;
+def vfsetule_v2f64 : vfsetcc_type<v2i64, v2f64, SETULE>;
+def vfsetult_v4f32 : vfsetcc_type<v4i32, v4f32, SETULT>;
+def vfsetult_v2f64 : vfsetcc_type<v2i64, v2f64, SETULT>;
+def vfsetune_v4f32 : vfsetcc_type<v4i32, v4f32, SETUNE>;
+def vfsetune_v2f64 : vfsetcc_type<v2i64, v2f64, SETUNE>;
+// ISD::SETTRUE cannot occur
+// ISD::SETFALSE2 cannot occur
+// ISD::SETTRUE2 cannot occur
+
+class vsetcc_type<ValueType ResTy, CondCode CC> :
+ PatFrag<(ops node:$lhs, node:$rhs),
+ (ResTy (vsetcc node:$lhs, node:$rhs, CC))>;
+
+def vseteq_v16i8 : vsetcc_type<v16i8, SETEQ>;
+def vseteq_v8i16 : vsetcc_type<v8i16, SETEQ>;
+def vseteq_v4i32 : vsetcc_type<v4i32, SETEQ>;
+def vseteq_v2i64 : vsetcc_type<v2i64, SETEQ>;
+def vsetle_v16i8 : vsetcc_type<v16i8, SETLE>;
+def vsetle_v8i16 : vsetcc_type<v8i16, SETLE>;
+def vsetle_v4i32 : vsetcc_type<v4i32, SETLE>;
+def vsetle_v2i64 : vsetcc_type<v2i64, SETLE>;
+def vsetlt_v16i8 : vsetcc_type<v16i8, SETLT>;
+def vsetlt_v8i16 : vsetcc_type<v8i16, SETLT>;
+def vsetlt_v4i32 : vsetcc_type<v4i32, SETLT>;
+def vsetlt_v2i64 : vsetcc_type<v2i64, SETLT>;
+def vsetule_v16i8 : vsetcc_type<v16i8, SETULE>;
+def vsetule_v8i16 : vsetcc_type<v8i16, SETULE>;
+def vsetule_v4i32 : vsetcc_type<v4i32, SETULE>;
+def vsetule_v2i64 : vsetcc_type<v2i64, SETULE>;
+def vsetult_v16i8 : vsetcc_type<v16i8, SETULT>;
+def vsetult_v8i16 : vsetcc_type<v8i16, SETULT>;
+def vsetult_v4i32 : vsetcc_type<v4i32, SETULT>;
+def vsetult_v2i64 : vsetcc_type<v2i64, SETULT>;
+
+def vsplati8 : PatFrag<(ops node:$e0),
+ (v16i8 (build_vector node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0))>;
+def vsplati16 : PatFrag<(ops node:$e0),
+ (v8i16 (build_vector node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0))>;
+def vsplati32 : PatFrag<(ops node:$e0),
+ (v4i32 (build_vector node:$e0, node:$e0,
+ node:$e0, node:$e0))>;
+def vsplati64 : PatFrag<(ops node:$e0),
+ (v2i64 (build_vector:$v0 node:$e0, node:$e0))>;
+def vsplatf32 : PatFrag<(ops node:$e0),
+ (v4f32 (build_vector node:$e0, node:$e0,
+ node:$e0, node:$e0))>;
+def vsplatf64 : PatFrag<(ops node:$e0),
+ (v2f64 (build_vector node:$e0, node:$e0))>;
+
+def vsplati8_elt : PatFrag<(ops node:$v, node:$i),
+ (MipsVSHF (vsplati8 node:$i), node:$v, node:$v)>;
+def vsplati16_elt : PatFrag<(ops node:$v, node:$i),
+ (MipsVSHF (vsplati16 node:$i), node:$v, node:$v)>;
+def vsplati32_elt : PatFrag<(ops node:$v, node:$i),
+ (MipsVSHF (vsplati32 node:$i), node:$v, node:$v)>;
+def vsplati64_elt : PatFrag<(ops node:$v, node:$i),
+ (MipsVSHF (vsplati64 node:$i), node:$v, node:$v)>;
+
+class SplatPatLeaf<Operand opclass, dag frag, code pred = [{}],
+ SDNodeXForm xform = NOOP_SDNodeXForm>
+ : PatLeaf<frag, pred, xform> {
+ Operand OpClass = opclass;
+}
+
+class SplatComplexPattern<Operand opclass, ValueType ty, int numops, string fn,
+ list<SDNode> roots = [],
+ list<SDNodeProperty> props = []> :
+ ComplexPattern<ty, numops, fn, roots, props> {
+ Operand OpClass = opclass;
+}
+
+def vsplati8_uimm3 : SplatComplexPattern<vsplat_uimm3, v16i8, 1,
+ "selectVSplatUimm3",
+ [build_vector, bitconvert]>;
+
+def vsplati8_uimm4 : SplatComplexPattern<vsplat_uimm4, v16i8, 1,
+ "selectVSplatUimm4",
+ [build_vector, bitconvert]>;
+
+def vsplati8_uimm5 : SplatComplexPattern<vsplat_uimm5, v16i8, 1,
+ "selectVSplatUimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati8_uimm8 : SplatComplexPattern<vsplat_uimm8, v16i8, 1,
+ "selectVSplatUimm8",
+ [build_vector, bitconvert]>;
+
+def vsplati8_simm5 : SplatComplexPattern<vsplat_simm5, v16i8, 1,
+ "selectVSplatSimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati16_uimm3 : SplatComplexPattern<vsplat_uimm3, v8i16, 1,
+ "selectVSplatUimm3",
+ [build_vector, bitconvert]>;
+
+def vsplati16_uimm4 : SplatComplexPattern<vsplat_uimm4, v8i16, 1,
+ "selectVSplatUimm4",
+ [build_vector, bitconvert]>;
+
+def vsplati16_uimm5 : SplatComplexPattern<vsplat_uimm5, v8i16, 1,
+ "selectVSplatUimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati16_simm5 : SplatComplexPattern<vsplat_simm5, v8i16, 1,
+ "selectVSplatSimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati32_uimm2 : SplatComplexPattern<vsplat_uimm2, v4i32, 1,
+ "selectVSplatUimm2",
+ [build_vector, bitconvert]>;
+
+def vsplati32_uimm5 : SplatComplexPattern<vsplat_uimm5, v4i32, 1,
+ "selectVSplatUimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati32_simm5 : SplatComplexPattern<vsplat_simm5, v4i32, 1,
+ "selectVSplatSimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati64_uimm1 : SplatComplexPattern<vsplat_uimm1, v2i64, 1,
+ "selectVSplatUimm1",
+ [build_vector, bitconvert]>;
+
+def vsplati64_uimm5 : SplatComplexPattern<vsplat_uimm5, v2i64, 1,
+ "selectVSplatUimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati64_uimm6 : SplatComplexPattern<vsplat_uimm6, v2i64, 1,
+ "selectVSplatUimm6",
+ [build_vector, bitconvert]>;
+
+def vsplati64_simm5 : SplatComplexPattern<vsplat_simm5, v2i64, 1,
+ "selectVSplatSimm5",
+ [build_vector, bitconvert]>;
+
+// Any build_vector that is a constant splat with a value that is an exact
+// power of 2
+def vsplat_uimm_pow2 : ComplexPattern<vAny, 1, "selectVSplatUimmPow2",
+ [build_vector, bitconvert]>;
+
+// Any build_vector that is a constant splat with a value that is the bitwise
+// inverse of an exact power of 2
+def vsplat_uimm_inv_pow2 : ComplexPattern<vAny, 1, "selectVSplatUimmInvPow2",
+ [build_vector, bitconvert]>;
+
+// Any build_vector that is a constant splat with only a consecutive sequence
+// of left-most bits set.
+def vsplat_maskl_bits : SplatComplexPattern<vsplat_uimm8, vAny, 1,
+ "selectVSplatMaskL",
+ [build_vector, bitconvert]>;
+
+// Any build_vector that is a constant splat with only a consecutive sequence
+// of right-most bits set.
+def vsplat_maskr_bits : SplatComplexPattern<vsplat_uimm8, vAny, 1,
+ "selectVSplatMaskR",
+ [build_vector, bitconvert]>;
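[Editorial aside, not part of the patch] A minimal sketch of the element values the four splat predicates above are meant to accept, shown for an 8-bit element; the in-tree selectVSplat* routines work on the splatted APInt, so these helpers are illustrative assumptions only.

  #include <cstdint>

  static bool isPow2(uint8_t V)    { return V != 0 && (V & (V - 1)) == 0; }     // vsplat_uimm_pow2
  static bool isInvPow2(uint8_t V) { return isPow2(static_cast<uint8_t>(~V)); } // vsplat_uimm_inv_pow2
  static bool isLeftMask(uint8_t V) {                                           // vsplat_maskl_bits
    uint8_t Inv = static_cast<uint8_t>(~V);   // e.g. 0b11100000 -> 0b00011111
    return V != 0 && (Inv & (Inv + 1)) == 0;  // inverse is a right mask (or zero)
  }
  static bool isRightMask(uint8_t V) { return V != 0 && (V & (V + 1)) == 0; }    // vsplat_maskr_bits, e.g. 0b00000111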
+
+// Any build_vector that is a constant splat with a value that equals 1
+// FIXME: These should be a ComplexPattern but we can't use them because the
+// ISel generator requires the uses to have a name, but providing a name
+// causes other errors ("used in pattern but not operand list")
+def vsplat_imm_eq_1 : PatLeaf<(build_vector), [{
+ APInt Imm;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ return selectVSplat (N, Imm) &&
+ Imm.getBitWidth() == EltTy.getSizeInBits() && Imm == 1;
+}]>;
+
+def vsplati64_imm_eq_1 : PatLeaf<(bitconvert (v4i32 (build_vector))), [{
+ APInt Imm;
+ SDNode *BV = N->getOperand(0).getNode();
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ return selectVSplat (BV, Imm) &&
+ Imm.getBitWidth() == EltTy.getSizeInBits() && Imm == 1;
+}]>;
+
+def vbclr_b : PatFrag<(ops node:$ws, node:$wt),
+ (and node:$ws, (xor (shl vsplat_imm_eq_1, node:$wt),
+ immAllOnesV))>;
+def vbclr_h : PatFrag<(ops node:$ws, node:$wt),
+ (and node:$ws, (xor (shl vsplat_imm_eq_1, node:$wt),
+ immAllOnesV))>;
+def vbclr_w : PatFrag<(ops node:$ws, node:$wt),
+ (and node:$ws, (xor (shl vsplat_imm_eq_1, node:$wt),
+ immAllOnesV))>;
+def vbclr_d : PatFrag<(ops node:$ws, node:$wt),
+ (and node:$ws, (xor (shl (v2i64 vsplati64_imm_eq_1),
+ node:$wt),
+ (bitconvert (v4i32 immAllOnesV))))>;
+
+def vbneg_b : PatFrag<(ops node:$ws, node:$wt),
+ (xor node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbneg_h : PatFrag<(ops node:$ws, node:$wt),
+ (xor node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbneg_w : PatFrag<(ops node:$ws, node:$wt),
+ (xor node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbneg_d : PatFrag<(ops node:$ws, node:$wt),
+ (xor node:$ws, (shl (v2i64 vsplati64_imm_eq_1),
+ node:$wt))>;
+
+def vbset_b : PatFrag<(ops node:$ws, node:$wt),
+ (or node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbset_h : PatFrag<(ops node:$ws, node:$wt),
+ (or node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbset_w : PatFrag<(ops node:$ws, node:$wt),
+ (or node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbset_d : PatFrag<(ops node:$ws, node:$wt),
+ (or node:$ws, (shl (v2i64 vsplati64_imm_eq_1),
+ node:$wt))>;
+
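[Editorial aside, not part of the patch] A minimal sketch of the per-element scalar operations the vbclr/vbneg/vbset fragments above describe: clear, flip, or set the bit selected by the second operand. Reducing the shift amount modulo the element width is an assumption made for illustration, not something stated by the patterns themselves.

  #include <cstdint>

  static uint32_t bclr(uint32_t Ws, uint32_t Wt) { return Ws & ~(1u << (Wt & 31)); } // and(ws, not(shl(1, wt)))
  static uint32_t bneg(uint32_t Ws, uint32_t Wt) { return Ws ^  (1u << (Wt & 31)); } // xor(ws, shl(1, wt))
  static uint32_t bset(uint32_t Ws, uint32_t Wt) { return Ws |  (1u << (Wt & 31)); } // or(ws, shl(1, wt))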
+def fms : PatFrag<(ops node:$wd, node:$ws, node:$wt),
+ (fsub node:$wd, (fmul node:$ws, node:$wt))>;
+
+def muladd : PatFrag<(ops node:$wd, node:$ws, node:$wt),
+ (add node:$wd, (mul node:$ws, node:$wt))>;
+
+def mulsub : PatFrag<(ops node:$wd, node:$ws, node:$wt),
+ (sub node:$wd, (mul node:$ws, node:$wt))>;
+
+def mul_fexp2 : PatFrag<(ops node:$ws, node:$wt),
+ (fmul node:$ws, (fexp2 node:$wt))>;
+
+// Immediates
+def immSExt5 : ImmLeaf<i32, [{return isInt<5>(Imm);}]>;
+def immSExt10: ImmLeaf<i32, [{return isInt<10>(Imm);}]>;
+
+// Instruction encoding.
+class ADD_A_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010000>;
+class ADD_A_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010000>;
+class ADD_A_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010000>;
+class ADD_A_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010000>;
+
+class ADDS_A_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010000>;
+class ADDS_A_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010000>;
+class ADDS_A_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010000>;
+class ADDS_A_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010000>;
+
+class ADDS_S_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010000>;
+class ADDS_S_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010000>;
+class ADDS_S_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010000>;
+class ADDS_S_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010000>;
+
+class ADDS_U_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b010000>;
+class ADDS_U_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b010000>;
+class ADDS_U_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b010000>;
+class ADDS_U_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b010000>;
+
+class ADDV_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b001110>;
+class ADDV_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b001110>;
+class ADDV_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b001110>;
+class ADDV_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b001110>;
+
+class ADDVI_B_ENC : MSA_I5_FMT<0b000, 0b00, 0b000110>;
+class ADDVI_H_ENC : MSA_I5_FMT<0b000, 0b01, 0b000110>;
+class ADDVI_W_ENC : MSA_I5_FMT<0b000, 0b10, 0b000110>;
+class ADDVI_D_ENC : MSA_I5_FMT<0b000, 0b11, 0b000110>;
+
+class AND_V_ENC : MSA_VEC_FMT<0b00000, 0b011110>;
+
+class ANDI_B_ENC : MSA_I8_FMT<0b00, 0b000000>;
+
+class ASUB_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b010001>;
+class ASUB_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010001>;
+class ASUB_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010001>;
+class ASUB_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010001>;
+
+class ASUB_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b010001>;
+class ASUB_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010001>;
+class ASUB_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010001>;
+class ASUB_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010001>;
+
+class AVE_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b010000>;
+class AVE_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010000>;
+class AVE_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010000>;
+class AVE_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010000>;
+
+class AVE_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b010000>;
+class AVE_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010000>;
+class AVE_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010000>;
+class AVE_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010000>;
+
+class AVER_S_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b010000>;
+class AVER_S_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b010000>;
+class AVER_S_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b010000>;
+class AVER_S_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b010000>;
+
+class AVER_U_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b010000>;
+class AVER_U_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b010000>;
+class AVER_U_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b010000>;
+class AVER_U_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b010000>;
+
+class BCLR_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b001101>;
+class BCLR_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b001101>;
+class BCLR_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b001101>;
+class BCLR_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b001101>;
+
+class BCLRI_B_ENC : MSA_BIT_B_FMT<0b011, 0b001001>;
+class BCLRI_H_ENC : MSA_BIT_H_FMT<0b011, 0b001001>;
+class BCLRI_W_ENC : MSA_BIT_W_FMT<0b011, 0b001001>;
+class BCLRI_D_ENC : MSA_BIT_D_FMT<0b011, 0b001001>;
+
+class BINSL_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b001101>;
+class BINSL_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b001101>;
+class BINSL_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b001101>;
+class BINSL_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b001101>;
+
+class BINSLI_B_ENC : MSA_BIT_B_FMT<0b110, 0b001001>;
+class BINSLI_H_ENC : MSA_BIT_H_FMT<0b110, 0b001001>;
+class BINSLI_W_ENC : MSA_BIT_W_FMT<0b110, 0b001001>;
+class BINSLI_D_ENC : MSA_BIT_D_FMT<0b110, 0b001001>;
+
+class BINSR_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b001101>;
+class BINSR_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b001101>;
+class BINSR_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b001101>;
+class BINSR_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b001101>;
+
+class BINSRI_B_ENC : MSA_BIT_B_FMT<0b111, 0b001001>;
+class BINSRI_H_ENC : MSA_BIT_H_FMT<0b111, 0b001001>;
+class BINSRI_W_ENC : MSA_BIT_W_FMT<0b111, 0b001001>;
+class BINSRI_D_ENC : MSA_BIT_D_FMT<0b111, 0b001001>;
+
+class BMNZ_V_ENC : MSA_VEC_FMT<0b00100, 0b011110>;
+
+class BMNZI_B_ENC : MSA_I8_FMT<0b00, 0b000001>;
+
+class BMZ_V_ENC : MSA_VEC_FMT<0b00101, 0b011110>;
+
+class BMZI_B_ENC : MSA_I8_FMT<0b01, 0b000001>;
+
+class BNEG_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b001101>;
+class BNEG_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b001101>;
+class BNEG_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b001101>;
+class BNEG_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b001101>;
+
+class BNEGI_B_ENC : MSA_BIT_B_FMT<0b101, 0b001001>;
+class BNEGI_H_ENC : MSA_BIT_H_FMT<0b101, 0b001001>;
+class BNEGI_W_ENC : MSA_BIT_W_FMT<0b101, 0b001001>;
+class BNEGI_D_ENC : MSA_BIT_D_FMT<0b101, 0b001001>;
+
+class BNZ_B_ENC : MSA_CBRANCH_FMT<0b111, 0b00>;
+class BNZ_H_ENC : MSA_CBRANCH_FMT<0b111, 0b01>;
+class BNZ_W_ENC : MSA_CBRANCH_FMT<0b111, 0b10>;
+class BNZ_D_ENC : MSA_CBRANCH_FMT<0b111, 0b11>;
+
+class BNZ_V_ENC : MSA_CBRANCH_V_FMT<0b01111>;
+
+class BSEL_V_ENC : MSA_VEC_FMT<0b00110, 0b011110>;
+
+class BSELI_B_ENC : MSA_I8_FMT<0b10, 0b000001>;
+
+class BSET_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b001101>;
+class BSET_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b001101>;
+class BSET_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b001101>;
+class BSET_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b001101>;
+
+class BSETI_B_ENC : MSA_BIT_B_FMT<0b100, 0b001001>;
+class BSETI_H_ENC : MSA_BIT_H_FMT<0b100, 0b001001>;
+class BSETI_W_ENC : MSA_BIT_W_FMT<0b100, 0b001001>;
+class BSETI_D_ENC : MSA_BIT_D_FMT<0b100, 0b001001>;
+
+class BZ_B_ENC : MSA_CBRANCH_FMT<0b110, 0b00>;
+class BZ_H_ENC : MSA_CBRANCH_FMT<0b110, 0b01>;
+class BZ_W_ENC : MSA_CBRANCH_FMT<0b110, 0b10>;
+class BZ_D_ENC : MSA_CBRANCH_FMT<0b110, 0b11>;
+
+class BZ_V_ENC : MSA_CBRANCH_V_FMT<0b01011>;
+
+class CEQ_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b001111>;
+class CEQ_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b001111>;
+class CEQ_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b001111>;
+class CEQ_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b001111>;
+
+class CEQI_B_ENC : MSA_I5_FMT<0b000, 0b00, 0b000111>;
+class CEQI_H_ENC : MSA_I5_FMT<0b000, 0b01, 0b000111>;
+class CEQI_W_ENC : MSA_I5_FMT<0b000, 0b10, 0b000111>;
+class CEQI_D_ENC : MSA_I5_FMT<0b000, 0b11, 0b000111>;
+
+class CFCMSA_ENC : MSA_ELM_CFCMSA_FMT<0b0001111110, 0b011001>;
+
+class CLE_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b001111>;
+class CLE_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b001111>;
+class CLE_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b001111>;
+class CLE_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b001111>;
+
+class CLE_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b001111>;
+class CLE_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b001111>;
+class CLE_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b001111>;
+class CLE_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b001111>;
+
+class CLEI_S_B_ENC : MSA_I5_FMT<0b100, 0b00, 0b000111>;
+class CLEI_S_H_ENC : MSA_I5_FMT<0b100, 0b01, 0b000111>;
+class CLEI_S_W_ENC : MSA_I5_FMT<0b100, 0b10, 0b000111>;
+class CLEI_S_D_ENC : MSA_I5_FMT<0b100, 0b11, 0b000111>;
+
+class CLEI_U_B_ENC : MSA_I5_FMT<0b101, 0b00, 0b000111>;
+class CLEI_U_H_ENC : MSA_I5_FMT<0b101, 0b01, 0b000111>;
+class CLEI_U_W_ENC : MSA_I5_FMT<0b101, 0b10, 0b000111>;
+class CLEI_U_D_ENC : MSA_I5_FMT<0b101, 0b11, 0b000111>;
+
+class CLT_S_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b001111>;
+class CLT_S_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b001111>;
+class CLT_S_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b001111>;
+class CLT_S_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b001111>;
+
+class CLT_U_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b001111>;
+class CLT_U_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b001111>;
+class CLT_U_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b001111>;
+class CLT_U_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b001111>;
+
+class CLTI_S_B_ENC : MSA_I5_FMT<0b010, 0b00, 0b000111>;
+class CLTI_S_H_ENC : MSA_I5_FMT<0b010, 0b01, 0b000111>;
+class CLTI_S_W_ENC : MSA_I5_FMT<0b010, 0b10, 0b000111>;
+class CLTI_S_D_ENC : MSA_I5_FMT<0b010, 0b11, 0b000111>;
+
+class CLTI_U_B_ENC : MSA_I5_FMT<0b011, 0b00, 0b000111>;
+class CLTI_U_H_ENC : MSA_I5_FMT<0b011, 0b01, 0b000111>;
+class CLTI_U_W_ENC : MSA_I5_FMT<0b011, 0b10, 0b000111>;
+class CLTI_U_D_ENC : MSA_I5_FMT<0b011, 0b11, 0b000111>;
+
+class COPY_S_B_ENC : MSA_ELM_COPY_B_FMT<0b0010, 0b011001>;
+class COPY_S_H_ENC : MSA_ELM_COPY_H_FMT<0b0010, 0b011001>;
+class COPY_S_W_ENC : MSA_ELM_COPY_W_FMT<0b0010, 0b011001>;
+
+class COPY_U_B_ENC : MSA_ELM_COPY_B_FMT<0b0011, 0b011001>;
+class COPY_U_H_ENC : MSA_ELM_COPY_H_FMT<0b0011, 0b011001>;
+class COPY_U_W_ENC : MSA_ELM_COPY_W_FMT<0b0011, 0b011001>;
+
+class CTCMSA_ENC : MSA_ELM_CTCMSA_FMT<0b0000111110, 0b011001>;
+
+class DIV_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b010010>;
+class DIV_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010010>;
+class DIV_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010010>;
+class DIV_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010010>;
+
+class DIV_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b010010>;
+class DIV_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010010>;
+class DIV_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010010>;
+class DIV_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010010>;
+
+class DOTP_S_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010011>;
+class DOTP_S_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010011>;
+class DOTP_S_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010011>;
+
+class DOTP_U_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010011>;
+class DOTP_U_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010011>;
+class DOTP_U_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010011>;
+
+class DPADD_S_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010011>;
+class DPADD_S_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010011>;
+class DPADD_S_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010011>;
+
+class DPADD_U_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b010011>;
+class DPADD_U_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b010011>;
+class DPADD_U_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b010011>;
+
+class DPSUB_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010011>;
+class DPSUB_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010011>;
+class DPSUB_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010011>;
+
+class DPSUB_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010011>;
+class DPSUB_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010011>;
+class DPSUB_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010011>;
+
+class FADD_W_ENC : MSA_3RF_FMT<0b0000, 0b0, 0b011011>;
+class FADD_D_ENC : MSA_3RF_FMT<0b0000, 0b1, 0b011011>;
+
+class FCAF_W_ENC : MSA_3RF_FMT<0b0000, 0b0, 0b011010>;
+class FCAF_D_ENC : MSA_3RF_FMT<0b0000, 0b1, 0b011010>;
+
+class FCEQ_W_ENC : MSA_3RF_FMT<0b0010, 0b0, 0b011010>;
+class FCEQ_D_ENC : MSA_3RF_FMT<0b0010, 0b1, 0b011010>;
+
+class FCLASS_W_ENC : MSA_2RF_FMT<0b110010000, 0b0, 0b011110>;
+class FCLASS_D_ENC : MSA_2RF_FMT<0b110010000, 0b1, 0b011110>;
+
+class FCLE_W_ENC : MSA_3RF_FMT<0b0110, 0b0, 0b011010>;
+class FCLE_D_ENC : MSA_3RF_FMT<0b0110, 0b1, 0b011010>;
+
+class FCLT_W_ENC : MSA_3RF_FMT<0b0100, 0b0, 0b011010>;
+class FCLT_D_ENC : MSA_3RF_FMT<0b0100, 0b1, 0b011010>;
+
+class FCNE_W_ENC : MSA_3RF_FMT<0b0011, 0b0, 0b011100>;
+class FCNE_D_ENC : MSA_3RF_FMT<0b0011, 0b1, 0b011100>;
+
+class FCOR_W_ENC : MSA_3RF_FMT<0b0001, 0b0, 0b011100>;
+class FCOR_D_ENC : MSA_3RF_FMT<0b0001, 0b1, 0b011100>;
+
+class FCUEQ_W_ENC : MSA_3RF_FMT<0b0011, 0b0, 0b011010>;
+class FCUEQ_D_ENC : MSA_3RF_FMT<0b0011, 0b1, 0b011010>;
+
+class FCULE_W_ENC : MSA_3RF_FMT<0b0111, 0b0, 0b011010>;
+class FCULE_D_ENC : MSA_3RF_FMT<0b0111, 0b1, 0b011010>;
+
+class FCULT_W_ENC : MSA_3RF_FMT<0b0101, 0b0, 0b011010>;
+class FCULT_D_ENC : MSA_3RF_FMT<0b0101, 0b1, 0b011010>;
+
+class FCUN_W_ENC : MSA_3RF_FMT<0b0001, 0b0, 0b011010>;
+class FCUN_D_ENC : MSA_3RF_FMT<0b0001, 0b1, 0b011010>;
+
+class FCUNE_W_ENC : MSA_3RF_FMT<0b0010, 0b0, 0b011100>;
+class FCUNE_D_ENC : MSA_3RF_FMT<0b0010, 0b1, 0b011100>;
+
+class FDIV_W_ENC : MSA_3RF_FMT<0b0011, 0b0, 0b011011>;
+class FDIV_D_ENC : MSA_3RF_FMT<0b0011, 0b1, 0b011011>;
+
+class FEXDO_H_ENC : MSA_3RF_FMT<0b1000, 0b0, 0b011011>;
+class FEXDO_W_ENC : MSA_3RF_FMT<0b1000, 0b1, 0b011011>;
+
+class FEXP2_W_ENC : MSA_3RF_FMT<0b0111, 0b0, 0b011011>;
+class FEXP2_D_ENC : MSA_3RF_FMT<0b0111, 0b1, 0b011011>;
+
+class FEXUPL_W_ENC : MSA_2RF_FMT<0b110011000, 0b0, 0b011110>;
+class FEXUPL_D_ENC : MSA_2RF_FMT<0b110011000, 0b1, 0b011110>;
+
+class FEXUPR_W_ENC : MSA_2RF_FMT<0b110011001, 0b0, 0b011110>;
+class FEXUPR_D_ENC : MSA_2RF_FMT<0b110011001, 0b1, 0b011110>;
+
+class FFINT_S_W_ENC : MSA_2RF_FMT<0b110011110, 0b0, 0b011110>;
+class FFINT_S_D_ENC : MSA_2RF_FMT<0b110011110, 0b1, 0b011110>;
+
+class FFINT_U_W_ENC : MSA_2RF_FMT<0b110011111, 0b0, 0b011110>;
+class FFINT_U_D_ENC : MSA_2RF_FMT<0b110011111, 0b1, 0b011110>;
+
+class FFQL_W_ENC : MSA_2RF_FMT<0b110011010, 0b0, 0b011110>;
+class FFQL_D_ENC : MSA_2RF_FMT<0b110011010, 0b1, 0b011110>;
+
+class FFQR_W_ENC : MSA_2RF_FMT<0b110011011, 0b0, 0b011110>;
+class FFQR_D_ENC : MSA_2RF_FMT<0b110011011, 0b1, 0b011110>;
+
+class FILL_B_ENC : MSA_2R_FILL_FMT<0b11000000, 0b00, 0b011110>;
+class FILL_H_ENC : MSA_2R_FILL_FMT<0b11000000, 0b01, 0b011110>;
+class FILL_W_ENC : MSA_2R_FILL_FMT<0b11000000, 0b10, 0b011110>;
+
+class FLOG2_W_ENC : MSA_2RF_FMT<0b110010111, 0b0, 0b011110>;
+class FLOG2_D_ENC : MSA_2RF_FMT<0b110010111, 0b1, 0b011110>;
+
+class FMADD_W_ENC : MSA_3RF_FMT<0b0100, 0b0, 0b011011>;
+class FMADD_D_ENC : MSA_3RF_FMT<0b0100, 0b1, 0b011011>;
+
+class FMAX_W_ENC : MSA_3RF_FMT<0b1110, 0b0, 0b011011>;
+class FMAX_D_ENC : MSA_3RF_FMT<0b1110, 0b1, 0b011011>;
+
+class FMAX_A_W_ENC : MSA_3RF_FMT<0b1111, 0b0, 0b011011>;
+class FMAX_A_D_ENC : MSA_3RF_FMT<0b1111, 0b1, 0b011011>;
+
+class FMIN_W_ENC : MSA_3RF_FMT<0b1100, 0b0, 0b011011>;
+class FMIN_D_ENC : MSA_3RF_FMT<0b1100, 0b1, 0b011011>;
+
+class FMIN_A_W_ENC : MSA_3RF_FMT<0b1101, 0b0, 0b011011>;
+class FMIN_A_D_ENC : MSA_3RF_FMT<0b1101, 0b1, 0b011011>;
+
+class FMSUB_W_ENC : MSA_3RF_FMT<0b0101, 0b0, 0b011011>;
+class FMSUB_D_ENC : MSA_3RF_FMT<0b0101, 0b1, 0b011011>;
+
+class FMUL_W_ENC : MSA_3RF_FMT<0b0010, 0b0, 0b011011>;
+class FMUL_D_ENC : MSA_3RF_FMT<0b0010, 0b1, 0b011011>;
+
+class FRINT_W_ENC : MSA_2RF_FMT<0b110010110, 0b0, 0b011110>;
+class FRINT_D_ENC : MSA_2RF_FMT<0b110010110, 0b1, 0b011110>;
+
+class FRCP_W_ENC : MSA_2RF_FMT<0b110010101, 0b0, 0b011110>;
+class FRCP_D_ENC : MSA_2RF_FMT<0b110010101, 0b1, 0b011110>;
+
+class FRSQRT_W_ENC : MSA_2RF_FMT<0b110010100, 0b0, 0b011110>;
+class FRSQRT_D_ENC : MSA_2RF_FMT<0b110010100, 0b1, 0b011110>;
+
+class FSAF_W_ENC : MSA_3RF_FMT<0b1000, 0b0, 0b011010>;
+class FSAF_D_ENC : MSA_3RF_FMT<0b1000, 0b1, 0b011010>;
+
+class FSEQ_W_ENC : MSA_3RF_FMT<0b1010, 0b0, 0b011010>;
+class FSEQ_D_ENC : MSA_3RF_FMT<0b1010, 0b1, 0b011010>;
+
+class FSLE_W_ENC : MSA_3RF_FMT<0b1110, 0b0, 0b011010>;
+class FSLE_D_ENC : MSA_3RF_FMT<0b1110, 0b1, 0b011010>;
+
+class FSLT_W_ENC : MSA_3RF_FMT<0b1100, 0b0, 0b011010>;
+class FSLT_D_ENC : MSA_3RF_FMT<0b1100, 0b1, 0b011010>;
+
+class FSNE_W_ENC : MSA_3RF_FMT<0b1011, 0b0, 0b011100>;
+class FSNE_D_ENC : MSA_3RF_FMT<0b1011, 0b1, 0b011100>;
+
+class FSOR_W_ENC : MSA_3RF_FMT<0b1001, 0b0, 0b011100>;
+class FSOR_D_ENC : MSA_3RF_FMT<0b1001, 0b1, 0b011100>;
+
+class FSQRT_W_ENC : MSA_2RF_FMT<0b110010011, 0b0, 0b011110>;
+class FSQRT_D_ENC : MSA_2RF_FMT<0b110010011, 0b1, 0b011110>;
+
+class FSUB_W_ENC : MSA_3RF_FMT<0b0001, 0b0, 0b011011>;
+class FSUB_D_ENC : MSA_3RF_FMT<0b0001, 0b1, 0b011011>;
+
+class FSUEQ_W_ENC : MSA_3RF_FMT<0b1011, 0b0, 0b011010>;
+class FSUEQ_D_ENC : MSA_3RF_FMT<0b1011, 0b1, 0b011010>;
+
+class FSULE_W_ENC : MSA_3RF_FMT<0b1111, 0b0, 0b011010>;
+class FSULE_D_ENC : MSA_3RF_FMT<0b1111, 0b1, 0b011010>;
+
+class FSULT_W_ENC : MSA_3RF_FMT<0b1101, 0b0, 0b011010>;
+class FSULT_D_ENC : MSA_3RF_FMT<0b1101, 0b1, 0b011010>;
+
+class FSUN_W_ENC : MSA_3RF_FMT<0b1001, 0b0, 0b011010>;
+class FSUN_D_ENC : MSA_3RF_FMT<0b1001, 0b1, 0b011010>;
+
+class FSUNE_W_ENC : MSA_3RF_FMT<0b1010, 0b0, 0b011100>;
+class FSUNE_D_ENC : MSA_3RF_FMT<0b1010, 0b1, 0b011100>;
+
+class FTINT_S_W_ENC : MSA_2RF_FMT<0b110011100, 0b0, 0b011110>;
+class FTINT_S_D_ENC : MSA_2RF_FMT<0b110011100, 0b1, 0b011110>;
+
+class FTINT_U_W_ENC : MSA_2RF_FMT<0b110011101, 0b0, 0b011110>;
+class FTINT_U_D_ENC : MSA_2RF_FMT<0b110011101, 0b1, 0b011110>;
+
+class FTQ_H_ENC : MSA_3RF_FMT<0b1010, 0b0, 0b011011>;
+class FTQ_W_ENC : MSA_3RF_FMT<0b1010, 0b1, 0b011011>;
+
+class FTRUNC_S_W_ENC : MSA_2RF_FMT<0b110010001, 0b0, 0b011110>;
+class FTRUNC_S_D_ENC : MSA_2RF_FMT<0b110010001, 0b1, 0b011110>;
+
+class FTRUNC_U_W_ENC : MSA_2RF_FMT<0b110010010, 0b0, 0b011110>;
+class FTRUNC_U_D_ENC : MSA_2RF_FMT<0b110010010, 0b1, 0b011110>;
+
+class HADD_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010101>;
+class HADD_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010101>;
+class HADD_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010101>;
+
+class HADD_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010101>;
+class HADD_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010101>;
+class HADD_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010101>;
+
+class HSUB_S_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b010101>;
+class HSUB_S_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b010101>;
+class HSUB_S_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b010101>;
+
+class HSUB_U_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b010101>;
+class HSUB_U_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b010101>;
+class HSUB_U_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b010101>;
+
+class ILVEV_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b010100>;
+class ILVEV_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b010100>;
+class ILVEV_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b010100>;
+class ILVEV_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b010100>;
+
+class ILVL_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b010100>;
+class ILVL_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010100>;
+class ILVL_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010100>;
+class ILVL_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010100>;
+
+class ILVOD_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b010100>;
+class ILVOD_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b010100>;
+class ILVOD_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b010100>;
+class ILVOD_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b010100>;
+
+class ILVR_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b010100>;
+class ILVR_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010100>;
+class ILVR_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010100>;
+class ILVR_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010100>;
+
+class INSERT_B_ENC : MSA_ELM_INSERT_B_FMT<0b0100, 0b011001>;
+class INSERT_H_ENC : MSA_ELM_INSERT_H_FMT<0b0100, 0b011001>;
+class INSERT_W_ENC : MSA_ELM_INSERT_W_FMT<0b0100, 0b011001>;
+
+class INSVE_B_ENC : MSA_ELM_B_FMT<0b0101, 0b011001>;
+class INSVE_H_ENC : MSA_ELM_H_FMT<0b0101, 0b011001>;
+class INSVE_W_ENC : MSA_ELM_W_FMT<0b0101, 0b011001>;
+class INSVE_D_ENC : MSA_ELM_D_FMT<0b0101, 0b011001>;
+
+class LD_B_ENC : MSA_MI10_FMT<0b00, 0b1000>;
+class LD_H_ENC : MSA_MI10_FMT<0b01, 0b1000>;
+class LD_W_ENC : MSA_MI10_FMT<0b10, 0b1000>;
+class LD_D_ENC : MSA_MI10_FMT<0b11, 0b1000>;
+
+class LDI_B_ENC : MSA_I10_FMT<0b110, 0b00, 0b000111>;
+class LDI_H_ENC : MSA_I10_FMT<0b110, 0b01, 0b000111>;
+class LDI_W_ENC : MSA_I10_FMT<0b110, 0b10, 0b000111>;
+class LDI_D_ENC : MSA_I10_FMT<0b110, 0b11, 0b000111>;
+
+class LSA_ENC : SPECIAL_LSA_FMT<0b000101>;
+
+class MADD_Q_H_ENC : MSA_3RF_FMT<0b0101, 0b0, 0b011100>;
+class MADD_Q_W_ENC : MSA_3RF_FMT<0b0101, 0b1, 0b011100>;
+
+class MADDR_Q_H_ENC : MSA_3RF_FMT<0b1101, 0b0, 0b011100>;
+class MADDR_Q_W_ENC : MSA_3RF_FMT<0b1101, 0b1, 0b011100>;
+
+class MADDV_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010010>;
+class MADDV_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010010>;
+class MADDV_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010010>;
+class MADDV_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010010>;
+
+class MAX_A_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b001110>;
+class MAX_A_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b001110>;
+class MAX_A_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b001110>;
+class MAX_A_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b001110>;
+
+class MAX_S_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b001110>;
+class MAX_S_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b001110>;
+class MAX_S_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b001110>;
+class MAX_S_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b001110>;
+
+class MAX_U_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b001110>;
+class MAX_U_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b001110>;
+class MAX_U_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b001110>;
+class MAX_U_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b001110>;
+
+class MAXI_S_B_ENC : MSA_I5_FMT<0b010, 0b00, 0b000110>;
+class MAXI_S_H_ENC : MSA_I5_FMT<0b010, 0b01, 0b000110>;
+class MAXI_S_W_ENC : MSA_I5_FMT<0b010, 0b10, 0b000110>;
+class MAXI_S_D_ENC : MSA_I5_FMT<0b010, 0b11, 0b000110>;
+
+class MAXI_U_B_ENC : MSA_I5_FMT<0b011, 0b00, 0b000110>;
+class MAXI_U_H_ENC : MSA_I5_FMT<0b011, 0b01, 0b000110>;
+class MAXI_U_W_ENC : MSA_I5_FMT<0b011, 0b10, 0b000110>;
+class MAXI_U_D_ENC : MSA_I5_FMT<0b011, 0b11, 0b000110>;
+
+class MIN_A_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b001110>;
+class MIN_A_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b001110>;
+class MIN_A_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b001110>;
+class MIN_A_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b001110>;
+
+class MIN_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b001110>;
+class MIN_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b001110>;
+class MIN_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b001110>;
+class MIN_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b001110>;
+
+class MIN_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b001110>;
+class MIN_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b001110>;
+class MIN_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b001110>;
+class MIN_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b001110>;
+
+class MINI_S_B_ENC : MSA_I5_FMT<0b100, 0b00, 0b000110>;
+class MINI_S_H_ENC : MSA_I5_FMT<0b100, 0b01, 0b000110>;
+class MINI_S_W_ENC : MSA_I5_FMT<0b100, 0b10, 0b000110>;
+class MINI_S_D_ENC : MSA_I5_FMT<0b100, 0b11, 0b000110>;
+
+class MINI_U_B_ENC : MSA_I5_FMT<0b101, 0b00, 0b000110>;
+class MINI_U_H_ENC : MSA_I5_FMT<0b101, 0b01, 0b000110>;
+class MINI_U_W_ENC : MSA_I5_FMT<0b101, 0b10, 0b000110>;
+class MINI_U_D_ENC : MSA_I5_FMT<0b101, 0b11, 0b000110>;
+
+class MOD_S_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b010010>;
+class MOD_S_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b010010>;
+class MOD_S_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b010010>;
+class MOD_S_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b010010>;
+
+class MOD_U_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b010010>;
+class MOD_U_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b010010>;
+class MOD_U_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b010010>;
+class MOD_U_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b010010>;
+
+class MOVE_V_ENC : MSA_ELM_FMT<0b0010111110, 0b011001>;
+
+class MSUB_Q_H_ENC : MSA_3RF_FMT<0b0110, 0b0, 0b011100>;
+class MSUB_Q_W_ENC : MSA_3RF_FMT<0b0110, 0b1, 0b011100>;
+
+class MSUBR_Q_H_ENC : MSA_3RF_FMT<0b1110, 0b0, 0b011100>;
+class MSUBR_Q_W_ENC : MSA_3RF_FMT<0b1110, 0b1, 0b011100>;
+
+class MSUBV_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010010>;
+class MSUBV_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010010>;
+class MSUBV_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010010>;
+class MSUBV_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010010>;
+
+class MUL_Q_H_ENC : MSA_3RF_FMT<0b0100, 0b0, 0b011100>;
+class MUL_Q_W_ENC : MSA_3RF_FMT<0b0100, 0b1, 0b011100>;
+
+class MULR_Q_H_ENC : MSA_3RF_FMT<0b1100, 0b0, 0b011100>;
+class MULR_Q_W_ENC : MSA_3RF_FMT<0b1100, 0b1, 0b011100>;
+
+class MULV_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010010>;
+class MULV_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010010>;
+class MULV_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010010>;
+class MULV_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010010>;
+
+class NLOC_B_ENC : MSA_2R_FMT<0b11000010, 0b00, 0b011110>;
+class NLOC_H_ENC : MSA_2R_FMT<0b11000010, 0b01, 0b011110>;
+class NLOC_W_ENC : MSA_2R_FMT<0b11000010, 0b10, 0b011110>;
+class NLOC_D_ENC : MSA_2R_FMT<0b11000010, 0b11, 0b011110>;
+
+class NLZC_B_ENC : MSA_2R_FMT<0b11000011, 0b00, 0b011110>;
+class NLZC_H_ENC : MSA_2R_FMT<0b11000011, 0b01, 0b011110>;
+class NLZC_W_ENC : MSA_2R_FMT<0b11000011, 0b10, 0b011110>;
+class NLZC_D_ENC : MSA_2R_FMT<0b11000011, 0b11, 0b011110>;
+
+class NOR_V_ENC : MSA_VEC_FMT<0b00010, 0b011110>;
+
+class NORI_B_ENC : MSA_I8_FMT<0b10, 0b000000>;
+
+class OR_V_ENC : MSA_VEC_FMT<0b00001, 0b011110>;
+
+class ORI_B_ENC : MSA_I8_FMT<0b01, 0b000000>;
+
+class PCKEV_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010100>;
+class PCKEV_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010100>;
+class PCKEV_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010100>;
+class PCKEV_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010100>;
+
+class PCKOD_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b010100>;
+class PCKOD_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b010100>;
+class PCKOD_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b010100>;
+class PCKOD_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b010100>;
+
+class PCNT_B_ENC : MSA_2R_FMT<0b11000001, 0b00, 0b011110>;
+class PCNT_H_ENC : MSA_2R_FMT<0b11000001, 0b01, 0b011110>;
+class PCNT_W_ENC : MSA_2R_FMT<0b11000001, 0b10, 0b011110>;
+class PCNT_D_ENC : MSA_2R_FMT<0b11000001, 0b11, 0b011110>;
+
+class SAT_S_B_ENC : MSA_BIT_B_FMT<0b000, 0b001010>;
+class SAT_S_H_ENC : MSA_BIT_H_FMT<0b000, 0b001010>;
+class SAT_S_W_ENC : MSA_BIT_W_FMT<0b000, 0b001010>;
+class SAT_S_D_ENC : MSA_BIT_D_FMT<0b000, 0b001010>;
+
+class SAT_U_B_ENC : MSA_BIT_B_FMT<0b001, 0b001010>;
+class SAT_U_H_ENC : MSA_BIT_H_FMT<0b001, 0b001010>;
+class SAT_U_W_ENC : MSA_BIT_W_FMT<0b001, 0b001010>;
+class SAT_U_D_ENC : MSA_BIT_D_FMT<0b001, 0b001010>;
+
+class SHF_B_ENC : MSA_I8_FMT<0b00, 0b000010>;
+class SHF_H_ENC : MSA_I8_FMT<0b01, 0b000010>;
+class SHF_W_ENC : MSA_I8_FMT<0b10, 0b000010>;
+
+class SLD_B_ENC : MSA_3R_INDEX_FMT<0b000, 0b00, 0b010100>;
+class SLD_H_ENC : MSA_3R_INDEX_FMT<0b000, 0b01, 0b010100>;
+class SLD_W_ENC : MSA_3R_INDEX_FMT<0b000, 0b10, 0b010100>;
+class SLD_D_ENC : MSA_3R_INDEX_FMT<0b000, 0b11, 0b010100>;
+
+class SLDI_B_ENC : MSA_ELM_B_FMT<0b0000, 0b011001>;
+class SLDI_H_ENC : MSA_ELM_H_FMT<0b0000, 0b011001>;
+class SLDI_W_ENC : MSA_ELM_W_FMT<0b0000, 0b011001>;
+class SLDI_D_ENC : MSA_ELM_D_FMT<0b0000, 0b011001>;
+
+class SLL_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b001101>;
+class SLL_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b001101>;
+class SLL_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b001101>;
+class SLL_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b001101>;
+
+class SLLI_B_ENC : MSA_BIT_B_FMT<0b000, 0b001001>;
+class SLLI_H_ENC : MSA_BIT_H_FMT<0b000, 0b001001>;
+class SLLI_W_ENC : MSA_BIT_W_FMT<0b000, 0b001001>;
+class SLLI_D_ENC : MSA_BIT_D_FMT<0b000, 0b001001>;
+
+class SPLAT_B_ENC : MSA_3R_INDEX_FMT<0b001, 0b00, 0b010100>;
+class SPLAT_H_ENC : MSA_3R_INDEX_FMT<0b001, 0b01, 0b010100>;
+class SPLAT_W_ENC : MSA_3R_INDEX_FMT<0b001, 0b10, 0b010100>;
+class SPLAT_D_ENC : MSA_3R_INDEX_FMT<0b001, 0b11, 0b010100>;
+
+class SPLATI_B_ENC : MSA_ELM_B_FMT<0b0001, 0b011001>;
+class SPLATI_H_ENC : MSA_ELM_H_FMT<0b0001, 0b011001>;
+class SPLATI_W_ENC : MSA_ELM_W_FMT<0b0001, 0b011001>;
+class SPLATI_D_ENC : MSA_ELM_D_FMT<0b0001, 0b011001>;
+
+class SRA_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b001101>;
+class SRA_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b001101>;
+class SRA_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b001101>;
+class SRA_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b001101>;
+
+class SRAI_B_ENC : MSA_BIT_B_FMT<0b001, 0b001001>;
+class SRAI_H_ENC : MSA_BIT_H_FMT<0b001, 0b001001>;
+class SRAI_W_ENC : MSA_BIT_W_FMT<0b001, 0b001001>;
+class SRAI_D_ENC : MSA_BIT_D_FMT<0b001, 0b001001>;
+
+class SRAR_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010101>;
+class SRAR_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010101>;
+class SRAR_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010101>;
+class SRAR_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010101>;
+
+class SRARI_B_ENC : MSA_BIT_B_FMT<0b010, 0b001010>;
+class SRARI_H_ENC : MSA_BIT_H_FMT<0b010, 0b001010>;
+class SRARI_W_ENC : MSA_BIT_W_FMT<0b010, 0b001010>;
+class SRARI_D_ENC : MSA_BIT_D_FMT<0b010, 0b001010>;
+
+class SRL_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b001101>;
+class SRL_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b001101>;
+class SRL_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b001101>;
+class SRL_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b001101>;
+
+class SRLI_B_ENC : MSA_BIT_B_FMT<0b010, 0b001001>;
+class SRLI_H_ENC : MSA_BIT_H_FMT<0b010, 0b001001>;
+class SRLI_W_ENC : MSA_BIT_W_FMT<0b010, 0b001001>;
+class SRLI_D_ENC : MSA_BIT_D_FMT<0b010, 0b001001>;
+
+class SRLR_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010101>;
+class SRLR_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010101>;
+class SRLR_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010101>;
+class SRLR_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010101>;
+
+class SRLRI_B_ENC : MSA_BIT_B_FMT<0b011, 0b001010>;
+class SRLRI_H_ENC : MSA_BIT_H_FMT<0b011, 0b001010>;
+class SRLRI_W_ENC : MSA_BIT_W_FMT<0b011, 0b001010>;
+class SRLRI_D_ENC : MSA_BIT_D_FMT<0b011, 0b001010>;
+
+class ST_B_ENC : MSA_MI10_FMT<0b00, 0b1001>;
+class ST_H_ENC : MSA_MI10_FMT<0b01, 0b1001>;
+class ST_W_ENC : MSA_MI10_FMT<0b10, 0b1001>;
+class ST_D_ENC : MSA_MI10_FMT<0b11, 0b1001>;
+
+class SUBS_S_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010001>;
+class SUBS_S_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010001>;
+class SUBS_S_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010001>;
+class SUBS_S_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010001>;
+
+class SUBS_U_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010001>;
+class SUBS_U_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010001>;
+class SUBS_U_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010001>;
+class SUBS_U_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010001>;
+
+class SUBSUS_U_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010001>;
+class SUBSUS_U_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010001>;
+class SUBSUS_U_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010001>;
+class SUBSUS_U_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010001>;
+
+class SUBSUU_S_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b010001>;
+class SUBSUU_S_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b010001>;
+class SUBSUU_S_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b010001>;
+class SUBSUU_S_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b010001>;
+
+class SUBV_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b001110>;
+class SUBV_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b001110>;
+class SUBV_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b001110>;
+class SUBV_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b001110>;
+
+class SUBVI_B_ENC : MSA_I5_FMT<0b001, 0b00, 0b000110>;
+class SUBVI_H_ENC : MSA_I5_FMT<0b001, 0b01, 0b000110>;
+class SUBVI_W_ENC : MSA_I5_FMT<0b001, 0b10, 0b000110>;
+class SUBVI_D_ENC : MSA_I5_FMT<0b001, 0b11, 0b000110>;
+
+class VSHF_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010101>;
+class VSHF_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010101>;
+class VSHF_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010101>;
+class VSHF_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010101>;
+
+class XOR_V_ENC : MSA_VEC_FMT<0b00011, 0b011110>;
+
+class XORI_B_ENC : MSA_I8_FMT<0b11, 0b000000>;
+
+// Instruction descriptions.
+class MSA_BIT_B_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ComplexPattern Imm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, vsplat_uimm3:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, Imm:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_BIT_H_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ComplexPattern Imm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, vsplat_uimm4:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, Imm:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_BIT_W_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ComplexPattern Imm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, vsplat_uimm5:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, Imm:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_BIT_D_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ComplexPattern Imm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, vsplat_uimm6:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, Imm:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+// This class is deprecated and will be removed soon.
+class MSA_BIT_B_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm3:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt3:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+// This class is deprecated and will be removed soon.
+class MSA_BIT_H_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm4:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt4:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+// This class is deprecated and will be removed soon.
+class MSA_BIT_W_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm5:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt5:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+// This class is deprecated and will be removed soon.
+class MSA_BIT_D_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm6:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt6:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_BIT_BINSXI_DESC_BASE<string instr_asm, ValueType Ty,
+ ComplexPattern Mask, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, vsplat_uimm8:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (vselect (Ty Mask:$m), (Ty ROWD:$wd_in),
+ ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+ string Constraints = "$wd = $wd_in";
+}
+
+class MSA_BIT_BINSLI_DESC_BASE<string instr_asm, ValueType Ty,
+ RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> :
+ MSA_BIT_BINSXI_DESC_BASE<instr_asm, Ty, vsplat_maskl_bits, ROWD, ROWS, itin>;
+
+class MSA_BIT_BINSRI_DESC_BASE<string instr_asm, ValueType Ty,
+ RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> :
+ MSA_BIT_BINSXI_DESC_BASE<instr_asm, Ty, vsplat_maskr_bits, ROWD, ROWS, itin>;
+
+class MSA_BIT_SPLAT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ SplatComplexPattern SplatImm,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, SplatImm.OpClass:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, SplatImm:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_COPY_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ValueType VecTy, RegisterOperand ROD,
+ RegisterOperand ROWS,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROWS:$ws, uimm4:$n);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $ws[$n]");
+ list<dag> Pattern = [(set ROD:$rd, (OpNode (VecTy ROWS:$ws), immZExt4:$n))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_ELM_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm4:$n);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$n]");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt4:$n))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_COPY_PSEUDO_BASE<SDPatternOperator OpNode, ValueType VecTy,
+ RegisterClass RCD, RegisterClass RCWS> :
+ MipsPseudo<(outs RCD:$wd), (ins RCWS:$ws, uimm4:$n),
+ [(set RCD:$wd, (OpNode (VecTy RCWS:$ws), immZExt4:$n))]> {
+ bit usesCustomInserter = 1;
+}
+
+class MSA_I5_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ SplatComplexPattern SplatImm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, SplatImm.OpClass:$imm);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $imm");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, SplatImm:$imm))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_I8_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ SplatComplexPattern SplatImm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, SplatImm.OpClass:$u8);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $u8");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, SplatImm:$u8))];
+ InstrItinClass Itinerary = itin;
+}
+
+// This class is deprecated and will be removed in the next few patches.
+class MSA_I8_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm8:$u8);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $u8");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt8:$u8))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_I8_SHF_DESC_BASE<string instr_asm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm8:$u8);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $u8");
+ list<dag> Pattern = [(set ROWD:$wd, (MipsSHF immZExt8:$u8, ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_I10_LDI_DESC_BASE<string instr_asm, RegisterOperand ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins vsplat_simm10:$s10);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $s10");
+ // LDI is matched by custom code in MipsSEISelDAGToDAG.cpp.
+ list<dag> Pattern = [];
+ bit hasSideEffects = 0;
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_2R_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_2R_FILL_DESC_BASE<string instr_asm, ValueType VT,
+ SDPatternOperator OpNode, RegisterOperand ROWD,
+ RegisterOperand ROS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROS:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $rs");
+ list<dag> Pattern = [(set ROWD:$wd, (VT (OpNode ROS:$rs)))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_2R_FILL_PSEUDO_BASE<ValueType VT, SDPatternOperator OpNode,
+ RegisterClass RCWD, RegisterClass RCWS = RCWD> :
+ MipsPseudo<(outs RCWD:$wd), (ins RCWS:$fs),
+ [(set RCWD:$wd, (OpNode RCWS:$fs))]> {
+ let usesCustomInserter = 1;
+}
+
+class MSA_2RF_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, ROWT:$wt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, ROWT:$wt))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_BINSX_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, ROWT:$wt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in, ROWS:$ws,
+ ROWT:$wt))];
+ string Constraints = "$wd = $wd_in";
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_SPLAT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, GPR32:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$rt]");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, GPR32:$rt))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_VSHF_DESC_BASE<string instr_asm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, ROWT:$wt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
+ list<dag> Pattern = [(set ROWD:$wd, (MipsVSHF ROWD:$wd_in, ROWS:$ws,
+ ROWT:$wt))];
+ string Constraints = "$wd = $wd_in";
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_SLD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, GPR32:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$rt]");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, GPR32:$rt))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_4R_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, ROWT:$wt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
+ list<dag> Pattern = [(set ROWD:$wd,
+ (OpNode ROWD:$wd_in, ROWS:$ws, ROWT:$wt))];
+ InstrItinClass Itinerary = itin;
+ string Constraints = "$wd = $wd_in";
+}
+
+class MSA_3RF_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> :
+ MSA_3R_DESC_BASE<instr_asm, OpNode, ROWD, ROWS, ROWT, itin>;
+
+class MSA_3RF_4RF_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> :
+ MSA_3R_4R_DESC_BASE<instr_asm, OpNode, ROWD, ROWS, ROWT, itin>;
+
+class MSA_CBRANCH_DESC_BASE<string instr_asm, RegisterOperand ROWD> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins ROWD:$wt, brtarget:$offset);
+ string AsmString = !strconcat(instr_asm, "\t$wt, $offset");
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = IIBranch;
+ bit isBranch = 1;
+ bit isTerminator = 1;
+ bit hasDelaySlot = 1;
+ list<Register> Defs = [AT];
+}
+
+class MSA_INSERT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROS,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, ROS:$rs, uimm6:$n);
+ string AsmString = !strconcat(instr_asm, "\t$wd[$n], $rs");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in,
+ ROS:$rs,
+ immZExt6:$n))];
+ InstrItinClass Itinerary = itin;
+ string Constraints = "$wd = $wd_in";
+}
+
+class MSA_INSERT_PSEUDO_BASE<SDPatternOperator OpNode, ValueType Ty,
+ RegisterOperand ROWD, RegisterOperand ROFS> :
+ MipsPseudo<(outs ROWD:$wd), (ins ROWD:$wd_in, uimm6:$n, ROFS:$fs),
+ [(set ROWD:$wd, (OpNode (Ty ROWD:$wd_in), ROFS:$fs,
+ immZExt6:$n))]> {
+ bit usesCustomInserter = 1;
+ string Constraints = "$wd = $wd_in";
+}
+
+class MSA_INSVE_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, uimm6:$n, ROWS:$ws);
+ string AsmString = !strconcat(instr_asm, "\t$wd[$n], $ws[0]");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in,
+ immZExt6:$n,
+ ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+ string Constraints = "$wd = $wd_in";
+}
+
+class MSA_VEC_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, ROWT:$wt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, ROWT:$wt))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_ELM_SPLAT_DESC_BASE<string instr_asm, SplatComplexPattern SplatImm,
+ RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, SplatImm.OpClass:$n);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$n]");
+ list<dag> Pattern = [(set ROWD:$wd, (MipsVSHF SplatImm:$n, ROWS:$ws,
+ ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_VEC_PSEUDO_BASE<SDPatternOperator OpNode, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD> :
+ MipsPseudo<(outs ROWD:$wd), (ins ROWS:$ws, ROWT:$wt),
+ [(set ROWD:$wd, (OpNode ROWS:$ws, ROWT:$wt))]>;
+
+class ADD_A_B_DESC : MSA_3R_DESC_BASE<"add_a.b", int_mips_add_a_b, MSA128BOpnd>,
+ IsCommutable;
+class ADD_A_H_DESC : MSA_3R_DESC_BASE<"add_a.h", int_mips_add_a_h, MSA128HOpnd>,
+ IsCommutable;
+class ADD_A_W_DESC : MSA_3R_DESC_BASE<"add_a.w", int_mips_add_a_w, MSA128WOpnd>,
+ IsCommutable;
+class ADD_A_D_DESC : MSA_3R_DESC_BASE<"add_a.d", int_mips_add_a_d, MSA128DOpnd>,
+ IsCommutable;
+
+class ADDS_A_B_DESC : MSA_3R_DESC_BASE<"adds_a.b", int_mips_adds_a_b,
+ MSA128BOpnd>, IsCommutable;
+class ADDS_A_H_DESC : MSA_3R_DESC_BASE<"adds_a.h", int_mips_adds_a_h,
+ MSA128HOpnd>, IsCommutable;
+class ADDS_A_W_DESC : MSA_3R_DESC_BASE<"adds_a.w", int_mips_adds_a_w,
+ MSA128WOpnd>, IsCommutable;
+class ADDS_A_D_DESC : MSA_3R_DESC_BASE<"adds_a.d", int_mips_adds_a_d,
+ MSA128DOpnd>, IsCommutable;
+
+class ADDS_S_B_DESC : MSA_3R_DESC_BASE<"adds_s.b", int_mips_adds_s_b,
+ MSA128BOpnd>, IsCommutable;
+class ADDS_S_H_DESC : MSA_3R_DESC_BASE<"adds_s.h", int_mips_adds_s_h,
+ MSA128HOpnd>, IsCommutable;
+class ADDS_S_W_DESC : MSA_3R_DESC_BASE<"adds_s.w", int_mips_adds_s_w,
+ MSA128WOpnd>, IsCommutable;
+class ADDS_S_D_DESC : MSA_3R_DESC_BASE<"adds_s.d", int_mips_adds_s_d,
+ MSA128DOpnd>, IsCommutable;
+
+class ADDS_U_B_DESC : MSA_3R_DESC_BASE<"adds_u.b", int_mips_adds_u_b,
+ MSA128BOpnd>, IsCommutable;
+class ADDS_U_H_DESC : MSA_3R_DESC_BASE<"adds_u.h", int_mips_adds_u_h,
+ MSA128HOpnd>, IsCommutable;
+class ADDS_U_W_DESC : MSA_3R_DESC_BASE<"adds_u.w", int_mips_adds_u_w,
+ MSA128WOpnd>, IsCommutable;
+class ADDS_U_D_DESC : MSA_3R_DESC_BASE<"adds_u.d", int_mips_adds_u_d,
+ MSA128DOpnd>, IsCommutable;
+
+class ADDV_B_DESC : MSA_3R_DESC_BASE<"addv.b", add, MSA128BOpnd>, IsCommutable;
+class ADDV_H_DESC : MSA_3R_DESC_BASE<"addv.h", add, MSA128HOpnd>, IsCommutable;
+class ADDV_W_DESC : MSA_3R_DESC_BASE<"addv.w", add, MSA128WOpnd>, IsCommutable;
+class ADDV_D_DESC : MSA_3R_DESC_BASE<"addv.d", add, MSA128DOpnd>, IsCommutable;
+
+class ADDVI_B_DESC : MSA_I5_DESC_BASE<"addvi.b", add, vsplati8_uimm5,
+ MSA128BOpnd>;
+class ADDVI_H_DESC : MSA_I5_DESC_BASE<"addvi.h", add, vsplati16_uimm5,
+ MSA128HOpnd>;
+class ADDVI_W_DESC : MSA_I5_DESC_BASE<"addvi.w", add, vsplati32_uimm5,
+ MSA128WOpnd>;
+class ADDVI_D_DESC : MSA_I5_DESC_BASE<"addvi.d", add, vsplati64_uimm5,
+ MSA128DOpnd>;
+
+class AND_V_DESC : MSA_VEC_DESC_BASE<"and.v", and, MSA128BOpnd>;
+class AND_V_H_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<and, MSA128HOpnd>;
+class AND_V_W_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<and, MSA128WOpnd>;
+class AND_V_D_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<and, MSA128DOpnd>;
+
+class ANDI_B_DESC : MSA_I8_DESC_BASE<"andi.b", and, vsplati8_uimm8,
+ MSA128BOpnd>;
+
+class ASUB_S_B_DESC : MSA_3R_DESC_BASE<"asub_s.b", int_mips_asub_s_b,
+ MSA128BOpnd>;
+class ASUB_S_H_DESC : MSA_3R_DESC_BASE<"asub_s.h", int_mips_asub_s_h,
+ MSA128HOpnd>;
+class ASUB_S_W_DESC : MSA_3R_DESC_BASE<"asub_s.w", int_mips_asub_s_w,
+ MSA128WOpnd>;
+class ASUB_S_D_DESC : MSA_3R_DESC_BASE<"asub_s.d", int_mips_asub_s_d,
+ MSA128DOpnd>;
+
+class ASUB_U_B_DESC : MSA_3R_DESC_BASE<"asub_u.b", int_mips_asub_u_b,
+ MSA128BOpnd>;
+class ASUB_U_H_DESC : MSA_3R_DESC_BASE<"asub_u.h", int_mips_asub_u_h,
+ MSA128HOpnd>;
+class ASUB_U_W_DESC : MSA_3R_DESC_BASE<"asub_u.w", int_mips_asub_u_w,
+ MSA128WOpnd>;
+class ASUB_U_D_DESC : MSA_3R_DESC_BASE<"asub_u.d", int_mips_asub_u_d,
+ MSA128DOpnd>;
+
+class AVE_S_B_DESC : MSA_3R_DESC_BASE<"ave_s.b", int_mips_ave_s_b, MSA128BOpnd>,
+ IsCommutable;
+class AVE_S_H_DESC : MSA_3R_DESC_BASE<"ave_s.h", int_mips_ave_s_h, MSA128HOpnd>,
+ IsCommutable;
+class AVE_S_W_DESC : MSA_3R_DESC_BASE<"ave_s.w", int_mips_ave_s_w, MSA128WOpnd>,
+ IsCommutable;
+class AVE_S_D_DESC : MSA_3R_DESC_BASE<"ave_s.d", int_mips_ave_s_d, MSA128DOpnd>,
+ IsCommutable;
+
+class AVE_U_B_DESC : MSA_3R_DESC_BASE<"ave_u.b", int_mips_ave_u_b, MSA128BOpnd>,
+ IsCommutable;
+class AVE_U_H_DESC : MSA_3R_DESC_BASE<"ave_u.h", int_mips_ave_u_h, MSA128HOpnd>,
+ IsCommutable;
+class AVE_U_W_DESC : MSA_3R_DESC_BASE<"ave_u.w", int_mips_ave_u_w, MSA128WOpnd>,
+ IsCommutable;
+class AVE_U_D_DESC : MSA_3R_DESC_BASE<"ave_u.d", int_mips_ave_u_d, MSA128DOpnd>,
+ IsCommutable;
+
+class AVER_S_B_DESC : MSA_3R_DESC_BASE<"aver_s.b", int_mips_aver_s_b,
+ MSA128BOpnd>, IsCommutable;
+class AVER_S_H_DESC : MSA_3R_DESC_BASE<"aver_s.h", int_mips_aver_s_h,
+ MSA128HOpnd>, IsCommutable;
+class AVER_S_W_DESC : MSA_3R_DESC_BASE<"aver_s.w", int_mips_aver_s_w,
+ MSA128WOpnd>, IsCommutable;
+class AVER_S_D_DESC : MSA_3R_DESC_BASE<"aver_s.d", int_mips_aver_s_d,
+ MSA128DOpnd>, IsCommutable;
+
+class AVER_U_B_DESC : MSA_3R_DESC_BASE<"aver_u.b", int_mips_aver_u_b,
+ MSA128BOpnd>, IsCommutable;
+class AVER_U_H_DESC : MSA_3R_DESC_BASE<"aver_u.h", int_mips_aver_u_h,
+ MSA128HOpnd>, IsCommutable;
+class AVER_U_W_DESC : MSA_3R_DESC_BASE<"aver_u.w", int_mips_aver_u_w,
+ MSA128WOpnd>, IsCommutable;
+class AVER_U_D_DESC : MSA_3R_DESC_BASE<"aver_u.d", int_mips_aver_u_d,
+ MSA128DOpnd>, IsCommutable;
+
+class BCLR_B_DESC : MSA_3R_DESC_BASE<"bclr.b", vbclr_b, MSA128BOpnd>;
+class BCLR_H_DESC : MSA_3R_DESC_BASE<"bclr.h", vbclr_h, MSA128HOpnd>;
+class BCLR_W_DESC : MSA_3R_DESC_BASE<"bclr.w", vbclr_w, MSA128WOpnd>;
+class BCLR_D_DESC : MSA_3R_DESC_BASE<"bclr.d", vbclr_d, MSA128DOpnd>;
+
+class BCLRI_B_DESC : MSA_BIT_B_DESC_BASE<"bclri.b", and, vsplat_uimm_inv_pow2,
+ MSA128BOpnd>;
+class BCLRI_H_DESC : MSA_BIT_H_DESC_BASE<"bclri.h", and, vsplat_uimm_inv_pow2,
+ MSA128HOpnd>;
+class BCLRI_W_DESC : MSA_BIT_W_DESC_BASE<"bclri.w", and, vsplat_uimm_inv_pow2,
+ MSA128WOpnd>;
+class BCLRI_D_DESC : MSA_BIT_D_DESC_BASE<"bclri.d", and, vsplat_uimm_inv_pow2,
+ MSA128DOpnd>;
+
+class BINSL_B_DESC : MSA_3R_BINSX_DESC_BASE<"binsl.b", int_mips_binsl_b,
+ MSA128BOpnd>;
+class BINSL_H_DESC : MSA_3R_BINSX_DESC_BASE<"binsl.h", int_mips_binsl_h,
+ MSA128HOpnd>;
+class BINSL_W_DESC : MSA_3R_BINSX_DESC_BASE<"binsl.w", int_mips_binsl_w,
+ MSA128WOpnd>;
+class BINSL_D_DESC : MSA_3R_BINSX_DESC_BASE<"binsl.d", int_mips_binsl_d,
+ MSA128DOpnd>;
+
+class BINSLI_B_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.b", v16i8, MSA128BOpnd>;
+class BINSLI_H_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.h", v8i16, MSA128HOpnd>;
+class BINSLI_W_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.w", v4i32, MSA128WOpnd>;
+class BINSLI_D_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.d", v2i64, MSA128DOpnd>;
+
+class BINSR_B_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.b", int_mips_binsr_b,
+ MSA128BOpnd>;
+class BINSR_H_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.h", int_mips_binsr_h,
+ MSA128HOpnd>;
+class BINSR_W_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.w", int_mips_binsr_w,
+ MSA128WOpnd>;
+class BINSR_D_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.d", int_mips_binsr_d,
+ MSA128DOpnd>;
+
+class BINSRI_B_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.b", v16i8, MSA128BOpnd>;
+class BINSRI_H_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.h", v8i16, MSA128HOpnd>;
+class BINSRI_W_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.w", v4i32, MSA128WOpnd>;
+class BINSRI_D_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.d", v2i64, MSA128DOpnd>;
+
+class BMNZ_V_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt);
+ string AsmString = "bmnz.v\t$wd, $ws, $wt";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect MSA128BOpnd:$wt,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wd_in))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BMNZI_B_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ vsplat_uimm8:$u8);
+ string AsmString = "bmnzi.b\t$wd, $ws, $u8";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect vsplati8_uimm8:$u8,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wd_in))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BMZ_V_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt);
+ string AsmString = "bmz.v\t$wd, $ws, $wt";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect MSA128BOpnd:$wt,
+ MSA128BOpnd:$wd_in,
+ MSA128BOpnd:$ws))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BMZI_B_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ vsplat_uimm8:$u8);
+ string AsmString = "bmzi.b\t$wd, $ws, $u8";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect vsplati8_uimm8:$u8,
+ MSA128BOpnd:$wd_in,
+ MSA128BOpnd:$ws))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BNEG_B_DESC : MSA_3R_DESC_BASE<"bneg.b", vbneg_b, MSA128BOpnd>;
+class BNEG_H_DESC : MSA_3R_DESC_BASE<"bneg.h", vbneg_h, MSA128HOpnd>;
+class BNEG_W_DESC : MSA_3R_DESC_BASE<"bneg.w", vbneg_w, MSA128WOpnd>;
+class BNEG_D_DESC : MSA_3R_DESC_BASE<"bneg.d", vbneg_d, MSA128DOpnd>;
+
+class BNEGI_B_DESC : MSA_BIT_B_DESC_BASE<"bnegi.b", xor, vsplat_uimm_pow2,
+ MSA128BOpnd>;
+class BNEGI_H_DESC : MSA_BIT_H_DESC_BASE<"bnegi.h", xor, vsplat_uimm_pow2,
+ MSA128HOpnd>;
+class BNEGI_W_DESC : MSA_BIT_W_DESC_BASE<"bnegi.w", xor, vsplat_uimm_pow2,
+ MSA128WOpnd>;
+class BNEGI_D_DESC : MSA_BIT_D_DESC_BASE<"bnegi.d", xor, vsplat_uimm_pow2,
+ MSA128DOpnd>;
+
+class BNZ_B_DESC : MSA_CBRANCH_DESC_BASE<"bnz.b", MSA128BOpnd>;
+class BNZ_H_DESC : MSA_CBRANCH_DESC_BASE<"bnz.h", MSA128HOpnd>;
+class BNZ_W_DESC : MSA_CBRANCH_DESC_BASE<"bnz.w", MSA128WOpnd>;
+class BNZ_D_DESC : MSA_CBRANCH_DESC_BASE<"bnz.d", MSA128DOpnd>;
+
+class BNZ_V_DESC : MSA_CBRANCH_DESC_BASE<"bnz.v", MSA128BOpnd>;
+
+class BSEL_V_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt);
+ string AsmString = "bsel.v\t$wd, $ws, $wt";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd,
+ (vselect MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BSELI_B_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ vsplat_uimm8:$u8);
+ string AsmString = "bseli.b\t$wd, $ws, $u8";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect MSA128BOpnd:$wd_in,
+ MSA128BOpnd:$ws,
+ vsplati8_uimm8:$u8))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BSET_B_DESC : MSA_3R_DESC_BASE<"bset.b", vbset_b, MSA128BOpnd>;
+class BSET_H_DESC : MSA_3R_DESC_BASE<"bset.h", vbset_h, MSA128HOpnd>;
+class BSET_W_DESC : MSA_3R_DESC_BASE<"bset.w", vbset_w, MSA128WOpnd>;
+class BSET_D_DESC : MSA_3R_DESC_BASE<"bset.d", vbset_d, MSA128DOpnd>;
+
+class BSETI_B_DESC : MSA_BIT_B_DESC_BASE<"bseti.b", or, vsplat_uimm_pow2,
+ MSA128BOpnd>;
+class BSETI_H_DESC : MSA_BIT_H_DESC_BASE<"bseti.h", or, vsplat_uimm_pow2,
+ MSA128HOpnd>;
+class BSETI_W_DESC : MSA_BIT_W_DESC_BASE<"bseti.w", or, vsplat_uimm_pow2,
+ MSA128WOpnd>;
+class BSETI_D_DESC : MSA_BIT_D_DESC_BASE<"bseti.d", or, vsplat_uimm_pow2,
+ MSA128DOpnd>;
+
+class BZ_B_DESC : MSA_CBRANCH_DESC_BASE<"bz.b", MSA128BOpnd>;
+class BZ_H_DESC : MSA_CBRANCH_DESC_BASE<"bz.h", MSA128HOpnd>;
+class BZ_W_DESC : MSA_CBRANCH_DESC_BASE<"bz.w", MSA128WOpnd>;
+class BZ_D_DESC : MSA_CBRANCH_DESC_BASE<"bz.d", MSA128DOpnd>;
+
+class BZ_V_DESC : MSA_CBRANCH_DESC_BASE<"bz.v", MSA128BOpnd>;
+
+class CEQ_B_DESC : MSA_3R_DESC_BASE<"ceq.b", vseteq_v16i8, MSA128BOpnd>,
+ IsCommutable;
+class CEQ_H_DESC : MSA_3R_DESC_BASE<"ceq.h", vseteq_v8i16, MSA128HOpnd>,
+ IsCommutable;
+class CEQ_W_DESC : MSA_3R_DESC_BASE<"ceq.w", vseteq_v4i32, MSA128WOpnd>,
+ IsCommutable;
+class CEQ_D_DESC : MSA_3R_DESC_BASE<"ceq.d", vseteq_v2i64, MSA128DOpnd>,
+ IsCommutable;
+
+class CEQI_B_DESC : MSA_I5_DESC_BASE<"ceqi.b", vseteq_v16i8, vsplati8_simm5,
+ MSA128BOpnd>;
+class CEQI_H_DESC : MSA_I5_DESC_BASE<"ceqi.h", vseteq_v8i16, vsplati16_simm5,
+ MSA128HOpnd>;
+class CEQI_W_DESC : MSA_I5_DESC_BASE<"ceqi.w", vseteq_v4i32, vsplati32_simm5,
+ MSA128WOpnd>;
+class CEQI_D_DESC : MSA_I5_DESC_BASE<"ceqi.d", vseteq_v2i64, vsplati64_simm5,
+ MSA128DOpnd>;
+
+class CFCMSA_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins MSA128CROpnd:$cs);
+ string AsmString = "cfcmsa\t$rd, $cs";
+ InstrItinClass Itinerary = NoItinerary;
+ bit hasSideEffects = 1;
+}
+
+class CLE_S_B_DESC : MSA_3R_DESC_BASE<"cle_s.b", vsetle_v16i8, MSA128BOpnd>;
+class CLE_S_H_DESC : MSA_3R_DESC_BASE<"cle_s.h", vsetle_v8i16, MSA128HOpnd>;
+class CLE_S_W_DESC : MSA_3R_DESC_BASE<"cle_s.w", vsetle_v4i32, MSA128WOpnd>;
+class CLE_S_D_DESC : MSA_3R_DESC_BASE<"cle_s.d", vsetle_v2i64, MSA128DOpnd>;
+
+class CLE_U_B_DESC : MSA_3R_DESC_BASE<"cle_u.b", vsetule_v16i8, MSA128BOpnd>;
+class CLE_U_H_DESC : MSA_3R_DESC_BASE<"cle_u.h", vsetule_v8i16, MSA128HOpnd>;
+class CLE_U_W_DESC : MSA_3R_DESC_BASE<"cle_u.w", vsetule_v4i32, MSA128WOpnd>;
+class CLE_U_D_DESC : MSA_3R_DESC_BASE<"cle_u.d", vsetule_v2i64, MSA128DOpnd>;
+
+class CLEI_S_B_DESC : MSA_I5_DESC_BASE<"clei_s.b", vsetle_v16i8,
+ vsplati8_simm5, MSA128BOpnd>;
+class CLEI_S_H_DESC : MSA_I5_DESC_BASE<"clei_s.h", vsetle_v8i16,
+ vsplati16_simm5, MSA128HOpnd>;
+class CLEI_S_W_DESC : MSA_I5_DESC_BASE<"clei_s.w", vsetle_v4i32,
+ vsplati32_simm5, MSA128WOpnd>;
+class CLEI_S_D_DESC : MSA_I5_DESC_BASE<"clei_s.d", vsetle_v2i64,
+ vsplati64_simm5, MSA128DOpnd>;
+
+class CLEI_U_B_DESC : MSA_I5_DESC_BASE<"clei_u.b", vsetule_v16i8,
+ vsplati8_uimm5, MSA128BOpnd>;
+class CLEI_U_H_DESC : MSA_I5_DESC_BASE<"clei_u.h", vsetule_v8i16,
+ vsplati16_uimm5, MSA128HOpnd>;
+class CLEI_U_W_DESC : MSA_I5_DESC_BASE<"clei_u.w", vsetule_v4i32,
+ vsplati32_uimm5, MSA128WOpnd>;
+class CLEI_U_D_DESC : MSA_I5_DESC_BASE<"clei_u.d", vsetule_v2i64,
+ vsplati64_uimm5, MSA128DOpnd>;
+
+class CLT_S_B_DESC : MSA_3R_DESC_BASE<"clt_s.b", vsetlt_v16i8, MSA128BOpnd>;
+class CLT_S_H_DESC : MSA_3R_DESC_BASE<"clt_s.h", vsetlt_v8i16, MSA128HOpnd>;
+class CLT_S_W_DESC : MSA_3R_DESC_BASE<"clt_s.w", vsetlt_v4i32, MSA128WOpnd>;
+class CLT_S_D_DESC : MSA_3R_DESC_BASE<"clt_s.d", vsetlt_v2i64, MSA128DOpnd>;
+
+class CLT_U_B_DESC : MSA_3R_DESC_BASE<"clt_u.b", vsetult_v16i8, MSA128BOpnd>;
+class CLT_U_H_DESC : MSA_3R_DESC_BASE<"clt_u.h", vsetult_v8i16, MSA128HOpnd>;
+class CLT_U_W_DESC : MSA_3R_DESC_BASE<"clt_u.w", vsetult_v4i32, MSA128WOpnd>;
+class CLT_U_D_DESC : MSA_3R_DESC_BASE<"clt_u.d", vsetult_v2i64, MSA128DOpnd>;
+
+class CLTI_S_B_DESC : MSA_I5_DESC_BASE<"clti_s.b", vsetlt_v16i8,
+ vsplati8_simm5, MSA128BOpnd>;
+class CLTI_S_H_DESC : MSA_I5_DESC_BASE<"clti_s.h", vsetlt_v8i16,
+ vsplati16_simm5, MSA128HOpnd>;
+class CLTI_S_W_DESC : MSA_I5_DESC_BASE<"clti_s.w", vsetlt_v4i32,
+ vsplati32_simm5, MSA128WOpnd>;
+class CLTI_S_D_DESC : MSA_I5_DESC_BASE<"clti_s.d", vsetlt_v2i64,
+ vsplati64_simm5, MSA128DOpnd>;
+
+class CLTI_U_B_DESC : MSA_I5_DESC_BASE<"clti_u.b", vsetult_v16i8,
+ vsplati8_uimm5, MSA128BOpnd>;
+class CLTI_U_H_DESC : MSA_I5_DESC_BASE<"clti_u.h", vsetult_v8i16,
+ vsplati16_uimm5, MSA128HOpnd>;
+class CLTI_U_W_DESC : MSA_I5_DESC_BASE<"clti_u.w", vsetult_v4i32,
+ vsplati32_uimm5, MSA128WOpnd>;
+class CLTI_U_D_DESC : MSA_I5_DESC_BASE<"clti_u.d", vsetult_v2i64,
+ vsplati64_uimm5, MSA128DOpnd>;
+
+class COPY_S_B_DESC : MSA_COPY_DESC_BASE<"copy_s.b", vextract_sext_i8, v16i8,
+ GPR32Opnd, MSA128BOpnd>;
+class COPY_S_H_DESC : MSA_COPY_DESC_BASE<"copy_s.h", vextract_sext_i16, v8i16,
+ GPR32Opnd, MSA128HOpnd>;
+class COPY_S_W_DESC : MSA_COPY_DESC_BASE<"copy_s.w", vextract_sext_i32, v4i32,
+ GPR32Opnd, MSA128WOpnd>;
+
+class COPY_U_B_DESC : MSA_COPY_DESC_BASE<"copy_u.b", vextract_zext_i8, v16i8,
+ GPR32Opnd, MSA128BOpnd>;
+class COPY_U_H_DESC : MSA_COPY_DESC_BASE<"copy_u.h", vextract_zext_i16, v8i16,
+ GPR32Opnd, MSA128HOpnd>;
+class COPY_U_W_DESC : MSA_COPY_DESC_BASE<"copy_u.w", vextract_zext_i32, v4i32,
+ GPR32Opnd, MSA128WOpnd>;
+
+class COPY_FW_PSEUDO_DESC : MSA_COPY_PSEUDO_BASE<vector_extract, v4f32, FGR32,
+ MSA128W>;
+class COPY_FD_PSEUDO_DESC : MSA_COPY_PSEUDO_BASE<vector_extract, v2f64, FGR64,
+ MSA128D>;
+
+class CTCMSA_DESC {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins MSA128CROpnd:$cd, GPR32Opnd:$rs);
+ string AsmString = "ctcmsa\t$cd, $rs";
+ InstrItinClass Itinerary = NoItinerary;
+ bit hasSideEffects = 1;
+}
+
+class DIV_S_B_DESC : MSA_3R_DESC_BASE<"div_s.b", sdiv, MSA128BOpnd>;
+class DIV_S_H_DESC : MSA_3R_DESC_BASE<"div_s.h", sdiv, MSA128HOpnd>;
+class DIV_S_W_DESC : MSA_3R_DESC_BASE<"div_s.w", sdiv, MSA128WOpnd>;
+class DIV_S_D_DESC : MSA_3R_DESC_BASE<"div_s.d", sdiv, MSA128DOpnd>;
+
+class DIV_U_B_DESC : MSA_3R_DESC_BASE<"div_u.b", udiv, MSA128BOpnd>;
+class DIV_U_H_DESC : MSA_3R_DESC_BASE<"div_u.h", udiv, MSA128HOpnd>;
+class DIV_U_W_DESC : MSA_3R_DESC_BASE<"div_u.w", udiv, MSA128WOpnd>;
+class DIV_U_D_DESC : MSA_3R_DESC_BASE<"div_u.d", udiv, MSA128DOpnd>;
+
+class DOTP_S_H_DESC : MSA_3R_DESC_BASE<"dotp_s.h", int_mips_dotp_s_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>,
+ IsCommutable;
+class DOTP_S_W_DESC : MSA_3R_DESC_BASE<"dotp_s.w", int_mips_dotp_s_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>,
+ IsCommutable;
+class DOTP_S_D_DESC : MSA_3R_DESC_BASE<"dotp_s.d", int_mips_dotp_s_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>,
+ IsCommutable;
+
+class DOTP_U_H_DESC : MSA_3R_DESC_BASE<"dotp_u.h", int_mips_dotp_u_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>,
+ IsCommutable;
+class DOTP_U_W_DESC : MSA_3R_DESC_BASE<"dotp_u.w", int_mips_dotp_u_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>,
+ IsCommutable;
+class DOTP_U_D_DESC : MSA_3R_DESC_BASE<"dotp_u.d", int_mips_dotp_u_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>,
+ IsCommutable;
+
+class DPADD_S_H_DESC : MSA_3R_4R_DESC_BASE<"dpadd_s.h", int_mips_dpadd_s_h,
+ MSA128HOpnd, MSA128BOpnd,
+ MSA128BOpnd>, IsCommutable;
+class DPADD_S_W_DESC : MSA_3R_4R_DESC_BASE<"dpadd_s.w", int_mips_dpadd_s_w,
+ MSA128WOpnd, MSA128HOpnd,
+ MSA128HOpnd>, IsCommutable;
+class DPADD_S_D_DESC : MSA_3R_4R_DESC_BASE<"dpadd_s.d", int_mips_dpadd_s_d,
+ MSA128DOpnd, MSA128WOpnd,
+ MSA128WOpnd>, IsCommutable;
+
+class DPADD_U_H_DESC : MSA_3R_4R_DESC_BASE<"dpadd_u.h", int_mips_dpadd_u_h,
+ MSA128HOpnd, MSA128BOpnd,
+ MSA128BOpnd>, IsCommutable;
+class DPADD_U_W_DESC : MSA_3R_4R_DESC_BASE<"dpadd_u.w", int_mips_dpadd_u_w,
+ MSA128WOpnd, MSA128HOpnd,
+ MSA128HOpnd>, IsCommutable;
+class DPADD_U_D_DESC : MSA_3R_4R_DESC_BASE<"dpadd_u.d", int_mips_dpadd_u_d,
+ MSA128DOpnd, MSA128WOpnd,
+ MSA128WOpnd>, IsCommutable;
+
+class DPSUB_S_H_DESC : MSA_3R_4R_DESC_BASE<"dpsub_s.h", int_mips_dpsub_s_h,
+ MSA128HOpnd, MSA128BOpnd,
+ MSA128BOpnd>;
+class DPSUB_S_W_DESC : MSA_3R_4R_DESC_BASE<"dpsub_s.w", int_mips_dpsub_s_w,
+ MSA128WOpnd, MSA128HOpnd,
+ MSA128HOpnd>;
+class DPSUB_S_D_DESC : MSA_3R_4R_DESC_BASE<"dpsub_s.d", int_mips_dpsub_s_d,
+ MSA128DOpnd, MSA128WOpnd,
+ MSA128WOpnd>;
+
+class DPSUB_U_H_DESC : MSA_3R_4R_DESC_BASE<"dpsub_u.h", int_mips_dpsub_u_h,
+ MSA128HOpnd, MSA128BOpnd,
+ MSA128BOpnd>;
+class DPSUB_U_W_DESC : MSA_3R_4R_DESC_BASE<"dpsub_u.w", int_mips_dpsub_u_w,
+ MSA128WOpnd, MSA128HOpnd,
+ MSA128HOpnd>;
+class DPSUB_U_D_DESC : MSA_3R_4R_DESC_BASE<"dpsub_u.d", int_mips_dpsub_u_d,
+ MSA128DOpnd, MSA128WOpnd,
+ MSA128WOpnd>;
+
+class FADD_W_DESC : MSA_3RF_DESC_BASE<"fadd.w", fadd, MSA128WOpnd>,
+ IsCommutable;
+class FADD_D_DESC : MSA_3RF_DESC_BASE<"fadd.d", fadd, MSA128DOpnd>,
+ IsCommutable;
+
+class FCAF_W_DESC : MSA_3RF_DESC_BASE<"fcaf.w", int_mips_fcaf_w, MSA128WOpnd>,
+ IsCommutable;
+class FCAF_D_DESC : MSA_3RF_DESC_BASE<"fcaf.d", int_mips_fcaf_d, MSA128DOpnd>,
+ IsCommutable;
+
+class FCEQ_W_DESC : MSA_3RF_DESC_BASE<"fceq.w", vfsetoeq_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCEQ_D_DESC : MSA_3RF_DESC_BASE<"fceq.d", vfsetoeq_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCLASS_W_DESC : MSA_2RF_DESC_BASE<"fclass.w", int_mips_fclass_w,
+ MSA128WOpnd>;
+class FCLASS_D_DESC : MSA_2RF_DESC_BASE<"fclass.d", int_mips_fclass_d,
+ MSA128DOpnd>;
+
+class FCLE_W_DESC : MSA_3RF_DESC_BASE<"fcle.w", vfsetole_v4f32, MSA128WOpnd>;
+class FCLE_D_DESC : MSA_3RF_DESC_BASE<"fcle.d", vfsetole_v2f64, MSA128DOpnd>;
+
+class FCLT_W_DESC : MSA_3RF_DESC_BASE<"fclt.w", vfsetolt_v4f32, MSA128WOpnd>;
+class FCLT_D_DESC : MSA_3RF_DESC_BASE<"fclt.d", vfsetolt_v2f64, MSA128DOpnd>;
+
+class FCNE_W_DESC : MSA_3RF_DESC_BASE<"fcne.w", vfsetone_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCNE_D_DESC : MSA_3RF_DESC_BASE<"fcne.d", vfsetone_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCOR_W_DESC : MSA_3RF_DESC_BASE<"fcor.w", vfsetord_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCOR_D_DESC : MSA_3RF_DESC_BASE<"fcor.d", vfsetord_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCUEQ_W_DESC : MSA_3RF_DESC_BASE<"fcueq.w", vfsetueq_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCUEQ_D_DESC : MSA_3RF_DESC_BASE<"fcueq.d", vfsetueq_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCULE_W_DESC : MSA_3RF_DESC_BASE<"fcule.w", vfsetule_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCULE_D_DESC : MSA_3RF_DESC_BASE<"fcule.d", vfsetule_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCULT_W_DESC : MSA_3RF_DESC_BASE<"fcult.w", vfsetult_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCULT_D_DESC : MSA_3RF_DESC_BASE<"fcult.d", vfsetult_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCUN_W_DESC : MSA_3RF_DESC_BASE<"fcun.w", vfsetun_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCUN_D_DESC : MSA_3RF_DESC_BASE<"fcun.d", vfsetun_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCUNE_W_DESC : MSA_3RF_DESC_BASE<"fcune.w", vfsetune_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCUNE_D_DESC : MSA_3RF_DESC_BASE<"fcune.d", vfsetune_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FDIV_W_DESC : MSA_3RF_DESC_BASE<"fdiv.w", fdiv, MSA128WOpnd>;
+class FDIV_D_DESC : MSA_3RF_DESC_BASE<"fdiv.d", fdiv, MSA128DOpnd>;
+
+class FEXDO_H_DESC : MSA_3RF_DESC_BASE<"fexdo.h", int_mips_fexdo_h,
+ MSA128HOpnd, MSA128WOpnd, MSA128WOpnd>;
+class FEXDO_W_DESC : MSA_3RF_DESC_BASE<"fexdo.w", int_mips_fexdo_w,
+ MSA128WOpnd, MSA128DOpnd, MSA128DOpnd>;
+
+// The fexp2.df instruction multiplies its first operand by 2 raised to the
+// power of its second operand, i.e. fexp2.df $wd, $ws, $wt computes
+// $ws * 2^$wt. ISD::FEXP2 only supplies the exponent, so a pseudo-instruction
+// is needed to provide the implicit 1.0 first operand.
+class FEXP2_W_DESC : MSA_3RF_DESC_BASE<"fexp2.w", mul_fexp2, MSA128WOpnd>;
+class FEXP2_D_DESC : MSA_3RF_DESC_BASE<"fexp2.d", mul_fexp2, MSA128DOpnd>;
+let usesCustomInserter = 1 in {
+ class FEXP2_W_1_PSEUDO_DESC :
+ MipsPseudo<(outs MSA128W:$wd), (ins MSA128W:$ws),
+ [(set MSA128W:$wd, (fexp2 MSA128W:$ws))]>;
+ class FEXP2_D_1_PSEUDO_DESC :
+ MipsPseudo<(outs MSA128D:$wd), (ins MSA128D:$ws),
+ [(set MSA128D:$wd, (fexp2 MSA128D:$ws))]>;
+}
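+// A sketch of the expected custom-inserter expansion for these pseudos (the
+// C++ inserter, presumably in MipsSEISelLowering.cpp, is not part of this
+// file): splat the integer 1 into a vector, convert it to 1.0, then use it
+// as the multiplicand of fexp2.df. For the .w variant:
+//   ldi.w     $wtmp, 1          # splat integer 1 across the vector
+//   ffint_u.w $wone, $wtmp      # convert each element to 1.0
+//   fexp2.w   $wd, $wone, $ws   # $wd = 1.0 * 2^$ws per element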
+
+class FEXUPL_W_DESC : MSA_2RF_DESC_BASE<"fexupl.w", int_mips_fexupl_w,
+ MSA128WOpnd, MSA128HOpnd>;
+class FEXUPL_D_DESC : MSA_2RF_DESC_BASE<"fexupl.d", int_mips_fexupl_d,
+ MSA128DOpnd, MSA128WOpnd>;
+
+class FEXUPR_W_DESC : MSA_2RF_DESC_BASE<"fexupr.w", int_mips_fexupr_w,
+ MSA128WOpnd, MSA128HOpnd>;
+class FEXUPR_D_DESC : MSA_2RF_DESC_BASE<"fexupr.d", int_mips_fexupr_d,
+ MSA128DOpnd, MSA128WOpnd>;
+
+class FFINT_S_W_DESC : MSA_2RF_DESC_BASE<"ffint_s.w", sint_to_fp, MSA128WOpnd>;
+class FFINT_S_D_DESC : MSA_2RF_DESC_BASE<"ffint_s.d", sint_to_fp, MSA128DOpnd>;
+
+class FFINT_U_W_DESC : MSA_2RF_DESC_BASE<"ffint_u.w", uint_to_fp, MSA128WOpnd>;
+class FFINT_U_D_DESC : MSA_2RF_DESC_BASE<"ffint_u.d", uint_to_fp, MSA128DOpnd>;
+
+class FFQL_W_DESC : MSA_2RF_DESC_BASE<"ffql.w", int_mips_ffql_w,
+ MSA128WOpnd, MSA128HOpnd>;
+class FFQL_D_DESC : MSA_2RF_DESC_BASE<"ffql.d", int_mips_ffql_d,
+ MSA128DOpnd, MSA128WOpnd>;
+
+class FFQR_W_DESC : MSA_2RF_DESC_BASE<"ffqr.w", int_mips_ffqr_w,
+ MSA128WOpnd, MSA128HOpnd>;
+class FFQR_D_DESC : MSA_2RF_DESC_BASE<"ffqr.d", int_mips_ffqr_d,
+ MSA128DOpnd, MSA128WOpnd>;
+
+class FILL_B_DESC : MSA_2R_FILL_DESC_BASE<"fill.b", v16i8, vsplati8,
+ MSA128BOpnd, GPR32Opnd>;
+class FILL_H_DESC : MSA_2R_FILL_DESC_BASE<"fill.h", v8i16, vsplati16,
+ MSA128HOpnd, GPR32Opnd>;
+class FILL_W_DESC : MSA_2R_FILL_DESC_BASE<"fill.w", v4i32, vsplati32,
+ MSA128WOpnd, GPR32Opnd>;
+
+class FILL_FW_PSEUDO_DESC : MSA_2R_FILL_PSEUDO_BASE<v4f32, vsplatf32, MSA128W,
+ FGR32>;
+class FILL_FD_PSEUDO_DESC : MSA_2R_FILL_PSEUDO_BASE<v2f64, vsplatf64, MSA128D,
+ FGR64>;
+
+class FLOG2_W_DESC : MSA_2RF_DESC_BASE<"flog2.w", flog2, MSA128WOpnd>;
+class FLOG2_D_DESC : MSA_2RF_DESC_BASE<"flog2.d", flog2, MSA128DOpnd>;
+
+class FMADD_W_DESC : MSA_3RF_4RF_DESC_BASE<"fmadd.w", fma, MSA128WOpnd>;
+class FMADD_D_DESC : MSA_3RF_4RF_DESC_BASE<"fmadd.d", fma, MSA128DOpnd>;
+
+class FMAX_W_DESC : MSA_3RF_DESC_BASE<"fmax.w", int_mips_fmax_w, MSA128WOpnd>;
+class FMAX_D_DESC : MSA_3RF_DESC_BASE<"fmax.d", int_mips_fmax_d, MSA128DOpnd>;
+
+class FMAX_A_W_DESC : MSA_3RF_DESC_BASE<"fmax_a.w", int_mips_fmax_a_w,
+ MSA128WOpnd>;
+class FMAX_A_D_DESC : MSA_3RF_DESC_BASE<"fmax_a.d", int_mips_fmax_a_d,
+ MSA128DOpnd>;
+
+class FMIN_W_DESC : MSA_3RF_DESC_BASE<"fmin.w", int_mips_fmin_w, MSA128WOpnd>;
+class FMIN_D_DESC : MSA_3RF_DESC_BASE<"fmin.d", int_mips_fmin_d, MSA128DOpnd>;
+
+class FMIN_A_W_DESC : MSA_3RF_DESC_BASE<"fmin_a.w", int_mips_fmin_a_w,
+ MSA128WOpnd>;
+class FMIN_A_D_DESC : MSA_3RF_DESC_BASE<"fmin_a.d", int_mips_fmin_a_d,
+ MSA128DOpnd>;
+
+class FMSUB_W_DESC : MSA_3RF_4RF_DESC_BASE<"fmsub.w", fms, MSA128WOpnd>;
+class FMSUB_D_DESC : MSA_3RF_4RF_DESC_BASE<"fmsub.d", fms, MSA128DOpnd>;
+
+class FMUL_W_DESC : MSA_3RF_DESC_BASE<"fmul.w", fmul, MSA128WOpnd>;
+class FMUL_D_DESC : MSA_3RF_DESC_BASE<"fmul.d", fmul, MSA128DOpnd>;
+
+class FRINT_W_DESC : MSA_2RF_DESC_BASE<"frint.w", frint, MSA128WOpnd>;
+class FRINT_D_DESC : MSA_2RF_DESC_BASE<"frint.d", frint, MSA128DOpnd>;
+
+class FRCP_W_DESC : MSA_2RF_DESC_BASE<"frcp.w", int_mips_frcp_w, MSA128WOpnd>;
+class FRCP_D_DESC : MSA_2RF_DESC_BASE<"frcp.d", int_mips_frcp_d, MSA128DOpnd>;
+
+class FRSQRT_W_DESC : MSA_2RF_DESC_BASE<"frsqrt.w", int_mips_frsqrt_w,
+ MSA128WOpnd>;
+class FRSQRT_D_DESC : MSA_2RF_DESC_BASE<"frsqrt.d", int_mips_frsqrt_d,
+ MSA128DOpnd>;
+
+class FSAF_W_DESC : MSA_3RF_DESC_BASE<"fsaf.w", int_mips_fsaf_w, MSA128WOpnd>;
+class FSAF_D_DESC : MSA_3RF_DESC_BASE<"fsaf.d", int_mips_fsaf_d, MSA128DOpnd>;
+
+class FSEQ_W_DESC : MSA_3RF_DESC_BASE<"fseq.w", int_mips_fseq_w, MSA128WOpnd>;
+class FSEQ_D_DESC : MSA_3RF_DESC_BASE<"fseq.d", int_mips_fseq_d, MSA128DOpnd>;
+
+class FSLE_W_DESC : MSA_3RF_DESC_BASE<"fsle.w", int_mips_fsle_w, MSA128WOpnd>;
+class FSLE_D_DESC : MSA_3RF_DESC_BASE<"fsle.d", int_mips_fsle_d, MSA128DOpnd>;
+
+class FSLT_W_DESC : MSA_3RF_DESC_BASE<"fslt.w", int_mips_fslt_w, MSA128WOpnd>;
+class FSLT_D_DESC : MSA_3RF_DESC_BASE<"fslt.d", int_mips_fslt_d, MSA128DOpnd>;
+
+class FSNE_W_DESC : MSA_3RF_DESC_BASE<"fsne.w", int_mips_fsne_w, MSA128WOpnd>;
+class FSNE_D_DESC : MSA_3RF_DESC_BASE<"fsne.d", int_mips_fsne_d, MSA128DOpnd>;
+
+class FSOR_W_DESC : MSA_3RF_DESC_BASE<"fsor.w", int_mips_fsor_w, MSA128WOpnd>;
+class FSOR_D_DESC : MSA_3RF_DESC_BASE<"fsor.d", int_mips_fsor_d, MSA128DOpnd>;
+
+class FSQRT_W_DESC : MSA_2RF_DESC_BASE<"fsqrt.w", fsqrt, MSA128WOpnd>;
+class FSQRT_D_DESC : MSA_2RF_DESC_BASE<"fsqrt.d", fsqrt, MSA128DOpnd>;
+
+class FSUB_W_DESC : MSA_3RF_DESC_BASE<"fsub.w", fsub, MSA128WOpnd>;
+class FSUB_D_DESC : MSA_3RF_DESC_BASE<"fsub.d", fsub, MSA128DOpnd>;
+
+class FSUEQ_W_DESC : MSA_3RF_DESC_BASE<"fsueq.w", int_mips_fsueq_w,
+ MSA128WOpnd>;
+class FSUEQ_D_DESC : MSA_3RF_DESC_BASE<"fsueq.d", int_mips_fsueq_d,
+ MSA128DOpnd>;
+
+class FSULE_W_DESC : MSA_3RF_DESC_BASE<"fsule.w", int_mips_fsule_w,
+ MSA128WOpnd>;
+class FSULE_D_DESC : MSA_3RF_DESC_BASE<"fsule.d", int_mips_fsule_d,
+ MSA128DOpnd>;
+
+class FSULT_W_DESC : MSA_3RF_DESC_BASE<"fsult.w", int_mips_fsult_w,
+ MSA128WOpnd>;
+class FSULT_D_DESC : MSA_3RF_DESC_BASE<"fsult.d", int_mips_fsult_d,
+ MSA128DOpnd>;
+
+class FSUN_W_DESC : MSA_3RF_DESC_BASE<"fsun.w", int_mips_fsun_w,
+ MSA128WOpnd>;
+class FSUN_D_DESC : MSA_3RF_DESC_BASE<"fsun.d", int_mips_fsun_d,
+ MSA128DOpnd>;
+
+class FSUNE_W_DESC : MSA_3RF_DESC_BASE<"fsune.w", int_mips_fsune_w,
+ MSA128WOpnd>;
+class FSUNE_D_DESC : MSA_3RF_DESC_BASE<"fsune.d", int_mips_fsune_d,
+ MSA128DOpnd>;
+
+class FTINT_S_W_DESC : MSA_2RF_DESC_BASE<"ftint_s.w", int_mips_ftint_s_w,
+ MSA128WOpnd>;
+class FTINT_S_D_DESC : MSA_2RF_DESC_BASE<"ftint_s.d", int_mips_ftint_s_d,
+ MSA128DOpnd>;
+
+class FTINT_U_W_DESC : MSA_2RF_DESC_BASE<"ftint_u.w", int_mips_ftint_u_w,
+ MSA128WOpnd>;
+class FTINT_U_D_DESC : MSA_2RF_DESC_BASE<"ftint_u.d", int_mips_ftint_u_d,
+ MSA128DOpnd>;
+
+class FTQ_H_DESC : MSA_3RF_DESC_BASE<"ftq.h", int_mips_ftq_h,
+ MSA128HOpnd, MSA128WOpnd, MSA128WOpnd>;
+class FTQ_W_DESC : MSA_3RF_DESC_BASE<"ftq.w", int_mips_ftq_w,
+ MSA128WOpnd, MSA128DOpnd, MSA128DOpnd>;
+
+class FTRUNC_S_W_DESC : MSA_2RF_DESC_BASE<"ftrunc_s.w", fp_to_sint,
+ MSA128WOpnd>;
+class FTRUNC_S_D_DESC : MSA_2RF_DESC_BASE<"ftrunc_s.d", fp_to_sint,
+ MSA128DOpnd>;
+
+class FTRUNC_U_W_DESC : MSA_2RF_DESC_BASE<"ftrunc_u.w", fp_to_uint,
+ MSA128WOpnd>;
+class FTRUNC_U_D_DESC : MSA_2RF_DESC_BASE<"ftrunc_u.d", fp_to_uint,
+ MSA128DOpnd>;
+
+class HADD_S_H_DESC : MSA_3R_DESC_BASE<"hadd_s.h", int_mips_hadd_s_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>;
+class HADD_S_W_DESC : MSA_3R_DESC_BASE<"hadd_s.w", int_mips_hadd_s_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>;
+class HADD_S_D_DESC : MSA_3R_DESC_BASE<"hadd_s.d", int_mips_hadd_s_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>;
+
+class HADD_U_H_DESC : MSA_3R_DESC_BASE<"hadd_u.h", int_mips_hadd_u_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>;
+class HADD_U_W_DESC : MSA_3R_DESC_BASE<"hadd_u.w", int_mips_hadd_u_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>;
+class HADD_U_D_DESC : MSA_3R_DESC_BASE<"hadd_u.d", int_mips_hadd_u_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>;
+
+class HSUB_S_H_DESC : MSA_3R_DESC_BASE<"hsub_s.h", int_mips_hsub_s_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>;
+class HSUB_S_W_DESC : MSA_3R_DESC_BASE<"hsub_s.w", int_mips_hsub_s_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>;
+class HSUB_S_D_DESC : MSA_3R_DESC_BASE<"hsub_s.d", int_mips_hsub_s_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>;
+
+class HSUB_U_H_DESC : MSA_3R_DESC_BASE<"hsub_u.h", int_mips_hsub_u_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>;
+class HSUB_U_W_DESC : MSA_3R_DESC_BASE<"hsub_u.w", int_mips_hsub_u_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>;
+class HSUB_U_D_DESC : MSA_3R_DESC_BASE<"hsub_u.d", int_mips_hsub_u_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>;
+
+class ILVEV_B_DESC : MSA_3R_DESC_BASE<"ilvev.b", MipsILVEV, MSA128BOpnd>;
+class ILVEV_H_DESC : MSA_3R_DESC_BASE<"ilvev.h", MipsILVEV, MSA128HOpnd>;
+class ILVEV_W_DESC : MSA_3R_DESC_BASE<"ilvev.w", MipsILVEV, MSA128WOpnd>;
+class ILVEV_D_DESC : MSA_3R_DESC_BASE<"ilvev.d", MipsILVEV, MSA128DOpnd>;
+
+class ILVL_B_DESC : MSA_3R_DESC_BASE<"ilvl.b", MipsILVL, MSA128BOpnd>;
+class ILVL_H_DESC : MSA_3R_DESC_BASE<"ilvl.h", MipsILVL, MSA128HOpnd>;
+class ILVL_W_DESC : MSA_3R_DESC_BASE<"ilvl.w", MipsILVL, MSA128WOpnd>;
+class ILVL_D_DESC : MSA_3R_DESC_BASE<"ilvl.d", MipsILVL, MSA128DOpnd>;
+
+class ILVOD_B_DESC : MSA_3R_DESC_BASE<"ilvod.b", MipsILVOD, MSA128BOpnd>;
+class ILVOD_H_DESC : MSA_3R_DESC_BASE<"ilvod.h", MipsILVOD, MSA128HOpnd>;
+class ILVOD_W_DESC : MSA_3R_DESC_BASE<"ilvod.w", MipsILVOD, MSA128WOpnd>;
+class ILVOD_D_DESC : MSA_3R_DESC_BASE<"ilvod.d", MipsILVOD, MSA128DOpnd>;
+
+class ILVR_B_DESC : MSA_3R_DESC_BASE<"ilvr.b", MipsILVR, MSA128BOpnd>;
+class ILVR_H_DESC : MSA_3R_DESC_BASE<"ilvr.h", MipsILVR, MSA128HOpnd>;
+class ILVR_W_DESC : MSA_3R_DESC_BASE<"ilvr.w", MipsILVR, MSA128WOpnd>;
+class ILVR_D_DESC : MSA_3R_DESC_BASE<"ilvr.d", MipsILVR, MSA128DOpnd>;
+
+class INSERT_B_DESC : MSA_INSERT_DESC_BASE<"insert.b", vinsert_v16i8,
+ MSA128BOpnd, GPR32Opnd>;
+class INSERT_H_DESC : MSA_INSERT_DESC_BASE<"insert.h", vinsert_v8i16,
+ MSA128HOpnd, GPR32Opnd>;
+class INSERT_W_DESC : MSA_INSERT_DESC_BASE<"insert.w", vinsert_v4i32,
+ MSA128WOpnd, GPR32Opnd>;
+
+class INSERT_FW_PSEUDO_DESC : MSA_INSERT_PSEUDO_BASE<vector_insert, v4f32,
+ MSA128WOpnd, FGR32Opnd>;
+class INSERT_FD_PSEUDO_DESC : MSA_INSERT_PSEUDO_BASE<vector_insert, v2f64,
+ MSA128DOpnd, FGR64Opnd>;
+
+class INSVE_B_DESC : MSA_INSVE_DESC_BASE<"insve.b", int_mips_insve_b,
+ MSA128BOpnd>;
+class INSVE_H_DESC : MSA_INSVE_DESC_BASE<"insve.h", int_mips_insve_h,
+ MSA128HOpnd>;
+class INSVE_W_DESC : MSA_INSVE_DESC_BASE<"insve.w", int_mips_insve_w,
+ MSA128WOpnd>;
+class INSVE_D_DESC : MSA_INSVE_DESC_BASE<"insve.d", int_mips_insve_d,
+ MSA128DOpnd>;
+
+class LD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ValueType TyNode, RegisterOperand ROWD,
+ Operand MemOpnd = mem, ComplexPattern Addr = addrRegImm,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins MemOpnd:$addr);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $addr");
+ list<dag> Pattern = [(set ROWD:$wd, (TyNode (OpNode Addr:$addr)))];
+ InstrItinClass Itinerary = itin;
+ string DecoderMethod = "DecodeMSA128Mem";
+}
+
+class LD_B_DESC : LD_DESC_BASE<"ld.b", load, v16i8, MSA128BOpnd>;
+class LD_H_DESC : LD_DESC_BASE<"ld.h", load, v8i16, MSA128HOpnd>;
+class LD_W_DESC : LD_DESC_BASE<"ld.w", load, v4i32, MSA128WOpnd>;
+class LD_D_DESC : LD_DESC_BASE<"ld.d", load, v2i64, MSA128DOpnd>;
+
+class LDI_B_DESC : MSA_I10_LDI_DESC_BASE<"ldi.b", MSA128BOpnd>;
+class LDI_H_DESC : MSA_I10_LDI_DESC_BASE<"ldi.h", MSA128HOpnd>;
+class LDI_W_DESC : MSA_I10_LDI_DESC_BASE<"ldi.w", MSA128WOpnd>;
+class LDI_D_DESC : MSA_I10_LDI_DESC_BASE<"ldi.d", MSA128DOpnd>;
+
+class LSA_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins GPR32Opnd:$rs, GPR32Opnd:$rt, LSAImm:$sa);
+ string AsmString = "lsa\t$rd, $rs, $rt, $sa";
+ list<dag> Pattern = [(set GPR32Opnd:$rd, (add GPR32Opnd:$rs,
+ (shl GPR32Opnd:$rt,
+ immZExt2Lsa:$sa)))];
+ InstrItinClass Itinerary = NoItinerary;
+}
+
+class MADD_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"madd_q.h", int_mips_madd_q_h,
+ MSA128HOpnd>;
+class MADD_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"madd_q.w", int_mips_madd_q_w,
+ MSA128WOpnd>;
+
+class MADDR_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"maddr_q.h", int_mips_maddr_q_h,
+ MSA128HOpnd>;
+class MADDR_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"maddr_q.w", int_mips_maddr_q_w,
+ MSA128WOpnd>;
+
+class MADDV_B_DESC : MSA_3R_4R_DESC_BASE<"maddv.b", muladd, MSA128BOpnd>;
+class MADDV_H_DESC : MSA_3R_4R_DESC_BASE<"maddv.h", muladd, MSA128HOpnd>;
+class MADDV_W_DESC : MSA_3R_4R_DESC_BASE<"maddv.w", muladd, MSA128WOpnd>;
+class MADDV_D_DESC : MSA_3R_4R_DESC_BASE<"maddv.d", muladd, MSA128DOpnd>;
+
+class MAX_A_B_DESC : MSA_3R_DESC_BASE<"max_a.b", int_mips_max_a_b, MSA128BOpnd>;
+class MAX_A_H_DESC : MSA_3R_DESC_BASE<"max_a.h", int_mips_max_a_h, MSA128HOpnd>;
+class MAX_A_W_DESC : MSA_3R_DESC_BASE<"max_a.w", int_mips_max_a_w, MSA128WOpnd>;
+class MAX_A_D_DESC : MSA_3R_DESC_BASE<"max_a.d", int_mips_max_a_d, MSA128DOpnd>;
+
+class MAX_S_B_DESC : MSA_3R_DESC_BASE<"max_s.b", MipsVSMax, MSA128BOpnd>;
+class MAX_S_H_DESC : MSA_3R_DESC_BASE<"max_s.h", MipsVSMax, MSA128HOpnd>;
+class MAX_S_W_DESC : MSA_3R_DESC_BASE<"max_s.w", MipsVSMax, MSA128WOpnd>;
+class MAX_S_D_DESC : MSA_3R_DESC_BASE<"max_s.d", MipsVSMax, MSA128DOpnd>;
+
+class MAX_U_B_DESC : MSA_3R_DESC_BASE<"max_u.b", MipsVUMax, MSA128BOpnd>;
+class MAX_U_H_DESC : MSA_3R_DESC_BASE<"max_u.h", MipsVUMax, MSA128HOpnd>;
+class MAX_U_W_DESC : MSA_3R_DESC_BASE<"max_u.w", MipsVUMax, MSA128WOpnd>;
+class MAX_U_D_DESC : MSA_3R_DESC_BASE<"max_u.d", MipsVUMax, MSA128DOpnd>;
+
+class MAXI_S_B_DESC : MSA_I5_DESC_BASE<"maxi_s.b", MipsVSMax, vsplati8_simm5,
+ MSA128BOpnd>;
+class MAXI_S_H_DESC : MSA_I5_DESC_BASE<"maxi_s.h", MipsVSMax, vsplati16_simm5,
+ MSA128HOpnd>;
+class MAXI_S_W_DESC : MSA_I5_DESC_BASE<"maxi_s.w", MipsVSMax, vsplati32_simm5,
+ MSA128WOpnd>;
+class MAXI_S_D_DESC : MSA_I5_DESC_BASE<"maxi_s.d", MipsVSMax, vsplati64_simm5,
+ MSA128DOpnd>;
+
+class MAXI_U_B_DESC : MSA_I5_DESC_BASE<"maxi_u.b", MipsVUMax, vsplati8_uimm5,
+ MSA128BOpnd>;
+class MAXI_U_H_DESC : MSA_I5_DESC_BASE<"maxi_u.h", MipsVUMax, vsplati16_uimm5,
+ MSA128HOpnd>;
+class MAXI_U_W_DESC : MSA_I5_DESC_BASE<"maxi_u.w", MipsVUMax, vsplati32_uimm5,
+ MSA128WOpnd>;
+class MAXI_U_D_DESC : MSA_I5_DESC_BASE<"maxi_u.d", MipsVUMax, vsplati64_uimm5,
+ MSA128DOpnd>;
+
+class MIN_A_B_DESC : MSA_3R_DESC_BASE<"min_a.b", int_mips_min_a_b, MSA128BOpnd>;
+class MIN_A_H_DESC : MSA_3R_DESC_BASE<"min_a.h", int_mips_min_a_h, MSA128HOpnd>;
+class MIN_A_W_DESC : MSA_3R_DESC_BASE<"min_a.w", int_mips_min_a_w, MSA128WOpnd>;
+class MIN_A_D_DESC : MSA_3R_DESC_BASE<"min_a.d", int_mips_min_a_d, MSA128DOpnd>;
+
+class MIN_S_B_DESC : MSA_3R_DESC_BASE<"min_s.b", MipsVSMin, MSA128BOpnd>;
+class MIN_S_H_DESC : MSA_3R_DESC_BASE<"min_s.h", MipsVSMin, MSA128HOpnd>;
+class MIN_S_W_DESC : MSA_3R_DESC_BASE<"min_s.w", MipsVSMin, MSA128WOpnd>;
+class MIN_S_D_DESC : MSA_3R_DESC_BASE<"min_s.d", MipsVSMin, MSA128DOpnd>;
+
+class MIN_U_B_DESC : MSA_3R_DESC_BASE<"min_u.b", MipsVUMin, MSA128BOpnd>;
+class MIN_U_H_DESC : MSA_3R_DESC_BASE<"min_u.h", MipsVUMin, MSA128HOpnd>;
+class MIN_U_W_DESC : MSA_3R_DESC_BASE<"min_u.w", MipsVUMin, MSA128WOpnd>;
+class MIN_U_D_DESC : MSA_3R_DESC_BASE<"min_u.d", MipsVUMin, MSA128DOpnd>;
+
+class MINI_S_B_DESC : MSA_I5_DESC_BASE<"mini_s.b", MipsVSMin, vsplati8_simm5,
+ MSA128BOpnd>;
+class MINI_S_H_DESC : MSA_I5_DESC_BASE<"mini_s.h", MipsVSMin, vsplati16_simm5,
+ MSA128HOpnd>;
+class MINI_S_W_DESC : MSA_I5_DESC_BASE<"mini_s.w", MipsVSMin, vsplati32_simm5,
+ MSA128WOpnd>;
+class MINI_S_D_DESC : MSA_I5_DESC_BASE<"mini_s.d", MipsVSMin, vsplati64_simm5,
+ MSA128DOpnd>;
+
+class MINI_U_B_DESC : MSA_I5_DESC_BASE<"mini_u.b", MipsVUMin, vsplati8_uimm5,
+ MSA128BOpnd>;
+class MINI_U_H_DESC : MSA_I5_DESC_BASE<"mini_u.h", MipsVUMin, vsplati16_uimm5,
+ MSA128HOpnd>;
+class MINI_U_W_DESC : MSA_I5_DESC_BASE<"mini_u.w", MipsVUMin, vsplati32_uimm5,
+ MSA128WOpnd>;
+class MINI_U_D_DESC : MSA_I5_DESC_BASE<"mini_u.d", MipsVUMin, vsplati64_uimm5,
+ MSA128DOpnd>;
+
+class MOD_S_B_DESC : MSA_3R_DESC_BASE<"mod_s.b", srem, MSA128BOpnd>;
+class MOD_S_H_DESC : MSA_3R_DESC_BASE<"mod_s.h", srem, MSA128HOpnd>;
+class MOD_S_W_DESC : MSA_3R_DESC_BASE<"mod_s.w", srem, MSA128WOpnd>;
+class MOD_S_D_DESC : MSA_3R_DESC_BASE<"mod_s.d", srem, MSA128DOpnd>;
+
+class MOD_U_B_DESC : MSA_3R_DESC_BASE<"mod_u.b", urem, MSA128BOpnd>;
+class MOD_U_H_DESC : MSA_3R_DESC_BASE<"mod_u.h", urem, MSA128HOpnd>;
+class MOD_U_W_DESC : MSA_3R_DESC_BASE<"mod_u.w", urem, MSA128WOpnd>;
+class MOD_U_D_DESC : MSA_3R_DESC_BASE<"mod_u.d", urem, MSA128DOpnd>;
+
+class MOVE_V_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$ws);
+ string AsmString = "move.v\t$wd, $ws";
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = NoItinerary;
+}
+
+class MSUB_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"msub_q.h", int_mips_msub_q_h,
+ MSA128HOpnd>;
+class MSUB_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"msub_q.w", int_mips_msub_q_w,
+ MSA128WOpnd>;
+
+class MSUBR_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"msubr_q.h", int_mips_msubr_q_h,
+ MSA128HOpnd>;
+class MSUBR_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"msubr_q.w", int_mips_msubr_q_w,
+ MSA128WOpnd>;
+
+class MSUBV_B_DESC : MSA_3R_4R_DESC_BASE<"msubv.b", mulsub, MSA128BOpnd>;
+class MSUBV_H_DESC : MSA_3R_4R_DESC_BASE<"msubv.h", mulsub, MSA128HOpnd>;
+class MSUBV_W_DESC : MSA_3R_4R_DESC_BASE<"msubv.w", mulsub, MSA128WOpnd>;
+class MSUBV_D_DESC : MSA_3R_4R_DESC_BASE<"msubv.d", mulsub, MSA128DOpnd>;
+
+class MUL_Q_H_DESC : MSA_3RF_DESC_BASE<"mul_q.h", int_mips_mul_q_h,
+ MSA128HOpnd>;
+class MUL_Q_W_DESC : MSA_3RF_DESC_BASE<"mul_q.w", int_mips_mul_q_w,
+ MSA128WOpnd>;
+
+class MULR_Q_H_DESC : MSA_3RF_DESC_BASE<"mulr_q.h", int_mips_mulr_q_h,
+ MSA128HOpnd>;
+class MULR_Q_W_DESC : MSA_3RF_DESC_BASE<"mulr_q.w", int_mips_mulr_q_w,
+ MSA128WOpnd>;
+
+class MULV_B_DESC : MSA_3R_DESC_BASE<"mulv.b", mul, MSA128BOpnd>;
+class MULV_H_DESC : MSA_3R_DESC_BASE<"mulv.h", mul, MSA128HOpnd>;
+class MULV_W_DESC : MSA_3R_DESC_BASE<"mulv.w", mul, MSA128WOpnd>;
+class MULV_D_DESC : MSA_3R_DESC_BASE<"mulv.d", mul, MSA128DOpnd>;
+
+class NLOC_B_DESC : MSA_2R_DESC_BASE<"nloc.b", int_mips_nloc_b, MSA128BOpnd>;
+class NLOC_H_DESC : MSA_2R_DESC_BASE<"nloc.h", int_mips_nloc_h, MSA128HOpnd>;
+class NLOC_W_DESC : MSA_2R_DESC_BASE<"nloc.w", int_mips_nloc_w, MSA128WOpnd>;
+class NLOC_D_DESC : MSA_2R_DESC_BASE<"nloc.d", int_mips_nloc_d, MSA128DOpnd>;
+
+class NLZC_B_DESC : MSA_2R_DESC_BASE<"nlzc.b", ctlz, MSA128BOpnd>;
+class NLZC_H_DESC : MSA_2R_DESC_BASE<"nlzc.h", ctlz, MSA128HOpnd>;
+class NLZC_W_DESC : MSA_2R_DESC_BASE<"nlzc.w", ctlz, MSA128WOpnd>;
+class NLZC_D_DESC : MSA_2R_DESC_BASE<"nlzc.d", ctlz, MSA128DOpnd>;
+
+class NOR_V_DESC : MSA_VEC_DESC_BASE<"nor.v", MipsVNOR, MSA128BOpnd>;
+class NOR_V_H_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<MipsVNOR, MSA128HOpnd>;
+class NOR_V_W_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<MipsVNOR, MSA128WOpnd>;
+class NOR_V_D_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<MipsVNOR, MSA128DOpnd>;
+
+class NORI_B_DESC : MSA_I8_DESC_BASE<"nori.b", MipsVNOR, vsplati8_uimm8,
+ MSA128BOpnd>;
+
+class OR_V_DESC : MSA_VEC_DESC_BASE<"or.v", or, MSA128BOpnd>;
+class OR_V_H_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<or, MSA128HOpnd>;
+class OR_V_W_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<or, MSA128WOpnd>;
+class OR_V_D_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<or, MSA128DOpnd>;
+
+class ORI_B_DESC : MSA_I8_DESC_BASE<"ori.b", or, vsplati8_uimm8, MSA128BOpnd>;
+
+class PCKEV_B_DESC : MSA_3R_DESC_BASE<"pckev.b", MipsPCKEV, MSA128BOpnd>;
+class PCKEV_H_DESC : MSA_3R_DESC_BASE<"pckev.h", MipsPCKEV, MSA128HOpnd>;
+class PCKEV_W_DESC : MSA_3R_DESC_BASE<"pckev.w", MipsPCKEV, MSA128WOpnd>;
+class PCKEV_D_DESC : MSA_3R_DESC_BASE<"pckev.d", MipsPCKEV, MSA128DOpnd>;
+
+class PCKOD_B_DESC : MSA_3R_DESC_BASE<"pckod.b", MipsPCKOD, MSA128BOpnd>;
+class PCKOD_H_DESC : MSA_3R_DESC_BASE<"pckod.h", MipsPCKOD, MSA128HOpnd>;
+class PCKOD_W_DESC : MSA_3R_DESC_BASE<"pckod.w", MipsPCKOD, MSA128WOpnd>;
+class PCKOD_D_DESC : MSA_3R_DESC_BASE<"pckod.d", MipsPCKOD, MSA128DOpnd>;
+
+class PCNT_B_DESC : MSA_2R_DESC_BASE<"pcnt.b", ctpop, MSA128BOpnd>;
+class PCNT_H_DESC : MSA_2R_DESC_BASE<"pcnt.h", ctpop, MSA128HOpnd>;
+class PCNT_W_DESC : MSA_2R_DESC_BASE<"pcnt.w", ctpop, MSA128WOpnd>;
+class PCNT_D_DESC : MSA_2R_DESC_BASE<"pcnt.d", ctpop, MSA128DOpnd>;
+
+class SAT_S_B_DESC : MSA_BIT_B_X_DESC_BASE<"sat_s.b", int_mips_sat_s_b,
+ MSA128BOpnd>;
+class SAT_S_H_DESC : MSA_BIT_H_X_DESC_BASE<"sat_s.h", int_mips_sat_s_h,
+ MSA128HOpnd>;
+class SAT_S_W_DESC : MSA_BIT_W_X_DESC_BASE<"sat_s.w", int_mips_sat_s_w,
+ MSA128WOpnd>;
+class SAT_S_D_DESC : MSA_BIT_D_X_DESC_BASE<"sat_s.d", int_mips_sat_s_d,
+ MSA128DOpnd>;
+
+class SAT_U_B_DESC : MSA_BIT_B_X_DESC_BASE<"sat_u.b", int_mips_sat_u_b,
+ MSA128BOpnd>;
+class SAT_U_H_DESC : MSA_BIT_H_X_DESC_BASE<"sat_u.h", int_mips_sat_u_h,
+ MSA128HOpnd>;
+class SAT_U_W_DESC : MSA_BIT_W_X_DESC_BASE<"sat_u.w", int_mips_sat_u_w,
+ MSA128WOpnd>;
+class SAT_U_D_DESC : MSA_BIT_D_X_DESC_BASE<"sat_u.d", int_mips_sat_u_d,
+ MSA128DOpnd>;
+
+class SHF_B_DESC : MSA_I8_SHF_DESC_BASE<"shf.b", MSA128BOpnd>;
+class SHF_H_DESC : MSA_I8_SHF_DESC_BASE<"shf.h", MSA128HOpnd>;
+class SHF_W_DESC : MSA_I8_SHF_DESC_BASE<"shf.w", MSA128WOpnd>;
+
+class SLD_B_DESC : MSA_3R_SLD_DESC_BASE<"sld.b", int_mips_sld_b, MSA128BOpnd>;
+class SLD_H_DESC : MSA_3R_SLD_DESC_BASE<"sld.h", int_mips_sld_h, MSA128HOpnd>;
+class SLD_W_DESC : MSA_3R_SLD_DESC_BASE<"sld.w", int_mips_sld_w, MSA128WOpnd>;
+class SLD_D_DESC : MSA_3R_SLD_DESC_BASE<"sld.d", int_mips_sld_d, MSA128DOpnd>;
+
+class SLDI_B_DESC : MSA_ELM_DESC_BASE<"sldi.b", int_mips_sldi_b, MSA128BOpnd>;
+class SLDI_H_DESC : MSA_ELM_DESC_BASE<"sldi.h", int_mips_sldi_h, MSA128HOpnd>;
+class SLDI_W_DESC : MSA_ELM_DESC_BASE<"sldi.w", int_mips_sldi_w, MSA128WOpnd>;
+class SLDI_D_DESC : MSA_ELM_DESC_BASE<"sldi.d", int_mips_sldi_d, MSA128DOpnd>;
+
+class SLL_B_DESC : MSA_3R_DESC_BASE<"sll.b", shl, MSA128BOpnd>;
+class SLL_H_DESC : MSA_3R_DESC_BASE<"sll.h", shl, MSA128HOpnd>;
+class SLL_W_DESC : MSA_3R_DESC_BASE<"sll.w", shl, MSA128WOpnd>;
+class SLL_D_DESC : MSA_3R_DESC_BASE<"sll.d", shl, MSA128DOpnd>;
+
+class SLLI_B_DESC : MSA_BIT_SPLAT_DESC_BASE<"slli.b", shl, vsplati8_uimm3,
+ MSA128BOpnd>;
+class SLLI_H_DESC : MSA_BIT_SPLAT_DESC_BASE<"slli.h", shl, vsplati16_uimm4,
+ MSA128HOpnd>;
+class SLLI_W_DESC : MSA_BIT_SPLAT_DESC_BASE<"slli.w", shl, vsplati32_uimm5,
+ MSA128WOpnd>;
+class SLLI_D_DESC : MSA_BIT_SPLAT_DESC_BASE<"slli.d", shl, vsplati64_uimm6,
+ MSA128DOpnd>;
+
+class SPLAT_B_DESC : MSA_3R_SPLAT_DESC_BASE<"splat.b", vsplati8_elt,
+ MSA128BOpnd>;
+class SPLAT_H_DESC : MSA_3R_SPLAT_DESC_BASE<"splat.h", vsplati16_elt,
+ MSA128HOpnd>;
+class SPLAT_W_DESC : MSA_3R_SPLAT_DESC_BASE<"splat.w", vsplati32_elt,
+ MSA128WOpnd>;
+class SPLAT_D_DESC : MSA_3R_SPLAT_DESC_BASE<"splat.d", vsplati64_elt,
+ MSA128DOpnd>;
+
+class SPLATI_B_DESC : MSA_ELM_SPLAT_DESC_BASE<"splati.b", vsplati8_uimm4,
+ MSA128BOpnd>;
+class SPLATI_H_DESC : MSA_ELM_SPLAT_DESC_BASE<"splati.h", vsplati16_uimm3,
+ MSA128HOpnd>;
+class SPLATI_W_DESC : MSA_ELM_SPLAT_DESC_BASE<"splati.w", vsplati32_uimm2,
+ MSA128WOpnd>;
+class SPLATI_D_DESC : MSA_ELM_SPLAT_DESC_BASE<"splati.d", vsplati64_uimm1,
+ MSA128DOpnd>;
+
+class SRA_B_DESC : MSA_3R_DESC_BASE<"sra.b", sra, MSA128BOpnd>;
+class SRA_H_DESC : MSA_3R_DESC_BASE<"sra.h", sra, MSA128HOpnd>;
+class SRA_W_DESC : MSA_3R_DESC_BASE<"sra.w", sra, MSA128WOpnd>;
+class SRA_D_DESC : MSA_3R_DESC_BASE<"sra.d", sra, MSA128DOpnd>;
+
+class SRAI_B_DESC : MSA_BIT_SPLAT_DESC_BASE<"srai.b", sra, vsplati8_uimm3,
+ MSA128BOpnd>;
+class SRAI_H_DESC : MSA_BIT_SPLAT_DESC_BASE<"srai.h", sra, vsplati16_uimm4,
+ MSA128HOpnd>;
+class SRAI_W_DESC : MSA_BIT_SPLAT_DESC_BASE<"srai.w", sra, vsplati32_uimm5,
+ MSA128WOpnd>;
+class SRAI_D_DESC : MSA_BIT_SPLAT_DESC_BASE<"srai.d", sra, vsplati64_uimm6,
+ MSA128DOpnd>;
+
+class SRAR_B_DESC : MSA_3R_DESC_BASE<"srar.b", int_mips_srar_b, MSA128BOpnd>;
+class SRAR_H_DESC : MSA_3R_DESC_BASE<"srar.h", int_mips_srar_h, MSA128HOpnd>;
+class SRAR_W_DESC : MSA_3R_DESC_BASE<"srar.w", int_mips_srar_w, MSA128WOpnd>;
+class SRAR_D_DESC : MSA_3R_DESC_BASE<"srar.d", int_mips_srar_d, MSA128DOpnd>;
+
+class SRARI_B_DESC : MSA_BIT_B_X_DESC_BASE<"srari.b", int_mips_srari_b,
+ MSA128BOpnd>;
+class SRARI_H_DESC : MSA_BIT_H_X_DESC_BASE<"srari.h", int_mips_srari_h,
+ MSA128HOpnd>;
+class SRARI_W_DESC : MSA_BIT_W_X_DESC_BASE<"srari.w", int_mips_srari_w,
+ MSA128WOpnd>;
+class SRARI_D_DESC : MSA_BIT_D_X_DESC_BASE<"srari.d", int_mips_srari_d,
+ MSA128DOpnd>;
+
+class SRL_B_DESC : MSA_3R_DESC_BASE<"srl.b", srl, MSA128BOpnd>;
+class SRL_H_DESC : MSA_3R_DESC_BASE<"srl.h", srl, MSA128HOpnd>;
+class SRL_W_DESC : MSA_3R_DESC_BASE<"srl.w", srl, MSA128WOpnd>;
+class SRL_D_DESC : MSA_3R_DESC_BASE<"srl.d", srl, MSA128DOpnd>;
+
+class SRLI_B_DESC : MSA_BIT_SPLAT_DESC_BASE<"srli.b", srl, vsplati8_uimm3,
+ MSA128BOpnd>;
+class SRLI_H_DESC : MSA_BIT_SPLAT_DESC_BASE<"srli.h", srl, vsplati16_uimm4,
+ MSA128HOpnd>;
+class SRLI_W_DESC : MSA_BIT_SPLAT_DESC_BASE<"srli.w", srl, vsplati32_uimm5,
+ MSA128WOpnd>;
+class SRLI_D_DESC : MSA_BIT_SPLAT_DESC_BASE<"srli.d", srl, vsplati64_uimm6,
+ MSA128DOpnd>;
+
+class SRLR_B_DESC : MSA_3R_DESC_BASE<"srlr.b", int_mips_srlr_b, MSA128BOpnd>;
+class SRLR_H_DESC : MSA_3R_DESC_BASE<"srlr.h", int_mips_srlr_h, MSA128HOpnd>;
+class SRLR_W_DESC : MSA_3R_DESC_BASE<"srlr.w", int_mips_srlr_w, MSA128WOpnd>;
+class SRLR_D_DESC : MSA_3R_DESC_BASE<"srlr.d", int_mips_srlr_d, MSA128DOpnd>;
+
+class SRLRI_B_DESC : MSA_BIT_B_X_DESC_BASE<"srlri.b", int_mips_srlri_b,
+ MSA128BOpnd>;
+class SRLRI_H_DESC : MSA_BIT_H_X_DESC_BASE<"srlri.h", int_mips_srlri_h,
+ MSA128HOpnd>;
+class SRLRI_W_DESC : MSA_BIT_W_X_DESC_BASE<"srlri.w", int_mips_srlri_w,
+ MSA128WOpnd>;
+class SRLRI_D_DESC : MSA_BIT_D_X_DESC_BASE<"srlri.d", int_mips_srlri_d,
+ MSA128DOpnd>;
+
+class ST_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ValueType TyNode, RegisterOperand ROWD,
+ Operand MemOpnd = mem, ComplexPattern Addr = addrRegImm,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins ROWD:$wd, MemOpnd:$addr);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $addr");
+ list<dag> Pattern = [(OpNode (TyNode ROWD:$wd), Addr:$addr)];
+ InstrItinClass Itinerary = itin;
+ string DecoderMethod = "DecodeMSA128Mem";
+}
+
+class ST_B_DESC : ST_DESC_BASE<"st.b", store, v16i8, MSA128BOpnd>;
+class ST_H_DESC : ST_DESC_BASE<"st.h", store, v8i16, MSA128HOpnd>;
+class ST_W_DESC : ST_DESC_BASE<"st.w", store, v4i32, MSA128WOpnd>;
+class ST_D_DESC : ST_DESC_BASE<"st.d", store, v2i64, MSA128DOpnd>;
+
+class SUBS_S_B_DESC : MSA_3R_DESC_BASE<"subs_s.b", int_mips_subs_s_b,
+ MSA128BOpnd>;
+class SUBS_S_H_DESC : MSA_3R_DESC_BASE<"subs_s.h", int_mips_subs_s_h,
+ MSA128HOpnd>;
+class SUBS_S_W_DESC : MSA_3R_DESC_BASE<"subs_s.w", int_mips_subs_s_w,
+ MSA128WOpnd>;
+class SUBS_S_D_DESC : MSA_3R_DESC_BASE<"subs_s.d", int_mips_subs_s_d,
+ MSA128DOpnd>;
+
+class SUBS_U_B_DESC : MSA_3R_DESC_BASE<"subs_u.b", int_mips_subs_u_b,
+ MSA128BOpnd>;
+class SUBS_U_H_DESC : MSA_3R_DESC_BASE<"subs_u.h", int_mips_subs_u_h,
+ MSA128HOpnd>;
+class SUBS_U_W_DESC : MSA_3R_DESC_BASE<"subs_u.w", int_mips_subs_u_w,
+ MSA128WOpnd>;
+class SUBS_U_D_DESC : MSA_3R_DESC_BASE<"subs_u.d", int_mips_subs_u_d,
+ MSA128DOpnd>;
+
+class SUBSUS_U_B_DESC : MSA_3R_DESC_BASE<"subsus_u.b", int_mips_subsus_u_b,
+ MSA128BOpnd>;
+class SUBSUS_U_H_DESC : MSA_3R_DESC_BASE<"subsus_u.h", int_mips_subsus_u_h,
+ MSA128HOpnd>;
+class SUBSUS_U_W_DESC : MSA_3R_DESC_BASE<"subsus_u.w", int_mips_subsus_u_w,
+ MSA128WOpnd>;
+class SUBSUS_U_D_DESC : MSA_3R_DESC_BASE<"subsus_u.d", int_mips_subsus_u_d,
+ MSA128DOpnd>;
+
+class SUBSUU_S_B_DESC : MSA_3R_DESC_BASE<"subsuu_s.b", int_mips_subsuu_s_b,
+ MSA128BOpnd>;
+class SUBSUU_S_H_DESC : MSA_3R_DESC_BASE<"subsuu_s.h", int_mips_subsuu_s_h,
+ MSA128HOpnd>;
+class SUBSUU_S_W_DESC : MSA_3R_DESC_BASE<"subsuu_s.w", int_mips_subsuu_s_w,
+ MSA128WOpnd>;
+class SUBSUU_S_D_DESC : MSA_3R_DESC_BASE<"subsuu_s.d", int_mips_subsuu_s_d,
+ MSA128DOpnd>;
+
+class SUBV_B_DESC : MSA_3R_DESC_BASE<"subv.b", sub, MSA128BOpnd>;
+class SUBV_H_DESC : MSA_3R_DESC_BASE<"subv.h", sub, MSA128HOpnd>;
+class SUBV_W_DESC : MSA_3R_DESC_BASE<"subv.w", sub, MSA128WOpnd>;
+class SUBV_D_DESC : MSA_3R_DESC_BASE<"subv.d", sub, MSA128DOpnd>;
+
+class SUBVI_B_DESC : MSA_I5_DESC_BASE<"subvi.b", sub, vsplati8_uimm5,
+ MSA128BOpnd>;
+class SUBVI_H_DESC : MSA_I5_DESC_BASE<"subvi.h", sub, vsplati16_uimm5,
+ MSA128HOpnd>;
+class SUBVI_W_DESC : MSA_I5_DESC_BASE<"subvi.w", sub, vsplati32_uimm5,
+ MSA128WOpnd>;
+class SUBVI_D_DESC : MSA_I5_DESC_BASE<"subvi.d", sub, vsplati64_uimm5,
+ MSA128DOpnd>;
+
+class VSHF_B_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.b", MSA128BOpnd>;
+class VSHF_H_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.h", MSA128HOpnd>;
+class VSHF_W_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.w", MSA128WOpnd>;
+class VSHF_D_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.d", MSA128DOpnd>;
+
+class XOR_V_DESC : MSA_VEC_DESC_BASE<"xor.v", xor, MSA128BOpnd>;
+class XOR_V_H_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<xor, MSA128HOpnd>;
+class XOR_V_W_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<xor, MSA128WOpnd>;
+class XOR_V_D_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<xor, MSA128DOpnd>;
+
+class XORI_B_DESC : MSA_I8_DESC_BASE<"xori.b", xor, vsplati8_uimm8,
+ MSA128BOpnd>;
+
+// Instruction defs.
+def ADD_A_B : ADD_A_B_ENC, ADD_A_B_DESC;
+def ADD_A_H : ADD_A_H_ENC, ADD_A_H_DESC;
+def ADD_A_W : ADD_A_W_ENC, ADD_A_W_DESC;
+def ADD_A_D : ADD_A_D_ENC, ADD_A_D_DESC;
+
+def ADDS_A_B : ADDS_A_B_ENC, ADDS_A_B_DESC;
+def ADDS_A_H : ADDS_A_H_ENC, ADDS_A_H_DESC;
+def ADDS_A_W : ADDS_A_W_ENC, ADDS_A_W_DESC;
+def ADDS_A_D : ADDS_A_D_ENC, ADDS_A_D_DESC;
+
+def ADDS_S_B : ADDS_S_B_ENC, ADDS_S_B_DESC;
+def ADDS_S_H : ADDS_S_H_ENC, ADDS_S_H_DESC;
+def ADDS_S_W : ADDS_S_W_ENC, ADDS_S_W_DESC;
+def ADDS_S_D : ADDS_S_D_ENC, ADDS_S_D_DESC;
+
+def ADDS_U_B : ADDS_U_B_ENC, ADDS_U_B_DESC;
+def ADDS_U_H : ADDS_U_H_ENC, ADDS_U_H_DESC;
+def ADDS_U_W : ADDS_U_W_ENC, ADDS_U_W_DESC;
+def ADDS_U_D : ADDS_U_D_ENC, ADDS_U_D_DESC;
+
+def ADDV_B : ADDV_B_ENC, ADDV_B_DESC;
+def ADDV_H : ADDV_H_ENC, ADDV_H_DESC;
+def ADDV_W : ADDV_W_ENC, ADDV_W_DESC;
+def ADDV_D : ADDV_D_ENC, ADDV_D_DESC;
+
+def ADDVI_B : ADDVI_B_ENC, ADDVI_B_DESC;
+def ADDVI_H : ADDVI_H_ENC, ADDVI_H_DESC;
+def ADDVI_W : ADDVI_W_ENC, ADDVI_W_DESC;
+def ADDVI_D : ADDVI_D_ENC, ADDVI_D_DESC;
+
+def AND_V : AND_V_ENC, AND_V_DESC;
+def AND_V_H_PSEUDO : AND_V_H_PSEUDO_DESC,
+ PseudoInstExpansion<(AND_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def AND_V_W_PSEUDO : AND_V_W_PSEUDO_DESC,
+ PseudoInstExpansion<(AND_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def AND_V_D_PSEUDO : AND_V_D_PSEUDO_DESC,
+ PseudoInstExpansion<(AND_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
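+// These typed H/W/D pseudos exist only so selection patterns can match the
+// bitwise operation on v8i16/v4i32/v2i64 operands; the operation itself is
+// element-size agnostic, so they all expand to the single byte-wide and.v
+// instruction. The same scheme is used for nor.v, or.v and xor.v below.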
+
+def ANDI_B : ANDI_B_ENC, ANDI_B_DESC;
+
+def ASUB_S_B : ASUB_S_B_ENC, ASUB_S_B_DESC;
+def ASUB_S_H : ASUB_S_H_ENC, ASUB_S_H_DESC;
+def ASUB_S_W : ASUB_S_W_ENC, ASUB_S_W_DESC;
+def ASUB_S_D : ASUB_S_D_ENC, ASUB_S_D_DESC;
+
+def ASUB_U_B : ASUB_U_B_ENC, ASUB_U_B_DESC;
+def ASUB_U_H : ASUB_U_H_ENC, ASUB_U_H_DESC;
+def ASUB_U_W : ASUB_U_W_ENC, ASUB_U_W_DESC;
+def ASUB_U_D : ASUB_U_D_ENC, ASUB_U_D_DESC;
+
+def AVE_S_B : AVE_S_B_ENC, AVE_S_B_DESC;
+def AVE_S_H : AVE_S_H_ENC, AVE_S_H_DESC;
+def AVE_S_W : AVE_S_W_ENC, AVE_S_W_DESC;
+def AVE_S_D : AVE_S_D_ENC, AVE_S_D_DESC;
+
+def AVE_U_B : AVE_U_B_ENC, AVE_U_B_DESC;
+def AVE_U_H : AVE_U_H_ENC, AVE_U_H_DESC;
+def AVE_U_W : AVE_U_W_ENC, AVE_U_W_DESC;
+def AVE_U_D : AVE_U_D_ENC, AVE_U_D_DESC;
+
+def AVER_S_B : AVER_S_B_ENC, AVER_S_B_DESC;
+def AVER_S_H : AVER_S_H_ENC, AVER_S_H_DESC;
+def AVER_S_W : AVER_S_W_ENC, AVER_S_W_DESC;
+def AVER_S_D : AVER_S_D_ENC, AVER_S_D_DESC;
+
+def AVER_U_B : AVER_U_B_ENC, AVER_U_B_DESC;
+def AVER_U_H : AVER_U_H_ENC, AVER_U_H_DESC;
+def AVER_U_W : AVER_U_W_ENC, AVER_U_W_DESC;
+def AVER_U_D : AVER_U_D_ENC, AVER_U_D_DESC;
+
+def BCLR_B : BCLR_B_ENC, BCLR_B_DESC;
+def BCLR_H : BCLR_H_ENC, BCLR_H_DESC;
+def BCLR_W : BCLR_W_ENC, BCLR_W_DESC;
+def BCLR_D : BCLR_D_ENC, BCLR_D_DESC;
+
+def BCLRI_B : BCLRI_B_ENC, BCLRI_B_DESC;
+def BCLRI_H : BCLRI_H_ENC, BCLRI_H_DESC;
+def BCLRI_W : BCLRI_W_ENC, BCLRI_W_DESC;
+def BCLRI_D : BCLRI_D_ENC, BCLRI_D_DESC;
+
+def BINSL_B : BINSL_B_ENC, BINSL_B_DESC;
+def BINSL_H : BINSL_H_ENC, BINSL_H_DESC;
+def BINSL_W : BINSL_W_ENC, BINSL_W_DESC;
+def BINSL_D : BINSL_D_ENC, BINSL_D_DESC;
+
+def BINSLI_B : BINSLI_B_ENC, BINSLI_B_DESC;
+def BINSLI_H : BINSLI_H_ENC, BINSLI_H_DESC;
+def BINSLI_W : BINSLI_W_ENC, BINSLI_W_DESC;
+def BINSLI_D : BINSLI_D_ENC, BINSLI_D_DESC;
+
+def BINSR_B : BINSR_B_ENC, BINSR_B_DESC;
+def BINSR_H : BINSR_H_ENC, BINSR_H_DESC;
+def BINSR_W : BINSR_W_ENC, BINSR_W_DESC;
+def BINSR_D : BINSR_D_ENC, BINSR_D_DESC;
+
+def BINSRI_B : BINSRI_B_ENC, BINSRI_B_DESC;
+def BINSRI_H : BINSRI_H_ENC, BINSRI_H_DESC;
+def BINSRI_W : BINSRI_W_ENC, BINSRI_W_DESC;
+def BINSRI_D : BINSRI_D_ENC, BINSRI_D_DESC;
+
+def BMNZ_V : BMNZ_V_ENC, BMNZ_V_DESC;
+
+def BMNZI_B : BMNZI_B_ENC, BMNZI_B_DESC;
+
+def BMZ_V : BMZ_V_ENC, BMZ_V_DESC;
+
+def BMZI_B : BMZI_B_ENC, BMZI_B_DESC;
+
+def BNEG_B : BNEG_B_ENC, BNEG_B_DESC;
+def BNEG_H : BNEG_H_ENC, BNEG_H_DESC;
+def BNEG_W : BNEG_W_ENC, BNEG_W_DESC;
+def BNEG_D : BNEG_D_ENC, BNEG_D_DESC;
+
+def BNEGI_B : BNEGI_B_ENC, BNEGI_B_DESC;
+def BNEGI_H : BNEGI_H_ENC, BNEGI_H_DESC;
+def BNEGI_W : BNEGI_W_ENC, BNEGI_W_DESC;
+def BNEGI_D : BNEGI_D_ENC, BNEGI_D_DESC;
+
+def BNZ_B : BNZ_B_ENC, BNZ_B_DESC;
+def BNZ_H : BNZ_H_ENC, BNZ_H_DESC;
+def BNZ_W : BNZ_W_ENC, BNZ_W_DESC;
+def BNZ_D : BNZ_D_ENC, BNZ_D_DESC;
+
+def BNZ_V : BNZ_V_ENC, BNZ_V_DESC;
+
+def BSEL_V : BSEL_V_ENC, BSEL_V_DESC;
+
+class MSA_BSEL_PSEUDO_BASE<RegisterOperand RO, ValueType Ty> :
+ MipsPseudo<(outs RO:$wd), (ins RO:$wd_in, RO:$ws, RO:$wt),
+ [(set RO:$wd, (Ty (vselect RO:$wd_in, RO:$ws, RO:$wt)))]>,
+ PseudoInstExpansion<(BSEL_V MSA128BOpnd:$wd, MSA128BOpnd:$wd_in,
+ MSA128BOpnd:$ws, MSA128BOpnd:$wt)> {
+ let Constraints = "$wd_in = $wd";
+}
+
+def BSEL_H_PSEUDO : MSA_BSEL_PSEUDO_BASE<MSA128HOpnd, v8i16>;
+def BSEL_W_PSEUDO : MSA_BSEL_PSEUDO_BASE<MSA128WOpnd, v4i32>;
+def BSEL_D_PSEUDO : MSA_BSEL_PSEUDO_BASE<MSA128DOpnd, v2i64>;
+def BSEL_FW_PSEUDO : MSA_BSEL_PSEUDO_BASE<MSA128WOpnd, v4f32>;
+def BSEL_FD_PSEUDO : MSA_BSEL_PSEUDO_BASE<MSA128DOpnd, v2f64>;
+
+def BSELI_B : BSELI_B_ENC, BSELI_B_DESC;
+
+def BSET_B : BSET_B_ENC, BSET_B_DESC;
+def BSET_H : BSET_H_ENC, BSET_H_DESC;
+def BSET_W : BSET_W_ENC, BSET_W_DESC;
+def BSET_D : BSET_D_ENC, BSET_D_DESC;
+
+def BSETI_B : BSETI_B_ENC, BSETI_B_DESC;
+def BSETI_H : BSETI_H_ENC, BSETI_H_DESC;
+def BSETI_W : BSETI_W_ENC, BSETI_W_DESC;
+def BSETI_D : BSETI_D_ENC, BSETI_D_DESC;
+
+def BZ_B : BZ_B_ENC, BZ_B_DESC;
+def BZ_H : BZ_H_ENC, BZ_H_DESC;
+def BZ_W : BZ_W_ENC, BZ_W_DESC;
+def BZ_D : BZ_D_ENC, BZ_D_DESC;
+
+def BZ_V : BZ_V_ENC, BZ_V_DESC;
+
+def CEQ_B : CEQ_B_ENC, CEQ_B_DESC;
+def CEQ_H : CEQ_H_ENC, CEQ_H_DESC;
+def CEQ_W : CEQ_W_ENC, CEQ_W_DESC;
+def CEQ_D : CEQ_D_ENC, CEQ_D_DESC;
+
+def CEQI_B : CEQI_B_ENC, CEQI_B_DESC;
+def CEQI_H : CEQI_H_ENC, CEQI_H_DESC;
+def CEQI_W : CEQI_W_ENC, CEQI_W_DESC;
+def CEQI_D : CEQI_D_ENC, CEQI_D_DESC;
+
+def CFCMSA : CFCMSA_ENC, CFCMSA_DESC;
+
+def CLE_S_B : CLE_S_B_ENC, CLE_S_B_DESC;
+def CLE_S_H : CLE_S_H_ENC, CLE_S_H_DESC;
+def CLE_S_W : CLE_S_W_ENC, CLE_S_W_DESC;
+def CLE_S_D : CLE_S_D_ENC, CLE_S_D_DESC;
+
+def CLE_U_B : CLE_U_B_ENC, CLE_U_B_DESC;
+def CLE_U_H : CLE_U_H_ENC, CLE_U_H_DESC;
+def CLE_U_W : CLE_U_W_ENC, CLE_U_W_DESC;
+def CLE_U_D : CLE_U_D_ENC, CLE_U_D_DESC;
+
+def CLEI_S_B : CLEI_S_B_ENC, CLEI_S_B_DESC;
+def CLEI_S_H : CLEI_S_H_ENC, CLEI_S_H_DESC;
+def CLEI_S_W : CLEI_S_W_ENC, CLEI_S_W_DESC;
+def CLEI_S_D : CLEI_S_D_ENC, CLEI_S_D_DESC;
+
+def CLEI_U_B : CLEI_U_B_ENC, CLEI_U_B_DESC;
+def CLEI_U_H : CLEI_U_H_ENC, CLEI_U_H_DESC;
+def CLEI_U_W : CLEI_U_W_ENC, CLEI_U_W_DESC;
+def CLEI_U_D : CLEI_U_D_ENC, CLEI_U_D_DESC;
+
+def CLT_S_B : CLT_S_B_ENC, CLT_S_B_DESC;
+def CLT_S_H : CLT_S_H_ENC, CLT_S_H_DESC;
+def CLT_S_W : CLT_S_W_ENC, CLT_S_W_DESC;
+def CLT_S_D : CLT_S_D_ENC, CLT_S_D_DESC;
+
+def CLT_U_B : CLT_U_B_ENC, CLT_U_B_DESC;
+def CLT_U_H : CLT_U_H_ENC, CLT_U_H_DESC;
+def CLT_U_W : CLT_U_W_ENC, CLT_U_W_DESC;
+def CLT_U_D : CLT_U_D_ENC, CLT_U_D_DESC;
+
+def CLTI_S_B : CLTI_S_B_ENC, CLTI_S_B_DESC;
+def CLTI_S_H : CLTI_S_H_ENC, CLTI_S_H_DESC;
+def CLTI_S_W : CLTI_S_W_ENC, CLTI_S_W_DESC;
+def CLTI_S_D : CLTI_S_D_ENC, CLTI_S_D_DESC;
+
+def CLTI_U_B : CLTI_U_B_ENC, CLTI_U_B_DESC;
+def CLTI_U_H : CLTI_U_H_ENC, CLTI_U_H_DESC;
+def CLTI_U_W : CLTI_U_W_ENC, CLTI_U_W_DESC;
+def CLTI_U_D : CLTI_U_D_ENC, CLTI_U_D_DESC;
+
+def COPY_S_B : COPY_S_B_ENC, COPY_S_B_DESC;
+def COPY_S_H : COPY_S_H_ENC, COPY_S_H_DESC;
+def COPY_S_W : COPY_S_W_ENC, COPY_S_W_DESC;
+
+def COPY_U_B : COPY_U_B_ENC, COPY_U_B_DESC;
+def COPY_U_H : COPY_U_H_ENC, COPY_U_H_DESC;
+def COPY_U_W : COPY_U_W_ENC, COPY_U_W_DESC;
+
+def COPY_FW_PSEUDO : COPY_FW_PSEUDO_DESC;
+def COPY_FD_PSEUDO : COPY_FD_PSEUDO_DESC;
+
+def CTCMSA : CTCMSA_ENC, CTCMSA_DESC;
+
+def DIV_S_B : DIV_S_B_ENC, DIV_S_B_DESC;
+def DIV_S_H : DIV_S_H_ENC, DIV_S_H_DESC;
+def DIV_S_W : DIV_S_W_ENC, DIV_S_W_DESC;
+def DIV_S_D : DIV_S_D_ENC, DIV_S_D_DESC;
+
+def DIV_U_B : DIV_U_B_ENC, DIV_U_B_DESC;
+def DIV_U_H : DIV_U_H_ENC, DIV_U_H_DESC;
+def DIV_U_W : DIV_U_W_ENC, DIV_U_W_DESC;
+def DIV_U_D : DIV_U_D_ENC, DIV_U_D_DESC;
+
+def DOTP_S_H : DOTP_S_H_ENC, DOTP_S_H_DESC;
+def DOTP_S_W : DOTP_S_W_ENC, DOTP_S_W_DESC;
+def DOTP_S_D : DOTP_S_D_ENC, DOTP_S_D_DESC;
+
+def DOTP_U_H : DOTP_U_H_ENC, DOTP_U_H_DESC;
+def DOTP_U_W : DOTP_U_W_ENC, DOTP_U_W_DESC;
+def DOTP_U_D : DOTP_U_D_ENC, DOTP_U_D_DESC;
+
+def DPADD_S_H : DPADD_S_H_ENC, DPADD_S_H_DESC;
+def DPADD_S_W : DPADD_S_W_ENC, DPADD_S_W_DESC;
+def DPADD_S_D : DPADD_S_D_ENC, DPADD_S_D_DESC;
+
+def DPADD_U_H : DPADD_U_H_ENC, DPADD_U_H_DESC;
+def DPADD_U_W : DPADD_U_W_ENC, DPADD_U_W_DESC;
+def DPADD_U_D : DPADD_U_D_ENC, DPADD_U_D_DESC;
+
+def DPSUB_S_H : DPSUB_S_H_ENC, DPSUB_S_H_DESC;
+def DPSUB_S_W : DPSUB_S_W_ENC, DPSUB_S_W_DESC;
+def DPSUB_S_D : DPSUB_S_D_ENC, DPSUB_S_D_DESC;
+
+def DPSUB_U_H : DPSUB_U_H_ENC, DPSUB_U_H_DESC;
+def DPSUB_U_W : DPSUB_U_W_ENC, DPSUB_U_W_DESC;
+def DPSUB_U_D : DPSUB_U_D_ENC, DPSUB_U_D_DESC;
+
+def FADD_W : FADD_W_ENC, FADD_W_DESC;
+def FADD_D : FADD_D_ENC, FADD_D_DESC;
+
+def FCAF_W : FCAF_W_ENC, FCAF_W_DESC;
+def FCAF_D : FCAF_D_ENC, FCAF_D_DESC;
+
+def FCEQ_W : FCEQ_W_ENC, FCEQ_W_DESC;
+def FCEQ_D : FCEQ_D_ENC, FCEQ_D_DESC;
+
+def FCLE_W : FCLE_W_ENC, FCLE_W_DESC;
+def FCLE_D : FCLE_D_ENC, FCLE_D_DESC;
+
+def FCLT_W : FCLT_W_ENC, FCLT_W_DESC;
+def FCLT_D : FCLT_D_ENC, FCLT_D_DESC;
+
+def FCLASS_W : FCLASS_W_ENC, FCLASS_W_DESC;
+def FCLASS_D : FCLASS_D_ENC, FCLASS_D_DESC;
+
+def FCNE_W : FCNE_W_ENC, FCNE_W_DESC;
+def FCNE_D : FCNE_D_ENC, FCNE_D_DESC;
+
+def FCOR_W : FCOR_W_ENC, FCOR_W_DESC;
+def FCOR_D : FCOR_D_ENC, FCOR_D_DESC;
+
+def FCUEQ_W : FCUEQ_W_ENC, FCUEQ_W_DESC;
+def FCUEQ_D : FCUEQ_D_ENC, FCUEQ_D_DESC;
+
+def FCULE_W : FCULE_W_ENC, FCULE_W_DESC;
+def FCULE_D : FCULE_D_ENC, FCULE_D_DESC;
+
+def FCULT_W : FCULT_W_ENC, FCULT_W_DESC;
+def FCULT_D : FCULT_D_ENC, FCULT_D_DESC;
+
+def FCUN_W : FCUN_W_ENC, FCUN_W_DESC;
+def FCUN_D : FCUN_D_ENC, FCUN_D_DESC;
+
+def FCUNE_W : FCUNE_W_ENC, FCUNE_W_DESC;
+def FCUNE_D : FCUNE_D_ENC, FCUNE_D_DESC;
+
+def FDIV_W : FDIV_W_ENC, FDIV_W_DESC;
+def FDIV_D : FDIV_D_ENC, FDIV_D_DESC;
+
+def FEXDO_H : FEXDO_H_ENC, FEXDO_H_DESC;
+def FEXDO_W : FEXDO_W_ENC, FEXDO_W_DESC;
+
+def FEXP2_W : FEXP2_W_ENC, FEXP2_W_DESC;
+def FEXP2_D : FEXP2_D_ENC, FEXP2_D_DESC;
+def FEXP2_W_1_PSEUDO : FEXP2_W_1_PSEUDO_DESC;
+def FEXP2_D_1_PSEUDO : FEXP2_D_1_PSEUDO_DESC;
+
+def FEXUPL_W : FEXUPL_W_ENC, FEXUPL_W_DESC;
+def FEXUPL_D : FEXUPL_D_ENC, FEXUPL_D_DESC;
+
+def FEXUPR_W : FEXUPR_W_ENC, FEXUPR_W_DESC;
+def FEXUPR_D : FEXUPR_D_ENC, FEXUPR_D_DESC;
+
+def FFINT_S_W : FFINT_S_W_ENC, FFINT_S_W_DESC;
+def FFINT_S_D : FFINT_S_D_ENC, FFINT_S_D_DESC;
+
+def FFINT_U_W : FFINT_U_W_ENC, FFINT_U_W_DESC;
+def FFINT_U_D : FFINT_U_D_ENC, FFINT_U_D_DESC;
+
+def FFQL_W : FFQL_W_ENC, FFQL_W_DESC;
+def FFQL_D : FFQL_D_ENC, FFQL_D_DESC;
+
+def FFQR_W : FFQR_W_ENC, FFQR_W_DESC;
+def FFQR_D : FFQR_D_ENC, FFQR_D_DESC;
+
+def FILL_B : FILL_B_ENC, FILL_B_DESC;
+def FILL_H : FILL_H_ENC, FILL_H_DESC;
+def FILL_W : FILL_W_ENC, FILL_W_DESC;
+def FILL_FW_PSEUDO : FILL_FW_PSEUDO_DESC;
+def FILL_FD_PSEUDO : FILL_FD_PSEUDO_DESC;
+
+def FLOG2_W : FLOG2_W_ENC, FLOG2_W_DESC;
+def FLOG2_D : FLOG2_D_ENC, FLOG2_D_DESC;
+
+def FMADD_W : FMADD_W_ENC, FMADD_W_DESC;
+def FMADD_D : FMADD_D_ENC, FMADD_D_DESC;
+
+def FMAX_W : FMAX_W_ENC, FMAX_W_DESC;
+def FMAX_D : FMAX_D_ENC, FMAX_D_DESC;
+
+def FMAX_A_W : FMAX_A_W_ENC, FMAX_A_W_DESC;
+def FMAX_A_D : FMAX_A_D_ENC, FMAX_A_D_DESC;
+
+def FMIN_W : FMIN_W_ENC, FMIN_W_DESC;
+def FMIN_D : FMIN_D_ENC, FMIN_D_DESC;
+
+def FMIN_A_W : FMIN_A_W_ENC, FMIN_A_W_DESC;
+def FMIN_A_D : FMIN_A_D_ENC, FMIN_A_D_DESC;
+
+def FMSUB_W : FMSUB_W_ENC, FMSUB_W_DESC;
+def FMSUB_D : FMSUB_D_ENC, FMSUB_D_DESC;
+
+def FMUL_W : FMUL_W_ENC, FMUL_W_DESC;
+def FMUL_D : FMUL_D_ENC, FMUL_D_DESC;
+
+def FRINT_W : FRINT_W_ENC, FRINT_W_DESC;
+def FRINT_D : FRINT_D_ENC, FRINT_D_DESC;
+
+def FRCP_W : FRCP_W_ENC, FRCP_W_DESC;
+def FRCP_D : FRCP_D_ENC, FRCP_D_DESC;
+
+def FRSQRT_W : FRSQRT_W_ENC, FRSQRT_W_DESC;
+def FRSQRT_D : FRSQRT_D_ENC, FRSQRT_D_DESC;
+
+def FSAF_W : FSAF_W_ENC, FSAF_W_DESC;
+def FSAF_D : FSAF_D_ENC, FSAF_D_DESC;
+
+def FSEQ_W : FSEQ_W_ENC, FSEQ_W_DESC;
+def FSEQ_D : FSEQ_D_ENC, FSEQ_D_DESC;
+
+def FSLE_W : FSLE_W_ENC, FSLE_W_DESC;
+def FSLE_D : FSLE_D_ENC, FSLE_D_DESC;
+
+def FSLT_W : FSLT_W_ENC, FSLT_W_DESC;
+def FSLT_D : FSLT_D_ENC, FSLT_D_DESC;
+
+def FSNE_W : FSNE_W_ENC, FSNE_W_DESC;
+def FSNE_D : FSNE_D_ENC, FSNE_D_DESC;
+
+def FSOR_W : FSOR_W_ENC, FSOR_W_DESC;
+def FSOR_D : FSOR_D_ENC, FSOR_D_DESC;
+
+def FSQRT_W : FSQRT_W_ENC, FSQRT_W_DESC;
+def FSQRT_D : FSQRT_D_ENC, FSQRT_D_DESC;
+
+def FSUB_W : FSUB_W_ENC, FSUB_W_DESC;
+def FSUB_D : FSUB_D_ENC, FSUB_D_DESC;
+
+def FSUEQ_W : FSUEQ_W_ENC, FSUEQ_W_DESC;
+def FSUEQ_D : FSUEQ_D_ENC, FSUEQ_D_DESC;
+
+def FSULE_W : FSULE_W_ENC, FSULE_W_DESC;
+def FSULE_D : FSULE_D_ENC, FSULE_D_DESC;
+
+def FSULT_W : FSULT_W_ENC, FSULT_W_DESC;
+def FSULT_D : FSULT_D_ENC, FSULT_D_DESC;
+
+def FSUN_W : FSUN_W_ENC, FSUN_W_DESC;
+def FSUN_D : FSUN_D_ENC, FSUN_D_DESC;
+
+def FSUNE_W : FSUNE_W_ENC, FSUNE_W_DESC;
+def FSUNE_D : FSUNE_D_ENC, FSUNE_D_DESC;
+
+def FTINT_S_W : FTINT_S_W_ENC, FTINT_S_W_DESC;
+def FTINT_S_D : FTINT_S_D_ENC, FTINT_S_D_DESC;
+
+def FTINT_U_W : FTINT_U_W_ENC, FTINT_U_W_DESC;
+def FTINT_U_D : FTINT_U_D_ENC, FTINT_U_D_DESC;
+
+def FTQ_H : FTQ_H_ENC, FTQ_H_DESC;
+def FTQ_W : FTQ_W_ENC, FTQ_W_DESC;
+
+def FTRUNC_S_W : FTRUNC_S_W_ENC, FTRUNC_S_W_DESC;
+def FTRUNC_S_D : FTRUNC_S_D_ENC, FTRUNC_S_D_DESC;
+
+def FTRUNC_U_W : FTRUNC_U_W_ENC, FTRUNC_U_W_DESC;
+def FTRUNC_U_D : FTRUNC_U_D_ENC, FTRUNC_U_D_DESC;
+
+def HADD_S_H : HADD_S_H_ENC, HADD_S_H_DESC;
+def HADD_S_W : HADD_S_W_ENC, HADD_S_W_DESC;
+def HADD_S_D : HADD_S_D_ENC, HADD_S_D_DESC;
+
+def HADD_U_H : HADD_U_H_ENC, HADD_U_H_DESC;
+def HADD_U_W : HADD_U_W_ENC, HADD_U_W_DESC;
+def HADD_U_D : HADD_U_D_ENC, HADD_U_D_DESC;
+
+def HSUB_S_H : HSUB_S_H_ENC, HSUB_S_H_DESC;
+def HSUB_S_W : HSUB_S_W_ENC, HSUB_S_W_DESC;
+def HSUB_S_D : HSUB_S_D_ENC, HSUB_S_D_DESC;
+
+def HSUB_U_H : HSUB_U_H_ENC, HSUB_U_H_DESC;
+def HSUB_U_W : HSUB_U_W_ENC, HSUB_U_W_DESC;
+def HSUB_U_D : HSUB_U_D_ENC, HSUB_U_D_DESC;
+
+def ILVEV_B : ILVEV_B_ENC, ILVEV_B_DESC;
+def ILVEV_H : ILVEV_H_ENC, ILVEV_H_DESC;
+def ILVEV_W : ILVEV_W_ENC, ILVEV_W_DESC;
+def ILVEV_D : ILVEV_D_ENC, ILVEV_D_DESC;
+
+def ILVL_B : ILVL_B_ENC, ILVL_B_DESC;
+def ILVL_H : ILVL_H_ENC, ILVL_H_DESC;
+def ILVL_W : ILVL_W_ENC, ILVL_W_DESC;
+def ILVL_D : ILVL_D_ENC, ILVL_D_DESC;
+
+def ILVOD_B : ILVOD_B_ENC, ILVOD_B_DESC;
+def ILVOD_H : ILVOD_H_ENC, ILVOD_H_DESC;
+def ILVOD_W : ILVOD_W_ENC, ILVOD_W_DESC;
+def ILVOD_D : ILVOD_D_ENC, ILVOD_D_DESC;
+
+def ILVR_B : ILVR_B_ENC, ILVR_B_DESC;
+def ILVR_H : ILVR_H_ENC, ILVR_H_DESC;
+def ILVR_W : ILVR_W_ENC, ILVR_W_DESC;
+def ILVR_D : ILVR_D_ENC, ILVR_D_DESC;
+
+def INSERT_B : INSERT_B_ENC, INSERT_B_DESC;
+def INSERT_H : INSERT_H_ENC, INSERT_H_DESC;
+def INSERT_W : INSERT_W_ENC, INSERT_W_DESC;
+
+// INSERT_FW_PSEUDO defined after INSVE_W
+// INSERT_FD_PSEUDO defined after INSVE_D
+
+def INSVE_B : INSVE_B_ENC, INSVE_B_DESC;
+def INSVE_H : INSVE_H_ENC, INSVE_H_DESC;
+def INSVE_W : INSVE_W_ENC, INSVE_W_DESC;
+def INSVE_D : INSVE_D_ENC, INSVE_D_DESC;
+
+def INSERT_FW_PSEUDO : INSERT_FW_PSEUDO_DESC;
+def INSERT_FD_PSEUDO : INSERT_FD_PSEUDO_DESC;
+
+def LD_B : LD_B_ENC, LD_B_DESC;
+def LD_H : LD_H_ENC, LD_H_DESC;
+def LD_W : LD_W_ENC, LD_W_DESC;
+def LD_D : LD_D_ENC, LD_D_DESC;
+
+def LDI_B : LDI_B_ENC, LDI_B_DESC;
+def LDI_H : LDI_H_ENC, LDI_H_DESC;
+def LDI_W : LDI_W_ENC, LDI_W_DESC;
+def LDI_D : LDI_D_ENC, LDI_D_DESC;
+
+def LSA : LSA_ENC, LSA_DESC;
+
+def MADD_Q_H : MADD_Q_H_ENC, MADD_Q_H_DESC;
+def MADD_Q_W : MADD_Q_W_ENC, MADD_Q_W_DESC;
+
+def MADDR_Q_H : MADDR_Q_H_ENC, MADDR_Q_H_DESC;
+def MADDR_Q_W : MADDR_Q_W_ENC, MADDR_Q_W_DESC;
+
+def MADDV_B : MADDV_B_ENC, MADDV_B_DESC;
+def MADDV_H : MADDV_H_ENC, MADDV_H_DESC;
+def MADDV_W : MADDV_W_ENC, MADDV_W_DESC;
+def MADDV_D : MADDV_D_ENC, MADDV_D_DESC;
+
+def MAX_A_B : MAX_A_B_ENC, MAX_A_B_DESC;
+def MAX_A_H : MAX_A_H_ENC, MAX_A_H_DESC;
+def MAX_A_W : MAX_A_W_ENC, MAX_A_W_DESC;
+def MAX_A_D : MAX_A_D_ENC, MAX_A_D_DESC;
+
+def MAX_S_B : MAX_S_B_ENC, MAX_S_B_DESC;
+def MAX_S_H : MAX_S_H_ENC, MAX_S_H_DESC;
+def MAX_S_W : MAX_S_W_ENC, MAX_S_W_DESC;
+def MAX_S_D : MAX_S_D_ENC, MAX_S_D_DESC;
+
+def MAX_U_B : MAX_U_B_ENC, MAX_U_B_DESC;
+def MAX_U_H : MAX_U_H_ENC, MAX_U_H_DESC;
+def MAX_U_W : MAX_U_W_ENC, MAX_U_W_DESC;
+def MAX_U_D : MAX_U_D_ENC, MAX_U_D_DESC;
+
+def MAXI_S_B : MAXI_S_B_ENC, MAXI_S_B_DESC;
+def MAXI_S_H : MAXI_S_H_ENC, MAXI_S_H_DESC;
+def MAXI_S_W : MAXI_S_W_ENC, MAXI_S_W_DESC;
+def MAXI_S_D : MAXI_S_D_ENC, MAXI_S_D_DESC;
+
+def MAXI_U_B : MAXI_U_B_ENC, MAXI_U_B_DESC;
+def MAXI_U_H : MAXI_U_H_ENC, MAXI_U_H_DESC;
+def MAXI_U_W : MAXI_U_W_ENC, MAXI_U_W_DESC;
+def MAXI_U_D : MAXI_U_D_ENC, MAXI_U_D_DESC;
+
+def MIN_A_B : MIN_A_B_ENC, MIN_A_B_DESC;
+def MIN_A_H : MIN_A_H_ENC, MIN_A_H_DESC;
+def MIN_A_W : MIN_A_W_ENC, MIN_A_W_DESC;
+def MIN_A_D : MIN_A_D_ENC, MIN_A_D_DESC;
+
+def MIN_S_B : MIN_S_B_ENC, MIN_S_B_DESC;
+def MIN_S_H : MIN_S_H_ENC, MIN_S_H_DESC;
+def MIN_S_W : MIN_S_W_ENC, MIN_S_W_DESC;
+def MIN_S_D : MIN_S_D_ENC, MIN_S_D_DESC;
+
+def MIN_U_B : MIN_U_B_ENC, MIN_U_B_DESC;
+def MIN_U_H : MIN_U_H_ENC, MIN_U_H_DESC;
+def MIN_U_W : MIN_U_W_ENC, MIN_U_W_DESC;
+def MIN_U_D : MIN_U_D_ENC, MIN_U_D_DESC;
+
+def MINI_S_B : MINI_S_B_ENC, MINI_S_B_DESC;
+def MINI_S_H : MINI_S_H_ENC, MINI_S_H_DESC;
+def MINI_S_W : MINI_S_W_ENC, MINI_S_W_DESC;
+def MINI_S_D : MINI_S_D_ENC, MINI_S_D_DESC;
+
+def MINI_U_B : MINI_U_B_ENC, MINI_U_B_DESC;
+def MINI_U_H : MINI_U_H_ENC, MINI_U_H_DESC;
+def MINI_U_W : MINI_U_W_ENC, MINI_U_W_DESC;
+def MINI_U_D : MINI_U_D_ENC, MINI_U_D_DESC;
+
+def MOD_S_B : MOD_S_B_ENC, MOD_S_B_DESC;
+def MOD_S_H : MOD_S_H_ENC, MOD_S_H_DESC;
+def MOD_S_W : MOD_S_W_ENC, MOD_S_W_DESC;
+def MOD_S_D : MOD_S_D_ENC, MOD_S_D_DESC;
+
+def MOD_U_B : MOD_U_B_ENC, MOD_U_B_DESC;
+def MOD_U_H : MOD_U_H_ENC, MOD_U_H_DESC;
+def MOD_U_W : MOD_U_W_ENC, MOD_U_W_DESC;
+def MOD_U_D : MOD_U_D_ENC, MOD_U_D_DESC;
+
+def MOVE_V : MOVE_V_ENC, MOVE_V_DESC;
+
+def MSUB_Q_H : MSUB_Q_H_ENC, MSUB_Q_H_DESC;
+def MSUB_Q_W : MSUB_Q_W_ENC, MSUB_Q_W_DESC;
+
+def MSUBR_Q_H : MSUBR_Q_H_ENC, MSUBR_Q_H_DESC;
+def MSUBR_Q_W : MSUBR_Q_W_ENC, MSUBR_Q_W_DESC;
+
+def MSUBV_B : MSUBV_B_ENC, MSUBV_B_DESC;
+def MSUBV_H : MSUBV_H_ENC, MSUBV_H_DESC;
+def MSUBV_W : MSUBV_W_ENC, MSUBV_W_DESC;
+def MSUBV_D : MSUBV_D_ENC, MSUBV_D_DESC;
+
+def MUL_Q_H : MUL_Q_H_ENC, MUL_Q_H_DESC;
+def MUL_Q_W : MUL_Q_W_ENC, MUL_Q_W_DESC;
+
+def MULR_Q_H : MULR_Q_H_ENC, MULR_Q_H_DESC;
+def MULR_Q_W : MULR_Q_W_ENC, MULR_Q_W_DESC;
+
+def MULV_B : MULV_B_ENC, MULV_B_DESC;
+def MULV_H : MULV_H_ENC, MULV_H_DESC;
+def MULV_W : MULV_W_ENC, MULV_W_DESC;
+def MULV_D : MULV_D_ENC, MULV_D_DESC;
+
+def NLOC_B : NLOC_B_ENC, NLOC_B_DESC;
+def NLOC_H : NLOC_H_ENC, NLOC_H_DESC;
+def NLOC_W : NLOC_W_ENC, NLOC_W_DESC;
+def NLOC_D : NLOC_D_ENC, NLOC_D_DESC;
+
+def NLZC_B : NLZC_B_ENC, NLZC_B_DESC;
+def NLZC_H : NLZC_H_ENC, NLZC_H_DESC;
+def NLZC_W : NLZC_W_ENC, NLZC_W_DESC;
+def NLZC_D : NLZC_D_ENC, NLZC_D_DESC;
+
+def NOR_V : NOR_V_ENC, NOR_V_DESC;
+def NOR_V_H_PSEUDO : NOR_V_H_PSEUDO_DESC,
+ PseudoInstExpansion<(NOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def NOR_V_W_PSEUDO : NOR_V_W_PSEUDO_DESC,
+ PseudoInstExpansion<(NOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def NOR_V_D_PSEUDO : NOR_V_D_PSEUDO_DESC,
+ PseudoInstExpansion<(NOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+
+def NORI_B : NORI_B_ENC, NORI_B_DESC;
+
+def OR_V : OR_V_ENC, OR_V_DESC;
+def OR_V_H_PSEUDO : OR_V_H_PSEUDO_DESC,
+ PseudoInstExpansion<(OR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def OR_V_W_PSEUDO : OR_V_W_PSEUDO_DESC,
+ PseudoInstExpansion<(OR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def OR_V_D_PSEUDO : OR_V_D_PSEUDO_DESC,
+ PseudoInstExpansion<(OR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+
+def ORI_B : ORI_B_ENC, ORI_B_DESC;
+
+def PCKEV_B : PCKEV_B_ENC, PCKEV_B_DESC;
+def PCKEV_H : PCKEV_H_ENC, PCKEV_H_DESC;
+def PCKEV_W : PCKEV_W_ENC, PCKEV_W_DESC;
+def PCKEV_D : PCKEV_D_ENC, PCKEV_D_DESC;
+
+def PCKOD_B : PCKOD_B_ENC, PCKOD_B_DESC;
+def PCKOD_H : PCKOD_H_ENC, PCKOD_H_DESC;
+def PCKOD_W : PCKOD_W_ENC, PCKOD_W_DESC;
+def PCKOD_D : PCKOD_D_ENC, PCKOD_D_DESC;
+
+def PCNT_B : PCNT_B_ENC, PCNT_B_DESC;
+def PCNT_H : PCNT_H_ENC, PCNT_H_DESC;
+def PCNT_W : PCNT_W_ENC, PCNT_W_DESC;
+def PCNT_D : PCNT_D_ENC, PCNT_D_DESC;
+
+def SAT_S_B : SAT_S_B_ENC, SAT_S_B_DESC;
+def SAT_S_H : SAT_S_H_ENC, SAT_S_H_DESC;
+def SAT_S_W : SAT_S_W_ENC, SAT_S_W_DESC;
+def SAT_S_D : SAT_S_D_ENC, SAT_S_D_DESC;
+
+def SAT_U_B : SAT_U_B_ENC, SAT_U_B_DESC;
+def SAT_U_H : SAT_U_H_ENC, SAT_U_H_DESC;
+def SAT_U_W : SAT_U_W_ENC, SAT_U_W_DESC;
+def SAT_U_D : SAT_U_D_ENC, SAT_U_D_DESC;
+
+def SHF_B : SHF_B_ENC, SHF_B_DESC;
+def SHF_H : SHF_H_ENC, SHF_H_DESC;
+def SHF_W : SHF_W_ENC, SHF_W_DESC;
+
+def SLD_B : SLD_B_ENC, SLD_B_DESC;
+def SLD_H : SLD_H_ENC, SLD_H_DESC;
+def SLD_W : SLD_W_ENC, SLD_W_DESC;
+def SLD_D : SLD_D_ENC, SLD_D_DESC;
+
+def SLDI_B : SLDI_B_ENC, SLDI_B_DESC;
+def SLDI_H : SLDI_H_ENC, SLDI_H_DESC;
+def SLDI_W : SLDI_W_ENC, SLDI_W_DESC;
+def SLDI_D : SLDI_D_ENC, SLDI_D_DESC;
+
+def SLL_B : SLL_B_ENC, SLL_B_DESC;
+def SLL_H : SLL_H_ENC, SLL_H_DESC;
+def SLL_W : SLL_W_ENC, SLL_W_DESC;
+def SLL_D : SLL_D_ENC, SLL_D_DESC;
+
+def SLLI_B : SLLI_B_ENC, SLLI_B_DESC;
+def SLLI_H : SLLI_H_ENC, SLLI_H_DESC;
+def SLLI_W : SLLI_W_ENC, SLLI_W_DESC;
+def SLLI_D : SLLI_D_ENC, SLLI_D_DESC;
+
+def SPLAT_B : SPLAT_B_ENC, SPLAT_B_DESC;
+def SPLAT_H : SPLAT_H_ENC, SPLAT_H_DESC;
+def SPLAT_W : SPLAT_W_ENC, SPLAT_W_DESC;
+def SPLAT_D : SPLAT_D_ENC, SPLAT_D_DESC;
+
+def SPLATI_B : SPLATI_B_ENC, SPLATI_B_DESC;
+def SPLATI_H : SPLATI_H_ENC, SPLATI_H_DESC;
+def SPLATI_W : SPLATI_W_ENC, SPLATI_W_DESC;
+def SPLATI_D : SPLATI_D_ENC, SPLATI_D_DESC;
+
+def SRA_B : SRA_B_ENC, SRA_B_DESC;
+def SRA_H : SRA_H_ENC, SRA_H_DESC;
+def SRA_W : SRA_W_ENC, SRA_W_DESC;
+def SRA_D : SRA_D_ENC, SRA_D_DESC;
+
+def SRAI_B : SRAI_B_ENC, SRAI_B_DESC;
+def SRAI_H : SRAI_H_ENC, SRAI_H_DESC;
+def SRAI_W : SRAI_W_ENC, SRAI_W_DESC;
+def SRAI_D : SRAI_D_ENC, SRAI_D_DESC;
+
+def SRAR_B : SRAR_B_ENC, SRAR_B_DESC;
+def SRAR_H : SRAR_H_ENC, SRAR_H_DESC;
+def SRAR_W : SRAR_W_ENC, SRAR_W_DESC;
+def SRAR_D : SRAR_D_ENC, SRAR_D_DESC;
+
+def SRARI_B : SRARI_B_ENC, SRARI_B_DESC;
+def SRARI_H : SRARI_H_ENC, SRARI_H_DESC;
+def SRARI_W : SRARI_W_ENC, SRARI_W_DESC;
+def SRARI_D : SRARI_D_ENC, SRARI_D_DESC;
+
+def SRL_B : SRL_B_ENC, SRL_B_DESC;
+def SRL_H : SRL_H_ENC, SRL_H_DESC;
+def SRL_W : SRL_W_ENC, SRL_W_DESC;
+def SRL_D : SRL_D_ENC, SRL_D_DESC;
+
+def SRLI_B : SRLI_B_ENC, SRLI_B_DESC;
+def SRLI_H : SRLI_H_ENC, SRLI_H_DESC;
+def SRLI_W : SRLI_W_ENC, SRLI_W_DESC;
+def SRLI_D : SRLI_D_ENC, SRLI_D_DESC;
+
+def SRLR_B : SRLR_B_ENC, SRLR_B_DESC;
+def SRLR_H : SRLR_H_ENC, SRLR_H_DESC;
+def SRLR_W : SRLR_W_ENC, SRLR_W_DESC;
+def SRLR_D : SRLR_D_ENC, SRLR_D_DESC;
+
+def SRLRI_B : SRLRI_B_ENC, SRLRI_B_DESC;
+def SRLRI_H : SRLRI_H_ENC, SRLRI_H_DESC;
+def SRLRI_W : SRLRI_W_ENC, SRLRI_W_DESC;
+def SRLRI_D : SRLRI_D_ENC, SRLRI_D_DESC;
+
+def ST_B : ST_B_ENC, ST_B_DESC;
+def ST_H : ST_H_ENC, ST_H_DESC;
+def ST_W : ST_W_ENC, ST_W_DESC;
+def ST_D : ST_D_ENC, ST_D_DESC;
+
+def SUBS_S_B : SUBS_S_B_ENC, SUBS_S_B_DESC;
+def SUBS_S_H : SUBS_S_H_ENC, SUBS_S_H_DESC;
+def SUBS_S_W : SUBS_S_W_ENC, SUBS_S_W_DESC;
+def SUBS_S_D : SUBS_S_D_ENC, SUBS_S_D_DESC;
+
+def SUBS_U_B : SUBS_U_B_ENC, SUBS_U_B_DESC;
+def SUBS_U_H : SUBS_U_H_ENC, SUBS_U_H_DESC;
+def SUBS_U_W : SUBS_U_W_ENC, SUBS_U_W_DESC;
+def SUBS_U_D : SUBS_U_D_ENC, SUBS_U_D_DESC;
+
+def SUBSUS_U_B : SUBSUS_U_B_ENC, SUBSUS_U_B_DESC;
+def SUBSUS_U_H : SUBSUS_U_H_ENC, SUBSUS_U_H_DESC;
+def SUBSUS_U_W : SUBSUS_U_W_ENC, SUBSUS_U_W_DESC;
+def SUBSUS_U_D : SUBSUS_U_D_ENC, SUBSUS_U_D_DESC;
+
+def SUBSUU_S_B : SUBSUU_S_B_ENC, SUBSUU_S_B_DESC;
+def SUBSUU_S_H : SUBSUU_S_H_ENC, SUBSUU_S_H_DESC;
+def SUBSUU_S_W : SUBSUU_S_W_ENC, SUBSUU_S_W_DESC;
+def SUBSUU_S_D : SUBSUU_S_D_ENC, SUBSUU_S_D_DESC;
+
+def SUBV_B : SUBV_B_ENC, SUBV_B_DESC;
+def SUBV_H : SUBV_H_ENC, SUBV_H_DESC;
+def SUBV_W : SUBV_W_ENC, SUBV_W_DESC;
+def SUBV_D : SUBV_D_ENC, SUBV_D_DESC;
+
+def SUBVI_B : SUBVI_B_ENC, SUBVI_B_DESC;
+def SUBVI_H : SUBVI_H_ENC, SUBVI_H_DESC;
+def SUBVI_W : SUBVI_W_ENC, SUBVI_W_DESC;
+def SUBVI_D : SUBVI_D_ENC, SUBVI_D_DESC;
+
+def VSHF_B : VSHF_B_ENC, VSHF_B_DESC;
+def VSHF_H : VSHF_H_ENC, VSHF_H_DESC;
+def VSHF_W : VSHF_W_ENC, VSHF_W_DESC;
+def VSHF_D : VSHF_D_ENC, VSHF_D_DESC;
+
+def XOR_V : XOR_V_ENC, XOR_V_DESC;
+def XOR_V_H_PSEUDO : XOR_V_H_PSEUDO_DESC,
+ PseudoInstExpansion<(XOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def XOR_V_W_PSEUDO : XOR_V_W_PSEUDO_DESC,
+ PseudoInstExpansion<(XOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def XOR_V_D_PSEUDO : XOR_V_D_PSEUDO_DESC,
+ PseudoInstExpansion<(XOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+
+def XORI_B : XORI_B_ENC, XORI_B_DESC;
+
+// Patterns.
+class MSAPat<dag pattern, dag result, list<Predicate> pred = [HasMSA]> :
+ Pat<pattern, result>, Requires<pred>;
+
+def : MSAPat<(extractelt (v4i32 MSA128W:$ws), immZExt4:$idx),
+ (COPY_S_W MSA128W:$ws, immZExt4:$idx)>;
+
+def : MSAPat<(v16i8 (load addr:$addr)), (LD_B addr:$addr)>;
+def : MSAPat<(v8i16 (load addr:$addr)), (LD_H addr:$addr)>;
+def : MSAPat<(v4i32 (load addr:$addr)), (LD_W addr:$addr)>;
+def : MSAPat<(v2i64 (load addr:$addr)), (LD_D addr:$addr)>;
+def : MSAPat<(v8f16 (load addr:$addr)), (LD_H addr:$addr)>;
+def : MSAPat<(v4f32 (load addr:$addr)), (LD_W addr:$addr)>;
+def : MSAPat<(v2f64 (load addr:$addr)), (LD_D addr:$addr)>;
+
+def : MSAPat<(v8f16 (load addrRegImm:$addr)), (LD_H addrRegImm:$addr)>;
+def : MSAPat<(v4f32 (load addrRegImm:$addr)), (LD_W addrRegImm:$addr)>;
+def : MSAPat<(v2f64 (load addrRegImm:$addr)), (LD_D addrRegImm:$addr)>;
+
+def : MSAPat<(store (v16i8 MSA128B:$ws), addr:$addr),
+ (ST_B MSA128B:$ws, addr:$addr)>;
+def : MSAPat<(store (v8i16 MSA128H:$ws), addr:$addr),
+ (ST_H MSA128H:$ws, addr:$addr)>;
+def : MSAPat<(store (v4i32 MSA128W:$ws), addr:$addr),
+ (ST_W MSA128W:$ws, addr:$addr)>;
+def : MSAPat<(store (v2i64 MSA128D:$ws), addr:$addr),
+ (ST_D MSA128D:$ws, addr:$addr)>;
+def : MSAPat<(store (v8f16 MSA128H:$ws), addr:$addr),
+ (ST_H MSA128H:$ws, addr:$addr)>;
+def : MSAPat<(store (v4f32 MSA128W:$ws), addr:$addr),
+ (ST_W MSA128W:$ws, addr:$addr)>;
+def : MSAPat<(store (v2f64 MSA128D:$ws), addr:$addr),
+ (ST_D MSA128D:$ws, addr:$addr)>;
+
+def ST_FH : MSAPat<(store (v8f16 MSA128H:$ws), addrRegImm:$addr),
+ (ST_H MSA128H:$ws, addrRegImm:$addr)>;
+def ST_FW : MSAPat<(store (v4f32 MSA128W:$ws), addrRegImm:$addr),
+ (ST_W MSA128W:$ws, addrRegImm:$addr)>;
+def ST_FD : MSAPat<(store (v2f64 MSA128D:$ws), addrRegImm:$addr),
+ (ST_D MSA128D:$ws, addrRegImm:$addr)>;
+
+class MSA_FABS_PSEUDO_DESC_BASE<RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> :
+ MipsPseudo<(outs ROWD:$wd),
+ (ins ROWS:$ws),
+ [(set ROWD:$wd, (fabs ROWS:$ws))]> {
+ InstrItinClass Itinerary = itin;
+}
+def FABS_W : MSA_FABS_PSEUDO_DESC_BASE<MSA128WOpnd>,
+ PseudoInstExpansion<(FMAX_A_W MSA128WOpnd:$wd, MSA128WOpnd:$ws,
+ MSA128WOpnd:$ws)>;
+def FABS_D : MSA_FABS_PSEUDO_DESC_BASE<MSA128DOpnd>,
+ PseudoInstExpansion<(FMAX_A_D MSA128DOpnd:$wd, MSA128DOpnd:$ws,
+ MSA128DOpnd:$ws)>;
+
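+
+// fmax_a.df writes the element-wise maximum of the operands' absolute
+// values, so fmax_a.df $wd, $ws, $ws yields |$ws|; the FABS_W/FABS_D pseudos
+// above rely on this to implement fabs without a dedicated MSA instruction.
+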
+class MSABitconvertPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC, list<Predicate> preds = [HasMSA]> :
+ MSAPat<(DstVT (bitconvert SrcVT:$src)),
+ (COPY_TO_REGCLASS SrcVT:$src, DstRC), preds>;
+
+// These are endian-independent because the element size doesn't change.
+def : MSABitconvertPat<v8i16, v8f16, MSA128H>;
+def : MSABitconvertPat<v4i32, v4f32, MSA128W>;
+def : MSABitconvertPat<v2i64, v2f64, MSA128D>;
+def : MSABitconvertPat<v8f16, v8i16, MSA128H>;
+def : MSABitconvertPat<v4f32, v4i32, MSA128W>;
+def : MSABitconvertPat<v2f64, v2i64, MSA128D>;
+
+// Little endian bitcasts are always no-ops
+def : MSABitconvertPat<v16i8, v8i16, MSA128B, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v16i8, v4i32, MSA128B, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v16i8, v2i64, MSA128B, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v16i8, v8f16, MSA128B, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v16i8, v4f32, MSA128B, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v16i8, v2f64, MSA128B, [HasMSA, IsLE]>;
+
+def : MSABitconvertPat<v8i16, v16i8, MSA128H, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v8i16, v4i32, MSA128H, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v8i16, v2i64, MSA128H, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v8i16, v4f32, MSA128H, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v8i16, v2f64, MSA128H, [HasMSA, IsLE]>;
+
+def : MSABitconvertPat<v4i32, v16i8, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4i32, v8i16, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4i32, v2i64, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4i32, v8f16, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4i32, v2f64, MSA128W, [HasMSA, IsLE]>;
+
+def : MSABitconvertPat<v2i64, v16i8, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2i64, v8i16, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2i64, v4i32, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2i64, v8f16, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2i64, v4f32, MSA128D, [HasMSA, IsLE]>;
+
+def : MSABitconvertPat<v4f32, v16i8, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4f32, v8i16, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4f32, v2i64, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4f32, v8f16, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4f32, v2f64, MSA128W, [HasMSA, IsLE]>;
+
+def : MSABitconvertPat<v2f64, v16i8, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2f64, v8i16, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2f64, v4i32, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2f64, v8f16, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2f64, v4f32, MSA128D, [HasMSA, IsLE]>;
+
+// Big endian bitcasts expand to shuffle instructions.
+// This is because bitcast is defined to be a store/load sequence and the
+// vector store/load instructions are mixed-endian with respect to the vector
+// as a whole (little endian with respect to element order, but big endian
+// elements).
+
+class MSABitconvertReverseQuartersPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC, MSAInst Insn,
+ RegisterClass ViaRC> :
+ MSAPat<(DstVT (bitconvert SrcVT:$src)),
+ (COPY_TO_REGCLASS (Insn (COPY_TO_REGCLASS SrcVT:$src, ViaRC), 27),
+ DstRC),
+ [HasMSA, IsBE]>;
+
+class MSABitconvertReverseHalvesPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC, MSAInst Insn,
+ RegisterClass ViaRC> :
+ MSAPat<(DstVT (bitconvert SrcVT:$src)),
+ (COPY_TO_REGCLASS (Insn (COPY_TO_REGCLASS SrcVT:$src, ViaRC), 177),
+ DstRC),
+ [HasMSA, IsBE]>;
+
+class MSABitconvertReverseBInHPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSABitconvertReverseHalvesPat<DstVT, SrcVT, DstRC, SHF_B, MSA128B>;
+
+class MSABitconvertReverseBInWPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSABitconvertReverseQuartersPat<DstVT, SrcVT, DstRC, SHF_B, MSA128B>;
+
+class MSABitconvertReverseBInDPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSAPat<(DstVT (bitconvert SrcVT:$src)),
+ (COPY_TO_REGCLASS
+ (SHF_W
+ (COPY_TO_REGCLASS
+ (SHF_B (COPY_TO_REGCLASS SrcVT:$src, MSA128B), 27),
+ MSA128W), 177),
+ DstRC),
+ [HasMSA, IsBE]>;
+
+class MSABitconvertReverseHInWPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSABitconvertReverseHalvesPat<DstVT, SrcVT, DstRC, SHF_H, MSA128H>;
+
+class MSABitconvertReverseHInDPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSABitconvertReverseQuartersPat<DstVT, SrcVT, DstRC, SHF_H, MSA128H>;
+
+class MSABitconvertReverseWInDPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSABitconvertReverseHalvesPat<DstVT, SrcVT, DstRC, SHF_W, MSA128W>;
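
As a reading aid (not part of the patch): the immediates 27 and 177 used by the pattern classes above are SHF.df control bytes. Assuming the usual MSA encoding of four 2-bit source-element selectors per immediate, 27 selects elements [3,2,1,0] (reverse each group of four) and 177 selects [1,0,3,2] (swap adjacent pairs), which is why the byte-in-doubleword case composes a SHF_B with 27 and a SHF_W with 177. A small decoding sketch:

  // Illustrative only: decode an MSA SHF.df immediate into the four source
  // element indices it selects, assuming 2-bit selector fields from the LSB up.
  #include <cstdio>

  static void decodeShfImm(unsigned Imm) {
    unsigned Sel[4];
    for (unsigned I = 0; I < 4; ++I)
      Sel[I] = (Imm >> (2 * I)) & 0x3;   // one 2-bit selector per result element
    std::printf("imm %u -> [%u, %u, %u, %u]\n", Imm, Sel[0], Sel[1], Sel[2], Sel[3]);
  }

  int main() {
    decodeShfImm(27);   // 0b00011011 -> [3, 2, 1, 0]: reverse each group of four
    decodeShfImm(177);  // 0b10110001 -> [1, 0, 3, 2]: swap adjacent pairs
    return 0;
  }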
+
+def : MSABitconvertReverseBInHPat<v8i16, v16i8, MSA128H>;
+def : MSABitconvertReverseBInHPat<v8f16, v16i8, MSA128H>;
+def : MSABitconvertReverseBInWPat<v4i32, v16i8, MSA128W>;
+def : MSABitconvertReverseBInWPat<v4f32, v16i8, MSA128W>;
+def : MSABitconvertReverseBInDPat<v2i64, v16i8, MSA128D>;
+def : MSABitconvertReverseBInDPat<v2f64, v16i8, MSA128D>;
+
+def : MSABitconvertReverseBInHPat<v16i8, v8i16, MSA128B>;
+def : MSABitconvertReverseHInWPat<v4i32, v8i16, MSA128W>;
+def : MSABitconvertReverseHInWPat<v4f32, v8i16, MSA128W>;
+def : MSABitconvertReverseHInDPat<v2i64, v8i16, MSA128D>;
+def : MSABitconvertReverseHInDPat<v2f64, v8i16, MSA128D>;
+
+def : MSABitconvertReverseBInHPat<v16i8, v8f16, MSA128B>;
+def : MSABitconvertReverseHInWPat<v4i32, v8f16, MSA128W>;
+def : MSABitconvertReverseHInWPat<v4f32, v8f16, MSA128W>;
+def : MSABitconvertReverseHInDPat<v2i64, v8f16, MSA128D>;
+def : MSABitconvertReverseHInDPat<v2f64, v8f16, MSA128D>;
+
+def : MSABitconvertReverseBInWPat<v16i8, v4i32, MSA128B>;
+def : MSABitconvertReverseHInWPat<v8i16, v4i32, MSA128H>;
+def : MSABitconvertReverseHInWPat<v8f16, v4i32, MSA128H>;
+def : MSABitconvertReverseWInDPat<v2i64, v4i32, MSA128D>;
+def : MSABitconvertReverseWInDPat<v2f64, v4i32, MSA128D>;
+
+def : MSABitconvertReverseBInWPat<v16i8, v4f32, MSA128B>;
+def : MSABitconvertReverseHInWPat<v8i16, v4f32, MSA128H>;
+def : MSABitconvertReverseHInWPat<v8f16, v4f32, MSA128H>;
+def : MSABitconvertReverseWInDPat<v2i64, v4f32, MSA128D>;
+def : MSABitconvertReverseWInDPat<v2f64, v4f32, MSA128D>;
+
+def : MSABitconvertReverseBInDPat<v16i8, v2i64, MSA128B>;
+def : MSABitconvertReverseHInDPat<v8i16, v2i64, MSA128H>;
+def : MSABitconvertReverseHInDPat<v8f16, v2i64, MSA128H>;
+def : MSABitconvertReverseWInDPat<v4i32, v2i64, MSA128W>;
+def : MSABitconvertReverseWInDPat<v4f32, v2i64, MSA128W>;
+
+def : MSABitconvertReverseBInDPat<v16i8, v2f64, MSA128B>;
+def : MSABitconvertReverseHInDPat<v8i16, v2f64, MSA128H>;
+def : MSABitconvertReverseHInDPat<v8f16, v2f64, MSA128H>;
+def : MSABitconvertReverseWInDPat<v4i32, v2f64, MSA128W>;
+def : MSABitconvertReverseWInDPat<v4f32, v2f64, MSA128W>;
+
+// Pseudos used to implement BNZ.df and BZ.df
+
+class MSA_CBRANCH_PSEUDO_DESC_BASE<SDPatternOperator OpNode, ValueType TyNode,
+ RegisterClass RCWS,
+ InstrItinClass itin = NoItinerary> :
+ MipsPseudo<(outs GPR32:$dst),
+ (ins RCWS:$ws),
+ [(set GPR32:$dst, (OpNode (TyNode RCWS:$ws)))]> {
+ bit usesCustomInserter = 1;
+}
+
+def SNZ_B_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllNonZero, v16i8,
+ MSA128B, NoItinerary>;
+def SNZ_H_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllNonZero, v8i16,
+ MSA128H, NoItinerary>;
+def SNZ_W_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllNonZero, v4i32,
+ MSA128W, NoItinerary>;
+def SNZ_D_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllNonZero, v2i64,
+ MSA128D, NoItinerary>;
+def SNZ_V_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAnyNonZero, v16i8,
+ MSA128B, NoItinerary>;
+
+def SZ_B_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllZero, v16i8,
+ MSA128B, NoItinerary>;
+def SZ_H_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllZero, v8i16,
+ MSA128H, NoItinerary>;
+def SZ_W_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllZero, v4i32,
+ MSA128W, NoItinerary>;
+def SZ_D_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllZero, v2i64,
+ MSA128D, NoItinerary>;
+def SZ_V_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAnyZero, v16i8,
+ MSA128B, NoItinerary>;
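
As a reading aid (not part of the patch): each pseudo above materializes into a GPR32 the vector predicate named by its SelectionDAG node (the per-element SNZ_*/SZ_* forms test every element, the _V forms test the vector as a whole), and the custom inserter later expands that into a BNZ.df/BZ.df branch sequence. A scalar sketch of the two "all" predicates for the 16 x i8 case, under that reading of MipsVAllNonZero/MipsVAllZero:

  // Illustrative only: scalar models of the predicates SNZ_B_PSEUDO and
  // SZ_B_PSEUDO are declared to compute over a 16 x i8 vector.
  #include <cstdint>

  static bool allNonZero(const uint8_t *V, unsigned N) {  // MipsVAllNonZero
    for (unsigned I = 0; I != N; ++I)
      if (V[I] == 0)
        return false;
    return true;
  }

  static bool allZero(const uint8_t *V, unsigned N) {     // MipsVAllZero
    for (unsigned I = 0; I != N; ++I)
      if (V[I] != 0)
        return false;
    return true;
  }

  int main() {
    uint8_t V[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
    return (allNonZero(V, 16) && !allZero(V, 16)) ? 0 : 1;
  }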
diff --git a/lib/Target/Mips/MipsMachineFunction.cpp b/lib/Target/Mips/MipsMachineFunction.cpp
index 59b23f7ad7c1..dedf802f80ac 100644
--- a/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/lib/Target/Mips/MipsMachineFunction.cpp
@@ -15,6 +15,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -22,6 +23,53 @@ static cl::opt<bool>
FixGlobalBaseReg("mips-fix-global-base-reg", cl::Hidden, cl::init(true),
cl::desc("Always use $gp as the global base register."));
+// class MipsCallEntry.
+MipsCallEntry::MipsCallEntry(const StringRef &N) {
+#ifndef NDEBUG
+ Name = N;
+ Val = 0;
+#endif
+}
+
+MipsCallEntry::MipsCallEntry(const GlobalValue *V) {
+#ifndef NDEBUG
+ Val = V;
+#endif
+}
+
+bool MipsCallEntry::isConstant(const MachineFrameInfo *) const {
+ return false;
+}
+
+bool MipsCallEntry::isAliased(const MachineFrameInfo *) const {
+ return false;
+}
+
+bool MipsCallEntry::mayAlias(const MachineFrameInfo *) const {
+ return false;
+}
+
+void MipsCallEntry::printCustom(raw_ostream &O) const {
+ O << "MipsCallEntry: ";
+#ifndef NDEBUG
+ if (Val)
+ O << Val->getName();
+ else
+ O << Name;
+#endif
+}
+
+MipsFunctionInfo::~MipsFunctionInfo() {
+ for (StringMap<const MipsCallEntry *>::iterator
+ I = ExternalCallEntries.begin(), E = ExternalCallEntries.end(); I != E;
+ ++I)
+ delete I->getValue();
+
+ for (ValueMap<const GlobalValue *, const MipsCallEntry *>::iterator
+ I = GlobalCallEntries.begin(), E = GlobalCallEntries.end(); I != E; ++I)
+ delete I->second;
+}
+
bool MipsFunctionInfo::globalBaseRegSet() const {
return GlobalBaseReg;
}
@@ -38,8 +86,8 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() {
RC=(const TargetRegisterClass*)&Mips::CPU16RegsRegClass;
else
RC = ST.isABI_N64() ?
- (const TargetRegisterClass*)&Mips::CPU64RegsRegClass :
- (const TargetRegisterClass*)&Mips::CPURegsRegClass;
+ (const TargetRegisterClass*)&Mips::GPR64RegClass :
+ (const TargetRegisterClass*)&Mips::GPR32RegClass;
return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC);
}
@@ -60,7 +108,7 @@ void MipsFunctionInfo::createEhDataRegsFI() {
for (int I = 0; I < 4; ++I) {
const MipsSubtarget &ST = MF.getTarget().getSubtarget<MipsSubtarget>();
const TargetRegisterClass *RC = ST.isABI_N64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
EhDataRegFI[I] = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
RC->getAlignment(), false);
@@ -72,4 +120,22 @@ bool MipsFunctionInfo::isEhDataRegFI(int FI) const {
|| FI == EhDataRegFI[2] || FI == EhDataRegFI[3]);
}
+MachinePointerInfo MipsFunctionInfo::callPtrInfo(const StringRef &Name) {
+ const MipsCallEntry *&E = ExternalCallEntries[Name];
+
+ if (!E)
+ E = new MipsCallEntry(Name);
+
+ return MachinePointerInfo(E);
+}
+
+MachinePointerInfo MipsFunctionInfo::callPtrInfo(const GlobalValue *Val) {
+ const MipsCallEntry *&E = GlobalCallEntries[Val];
+
+ if (!E)
+ E = new MipsCallEntry(Val);
+
+ return MachinePointerInfo(E);
+}
+
void MipsFunctionInfo::anchor() { }
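
A hedged sketch (not from this patch) of how call lowering might consume callPtrInfo(): the GOT load of the callee's address is tagged with a MipsCallEntry-backed MachinePointerInfo, so the memory operand identifies that particular lazy-binding GOT slot rather than anonymous memory. All surrounding names here (DAG, DL, Chain, GotAddr, PtrVT) are assumed context for illustration only.

  // Hypothetical helper, for illustration only; not code from the patch.
  SDValue loadCallTargetFromGOT(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
                                SDValue GotAddr, EVT PtrVT,
                                MipsFunctionInfo *FI, const GlobalValue *GV) {
    // The MachinePointerInfo wraps the per-callee MipsCallEntry created above.
    MachinePointerInfo MPI = FI->callPtrInfo(GV);
    // Plain load (non-volatile, non-temporal and invariant flags cleared) of
    // the function address from its GOT slot.
    return DAG.getLoad(PtrVT, DL, Chain, GotAddr, MPI,
                       false, false, false, 0);
  }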
diff --git a/lib/Target/Mips/MipsMachineFunction.h b/lib/Target/Mips/MipsMachineFunction.h
index b05b348037d9..43bf6827eefb 100644
--- a/lib/Target/Mips/MipsMachineFunction.h
+++ b/lib/Target/Mips/MipsMachineFunction.h
@@ -15,56 +15,48 @@
#define MIPS_MACHINE_FUNCTION_INFO_H
#include "MipsSubtarget.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/ValueMap.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include <utility>
namespace llvm {
+/// \brief A class derived from PseudoSourceValue that represents a GOT entry
+/// resolved by lazy-binding.
+class MipsCallEntry : public PseudoSourceValue {
+public:
+ explicit MipsCallEntry(const StringRef &N);
+ explicit MipsCallEntry(const GlobalValue *V);
+ virtual bool isConstant(const MachineFrameInfo *) const;
+ virtual bool isAliased(const MachineFrameInfo *) const;
+ virtual bool mayAlias(const MachineFrameInfo *) const;
+
+private:
+ virtual void printCustom(raw_ostream &O) const;
+#ifndef NDEBUG
+ std::string Name;
+ const GlobalValue *Val;
+#endif
+};
+
/// MipsFunctionInfo - This class is derived from MachineFunction private
/// Mips target-specific information for each MachineFunction.
class MipsFunctionInfo : public MachineFunctionInfo {
- virtual void anchor();
-
- MachineFunction& MF;
- /// SRetReturnReg - Some subtargets require that sret lowering includes
- /// returning the value of the returned struct in a register. This field
- /// holds the virtual register into which the sret argument is passed.
- unsigned SRetReturnReg;
-
- /// GlobalBaseReg - keeps track of the virtual register initialized for
- /// use as the global base register. This is used for PIC in some PIC
- /// relocation models.
- unsigned GlobalBaseReg;
-
- /// Mips16SPAliasReg - keeps track of the virtual register initialized for
- /// use as an alias for SP for use in load/store of halfword/byte from/to
- /// the stack
- unsigned Mips16SPAliasReg;
-
- /// VarArgsFrameIndex - FrameIndex for start of varargs area.
- int VarArgsFrameIndex;
-
- /// True if function has a byval argument.
- bool HasByvalArg;
-
- /// Size of incoming argument area.
- unsigned IncomingArgSize;
-
- /// CallsEhReturn - Whether the function calls llvm.eh.return.
- bool CallsEhReturn;
-
- /// Frame objects for spilling eh data registers.
- int EhDataRegFI[4];
-
public:
MipsFunctionInfo(MachineFunction& MF)
: MF(MF), SRetReturnReg(0), GlobalBaseReg(0), Mips16SPAliasReg(0),
VarArgsFrameIndex(0), CallsEhReturn(false)
{}
+ ~MipsFunctionInfo();
+
unsigned getSRetReturnReg() const { return SRetReturnReg; }
void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
@@ -92,6 +84,51 @@ public:
int getEhDataRegFI(unsigned Reg) const { return EhDataRegFI[Reg]; }
bool isEhDataRegFI(int FI) const;
+ /// \brief Create a MachinePointerInfo that has a MipsCallEntry object
+ /// representing a GOT entry for an external function.
+ MachinePointerInfo callPtrInfo(const StringRef &Name);
+
+ /// \brief Create a MachinePointerInfo that has a MipsCallEntry object
+ /// representing a GOT entry for a global function.
+ MachinePointerInfo callPtrInfo(const GlobalValue *Val);
+
+private:
+ virtual void anchor();
+
+ MachineFunction& MF;
+ /// SRetReturnReg - Some subtargets require that sret lowering includes
+ /// returning the value of the returned struct in a register. This field
+ /// holds the virtual register into which the sret argument is passed.
+ unsigned SRetReturnReg;
+
+ /// GlobalBaseReg - keeps track of the virtual register initialized for
+ /// use as the global base register. This is used for PIC in some PIC
+ /// relocation models.
+ unsigned GlobalBaseReg;
+
+ /// Mips16SPAliasReg - keeps track of the virtual register initialized for
+ /// use as an alias for SP for use in load/store of halfword/byte from/to
+ /// the stack
+ unsigned Mips16SPAliasReg;
+
+ /// VarArgsFrameIndex - FrameIndex for start of varargs area.
+ int VarArgsFrameIndex;
+
+ /// True if function has a byval argument.
+ bool HasByvalArg;
+
+ /// Size of incoming argument area.
+ unsigned IncomingArgSize;
+
+ /// CallsEhReturn - Whether the function calls llvm.eh.return.
+ bool CallsEhReturn;
+
+ /// Frame objects for spilling eh data registers.
+ int EhDataRegFI[4];
+
+ /// MipsCallEntry maps.
+ StringMap<const MipsCallEntry *> ExternalCallEntries;
+ ValueMap<const GlobalValue *, const MipsCallEntry *> GlobalCallEntries;
};
} // end of namespace llvm
diff --git a/lib/Target/Mips/MipsOs16.cpp b/lib/Target/Mips/MipsOs16.cpp
index 1919077eeb71..fe60841212e0 100644
--- a/lib/Target/Mips/MipsOs16.cpp
+++ b/lib/Target/Mips/MipsOs16.cpp
@@ -14,9 +14,17 @@
#define DEBUG_TYPE "mips-os16"
#include "MipsOs16.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+
+static cl::opt<std::string> Mips32FunctionMask(
+ "mips32-function-mask",
+ cl::init(""),
+ cl::desc("Force function to be mips32"),
+ cl::Hidden);
+
namespace {
 // Figure out if we need floating point based on the function signature.
@@ -85,18 +93,43 @@ namespace llvm {
bool MipsOs16::runOnModule(Module &M) {
- DEBUG(errs() << "Run on Module MipsOs16\n");
+ bool usingMask = Mips32FunctionMask.length() > 0;
+ bool doneUsingMask = false; // stop applying the mask once a '.' is seen
+ DEBUG(dbgs() << "Run on Module MipsOs16 \n" << Mips32FunctionMask << "\n");
+ if (usingMask)
+ DEBUG(dbgs() << "using mask \n" << Mips32FunctionMask << "\n");
+ unsigned int functionIndex = 0;
bool modified = false;
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
if (F->isDeclaration()) continue;
DEBUG(dbgs() << "Working on " << F->getName() << "\n");
- if (needsFP(*F)) {
- DEBUG(dbgs() << " need to compile as nomips16 \n");
- F->addFnAttr("nomips16");
+ if (usingMask) {
+ if (!doneUsingMask) {
+ if (functionIndex == Mips32FunctionMask.length())
+ functionIndex = 0;
+ switch (Mips32FunctionMask[functionIndex]) {
+ case '1':
+ DEBUG(dbgs() << "mask forced mips32: " << F->getName() << "\n");
+ F->addFnAttr("nomips16");
+ break;
+ case '.':
+ doneUsingMask = true;
+ break;
+ default:
+ break;
+ }
+ functionIndex++;
+ }
}
else {
- F->addFnAttr("mips16");
- DEBUG(dbgs() << " no need to compile as nomips16 \n");
+ if (needsFP(*F)) {
+ DEBUG(dbgs() << "os16 forced mips32: " << F->getName() << "\n");
+ F->addFnAttr("nomips16");
+ }
+ else {
+ DEBUG(dbgs() << "os16 forced mips16: " << F->getName() << "\n");
+ F->addFnAttr("mips16");
+ }
}
}
return modified;
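
As a reading aid (not part of the patch): the new -mips32-function-mask option walks the module's defined functions in order and consumes one mask character per function, so a mask such as "10." forces the first function to MIPS32, leaves the second alone, and stops at the '.'. A standalone restatement of the mask-consumption logic, for clarity only:

  // Illustrative restatement of the mask walk in MipsOs16::runOnModule above:
  // '1' forces "nomips16", '.' stops applying the mask, anything else leaves
  // the function alone, and the index wraps past the end of the string.
  #include <cstdio>
  #include <string>
  #include <vector>

  static std::vector<bool> applyMask(const std::string &Mask, unsigned NumFuncs) {
    std::vector<bool> ForcedMips32(NumFuncs, false);
    if (Mask.empty())
      return ForcedMips32;             // empty mask means the option is unused
    bool Done = false;
    unsigned Idx = 0;
    for (unsigned F = 0; F < NumFuncs && !Done; ++F) {
      if (Idx == Mask.length())
        Idx = 0;                       // wrap around, as in the pass
      if (Mask[Idx] == '1')
        ForcedMips32[F] = true;        // would receive the "nomips16" attribute
      else if (Mask[Idx] == '.')
        Done = true;                   // stop consuming the mask
      ++Idx;
    }
    return ForcedMips32;
  }

  int main() {
    std::vector<bool> R = applyMask("10.", 4); // e.g. -mips32-function-mask=10.
    for (unsigned I = 0; I < R.size(); ++I)
      std::printf("function %u: %s\n", I, R[I] ? "forced mips32" : "unchanged");
    return 0;
  }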
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index dead07bacd5e..3105b0208451 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -47,6 +47,11 @@ MipsRegisterInfo::MipsRegisterInfo(const MipsSubtarget &ST)
unsigned MipsRegisterInfo::getPICCallReg() { return Mips::T9; }
+const TargetRegisterClass *
+MipsRegisterInfo::getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const {
+ return Subtarget.isABI_N64() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
+}
unsigned
MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
@@ -54,9 +59,9 @@ MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
switch (RC->getID()) {
default:
return 0;
- case Mips::CPURegsRegClassID:
- case Mips::CPU64RegsRegClassID:
- case Mips::DSPRegsRegClassID: {
+ case Mips::GPR32RegClassID:
+ case Mips::GPR64RegClassID:
+ case Mips::DSPRRegClassID: {
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
return 28 - TFI->hasFP(MF);
}
@@ -78,48 +83,60 @@ const uint16_t* MipsRegisterInfo::
getCalleeSavedRegs(const MachineFunction *MF) const {
if (Subtarget.isSingleFloat())
return CSR_SingleFloatOnly_SaveList;
- else if (!Subtarget.hasMips64())
- return CSR_O32_SaveList;
- else if (Subtarget.isABI_N32())
+
+ if (Subtarget.isABI_N64())
+ return CSR_N64_SaveList;
+
+ if (Subtarget.isABI_N32())
return CSR_N32_SaveList;
- assert(Subtarget.isABI_N64());
- return CSR_N64_SaveList;
+ if (Subtarget.isFP64bit())
+ return CSR_O32_FP64_SaveList;
+
+ return CSR_O32_SaveList;
}
const uint32_t*
MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
if (Subtarget.isSingleFloat())
return CSR_SingleFloatOnly_RegMask;
- else if (!Subtarget.hasMips64())
- return CSR_O32_RegMask;
- else if (Subtarget.isABI_N32())
+
+ if (Subtarget.isABI_N64())
+ return CSR_N64_RegMask;
+
+ if (Subtarget.isABI_N32())
return CSR_N32_RegMask;
- assert(Subtarget.isABI_N64());
- return CSR_N64_RegMask;
+ if (Subtarget.isFP64bit())
+ return CSR_O32_FP64_RegMask;
+
+ return CSR_O32_RegMask;
+}
+
+const uint32_t *MipsRegisterInfo::getMips16RetHelperMask() {
+ return CSR_Mips16RetHelper_RegMask;
}
BitVector MipsRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
- static const uint16_t ReservedCPURegs[] = {
+ static const uint16_t ReservedGPR32[] = {
Mips::ZERO, Mips::K0, Mips::K1, Mips::SP
};
- static const uint16_t ReservedCPU64Regs[] = {
+ static const uint16_t ReservedGPR64[] = {
Mips::ZERO_64, Mips::K0_64, Mips::K1_64, Mips::SP_64
};
BitVector Reserved(getNumRegs());
typedef TargetRegisterClass::const_iterator RegIter;
- for (unsigned I = 0; I < array_lengthof(ReservedCPURegs); ++I)
- Reserved.set(ReservedCPURegs[I]);
+ for (unsigned I = 0; I < array_lengthof(ReservedGPR32); ++I)
+ Reserved.set(ReservedGPR32[I]);
- for (unsigned I = 0; I < array_lengthof(ReservedCPU64Regs); ++I)
- Reserved.set(ReservedCPU64Regs[I]);
+ for (unsigned I = 0; I < array_lengthof(ReservedGPR64); ++I)
+ Reserved.set(ReservedGPR64[I]);
- if (Subtarget.hasMips64()) {
+ if (Subtarget.isFP64bit()) {
// Reserve all registers in AFGR64.
for (RegIter Reg = Mips::AFGR64RegClass.begin(),
EReg = Mips::AFGR64RegClass.end(); Reg != EReg; ++Reg)
@@ -142,7 +159,6 @@ getReservedRegs(const MachineFunction &MF) const {
// Reserve hardware registers.
Reserved.set(Mips::HWR29);
- Reserved.set(Mips::HWR29_64);
// Reserve DSP control register.
Reserved.set(Mips::DSPPos);
@@ -151,10 +167,22 @@ getReservedRegs(const MachineFunction &MF) const {
Reserved.set(Mips::DSPEFI);
Reserved.set(Mips::DSPOutFlag);
+ // Reserve MSA control registers.
+ Reserved.set(Mips::MSAIR);
+ Reserved.set(Mips::MSACSR);
+ Reserved.set(Mips::MSAAccess);
+ Reserved.set(Mips::MSASave);
+ Reserved.set(Mips::MSAModify);
+ Reserved.set(Mips::MSARequest);
+ Reserved.set(Mips::MSAMap);
+ Reserved.set(Mips::MSAUnmap);
+
// Reserve RA if in mips16 mode.
if (Subtarget.inMips16Mode()) {
Reserved.set(Mips::RA);
Reserved.set(Mips::RA_64);
+ Reserved.set(Mips::T0);
+ Reserved.set(Mips::T1);
}
// Reserve GP if small section is used.
@@ -212,12 +240,3 @@ getFrameRegister(const MachineFunction &MF) const {
}
-unsigned MipsRegisterInfo::
-getEHExceptionRegister() const {
- llvm_unreachable("What is the exception register");
-}
-
-unsigned MipsRegisterInfo::
-getEHHandlerRegister() const {
- llvm_unreachable("What is the exception handler register");
-}
diff --git a/lib/Target/Mips/MipsRegisterInfo.h b/lib/Target/Mips/MipsRegisterInfo.h
index 5ed51241391f..0450c6fbe47e 100644
--- a/lib/Target/Mips/MipsRegisterInfo.h
+++ b/lib/Target/Mips/MipsRegisterInfo.h
@@ -42,10 +42,14 @@ public:
void adjustMipsStackFrame(MachineFunction &MF) const;
/// Code Generation virtual methods...
+ const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const;
+
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const;
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const uint32_t *getCallPreservedMask(CallingConv::ID) const;
+ static const uint32_t *getMips16RetHelperMask();
BitVector getReservedRegs(const MachineFunction &MF) const;
@@ -64,10 +68,6 @@ public:
/// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const;
- /// Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
-
/// \brief Return GPR register class.
virtual const TargetRegisterClass *intRegClass(unsigned Size) const = 0;
diff --git a/lib/Target/Mips/MipsRegisterInfo.td b/lib/Target/Mips/MipsRegisterInfo.td
index 229f1677c044..3173d0927af1 100644
--- a/lib/Target/Mips/MipsRegisterInfo.td
+++ b/lib/Target/Mips/MipsRegisterInfo.td
@@ -11,16 +11,15 @@
// Declarations that describe the MIPS register file
//===----------------------------------------------------------------------===//
let Namespace = "Mips" in {
-def sub_fpeven : SubRegIndex;
-def sub_fpodd : SubRegIndex;
-def sub_32 : SubRegIndex;
-def sub_lo : SubRegIndex;
-def sub_hi : SubRegIndex;
-def sub_dsp16_19 : SubRegIndex;
-def sub_dsp20 : SubRegIndex;
-def sub_dsp21 : SubRegIndex;
-def sub_dsp22 : SubRegIndex;
-def sub_dsp23 : SubRegIndex;
+def sub_32 : SubRegIndex<32>;
+def sub_64 : SubRegIndex<64>;
+def sub_lo : SubRegIndex<32>;
+def sub_hi : SubRegIndex<32, 32>;
+def sub_dsp16_19 : SubRegIndex<4, 16>;
+def sub_dsp20 : SubRegIndex<1, 20>;
+def sub_dsp21 : SubRegIndex<1, 21>;
+def sub_dsp22 : SubRegIndex<1, 22>;
+def sub_dsp23 : SubRegIndex<1, 23>;
}
class Unallocatable {
@@ -54,17 +53,24 @@ class FPR<bits<16> Enc, string n> : MipsReg<Enc, n>;
// Mips 64-bit (aliased) FPU Registers
class AFPR<bits<16> Enc, string n, list<Register> subregs>
: MipsRegWithSubRegs<Enc, n, subregs> {
- let SubRegIndices = [sub_fpeven, sub_fpodd];
+ let SubRegIndices = [sub_lo, sub_hi];
let CoveredBySubRegs = 1;
}
class AFPR64<bits<16> Enc, string n, list<Register> subregs>
: MipsRegWithSubRegs<Enc, n, subregs> {
- let SubRegIndices = [sub_32];
+ let SubRegIndices = [sub_lo, sub_hi];
+ let CoveredBySubRegs = 1;
+}
+
+// Mips 128-bit (aliased) MSA Registers
+class AFPR128<bits<16> Enc, string n, list<Register> subregs>
+ : MipsRegWithSubRegs<Enc, n, subregs> {
+ let SubRegIndices = [sub_64];
}
// Accumulator Registers
-class ACC<bits<16> Enc, string n, list<Register> subregs>
+class ACCReg<bits<16> Enc, string n, list<Register> subregs>
: MipsRegWithSubRegs<Enc, n, subregs> {
let SubRegIndices = [sub_lo, sub_hi];
let CoveredBySubRegs = 1;
@@ -147,127 +153,70 @@ let Namespace = "Mips" in {
def RA_64 : Mips64GPRReg< 31, "ra", [RA]>, DwarfRegNum<[31]>;
/// Mips Single point precision FPU Registers
- def F0 : FPR< 0, "f0">, DwarfRegNum<[32]>;
- def F1 : FPR< 1, "f1">, DwarfRegNum<[33]>;
- def F2 : FPR< 2, "f2">, DwarfRegNum<[34]>;
- def F3 : FPR< 3, "f3">, DwarfRegNum<[35]>;
- def F4 : FPR< 4, "f4">, DwarfRegNum<[36]>;
- def F5 : FPR< 5, "f5">, DwarfRegNum<[37]>;
- def F6 : FPR< 6, "f6">, DwarfRegNum<[38]>;
- def F7 : FPR< 7, "f7">, DwarfRegNum<[39]>;
- def F8 : FPR< 8, "f8">, DwarfRegNum<[40]>;
- def F9 : FPR< 9, "f9">, DwarfRegNum<[41]>;
- def F10 : FPR<10, "f10">, DwarfRegNum<[42]>;
- def F11 : FPR<11, "f11">, DwarfRegNum<[43]>;
- def F12 : FPR<12, "f12">, DwarfRegNum<[44]>;
- def F13 : FPR<13, "f13">, DwarfRegNum<[45]>;
- def F14 : FPR<14, "f14">, DwarfRegNum<[46]>;
- def F15 : FPR<15, "f15">, DwarfRegNum<[47]>;
- def F16 : FPR<16, "f16">, DwarfRegNum<[48]>;
- def F17 : FPR<17, "f17">, DwarfRegNum<[49]>;
- def F18 : FPR<18, "f18">, DwarfRegNum<[50]>;
- def F19 : FPR<19, "f19">, DwarfRegNum<[51]>;
- def F20 : FPR<20, "f20">, DwarfRegNum<[52]>;
- def F21 : FPR<21, "f21">, DwarfRegNum<[53]>;
- def F22 : FPR<22, "f22">, DwarfRegNum<[54]>;
- def F23 : FPR<23, "f23">, DwarfRegNum<[55]>;
- def F24 : FPR<24, "f24">, DwarfRegNum<[56]>;
- def F25 : FPR<25, "f25">, DwarfRegNum<[57]>;
- def F26 : FPR<26, "f26">, DwarfRegNum<[58]>;
- def F27 : FPR<27, "f27">, DwarfRegNum<[59]>;
- def F28 : FPR<28, "f28">, DwarfRegNum<[60]>;
- def F29 : FPR<29, "f29">, DwarfRegNum<[61]>;
- def F30 : FPR<30, "f30">, DwarfRegNum<[62]>;
- def F31 : FPR<31, "f31">, DwarfRegNum<[63]>;
+ foreach I = 0-31 in
+ def F#I : FPR<I, "f"#I>, DwarfRegNum<[!add(I, 32)]>;
+
+ // Higher half of 64-bit FP registers.
+ foreach I = 0-31 in
+ def F_HI#I : FPR<I, "f"#I>, DwarfRegNum<[!add(I, 32)]>;
/// Mips Double point precision FPU Registers (aliased
/// with the single precision to hold 64 bit values)
- def D0 : AFPR< 0, "f0", [F0, F1]>;
- def D1 : AFPR< 2, "f2", [F2, F3]>;
- def D2 : AFPR< 4, "f4", [F4, F5]>;
- def D3 : AFPR< 6, "f6", [F6, F7]>;
- def D4 : AFPR< 8, "f8", [F8, F9]>;
- def D5 : AFPR<10, "f10", [F10, F11]>;
- def D6 : AFPR<12, "f12", [F12, F13]>;
- def D7 : AFPR<14, "f14", [F14, F15]>;
- def D8 : AFPR<16, "f16", [F16, F17]>;
- def D9 : AFPR<18, "f18", [F18, F19]>;
- def D10 : AFPR<20, "f20", [F20, F21]>;
- def D11 : AFPR<22, "f22", [F22, F23]>;
- def D12 : AFPR<24, "f24", [F24, F25]>;
- def D13 : AFPR<26, "f26", [F26, F27]>;
- def D14 : AFPR<28, "f28", [F28, F29]>;
- def D15 : AFPR<30, "f30", [F30, F31]>;
+ foreach I = 0-15 in
+ def D#I : AFPR<!shl(I, 1), "f"#!shl(I, 1),
+ [!cast<FPR>("F"#!shl(I, 1)),
+ !cast<FPR>("F"#!add(!shl(I, 1), 1))]>;
/// Mips Double point precision FPU Registers in MFP64 mode.
- def D0_64 : AFPR64<0, "f0", [F0]>, DwarfRegNum<[32]>;
- def D1_64 : AFPR64<1, "f1", [F1]>, DwarfRegNum<[33]>;
- def D2_64 : AFPR64<2, "f2", [F2]>, DwarfRegNum<[34]>;
- def D3_64 : AFPR64<3, "f3", [F3]>, DwarfRegNum<[35]>;
- def D4_64 : AFPR64<4, "f4", [F4]>, DwarfRegNum<[36]>;
- def D5_64 : AFPR64<5, "f5", [F5]>, DwarfRegNum<[37]>;
- def D6_64 : AFPR64<6, "f6", [F6]>, DwarfRegNum<[38]>;
- def D7_64 : AFPR64<7, "f7", [F7]>, DwarfRegNum<[39]>;
- def D8_64 : AFPR64<8, "f8", [F8]>, DwarfRegNum<[40]>;
- def D9_64 : AFPR64<9, "f9", [F9]>, DwarfRegNum<[41]>;
- def D10_64 : AFPR64<10, "f10", [F10]>, DwarfRegNum<[42]>;
- def D11_64 : AFPR64<11, "f11", [F11]>, DwarfRegNum<[43]>;
- def D12_64 : AFPR64<12, "f12", [F12]>, DwarfRegNum<[44]>;
- def D13_64 : AFPR64<13, "f13", [F13]>, DwarfRegNum<[45]>;
- def D14_64 : AFPR64<14, "f14", [F14]>, DwarfRegNum<[46]>;
- def D15_64 : AFPR64<15, "f15", [F15]>, DwarfRegNum<[47]>;
- def D16_64 : AFPR64<16, "f16", [F16]>, DwarfRegNum<[48]>;
- def D17_64 : AFPR64<17, "f17", [F17]>, DwarfRegNum<[49]>;
- def D18_64 : AFPR64<18, "f18", [F18]>, DwarfRegNum<[50]>;
- def D19_64 : AFPR64<19, "f19", [F19]>, DwarfRegNum<[51]>;
- def D20_64 : AFPR64<20, "f20", [F20]>, DwarfRegNum<[52]>;
- def D21_64 : AFPR64<21, "f21", [F21]>, DwarfRegNum<[53]>;
- def D22_64 : AFPR64<22, "f22", [F22]>, DwarfRegNum<[54]>;
- def D23_64 : AFPR64<23, "f23", [F23]>, DwarfRegNum<[55]>;
- def D24_64 : AFPR64<24, "f24", [F24]>, DwarfRegNum<[56]>;
- def D25_64 : AFPR64<25, "f25", [F25]>, DwarfRegNum<[57]>;
- def D26_64 : AFPR64<26, "f26", [F26]>, DwarfRegNum<[58]>;
- def D27_64 : AFPR64<27, "f27", [F27]>, DwarfRegNum<[59]>;
- def D28_64 : AFPR64<28, "f28", [F28]>, DwarfRegNum<[60]>;
- def D29_64 : AFPR64<29, "f29", [F29]>, DwarfRegNum<[61]>;
- def D30_64 : AFPR64<30, "f30", [F30]>, DwarfRegNum<[62]>;
- def D31_64 : AFPR64<31, "f31", [F31]>, DwarfRegNum<[63]>;
+ foreach I = 0-31 in
+ def D#I#_64 : AFPR64<I, "f"#I, [!cast<FPR>("F"#I), !cast<FPR>("F_HI"#I)]>,
+ DwarfRegNum<[!add(I, 32)]>;
+
+ /// Mips MSA registers
+ /// MSA and FPU cannot both be present unless the FPU has 64-bit registers
+ foreach I = 0-31 in
+ def W#I : AFPR128<I, "w"#I, [!cast<AFPR64>("D"#I#"_64")]>,
+ DwarfRegNum<[!add(I, 32)]>;
// Hi/Lo registers
- def HI : Register<"ac0">, DwarfRegNum<[64]>;
- def HI1 : Register<"ac1">, DwarfRegNum<[176]>;
- def HI2 : Register<"ac2">, DwarfRegNum<[178]>;
- def HI3 : Register<"ac3">, DwarfRegNum<[180]>;
- def LO : Register<"ac0">, DwarfRegNum<[65]>;
- def LO1 : Register<"ac1">, DwarfRegNum<[177]>;
- def LO2 : Register<"ac2">, DwarfRegNum<[179]>;
- def LO3 : Register<"ac3">, DwarfRegNum<[181]>;
+ def HI0 : MipsReg<0, "ac0">, DwarfRegNum<[64]>;
+ def HI1 : MipsReg<1, "ac1">, DwarfRegNum<[176]>;
+ def HI2 : MipsReg<2, "ac2">, DwarfRegNum<[178]>;
+ def HI3 : MipsReg<3, "ac3">, DwarfRegNum<[180]>;
+ def LO0 : MipsReg<0, "ac0">, DwarfRegNum<[65]>;
+ def LO1 : MipsReg<1, "ac1">, DwarfRegNum<[177]>;
+ def LO2 : MipsReg<2, "ac2">, DwarfRegNum<[179]>;
+ def LO3 : MipsReg<3, "ac3">, DwarfRegNum<[181]>;
let SubRegIndices = [sub_32] in {
- def HI64 : RegisterWithSubRegs<"hi", [HI]>;
- def LO64 : RegisterWithSubRegs<"lo", [LO]>;
+ def HI0_64 : RegisterWithSubRegs<"hi", [HI0]>;
+ def LO0_64 : RegisterWithSubRegs<"lo", [LO0]>;
}
- // Status flags register
- def FCR31 : Register<"31">;
+ // FP control registers.
+ foreach I = 0-31 in
+ def FCR#I : MipsReg<#I, ""#I>;
+
+ // FP condition code registers.
+ foreach I = 0-7 in
+ def FCC#I : MipsReg<#I, "fcc"#I>;
- // fcc0 register
- def FCC0 : MipsReg<0, "fcc0">;
+ // COP2 registers.
+ foreach I = 0-31 in
+ def COP2#I : MipsReg<#I, ""#I>;
// PC register
def PC : Register<"pc">;
// Hardware register $29
def HWR29 : MipsReg<29, "29">;
- def HWR29_64 : MipsReg<29, "29">;
// Accum registers
- def AC0 : ACC<0, "ac0", [LO, HI]>;
- def AC1 : ACC<1, "ac1", [LO1, HI1]>;
- def AC2 : ACC<2, "ac2", [LO2, HI2]>;
- def AC3 : ACC<3, "ac3", [LO3, HI3]>;
+ foreach I = 0-3 in
+ def AC#I : ACCReg<#I, "ac"#I,
+ [!cast<Register>("LO"#I), !cast<Register>("HI"#I)]>;
- def AC0_64 : ACC<0, "ac0", [LO64, HI64]>;
+ def AC0_64 : ACCReg<0, "ac0", [LO0_64, HI0_64]>;
// DSP-ASE control register fields.
def DSPPos : Register<"">;
@@ -286,13 +235,23 @@ let Namespace = "Mips" in {
def DSPOutFlag : RegisterWithSubRegs<"", [DSPOutFlag16_19, DSPOutFlag20,
DSPOutFlag21, DSPOutFlag22,
DSPOutFlag23]>;
+
+ // MSA-ASE control registers.
+ def MSAIR : MipsReg<0, "0">;
+ def MSACSR : MipsReg<1, "1">;
+ def MSAAccess : MipsReg<2, "2">;
+ def MSASave : MipsReg<3, "3">;
+ def MSAModify : MipsReg<4, "4">;
+ def MSARequest : MipsReg<5, "5">;
+ def MSAMap : MipsReg<6, "6">;
+ def MSAUnmap : MipsReg<7, "7">;
}
//===----------------------------------------------------------------------===//
// Register Classes
//===----------------------------------------------------------------------===//
-class CPURegsClass<list<ValueType> regTypes> :
+class GPR32Class<list<ValueType> regTypes> :
RegisterClass<"Mips", regTypes, 32, (add
// Reserved
ZERO, AT,
@@ -307,10 +266,10 @@ class CPURegsClass<list<ValueType> regTypes> :
// Reserved
K0, K1, GP, SP, FP, RA)>;
-def CPURegs : CPURegsClass<[i32]>;
-def DSPRegs : CPURegsClass<[v4i8, v2i16]>;
+def GPR32 : GPR32Class<[i32]>;
+def DSPR : GPR32Class<[v4i8, v2i16]>;
-def CPU64Regs : RegisterClass<"Mips", [i64], 64, (add
+def GPR64 : RegisterClass<"Mips", [i64], 64, (add
// Reserved
ZERO_64, AT_64,
// Return Values and Arguments
@@ -330,6 +289,13 @@ def CPU16Regs : RegisterClass<"Mips", [i32], 32, (add
// Callee save
S0, S1)>;
+def CPU16RegsPlusSP : RegisterClass<"Mips", [i32], 32, (add
+ // Return Values and Arguments
+ V0, V1, A0, A1, A2, A3,
+ // Callee save
+ S0, S1,
+ SP)>;
+
def CPURAReg : RegisterClass<"Mips", [i32], 32, (add RA)>, Unallocatable;
def CPUSPReg : RegisterClass<"Mips", [i32], 32, (add SP)>, Unallocatable;
@@ -343,6 +309,9 @@ def CPUSPReg : RegisterClass<"Mips", [i32], 32, (add SP)>, Unallocatable;
// * FGR32 - 32 32-bit registers (single float only mode)
def FGR32 : RegisterClass<"Mips", [f32], 32, (sequence "F%u", 0, 31)>;
+def FGRH32 : RegisterClass<"Mips", [f32], 32, (sequence "F_HI%u", 0, 31)>,
+ Unallocatable;
+
def AFGR64 : RegisterClass<"Mips", [f64], 64, (add
// Return Values and Arguments
D0, D1,
@@ -357,78 +326,224 @@ def AFGR64 : RegisterClass<"Mips", [f64], 64, (add
def FGR64 : RegisterClass<"Mips", [f64], 64, (sequence "D%u_64", 0, 31)>;
-// Condition Register for floating point operations
-def CCR : RegisterClass<"Mips", [i32], 32, (add FCR31,FCC0)>, Unallocatable;
+// FP control registers.
+def CCR : RegisterClass<"Mips", [i32], 32, (sequence "FCR%u", 0, 31)>,
+ Unallocatable;
+
+// FP condition code registers.
+def FCC : RegisterClass<"Mips", [i32], 32, (sequence "FCC%u", 0, 7)>,
+ Unallocatable;
+
+def MSA128B: RegisterClass<"Mips", [v16i8], 128,
+ (sequence "W%u", 0, 31)>;
+def MSA128H: RegisterClass<"Mips", [v8i16, v8f16], 128,
+ (sequence "W%u", 0, 31)>;
+def MSA128W: RegisterClass<"Mips", [v4i32, v4f32], 128,
+ (sequence "W%u", 0, 31)>;
+def MSA128D: RegisterClass<"Mips", [v2i64, v2f64], 128,
+ (sequence "W%u", 0, 31)>;
+
+def MSACtrl: RegisterClass<"Mips", [i32], 32, (add
+ MSAIR, MSACSR, MSAAccess, MSASave, MSAModify, MSARequest, MSAMap, MSAUnmap)>;
// Hi/Lo Registers
-def LORegs : RegisterClass<"Mips", [i32], 32, (add LO)>;
-def HIRegs : RegisterClass<"Mips", [i32], 32, (add HI)>;
-def LORegsDSP : RegisterClass<"Mips", [i32], 32, (add LO, LO1, LO2, LO3)>;
-def HIRegsDSP : RegisterClass<"Mips", [i32], 32, (add HI, HI1, HI2, HI3)>;
-def LORegs64 : RegisterClass<"Mips", [i64], 64, (add LO64)>;
-def HIRegs64 : RegisterClass<"Mips", [i64], 64, (add HI64)>;
+def LO32 : RegisterClass<"Mips", [i32], 32, (add LO0)>;
+def HI32 : RegisterClass<"Mips", [i32], 32, (add HI0)>;
+def LO32DSP : RegisterClass<"Mips", [i32], 32, (sequence "LO%u", 0, 3)>;
+def HI32DSP : RegisterClass<"Mips", [i32], 32, (sequence "HI%u", 0, 3)>;
+def LO64 : RegisterClass<"Mips", [i64], 64, (add LO0_64)>;
+def HI64 : RegisterClass<"Mips", [i64], 64, (add HI0_64)>;
// Hardware registers
def HWRegs : RegisterClass<"Mips", [i32], 32, (add HWR29)>, Unallocatable;
-def HWRegs64 : RegisterClass<"Mips", [i64], 64, (add HWR29_64)>, Unallocatable;
// Accumulator Registers
-def ACRegs : RegisterClass<"Mips", [untyped], 64, (add AC0)> {
+def ACC64 : RegisterClass<"Mips", [untyped], 64, (add AC0)> {
let Size = 64;
}
-def ACRegs128 : RegisterClass<"Mips", [untyped], 128, (add AC0_64)> {
+def ACC128 : RegisterClass<"Mips", [untyped], 128, (add AC0_64)> {
let Size = 128;
}
-def ACRegsDSP : RegisterClass<"Mips", [untyped], 64, (sequence "AC%u", 0, 3)> {
+def ACC64DSP : RegisterClass<"Mips", [untyped], 64, (sequence "AC%u", 0, 3)> {
let Size = 64;
}
def DSPCC : RegisterClass<"Mips", [v4i8, v2i16], 32, (add DSPCCond)>;
+// Coprocessor 2 registers.
+def COP2 : RegisterClass<"Mips", [i32], 32, (sequence "COP2%u", 0, 31)>,
+ Unallocatable;
+
// Register Operands.
-def CPURegsAsmOperand : AsmOperandClass {
- let Name = "CPURegsAsm";
- let ParserMethod = "parseCPURegs";
+
+class MipsAsmRegOperand : AsmOperandClass {
+ let RenderMethod = "addRegAsmOperands";
+}
+def GPR32AsmOperand : MipsAsmRegOperand {
+ let Name = "GPR32Asm";
+ let ParserMethod = "parseGPR32";
+}
+
+def GPR64AsmOperand : MipsAsmRegOperand {
+ let Name = "GPR64Asm";
+ let ParserMethod = "parseGPR64";
+}
+
+def ACC64DSPAsmOperand : MipsAsmRegOperand {
+ let Name = "ACC64DSPAsm";
+ let ParserMethod = "parseACC64DSP";
+}
+
+def LO32DSPAsmOperand : MipsAsmRegOperand {
+ let Name = "LO32DSPAsm";
+ let ParserMethod = "parseLO32DSP";
}
-def CPU64RegsAsmOperand : AsmOperandClass {
- let Name = "CPU64RegsAsm";
- let ParserMethod = "parseCPU64Regs";
+def HI32DSPAsmOperand : MipsAsmRegOperand {
+ let Name = "HI32DSPAsm";
+ let ParserMethod = "parseHI32DSP";
}
-def CCRAsmOperand : AsmOperandClass {
+def CCRAsmOperand : MipsAsmRegOperand {
let Name = "CCRAsm";
let ParserMethod = "parseCCRRegs";
}
-def CPURegsOpnd : RegisterOperand<CPURegs, "printCPURegs"> {
- let ParserMatchClass = CPURegsAsmOperand;
+def AFGR64AsmOperand : MipsAsmRegOperand {
+ let Name = "AFGR64Asm";
+ let ParserMethod = "parseAFGR64Regs";
+}
+
+def FGR64AsmOperand : MipsAsmRegOperand {
+ let Name = "FGR64Asm";
+ let ParserMethod = "parseFGR64Regs";
}
-def CPU64RegsOpnd : RegisterOperand<CPU64Regs, "printCPURegs"> {
- let ParserMatchClass = CPU64RegsAsmOperand;
+def FGR32AsmOperand : MipsAsmRegOperand {
+ let Name = "FGR32Asm";
+ let ParserMethod = "parseFGR32Regs";
}
-def CCROpnd : RegisterOperand<CCR, "printCPURegs"> {
+def FGRH32AsmOperand : MipsAsmRegOperand {
+ let Name = "FGRH32Asm";
+ let ParserMethod = "parseFGRH32Regs";
+}
+
+def FCCRegsAsmOperand : MipsAsmRegOperand {
+ let Name = "FCCRegsAsm";
+ let ParserMethod = "parseFCCRegs";
+}
+
+def MSA128BAsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128BAsm";
+ let ParserMethod = "parseMSA128BRegs";
+}
+
+def MSA128HAsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128HAsm";
+ let ParserMethod = "parseMSA128HRegs";
+}
+
+def MSA128WAsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128WAsm";
+ let ParserMethod = "parseMSA128WRegs";
+}
+
+def MSA128DAsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128DAsm";
+ let ParserMethod = "parseMSA128DRegs";
+}
+
+def MSA128CRAsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128CRAsm";
+ let ParserMethod = "parseMSA128CtrlRegs";
+}
+
+def GPR32Opnd : RegisterOperand<GPR32> {
+ let ParserMatchClass = GPR32AsmOperand;
+}
+
+def GPR64Opnd : RegisterOperand<GPR64> {
+ let ParserMatchClass = GPR64AsmOperand;
+}
+
+def DSPROpnd : RegisterOperand<DSPR> {
+ let ParserMatchClass = GPR32AsmOperand;
+}
+
+def CCROpnd : RegisterOperand<CCR> {
let ParserMatchClass = CCRAsmOperand;
}
-def HWRegsAsmOperand : AsmOperandClass {
+def HWRegsAsmOperand : MipsAsmRegOperand {
let Name = "HWRegsAsm";
let ParserMethod = "parseHWRegs";
}
-def HW64RegsAsmOperand : AsmOperandClass {
- let Name = "HW64RegsAsm";
- let ParserMethod = "parseHW64Regs";
+def COP2AsmOperand : MipsAsmRegOperand {
+ let Name = "COP2Asm";
+ let ParserMethod = "parseCOP2";
}
-def HWRegsOpnd : RegisterOperand<HWRegs, "printCPURegs"> {
+def HWRegsOpnd : RegisterOperand<HWRegs> {
let ParserMatchClass = HWRegsAsmOperand;
}
-def HW64RegsOpnd : RegisterOperand<HWRegs64, "printCPURegs"> {
- let ParserMatchClass = HW64RegsAsmOperand;
+def AFGR64Opnd : RegisterOperand<AFGR64> {
+ let ParserMatchClass = AFGR64AsmOperand;
+}
+
+def FGR64Opnd : RegisterOperand<FGR64> {
+ let ParserMatchClass = FGR64AsmOperand;
+}
+
+def FGR32Opnd : RegisterOperand<FGR32> {
+ let ParserMatchClass = FGR32AsmOperand;
+}
+
+def FGRH32Opnd : RegisterOperand<FGRH32> {
+ let ParserMatchClass = FGRH32AsmOperand;
+}
+
+def FCCRegsOpnd : RegisterOperand<FCC> {
+ let ParserMatchClass = FCCRegsAsmOperand;
}
+
+def LO32DSPOpnd : RegisterOperand<LO32DSP> {
+ let ParserMatchClass = LO32DSPAsmOperand;
+}
+
+def HI32DSPOpnd : RegisterOperand<HI32DSP> {
+ let ParserMatchClass = HI32DSPAsmOperand;
+}
+
+def ACC64DSPOpnd : RegisterOperand<ACC64DSP> {
+ let ParserMatchClass = ACC64DSPAsmOperand;
+}
+
+def COP2Opnd : RegisterOperand<COP2> {
+ let ParserMatchClass = COP2AsmOperand;
+}
+
+def MSA128BOpnd : RegisterOperand<MSA128B> {
+ let ParserMatchClass = MSA128BAsmOperand;
+}
+
+def MSA128HOpnd : RegisterOperand<MSA128H> {
+ let ParserMatchClass = MSA128HAsmOperand;
+}
+
+def MSA128WOpnd : RegisterOperand<MSA128W> {
+ let ParserMatchClass = MSA128WAsmOperand;
+}
+
+def MSA128DOpnd : RegisterOperand<MSA128D> {
+ let ParserMatchClass = MSA128DAsmOperand;
+}
+
+def MSA128CROpnd : RegisterOperand<MSACtrl> {
+ let ParserMatchClass = MSA128CRAsmOperand;
+}
+
diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp
index b295e911bdc9..33ed4b3e3a67 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -32,6 +32,21 @@ using namespace llvm;
namespace {
typedef MachineBasicBlock::iterator Iter;
+static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
+ if (Mips::ACC64RegClass.contains(Src))
+ return std::make_pair((unsigned)Mips::PseudoMFHI,
+ (unsigned)Mips::PseudoMFLO);
+
+ if (Mips::ACC64DSPRegClass.contains(Src))
+ return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);
+
+ if (Mips::ACC128RegClass.contains(Src))
+ return std::make_pair((unsigned)Mips::PseudoMFHI64,
+ (unsigned)Mips::PseudoMFLO64);
+
+ return std::make_pair(0, 0);
+}
+
/// Helper class to expand pseudos.
class ExpandPseudo {
public:
@@ -43,22 +58,19 @@ private:
void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
- void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
+ void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
+ unsigned MFLoOpc, unsigned RegSize);
bool expandCopy(MachineBasicBlock &MBB, Iter I);
- bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned Dst,
- unsigned Src, unsigned RegSize);
+ bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
+ unsigned MFLoOpc);
MachineFunction &MF;
- const MipsSEInstrInfo &TII;
- const MipsRegisterInfo &RegInfo;
MachineRegisterInfo &MRI;
};
}
ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
- : MF(MF_),
- TII(*static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo())),
- RegInfo(TII.getRegisterInfo()), MRI(MF.getRegInfo()) {}
+ : MF(MF_), MRI(MF.getRegInfo()) {}
bool ExpandPseudo::expand() {
bool Expanded = false;
@@ -74,32 +86,26 @@ bool ExpandPseudo::expand() {
bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
switch(I->getOpcode()) {
case Mips::LOAD_CCOND_DSP:
- case Mips::LOAD_CCOND_DSP_P8:
expandLoadCCond(MBB, I);
break;
case Mips::STORE_CCOND_DSP:
- case Mips::STORE_CCOND_DSP_P8:
expandStoreCCond(MBB, I);
break;
- case Mips::LOAD_AC64:
- case Mips::LOAD_AC64_P8:
- case Mips::LOAD_AC_DSP:
- case Mips::LOAD_AC_DSP_P8:
+ case Mips::LOAD_ACC64:
+ case Mips::LOAD_ACC64DSP:
expandLoadACC(MBB, I, 4);
break;
- case Mips::LOAD_AC128:
- case Mips::LOAD_AC128_P8:
+ case Mips::LOAD_ACC128:
expandLoadACC(MBB, I, 8);
break;
- case Mips::STORE_AC64:
- case Mips::STORE_AC64_P8:
- case Mips::STORE_AC_DSP:
- case Mips::STORE_AC_DSP_P8:
- expandStoreACC(MBB, I, 4);
+ case Mips::STORE_ACC64:
+ expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
+ break;
+ case Mips::STORE_ACC64DSP:
+ expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
break;
- case Mips::STORE_AC128:
- case Mips::STORE_AC128_P8:
- expandStoreACC(MBB, I, 8);
+ case Mips::STORE_ACC128:
+ expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
break;
case TargetOpcode::COPY:
if (!expandCopy(MBB, I))
@@ -119,6 +125,11 @@ void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const TargetRegisterClass *RC = RegInfo.intRegClass(4);
unsigned VR = MRI.createVirtualRegister(RC);
unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
@@ -134,6 +145,11 @@ void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const TargetRegisterClass *RC = RegInfo.intRegClass(4);
unsigned VR = MRI.createVirtualRegister(RC);
unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
@@ -152,6 +168,11 @@ void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
unsigned VR0 = MRI.createVirtualRegister(RC);
unsigned VR1 = MRI.createVirtualRegister(RC);
@@ -168,62 +189,69 @@ void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
}
void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
+ unsigned MFHiOpc, unsigned MFLoOpc,
unsigned RegSize) {
- // copy $vr0, lo
+ // mflo $vr0, src
// store $vr0, FI
- // copy $vr1, hi
+ // mfhi $vr1, src
// store $vr1, FI + 4
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
unsigned VR0 = MRI.createVirtualRegister(RC);
unsigned VR1 = MRI.createVirtualRegister(RC);
unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
- unsigned Lo = RegInfo.getSubReg(Src, Mips::sub_lo);
- unsigned Hi = RegInfo.getSubReg(Src, Mips::sub_hi);
DebugLoc DL = I->getDebugLoc();
- BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR0).addReg(Lo, SrcKill);
+ BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
- BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR1).addReg(Hi, SrcKill);
+ BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}
bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
- unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
+ unsigned Src = I->getOperand(1).getReg();
+ std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);
- if (Mips::ACRegsDSPRegClass.contains(Dst, Src))
- return expandCopyACC(MBB, I, Dst, Src, 4);
-
- if (Mips::ACRegs128RegClass.contains(Dst, Src))
- return expandCopyACC(MBB, I, Dst, Src, 8);
+ if (!Opcodes.first)
+ return false;
- return false;
+ return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}
-bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned Dst,
- unsigned Src, unsigned RegSize) {
- // copy $vr0, src_lo
+bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
+ unsigned MFHiOpc, unsigned MFLoOpc) {
+ // mflo $vr0, src
// copy dst_lo, $vr0
- // copy $vr1, src_hi
+ // mfhi $vr1, src
// copy dst_hi, $vr1
- const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
+ unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
+ unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
+ const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
unsigned VR0 = MRI.createVirtualRegister(RC);
unsigned VR1 = MRI.createVirtualRegister(RC);
unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
- unsigned SrcLo = RegInfo.getSubReg(Src, Mips::sub_lo);
- unsigned SrcHi = RegInfo.getSubReg(Src, Mips::sub_hi);
DebugLoc DL = I->getDebugLoc();
- BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR0).addReg(SrcLo, SrcKill);
+ BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
.addReg(VR0, RegState::Kill);
- BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR1).addReg(SrcHi, SrcKill);
+ BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
.addReg(VR1, RegState::Kill);
return true;
@@ -244,10 +272,12 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineFrameInfo *MFI = MF.getFrameInfo();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
- const MipsRegisterInfo *RegInfo =
- static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const MipsSEInstrInfo &TII =
*static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
MachineBasicBlock::iterator MBBI = MBB.begin();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
@@ -262,7 +292,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
if (StackSize == 0 && !MFI->adjustsStack()) return;
MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
MachineLocation DstML, SrcML;
// Adjust stack.
@@ -272,9 +302,8 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel);
- DstML = MachineLocation(MachineLocation::VirtualFP);
- SrcML = MachineLocation(MachineLocation::VirtualFP, -StackSize);
- Moves.push_back(MachineMove(AdjustSPLabel, DstML, SrcML));
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(AdjustSPLabel, -StackSize));
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
@@ -298,35 +327,36 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
// If Reg is a double precision register, emit two cfa_offsets,
// one for each of the paired single precision registers.
if (Mips::AFGR64RegClass.contains(Reg)) {
- MachineLocation DstML0(MachineLocation::VirtualFP, Offset);
- MachineLocation DstML1(MachineLocation::VirtualFP, Offset + 4);
- MachineLocation SrcML0(RegInfo->getSubReg(Reg, Mips::sub_fpeven));
- MachineLocation SrcML1(RegInfo->getSubReg(Reg, Mips::sub_fpodd));
+ unsigned Reg0 =
+ MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
+ unsigned Reg1 =
+ MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);
if (!STI.isLittle())
- std::swap(SrcML0, SrcML1);
+ std::swap(Reg0, Reg1);
- Moves.push_back(MachineMove(CSLabel, DstML0, SrcML0));
- Moves.push_back(MachineMove(CSLabel, DstML1, SrcML1));
+ MMI.addFrameInst(
+ MCCFIInstruction::createOffset(CSLabel, Reg0, Offset));
+ MMI.addFrameInst(
+ MCCFIInstruction::createOffset(CSLabel, Reg1, Offset + 4));
} else {
- // Reg is either in CPURegs or FGR32.
- DstML = MachineLocation(MachineLocation::VirtualFP, Offset);
- SrcML = MachineLocation(Reg);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ // Reg is either in GPR32 or FGR32.
+ MMI.addFrameInst(MCCFIInstruction::createOffset(
+ CSLabel, MRI->getDwarfRegNum(Reg, 1), Offset));
}
}
}
if (MipsFI->callsEhReturn()) {
const TargetRegisterClass *RC = STI.isABI_N64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
// Insert instructions that spill eh data registers.
for (int I = 0; I < 4; ++I) {
if (!MBB.isLiveIn(ehDataReg(I)))
MBB.addLiveIn(ehDataReg(I));
TII.storeRegToStackSlot(MBB, MBBI, ehDataReg(I), false,
- MipsFI->getEhDataRegFI(I), RC, RegInfo);
+ MipsFI->getEhDataRegFI(I), RC, &RegInfo);
}
// Emit .cfi_offset directives for eh data registers.
@@ -335,9 +365,8 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel2);
for (int I = 0; I < 4; ++I) {
int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
- DstML = MachineLocation(MachineLocation::VirtualFP, Offset);
- SrcML = MachineLocation(ehDataReg(I));
- Moves.push_back(MachineMove(CSLabel2, DstML, SrcML));
+ unsigned Reg = MRI->getDwarfRegNum(ehDataReg(I), true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel2, Reg, Offset));
}
}
@@ -350,9 +379,8 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(SetFPLabel);
- DstML = MachineLocation(FP);
- SrcML = MachineLocation(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(SetFPLabel, DstML, SrcML));
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
+ SetFPLabel, MRI->getDwarfRegNum(FP, true)));
}
}
@@ -361,10 +389,12 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
MachineFrameInfo *MFI = MF.getFrameInfo();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
- const MipsRegisterInfo *RegInfo =
- static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const MipsSEInstrInfo &TII =
*static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
DebugLoc dl = MBBI->getDebugLoc();
unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
@@ -385,7 +415,7 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
if (MipsFI->callsEhReturn()) {
const TargetRegisterClass *RC = STI.isABI_N64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
// Find first instruction that restores a callee-saved register.
MachineBasicBlock::iterator I = MBBI;
@@ -395,7 +425,7 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
// Insert instructions that restore eh data registers.
for (int J = 0; J < 4; ++J) {
TII.loadRegFromStackSlot(MBB, I, ehDataReg(J), MipsFI->getEhDataRegFI(J),
- RC, RegInfo);
+ RC, &RegInfo);
}
}
@@ -493,7 +523,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// The spill slot should be half the size of the accumulator. If target is
 // mips64, it should be 64-bit, otherwise it should be 32-bit.
const TargetRegisterClass *RC = STI.hasMips64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
RC->getAlignment(), false);
RS->addScavengingFrameIndex(FI);
@@ -507,7 +537,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
return;
const TargetRegisterClass *RC = STI.isABI_N64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
RC->getAlignment(), false);
RS->addScavengingFrameIndex(FI);
diff --git a/lib/Target/Mips/MipsSEFrameLowering.h b/lib/Target/Mips/MipsSEFrameLowering.h
index 193a66cc65a7..8fa9e469887d 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.h
+++ b/lib/Target/Mips/MipsSEFrameLowering.h
@@ -21,7 +21,7 @@ namespace llvm {
class MipsSEFrameLowering : public MipsFrameLowering {
public:
explicit MipsSEFrameLowering(const MipsSubtarget &STI)
- : MipsFrameLowering(STI, STI.hasMips64() ? 16 : 8) {}
+ : MipsFrameLowering(STI, STI.stackAlignment()) {}
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 8a6523a5d424..737660ec876c 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -66,6 +66,21 @@ void MipsSEDAGToDAGISel::addDSPCtrlRegOperands(bool IsDef, MachineInstr &MI,
MIB.addReg(Mips::DSPEFI, Flag);
}
+unsigned MipsSEDAGToDAGISel::getMSACtrlReg(const SDValue RegIdx) const {
+ switch (cast<ConstantSDNode>(RegIdx)->getZExtValue()) {
+ default:
+ llvm_unreachable("Could not map int to register");
+ case 0: return Mips::MSAIR;
+ case 1: return Mips::MSACSR;
+ case 2: return Mips::MSAAccess;
+ case 3: return Mips::MSASave;
+ case 4: return Mips::MSAModify;
+ case 5: return Mips::MSARequest;
+ case 6: return Mips::MSAMap;
+ case 7: return Mips::MSAUnmap;
+ }
+}
+
bool MipsSEDAGToDAGISel::replaceUsesWithZeroReg(MachineRegisterInfo *MRI,
const MachineInstr& MI) {
unsigned DstReg = 0, ZeroReg = 0;
@@ -119,9 +134,9 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
const TargetRegisterClass *RC;
if (Subtarget.isABI_N64())
- RC = (const TargetRegisterClass*)&Mips::CPU64RegsRegClass;
+ RC = (const TargetRegisterClass*)&Mips::GPR64RegClass;
else
- RC = (const TargetRegisterClass*)&Mips::CPURegsRegClass;
+ RC = (const TargetRegisterClass*)&Mips::GPR32RegClass;
V0 = RegInfo.createVirtualRegister(RC);
V1 = RegInfo.createVirtualRegister(RC);
@@ -214,7 +229,7 @@ void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
}
SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag,
- SDValue CmpLHS, DebugLoc DL,
+ SDValue CmpLHS, SDLoc DL,
SDNode *Node) const {
unsigned Opc = InFlag.getOpcode(); (void)Opc;
@@ -301,6 +316,20 @@ bool MipsSEDAGToDAGISel::selectAddrRegImm(SDValue Addr, SDValue &Base,
return false;
}
+/// ComplexPattern used on MipsInstrInfo
+/// Used on Mips Load/Store instructions
+bool MipsSEDAGToDAGISel::selectAddrRegReg(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ // Operand is a result from an ADD.
+ if (Addr.getOpcode() == ISD::ADD) {
+ Base = Addr.getOperand(0);
+ Offset = Addr.getOperand(1);
+ return true;
+ }
+
+ return false;
+}
+
bool MipsSEDAGToDAGISel::selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
Base = Addr;
@@ -314,9 +343,266 @@ bool MipsSEDAGToDAGISel::selectIntAddr(SDValue Addr, SDValue &Base,
selectAddrDefault(Addr, Base, Offset);
}
+/// Used on microMIPS Load/Store unaligned instructions (12-bit offset)
+bool MipsSEDAGToDAGISel::selectAddrRegImm12(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ EVT ValTy = Addr.getValueType();
+
+ // Addresses of the form FI+const or FI|const
+ if (CurDAG->isBaseWithConstantOffset(Addr)) {
+ ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
+ if (isInt<12>(CN->getSExtValue())) {
+
+ // If the first operand is a FI then get the TargetFI Node
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>
+ (Addr.getOperand(0)))
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
+ else
+ Base = Addr.getOperand(0);
+
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), ValTy);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool MipsSEDAGToDAGISel::selectIntAddrMM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ return selectAddrRegImm12(Addr, Base, Offset) ||
+ selectAddrDefault(Addr, Base, Offset);
+}
+
+// Select constant vector splats.
+//
+// Returns true and sets Imm if:
+// * MSA is enabled
+// * N is an ISD::BUILD_VECTOR representing a constant splat
+bool MipsSEDAGToDAGISel::selectVSplat(SDNode *N, APInt &Imm) const {
+ if (!Subtarget.hasMSA())
+ return false;
+
+ BuildVectorSDNode *Node = dyn_cast<BuildVectorSDNode>(N);
+
+ if (Node == NULL)
+ return false;
+
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+
+ if (!Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs, 8,
+ !Subtarget.isLittle()))
+ return false;
+
+ Imm = SplatValue;
+
+ return true;
+}
+
+// Select constant vector splats.
+//
+// In addition to the requirements of selectVSplat(), this function returns
+// true and sets Imm if:
+// * The splat value is the same width as the elements of the vector
+// * The splat value fits in an integer with the specified signed-ness and
+// width.
+//
+// This function looks through ISD::BITCAST nodes.
+// TODO: This might not be appropriate for big-endian MSA since BITCAST is
+// sometimes a shuffle in big-endian mode.
+//
+// It's worth noting that this function is not used as part of the selection
+// of ldi.[bhwd] since it does not permit using the wrong-typed ldi.[bhwd]
+// instruction to achieve the desired bit pattern. ldi.[bhwd] is selected in
+// MipsSEDAGToDAGISel::selectNode.
+bool MipsSEDAGToDAGISel::
+selectVSplatCommon(SDValue N, SDValue &Imm, bool Signed,
+ unsigned ImmBitSize) const {
+ APInt ImmValue;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ if (selectVSplat (N.getNode(), ImmValue) &&
+ ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
+ if (( Signed && ImmValue.isSignedIntN(ImmBitSize)) ||
+ (!Signed && ImmValue.isIntN(ImmBitSize))) {
+ Imm = CurDAG->getTargetConstant(ImmValue, EltTy);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm1(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 1);
+}
+
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm2(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 2);
+}
+
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm3(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 3);
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm4(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 4);
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm5(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 5);
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm6(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 6);
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm8(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 8);
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatSimm5(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, true, 5);
+}
+
+// Select constant vector splats whose value is a power of 2.
+//
+// In addition to the requirements of selectVSplat(), this function returns
+// true and sets Imm if:
+// * The splat value is the same width as the elements of the vector
+// * The splat value is a power of two.
+//
+// This function looks through ISD::BITCAST nodes.
+// TODO: This might not be appropriate for big-endian MSA since BITCAST is
+// sometimes a shuffle in big-endian mode.
+bool MipsSEDAGToDAGISel::selectVSplatUimmPow2(SDValue N, SDValue &Imm) const {
+ APInt ImmValue;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ if (selectVSplat (N.getNode(), ImmValue) &&
+ ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
+ int32_t Log2 = ImmValue.exactLogBase2();
+
+ if (Log2 != -1) {
+ Imm = CurDAG->getTargetConstant(Log2, EltTy);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Select constant vector splats whose value only has a consecutive sequence
+// of left-most bits set (e.g. 0b11...1100...00).
+//
+// In addition to the requirements of selectVSplat(), this function returns
+// true and sets Imm if:
+// * The splat value is the same width as the elements of the vector
+// * The splat value is a consecutive sequence of left-most bits.
+//
+// This function looks through ISD::BITCAST nodes.
+// TODO: This might not be appropriate for big-endian MSA since BITCAST is
+// sometimes a shuffle in big-endian mode.
+bool MipsSEDAGToDAGISel::selectVSplatMaskL(SDValue N, SDValue &Imm) const {
+ APInt ImmValue;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ if (selectVSplat(N.getNode(), ImmValue) &&
+ ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
+ // Extract the run of set bits starting with bit zero from the bitwise
+ // inverse of ImmValue, and test that the inverse of this is the same
+ // as the original value.
+ if (ImmValue == ~(~ImmValue & ~(~ImmValue + 1))) {
+
+ Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), EltTy);
+ return true;
+ }
+ }
+
+ return false;
+}
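// A worked numeric sketch of the identity tested above (the splat value and
// element width are illustrative, not taken from a test): for an i8 splat of
// 0b11100000,
//   ~ImmValue                    == 0b00011111
//   ~ImmValue + 1                == 0b00100000
//   ~ImmValue & ~(~ImmValue + 1) == 0b00011111   (low run of the inverse)
//   ~(0b00011111)                == 0b11100000 == ImmValue, so the test passes
// and countPopulation() == 3 becomes the selected immediate.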
+
+// Select constant vector splats whose value only has a consecutive sequence
+// of right-most bits set (e.g. 0b00...0011...11).
+//
+// In addition to the requirements of selectVSplat(), this function returns
+// true and sets Imm if:
+// * The splat value is the same width as the elements of the vector
+// * The splat value is a consecutive sequence of right-most bits.
+//
+// This function looks through ISD::BITCAST nodes.
+// TODO: This might not be appropriate for big-endian MSA since BITCAST is
+// sometimes a shuffle in big-endian mode.
+bool MipsSEDAGToDAGISel::selectVSplatMaskR(SDValue N, SDValue &Imm) const {
+ APInt ImmValue;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ if (selectVSplat(N.getNode(), ImmValue) &&
+ ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
+ // Extract the run of set bits starting with bit zero, and test that the
+ // result is the same as the original value
+ if (ImmValue == (ImmValue & ~(ImmValue + 1))) {
+ Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), EltTy);
+ return true;
+ }
+ }
+
+ return false;
+}
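// A similar illustrative check for the right-mask case, assuming an i8 splat
// of 0b00000111:
//   ImmValue + 1               == 0b00001000
//   ImmValue & ~(ImmValue + 1) == 0b00000111 == ImmValue, so the test passes
// and countPopulation() == 3 is used as the immediate.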
+
+bool MipsSEDAGToDAGISel::selectVSplatUimmInvPow2(SDValue N,
+ SDValue &Imm) const {
+ APInt ImmValue;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ if (selectVSplat(N.getNode(), ImmValue) &&
+ ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
+ int32_t Log2 = (~ImmValue).exactLogBase2();
+
+ if (Log2 != -1) {
+ Imm = CurDAG->getTargetConstant(Log2, EltTy);
+ return true;
+ }
+ }
+
+ return false;
+}
+
std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
unsigned Opcode = Node->getOpcode();
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
///
// Instruction Selection not handled by the auto-generated
@@ -348,6 +634,11 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
Mips::ZERO_64, MVT::i64);
Result = CurDAG->getMachineNode(Mips::DMTC1, DL, MVT::f64, Zero);
+ } else if (Subtarget.isFP64bit()) {
+ SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
+ Mips::ZERO, MVT::i32);
+ Result = CurDAG->getMachineNode(Mips::BuildPairF64_64, DL, MVT::f64,
+ Zero, Zero);
} else {
SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
Mips::ZERO, MVT::i32);
@@ -374,7 +665,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
AnalyzeImm.Analyze(Imm, Size, false);
MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
- DebugLoc DL = CN->getDebugLoc();
+ SDLoc DL(CN);
SDNode *RegOpnd;
SDValue ImmOpnd = CurDAG->getTargetConstant(SignExtend64<16>(Inst->ImmOpnd),
MVT::i64);
@@ -401,24 +692,71 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
return std::make_pair(true, RegOpnd);
}
+ case ISD::INTRINSIC_W_CHAIN: {
+ switch (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
+ default:
+ break;
+
+ case Intrinsic::mips_cfcmsa: {
+ SDValue ChainIn = Node->getOperand(0);
+ SDValue RegIdx = Node->getOperand(2);
+ SDValue Reg = CurDAG->getCopyFromReg(ChainIn, DL,
+ getMSACtrlReg(RegIdx), MVT::i32);
+ return std::make_pair(true, Reg.getNode());
+ }
+ }
+ break;
+ }
+
+ case ISD::INTRINSIC_WO_CHAIN: {
+ switch (cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue()) {
+ default:
+ break;
+
+ case Intrinsic::mips_move_v:
+ // Like an assignment but will always produce a move.v even if
+ // unnecessary.
+ return std::make_pair(true,
+ CurDAG->getMachineNode(Mips::MOVE_V, DL,
+ Node->getValueType(0),
+ Node->getOperand(1)));
+ }
+ break;
+ }
+
+ case ISD::INTRINSIC_VOID: {
+ switch (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
+ default:
+ break;
+
+ case Intrinsic::mips_ctcmsa: {
+ SDValue ChainIn = Node->getOperand(0);
+ SDValue RegIdx = Node->getOperand(2);
+ SDValue Value = Node->getOperand(3);
+ SDValue ChainOut = CurDAG->getCopyToReg(ChainIn, DL,
+ getMSACtrlReg(RegIdx), Value);
+ return std::make_pair(true, ChainOut.getNode());
+ }
+ }
+ break;
+ }
+
case MipsISD::ThreadPointer: {
- EVT PtrVT = TLI.getPointerTy();
- unsigned RdhwrOpc, SrcReg, DestReg;
+ EVT PtrVT = getTargetLowering()->getPointerTy();
+ unsigned RdhwrOpc, DestReg;
if (PtrVT == MVT::i32) {
RdhwrOpc = Mips::RDHWR;
- SrcReg = Mips::HWR29;
DestReg = Mips::V1;
} else {
RdhwrOpc = Mips::RDHWR64;
- SrcReg = Mips::HWR29_64;
DestReg = Mips::V1_64;
}
SDNode *Rdhwr =
- CurDAG->getMachineNode(RdhwrOpc, Node->getDebugLoc(),
+ CurDAG->getMachineNode(RdhwrOpc, SDLoc(Node),
Node->getValueType(0),
- CurDAG->getRegister(SrcReg, PtrVT));
+ CurDAG->getRegister(Mips::HWR29, MVT::i32));
SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, DestReg,
SDValue(Rdhwr, 0));
SDValue ResNode = CurDAG->getCopyFromReg(Chain, DL, DestReg, PtrVT);
@@ -426,18 +764,81 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
return std::make_pair(true, ResNode.getNode());
}
- case MipsISD::InsertLOHI: {
- unsigned RCID = Subtarget.hasDSP() ? Mips::ACRegsDSPRegClassID :
- Mips::ACRegsRegClassID;
- SDValue RegClass = CurDAG->getTargetConstant(RCID, MVT::i32);
- SDValue LoIdx = CurDAG->getTargetConstant(Mips::sub_lo, MVT::i32);
- SDValue HiIdx = CurDAG->getTargetConstant(Mips::sub_hi, MVT::i32);
- const SDValue Ops[] = { RegClass, Node->getOperand(0), LoIdx,
- Node->getOperand(1), HiIdx };
- SDNode *Res = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
- MVT::Untyped, Ops);
+ case ISD::BUILD_VECTOR: {
+    // Select appropriate ldi.[bhwd] instructions for 128-bit constant splats
+    // when MSA is enabled. Fix up any register class mismatches that
+ // occur as a result.
+ //
+ // This allows the compiler to use a wider range of immediates than would
+ // otherwise be allowed. If, for example, v4i32 could only use ldi.h then
+ // it would not be possible to load { 0x01010101, 0x01010101, 0x01010101,
+ // 0x01010101 } without using a constant pool. This would be sub-optimal
+    // when 'ldi.b wd, 1' is capable of producing that bit-pattern in the
+    // same set of registers. Similarly, ldi.h isn't capable of producing {
+ // 0x00000000, 0x00000001, 0x00000000, 0x00000001 } but 'ldi.d wd, 1' can.
+
+ BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Node);
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ unsigned LdiOp;
+ EVT ResVecTy = BVN->getValueType(0);
+ EVT ViaVecTy;
+
+ if (!Subtarget.hasMSA() || !BVN->getValueType(0).is128BitVector())
+ return std::make_pair(false, (SDNode*)NULL);
+
+ if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs, 8,
+ !Subtarget.isLittle()))
+ return std::make_pair(false, (SDNode*)NULL);
+
+ switch (SplatBitSize) {
+ default:
+ return std::make_pair(false, (SDNode*)NULL);
+ case 8:
+ LdiOp = Mips::LDI_B;
+ ViaVecTy = MVT::v16i8;
+ break;
+ case 16:
+ LdiOp = Mips::LDI_H;
+ ViaVecTy = MVT::v8i16;
+ break;
+ case 32:
+ LdiOp = Mips::LDI_W;
+ ViaVecTy = MVT::v4i32;
+ break;
+ case 64:
+ LdiOp = Mips::LDI_D;
+ ViaVecTy = MVT::v2i64;
+ break;
+ }
+
+ if (!SplatValue.isSignedIntN(10))
+ return std::make_pair(false, (SDNode*)NULL);
+
+ SDValue Imm = CurDAG->getTargetConstant(SplatValue,
+ ViaVecTy.getVectorElementType());
+
+ SDNode *Res = CurDAG->getMachineNode(LdiOp, SDLoc(Node), ViaVecTy, Imm);
+
+ if (ResVecTy != ViaVecTy) {
+ // If LdiOp is writing to a different register class to ResVecTy, then
+ // fix it up here. This COPY_TO_REGCLASS should never cause a move.v
+ // since the source and destination register sets contain the same
+ // registers.
+ const TargetLowering *TLI = getTargetLowering();
+ MVT ResVecTySimple = ResVecTy.getSimpleVT();
+ const TargetRegisterClass *RC = TLI->getRegClassFor(ResVecTySimple);
+ Res = CurDAG->getMachineNode(Mips::COPY_TO_REGCLASS, SDLoc(Node),
+ ResVecTy, SDValue(Res, 0),
+ CurDAG->getTargetConstant(RC->getID(),
+ MVT::i32));
+ }
+
return std::make_pair(true, Res);
}
+
}
return std::make_pair(false, (SDNode*)NULL);
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.h b/lib/Target/Mips/MipsSEISelDAGToDAG.h
index a235e96b96a1..dc52064c9830 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.h
@@ -30,23 +30,67 @@ private:
void addDSPCtrlRegOperands(bool IsDef, MachineInstr &MI,
MachineFunction &MF);
+ unsigned getMSACtrlReg(const SDValue RegIdx) const;
+
bool replaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr&);
- std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, DebugLoc dl,
+ std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, SDLoc dl,
EVT Ty, bool HasLo, bool HasHi);
SDNode *selectAddESubE(unsigned MOp, SDValue InFlag, SDValue CmpLHS,
- DebugLoc DL, SDNode *Node) const;
+ SDLoc DL, SDNode *Node) const;
virtual bool selectAddrRegImm(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ virtual bool selectAddrRegReg(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
virtual bool selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
virtual bool selectIntAddr(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ virtual bool selectAddrRegImm12(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
+ virtual bool selectIntAddrMM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
+ /// \brief Select constant vector splats.
+ virtual bool selectVSplat(SDNode *N, APInt &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a given integer.
+ virtual bool selectVSplatCommon(SDValue N, SDValue &Imm, bool Signed,
+ unsigned ImmBitSize) const;
+ /// \brief Select constant vector splats whose value fits in a uimm1.
+ virtual bool selectVSplatUimm1(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm2.
+ virtual bool selectVSplatUimm2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm3.
+ virtual bool selectVSplatUimm3(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm4.
+ virtual bool selectVSplatUimm4(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm5.
+ virtual bool selectVSplatUimm5(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm6.
+ virtual bool selectVSplatUimm6(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm8.
+ virtual bool selectVSplatUimm8(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a simm5.
+ virtual bool selectVSplatSimm5(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a power of 2.
+ virtual bool selectVSplatUimmPow2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is the inverse of a
+ /// power of 2.
+ virtual bool selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a run of set bits
+  /// ending at the most significant bit.
+ virtual bool selectVSplatMaskL(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a run of set bits
+ /// starting at bit zero.
+ virtual bool selectVSplatMaskR(SDValue N, SDValue &Imm) const;
+
virtual std::pair<bool, SDNode*> selectNode(SDNode *Node);
virtual void processFunctionAfterISel(MachineFunction &MF);
diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp
index 8544bb891073..809adc03b151 100644
--- a/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -10,6 +10,7 @@
// Subclass of MipsTargetLowering specialized for mips32/64.
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "mips-isel"
#include "MipsSEISelLowering.h"
#include "MipsRegisterInfo.h"
#include "MipsTargetMachine.h"
@@ -17,6 +18,8 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
@@ -25,22 +28,40 @@ static cl::opt<bool>
EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden,
cl::desc("MIPS: Enable tail calls."), cl::init(false));
+static cl::opt<bool> NoDPLoadStore("mno-ldc1-sdc1", cl::init(false),
+ cl::desc("Expand double precision loads and "
+ "stores to their single precision "
+ "counterparts"));
+
MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
: MipsTargetLowering(TM) {
// Set up the register classes
+ addRegisterClass(MVT::i32, &Mips::GPR32RegClass);
- clearRegisterClasses();
+ if (HasMips64)
+ addRegisterClass(MVT::i64, &Mips::GPR64RegClass);
- addRegisterClass(MVT::i32, &Mips::CPURegsRegClass);
+ if (Subtarget->hasDSP() || Subtarget->hasMSA()) {
+ // Expand all truncating stores and extending loads.
+ unsigned FirstVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ unsigned LastVT = (unsigned)MVT::LAST_VECTOR_VALUETYPE;
- if (HasMips64)
- addRegisterClass(MVT::i64, &Mips::CPU64RegsRegClass);
+ for (unsigned VT0 = FirstVT; VT0 <= LastVT; ++VT0) {
+ for (unsigned VT1 = FirstVT; VT1 <= LastVT; ++VT1)
+ setTruncStoreAction((MVT::SimpleValueType)VT0,
+ (MVT::SimpleValueType)VT1, Expand);
+
+ setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
+ setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT0, Expand);
+ }
+ }
if (Subtarget->hasDSP()) {
MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};
for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
- addRegisterClass(VecTys[i], &Mips::DSPRegsRegClass);
+ addRegisterClass(VecTys[i], &Mips::DSPRRegClass);
// Expand all builtin opcodes.
for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
@@ -63,12 +84,28 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
if (Subtarget->hasDSPR2())
setOperationAction(ISD::MUL, MVT::v2i16, Legal);
- if (!TM.Options.UseSoftFloat) {
+ if (Subtarget->hasMSA()) {
+ addMSAIntType(MVT::v16i8, &Mips::MSA128BRegClass);
+ addMSAIntType(MVT::v8i16, &Mips::MSA128HRegClass);
+ addMSAIntType(MVT::v4i32, &Mips::MSA128WRegClass);
+ addMSAIntType(MVT::v2i64, &Mips::MSA128DRegClass);
+ addMSAFloatType(MVT::v8f16, &Mips::MSA128HRegClass);
+ addMSAFloatType(MVT::v4f32, &Mips::MSA128WRegClass);
+ addMSAFloatType(MVT::v2f64, &Mips::MSA128DRegClass);
+
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::OR);
+ setTargetDAGCombine(ISD::SRA);
+ setTargetDAGCombine(ISD::VSELECT);
+ setTargetDAGCombine(ISD::XOR);
+ }
+
+ if (!Subtarget->mipsSEUsesSoftFloat()) {
addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
// When dealing with single precision only, use libcalls
if (!Subtarget->isSingleFloat()) {
- if (HasMips64)
+ if (Subtarget->isFP64bit())
addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
else
addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
@@ -99,6 +136,16 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
setTargetDAGCombine(ISD::ADDE);
setTargetDAGCombine(ISD::SUBE);
+ setTargetDAGCombine(ISD::MUL);
+
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
+
+ if (NoDPLoadStore) {
+ setOperationAction(ISD::LOAD, MVT::f64, Custom);
+ setOperationAction(ISD::STORE, MVT::f64, Custom);
+ }
computeRegisterProperties();
}
@@ -108,6 +155,93 @@ llvm::createMipsSETargetLowering(MipsTargetMachine &TM) {
return new MipsSETargetLowering(TM);
}
+// Enable MSA support for the given integer type and Register class.
+void MipsSETargetLowering::
+addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
+ addRegisterClass(Ty, RC);
+
+ // Expand all builtin opcodes.
+ for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
+ setOperationAction(Opc, Ty, Expand);
+
+ setOperationAction(ISD::BITCAST, Ty, Legal);
+ setOperationAction(ISD::LOAD, Ty, Legal);
+ setOperationAction(ISD::STORE, Ty, Legal);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
+
+ setOperationAction(ISD::ADD, Ty, Legal);
+ setOperationAction(ISD::AND, Ty, Legal);
+ setOperationAction(ISD::CTLZ, Ty, Legal);
+ setOperationAction(ISD::CTPOP, Ty, Legal);
+ setOperationAction(ISD::MUL, Ty, Legal);
+ setOperationAction(ISD::OR, Ty, Legal);
+ setOperationAction(ISD::SDIV, Ty, Legal);
+ setOperationAction(ISD::SREM, Ty, Legal);
+ setOperationAction(ISD::SHL, Ty, Legal);
+ setOperationAction(ISD::SRA, Ty, Legal);
+ setOperationAction(ISD::SRL, Ty, Legal);
+ setOperationAction(ISD::SUB, Ty, Legal);
+ setOperationAction(ISD::UDIV, Ty, Legal);
+ setOperationAction(ISD::UREM, Ty, Legal);
+ setOperationAction(ISD::VECTOR_SHUFFLE, Ty, Custom);
+ setOperationAction(ISD::VSELECT, Ty, Legal);
+ setOperationAction(ISD::XOR, Ty, Legal);
+
+ if (Ty == MVT::v4i32 || Ty == MVT::v2i64) {
+ setOperationAction(ISD::FP_TO_SINT, Ty, Legal);
+ setOperationAction(ISD::FP_TO_UINT, Ty, Legal);
+ setOperationAction(ISD::SINT_TO_FP, Ty, Legal);
+ setOperationAction(ISD::UINT_TO_FP, Ty, Legal);
+ }
+
+ setOperationAction(ISD::SETCC, Ty, Legal);
+ setCondCodeAction(ISD::SETNE, Ty, Expand);
+ setCondCodeAction(ISD::SETGE, Ty, Expand);
+ setCondCodeAction(ISD::SETGT, Ty, Expand);
+ setCondCodeAction(ISD::SETUGE, Ty, Expand);
+ setCondCodeAction(ISD::SETUGT, Ty, Expand);
+}
+
+// Enable MSA support for the given floating-point type and Register class.
+void MipsSETargetLowering::
+addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
+ addRegisterClass(Ty, RC);
+
+ // Expand all builtin opcodes.
+ for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
+ setOperationAction(Opc, Ty, Expand);
+
+ setOperationAction(ISD::LOAD, Ty, Legal);
+ setOperationAction(ISD::STORE, Ty, Legal);
+ setOperationAction(ISD::BITCAST, Ty, Legal);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Legal);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
+
+ if (Ty != MVT::v8f16) {
+ setOperationAction(ISD::FABS, Ty, Legal);
+ setOperationAction(ISD::FADD, Ty, Legal);
+ setOperationAction(ISD::FDIV, Ty, Legal);
+ setOperationAction(ISD::FEXP2, Ty, Legal);
+ setOperationAction(ISD::FLOG2, Ty, Legal);
+ setOperationAction(ISD::FMA, Ty, Legal);
+ setOperationAction(ISD::FMUL, Ty, Legal);
+ setOperationAction(ISD::FRINT, Ty, Legal);
+ setOperationAction(ISD::FSQRT, Ty, Legal);
+ setOperationAction(ISD::FSUB, Ty, Legal);
+ setOperationAction(ISD::VSELECT, Ty, Legal);
+
+ setOperationAction(ISD::SETCC, Ty, Legal);
+ setCondCodeAction(ISD::SETOGE, Ty, Expand);
+ setCondCodeAction(ISD::SETOGT, Ty, Expand);
+ setCondCodeAction(ISD::SETUGE, Ty, Expand);
+ setCondCodeAction(ISD::SETUGT, Ty, Expand);
+ setCondCodeAction(ISD::SETGE, Ty, Expand);
+ setCondCodeAction(ISD::SETGT, Ty, Expand);
+ }
+}
bool
MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
@@ -127,6 +261,8 @@ MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
switch(Op.getOpcode()) {
+ case ISD::LOAD: return lowerLOAD(Op, DAG);
+ case ISD::STORE: return lowerSTORE(Op, DAG);
case ISD::SMUL_LOHI: return lowerMulDiv(Op, MipsISD::Mult, true, true, DAG);
case ISD::UMUL_LOHI: return lowerMulDiv(Op, MipsISD::Multu, true, true, DAG);
case ISD::MULHS: return lowerMulDiv(Op, MipsISD::Mult, false, true, DAG);
@@ -137,6 +273,10 @@ SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
DAG);
case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::INTRINSIC_W_CHAIN: return lowerINTRINSIC_W_CHAIN(Op, DAG);
+ case ISD::INTRINSIC_VOID: return lowerINTRINSIC_VOID(Op, DAG);
+ case ISD::EXTRACT_VECTOR_ELT: return lowerEXTRACT_VECTOR_ELT(Op, DAG);
+ case ISD::BUILD_VECTOR: return lowerBUILD_VECTOR(Op, DAG);
+ case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, DAG);
}
return MipsTargetLowering::LowerOperation(Op, DAG);
@@ -186,10 +326,10 @@ static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) {
if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
return false;
- DebugLoc DL = ADDENode->getDebugLoc();
+ SDLoc DL(ADDENode);
// Initialize accumulator.
- SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
+ SDValue ACCIn = CurDAG->getNode(MipsISD::MTLOHI, DL, MVT::Untyped,
ADDCNode->getOperand(1),
ADDENode->getOperand(1));
@@ -203,15 +343,11 @@ static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) {
// replace uses of adde and addc here
if (!SDValue(ADDCNode, 0).use_empty()) {
- SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32);
- SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd,
- LoIdx);
+ SDValue LoOut = CurDAG->getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), LoOut);
}
if (!SDValue(ADDENode, 0).use_empty()) {
- SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32);
- SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd,
- HiIdx);
+ SDValue HiOut = CurDAG->getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), HiOut);
}
@@ -262,10 +398,10 @@ static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) {
if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
return false;
- DebugLoc DL = SUBENode->getDebugLoc();
+ SDLoc DL(SUBENode);
// Initialize accumulator.
- SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
+ SDValue ACCIn = CurDAG->getNode(MipsISD::MTLOHI, DL, MVT::Untyped,
SUBCNode->getOperand(0),
SUBENode->getOperand(0));
@@ -279,15 +415,11 @@ static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) {
// replace uses of sube and subc here
if (!SDValue(SUBCNode, 0).use_empty()) {
- SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32);
- SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub,
- LoIdx);
+ SDValue LoOut = CurDAG->getNode(MipsISD::MFLO, DL, MVT::i32, MSub);
CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), LoOut);
}
if (!SDValue(SUBENode, 0).use_empty()) {
- SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32);
- SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub,
- HiIdx);
+ SDValue HiOut = CurDAG->getNode(MipsISD::MFHI, DL, MVT::i32, MSub);
CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), HiOut);
}
@@ -307,6 +439,248 @@ static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+// Fold zero extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT
+//
+// Performs the following transformations:
+// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to zero extension if its
+// sign/zero-extension is completely overwritten by the new one performed by
+// the ISD::AND.
+// - Removes redundant zero extensions performed by an ISD::AND.
+static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSubtarget *Subtarget) {
+ if (!Subtarget->hasMSA())
+ return SDValue();
+
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ unsigned Op0Opcode = Op0->getOpcode();
+
+ // (and (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d)
+ // where $d + 1 == 2^n and n == 32
+ // or $d + 1 == 2^n and n <= 32 and ZExt
+ // -> (MipsVExtractZExt $a, $b, $c)
+ if (Op0Opcode == MipsISD::VEXTRACT_SEXT_ELT ||
+ Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT) {
+ ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(Op1);
+
+ if (!Mask)
+ return SDValue();
+
+ int32_t Log2IfPositive = (Mask->getAPIntValue() + 1).exactLogBase2();
+
+ if (Log2IfPositive <= 0)
+ return SDValue(); // Mask+1 is not a power of 2
+
+ SDValue Op0Op2 = Op0->getOperand(2);
+ EVT ExtendTy = cast<VTSDNode>(Op0Op2)->getVT();
+ unsigned ExtendTySize = ExtendTy.getSizeInBits();
+ unsigned Log2 = Log2IfPositive;
+
+ if ((Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT && Log2 >= ExtendTySize) ||
+ Log2 == ExtendTySize) {
+ SDValue Ops[] = { Op0->getOperand(0), Op0->getOperand(1), Op0Op2 };
+ DAG.MorphNodeTo(Op0.getNode(), MipsISD::VEXTRACT_ZEXT_ELT,
+ Op0->getVTList(), Ops, Op0->getNumOperands());
+ return Op0;
+ }
+ }
+
+ return SDValue();
+}
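// Illustrative instance of the condition above (the widths are assumed, not
// taken from a specific test): with a VEXTRACT_SEXT_ELT extending from i8
// (ExtendTySize == 8) and an AND mask of 0x00ff, Mask + 1 == 0x0100 and
// exactLogBase2() == 8 == ExtendTySize, so the extract is morphed into
// VEXTRACT_ZEXT_ELT and the AND becomes redundant.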
+
+// Determine if the specified node is a constant vector splat.
+//
+// Returns true and sets Imm if:
+// * N is an ISD::BUILD_VECTOR representing a constant splat
+//
+// This function is quite similar to MipsSEDAGToDAGISel::selectVSplat. The
+// differences are that it assumes the MSA has already been checked and the
+// arbitrary requirement for a maximum of 32-bit integers isn't applied (and
+// must not be in order for binsri.d to be selectable).
+static bool isVSplat(SDValue N, APInt &Imm, bool IsLittleEndian) {
+ BuildVectorSDNode *Node = dyn_cast<BuildVectorSDNode>(N.getNode());
+
+ if (Node == NULL)
+ return false;
+
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+
+ if (!Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
+ 8, !IsLittleEndian))
+ return false;
+
+ Imm = SplatValue;
+
+ return true;
+}
+
+// Test whether the given node is an all-ones build_vector.
+static bool isVectorAllOnes(SDValue N) {
+ // Look through bitcasts. Endianness doesn't matter because we are looking
+ // for an all-ones value.
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
+
+ if (!BVN)
+ return false;
+
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+
+ // Endianness doesn't matter in this context because we are looking for
+ // an all-ones value.
+ if (BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs))
+ return SplatValue.isAllOnesValue();
+
+ return false;
+}
+
+// Test whether N is the bitwise inverse of OfNode.
+static bool isBitwiseInverse(SDValue N, SDValue OfNode) {
+ if (N->getOpcode() != ISD::XOR)
+ return false;
+
+ if (isVectorAllOnes(N->getOperand(0)))
+ return N->getOperand(1) == OfNode;
+
+ if (isVectorAllOnes(N->getOperand(1)))
+ return N->getOperand(0) == OfNode;
+
+ return false;
+}
+
+// Perform combines where ISD::OR is the root node.
+//
+// Performs the following transformations:
+// - (or (and $a, $mask), (and $b, $inv_mask)) => (vselect $mask, $a, $b)
+// where $inv_mask is the bitwise inverse of $mask and the 'or' has a 128-bit
+// vector type.
+static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSubtarget *Subtarget) {
+ if (!Subtarget->hasMSA())
+ return SDValue();
+
+ EVT Ty = N->getValueType(0);
+
+ if (!Ty.is128BitVector())
+ return SDValue();
+
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+
+ if (Op0->getOpcode() == ISD::AND && Op1->getOpcode() == ISD::AND) {
+ SDValue Op0Op0 = Op0->getOperand(0);
+ SDValue Op0Op1 = Op0->getOperand(1);
+ SDValue Op1Op0 = Op1->getOperand(0);
+ SDValue Op1Op1 = Op1->getOperand(1);
+ bool IsLittleEndian = !Subtarget->isLittle();
+
+ SDValue IfSet, IfClr, Cond;
+ bool IsConstantMask = false;
+ APInt Mask, InvMask;
+
+    // If Op0Op0 is an appropriate mask, try to find its inverse in either
+    // Op1Op0 or Op1Op1. Keep track of the Cond, IfSet, and IfClr nodes while
+    // looking.
+ // IfClr will be set if we find a valid match.
+ if (isVSplat(Op0Op0, Mask, IsLittleEndian)) {
+ Cond = Op0Op0;
+ IfSet = Op0Op1;
+
+ if (isVSplat(Op1Op0, InvMask, IsLittleEndian) &&
+ Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
+ IfClr = Op1Op1;
+ else if (isVSplat(Op1Op1, InvMask, IsLittleEndian) &&
+ Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
+ IfClr = Op1Op0;
+
+ IsConstantMask = true;
+ }
+
+ // If IfClr is not yet set, and Op0Op1 is an appropriate mask, try the same
+ // thing again using this mask.
+ // IfClr will be set if we find a valid match.
+ if (!IfClr.getNode() && isVSplat(Op0Op1, Mask, IsLittleEndian)) {
+ Cond = Op0Op1;
+ IfSet = Op0Op0;
+
+ if (isVSplat(Op1Op0, InvMask, IsLittleEndian) &&
+ Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
+ IfClr = Op1Op1;
+ else if (isVSplat(Op1Op1, InvMask, IsLittleEndian) &&
+ Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
+ IfClr = Op1Op0;
+
+ IsConstantMask = true;
+ }
+
+ // If IfClr is not yet set, try looking for a non-constant match.
+ // IfClr will be set if we find a valid match amongst the eight
+ // possibilities.
+ if (!IfClr.getNode()) {
+ if (isBitwiseInverse(Op0Op0, Op1Op0)) {
+ Cond = Op1Op0;
+ IfSet = Op1Op1;
+ IfClr = Op0Op1;
+ } else if (isBitwiseInverse(Op0Op1, Op1Op0)) {
+ Cond = Op1Op0;
+ IfSet = Op1Op1;
+ IfClr = Op0Op0;
+ } else if (isBitwiseInverse(Op0Op0, Op1Op1)) {
+ Cond = Op1Op1;
+ IfSet = Op1Op0;
+ IfClr = Op0Op1;
+ } else if (isBitwiseInverse(Op0Op1, Op1Op1)) {
+ Cond = Op1Op1;
+ IfSet = Op1Op0;
+ IfClr = Op0Op0;
+ } else if (isBitwiseInverse(Op1Op0, Op0Op0)) {
+ Cond = Op0Op0;
+ IfSet = Op0Op1;
+ IfClr = Op1Op1;
+ } else if (isBitwiseInverse(Op1Op1, Op0Op0)) {
+ Cond = Op0Op0;
+ IfSet = Op0Op1;
+ IfClr = Op1Op0;
+ } else if (isBitwiseInverse(Op1Op0, Op0Op1)) {
+ Cond = Op0Op1;
+ IfSet = Op0Op0;
+ IfClr = Op1Op1;
+ } else if (isBitwiseInverse(Op1Op1, Op0Op1)) {
+ Cond = Op0Op1;
+ IfSet = Op0Op0;
+ IfClr = Op1Op0;
+ }
+ }
+
+ // At this point, IfClr will be set if we have a valid match.
+ if (!IfClr.getNode())
+ return SDValue();
+
+ assert(Cond.getNode() && IfSet.getNode());
+
+ // Fold degenerate cases.
+ if (IsConstantMask) {
+ if (Mask.isAllOnesValue())
+ return IfSet;
+ else if (Mask == 0)
+ return IfClr;
+ }
+
+ // Transform the DAG into an equivalent VSELECT.
+ return DAG.getNode(ISD::VSELECT, SDLoc(N), Ty, Cond, IfClr, IfSet);
+ }
+
+ return SDValue();
+}
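// Illustrative instance of the fold above, assuming v16i8 operands and a
// constant mask splat of 0x0f:
//   (or (and $a, splat(0x0f)), (and $b, splat(0xf0)))
// takes each bit from $a where the mask bit is set and from $b where it is
// clear, which is the behaviour of the VSELECT built here (and of a
// bit-select instruction such as bsel.v once instructions are selected).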
+
static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const MipsSubtarget *Subtarget) {
@@ -320,6 +694,57 @@ static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT,
+ EVT ShiftTy, SelectionDAG &DAG) {
+ // Clear the upper (64 - VT.sizeInBits) bits.
+ C &= ((uint64_t)-1) >> (64 - VT.getSizeInBits());
+
+ // Return 0.
+ if (C == 0)
+ return DAG.getConstant(0, VT);
+
+ // Return x.
+ if (C == 1)
+ return X;
+
+ // If c is power of 2, return (shl x, log2(c)).
+ if (isPowerOf2_64(C))
+ return DAG.getNode(ISD::SHL, DL, VT, X,
+ DAG.getConstant(Log2_64(C), ShiftTy));
+
+ unsigned Log2Ceil = Log2_64_Ceil(C);
+ uint64_t Floor = 1LL << Log2_64(C);
+ uint64_t Ceil = Log2Ceil == 64 ? 0LL : 1LL << Log2Ceil;
+
+ // If |c - floor_c| <= |c - ceil_c|,
+ // where floor_c = pow(2, floor(log2(c))) and ceil_c = pow(2, ceil(log2(c))),
+ // return (add constMult(x, floor_c), constMult(x, c - floor_c)).
+ if (C - Floor <= Ceil - C) {
+ SDValue Op0 = genConstMult(X, Floor, DL, VT, ShiftTy, DAG);
+ SDValue Op1 = genConstMult(X, C - Floor, DL, VT, ShiftTy, DAG);
+ return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
+ }
+
+ // If |c - floor_c| > |c - ceil_c|,
+ // return (sub constMult(x, ceil_c), constMult(x, ceil_c - c)).
+ SDValue Op0 = genConstMult(X, Ceil, DL, VT, ShiftTy, DAG);
+ SDValue Op1 = genConstMult(X, Ceil - C, DL, VT, ShiftTy, DAG);
+ return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
+}
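// Illustrative traces of the recursion above (the C values are chosen
// arbitrarily):
//   C == 10: Floor == 8, Ceil == 16, 10 - 8 <= 16 - 10, so the result is
//            (add (shl x, 3), (shl x, 1)).
//   C == 15: Floor == 8, Ceil == 16, 15 - 8 > 16 - 15, so the result is
//            (sub (shl x, 4), x).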
+
+static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG,
+ const TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSETargetLowering *TL) {
+ EVT VT = N->getValueType(0);
+
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+ if (!VT.isVector())
+ return genConstMult(N->getOperand(0), C->getZExtValue(), SDLoc(N),
+ VT, TL->getScalarShiftAmountTy(VT), DAG);
+
+ return SDValue(N, 0);
+}
+
static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
SelectionDAG &DAG,
const MipsSubtarget *Subtarget) {
@@ -330,6 +755,9 @@ static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
+ if (!Subtarget->hasDSP())
+ return SDValue();
+
if (!BV ||
!BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
EltSize, !Subtarget->isLittle()) ||
@@ -337,7 +765,7 @@ static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
(SplatValue.getZExtValue() >= EltSize))
return SDValue();
- return DAG.getNode(Opc, N->getDebugLoc(), Ty, N->getOperand(0),
+ return DAG.getNode(Opc, SDLoc(N), Ty, N->getOperand(0),
DAG.getConstant(SplatValue.getZExtValue(), MVT::i32));
}
@@ -352,11 +780,57 @@ static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
return performDSPShiftCombine(MipsISD::SHLL_DSP, N, Ty, DAG, Subtarget);
}
+// Fold sign-extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT for MSA and fold
+// constant splats into MipsISD::SHRA_DSP for DSPr2.
+//
+// Performs the following transformations:
+// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to sign extension if its
+// sign/zero-extension is completely overwritten by the new one performed by
+// the ISD::SRA and ISD::SHL nodes.
+// - Removes redundant sign extensions performed by an ISD::SRA and ISD::SHL
+// sequence.
+//
+// See performDSPShiftCombine for more information about the transformation
+// used for DSPr2.
static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const MipsSubtarget *Subtarget) {
EVT Ty = N->getValueType(0);
+ if (Subtarget->hasMSA()) {
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+
+ // (sra (shl (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d), imm:$d)
+ // where $d + sizeof($c) == 32
+ // or $d + sizeof($c) <= 32 and SExt
+ // -> (MipsVExtractSExt $a, $b, $c)
+ if (Op0->getOpcode() == ISD::SHL && Op1 == Op0->getOperand(1)) {
+ SDValue Op0Op0 = Op0->getOperand(0);
+ ConstantSDNode *ShAmount = dyn_cast<ConstantSDNode>(Op1);
+
+ if (!ShAmount)
+ return SDValue();
+
+ if (Op0Op0->getOpcode() != MipsISD::VEXTRACT_SEXT_ELT &&
+ Op0Op0->getOpcode() != MipsISD::VEXTRACT_ZEXT_ELT)
+ return SDValue();
+
+ EVT ExtendTy = cast<VTSDNode>(Op0Op0->getOperand(2))->getVT();
+ unsigned TotalBits = ShAmount->getZExtValue() + ExtendTy.getSizeInBits();
+
+ if (TotalBits == 32 ||
+ (Op0Op0->getOpcode() == MipsISD::VEXTRACT_SEXT_ELT &&
+ TotalBits <= 32)) {
+ SDValue Ops[] = { Op0Op0->getOperand(0), Op0Op0->getOperand(1),
+ Op0Op0->getOperand(2) };
+ DAG.MorphNodeTo(Op0Op0.getNode(), MipsISD::VEXTRACT_SEXT_ELT,
+ Op0Op0->getVTList(), Ops, Op0Op0->getNumOperands());
+ return Op0Op0;
+ }
+ }
+ }
+
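  // Illustrative instance of the pattern above (the widths are assumed): for
  //   (sra (shl (VEXTRACT_ZEXT_ELT $w, $idx, i8), 24), 24)
  // TotalBits == 24 + 8 == 32, so the shift pair is dropped and the extract
  // is morphed into VEXTRACT_SEXT_ELT directly.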
if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget->hasDSPR2()))
return SDValue();
@@ -402,24 +876,91 @@ static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
if (!isLegalDSPCondCode(Ty, cast<CondCodeSDNode>(N->getOperand(2))->get()))
return SDValue();
- return DAG.getNode(MipsISD::SETCC_DSP, N->getDebugLoc(), Ty, N->getOperand(0),
+ return DAG.getNode(MipsISD::SETCC_DSP, SDLoc(N), Ty, N->getOperand(0),
N->getOperand(1), N->getOperand(2));
}
static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) {
EVT Ty = N->getValueType(0);
- if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
- return SDValue();
+ if (Ty.is128BitVector() && Ty.isInteger()) {
+ // Try the following combines:
+ // (vselect (setcc $a, $b, SETLT), $b, $a)) -> (vsmax $a, $b)
+ // (vselect (setcc $a, $b, SETLE), $b, $a)) -> (vsmax $a, $b)
+ // (vselect (setcc $a, $b, SETLT), $a, $b)) -> (vsmin $a, $b)
+ // (vselect (setcc $a, $b, SETLE), $a, $b)) -> (vsmin $a, $b)
+ // (vselect (setcc $a, $b, SETULT), $b, $a)) -> (vumax $a, $b)
+ // (vselect (setcc $a, $b, SETULE), $b, $a)) -> (vumax $a, $b)
+ // (vselect (setcc $a, $b, SETULT), $a, $b)) -> (vumin $a, $b)
+ // (vselect (setcc $a, $b, SETULE), $a, $b)) -> (vumin $a, $b)
+ // SETGT/SETGE/SETUGT/SETUGE variants of these will show up initially but
+ // will be expanded to equivalent SETLT/SETLE/SETULT/SETULE versions by the
+ // legalizer.
+ SDValue Op0 = N->getOperand(0);
+
+ if (Op0->getOpcode() != ISD::SETCC)
+ return SDValue();
+
+ ISD::CondCode CondCode = cast<CondCodeSDNode>(Op0->getOperand(2))->get();
+ bool Signed;
+
+ if (CondCode == ISD::SETLT || CondCode == ISD::SETLE)
+ Signed = true;
+ else if (CondCode == ISD::SETULT || CondCode == ISD::SETULE)
+ Signed = false;
+ else
+ return SDValue();
+
+ SDValue Op1 = N->getOperand(1);
+ SDValue Op2 = N->getOperand(2);
+ SDValue Op0Op0 = Op0->getOperand(0);
+ SDValue Op0Op1 = Op0->getOperand(1);
+
+ if (Op1 == Op0Op0 && Op2 == Op0Op1)
+ return DAG.getNode(Signed ? MipsISD::VSMIN : MipsISD::VUMIN, SDLoc(N),
+ Ty, Op1, Op2);
+ else if (Op1 == Op0Op1 && Op2 == Op0Op0)
+ return DAG.getNode(Signed ? MipsISD::VSMAX : MipsISD::VUMAX, SDLoc(N),
+ Ty, Op1, Op2);
+ } else if ((Ty == MVT::v2i16) || (Ty == MVT::v4i8)) {
+ SDValue SetCC = N->getOperand(0);
+
+ if (SetCC.getOpcode() != MipsISD::SETCC_DSP)
+ return SDValue();
+
+ return DAG.getNode(MipsISD::SELECT_CC_DSP, SDLoc(N), Ty,
+ SetCC.getOperand(0), SetCC.getOperand(1),
+ N->getOperand(1), N->getOperand(2), SetCC.getOperand(2));
+ }
- SDValue SetCC = N->getOperand(0);
+ return SDValue();
+}
- if (SetCC.getOpcode() != MipsISD::SETCC_DSP)
- return SDValue();
+static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
+ const MipsSubtarget *Subtarget) {
+ EVT Ty = N->getValueType(0);
- return DAG.getNode(MipsISD::SELECT_CC_DSP, N->getDebugLoc(), Ty,
- SetCC.getOperand(0), SetCC.getOperand(1), N->getOperand(1),
- N->getOperand(2), SetCC.getOperand(2));
+ if (Subtarget->hasMSA() && Ty.is128BitVector() && Ty.isInteger()) {
+ // Try the following combines:
+ // (xor (or $a, $b), (build_vector allones))
+ // (xor (or $a, $b), (bitcast (build_vector allones)))
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ SDValue NotOp;
+
+ if (ISD::isBuildVectorAllOnes(Op0.getNode()))
+ NotOp = Op1;
+ else if (ISD::isBuildVectorAllOnes(Op1.getNode()))
+ NotOp = Op0;
+ else
+ return SDValue();
+
+ if (NotOp->getOpcode() == ISD::OR)
+ return DAG.getNode(MipsISD::VNOR, SDLoc(N), Ty, NotOp->getOperand(0),
+ NotOp->getOperand(1));
+ }
+
+ return SDValue();
}
SDValue
@@ -430,8 +971,16 @@ MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
switch (N->getOpcode()) {
case ISD::ADDE:
return performADDECombine(N, DAG, DCI, Subtarget);
+ case ISD::AND:
+ Val = performANDCombine(N, DAG, DCI, Subtarget);
+ break;
+ case ISD::OR:
+ Val = performORCombine(N, DAG, DCI, Subtarget);
+ break;
case ISD::SUBE:
return performSUBECombine(N, DAG, DCI, Subtarget);
+ case ISD::MUL:
+ return performMULCombine(N, DAG, DCI, this);
case ISD::SHL:
return performSHLCombine(N, DAG, DCI, Subtarget);
case ISD::SRA:
@@ -440,14 +989,22 @@ MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
return performSRLCombine(N, DAG, DCI, Subtarget);
case ISD::VSELECT:
return performVSELECTCombine(N, DAG);
- case ISD::SETCC: {
+ case ISD::XOR:
+ Val = performXORCombine(N, DAG, Subtarget);
+ break;
+ case ISD::SETCC:
Val = performSETCCCombine(N, DAG);
break;
}
- }
- if (Val.getNode())
+ if (Val.getNode()) {
+ DEBUG(dbgs() << "\nMipsSE DAG Combine:\n";
+ N->printrWithDepth(dbgs(), &DAG);
+ dbgs() << "\n=> \n";
+ Val.getNode()->printrWithDepth(dbgs(), &DAG);
+ dbgs() << "\n");
return Val;
+ }
return MipsTargetLowering::PerformDAGCombine(N, DCI);
}
@@ -460,6 +1017,42 @@ MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
case Mips::BPOSGE32_PSEUDO:
return emitBPOSGE32(MI, BB);
+ case Mips::SNZ_B_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BNZ_B);
+ case Mips::SNZ_H_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BNZ_H);
+ case Mips::SNZ_W_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BNZ_W);
+ case Mips::SNZ_D_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BNZ_D);
+ case Mips::SNZ_V_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BNZ_V);
+ case Mips::SZ_B_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BZ_B);
+ case Mips::SZ_H_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BZ_H);
+ case Mips::SZ_W_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BZ_W);
+ case Mips::SZ_D_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BZ_D);
+ case Mips::SZ_V_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BZ_V);
+ case Mips::COPY_FW_PSEUDO:
+ return emitCOPY_FW(MI, BB);
+ case Mips::COPY_FD_PSEUDO:
+ return emitCOPY_FD(MI, BB);
+ case Mips::INSERT_FW_PSEUDO:
+ return emitINSERT_FW(MI, BB);
+ case Mips::INSERT_FD_PSEUDO:
+ return emitINSERT_FD(MI, BB);
+ case Mips::FILL_FW_PSEUDO:
+ return emitFILL_FW(MI, BB);
+ case Mips::FILL_FD_PSEUDO:
+ return emitFILL_FD(MI, BB);
+ case Mips::FEXP2_W_1_PSEUDO:
+ return emitFEXP2_W_1(MI, BB);
+ case Mips::FEXP2_D_1_PSEUDO:
+ return emitFEXP2_D_1(MI, BB);
}
}
@@ -496,21 +1089,81 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
InternalLinkage, CLI, Callee, Chain);
}
+SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ LoadSDNode &Nd = *cast<LoadSDNode>(Op);
+
+ if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
+ return MipsTargetLowering::lowerLOAD(Op, DAG);
+
+ // Replace a double precision load with two i32 loads and a buildpair64.
+ SDLoc DL(Op);
+ SDValue Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
+ EVT PtrVT = Ptr.getValueType();
+
+ // i32 load from lower address.
+ SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr,
+ MachinePointerInfo(), Nd.isVolatile(),
+ Nd.isNonTemporal(), Nd.isInvariant(),
+ Nd.getAlignment());
+
+ // i32 load from higher address.
+ Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
+ SDValue Hi = DAG.getLoad(MVT::i32, DL, Lo.getValue(1), Ptr,
+ MachinePointerInfo(), Nd.isVolatile(),
+ Nd.isNonTemporal(), Nd.isInvariant(),
+ std::min(Nd.getAlignment(), 4U));
+
+ if (!Subtarget->isLittle())
+ std::swap(Lo, Hi);
+
+ SDValue BP = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
+ SDValue Ops[2] = {BP, Hi.getValue(1)};
+ return DAG.getMergeValues(Ops, 2, DL);
+}
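// A worked sketch of the expansion above, assuming a little-endian target and
// an f64 load from address Ptr: Lo is an i32 load from [Ptr], Hi an i32 load
// from [Ptr + 4] chained after it, and the result is (BuildPairF64 Lo, Hi)
// together with the second load's output chain; on big-endian targets Lo and
// Hi are swapped before the pair is built.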
+
+SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ StoreSDNode &Nd = *cast<StoreSDNode>(Op);
+
+ if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
+ return MipsTargetLowering::lowerSTORE(Op, DAG);
+
+ // Replace a double precision store with two extractelement64s and i32 stores.
+ SDLoc DL(Op);
+ SDValue Val = Nd.getValue(), Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
+ EVT PtrVT = Ptr.getValueType();
+ SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
+ Val, DAG.getConstant(0, MVT::i32));
+ SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
+ Val, DAG.getConstant(1, MVT::i32));
+
+ if (!Subtarget->isLittle())
+ std::swap(Lo, Hi);
+
+ // i32 store to lower address.
+ Chain = DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(),
+ Nd.isVolatile(), Nd.isNonTemporal(), Nd.getAlignment(),
+ Nd.getTBAAInfo());
+
+ // i32 store to higher address.
+ Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
+ return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
+ Nd.isVolatile(), Nd.isNonTemporal(),
+ std::min(Nd.getAlignment(), 4U), Nd.getTBAAInfo());
+}
+
SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
bool HasLo, bool HasHi,
SelectionDAG &DAG) const {
EVT Ty = Op.getOperand(0).getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped,
Op.getOperand(0), Op.getOperand(1));
SDValue Lo, Hi;
if (HasLo)
- Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult,
- DAG.getConstant(Mips::sub_lo, MVT::i32));
+ Lo = DAG.getNode(MipsISD::MFLO, DL, Ty, Mult);
if (HasHi)
- Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult,
- DAG.getConstant(Mips::sub_hi, MVT::i32));
+ Hi = DAG.getNode(MipsISD::MFHI, DL, Ty, Mult);
if (!HasLo || !HasHi)
return HasLo ? Lo : Hi;
@@ -520,19 +1173,17 @@ SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
}
-static SDValue initAccumulator(SDValue In, DebugLoc DL, SelectionDAG &DAG) {
+static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) {
SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
DAG.getConstant(0, MVT::i32));
SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
DAG.getConstant(1, MVT::i32));
- return DAG.getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, InLo, InHi);
+ return DAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, InLo, InHi);
}
-static SDValue extractLOHI(SDValue Op, DebugLoc DL, SelectionDAG &DAG) {
- SDValue Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op,
- DAG.getConstant(Mips::sub_lo, MVT::i32));
- SDValue Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op,
- DAG.getConstant(Mips::sub_hi, MVT::i32));
+static SDValue extractLOHI(SDValue Op, SDLoc DL, SelectionDAG &DAG) {
+ SDValue Lo = DAG.getNode(MipsISD::MFLO, DL, MVT::i32, Op);
+ SDValue Hi = DAG.getNode(MipsISD::MFHI, DL, MVT::i32, Op);
return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
}
@@ -549,7 +1200,7 @@ static SDValue extractLOHI(SDValue Op, DebugLoc DL, SelectionDAG &DAG) {
// out64 = merge-values (v0, v1)
//
static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other;
SmallVector<SDValue, 3> Ops;
unsigned OpNo = 0;
@@ -596,8 +1247,156 @@ static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
return DAG.getMergeValues(Vals, 2, DL);
}
+// Lower an MSA copy intrinsic into the specified SelectionDAG node
+static SDValue lowerMSACopyIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
+ SDLoc DL(Op);
+ SDValue Vec = Op->getOperand(1);
+ SDValue Idx = Op->getOperand(2);
+ EVT ResTy = Op->getValueType(0);
+ EVT EltTy = Vec->getValueType(0).getVectorElementType();
+
+ SDValue Result = DAG.getNode(Opc, DL, ResTy, Vec, Idx,
+ DAG.getValueType(EltTy));
+
+ return Result;
+}
+
+static SDValue lowerMSASplatZExt(SDValue Op, unsigned OpNr, SelectionDAG &DAG) {
+ EVT ResVecTy = Op->getValueType(0);
+ EVT ViaVecTy = ResVecTy;
+ SDLoc DL(Op);
+
+ // When ResVecTy == MVT::v2i64, LaneA is the upper 32 bits of the lane and
+  // LaneB is the lower 32 bits. Otherwise LaneA and LaneB are alternating
+ // lanes.
+ SDValue LaneA;
+ SDValue LaneB = Op->getOperand(2);
+
+ if (ResVecTy == MVT::v2i64) {
+ LaneA = DAG.getConstant(0, MVT::i32);
+ ViaVecTy = MVT::v4i32;
+ } else
+ LaneA = LaneB;
+
+ SDValue Ops[16] = { LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB,
+ LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB };
+
+ SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, Ops,
+ ViaVecTy.getVectorNumElements());
+
+ if (ViaVecTy != ResVecTy)
+ Result = DAG.getNode(ISD::BITCAST, DL, ResVecTy, Result);
+
+ return Result;
+}
+
+static SDValue lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG) {
+ return DAG.getConstant(Op->getConstantOperandVal(ImmOp), Op->getValueType(0));
+}
+
+static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
+ bool BigEndian, SelectionDAG &DAG) {
+ EVT ViaVecTy = VecTy;
+ SDValue SplatValueA = SplatValue;
+ SDValue SplatValueB = SplatValue;
+ SDLoc DL(SplatValue);
+
+ if (VecTy == MVT::v2i64) {
+ // v2i64 BUILD_VECTOR must be performed via v4i32 so split into i32's.
+ ViaVecTy = MVT::v4i32;
+
+ SplatValueA = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValue);
+ SplatValueB = DAG.getNode(ISD::SRL, DL, MVT::i64, SplatValue,
+ DAG.getConstant(32, MVT::i32));
+ SplatValueB = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValueB);
+ }
+
+ // We currently hold the parts in little endian order. Swap them if
+ // necessary.
+ if (BigEndian)
+ std::swap(SplatValueA, SplatValueB);
+
+ SDValue Ops[16] = { SplatValueA, SplatValueB, SplatValueA, SplatValueB,
+ SplatValueA, SplatValueB, SplatValueA, SplatValueB,
+ SplatValueA, SplatValueB, SplatValueA, SplatValueB,
+ SplatValueA, SplatValueB, SplatValueA, SplatValueB };
+
+ SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, Ops,
+ ViaVecTy.getVectorNumElements());
+
+ if (VecTy != ViaVecTy)
+ Result = DAG.getNode(ISD::BITCAST, DL, VecTy, Result);
+
+ return Result;
+}
+
+static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
+ unsigned Opc, SDValue Imm,
+ bool BigEndian) {
+ EVT VecTy = Op->getValueType(0);
+ SDValue Exp2Imm;
+ SDLoc DL(Op);
+
+  // The DAG Combiner can't constant fold bitcasted vectors yet, so we must do
+  // it here for now.
+ if (VecTy == MVT::v2i64) {
+ if (ConstantSDNode *CImm = dyn_cast<ConstantSDNode>(Imm)) {
+ APInt BitImm = APInt(64, 1) << CImm->getAPIntValue();
+
+ SDValue BitImmHiOp = DAG.getConstant(BitImm.lshr(32).trunc(32), MVT::i32);
+ SDValue BitImmLoOp = DAG.getConstant(BitImm.trunc(32), MVT::i32);
+
+ if (BigEndian)
+ std::swap(BitImmLoOp, BitImmHiOp);
+
+ Exp2Imm =
+ DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
+ DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, BitImmLoOp,
+ BitImmHiOp, BitImmLoOp, BitImmHiOp));
+ }
+ }
+
+ if (Exp2Imm.getNode() == NULL) {
+    // We couldn't constant fold, so do a vector shift instead.
+
+ // Extend i32 to i64 if necessary. Sign or zero extend doesn't matter since
+ // only values 0-63 are valid.
+ if (VecTy == MVT::v2i64)
+ Imm = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Imm);
+
+ Exp2Imm = getBuildVectorSplat(VecTy, Imm, BigEndian, DAG);
+
+ Exp2Imm =
+ DAG.getNode(ISD::SHL, DL, VecTy, DAG.getConstant(1, VecTy), Exp2Imm);
+ }
+
+ return DAG.getNode(Opc, DL, VecTy, Op->getOperand(1), Exp2Imm);
+}
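The constant-folding branch above materialises 2^imm for v2i64 by splitting the 64-bit constant into two 32-bit words and swapping them on big-endian targets. A standalone sketch of that word split; splitExp2Imm is an illustrative name and is not part of the patch.

#include <cstdint>
#include <utility>

// Mirrors the constant-fold path above: express the 64-bit constant 2^Imm as
// two 32-bit words so it can be built through a v4i32 BUILD_VECTOR, swapping
// the words for big-endian targets.
static std::pair<uint32_t, uint32_t> splitExp2Imm(unsigned Imm,
                                                  bool BigEndian) {
  uint64_t Bit = uint64_t(1) << Imm;   // 2^Imm, Imm must be in [0, 63]
  uint32_t Lo = uint32_t(Bit);         // low 32 bits
  uint32_t Hi = uint32_t(Bit >> 32);   // high 32 bits
  if (BigEndian)
    std::swap(Lo, Hi);
  return {Lo, Hi};                     // words in BUILD_VECTOR order
}
// e.g. splitExp2Imm(35, /*BigEndian=*/false) == {0x00000000, 0x00000008}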
+
+static SDValue lowerMSABitClear(SDValue Op, SelectionDAG &DAG) {
+ EVT ResTy = Op->getValueType(0);
+ SDLoc DL(Op);
+ SDValue One = DAG.getConstant(1, ResTy);
+ SDValue Bit = DAG.getNode(ISD::SHL, DL, ResTy, One, Op->getOperand(2));
+
+ return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1),
+ DAG.getNOT(DL, Bit, ResTy));
+}
+
+static SDValue lowerMSABitClearImm(SDValue Op, SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ EVT ResTy = Op->getValueType(0);
+ APInt BitImm = APInt(ResTy.getVectorElementType().getSizeInBits(), 1)
+ << cast<ConstantSDNode>(Op->getOperand(2))->getAPIntValue();
+ SDValue BitMask = DAG.getConstant(~BitImm, ResTy);
+
+ return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1), BitMask);
+}
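Since bclri.[bhwd] clears a single bit per element, the mask computed above is just the complement of one set bit. A minimal sketch for byte elements; bclriByteMask is an illustrative name, not part of the patch.

#include <cstdint>

// ~(1 << Imm) truncated to the element width, as computed above for bclri.
static uint8_t bclriByteMask(unsigned Imm) {
  return static_cast<uint8_t>(~(1u << Imm));   // Imm must be in [0, 7]
}
// bclriByteMask(3) == 0xF7, so bclri.b clears bit 3 of every byte lane.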
+
SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+
switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) {
default:
return SDValue();
@@ -633,12 +1432,610 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
return lowerDSPIntr(Op, DAG, MipsISD::MSub);
case Intrinsic::mips_msubu:
return lowerDSPIntr(Op, DAG, MipsISD::MSubu);
+ case Intrinsic::mips_addv_b:
+ case Intrinsic::mips_addv_h:
+ case Intrinsic::mips_addv_w:
+ case Intrinsic::mips_addv_d:
+ return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_addvi_b:
+ case Intrinsic::mips_addvi_h:
+ case Intrinsic::mips_addvi_w:
+ case Intrinsic::mips_addvi_d:
+ return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_and_v:
+ return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_andi_b:
+ return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_bclr_b:
+ case Intrinsic::mips_bclr_h:
+ case Intrinsic::mips_bclr_w:
+ case Intrinsic::mips_bclr_d:
+ return lowerMSABitClear(Op, DAG);
+ case Intrinsic::mips_bclri_b:
+ case Intrinsic::mips_bclri_h:
+ case Intrinsic::mips_bclri_w:
+ case Intrinsic::mips_bclri_d:
+ return lowerMSABitClearImm(Op, DAG);
+ case Intrinsic::mips_binsli_b:
+ case Intrinsic::mips_binsli_h:
+ case Intrinsic::mips_binsli_w:
+ case Intrinsic::mips_binsli_d: {
+ EVT VecTy = Op->getValueType(0);
+ EVT EltTy = VecTy.getVectorElementType();
+ APInt Mask = APInt::getHighBitsSet(EltTy.getSizeInBits(),
+ Op->getConstantOperandVal(3));
+ return DAG.getNode(ISD::VSELECT, DL, VecTy,
+ DAG.getConstant(Mask, VecTy, true), Op->getOperand(1),
+ Op->getOperand(2));
+ }
+ case Intrinsic::mips_binsri_b:
+ case Intrinsic::mips_binsri_h:
+ case Intrinsic::mips_binsri_w:
+ case Intrinsic::mips_binsri_d: {
+ EVT VecTy = Op->getValueType(0);
+ EVT EltTy = VecTy.getVectorElementType();
+ APInt Mask = APInt::getLowBitsSet(EltTy.getSizeInBits(),
+ Op->getConstantOperandVal(3));
+ return DAG.getNode(ISD::VSELECT, DL, VecTy,
+ DAG.getConstant(Mask, VecTy, true), Op->getOperand(1),
+ Op->getOperand(2));
+ }
+ case Intrinsic::mips_bmnz_v:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3),
+ Op->getOperand(2), Op->getOperand(1));
+ case Intrinsic::mips_bmnzi_b:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
+ lowerMSASplatImm(Op, 3, DAG), Op->getOperand(2),
+ Op->getOperand(1));
+ case Intrinsic::mips_bmz_v:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_bmzi_b:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
+ lowerMSASplatImm(Op, 3, DAG), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_bneg_b:
+ case Intrinsic::mips_bneg_h:
+ case Intrinsic::mips_bneg_w:
+ case Intrinsic::mips_bneg_d: {
+ EVT VecTy = Op->getValueType(0);
+ SDValue One = DAG.getConstant(1, VecTy);
+
+ return DAG.getNode(ISD::XOR, DL, VecTy, Op->getOperand(1),
+ DAG.getNode(ISD::SHL, DL, VecTy, One,
+ Op->getOperand(2)));
+ }
+ case Intrinsic::mips_bnegi_b:
+ case Intrinsic::mips_bnegi_h:
+ case Intrinsic::mips_bnegi_w:
+ case Intrinsic::mips_bnegi_d:
+ return lowerMSABinaryBitImmIntr(Op, DAG, ISD::XOR, Op->getOperand(2),
+ !Subtarget->isLittle());
+ case Intrinsic::mips_bnz_b:
+ case Intrinsic::mips_bnz_h:
+ case Intrinsic::mips_bnz_w:
+ case Intrinsic::mips_bnz_d:
+ return DAG.getNode(MipsISD::VALL_NONZERO, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_bnz_v:
+ return DAG.getNode(MipsISD::VANY_NONZERO, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_bsel_v:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2),
+ Op->getOperand(3));
+ case Intrinsic::mips_bseli_b:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2),
+ lowerMSASplatImm(Op, 3, DAG));
+ case Intrinsic::mips_bset_b:
+ case Intrinsic::mips_bset_h:
+ case Intrinsic::mips_bset_w:
+ case Intrinsic::mips_bset_d: {
+ EVT VecTy = Op->getValueType(0);
+ SDValue One = DAG.getConstant(1, VecTy);
+
+ return DAG.getNode(ISD::OR, DL, VecTy, Op->getOperand(1),
+ DAG.getNode(ISD::SHL, DL, VecTy, One,
+ Op->getOperand(2)));
+ }
+ case Intrinsic::mips_bseti_b:
+ case Intrinsic::mips_bseti_h:
+ case Intrinsic::mips_bseti_w:
+ case Intrinsic::mips_bseti_d:
+ return lowerMSABinaryBitImmIntr(Op, DAG, ISD::OR, Op->getOperand(2),
+ !Subtarget->isLittle());
+ case Intrinsic::mips_bz_b:
+ case Intrinsic::mips_bz_h:
+ case Intrinsic::mips_bz_w:
+ case Intrinsic::mips_bz_d:
+ return DAG.getNode(MipsISD::VALL_ZERO, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_bz_v:
+ return DAG.getNode(MipsISD::VANY_ZERO, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_ceq_b:
+ case Intrinsic::mips_ceq_h:
+ case Intrinsic::mips_ceq_w:
+ case Intrinsic::mips_ceq_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETEQ);
+ case Intrinsic::mips_ceqi_b:
+ case Intrinsic::mips_ceqi_h:
+ case Intrinsic::mips_ceqi_w:
+ case Intrinsic::mips_ceqi_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG), ISD::SETEQ);
+ case Intrinsic::mips_cle_s_b:
+ case Intrinsic::mips_cle_s_h:
+ case Intrinsic::mips_cle_s_w:
+ case Intrinsic::mips_cle_s_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETLE);
+ case Intrinsic::mips_clei_s_b:
+ case Intrinsic::mips_clei_s_h:
+ case Intrinsic::mips_clei_s_w:
+ case Intrinsic::mips_clei_s_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG), ISD::SETLE);
+ case Intrinsic::mips_cle_u_b:
+ case Intrinsic::mips_cle_u_h:
+ case Intrinsic::mips_cle_u_w:
+ case Intrinsic::mips_cle_u_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETULE);
+ case Intrinsic::mips_clei_u_b:
+ case Intrinsic::mips_clei_u_h:
+ case Intrinsic::mips_clei_u_w:
+ case Intrinsic::mips_clei_u_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG), ISD::SETULE);
+ case Intrinsic::mips_clt_s_b:
+ case Intrinsic::mips_clt_s_h:
+ case Intrinsic::mips_clt_s_w:
+ case Intrinsic::mips_clt_s_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETLT);
+ case Intrinsic::mips_clti_s_b:
+ case Intrinsic::mips_clti_s_h:
+ case Intrinsic::mips_clti_s_w:
+ case Intrinsic::mips_clti_s_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG), ISD::SETLT);
+ case Intrinsic::mips_clt_u_b:
+ case Intrinsic::mips_clt_u_h:
+ case Intrinsic::mips_clt_u_w:
+ case Intrinsic::mips_clt_u_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETULT);
+ case Intrinsic::mips_clti_u_b:
+ case Intrinsic::mips_clti_u_h:
+ case Intrinsic::mips_clti_u_w:
+ case Intrinsic::mips_clti_u_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG), ISD::SETULT);
+ case Intrinsic::mips_copy_s_b:
+ case Intrinsic::mips_copy_s_h:
+ case Intrinsic::mips_copy_s_w:
+ return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT);
+ case Intrinsic::mips_copy_s_d:
+ // Don't lower directly into VEXTRACT_SEXT_ELT since i64 might be illegal.
+ // Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type
+ // legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_copy_u_b:
+ case Intrinsic::mips_copy_u_h:
+ case Intrinsic::mips_copy_u_w:
+ return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT);
+ case Intrinsic::mips_copy_u_d:
+ // Don't lower directly into VEXTRACT_ZEXT_ELT since i64 might be illegal.
+ // Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type
+ // legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
+ //
+ // Note: When i64 is illegal, this results in copy_s.w instructions instead
+ // of copy_u.w instructions. This makes no difference to the behaviour
+ // since i64 is only illegal when the register file is 32-bit.
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_div_s_b:
+ case Intrinsic::mips_div_s_h:
+ case Intrinsic::mips_div_s_w:
+ case Intrinsic::mips_div_s_d:
+ return DAG.getNode(ISD::SDIV, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_div_u_b:
+ case Intrinsic::mips_div_u_h:
+ case Intrinsic::mips_div_u_w:
+ case Intrinsic::mips_div_u_d:
+ return DAG.getNode(ISD::UDIV, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_fadd_w:
+ case Intrinsic::mips_fadd_d:
+ return DAG.getNode(ISD::FADD, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ // Don't lower mips_fcaf_[wd] since LLVM folds SETFALSE condcodes away
+ case Intrinsic::mips_fceq_w:
+ case Intrinsic::mips_fceq_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETOEQ);
+ case Intrinsic::mips_fcle_w:
+ case Intrinsic::mips_fcle_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETOLE);
+ case Intrinsic::mips_fclt_w:
+ case Intrinsic::mips_fclt_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETOLT);
+ case Intrinsic::mips_fcne_w:
+ case Intrinsic::mips_fcne_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETONE);
+ case Intrinsic::mips_fcor_w:
+ case Intrinsic::mips_fcor_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETO);
+ case Intrinsic::mips_fcueq_w:
+ case Intrinsic::mips_fcueq_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETUEQ);
+ case Intrinsic::mips_fcule_w:
+ case Intrinsic::mips_fcule_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETULE);
+ case Intrinsic::mips_fcult_w:
+ case Intrinsic::mips_fcult_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETULT);
+ case Intrinsic::mips_fcun_w:
+ case Intrinsic::mips_fcun_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETUO);
+ case Intrinsic::mips_fcune_w:
+ case Intrinsic::mips_fcune_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETUNE);
+ case Intrinsic::mips_fdiv_w:
+ case Intrinsic::mips_fdiv_d:
+ return DAG.getNode(ISD::FDIV, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_ffint_u_w:
+ case Intrinsic::mips_ffint_u_d:
+ return DAG.getNode(ISD::UINT_TO_FP, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_ffint_s_w:
+ case Intrinsic::mips_ffint_s_d:
+ return DAG.getNode(ISD::SINT_TO_FP, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_fill_b:
+ case Intrinsic::mips_fill_h:
+ case Intrinsic::mips_fill_w:
+ case Intrinsic::mips_fill_d: {
+ SmallVector<SDValue, 16> Ops;
+ EVT ResTy = Op->getValueType(0);
+
+ for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
+ Ops.push_back(Op->getOperand(1));
+
+ // If ResTy is v2i64 then the type legalizer will break this node down into
+ // an equivalent v4i32.
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, ResTy, &Ops[0], Ops.size());
+ }
+ case Intrinsic::mips_fexp2_w:
+ case Intrinsic::mips_fexp2_d: {
+ EVT ResTy = Op->getValueType(0);
+ return DAG.getNode(
+ ISD::FMUL, SDLoc(Op), ResTy, Op->getOperand(1),
+ DAG.getNode(ISD::FEXP2, SDLoc(Op), ResTy, Op->getOperand(2)));
+ }
+ case Intrinsic::mips_flog2_w:
+ case Intrinsic::mips_flog2_d:
+ return DAG.getNode(ISD::FLOG2, DL, Op->getValueType(0), Op->getOperand(1));
+ case Intrinsic::mips_fmadd_w:
+ case Intrinsic::mips_fmadd_d:
+ return DAG.getNode(ISD::FMA, SDLoc(Op), Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
+ case Intrinsic::mips_fmul_w:
+ case Intrinsic::mips_fmul_d:
+ return DAG.getNode(ISD::FMUL, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_fmsub_w:
+ case Intrinsic::mips_fmsub_d: {
+ EVT ResTy = Op->getValueType(0);
+ return DAG.getNode(ISD::FSUB, SDLoc(Op), ResTy, Op->getOperand(1),
+ DAG.getNode(ISD::FMUL, SDLoc(Op), ResTy,
+ Op->getOperand(2), Op->getOperand(3)));
+ }
+ case Intrinsic::mips_frint_w:
+ case Intrinsic::mips_frint_d:
+ return DAG.getNode(ISD::FRINT, DL, Op->getValueType(0), Op->getOperand(1));
+ case Intrinsic::mips_fsqrt_w:
+ case Intrinsic::mips_fsqrt_d:
+ return DAG.getNode(ISD::FSQRT, DL, Op->getValueType(0), Op->getOperand(1));
+ case Intrinsic::mips_fsub_w:
+ case Intrinsic::mips_fsub_d:
+ return DAG.getNode(ISD::FSUB, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_ftrunc_u_w:
+ case Intrinsic::mips_ftrunc_u_d:
+ return DAG.getNode(ISD::FP_TO_UINT, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_ftrunc_s_w:
+ case Intrinsic::mips_ftrunc_s_d:
+ return DAG.getNode(ISD::FP_TO_SINT, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_ilvev_b:
+ case Intrinsic::mips_ilvev_h:
+ case Intrinsic::mips_ilvev_w:
+ case Intrinsic::mips_ilvev_d:
+ return DAG.getNode(MipsISD::ILVEV, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_ilvl_b:
+ case Intrinsic::mips_ilvl_h:
+ case Intrinsic::mips_ilvl_w:
+ case Intrinsic::mips_ilvl_d:
+ return DAG.getNode(MipsISD::ILVL, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_ilvod_b:
+ case Intrinsic::mips_ilvod_h:
+ case Intrinsic::mips_ilvod_w:
+ case Intrinsic::mips_ilvod_d:
+ return DAG.getNode(MipsISD::ILVOD, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_ilvr_b:
+ case Intrinsic::mips_ilvr_h:
+ case Intrinsic::mips_ilvr_w:
+ case Intrinsic::mips_ilvr_d:
+ return DAG.getNode(MipsISD::ILVR, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_insert_b:
+ case Intrinsic::mips_insert_h:
+ case Intrinsic::mips_insert_w:
+ case Intrinsic::mips_insert_d:
+ return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(3), Op->getOperand(2));
+ case Intrinsic::mips_ldi_b:
+ case Intrinsic::mips_ldi_h:
+ case Intrinsic::mips_ldi_w:
+ case Intrinsic::mips_ldi_d:
+ return lowerMSASplatImm(Op, 1, DAG);
+ case Intrinsic::mips_lsa: {
+ EVT ResTy = Op->getValueType(0);
+ return DAG.getNode(ISD::ADD, SDLoc(Op), ResTy, Op->getOperand(1),
+ DAG.getNode(ISD::SHL, SDLoc(Op), ResTy,
+ Op->getOperand(2), Op->getOperand(3)));
+ }
+ case Intrinsic::mips_maddv_b:
+ case Intrinsic::mips_maddv_h:
+ case Intrinsic::mips_maddv_w:
+ case Intrinsic::mips_maddv_d: {
+ EVT ResTy = Op->getValueType(0);
+ return DAG.getNode(ISD::ADD, SDLoc(Op), ResTy, Op->getOperand(1),
+ DAG.getNode(ISD::MUL, SDLoc(Op), ResTy,
+ Op->getOperand(2), Op->getOperand(3)));
+ }
+ case Intrinsic::mips_max_s_b:
+ case Intrinsic::mips_max_s_h:
+ case Intrinsic::mips_max_s_w:
+ case Intrinsic::mips_max_s_d:
+ return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_max_u_b:
+ case Intrinsic::mips_max_u_h:
+ case Intrinsic::mips_max_u_w:
+ case Intrinsic::mips_max_u_d:
+ return DAG.getNode(MipsISD::VUMAX, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_maxi_s_b:
+ case Intrinsic::mips_maxi_s_h:
+ case Intrinsic::mips_maxi_s_w:
+ case Intrinsic::mips_maxi_s_d:
+ return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_maxi_u_b:
+ case Intrinsic::mips_maxi_u_h:
+ case Intrinsic::mips_maxi_u_w:
+ case Intrinsic::mips_maxi_u_d:
+ return DAG.getNode(MipsISD::VUMAX, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_min_s_b:
+ case Intrinsic::mips_min_s_h:
+ case Intrinsic::mips_min_s_w:
+ case Intrinsic::mips_min_s_d:
+ return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_min_u_b:
+ case Intrinsic::mips_min_u_h:
+ case Intrinsic::mips_min_u_w:
+ case Intrinsic::mips_min_u_d:
+ return DAG.getNode(MipsISD::VUMIN, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_mini_s_b:
+ case Intrinsic::mips_mini_s_h:
+ case Intrinsic::mips_mini_s_w:
+ case Intrinsic::mips_mini_s_d:
+ return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_mini_u_b:
+ case Intrinsic::mips_mini_u_h:
+ case Intrinsic::mips_mini_u_w:
+ case Intrinsic::mips_mini_u_d:
+ return DAG.getNode(MipsISD::VUMIN, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_mod_s_b:
+ case Intrinsic::mips_mod_s_h:
+ case Intrinsic::mips_mod_s_w:
+ case Intrinsic::mips_mod_s_d:
+ return DAG.getNode(ISD::SREM, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_mod_u_b:
+ case Intrinsic::mips_mod_u_h:
+ case Intrinsic::mips_mod_u_w:
+ case Intrinsic::mips_mod_u_d:
+ return DAG.getNode(ISD::UREM, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_mulv_b:
+ case Intrinsic::mips_mulv_h:
+ case Intrinsic::mips_mulv_w:
+ case Intrinsic::mips_mulv_d:
+ return DAG.getNode(ISD::MUL, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_msubv_b:
+ case Intrinsic::mips_msubv_h:
+ case Intrinsic::mips_msubv_w:
+ case Intrinsic::mips_msubv_d: {
+ EVT ResTy = Op->getValueType(0);
+ return DAG.getNode(ISD::SUB, SDLoc(Op), ResTy, Op->getOperand(1),
+ DAG.getNode(ISD::MUL, SDLoc(Op), ResTy,
+ Op->getOperand(2), Op->getOperand(3)));
+ }
+ case Intrinsic::mips_nlzc_b:
+ case Intrinsic::mips_nlzc_h:
+ case Intrinsic::mips_nlzc_w:
+ case Intrinsic::mips_nlzc_d:
+ return DAG.getNode(ISD::CTLZ, DL, Op->getValueType(0), Op->getOperand(1));
+ case Intrinsic::mips_nor_v: {
+ SDValue Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ return DAG.getNOT(DL, Res, Res->getValueType(0));
+ }
+ case Intrinsic::mips_nori_b: {
+ SDValue Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0),
+ Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG));
+ return DAG.getNOT(DL, Res, Res->getValueType(0));
+ }
+ case Intrinsic::mips_or_v:
+ return DAG.getNode(ISD::OR, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_ori_b:
+ return DAG.getNode(ISD::OR, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_pckev_b:
+ case Intrinsic::mips_pckev_h:
+ case Intrinsic::mips_pckev_w:
+ case Intrinsic::mips_pckev_d:
+ return DAG.getNode(MipsISD::PCKEV, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_pckod_b:
+ case Intrinsic::mips_pckod_h:
+ case Intrinsic::mips_pckod_w:
+ case Intrinsic::mips_pckod_d:
+ return DAG.getNode(MipsISD::PCKOD, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_pcnt_b:
+ case Intrinsic::mips_pcnt_h:
+ case Intrinsic::mips_pcnt_w:
+ case Intrinsic::mips_pcnt_d:
+ return DAG.getNode(ISD::CTPOP, DL, Op->getValueType(0), Op->getOperand(1));
+ case Intrinsic::mips_shf_b:
+ case Intrinsic::mips_shf_h:
+ case Intrinsic::mips_shf_w:
+ return DAG.getNode(MipsISD::SHF, DL, Op->getValueType(0),
+ Op->getOperand(2), Op->getOperand(1));
+ case Intrinsic::mips_sll_b:
+ case Intrinsic::mips_sll_h:
+ case Intrinsic::mips_sll_w:
+ case Intrinsic::mips_sll_d:
+ return DAG.getNode(ISD::SHL, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_slli_b:
+ case Intrinsic::mips_slli_h:
+ case Intrinsic::mips_slli_w:
+ case Intrinsic::mips_slli_d:
+ return DAG.getNode(ISD::SHL, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_splat_b:
+ case Intrinsic::mips_splat_h:
+ case Intrinsic::mips_splat_w:
+ case Intrinsic::mips_splat_d:
+ // We can't lower via VECTOR_SHUFFLE because it requires constant shuffle
+ // masks, nor can we lower via BUILD_VECTOR & EXTRACT_VECTOR_ELT because
+    // EXTRACT_VECTOR_ELT can't extract i64s on MIPS32.
+ // Instead we lower to MipsISD::VSHF and match from there.
+ return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
+ lowerMSASplatZExt(Op, 2, DAG), Op->getOperand(1),
+ Op->getOperand(1));
+ case Intrinsic::mips_splati_b:
+ case Intrinsic::mips_splati_h:
+ case Intrinsic::mips_splati_w:
+ case Intrinsic::mips_splati_d:
+ return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
+ lowerMSASplatImm(Op, 2, DAG), Op->getOperand(1),
+ Op->getOperand(1));
+ case Intrinsic::mips_sra_b:
+ case Intrinsic::mips_sra_h:
+ case Intrinsic::mips_sra_w:
+ case Intrinsic::mips_sra_d:
+ return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_srai_b:
+ case Intrinsic::mips_srai_h:
+ case Intrinsic::mips_srai_w:
+ case Intrinsic::mips_srai_d:
+ return DAG.getNode(ISD::SRA, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_srl_b:
+ case Intrinsic::mips_srl_h:
+ case Intrinsic::mips_srl_w:
+ case Intrinsic::mips_srl_d:
+ return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_srli_b:
+ case Intrinsic::mips_srli_h:
+ case Intrinsic::mips_srli_w:
+ case Intrinsic::mips_srli_d:
+ return DAG.getNode(ISD::SRL, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_subv_b:
+ case Intrinsic::mips_subv_h:
+ case Intrinsic::mips_subv_w:
+ case Intrinsic::mips_subv_d:
+ return DAG.getNode(ISD::SUB, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_subvi_b:
+ case Intrinsic::mips_subvi_h:
+ case Intrinsic::mips_subvi_w:
+ case Intrinsic::mips_subvi_d:
+ return DAG.getNode(ISD::SUB, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_vshf_b:
+ case Intrinsic::mips_vshf_h:
+ case Intrinsic::mips_vshf_w:
+ case Intrinsic::mips_vshf_d:
+ return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
+ case Intrinsic::mips_xor_v:
+ return DAG.getNode(ISD::XOR, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_xori_b:
+ return DAG.getNode(ISD::XOR, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
}
}
+static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
+ SDLoc DL(Op);
+ SDValue ChainIn = Op->getOperand(0);
+ SDValue Address = Op->getOperand(2);
+ SDValue Offset = Op->getOperand(3);
+ EVT ResTy = Op->getValueType(0);
+ EVT PtrTy = Address->getValueType(0);
+
+ Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
+
+ return DAG.getLoad(ResTy, DL, ChainIn, Address, MachinePointerInfo(), false,
+ false, false, 16);
+}
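As a rough model of the semantics, the ld.[bhwd] intrinsics read 16 bytes from a base pointer plus a byte offset; the lowering above folds the offset into the address and emits the load with 16-byte alignment. A hedged sketch only; loadMSARegister is an illustrative name, not an LLVM API.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Base pointer plus byte offset, then one 128-bit (16-byte) load.
static void loadMSARegister(const void *Base, std::ptrdiff_t Offset,
                            uint8_t (&Out)[16]) {
  const uint8_t *Addr = static_cast<const uint8_t *>(Base) + Offset;
  std::memcpy(Out, Addr, 16);
}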
+
SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
- switch (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue()) {
+ unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
+ switch (Intr) {
default:
return SDValue();
case Intrinsic::mips_extp:
@@ -681,9 +2078,524 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH);
case Intrinsic::mips_dpsqx_sa_w_ph:
return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH);
+ case Intrinsic::mips_ld_b:
+ case Intrinsic::mips_ld_h:
+ case Intrinsic::mips_ld_w:
+ case Intrinsic::mips_ld_d:
+ return lowerMSALoadIntr(Op, DAG, Intr);
+ }
+}
+
+static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
+ SDLoc DL(Op);
+ SDValue ChainIn = Op->getOperand(0);
+ SDValue Value = Op->getOperand(2);
+ SDValue Address = Op->getOperand(3);
+ SDValue Offset = Op->getOperand(4);
+ EVT PtrTy = Address->getValueType(0);
+
+ Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
+
+ return DAG.getStore(ChainIn, DL, Value, Address, MachinePointerInfo(), false,
+ false, 16);
+}
+
+SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op,
+ SelectionDAG &DAG) const {
+ unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
+ switch (Intr) {
+ default:
+ return SDValue();
+ case Intrinsic::mips_st_b:
+ case Intrinsic::mips_st_h:
+ case Intrinsic::mips_st_w:
+ case Intrinsic::mips_st_d:
+ return lowerMSAStoreIntr(Op, DAG, Intr);
}
}
+/// \brief Check if the given BuildVectorSDNode is a splat.
+/// This method currently relies on DAG nodes being reused when equivalent,
+/// so it's possible for this to return false even when isConstantSplat returns
+/// true.
+static bool isSplatVector(const BuildVectorSDNode *N) {
+ unsigned int nOps = N->getNumOperands();
+ assert(nOps > 1 && "isSplatVector has 0 or 1 sized build vector");
+
+ SDValue Operand0 = N->getOperand(0);
+
+ for (unsigned int i = 1; i < nOps; ++i) {
+ if (N->getOperand(i) != Operand0)
+ return false;
+ }
+
+ return true;
+}
+
+// Lower ISD::EXTRACT_VECTOR_ELT into MipsISD::VEXTRACT_SEXT_ELT.
+//
+// The non-value bits resulting from ISD::EXTRACT_VECTOR_ELT are undefined. We
+// choose to sign-extend but we could have equally chosen zero-extend. The
+// DAGCombiner will fold any sign/zero extension of the ISD::EXTRACT_VECTOR_ELT
+// result into this node later (possibly changing it to a zero-extend in the
+// process).
+SDValue MipsSETargetLowering::
+lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT ResTy = Op->getValueType(0);
+ SDValue Op0 = Op->getOperand(0);
+ EVT VecTy = Op0->getValueType(0);
+
+ if (!VecTy.is128BitVector())
+ return SDValue();
+
+ if (ResTy.isInteger()) {
+ SDValue Op1 = Op->getOperand(1);
+ EVT EltTy = VecTy.getVectorElementType();
+ return DAG.getNode(MipsISD::VEXTRACT_SEXT_ELT, DL, ResTy, Op0, Op1,
+ DAG.getValueType(EltTy));
+ }
+
+ return Op;
+}
+
+static bool isConstantOrUndef(const SDValue Op) {
+ if (Op->getOpcode() == ISD::UNDEF)
+ return true;
+ if (dyn_cast<ConstantSDNode>(Op))
+ return true;
+ if (dyn_cast<ConstantFPSDNode>(Op))
+ return true;
+ return false;
+}
+
+static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op) {
+ for (unsigned i = 0; i < Op->getNumOperands(); ++i)
+ if (isConstantOrUndef(Op->getOperand(i)))
+ return true;
+ return false;
+}
+
+// Lowers ISD::BUILD_VECTOR into appropriate SelectionDAG nodes for the
+// backend.
+//
+// Lowers according to the following rules:
+// - Constant splats are legal as-is as long as the SplatBitSize is a power of
+// 2 less than or equal to 64 and the value fits into a signed 10-bit
+// immediate
+// - Constant splats are lowered to bitconverted BUILD_VECTORs if SplatBitSize
+// is a power of 2 less than or equal to 64 and the value does not fit into a
+// signed 10-bit immediate
+// - Non-constant splats are legal as-is.
+// - Non-constant non-splats are lowered to sequences of INSERT_VECTOR_ELT.
+// - All others are illegal and must be expanded.
+SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
+ BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
+ EVT ResTy = Op->getValueType(0);
+ SDLoc DL(Op);
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+
+ if (!Subtarget->hasMSA() || !ResTy.is128BitVector())
+ return SDValue();
+
+ if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs, 8,
+ !Subtarget->isLittle()) && SplatBitSize <= 64) {
+ // We can only cope with 8, 16, 32, or 64-bit elements
+ if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
+ SplatBitSize != 64)
+ return SDValue();
+
+ // If the value fits into a simm10 then we can use ldi.[bhwd]
+ // However, if it isn't an integer type we will have to bitcast from an
+ // integer type first. Also, if there are any undefs, we must lower them
+ // to defined values first.
+ if (ResTy.isInteger() && !HasAnyUndefs && SplatValue.isSignedIntN(10))
+ return Op;
+
+ EVT ViaVecTy;
+
+ switch (SplatBitSize) {
+ default:
+ return SDValue();
+ case 8:
+ ViaVecTy = MVT::v16i8;
+ break;
+ case 16:
+ ViaVecTy = MVT::v8i16;
+ break;
+ case 32:
+ ViaVecTy = MVT::v4i32;
+ break;
+ case 64:
+ // There's no fill.d to fall back on for 64-bit values
+ return SDValue();
+ }
+
+ // SelectionDAG::getConstant will promote SplatValue appropriately.
+ SDValue Result = DAG.getConstant(SplatValue, ViaVecTy);
+
+ // Bitcast to the type we originally wanted
+ if (ViaVecTy != ResTy)
+ Result = DAG.getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result);
+
+ return Result;
+ } else if (isSplatVector(Node))
+ return Op;
+ else if (!isConstantOrUndefBUILD_VECTOR(Node)) {
+ // Use INSERT_VECTOR_ELT operations rather than expand to stores.
+ // The resulting code is the same length as the expansion, but it doesn't
+ // use memory operations
+ EVT ResTy = Node->getValueType(0);
+
+ assert(ResTy.isVector());
+
+ unsigned NumElts = ResTy.getVectorNumElements();
+ SDValue Vector = DAG.getUNDEF(ResTy);
+ for (unsigned i = 0; i < NumElts; ++i) {
+ Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
+ Node->getOperand(i),
+ DAG.getConstant(i, MVT::i32));
+ }
+ return Vector;
+ }
+
+ return SDValue();
+}
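The first of the lowering rules listed above hinges on whether the splat constant fits a signed 10-bit immediate (the isSignedIntN(10) check), which is what allows a plain ldi.[bhwd]. A small illustrative helper; fitsSimm10 is an assumed name, not part of the patch.

#include <cstdint>

// Signed 10-bit range used by the ldi.[bhwd] legality check above.
static bool fitsSimm10(int64_t SplatValue) {
  return SplatValue >= -512 && SplatValue <= 511;
}
// fitsSimm10(511) == true  : an integer splat of 511 can stay as ldi.h/ldi.w
// fitsSimm10(512) == false : falls through to the constant BUILD_VECTOR path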
+
+// Lower VECTOR_SHUFFLE into SHF (if possible).
+//
+// SHF splits the vector into blocks of four elements, then shuffles these
+// elements according to a <4 x i2> constant (encoded as an integer immediate).
+//
+// It is therefore possible to lower into SHF when the mask takes the form:
+// <a, b, c, d, a+4, b+4, c+4, d+4, a+8, b+8, c+8, d+8, ...>
+// When undefs appear, they are treated as if they were whatever value is
+// necessary in order to fit the above form.
+//
+// For example:
+// %2 = shufflevector <8 x i16> %0, <8 x i16> undef,
+// <8 x i32> <i32 3, i32 2, i32 1, i32 0,
+// i32 7, i32 6, i32 5, i32 4>
+// is lowered to:
+// (SHF_H $w0, $w1, 27)
+// where the 27 comes from:
+// 3 + (2 << 2) + (1 << 4) + (0 << 6)
+static SDValue lowerVECTOR_SHUFFLE_SHF(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ int SHFIndices[4] = { -1, -1, -1, -1 };
+
+ if (Indices.size() < 4)
+ return SDValue();
+
+ for (unsigned i = 0; i < 4; ++i) {
+ for (unsigned j = i; j < Indices.size(); j += 4) {
+ int Idx = Indices[j];
+
+ // Convert from vector index to 4-element subvector index
+ // If an index refers to an element outside of the subvector then give up
+ if (Idx != -1) {
+ Idx -= 4 * (j / 4);
+ if (Idx < 0 || Idx >= 4)
+ return SDValue();
+ }
+
+ // If the mask has an undef, replace it with the current index.
+ // Note that it might still be undef if the current index is also undef
+ if (SHFIndices[i] == -1)
+ SHFIndices[i] = Idx;
+
+ // Check that non-undef values are the same as in the mask. If they
+ // aren't then give up
+ if (!(Idx == -1 || Idx == SHFIndices[i]))
+ return SDValue();
+ }
+ }
+
+ // Calculate the immediate. Replace any remaining undefs with zero
+ APInt Imm(32, 0);
+ for (int i = 3; i >= 0; --i) {
+ int Idx = SHFIndices[i];
+
+ if (Idx == -1)
+ Idx = 0;
+
+ Imm <<= 2;
+ Imm |= Idx & 0x3;
+ }
+
+ return DAG.getNode(MipsISD::SHF, SDLoc(Op), ResTy,
+ DAG.getConstant(Imm, MVT::i32), Op->getOperand(0));
+}
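The immediate packing at the end of the routine above places two bits per result element, with element 0 in the least significant position. A standalone sketch that reproduces the worked example from the comment; packSHFImm is an illustrative name and assumes all four indices are already defined.

#include <cassert>

// Two bits per element, element 0 in the least significant position.
static unsigned packSHFImm(const int Idx[4]) {
  unsigned Imm = 0;
  for (int i = 3; i >= 0; --i)
    Imm = (Imm << 2) | (Idx[i] & 0x3);
  return Imm;
}

int main() {
  const int Reverse[4] = {3, 2, 1, 0};  // per-block pattern of <3,2,1,0,...>
  assert(packSHFImm(Reverse) == 27);    // 3 + (2 << 2) + (1 << 4) + (0 << 6)
  return 0;
}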
+
+// Lower VECTOR_SHUFFLE into ILVEV (if possible).
+//
+// ILVEV interleaves the even elements from each vector.
+//
+// It is possible to lower into ILVEV when the mask takes the form:
+// <0, n, 2, n+2, 4, n+4, ...>
+// where n is the number of elements in the vector.
+//
+// When undefs appear in the mask, they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_ILVEV(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ int WsIdx = 0;
+ int WtIdx = ResTy.getVectorNumElements();
+
+ for (unsigned i = 0; i < Indices.size(); i += 2) {
+ if (Indices[i] != -1 && Indices[i] != WsIdx)
+ return SDValue();
+ if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
+ return SDValue();
+ WsIdx += 2;
+ WtIdx += 2;
+ }
+
+ return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
+
+// Lower VECTOR_SHUFFLE into ILVOD (if possible).
+//
+// ILVOD interleaves the odd elements from each vector.
+//
+// It is possible to lower into ILVOD when the mask takes the form:
+// <1, n+1, 3, n+3, 5, n+5, ...>
+// where n is the number of elements in the vector.
+//
+// When undefs appear in the mask, they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_ILVOD(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ int WsIdx = 1;
+ int WtIdx = ResTy.getVectorNumElements() + 1;
+
+ for (unsigned i = 0; i < Indices.size(); i += 2) {
+ if (Indices[i] != -1 && Indices[i] != WsIdx)
+ return SDValue();
+ if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
+ return SDValue();
+ WsIdx += 2;
+ WtIdx += 2;
+ }
+
+ return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
+
+// Lower VECTOR_SHUFFLE into ILVL (if possible).
+//
+// ILVL interleaves consecutive elements from the left half of each vector.
+//
+// It is possible to lower into ILVL when the mask takes the form:
+// <0, n, 1, n+1, 2, n+2, ...>
+// where n is the number of elements in the vector.
+//
+// When undefs appear in the mask, they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_ILVL(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ int WsIdx = 0;
+ int WtIdx = ResTy.getVectorNumElements();
+
+ for (unsigned i = 0; i < Indices.size(); i += 2) {
+ if (Indices[i] != -1 && Indices[i] != WsIdx)
+ return SDValue();
+ if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
+ return SDValue();
+ WsIdx ++;
+ WtIdx ++;
+ }
+
+ return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
+
+// Lower VECTOR_SHUFFLE into ILVR (if possible).
+//
+// ILVR interleaves consecutive elements from the right half of each vector.
+//
+// It is possible to lower into ILVR when the mask takes the form:
+// <x, n+x, x+1, n+x+1, x+2, n+x+2, ...>
+// where n is the number of elements in the vector and x is half n.
+//
+// When undefs appear in the mask, they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_ILVR(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ unsigned NumElts = ResTy.getVectorNumElements();
+ int WsIdx = NumElts / 2;
+ int WtIdx = NumElts + NumElts / 2;
+
+ for (unsigned i = 0; i < Indices.size(); i += 2) {
+ if (Indices[i] != -1 && Indices[i] != WsIdx)
+ return SDValue();
+ if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
+ return SDValue();
+ WsIdx ++;
+ WtIdx ++;
+ }
+
+ return DAG.getNode(MipsISD::ILVR, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
+
+// Lower VECTOR_SHUFFLE into PCKEV (if possible).
+//
+// PCKEV copies the even elements of each vector into the result vector.
+//
+// It is possible to lower into PCKEV when the mask takes the form:
+// <0, 2, 4, ..., n, n+2, n+4, ...>
+// where n is the number of elements in the vector.
+//
+// When undefs appear in the mask, they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_PCKEV(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ int Idx = 0;
+
+ for (unsigned i = 0; i < Indices.size(); ++i) {
+ if (Indices[i] != -1 && Indices[i] != Idx)
+ return SDValue();
+ Idx += 2;
+ }
+
+ return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
+
+// Lower VECTOR_SHUFFLE into PCKOD (if possible).
+//
+// PCKOD copies the odd elements of each vector into the result vector.
+//
+// It is possible to lower into PCKOD when the mask takes the form:
+// <1, 3, 5, ..., n+1, n+3, n+5, ...>
+// where n is the number of elements in the vector.
+//
+// When undefs appear in the mask, they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_PCKOD(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ int Idx = 1;
+
+ for (unsigned i = 0; i < Indices.size(); ++i) {
+ if (Indices[i] != -1 && Indices[i] != Idx)
+ return SDValue();
+ Idx += 2;
+ }
+
+ return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
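For a v8i16 shuffle (n = 8) the fixed masks accepted by the six helpers above work out to the concrete values listed in the sketch below; the function mirrors the ILVEV form check as a standalone illustration. matchesILVEV is an assumed name, not part of the patch.

#include <cstddef>
#include <vector>

// Concrete v8i16 masks for the patterns above:
//   ILVEV <0,8,2,10,4,12,6,14>   ILVOD <1,9,3,11,5,13,7,15>
//   ILVL  <0,8,1,9,2,10,3,11>    ILVR  <4,12,5,13,6,14,7,15>
//   PCKEV <0,2,4,6,8,10,12,14>   PCKOD <1,3,5,7,9,11,13,15>
// Standalone version of the ILVEV form check (-1 means undef).
static bool matchesILVEV(const std::vector<int> &Indices, int NumElts) {
  int WsIdx = 0, WtIdx = NumElts;
  for (std::size_t i = 0; i + 1 < Indices.size(); i += 2) {
    if (Indices[i] != -1 && Indices[i] != WsIdx)
      return false;
    if (Indices[i + 1] != -1 && Indices[i + 1] != WtIdx)
      return false;
    WsIdx += 2;
    WtIdx += 2;
  }
  return true;
}
// matchesILVEV({0, 8, 2, 10, 4, 12, 6, 14}, 8) == true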
+
+// Lower VECTOR_SHUFFLE into VSHF.
+//
+// This mostly consists of converting the shuffle indices in Indices into a
+// BUILD_VECTOR and adding it as an operand to the resulting VSHF. There is
+// also code to eliminate unused operands of the VECTOR_SHUFFLE. For example,
+// if the type is v8i16 and all the indices are less than 8 then the second
+// operand is unused and can be replaced with anything. We choose to replace it
+// with the used operand since this reduces the number of instructions overall.
+static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ SmallVector<SDValue, 16> Ops;
+ SDValue Op0;
+ SDValue Op1;
+ EVT MaskVecTy = ResTy.changeVectorElementTypeToInteger();
+ EVT MaskEltTy = MaskVecTy.getVectorElementType();
+ bool Using1stVec = false;
+ bool Using2ndVec = false;
+ SDLoc DL(Op);
+ int ResTyNumElts = ResTy.getVectorNumElements();
+
+ for (int i = 0; i < ResTyNumElts; ++i) {
+ // Idx == -1 means UNDEF
+ int Idx = Indices[i];
+
+ if (0 <= Idx && Idx < ResTyNumElts)
+ Using1stVec = true;
+ if (ResTyNumElts <= Idx && Idx < ResTyNumElts * 2)
+ Using2ndVec = true;
+ }
+
+ for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
+ ++I)
+ Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));
+
+ SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, &Ops[0],
+ Ops.size());
+
+ if (Using1stVec && Using2ndVec) {
+ Op0 = Op->getOperand(0);
+ Op1 = Op->getOperand(1);
+ } else if (Using1stVec)
+ Op0 = Op1 = Op->getOperand(0);
+ else if (Using2ndVec)
+ Op0 = Op1 = Op->getOperand(1);
+ else
+ llvm_unreachable("shuffle vector mask references neither vector operand?");
+
+ return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op0, Op1);
+}
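The Using1stVec/Using2ndVec scan above is what lets an unused VSHF data operand be replaced by the used one. A standalone sketch of that scan; scanShuffleMask and UsedOperands are illustrative names, not part of the patch.

#include <vector>

enum class UsedOperands { None, FirstOnly, SecondOnly, Both };

// Report which source vectors a shuffle mask references (-1 means undef).
static UsedOperands scanShuffleMask(const std::vector<int> &Indices,
                                    int NumElts) {
  bool Using1st = false, Using2nd = false;
  for (int Idx : Indices) {
    if (0 <= Idx && Idx < NumElts)
      Using1st = true;
    if (NumElts <= Idx && Idx < NumElts * 2)
      Using2nd = true;
  }
  if (Using1st && Using2nd)
    return UsedOperands::Both;
  if (Using1st)
    return UsedOperands::FirstOnly;
  if (Using2nd)
    return UsedOperands::SecondOnly;
  return UsedOperands::None;  // all-undef mask
}
// A v8i16 mask such as <7,6,5,4,3,2,1,0> only references the first vector, so
// the VSHF above uses that vector for both data operands.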
+
+// Lower VECTOR_SHUFFLE into one of a number of instructions depending on the
+// indices in the shuffle.
+SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
+ SelectionDAG &DAG) const {
+ ShuffleVectorSDNode *Node = cast<ShuffleVectorSDNode>(Op);
+ EVT ResTy = Op->getValueType(0);
+
+ if (!ResTy.is128BitVector())
+ return SDValue();
+
+ int ResTyNumElts = ResTy.getVectorNumElements();
+ SmallVector<int, 16> Indices;
+
+ for (int i = 0; i < ResTyNumElts; ++i)
+ Indices.push_back(Node->getMaskElt(i));
+
+ SDValue Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
+}
+
MachineBasicBlock * MipsSETargetLowering::
emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
// $bb:
@@ -701,7 +2613,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- const TargetRegisterClass *RC = &Mips::CPURegsRegClass;
+ const TargetRegisterClass *RC = &Mips::GPR32RegClass;
DebugLoc DL = MI->getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
@@ -746,3 +2658,318 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
MI->eraseFromParent(); // The pseudo instruction is gone now.
return Sink;
}
+
+MachineBasicBlock * MipsSETargetLowering::
+emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
+ unsigned BranchOp) const{
+ // $bb:
+ // vany_nonzero $rd, $ws
+ // =>
+ // $bb:
+ // bnz.b $ws, $tbb
+ // b $fbb
+ // $fbb:
+ // li $rd1, 0
+ // b $sink
+ // $tbb:
+ // li $rd2, 1
+ // $sink:
+ // $rd = phi($rd1, $fbb, $rd2, $tbb)
+
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetRegisterClass *RC = &Mips::GPR32RegClass;
+ DebugLoc DL = MI->getDebugLoc();
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *Sink = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, FBB);
+ F->insert(It, TBB);
+ F->insert(It, Sink);
+
+ // Transfer the remainder of BB and its successor edges to Sink.
+ Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ Sink->transferSuccessorsAndUpdatePHIs(BB);
+
+ // Add successors.
+ BB->addSuccessor(FBB);
+ BB->addSuccessor(TBB);
+ FBB->addSuccessor(Sink);
+ TBB->addSuccessor(Sink);
+
+ // Insert the real bnz.b instruction to $BB.
+ BuildMI(BB, DL, TII->get(BranchOp))
+ .addReg(MI->getOperand(1).getReg())
+ .addMBB(TBB);
+
+ // Fill $FBB.
+ unsigned RD1 = RegInfo.createVirtualRegister(RC);
+ BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), RD1)
+ .addReg(Mips::ZERO).addImm(0);
+ BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
+
+ // Fill $TBB.
+ unsigned RD2 = RegInfo.createVirtualRegister(RC);
+ BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), RD2)
+ .addReg(Mips::ZERO).addImm(1);
+
+ // Insert phi function to $Sink.
+ BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
+ MI->getOperand(0).getReg())
+ .addReg(RD1).addMBB(FBB).addReg(RD2).addMBB(TBB);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return Sink;
+}
+
+// Emit the COPY_FW pseudo instruction.
+//
+// copy_fw_pseudo $fd, $ws, n
+// =>
+// copy_u_w $rt, $ws, $n
+// mtc1 $rt, $fd
+//
+// When n is zero, the equivalent operation can be performed with (potentially)
+// zero instructions due to register overlaps. This optimization is never valid
+// for lane 1 because it would require FR=0 mode, which isn't supported by MSA.
+MachineBasicBlock * MipsSETargetLowering::
+emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const{
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Fd = MI->getOperand(0).getReg();
+ unsigned Ws = MI->getOperand(1).getReg();
+ unsigned Lane = MI->getOperand(2).getImm();
+
+ if (Lane == 0)
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_lo);
+ else {
+ unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
+ }
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the COPY_FD pseudo instruction.
+//
+// copy_fd_pseudo $fd, $ws, n
+// =>
+// splati.d $wt, $ws, $n
+// copy $fd, $wt:sub_64
+//
+// When n is zero, the equivalent operation can be performed with (potentially)
+// zero instructions due to register overlaps. This optimization is always
+// valid because FR=1 mode is the only mode supported by MSA.
+MachineBasicBlock * MipsSETargetLowering::
+emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const{
+ assert(Subtarget->isFP64bit());
+
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ unsigned Fd = MI->getOperand(0).getReg();
+ unsigned Ws = MI->getOperand(1).getReg();
+ unsigned Lane = MI->getOperand(2).getImm() * 2;
+ DebugLoc DL = MI->getDebugLoc();
+
+ if (Lane == 0)
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_64);
+ else {
+ unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wt).addReg(Ws).addImm(1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_64);
+ }
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the INSERT_FW pseudo instruction.
+//
+// insert_fw_pseudo $wd, $wd_in, $n, $fs
+// =>
+// subreg_to_reg $wt:sub_lo, $fs
+// insve_w $wd[$n], $wd_in, $wt[0]
+MachineBasicBlock *
+MipsSETargetLowering::emitINSERT_FW(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Wd = MI->getOperand(0).getReg();
+ unsigned Wd_in = MI->getOperand(1).getReg();
+ unsigned Lane = MI->getOperand(2).getImm();
+ unsigned Fs = MI->getOperand(3).getReg();
+ unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
+ .addImm(0)
+ .addReg(Fs)
+ .addImm(Mips::sub_lo);
+ BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_W), Wd)
+ .addReg(Wd_in)
+ .addImm(Lane)
+ .addReg(Wt);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the INSERT_FD pseudo instruction.
+//
+// insert_fd_pseudo $wd, $wd_in, $n, $fs
+// =>
+// subreg_to_reg $wt:sub_64, $fs
+// insve_d $wd[$n], $wd_in, $wt[0]
+MachineBasicBlock *
+MipsSETargetLowering::emitINSERT_FD(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ assert(Subtarget->isFP64bit());
+
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Wd = MI->getOperand(0).getReg();
+ unsigned Wd_in = MI->getOperand(1).getReg();
+ unsigned Lane = MI->getOperand(2).getImm();
+ unsigned Fs = MI->getOperand(3).getReg();
+ unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
+ .addImm(0)
+ .addReg(Fs)
+ .addImm(Mips::sub_64);
+ BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_D), Wd)
+ .addReg(Wd_in)
+ .addImm(Lane)
+ .addReg(Wt);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the FILL_FW pseudo instruction.
+//
+// fill_fw_pseudo $wd, $fs
+// =>
+// implicit_def $wt1
+// insert_subreg $wt2:subreg_lo, $wt1, $fs
+// splati.w $wd, $wt2[0]
+MachineBasicBlock *
+MipsSETargetLowering::emitFILL_FW(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Wd = MI->getOperand(0).getReg();
+ unsigned Fs = MI->getOperand(1).getReg();
+ unsigned Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ unsigned Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::IMPLICIT_DEF), Wt1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_SUBREG), Wt2)
+ .addReg(Wt1)
+ .addReg(Fs)
+ .addImm(Mips::sub_lo);
+ BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wd).addReg(Wt2).addImm(0);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the FILL_FD pseudo instruction.
+//
+// fill_fd_pseudo $wd, $fs
+// =>
+// implicit_def $wt1
+// insert_subreg $wt2:subreg_64, $wt1, $fs
+// splati.d $wd, $wt2[0]
+MachineBasicBlock *
+MipsSETargetLowering::emitFILL_FD(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ assert(Subtarget->isFP64bit());
+
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Wd = MI->getOperand(0).getReg();
+ unsigned Fs = MI->getOperand(1).getReg();
+ unsigned Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+ unsigned Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::IMPLICIT_DEF), Wt1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_SUBREG), Wt2)
+ .addReg(Wt1)
+ .addReg(Fs)
+ .addImm(Mips::sub_64);
+ BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wd).addReg(Wt2).addImm(0);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the FEXP2_W_1 pseudo instruction.
+//
+// fexp2_w_1_pseudo $wd, $wt
+// =>
+// ldi.w $ws1, 1
+// ffint_u.w $ws2, $ws1
+// fexp2.w $wd, $ws2, $wt
+MachineBasicBlock *
+MipsSETargetLowering::emitFEXP2_W_1(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ const TargetRegisterClass *RC = &Mips::MSA128WRegClass;
+ unsigned Ws1 = RegInfo.createVirtualRegister(RC);
+ unsigned Ws2 = RegInfo.createVirtualRegister(RC);
+ DebugLoc DL = MI->getDebugLoc();
+
+ // Splat 1.0 into a vector
+ BuildMI(*BB, MI, DL, TII->get(Mips::LDI_W), Ws1).addImm(1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_W), Ws2).addReg(Ws1);
+
+ // Emit 1.0 * fexp2(Wt)
+ BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_W), MI->getOperand(0).getReg())
+ .addReg(Ws2)
+ .addReg(MI->getOperand(1).getReg());
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the FEXP2_D_1 pseudo instruction.
+//
+// fexp2_d_1_pseudo $wd, $wt
+// =>
+// ldi.d $ws1, 1
+// ffint_u.d $ws2, $ws1
+// fexp2.d $wd, $ws2, $wt
+MachineBasicBlock *
+MipsSETargetLowering::emitFEXP2_D_1(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ const TargetRegisterClass *RC = &Mips::MSA128DRegClass;
+ unsigned Ws1 = RegInfo.createVirtualRegister(RC);
+ unsigned Ws2 = RegInfo.createVirtualRegister(RC);
+ DebugLoc DL = MI->getDebugLoc();
+
+ // Splat 1.0 into a vector
+ BuildMI(*BB, MI, DL, TII->get(Mips::LDI_D), Ws1).addImm(1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_D), Ws2).addReg(Ws1);
+
+ // Emit 1.0 * fexp2(Wt)
+ BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_D), MI->getOperand(0).getReg())
+ .addReg(Ws2)
+ .addReg(MI->getOperand(1).getReg());
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
diff --git a/lib/Target/Mips/MipsSEISelLowering.h b/lib/Target/Mips/MipsSEISelLowering.h
index ec8a5c73f1a4..c5210d94b34d 100644
--- a/lib/Target/Mips/MipsSEISelLowering.h
+++ b/lib/Target/Mips/MipsSEISelLowering.h
@@ -22,6 +22,14 @@ namespace llvm {
public:
explicit MipsSETargetLowering(MipsTargetMachine &TM);
+ /// \brief Enable MSA support for the given integer type and Register
+ /// class.
+ void addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC);
+ /// \brief Enable MSA support for the given floating-point type and
+ /// Register class.
+ void addMSAFloatType(MVT::SimpleValueType Ty,
+ const TargetRegisterClass *RC);
+
virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
@@ -38,8 +46,8 @@ namespace llvm {
virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
if (VT == MVT::Untyped)
- return Subtarget->hasDSP() ? &Mips::ACRegsDSPRegClass :
- &Mips::ACRegsRegClass;
+ return Subtarget->hasDSP() ? &Mips::ACC64DSPRegClass :
+ &Mips::ACC64RegClass;
return TargetLowering::getRepRegClassFor(VT);
}
@@ -56,14 +64,50 @@ namespace llvm {
bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const;
+ SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+
SDValue lowerMulDiv(SDValue Op, unsigned NewOpc, bool HasLo, bool HasHi,
SelectionDAG &DAG) const;
SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ /// \brief Lower VECTOR_SHUFFLE into one of a number of instructions
+ /// depending on the indices in the shuffle.
+ SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
MachineBasicBlock *emitBPOSGE32(MachineInstr *MI,
MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitMSACBranchPseudo(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned BranchOp) const;
+ /// \brief Emit the COPY_FW pseudo instruction
+ MachineBasicBlock *emitCOPY_FW(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the COPY_FD pseudo instruction
+ MachineBasicBlock *emitCOPY_FD(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the INSERT_FW pseudo instruction
+ MachineBasicBlock *emitINSERT_FW(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the INSERT_FD pseudo instruction
+ MachineBasicBlock *emitINSERT_FD(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the FILL_FW pseudo instruction
+ MachineBasicBlock *emitFILL_FW(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the FILL_FD pseudo instruction
+ MachineBasicBlock *emitFILL_FD(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the FEXP2_W_1 pseudo instructions.
+ MachineBasicBlock *emitFEXP2_W_1(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the FEXP2_D_1 pseudo instructions.
+ MachineBasicBlock *emitFEXP2_D_1(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
};
}
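The addMSAIntType/addMSAFloatType hooks declared above register an MSA vector type with its 128-bit register class and then mark which generic operations are handled natively. A minimal sketch of the integer variant, assuming the usual addRegisterClass/setOperationAction pattern; the concrete list of legal operations is illustrative, not the complete set used by this import.

// Hedged sketch; the set of operations marked Legal is only an example.
void MipsSETargetLowering::addMSAIntType(MVT::SimpleValueType Ty,
                                         const TargetRegisterClass *RC) {
  addRegisterClass(Ty, RC);             // e.g. MVT::v16i8 -> MSA128BRegClass

  // Default every operation on this type to Expand, then whitelist a few.
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, Ty, Expand);

  setOperationAction(ISD::LOAD, Ty, Legal);
  setOperationAction(ISD::STORE, Ty, Legal);
  setOperationAction(ISD::ADD, Ty, Legal);
  setOperationAction(ISD::BITCAST, Ty, Legal);
}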
diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp
index a0768e51c079..02931a3e39ee 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
@@ -26,7 +27,7 @@ using namespace llvm;
MipsSEInstrInfo::MipsSEInstrInfo(MipsTargetMachine &tm)
: MipsInstrInfo(tm,
tm.getRelocationModel() == Reloc::PIC_ ? Mips::B : Mips::J),
- RI(*tm.getSubtargetImpl(), *this),
+ RI(*tm.getSubtargetImpl()),
IsN64(tm.getSubtarget<MipsSubtarget>().isABI_N64()) {}
const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
@@ -43,10 +44,8 @@ isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
{
unsigned Opc = MI->getOpcode();
- if ((Opc == Mips::LW) || (Opc == Mips::LW_P8) || (Opc == Mips::LD) ||
- (Opc == Mips::LD_P8) || (Opc == Mips::LWC1) || (Opc == Mips::LWC1_P8) ||
- (Opc == Mips::LDC1) || (Opc == Mips::LDC164) ||
- (Opc == Mips::LDC164_P8)) {
+ if ((Opc == Mips::LW) || (Opc == Mips::LD) ||
+ (Opc == Mips::LWC1) || (Opc == Mips::LDC1) || (Opc == Mips::LDC164)) {
if ((MI->getOperand(1).isFI()) && // is a stack slot
(MI->getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI->getOperand(2)))) {
@@ -68,10 +67,8 @@ isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
{
unsigned Opc = MI->getOpcode();
- if ((Opc == Mips::SW) || (Opc == Mips::SW_P8) || (Opc == Mips::SD) ||
- (Opc == Mips::SD_P8) || (Opc == Mips::SWC1) || (Opc == Mips::SWC1_P8) ||
- (Opc == Mips::SDC1) || (Opc == Mips::SDC164) ||
- (Opc == Mips::SDC164_P8)) {
+ if ((Opc == Mips::SW) || (Opc == Mips::SD) ||
+ (Opc == Mips::SWC1) || (Opc == Mips::SDC1) || (Opc == Mips::SDC164)) {
if ((MI->getOperand(1).isFI()) && // is a stack slot
(MI->getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI->getOperand(2)))) {
@@ -88,39 +85,41 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
bool KillSrc) const {
unsigned Opc = 0, ZeroReg = 0;
- if (Mips::CPURegsRegClass.contains(DestReg)) { // Copy to CPU Reg.
- if (Mips::CPURegsRegClass.contains(SrcReg))
- Opc = Mips::OR, ZeroReg = Mips::ZERO;
+ if (Mips::GPR32RegClass.contains(DestReg)) { // Copy to CPU Reg.
+ if (Mips::GPR32RegClass.contains(SrcReg))
+ Opc = Mips::ADDu, ZeroReg = Mips::ZERO;
else if (Mips::CCRRegClass.contains(SrcReg))
Opc = Mips::CFC1;
else if (Mips::FGR32RegClass.contains(SrcReg))
Opc = Mips::MFC1;
- else if (Mips::HIRegsRegClass.contains(SrcReg))
+ else if (Mips::HI32RegClass.contains(SrcReg))
Opc = Mips::MFHI, SrcReg = 0;
- else if (Mips::LORegsRegClass.contains(SrcReg))
+ else if (Mips::LO32RegClass.contains(SrcReg))
Opc = Mips::MFLO, SrcReg = 0;
- else if (Mips::HIRegsDSPRegClass.contains(SrcReg))
+ else if (Mips::HI32DSPRegClass.contains(SrcReg))
Opc = Mips::MFHI_DSP;
- else if (Mips::LORegsDSPRegClass.contains(SrcReg))
+ else if (Mips::LO32DSPRegClass.contains(SrcReg))
Opc = Mips::MFLO_DSP;
else if (Mips::DSPCCRegClass.contains(SrcReg)) {
BuildMI(MBB, I, DL, get(Mips::RDDSP), DestReg).addImm(1 << 4)
.addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
return;
}
+ else if (Mips::MSACtrlRegClass.contains(SrcReg))
+ Opc = Mips::CFCMSA;
}
- else if (Mips::CPURegsRegClass.contains(SrcReg)) { // Copy from CPU Reg.
+ else if (Mips::GPR32RegClass.contains(SrcReg)) { // Copy from CPU Reg.
if (Mips::CCRRegClass.contains(DestReg))
Opc = Mips::CTC1;
else if (Mips::FGR32RegClass.contains(DestReg))
Opc = Mips::MTC1;
- else if (Mips::HIRegsRegClass.contains(DestReg))
+ else if (Mips::HI32RegClass.contains(DestReg))
Opc = Mips::MTHI, DestReg = 0;
- else if (Mips::LORegsRegClass.contains(DestReg))
+ else if (Mips::LO32RegClass.contains(DestReg))
Opc = Mips::MTLO, DestReg = 0;
- else if (Mips::HIRegsDSPRegClass.contains(DestReg))
+ else if (Mips::HI32DSPRegClass.contains(DestReg))
Opc = Mips::MTHI_DSP;
- else if (Mips::LORegsDSPRegClass.contains(DestReg))
+ else if (Mips::LO32DSPRegClass.contains(DestReg))
Opc = Mips::MTLO_DSP;
else if (Mips::DSPCCRegClass.contains(DestReg)) {
BuildMI(MBB, I, DL, get(Mips::WRDSP))
@@ -128,6 +127,8 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
.addReg(DestReg, RegState::ImplicitDefine);
return;
}
+ else if (Mips::MSACtrlRegClass.contains(DestReg))
+ Opc = Mips::CTCMSA;
}
else if (Mips::FGR32RegClass.contains(DestReg, SrcReg))
Opc = Mips::FMOV_S;
@@ -135,26 +136,28 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
Opc = Mips::FMOV_D32;
else if (Mips::FGR64RegClass.contains(DestReg, SrcReg))
Opc = Mips::FMOV_D64;
- else if (Mips::CCRRegClass.contains(DestReg, SrcReg))
- Opc = Mips::MOVCCRToCCR;
- else if (Mips::CPU64RegsRegClass.contains(DestReg)) { // Copy to CPU64 Reg.
- if (Mips::CPU64RegsRegClass.contains(SrcReg))
- Opc = Mips::OR64, ZeroReg = Mips::ZERO_64;
- else if (Mips::HIRegs64RegClass.contains(SrcReg))
+ else if (Mips::GPR64RegClass.contains(DestReg)) { // Copy to CPU64 Reg.
+ if (Mips::GPR64RegClass.contains(SrcReg))
+ Opc = Mips::DADDu, ZeroReg = Mips::ZERO_64;
+ else if (Mips::HI64RegClass.contains(SrcReg))
Opc = Mips::MFHI64, SrcReg = 0;
- else if (Mips::LORegs64RegClass.contains(SrcReg))
+ else if (Mips::LO64RegClass.contains(SrcReg))
Opc = Mips::MFLO64, SrcReg = 0;
else if (Mips::FGR64RegClass.contains(SrcReg))
Opc = Mips::DMFC1;
}
- else if (Mips::CPU64RegsRegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
- if (Mips::HIRegs64RegClass.contains(DestReg))
+ else if (Mips::GPR64RegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
+ if (Mips::HI64RegClass.contains(DestReg))
Opc = Mips::MTHI64, DestReg = 0;
- else if (Mips::LORegs64RegClass.contains(DestReg))
+ else if (Mips::LO64RegClass.contains(DestReg))
Opc = Mips::MTLO64, DestReg = 0;
else if (Mips::FGR64RegClass.contains(DestReg))
Opc = Mips::DMTC1;
}
+ else if (Mips::MSA128BRegClass.contains(DestReg)) { // Copy to MSA reg
+ if (Mips::MSA128BRegClass.contains(SrcReg))
+ Opc = Mips::MOVE_V;
+ }
assert(Opc && "Cannot copy registers");
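The hunk above only chooses Opc and ZeroReg; the unchanged tail of copyPhysReg, outside this diff's context, then builds the actual copy. Paraphrased as a fragment, not quoted from this import:

// Continuation sketch of copyPhysReg after the opcode selection above.
MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc));
if (DestReg)
  MIB.addReg(DestReg, RegState::Define);
if (SrcReg)
  MIB.addReg(SrcReg, getKillRegState(KillSrc));
if (ZeroReg)
  MIB.addReg(ZeroReg); // e.g. ADDu Dest, Src, $zero for a GPR-to-GPR move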
@@ -181,24 +184,32 @@ storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Opc = 0;
- if (Mips::CPURegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::SW_P8 : Mips::SW;
- else if (Mips::CPU64RegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::SD_P8 : Mips::SD;
- else if (Mips::ACRegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::STORE_AC64_P8 : Mips::STORE_AC64;
- else if (Mips::ACRegsDSPRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::STORE_AC_DSP_P8 : Mips::STORE_AC_DSP;
- else if (Mips::ACRegs128RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::STORE_AC128_P8 : Mips::STORE_AC128;
+ if (Mips::GPR32RegClass.hasSubClassEq(RC))
+ Opc = Mips::SW;
+ else if (Mips::GPR64RegClass.hasSubClassEq(RC))
+ Opc = Mips::SD;
+ else if (Mips::ACC64RegClass.hasSubClassEq(RC))
+ Opc = Mips::STORE_ACC64;
+ else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
+ Opc = Mips::STORE_ACC64DSP;
+ else if (Mips::ACC128RegClass.hasSubClassEq(RC))
+ Opc = Mips::STORE_ACC128;
else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::STORE_CCOND_DSP_P8 : Mips::STORE_CCOND_DSP;
+ Opc = Mips::STORE_CCOND_DSP;
else if (Mips::FGR32RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::SWC1_P8 : Mips::SWC1;
+ Opc = Mips::SWC1;
else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
Opc = Mips::SDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::SDC164_P8 : Mips::SDC164;
+ Opc = Mips::SDC164;
+ else if (RC->hasType(MVT::v16i8))
+ Opc = Mips::ST_B;
+ else if (RC->hasType(MVT::v8i16) || RC->hasType(MVT::v8f16))
+ Opc = Mips::ST_H;
+ else if (RC->hasType(MVT::v4i32) || RC->hasType(MVT::v4f32))
+ Opc = Mips::ST_W;
+ else if (RC->hasType(MVT::v2i64) || RC->hasType(MVT::v2f64))
+ Opc = Mips::ST_D;
assert(Opc && "Register class not handled!");
BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
@@ -214,24 +225,32 @@ loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
unsigned Opc = 0;
- if (Mips::CPURegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LW_P8 : Mips::LW;
- else if (Mips::CPU64RegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LD_P8 : Mips::LD;
- else if (Mips::ACRegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LOAD_AC64_P8 : Mips::LOAD_AC64;
- else if (Mips::ACRegsDSPRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LOAD_AC_DSP_P8 : Mips::LOAD_AC_DSP;
- else if (Mips::ACRegs128RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LOAD_AC128_P8 : Mips::LOAD_AC128;
+ if (Mips::GPR32RegClass.hasSubClassEq(RC))
+ Opc = Mips::LW;
+ else if (Mips::GPR64RegClass.hasSubClassEq(RC))
+ Opc = Mips::LD;
+ else if (Mips::ACC64RegClass.hasSubClassEq(RC))
+ Opc = Mips::LOAD_ACC64;
+ else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
+ Opc = Mips::LOAD_ACC64DSP;
+ else if (Mips::ACC128RegClass.hasSubClassEq(RC))
+ Opc = Mips::LOAD_ACC128;
else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LOAD_CCOND_DSP_P8 : Mips::LOAD_CCOND_DSP;
+ Opc = Mips::LOAD_CCOND_DSP;
else if (Mips::FGR32RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LWC1_P8 : Mips::LWC1;
+ Opc = Mips::LWC1;
else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
Opc = Mips::LDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LDC164_P8 : Mips::LDC164;
+ Opc = Mips::LDC164;
+ else if (RC->hasType(MVT::v16i8))
+ Opc = Mips::LD_B;
+ else if (RC->hasType(MVT::v8i16) || RC->hasType(MVT::v8f16))
+ Opc = Mips::LD_H;
+ else if (RC->hasType(MVT::v4i32) || RC->hasType(MVT::v4f32))
+ Opc = Mips::LD_W;
+ else if (RC->hasType(MVT::v2i64) || RC->hasType(MVT::v2f64))
+ Opc = Mips::LD_D;
assert(Opc && "Register class not handled!");
BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(Offset)
@@ -245,17 +264,59 @@ bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
default:
return false;
case Mips::RetRA:
- ExpandRetRA(MBB, MI, Mips::RET);
+ expandRetRA(MBB, MI, Mips::RET);
+ break;
+ case Mips::PseudoMFHI:
+ expandPseudoMFHiLo(MBB, MI, Mips::MFHI);
+ break;
+ case Mips::PseudoMFLO:
+ expandPseudoMFHiLo(MBB, MI, Mips::MFLO);
+ break;
+ case Mips::PseudoMFHI64:
+ expandPseudoMFHiLo(MBB, MI, Mips::MFHI64);
+ break;
+ case Mips::PseudoMFLO64:
+ expandPseudoMFHiLo(MBB, MI, Mips::MFLO64);
+ break;
+ case Mips::PseudoMTLOHI:
+ expandPseudoMTLoHi(MBB, MI, Mips::MTLO, Mips::MTHI, false);
+ break;
+ case Mips::PseudoMTLOHI64:
+ expandPseudoMTLoHi(MBB, MI, Mips::MTLO64, Mips::MTHI64, false);
+ break;
+ case Mips::PseudoMTLOHI_DSP:
+ expandPseudoMTLoHi(MBB, MI, Mips::MTLO_DSP, Mips::MTHI_DSP, true);
+ break;
+ case Mips::PseudoCVT_S_W:
+ expandCvtFPInt(MBB, MI, Mips::CVT_S_W, Mips::MTC1, false);
+ break;
+ case Mips::PseudoCVT_D32_W:
+ expandCvtFPInt(MBB, MI, Mips::CVT_D32_W, Mips::MTC1, false);
+ break;
+ case Mips::PseudoCVT_S_L:
+ expandCvtFPInt(MBB, MI, Mips::CVT_S_L, Mips::DMTC1, true);
+ break;
+ case Mips::PseudoCVT_D64_W:
+ expandCvtFPInt(MBB, MI, Mips::CVT_D64_W, Mips::MTC1, true);
+ break;
+ case Mips::PseudoCVT_D64_L:
+ expandCvtFPInt(MBB, MI, Mips::CVT_D64_L, Mips::DMTC1, true);
break;
case Mips::BuildPairF64:
- ExpandBuildPairF64(MBB, MI);
+ expandBuildPairF64(MBB, MI, false);
+ break;
+ case Mips::BuildPairF64_64:
+ expandBuildPairF64(MBB, MI, true);
break;
case Mips::ExtractElementF64:
- ExpandExtractElementF64(MBB, MI);
+ expandExtractElementF64(MBB, MI, false);
+ break;
+ case Mips::ExtractElementF64_64:
+ expandExtractElementF64(MBB, MI, true);
break;
case Mips::MIPSeh_return32:
case Mips::MIPSeh_return64:
- ExpandEhReturn(MBB, MI);
+ expandEhReturn(MBB, MI);
break;
}
@@ -263,9 +324,9 @@ bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
return true;
}
-/// GetOppositeBranchOpc - Return the inverse of the specified
+/// getOppositeBranchOpc - Return the inverse of the specified
/// opcode, e.g. turning BEQ to BNE.
-unsigned MipsSEInstrInfo::GetOppositeBranchOpc(unsigned Opc) const {
+unsigned MipsSEInstrInfo::getOppositeBranchOpc(unsigned Opc) const {
switch (Opc) {
default: llvm_unreachable("Illegal opcode!");
case Mips::BEQ: return Mips::BNE;
@@ -315,7 +376,7 @@ MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi;
unsigned ZEROReg = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
const TargetRegisterClass *RC = STI.isABI_N64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
bool LastInstrIsADDiu = NewImm;
const MipsAnalyzeImmediate::InstSeq &Seq =
@@ -346,7 +407,7 @@ MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
return Reg;
}
-unsigned MipsSEInstrInfo::GetAnalyzableBrOpc(unsigned Opc) const {
+unsigned MipsSEInstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
return (Opc == Mips::BEQ || Opc == Mips::BNE || Opc == Mips::BGTZ ||
Opc == Mips::BGEZ || Opc == Mips::BLTZ || Opc == Mips::BLEZ ||
Opc == Mips::BEQ64 || Opc == Mips::BNE64 || Opc == Mips::BGTZ64 ||
@@ -356,51 +417,134 @@ unsigned MipsSEInstrInfo::GetAnalyzableBrOpc(unsigned Opc) const {
Opc : 0;
}
-void MipsSEInstrInfo::ExpandRetRA(MachineBasicBlock &MBB,
+void MipsSEInstrInfo::expandRetRA(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned Opc) const {
BuildMI(MBB, I, I->getDebugLoc(), get(Opc)).addReg(Mips::RA);
}
-void MipsSEInstrInfo::ExpandExtractElementF64(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
+std::pair<bool, bool>
+MipsSEInstrInfo::compareOpndSize(unsigned Opc,
+ const MachineFunction &MF) const {
+ const MCInstrDesc &Desc = get(Opc);
+ assert(Desc.NumOperands == 2 && "Unary instruction expected.");
+ const MipsRegisterInfo *RI = &getRegisterInfo();
+ unsigned DstRegSize = getRegClass(Desc, 0, RI, MF)->getSize();
+ unsigned SrcRegSize = getRegClass(Desc, 1, RI, MF)->getSize();
+
+ return std::make_pair(DstRegSize > SrcRegSize, DstRegSize < SrcRegSize);
+}
+
+void MipsSEInstrInfo::expandPseudoMFHiLo(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned NewOpc) const {
+ BuildMI(MBB, I, I->getDebugLoc(), get(NewOpc), I->getOperand(0).getReg());
+}
+
+void MipsSEInstrInfo::expandPseudoMTLoHi(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned LoOpc,
+ unsigned HiOpc,
+ bool HasExplicitDef) const {
+ // Expand
+ // lo_hi pseudomtlohi $gpr0, $gpr1
+ // to these two instructions:
+ // mtlo $gpr0
+ // mthi $gpr1
+
+ DebugLoc DL = I->getDebugLoc();
+ const MachineOperand &SrcLo = I->getOperand(1), &SrcHi = I->getOperand(2);
+ MachineInstrBuilder LoInst = BuildMI(MBB, I, DL, get(LoOpc));
+ MachineInstrBuilder HiInst = BuildMI(MBB, I, DL, get(HiOpc));
+ LoInst.addReg(SrcLo.getReg(), getKillRegState(SrcLo.isKill()));
+ HiInst.addReg(SrcHi.getReg(), getKillRegState(SrcHi.isKill()));
+
+ // Add lo/hi registers if the mtlo/hi instructions created have explicit
+ // def registers.
+ if (HasExplicitDef) {
+ unsigned DstReg = I->getOperand(0).getReg();
+ unsigned DstLo = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
+ unsigned DstHi = getRegisterInfo().getSubReg(DstReg, Mips::sub_hi);
+ LoInst.addReg(DstLo, RegState::Define);
+ HiInst.addReg(DstHi, RegState::Define);
+ }
+}
+
+void MipsSEInstrInfo::expandCvtFPInt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned CvtOpc, unsigned MovOpc,
+ bool IsI64) const {
+ const MCInstrDesc &CvtDesc = get(CvtOpc), &MovDesc = get(MovOpc);
+ const MachineOperand &Dst = I->getOperand(0), &Src = I->getOperand(1);
+ unsigned DstReg = Dst.getReg(), SrcReg = Src.getReg(), TmpReg = DstReg;
+ unsigned KillSrc = getKillRegState(Src.isKill());
+ DebugLoc DL = I->getDebugLoc();
+ bool DstIsLarger, SrcIsLarger;
+
+ tie(DstIsLarger, SrcIsLarger) = compareOpndSize(CvtOpc, *MBB.getParent());
+
+ if (DstIsLarger)
+ TmpReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
+
+ if (SrcIsLarger)
+ DstReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
+
+ BuildMI(MBB, I, DL, MovDesc, TmpReg).addReg(SrcReg, KillSrc);
+ BuildMI(MBB, I, DL, CvtDesc, DstReg).addReg(TmpReg, RegState::Kill);
+}
+
+void MipsSEInstrInfo::expandExtractElementF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ bool FP64) const {
unsigned DstReg = I->getOperand(0).getReg();
unsigned SrcReg = I->getOperand(1).getReg();
unsigned N = I->getOperand(2).getImm();
- const MCInstrDesc& Mfc1Tdd = get(Mips::MFC1);
DebugLoc dl = I->getDebugLoc();
assert(N < 2 && "Invalid immediate");
- unsigned SubIdx = N ? Mips::sub_fpodd : Mips::sub_fpeven;
+ unsigned SubIdx = N ? Mips::sub_hi : Mips::sub_lo;
unsigned SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx);
- BuildMI(MBB, I, dl, Mfc1Tdd, DstReg).addReg(SubReg);
+ if (SubIdx == Mips::sub_hi && FP64)
+ BuildMI(MBB, I, dl, get(Mips::MFHC1), DstReg).addReg(SubReg);
+ else
+ BuildMI(MBB, I, dl, get(Mips::MFC1), DstReg).addReg(SubReg);
}
-void MipsSEInstrInfo::ExpandBuildPairF64(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
+void MipsSEInstrInfo::expandBuildPairF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ bool FP64) const {
unsigned DstReg = I->getOperand(0).getReg();
unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
const MCInstrDesc& Mtc1Tdd = get(Mips::MTC1);
DebugLoc dl = I->getDebugLoc();
const TargetRegisterInfo &TRI = getRegisterInfo();
- // mtc1 Lo, $fp
- // mtc1 Hi, $fp + 1
- BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_fpeven))
+ // For FP32 mode:
+ // mtc1 Lo, $fp
+ // mtc1 Hi, $fp + 1
+ // For FP64 mode:
+ // mtc1 Lo, $fp
+ // mthc1 Hi, $fp
+
+ BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_lo))
.addReg(LoReg);
- BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_fpodd))
- .addReg(HiReg);
+
+ if (FP64)
+ BuildMI(MBB, I, dl, get(Mips::MTHC1), TRI.getSubReg(DstReg, Mips::sub_hi))
+ .addReg(HiReg);
+ else
+ BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_hi))
+ .addReg(HiReg);
}
-void MipsSEInstrInfo::ExpandEhReturn(MachineBasicBlock &MBB,
+void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
// This pseudo instruction is generated as part of the lowering of
// ISD::EH_RETURN. We convert it to a stack increment by OffsetReg, and
// indirect jump to TargetReg
const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
unsigned ADDU = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned OR = STI.isABI_N64() ? Mips::OR64 : Mips::OR;
unsigned JR = STI.isABI_N64() ? Mips::JR64 : Mips::JR;
unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
unsigned RA = STI.isABI_N64() ? Mips::RA_64 : Mips::RA;
@@ -409,13 +553,13 @@ void MipsSEInstrInfo::ExpandEhReturn(MachineBasicBlock &MBB,
unsigned OffsetReg = I->getOperand(0).getReg();
unsigned TargetReg = I->getOperand(1).getReg();
- // or $ra, $v0, $zero
+ // addu $ra, $v0, $zero
// addu $sp, $sp, $v1
// jr $ra
if (TM.getRelocationModel() == Reloc::PIC_)
- BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(OR), T9)
+ BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), T9)
.addReg(TargetReg).addReg(ZERO);
- BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(OR), RA)
+ BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), RA)
.addReg(TargetReg).addReg(ZERO);
BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), SP)
.addReg(SP).addReg(OffsetReg);
diff --git a/lib/Target/Mips/MipsSEInstrInfo.h b/lib/Target/Mips/MipsSEInstrInfo.h
index 0bf7876f0fe0..6d2dd901f33b 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/lib/Target/Mips/MipsSEInstrInfo.h
@@ -65,7 +65,7 @@ public:
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
- virtual unsigned GetOppositeBranchOpc(unsigned Opc) const;
+ virtual unsigned getOppositeBranchOpc(unsigned Opc) const;
/// Adjust SP by Amount bytes.
void adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock &MBB,
@@ -79,15 +79,39 @@ public:
unsigned *NewImm) const;
private:
- virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const;
+ virtual unsigned getAnalyzableBrOpc(unsigned Opc) const;
- void ExpandRetRA(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ void expandRetRA(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Opc) const;
- void ExpandExtractElementF64(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
- void ExpandBuildPairF64(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
- void ExpandEhReturn(MachineBasicBlock &MBB,
+
+ std::pair<bool, bool> compareOpndSize(unsigned Opc,
+ const MachineFunction &MF) const;
+
+ void expandPseudoMFHiLo(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned NewOpc) const;
+
+ void expandPseudoMTLoHi(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned LoOpc, unsigned HiOpc,
+ bool HasExplicitDef) const;
+
+ /// Expand pseudo Int-to-FP conversion instructions.
+ ///
+ /// For example, the following pseudo instruction
+ /// PseudoCVT_D32_W D2, A5
+ /// gets expanded into these two instructions:
+ /// MTC1 F4, A5
+ /// CVT_D32_W D2, F4
+ ///
+ /// We do this expansion post-RA to avoid inserting a floating point copy
+ /// instruction between MTC1 and CVT_D32_W.
+ void expandCvtFPInt(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned CvtOpc, unsigned MovOpc, bool IsI64) const;
+
+ void expandExtractElementF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, bool FP64) const;
+ void expandBuildPairF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, bool FP64) const;
+ void expandEhReturn(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
};
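expandCvtFPInt's behaviour hinges on compareOpndSize: when the destination register class is wider than the source, the integer move targets the low subregister of the destination, exactly as in the PseudoCVT_D32_W example in the comment above. A standalone sketch of that size comparison with illustrative byte counts:

#include <cassert>
#include <utility>

// Mirrors the (DstIsLarger, SrcIsLarger) pair returned by compareOpndSize.
static std::pair<bool, bool> compareSizes(unsigned DstBytes, unsigned SrcBytes) {
  return std::make_pair(DstBytes > SrcBytes, DstBytes < SrcBytes);
}

int main() {
  // PseudoCVT_D32_W: 64-bit FP destination (8 bytes), 32-bit integer source (4).
  std::pair<bool, bool> R = compareSizes(8, 4);
  assert(R.first && !R.second);
  // DstIsLarger, so MTC1 writes the low subregister (F4 of D2 in the example)
  // and CVT_D32_W then produces the full 64-bit destination.
  return 0;
}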
diff --git a/lib/Target/Mips/MipsSERegisterInfo.cpp b/lib/Target/Mips/MipsSERegisterInfo.cpp
index 96967380b29d..2d440840aaff 100644
--- a/lib/Target/Mips/MipsSERegisterInfo.cpp
+++ b/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -40,9 +40,8 @@
using namespace llvm;
-MipsSERegisterInfo::MipsSERegisterInfo(const MipsSubtarget &ST,
- const MipsSEInstrInfo &I)
- : MipsRegisterInfo(ST), TII(I) {}
+MipsSERegisterInfo::MipsSERegisterInfo(const MipsSubtarget &ST)
+ : MipsRegisterInfo(ST) {}
bool MipsSERegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
@@ -57,10 +56,28 @@ requiresFrameIndexScavenging(const MachineFunction &MF) const {
const TargetRegisterClass *
MipsSERegisterInfo::intRegClass(unsigned Size) const {
if (Size == 4)
- return &Mips::CPURegsRegClass;
+ return &Mips::GPR32RegClass;
assert(Size == 8);
- return &Mips::CPU64RegsRegClass;
+ return &Mips::GPR64RegClass;
+}
+
+/// Determine whether a given opcode is an MSA load/store (supporting 10-bit
+/// offsets) or a non-MSA load/store (supporting 16-bit offsets).
+static inline bool isMSALoadOrStore(const unsigned Opcode) {
+ switch (Opcode) {
+ case Mips::LD_B:
+ case Mips::LD_H:
+ case Mips::LD_W:
+ case Mips::LD_D:
+ case Mips::ST_B:
+ case Mips::ST_H:
+ case Mips::ST_W:
+ case Mips::ST_D:
+ return true;
+ default:
+ return false;
+ }
}
void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
@@ -112,21 +129,49 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
- // If MI is not a debug value, make sure Offset fits in the 16-bit immediate
- // field.
- if (!MI.isDebugValue() && !isInt<16>(Offset)) {
- MachineBasicBlock &MBB = *MI.getParent();
- DebugLoc DL = II->getDebugLoc();
- unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned NewImm;
-
- unsigned Reg = TII.loadImmediate(Offset, MBB, II, DL, &NewImm);
- BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(FrameReg)
- .addReg(Reg, RegState::Kill);
-
- FrameReg = Reg;
- Offset = SignExtend64<16>(NewImm);
- IsKill = true;
+ if (!MI.isDebugValue()) {
+ // Make sure Offset fits within the field available.
+ // For MSA instructions, this is a 10-bit signed immediate, otherwise it is
+ // a 16-bit signed immediate.
+ unsigned OffsetBitSize = isMSALoadOrStore(MI.getOpcode()) ? 10 : 16;
+
+ if (OffsetBitSize == 10 && !isInt<10>(Offset) && isInt<16>(Offset)) {
+ // If we have an offset that needs to fit into a signed 10-bit immediate
+ // and doesn't, but does fit into 16-bits then use an ADDiu
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc DL = II->getDebugLoc();
+ unsigned ADDiu = Subtarget.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
+ const TargetRegisterClass *RC =
+ Subtarget.isABI_N64() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
+ MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
+ unsigned Reg = RegInfo.createVirtualRegister(RC);
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo *>(
+ MBB.getParent()->getTarget().getInstrInfo());
+ BuildMI(MBB, II, DL, TII.get(ADDiu), Reg).addReg(FrameReg).addImm(Offset);
+
+ FrameReg = Reg;
+ Offset = 0;
+ IsKill = true;
+ } else if (!isInt<16>(Offset)) {
+ // Otherwise split the offset into 16-bit pieces and add it in multiple
+ // instructions.
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc DL = II->getDebugLoc();
+ unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+ unsigned NewImm = 0;
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo *>(
+ MBB.getParent()->getTarget().getInstrInfo());
+ unsigned Reg = TII.loadImmediate(Offset, MBB, II, DL,
+ OffsetBitSize == 16 ? &NewImm : NULL);
+ BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(FrameReg)
+ .addReg(Reg, RegState::Kill);
+
+ FrameReg = Reg;
+ Offset = SignExtend64<16>(NewImm);
+ IsKill = true;
+ }
}
MI.getOperand(OpNo).ChangeToRegister(FrameReg, false, false, IsKill);
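The new eliminateFI logic above distinguishes the 10-bit signed offsets of MSA loads/stores from the usual 16-bit case. The following is a standalone sketch of that decision with illustrative names; the real code emits the ADDiu or loadImmediate+ADDu sequences shown in the hunk.

#include <cstdint>
#include <cstdio>

// True if Val fits in a Bits-wide signed immediate (like llvm::isInt<N>).
static bool fitsSigned(int64_t Val, unsigned Bits) {
  int64_t Min = -(INT64_C(1) << (Bits - 1));
  int64_t Max = (INT64_C(1) << (Bits - 1)) - 1;
  return Val >= Min && Val <= Max;
}

// Mirrors the three cases in eliminateFI: encode directly, fold the offset
// into a single ADDiu, or materialize it with loadImmediate and add with ADDu.
static const char *legalizeOffset(int64_t Offset, bool IsMSALoadStore) {
  unsigned OffsetBits = IsMSALoadStore ? 10 : 16;
  if (fitsSigned(Offset, OffsetBits))
    return "encode the offset directly";
  if (OffsetBits == 10 && fitsSigned(Offset, 16))
    return "ADDiu scratch, $frame, Offset; memory offset becomes 0";
  return "loadImmediate into scratch; ADDu scratch, $frame, scratch";
}

int main() {
  std::printf("%s\n", legalizeOffset(800, /*IsMSALoadStore=*/true));  // ADDiu case
  std::printf("%s\n", legalizeOffset(800, /*IsMSALoadStore=*/false)); // direct case
  std::printf("%s\n", legalizeOffset(INT64_C(1) << 20, true));        // loadImmediate case
  return 0;
}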
diff --git a/lib/Target/Mips/MipsSERegisterInfo.h b/lib/Target/Mips/MipsSERegisterInfo.h
index 2f7c37bb460d..76cdd9d230d3 100644
--- a/lib/Target/Mips/MipsSERegisterInfo.h
+++ b/lib/Target/Mips/MipsSERegisterInfo.h
@@ -21,11 +21,8 @@ namespace llvm {
class MipsSEInstrInfo;
class MipsSERegisterInfo : public MipsRegisterInfo {
- const MipsSEInstrInfo &TII;
-
public:
- MipsSERegisterInfo(const MipsSubtarget &Subtarget,
- const MipsSEInstrInfo &TII);
+ MipsSERegisterInfo(const MipsSubtarget &Subtarget);
bool requiresRegisterScavenging(const MachineFunction &MF) const;
diff --git a/lib/Target/Mips/MipsSchedule.td b/lib/Target/Mips/MipsSchedule.td
index 1add02ff83e9..2779064c4149 100644
--- a/lib/Target/Mips/MipsSchedule.td
+++ b/lib/Target/Mips/MipsSchedule.td
@@ -17,13 +17,18 @@ def IMULDIV : FuncUnit;
// Instruction Itinerary classes used for Mips
//===----------------------------------------------------------------------===//
def IIAlu : InstrItinClass;
+def IIArith : InstrItinClass;
+def IILogic : InstrItinClass;
def IILoad : InstrItinClass;
def IIStore : InstrItinClass;
def IIXfer : InstrItinClass;
def IIBranch : InstrItinClass;
def IIHiLo : InstrItinClass;
def IIImul : InstrItinClass;
+def IIImult : InstrItinClass;
def IIIdiv : InstrItinClass;
+def IIseb : InstrItinClass;
+def IIslt : InstrItinClass;
def IIFcvt : InstrItinClass;
def IIFmove : InstrItinClass;
def IIFcmp : InstrItinClass;
@@ -35,6 +40,9 @@ def IIFdivDouble : InstrItinClass;
def IIFsqrtSingle : InstrItinClass;
def IIFsqrtDouble : InstrItinClass;
def IIFrecipFsqrtStep : InstrItinClass;
+def IIFLoad : InstrItinClass;
+def IIFStore : InstrItinClass;
+def IIFmoveC1 : InstrItinClass;
def IIPseudo : InstrItinClass;
//===----------------------------------------------------------------------===//
@@ -42,6 +50,8 @@ def IIPseudo : InstrItinClass;
//===----------------------------------------------------------------------===//
def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<IIAlu , [InstrStage<1, [ALU]>]>,
+ InstrItinData<IIArith , [InstrStage<1, [ALU]>]>,
+ InstrItinData<IILogic , [InstrStage<1, [ALU]>]>,
InstrItinData<IILoad , [InstrStage<3, [ALU]>]>,
InstrItinData<IIStore , [InstrStage<1, [ALU]>]>,
InstrItinData<IIXfer , [InstrStage<2, [ALU]>]>,
@@ -59,5 +69,8 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<IIFdivDouble , [InstrStage<36, [ALU]>]>,
InstrItinData<IIFsqrtSingle , [InstrStage<54, [ALU]>]>,
InstrItinData<IIFsqrtDouble , [InstrStage<12, [ALU]>]>,
- InstrItinData<IIFrecipFsqrtStep , [InstrStage<5, [ALU]>]>
+ InstrItinData<IIFrecipFsqrtStep , [InstrStage<5, [ALU]>]>,
+ InstrItinData<IIFLoad , [InstrStage<3, [ALU]>]>,
+ InstrItinData<IIFStore , [InstrStage<1, [ALU]>]>,
+ InstrItinData<IIFmoveC1 , [InstrStage<2, [ALU]>]>
]>;
diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp
index 14a2b2779512..0a81072b0858 100644
--- a/lib/Target/Mips/MipsSubtarget.cpp
+++ b/lib/Target/Mips/MipsSubtarget.cpp
@@ -48,6 +48,17 @@ static cl::opt<bool> Mips_Os16(
"floating point as Mips 16"),
cl::Hidden);
+static cl::opt<bool>
+Mips16HardFloat("mips16-hard-float", cl::NotHidden,
+ cl::desc("MIPS: mips16 hard float enable."),
+ cl::init(false));
+
+static cl::opt<bool>
+Mips16ConstantIslands(
+ "mips16-constant-islands", cl::Hidden,
+ cl::desc("MIPS: mips16 constant islands enable. experimental feature"),
+ cl::init(false));
+
void MipsSubtarget::anchor() { }
MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
@@ -58,8 +69,9 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false),
IsLinux(true), HasSEInReg(false), HasCondMov(false), HasSwap(false),
HasBitCount(false), HasFPIdx(false),
- InMips16Mode(false), InMicroMipsMode(false), HasDSP(false), HasDSPR2(false),
- AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16),
+ InMips16Mode(false), InMips16HardFloat(Mips16HardFloat),
+ InMicroMipsMode(false), HasDSP(false), HasDSPR2(false),
+ AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16), HasMSA(false),
RM(_RM), OverrideMode(NoOverride), TM(_TM)
{
std::string CPUName = CPU;
@@ -83,12 +95,20 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
(hasMips64() && (isABI_N32() || isABI_N64()))) &&
"Invalid Arch & ABI pair.");
+ if (hasMSA() && !isFP64bit())
+ report_fatal_error("MSA requires a 64-bit FPU register file (FR=1 mode). "
+ "See -mattr=+fp64.",
+ false);
+
// Is the target system Linux ?
if (TT.find("linux") == std::string::npos)
IsLinux = false;
// Set UseSmallSection.
UseSmallSection = !IsLinux && (RM == Reloc::Static);
+  // Set some subtarget-specific features.
+  if (inMips16Mode())
+    HasBitCount = false;
}
bool
@@ -98,7 +118,7 @@ MipsSubtarget::enablePostRAScheduler(CodeGenOpt::Level OptLevel,
Mode = TargetSubtargetInfo::ANTIDEP_NONE;
CriticalPathRCs.clear();
CriticalPathRCs.push_back(hasMips64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass);
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass);
return OptLevel >= CodeGenOpt::Aggressive;
}
@@ -146,3 +166,11 @@ void MipsSubtarget::resetSubtarget(MachineFunction *MF) {
}
}
+bool MipsSubtarget::mipsSEUsesSoftFloat() const {
+ return TM->Options.UseSoftFloat && !InMips16HardFloat;
+}
+
+bool MipsSubtarget::useConstantIslands() {
+ DEBUG(dbgs() << "use constant islands " << Mips16ConstantIslands << "\n");
+ return Mips16ConstantIslands;
+}
diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h
index f2f0e15887e4..6b2ab1238b87 100644
--- a/lib/Target/Mips/MipsSubtarget.h
+++ b/lib/Target/Mips/MipsSubtarget.h
@@ -93,6 +93,9 @@ protected:
// InMips16 -- can process Mips16 instructions
bool InMips16Mode;
+ // Mips16 hard float
+ bool InMips16HardFloat;
+
// PreviousInMips16 -- the function we just processed was in Mips 16 Mode
bool PreviousInMips16Mode;
@@ -110,6 +113,9 @@ protected:
// compiled as Mips32
bool Os16;
+ // HasMSA -- supports MSA ASE.
+ bool HasMSA;
+
InstrItineraryData InstrItins;
// The instance to the register info section object
@@ -154,6 +160,7 @@ public:
bool isLittle() const { return IsLittle; }
bool isFP64bit() const { return IsFP64bit; }
+ bool isNotFP64bit() const { return !IsFP64bit; }
bool isGP64bit() const { return IsGP64bit; }
bool isGP32bit() const { return !IsGP64bit; }
bool isSingleFloat() const { return IsSingleFloat; }
@@ -170,28 +177,48 @@ public:
}
llvm_unreachable("Unexpected mode");
}
- bool inMips16ModeDefault() {
+ bool inMips16ModeDefault() const {
return InMips16Mode;
}
+ bool inMips16HardFloat() const {
+ return inMips16Mode() && InMips16HardFloat;
+ }
bool inMicroMipsMode() const { return InMicroMipsMode; }
bool hasDSP() const { return HasDSP; }
bool hasDSPR2() const { return HasDSPR2; }
+ bool hasMSA() const { return HasMSA; }
bool isLinux() const { return IsLinux; }
bool useSmallSection() const { return UseSmallSection; }
bool hasStandardEncoding() const { return !inMips16Mode(); }
+ bool mipsSEUsesSoftFloat() const;
+
+ bool enableLongBranchPass() const {
+ return hasStandardEncoding() || allowMixed16_32();
+ }
+
/// Features related to the presence of specific instructions.
bool hasSEInReg() const { return HasSEInReg; }
bool hasCondMov() const { return HasCondMov; }
bool hasSwap() const { return HasSwap; }
bool hasBitCount() const { return HasBitCount; }
bool hasFPIdx() const { return HasFPIdx; }
+ bool hasExtractInsert() const { return !inMips16Mode() && hasMips32r2(); }
- bool allowMixed16_32() const { return AllowMixed16_32;};
+ const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
+ bool allowMixed16_32() const { return inMips16ModeDefault() |
+ AllowMixed16_32;}
bool os16() const { return Os16;};
+// for now constant islands are on for the whole compilation unit but we only
+// really use them if in addition we are in mips16 mode
+//
+static bool useConstantIslands();
+
+ unsigned stackAlignment() const { return hasMips64() ? 16 : 8; }
+
// Grab MipsRegInfo object
const MipsReginfo &getMReginfo() const { return MRI; }
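The comment above notes that the constant-islands flag is currently global even though it is only meant to matter for MIPS16 code. A minimal sketch of how a consumer could combine the two queries; purely illustrative, the helper name is invented and the real check lives in the Mips16 passes.

#include "MipsSubtarget.h"

// Hedged sketch: constant islands only matter when the command-line flag is
// set AND the current function is actually compiled as MIPS16.
static bool reallyUseConstantIslands(const llvm::MipsSubtarget &ST) {
  return llvm::MipsSubtarget::useConstantIslands() && ST.inMips16Mode();
}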
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index ee28e2a122dd..5046c1b782f6 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -22,6 +22,7 @@
#include "MipsSEISelLowering.h"
#include "MipsSEISelDAGToDAG.h"
#include "Mips16FrameLowering.h"
+#include "Mips16HardFloat.h"
#include "Mips16InstrInfo.h"
#include "Mips16ISelDAGToDAG.h"
#include "Mips16ISelLowering.h"
@@ -31,6 +32,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Transforms/Scalar.h"
using namespace llvm;
@@ -69,8 +71,9 @@ MipsTargetMachine(const Target &T, StringRef TT,
"E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32-S64")),
InstrInfo(MipsInstrInfo::create(*this)),
FrameLowering(MipsFrameLowering::create(*this, Subtarget)),
- TLInfo(MipsTargetLowering::create(*this)),
- TSInfo(*this), JITInfo() {
+ TLInfo(MipsTargetLowering::create(*this)), TSInfo(*this),
+ InstrItins(Subtarget.getInstrItineraryData()), JITInfo() {
+ initAsmInfo();
}
@@ -132,7 +135,13 @@ namespace {
class MipsPassConfig : public TargetPassConfig {
public:
MipsPassConfig(MipsTargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {}
+ : TargetPassConfig(TM, PM) {
+ // The current implementation of long branch pass requires a scratch
+ // register ($at) to be available before branch instructions. Tail merging
+ // can break this requirement, so disable it when long branch pass is
+ // enabled.
+ EnableTailMerge = !getMipsSubtarget().enableLongBranchPass();
+ }
MipsTargetMachine &getMipsTargetMachine() const {
return getTM<MipsTargetMachine>();
@@ -156,6 +165,9 @@ void MipsPassConfig::addIRPasses() {
TargetPassConfig::addIRPasses();
if (getMipsSubtarget().os16())
addPass(createMipsOs16(getMipsTargetMachine()));
+ if (getMipsSubtarget().inMips16HardFloat())
+ addPass(createMips16HardFloat(getMipsTargetMachine()));
+ addPass(createPartiallyInlineLibCallsPass());
}
// Install an instruction selector pass using
// the ISelDag to gen Mips code.
@@ -191,8 +203,7 @@ bool MipsPassConfig::addPreEmitPass() {
const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
addPass(createMipsDelaySlotFillerPass(TM));
- if (Subtarget.hasStandardEncoding() ||
- Subtarget.allowMixed16_32())
+ if (Subtarget.enableLongBranchPass())
addPass(createMipsLongBranchPass(TM));
if (Subtarget.inMips16Mode() ||
Subtarget.allowMixed16_32())
diff --git a/lib/Target/Mips/MipsTargetMachine.h b/lib/Target/Mips/MipsTargetMachine.h
index ee557084fbbf..5a9a11d861c0 100644
--- a/lib/Target/Mips/MipsTargetMachine.h
+++ b/lib/Target/Mips/MipsTargetMachine.h
@@ -44,6 +44,7 @@ class MipsTargetMachine : public LLVMTargetMachine {
OwningPtr<const MipsFrameLowering> FrameLoweringSE;
OwningPtr<const MipsTargetLowering> TLInfoSE;
MipsSelectionDAGInfo TSInfo;
+ const InstrItineraryData &InstrItins;
MipsJITInfo JITInfo;
public:
@@ -65,6 +66,11 @@ public:
{ return &Subtarget; }
virtual const DataLayout *getDataLayout() const
{ return &DL;}
+
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return Subtarget.inMips16Mode() ? 0 : &InstrItins;
+ }
+
virtual MipsJITInfo *getJITInfo()
{ return &JITInfo; }
diff --git a/lib/Target/Mips/MipsTargetStreamer.h b/lib/Target/Mips/MipsTargetStreamer.h
new file mode 100644
index 000000000000..96966fd7cbc0
--- /dev/null
+++ b/lib/Target/Mips/MipsTargetStreamer.h
@@ -0,0 +1,44 @@
+//===-- MipsTargetStreamer.h - Mips Target Streamer ------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSTARGETSTREAMER_H
+#define MIPSTARGETSTREAMER_H
+
+#include "llvm/MC/MCELFStreamer.h"
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class MipsTargetStreamer : public MCTargetStreamer {
+ virtual void anchor();
+
+public:
+ virtual void emitMipsHackELFFlags(unsigned Flags) = 0;
+ virtual void emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val) = 0;
+};
+
+// This part is for ascii assembly output
+class MipsTargetAsmStreamer : public MipsTargetStreamer {
+ formatted_raw_ostream &OS;
+
+public:
+ MipsTargetAsmStreamer(formatted_raw_ostream &OS);
+ virtual void emitMipsHackELFFlags(unsigned Flags);
+ virtual void emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val);
+};
+
+// This part is for ELF object output
+class MipsTargetELFStreamer : public MipsTargetStreamer {
+public:
+ MCELFStreamer &getStreamer();
+ virtual void emitMipsHackELFFlags(unsigned Flags);
+ virtual void emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val);
+};
+}
+
+#endif
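To show how the new interface is meant to be consumed: an emitter fetches the target streamer from the generic MCStreamer and calls the Mips-specific hooks. A hedged usage sketch follows; the getTargetStreamer accessor and the flag value are assumptions, not quoted from this import.

#include "MipsTargetStreamer.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ELF.h"

// Hedged sketch: record ELF header flags through the Mips target streamer.
static void emitMipsFlags(llvm::MCStreamer &OutStreamer, unsigned EFlags) {
  llvm::MipsTargetStreamer &TS =
      static_cast<llvm::MipsTargetStreamer &>(OutStreamer.getTargetStreamer());
  TS.emitMipsHackELFFlags(EFlags); // e.g. llvm::ELF::EF_MIPS_ARCH_32R2
}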
diff --git a/lib/Target/NVPTX/CMakeLists.txt b/lib/Target/NVPTX/CMakeLists.txt
index 735ca9b6b765..4f1324c6d5a5 100644
--- a/lib/Target/NVPTX/CMakeLists.txt
+++ b/lib/Target/NVPTX/CMakeLists.txt
@@ -24,11 +24,13 @@ set(NVPTXCodeGen_sources
NVPTXUtilities.cpp
NVVMReflect.cpp
NVPTXGenericToNVVM.cpp
+ NVPTXPrologEpilogPass.cpp
+ NVPTXMCExpr.cpp
)
add_llvm_target(NVPTXCodeGen ${NVPTXCodeGen_sources})
-add_dependencies(LLVMNVPTXCodeGen intrinsics_gen)
+add_dependencies(LLVMNVPTXCodeGen NVPTXCommonTableGen intrinsics_gen)
add_subdirectory(TargetInfo)
add_subdirectory(InstPrinter)
diff --git a/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp b/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
index 10051c768058..d5be0e42ab0c 100644
--- a/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
+++ b/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
@@ -1 +1,289 @@
-// Placeholder
+//===-- NVPTXInstPrinter.cpp - PTX assembly instruction printing ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Print MCInst instructions to .ptx format.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "asm-printer"
+#include "InstPrinter/NVPTXInstPrinter.h"
+#include "NVPTX.h"
+#include "MCTargetDesc/NVPTXBaseInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+#include <cctype>
+using namespace llvm;
+
+#include "NVPTXGenAsmWriter.inc"
+
+
+NVPTXInstPrinter::NVPTXInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI)
+ : MCInstPrinter(MAI, MII, MRI) {
+ setAvailableFeatures(STI.getFeatureBits());
+}
+
+void NVPTXInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
+ // Decode the virtual register
+ // Must be kept in sync with NVPTXAsmPrinter::encodeVirtualRegister
+ unsigned RCId = (RegNo >> 28);
+ switch (RCId) {
+ default: report_fatal_error("Bad virtual register encoding");
+ case 0:
+ // This is actually a physical register, so defer to the autogenerated
+ // register printer
+ OS << getRegisterName(RegNo);
+ return;
+ case 1:
+ OS << "%p";
+ break;
+ case 2:
+ OS << "%rs";
+ break;
+ case 3:
+ OS << "%r";
+ break;
+ case 4:
+ OS << "%rl";
+ break;
+ case 5:
+ OS << "%f";
+ break;
+ case 6:
+ OS << "%fl";
+ break;
+ }
+
+ unsigned VReg = RegNo & 0x0FFFFFFF;
+ OS << VReg;
+}
+
+void NVPTXInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
+ StringRef Annot) {
+ printInstruction(MI, OS);
+
+ // Next always print the annotation.
+ printAnnotation(OS, Annot);
+}
+
+void NVPTXInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg()) {
+ unsigned Reg = Op.getReg();
+ printRegName(O, Reg);
+ } else if (Op.isImm()) {
+ O << markup("<imm:") << formatImm(Op.getImm()) << markup(">");
+ } else {
+ assert(Op.isExpr() && "Unknown operand kind in printOperand");
+ O << *Op.getExpr();
+ }
+}
+
+void NVPTXInstPrinter::printCvtMode(const MCInst *MI, int OpNum, raw_ostream &O,
+ const char *Modifier) {
+ const MCOperand &MO = MI->getOperand(OpNum);
+ int64_t Imm = MO.getImm();
+
+ if (strcmp(Modifier, "ftz") == 0) {
+ // FTZ flag
+ if (Imm & NVPTX::PTXCvtMode::FTZ_FLAG)
+ O << ".ftz";
+ } else if (strcmp(Modifier, "sat") == 0) {
+ // SAT flag
+ if (Imm & NVPTX::PTXCvtMode::SAT_FLAG)
+ O << ".sat";
+ } else if (strcmp(Modifier, "base") == 0) {
+ // Default operand
+ switch (Imm & NVPTX::PTXCvtMode::BASE_MASK) {
+ default:
+ return;
+ case NVPTX::PTXCvtMode::NONE:
+ break;
+ case NVPTX::PTXCvtMode::RNI:
+ O << ".rni";
+ break;
+ case NVPTX::PTXCvtMode::RZI:
+ O << ".rzi";
+ break;
+ case NVPTX::PTXCvtMode::RMI:
+ O << ".rmi";
+ break;
+ case NVPTX::PTXCvtMode::RPI:
+ O << ".rpi";
+ break;
+ case NVPTX::PTXCvtMode::RN:
+ O << ".rn";
+ break;
+ case NVPTX::PTXCvtMode::RZ:
+ O << ".rz";
+ break;
+ case NVPTX::PTXCvtMode::RM:
+ O << ".rm";
+ break;
+ case NVPTX::PTXCvtMode::RP:
+ O << ".rp";
+ break;
+ }
+ } else {
+ llvm_unreachable("Invalid conversion modifier");
+ }
+}
+
+void NVPTXInstPrinter::printCmpMode(const MCInst *MI, int OpNum, raw_ostream &O,
+ const char *Modifier) {
+ const MCOperand &MO = MI->getOperand(OpNum);
+ int64_t Imm = MO.getImm();
+
+ if (strcmp(Modifier, "ftz") == 0) {
+ // FTZ flag
+ if (Imm & NVPTX::PTXCmpMode::FTZ_FLAG)
+ O << ".ftz";
+ } else if (strcmp(Modifier, "base") == 0) {
+ switch (Imm & NVPTX::PTXCmpMode::BASE_MASK) {
+ default:
+ return;
+ case NVPTX::PTXCmpMode::EQ:
+ O << ".eq";
+ break;
+ case NVPTX::PTXCmpMode::NE:
+ O << ".ne";
+ break;
+ case NVPTX::PTXCmpMode::LT:
+ O << ".lt";
+ break;
+ case NVPTX::PTXCmpMode::LE:
+ O << ".le";
+ break;
+ case NVPTX::PTXCmpMode::GT:
+ O << ".gt";
+ break;
+ case NVPTX::PTXCmpMode::GE:
+ O << ".ge";
+ break;
+ case NVPTX::PTXCmpMode::LO:
+ O << ".lo";
+ break;
+ case NVPTX::PTXCmpMode::LS:
+ O << ".ls";
+ break;
+ case NVPTX::PTXCmpMode::HI:
+ O << ".hi";
+ break;
+ case NVPTX::PTXCmpMode::HS:
+ O << ".hs";
+ break;
+ case NVPTX::PTXCmpMode::EQU:
+ O << ".equ";
+ break;
+ case NVPTX::PTXCmpMode::NEU:
+ O << ".neu";
+ break;
+ case NVPTX::PTXCmpMode::LTU:
+ O << ".ltu";
+ break;
+ case NVPTX::PTXCmpMode::LEU:
+ O << ".leu";
+ break;
+ case NVPTX::PTXCmpMode::GTU:
+ O << ".gtu";
+ break;
+ case NVPTX::PTXCmpMode::GEU:
+ O << ".geu";
+ break;
+ case NVPTX::PTXCmpMode::NUM:
+ O << ".num";
+ break;
+ case NVPTX::PTXCmpMode::NotANumber:
+ O << ".nan";
+ break;
+ }
+ } else {
+ llvm_unreachable("Empty Modifier");
+ }
+}
+
+void NVPTXInstPrinter::printLdStCode(const MCInst *MI, int OpNum,
+ raw_ostream &O, const char *Modifier) {
+ if (Modifier) {
+ const MCOperand &MO = MI->getOperand(OpNum);
+ int Imm = (int) MO.getImm();
+ if (!strcmp(Modifier, "volatile")) {
+ if (Imm)
+ O << ".volatile";
+ } else if (!strcmp(Modifier, "addsp")) {
+ switch (Imm) {
+ case NVPTX::PTXLdStInstCode::GLOBAL:
+ O << ".global";
+ break;
+ case NVPTX::PTXLdStInstCode::SHARED:
+ O << ".shared";
+ break;
+ case NVPTX::PTXLdStInstCode::LOCAL:
+ O << ".local";
+ break;
+ case NVPTX::PTXLdStInstCode::PARAM:
+ O << ".param";
+ break;
+ case NVPTX::PTXLdStInstCode::CONSTANT:
+ O << ".const";
+ break;
+ case NVPTX::PTXLdStInstCode::GENERIC:
+ break;
+ default:
+ llvm_unreachable("Wrong Address Space");
+ }
+ } else if (!strcmp(Modifier, "sign")) {
+ if (Imm == NVPTX::PTXLdStInstCode::Signed)
+ O << "s";
+ else if (Imm == NVPTX::PTXLdStInstCode::Unsigned)
+ O << "u";
+ else
+ O << "f";
+ } else if (!strcmp(Modifier, "vec")) {
+ if (Imm == NVPTX::PTXLdStInstCode::V2)
+ O << ".v2";
+ else if (Imm == NVPTX::PTXLdStInstCode::V4)
+ O << ".v4";
+ } else
+ llvm_unreachable("Unknown Modifier");
+ } else
+ llvm_unreachable("Empty Modifier");
+}
+
+void NVPTXInstPrinter::printMemOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O, const char *Modifier) {
+ printOperand(MI, OpNum, O);
+
+ if (Modifier && !strcmp(Modifier, "add")) {
+ O << ", ";
+ printOperand(MI, OpNum + 1, O);
+ } else {
+ if (MI->getOperand(OpNum + 1).isImm() &&
+ MI->getOperand(OpNum + 1).getImm() == 0)
+ return; // don't print ',0' or '+0'
+ O << "+";
+ printOperand(MI, OpNum + 1, O);
+ }
+}
+
+void NVPTXInstPrinter::printProtoIdent(const MCInst *MI, int OpNum,
+ raw_ostream &O, const char *Modifier) {
+ const MCOperand &Op = MI->getOperand(OpNum);
+ assert(Op.isExpr() && "Call prototype is not an MCExpr?");
+ const MCExpr *Expr = Op.getExpr();
+ const MCSymbol &Sym = cast<MCSymbolRefExpr>(Expr)->getSymbol();
+ O << Sym.getName();
+}
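printRegName above decodes a scheme in which the top four bits of the operand hold a register-class id and the low 28 bits hold the virtual register number; the comment notes it must stay in sync with NVPTXAsmPrinter::encodeVirtualRegister, which is assumed here to be the mirror-image encoder. A standalone sketch of the round trip, with the class ids taken from the switch above:

#include <cassert>
#include <string>

// Class ids as printed by NVPTXInstPrinter::printRegName: 1=%p, 2=%rs, 3=%r,
// 4=%rl, 5=%f, 6=%fl; 0 means a physical register and is not handled here.
static unsigned encodeVReg(unsigned RCId, unsigned VReg) {
  assert(RCId >= 1 && RCId <= 6 && VReg <= 0x0FFFFFFFu && "out of range");
  return (RCId << 28) | VReg;
}

static std::string decodeVReg(unsigned Encoded) {
  static const char *Prefix[] = {"", "%p", "%rs", "%r", "%rl", "%f", "%fl"};
  unsigned RCId = Encoded >> 28;
  unsigned VReg = Encoded & 0x0FFFFFFFu;
  assert(RCId >= 1 && RCId <= 6 && "physical or unknown register class");
  return std::string(Prefix[RCId]) + std::to_string(VReg);
}

int main() {
  assert(decodeVReg(encodeVReg(3, 7)) == "%r7");    // 32-bit integer register
  assert(decodeVReg(encodeVReg(6, 12)) == "%fl12"); // 64-bit float register
  return 0;
}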
diff --git a/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h b/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h
new file mode 100644
index 000000000000..93029ae8d39e
--- /dev/null
+++ b/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h
@@ -0,0 +1,53 @@
+//= NVPTXInstPrinter.h - Convert NVPTX MCInst to assembly syntax --*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints an NVPTX MCInst to .ptx file syntax.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTX_INST_PRINTER_H
+#define NVPTX_INST_PRINTER_H
+
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class MCOperand;
+class MCSubtargetInfo;
+
+class NVPTXInstPrinter : public MCInstPrinter {
+public:
+ NVPTXInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI, const MCSubtargetInfo &STI);
+
+ virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
+ virtual void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot);
+
+ // Autogenerated by tblgen.
+ void printInstruction(const MCInst *MI, raw_ostream &O);
+ static const char *getRegisterName(unsigned RegNo);
+ // End
+
+ void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printCvtMode(const MCInst *MI, int OpNum, raw_ostream &O,
+ const char *Modifier = 0);
+ void printCmpMode(const MCInst *MI, int OpNum, raw_ostream &O,
+ const char *Modifier = 0);
+ void printLdStCode(const MCInst *MI, int OpNum,
+ raw_ostream &O, const char *Modifier = 0);
+ void printMemOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O, const char *Modifier = 0);
+ void printProtoIdent(const MCInst *MI, int OpNum,
+ raw_ostream &O, const char *Modifier = 0);
+};
+
+}
+
+#endif
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h b/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
index b3e8b5d2622d..edf4a80b11e6 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
@@ -22,7 +22,6 @@ namespace llvm {
enum AddressSpace {
ADDRESS_SPACE_GENERIC = 0,
ADDRESS_SPACE_GLOBAL = 1,
- ADDRESS_SPACE_CONST_NOT_GEN = 2, // Not part of generic space
ADDRESS_SPACE_SHARED = 3,
ADDRESS_SPACE_CONST = 4,
ADDRESS_SPACE_LOCAL = 5,
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
index 459cd96cb0cd..f2784b836b70 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
@@ -17,17 +17,15 @@
using namespace llvm;
-bool CompileForDebugging;
-
// -debug-compile - Command line option to inform opt and llc passes to
// compile for debugging
-static cl::opt<bool, true>
-Debug("debug-compile", cl::desc("Compile for debugging"), cl::Hidden,
- cl::location(CompileForDebugging), cl::init(false));
+static cl::opt<bool> CompileForDebugging("debug-compile",
+ cl::desc("Compile for debugging"),
+ cl::Hidden, cl::init(false));
void NVPTXMCAsmInfo::anchor() {}
-NVPTXMCAsmInfo::NVPTXMCAsmInfo(const Target &T, const StringRef &TT) {
+NVPTXMCAsmInfo::NVPTXMCAsmInfo(const StringRef &TT) {
Triple TheTriple(TT);
if (TheTriple.getArch() == Triple::nvptx64) {
PointerSize = CalleeSaveStackSlotSize = 8;
@@ -37,8 +35,6 @@ NVPTXMCAsmInfo::NVPTXMCAsmInfo(const Target &T, const StringRef &TT) {
PrivateGlobalPrefix = "$L__";
- AllowPeriodsInName = false;
-
HasSetDirective = false;
HasSingleParameterDotFile = false;
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h
index 82097daa4ae5..7d1633f60d2c 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h
@@ -23,7 +23,7 @@ class StringRef;
class NVPTXMCAsmInfo : public MCAsmInfo {
virtual void anchor();
public:
- explicit NVPTXMCAsmInfo(const Target &T, const StringRef &TT);
+ explicit NVPTXMCAsmInfo(const StringRef &TT);
};
} // namespace llvm
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
index ccd29705df72..871bac94f84c 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
@@ -13,6 +13,7 @@
#include "NVPTXMCTargetDesc.h"
#include "NVPTXMCAsmInfo.h"
+#include "InstPrinter/NVPTXInstPrinter.h"
#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -57,6 +58,17 @@ static MCCodeGenInfo *createNVPTXMCCodeGenInfo(
return X;
}
+static MCInstPrinter *createNVPTXMCInstPrinter(const Target &T,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI) {
+ if (SyntaxVariant == 0)
+ return new NVPTXInstPrinter(MAI, MII, MRI, STI);
+ return 0;
+}
+
// Force static initialization.
extern "C" void LLVMInitializeNVPTXTargetMC() {
// Register the MC asm info.
@@ -85,4 +97,9 @@ extern "C" void LLVMInitializeNVPTXTargetMC() {
TargetRegistry::RegisterMCSubtargetInfo(TheNVPTXTarget64,
createNVPTXMCSubtargetInfo);
+ // Register the MCInstPrinter.
+ TargetRegistry::RegisterMCInstPrinter(TheNVPTXTarget32,
+ createNVPTXMCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheNVPTXTarget64,
+ createNVPTXMCInstPrinter);
}
diff --git a/lib/Target/NVPTX/ManagedStringPool.h b/lib/Target/NVPTX/ManagedStringPool.h
index d6c79b5110cc..f9fb05922920 100644
--- a/lib/Target/NVPTX/ManagedStringPool.h
+++ b/lib/Target/NVPTX/ManagedStringPool.h
@@ -29,7 +29,7 @@ class ManagedStringPool {
public:
ManagedStringPool() {}
~ManagedStringPool() {
- SmallVector<std::string *, 8>::iterator Current = Pool.begin();
+ SmallVectorImpl<std::string *>::iterator Current = Pool.begin();
while (Current != Pool.end()) {
delete *Current;
Current++;
diff --git a/lib/Target/NVPTX/NVPTX.h b/lib/Target/NVPTX/NVPTX.h
index 072c65da35cb..490b49d9ead6 100644
--- a/lib/Target/NVPTX/NVPTX.h
+++ b/lib/Target/NVPTX/NVPTX.h
@@ -27,6 +27,7 @@
namespace llvm {
class NVPTXTargetMachine;
class FunctionPass;
+class MachineFunctionPass;
class formatted_raw_ostream;
namespace NVPTXCC {
@@ -60,12 +61,10 @@ inline static const char *NVPTXCondCodeToString(NVPTXCC::CondCodes CC) {
FunctionPass *
createNVPTXISelDag(NVPTXTargetMachine &TM, llvm::CodeGenOpt::Level OptLevel);
-FunctionPass *createLowerStructArgsPass(NVPTXTargetMachine &);
-FunctionPass *createNVPTXReMatPass(NVPTXTargetMachine &);
-FunctionPass *createNVPTXReMatBlockPass(NVPTXTargetMachine &);
ModulePass *createGenericToNVVMPass();
ModulePass *createNVVMReflectPass();
ModulePass *createNVVMReflectPass(const StringMap<int>& Mapping);
+MachineFunctionPass *createNVPTXPrologEpilogPass();
bool isImageOrSamplerVal(const Value *, const Module *);
@@ -75,8 +74,7 @@ extern Target TheNVPTXTarget64;
namespace NVPTX {
enum DrvInterface {
NVCL,
- CUDA,
- TEST
+ CUDA
};
// A field inside TSFlags needs a shift and a mask. The usage is
@@ -130,6 +128,53 @@ enum VecType {
V4 = 4
};
}
+
+/// PTXCvtMode - Conversion code enumeration
+namespace PTXCvtMode {
+enum CvtMode {
+ NONE = 0,
+ RNI,
+ RZI,
+ RMI,
+ RPI,
+ RN,
+ RZ,
+ RM,
+ RP,
+
+ BASE_MASK = 0x0F,
+ FTZ_FLAG = 0x10,
+ SAT_FLAG = 0x20
+};
+}
+
+/// PTXCmpMode - Comparison mode enumeration
+namespace PTXCmpMode {
+enum CmpMode {
+ EQ = 0,
+ NE,
+ LT,
+ LE,
+ GT,
+ GE,
+ LO,
+ LS,
+ HI,
+ HS,
+ EQU,
+ NEU,
+ LTU,
+ LEU,
+ GTU,
+ GEU,
+ NUM,
+ // NAN is a MACRO
+ NotANumber,
+
+ BASE_MASK = 0xFF,
+ FTZ_FLAG = 0x100
+};
+}
}
} // end namespace llvm;
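The PTXCvtMode/PTXCmpMode values above pack a base rounding or comparison mode into the low bits and optional flags above it; printCvtMode/printCmpMode peel them apart with BASE_MASK. A standalone sketch of the conversion-mode decoding, with constants copied from the enum above and suffix strings from the printer; the concatenation order here is only illustrative.

#include <cassert>
#include <string>

// Mirrors NVPTX::PTXCvtMode for the pieces used in this sketch.
enum { CvtRN = 5, CvtBaseMask = 0x0F, CvtFtzFlag = 0x10, CvtSatFlag = 0x20 };

static std::string cvtSuffix(unsigned Imm) {
  std::string S;
  if (Imm & CvtFtzFlag)
    S += ".ftz";
  if (Imm & CvtSatFlag)
    S += ".sat";
  if ((Imm & CvtBaseMask) == CvtRN)
    S += ".rn"; // the other base modes are elided in this sketch
  return S;
}

int main() {
  // A flush-to-zero, saturating, round-to-nearest conversion operand.
  assert(cvtSuffix(CvtRN | CvtFtzFlag | CvtSatFlag) == ".ftz.sat.rn");
  return 0;
}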
diff --git a/lib/Target/NVPTX/NVPTX.td b/lib/Target/NVPTX/NVPTX.td
index d78b4e81a3e5..6183a755c320 100644
--- a/lib/Target/NVPTX/NVPTX.td
+++ b/lib/Target/NVPTX/NVPTX.td
@@ -57,6 +57,12 @@ def : Proc<"sm_35", [SM35]>;
def NVPTXInstrInfo : InstrInfo {
}
+def NVPTXAsmWriter : AsmWriter {
+ bit isMCAsmWriter = 1;
+ string AsmWriterClassName = "InstPrinter";
+}
+
def NVPTX : Target {
let InstructionSet = NVPTXInstrInfo;
+ let AssemblyWriters = [NVPTXAsmWriter];
}
diff --git a/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp b/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp
index 0f792ec6826e..1f3769601788 100644
--- a/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp
+++ b/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp
@@ -37,7 +37,7 @@ bool NVPTXAllocaHoisting::runOnFunction(Function &function) {
}
char NVPTXAllocaHoisting::ID = 1;
-RegisterPass<NVPTXAllocaHoisting>
+static RegisterPass<NVPTXAllocaHoisting>
X("alloca-hoisting", "Hoisting alloca instructions in non-entry "
"blocks to the entry block");
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 229e4e5980df..7552fe704115 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -16,10 +16,11 @@
#include "MCTargetDesc/NVPTXMCAsmInfo.h"
#include "NVPTX.h"
#include "NVPTXInstrInfo.h"
-#include "NVPTXNumRegisters.h"
+#include "NVPTXMCExpr.h"
#include "NVPTXRegisterInfo.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXUtilities.h"
+#include "InstPrinter/NVPTXInstPrinter.h"
#include "cl_common_defines.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ConstantFolding.h"
@@ -47,23 +48,17 @@
#include <sstream>
using namespace llvm;
-#include "NVPTXGenAsmWriter.inc"
-
-bool RegAllocNilUsed = true;
-
#define DEPOTNAME "__local_depot"
static cl::opt<bool>
-EmitLineNumbers("nvptx-emit-line-numbers",
+EmitLineNumbers("nvptx-emit-line-numbers", cl::Hidden,
cl::desc("NVPTX Specific: Emit Line numbers even without -G"),
cl::init(true));
-namespace llvm { bool InterleaveSrcInPtx = false; }
-
-static cl::opt<bool, true>
-InterleaveSrc("nvptx-emit-src", cl::ZeroOrMore,
+static cl::opt<bool>
+InterleaveSrc("nvptx-emit-src", cl::ZeroOrMore, cl::Hidden,
cl::desc("NVPTX Specific: Emit source line in ptx file"),
- cl::location(llvm::InterleaveSrcInPtx));
+ cl::init(false));
namespace {
/// DiscoverDependentGlobals - Return a set of GlobalVariables on which \p V
@@ -131,7 +126,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
return MCConstantExpr::Create(CI->getZExtValue(), Ctx);
if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
- return MCSymbolRefExpr::Create(AP.Mang->getSymbol(GV), Ctx);
+ return MCSymbolRefExpr::Create(AP.getSymbol(GV), Ctx);
if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV))
return MCSymbolRefExpr::Create(AP.GetBlockAddressSymbol(BA), Ctx);
@@ -279,8 +274,10 @@ void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) {
const LLVMContext &ctx = MF->getFunction()->getContext();
DIScope Scope(curLoc.getScope(ctx));
- if (!Scope.Verify())
- return;
+ assert((!Scope || Scope.isScope()) &&
+ "Scope of a DebugLoc should be null or a DIScope.");
+ if (!Scope)
+ return;
StringRef fileName(Scope.getFilename());
StringRef dirName(Scope.getDirectory());
@@ -294,7 +291,7 @@ void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) {
return;
// Emit the line from the source file.
- if (llvm::InterleaveSrcInPtx)
+ if (InterleaveSrc)
this->emitSrcInText(fileName.str(), curLoc.getLine());
std::stringstream temp;
@@ -308,8 +305,115 @@ void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) {
raw_svector_ostream OS(Str);
if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)
emitLineNumberAsDotLoc(*MI);
- printInstruction(MI, OS);
- OutStreamer.EmitRawText(OS.str());
+
+ MCInst Inst;
+ lowerToMCInst(MI, Inst);
+ OutStreamer.EmitInstruction(Inst);
+}
+
+void NVPTXAsmPrinter::lowerToMCInst(const MachineInstr *MI, MCInst &OutMI) {
+ OutMI.setOpcode(MI->getOpcode());
+
+ // Special: Do not mangle symbol operand of CALL_PROTOTYPE
+ if (MI->getOpcode() == NVPTX::CALL_PROTOTYPE) {
+ const MachineOperand &MO = MI->getOperand(0);
+ OutMI.addOperand(GetSymbolRef(MO,
+ OutContext.GetOrCreateSymbol(Twine(MO.getSymbolName()))));
+ return;
+ }
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+
+ MCOperand MCOp;
+ if (lowerOperand(MO, MCOp))
+ OutMI.addOperand(MCOp);
+ }
+}
+
+bool NVPTXAsmPrinter::lowerOperand(const MachineOperand &MO,
+ MCOperand &MCOp) {
+ switch (MO.getType()) {
+ default: llvm_unreachable("unknown operand type");
+ case MachineOperand::MO_Register:
+ MCOp = MCOperand::CreateReg(encodeVirtualRegister(MO.getReg()));
+ break;
+ case MachineOperand::MO_Immediate:
+ MCOp = MCOperand::CreateImm(MO.getImm());
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
+ MO.getMBB()->getSymbol(), OutContext));
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ MCOp = GetSymbolRef(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
+ break;
+ case MachineOperand::MO_GlobalAddress:
+ MCOp = GetSymbolRef(MO, getSymbol(MO.getGlobal()));
+ break;
+ case MachineOperand::MO_FPImmediate: {
+ const ConstantFP *Cnt = MO.getFPImm();
+ APFloat Val = Cnt->getValueAPF();
+
+ switch (Cnt->getType()->getTypeID()) {
+ default: report_fatal_error("Unsupported FP type"); break;
+ case Type::FloatTyID:
+ MCOp = MCOperand::CreateExpr(
+ NVPTXFloatMCExpr::CreateConstantFPSingle(Val, OutContext));
+ break;
+ case Type::DoubleTyID:
+ MCOp = MCOperand::CreateExpr(
+ NVPTXFloatMCExpr::CreateConstantFPDouble(Val, OutContext));
+ break;
+ }
+ break;
+ }
+ }
+ return true;
+}
+
+unsigned NVPTXAsmPrinter::encodeVirtualRegister(unsigned Reg) {
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+
+ DenseMap<unsigned, unsigned> &RegMap = VRegMapping[RC];
+ unsigned RegNum = RegMap[Reg];
+
+ // Encode the register class in the upper 4 bits
+ // Must be kept in sync with NVPTXInstPrinter::printRegName
+ unsigned Ret = 0;
+ if (RC == &NVPTX::Int1RegsRegClass) {
+ Ret = (1 << 28);
+ } else if (RC == &NVPTX::Int16RegsRegClass) {
+ Ret = (2 << 28);
+ } else if (RC == &NVPTX::Int32RegsRegClass) {
+ Ret = (3 << 28);
+ } else if (RC == &NVPTX::Int64RegsRegClass) {
+ Ret = (4 << 28);
+ } else if (RC == &NVPTX::Float32RegsRegClass) {
+ Ret = (5 << 28);
+ } else if (RC == &NVPTX::Float64RegsRegClass) {
+ Ret = (6 << 28);
+ } else {
+ report_fatal_error("Bad register class");
+ }
+
+ // Insert the vreg number
+ Ret |= (RegNum & 0x0FFFFFFF);
+ return Ret;
+ } else {
+ // Some special-use registers are actually physical registers.
+ // Encode this as the register class ID of 0 and the real register ID.
+ return Reg & 0x0FFFFFFF;
+ }
+}
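encodeVirtualRegister packs the register class into bits 31..28 (1 = Int1, 2 = Int16, 3 = Int32, 4 = Int64, 5 = Float32, 6 = Float64; class 0 is reserved for the few physical/special registers) and the per-class virtual register index into the low 28 bits, so the MCInst-based printer can recover the familiar %p/%rs/%r/%rl/%f/%fl names. The stand-alone sketch below shows the decode side; it is illustrative, and the in-tree NVPTXInstPrinter::printRegName may differ in detail.

#include <cstdio>

// Illustrative decoder for the 4-bit class / 28-bit index encoding above.
// Prefixes follow getNVPTXRegClassStr(): %p, %rs, %r, %rl, %f, %fl.
void printRegName(unsigned Enc) {
  static const char *const Prefix[] = { "%p", "%rs", "%r", "%rl", "%f", "%fl" };
  unsigned RCId = Enc >> 28;         // register class selector (1..6)
  unsigned Idx  = Enc & 0x0FFFFFFF;  // per-class virtual register number
  if (RCId >= 1 && RCId <= 6)
    std::printf("%s%u\n", Prefix[RCId - 1], Idx);
  else
    std::printf("special reg %u\n", Idx);  // class 0: physical/special regs
}

int main() {
  printRegName((3u << 28) | 7);  // an Int32 vreg -> "%r7"
  return 0;
}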
+
+MCOperand NVPTXAsmPrinter::GetSymbolRef(const MachineOperand &MO,
+ const MCSymbol *Symbol) {
+ const MCExpr *Expr;
+ Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None,
+ OutContext);
+ return MCOperand::CreateExpr(Expr);
}
void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
@@ -436,9 +540,7 @@ void NVPTXAsmPrinter::EmitFunctionEntryLabel() {
}
void NVPTXAsmPrinter::EmitFunctionBodyStart() {
- const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
- unsigned numRegClasses = TRI.getNumRegClasses();
- VRidGlobal2LocalMap = new std::map<unsigned, unsigned>[numRegClasses + 1];
+ VRegMapping.clear();
OutStreamer.EmitRawText(StringRef("{\n"));
setAndEmitFunctionVirtualRegisters(*MF);
@@ -450,7 +552,20 @@ void NVPTXAsmPrinter::EmitFunctionBodyStart() {
void NVPTXAsmPrinter::EmitFunctionBodyEnd() {
OutStreamer.EmitRawText(StringRef("}\n"));
- delete[] VRidGlobal2LocalMap;
+ VRegMapping.clear();
+}
+
+void NVPTXAsmPrinter::emitImplicitDef(const MachineInstr *MI) const {
+ unsigned RegNo = MI->getOperand(0).getReg();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ if (TRI->isVirtualRegister(RegNo)) {
+ OutStreamer.AddComment(Twine("implicit-def: ") +
+ getVirtualRegisterName(RegNo));
+ } else {
+ OutStreamer.AddComment(Twine("implicit-def: ") +
+ TM.getRegisterInfo()->getName(RegNo));
+ }
+ OutStreamer.AddBlankLine();
}
void NVPTXAsmPrinter::emitKernelFunctionDirectives(const Function &F,
@@ -504,24 +619,30 @@ void NVPTXAsmPrinter::emitKernelFunctionDirectives(const Function &F,
O << ".minnctapersm " << mincta << "\n";
}
-void NVPTXAsmPrinter::getVirtualRegisterName(unsigned vr, bool isVec,
- raw_ostream &O) {
- const TargetRegisterClass *RC = MRI->getRegClass(vr);
- unsigned id = RC->getID();
+std::string
+NVPTXAsmPrinter::getVirtualRegisterName(unsigned Reg) const {
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
- std::map<unsigned, unsigned> &regmap = VRidGlobal2LocalMap[id];
- unsigned mapped_vr = regmap[vr];
+ std::string Name;
+ raw_string_ostream NameStr(Name);
- if (!isVec) {
- O << getNVPTXRegClassStr(RC) << mapped_vr;
- return;
- }
- report_fatal_error("Bad register!");
+ VRegRCMap::const_iterator I = VRegMapping.find(RC);
+ assert(I != VRegMapping.end() && "Bad register class");
+ const DenseMap<unsigned, unsigned> &RegMap = I->second;
+
+ VRegMap::const_iterator VI = RegMap.find(Reg);
+ assert(VI != RegMap.end() && "Bad virtual register");
+ unsigned MappedVR = VI->second;
+
+ NameStr << getNVPTXRegClassStr(RC) << MappedVR;
+
+ NameStr.flush();
+ return Name;
}
-void NVPTXAsmPrinter::emitVirtualRegister(unsigned int vr, bool isVec,
+void NVPTXAsmPrinter::emitVirtualRegister(unsigned int vr,
raw_ostream &O) {
- getVirtualRegisterName(vr, isVec, O);
+ O << getVirtualRegisterName(vr);
}
void NVPTXAsmPrinter::printVecModifiedImmediate(
@@ -554,145 +675,7 @@ void NVPTXAsmPrinter::printVecModifiedImmediate(
llvm_unreachable("Unknown Modifier on immediate operand");
}
-void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
- raw_ostream &O, const char *Modifier) {
- const MachineOperand &MO = MI->getOperand(opNum);
- switch (MO.getType()) {
- case MachineOperand::MO_Register:
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
- if (MO.getReg() == NVPTX::VRDepot)
- O << DEPOTNAME << getFunctionNumber();
- else
- O << getRegisterName(MO.getReg());
- } else {
- if (!Modifier)
- emitVirtualRegister(MO.getReg(), false, O);
- else {
- if (strcmp(Modifier, "vecfull") == 0)
- emitVirtualRegister(MO.getReg(), true, O);
- else
- llvm_unreachable(
- "Don't know how to handle the modifier on virtual register.");
- }
- }
- return;
- case MachineOperand::MO_Immediate:
- if (!Modifier)
- O << MO.getImm();
- else if (strstr(Modifier, "vec") == Modifier)
- printVecModifiedImmediate(MO, Modifier, O);
- else
- llvm_unreachable(
- "Don't know how to handle modifier on immediate operand");
- return;
-
- case MachineOperand::MO_FPImmediate:
- printFPConstant(MO.getFPImm(), O);
- break;
-
- case MachineOperand::MO_GlobalAddress:
- O << *Mang->getSymbol(MO.getGlobal());
- break;
-
- case MachineOperand::MO_ExternalSymbol: {
- const char *symbname = MO.getSymbolName();
- if (strstr(symbname, ".PARAM") == symbname) {
- unsigned index;
- sscanf(symbname + 6, "%u[];", &index);
- printParamName(index, O);
- } else if (strstr(symbname, ".HLPPARAM") == symbname) {
- unsigned index;
- sscanf(symbname + 9, "%u[];", &index);
- O << *CurrentFnSym << "_param_" << index << "_offset";
- } else
- O << symbname;
- break;
- }
-
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol();
- return;
-
- default:
- llvm_unreachable("Operand type not supported.");
- }
-}
-
-void NVPTXAsmPrinter::printImplicitDef(const MachineInstr *MI,
- raw_ostream &O) const {
-#ifndef __OPTIMIZE__
- O << "\t// Implicit def :";
- //printOperand(MI, 0);
- O << "\n";
-#endif
-}
-
-void NVPTXAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum,
- raw_ostream &O, const char *Modifier) {
- printOperand(MI, opNum, O);
-
- if (Modifier && !strcmp(Modifier, "add")) {
- O << ", ";
- printOperand(MI, opNum + 1, O);
- } else {
- if (MI->getOperand(opNum + 1).isImm() &&
- MI->getOperand(opNum + 1).getImm() == 0)
- return; // don't print ',0' or '+0'
- O << "+";
- printOperand(MI, opNum + 1, O);
- }
-}
-
-void NVPTXAsmPrinter::printLdStCode(const MachineInstr *MI, int opNum,
- raw_ostream &O, const char *Modifier) {
- if (Modifier) {
- const MachineOperand &MO = MI->getOperand(opNum);
- int Imm = (int) MO.getImm();
- if (!strcmp(Modifier, "volatile")) {
- if (Imm)
- O << ".volatile";
- } else if (!strcmp(Modifier, "addsp")) {
- switch (Imm) {
- case NVPTX::PTXLdStInstCode::GLOBAL:
- O << ".global";
- break;
- case NVPTX::PTXLdStInstCode::SHARED:
- O << ".shared";
- break;
- case NVPTX::PTXLdStInstCode::LOCAL:
- O << ".local";
- break;
- case NVPTX::PTXLdStInstCode::PARAM:
- O << ".param";
- break;
- case NVPTX::PTXLdStInstCode::CONSTANT:
- O << ".const";
- break;
- case NVPTX::PTXLdStInstCode::GENERIC:
- if (!nvptxSubtarget.hasGenericLdSt())
- O << ".global";
- break;
- default:
- llvm_unreachable("Wrong Address Space");
- }
- } else if (!strcmp(Modifier, "sign")) {
- if (Imm == NVPTX::PTXLdStInstCode::Signed)
- O << "s";
- else if (Imm == NVPTX::PTXLdStInstCode::Unsigned)
- O << "u";
- else
- O << "f";
- } else if (!strcmp(Modifier, "vec")) {
- if (Imm == NVPTX::PTXLdStInstCode::V2)
- O << ".v2";
- else if (Imm == NVPTX::PTXLdStInstCode::V4)
- O << ".v4";
- } else
- llvm_unreachable("Unknown Modifier");
- } else
- llvm_unreachable("Empty Modifier");
-}
void NVPTXAsmPrinter::emitDeclaration(const Function *F, raw_ostream &O) {
@@ -702,7 +685,7 @@ void NVPTXAsmPrinter::emitDeclaration(const Function *F, raw_ostream &O) {
else
O << ".func ";
printReturnValStr(F, O);
- O << *Mang->getSymbol(F) << "\n";
+ O << *getSymbol(F) << "\n";
emitFunctionParamList(F, O);
O << ";\n";
}
@@ -912,7 +895,7 @@ bool NVPTXAsmPrinter::doInitialization(Module &M) {
const_cast<TargetLoweringObjectFile &>(getObjFileLowering())
.Initialize(OutContext, TM);
- Mang = new Mangler(OutContext, *TM.getDataLayout());
+ Mang = new Mangler(&TM);
// Emit header before any dwarf directives are emitted below.
emitHeader(M, OS1);
@@ -921,6 +904,16 @@ bool NVPTXAsmPrinter::doInitialization(Module &M) {
// Already commented out
//bool Result = AsmPrinter::doInitialization(M);
+ // Emit module-level inline asm if it exists.
+ if (!M.getModuleInlineAsm().empty()) {
+ OutStreamer.AddComment("Start of file scope inline assembly");
+ OutStreamer.AddBlankLine();
+ OutStreamer.EmitRawText(StringRef(M.getModuleInlineAsm()));
+ OutStreamer.AddBlankLine();
+ OutStreamer.AddComment("End of file scope inline assembly");
+ OutStreamer.AddBlankLine();
+ }
+
if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)
recordAndEmitFilenames(M);
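doInitialization now forwards any module-level ("file scope") inline assembly verbatim into the PTX output, bracketed by comments, so IR modules that carry module asm are no longer dropped silently. Below is a minimal sketch of producing such a module through the C++ API; the module name and asm text are placeholders, not taken from the patch.

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

// Sketch: attach file-scope inline assembly to a module; the printer above
// now emits this text verbatim near the top of the .ptx file.
llvm::Module *makeModuleWithAsm(llvm::LLVMContext &Ctx) {
  llvm::Module *M = new llvm::Module("kernels", Ctx);
  M->appendModuleInlineAsm("// hand-written PTX could go here");
  return M;
}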
@@ -1222,12 +1215,11 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
else
O << getPTXFundamentalTypeStr(ETy, false);
O << " ";
- O << *Mang->getSymbol(GVar);
+ O << *getSymbol(GVar);
// Ptx allows variable initilization only for constant and global state
// spaces.
if (((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) ||
- (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST_NOT_GEN) ||
(PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) &&
GVar->hasInitializer()) {
const Constant *Initializer = GVar->getInitializer();
@@ -1251,7 +1243,6 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
// Ptx allows variable initilization only for constant and
// global state spaces.
if (((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) ||
- (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST_NOT_GEN) ||
(PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) &&
GVar->hasInitializer()) {
const Constant *Initializer = GVar->getInitializer();
@@ -1260,15 +1251,15 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
bufferAggregateConstant(Initializer, &aggBuffer);
if (aggBuffer.numSymbols) {
if (nvptxSubtarget.is64Bit()) {
- O << " .u64 " << *Mang->getSymbol(GVar) << "[";
+ O << " .u64 " << *getSymbol(GVar) << "[";
O << ElementSize / 8;
} else {
- O << " .u32 " << *Mang->getSymbol(GVar) << "[";
+ O << " .u32 " << *getSymbol(GVar) << "[";
O << ElementSize / 4;
}
O << "]";
} else {
- O << " .b8 " << *Mang->getSymbol(GVar) << "[";
+ O << " .b8 " << *getSymbol(GVar) << "[";
O << ElementSize;
O << "]";
}
@@ -1276,7 +1267,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
aggBuffer.print();
O << "}";
} else {
- O << " .b8 " << *Mang->getSymbol(GVar);
+ O << " .b8 " << *getSymbol(GVar);
if (ElementSize) {
O << "[";
O << ElementSize;
@@ -1284,7 +1275,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
}
}
} else {
- O << " .b8 " << *Mang->getSymbol(GVar);
+ O << " .b8 " << *getSymbol(GVar);
if (ElementSize) {
O << "[";
O << ElementSize;
@@ -1322,14 +1313,6 @@ void NVPTXAsmPrinter::emitPTXAddressSpace(unsigned int AddressSpace,
O << "global";
break;
case llvm::ADDRESS_SPACE_CONST:
- // This logic should be consistent with that in
- // getCodeAddrSpace() (NVPTXISelDATToDAT.cpp)
- if (nvptxSubtarget.hasGenericLdSt())
- O << "global";
- else
- O << "const";
- break;
- case llvm::ADDRESS_SPACE_CONST_NOT_GEN:
O << "const";
break;
case llvm::ADDRESS_SPACE_SHARED:
@@ -1399,7 +1382,7 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
O << " .";
O << getPTXFundamentalTypeStr(ETy);
O << " ";
- O << *Mang->getSymbol(GVar);
+ O << *getSymbol(GVar);
return;
}
@@ -1414,7 +1397,7 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
case Type::ArrayTyID:
case Type::VectorTyID:
ElementSize = TD->getTypeStoreSize(ETy);
- O << " .b8 " << *Mang->getSymbol(GVar) << "[";
+ O << " .b8 " << *getSymbol(GVar) << "[";
if (ElementSize) {
O << itostr(ElementSize);
}
@@ -1469,7 +1452,7 @@ void NVPTXAsmPrinter::printParamName(Function::const_arg_iterator I,
int paramIndex, raw_ostream &O) {
if ((nvptxSubtarget.getDrvInterface() == NVPTX::NVCL) ||
(nvptxSubtarget.getDrvInterface() == NVPTX::CUDA))
- O << *Mang->getSymbol(I->getParent()) << "_param_" << paramIndex;
+ O << *getSymbol(I->getParent()) << "_param_" << paramIndex;
else {
std::string argName = I->getName();
const char *p = argName.c_str();
@@ -1528,13 +1511,13 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
if (llvm::isImage(*I)) {
std::string sname = I->getName();
if (llvm::isImageWriteOnly(*I))
- O << "\t.param .surfref " << *Mang->getSymbol(F) << "_param_"
+ O << "\t.param .surfref " << *getSymbol(F) << "_param_"
<< paramIndex;
else // Default image is read_only
- O << "\t.param .texref " << *Mang->getSymbol(F) << "_param_"
+ O << "\t.param .texref " << *getSymbol(F) << "_param_"
<< paramIndex;
} else // Should be llvm::isSampler(*I)
- O << "\t.param .samplerref " << *Mang->getSymbol(F) << "_param_"
+ O << "\t.param .samplerref " << *getSymbol(F) << "_param_"
<< paramIndex;
continue;
}
@@ -1569,14 +1552,13 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
default:
O << ".ptr ";
break;
- case llvm::ADDRESS_SPACE_CONST_NOT_GEN:
+ case llvm::ADDRESS_SPACE_CONST:
O << ".ptr .const ";
break;
case llvm::ADDRESS_SPACE_SHARED:
O << ".ptr .shared ";
break;
case llvm::ADDRESS_SPACE_GLOBAL:
- case llvm::ADDRESS_SPACE_CONST:
O << ".ptr .global ";
break;
}
@@ -1709,48 +1691,36 @@ void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
for (unsigned i = 0; i < numVRs; i++) {
unsigned int vr = TRI->index2VirtReg(i);
const TargetRegisterClass *RC = MRI->getRegClass(vr);
- std::map<unsigned, unsigned> &regmap = VRidGlobal2LocalMap[RC->getID()];
+ DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
int n = regmap.size();
regmap.insert(std::make_pair(vr, n + 1));
}
// Emit register declarations
// @TODO: Extract out the real register usage
- O << "\t.reg .pred %p<" << NVPTXNumRegisters << ">;\n";
- O << "\t.reg .s16 %rc<" << NVPTXNumRegisters << ">;\n";
- O << "\t.reg .s16 %rs<" << NVPTXNumRegisters << ">;\n";
- O << "\t.reg .s32 %r<" << NVPTXNumRegisters << ">;\n";
- O << "\t.reg .s64 %rl<" << NVPTXNumRegisters << ">;\n";
- O << "\t.reg .f32 %f<" << NVPTXNumRegisters << ">;\n";
- O << "\t.reg .f64 %fl<" << NVPTXNumRegisters << ">;\n";
+ // O << "\t.reg .pred %p<" << NVPTXNumRegisters << ">;\n";
+ // O << "\t.reg .s16 %rc<" << NVPTXNumRegisters << ">;\n";
+ // O << "\t.reg .s16 %rs<" << NVPTXNumRegisters << ">;\n";
+ // O << "\t.reg .s32 %r<" << NVPTXNumRegisters << ">;\n";
+ // O << "\t.reg .s64 %rl<" << NVPTXNumRegisters << ">;\n";
+ // O << "\t.reg .f32 %f<" << NVPTXNumRegisters << ">;\n";
+ // O << "\t.reg .f64 %fl<" << NVPTXNumRegisters << ">;\n";
// Emit declaration of the virtual registers or 'physical' registers for
// each register class
- //for (unsigned i=0; i< numRegClasses; i++) {
- // std::map<unsigned, unsigned> &regmap = VRidGlobal2LocalMap[i];
- // const TargetRegisterClass *RC = TRI->getRegClass(i);
- // std::string rcname = getNVPTXRegClassName(RC);
- // std::string rcStr = getNVPTXRegClassStr(RC);
- // //int n = regmap.size();
- // if (!isNVPTXVectorRegClass(RC)) {
- // O << "\t.reg " << rcname << " \t" << rcStr << "<"
- // << NVPTXNumRegisters << ">;\n";
- // }
-
- // Only declare those registers that may be used. And do not emit vector
- // registers as
- // they are all elementized to scalar registers.
- //if (n && !isNVPTXVectorRegClass(RC)) {
- // if (RegAllocNilUsed) {
- // O << "\t.reg " << rcname << " \t" << rcStr << "<" << (n+1)
- // << ">;\n";
- // }
- // else {
- // O << "\t.reg " << rcname << " \t" << StrToUpper(rcStr)
- // << "<" << 32 << ">;\n";
- // }
- //}
- //}
+ for (unsigned i=0; i< TRI->getNumRegClasses(); i++) {
+ const TargetRegisterClass *RC = TRI->getRegClass(i);
+ DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
+ std::string rcname = getNVPTXRegClassName(RC);
+ std::string rcStr = getNVPTXRegClassStr(RC);
+ int n = regmap.size();
+
+ // Only declare those registers that may be used.
+ if (n) {
+ O << "\t.reg " << rcname << " \t" << rcStr << "<" << (n+1)
+ << ">;\n";
+ }
+ }
OutStreamer.EmitRawText(O.str());
}
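Register declarations are now derived from actual usage: every virtual register gets a 1-based index within its register class (regmap.size() + 1 at insertion time), and only classes with a non-zero count get a ".reg <type> <prefix><n+1>;" line, replacing the old fixed NVPTXNumRegisters-sized banks. The self-contained sketch below illustrates the numbering scheme; std::map stands in for the DenseMap used above, and the class names are placeholders.

#include <cstdio>
#include <map>
#include <string>

// Sketch: assign sequential 1-based numbers to virtual registers per class,
// then emit one ".reg" declaration per class actually used.
int main() {
  const char *Class[] = { "Int32", "Int32", "Float32", "Int32" }; // vregs 0..3
  std::map<std::string, std::map<unsigned, unsigned> > VRegMapping;
  for (unsigned VR = 0; VR < 4; ++VR) {
    std::map<unsigned, unsigned> &RegMap = VRegMapping[Class[VR]];
    unsigned N = RegMap.size();
    RegMap.insert(std::make_pair(VR, N + 1));  // same scheme as the patch
  }
  // ".s32"/"%r" and ".f32"/"%f" correspond to getNVPTXRegClassName/Str().
  std::printf("\t.reg .s32 \t%%r<%u>;\n",
              (unsigned)VRegMapping["Int32"].size() + 1);    // -> %r<4>
  std::printf("\t.reg .f32 \t%%f<%u>;\n",
              (unsigned)VRegMapping["Float32"].size() + 1);  // -> %f<2>
  return 0;
}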
@@ -1794,13 +1764,13 @@ void NVPTXAsmPrinter::printScalarConstant(const Constant *CPV, raw_ostream &O) {
return;
}
if (const GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
- O << *Mang->getSymbol(GVar);
+ O << *getSymbol(GVar);
return;
}
if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
const Value *v = Cexpr->stripPointerCasts();
if (const GlobalValue *GVar = dyn_cast<GlobalValue>(v)) {
- O << *Mang->getSymbol(GVar);
+ O << *getSymbol(GVar);
return;
} else {
O << *LowerConstant(CPV, *this);
@@ -1918,7 +1888,7 @@ void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
case Type::VectorTyID:
case Type::StructTyID: {
if (isa<ConstantArray>(CPV) || isa<ConstantVector>(CPV) ||
- isa<ConstantStruct>(CPV)) {
+ isa<ConstantStruct>(CPV) || isa<ConstantDataSequential>(CPV)) {
int ElementSize = TD->getTypeAllocSize(CPV->getType());
bufferAggregateConstant(CPV, aggBuffer);
if (Bytes > ElementSize)
@@ -1991,41 +1961,6 @@ bool NVPTXAsmPrinter::isImageType(const Type *Ty) {
return false;
}
-/// PrintAsmOperand - Print out an operand for an inline asm expression.
-///
-bool NVPTXAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode, raw_ostream &O) {
- if (ExtraCode && ExtraCode[0]) {
- if (ExtraCode[1] != 0)
- return true; // Unknown modifier.
-
- switch (ExtraCode[0]) {
- default:
- // See if this is a generic print operand
- return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
- case 'r':
- break;
- }
- }
-
- printOperand(MI, OpNo, O);
-
- return false;
-}
-
-bool NVPTXAsmPrinter::PrintAsmMemoryOperand(
- const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant,
- const char *ExtraCode, raw_ostream &O) {
- if (ExtraCode && ExtraCode[0])
- return true; // Unknown modifier
-
- O << '[';
- printMemOperand(MI, OpNo, O);
- O << ']';
-
- return false;
-}
bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) {
switch (MI.getOpcode()) {
@@ -2040,7 +1975,6 @@ bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) {
case NVPTX::CallArgI32:
case NVPTX::CallArgI32imm:
case NVPTX::CallArgI64:
- case NVPTX::CallArgI8:
case NVPTX::CallArgParam:
case NVPTX::CallVoidInst:
case NVPTX::CallVoidInstReg:
@@ -2058,10 +1992,6 @@ bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) {
case NVPTX::StoreParamI32:
case NVPTX::StoreParamI64:
case NVPTX::StoreParamI8:
- case NVPTX::StoreParamS32I8:
- case NVPTX::StoreParamU32I8:
- case NVPTX::StoreParamS32I16:
- case NVPTX::StoreParamU32I16:
case NVPTX::StoreRetvalF32:
case NVPTX::StoreRetvalF64:
case NVPTX::StoreRetvalI16:
@@ -2074,7 +2004,6 @@ bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) {
case NVPTX::LastCallArgI32:
case NVPTX::LastCallArgI32imm:
case NVPTX::LastCallArgI64:
- case NVPTX::LastCallArgI8:
case NVPTX::LastCallArgParam:
case NVPTX::LoadParamMemF32:
case NVPTX::LoadParamMemF64:
@@ -2082,12 +2011,6 @@ bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) {
case NVPTX::LoadParamMemI32:
case NVPTX::LoadParamMemI64:
case NVPTX::LoadParamMemI8:
- case NVPTX::LoadParamRegF32:
- case NVPTX::LoadParamRegF64:
- case NVPTX::LoadParamRegI16:
- case NVPTX::LoadParamRegI32:
- case NVPTX::LoadParamRegI64:
- case NVPTX::LoadParamRegI8:
case NVPTX::PrototypeInst:
case NVPTX::DBG_VALUE:
return true;
@@ -2095,6 +2018,116 @@ bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) {
return false;
}
+/// PrintAsmOperand - Print out an operand for an inline asm expression.
+///
+bool NVPTXAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode, raw_ostream &O) {
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0)
+ return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
+ case 'r':
+ break;
+ }
+ }
+
+ printOperand(MI, OpNo, O);
+
+ return false;
+}
+
+bool NVPTXAsmPrinter::PrintAsmMemoryOperand(
+ const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant,
+ const char *ExtraCode, raw_ostream &O) {
+ if (ExtraCode && ExtraCode[0])
+ return true; // Unknown modifier
+
+ O << '[';
+ printMemOperand(MI, OpNo, O);
+ O << ']';
+
+ return false;
+}
+
+void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
+ raw_ostream &O, const char *Modifier) {
+ const MachineOperand &MO = MI->getOperand(opNum);
+ switch (MO.getType()) {
+ case MachineOperand::MO_Register:
+ if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ if (MO.getReg() == NVPTX::VRDepot)
+ O << DEPOTNAME << getFunctionNumber();
+ else
+ O << NVPTXInstPrinter::getRegisterName(MO.getReg());
+ } else {
+ emitVirtualRegister(MO.getReg(), O);
+ }
+ return;
+
+ case MachineOperand::MO_Immediate:
+ if (!Modifier)
+ O << MO.getImm();
+ else if (strstr(Modifier, "vec") == Modifier)
+ printVecModifiedImmediate(MO, Modifier, O);
+ else
+ llvm_unreachable(
+ "Don't know how to handle modifier on immediate operand");
+ return;
+
+ case MachineOperand::MO_FPImmediate:
+ printFPConstant(MO.getFPImm(), O);
+ break;
+
+ case MachineOperand::MO_GlobalAddress:
+ O << *getSymbol(MO.getGlobal());
+ break;
+
+ case MachineOperand::MO_ExternalSymbol: {
+ const char *symbname = MO.getSymbolName();
+ if (strstr(symbname, ".PARAM") == symbname) {
+ unsigned index;
+ sscanf(symbname + 6, "%u[];", &index);
+ printParamName(index, O);
+ } else if (strstr(symbname, ".HLPPARAM") == symbname) {
+ unsigned index;
+ sscanf(symbname + 9, "%u[];", &index);
+ O << *CurrentFnSym << "_param_" << index << "_offset";
+ } else
+ O << symbname;
+ break;
+ }
+
+ case MachineOperand::MO_MachineBasicBlock:
+ O << *MO.getMBB()->getSymbol();
+ return;
+
+ default:
+ llvm_unreachable("Operand type not supported.");
+ }
+}
+
+void NVPTXAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum,
+ raw_ostream &O, const char *Modifier) {
+ printOperand(MI, opNum, O);
+
+ if (Modifier && !strcmp(Modifier, "add")) {
+ O << ", ";
+ printOperand(MI, opNum + 1, O);
+ } else {
+ if (MI->getOperand(opNum + 1).isImm() &&
+ MI->getOperand(opNum + 1).getImm() == 0)
+ return; // don't print ',0' or '+0'
+ O << "+";
+ printOperand(MI, opNum + 1, O);
+ }
+}
+
+
// Force static initialization.
extern "C" void LLVMInitializeNVPTXBackendAsmPrinter() {
RegisterAsmPrinter<NVPTXAsmPrinter> X(TheNVPTXTarget32);
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.h b/lib/Target/NVPTX/NVPTXAsmPrinter.h
index 7faa6b265b9d..3abe5d166826 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.h
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.h
@@ -155,7 +155,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter {
if (pos == nextSymbolPos) {
const Value *v = Symbols[nSym];
if (const GlobalValue *GVar = dyn_cast<GlobalValue>(v)) {
- MCSymbol *Name = AP.Mang->getSymbol(GVar);
+ MCSymbol *Name = AP.getSymbol(GVar);
O << *Name;
} else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(v)) {
O << *nvptx::LowerConstant(Cexpr, AP);
@@ -188,16 +188,17 @@ private:
void EmitFunctionEntryLabel();
void EmitFunctionBodyStart();
void EmitFunctionBodyEnd();
+ void emitImplicitDef(const MachineInstr *MI) const;
void EmitInstruction(const MachineInstr *);
+ void lowerToMCInst(const MachineInstr *MI, MCInst &OutMI);
+ bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp);
+ MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol);
+ unsigned encodeVirtualRegister(unsigned Reg);
void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const {}
void printGlobalVariable(const GlobalVariable *GVar);
- void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
- const char *Modifier = 0);
- void printLdStCode(const MachineInstr *MI, int opNum, raw_ostream &O,
- const char *Modifier = 0);
void printVecModifiedImmediate(const MachineOperand &MO, const char *Modifier,
raw_ostream &O);
void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
@@ -213,22 +214,23 @@ private:
void emitGlobals(const Module &M);
void emitHeader(Module &M, raw_ostream &O);
void emitKernelFunctionDirectives(const Function &F, raw_ostream &O) const;
- void emitVirtualRegister(unsigned int vr, bool isVec, raw_ostream &O);
+ void emitVirtualRegister(unsigned int vr, raw_ostream &);
void emitFunctionExternParamList(const MachineFunction &MF);
void emitFunctionParamList(const Function *, raw_ostream &O);
void emitFunctionParamList(const MachineFunction &MF, raw_ostream &O);
void setAndEmitFunctionVirtualRegisters(const MachineFunction &MF);
void emitFunctionTempData(const MachineFunction &MF, unsigned &FrameSize);
bool isImageType(const Type *Ty);
+ void printReturnValStr(const Function *, raw_ostream &O);
+ void printReturnValStr(const MachineFunction &MF, raw_ostream &O);
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &);
+ void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
+ const char *Modifier = 0);
bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &);
- void printReturnValStr(const Function *, raw_ostream &O);
- void printReturnValStr(const MachineFunction &MF, raw_ostream &O);
-
protected:
bool doInitialization(Module &M);
bool doFinalization(Module &M);
@@ -243,7 +245,9 @@ private:
// The contents are specific for each
// MachineFunction. But the size of the
// array is not.
- std::map<unsigned, unsigned> *VRidGlobal2LocalMap;
+ typedef DenseMap<unsigned, unsigned> VRegMap;
+ typedef DenseMap<const TargetRegisterClass *, VRegMap> VRegRCMap;
+ VRegRCMap VRegMapping;
// cache the subtarget here.
const NVPTXSubtarget &nvptxSubtarget;
// Build the map between type name and ID based on module's type
@@ -281,7 +285,6 @@ public:
: AsmPrinter(TM, Streamer),
nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
CurrentBankselLabelInBasicBlock = "";
- VRidGlobal2LocalMap = NULL;
reader = NULL;
}
@@ -292,7 +295,7 @@ public:
bool ignoreLoc(const MachineInstr &);
- virtual void getVirtualRegisterName(unsigned, bool, raw_ostream &);
+ std::string getVirtualRegisterName(unsigned) const;
DebugLoc prevDebugLoc;
void emitLineNumberAsDotLoc(const MachineInstr &);
diff --git a/lib/Target/NVPTX/NVPTXFrameLowering.cpp b/lib/Target/NVPTX/NVPTXFrameLowering.cpp
index 6533da5102b0..9030584f06fc 100644
--- a/lib/Target/NVPTX/NVPTXFrameLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXFrameLowering.cpp
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -36,30 +37,24 @@ void NVPTXFrameLowering::emitPrologue(MachineFunction &MF) const {
// in the BB, so giving it no debug location.
DebugLoc dl = DebugLoc();
- if (tm.getSubtargetImpl()->hasGenericLdSt()) {
- // mov %SPL, %depot;
- // cvta.local %SP, %SPL;
- if (is64bit) {
- MachineInstr *MI = BuildMI(
- MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes_64),
- NVPTX::VRFrame).addReg(NVPTX::VRFrameLocal);
- BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::IMOV64rr),
- NVPTX::VRFrameLocal).addReg(NVPTX::VRDepot);
- } else {
- MachineInstr *MI = BuildMI(
- MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes),
- NVPTX::VRFrame).addReg(NVPTX::VRFrameLocal);
- BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::IMOV32rr),
- NVPTX::VRFrameLocal).addReg(NVPTX::VRDepot);
- }
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ // mov %SPL, %depot;
+ // cvta.local %SP, %SPL;
+ if (is64bit) {
+ unsigned LocalReg = MRI.createVirtualRegister(&NVPTX::Int64RegsRegClass);
+ MachineInstr *MI = BuildMI(
+ MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes_64),
+ NVPTX::VRFrame).addReg(LocalReg);
+ BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::MOV_DEPOT_ADDR_64),
+ LocalReg).addImm(MF.getFunctionNumber());
} else {
- // mov %SP, %depot;
- if (is64bit)
- BuildMI(MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::IMOV64rr),
- NVPTX::VRFrame).addReg(NVPTX::VRDepot);
- else
- BuildMI(MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::IMOV32rr),
- NVPTX::VRFrame).addReg(NVPTX::VRDepot);
+ unsigned LocalReg = MRI.createVirtualRegister(&NVPTX::Int32RegsRegClass);
+ MachineInstr *MI = BuildMI(
+ MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes),
+ NVPTX::VRFrame).addReg(LocalReg);
+ BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::MOV_DEPOT_ADDR),
+ LocalReg).addImm(MF.getFunctionNumber());
}
}
}
diff --git a/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp b/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
index 1077c46fb406..9fb0dd88b758 100644
--- a/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
+++ b/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
@@ -142,7 +142,7 @@ bool GenericToNVVM::runOnModule(Module &M) {
GlobalVariable *GV = I->first;
GlobalVariable *NewGV = I->second;
++I;
- Constant *BitCastNewGV = ConstantExpr::getBitCast(NewGV, GV->getType());
+ Constant *BitCastNewGV = ConstantExpr::getPointerCast(NewGV, GV->getType());
// At this point, the remaining uses of GV should be found only in global
// variable initializers, as other uses have been already been removed
// while walking through the instructions in function definitions.
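The switch from getBitCast to getPointerCast in the hunk above is needed because the cloned global now lives in a non-generic address space, and with the addrspacecast operation introduced in this release a plain bitcast between pointers in different address spaces is rejected; getPointerCast picks the appropriate constant cast instead (an addrspacecast when the address spaces differ, a bitcast otherwise). A minimal sketch of the call, under that assumed context:

#include "llvm/IR/Constants.h"

// Sketch: cast the relocated global's address back to the original
// (generic address space) pointer type it is still referenced by.
llvm::Constant *castBack(llvm::Constant *NewGV, llvm::Type *OrigTy) {
  return llvm::ConstantExpr::getPointerCast(NewGV, OrigTy);
}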
@@ -384,7 +384,7 @@ void GenericToNVVM::remapNamedMDNode(Module *M, NamedMDNode *N) {
// Replace the old operands with the new operands.
N->dropAllReferences();
- for (SmallVector<MDNode *, 16>::iterator I = NewOperands.begin(),
+ for (SmallVectorImpl<MDNode *>::iterator I = NewOperands.begin(),
E = NewOperands.end();
I != E; ++I) {
N->addOperand(*I);
diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index d4378c2322d4..4b8b306a705a 100644
--- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -25,28 +25,29 @@
using namespace llvm;
-static cl::opt<bool> UseFMADInstruction(
- "nvptx-mad-enable", cl::ZeroOrMore,
- cl::desc("NVPTX Specific: Enable generating FMAD instructions"),
- cl::init(false));
-
static cl::opt<int>
-FMAContractLevel("nvptx-fma-level", cl::ZeroOrMore,
+FMAContractLevel("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
" 1: do it 2: do it aggressively"),
cl::init(2));
static cl::opt<int> UsePrecDivF32(
- "nvptx-prec-divf32", cl::ZeroOrMore,
+ "nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,
cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use"
" IEEE Compliant F32 div.rnd if avaiable."),
cl::init(2));
static cl::opt<bool>
-UsePrecSqrtF32("nvptx-prec-sqrtf32",
+UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden,
cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
cl::init(true));
+static cl::opt<bool>
+FtzEnabled("nvptx-f32ftz", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."),
+ cl::init(false));
+
+
/// createNVPTXISelDag - This pass converts a legalized DAG into a
/// NVPTX-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createNVPTXISelDag(NVPTXTargetMachine &TM,
@@ -58,12 +59,7 @@ NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm,
CodeGenOpt::Level OptLevel)
: SelectionDAGISel(tm, OptLevel),
Subtarget(tm.getSubtarget<NVPTXSubtarget>()) {
- // Always do fma.f32 fpcontract if the target supports the instruction.
- // Always do fma.f64 fpcontract if the target supports the instruction.
- // Do mad.f32 is nvptx-mad-enable is specified and the target does not
- // support fma.f32.
- doFMADF32 = (OptLevel > 0) && UseFMADInstruction && !Subtarget.hasFMAF32();
doFMAF32 = (OptLevel > 0) && Subtarget.hasFMAF32() && (FMAContractLevel >= 1);
doFMAF64 = (OptLevel > 0) && Subtarget.hasFMAF64() && (FMAContractLevel >= 1);
doFMAF32AGG =
@@ -71,28 +67,61 @@ NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm,
doFMAF64AGG =
(OptLevel > 0) && Subtarget.hasFMAF64() && (FMAContractLevel == 2);
- allowFMA = (FMAContractLevel >= 1) || UseFMADInstruction;
-
- UseF32FTZ = false;
+ allowFMA = (FMAContractLevel >= 1);
doMulWide = (OptLevel > 0);
+}
- // Decide how to translate f32 div
- do_DIVF32_PREC = UsePrecDivF32;
- // Decide how to translate f32 sqrt
- do_SQRTF32_PREC = UsePrecSqrtF32;
- // sm less than sm_20 does not support div.rnd. Use div.full.
- if (do_DIVF32_PREC == 2 && !Subtarget.reqPTX20())
- do_DIVF32_PREC = 1;
+int NVPTXDAGToDAGISel::getDivF32Level() const {
+ if (UsePrecDivF32.getNumOccurrences() > 0) {
+ // If nvptx-prec-div32=N is used on the command-line, always honor it
+ return UsePrecDivF32;
+ } else {
+ // Otherwise, use div.approx if fast math is enabled
+ if (TM.Options.UnsafeFPMath)
+ return 0;
+ else
+ return 2;
+ }
+}
+
+bool NVPTXDAGToDAGISel::usePrecSqrtF32() const {
+ if (UsePrecSqrtF32.getNumOccurrences() > 0) {
+ // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
+ return UsePrecSqrtF32;
+ } else {
+ // Otherwise, use sqrt.approx if fast math is enabled
+ if (TM.Options.UnsafeFPMath)
+ return false;
+ else
+ return true;
+ }
+}
+bool NVPTXDAGToDAGISel::useF32FTZ() const {
+ if (FtzEnabled.getNumOccurrences() > 0) {
+ // If nvptx-f32ftz is used on the command-line, always honor it
+ return FtzEnabled;
+ } else {
+ const Function *F = MF->getFunction();
+ // Otherwise, check for an nvptx-f32ftz attribute on the function
+ if (F->hasFnAttribute("nvptx-f32ftz"))
+ return (F->getAttributes().getAttribute(AttributeSet::FunctionIndex,
+ "nvptx-f32ftz")
+ .getValueAsString() == "true");
+ else
+ return false;
+ }
}
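The hard-wired do_DIVF32_PREC / do_SQRTF32_PREC / UseF32FTZ fields are replaced by query helpers with a uniform precedence: an explicit -nvptx-prec-divf32 / -nvptx-prec-sqrtf32 / -nvptx-f32ftz command-line value always wins, and otherwise the decision follows TargetOptions::UnsafeFPMath (plus, for FTZ, a per-function "nvptx-f32ftz" string attribute). The compact stand-alone sketch below shows only that precedence rule; the names are illustrative, not the in-tree API.

#include <cstdio>

// 0 = div.approx, 1 = div.full, 2 = IEEE-compliant div.rn
// (the same meaning as -nvptx-prec-divf32 above).
int selectDivF32Level(bool FlagGivenOnCmdLine, int FlagValue, bool UnsafeFPMath) {
  if (FlagGivenOnCmdLine)
    return FlagValue;            // explicit option always honored
  return UnsafeFPMath ? 0 : 2;   // otherwise fast-math picks the fast form
}

int main() {
  std::printf("%d %d %d\n",
              selectDivF32Level(true, 1, true),    // 1: flag wins
              selectDivF32Level(false, 0, true),   // 0: fast-math -> approx
              selectDivF32Level(false, 0, false)); // 2: default -> IEEE
  return 0;
}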
/// Select - Select instructions not customized! Used for
/// expanded, promoted and normal instructions.
SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) {
- if (N->isMachineOpcode())
+ if (N->isMachineOpcode()) {
+ N->setNodeId(-1);
return NULL; // Already selected.
+ }
SDNode *ResNode = NULL;
switch (N->getOpcode()) {
@@ -116,6 +145,23 @@ SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) {
case NVPTXISD::StoreV4:
ResNode = SelectStoreVector(N);
break;
+ case NVPTXISD::LoadParam:
+ case NVPTXISD::LoadParamV2:
+ case NVPTXISD::LoadParamV4:
+ ResNode = SelectLoadParam(N);
+ break;
+ case NVPTXISD::StoreRetval:
+ case NVPTXISD::StoreRetvalV2:
+ case NVPTXISD::StoreRetvalV4:
+ ResNode = SelectStoreRetval(N);
+ break;
+ case NVPTXISD::StoreParam:
+ case NVPTXISD::StoreParamV2:
+ case NVPTXISD::StoreParamV4:
+ case NVPTXISD::StoreParamS32:
+ case NVPTXISD::StoreParamU32:
+ ResNode = SelectStoreParam(N);
+ break;
default:
break;
}
@@ -127,42 +173,26 @@ SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) {
static unsigned int getCodeAddrSpace(MemSDNode *N,
const NVPTXSubtarget &Subtarget) {
const Value *Src = N->getSrcValue();
+
if (!Src)
- return NVPTX::PTXLdStInstCode::LOCAL;
+ return NVPTX::PTXLdStInstCode::GENERIC;
if (const PointerType *PT = dyn_cast<PointerType>(Src->getType())) {
switch (PT->getAddressSpace()) {
- case llvm::ADDRESS_SPACE_LOCAL:
- return NVPTX::PTXLdStInstCode::LOCAL;
- case llvm::ADDRESS_SPACE_GLOBAL:
- return NVPTX::PTXLdStInstCode::GLOBAL;
- case llvm::ADDRESS_SPACE_SHARED:
- return NVPTX::PTXLdStInstCode::SHARED;
- case llvm::ADDRESS_SPACE_CONST_NOT_GEN:
- return NVPTX::PTXLdStInstCode::CONSTANT;
- case llvm::ADDRESS_SPACE_GENERIC:
- return NVPTX::PTXLdStInstCode::GENERIC;
- case llvm::ADDRESS_SPACE_PARAM:
- return NVPTX::PTXLdStInstCode::PARAM;
- case llvm::ADDRESS_SPACE_CONST:
- // If the arch supports generic address space, translate it to GLOBAL
- // for correctness.
- // If the arch does not support generic address space, then the arch
- // does not really support ADDRESS_SPACE_CONST, translate it to
- // to CONSTANT for better performance.
- if (Subtarget.hasGenericLdSt())
- return NVPTX::PTXLdStInstCode::GLOBAL;
- else
- return NVPTX::PTXLdStInstCode::CONSTANT;
- default:
- break;
+ case llvm::ADDRESS_SPACE_LOCAL: return NVPTX::PTXLdStInstCode::LOCAL;
+ case llvm::ADDRESS_SPACE_GLOBAL: return NVPTX::PTXLdStInstCode::GLOBAL;
+ case llvm::ADDRESS_SPACE_SHARED: return NVPTX::PTXLdStInstCode::SHARED;
+ case llvm::ADDRESS_SPACE_GENERIC: return NVPTX::PTXLdStInstCode::GENERIC;
+ case llvm::ADDRESS_SPACE_PARAM: return NVPTX::PTXLdStInstCode::PARAM;
+ case llvm::ADDRESS_SPACE_CONST: return NVPTX::PTXLdStInstCode::CONSTANT;
+ default: break;
}
}
- return NVPTX::PTXLdStInstCode::LOCAL;
+ return NVPTX::PTXLdStInstCode::GENERIC;
}
SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT LoadedVT = LD->getMemoryVT();
SDNode *NVPTXLD = NULL;
@@ -205,7 +235,8 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
// type is integer
// Float : ISD::NON_EXTLOAD or ISD::EXTLOAD and the type is float
MVT ScalarVT = SimpleVT.getScalarType();
- unsigned fromTypeWidth = ScalarVT.getSizeInBits();
+ // Read at least 8 bits (predicates are stored as 8-bit values)
+ unsigned fromTypeWidth = std::max(8U, ScalarVT.getSizeInBits());
unsigned int fromType;
if ((LD->getExtensionType() == ISD::SEXTLOAD))
fromType = NVPTX::PTXLdStInstCode::Signed;
@@ -220,7 +251,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
SDValue Addr;
SDValue Offset, Base;
unsigned Opcode;
- MVT::SimpleValueType TargetVT = LD->getValueType(0).getSimpleVT().SimpleTy;
+ MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy;
if (SelectDirectAddr(N1, Addr)) {
switch (TargetVT) {
@@ -401,7 +432,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
SDValue Op1 = N->getOperand(1);
SDValue Addr, Offset, Base;
unsigned Opcode;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDNode *LD;
MemSDNode *MemSD = cast<MemSDNode>(N);
EVT LoadedVT = MemSD->getMemoryVT();
@@ -430,7 +461,8 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
// type is integer
// Float : ISD::NON_EXTLOAD or ISD::EXTLOAD and the type is float
MVT ScalarVT = SimpleVT.getScalarType();
- unsigned FromTypeWidth = ScalarVT.getSizeInBits();
+ // Read at least 8 bits (predicates are stored as 8-bit values)
+ unsigned FromTypeWidth = std::max(8U, ScalarVT.getSizeInBits());
unsigned int FromType;
// The last operand holds the original LoadSDNode::getExtensionType() value
unsigned ExtensionType = cast<ConstantSDNode>(
@@ -782,194 +814,478 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
SDValue Chain = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
unsigned Opcode;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDNode *LD;
+ MemSDNode *Mem = cast<MemSDNode>(N);
+ SDValue Base, Offset, Addr;
- EVT RetVT = N->getValueType(0);
+ EVT EltVT = Mem->getMemoryVT().getVectorElementType();
- // Select opcode
- if (Subtarget.is64Bit()) {
+ if (SelectDirectAddr(Op1, Addr)) {
switch (N->getOpcode()) {
default:
return NULL;
case NVPTXISD::LDGV2:
- switch (RetVT.getSimpleVT().SimpleTy) {
+ switch (EltVT.getSimpleVT().SimpleTy) {
default:
return NULL;
case MVT::i8:
- Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_avar;
break;
case MVT::i16:
- Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_avar;
break;
case MVT::i32:
- Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_avar;
break;
case MVT::i64:
- Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_avar;
break;
case MVT::f32:
- Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_avar;
break;
case MVT::f64:
- Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_avar;
break;
}
break;
- case NVPTXISD::LDGV4:
- switch (RetVT.getSimpleVT().SimpleTy) {
+ case NVPTXISD::LDUV2:
+ switch (EltVT.getSimpleVT().SimpleTy) {
default:
return NULL;
case MVT::i8:
- Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_avar;
break;
case MVT::i16:
- Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_avar;
break;
case MVT::i32:
- Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_avar;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_avar;
break;
case MVT::f32:
- Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_avar;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_avar;
break;
}
break;
- case NVPTXISD::LDUV2:
- switch (RetVT.getSimpleVT().SimpleTy) {
+ case NVPTXISD::LDGV4:
+ switch (EltVT.getSimpleVT().SimpleTy) {
default:
return NULL;
case MVT::i8:
- Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_avar;
break;
case MVT::i16:
- Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_avar;
break;
case MVT::i32:
- Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_64;
- break;
- case MVT::i64:
- Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_avar;
break;
case MVT::f32:
- Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_64;
- break;
- case MVT::f64:
- Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_avar;
break;
}
break;
case NVPTXISD::LDUV4:
- switch (RetVT.getSimpleVT().SimpleTy) {
+ switch (EltVT.getSimpleVT().SimpleTy) {
default:
return NULL;
case MVT::i8:
- Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_avar;
break;
case MVT::i16:
- Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_avar;
break;
case MVT::i32:
- Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_avar;
break;
case MVT::f32:
- Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_64;
+ Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_avar;
break;
}
break;
}
- } else {
- switch (N->getOpcode()) {
- default:
- return NULL;
- case NVPTXISD::LDGV2:
- switch (RetVT.getSimpleVT().SimpleTy) {
+
+ SDValue Ops[] = { Addr, Chain };
+ LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(),
+ ArrayRef<SDValue>(Ops, 2));
+ } else if (Subtarget.is64Bit()
+ ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
+ : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {
+ if (Subtarget.is64Bit()) {
+ switch (N->getOpcode()) {
default:
return NULL;
- case MVT::i8:
- Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_32;
- break;
- case MVT::i16:
- Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_32;
- break;
- case MVT::i32:
- Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_32;
+ case NVPTXISD::LDGV2:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_ari64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_ari64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_ari64;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_ari64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_ari64;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_ari64;
+ break;
+ }
break;
- case MVT::i64:
- Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_32;
+ case NVPTXISD::LDUV2:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_ari64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_ari64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_ari64;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_ari64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_ari64;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_ari64;
+ break;
+ }
break;
- case MVT::f32:
- Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_32;
+ case NVPTXISD::LDGV4:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_ari64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_ari64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_ari64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_ari64;
+ break;
+ }
break;
- case MVT::f64:
- Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_32;
+ case NVPTXISD::LDUV4:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_ari64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_ari64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_ari64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_ari64;
+ break;
+ }
break;
}
- break;
- case NVPTXISD::LDGV4:
- switch (RetVT.getSimpleVT().SimpleTy) {
+ } else {
+ switch (N->getOpcode()) {
default:
return NULL;
- case MVT::i8:
- Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_32;
+ case NVPTXISD::LDGV2:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_ari32;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_ari32;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_ari32;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_ari32;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_ari32;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_ari32;
+ break;
+ }
break;
- case MVT::i16:
- Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_32;
+ case NVPTXISD::LDUV2:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_ari32;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_ari32;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_ari32;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_ari32;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_ari32;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_ari32;
+ break;
+ }
break;
- case MVT::i32:
- Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_32;
+ case NVPTXISD::LDGV4:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_ari32;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_ari32;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_ari32;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_ari32;
+ break;
+ }
break;
- case MVT::f32:
- Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_32;
+ case NVPTXISD::LDUV4:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_ari32;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_ari32;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_ari32;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_ari32;
+ break;
+ }
break;
}
- break;
- case NVPTXISD::LDUV2:
- switch (RetVT.getSimpleVT().SimpleTy) {
+ }
+
+ SDValue Ops[] = { Base, Offset, Chain };
+
+ LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(),
+ ArrayRef<SDValue>(Ops, 3));
+ } else {
+ if (Subtarget.is64Bit()) {
+ switch (N->getOpcode()) {
default:
return NULL;
- case MVT::i8:
- Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_32;
- break;
- case MVT::i16:
- Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_32;
- break;
- case MVT::i32:
- Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_32;
+ case NVPTXISD::LDGV2:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_areg64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_areg64;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_areg64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_areg64;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_areg64;
+ break;
+ }
break;
- case MVT::i64:
- Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_32;
+ case NVPTXISD::LDUV2:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_areg64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_areg64;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_areg64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_areg64;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_areg64;
+ break;
+ }
break;
- case MVT::f32:
- Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_32;
+ case NVPTXISD::LDGV4:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_areg64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_areg64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_areg64;
+ break;
+ }
break;
- case MVT::f64:
- Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_32;
+ case NVPTXISD::LDUV4:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_areg64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_areg64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_areg64;
+ break;
+ }
break;
}
- break;
- case NVPTXISD::LDUV4:
- switch (RetVT.getSimpleVT().SimpleTy) {
+ } else {
+ switch (N->getOpcode()) {
default:
return NULL;
- case MVT::i8:
- Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_32;
+ case NVPTXISD::LDGV2:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg32;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_areg32;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_areg32;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_areg32;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_areg32;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_areg32;
+ break;
+ }
break;
- case MVT::i16:
- Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_32;
+ case NVPTXISD::LDUV2:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg32;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_areg32;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_areg32;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_areg32;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_areg32;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_areg32;
+ break;
+ }
break;
- case MVT::i32:
- Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_32;
+ case NVPTXISD::LDGV4:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg32;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_areg32;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_areg32;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_areg32;
+ break;
+ }
break;
- case MVT::f32:
- Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_32;
+ case NVPTXISD::LDUV4:
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg32;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_areg32;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_areg32;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_areg32;
+ break;
+ }
break;
}
- break;
}
- }
- SDValue Ops[] = { Op1, Chain };
- LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
+ SDValue Ops[] = { Op1, Chain };
+ LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(),
+ ArrayRef<SDValue>(Ops, 2));
+ }
MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
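The LDG/LDU vector selection above no longer picks a single _32/_64 opcode from the node's result type; it first tries a direct address (_avar), then a register-plus-immediate form (_ari32/_ari64 via SelectADDRri/SelectADDRri64), and finally falls back to a plain register address (_areg32/_areg64), switching on the memory element type throughout. The small sketch below only shows how the opcode-name suffix falls out of that decision; the helper is illustrative, while the real code selects concrete NVPTX::INT_PTX_LDG_*/INT_PTX_LDU_* opcodes.

#include <cstdio>

// Illustrative: matched addressing mode -> opcode suffix used by the
// INT_PTX_LDG_* / INT_PTX_LDU_* machine instructions above.
const char *ldgLduSuffix(bool DirectAddr, bool RegPlusImm, bool Is64Bit) {
  if (DirectAddr)
    return "_avar";                        // symbolic/direct address
  if (RegPlusImm)
    return Is64Bit ? "_ari64" : "_ari32";  // base register + immediate offset
  return Is64Bit ? "_areg64" : "_areg32";  // address entirely in a register
}

int main() {
  std::printf("%s\n", ldgLduSuffix(false, true, true));  // -> "_ari64"
  return 0;
}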
@@ -979,7 +1295,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
}
SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
StoreSDNode *ST = cast<StoreSDNode>(N);
EVT StoreVT = ST->getMemoryVT();
SDNode *NVPTXST = NULL;
@@ -1033,8 +1349,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
SDValue Addr;
SDValue Offset, Base;
unsigned Opcode;
- MVT::SimpleValueType SourceVT =
- N1.getNode()->getValueType(0).getSimpleVT().SimpleTy;
+ MVT::SimpleValueType SourceVT = N1.getNode()->getSimpleValueType(0).SimpleTy;
if (SelectDirectAddr(N2, Addr)) {
switch (SourceVT) {
@@ -1214,7 +1529,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
SDValue Op1 = N->getOperand(1);
SDValue Addr, Offset, Base;
unsigned Opcode;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDNode *ST;
EVT EltVT = Op1.getValueType();
MemSDNode *MemSD = cast<MemSDNode>(N);
@@ -1585,6 +1900,414 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
return ST;
}
+SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
+ SDValue Chain = Node->getOperand(0);
+ SDValue Offset = Node->getOperand(2);
+ SDValue Flag = Node->getOperand(3);
+ SDLoc DL(Node);
+ MemSDNode *Mem = cast<MemSDNode>(Node);
+
+ unsigned VecSize;
+ switch (Node->getOpcode()) {
+ default:
+ return NULL;
+ case NVPTXISD::LoadParam:
+ VecSize = 1;
+ break;
+ case NVPTXISD::LoadParamV2:
+ VecSize = 2;
+ break;
+ case NVPTXISD::LoadParamV4:
+ VecSize = 4;
+ break;
+ }
+
+ EVT EltVT = Node->getValueType(0);
+ EVT MemVT = Mem->getMemoryVT();
+
+ unsigned Opc = 0;
+
+ switch (VecSize) {
+ default:
+ return NULL;
+ case 1:
+ switch (MemVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i1:
+ Opc = NVPTX::LoadParamMemI8;
+ break;
+ case MVT::i8:
+ Opc = NVPTX::LoadParamMemI8;
+ break;
+ case MVT::i16:
+ Opc = NVPTX::LoadParamMemI16;
+ break;
+ case MVT::i32:
+ Opc = NVPTX::LoadParamMemI32;
+ break;
+ case MVT::i64:
+ Opc = NVPTX::LoadParamMemI64;
+ break;
+ case MVT::f32:
+ Opc = NVPTX::LoadParamMemF32;
+ break;
+ case MVT::f64:
+ Opc = NVPTX::LoadParamMemF64;
+ break;
+ }
+ break;
+ case 2:
+ switch (MemVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i1:
+ Opc = NVPTX::LoadParamMemV2I8;
+ break;
+ case MVT::i8:
+ Opc = NVPTX::LoadParamMemV2I8;
+ break;
+ case MVT::i16:
+ Opc = NVPTX::LoadParamMemV2I16;
+ break;
+ case MVT::i32:
+ Opc = NVPTX::LoadParamMemV2I32;
+ break;
+ case MVT::i64:
+ Opc = NVPTX::LoadParamMemV2I64;
+ break;
+ case MVT::f32:
+ Opc = NVPTX::LoadParamMemV2F32;
+ break;
+ case MVT::f64:
+ Opc = NVPTX::LoadParamMemV2F64;
+ break;
+ }
+ break;
+ case 4:
+ switch (MemVT.getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i1:
+ Opc = NVPTX::LoadParamMemV4I8;
+ break;
+ case MVT::i8:
+ Opc = NVPTX::LoadParamMemV4I8;
+ break;
+ case MVT::i16:
+ Opc = NVPTX::LoadParamMemV4I16;
+ break;
+ case MVT::i32:
+ Opc = NVPTX::LoadParamMemV4I32;
+ break;
+ case MVT::f32:
+ Opc = NVPTX::LoadParamMemV4F32;
+ break;
+ }
+ break;
+ }
+
+ SDVTList VTs;
+ if (VecSize == 1) {
+ VTs = CurDAG->getVTList(EltVT, MVT::Other, MVT::Glue);
+ } else if (VecSize == 2) {
+ VTs = CurDAG->getVTList(EltVT, EltVT, MVT::Other, MVT::Glue);
+ } else {
+ EVT EVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other, MVT::Glue };
+ VTs = CurDAG->getVTList(&EVTs[0], 5);
+ }
+
+ unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+
+ SmallVector<SDValue, 2> Ops;
+ Ops.push_back(CurDAG->getTargetConstant(OffsetVal, MVT::i32));
+ Ops.push_back(Chain);
+ Ops.push_back(Flag);
+
+ SDNode *Ret =
+ CurDAG->getMachineNode(Opc, DL, VTs, Ops);
+ return Ret;
+}
+
+SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
+ SDLoc DL(N);
+ SDValue Chain = N->getOperand(0);
+ SDValue Offset = N->getOperand(1);
+ unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ MemSDNode *Mem = cast<MemSDNode>(N);
+
+ // How many elements do we have?
+ unsigned NumElts = 1;
+ switch (N->getOpcode()) {
+ default:
+ return NULL;
+ case NVPTXISD::StoreRetval:
+ NumElts = 1;
+ break;
+ case NVPTXISD::StoreRetvalV2:
+ NumElts = 2;
+ break;
+ case NVPTXISD::StoreRetvalV4:
+ NumElts = 4;
+ break;
+ }
+
+ // Build vector of operands
+ SmallVector<SDValue, 6> Ops;
+ for (unsigned i = 0; i < NumElts; ++i)
+ Ops.push_back(N->getOperand(i + 2));
+ Ops.push_back(CurDAG->getTargetConstant(OffsetVal, MVT::i32));
+ Ops.push_back(Chain);
+
+ // Determine target opcode
+ // If we have an i1, use an 8-bit store. The lowering code in
+ // NVPTXISelLowering will have already emitted an upcast.
+ unsigned Opcode = 0;
+ switch (NumElts) {
+ default:
+ return NULL;
+ case 1:
+ switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i1:
+ Opcode = NVPTX::StoreRetvalI8;
+ break;
+ case MVT::i8:
+ Opcode = NVPTX::StoreRetvalI8;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::StoreRetvalI16;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::StoreRetvalI32;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::StoreRetvalI64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::StoreRetvalF32;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::StoreRetvalF64;
+ break;
+ }
+ break;
+ case 2:
+ switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i1:
+ Opcode = NVPTX::StoreRetvalV2I8;
+ break;
+ case MVT::i8:
+ Opcode = NVPTX::StoreRetvalV2I8;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::StoreRetvalV2I16;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::StoreRetvalV2I32;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::StoreRetvalV2I64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::StoreRetvalV2F32;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::StoreRetvalV2F64;
+ break;
+ }
+ break;
+ case 4:
+ switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i1:
+ Opcode = NVPTX::StoreRetvalV4I8;
+ break;
+ case MVT::i8:
+ Opcode = NVPTX::StoreRetvalV4I8;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::StoreRetvalV4I16;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::StoreRetvalV4I32;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::StoreRetvalV4F32;
+ break;
+ }
+ break;
+ }
+
+ SDNode *Ret =
+ CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops);
+ MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
+ MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(Ret)->setMemRefs(MemRefs0, MemRefs0 + 1);
+
+ return Ret;
+}
+
+SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
+ SDLoc DL(N);
+ SDValue Chain = N->getOperand(0);
+ SDValue Param = N->getOperand(1);
+ unsigned ParamVal = cast<ConstantSDNode>(Param)->getZExtValue();
+ SDValue Offset = N->getOperand(2);
+ unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ MemSDNode *Mem = cast<MemSDNode>(N);
+ SDValue Flag = N->getOperand(N->getNumOperands() - 1);
+
+ // How many elements do we have?
+ unsigned NumElts = 1;
+ switch (N->getOpcode()) {
+ default:
+ return NULL;
+ case NVPTXISD::StoreParamU32:
+ case NVPTXISD::StoreParamS32:
+ case NVPTXISD::StoreParam:
+ NumElts = 1;
+ break;
+ case NVPTXISD::StoreParamV2:
+ NumElts = 2;
+ break;
+ case NVPTXISD::StoreParamV4:
+ NumElts = 4;
+ break;
+ }
+
+ // Build vector of operands
+ SmallVector<SDValue, 8> Ops;
+ for (unsigned i = 0; i < NumElts; ++i)
+ Ops.push_back(N->getOperand(i + 3));
+ Ops.push_back(CurDAG->getTargetConstant(ParamVal, MVT::i32));
+ Ops.push_back(CurDAG->getTargetConstant(OffsetVal, MVT::i32));
+ Ops.push_back(Chain);
+ Ops.push_back(Flag);
+
+ // Determine target opcode
+ // If we have an i1, use an 8-bit store. The lowering code in
+ // NVPTXISelLowering will have already emitted an upcast.
+ unsigned Opcode = 0;
+ switch (N->getOpcode()) {
+ default:
+ switch (NumElts) {
+ default:
+ return NULL;
+ case 1:
+ switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i1:
+ Opcode = NVPTX::StoreParamI8;
+ break;
+ case MVT::i8:
+ Opcode = NVPTX::StoreParamI8;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::StoreParamI16;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::StoreParamI32;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::StoreParamI64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::StoreParamF32;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::StoreParamF64;
+ break;
+ }
+ break;
+ case 2:
+ switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i1:
+ Opcode = NVPTX::StoreParamV2I8;
+ break;
+ case MVT::i8:
+ Opcode = NVPTX::StoreParamV2I8;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::StoreParamV2I16;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::StoreParamV2I32;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::StoreParamV2I64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::StoreParamV2F32;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::StoreParamV2F64;
+ break;
+ }
+ break;
+ case 4:
+ switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
+ default:
+ return NULL;
+ case MVT::i1:
+ Opcode = NVPTX::StoreParamV4I8;
+ break;
+ case MVT::i8:
+ Opcode = NVPTX::StoreParamV4I8;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::StoreParamV4I16;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::StoreParamV4I32;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::StoreParamV4F32;
+ break;
+ }
+ break;
+ }
+ break;
+ // Special case: if we have a sign-extend/zero-extend node, insert the
+ // conversion instruction first, and use that as the value operand to
+ // the selected StoreParam node.
+ case NVPTXISD::StoreParamU32: {
+ Opcode = NVPTX::StoreParamI32;
+ SDValue CvtNone = CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE,
+ MVT::i32);
+ SDNode *Cvt = CurDAG->getMachineNode(NVPTX::CVT_u32_u16, DL,
+ MVT::i32, Ops[0], CvtNone);
+ Ops[0] = SDValue(Cvt, 0);
+ break;
+ }
+ case NVPTXISD::StoreParamS32: {
+ Opcode = NVPTX::StoreParamI32;
+ SDValue CvtNone = CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE,
+ MVT::i32);
+ SDNode *Cvt = CurDAG->getMachineNode(NVPTX::CVT_s32_s16, DL,
+ MVT::i32, Ops[0], CvtNone);
+ Ops[0] = SDValue(Cvt, 0);
+ break;
+ }
+ }
+
+ SDVTList RetVTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
+ SDNode *Ret =
+ CurDAG->getMachineNode(Opcode, DL, RetVTs, Ops);
+ MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
+ MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(Ret)->setMemRefs(MemRefs0, MemRefs0 + 1);
+
+ return Ret;
+}
+
// SelectDirectAddr - Match a direct address for DAG.
// A direct address could be a globaladdress or externalsymbol.
bool NVPTXDAGToDAGISel::SelectDirectAddr(SDValue N, SDValue &Address) {
diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
index ed16d4450b26..d961e5014532 100644
--- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
+++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
@@ -28,38 +28,22 @@ class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel {
// If true, generate corresponding FPCONTRACT. This is
// language dependent (i.e. CUDA and OpenCL works differently).
- bool doFMADF32;
bool doFMAF64;
bool doFMAF32;
bool doFMAF64AGG;
bool doFMAF32AGG;
bool allowFMA;
- // 0: use div.approx
- // 1: use div.full
- // 2: For sm_20 and later, ieee-compliant div.rnd.f32 can be generated;
- // Otherwise, use div.full
- int do_DIVF32_PREC;
-
- // If true, generate sqrt.rn, else generate sqrt.approx. If FTZ
- // is true, then generate the corresponding FTZ version.
- bool do_SQRTF32_PREC;
-
- // If true, add .ftz to f32 instructions.
- // This is only meaningful for sm_20 and later, as the default
- // is not ftz.
- // For sm earlier than sm_20, f32 denorms are always ftz by the
- // hardware.
- // We always add the .ftz modifier regardless of the sm value
- // when Use32FTZ is true.
- bool UseF32FTZ;
-
// If true, generate mul.wide from sext and mul
bool doMulWide;
+ int getDivF32Level() const;
+ bool usePrecSqrtF32() const;
+ bool useF32FTZ() const;
+
public:
explicit NVPTXDAGToDAGISel(NVPTXTargetMachine &tm,
- CodeGenOpt::Level OptLevel);
+ CodeGenOpt::Level OptLevel);
// Pass Name
virtual const char *getPassName() const {
@@ -80,7 +64,10 @@ private:
SDNode *SelectLDGLDUVector(SDNode *N);
SDNode *SelectStore(SDNode *N);
SDNode *SelectStoreVector(SDNode *N);
-
+ SDNode *SelectLoadParam(SDNode *N);
+ SDNode *SelectStoreRetval(SDNode *N);
+ SDNode *SelectStoreParam(SDNode *N);
+
inline SDValue getI32Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 6e01a5a82071..6a8be753c87c 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -51,6 +51,8 @@ static bool IsPTXVectorType(MVT VT) {
switch (VT.SimpleTy) {
default:
return false;
+ case MVT::v2i1:
+ case MVT::v4i1:
case MVT::v2i8:
case MVT::v4i8:
case MVT::v2i16:
@@ -65,6 +67,37 @@ static bool IsPTXVectorType(MVT VT) {
}
}
+/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
+/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
+/// into their primitive components.
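+/// For example, { i32, <2 x float> } is decomposed into { i32, f32, f32 }.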
+/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
+/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
+/// LowerCall, and LowerReturn.
+static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
+ SmallVectorImpl<EVT> &ValueVTs,
+ SmallVectorImpl<uint64_t> *Offsets = 0,
+ uint64_t StartingOffset = 0) {
+ SmallVector<EVT, 16> TempVTs;
+ SmallVector<uint64_t, 16> TempOffsets;
+
+ ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
+ for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
+ EVT VT = TempVTs[i];
+ uint64_t Off = TempOffsets[i];
+ if (VT.isVector())
+ for (unsigned j = 0, je = VT.getVectorNumElements(); j != je; ++j) {
+ ValueVTs.push_back(VT.getVectorElementType());
+ if (Offsets)
+ Offsets->push_back(Off+j*VT.getVectorElementType().getStoreSize());
+ }
+ else {
+ ValueVTs.push_back(VT);
+ if (Offsets)
+ Offsets->push_back(Off);
+ }
+ }
+}
+
// NVPTXTargetLowering Constructor.
NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
: TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM),
@@ -90,7 +123,6 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
setSchedulingPreference(Sched::Source);
addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
- addRegisterClass(MVT::i8, &NVPTX::Int8RegsRegClass);
addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
@@ -106,10 +138,12 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
setOperationAction(ISD::BR_CC, MVT::i16, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Expand);
setOperationAction(ISD::BR_CC, MVT::i64, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
+ // Some SIGN_EXTEND_INREG operations can be done using the cvt instruction.
+ // For others we will expand to a SHL/SRA pair.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
if (nvptxSubtarget.hasROT64()) {
@@ -170,6 +204,9 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
// TRAP can be lowered to PTX trap
setOperationAction(ISD::TRAP, MVT::Other, Legal);
+ setOperationAction(ISD::ADDC, MVT::i64, Expand);
+ setOperationAction(ISD::ADDE, MVT::i64, Expand);
+
// Register custom handling for vector loads/stores
for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
++i) {
@@ -181,6 +218,25 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
}
}
+ // Custom handling for i8 intrinsics
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
+
+ setOperationAction(ISD::CTLZ, MVT::i16, Legal);
+ setOperationAction(ISD::CTLZ, MVT::i32, Legal);
+ setOperationAction(ISD::CTLZ, MVT::i64, Legal);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal);
+ setOperationAction(ISD::CTTZ, MVT::i16, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i64, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationAction(ISD::CTPOP, MVT::i16, Legal);
+ setOperationAction(ISD::CTPOP, MVT::i32, Legal);
+ setOperationAction(ISD::CTPOP, MVT::i64, Legal);
+
// Now deduce the information based on the above mentioned
// actions
computeRegisterProperties();
@@ -196,8 +252,6 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "NVPTXISD::RET_FLAG";
case NVPTXISD::Wrapper:
return "NVPTXISD::Wrapper";
- case NVPTXISD::NVBuiltin:
- return "NVPTXISD::NVBuiltin";
case NVPTXISD::DeclareParam:
return "NVPTXISD::DeclareParam";
case NVPTXISD::DeclareScalarParam:
@@ -210,14 +264,20 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "NVPTXISD::PrintCall";
case NVPTXISD::LoadParam:
return "NVPTXISD::LoadParam";
+ case NVPTXISD::LoadParamV2:
+ return "NVPTXISD::LoadParamV2";
+ case NVPTXISD::LoadParamV4:
+ return "NVPTXISD::LoadParamV4";
case NVPTXISD::StoreParam:
return "NVPTXISD::StoreParam";
+ case NVPTXISD::StoreParamV2:
+ return "NVPTXISD::StoreParamV2";
+ case NVPTXISD::StoreParamV4:
+ return "NVPTXISD::StoreParamV4";
case NVPTXISD::StoreParamS32:
return "NVPTXISD::StoreParamS32";
case NVPTXISD::StoreParamU32:
return "NVPTXISD::StoreParamU32";
- case NVPTXISD::MoveToParam:
- return "NVPTXISD::MoveToParam";
case NVPTXISD::CallArgBegin:
return "NVPTXISD::CallArgBegin";
case NVPTXISD::CallArg:
@@ -236,12 +296,12 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "NVPTXISD::Prototype";
case NVPTXISD::MoveParam:
return "NVPTXISD::MoveParam";
- case NVPTXISD::MoveRetval:
- return "NVPTXISD::MoveRetval";
- case NVPTXISD::MoveToRetval:
- return "NVPTXISD::MoveToRetval";
case NVPTXISD::StoreRetval:
return "NVPTXISD::StoreRetval";
+ case NVPTXISD::StoreRetvalV2:
+ return "NVPTXISD::StoreRetvalV2";
+ case NVPTXISD::StoreRetvalV4:
+ return "NVPTXISD::StoreRetvalV4";
case NVPTXISD::PseudoUseParam:
return "NVPTXISD::PseudoUseParam";
case NVPTXISD::RETURN:
@@ -250,6 +310,8 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "NVPTXISD::CallSeqBegin";
case NVPTXISD::CallSeqEnd:
return "NVPTXISD::CallSeqEnd";
+ case NVPTXISD::CallPrototype:
+ return "NVPTXISD::CallPrototype";
case NVPTXISD::LoadV2:
return "NVPTXISD::LoadV2";
case NVPTXISD::LoadV4:
@@ -275,89 +337,68 @@ bool NVPTXTargetLowering::shouldSplitVectorElementType(EVT VT) const {
SDValue
NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
}
-std::string NVPTXTargetLowering::getPrototype(
- Type *retTy, const ArgListTy &Args,
- const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment) const {
+std::string
+NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ unsigned retAlignment,
+ const ImmutableCallSite *CS) const {
bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return "";
std::stringstream O;
O << "prototype_" << uniqueCallSite << " : .callprototype ";
- if (retTy->getTypeID() == Type::VoidTyID)
+ if (retTy->getTypeID() == Type::VoidTyID) {
O << "()";
- else {
+ } else {
O << "(";
- if (isABI) {
- if (retTy->isPrimitiveType() || retTy->isIntegerTy()) {
- unsigned size = 0;
- if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
- size = ITy->getBitWidth();
- if (size < 32)
- size = 32;
- } else {
- assert(retTy->isFloatingPointTy() &&
- "Floating point type expected here");
- size = retTy->getPrimitiveSizeInBits();
- }
-
- O << ".param .b" << size << " _";
- } else if (isa<PointerType>(retTy))
- O << ".param .b" << getPointerTy().getSizeInBits() << " _";
- else {
- if ((retTy->getTypeID() == Type::StructTyID) ||
- isa<VectorType>(retTy)) {
- SmallVector<EVT, 16> vtparts;
- ComputeValueVTs(*this, retTy, vtparts);
- unsigned totalsz = 0;
- for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
- unsigned elems = 1;
- EVT elemtype = vtparts[i];
- if (vtparts[i].isVector()) {
- elems = vtparts[i].getVectorNumElements();
- elemtype = vtparts[i].getVectorElementType();
- }
- for (unsigned j = 0, je = elems; j != je; ++j) {
- unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 8))
- sz = 8;
- totalsz += sz / 8;
- }
- }
- O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]";
- } else {
- assert(false && "Unknown return type");
- }
+ if (retTy->isPrimitiveType() || retTy->isIntegerTy()) {
+ unsigned size = 0;
+ if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
+ size = ITy->getBitWidth();
+ if (size < 32)
+ size = 32;
+ } else {
+ assert(retTy->isFloatingPointTy() &&
+ "Floating point type expected here");
+ size = retTy->getPrimitiveSizeInBits();
}
- } else {
- SmallVector<EVT, 16> vtparts;
- ComputeValueVTs(*this, retTy, vtparts);
- unsigned idx = 0;
- for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
- unsigned elems = 1;
- EVT elemtype = vtparts[i];
- if (vtparts[i].isVector()) {
- elems = vtparts[i].getVectorNumElements();
- elemtype = vtparts[i].getVectorElementType();
- }
- for (unsigned j = 0, je = elems; j != je; ++j) {
- unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 32))
- sz = 32;
- O << ".reg .b" << sz << " _";
- if (j < je - 1)
- O << ", ";
- ++idx;
+ O << ".param .b" << size << " _";
+ } else if (isa<PointerType>(retTy)) {
+ O << ".param .b" << getPointerTy().getSizeInBits() << " _";
+ } else {
+ if ((retTy->getTypeID() == Type::StructTyID) || isa<VectorType>(retTy)) {
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*this, retTy, vtparts);
+ unsigned totalsz = 0;
+ for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
+ unsigned elems = 1;
+ EVT elemtype = vtparts[i];
+ if (vtparts[i].isVector()) {
+ elems = vtparts[i].getVectorNumElements();
+ elemtype = vtparts[i].getVectorElementType();
+ }
+ // TODO: no need to loop
+ for (unsigned j = 0, je = elems; j != je; ++j) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (elemtype.isInteger() && (sz < 8))
+ sz = 8;
+ totalsz += sz / 8;
+ }
}
- if (i < e - 1)
- O << ", ";
+ O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]";
+ } else {
+ assert(false && "Unknown return type");
}
}
O << ") ";
@@ -367,14 +408,38 @@ std::string NVPTXTargetLowering::getPrototype(
bool first = true;
MVT thePointerTy = getPointerTy();
- for (unsigned i = 0, e = Args.size(); i != e; ++i) {
- const Type *Ty = Args[i].Ty;
+ unsigned OIdx = 0;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
+ Type *Ty = Args[i].Ty;
if (!first) {
O << ", ";
}
first = false;
- if (Outs[i].Flags.isByVal() == false) {
+ if (Outs[OIdx].Flags.isByVal() == false) {
+ if (Ty->isAggregateType() || Ty->isVectorTy()) {
+ unsigned align = 0;
+ const CallInst *CallI = cast<CallInst>(CS->getInstruction());
+ const DataLayout *TD = getDataLayout();
+ // +1 because index 0 is reserved for return type alignment
+ if (!llvm::getAlign(*CallI, i + 1, align))
+ align = TD->getABITypeAlignment(Ty);
+ unsigned sz = TD->getTypeAllocSize(Ty);
+ O << ".param .align " << align << " .b8 ";
+ O << "_";
+ O << "[" << sz << "]";
+ // update the index for Outs
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*this, Ty, vtparts);
+ if (unsigned len = vtparts.size())
+ OIdx += len - 1;
+ continue;
+ }
+ // i8 types in IR will be i16 types in SDAG
+ assert((getValueType(Ty) == Outs[OIdx].VT ||
+ (getValueType(Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
+ "type mismatch between callee prototype and arguments");
+ // scalar type
unsigned sz = 0;
if (isa<IntegerType>(Ty)) {
sz = cast<IntegerType>(Ty)->getBitWidth();
@@ -384,10 +449,7 @@ std::string NVPTXTargetLowering::getPrototype(
sz = thePointerTy.getSizeInBits();
else
sz = Ty->getPrimitiveSizeInBits();
- if (isABI)
- O << ".param .b" << sz << " ";
- else
- O << ".reg .b" << sz << " ";
+ O << ".param .b" << sz << " ";
O << "_";
continue;
}
@@ -395,50 +457,72 @@ std::string NVPTXTargetLowering::getPrototype(
assert(PTy && "Param with byval attribute should be a pointer type");
Type *ETy = PTy->getElementType();
- if (isABI) {
- unsigned align = Outs[i].Flags.getByValAlign();
- unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
- O << ".param .align " << align << " .b8 ";
- O << "_";
- O << "[" << sz << "]";
- continue;
- } else {
- SmallVector<EVT, 16> vtparts;
- ComputeValueVTs(*this, ETy, vtparts);
- for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
- unsigned elems = 1;
- EVT elemtype = vtparts[i];
- if (vtparts[i].isVector()) {
- elems = vtparts[i].getVectorNumElements();
- elemtype = vtparts[i].getVectorElementType();
- }
+ unsigned align = Outs[OIdx].Flags.getByValAlign();
+ unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
+ O << ".param .align " << align << " .b8 ";
+ O << "_";
+ O << "[" << sz << "]";
+ }
+ O << ");";
+ return O.str();
+}
- for (unsigned j = 0, je = elems; j != je; ++j) {
- unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 32))
- sz = 32;
- O << ".reg .b" << sz << " ";
- O << "_";
- if (j < je - 1)
- O << ", ";
- }
- if (i < e - 1)
- O << ", ";
+unsigned
+NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
+ const ImmutableCallSite *CS,
+ Type *Ty,
+ unsigned Idx) const {
+ const DataLayout *TD = getDataLayout();
+ unsigned Align = 0;
+ const Value *DirectCallee = CS->getCalledFunction();
+
+ if (!DirectCallee) {
+ // We don't have a direct function symbol, but that may be because of
+ // constant cast instructions in the call.
+ const Instruction *CalleeI = CS->getInstruction();
+ assert(CalleeI && "Call target is not a function or derived value?");
+
+ // With bitcast'd call targets, the instruction will be the call
+ if (isa<CallInst>(CalleeI)) {
+ // Check if we have call alignment metadata
+ if (llvm::getAlign(*cast<CallInst>(CalleeI), Idx, Align))
+ return Align;
+
+ const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
+ // Ignore any bitcast instructions
+ while (isa<ConstantExpr>(CalleeV)) {
+ const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
+ if (!CE->isCast())
+ break;
+ // Look through the bitcast
+ CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
}
- continue;
+
+ // We have now looked past all of the bitcasts. Do we finally have a
+ // Function?
+ if (isa<Function>(CalleeV))
+ DirectCallee = CalleeV;
}
}
- O << ");";
- return O.str();
+
+ // Check for function alignment information if we found that the
+ // ultimate target is a Function
+ if (DirectCallee)
+ if (llvm::getAlign(*cast<Function>(DirectCallee), Idx, Align))
+ return Align;
+
+ // Call is indirect or alignment information is not available, fall back to
+ // the ABI type alignment
+ return TD->getABITypeAlignment(Ty);
}
SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
@@ -447,53 +531,258 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
ImmutableCallSite *CS = CLI.CS;
bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+ const DataLayout *TD = getDataLayout();
+ MachineFunction &MF = DAG.getMachineFunction();
+ const Function *F = MF.getFunction();
SDValue tempChain = Chain;
Chain =
- DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true));
+ DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
+ dl);
SDValue InFlag = Chain.getValue(1);
- assert((Outs.size() == Args.size()) &&
- "Unexpected number of arguments to function call");
unsigned paramCount = 0;
+ // Args.size() and Outs.size() need not match.
+ // Outs.size() will be larger
+ // * if there is an aggregate argument with multiple fields (each field
+ // showing up separately in Outs)
+ // * if there is a vector argument with more than typical vector-length
+ // elements (generally if more than 4) where each vector element is
+ // individually present in Outs.
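+ // For example, an aggregate argument of type { i32, float } is a single
+ // entry in Args but contributes two entries to Outs/OutVals.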
+ // So a different index should be used for indexing into Outs/OutVals.
+ // See similar issue in LowerFormalArguments.
+ unsigned OIdx = 0;
// Declare the .params or .reg need to pass values
// to the function
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- EVT VT = Outs[i].VT;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
+ EVT VT = Outs[OIdx].VT;
+ Type *Ty = Args[i].Ty;
+
+ if (Outs[OIdx].Flags.isByVal() == false) {
+ if (Ty->isAggregateType()) {
+ // aggregate
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*this, Ty, vtparts);
+
+ unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
+ // declare .param .align <align> .b8 .param<n>[<size>];
+ unsigned sz = TD->getTypeAllocSize(Ty);
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
+ DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(sz, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+ DeclareParamOps, 5);
+ InFlag = Chain.getValue(1);
+ unsigned curOffset = 0;
+ for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
+ unsigned elems = 1;
+ EVT elemtype = vtparts[j];
+ if (vtparts[j].isVector()) {
+ elems = vtparts[j].getVectorNumElements();
+ elemtype = vtparts[j].getVectorElementType();
+ }
+ for (unsigned k = 0, ke = elems; k != ke; ++k) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (elemtype.isInteger() && (sz < 8))
+ sz = 8;
+ SDValue StVal = OutVals[OIdx];
+ if (elemtype.getSizeInBits() < 16) {
+ StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
+ }
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain,
+ DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(curOffset, MVT::i32),
+ StVal, InFlag };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
+ CopyParamVTs, &CopyParamOps[0], 5,
+ elemtype, MachinePointerInfo());
+ InFlag = Chain.getValue(1);
+ curOffset += sz / 8;
+ ++OIdx;
+ }
+ }
+ if (vtparts.size() > 0)
+ --OIdx;
+ ++paramCount;
+ continue;
+ }
+ if (Ty->isVectorTy()) {
+ EVT ObjectVT = getValueType(Ty);
+ unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
+ // declare .param .align <align> .b8 .param<n>[<size>];
+ unsigned sz = TD->getTypeAllocSize(Ty);
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
+ DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(sz, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+ DeclareParamOps, 5);
+ InFlag = Chain.getValue(1);
+ unsigned NumElts = ObjectVT.getVectorNumElements();
+ EVT EltVT = ObjectVT.getVectorElementType();
+ EVT MemVT = EltVT;
+ bool NeedExtend = false;
+ if (EltVT.getSizeInBits() < 16) {
+ NeedExtend = true;
+ EltVT = MVT::i16;
+ }
+
+ // V1 store
+ if (NumElts == 1) {
+ SDValue Elt = OutVals[OIdx++];
+ if (NeedExtend)
+ Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt);
+
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain,
+ DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(0, MVT::i32), Elt,
+ InFlag };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
+ CopyParamVTs, &CopyParamOps[0], 5,
+ MemVT, MachinePointerInfo());
+ InFlag = Chain.getValue(1);
+ } else if (NumElts == 2) {
+ SDValue Elt0 = OutVals[OIdx++];
+ SDValue Elt1 = OutVals[OIdx++];
+ if (NeedExtend) {
+ Elt0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt0);
+ Elt1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt1);
+ }
+
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain,
+ DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(0, MVT::i32), Elt0, Elt1,
+ InFlag };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
+ CopyParamVTs, &CopyParamOps[0], 6,
+ MemVT, MachinePointerInfo());
+ InFlag = Chain.getValue(1);
+ } else {
+ unsigned curOffset = 0;
+ // V4 stores
+ // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
+ // vector will be expanded to a power of 2 elements, so we know we can
+ // always round up to the next multiple of 4 when creating the vector
+ // stores.
+ // e.g. 4 elem => 1 st.v4
+ // 6 elem => 2 st.v4
+ // 8 elem => 2 st.v4
+ // 11 elem => 3 st.v4
+ unsigned VecSize = 4;
+ if (EltVT.getSizeInBits() == 64)
+ VecSize = 2;
+
+ // This is potentially only part of a vector, so assume all elements
+ // are packed together.
+ unsigned PerStoreOffset = MemVT.getStoreSizeInBits() / 8 * VecSize;
+
+ for (unsigned i = 0; i < NumElts; i += VecSize) {
+ // Get values
+ SDValue StoreVal;
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(DAG.getConstant(paramCount, MVT::i32));
+ Ops.push_back(DAG.getConstant(curOffset, MVT::i32));
+
+ unsigned Opc = NVPTXISD::StoreParamV2;
+
+ StoreVal = OutVals[OIdx++];
+ if (NeedExtend)
+ StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
+ Ops.push_back(StoreVal);
+
+ if (i + 1 < NumElts) {
+ StoreVal = OutVals[OIdx++];
+ if (NeedExtend)
+ StoreVal =
+ DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
+ } else {
+ StoreVal = DAG.getUNDEF(EltVT);
+ }
+ Ops.push_back(StoreVal);
+
+ if (VecSize == 4) {
+ Opc = NVPTXISD::StoreParamV4;
+ if (i + 2 < NumElts) {
+ StoreVal = OutVals[OIdx++];
+ if (NeedExtend)
+ StoreVal =
+ DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
+ } else {
+ StoreVal = DAG.getUNDEF(EltVT);
+ }
+ Ops.push_back(StoreVal);
+
+ if (i + 3 < NumElts) {
+ StoreVal = OutVals[OIdx++];
+ if (NeedExtend)
+ StoreVal =
+ DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
+ } else {
+ StoreVal = DAG.getUNDEF(EltVT);
+ }
+ Ops.push_back(StoreVal);
+ }
- if (Outs[i].Flags.isByVal() == false) {
+ Ops.push_back(InFlag);
+
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, &Ops[0],
+ Ops.size(), MemVT,
+ MachinePointerInfo());
+ InFlag = Chain.getValue(1);
+ curOffset += PerStoreOffset;
+ }
+ }
+ ++paramCount;
+ --OIdx;
+ continue;
+ }
// Plain scalar
// for ABI, declare .param .b<size> .param<n>;
- // for nonABI, declare .reg .b<size> .param<n>;
- unsigned isReg = 1;
- if (isABI)
- isReg = 0;
unsigned sz = VT.getSizeInBits();
- if (VT.isInteger() && (sz < 32))
- sz = 32;
+ bool needExtend = false;
+ if (VT.isInteger()) {
+ if (sz < 16)
+ needExtend = true;
+ if (sz < 32)
+ sz = 32;
+ }
SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue DeclareParamOps[] = { Chain,
DAG.getConstant(paramCount, MVT::i32),
DAG.getConstant(sz, MVT::i32),
- DAG.getConstant(isReg, MVT::i32), InFlag };
+ DAG.getConstant(0, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
DeclareParamOps, 5);
InFlag = Chain.getValue(1);
+ SDValue OutV = OutVals[OIdx];
+ if (needExtend) {
+ // zext/sext i1 to i16
+ unsigned opc = ISD::ZERO_EXTEND;
+ if (Outs[OIdx].Flags.isSExt())
+ opc = ISD::SIGN_EXTEND;
+ OutV = DAG.getNode(opc, dl, MVT::i16, OutV);
+ }
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(0, MVT::i32), OutVals[i],
- InFlag };
+ DAG.getConstant(0, MVT::i32), OutV, InFlag };
unsigned opcode = NVPTXISD::StoreParam;
- if (isReg)
- opcode = NVPTXISD::MoveToParam;
- else {
- if (Outs[i].Flags.isZExt())
- opcode = NVPTXISD::StoreParamU32;
- else if (Outs[i].Flags.isSExt())
- opcode = NVPTXISD::StoreParamS32;
- }
- Chain = DAG.getNode(opcode, dl, CopyParamVTs, CopyParamOps, 5);
+ if (Outs[OIdx].Flags.isZExt())
+ opcode = NVPTXISD::StoreParamU32;
+ else if (Outs[OIdx].Flags.isSExt())
+ opcode = NVPTXISD::StoreParamS32;
+ Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps, 5,
+ VT, MachinePointerInfo());
InFlag = Chain.getValue(1);
++paramCount;
@@ -505,55 +794,20 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
assert(PTy && "Type of a byval parameter should be pointer");
ComputeValueVTs(*this, PTy->getElementType(), vtparts);
- if (isABI) {
- // declare .param .align 16 .b8 .param<n>[<size>];
- unsigned sz = Outs[i].Flags.getByValSize();
- SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- // The ByValAlign in the Outs[i].Flags is alway set at this point, so we
- // don't need to
- // worry about natural alignment or not. See TargetLowering::LowerCallTo()
- SDValue DeclareParamOps[] = {
- Chain, DAG.getConstant(Outs[i].Flags.getByValAlign(), MVT::i32),
- DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
- InFlag
- };
- Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
- DeclareParamOps, 5);
- InFlag = Chain.getValue(1);
- unsigned curOffset = 0;
- for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
- unsigned elems = 1;
- EVT elemtype = vtparts[j];
- if (vtparts[j].isVector()) {
- elems = vtparts[j].getVectorNumElements();
- elemtype = vtparts[j].getVectorElementType();
- }
- for (unsigned k = 0, ke = elems; k != ke; ++k) {
- unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 8))
- sz = 8;
- SDValue srcAddr =
- DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i],
- DAG.getConstant(curOffset, getPointerTy()));
- SDValue theVal =
- DAG.getLoad(elemtype, dl, tempChain, srcAddr,
- MachinePointerInfo(), false, false, false, 0);
- SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CopyParamOps[] = { Chain,
- DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(curOffset, MVT::i32),
- theVal, InFlag };
- Chain = DAG.getNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
- CopyParamOps, 5);
- InFlag = Chain.getValue(1);
- curOffset += sz / 8;
- }
- }
- ++paramCount;
- continue;
- }
- // Non-abi, struct or vector
- // Declare a bunch or .reg .b<size> .param<n>
+ // declare .param .align <align> .b8 .param<n>[<size>];
+ unsigned sz = Outs[OIdx].Flags.getByValSize();
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
+ // so we don't need to worry about natural alignment or not.
+ // See TargetLowering::LowerCallTo().
+ SDValue DeclareParamOps[] = {
+ Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32),
+ DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
+ InFlag
+ };
+ Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+ DeclareParamOps, 5);
+ InFlag = Chain.getValue(1);
unsigned curOffset = 0;
for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
unsigned elems = 1;
@@ -564,107 +818,66 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
for (unsigned k = 0, ke = elems; k != ke; ++k) {
unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 32))
- sz = 32;
- SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareParamOps[] = { Chain,
- DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(sz, MVT::i32),
- DAG.getConstant(1, MVT::i32), InFlag };
- Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
- DeclareParamOps, 5);
- InFlag = Chain.getValue(1);
+ if (elemtype.isInteger() && (sz < 8))
+ sz = 8;
SDValue srcAddr =
- DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i],
+ DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
DAG.getConstant(curOffset, getPointerTy()));
- SDValue theVal =
- DAG.getLoad(elemtype, dl, tempChain, srcAddr, MachinePointerInfo(),
- false, false, false, 0);
+ SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
+ MachinePointerInfo(), false, false, false,
+ 0);
+ if (elemtype.getSizeInBits() < 16) {
+ theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
+ }
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(0, MVT::i32), theVal,
+ DAG.getConstant(curOffset, MVT::i32), theVal,
InFlag };
- Chain = DAG.getNode(NVPTXISD::MoveToParam, dl, CopyParamVTs,
- CopyParamOps, 5);
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
+ CopyParamOps, 5, elemtype,
+ MachinePointerInfo());
+
InFlag = Chain.getValue(1);
- ++paramCount;
+ curOffset += sz / 8;
}
}
+ ++paramCount;
}
GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
unsigned retAlignment = 0;
// Handle Result
- unsigned retCount = 0;
if (Ins.size() > 0) {
SmallVector<EVT, 16> resvtparts;
ComputeValueVTs(*this, retTy, resvtparts);
- // Declare one .param .align 16 .b8 func_retval0[<size>] for ABI or
- // individual .reg .b<size> func_retval<0..> for non ABI
- unsigned resultsz = 0;
- for (unsigned i = 0, e = resvtparts.size(); i != e; ++i) {
- unsigned elems = 1;
- EVT elemtype = resvtparts[i];
- if (resvtparts[i].isVector()) {
- elems = resvtparts[i].getVectorNumElements();
- elemtype = resvtparts[i].getVectorElementType();
- }
- for (unsigned j = 0, je = elems; j != je; ++j) {
- unsigned sz = elemtype.getSizeInBits();
- if (isABI == false) {
- if (elemtype.isInteger() && (sz < 32))
- sz = 32;
- } else {
- if (elemtype.isInteger() && (sz < 8))
- sz = 8;
- }
- if (isABI == false) {
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain, DAG.getConstant(2, MVT::i32),
- DAG.getConstant(sz, MVT::i32),
- DAG.getConstant(retCount, MVT::i32),
- InFlag };
- Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
- DeclareRetOps, 5);
- InFlag = Chain.getValue(1);
- ++retCount;
- }
- resultsz += sz;
- }
- }
- if (isABI) {
- if (retTy->isPrimitiveType() || retTy->isIntegerTy() ||
- retTy->isPointerTy()) {
- // Scalar needs to be at least 32bit wide
- if (resultsz < 32)
- resultsz = 32;
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
- DAG.getConstant(resultsz, MVT::i32),
- DAG.getConstant(0, MVT::i32), InFlag };
- Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
- DeclareRetOps, 5);
- InFlag = Chain.getValue(1);
- } else {
- if (Func) { // direct call
- if (!llvm::getAlign(*(CS->getCalledFunction()), 0, retAlignment))
- retAlignment = getDataLayout()->getABITypeAlignment(retTy);
- } else { // indirect call
- const CallInst *CallI = dyn_cast<CallInst>(CS->getInstruction());
- if (!llvm::getAlign(*CallI, 0, retAlignment))
- retAlignment = getDataLayout()->getABITypeAlignment(retTy);
- }
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain,
- DAG.getConstant(retAlignment, MVT::i32),
- DAG.getConstant(resultsz / 8, MVT::i32),
- DAG.getConstant(0, MVT::i32), InFlag };
- Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
- DeclareRetOps, 5);
- InFlag = Chain.getValue(1);
- }
+ // Declare
+ // .param .align 16 .b8 retval0[<size-in-bytes>], or
+ // .param .b<size-in-bits> retval0
+ unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
+ if (retTy->isPrimitiveType() || retTy->isIntegerTy() ||
+ retTy->isPointerTy()) {
+ // Scalar needs to be at least 32bit wide
+ if (resultsz < 32)
+ resultsz = 32;
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(resultsz, MVT::i32),
+ DAG.getConstant(0, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
+ DeclareRetOps, 5);
+ InFlag = Chain.getValue(1);
+ } else {
+ retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain,
+ DAG.getConstant(retAlignment, MVT::i32),
+ DAG.getConstant(resultsz / 8, MVT::i32),
+ DAG.getConstant(0, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
+ DeclareRetOps, 5);
+ InFlag = Chain.getValue(1);
}
}
@@ -674,25 +887,22 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
// to be emitted, and the label has to used as the last arg of call
// instruction.
- // The prototype is embedded in a string and put as the operand for an
- // INLINEASM SDNode.
- SDVTList InlineAsmVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- std::string proto_string = getPrototype(retTy, Args, Outs, retAlignment);
- const char *asmstr = nvTM->getManagedStrPool()
- ->getManagedString(proto_string.c_str())->c_str();
- SDValue InlineAsmOps[] = {
- Chain, DAG.getTargetExternalSymbol(asmstr, getPointerTy()),
- DAG.getMDNode(0), DAG.getTargetConstant(0, MVT::i32), InFlag
+ // The prototype is embedded in a string and put as the operand for a
+ // CallPrototype SDNode, which will be printed out as the value of the string.
+ SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ std::string Proto = getPrototype(retTy, Args, Outs, retAlignment, CS);
+ const char *ProtoStr =
+ nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
+ SDValue ProtoOps[] = {
+ Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
};
- Chain = DAG.getNode(ISD::INLINEASM, dl, InlineAsmVTs, InlineAsmOps, 5);
+ Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, &ProtoOps[0], 3);
InFlag = Chain.getValue(1);
}
// Op to just print "call"
SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue PrintCallOps[] = {
- Chain,
- DAG.getConstant(isABI ? ((Ins.size() == 0) ? 0 : 1) : retCount, MVT::i32),
- InFlag
+ Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
};
Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
dl, PrintCallVTs, PrintCallOps, 3);
@@ -740,62 +950,183 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Generate loads from param memory/moves from registers for result
if (Ins.size() > 0) {
- if (isABI) {
- unsigned resoffset = 0;
- for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- unsigned sz = Ins[i].VT.getSizeInBits();
- if (Ins[i].VT.isInteger() && (sz < 8))
- sz = 8;
- EVT LoadRetVTs[] = { Ins[i].VT, MVT::Other, MVT::Glue };
- SDValue LoadRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
- DAG.getConstant(resoffset, MVT::i32), InFlag };
- SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, LoadRetVTs,
- LoadRetOps, array_lengthof(LoadRetOps));
+ unsigned resoffset = 0;
+ if (retTy && retTy->isVectorTy()) {
+ EVT ObjectVT = getValueType(retTy);
+ unsigned NumElts = ObjectVT.getVectorNumElements();
+ EVT EltVT = ObjectVT.getVectorElementType();
+ assert(nvTM->getTargetLowering()->getNumRegisters(F->getContext(),
+ ObjectVT) == NumElts &&
+ "Vector was not scalarized");
+ unsigned sz = EltVT.getSizeInBits();
+ bool needTruncate = sz < 16;
+
+ if (NumElts == 1) {
+ // Just a simple load
+ std::vector<EVT> LoadRetVTs;
+ if (needTruncate) {
+ // If loading i1 result, generate
+ // load i16
+ // trunc i16 to i1
+ LoadRetVTs.push_back(MVT::i16);
+ } else
+ LoadRetVTs.push_back(EltVT);
+ LoadRetVTs.push_back(MVT::Other);
+ LoadRetVTs.push_back(MVT::Glue);
+ std::vector<SDValue> LoadRetOps;
+ LoadRetOps.push_back(Chain);
+ LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
+ LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
+ LoadRetOps.push_back(InFlag);
+ SDValue retval = DAG.getMemIntrinsicNode(
+ NVPTXISD::LoadParam, dl,
+ DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
+ LoadRetOps.size(), EltVT, MachinePointerInfo());
Chain = retval.getValue(1);
InFlag = retval.getValue(2);
- InVals.push_back(retval);
- resoffset += sz / 8;
+ SDValue Ret0 = retval;
+ if (needTruncate)
+ Ret0 = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Ret0);
+ InVals.push_back(Ret0);
+ } else if (NumElts == 2) {
+ // LoadV2
+ std::vector<EVT> LoadRetVTs;
+ if (needTruncate) {
+ // If loading i1 result, generate
+ // load i16
+ // trunc i16 to i1
+ LoadRetVTs.push_back(MVT::i16);
+ LoadRetVTs.push_back(MVT::i16);
+ } else {
+ LoadRetVTs.push_back(EltVT);
+ LoadRetVTs.push_back(EltVT);
+ }
+ LoadRetVTs.push_back(MVT::Other);
+ LoadRetVTs.push_back(MVT::Glue);
+ std::vector<SDValue> LoadRetOps;
+ LoadRetOps.push_back(Chain);
+ LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
+ LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
+ LoadRetOps.push_back(InFlag);
+ SDValue retval = DAG.getMemIntrinsicNode(
+ NVPTXISD::LoadParamV2, dl,
+ DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
+ LoadRetOps.size(), EltVT, MachinePointerInfo());
+ Chain = retval.getValue(2);
+ InFlag = retval.getValue(3);
+ SDValue Ret0 = retval.getValue(0);
+ SDValue Ret1 = retval.getValue(1);
+ if (needTruncate) {
+ Ret0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret0);
+ InVals.push_back(Ret0);
+ Ret1 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret1);
+ InVals.push_back(Ret1);
+ } else {
+ InVals.push_back(Ret0);
+ InVals.push_back(Ret1);
+ }
+ } else {
+ // Split into N LoadV4
+ unsigned Ofst = 0;
+ unsigned VecSize = 4;
+ unsigned Opc = NVPTXISD::LoadParamV4;
+ if (EltVT.getSizeInBits() == 64) {
+ VecSize = 2;
+ Opc = NVPTXISD::LoadParamV2;
+ }
+ EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
+ for (unsigned i = 0; i < NumElts; i += VecSize) {
+ SmallVector<EVT, 8> LoadRetVTs;
+ if (needTruncate) {
+ // If loading i1 result, generate
+ // load i16
+ // trunc i16 to i1
+ for (unsigned j = 0; j < VecSize; ++j)
+ LoadRetVTs.push_back(MVT::i16);
+ } else {
+ for (unsigned j = 0; j < VecSize; ++j)
+ LoadRetVTs.push_back(EltVT);
+ }
+ LoadRetVTs.push_back(MVT::Other);
+ LoadRetVTs.push_back(MVT::Glue);
+ SmallVector<SDValue, 4> LoadRetOps;
+ LoadRetOps.push_back(Chain);
+ LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
+ LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
+ LoadRetOps.push_back(InFlag);
+ SDValue retval = DAG.getMemIntrinsicNode(
+ Opc, dl, DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()),
+ &LoadRetOps[0], LoadRetOps.size(), EltVT, MachinePointerInfo());
+ if (VecSize == 2) {
+ Chain = retval.getValue(2);
+ InFlag = retval.getValue(3);
+ } else {
+ Chain = retval.getValue(4);
+ InFlag = retval.getValue(5);
+ }
+
+ for (unsigned j = 0; j < VecSize; ++j) {
+ if (i + j >= NumElts)
+ break;
+ SDValue Elt = retval.getValue(j);
+ if (needTruncate)
+ Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
+ InVals.push_back(Elt);
+ }
+ Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
+ }
}
} else {
- SmallVector<EVT, 16> resvtparts;
- ComputeValueVTs(*this, retTy, resvtparts);
-
- assert(Ins.size() == resvtparts.size() &&
- "Unexpected number of return values in non-ABI case");
- unsigned paramNum = 0;
+ SmallVector<EVT, 16> VTs;
+ ComputePTXValueVTs(*this, retTy, VTs);
+ assert(VTs.size() == Ins.size() && "Bad value decomposition");
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- assert(EVT(Ins[i].VT) == resvtparts[i] &&
- "Unexpected EVT type in non-ABI case");
- unsigned numelems = 1;
- EVT elemtype = Ins[i].VT;
- if (Ins[i].VT.isVector()) {
- numelems = Ins[i].VT.getVectorNumElements();
- elemtype = Ins[i].VT.getVectorElementType();
- }
- std::vector<SDValue> tempRetVals;
- for (unsigned j = 0; j < numelems; ++j) {
- EVT MoveRetVTs[] = { elemtype, MVT::Other, MVT::Glue };
- SDValue MoveRetOps[] = { Chain, DAG.getConstant(0, MVT::i32),
- DAG.getConstant(paramNum, MVT::i32),
- InFlag };
- SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, MoveRetVTs,
- MoveRetOps, array_lengthof(MoveRetOps));
- Chain = retval.getValue(1);
- InFlag = retval.getValue(2);
- tempRetVals.push_back(retval);
- ++paramNum;
- }
- if (Ins[i].VT.isVector())
- InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, Ins[i].VT,
- &tempRetVals[0], tempRetVals.size()));
- else
- InVals.push_back(tempRetVals[0]);
+ unsigned sz = VTs[i].getSizeInBits();
+ bool needTruncate = sz < 8;
+ if (VTs[i].isInteger() && (sz < 8))
+ sz = 8;
+
+ SmallVector<EVT, 4> LoadRetVTs;
+ EVT TheLoadType = VTs[i];
+ if (retTy->isIntegerTy() &&
+ TD->getTypeAllocSizeInBits(retTy) < 32) {
+ // This is for integer types only, and specifically not for
+ // aggregates.
+ LoadRetVTs.push_back(MVT::i32);
+ TheLoadType = MVT::i32;
+ } else if (sz < 16) {
+ // If loading i1/i8 result, generate
+ // load i8 (-> i16)
+ // trunc i16 to i1/i8
+ LoadRetVTs.push_back(MVT::i16);
+ } else
+ LoadRetVTs.push_back(Ins[i].VT);
+ LoadRetVTs.push_back(MVT::Other);
+ LoadRetVTs.push_back(MVT::Glue);
+
+ SmallVector<SDValue, 4> LoadRetOps;
+ LoadRetOps.push_back(Chain);
+ LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
+ LoadRetOps.push_back(DAG.getConstant(resoffset, MVT::i32));
+ LoadRetOps.push_back(InFlag);
+ SDValue retval = DAG.getMemIntrinsicNode(
+ NVPTXISD::LoadParam, dl,
+ DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
+ LoadRetOps.size(), TheLoadType, MachinePointerInfo());
+ Chain = retval.getValue(1);
+ InFlag = retval.getValue(2);
+ SDValue Ret0 = retval.getValue(0);
+ if (needTruncate)
+ Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0);
+ InVals.push_back(Ret0);
+ resoffset += sz / 8;
}
}
}
+
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
DAG.getIntPtrConstant(uniqueCallSite + 1, true),
- InFlag);
+ InFlag, dl);
uniqueCallSite++;
// set isTailCall to false for now, until we figure out how to express
@@ -810,7 +1141,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDValue
NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
SDNode *Node = Op.getNode();
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
SmallVector<SDValue, 8> Ops;
unsigned NumOperands = Node->getNumOperands();
for (unsigned i = 0; i < NumOperands; ++i) {
@@ -861,17 +1192,17 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// v = ld i1* addr
// =>
-// v1 = ld i8* addr
-// v = trunc v1 to i1
+// v1 = ld i8* addr (-> i16)
+// v = trunc i16 to i1
SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
SDNode *Node = Op.getNode();
LoadSDNode *LD = cast<LoadSDNode>(Node);
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
assert(Node->getValueType(0) == MVT::i1 &&
"Custom lowering for i1 load only");
SDValue newLD =
- DAG.getLoad(MVT::i8, dl, LD->getChain(), LD->getBasePtr(),
+ DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
LD->isInvariant(), LD->getAlignment());
SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
@@ -896,7 +1227,7 @@ SDValue
NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
SDNode *N = Op.getNode();
SDValue Val = N->getOperand(1);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT ValVT = Val.getValueType();
if (ValVT.isVector()) {
@@ -955,8 +1286,6 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
DAG.getIntPtrConstant(i));
if (NeedExt)
- // ANY_EXTEND is correct here since the store will only look at the
- // lower-order bits anyway.
ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
Ops.push_back(ExtVal);
}
@@ -981,11 +1310,11 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
// st i1 v, addr
// =>
-// v1 = zxt v to i8
-// st i8, addr
+// v1 = zxt v to i16
+// st.u8 i16, addr
SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
SDNode *Node = Op.getNode();
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
StoreSDNode *ST = cast<StoreSDNode>(Node);
SDValue Tmp1 = ST->getChain();
SDValue Tmp2 = ST->getBasePtr();
@@ -994,9 +1323,10 @@ SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
- Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Tmp3);
- SDValue Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
- isVolatile, isNonTemporal, Alignment);
+ Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
+ SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
+ ST->getPointerInfo(), MVT::i8, isNonTemporal,
+ isVolatile, Alignment);
return Result;
}
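// A hedged standalone sketch (not part of the patch) of the i1 memory
// convention the two custom lowerings above establish. PTX has no 8-bit
// registers, so an i1 value lives in a 16-bit register: loads widen through
// ld.u8 into i16, and stores zero-extend to i16 and then truncating-store an
// 8-bit quantity. Helper names are hypothetical; the usual SelectionDAG
// headers are assumed.
static SDValue sketchLoadI1(LoadSDNode *LD, SelectionDAG &DAG, SDLoc dl) {
  // ld.u8 into a 16-bit register, then keep only the low bit.
  SDValue Wide = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
                             LD->getPointerInfo(), LD->isVolatile(),
                             LD->isNonTemporal(), LD->isInvariant(),
                             LD->getAlignment());
  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Wide);
}
static SDValue sketchStoreI1(StoreSDNode *ST, SelectionDAG &DAG, SDLoc dl) {
  // Widen the register value to i16, but keep i8 as the memory type (st.u8).
  SDValue Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, ST->getValue());
  return DAG.getTruncStore(ST->getChain(), dl, Wide, ST->getBasePtr(),
                           ST->getPointerInfo(), MVT::i8, ST->isNonTemporal(),
                           ST->isVolatile(), ST->getAlignment());
}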
@@ -1011,7 +1341,15 @@ SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
SDValue
NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
- return getExtSymb(DAG, ".PARAM", idx, v);
+ std::string ParamSym;
+ raw_string_ostream ParamStr(ParamSym);
+
+ ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
+ ParamStr.flush();
+
+ std::string *SavedStr =
+ nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
+ return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
}
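// A hedged sketch (not part of the patch) of the symbol name the rewritten
// getParamSymbol() above produces: "<function name>_param_<index>", so
// parameter 2 of a kernel named "foo" becomes the PTX symbol "foo_param_2".
// The helper below is hypothetical and only mirrors the string construction.
#include <sstream>
#include <string>
static std::string sketchParamSymbolName(const std::string &FnName, int Idx) {
  std::ostringstream OS;
  OS << FnName << "_param_" << Idx;   // e.g. "foo_param_2"
  return OS.str();
}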
SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
@@ -1046,19 +1384,23 @@ bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
SDValue NVPTXTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
const DataLayout *TD = getDataLayout();
const Function *F = MF.getFunction();
const AttributeSet &PAL = F->getAttributes();
+ const TargetLowering *TLI = nvTM->getTargetLowering();
SDValue Root = DAG.getRoot();
std::vector<SDValue> OutChains;
bool isKernel = llvm::isKernelFunction(*F);
bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
std::vector<Type *> argTypes;
std::vector<const Argument *> theArgs;
@@ -1067,15 +1409,20 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
theArgs.push_back(I);
argTypes.push_back(I->getType());
}
- //assert(argTypes.size() == Ins.size() &&
- // "Ins types and function types did not match");
+ // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
+ // Ins.size() will be larger
+ // * if there is an aggregate argument with multiple fields (each field
+ // showing up separately in Ins)
+ // * if there is a vector argument with more than typical vector-length
+ // elements (generally if more than 4) where each vector element is
+ // individually present in Ins.
+ // So a different index should be used for indexing into Ins.
+ // See similar issue in LowerCall.
+ unsigned InsIdx = 0;
int idx = 0;
- for (unsigned i = 0, e = argTypes.size(); i != e; ++i, ++idx) {
+ for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
Type *Ty = argTypes[i];
- EVT ObjectVT = getValueType(Ty);
- //assert(ObjectVT == Ins[i].VT &&
- // "Ins type did not match function type");
// If the kernel argument is image*_t or sampler_t, convert it to
// a i32 constant holding the parameter position. This can later
@@ -1091,142 +1438,248 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
if (theArgs[i]->use_empty()) {
// argument is dead
- if (ObjectVT.isVector()) {
- EVT EltVT = ObjectVT.getVectorElementType();
- unsigned NumElts = ObjectVT.getVectorNumElements();
- for (unsigned vi = 0; vi < NumElts; ++vi) {
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, EltVT));
+ if (Ty->isAggregateType()) {
+ SmallVector<EVT, 16> vtparts;
+
+ ComputePTXValueVTs(*this, Ty, vtparts);
+ assert(vtparts.size() > 0 && "empty aggregate type not expected");
+ for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
+ ++parti) {
+ EVT partVT = vtparts[parti];
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, partVT));
+ ++InsIdx;
}
- } else {
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, ObjectVT));
+ if (vtparts.size() > 0)
+ --InsIdx;
+ continue;
+ }
+ if (Ty->isVectorTy()) {
+ EVT ObjectVT = getValueType(Ty);
+ unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
+ for (unsigned parti = 0; parti < NumRegs; ++parti) {
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+ ++InsIdx;
+ }
+ if (NumRegs > 0)
+ --InsIdx;
+ continue;
}
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
continue;
}
// In the following cases, assign a node order of "idx+1"
- // to newly created nodes. The SDNOdes for params have to
+ // to newly created nodes. The SDNodes for params have to
// appear in the same order as their order of appearance
// in the original function. "idx+1" holds that order.
if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) {
- if (ObjectVT.isVector()) {
+ if (Ty->isAggregateType()) {
+ SmallVector<EVT, 16> vtparts;
+ SmallVector<uint64_t, 16> offsets;
+
+ // NOTE: Here, we lose the ability to issue vector loads for vectors
+ // that are a part of a struct. This should be investigated in the
+ // future.
+ ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
+ assert(vtparts.size() > 0 && "empty aggregate type not expected");
+ bool aggregateIsPacked = false;
+ if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
+ aggregateIsPacked = STy->isPacked();
+
+ SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
+ for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
+ ++parti) {
+ EVT partVT = vtparts[parti];
+ Value *srcValue = Constant::getNullValue(
+ PointerType::get(partVT.getTypeForEVT(F->getContext()),
+ llvm::ADDRESS_SPACE_PARAM));
+ SDValue srcAddr =
+ DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
+ DAG.getConstant(offsets[parti], getPointerTy()));
+ unsigned partAlign =
+ aggregateIsPacked ? 1
+ : TD->getABITypeAlignment(
+ partVT.getTypeForEVT(F->getContext()));
+ SDValue p;
+ if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
+ ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
+ ISD::SEXTLOAD : ISD::ZEXTLOAD;
+ p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
+ MachinePointerInfo(srcValue), partVT, false,
+ false, partAlign);
+ } else {
+ p = DAG.getLoad(partVT, dl, Root, srcAddr,
+ MachinePointerInfo(srcValue), false, false, false,
+ partAlign);
+ }
+ if (p.getNode())
+ p.getNode()->setIROrder(idx + 1);
+ InVals.push_back(p);
+ ++InsIdx;
+ }
+ if (vtparts.size() > 0)
+ --InsIdx;
+ continue;
+ }
+ if (Ty->isVectorTy()) {
+ EVT ObjectVT = getValueType(Ty);
+ SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
unsigned NumElts = ObjectVT.getVectorNumElements();
+ assert(TLI->getNumRegisters(F->getContext(), ObjectVT) == NumElts &&
+ "Vector was not scalarized");
+ unsigned Ofst = 0;
EVT EltVT = ObjectVT.getVectorElementType();
- unsigned Offset = 0;
- for (unsigned vi = 0; vi < NumElts; ++vi) {
- SDValue A = getParamSymbol(DAG, idx, getPointerTy());
- SDValue B = DAG.getIntPtrConstant(Offset);
- SDValue Addr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
- //getParamSymbol(DAG, idx, EltVT),
- //DAG.getConstant(Offset, getPointerTy()));
- A, B);
+
+ // V1 load
+ // f32 = load ...
+ if (NumElts == 1) {
+ // We only have one element, so just directly load it
Value *SrcValue = Constant::getNullValue(PointerType::get(
EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
- SDValue Ld = DAG.getLoad(
- EltVT, dl, Root, Addr, MachinePointerInfo(SrcValue), false, false,
- false,
+ SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
+ DAG.getConstant(Ofst, getPointerTy()));
+ SDValue P = DAG.getLoad(
+ EltVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
+ false, true,
TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
- Offset += EltVT.getStoreSizeInBits() / 8;
- InVals.push_back(Ld);
+ if (P.getNode())
+ P.getNode()->setIROrder(idx + 1);
+
+ if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
+ P = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, P);
+ InVals.push_back(P);
+ Ofst += TD->getTypeAllocSize(EltVT.getTypeForEVT(F->getContext()));
+ ++InsIdx;
+ } else if (NumElts == 2) {
+ // V2 load
+ // f32,f32 = load ...
+ EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, 2);
+ Value *SrcValue = Constant::getNullValue(PointerType::get(
+ VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
+ SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
+ DAG.getConstant(Ofst, getPointerTy()));
+ SDValue P = DAG.getLoad(
+ VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
+ false, true,
+ TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
+ if (P.getNode())
+ P.getNode()->setIROrder(idx + 1);
+
+ SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
+ DAG.getIntPtrConstant(0));
+ SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
+ DAG.getIntPtrConstant(1));
+
+ if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
+ Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0);
+ Elt1 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt1);
+ }
+
+ InVals.push_back(Elt0);
+ InVals.push_back(Elt1);
+ Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
+ InsIdx += 2;
+ } else {
+ // V4 loads
+ // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
+ // vector will be expanded to a power of 2 elements, so we know we can
+ // always round up to the next multiple of 4 when creating the vector
+ // loads.
+ // e.g. 4 elem => 1 ld.v4
+ // 6 elem => 2 ld.v4
+ // 8 elem => 2 ld.v4
+ // 11 elem => 3 ld.v4
+ unsigned VecSize = 4;
+ if (EltVT.getSizeInBits() == 64) {
+ VecSize = 2;
+ }
+ EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
+ for (unsigned i = 0; i < NumElts; i += VecSize) {
+ Value *SrcValue = Constant::getNullValue(
+ PointerType::get(VecVT.getTypeForEVT(F->getContext()),
+ llvm::ADDRESS_SPACE_PARAM));
+ SDValue SrcAddr =
+ DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
+ DAG.getConstant(Ofst, getPointerTy()));
+ SDValue P = DAG.getLoad(
+ VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
+ false, true,
+ TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
+ if (P.getNode())
+ P.getNode()->setIROrder(idx + 1);
+
+ for (unsigned j = 0; j < VecSize; ++j) {
+ if (i + j >= NumElts)
+ break;
+ SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
+ DAG.getIntPtrConstant(j));
+ if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
+ Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
+ InVals.push_back(Elt);
+ }
+ Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
+ }
+ InsIdx += NumElts;
}
+
+ if (NumElts > 0)
+ --InsIdx;
continue;
}
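// A small standalone illustration (not part of the patch) of the "round up to
// a whole vector" rule from the V4-load comment above: the number of
// ld.v2/ld.v4 operations emitted for a vector parameter is the element count
// divided by the vector width, rounded up. The width choice mirrors the code
// above; the helper name is hypothetical.
static unsigned sketchNumParamVecLoads(unsigned NumElts, unsigned EltBits) {
  unsigned VecSize = (EltBits == 64) ? 2 : 4;   // 64-bit elements use ld.v2
  return (NumElts + VecSize - 1) / VecSize;     // 4 -> 1, 6 -> 2, 8 -> 2, 11 -> 3
}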
-
// A plain scalar.
- if (isABI || isKernel) {
- // If ABI, load from the param symbol
- SDValue Arg = getParamSymbol(DAG, idx);
- // Conjure up a value that we can get the address space from.
- // FIXME: Using a constant here is a hack.
- Value *srcValue = Constant::getNullValue(
- PointerType::get(ObjectVT.getTypeForEVT(F->getContext()),
- llvm::ADDRESS_SPACE_PARAM));
- SDValue p = DAG.getLoad(
- ObjectVT, dl, Root, Arg, MachinePointerInfo(srcValue), false, false,
- false,
- TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
- if (p.getNode())
- DAG.AssignOrdering(p.getNode(), idx + 1);
- InVals.push_back(p);
+ EVT ObjectVT = getValueType(Ty);
+ // If ABI, load from the param symbol
+ SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
+ Value *srcValue = Constant::getNullValue(PointerType::get(
+ ObjectVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
+ SDValue p;
+ if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
+ ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
+ ISD::SEXTLOAD : ISD::ZEXTLOAD;
+ p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
+ MachinePointerInfo(srcValue), ObjectVT, false, false,
+ TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
} else {
- // If no ABI, just move the param symbol
- SDValue Arg = getParamSymbol(DAG, idx, ObjectVT);
- SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
- if (p.getNode())
- DAG.AssignOrdering(p.getNode(), idx + 1);
- InVals.push_back(p);
+ p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
+ MachinePointerInfo(srcValue), false, false, false,
+ TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
}
+ if (p.getNode())
+ p.getNode()->setIROrder(idx + 1);
+ InVals.push_back(p);
continue;
}
// Param has ByVal attribute
- if (isABI || isKernel) {
- // Return MoveParam(param symbol).
- // Ideally, the param symbol can be returned directly,
- // but when SDNode builder decides to use it in a CopyToReg(),
- // machine instruction fails because TargetExternalSymbol
- // (not lowered) is target dependent, and CopyToReg assumes
- // the source is lowered.
- SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
- SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
- if (p.getNode())
- DAG.AssignOrdering(p.getNode(), idx + 1);
- if (isKernel)
- InVals.push_back(p);
- else {
- SDValue p2 = DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
- DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
- InVals.push_back(p2);
- }
- } else {
- // Have to move a set of param symbols to registers and
- // store them locally and return the local pointer in InVals
- const PointerType *elemPtrType = dyn_cast<PointerType>(argTypes[i]);
- assert(elemPtrType && "Byval parameter should be a pointer type");
- Type *elemType = elemPtrType->getElementType();
- // Compute the constituent parts
- SmallVector<EVT, 16> vtparts;
- SmallVector<uint64_t, 16> offsets;
- ComputeValueVTs(*this, elemType, vtparts, &offsets, 0);
- unsigned totalsize = 0;
- for (unsigned j = 0, je = vtparts.size(); j != je; ++j)
- totalsize += vtparts[j].getStoreSizeInBits();
- SDValue localcopy = DAG.getFrameIndex(
- MF.getFrameInfo()->CreateStackObject(totalsize / 8, 16, false),
- getPointerTy());
- unsigned sizesofar = 0;
- std::vector<SDValue> theChains;
- for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
- unsigned numElems = 1;
- if (vtparts[j].isVector())
- numElems = vtparts[j].getVectorNumElements();
- for (unsigned k = 0, ke = numElems; k != ke; ++k) {
- EVT tmpvt = vtparts[j];
- if (tmpvt.isVector())
- tmpvt = tmpvt.getVectorElementType();
- SDValue arg = DAG.getNode(NVPTXISD::MoveParam, dl, tmpvt,
- getParamSymbol(DAG, idx, tmpvt));
- SDValue addr =
- DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy,
- DAG.getConstant(sizesofar, getPointerTy()));
- theChains.push_back(DAG.getStore(
- Chain, dl, arg, addr, MachinePointerInfo(), false, false, 0));
- sizesofar += tmpvt.getStoreSizeInBits() / 8;
- ++idx;
- }
- }
- --idx;
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &theChains[0],
- theChains.size());
- InVals.push_back(localcopy);
+ // Return MoveParam(param symbol).
+ // Ideally, the param symbol can be returned directly,
+ // but when SDNode builder decides to use it in a CopyToReg(),
+ // machine instruction fails because TargetExternalSymbol
+ // (not lowered) is target dependent, and CopyToReg assumes
+ // the source is lowered.
+ EVT ObjectVT = getValueType(Ty);
+ assert(ObjectVT == Ins[InsIdx].VT &&
+ "Ins type did not match function type");
+ SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
+ SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
+ if (p.getNode())
+ p.getNode()->setIROrder(idx + 1);
+ if (isKernel)
+ InVals.push_back(p);
+ else {
+ SDValue p2 = DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
+ DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
+ InVals.push_back(p2);
}
}
// Clang will check explicit VarArg and issue error if any. However, Clang
// will let code with
- // implicit var arg like f() pass.
+ // implicit var arg like f() pass. See bug 617733.
// We treat this case as if the arg list is empty.
- //if (F.isVarArg()) {
+ // if (F.isVarArg()) {
// assert(0 && "VarArg not supported yet!");
//}
@@ -1237,43 +1690,185 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
return Chain;
}
-SDValue NVPTXTargetLowering::LowerReturn(
- SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl,
- SelectionDAG &DAG) const {
+
+SDValue
+NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc dl, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ const Function *F = MF.getFunction();
+ Type *RetTy = F->getReturnType();
+ const DataLayout *TD = getDataLayout();
bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+
+ if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) {
+ // If we have a vector type, the OutVals array will be the scalarized
+ // components and we have to combine them into 1 or more vector stores.
+ unsigned NumElts = VTy->getNumElements();
+ assert(NumElts == Outs.size() && "Bad scalarization of return value");
+
+ // const_cast can be removed in later LLVM versions
+ EVT EltVT = getValueType(RetTy).getVectorElementType();
+ bool NeedExtend = false;
+ if (EltVT.getSizeInBits() < 16)
+ NeedExtend = true;
+
+ // V1 store
+ if (NumElts == 1) {
+ SDValue StoreVal = OutVals[0];
+ // We only have one element, so just directly store it
+ if (NeedExtend)
+ StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
+ SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
+ DAG.getVTList(MVT::Other), &Ops[0], 3,
+ EltVT, MachinePointerInfo());
+
+ } else if (NumElts == 2) {
+ // V2 store
+ SDValue StoreVal0 = OutVals[0];
+ SDValue StoreVal1 = OutVals[1];
+
+ if (NeedExtend) {
+ StoreVal0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal0);
+ StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1);
+ }
- unsigned sizesofar = 0;
- unsigned idx = 0;
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- SDValue theVal = OutVals[i];
- EVT theValType = theVal.getValueType();
- unsigned numElems = 1;
- if (theValType.isVector())
- numElems = theValType.getVectorNumElements();
- for (unsigned j = 0, je = numElems; j != je; ++j) {
- SDValue tmpval = theVal;
- if (theValType.isVector())
- tmpval = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- theValType.getVectorElementType(), tmpval,
- DAG.getIntPtrConstant(j));
- Chain = DAG.getNode(
- isABI ? NVPTXISD::StoreRetval : NVPTXISD::MoveToRetval, dl,
- MVT::Other, Chain, DAG.getConstant(isABI ? sizesofar : idx, MVT::i32),
- tmpval);
- if (theValType.isVector())
- sizesofar += theValType.getVectorElementType().getStoreSizeInBits() / 8;
- else
- sizesofar += theValType.getStoreSizeInBits() / 8;
- ++idx;
+ SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
+ StoreVal1 };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
+ DAG.getVTList(MVT::Other), &Ops[0], 4,
+ EltVT, MachinePointerInfo());
+ } else {
+ // V4 stores
+ // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
+ // vector will be expanded to a power of 2 elements, so we know we can
+ // always round up to the next multiple of 4 when creating the vector
+ // stores.
+ // e.g. 4 elem => 1 st.v4
+ // 6 elem => 2 st.v4
+ // 8 elem => 2 st.v4
+ // 11 elem => 3 st.v4
+
+ unsigned VecSize = 4;
+ if (OutVals[0].getValueType().getSizeInBits() == 64)
+ VecSize = 2;
+
+ unsigned Offset = 0;
+
+ EVT VecVT =
+ EVT::getVectorVT(F->getContext(), OutVals[0].getValueType(), VecSize);
+ unsigned PerStoreOffset =
+ TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
+
+ for (unsigned i = 0; i < NumElts; i += VecSize) {
+ // Get values
+ SDValue StoreVal;
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(DAG.getConstant(Offset, MVT::i32));
+ unsigned Opc = NVPTXISD::StoreRetvalV2;
+ EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType();
+
+ StoreVal = OutVals[i];
+ if (NeedExtend)
+ StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
+ Ops.push_back(StoreVal);
+
+ if (i + 1 < NumElts) {
+ StoreVal = OutVals[i + 1];
+ if (NeedExtend)
+ StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
+ } else {
+ StoreVal = DAG.getUNDEF(ExtendedVT);
+ }
+ Ops.push_back(StoreVal);
+
+ if (VecSize == 4) {
+ Opc = NVPTXISD::StoreRetvalV4;
+ if (i + 2 < NumElts) {
+ StoreVal = OutVals[i + 2];
+ if (NeedExtend)
+ StoreVal =
+ DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
+ } else {
+ StoreVal = DAG.getUNDEF(ExtendedVT);
+ }
+ Ops.push_back(StoreVal);
+
+ if (i + 3 < NumElts) {
+ StoreVal = OutVals[i + 3];
+ if (NeedExtend)
+ StoreVal =
+ DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
+ } else {
+ StoreVal = DAG.getUNDEF(ExtendedVT);
+ }
+ Ops.push_back(StoreVal);
+ }
+
+ // Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size());
+ Chain =
+ DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), &Ops[0],
+ Ops.size(), EltVT, MachinePointerInfo());
+ Offset += PerStoreOffset;
+ }
+ }
+ } else {
+ SmallVector<EVT, 16> ValVTs;
+ // const_cast is necessary since we are still using an LLVM version from
+ // before the type system re-write.
+ ComputePTXValueVTs(*this, RetTy, ValVTs);
+ assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");
+
+ unsigned SizeSoFar = 0;
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ SDValue theVal = OutVals[i];
+ EVT TheValType = theVal.getValueType();
+ unsigned numElems = 1;
+ if (TheValType.isVector())
+ numElems = TheValType.getVectorNumElements();
+ for (unsigned j = 0, je = numElems; j != je; ++j) {
+ SDValue TmpVal = theVal;
+ if (TheValType.isVector())
+ TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
+ TheValType.getVectorElementType(), TmpVal,
+ DAG.getIntPtrConstant(j));
+ EVT TheStoreType = ValVTs[i];
+ if (RetTy->isIntegerTy() &&
+ TD->getTypeAllocSizeInBits(RetTy) < 32) {
+ // The following zero-extension is for integer types only, and
+ // specifically not for aggregates.
+ TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);
+ TheStoreType = MVT::i32;
+ }
+ else if (TmpVal.getValueType().getSizeInBits() < 16)
+ TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);
+
+ SDValue Ops[] = { Chain, DAG.getConstant(SizeSoFar, MVT::i32), TmpVal };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
+ DAG.getVTList(MVT::Other), &Ops[0],
+ 3, TheStoreType,
+ MachinePointerInfo());
+ if(TheValType.isVector())
+ SizeSoFar +=
+ TheStoreType.getVectorElementType().getStoreSizeInBits() / 8;
+ else
+ SizeSoFar += TheStoreType.getStoreSizeInBits()/8;
+ }
}
}
return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
}
+
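// A hedged sketch (not part of the patch) of the promotion rule the scalar
// return path above applies to each StoreRetval piece: sub-32-bit integer
// return types are zero-extended and stored as i32, while any other piece
// narrower than 16 bits is widened into a 16-bit register but keeps its
// narrow memory type. The helper name is hypothetical.
static EVT sketchPromoteRetPiece(SDValue &Val, EVT MemVT, Type *RetTy,
                                 const DataLayout *TD, SelectionDAG &DAG,
                                 SDLoc dl) {
  if (RetTy->isIntegerTy() && TD->getTypeAllocSizeInBits(RetTy) < 32) {
    Val = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Val);
    return MVT::i32;        // store the widened piece with an i32 memory type
  }
  if (Val.getValueType().getSizeInBits() < 16)
    Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, Val);  // register widening only
  return MemVT;             // memory type keeps the declared width
}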
void NVPTXTargetLowering::LowerAsmOperandForConstraint(
SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
@@ -1337,9 +1932,9 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic(
Info.opc = ISD::INTRINSIC_W_CHAIN;
if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
- Info.memVT = MVT::i32;
+ Info.memVT = getValueType(I.getType());
else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
- Info.memVT = getPointerTy();
+ Info.memVT = getValueType(I.getType());
else
Info.memVT = MVT::f32;
Info.ptrVal = I.getArgOperand(0);
@@ -1420,11 +2015,11 @@ NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
std::pair<unsigned, const TargetRegisterClass *>
NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'c':
- return std::make_pair(0U, &NVPTX::Int8RegsRegClass);
+ return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
case 'h':
return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
case 'r':
@@ -1450,7 +2045,7 @@ unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &Results) {
EVT ResVT = N->getValueType(0);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
assert(ResVT.isVector() && "Vector load must have vector type");
@@ -1543,7 +2138,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &Results) {
SDValue Chain = N->getOperand(0);
SDValue Intrin = N->getOperand(1);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
// Get the intrinsic ID
unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
@@ -1564,7 +2159,8 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
unsigned NumElts = ResVT.getVectorNumElements();
EVT EltVT = ResVT.getVectorElementType();
- // Since LDU/LDG are target nodes, we cannot rely on DAG type legalization.
+ // Since LDU/LDG are target nodes, we cannot rely on DAG type
+ // legalization.
// Therefore, we must ensure the type is legal. For i1 and i8, we set the
 // loaded type to i16 and propagate the "real" type as the memory type.
bool NeedTrunc = false;
@@ -1623,7 +2219,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
OtherOps.push_back(Chain); // Chain
// Skip operand 1 (intrinsic ID)
- // Others
+ // Others
for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
OtherOps.push_back(N->getOperand(i));
@@ -1671,7 +2267,8 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, &Ops[0],
Ops.size(), MVT::i8, MemSD->getMemOperand());
- Results.push_back(NewLD.getValue(0));
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
+ NewLD.getValue(0)));
Results.push_back(NewLD.getValue(1));
}
}
@@ -1691,3 +2288,29 @@ void NVPTXTargetLowering::ReplaceNodeResults(
return;
}
}
+
+// Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
+void NVPTXSection::anchor() {}
+
+NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
+ delete TextSection;
+ delete DataSection;
+ delete BSSSection;
+ delete ReadOnlySection;
+
+ delete StaticCtorSection;
+ delete StaticDtorSection;
+ delete LSDASection;
+ delete EHFrameSection;
+ delete DwarfAbbrevSection;
+ delete DwarfInfoSection;
+ delete DwarfLineSection;
+ delete DwarfFrameSection;
+ delete DwarfPubTypesSection;
+ delete DwarfDebugInlineSection;
+ delete DwarfStrSection;
+ delete DwarfLocSection;
+ delete DwarfARangesSection;
+ delete DwarfRangesSection;
+ delete DwarfMacroInfoSection;
+}
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.h b/lib/Target/NVPTX/NVPTXISelLowering.h
index 3cd49d38af76..66e708fcea07 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -29,17 +29,11 @@ enum NodeType {
CALL,
RET_FLAG,
LOAD_PARAM,
- NVBuiltin,
DeclareParam,
DeclareScalarParam,
DeclareRetParam,
DeclareRet,
DeclareScalarRet,
- LoadParam,
- StoreParam,
- StoreParamS32, // to sext and store a <32bit value, not used currently
- StoreParamU32, // to zext and store a <32bit value, not used currently
- MoveToParam,
PrintCall,
PrintCallUni,
CallArgBegin,
@@ -51,13 +45,11 @@ enum NodeType {
CallSymbol,
Prototype,
MoveParam,
- MoveRetval,
- MoveToRetval,
- StoreRetval,
PseudoUseParam,
RETURN,
CallSeqBegin,
CallSeqEnd,
+ CallPrototype,
Dummy,
LoadV2 = ISD::FIRST_TARGET_MEMORY_OPCODE,
@@ -67,7 +59,18 @@ enum NodeType {
LDUV2, // LDU.v2
LDUV4, // LDU.v4
StoreV2,
- StoreV4
+ StoreV4,
+ LoadParam,
+ LoadParamV2,
+ LoadParamV4,
+ StoreParam,
+ StoreParamV2,
+ StoreParamV4,
+ StoreParamS32, // to sext and store a <32bit value, not used currently
+ StoreParamU32, // to zext and store a <32bit value, not used currently
+ StoreRetval,
+ StoreRetvalV2,
+ StoreRetvalV4
};
}
@@ -100,7 +103,7 @@ public:
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const;
- virtual EVT getSetCCResultType(EVT VT) const {
+ virtual EVT getSetCCResultType(LLVMContext &, EVT VT) const {
if (VT.isVector())
return MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
return MVT::i1;
@@ -108,11 +111,11 @@ public:
ConstraintType getConstraintType(const std::string &Constraint) const;
std::pair<unsigned, const TargetRegisterClass *>
- getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
+ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
virtual SDValue LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
@@ -120,12 +123,13 @@ public:
std::string getPrototype(Type *, const ArgListTy &,
const SmallVectorImpl<ISD::OutputArg> &,
- unsigned retAlignment) const;
+ unsigned retAlignment,
+ const ImmutableCallSite *CS) const;
virtual SDValue
LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl,
+ const SmallVectorImpl<SDValue> &OutVals, SDLoc dl,
SelectionDAG &DAG) const;
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
@@ -144,7 +148,7 @@ private:
SDValue getExtSymb(SelectionDAG &DAG, const char *name, int idx,
EVT = MVT::i32) const;
- SDValue getParamSymbol(SelectionDAG &DAG, int idx, EVT = MVT::i32) const;
+ SDValue getParamSymbol(SelectionDAG &DAG, int idx, EVT) const;
SDValue getParamHelpSymbol(SelectionDAG &DAG, int idx);
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
@@ -158,6 +162,9 @@ private:
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const;
+
+ unsigned getArgumentAlignment(SDValue Callee, const ImmutableCallSite *CS,
+ Type *Ty, unsigned Idx) const;
};
} // namespace llvm
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/lib/Target/NVPTX/NVPTXInstrInfo.cpp
index 33a63c26f4e2..86ddd3817709 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.cpp
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.cpp
@@ -14,54 +14,53 @@
#include "NVPTX.h"
#include "NVPTXInstrInfo.h"
#include "NVPTXTargetMachine.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "NVPTXGenInstrInfo.inc"
#include "llvm/IR/Function.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include <cstdio>
using namespace llvm;
+// Pin the vtable to this file.
+void NVPTXInstrInfo::anchor() {}
+
// FIXME: Add the subtarget support on this constructor.
NVPTXInstrInfo::NVPTXInstrInfo(NVPTXTargetMachine &tm)
- : NVPTXGenInstrInfo(), TM(tm), RegInfo(*this, *TM.getSubtargetImpl()) {}
+ : NVPTXGenInstrInfo(), TM(tm), RegInfo(*TM.getSubtargetImpl()) {}
void NVPTXInstrInfo::copyPhysReg(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg, bool KillSrc) const {
- if (NVPTX::Int32RegsRegClass.contains(DestReg) &&
- NVPTX::Int32RegsRegClass.contains(SrcReg))
+ const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ const TargetRegisterClass *DestRC = MRI.getRegClass(DestReg);
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
+
+ if (DestRC != SrcRC)
+ report_fatal_error("Attempted to create a cross-class register copy");
+
+ if (DestRC == &NVPTX::Int32RegsRegClass)
BuildMI(MBB, I, DL, get(NVPTX::IMOV32rr), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- else if (NVPTX::Int8RegsRegClass.contains(DestReg) &&
- NVPTX::Int8RegsRegClass.contains(SrcReg))
- BuildMI(MBB, I, DL, get(NVPTX::IMOV8rr), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- else if (NVPTX::Int1RegsRegClass.contains(DestReg) &&
- NVPTX::Int1RegsRegClass.contains(SrcReg))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (DestRC == &NVPTX::Int1RegsRegClass)
BuildMI(MBB, I, DL, get(NVPTX::IMOV1rr), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- else if (NVPTX::Float32RegsRegClass.contains(DestReg) &&
- NVPTX::Float32RegsRegClass.contains(SrcReg))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (DestRC == &NVPTX::Float32RegsRegClass)
BuildMI(MBB, I, DL, get(NVPTX::FMOV32rr), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- else if (NVPTX::Int16RegsRegClass.contains(DestReg) &&
- NVPTX::Int16RegsRegClass.contains(SrcReg))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (DestRC == &NVPTX::Int16RegsRegClass)
BuildMI(MBB, I, DL, get(NVPTX::IMOV16rr), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- else if (NVPTX::Int64RegsRegClass.contains(DestReg) &&
- NVPTX::Int64RegsRegClass.contains(SrcReg))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (DestRC == &NVPTX::Int64RegsRegClass)
BuildMI(MBB, I, DL, get(NVPTX::IMOV64rr), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- else if (NVPTX::Float64RegsRegClass.contains(DestReg) &&
- NVPTX::Float64RegsRegClass.contains(SrcReg))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (DestRC == &NVPTX::Float64RegsRegClass)
BuildMI(MBB, I, DL, get(NVPTX::FMOV64rr), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
+ .addReg(SrcReg, getKillRegState(KillSrc));
else {
- llvm_unreachable("Don't know how to copy a register");
+ llvm_unreachable("Bad register copy");
}
}
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.h b/lib/Target/NVPTX/NVPTXInstrInfo.h
index b1972e9b7254..600fc5c60a1b 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.h
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.h
@@ -26,6 +26,7 @@ namespace llvm {
class NVPTXInstrInfo : public NVPTXGenInstrInfo {
NVPTXTargetMachine &TM;
const NVPTXRegisterInfo RegInfo;
+ virtual void anchor();
public:
explicit NVPTXInstrInfo(NVPTXTargetMachine &TM);
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.td b/lib/Target/NVPTX/NVPTXInstrInfo.td
index da6dd39b9314..b23f1e4b458d 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -32,6 +32,86 @@ def isVecOther : VecInstTypeEnum<15>;
def brtarget : Operand<OtherVT>;
+// CVT conversion modes
+// These must match the enum in NVPTX.h
+def CvtNONE : PatLeaf<(i32 0x0)>;
+def CvtRNI : PatLeaf<(i32 0x1)>;
+def CvtRZI : PatLeaf<(i32 0x2)>;
+def CvtRMI : PatLeaf<(i32 0x3)>;
+def CvtRPI : PatLeaf<(i32 0x4)>;
+def CvtRN : PatLeaf<(i32 0x5)>;
+def CvtRZ : PatLeaf<(i32 0x6)>;
+def CvtRM : PatLeaf<(i32 0x7)>;
+def CvtRP : PatLeaf<(i32 0x8)>;
+
+def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
+def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
+def CvtRZI_FTZ : PatLeaf<(i32 0x12)>;
+def CvtRMI_FTZ : PatLeaf<(i32 0x13)>;
+def CvtRPI_FTZ : PatLeaf<(i32 0x14)>;
+def CvtRN_FTZ : PatLeaf<(i32 0x15)>;
+def CvtRZ_FTZ : PatLeaf<(i32 0x16)>;
+def CvtRM_FTZ : PatLeaf<(i32 0x17)>;
+def CvtRP_FTZ : PatLeaf<(i32 0x18)>;
+
+def CvtSAT : PatLeaf<(i32 0x20)>;
+def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
+
+def CvtMode : Operand<i32> {
+ let PrintMethod = "printCvtMode";
+}
+
+// Compare modes
+// These must match the enum in NVPTX.h
+def CmpEQ : PatLeaf<(i32 0)>;
+def CmpNE : PatLeaf<(i32 1)>;
+def CmpLT : PatLeaf<(i32 2)>;
+def CmpLE : PatLeaf<(i32 3)>;
+def CmpGT : PatLeaf<(i32 4)>;
+def CmpGE : PatLeaf<(i32 5)>;
+def CmpLO : PatLeaf<(i32 6)>;
+def CmpLS : PatLeaf<(i32 7)>;
+def CmpHI : PatLeaf<(i32 8)>;
+def CmpHS : PatLeaf<(i32 9)>;
+def CmpEQU : PatLeaf<(i32 10)>;
+def CmpNEU : PatLeaf<(i32 11)>;
+def CmpLTU : PatLeaf<(i32 12)>;
+def CmpLEU : PatLeaf<(i32 13)>;
+def CmpGTU : PatLeaf<(i32 14)>;
+def CmpGEU : PatLeaf<(i32 15)>;
+def CmpNUM : PatLeaf<(i32 16)>;
+def CmpNAN : PatLeaf<(i32 17)>;
+
+def CmpEQ_FTZ : PatLeaf<(i32 0x100)>;
+def CmpNE_FTZ : PatLeaf<(i32 0x101)>;
+def CmpLT_FTZ : PatLeaf<(i32 0x102)>;
+def CmpLE_FTZ : PatLeaf<(i32 0x103)>;
+def CmpGT_FTZ : PatLeaf<(i32 0x104)>;
+def CmpGE_FTZ : PatLeaf<(i32 0x105)>;
+def CmpLO_FTZ : PatLeaf<(i32 0x106)>;
+def CmpLS_FTZ : PatLeaf<(i32 0x107)>;
+def CmpHI_FTZ : PatLeaf<(i32 0x108)>;
+def CmpHS_FTZ : PatLeaf<(i32 0x109)>;
+def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>;
+def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>;
+def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>;
+def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>;
+def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>;
+def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>;
+def CmpNUM_FTZ : PatLeaf<(i32 0x110)>;
+def CmpNAN_FTZ : PatLeaf<(i32 0x111)>;
+
+def CmpMode : Operand<i32> {
+ let PrintMethod = "printCmpMode";
+}
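// A hedged note (not part of the patch): judging from the values above, these
// immediates appear to compose as a small base code plus flag bits -- 0x10 for
// the FTZ variants of the cvt modes, 0x20 for the cvt SAT variants, and 0x100
// for the FTZ variants of the cmp modes (e.g. CvtRZI_FTZ == 0x2 | 0x10 and
// CmpLO_FTZ == 6 | 0x100). The authoritative definitions are the enums in
// NVPTX.h; the C++ names below are illustrative only.
enum SketchModeFlagBits {
  SKETCH_CVT_FTZ_FLAG = 0x10,
  SKETCH_CVT_SAT_FLAG = 0x20,
  SKETCH_CMP_FTZ_FLAG = 0x100
};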
+
+def F32ConstZero : Operand<f32>, PatLeaf<(f32 fpimm)>, SDNodeXForm<fpimm, [{
+ return CurDAG->getTargetConstantFP(0.0, MVT::f32);
+ }]>;
+def F32ConstOne : Operand<f32>, PatLeaf<(f32 fpimm)>, SDNodeXForm<fpimm, [{
+ return CurDAG->getTargetConstantFP(1.0, MVT::f32);
+ }]>;
+
//===----------------------------------------------------------------------===//
// NVPTX Instruction Predicate Definitions
//===----------------------------------------------------------------------===//
@@ -56,127 +136,31 @@ def hasLDG : Predicate<"Subtarget.hasLDG()">;
def hasLDU : Predicate<"Subtarget.hasLDU()">;
def hasGenericLdSt : Predicate<"Subtarget.hasGenericLdSt()">;
-def doF32FTZ : Predicate<"UseF32FTZ">;
+def doF32FTZ : Predicate<"useF32FTZ()">;
+def doNoF32FTZ : Predicate<"!useF32FTZ()">;
def doFMAF32 : Predicate<"doFMAF32">;
-def doFMAF32_ftz : Predicate<"(doFMAF32 && UseF32FTZ)">;
+def doFMAF32_ftz : Predicate<"(doFMAF32 && useF32FTZ())">;
def doFMAF32AGG : Predicate<"doFMAF32AGG">;
-def doFMAF32AGG_ftz : Predicate<"(doFMAF32AGG && UseF32FTZ)">;
+def doFMAF32AGG_ftz : Predicate<"(doFMAF32AGG && useF32FTZ())">;
def doFMAF64 : Predicate<"doFMAF64">;
def doFMAF64AGG : Predicate<"doFMAF64AGG">;
-def doFMADF32 : Predicate<"doFMADF32">;
-def doFMADF32_ftz : Predicate<"(doFMADF32 && UseF32FTZ)">;
def doMulWide : Predicate<"doMulWide">;
def allowFMA : Predicate<"allowFMA">;
-def allowFMA_ftz : Predicate<"(allowFMA && UseF32FTZ)">;
+def allowFMA_ftz : Predicate<"(allowFMA && useF32FTZ())">;
-def do_DIVF32_APPROX : Predicate<"do_DIVF32_PREC==0">;
-def do_DIVF32_FULL : Predicate<"do_DIVF32_PREC==1">;
+def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;
+def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
-def do_SQRTF32_APPROX : Predicate<"do_SQRTF32_PREC==0">;
-def do_SQRTF32_RN : Predicate<"do_SQRTF32_PREC==1">;
+def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
+def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
def hasHWROT32 : Predicate<"Subtarget.hasHWROT32()">;
def true : Predicate<"1">;
-//===----------------------------------------------------------------------===//
-// Special Handling for 8-bit Operands and Operations
-//
-// PTX supports 8-bit signed and unsigned types, but does not support 8-bit
-// operations (like add, shift, etc) except for ld/st/cvt. SASS does not have
-// 8-bit registers.
-//
-// PTX ld, st and cvt instructions permit source and destination data operands
-// to be wider than the instruction-type size, so that narrow values may be
-// loaded, stored, and converted using regular-width registers.
-//
-// So in PTX generation, we
-// - always use 16-bit registers in place in 8-bit registers.
-// (8-bit variables should stay as 8-bit as they represent memory layout.)
-// - for the following 8-bit operations, we sign-ext/zero-ext the 8-bit values
-// before operation
-// . div
-// . rem
-// . neg (sign)
-// . set, setp
-// . shr
-//
-// We are patching the operations by inserting the cvt instructions in the
-// asm strings of the affected instructions.
-//
-// Since vector operations, except for ld/st, are eventually elementized. We
-// do not need to special-hand the vector 8-bit operations.
-//
-//
-//===----------------------------------------------------------------------===//
-
-// Generate string block like
-// {
-// .reg .s16 %temp1;
-// .reg .s16 %temp2;
-// cvt.s16.s8 %temp1, %a;
-// cvt.s16.s8 %temp2, %b;
-// opc.s16 %dst, %temp1, %temp2;
-// }
-// when OpcStr=opc.s TypeStr=s16 CVTStr=cvt.s16.s8
-class Handle_i8rr<string OpcStr, string TypeStr, string CVTStr> {
- string s = !strconcat("{{\n\t",
- !strconcat(".reg .", !strconcat(TypeStr,
- !strconcat(" \t%temp1;\n\t",
- !strconcat(".reg .", !strconcat(TypeStr,
- !strconcat(" \t%temp2;\n\t",
- !strconcat(CVTStr, !strconcat(" \t%temp1, $a;\n\t",
- !strconcat(CVTStr, !strconcat(" \t%temp2, $b;\n\t",
- !strconcat(OpcStr, "16 \t$dst, %temp1, %temp2;\n\t}}"))))))))))));
-}
-
-// Generate string block like
-// {
-// .reg .s16 %temp1;
-// .reg .s16 %temp2;
-// cvt.s16.s8 %temp1, %a;
-// mov.b16 %temp2, %b;
-// cvt.s16.s8 %temp2, %temp2;
-// opc.s16 %dst, %temp1, %temp2;
-// }
-// when OpcStr=opc.s TypeStr=s16 CVTStr=cvt.s16.s8
-class Handle_i8ri<string OpcStr, string TypeStr, string CVTStr> {
- string s = !strconcat("{{\n\t",
- !strconcat(".reg .", !strconcat(TypeStr,
- !strconcat(" \t%temp1;\n\t",
- !strconcat(".reg .",
- !strconcat(TypeStr, !strconcat(" \t%temp2;\n\t",
- !strconcat(CVTStr, !strconcat(" \t%temp1, $a;\n\t",
- !strconcat("mov.b16 \t%temp2, $b;\n\t",
- !strconcat(CVTStr, !strconcat(" \t%temp2, %temp2;\n\t",
- !strconcat(OpcStr, "16 \t$dst, %temp1, %temp2;\n\t}}")))))))))))));
-}
-
-// Generate string block like
-// {
-// .reg .s16 %temp1;
-// .reg .s16 %temp2;
-// mov.b16 %temp1, %b;
-// cvt.s16.s8 %temp1, %temp1;
-// cvt.s16.s8 %temp2, %a;
-// opc.s16 %dst, %temp1, %temp2;
-// }
-// when OpcStr=opc.s TypeStr=s16 CVTStr=cvt.s16.s8
-class Handle_i8ir<string OpcStr, string TypeStr, string CVTStr> {
- string s = !strconcat("{{\n\t",
- !strconcat(".reg .", !strconcat(TypeStr,
- !strconcat(" \t%temp1;\n\t",
- !strconcat(".reg .", !strconcat(TypeStr,
- !strconcat(" \t%temp2;\n\t",
- !strconcat("mov.b16 \t%temp1, $a;\n\t",
- !strconcat(CVTStr, !strconcat(" \t%temp1, %temp1;\n\t",
- !strconcat(CVTStr, !strconcat(" \t%temp2, $b;\n\t",
- !strconcat(OpcStr, "16 \t$dst, %temp1, %temp2;\n\t}}")))))))))))));
-}
-
//===----------------------------------------------------------------------===//
// Some Common Instruction Class Templates
@@ -204,66 +188,6 @@ multiclass I3<string OpcStr, SDNode OpNode> {
def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
!strconcat(OpcStr, "16 \t$dst, $a, $b;"),
[(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
- def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int8Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>;
- def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int8Regs:$dst, (OpNode Int8Regs:$a, (imm):$b))]>;
-}
-
-multiclass I3_i8<string OpcStr, SDNode OpNode, string TypeStr, string CVTStr> {
- def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
- Int64Regs:$b))]>;
- def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
- def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
- Int32Regs:$b))]>;
- def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
- Int16Regs:$b))]>;
- def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
- def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
- Handle_i8rr<OpcStr, TypeStr, CVTStr>.s,
- [(set Int8Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>;
- def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
- Handle_i8ri<OpcStr, TypeStr, CVTStr>.s,
- [(set Int8Regs:$dst, (OpNode Int8Regs:$a, (imm):$b))]>;
-}
-
-multiclass I3_noi8<string OpcStr, SDNode OpNode> {
- def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
- Int64Regs:$b))]>;
- def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
- def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
- Int32Regs:$b))]>;
- def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
- Int16Regs:$b))]>;
- def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
}
multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
@@ -369,6 +293,90 @@ multiclass F2<string OpcStr, SDNode OpNode> {
//===----------------------------------------------------------------------===//
//-----------------------------------
+// General Type Conversion
+//-----------------------------------
+
+let neverHasSideEffects = 1 in {
+// Generate a cvt to the given type from all possible types.
+// Each instance takes a CvtMode immediate that defines the conversion mode to
+// use. It can be CvtNONE to omit a conversion mode.
+multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
+ def _s16 : NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s16\t$dst, $src;"),
+ []>;
+ def _u16 : NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u16\t$dst, $src;"),
+ []>;
+ def _f16 : NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f16\t$dst, $src;"),
+ []>;
+ def _s32 : NVPTXInst<(outs RC:$dst),
+ (ins Int32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s32\t$dst, $src;"),
+ []>;
+ def _u32 : NVPTXInst<(outs RC:$dst),
+ (ins Int32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u32\t$dst, $src;"),
+ []>;
+ def _s64 : NVPTXInst<(outs RC:$dst),
+ (ins Int64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s64\t$dst, $src;"),
+ []>;
+ def _u64 : NVPTXInst<(outs RC:$dst),
+ (ins Int64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u64\t$dst, $src;"),
+ []>;
+ def _f32 : NVPTXInst<(outs RC:$dst),
+ (ins Float32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f32\t$dst, $src;"),
+ []>;
+ def _f64 : NVPTXInst<(outs RC:$dst),
+ (ins Float64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f64\t$dst, $src;"),
+ []>;
+}
+
+// Generate a cvt to all possible types.
+defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
+defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
+defm CVT_f16 : CVT_FROM_ALL<"f16", Int16Regs>;
+defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
+defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
+defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
+defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
+defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
+defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
+
+// This set of cvt is different from the above. The type of the source
+// and target are the same.
+//
+def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "cvt.s16.s8 \t$dst, $src;", []>;
+def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "cvt.s32.s8 \t$dst, $src;", []>;
+def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "cvt.s32.s16 \t$dst, $src;", []>;
+def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s8 \t$dst, $src;", []>;
+def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s16 \t$dst, $src;", []>;
+def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s32 \t$dst, $src;", []>;
+}
+
+//-----------------------------------
// Integer Arithmetic
//-----------------------------------
@@ -522,81 +530,17 @@ def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
defm MULT : I3<"mul.lo.s", mul>;
-defm MULTHS : I3_noi8<"mul.hi.s", mulhs>;
-defm MULTHU : I3_noi8<"mul.hi.u", mulhu>;
-def MULTHSi8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
- !strconcat("{{ \n\t",
- !strconcat(".reg \t.s16 temp1; \n\t",
- !strconcat(".reg \t.s16 temp2; \n\t",
- !strconcat("cvt.s16.s8 \ttemp1, $a; \n\t",
- !strconcat("cvt.s16.s8 \ttemp2, $b; \n\t",
- !strconcat("mul.lo.s16 \t$dst, temp1, temp2; \n\t",
- !strconcat("shr.s16 \t$dst, $dst, 8; \n\t",
- !strconcat("}}", "")))))))),
- [(set Int8Regs:$dst, (mulhs Int8Regs:$a, Int8Regs:$b))]>;
-def MULTHSi8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
- !strconcat("{{ \n\t",
- !strconcat(".reg \t.s16 temp1; \n\t",
- !strconcat(".reg \t.s16 temp2; \n\t",
- !strconcat("cvt.s16.s8 \ttemp1, $a; \n\t",
- !strconcat("mov.b16 \ttemp2, $b; \n\t",
- !strconcat("cvt.s16.s8 \ttemp2, temp2; \n\t",
- !strconcat("mul.lo.s16 \t$dst, temp1, temp2; \n\t",
- !strconcat("shr.s16 \t$dst, $dst, 8; \n\t",
- !strconcat("}}", ""))))))))),
- [(set Int8Regs:$dst, (mulhs Int8Regs:$a, imm:$b))]>;
-def MULTHUi8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
- !strconcat("{{ \n\t",
- !strconcat(".reg \t.u16 temp1; \n\t",
- !strconcat(".reg \t.u16 temp2; \n\t",
- !strconcat("cvt.u16.u8 \ttemp1, $a; \n\t",
- !strconcat("cvt.u16.u8 \ttemp2, $b; \n\t",
- !strconcat("mul.lo.u16 \t$dst, temp1, temp2; \n\t",
- !strconcat("shr.u16 \t$dst, $dst, 8; \n\t",
- !strconcat("}}", "")))))))),
- [(set Int8Regs:$dst, (mulhu Int8Regs:$a, Int8Regs:$b))]>;
-def MULTHUi8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
- !strconcat("{{ \n\t",
- !strconcat(".reg \t.u16 temp1; \n\t",
- !strconcat(".reg \t.u16 temp2; \n\t",
- !strconcat("cvt.u16.u8 \ttemp1, $a; \n\t",
- !strconcat("mov.b16 \ttemp2, $b; \n\t",
- !strconcat("cvt.u16.u8 \ttemp2, temp2; \n\t",
- !strconcat("mul.lo.u16 \t$dst, temp1, temp2; \n\t",
- !strconcat("shr.u16 \t$dst, $dst, 8; \n\t",
- !strconcat("}}", ""))))))))),
- [(set Int8Regs:$dst, (mulhu Int8Regs:$a, imm:$b))]>;
-
-
-defm SDIV : I3_i8<"div.s", sdiv, "s16", "cvt.s16.s8">;
-defm UDIV : I3_i8<"div.u", udiv, "u16", "cvt.u16.u8">;
-
-defm SREM : I3_i8<"rem.s", srem, "s16", "cvt.s16.s8">;
+defm MULTHS : I3<"mul.hi.s", mulhs>;
+defm MULTHU : I3<"mul.hi.u", mulhu>;
+
+defm SDIV : I3<"div.s", sdiv>;
+defm UDIV : I3<"div.u", udiv>;
+
+defm SREM : I3<"rem.s", srem>;
// The ri version will not be selected as DAGCombiner::visitSREM will lower it.
-defm UREM : I3_i8<"rem.u", urem, "u16", "cvt.u16.u8">;
+defm UREM : I3<"rem.u", urem>;
// The ri version will not be selected as DAGCombiner::visitUREM will lower it.
-def MAD8rrr : NVPTXInst<(outs Int8Regs:$dst),
- (ins Int8Regs:$a, Int8Regs:$b, Int8Regs:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int8Regs:$dst, (add (mul Int8Regs:$a, Int8Regs:$b),
- Int8Regs:$c))]>;
-def MAD8rri : NVPTXInst<(outs Int8Regs:$dst),
- (ins Int8Regs:$a, Int8Regs:$b, i8imm:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int8Regs:$dst, (add (mul Int8Regs:$a, Int8Regs:$b),
- imm:$c))]>;
-def MAD8rir : NVPTXInst<(outs Int8Regs:$dst),
- (ins Int8Regs:$a, i8imm:$b, Int8Regs:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int8Regs:$dst, (add (mul Int8Regs:$a, imm:$b),
- Int8Regs:$c))]>;
-def MAD8rii : NVPTXInst<(outs Int8Regs:$dst),
- (ins Int8Regs:$a, i8imm:$b, i8imm:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int8Regs:$dst, (add (mul Int8Regs:$a, imm:$b),
- imm:$c))]>;
-
def MAD16rrr : NVPTXInst<(outs Int16Regs:$dst),
(ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
"mad.lo.s16 \t$dst, $a, $b, $c;",
@@ -661,10 +605,6 @@ def MAD64rii : NVPTXInst<(outs Int64Regs:$dst),
(mul Int64Regs:$a, imm:$b), imm:$c))]>;
-def INEG8 : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$src),
- !strconcat("cvt.s16.s8 \t$dst, $src;\n\t",
- "neg.s16 \t$dst, $dst;"),
- [(set Int8Regs:$dst, (ineg Int8Regs:$src))]>;
def INEG16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
"neg.s16 \t$dst, $src;",
[(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
@@ -842,6 +782,16 @@ def FDIV32ri_prec : NVPTXInst<(outs Float32Regs:$dst),
(fdiv Float32Regs:$a, fpimm:$b))]>,
Requires<[reqPTX20]>;
+//
+// F32 rsqrt
+//
+
+def RSQRTF32approx1r : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$b),
+ "rsqrt.approx.f32 \t$dst, $b;", []>;
+
+def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_f Float32Regs:$b)),
+ (RSQRTF32approx1r Float32Regs:$b)>,
+ Requires<[do_DIVF32_FULL, do_SQRTF32_APPROX, doNoF32FTZ]>;
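// A hedged illustration (not part of the patch) of the source shape this
// pattern folds: a reciprocal square root spelled as a division of 1.0 by the
// NVVM sqrt intrinsic collapses to a single rsqrt.approx.f32 when the
// approximate-sqrt, full-precision-divide and no-FTZ predicates above hold.
// Plain C++ equivalent of the matched expression, with std::sqrt standing in
// for llvm.nvvm.sqrt.f:
#include <cmath>
static float sketchRsqrtShape(float b) {
  return 1.0f / std::sqrt(b);   // fdiv(1.0, sqrt(b)) -> rsqrt.approx.f32
}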
multiclass FPCONTRACT32<string OpcStr, Predicate Pred> {
def rrr : NVPTXInst<(outs Float32Regs:$dst),
@@ -912,8 +862,6 @@ multiclass FPCONTRACT64<string OpcStr, Predicate Pred> {
// If we reverse the order of the following two lines, then rrr2 rule will be
// generated for FMA32, but not for rrr.
// Therefore, we manually write the rrr2 rule in FPCONTRACT32.
-defm FMAD32_ftz : FPCONTRACT32<"mad.ftz.f32", doFMADF32_ftz>;
-defm FMAD32 : FPCONTRACT32<"mad.f32", doFMADF32>;
defm FMA32_ftz : FPCONTRACT32<"fma.rn.ftz.f32", doFMAF32_ftz>;
defm FMA32 : FPCONTRACT32<"fma.rn.f32", doFMAF32>;
defm FMA64 : FPCONTRACT64<"fma.rn.f64", doFMAF64>;
@@ -952,8 +900,6 @@ multiclass FPCONTRACT64_SUB_PAT<NVPTXInst Inst, Predicate Pred> {
defm FMAF32ext_ftz : FPCONTRACT32_SUB_PAT<FMA32_ftzrrr, doFMAF32AGG_ftz>;
defm FMAF32ext : FPCONTRACT32_SUB_PAT<FMA32rrr, doFMAF32AGG>;
-defm FMADF32ext_ftz : FPCONTRACT32_SUB_PAT_MAD<FMAD32_ftzrrr, doFMADF32_ftz>;
-defm FMADF32ext : FPCONTRACT32_SUB_PAT_MAD<FMAD32rrr, doFMADF32>;
defm FMAF64ext : FPCONTRACT64_SUB_PAT<FMA64rrr, doFMAF64AGG>;
def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
@@ -963,6 +909,41 @@ def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
"cos.approx.f32 \t$dst, $src;",
[(set Float32Regs:$dst, (fcos Float32Regs:$src))]>;
+// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y))
+// e.g. "poor man's fmod()"
+
+// frem - f32 FTZ
+def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+ (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
+ (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRMI_FTZ),
+ Float32Regs:$y))>,
+ Requires<[doF32FTZ]>;
+def : Pat<(frem Float32Regs:$x, fpimm:$y),
+ (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
+ (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRMI_FTZ),
+ fpimm:$y))>,
+ Requires<[doF32FTZ]>;
+
+// frem - f32
+def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+ (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
+ (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRMI),
+ Float32Regs:$y))>;
+def : Pat<(frem Float32Regs:$x, fpimm:$y),
+ (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
+ (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRMI),
+ fpimm:$y))>;
+
+// frem - f64
+def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
+ (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
+ (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRMI),
+ Float64Regs:$y))>;
+def : Pat<(frem Float64Regs:$x, fpimm:$y),
+ (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
+ (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRMI),
+ fpimm:$y))>;
+
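// A hedged illustration (not part of the patch) of the arithmetic the frem
// patterns above expand to, with the CvtRMI conversion playing the role of a
// round-toward-minus-infinity (floor) applied to the quotient. In plain C++:
#include <cmath>
static float sketchFremExpansion(float x, float y) {
  return x - std::floor(x / y) * y;   // sub(x, mul(floor(div(x, y)), y))
}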
//-----------------------------------
// Logical Arithmetic
//-----------------------------------
@@ -974,12 +955,6 @@ multiclass LOG_FORMAT<string OpcStr, SDNode OpNode> {
def b1ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
!strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
[(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
- def b8rr: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
- [(set Int8Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>;
- def b8ri: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
- [(set Int8Regs:$dst, (OpNode Int8Regs:$a, imm:$b))]>;
def b16rr: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
!strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
[(set Int16Regs:$dst, (OpNode Int16Regs:$a,
@@ -1010,9 +985,6 @@ defm XOR : LOG_FORMAT<"xor", xor>;
def NOT1: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
"not.pred \t$dst, $src;",
[(set Int1Regs:$dst, (not Int1Regs:$src))]>;
-def NOT8: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$src),
- "not.b16 \t$dst, $src;",
- [(set Int8Regs:$dst, (not Int8Regs:$src))]>;
def NOT16: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
"not.b16 \t$dst, $src;",
[(set Int16Regs:$dst, (not Int16Regs:$src))]>;
@@ -1056,21 +1028,13 @@ multiclass LSHIFT_FORMAT<string OpcStr, SDNode OpNode> {
!strconcat(OpcStr, "16 \t$dst, $a, $b;"),
[(set Int16Regs:$dst, (OpNode Int16Regs:$a,
(i32 imm:$b)))]>;
- def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int8Regs:$dst, (OpNode Int8Regs:$a,
- Int32Regs:$b))]>;
- def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int8Regs:$dst, (OpNode Int8Regs:$a,
- (i32 imm:$b)))]>;
}
defm SHL : LSHIFT_FORMAT<"shl.b", shl>;
// For shifts, the second src operand must be a 32-bit value.
// Need to add cvt for the 8-bits.
-multiclass RSHIFT_FORMAT<string OpcStr, SDNode OpNode, string CVTStr> {
+multiclass RSHIFT_FORMAT<string OpcStr, SDNode OpNode> {
def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a,
Int32Regs:$b),
!strconcat(OpcStr, "64 \t$dst, $a, $b;"),
@@ -1102,20 +1066,10 @@ multiclass RSHIFT_FORMAT<string OpcStr, SDNode OpNode, string CVTStr> {
!strconcat(OpcStr, "16 \t$dst, $a, $b;"),
[(set Int16Regs:$dst, (OpNode Int16Regs:$a,
(i32 imm:$b)))]>;
- def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int32Regs:$b),
- !strconcat(CVTStr, !strconcat(" \t$dst, $a;\n\t",
- !strconcat(OpcStr, "16 \t$dst, $dst, $b;"))),
- [(set Int8Regs:$dst, (OpNode Int8Regs:$a,
- Int32Regs:$b))]>;
- def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i32imm:$b),
- !strconcat(CVTStr, !strconcat(" \t$dst, $a;\n\t",
- !strconcat(OpcStr, "16 \t$dst, $dst, $b;"))),
- [(set Int8Regs:$dst, (OpNode Int8Regs:$a,
- (i32 imm:$b)))]>;
}
-defm SRA : RSHIFT_FORMAT<"shr.s", sra, "cvt.s16.s8">;
-defm SRL : RSHIFT_FORMAT<"shr.u", srl, "cvt.u16.u8">;
+defm SRA : RSHIFT_FORMAT<"shr.s", sra>;
+defm SRL : RSHIFT_FORMAT<"shr.u", srl>;
// 32bit
def ROT32imm_sw : NVPTXInst<(outs Int32Regs:$dst),
@@ -1213,6 +1167,120 @@ def ROTR64reg_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src,
//-----------------------------------
+// General Comparison
+//-----------------------------------
+
+// General setp instructions
+multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs Int1Regs:$dst),
+ (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr, "\t$dst, $a, $b;"),
+ []>;
+ def ri : NVPTXInst<(outs Int1Regs:$dst),
+ (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr, "\t$dst, $a, $b;"),
+ []>;
+ def ir : NVPTXInst<(outs Int1Regs:$dst),
+ (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr, "\t$dst, $a, $b;"),
+ []>;
+}
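+// With these, a concrete comparison is selected by passing a CmpMode operand;
+// e.g. SETP_f32rr with a greater-than, flush-to-zero mode is expected to
+// print as "setp.gt.ftz.f32 \t$dst, $a, $b;" via the ${cmp:base} and
+// ${cmp:ftz} operand modifiers.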
+
+defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;
+defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>;
+defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>;
+defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>;
+defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>;
+defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>;
+defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>;
+defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>;
+defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;
+defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;
+defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;
+
+// General set instructions
+multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>;
+ def ri : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>;
+ def ir : NVPTXInst<(outs Int32Regs:$dst),
+ (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>;
+}
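+// Unlike setp, PTX set.* writes a 32-bit result: 0xffffffff for true and 0
+// for false with an integer destination, matching the -1/0 convention used
+// by the i32-producing compare patterns further below.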
+
+defm SET_b16 : SET<"b16", Int16Regs, i16imm>;
+defm SET_s16 : SET<"s16", Int16Regs, i16imm>;
+defm SET_u16 : SET<"u16", Int16Regs, i16imm>;
+defm SET_b32 : SET<"b32", Int32Regs, i32imm>;
+defm SET_s32 : SET<"s32", Int32Regs, i32imm>;
+defm SET_u32 : SET<"u32", Int32Regs, i32imm>;
+defm SET_b64 : SET<"b64", Int64Regs, i64imm>;
+defm SET_s64 : SET<"s64", Int64Regs, i64imm>;
+defm SET_u64 : SET<"u64", Int64Regs, i64imm>;
+defm SET_f32 : SET<"f32", Float32Regs, f32imm>;
+defm SET_f64 : SET<"f64", Float64Regs, f64imm>;
+
+//-----------------------------------
+// General Selection
+//-----------------------------------
+
+// General selp instructions
+multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
+ def ri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
+ def ir : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
+ def ii : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
+}
+
+multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
+ SDNode ImmNode> {
+ def rr : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
+ def ri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
+ def ir : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
+ def ii : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
+}
+
+defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;
+defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;
+defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;
+defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>;
+defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>;
+defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>;
+defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>;
+defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>;
+defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
+defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
+defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
+
+// Special select for predicate operands
+def : Pat<(i1 (select Int1Regs:$p, Int1Regs:$a, Int1Regs:$b)),
+ (ORb1rr (ANDb1rr Int1Regs:$p, Int1Regs:$a),
+ (ANDb1rr (NOT1 Int1Regs:$p), Int1Regs:$b))>;
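+// (selp has no .pred variant, so an i1 select is expanded into
+//  dst = (p & a) | (!p & b).)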
+
+//-----------------------------------
// Data Movement (Load / Store, Move)
//-----------------------------------
@@ -1253,12 +1321,19 @@ def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
"mov.u64 \t$dst, $a;",
[(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+// Get pointer to local stack
+def MOV_DEPOT_ADDR
+ : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
+ "mov.u32 \t$d, __local_depot$num;", []>;
+def MOV_DEPOT_ADDR_64
+ : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
+ "mov.u64 \t$d, __local_depot$num;", []>;
+
+
// copyPhysReg is hard-coded in NVPTXInstrInfo.cpp
let IsSimpleMove=1 in {
def IMOV1rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
"mov.pred \t$dst, $sss;", []>;
-def IMOV8rr: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$sss),
- "mov.u16 \t$dst, $sss;", []>;
def IMOV16rr: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
"mov.u16 \t$dst, $sss;", []>;
def IMOV32rr: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
@@ -1274,9 +1349,6 @@ def FMOV64rr: NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
def IMOV1ri: NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
"mov.pred \t$dst, $src;",
[(set Int1Regs:$dst, imm:$src)]>;
-def IMOV8ri: NVPTXInst<(outs Int8Regs:$dst), (ins i8imm:$src),
- "mov.u16 \t$dst, $src;",
- [(set Int8Regs:$dst, imm:$src)]>;
def IMOV16ri: NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
"mov.u16 \t$dst, $src;",
[(set Int16Regs:$dst, imm:$src)]>;
@@ -1308,440 +1380,194 @@ def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
// Comparison and Selection
//-----------------------------------
-// Generate string block like
-// {
-// .reg .pred p;
-// setp.gt.s16 p, %a, %b;
-// selp.s16 %dst, -1, 0, p;
-// }
-// when OpcStr=setp.gt.s sz1=16 sz2=16 d=%dst a=%a b=%b
-class Set_Str<string OpcStr, string sz1, string sz2, string d, string a,
- string b> {
- string t1 = "{{\n\t.reg .pred p;\n\t";
- string t2 = !strconcat(t1 , OpcStr);
- string t3 = !strconcat(t2 , sz1);
- string t4 = !strconcat(t3 , " \tp, ");
- string t5 = !strconcat(t4 , a);
- string t6 = !strconcat(t5 , ", ");
- string t7 = !strconcat(t6 , b);
- string t8 = !strconcat(t7 , ";\n\tselp.s");
- string t9 = !strconcat(t8 , sz2);
- string t10 = !strconcat(t9, " \t");
- string t11 = !strconcat(t10, d);
- string s = !strconcat(t11, ", -1, 0, p;\n\t}}");
+multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
+ Instruction setp_16rr,
+ Instruction setp_16ri,
+ Instruction setp_16ir,
+ Instruction setp_32rr,
+ Instruction setp_32ri,
+ Instruction setp_32ir,
+ Instruction setp_64rr,
+ Instruction setp_64ri,
+ Instruction setp_64ir,
+ Instruction set_16rr,
+ Instruction set_16ri,
+ Instruction set_16ir,
+ Instruction set_32rr,
+ Instruction set_32ri,
+ Instruction set_32ir,
+ Instruction set_64rr,
+ Instruction set_64ri,
+ Instruction set_64ir> {
+ // i16 -> pred
+ def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)),
+ (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),
+ (setp_16ri Int16Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),
+ (setp_16ir imm:$a, Int16Regs:$b, Mode)>;
+ // i32 -> pred
+ def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)),
+ (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),
+ (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),
+ (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
+ // i64 -> pred
+ def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
+ (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),
+ (setp_64ri Int64Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),
+ (setp_64ir imm:$a, Int64Regs:$b, Mode)>;
+
+ // i16 -> i32
+ def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)),
+ (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),
+ (set_16ri Int16Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),
+ (set_16ir imm:$a, Int16Regs:$b, Mode)>;
+ // i32 -> i32
+ def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)),
+ (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),
+ (set_32ri Int32Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),
+ (set_32ir imm:$a, Int32Regs:$b, Mode)>;
+ // i64 -> i32
+ def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
+ (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),
+ (set_64ri Int64Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),
+ (set_64ir imm:$a, Int64Regs:$b, Mode)>;
}
-// Generate string block like
-// {
-// .reg .pred p;
-// .reg .s16 %temp1;
-// .reg .s16 %temp2;
-// cvt.s16.s8 %temp1, %a;
-// cvt s16.s8 %temp1, %b;
-// setp.gt.s16 p, %temp1, %temp2;
-// selp.s16 %dst, -1, 0, p;
-// }
-// when OpcStr=setp.gt.s d=%dst a=%a b=%b type=s16 cvt=cvt.s16.s8
-class Set_Stri8<string OpcStr, string d, string a, string b, string type,
- string cvt> {
- string t1 = "{{\n\t.reg .pred p;\n\t";
- string t2 = !strconcat(t1, ".reg .");
- string t3 = !strconcat(t2, type);
- string t4 = !strconcat(t3, " %temp1;\n\t");
- string t5 = !strconcat(t4, ".reg .");
- string t6 = !strconcat(t5, type);
- string t7 = !strconcat(t6, " %temp2;\n\t");
- string t8 = !strconcat(t7, cvt);
- string t9 = !strconcat(t8, " \t%temp1, ");
- string t10 = !strconcat(t9, a);
- string t11 = !strconcat(t10, ";\n\t");
- string t12 = !strconcat(t11, cvt);
- string t13 = !strconcat(t12, " \t%temp2, ");
- string t14 = !strconcat(t13, b);
- string t15 = !strconcat(t14, ";\n\t");
- string t16 = !strconcat(t15, OpcStr);
- string t17 = !strconcat(t16, "16");
- string t18 = !strconcat(t17, " \tp, %temp1, %temp2;\n\t");
- string t19 = !strconcat(t18, "selp.s16 \t");
- string t20 = !strconcat(t19, d);
- string s = !strconcat(t20, ", -1, 0, p;\n\t}}");
+multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>
+ : ISET_FORMAT<OpNode, Mode,
+ SETP_s16rr, SETP_s16ri, SETP_s16ir,
+ SETP_s32rr, SETP_s32ri, SETP_s32ir,
+ SETP_s64rr, SETP_s64ri, SETP_s64ir,
+ SET_s16rr, SET_s16ri, SET_s16ir,
+ SET_s32rr, SET_s32ri, SET_s32ir,
+ SET_s64rr, SET_s64ri, SET_s64ir> {
+ // TableGen doesn't like empty multiclasses
+ def : PatLeaf<(i32 0)>;
}
-multiclass ISET_FORMAT<string OpcStr, string OpcStr_u32, PatFrag OpNode,
- string TypeStr, string CVTStr> {
- def i8rr_toi8: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
- Set_Stri8<OpcStr, "$dst", "$a", "$b", TypeStr, CVTStr>.s,
- []>;
- def i16rr_toi16: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a,
- Int16Regs:$b),
- Set_Str<OpcStr, "16", "16", "$dst", "$a", "$b">.s,
- []>;
- def i32rr_toi32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a,
- Int32Regs:$b),
- Set_Str<OpcStr, "32", "32", "$dst", "$a", "$b">.s,
- []>;
- def i64rr_toi64: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a,
- Int64Regs:$b),
- Set_Str<OpcStr, "64", "64", "$dst", "$a", "$b">.s,
- []>;
-
- def i8rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
- Handle_i8rr<OpcStr, TypeStr, CVTStr>.s,
- [(set Int1Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>;
- def i8ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
- Handle_i8ri<OpcStr, TypeStr, CVTStr>.s,
- [(set Int1Regs:$dst, (OpNode Int8Regs:$a, imm:$b))]>;
- def i8ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i8imm:$a, Int8Regs:$b),
- Handle_i8ir<OpcStr, TypeStr, CVTStr>.s,
- [(set Int1Regs:$dst, (OpNode imm:$a, Int8Regs:$b))]>;
- def i16rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
- def i16ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
- def i16ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i16imm:$a, Int16Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode imm:$a, Int16Regs:$b))]>;
- def i32rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def i32ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i32imm:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode imm:$a, Int32Regs:$b))]>;
- def i64rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
- def i64ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
- def i64ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i64imm:$a, Int64Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode imm:$a, Int64Regs:$b))]>;
-
- def i8rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
- Handle_i8rr<OpcStr_u32, TypeStr, CVTStr>.s,
- [(set Int32Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>;
- def i8ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
- Handle_i8ri<OpcStr_u32, TypeStr, CVTStr>.s,
- [(set Int32Regs:$dst, (OpNode Int8Regs:$a, imm:$b))]>;
- def i8ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i8imm:$a, Int8Regs:$b),
- Handle_i8ir<OpcStr_u32, TypeStr, CVTStr>.s,
- [(set Int32Regs:$dst, (OpNode imm:$a, Int8Regs:$b))]>;
- def i16rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a,
- Int16Regs:$b),
- !strconcat(OpcStr_u32, "16 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
- def i16ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr_u32, "16 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
- def i16ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i16imm:$a, Int16Regs:$b),
- !strconcat(OpcStr_u32, "16 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode imm:$a, Int16Regs:$b))]>;
- def i32rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a,
- Int32Regs:$b),
- !strconcat(OpcStr_u32, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr_u32, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def i32ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, Int32Regs:$b),
- !strconcat(OpcStr_u32, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode imm:$a, Int32Regs:$b))]>;
- def i64rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$a,
- Int64Regs:$b),
- !strconcat(OpcStr_u32, "64 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
- def i64ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr_u32, "64 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
- def i64ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i64imm:$a, Int64Regs:$b),
- !strconcat(OpcStr_u32, "64 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode imm:$a, Int64Regs:$b))]>;
+multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>
+ : ISET_FORMAT<OpNode, Mode,
+ SETP_u16rr, SETP_u16ri, SETP_u16ir,
+ SETP_u32rr, SETP_u32ri, SETP_u32ir,
+ SETP_u64rr, SETP_u64ri, SETP_u64ir,
+ SET_u16rr, SET_u16ri, SET_u16ir,
+ SET_u32rr, SET_u32ri, SET_u32ir,
+ SET_u64rr, SET_u64ri, SET_u64ir> {
+ // TableGen doesn't like empty multiclasses
+ def : PatLeaf<(i32 0)>;
}
-multiclass FSET_FORMAT<string OpcStr, string OpcStr_u32, PatFrag OpNode> {
- def f32rr_toi32_ftz: NVPTXInst<(outs Int32Regs:$dst), (ins Float32Regs:$a,
- Float32Regs:$b),
- Set_Str<OpcStr, "ftz.f32", "32", "$dst", "$a", "$b">.s,
- []>, Requires<[doF32FTZ]>;
- def f32rr_toi32: NVPTXInst<(outs Int32Regs:$dst), (ins Float32Regs:$a,
- Float32Regs:$b),
- Set_Str<OpcStr, "f32", "32", "$dst", "$a", "$b">.s,
- []>;
- def f64rr_toi64: NVPTXInst<(outs Int64Regs:$dst), (ins Float64Regs:$a,
- Float64Regs:$b),
- Set_Str<OpcStr, "f64", "64", "$dst", "$a", "$b">.s,
- []>;
- def f64rr_toi32: NVPTXInst<(outs Int32Regs:$dst), (ins Float64Regs:$a,
- Float64Regs:$b),
- Set_Str<OpcStr, "f64", "32", "$dst", "$a", "$b">.s,
- []>;
-
- def f32rr_p_ftz: NVPTXInst<(outs Int1Regs:$dst), (ins Float32Regs:$a
- , Float32Regs:$b),
- !strconcat(OpcStr, "ftz.f32 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>
- , Requires<[doF32FTZ]>;
- def f32rr_p: NVPTXInst<(outs Int1Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, "f32 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
- def f32ri_p_ftz: NVPTXInst<(outs Int1Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, "ftz.f32 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[doF32FTZ]>;
- def f32ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, "f32 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
- def f32ir_p_ftz: NVPTXInst<(outs Int1Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- !strconcat(OpcStr, "ftz.f32 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>,
- Requires<[doF32FTZ]>;
- def f32ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins f32imm:$a, Float32Regs:$b),
- !strconcat(OpcStr, "f32 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>;
- def f64rr_p: NVPTXInst<(outs Int1Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, "f64 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
- def f64ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, "f64 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
- def f64ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins f64imm:$a, Float64Regs:$b),
- !strconcat(OpcStr, "f64 \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode fpimm:$a, Float64Regs:$b))]>;
-
- def f32rr_u32_ftz: NVPTXInst<(outs Int32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr_u32, "ftz.f32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
- def f32rr_u32: NVPTXInst<(outs Int32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr_u32, "f32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
- def f32ri_u32_ftz: NVPTXInst<(outs Int32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr_u32, "ftz.f32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
- def f32ri_u32: NVPTXInst<(outs Int32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr_u32, "f32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
- def f32ir_u32_ftz: NVPTXInst<(outs Int32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- !strconcat(OpcStr_u32, "ftz.f32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>;
- def f32ir_u32: NVPTXInst<(outs Int32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- !strconcat(OpcStr_u32, "f32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>;
- def f64rr_u32: NVPTXInst<(outs Int32Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr_u32, "f64 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
- def f64ri_u32: NVPTXInst<(outs Int32Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr_u32, "f64 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
- def f64ir_u32: NVPTXInst<(outs Int32Regs:$dst),
- (ins f64imm:$a, Float64Regs:$b),
- !strconcat(OpcStr_u32, "f64 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode fpimm:$a, Float64Regs:$b))]>;
+defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
+defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
+defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
+defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
+defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
+defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
+defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
+defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
+defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
+defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
+defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
+defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
+
+// i1 compares
+def : Pat<(setne Int1Regs:$a, Int1Regs:$b),
+ (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+def : Pat<(setune Int1Regs:$a, Int1Regs:$b),
+ (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+
+def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),
+ (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),
+ (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
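+// For 1-bit operands, "a != b" is exactly "a xor b" and "a == b" is its
+// negation, so no setp is needed for predicate compares.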
+
+// i1 compare -> i32
+def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
+ (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(i32 (seteq Int1Regs:$a, Int1Regs:$b)),
+ (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+
+
+
+multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
+ // f32 -> pred
+ def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+ // f64 -> pred
+ def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
+ (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
+ (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
+ (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
+
+ // f32 -> i32
+ def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+ // f64 -> i32
+ def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),
+ (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),
+ (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),
+ (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
}
-defm ISetSGT
-: ISET_FORMAT<"setp.gt.s", "set.gt.u32.s", setgt, "s16", "cvt.s16.s8">;
-defm ISetUGT
-: ISET_FORMAT<"setp.gt.u", "set.gt.u32.u", setugt, "u16", "cvt.u16.u8">;
-defm ISetSLT
-: ISET_FORMAT<"setp.lt.s", "set.lt.u32.s", setlt, "s16", "cvt.s16.s8">;
-defm ISetULT
-: ISET_FORMAT<"setp.lt.u", "set.lt.u32.u", setult, "u16", "cvt.u16.u8">;
-defm ISetSGE
-: ISET_FORMAT<"setp.ge.s", "set.ge.u32.s", setge, "s16", "cvt.s16.s8">;
-defm ISetUGE
-: ISET_FORMAT<"setp.ge.u", "set.ge.u32.u", setuge, "u16", "cvt.u16.u8">;
-defm ISetSLE
-: ISET_FORMAT<"setp.le.s", "set.le.u32.s", setle, "s16", "cvt.s16.s8">;
-defm ISetULE
-: ISET_FORMAT<"setp.le.u", "set.le.u32.u", setule, "u16", "cvt.u16.u8">;
-defm ISetSEQ
-: ISET_FORMAT<"setp.eq.s", "set.eq.u32.s", seteq, "s16", "cvt.s16.s8">;
-defm ISetUEQ
-: ISET_FORMAT<"setp.eq.u", "set.eq.u32.u", setueq, "u16", "cvt.u16.u8">;
-defm ISetSNE
-: ISET_FORMAT<"setp.ne.s", "set.ne.u32.s", setne, "s16", "cvt.s16.s8">;
-defm ISetUNE
-: ISET_FORMAT<"setp.ne.u", "set.ne.u32.u", setune, "u16", "cvt.u16.u8">;
-
-def ISetSNEi1rr_p : NVPTXInst<(outs Int1Regs:$dst),
- (ins Int1Regs:$a, Int1Regs:$b),
- "xor.pred \t$dst, $a, $b;",
- [(set Int1Regs:$dst, (setne Int1Regs:$a, Int1Regs:$b))]>;
-def ISetUNEi1rr_p : NVPTXInst<(outs Int1Regs:$dst),
- (ins Int1Regs:$a, Int1Regs:$b),
- "xor.pred \t$dst, $a, $b;",
- [(set Int1Regs:$dst, (setune Int1Regs:$a, Int1Regs:$b))]>;
-def ISetSEQi1rr_p : NVPTXInst<(outs Int1Regs:$dst),
- (ins Int1Regs:$a, Int1Regs:$b),
- !strconcat("{{\n\t",
- !strconcat(".reg .pred temp;\n\t",
- !strconcat("xor.pred \ttemp, $a, $b;\n\t",
- !strconcat("not.pred \t$dst, temp;\n\t}}","")))),
- [(set Int1Regs:$dst, (seteq Int1Regs:$a, Int1Regs:$b))]>;
-def ISetUEQi1rr_p : NVPTXInst<(outs Int1Regs:$dst),
- (ins Int1Regs:$a, Int1Regs:$b),
- !strconcat("{{\n\t",
- !strconcat(".reg .pred temp;\n\t",
- !strconcat("xor.pred \ttemp, $a, $b;\n\t",
- !strconcat("not.pred \t$dst, temp;\n\t}}","")))),
- [(set Int1Regs:$dst, (setueq Int1Regs:$a, Int1Regs:$b))]>;
-
-// Compare 2 i1's and produce a u32
-def ISETSNEi1rr_u32 : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int1Regs:$a, Int1Regs:$b),
- !strconcat("{{\n\t",
- !strconcat(".reg .pred temp;\n\t",
- !strconcat("xor.pred \ttemp, $a, $b;\n\t",
- !strconcat("selp.u32 \t$dst, -1, 0, temp;", "\n\t}}")))),
- [(set Int32Regs:$dst, (setne Int1Regs:$a, Int1Regs:$b))]>;
-def ISETSEQi1rr_u32 : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int1Regs:$a, Int1Regs:$b),
- !strconcat("{{\n\t",
- !strconcat(".reg .pred temp;\n\t",
- !strconcat("xor.pred \ttemp, $a, $b;\n\t",
- !strconcat("selp.u32 \t$dst, 0, -1, temp;", "\n\t}}")))),
- [(set Int32Regs:$dst, (seteq Int1Regs:$a, Int1Regs:$b))]>;
-
-defm FSetGT : FSET_FORMAT<"setp.gt.", "set.gt.u32.", setogt>;
-defm FSetLT : FSET_FORMAT<"setp.lt.", "set.lt.u32.", setolt>;
-defm FSetGE : FSET_FORMAT<"setp.ge.", "set.ge.u32.", setoge>;
-defm FSetLE : FSET_FORMAT<"setp.le.", "set.le.u32.", setole>;
-defm FSetEQ : FSET_FORMAT<"setp.eq.", "set.eq.u32.", setoeq>;
-defm FSetNE : FSET_FORMAT<"setp.ne.", "set.ne.u32.", setone>;
-
-defm FSetUGT : FSET_FORMAT<"setp.gtu.", "set.gtu.u32.", setugt>;
-defm FSetULT : FSET_FORMAT<"setp.ltu.", "set.ltu.u32.",setult>;
-defm FSetUGE : FSET_FORMAT<"setp.geu.", "set.geu.u32.",setuge>;
-defm FSetULE : FSET_FORMAT<"setp.leu.", "set.leu.u32.",setule>;
-defm FSetUEQ : FSET_FORMAT<"setp.equ.", "set.equ.u32.",setueq>;
-defm FSetUNE : FSET_FORMAT<"setp.neu.", "set.neu.u32.",setune>;
-
-defm FSetNUM : FSET_FORMAT<"setp.num.", "set.num.u32.",seto>;
-defm FSetNAN : FSET_FORMAT<"setp.nan.", "set.nan.u32.",setuo>;
-
-def SELECTi1rr : Pat<(i1 (select Int1Regs:$p, Int1Regs:$a, Int1Regs:$b)),
- (ORb1rr (ANDb1rr Int1Regs:$p, Int1Regs:$a),
- (ANDb1rr (NOT1 Int1Regs:$p), Int1Regs:$b))>;
-def SELECTi8rr : NVPTXInst<(outs Int8Regs:$dst),
- (ins Int8Regs:$a, Int8Regs:$b, Int1Regs:$p),
- "selp.b16 \t$dst, $a, $b, $p;",
- [(set Int8Regs:$dst, (select Int1Regs:$p, Int8Regs:$a, Int8Regs:$b))]>;
-def SELECTi8ri : NVPTXInst<(outs Int8Regs:$dst),
- (ins Int8Regs:$a, i8imm:$b, Int1Regs:$p),
- "selp.b16 \t$dst, $a, $b, $p;",
- [(set Int8Regs:$dst, (select Int1Regs:$p, Int8Regs:$a, imm:$b))]>;
-def SELECTi8ir : NVPTXInst<(outs Int8Regs:$dst),
- (ins i8imm:$a, Int8Regs:$b, Int1Regs:$p),
- "selp.b16 \t$dst, $a, $b, $p;",
- [(set Int8Regs:$dst, (select Int1Regs:$p, imm:$a, Int8Regs:$b))]>;
-def SELECTi8ii : NVPTXInst<(outs Int8Regs:$dst),
- (ins i8imm:$a, i8imm:$b, Int1Regs:$p),
- "selp.b16 \t$dst, $a, $b, $p;",
- [(set Int8Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>;
-
-def SELECTi16rr : NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b, Int1Regs:$p),
- "selp.b16 \t$dst, $a, $b, $p;",
- [(set Int16Regs:$dst, (select Int1Regs:$p, Int16Regs:$a, Int16Regs:$b))]>;
-def SELECTi16ri : NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, i16imm:$b, Int1Regs:$p),
- "selp.b16 \t$dst, $a, $b, $p;",
- [(set Int16Regs:$dst, (select Int1Regs:$p, Int16Regs:$a, imm:$b))]>;
-def SELECTi16ir : NVPTXInst<(outs Int16Regs:$dst),
- (ins i16imm:$a, Int16Regs:$b, Int1Regs:$p),
- "selp.b16 \t$dst, $a, $b, $p;",
- [(set Int16Regs:$dst, (select Int1Regs:$p, imm:$a, Int16Regs:$b))]>;
-def SELECTi16ii : NVPTXInst<(outs Int16Regs:$dst),
- (ins i16imm:$a, i16imm:$b, Int1Regs:$p),
- "selp.b16 \t$dst, $a, $b, $p;",
- [(set Int16Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>;
-
-def SELECTi32rr : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b, Int1Regs:$p),
- "selp.b32 \t$dst, $a, $b, $p;",
- [(set Int32Regs:$dst, (select Int1Regs:$p, Int32Regs:$a, Int32Regs:$b))]>;
-def SELECTi32ri : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, i32imm:$b, Int1Regs:$p),
- "selp.b32 \t$dst, $a, $b, $p;",
- [(set Int32Regs:$dst, (select Int1Regs:$p, Int32Regs:$a, imm:$b))]>;
-def SELECTi32ir : NVPTXInst<(outs Int32Regs:$dst),
- (ins i32imm:$a, Int32Regs:$b, Int1Regs:$p),
- "selp.b32 \t$dst, $a, $b, $p;",
- [(set Int32Regs:$dst, (select Int1Regs:$p, imm:$a, Int32Regs:$b))]>;
-def SELECTi32ii : NVPTXInst<(outs Int32Regs:$dst),
- (ins i32imm:$a, i32imm:$b, Int1Regs:$p),
- "selp.b32 \t$dst, $a, $b, $p;",
- [(set Int32Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>;
-
-def SELECTi64rr : NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, Int64Regs:$b, Int1Regs:$p),
- "selp.b64 \t$dst, $a, $b, $p;",
- [(set Int64Regs:$dst, (select Int1Regs:$p, Int64Regs:$a, Int64Regs:$b))]>;
-def SELECTi64ri : NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, i64imm:$b, Int1Regs:$p),
- "selp.b64 \t$dst, $a, $b, $p;",
- [(set Int64Regs:$dst, (select Int1Regs:$p, Int64Regs:$a, imm:$b))]>;
-def SELECTi64ir : NVPTXInst<(outs Int64Regs:$dst),
- (ins i64imm:$a, Int64Regs:$b, Int1Regs:$p),
- "selp.b64 \t$dst, $a, $b, $p;",
- [(set Int64Regs:$dst, (select Int1Regs:$p, imm:$a, Int64Regs:$b))]>;
-def SELECTi64ii : NVPTXInst<(outs Int64Regs:$dst),
- (ins i64imm:$a, i64imm:$b, Int1Regs:$p),
- "selp.b64 \t$dst, $a, $b, $p;",
- [(set Int64Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>;
-
-def SELECTf32rr : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b, Int1Regs:$p),
- "selp.f32 \t$dst, $a, $b, $p;",
- [(set Float32Regs:$dst,
- (select Int1Regs:$p, Float32Regs:$a, Float32Regs:$b))]>;
-def SELECTf32ri : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b, Int1Regs:$p),
- "selp.f32 \t$dst, $a, $b, $p;",
- [(set Float32Regs:$dst, (select Int1Regs:$p, Float32Regs:$a, fpimm:$b))]>;
-def SELECTf32ir : NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b, Int1Regs:$p),
- "selp.f32 \t$dst, $a, $b, $p;",
- [(set Float32Regs:$dst, (select Int1Regs:$p, fpimm:$a, Float32Regs:$b))]>;
-def SELECTf32ii : NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, f32imm:$b, Int1Regs:$p),
- "selp.f32 \t$dst, $a, $b, $p;",
- [(set Float32Regs:$dst, (select Int1Regs:$p, fpimm:$a, fpimm:$b))]>;
-
-def SELECTf64rr : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b, Int1Regs:$p),
- "selp.f64 \t$dst, $a, $b, $p;",
- [(set Float64Regs:$dst,
- (select Int1Regs:$p, Float64Regs:$a, Float64Regs:$b))]>;
-def SELECTf64ri : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b, Int1Regs:$p),
- "selp.f64 \t$dst, $a, $b, $p;",
- [(set Float64Regs:$dst, (select Int1Regs:$p, Float64Regs:$a, fpimm:$b))]>;
-def SELECTf64ir : NVPTXInst<(outs Float64Regs:$dst),
- (ins f64imm:$a, Float64Regs:$b, Int1Regs:$p),
- "selp.f64 \t$dst, $a, $b, $p;",
- [(set Float64Regs:$dst, (select Int1Regs:$p, fpimm:$a, Float64Regs:$b))]>;
-def SELECTf64ii : NVPTXInst<(outs Float64Regs:$dst),
- (ins f64imm:$a, f64imm:$b, Int1Regs:$p),
- "selp.f64 \t $dst, $a, $b, $p;",
- [(set Float64Regs:$dst, (select Int1Regs:$p, fpimm:$a, fpimm:$b))]>;
+defm FSetGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
+defm FSetLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
+defm FSetGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
+defm FSetLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
+defm FSetEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
+defm FSetNE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
+
+defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;
+defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;
+defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>;
+defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;
+defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;
+defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;
+
+defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
+defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
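+// setp.num tests that both operands are numeric (ordered, seto), while
+// setp.nan tests that at least one operand is NaN (unordered, setuo).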
//def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
@@ -1751,17 +1577,22 @@ def SDTDeclareParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
def SDTDeclareScalarParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>,
SDTCisInt<1>, SDTCisInt<2>]>;
def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;
+def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;
def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>;
def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;
def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;
def SDTCallValProfile : SDTypeProfile<1, 0, []>;
def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;
-def SDTMoveRetvalProfile : SDTypeProfile<0, 1, []>;
def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
+def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
def DeclareParam : SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
@@ -1776,18 +1607,24 @@ def DeclareRet : SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
[SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
def LoadParam : SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
[SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV2 : SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV4 : SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
def PrintCall : SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
[SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
def PrintCallUni : SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
[SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
def StoreParam : SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
[SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV2 : SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV4 : SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
def StoreParamU32 : SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
[SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
def StoreParamS32 : SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
[SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def MoveToParam : SDNode<"NVPTXISD::MoveToParam", SDTStoreParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
def CallArgBegin : SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
[SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
def CallArg : SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
@@ -1804,12 +1641,12 @@ def CallVal : SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
[SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
def MoveParam : SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile,
[]>;
-def MoveRetval : SDNode<"NVPTXISD::MoveRetval", SDTMoveRetvalProfile,
- [SDNPHasChain, SDNPSideEffect]>;
def StoreRetval : SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
[SDNPHasChain, SDNPSideEffect]>;
-def MoveToRetval : SDNode<"NVPTXISD::MoveToRetval", SDTStoreRetvalProfile,
- [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV2 : SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV4 : SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
+ [SDNPHasChain, SDNPSideEffect]>;
def PseudoUseParam : SDNode<"NVPTXISD::PseudoUseParam",
SDTPseudoUseParamProfile,
[SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
@@ -1820,7 +1657,7 @@ class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
!strconcat(!strconcat("ld.param", opstr),
"\t$dst, [retval0+$b];"),
- [(set regclass:$dst, (LoadParam (i32 1), (i32 imm:$b)))]>;
+ []>;
class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
@@ -1828,35 +1665,57 @@ class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
"\t$dst, retval$b;"),
[(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
+class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
+ !strconcat(!strconcat("ld.param.v2", opstr),
+ "\t{{$dst, $dst2}}, [retval0+$b];"), []>;
+
+class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4),
+ (ins i32imm:$b),
+ !strconcat(!strconcat("ld.param.v4", opstr),
+ "\t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"), []>;
+
class StoreParamInst<NVPTXRegClass regclass, string opstr> :
NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
!strconcat(!strconcat("st.param", opstr),
"\t[param$a+$b], $val;"),
- [(StoreParam (i32 imm:$a), (i32 imm:$b), regclass:$val)]>;
+ []>;
-class MoveToParamInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
- !strconcat(!strconcat("mov", opstr),
- "\tparam$a, $val;"),
- [(MoveToParam (i32 imm:$a), (i32 imm:$b), regclass:$val)]>;
+class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
+ i32imm:$a, i32imm:$b),
+ !strconcat(!strconcat("st.param.v2", opstr),
+ "\t[param$a+$b], {{$val, $val2}};"),
+ []>;
+
+class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val1, regclass:$val2,
+ regclass:$val3, i32imm:$a, i32imm:$b),
+ !strconcat(!strconcat("st.param.v4", opstr),
+ "\t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
+ []>;
class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
!strconcat(!strconcat("st.param", opstr),
"\t[func_retval0+$a], $val;"),
- [(StoreRetval (i32 imm:$a), regclass:$val)]>;
+ []>;
-class MoveToRetvalInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins i32imm:$num, regclass:$val),
- !strconcat(!strconcat("mov", opstr),
- "\tfunc_retval$num, $val;"),
- [(MoveToRetval (i32 imm:$num), regclass:$val)]>;
+class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
+ !strconcat(!strconcat("st.param.v2", opstr),
+ "\t[func_retval0+$a], {{$val, $val2}};"),
+ []>;
-class MoveRetvalInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val),
- !strconcat(!strconcat("mov", opstr),
- "\tfunc_retval0, $val;"),
- [(MoveRetval regclass:$val)]>;
+class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs),
+ (ins regclass:$val, regclass:$val2, regclass:$val3,
+ regclass:$val4, i32imm:$a),
+ !strconcat(!strconcat("st.param.v4", opstr),
+ "\t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
+ []>;
def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
"call (retval0), ",
@@ -1919,126 +1778,81 @@ def PrintCallUniNoRetInst : NVPTXInst<(outs), (ins), "call.uni ",
def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;
-def LoadParamMemI8 : LoadParamMemInst<Int8Regs, ".b8">;
-
-//def LoadParamMemI16 : NVPTXInst<(outs Int16Regs:$dst), (ins i32imm:$b),
-// !strconcat("ld.param.b32\ttemp_param_reg, [retval0+$b];\n\t",
-// "cvt.u16.u32\t$dst, temp_param_reg;"),
-// [(set Int16Regs:$dst, (LoadParam (i32 1), (i32 imm:$b)))]>;
-//def LoadParamMemI8 : NVPTXInst<(outs Int8Regs:$dst), (ins i32imm:$b),
-// !strconcat("ld.param.b32\ttemp_param_reg, [retval0+$b];\n\t",
-// "cvt.u16.u32\t$dst, temp_param_reg;"),
-// [(set Int8Regs:$dst, (LoadParam (i32 1), (i32 imm:$b)))]>;
-
+def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">;
+def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">;
+def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">;
+def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">;
+def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">;
+def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">;
+def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">;
+def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">;
def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;
def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;
-
-def LoadParamRegI64 : LoadParamRegInst<Int64Regs, ".b64">;
-def LoadParamRegI32 : LoadParamRegInst<Int32Regs, ".b32">;
-def LoadParamRegI16 : NVPTXInst<(outs Int16Regs:$dst), (ins i32imm:$b),
- "cvt.u16.u32\t$dst, retval$b;",
- [(set Int16Regs:$dst,
- (LoadParam (i32 0), (i32 imm:$b)))]>;
-def LoadParamRegI8 : NVPTXInst<(outs Int8Regs:$dst), (ins i32imm:$b),
- "cvt.u16.u32\t$dst, retval$b;",
- [(set Int8Regs:$dst,
- (LoadParam (i32 0), (i32 imm:$b)))]>;
-
-def LoadParamRegF32 : LoadParamRegInst<Float32Regs, ".f32">;
-def LoadParamRegF64 : LoadParamRegInst<Float64Regs, ".f64">;
+def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;
+def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;
+def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;
def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
-def StoreParamI16 : NVPTXInst<(outs),
- (ins Int16Regs:$val, i32imm:$a, i32imm:$b),
- "st.param.b16\t[param$a+$b], $val;",
- [(StoreParam (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>;
-
-def StoreParamI8 : NVPTXInst<(outs),
- (ins Int8Regs:$val, i32imm:$a, i32imm:$b),
- "st.param.b8\t[param$a+$b], $val;",
- [(StoreParam
- (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>;
-
-def StoreParamS32I16 : NVPTXInst<(outs),
- (ins Int16Regs:$val, i32imm:$a, i32imm:$b),
- !strconcat("cvt.s32.s16\ttemp_param_reg, $val;\n\t",
- "st.param.b32\t[param$a+$b], temp_param_reg;"),
- [(StoreParamS32 (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>;
-def StoreParamU32I16 : NVPTXInst<(outs),
- (ins Int16Regs:$val, i32imm:$a, i32imm:$b),
- !strconcat("cvt.u32.u16\ttemp_param_reg, $val;\n\t",
- "st.param.b32\t[param$a+$b], temp_param_reg;"),
- [(StoreParamU32 (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>;
-
-def StoreParamU32I8 : NVPTXInst<(outs),
- (ins Int8Regs:$val, i32imm:$a, i32imm:$b),
- !strconcat("cvt.u32.u8\ttemp_param_reg, $val;\n\t",
- "st.param.b32\t[param$a+$b], temp_param_reg;"),
- [(StoreParamU32 (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>;
-def StoreParamS32I8 : NVPTXInst<(outs),
- (ins Int8Regs:$val, i32imm:$a, i32imm:$b),
- !strconcat("cvt.s32.s8\ttemp_param_reg, $val;\n\t",
- "st.param.b32\t[param$a+$b], temp_param_reg;"),
- [(StoreParamS32 (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>;
+def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
+def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
+def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
+def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
+def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
+def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
+
+// FIXME: StoreParamV4Inst crashes llvm-tblgen :(
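+// (The crash is most likely because the class's asm string references $val4,
+// which is never declared in its ins list, while the declared $val1 is
+// unused; the expanded defs below avoid the class.)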
+//def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
+def StoreParamV4I32 : NVPTXInst<(outs), (ins Int32Regs:$val, Int32Regs:$val2,
+ Int32Regs:$val3, Int32Regs:$val4,
+ i32imm:$a, i32imm:$b),
+ "st.param.b32\t[param$a+$b], {{$val, $val2, $val3, $val4}};",
+ []>;
+
+def StoreParamV4I16 : NVPTXInst<(outs), (ins Int16Regs:$val, Int16Regs:$val2,
+ Int16Regs:$val3, Int16Regs:$val4,
+ i32imm:$a, i32imm:$b),
+ "st.param.v4.b16\t[param$a+$b], {{$val, $val2, $val3, $val4}};",
+ []>;
+
+def StoreParamV4I8 : NVPTXInst<(outs), (ins Int16Regs:$val, Int16Regs:$val2,
+ Int16Regs:$val3, Int16Regs:$val4,
+ i32imm:$a, i32imm:$b),
+ "st.param.v4.b8\t[param$a+$b], {{$val, $val2, $val3, $val4}};",
+ []>;
def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
+def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
+def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
+// FIXME: StoreParamV4Inst crashes llvm-tblgen :(
+//def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
+def StoreParamV4F32 : NVPTXInst<(outs),
+ (ins Float32Regs:$val, Float32Regs:$val2,
+ Float32Regs:$val3, Float32Regs:$val4,
+ i32imm:$a, i32imm:$b),
+ "st.param.v4.f32\t[param$a+$b], {{$val, $val2, $val3, $val4}};",
+ []>;
-def MoveToParamI64 : MoveToParamInst<Int64Regs, ".b64">;
-def MoveToParamI32 : MoveToParamInst<Int32Regs, ".b32">;
-def MoveToParamF64 : MoveToParamInst<Float64Regs, ".f64">;
-def MoveToParamF32 : MoveToParamInst<Float32Regs, ".f32">;
-def MoveToParamI16 : NVPTXInst<(outs),
- (ins Int16Regs:$val, i32imm:$a, i32imm:$b),
- !strconcat("cvt.u32.u16\ttemp_param_reg, $val;\n\t",
- "mov.b32\tparam$a, temp_param_reg;"),
- [(MoveToParam (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>;
-def MoveToParamI8 : NVPTXInst<(outs),
- (ins Int8Regs:$val, i32imm:$a, i32imm:$b),
- !strconcat("cvt.u32.u16\ttemp_param_reg, $val;\n\t",
- "mov.b32\tparam$a, temp_param_reg;"),
- [(MoveToParam (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>;
def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
-def StoreRetvalI8 : StoreRetvalInst<Int8Regs, ".b8">;
-
-//def StoreRetvalI16 : NVPTXInst<(outs), (ins Int16Regs:$val, i32imm:$a),
-// !strconcat("\{\n\t",
-// !strconcat(".reg .b32 temp_retval_reg;\n\t",
-// !strconcat("cvt.u32.u16\ttemp_retval_reg, $val;\n\t",
-// "st.param.b32\t[func_retval0+$a], temp_retval_reg;\n\t\}"))),
-// [(StoreRetval (i32 imm:$a), Int16Regs:$val)]>;
-//def StoreRetvalI8 : NVPTXInst<(outs), (ins Int8Regs:$val, i32imm:$a),
-// !strconcat("\{\n\t",
-// !strconcat(".reg .b32 temp_retval_reg;\n\t",
-// !strconcat("cvt.u32.u16\ttemp_retval_reg, $val;\n\t",
-// "st.param.b32\t[func_retval0+$a], temp_retval_reg;\n\t\}"))),
-// [(StoreRetval (i32 imm:$a), Int8Regs:$val)]>;
+def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;
+def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;
+def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;
+def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;
+def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">;
+def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">;
+def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">;
+def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">;
def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;
def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;
-
-def MoveRetvalI64 : MoveRetvalInst<Int64Regs, ".b64">;
-def MoveRetvalI32 : MoveRetvalInst<Int32Regs, ".b32">;
-def MoveRetvalI16 : MoveRetvalInst<Int16Regs, ".b16">;
-def MoveRetvalI8 : MoveRetvalInst<Int8Regs, ".b8">;
-def MoveRetvalF64 : MoveRetvalInst<Float64Regs, ".f64">;
-def MoveRetvalF32 : MoveRetvalInst<Float32Regs, ".f32">;
-
-def MoveToRetvalI64 : MoveToRetvalInst<Int64Regs, ".b64">;
-def MoveToRetvalI32 : MoveToRetvalInst<Int32Regs, ".b32">;
-def MoveToRetvalF64 : MoveToRetvalInst<Float64Regs, ".f64">;
-def MoveToRetvalF32 : MoveToRetvalInst<Float32Regs, ".f32">;
-def MoveToRetvalI16 : NVPTXInst<(outs), (ins i32imm:$num, Int16Regs:$val),
- "cvt.u32.u16\tfunc_retval$num, $val;",
- [(MoveToRetval (i32 imm:$num), Int16Regs:$val)]>;
-def MoveToRetvalI8 : NVPTXInst<(outs), (ins i32imm:$num, Int8Regs:$val),
- "cvt.u32.u16\tfunc_retval$num, $val;",
- [(MoveToRetval (i32 imm:$num), Int8Regs:$val)]>;
+def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">;
+def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">;
+def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">;
def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;
def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;
@@ -2056,7 +1870,6 @@ class LastCallArgInst<NVPTXRegClass regclass> :
def CallArgI64 : CallArgInst<Int64Regs>;
def CallArgI32 : CallArgInst<Int32Regs>;
def CallArgI16 : CallArgInst<Int16Regs>;
-def CallArgI8 : CallArgInst<Int8Regs>;
def CallArgF64 : CallArgInst<Float64Regs>;
def CallArgF32 : CallArgInst<Float32Regs>;
@@ -2064,7 +1877,6 @@ def CallArgF32 : CallArgInst<Float32Regs>;
def LastCallArgI64 : LastCallArgInst<Int64Regs>;
def LastCallArgI32 : LastCallArgInst<Int32Regs>;
def LastCallArgI16 : LastCallArgInst<Int16Regs>;
-def LastCallArgI8 : LastCallArgInst<Int8Regs>;
def LastCallArgF64 : LastCallArgInst<Float64Regs>;
def LastCallArgF32 : LastCallArgInst<Float32Regs>;
@@ -2124,9 +1936,6 @@ def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
def MoveParamI16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
"cvt.u16.u32\t$dst, $src;",
[(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
-def MoveParamI8 : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$src),
- "cvt.u16.u32\t$dst, $src;",
- [(set Int8Regs:$dst, (MoveParam Int8Regs:$src))]>;
def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
@@ -2138,7 +1947,6 @@ class PseudoUseParamInst<NVPTXRegClass regclass> :
def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;
-def PseudoUseParamI8 : PseudoUseParamInst<Int8Regs>;
def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
@@ -2180,7 +1988,7 @@ multiclass LD<NVPTXRegClass regclass> {
}
let mayLoad=1, neverHasSideEffects=1 in {
-defm LD_i8 : LD<Int8Regs>;
+defm LD_i8 : LD<Int16Regs>;
defm LD_i16 : LD<Int16Regs>;
defm LD_i32 : LD<Int32Regs>;
defm LD_i64 : LD<Int64Regs>;
@@ -2222,7 +2030,7 @@ multiclass ST<NVPTXRegClass regclass> {
}
let mayStore=1, neverHasSideEffects=1 in {
-defm ST_i8 : ST<Int8Regs>;
+defm ST_i8 : ST<Int16Regs>;
defm ST_i16 : ST<Int16Regs>;
defm ST_i32 : ST<Int32Regs>;
defm ST_i64 : ST<Int64Regs>;
@@ -2306,7 +2114,7 @@ multiclass LD_VEC<NVPTXRegClass regclass> {
[]>;
}
let mayLoad=1, neverHasSideEffects=1 in {
-defm LDV_i8 : LD_VEC<Int8Regs>;
+defm LDV_i8 : LD_VEC<Int16Regs>;
defm LDV_i16 : LD_VEC<Int16Regs>;
defm LDV_i32 : LD_VEC<Int32Regs>;
defm LDV_i64 : LD_VEC<Int64Regs>;
@@ -2389,7 +2197,7 @@ multiclass ST_VEC<NVPTXRegClass regclass> {
[]>;
}
let mayStore=1, neverHasSideEffects=1 in {
-defm STV_i8 : ST_VEC<Int8Regs>;
+defm STV_i8 : ST_VEC<Int16Regs>;
defm STV_i16 : ST_VEC<Int16Regs>;
defm STV_i32 : ST_VEC<Int32Regs>;
defm STV_i64 : ST_VEC<Int64Regs>;
@@ -2400,291 +2208,6 @@ defm STV_f64 : ST_VEC<Float64Regs>;
//---- Conversion ----
-multiclass CVT_INT_TO_FP <string OpStr, SDNode OpNode> {
-// FIXME: need to add f16 support
-// def CVTf16i8 :
-// NVPTXInst<(outs Float16Regs:$d), (ins Int8Regs:$a),
-// !strconcat(!strconcat("cvt.rn.f16.", OpStr), "8 \t$d, $a;"),
-// [(set Float16Regs:$d, (OpNode Int8Regs:$a))]>;
-// def CVTf16i16 :
-// NVPTXInst<(outs Float16Regs:$d), (ins Int16Regs:$a),
-// !strconcat(!strconcat("cvt.rn.f16.", OpStr), "16 \t$d, $a;"),
-// [(set Float16Regs:$d, (OpNode Int16Regs:$a))]>;
-// def CVTf16i32 :
-// NVPTXInst<(outs Float16Regs:$d), (ins Int32Regs:$a),
-// !strconcat(!strconcat("cvt.rn.f16.", OpStr), "32 \t$d, $a;"),
-// [(set Float16Regs:$d, (OpNode Int32Regs:$a))]>;
-// def CVTf16i64:
-// NVPTXInst<(outs Float16Regs:$d), (ins Int64Regs:$a),
-// !strconcat(!strconcat("cvt.rn.f32.", OpStr), "64 \t$d, $a;"),
-// [(set Float32Regs:$d, (OpNode Int64Regs:$a))]>;
-
- def CVTf32i1 :
- NVPTXInst<(outs Float32Regs:$d), (ins Int1Regs:$a),
- "selp.f32 \t$d, 1.0, 0.0, $a;",
- [(set Float32Regs:$d, (OpNode Int1Regs:$a))]>;
- def CVTf32i8 :
- NVPTXInst<(outs Float32Regs:$d), (ins Int8Regs:$a),
- !strconcat(!strconcat("cvt.rn.f32.", OpStr), "8 \t$d, $a;"),
- [(set Float32Regs:$d, (OpNode Int8Regs:$a))]>;
- def CVTf32i16 :
- NVPTXInst<(outs Float32Regs:$d), (ins Int16Regs:$a),
- !strconcat(!strconcat("cvt.rn.f32.", OpStr), "16 \t$d, $a;"),
- [(set Float32Regs:$d, (OpNode Int16Regs:$a))]>;
- def CVTf32i32 :
- NVPTXInst<(outs Float32Regs:$d), (ins Int32Regs:$a),
- !strconcat(!strconcat("cvt.rn.f32.", OpStr), "32 \t$d, $a;"),
- [(set Float32Regs:$d, (OpNode Int32Regs:$a))]>;
- def CVTf32i64:
- NVPTXInst<(outs Float32Regs:$d), (ins Int64Regs:$a),
- !strconcat(!strconcat("cvt.rn.f32.", OpStr), "64 \t$d, $a;"),
- [(set Float32Regs:$d, (OpNode Int64Regs:$a))]>;
-
- def CVTf64i1 :
- NVPTXInst<(outs Float64Regs:$d), (ins Int1Regs:$a),
- "selp.f64 \t$d, 1.0, 0.0, $a;",
- [(set Float64Regs:$d, (OpNode Int1Regs:$a))]>;
- def CVTf64i8 :
- NVPTXInst<(outs Float64Regs:$d), (ins Int8Regs:$a),
- !strconcat(!strconcat("cvt.rn.f64.", OpStr), "8 \t$d, $a;"),
- [(set Float64Regs:$d, (OpNode Int8Regs:$a))]>;
- def CVTf64i16 :
- NVPTXInst<(outs Float64Regs:$d), (ins Int16Regs:$a),
- !strconcat(!strconcat("cvt.rn.f64.", OpStr), "16 \t$d, $a;"),
- [(set Float64Regs:$d, (OpNode Int16Regs:$a))]>;
- def CVTf64i32 :
- NVPTXInst<(outs Float64Regs:$d), (ins Int32Regs:$a),
- !strconcat(!strconcat("cvt.rn.f64.", OpStr), "32 \t$d, $a;"),
- [(set Float64Regs:$d, (OpNode Int32Regs:$a))]>;
- def CVTf64i64:
- NVPTXInst<(outs Float64Regs:$d), (ins Int64Regs:$a),
- !strconcat(!strconcat("cvt.rn.f64.", OpStr), "64 \t$d, $a;"),
- [(set Float64Regs:$d, (OpNode Int64Regs:$a))]>;
-}
-
-defm Sint_to_fp : CVT_INT_TO_FP <"s", sint_to_fp>;
-defm Uint_to_fp : CVT_INT_TO_FP <"u", uint_to_fp>;
-
-multiclass CVT_FP_TO_INT <string OpStr, SDNode OpNode> {
-// FIXME: need to add f16 support
-// def CVTi8f16:
-// NVPTXInst<(outs Int8Regs:$d), (ins Float16Regs:$a),
-// !strconcat(!strconcat("cvt.rzi.", OpStr), "8.f16 $d, $a;"),
-// [(set Int8Regs:$d, (OpNode Float16Regs:$a))]>;
- def CVTi8f32_ftz:
- NVPTXInst<(outs Int8Regs:$d), (ins Float32Regs:$a),
- !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "16.f32 \t$d, $a;"),
- [(set Int8Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>;
- def CVTi8f32:
- NVPTXInst<(outs Int8Regs:$d), (ins Float32Regs:$a),
- !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f32 \t$d, $a;"),
- [(set Int8Regs:$d, (OpNode Float32Regs:$a))]>;
- def CVTi8f64:
- NVPTXInst<(outs Int8Regs:$d), (ins Float64Regs:$a),
- !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f64 \t$d, $a;"),
- [(set Int8Regs:$d, (OpNode Float64Regs:$a))]>;
-
-// FIXME: need to add f16 support
-// def CVTi16f16:
-// NVPTXInst<(outs Int16Regs:$d), (ins Float16Regs:$a),
-// !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f16 \t$d, $a;"),
-// [(set Int16Regs:$d, (OpNode Float16Regs:$a))]>;
- def CVTi16f32_ftz:
- NVPTXInst<(outs Int16Regs:$d), (ins Float32Regs:$a),
- !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "16.f32 \t$d, $a;"),
- [(set Int16Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>;
- def CVTi16f32:
- NVPTXInst<(outs Int16Regs:$d), (ins Float32Regs:$a),
- !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f32 \t$d, $a;"),
- [(set Int16Regs:$d, (OpNode Float32Regs:$a))]>;
- def CVTi16f64:
- NVPTXInst<(outs Int16Regs:$d), (ins Float64Regs:$a),
- !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f64 \t$d, $a;"),
- [(set Int16Regs:$d, (OpNode Float64Regs:$a))]>;
-
-// FIXME: need to add f16 support
-// def CVTi32f16: def CVTi32f16:
-// NVPTXInst<(outs Int32Regs:$d), (ins Float16Regs:$a),
-// !strconcat(!strconcat("cvt.rzi.", OpStr), "32.f16 \t$d, $a;"),
-// [(set Int32Regs:$d, (OpNode Float16Regs:$a))]>;
- def CVTi32f32_ftz:
- NVPTXInst<(outs Int32Regs:$d), (ins Float32Regs:$a),
- !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "32.f32 \t$d, $a;"),
- [(set Int32Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>;
- def CVTi32f32:
- NVPTXInst<(outs Int32Regs:$d), (ins Float32Regs:$a),
- !strconcat(!strconcat("cvt.rzi.", OpStr), "32.f32 \t$d, $a;"),
- [(set Int32Regs:$d, (OpNode Float32Regs:$a))]>;
- def CVTi32f64:
- NVPTXInst<(outs Int32Regs:$d), (ins Float64Regs:$a),
- !strconcat(!strconcat("cvt.rzi.", OpStr), "32.f64 \t$d, $a;"),
- [(set Int32Regs:$d, (OpNode Float64Regs:$a))]>;
-
-// FIXME: need to add f16 support
-// def CVTi64f16:
-// NVPTXInst<(outs Int64Regs:$d), (ins Float16Regs:$a),
-// !strconcat(!strconcat("cvt.rzi.", OpStr), "64.f16 \t$d, $a;"),
-// [(set Int64Regs:$d, (OpNode Float16Regs:$a))]>;
- def CVTi64f32_ftz:
- NVPTXInst<(outs Int64Regs:$d), (ins Float32Regs:$a),
- !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "64.f32 \t$d, $a;"),
- [(set Int64Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>;
- def CVTi64f32:
- NVPTXInst<(outs Int64Regs:$d), (ins Float32Regs:$a),
- !strconcat(!strconcat("cvt.rzi.", OpStr), "64.f32 \t$d, $a;"),
- [(set Int64Regs:$d, (OpNode Float32Regs:$a))]>;
- def CVTi64f64:
- NVPTXInst<(outs Int64Regs:$d), (ins Float64Regs:$a),
- !strconcat(!strconcat("cvt.rzi.", OpStr), "64.f64 \t$d, $a;"),
- [(set Int64Regs:$d, (OpNode Float64Regs:$a))]>;
-}
-
-defm Fp_to_sint : CVT_FP_TO_INT <"s", fp_to_sint>;
-defm Fp_to_uint : CVT_FP_TO_INT <"u", fp_to_uint>;
-
-multiclass INT_EXTEND_UNSIGNED_1 <SDNode OpNode> {
- def ext1to8:
- NVPTXInst<(outs Int8Regs:$d), (ins Int1Regs:$a),
- "selp.u16 \t$d, 1, 0, $a;",
- [(set Int8Regs:$d, (OpNode Int1Regs:$a))]>;
- def ext1to16:
- NVPTXInst<(outs Int16Regs:$d), (ins Int1Regs:$a),
- "selp.u16 \t$d, 1, 0, $a;",
- [(set Int16Regs:$d, (OpNode Int1Regs:$a))]>;
- def ext1to32:
- NVPTXInst<(outs Int32Regs:$d), (ins Int1Regs:$a),
- "selp.u32 \t$d, 1, 0, $a;",
- [(set Int32Regs:$d, (OpNode Int1Regs:$a))]>;
- def ext1to64:
- NVPTXInst<(outs Int64Regs:$d), (ins Int1Regs:$a),
- "selp.u64 \t$d, 1, 0, $a;",
- [(set Int64Regs:$d, (OpNode Int1Regs:$a))]>;
-}
-
-multiclass INT_EXTEND_SIGNED_1 <SDNode OpNode> {
- def ext1to8:
- NVPTXInst<(outs Int8Regs:$d), (ins Int1Regs:$a),
- "selp.s16 \t$d, -1, 0, $a;",
- [(set Int8Regs:$d, (OpNode Int1Regs:$a))]>;
- def ext1to16:
- NVPTXInst<(outs Int16Regs:$d), (ins Int1Regs:$a),
- "selp.s16 \t$d, -1, 0, $a;",
- [(set Int16Regs:$d, (OpNode Int1Regs:$a))]>;
- def ext1to32:
- NVPTXInst<(outs Int32Regs:$d), (ins Int1Regs:$a),
- "selp.s32 \t$d, -1, 0, $a;",
- [(set Int32Regs:$d, (OpNode Int1Regs:$a))]>;
- def ext1to64:
- NVPTXInst<(outs Int64Regs:$d), (ins Int1Regs:$a),
- "selp.s64 \t$d, -1, 0, $a;",
- [(set Int64Regs:$d, (OpNode Int1Regs:$a))]>;
-}
-
-multiclass INT_EXTEND <string OpStr, SDNode OpNode> {
- // All Int8Regs are emiited as 16bit registers in ptx.
- // And there is no selp.u8 in ptx.
- def ext8to16:
- NVPTXInst<(outs Int16Regs:$d), (ins Int8Regs:$a),
- !strconcat("cvt.", !strconcat(OpStr, !strconcat("16.",
- !strconcat(OpStr, "8 \t$d, $a;")))),
- [(set Int16Regs:$d, (OpNode Int8Regs:$a))]>;
- def ext8to32:
- NVPTXInst<(outs Int32Regs:$d), (ins Int8Regs:$a),
- !strconcat("cvt.", !strconcat(OpStr, !strconcat("32.",
- !strconcat(OpStr, "8 \t$d, $a;")))),
- [(set Int32Regs:$d, (OpNode Int8Regs:$a))]>;
- def ext8to64:
- NVPTXInst<(outs Int64Regs:$d), (ins Int8Regs:$a),
- !strconcat("cvt.", !strconcat(OpStr, !strconcat("64.",
- !strconcat(OpStr, "8 \t$d, $a;")))),
- [(set Int64Regs:$d, (OpNode Int8Regs:$a))]>;
- def ext16to32:
- NVPTXInst<(outs Int32Regs:$d), (ins Int16Regs:$a),
- !strconcat("cvt.", !strconcat(OpStr, !strconcat("32.",
- !strconcat(OpStr, "16 \t$d, $a;")))),
- [(set Int32Regs:$d, (OpNode Int16Regs:$a))]>;
- def ext16to64:
- NVPTXInst<(outs Int64Regs:$d), (ins Int16Regs:$a),
- !strconcat("cvt.", !strconcat(OpStr, !strconcat("64.",
- !strconcat(OpStr, "16 \t$d, $a;")))),
- [(set Int64Regs:$d, (OpNode Int16Regs:$a))]>;
- def ext32to64:
- NVPTXInst<(outs Int64Regs:$d), (ins Int32Regs:$a),
- !strconcat("cvt.", !strconcat(OpStr, !strconcat("64.",
- !strconcat(OpStr, "32 \t$d, $a;")))),
- [(set Int64Regs:$d, (OpNode Int32Regs:$a))]>;
-}
-
-defm Sint_extend_1 : INT_EXTEND_SIGNED_1<sext>;
-defm Zint_extend_1 : INT_EXTEND_UNSIGNED_1<zext>;
-defm Aint_extend_1 : INT_EXTEND_UNSIGNED_1<anyext>;
-
-defm Sint_extend : INT_EXTEND <"s", sext>;
-defm Zint_extend : INT_EXTEND <"u", zext>;
-defm Aint_extend : INT_EXTEND <"u", anyext>;
-
-class TRUNC_to1_asm<string sz> {
- string s = !strconcat("{{\n\t",
- !strconcat(".reg ",
- !strconcat(sz,
- !strconcat(" temp;\n\t",
- !strconcat("and",
- !strconcat(sz,
- !strconcat("\t temp, $a, 1;\n\t",
- !strconcat("setp",
- !strconcat(sz, ".eq \t $d, temp, 1;\n\t}}")))))))));
-}
-
-def TRUNC_64to32 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
- "cvt.u32.u64 \t$d, $a;",
- [(set Int32Regs:$d, (trunc Int64Regs:$a))]>;
-def TRUNC_64to16 : NVPTXInst<(outs Int16Regs:$d), (ins Int64Regs:$a),
- "cvt.u16.u64 \t$d, $a;",
- [(set Int16Regs:$d, (trunc Int64Regs:$a))]>;
-def TRUNC_64to8 : NVPTXInst<(outs Int8Regs:$d), (ins Int64Regs:$a),
- "cvt.u8.u64 \t$d, $a;",
- [(set Int8Regs:$d, (trunc Int64Regs:$a))]>;
-def TRUNC_32to16 : NVPTXInst<(outs Int16Regs:$d), (ins Int32Regs:$a),
- "cvt.u16.u32 \t$d, $a;",
- [(set Int16Regs:$d, (trunc Int32Regs:$a))]>;
-def TRUNC_32to8 : NVPTXInst<(outs Int8Regs:$d), (ins Int32Regs:$a),
- "cvt.u8.u32 \t$d, $a;",
- [(set Int8Regs:$d, (trunc Int32Regs:$a))]>;
-def TRUNC_16to8 : NVPTXInst<(outs Int8Regs:$d), (ins Int16Regs:$a),
- "cvt.u8.u16 \t$d, $a;",
- [(set Int8Regs:$d, (trunc Int16Regs:$a))]>;
-def TRUNC_64to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
- TRUNC_to1_asm<".b64">.s,
- [(set Int1Regs:$d, (trunc Int64Regs:$a))]>;
-def TRUNC_32to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int32Regs:$a),
- TRUNC_to1_asm<".b32">.s,
- [(set Int1Regs:$d, (trunc Int32Regs:$a))]>;
-def TRUNC_16to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int16Regs:$a),
- TRUNC_to1_asm<".b16">.s,
- [(set Int1Regs:$d, (trunc Int16Regs:$a))]>;
-def TRUNC_8to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int8Regs:$a),
- TRUNC_to1_asm<".b16">.s,
- [(set Int1Regs:$d, (trunc Int8Regs:$a))]>;
-
-// Select instructions
-def : Pat<(select Int32Regs:$pred, Int8Regs:$a, Int8Regs:$b),
- (SELECTi8rr Int8Regs:$a, Int8Regs:$b, (TRUNC_32to1 Int32Regs:$pred))>;
-def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
- (SELECTi16rr Int16Regs:$a, Int16Regs:$b,
- (TRUNC_32to1 Int32Regs:$pred))>;
-def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
- (SELECTi32rr Int32Regs:$a, Int32Regs:$b,
- (TRUNC_32to1 Int32Regs:$pred))>;
-def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
- (SELECTi64rr Int64Regs:$a, Int64Regs:$b,
- (TRUNC_32to1 Int32Regs:$pred))>;
-def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
- (SELECTf32rr Float32Regs:$a, Float32Regs:$b,
- (TRUNC_32to1 Int32Regs:$pred))>;
-def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
- (SELECTf64rr Float64Regs:$a, Float64Regs:$b,
- (TRUNC_32to1 Int32Regs:$pred))>;
-
class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
NVPTXRegClass regclassOut> :
NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
@@ -2696,29 +2219,209 @@ def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;
def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;
def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;
+// NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where
+// we cannot specify floating-point literals in isel patterns. Therefore, we
+// use an integer selp to select either 1 or 0 and then cvt to floating-point.
+
+// sint -> f32
+def : Pat<(f32 (sint_to_fp Int1Regs:$a)),
+ (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int16Regs:$a)),
+ (CVT_f32_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int32Regs:$a)),
+ (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int64Regs:$a)),
+ (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f32
+def : Pat<(f32 (uint_to_fp Int1Regs:$a)),
+ (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int16Regs:$a)),
+ (CVT_f32_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int32Regs:$a)),
+ (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int64Regs:$a)),
+ (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
+
+// sint -> f64
+def : Pat<(f64 (sint_to_fp Int1Regs:$a)),
+ (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int16Regs:$a)),
+ (CVT_f64_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int32Regs:$a)),
+ (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int64Regs:$a)),
+ (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f64
+def : Pat<(f64 (uint_to_fp Int1Regs:$a)),
+ (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int16Regs:$a)),
+ (CVT_f64_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int32Regs:$a)),
+ (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int64Regs:$a)),
+ (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
+
+
+// f32 -> sint
+def : Pat<(i1 (fp_to_sint Float32Regs:$a)),
+ (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+ (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+ (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
+
+// f32 -> uint
+def : Pat<(i1 (fp_to_uint Float32Regs:$a)),
+ (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+ (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+ (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
+
+// f64 -> sint
+def : Pat<(i1 (fp_to_sint Float64Regs:$a)),
+ (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float64Regs:$a)),
+ (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float64Regs:$a)),
+ (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float64Regs:$a)),
+ (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
+
+// f64 -> uint
+def : Pat<(i1 (fp_to_uint Float64Regs:$a)),
+ (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float64Regs:$a)),
+ (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float64Regs:$a)),
+ (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float64Regs:$a)),
+ (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
+
+// sext i1
+def : Pat<(i16 (sext Int1Regs:$a)),
+ (SELP_s16ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (sext Int1Regs:$a)),
+ (SELP_s32ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (sext Int1Regs:$a)),
+ (SELP_s64ii -1, 0, Int1Regs:$a)>;
+
+// zext i1
+def : Pat<(i16 (zext Int1Regs:$a)),
+ (SELP_u16ii 1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (zext Int1Regs:$a)),
+ (SELP_u32ii 1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (zext Int1Regs:$a)),
+ (SELP_u64ii 1, 0, Int1Regs:$a)>;
+
+// anyext i1
+def : Pat<(i16 (anyext Int1Regs:$a)),
+ (SELP_u16ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (anyext Int1Regs:$a)),
+ (SELP_u32ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (anyext Int1Regs:$a)),
+ (SELP_u64ii -1, 0, Int1Regs:$a)>;
+
+// sext i16
+def : Pat<(i32 (sext Int16Regs:$a)),
+ (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (sext Int16Regs:$a)),
+ (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;
+
+// zext i16
+def : Pat<(i32 (zext Int16Regs:$a)),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (zext Int16Regs:$a)),
+ (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
+
+// anyext i16
+def : Pat<(i32 (anyext Int16Regs:$a)),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (anyext Int16Regs:$a)),
+ (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
+
+// sext i32
+def : Pat<(i64 (sext Int32Regs:$a)),
+ (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;
+
+// zext i32
+def : Pat<(i64 (zext Int32Regs:$a)),
+ (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
+
+// anyext i32
+def : Pat<(i64 (anyext Int32Regs:$a)),
+ (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
+
+
+// truncate i64
+def : Pat<(i32 (trunc Int64Regs:$a)),
+ (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;
+def : Pat<(i16 (trunc Int64Regs:$a)),
+ (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;
+def : Pat<(i1 (trunc Int64Regs:$a)),
+ (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;
+
+// truncate i32
+def : Pat<(i16 (trunc Int32Regs:$a)),
+ (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;
+def : Pat<(i1 (trunc Int32Regs:$a)),
+ (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;
+
+// truncate i16
+def : Pat<(i1 (trunc Int16Regs:$a)),
+ (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;
+
+// sext_inreg
+def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
+def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
+def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
+
+
+// Select instructions with 32-bit predicates
+def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
+ (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
+ (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
+ (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
+ (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
+ (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+
+
// pack a set of smaller int registers to a larger int register
-def V4I8toI32 : NVPTXInst<(outs Int32Regs:$d),
- (ins Int8Regs:$s1, Int8Regs:$s2,
- Int8Regs:$s3, Int8Regs:$s4),
- !strconcat("{{\n\t.reg .b8\t%t<4>;",
- !strconcat("\n\tcvt.u8.u8\t%t0, $s1;",
- !strconcat("\n\tcvt.u8.u8\t%t1, $s2;",
- !strconcat("\n\tcvt.u8.u8\t%t2, $s3;",
- !strconcat("\n\tcvt.u8.u8\t%t3, $s4;",
- "\n\tmov.b32\t$d, {%t0, %t1, %t2, %t3};\n\t}}"))))),
- []>;
def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
(ins Int16Regs:$s1, Int16Regs:$s2,
Int16Regs:$s3, Int16Regs:$s4),
"mov.b64\t$d, {{$s1, $s2, $s3, $s4}};",
[]>;
-def V2I8toI16 : NVPTXInst<(outs Int16Regs:$d),
- (ins Int8Regs:$s1, Int8Regs:$s2),
- !strconcat("{{\n\t.reg .b8\t%t<2>;",
- !strconcat("\n\tcvt.u8.u8\t%t0, $s1;",
- !strconcat("\n\tcvt.u8.u8\t%t1, $s2;",
- "\n\tmov.b16\t$d, {%t0, %t1};\n\t}}"))),
- []>;
def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
(ins Int16Regs:$s1, Int16Regs:$s2),
"mov.b32\t$d, {{$s1, $s2}};",
@@ -2733,28 +2436,11 @@ def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
[]>;
// unpack a larger int register to a set of smaller int registers
-def I32toV4I8 : NVPTXInst<(outs Int8Regs:$d1, Int8Regs:$d2,
- Int8Regs:$d3, Int8Regs:$d4),
- (ins Int32Regs:$s),
- !strconcat("{{\n\t.reg .b8\t%t<4>;",
- !strconcat("\n\tmov.b32\t{%t0, %t1, %t2, %t3}, $s;",
- !strconcat("\n\tcvt.u8.u8\t$d1, %t0;",
- !strconcat("\n\tcvt.u8.u8\t$d2, %t1;",
- !strconcat("\n\tcvt.u8.u8\t$d3, %t2;",
- "\n\tcvt.u8.u8\t$d4, %t3;\n\t}}"))))),
- []>;
def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
Int16Regs:$d3, Int16Regs:$d4),
(ins Int64Regs:$s),
"mov.b64\t{{$d1, $d2, $d3, $d4}}, $s;",
[]>;
-def I16toV2I8 : NVPTXInst<(outs Int8Regs:$d1, Int8Regs:$d2),
- (ins Int16Regs:$s),
- !strconcat("{{\n\t.reg .b8\t%t<2>;",
- !strconcat("\n\tmov.b16\t{%t0, %t1}, $s;",
- !strconcat("\n\tcvt.u8.u8\t$d1, %t0;",
- "\n\tcvt.u8.u8\t$d2, %t1;\n\t}}"))),
- []>;
def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
(ins Int32Regs:$s),
"mov.b32\t{{$d1, $d2}}, $s;",
@@ -2768,21 +2454,75 @@ def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
"mov.b64\t{{$d1, $d2}}, $s;",
[]>;
-def FPRound_ftz : NVPTXInst<(outs Float32Regs:$d), (ins Float64Regs:$a),
- "cvt.rn.ftz.f32.f64 \t$d, $a;",
- [(set Float32Regs:$d, (fround Float64Regs:$a))]>, Requires<[doF32FTZ]>;
-
-def FPRound : NVPTXInst<(outs Float32Regs:$d), (ins Float64Regs:$a),
- "cvt.rn.f32.f64 \t$d, $a;",
- [(set Float32Regs:$d, (fround Float64Regs:$a))]>;
-
-def FPExtend_ftz : NVPTXInst<(outs Float64Regs:$d), (ins Float32Regs:$a),
- "cvt.ftz.f64.f32 \t$d, $a;",
- [(set Float64Regs:$d, (fextend Float32Regs:$a))]>, Requires<[doF32FTZ]>;
-
-def FPExtend : NVPTXInst<(outs Float64Regs:$d), (ins Float32Regs:$a),
- "cvt.f64.f32 \t$d, $a;",
- [(set Float64Regs:$d, (fextend Float32Regs:$a))]>;
+// Count leading zeros
+def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "clz.b32\t$d, $a;",
+ []>;
+def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "clz.b64\t$d, $a;",
+ []>;
+
+// 32-bit has a direct PTX instruction
+def : Pat<(ctlz Int32Regs:$a),
+ (CLZr32 Int32Regs:$a)>;
+def : Pat<(ctlz_zero_undef Int32Regs:$a),
+ (CLZr32 Int32Regs:$a)>;
+
+// For 64-bit, the result in PTX is actually 32-bit so we zero-extend
+// to 64-bit to match the LLVM semantics
+def : Pat<(ctlz Int64Regs:$a),
+ (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(ctlz_zero_undef Int64Regs:$a),
+ (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
+
+// For 16-bit, we zero-extend to 32-bit, then trunc the result back
+// to 16-bits (ctlz of a 16-bit value is guaranteed to require less
+// than 16 bits to store). We also need to subtract 16 because the
+// high-order 16 zeros were counted.
+def : Pat<(ctlz Int16Regs:$a),
+ (SUBi16ri (CVT_u16_u32 (CLZr32
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)),
+ CvtNONE), 16)>;
+def : Pat<(ctlz_zero_undef Int16Regs:$a),
+ (SUBi16ri (CVT_u16_u32 (CLZr32
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)),
+ CvtNONE), 16)>;
+
+// Population count
+def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "popc.b32\t$d, $a;",
+ []>;
+def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "popc.b64\t$d, $a;",
+ []>;
+
+// 32-bit has a direct PTX instruction
+def : Pat<(ctpop Int32Regs:$a),
+ (POPCr32 Int32Regs:$a)>;
+
+// For 64-bit, the result in PTX is actually 32-bit so we zero-extend
+// to 64-bit to match the LLVM semantics
+def : Pat<(ctpop Int64Regs:$a),
+ (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
+
+// For 16-bit, we zero-extend to 32-bit, then trunc the result back
+// to 16-bits (ctpop of a 16-bit value is guaranteed to require less
+// than 16 bits to store)
+def : Pat<(ctpop Int16Regs:$a),
+ (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)),
+ CvtNONE)>;
+
+// fround f64 -> f32
+def : Pat<(f32 (fround Float64Regs:$a)),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fround Float64Regs:$a)),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
+
+// fextend f32 -> f64
+def : Pat<(f64 (fextend Float32Regs:$a)),
+ (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f64 (fextend Float32Regs:$a)),
+ (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
[SDNPHasChain, SDNPOptInGlue]>;
@@ -2810,8 +2550,8 @@ let isTerminator=1 in {
[(br bb:$target)]>;
}
-def : Pat<(brcond Int32Regs:$a, bb:$target), (CBranch
- (ISetUNEi32ri_p Int32Regs:$a, 0), bb:$target)>;
+def : Pat<(brcond Int32Regs:$a, bb:$target),
+ (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
// SelectionDAGBuilder::visitSWitchCase() will invert the condition of a
// conditional branch if
@@ -2867,6 +2607,20 @@ def trapinst : NVPTXInst<(outs), (ins),
"trap;",
[(trap)]>;
+// Call prototype wrapper
+def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def CallPrototype
+ : SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def ProtoIdent : Operand<i32> {
+ let PrintMethod = "printProtoIdent";
+}
+def CALL_PROTOTYPE
+ : NVPTXInst<(outs), (ins ProtoIdent:$ident),
+ "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
+
+
+
include "NVPTXIntrinsics.td"
diff --git a/lib/Target/NVPTX/NVPTXIntrinsics.td b/lib/Target/NVPTX/NVPTXIntrinsics.td
index 24037cafefe9..14049b1cf8eb 100644
--- a/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -82,49 +82,36 @@ def INT_MEMBAR_SYS : MEMBAR<"membar.sys;", int_nvvm_membar_sys>;
//-----------------------------------
// Map min(1.0, max(0.0, x)) to sat(x)
-multiclass SAT<NVPTXRegClass regclass, Operand fimm, Intrinsic IntMinOp,
- Intrinsic IntMaxOp, PatLeaf f0, PatLeaf f1, string OpStr> {
-
- // fmin(1.0, fmax(0.0, x)) => sat(x)
- def SAT11 : NVPTXInst<(outs regclass:$dst),
- (ins fimm:$srcf0, fimm:$srcf1, regclass:$src),
- OpStr,
- [(set regclass:$dst, (IntMinOp f1:$srcf0 ,
- (IntMaxOp f0:$srcf1, regclass:$src)))]>;
-
- // fmin(1.0, fmax(x, 0.0)) => sat(x)
- def SAT12 : NVPTXInst<(outs regclass:$dst),
- (ins fimm:$srcf0, fimm:$srcf1, regclass:$src),
- OpStr,
- [(set regclass:$dst, (IntMinOp f1:$srcf0 ,
- (IntMaxOp regclass:$src, f0:$srcf1)))]>;
-
- // fmin(fmax(0.0, x), 1.0) => sat(x)
- def SAT13 : NVPTXInst<(outs regclass:$dst),
- (ins fimm:$srcf0, fimm:$srcf1, regclass:$src),
- OpStr,
- [(set regclass:$dst, (IntMinOp
- (IntMaxOp f0:$srcf0, regclass:$src), f1:$srcf1))]>;
-
- // fmin(fmax(x, 0.0), 1.0) => sat(x)
- def SAT14 : NVPTXInst<(outs regclass:$dst),
- (ins fimm:$srcf0, fimm:$srcf1, regclass:$src),
- OpStr,
- [(set regclass:$dst, (IntMinOp
- (IntMaxOp regclass:$src, f0:$srcf0), f1:$srcf1))]>;
-
-}
-// Note that max(0.0, min(x, 1.0)) cannot be mapped to sat(x) because when x
-// is NaN
+// Note that max(0.0, min(x, 1.0)) cannot be mapped to sat(x) because when x is
+// NaN
// max(0.0, min(x, 1.0)) is 1.0 while sat(x) is 0.
// Same story for fmax, fmin.
-defm SAT_fmin_fmax_f : SAT<Float32Regs, f32imm, int_nvvm_fmin_f,
- int_nvvm_fmax_f, immFloat0, immFloat1,
- "cvt.sat.f32.f32 \t$dst, $src; \n">;
-defm SAT_fmin_fmax_d : SAT<Float64Regs, f64imm, int_nvvm_fmin_d,
- int_nvvm_fmax_d, immDouble0, immDouble1,
- "cvt.sat.f64.f64 \t$dst, $src; \n">;
+def : Pat<(int_nvvm_fmin_f immFloat1,
+ (int_nvvm_fmax_f immFloat0, Float32Regs:$a)),
+ (CVT_f32_f32 Float32Regs:$a, CvtSAT)>;
+def : Pat<(int_nvvm_fmin_f immFloat1,
+ (int_nvvm_fmax_f Float32Regs:$a, immFloat0)),
+ (CVT_f32_f32 Float32Regs:$a, CvtSAT)>;
+def : Pat<(int_nvvm_fmin_f
+ (int_nvvm_fmax_f immFloat0, Float32Regs:$a), immFloat1),
+ (CVT_f32_f32 Float32Regs:$a, CvtSAT)>;
+def : Pat<(int_nvvm_fmin_f
+ (int_nvvm_fmax_f Float32Regs:$a, immFloat0), immFloat1),
+ (CVT_f32_f32 Float32Regs:$a, CvtSAT)>;
+
+def : Pat<(int_nvvm_fmin_d immDouble1,
+ (int_nvvm_fmax_d immDouble0, Float64Regs:$a)),
+ (CVT_f64_f64 Float64Regs:$a, CvtSAT)>;
+def : Pat<(int_nvvm_fmin_d immDouble1,
+ (int_nvvm_fmax_d Float64Regs:$a, immDouble0)),
+ (CVT_f64_f64 Float64Regs:$a, CvtSAT)>;
+def : Pat<(int_nvvm_fmin_d
+ (int_nvvm_fmax_d immDouble0, Float64Regs:$a), immDouble1),
+ (CVT_f64_f64 Float64Regs:$a, CvtSAT)>;
+def : Pat<(int_nvvm_fmin_d
+ (int_nvvm_fmax_d Float64Regs:$a, immDouble0), immDouble1),
+ (CVT_f64_f64 Float64Regs:$a, CvtSAT)>;
// We need a full string for OpcStr here because we need to deal with case like
@@ -312,19 +299,19 @@ def INT_NVVM_SAD_UI : F_MATH_3<"sad.u32 \t$dst, $src0, $src1, $src2;",
// Floor Ceil
//
-def INT_NVVM_FLOOR_FTZ_F : F_MATH_1<"cvt.rmi.ftz.f32.f32 \t$dst, $src0;",
- Float32Regs, Float32Regs, int_nvvm_floor_ftz_f>;
-def INT_NVVM_FLOOR_F : F_MATH_1<"cvt.rmi.f32.f32 \t$dst, $src0;",
- Float32Regs, Float32Regs, int_nvvm_floor_f>;
-def INT_NVVM_FLOOR_D : F_MATH_1<"cvt.rmi.f64.f64 \t$dst, $src0;",
- Float64Regs, Float64Regs, int_nvvm_floor_d>;
+def : Pat<(int_nvvm_floor_ftz_f Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>;
+def : Pat<(int_nvvm_floor_f Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRMI)>;
+def : Pat<(int_nvvm_floor_d Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;
-def INT_NVVM_CEIL_FTZ_F : F_MATH_1<"cvt.rpi.ftz.f32.f32 \t$dst, $src0;",
- Float32Regs, Float32Regs, int_nvvm_ceil_ftz_f>;
-def INT_NVVM_CEIL_F : F_MATH_1<"cvt.rpi.f32.f32 \t$dst, $src0;",
- Float32Regs, Float32Regs, int_nvvm_ceil_f>;
-def INT_NVVM_CEIL_D : F_MATH_1<"cvt.rpi.f64.f64 \t$dst, $src0;",
- Float64Regs, Float64Regs, int_nvvm_ceil_d>;
+def : Pat<(int_nvvm_ceil_ftz_f Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>;
+def : Pat<(int_nvvm_ceil_f Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRPI)>;
+def : Pat<(int_nvvm_ceil_d Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;
//
// Abs
@@ -347,37 +334,34 @@ def INT_NVVM_FABS_D : F_MATH_1<"abs.f64 \t$dst, $src0;", Float64Regs,
// Round
//
-def INT_NVVM_ROUND_FTZ_F : F_MATH_1<"cvt.rni.ftz.f32.f32 \t$dst, $src0;",
- Float32Regs, Float32Regs, int_nvvm_round_ftz_f>;
-def INT_NVVM_ROUND_F : F_MATH_1<"cvt.rni.f32.f32 \t$dst, $src0;", Float32Regs,
- Float32Regs, int_nvvm_round_f>;
-
-def INT_NVVM_ROUND_D : F_MATH_1<"cvt.rni.f64.f64 \t$dst, $src0;", Float64Regs,
- Float64Regs, int_nvvm_round_d>;
+def : Pat<(int_nvvm_round_ftz_f Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>;
+def : Pat<(int_nvvm_round_f Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>;
+def : Pat<(int_nvvm_round_d Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
//
// Trunc
//
-def INT_NVVM_TRUNC_FTZ_F : F_MATH_1<"cvt.rzi.ftz.f32.f32 \t$dst, $src0;",
- Float32Regs, Float32Regs, int_nvvm_trunc_ftz_f>;
-def INT_NVVM_TRUNC_F : F_MATH_1<"cvt.rzi.f32.f32 \t$dst, $src0;", Float32Regs,
- Float32Regs, int_nvvm_trunc_f>;
-
-def INT_NVVM_TRUNC_D : F_MATH_1<"cvt.rzi.f64.f64 \t$dst, $src0;", Float64Regs,
- Float64Regs, int_nvvm_trunc_d>;
+def : Pat<(int_nvvm_trunc_ftz_f Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>;
+def : Pat<(int_nvvm_trunc_f Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(int_nvvm_trunc_d Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;
//
// Saturate
//
-def INT_NVVM_SATURATE_FTZ_F : F_MATH_1<"cvt.sat.ftz.f32.f32 \t$dst, $src0;",
- Float32Regs, Float32Regs, int_nvvm_saturate_ftz_f>;
-def INT_NVVM_SATURATE_F : F_MATH_1<"cvt.sat.f32.f32 \t$dst, $src0;",
- Float32Regs, Float32Regs, int_nvvm_saturate_f>;
-
-def INT_NVVM_SATURATE_D : F_MATH_1<"cvt.sat.f64.f64 \t$dst, $src0;",
- Float64Regs, Float64Regs, int_nvvm_saturate_d>;
+def : Pat<(int_nvvm_saturate_ftz_f Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtSAT_FTZ)>;
+def : Pat<(int_nvvm_saturate_f Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtSAT)>;
+def : Pat<(int_nvvm_saturate_d Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtSAT)>;
//
// Exp2 Log2
@@ -568,110 +552,110 @@ def INT_NVVM_ADD_RP_D : F_MATH_2<"add.rp.f64 \t$dst, $src0, $src1;",
// Convert
//
-def INT_NVVM_D2F_RN_FTZ : F_MATH_1<"cvt.rn.ftz.f32.f64 \t$dst, $src0;",
- Float32Regs, Float64Regs, int_nvvm_d2f_rn_ftz>;
-def INT_NVVM_D2F_RN : F_MATH_1<"cvt.rn.f32.f64 \t$dst, $src0;",
- Float32Regs, Float64Regs, int_nvvm_d2f_rn>;
-def INT_NVVM_D2F_RZ_FTZ : F_MATH_1<"cvt.rz.ftz.f32.f64 \t$dst, $src0;",
- Float32Regs, Float64Regs, int_nvvm_d2f_rz_ftz>;
-def INT_NVVM_D2F_RZ : F_MATH_1<"cvt.rz.f32.f64 \t$dst, $src0;",
- Float32Regs, Float64Regs, int_nvvm_d2f_rz>;
-def INT_NVVM_D2F_RM_FTZ : F_MATH_1<"cvt.rm.ftz.f32.f64 \t$dst, $src0;",
- Float32Regs, Float64Regs, int_nvvm_d2f_rm_ftz>;
-def INT_NVVM_D2F_RM : F_MATH_1<"cvt.rm.f32.f64 \t$dst, $src0;",
- Float32Regs, Float64Regs, int_nvvm_d2f_rm>;
-def INT_NVVM_D2F_RP_FTZ : F_MATH_1<"cvt.rp.ftz.f32.f64 \t$dst, $src0;",
- Float32Regs, Float64Regs, int_nvvm_d2f_rp_ftz>;
-def INT_NVVM_D2F_RP : F_MATH_1<"cvt.rp.f32.f64 \t$dst, $src0;",
- Float32Regs, Float64Regs, int_nvvm_d2f_rp>;
-
-def INT_NVVM_D2I_RN : F_MATH_1<"cvt.rni.s32.f64 \t$dst, $src0;",
- Int32Regs, Float64Regs, int_nvvm_d2i_rn>;
-def INT_NVVM_D2I_RZ : F_MATH_1<"cvt.rzi.s32.f64 \t$dst, $src0;",
- Int32Regs, Float64Regs, int_nvvm_d2i_rz>;
-def INT_NVVM_D2I_RM : F_MATH_1<"cvt.rmi.s32.f64 \t$dst, $src0;",
- Int32Regs, Float64Regs, int_nvvm_d2i_rm>;
-def INT_NVVM_D2I_RP : F_MATH_1<"cvt.rpi.s32.f64 \t$dst, $src0;",
- Int32Regs, Float64Regs, int_nvvm_d2i_rp>;
-
-def INT_NVVM_D2UI_RN : F_MATH_1<"cvt.rni.u32.f64 \t$dst, $src0;",
- Int32Regs, Float64Regs, int_nvvm_d2ui_rn>;
-def INT_NVVM_D2UI_RZ : F_MATH_1<"cvt.rzi.u32.f64 \t$dst, $src0;",
- Int32Regs, Float64Regs, int_nvvm_d2ui_rz>;
-def INT_NVVM_D2UI_RM : F_MATH_1<"cvt.rmi.u32.f64 \t$dst, $src0;",
- Int32Regs, Float64Regs, int_nvvm_d2ui_rm>;
-def INT_NVVM_D2UI_RP : F_MATH_1<"cvt.rpi.u32.f64 \t$dst, $src0;",
- Int32Regs, Float64Regs, int_nvvm_d2ui_rp>;
-
-def INT_NVVM_I2D_RN : F_MATH_1<"cvt.rn.f64.s32 \t$dst, $src0;",
- Float64Regs, Int32Regs, int_nvvm_i2d_rn>;
-def INT_NVVM_I2D_RZ : F_MATH_1<"cvt.rz.f64.s32 \t$dst, $src0;",
- Float64Regs, Int32Regs, int_nvvm_i2d_rz>;
-def INT_NVVM_I2D_RM : F_MATH_1<"cvt.rm.f64.s32 \t$dst, $src0;",
- Float64Regs, Int32Regs, int_nvvm_i2d_rm>;
-def INT_NVVM_I2D_RP : F_MATH_1<"cvt.rp.f64.s32 \t$dst, $src0;",
- Float64Regs, Int32Regs, int_nvvm_i2d_rp>;
-
-def INT_NVVM_UI2D_RN : F_MATH_1<"cvt.rn.f64.u32 \t$dst, $src0;",
- Float64Regs, Int32Regs, int_nvvm_ui2d_rn>;
-def INT_NVVM_UI2D_RZ : F_MATH_1<"cvt.rz.f64.u32 \t$dst, $src0;",
- Float64Regs, Int32Regs, int_nvvm_ui2d_rz>;
-def INT_NVVM_UI2D_RM : F_MATH_1<"cvt.rm.f64.u32 \t$dst, $src0;",
- Float64Regs, Int32Regs, int_nvvm_ui2d_rm>;
-def INT_NVVM_UI2D_RP : F_MATH_1<"cvt.rp.f64.u32 \t$dst, $src0;",
- Float64Regs, Int32Regs, int_nvvm_ui2d_rp>;
-
-def INT_NVVM_F2I_RN_FTZ : F_MATH_1<"cvt.rni.ftz.s32.f32 \t$dst, $src0;",
- Int32Regs, Float32Regs, int_nvvm_f2i_rn_ftz>;
-def INT_NVVM_F2I_RN : F_MATH_1<"cvt.rni.s32.f32 \t$dst, $src0;", Int32Regs,
- Float32Regs, int_nvvm_f2i_rn>;
-def INT_NVVM_F2I_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.s32.f32 \t$dst, $src0;",
- Int32Regs, Float32Regs, int_nvvm_f2i_rz_ftz>;
-def INT_NVVM_F2I_RZ : F_MATH_1<"cvt.rzi.s32.f32 \t$dst, $src0;", Int32Regs,
- Float32Regs, int_nvvm_f2i_rz>;
-def INT_NVVM_F2I_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.s32.f32 \t$dst, $src0;",
- Int32Regs, Float32Regs, int_nvvm_f2i_rm_ftz>;
-def INT_NVVM_F2I_RM : F_MATH_1<"cvt.rmi.s32.f32 \t$dst, $src0;", Int32Regs,
- Float32Regs, int_nvvm_f2i_rm>;
-def INT_NVVM_F2I_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.s32.f32 \t$dst, $src0;",
- Int32Regs, Float32Regs, int_nvvm_f2i_rp_ftz>;
-def INT_NVVM_F2I_RP : F_MATH_1<"cvt.rpi.s32.f32 \t$dst, $src0;", Int32Regs,
- Float32Regs, int_nvvm_f2i_rp>;
-
-def INT_NVVM_F2UI_RN_FTZ : F_MATH_1<"cvt.rni.ftz.u32.f32 \t$dst, $src0;",
- Int32Regs, Float32Regs, int_nvvm_f2ui_rn_ftz>;
-def INT_NVVM_F2UI_RN : F_MATH_1<"cvt.rni.u32.f32 \t$dst, $src0;", Int32Regs,
- Float32Regs, int_nvvm_f2ui_rn>;
-def INT_NVVM_F2UI_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.u32.f32 \t$dst, $src0;",
- Int32Regs, Float32Regs, int_nvvm_f2ui_rz_ftz>;
-def INT_NVVM_F2UI_RZ : F_MATH_1<"cvt.rzi.u32.f32 \t$dst, $src0;", Int32Regs,
- Float32Regs, int_nvvm_f2ui_rz>;
-def INT_NVVM_F2UI_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.u32.f32 \t$dst, $src0;",
- Int32Regs, Float32Regs, int_nvvm_f2ui_rm_ftz>;
-def INT_NVVM_F2UI_RM : F_MATH_1<"cvt.rmi.u32.f32 \t$dst, $src0;", Int32Regs,
- Float32Regs, int_nvvm_f2ui_rm>;
-def INT_NVVM_F2UI_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.u32.f32 \t$dst, $src0;",
- Int32Regs, Float32Regs, int_nvvm_f2ui_rp_ftz>;
-def INT_NVVM_F2UI_RP : F_MATH_1<"cvt.rpi.u32.f32 \t$dst, $src0;", Int32Regs,
- Float32Regs, int_nvvm_f2ui_rp>;
-
-def INT_NVVM_I2F_RN : F_MATH_1<"cvt.rn.f32.s32 \t$dst, $src0;", Float32Regs,
- Int32Regs, int_nvvm_i2f_rn>;
-def INT_NVVM_I2F_RZ : F_MATH_1<"cvt.rz.f32.s32 \t$dst, $src0;", Float32Regs,
- Int32Regs, int_nvvm_i2f_rz>;
-def INT_NVVM_I2F_RM : F_MATH_1<"cvt.rm.f32.s32 \t$dst, $src0;", Float32Regs,
- Int32Regs, int_nvvm_i2f_rm>;
-def INT_NVVM_I2F_RP : F_MATH_1<"cvt.rp.f32.s32 \t$dst, $src0;", Float32Regs,
- Int32Regs, int_nvvm_i2f_rp>;
-
-def INT_NVVM_UI2F_RN : F_MATH_1<"cvt.rn.f32.u32 \t$dst, $src0;", Float32Regs,
- Int32Regs, int_nvvm_ui2f_rn>;
-def INT_NVVM_UI2F_RZ : F_MATH_1<"cvt.rz.f32.u32 \t$dst, $src0;", Float32Regs,
- Int32Regs, int_nvvm_ui2f_rz>;
-def INT_NVVM_UI2F_RM : F_MATH_1<"cvt.rm.f32.u32 \t$dst, $src0;", Float32Regs,
- Int32Regs, int_nvvm_ui2f_rm>;
-def INT_NVVM_UI2F_RP : F_MATH_1<"cvt.rp.f32.u32 \t$dst, $src0;", Float32Regs,
- Int32Regs, int_nvvm_ui2f_rp>;
+def : Pat<(int_nvvm_d2f_rn_ftz Float64Regs:$a),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>;
+def : Pat<(int_nvvm_d2f_rn Float64Regs:$a),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
+def : Pat<(int_nvvm_d2f_rz_ftz Float64Regs:$a),
+ (CVT_f32_f64 Float64Regs:$a, CvtRZ_FTZ)>;
+def : Pat<(int_nvvm_d2f_rz Float64Regs:$a),
+ (CVT_f32_f64 Float64Regs:$a, CvtRZ)>;
+def : Pat<(int_nvvm_d2f_rm_ftz Float64Regs:$a),
+ (CVT_f32_f64 Float64Regs:$a, CvtRM_FTZ)>;
+def : Pat<(int_nvvm_d2f_rm Float64Regs:$a),
+ (CVT_f32_f64 Float64Regs:$a, CvtRM)>;
+def : Pat<(int_nvvm_d2f_rp_ftz Float64Regs:$a),
+ (CVT_f32_f64 Float64Regs:$a, CvtRP_FTZ)>;
+def : Pat<(int_nvvm_d2f_rp Float64Regs:$a),
+ (CVT_f32_f64 Float64Regs:$a, CvtRP)>;
+
+def : Pat<(int_nvvm_d2i_rn Float64Regs:$a),
+ (CVT_s32_f64 Float64Regs:$a, CvtRNI)>;
+def : Pat<(int_nvvm_d2i_rz Float64Regs:$a),
+ (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(int_nvvm_d2i_rm Float64Regs:$a),
+ (CVT_s32_f64 Float64Regs:$a, CvtRMI)>;
+def : Pat<(int_nvvm_d2i_rp Float64Regs:$a),
+ (CVT_s32_f64 Float64Regs:$a, CvtRPI)>;
+
+def : Pat<(int_nvvm_d2ui_rn Float64Regs:$a),
+ (CVT_u32_f64 Float64Regs:$a, CvtRNI)>;
+def : Pat<(int_nvvm_d2ui_rz Float64Regs:$a),
+ (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(int_nvvm_d2ui_rm Float64Regs:$a),
+ (CVT_u32_f64 Float64Regs:$a, CvtRMI)>;
+def : Pat<(int_nvvm_d2ui_rp Float64Regs:$a),
+ (CVT_u32_f64 Float64Regs:$a, CvtRPI)>;
+
+def : Pat<(int_nvvm_i2d_rn Int32Regs:$a),
+ (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(int_nvvm_i2d_rz Int32Regs:$a),
+ (CVT_f64_s32 Int32Regs:$a, CvtRZ)>;
+def : Pat<(int_nvvm_i2d_rm Int32Regs:$a),
+ (CVT_f64_s32 Int32Regs:$a, CvtRM)>;
+def : Pat<(int_nvvm_i2d_rp Int32Regs:$a),
+ (CVT_f64_s32 Int32Regs:$a, CvtRP)>;
+
+def : Pat<(int_nvvm_ui2d_rn Int32Regs:$a),
+ (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(int_nvvm_ui2d_rz Int32Regs:$a),
+ (CVT_f64_u32 Int32Regs:$a, CvtRZ)>;
+def : Pat<(int_nvvm_ui2d_rm Int32Regs:$a),
+ (CVT_f64_u32 Int32Regs:$a, CvtRM)>;
+def : Pat<(int_nvvm_ui2d_rp Int32Regs:$a),
+ (CVT_f64_u32 Int32Regs:$a, CvtRP)>;
+
+def : Pat<(int_nvvm_f2i_rn_ftz Float32Regs:$a),
+ (CVT_s32_f32 Float32Regs:$a, CvtRNI_FTZ)>;
+def : Pat<(int_nvvm_f2i_rn Float32Regs:$a),
+ (CVT_s32_f32 Float32Regs:$a, CvtRNI)>;
+def : Pat<(int_nvvm_f2i_rz_ftz Float32Regs:$a),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>;
+def : Pat<(int_nvvm_f2i_rz Float32Regs:$a),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(int_nvvm_f2i_rm_ftz Float32Regs:$a),
+ (CVT_s32_f32 Float32Regs:$a, CvtRMI_FTZ)>;
+def : Pat<(int_nvvm_f2i_rm Float32Regs:$a),
+ (CVT_s32_f32 Float32Regs:$a, CvtRMI)>;
+def : Pat<(int_nvvm_f2i_rp_ftz Float32Regs:$a),
+ (CVT_s32_f32 Float32Regs:$a, CvtRPI_FTZ)>;
+def : Pat<(int_nvvm_f2i_rp Float32Regs:$a),
+ (CVT_s32_f32 Float32Regs:$a, CvtRPI)>;
+
+def : Pat<(int_nvvm_f2ui_rn_ftz Float32Regs:$a),
+ (CVT_u32_f32 Float32Regs:$a, CvtRNI_FTZ)>;
+def : Pat<(int_nvvm_f2ui_rn Float32Regs:$a),
+ (CVT_u32_f32 Float32Regs:$a, CvtRNI)>;
+def : Pat<(int_nvvm_f2ui_rz_ftz Float32Regs:$a),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>;
+def : Pat<(int_nvvm_f2ui_rz Float32Regs:$a),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(int_nvvm_f2ui_rm_ftz Float32Regs:$a),
+ (CVT_u32_f32 Float32Regs:$a, CvtRMI_FTZ)>;
+def : Pat<(int_nvvm_f2ui_rm Float32Regs:$a),
+ (CVT_u32_f32 Float32Regs:$a, CvtRMI)>;
+def : Pat<(int_nvvm_f2ui_rp_ftz Float32Regs:$a),
+ (CVT_u32_f32 Float32Regs:$a, CvtRPI_FTZ)>;
+def : Pat<(int_nvvm_f2ui_rp Float32Regs:$a),
+ (CVT_u32_f32 Float32Regs:$a, CvtRPI)>;
+
+def : Pat<(int_nvvm_i2f_rn Int32Regs:$a),
+ (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(int_nvvm_i2f_rz Int32Regs:$a),
+ (CVT_f32_s32 Int32Regs:$a, CvtRZ)>;
+def : Pat<(int_nvvm_i2f_rm Int32Regs:$a),
+ (CVT_f32_s32 Int32Regs:$a, CvtRM)>;
+def : Pat<(int_nvvm_i2f_rp Int32Regs:$a),
+ (CVT_f32_s32 Int32Regs:$a, CvtRP)>;
+
+def : Pat<(int_nvvm_ui2f_rn Int32Regs:$a),
+ (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(int_nvvm_ui2f_rz Int32Regs:$a),
+ (CVT_f32_u32 Int32Regs:$a, CvtRZ)>;
+def : Pat<(int_nvvm_ui2f_rm Int32Regs:$a),
+ (CVT_f32_u32 Int32Regs:$a, CvtRM)>;
+def : Pat<(int_nvvm_ui2f_rp Int32Regs:$a),
+ (CVT_f32_u32 Int32Regs:$a, CvtRP)>;
def INT_NVVM_LOHI_I2D : F_MATH_2<"mov.b64 \t$dst, {{$src0, $src1}};",
Float64Regs, Int32Regs, Int32Regs, int_nvvm_lohi_i2d>;
@@ -687,91 +671,106 @@ def INT_NVVM_D2I_HI : F_MATH_1<!strconcat("{{\n\t",
"}}"))),
Int32Regs, Float64Regs, int_nvvm_d2i_hi>;
-def INT_NVVM_F2LL_RN_FTZ : F_MATH_1<"cvt.rni.ftz.s64.f32 \t$dst, $src0;",
- Int64Regs, Float32Regs, int_nvvm_f2ll_rn_ftz>;
-def INT_NVVM_F2LL_RN : F_MATH_1<"cvt.rni.s64.f32 \t$dst, $src0;", Int64Regs,
- Float32Regs, int_nvvm_f2ll_rn>;
-def INT_NVVM_F2LL_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.s64.f32 \t$dst, $src0;",
- Int64Regs, Float32Regs, int_nvvm_f2ll_rz_ftz>;
-def INT_NVVM_F2LL_RZ : F_MATH_1<"cvt.rzi.s64.f32 \t$dst, $src0;", Int64Regs,
- Float32Regs, int_nvvm_f2ll_rz>;
-def INT_NVVM_F2LL_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.s64.f32 \t$dst, $src0;",
- Int64Regs, Float32Regs, int_nvvm_f2ll_rm_ftz>;
-def INT_NVVM_F2LL_RM : F_MATH_1<"cvt.rmi.s64.f32 \t$dst, $src0;", Int64Regs,
- Float32Regs, int_nvvm_f2ll_rm>;
-def INT_NVVM_F2LL_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.s64.f32 \t$dst, $src0;",
- Int64Regs, Float32Regs, int_nvvm_f2ll_rp_ftz>;
-def INT_NVVM_F2LL_RP : F_MATH_1<"cvt.rpi.s64.f32 \t$dst, $src0;", Int64Regs,
- Float32Regs, int_nvvm_f2ll_rp>;
-
-def INT_NVVM_F2ULL_RN_FTZ : F_MATH_1<"cvt.rni.ftz.u64.f32 \t$dst, $src0;",
- Int64Regs, Float32Regs, int_nvvm_f2ull_rn_ftz>;
-def INT_NVVM_F2ULL_RN : F_MATH_1<"cvt.rni.u64.f32 \t$dst, $src0;", Int64Regs,
- Float32Regs, int_nvvm_f2ull_rn>;
-def INT_NVVM_F2ULL_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.u64.f32 \t$dst, $src0;",
- Int64Regs, Float32Regs, int_nvvm_f2ull_rz_ftz>;
-def INT_NVVM_F2ULL_RZ : F_MATH_1<"cvt.rzi.u64.f32 \t$dst, $src0;", Int64Regs,
- Float32Regs, int_nvvm_f2ull_rz>;
-def INT_NVVM_F2ULL_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.u64.f32 \t$dst, $src0;",
- Int64Regs, Float32Regs, int_nvvm_f2ull_rm_ftz>;
-def INT_NVVM_F2ULL_RM : F_MATH_1<"cvt.rmi.u64.f32 \t$dst, $src0;", Int64Regs,
- Float32Regs, int_nvvm_f2ull_rm>;
-def INT_NVVM_F2ULL_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.u64.f32 \t$dst, $src0;",
- Int64Regs, Float32Regs, int_nvvm_f2ull_rp_ftz>;
-def INT_NVVM_F2ULL_RP : F_MATH_1<"cvt.rpi.u64.f32 \t$dst, $src0;", Int64Regs,
- Float32Regs, int_nvvm_f2ull_rp>;
-
-def INT_NVVM_D2LL_RN : F_MATH_1<"cvt.rni.s64.f64 \t$dst, $src0;", Int64Regs,
- Float64Regs, int_nvvm_d2ll_rn>;
-def INT_NVVM_D2LL_RZ : F_MATH_1<"cvt.rzi.s64.f64 \t$dst, $src0;", Int64Regs,
- Float64Regs, int_nvvm_d2ll_rz>;
-def INT_NVVM_D2LL_RM : F_MATH_1<"cvt.rmi.s64.f64 \t$dst, $src0;", Int64Regs,
- Float64Regs, int_nvvm_d2ll_rm>;
-def INT_NVVM_D2LL_RP : F_MATH_1<"cvt.rpi.s64.f64 \t$dst, $src0;", Int64Regs,
- Float64Regs, int_nvvm_d2ll_rp>;
-
-def INT_NVVM_D2ULL_RN : F_MATH_1<"cvt.rni.u64.f64 \t$dst, $src0;", Int64Regs,
- Float64Regs, int_nvvm_d2ull_rn>;
-def INT_NVVM_D2ULL_RZ : F_MATH_1<"cvt.rzi.u64.f64 \t$dst, $src0;", Int64Regs,
- Float64Regs, int_nvvm_d2ull_rz>;
-def INT_NVVM_D2ULL_RM : F_MATH_1<"cvt.rmi.u64.f64 \t$dst, $src0;", Int64Regs,
- Float64Regs, int_nvvm_d2ull_rm>;
-def INT_NVVM_D2ULL_RP : F_MATH_1<"cvt.rpi.u64.f64 \t$dst, $src0;", Int64Regs,
- Float64Regs, int_nvvm_d2ull_rp>;
-
-def INT_NVVM_LL2F_RN : F_MATH_1<"cvt.rn.f32.s64 \t$dst, $src0;", Float32Regs,
- Int64Regs, int_nvvm_ll2f_rn>;
-def INT_NVVM_LL2F_RZ : F_MATH_1<"cvt.rz.f32.s64 \t$dst, $src0;", Float32Regs,
- Int64Regs, int_nvvm_ll2f_rz>;
-def INT_NVVM_LL2F_RM : F_MATH_1<"cvt.rm.f32.s64 \t$dst, $src0;", Float32Regs,
- Int64Regs, int_nvvm_ll2f_rm>;
-def INT_NVVM_LL2F_RP : F_MATH_1<"cvt.rp.f32.s64 \t$dst, $src0;", Float32Regs,
- Int64Regs, int_nvvm_ll2f_rp>;
-def INT_NVVM_ULL2F_RN : F_MATH_1<"cvt.rn.f32.u64 \t$dst, $src0;", Float32Regs,
- Int64Regs, int_nvvm_ull2f_rn>;
-def INT_NVVM_ULL2F_RZ : F_MATH_1<"cvt.rz.f32.u64 \t$dst, $src0;", Float32Regs,
- Int64Regs, int_nvvm_ull2f_rz>;
-def INT_NVVM_ULL2F_RM : F_MATH_1<"cvt.rm.f32.u64 \t$dst, $src0;", Float32Regs,
- Int64Regs, int_nvvm_ull2f_rm>;
-def INT_NVVM_ULL2F_RP : F_MATH_1<"cvt.rp.f32.u64 \t$dst, $src0;", Float32Regs,
- Int64Regs, int_nvvm_ull2f_rp>;
-
-def INT_NVVM_LL2D_RN : F_MATH_1<"cvt.rn.f64.s64 \t$dst, $src0;", Float64Regs,
- Int64Regs, int_nvvm_ll2d_rn>;
-def INT_NVVM_LL2D_RZ : F_MATH_1<"cvt.rz.f64.s64 \t$dst, $src0;", Float64Regs,
- Int64Regs, int_nvvm_ll2d_rz>;
-def INT_NVVM_LL2D_RM : F_MATH_1<"cvt.rm.f64.s64 \t$dst, $src0;", Float64Regs,
- Int64Regs, int_nvvm_ll2d_rm>;
-def INT_NVVM_LL2D_RP : F_MATH_1<"cvt.rp.f64.s64 \t$dst, $src0;", Float64Regs,
- Int64Regs, int_nvvm_ll2d_rp>;
-def INT_NVVM_ULL2D_RN : F_MATH_1<"cvt.rn.f64.u64 \t$dst, $src0;", Float64Regs,
- Int64Regs, int_nvvm_ull2d_rn>;
-def INT_NVVM_ULL2D_RZ : F_MATH_1<"cvt.rz.f64.u64 \t$dst, $src0;", Float64Regs,
- Int64Regs, int_nvvm_ull2d_rz>;
-def INT_NVVM_ULL2D_RM : F_MATH_1<"cvt.rm.f64.u64 \t$dst, $src0;", Float64Regs,
- Int64Regs, int_nvvm_ull2d_rm>;
-def INT_NVVM_ULL2D_RP : F_MATH_1<"cvt.rp.f64.u64 \t$dst, $src0;", Float64Regs,
- Int64Regs, int_nvvm_ull2d_rp>;
+def : Pat<(int_nvvm_f2ll_rn_ftz Float32Regs:$a),
+ (CVT_s64_f32 Float32Regs:$a, CvtRNI_FTZ)>;
+def : Pat<(int_nvvm_f2ll_rn Float32Regs:$a),
+ (CVT_s64_f32 Float32Regs:$a, CvtRNI)>;
+def : Pat<(int_nvvm_f2ll_rz_ftz Float32Regs:$a),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>;
+def : Pat<(int_nvvm_f2ll_rz Float32Regs:$a),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(int_nvvm_f2ll_rm_ftz Float32Regs:$a),
+ (CVT_s64_f32 Float32Regs:$a, CvtRMI_FTZ)>;
+def : Pat<(int_nvvm_f2ll_rm Float32Regs:$a),
+ (CVT_s64_f32 Float32Regs:$a, CvtRMI)>;
+def : Pat<(int_nvvm_f2ll_rp_ftz Float32Regs:$a),
+ (CVT_s64_f32 Float32Regs:$a, CvtRPI_FTZ)>;
+def : Pat<(int_nvvm_f2ll_rp Float32Regs:$a),
+ (CVT_s64_f32 Float32Regs:$a, CvtRPI)>;
+
+def : Pat<(int_nvvm_f2ull_rn_ftz Float32Regs:$a),
+ (CVT_u64_f32 Float32Regs:$a, CvtRNI_FTZ)>;
+def : Pat<(int_nvvm_f2ull_rn Float32Regs:$a),
+ (CVT_u64_f32 Float32Regs:$a, CvtRNI)>;
+def : Pat<(int_nvvm_f2ull_rz_ftz Float32Regs:$a),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>;
+def : Pat<(int_nvvm_f2ull_rz Float32Regs:$a),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(int_nvvm_f2ull_rm_ftz Float32Regs:$a),
+ (CVT_u64_f32 Float32Regs:$a, CvtRMI_FTZ)>;
+def : Pat<(int_nvvm_f2ull_rm Float32Regs:$a),
+ (CVT_u64_f32 Float32Regs:$a, CvtRMI)>;
+def : Pat<(int_nvvm_f2ull_rp_ftz Float32Regs:$a),
+ (CVT_u64_f32 Float32Regs:$a, CvtRPI_FTZ)>;
+def : Pat<(int_nvvm_f2ull_rp Float32Regs:$a),
+ (CVT_u64_f32 Float32Regs:$a, CvtRPI)>;
+
+def : Pat<(int_nvvm_d2ll_rn Float64Regs:$a),
+ (CVT_s64_f64 Float64Regs:$a, CvtRNI)>;
+def : Pat<(int_nvvm_d2ll_rz Float64Regs:$a),
+ (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(int_nvvm_d2ll_rm Float64Regs:$a),
+ (CVT_s64_f64 Float64Regs:$a, CvtRMI)>;
+def : Pat<(int_nvvm_d2ll_rp Float64Regs:$a),
+ (CVT_s64_f64 Float64Regs:$a, CvtRPI)>;
+
+def : Pat<(int_nvvm_d2ull_rn Float64Regs:$a),
+ (CVT_u64_f64 Float64Regs:$a, CvtRNI)>;
+def : Pat<(int_nvvm_d2ull_rz Float64Regs:$a),
+ (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(int_nvvm_d2ull_rm Float64Regs:$a),
+ (CVT_u64_f64 Float64Regs:$a, CvtRMI)>;
+def : Pat<(int_nvvm_d2ull_rp Float64Regs:$a),
+ (CVT_u64_f64 Float64Regs:$a, CvtRPI)>;
+
+def : Pat<(int_nvvm_ll2f_rn Int64Regs:$a),
+ (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
+def : Pat<(int_nvvm_ll2f_rz Int64Regs:$a),
+ (CVT_f32_s64 Int64Regs:$a, CvtRZ)>;
+def : Pat<(int_nvvm_ll2f_rm Int64Regs:$a),
+ (CVT_f32_s64 Int64Regs:$a, CvtRM)>;
+def : Pat<(int_nvvm_ll2f_rp Int64Regs:$a),
+ (CVT_f32_s64 Int64Regs:$a, CvtRP)>;
+
+def : Pat<(int_nvvm_ull2f_rn Int64Regs:$a),
+ (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
+def : Pat<(int_nvvm_ull2f_rz Int64Regs:$a),
+ (CVT_f32_u64 Int64Regs:$a, CvtRZ)>;
+def : Pat<(int_nvvm_ull2f_rm Int64Regs:$a),
+ (CVT_f32_u64 Int64Regs:$a, CvtRM)>;
+def : Pat<(int_nvvm_ull2f_rp Int64Regs:$a),
+ (CVT_f32_u64 Int64Regs:$a, CvtRP)>;
+
+def : Pat<(int_nvvm_ll2d_rn Int64Regs:$a),
+ (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
+def : Pat<(int_nvvm_ll2d_rz Int64Regs:$a),
+ (CVT_f64_s64 Int64Regs:$a, CvtRZ)>;
+def : Pat<(int_nvvm_ll2d_rm Int64Regs:$a),
+ (CVT_f64_s64 Int64Regs:$a, CvtRM)>;
+def : Pat<(int_nvvm_ll2d_rp Int64Regs:$a),
+ (CVT_f64_s64 Int64Regs:$a, CvtRP)>;
+
+def : Pat<(int_nvvm_ull2d_rn Int64Regs:$a),
+ (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
+def : Pat<(int_nvvm_ull2d_rz Int64Regs:$a),
+ (CVT_f64_u64 Int64Regs:$a, CvtRZ)>;
+def : Pat<(int_nvvm_ull2d_rm Int64Regs:$a),
+ (CVT_f64_u64 Int64Regs:$a, CvtRM)>;
+def : Pat<(int_nvvm_ull2d_rp Int64Regs:$a),
+ (CVT_f64_u64 Int64Regs:$a, CvtRP)>;
+
+
+// FIXME: Ideally, we could use these patterns instead of the scope-creating
+// patterns, but ptxas does not like these since .s16 is not compatible with
+// .f16. The solution is to use .bXX for all integer register types, but we
+// are not there yet.
+//def : Pat<(int_nvvm_f2h_rn_ftz Float32Regs:$a),
+// (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>;
+//def : Pat<(int_nvvm_f2h_rn Float32Regs:$a),
+// (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
+//
+//def : Pat<(int_nvvm_h2f Int16Regs:$a),
+// (CVT_f32_f16 Int16Regs:$a, CvtNONE)>;
def INT_NVVM_F2H_RN_FTZ : F_MATH_1<!strconcat("{{\n\t",
!strconcat(".reg .b16 %temp;\n\t",
@@ -793,6 +792,13 @@ def INT_NVVM_H2F : F_MATH_1<!strconcat("{{\n\t",
"}}")))),
Float32Regs, Int16Regs, int_nvvm_h2f>;
+def : Pat<(f32 (f16_to_f32 Int16Regs:$a)),
+ (CVT_f32_f16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i16 (f32_to_f16 Float32Regs:$a)),
+ (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (f32_to_f16 Float32Regs:$a)),
+ (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
+
//
// Bitcast
//
@@ -1270,6 +1276,11 @@ def INT_PTX_SREG_WARPSIZE : F_SREG<"mov.u32 \t$dst, WARP_SZ;", Int32Regs,
// Support for ldu on sm_20 or later
//-----------------------------------
+def ldu_i8 : PatFrag<(ops node:$ptr), (int_nvvm_ldu_global_i node:$ptr), [{
+ MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
+ return M->getMemoryVT() == MVT::i8;
+}]>;
+
// Scalar
// @TODO: Revisit this, Changed imemAny to imem
multiclass LDU_G<string TyStr, NVPTXRegClass regclass, Intrinsic IntOp> {
@@ -1291,8 +1302,27 @@ multiclass LDU_G<string TyStr, NVPTXRegClass regclass, Intrinsic IntOp> {
[(set regclass:$result, (IntOp ADDRri64:$src))]>, Requires<[hasLDU]>;
}
-defm INT_PTX_LDU_GLOBAL_i8 : LDU_G<"u8 \t$result, [$src];", Int8Regs,
-int_nvvm_ldu_global_i>;
+multiclass LDU_G_NOINTRIN<string TyStr, NVPTXRegClass regclass, PatFrag IntOp> {
+ def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp Int32Regs:$src))]>, Requires<[hasLDU]>;
+ def areg64: NVPTXInst<(outs regclass:$result), (ins Int64Regs:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp Int64Regs:$src))]>, Requires<[hasLDU]>;
+ def avar: NVPTXInst<(outs regclass:$result), (ins imem:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp (Wrapper tglobaladdr:$src)))]>,
+ Requires<[hasLDU]>;
+ def ari : NVPTXInst<(outs regclass:$result), (ins MEMri:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp ADDRri:$src))]>, Requires<[hasLDU]>;
+ def ari64 : NVPTXInst<(outs regclass:$result), (ins MEMri64:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp ADDRri64:$src))]>, Requires<[hasLDU]>;
+}
+
+defm INT_PTX_LDU_GLOBAL_i8 : LDU_G_NOINTRIN<"u8 \t$result, [$src];", Int16Regs,
+ ldu_i8>;
defm INT_PTX_LDU_GLOBAL_i16 : LDU_G<"u16 \t$result, [$src];", Int16Regs,
int_nvvm_ldu_global_i>;
defm INT_PTX_LDU_GLOBAL_i32 : LDU_G<"u32 \t$result, [$src];", Int32Regs,
@@ -1312,25 +1342,43 @@ int_nvvm_ldu_global_p>;
// Elementized vector ldu
multiclass VLDU_G_ELE_V2<string TyStr, NVPTXRegClass regclass> {
- def _32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
- (ins Int32Regs:$src),
+ def _areg32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins Int32Regs:$src),
+ !strconcat("ldu.global.", TyStr), []>;
+ def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins Int64Regs:$src),
+ !strconcat("ldu.global.", TyStr), []>;
+ def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins MEMri:$src),
!strconcat("ldu.global.", TyStr), []>;
- def _64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
- (ins Int64Regs:$src),
+ def _ari64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins MEMri64:$src),
+ !strconcat("ldu.global.", TyStr), []>;
+ def _avar: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins imemAny:$src),
!strconcat("ldu.global.", TyStr), []>;
}
-multiclass VLDU_G_ELE_V4<string TyStr, NVPTXRegClass regclass> {
- def _32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
- regclass:$dst4), (ins Int32Regs:$src),
+multiclass VLDU_G_ELE_V4<string TyStr, NVPTXRegClass regclass> {
+ def _areg32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins Int32Regs:$src),
+ !strconcat("ldu.global.", TyStr), []>;
+ def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins Int64Regs:$src),
+ !strconcat("ldu.global.", TyStr), []>;
+ def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins MEMri:$src),
!strconcat("ldu.global.", TyStr), []>;
- def _64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
- regclass:$dst4), (ins Int64Regs:$src),
+ def _ari64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins MEMri64:$src),
+ !strconcat("ldu.global.", TyStr), []>;
+ def _avar: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins imemAny:$src),
!strconcat("ldu.global.", TyStr), []>;
}
defm INT_PTX_LDU_G_v2i8_ELE
- : VLDU_G_ELE_V2<"v2.u8 \t{{$dst1, $dst2}}, [$src];", Int8Regs>;
+ : VLDU_G_ELE_V2<"v2.u8 \t{{$dst1, $dst2}}, [$src];", Int16Regs>;
defm INT_PTX_LDU_G_v2i16_ELE
: VLDU_G_ELE_V2<"v2.u16 \t{{$dst1, $dst2}}, [$src];", Int16Regs>;
defm INT_PTX_LDU_G_v2i32_ELE
@@ -1342,7 +1390,7 @@ defm INT_PTX_LDU_G_v2i64_ELE
defm INT_PTX_LDU_G_v2f64_ELE
: VLDU_G_ELE_V2<"v2.f64 \t{{$dst1, $dst2}}, [$src];", Float64Regs>;
defm INT_PTX_LDU_G_v4i8_ELE
- : VLDU_G_ELE_V4<"v4.u8 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", Int8Regs>;
+ : VLDU_G_ELE_V4<"v4.u8 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", Int16Regs>;
defm INT_PTX_LDU_G_v4i16_ELE
: VLDU_G_ELE_V4<"v4.u16 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];",
Int16Regs>;
@@ -1422,20 +1470,38 @@ defm INT_PTX_LDG_GLOBAL_p64
// Elementized vector ldg
multiclass VLDG_G_ELE_V2<string TyStr, NVPTXRegClass regclass> {
- def _32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ def _areg32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
(ins Int32Regs:$src),
!strconcat("ld.global.nc.", TyStr), []>;
- def _64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
(ins Int64Regs:$src),
!strconcat("ld.global.nc.", TyStr), []>;
+ def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins MEMri:$src),
+ !strconcat("ld.global.nc.", TyStr), []>;
+ def _ari64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins MEMri64:$src),
+ !strconcat("ld.global.nc.", TyStr), []>;
+ def _avar: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins imemAny:$src),
+ !strconcat("ld.global.nc.", TyStr), []>;
}
multiclass VLDG_G_ELE_V4<string TyStr, NVPTXRegClass regclass> {
- def _32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2,
- regclass:$dst3, regclass:$dst4), (ins Int32Regs:$src),
+ def _areg32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins Int32Regs:$src),
+ !strconcat("ld.global.nc.", TyStr), []>;
+ def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins Int64Regs:$src),
+ !strconcat("ld.global.nc.", TyStr), []>;
+ def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins MEMri:$src),
!strconcat("ld.global.nc.", TyStr), []>;
- def _64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2,
- regclass:$dst3, regclass:$dst4), (ins Int64Regs:$src),
+ def _ari64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins MEMri64:$src),
+ !strconcat("ld.global.nc.", TyStr), []>;
+ def _avar: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins imemAny:$src),
!strconcat("ld.global.nc.", TyStr), []>;
}
@@ -1542,10 +1608,6 @@ def nvvm_ptr_gen_to_param_64 : NVPTXInst<(outs Int64Regs:$result),
// nvvm.move intrinsicc
-def nvvm_move_i8 : NVPTXInst<(outs Int8Regs:$r), (ins Int8Regs:$s),
- "mov.b16 \t$r, $s;",
- [(set Int8Regs:$r,
- (int_nvvm_move_i8 Int8Regs:$s))]>;
def nvvm_move_i16 : NVPTXInst<(outs Int16Regs:$r), (ins Int16Regs:$s),
"mov.b16 \t$r, $s;",
[(set Int16Regs:$r,
diff --git a/lib/Target/NVPTX/NVPTXMCExpr.cpp b/lib/Target/NVPTX/NVPTXMCExpr.cpp
new file mode 100644
index 000000000000..ca247642048a
--- /dev/null
+++ b/lib/Target/NVPTX/NVPTXMCExpr.cpp
@@ -0,0 +1,46 @@
+//===-- NVPTXMCExpr.cpp - NVPTX specific MC expression classes ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "nvptx-mcexpr"
+#include "NVPTXMCExpr.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+using namespace llvm;
+
+const NVPTXFloatMCExpr*
+NVPTXFloatMCExpr::Create(VariantKind Kind, APFloat Flt, MCContext &Ctx) {
+ return new (Ctx) NVPTXFloatMCExpr(Kind, Flt);
+}
+
+void NVPTXFloatMCExpr::PrintImpl(raw_ostream &OS) const {
+ bool Ignored;
+ unsigned NumHex;
+ APFloat APF = getAPFloat();
+
+ switch (Kind) {
+ default: llvm_unreachable("Invalid kind!");
+ case VK_NVPTX_SINGLE_PREC_FLOAT:
+ OS << "0f";
+ NumHex = 8;
+ APF.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &Ignored);
+ break;
+ case VK_NVPTX_DOUBLE_PREC_FLOAT:
+ OS << "0d";
+ NumHex = 16;
+ APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &Ignored);
+ break;
+ }
+
+ APInt API = APF.bitcastToAPInt();
+ std::string HexStr(utohexstr(API.getZExtValue()));
+ if (HexStr.length() < NumHex)
+ OS << std::string(NumHex - HexStr.length(), '0');
+ OS << utohexstr(API.getZExtValue());
+}
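
For reference, the "0f"/"0d" forms emitted above are PTX's exact floating-point literals: the
value's bit pattern is printed as zero-padded uppercase hex, so 1.0f becomes 0f3F800000 and 1.0
becomes 0d3FF0000000000000. A minimal standalone sketch of that encoding (plain C++, independent of
the MC layer shown in the patch):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      float F = 1.0f;                         // single precision -> "0f" + 8 hex digits
      uint32_t Bits32;
      std::memcpy(&Bits32, &F, sizeof(Bits32));
      std::printf("0f%08X\n", Bits32);        // prints 0f3F800000

      double D = 1.0;                         // double precision -> "0d" + 16 hex digits
      uint64_t Bits64;
      std::memcpy(&Bits64, &D, sizeof(Bits64));
      std::printf("0d%016llX\n", (unsigned long long)Bits64); // prints 0d3FF0000000000000
      return 0;
    }
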
diff --git a/lib/Target/NVPTX/NVPTXMCExpr.h b/lib/Target/NVPTX/NVPTXMCExpr.h
new file mode 100644
index 000000000000..0efb231d7d2e
--- /dev/null
+++ b/lib/Target/NVPTX/NVPTXMCExpr.h
@@ -0,0 +1,83 @@
+//===-- NVPTXMCExpr.h - NVPTX specific MC expression classes ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// Modeled after ARMMCExpr
+
+#ifndef NVPTXMCEXPR_H
+#define NVPTXMCEXPR_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/MC/MCExpr.h"
+
+namespace llvm {
+
+class NVPTXFloatMCExpr : public MCTargetExpr {
+public:
+ enum VariantKind {
+ VK_NVPTX_None,
+ VK_NVPTX_SINGLE_PREC_FLOAT, // FP constant in single-precision
+ VK_NVPTX_DOUBLE_PREC_FLOAT // FP constant in double-precision
+ };
+
+private:
+ const VariantKind Kind;
+ const APFloat Flt;
+
+ explicit NVPTXFloatMCExpr(VariantKind _Kind, APFloat _Flt)
+ : Kind(_Kind), Flt(_Flt) {}
+
+public:
+ /// @name Construction
+ /// @{
+
+ static const NVPTXFloatMCExpr *Create(VariantKind Kind, APFloat Flt,
+ MCContext &Ctx);
+
+ static const NVPTXFloatMCExpr *CreateConstantFPSingle(APFloat Flt,
+ MCContext &Ctx) {
+ return Create(VK_NVPTX_SINGLE_PREC_FLOAT, Flt, Ctx);
+ }
+
+ static const NVPTXFloatMCExpr *CreateConstantFPDouble(APFloat Flt,
+ MCContext &Ctx) {
+ return Create(VK_NVPTX_DOUBLE_PREC_FLOAT, Flt, Ctx);
+ }
+
+ /// @}
+ /// @name Accessors
+ /// @{
+
+ /// getKind - Get the kind of this expression.
+ VariantKind getKind() const { return Kind; }
+
+ /// getAPFloat - Get the APFloat value of this expression.
+ APFloat getAPFloat() const { return Flt; }
+
+ /// @}
+
+ void PrintImpl(raw_ostream &OS) const;
+ bool EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout) const {
+ return false;
+ }
+ void AddValueSymbols(MCAssembler *) const {};
+ const MCSection *FindAssociatedSection() const {
+ return NULL;
+ }
+
+ // There are no TLS NVPTXMCExprs at the moment.
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {}
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Target;
+ }
+};
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
new file mode 100644
index 000000000000..843ebed5e4a4
--- /dev/null
+++ b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
@@ -0,0 +1,225 @@
+//===-- NVPTXPrologEpilogPass.cpp - NVPTX prolog/epilog inserter ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a copy of the generic LLVM PrologEpilogInserter pass, modified
+// to remove unneeded functionality and to handle virtual registers. Most code
+// here is a copy of PrologEpilogInserter.cpp.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "llvm/Pass.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+class NVPTXPrologEpilogPass : public MachineFunctionPass {
+public:
+ static char ID;
+ NVPTXPrologEpilogPass() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+private:
+ void calculateFrameObjectOffsets(MachineFunction &Fn);
+};
+}
+
+MachineFunctionPass *llvm::createNVPTXPrologEpilogPass() {
+ return new NVPTXPrologEpilogPass();
+}
+
+char NVPTXPrologEpilogPass::ID = 0;
+
+bool NVPTXPrologEpilogPass::runOnMachineFunction(MachineFunction &MF) {
+ const TargetMachine &TM = MF.getTarget();
+ const TargetFrameLowering &TFI = *TM.getFrameLowering();
+ const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
+ bool Modified = false;
+
+ calculateFrameObjectOffsets(MF);
+
+ for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB) {
+ for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
+ MachineInstr *MI = I;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ if (!MI->getOperand(i).isFI())
+ continue;
+ TRI.eliminateFrameIndex(MI, 0, i, NULL);
+ Modified = true;
+ }
+ }
+ }
+
+ // Add function prolog/epilog
+ TFI.emitPrologue(MF);
+
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
+ // If last instruction is a return instruction, add an epilogue
+ if (!I->empty() && I->back().isReturn())
+ TFI.emitEpilogue(MF, *I);
+ }
+
+ return Modified;
+}
+
+/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
+static inline void
+AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
+ bool StackGrowsDown, int64_t &Offset,
+ unsigned &MaxAlign) {
+ // If the stack grows down, add the object size to find the lowest address.
+ if (StackGrowsDown)
+ Offset += MFI->getObjectSize(FrameIdx);
+
+ unsigned Align = MFI->getObjectAlignment(FrameIdx);
+
+ // If the alignment of this object is greater than that of the stack, then
+ // increase the stack alignment to match.
+ MaxAlign = std::max(MaxAlign, Align);
+
+ // Adjust to alignment boundary.
+ Offset = (Offset + Align - 1) / Align * Align;
+
+ if (StackGrowsDown) {
+ DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
+ MFI->setObjectOffset(FrameIdx, -Offset); // Set the computed offset
+ } else {
+ DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
+ MFI->setObjectOffset(FrameIdx, Offset);
+ Offset += MFI->getObjectSize(FrameIdx);
+ }
+}
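
The rounding used above, Offset = (Offset + Align - 1) / Align * Align, bumps Offset up to the next
multiple of Align; for example an offset of 13 with 8-byte alignment becomes 16, and already-aligned
offsets are left unchanged. A tiny standalone check of that identity:

    #include <cassert>
    #include <cstdint>

    static int64_t alignTo(int64_t Offset, unsigned Align) {
      return (Offset + Align - 1) / Align * Align;   // round up to a multiple of Align
    }

    int main() {
      assert(alignTo(13, 8) == 16);   // rounds up to the next 8-byte boundary
      assert(alignTo(16, 8) == 16);   // already aligned, unchanged
      assert(alignTo(1, 4) == 4);
      return 0;
    }
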
+
+void
+NVPTXPrologEpilogPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
+ const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
+ const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
+
+ bool StackGrowsDown =
+ TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
+
+ // Loop over all of the stack objects, assigning sequential addresses...
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
+
+ // Start at the beginning of the local area.
+ // The Offset is the distance from the stack top in the direction
+ // of stack growth -- so it's always nonnegative.
+ int LocalAreaOffset = TFI.getOffsetOfLocalArea();
+ if (StackGrowsDown)
+ LocalAreaOffset = -LocalAreaOffset;
+ assert(LocalAreaOffset >= 0
+ && "Local area offset should be in direction of stack growth");
+ int64_t Offset = LocalAreaOffset;
+
+ // If there are fixed sized objects that are preallocated in the local area,
+ // non-fixed objects can't be allocated right at the start of local area.
+ // We currently don't support filling in holes in between fixed sized
+ // objects, so we adjust 'Offset' to point to the end of last fixed sized
+ // preallocated object.
+ for (int i = MFI->getObjectIndexBegin(); i != 0; ++i) {
+ int64_t FixedOff;
+ if (StackGrowsDown) {
+ // The maximum distance from the stack pointer is at lower address of
+ // the object -- which is given by offset. For down growing stack
+ // the offset is negative, so we negate the offset to get the distance.
+ FixedOff = -MFI->getObjectOffset(i);
+ } else {
+ // The maximum distance from the start pointer is at the upper
+ // address of the object.
+ FixedOff = MFI->getObjectOffset(i) + MFI->getObjectSize(i);
+ }
+ if (FixedOff > Offset) Offset = FixedOff;
+ }
+
+ // NOTE: We do not have a call stack
+
+ unsigned MaxAlign = MFI->getMaxAlignment();
+
+ // No scavenger
+
+ // FIXME: Once this is working, then enable flag will change to a target
+ // check for whether the frame is large enough to want to use virtual
+ // frame index registers. Functions which don't want/need this optimization
+ // will continue to use the existing code path.
+ if (MFI->getUseLocalStackAllocationBlock()) {
+ unsigned Align = MFI->getLocalFrameMaxAlign();
+
+ // Adjust to alignment boundary.
+ Offset = (Offset + Align - 1) / Align * Align;
+
+ DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
+
+ // Resolve offsets for objects in the local block.
+ for (unsigned i = 0, e = MFI->getLocalFrameObjectCount(); i != e; ++i) {
+ std::pair<int, int64_t> Entry = MFI->getLocalFrameObjectMap(i);
+ int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
+ DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
+ FIOffset << "]\n");
+ MFI->setObjectOffset(Entry.first, FIOffset);
+ }
+ // Allocate the local block
+ Offset += MFI->getLocalFrameSize();
+
+ MaxAlign = std::max(Align, MaxAlign);
+ }
+
+ // No stack protector
+
+ // Then assign frame offsets to stack objects that are not used to spill
+ // callee saved registers.
+ for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
+ if (MFI->isObjectPreAllocated(i) &&
+ MFI->getUseLocalStackAllocationBlock())
+ continue;
+ if (MFI->isDeadObjectIndex(i))
+ continue;
+
+ AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
+ }
+
+ // No scavenger
+
+ if (!TFI.targetHandlesStackFrameRounding()) {
+ // If we have reserved argument space for call sites in the function
+ // immediately on entry to the current function, count it as part of the
+ // overall stack size.
+ if (MFI->adjustsStack() && TFI.hasReservedCallFrame(Fn))
+ Offset += MFI->getMaxCallFrameSize();
+
+ // Round up the size to a multiple of the alignment. If the function has
+ // any calls or alloca's, align to the target's StackAlignment value to
+ // ensure that the callee's frame or the alloca data is suitably aligned;
+ // otherwise, for leaf functions, align to the TransientStackAlignment
+ // value.
+ unsigned StackAlign;
+ if (MFI->adjustsStack() || MFI->hasVarSizedObjects() ||
+ (RegInfo->needsStackRealignment(Fn) && MFI->getObjectIndexEnd() != 0))
+ StackAlign = TFI.getStackAlignment();
+ else
+ StackAlign = TFI.getTransientStackAlignment();
+
+ // If the frame pointer is eliminated, all frame offsets will be relative to
+ // SP not FP. Align to MaxAlign so this works.
+ StackAlign = std::max(StackAlign, MaxAlign);
+ unsigned AlignMask = StackAlign - 1;
+ Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);
+ }
+
+ // Update frame info to pretend that this is part of the stack...
+ int64_t StackSize = Offset - LocalAreaOffset;
+ MFI->setStackSize(StackSize);
+}
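
The final rounding uses the bit-mask form (Offset + AlignMask) & ~AlignMask, which for a
power-of-two StackAlign is the same round-up as the divide/multiply form used earlier in
AdjustStackOffset. A small standalone check:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Offset = 37;
      uint64_t StackAlign = 16;              // the mask form assumes a power-of-two alignment
      uint64_t AlignMask = StackAlign - 1;
      uint64_t Rounded = (Offset + AlignMask) & ~AlignMask;
      assert(Rounded == 48);                 // 37 rounded up to a multiple of 16
      assert(Rounded == (Offset + StackAlign - 1) / StackAlign * StackAlign);
      return 0;
    }
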
diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
index 282465359b07..4d3a1d9b40ce 100644
--- a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
+++ b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
@@ -38,10 +38,6 @@ std::string getNVPTXRegClassName(TargetRegisterClass const *RC) {
return ".s32";
} else if (RC == &NVPTX::Int16RegsRegClass) {
return ".s16";
- }
- // Int8Regs become 16-bit registers in PTX
- else if (RC == &NVPTX::Int8RegsRegClass) {
- return ".s16";
} else if (RC == &NVPTX::Int1RegsRegClass) {
return ".pred";
} else if (RC == &NVPTX::SpecialRegsRegClass) {
@@ -57,15 +53,13 @@ std::string getNVPTXRegClassStr(TargetRegisterClass const *RC) {
return "%f";
}
if (RC == &NVPTX::Float64RegsRegClass) {
- return "%fd";
+ return "%fl";
} else if (RC == &NVPTX::Int64RegsRegClass) {
- return "%rd";
+ return "%rl";
} else if (RC == &NVPTX::Int32RegsRegClass) {
return "%r";
} else if (RC == &NVPTX::Int16RegsRegClass) {
return "%rs";
- } else if (RC == &NVPTX::Int8RegsRegClass) {
- return "%rc";
} else if (RC == &NVPTX::Int1RegsRegClass) {
return "%p";
} else if (RC == &NVPTX::SpecialRegsRegClass) {
@@ -77,8 +71,7 @@ std::string getNVPTXRegClassStr(TargetRegisterClass const *RC) {
}
}
-NVPTXRegisterInfo::NVPTXRegisterInfo(const TargetInstrInfo &tii,
- const NVPTXSubtarget &st)
+NVPTXRegisterInfo::NVPTXRegisterInfo(const NVPTXSubtarget &st)
: NVPTXGenRegisterInfo(0), Is64Bit(st.is64Bit()) {}
#define GET_REGINFO_TARGET_DESC
diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.h b/lib/Target/NVPTX/NVPTXRegisterInfo.h
index d40682066142..0a20f29ccb1e 100644
--- a/lib/Target/NVPTX/NVPTXRegisterInfo.h
+++ b/lib/Target/NVPTX/NVPTXRegisterInfo.h
@@ -35,7 +35,7 @@ private:
ManagedStringPool ManagedStrPool;
public:
- NVPTXRegisterInfo(const TargetInstrInfo &tii, const NVPTXSubtarget &st);
+ NVPTXRegisterInfo(const NVPTXSubtarget &st);
//------------------------------------------------------
// Pure virtual functions from TargetRegisterInfo
diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.td b/lib/Target/NVPTX/NVPTXRegisterInfo.td
index 8d100d631683..7a38a66b9227 100644
--- a/lib/Target/NVPTX/NVPTXRegisterInfo.td
+++ b/lib/Target/NVPTX/NVPTXRegisterInfo.td
@@ -29,9 +29,10 @@ def VRFrameLocal : NVPTXReg<"%SPL">;
// Special Registers used as the stack
def VRDepot : NVPTXReg<"%Depot">;
-foreach i = 0-395 in {
+// We use virtual registers, but define a few physical registers here to keep
+// SDAG and the MachineInstr layers happy.
+foreach i = 0-4 in {
def P#i : NVPTXReg<"%p"#i>; // Predicate
- def RC#i : NVPTXReg<"%rc"#i>; // 8-bit
def RS#i : NVPTXReg<"%rs"#i>; // 16-bit
def R#i : NVPTXReg<"%r"#i>; // 32-bit
def RL#i : NVPTXReg<"%rl"#i>; // 64-bit
@@ -48,17 +49,16 @@ foreach i = 0-395 in {
//===----------------------------------------------------------------------===//
// Register classes
//===----------------------------------------------------------------------===//
-def Int1Regs : NVPTXRegClass<[i1], 8, (add (sequence "P%u", 0, 395))>;
-def Int8Regs : NVPTXRegClass<[i8], 8, (add (sequence "RC%u", 0, 395))>;
-def Int16Regs : NVPTXRegClass<[i16], 16, (add (sequence "RS%u", 0, 395))>;
-def Int32Regs : NVPTXRegClass<[i32], 32, (add (sequence "R%u", 0, 395))>;
-def Int64Regs : NVPTXRegClass<[i64], 64, (add (sequence "RL%u", 0, 395))>;
-def Float32Regs : NVPTXRegClass<[f32], 32, (add (sequence "F%u", 0, 395))>;
-def Float64Regs : NVPTXRegClass<[f64], 64, (add (sequence "FL%u", 0, 395))>;
-def Int32ArgRegs : NVPTXRegClass<[i32], 32, (add (sequence "ia%u", 0, 395))>;
-def Int64ArgRegs : NVPTXRegClass<[i64], 64, (add (sequence "la%u", 0, 395))>;
-def Float32ArgRegs : NVPTXRegClass<[f32], 32, (add (sequence "fa%u", 0, 395))>;
-def Float64ArgRegs : NVPTXRegClass<[f64], 64, (add (sequence "da%u", 0, 395))>;
+def Int1Regs : NVPTXRegClass<[i1], 8, (add (sequence "P%u", 0, 4))>;
+def Int16Regs : NVPTXRegClass<[i16], 16, (add (sequence "RS%u", 0, 4))>;
+def Int32Regs : NVPTXRegClass<[i32], 32, (add (sequence "R%u", 0, 4))>;
+def Int64Regs : NVPTXRegClass<[i64], 64, (add (sequence "RL%u", 0, 4))>;
+def Float32Regs : NVPTXRegClass<[f32], 32, (add (sequence "F%u", 0, 4))>;
+def Float64Regs : NVPTXRegClass<[f64], 64, (add (sequence "FL%u", 0, 4))>;
+def Int32ArgRegs : NVPTXRegClass<[i32], 32, (add (sequence "ia%u", 0, 4))>;
+def Int64ArgRegs : NVPTXRegClass<[i64], 64, (add (sequence "la%u", 0, 4))>;
+def Float32ArgRegs : NVPTXRegClass<[f32], 32, (add (sequence "fa%u", 0, 4))>;
+def Float64ArgRegs : NVPTXRegClass<[f64], 64, (add (sequence "da%u", 0, 4))>;
// Read NVPTXRegisterInfo.cpp to see how VRFrame and VRDepot are used.
def SpecialRegs : NVPTXRegClass<[i32], 32, (add VRFrame, VRDepot)>;
diff --git a/lib/Target/NVPTX/NVPTXSection.h b/lib/Target/NVPTX/NVPTXSection.h
index e57ace92e850..f8a692e9b1a3 100644
--- a/lib/Target/NVPTX/NVPTXSection.h
+++ b/lib/Target/NVPTX/NVPTXSection.h
@@ -24,10 +24,10 @@ namespace llvm {
/// the ASMPrint interface.
///
class NVPTXSection : public MCSection {
-
+ virtual void anchor();
public:
NVPTXSection(SectionVariant V, SectionKind K) : MCSection(V, K) {}
- ~NVPTXSection() {}
+ virtual ~NVPTXSection() {}
/// Override this as NVPTX has its own way of printing switching
/// to a section.
diff --git a/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp b/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp
index 83dfe120899a..b64c30880b94 100644
--- a/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp
+++ b/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp
@@ -36,7 +36,7 @@ bool NVPTXSplitBBatBar::runOnFunction(Function &F) {
BasicBlock::iterator II = IB;
BasicBlock::iterator IE = BI->end();
- // Skit the first intruction. No splitting is needed at this
+ // Skip the first instruction. No splitting is needed at this
// point even if this is a bar.
while (II != IE) {
if (IntrinsicInst *inst = dyn_cast<IntrinsicInst>(II)) {
diff --git a/lib/Target/NVPTX/NVPTXSubtarget.cpp b/lib/Target/NVPTX/NVPTXSubtarget.cpp
index 2dcd73dcff9c..9771a176d8dc 100644
--- a/lib/Target/NVPTX/NVPTXSubtarget.cpp
+++ b/lib/Target/NVPTX/NVPTXSubtarget.cpp
@@ -19,23 +19,21 @@
using namespace llvm;
-// Select Driver Interface
-#include "llvm/Support/CommandLine.h"
-namespace {
-cl::opt<NVPTX::DrvInterface> DriverInterface(
- cl::desc("Choose driver interface:"),
- cl::values(clEnumValN(NVPTX::NVCL, "drvnvcl", "Nvidia OpenCL driver"),
- clEnumValN(NVPTX::CUDA, "drvcuda", "Nvidia CUDA driver"),
- clEnumValN(NVPTX::TEST, "drvtest", "Plain Test"), clEnumValEnd),
- cl::init(NVPTX::NVCL));
-}
+
+// Pin the vtable to this file.
+void NVPTXSubtarget::anchor() {}
NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool is64Bit)
: NVPTXGenSubtargetInfo(TT, CPU, FS), Is64Bit(is64Bit), PTXVersion(0),
SmVersion(20) {
- drvInterface = DriverInterface;
+ Triple T(TT);
+
+ if (T.getOS() == Triple::NVCL)
+ drvInterface = NVPTX::NVCL;
+ else
+ drvInterface = NVPTX::CUDA;
// Provide the default CPU if none
std::string defCPU = "sm_20";
diff --git a/lib/Target/NVPTX/NVPTXSubtarget.h b/lib/Target/NVPTX/NVPTXSubtarget.h
index 670077daaa69..004be116a96c 100644
--- a/lib/Target/NVPTX/NVPTXSubtarget.h
+++ b/lib/Target/NVPTX/NVPTXSubtarget.h
@@ -25,7 +25,7 @@
namespace llvm {
class NVPTXSubtarget : public NVPTXGenSubtargetInfo {
-
+ virtual void anchor();
std::string TargetName;
NVPTX::DrvInterface drvInterface;
bool Is64Bit;
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 1ae2a7cc8622..46edd6d83f65 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -57,9 +57,6 @@ extern "C" void LLVMInitializeNVPTXTarget() {
RegisterTargetMachine<NVPTXTargetMachine32> X(TheNVPTXTarget32);
RegisterTargetMachine<NVPTXTargetMachine64> Y(TheNVPTXTarget64);
- RegisterMCAsmInfo<NVPTXMCAsmInfo> A(TheNVPTXTarget32);
- RegisterMCAsmInfo<NVPTXMCAsmInfo> B(TheNVPTXTarget64);
-
// FIXME: This pass is really intended to be invoked during IR optimization,
// but it's very NVPTX-specific.
initializeNVVMReflectPass(*PassRegistry::getPassRegistry());
@@ -74,7 +71,9 @@ NVPTXTargetMachine::NVPTXTargetMachine(
Subtarget(TT, CPU, FS, is64bit), DL(Subtarget.getDataLayout()),
InstrInfo(*this), TLInfo(*this), TSInfo(*this),
FrameLowering(
- *this, is64bit) /*FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0)*/ {}
+ *this, is64bit) /*FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0)*/ {
+ initAsmInfo();
+}
void NVPTXTargetMachine32::anchor() {}
@@ -92,7 +91,7 @@ NVPTXTargetMachine64::NVPTXTargetMachine64(
CodeGenOpt::Level OL)
: NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
-namespace llvm {
+namespace {
class NVPTXPassConfig : public TargetPassConfig {
public:
NVPTXPassConfig(NVPTXTargetMachine *TM, PassManagerBase &PM)
@@ -105,8 +104,13 @@ public:
virtual void addIRPasses();
virtual bool addInstSelector();
virtual bool addPreRegAlloc();
+ virtual bool addPostRegAlloc();
+
+ virtual FunctionPass *createTargetRegisterAllocator(bool) LLVM_OVERRIDE;
+ virtual void addFastRegAlloc(FunctionPass *RegAllocPass);
+ virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
};
-}
+} // end anonymous namespace
TargetPassConfig *NVPTXTargetMachine::createPassConfig(PassManagerBase &PM) {
NVPTXPassConfig *PassConfig = new NVPTXPassConfig(this, PM);
@@ -114,6 +118,16 @@ TargetPassConfig *NVPTXTargetMachine::createPassConfig(PassManagerBase &PM) {
}
void NVPTXPassConfig::addIRPasses() {
+ // The following passes are known to not play well with virtual regs hanging
+ // around after register allocation (which in our case, is *all* registers).
+ // We explicitly disable them here. We do, however, need some functionality
+ // of the PrologEpilogCodeInserter pass, so we emulate that behavior in the
+ // NVPTXPrologEpilog pass (see NVPTXPrologEpilogPass.cpp).
+ disablePass(&PrologEpilogCodeInserterID);
+ disablePass(&MachineCopyPropagationID);
+ disablePass(&BranchFolderPassID);
+ disablePass(&TailDuplicateID);
+
TargetPassConfig::addIRPasses();
addPass(createGenericToNVVMPass());
}
@@ -127,3 +141,41 @@ bool NVPTXPassConfig::addInstSelector() {
}
bool NVPTXPassConfig::addPreRegAlloc() { return false; }
+bool NVPTXPassConfig::addPostRegAlloc() {
+ addPass(createNVPTXPrologEpilogPass());
+ return false;
+}
+
+FunctionPass *NVPTXPassConfig::createTargetRegisterAllocator(bool) {
+ return 0; // No reg alloc
+}
+
+void NVPTXPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
+ assert(!RegAllocPass && "NVPTX uses no regalloc!");
+ addPass(&PHIEliminationID);
+ addPass(&TwoAddressInstructionPassID);
+}
+
+void NVPTXPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
+ assert(!RegAllocPass && "NVPTX uses no regalloc!");
+
+ addPass(&ProcessImplicitDefsID);
+ addPass(&LiveVariablesID);
+ addPass(&MachineLoopInfoID);
+ addPass(&PHIEliminationID);
+
+ addPass(&TwoAddressInstructionPassID);
+ addPass(&RegisterCoalescerID);
+
+ // PreRA instruction scheduling.
+ if (addPass(&MachineSchedulerID))
+ printAndVerify("After Machine Scheduling");
+
+
+ addPass(&StackSlotColoringID);
+
+ // FIXME: Needs physical registers
+ //addPass(&PostRAMachineLICMID);
+
+ printAndVerify("After StackSlotColoring");
+}
diff --git a/lib/Target/NVPTX/NVPTXTargetObjectFile.h b/lib/Target/NVPTX/NVPTXTargetObjectFile.h
index 6ab0e08ad091..2a7394b79ae6 100644
--- a/lib/Target/NVPTX/NVPTXTargetObjectFile.h
+++ b/lib/Target/NVPTX/NVPTXTargetObjectFile.h
@@ -21,31 +21,33 @@ class Module;
class NVPTXTargetObjectFile : public TargetLoweringObjectFile {
public:
- NVPTXTargetObjectFile() {}
- ~NVPTXTargetObjectFile() {
- delete TextSection;
- delete DataSection;
- delete BSSSection;
- delete ReadOnlySection;
+ NVPTXTargetObjectFile() {
+ TextSection = 0;
+ DataSection = 0;
+ BSSSection = 0;
+ ReadOnlySection = 0;
- delete StaticCtorSection;
- delete StaticDtorSection;
- delete LSDASection;
- delete EHFrameSection;
- delete DwarfAbbrevSection;
- delete DwarfInfoSection;
- delete DwarfLineSection;
- delete DwarfFrameSection;
- delete DwarfPubTypesSection;
- delete DwarfDebugInlineSection;
- delete DwarfStrSection;
- delete DwarfLocSection;
- delete DwarfARangesSection;
- delete DwarfRangesSection;
- delete DwarfMacroInfoSection;
+ StaticCtorSection = 0;
+ StaticDtorSection = 0;
+ LSDASection = 0;
+ EHFrameSection = 0;
+ DwarfAbbrevSection = 0;
+ DwarfInfoSection = 0;
+ DwarfLineSection = 0;
+ DwarfFrameSection = 0;
+ DwarfPubTypesSection = 0;
+ DwarfDebugInlineSection = 0;
+ DwarfStrSection = 0;
+ DwarfLocSection = 0;
+ DwarfARangesSection = 0;
+ DwarfRangesSection = 0;
+ DwarfMacroInfoSection = 0;
}
+ virtual ~NVPTXTargetObjectFile();
+
virtual void Initialize(MCContext &ctx, const TargetMachine &TM) {
+ TargetLoweringObjectFile::Initialize(ctx, TM);
TextSection = new NVPTXSection(MCSection::SV_ELF, SectionKind::getText());
DataSection =
new NVPTXSection(MCSection::SV_ELF, SectionKind::getDataRel());
diff --git a/lib/Target/NVPTX/NVVMReflect.cpp b/lib/Target/NVPTX/NVVMReflect.cpp
index 3cc324b85e35..7406207c94b5 100644
--- a/lib/Target/NVPTX/NVVMReflect.cpp
+++ b/lib/Target/NVPTX/NVVMReflect.cpp
@@ -79,7 +79,7 @@ ModulePass *llvm::createNVVMReflectPass(const StringMap<int>& Mapping) {
}
static cl::opt<bool>
-NVVMReflectEnabled("nvvm-reflect-enable", cl::init(true),
+NVVMReflectEnabled("nvvm-reflect-enable", cl::init(true), cl::Hidden,
cl::desc("NVVM reflection, enabled by default"));
char NVVMReflect::ID = 0;
@@ -88,7 +88,7 @@ INITIALIZE_PASS(NVVMReflect, "nvvm-reflect",
false)
static cl::list<std::string>
-ReflectList("nvvm-reflect-list", cl::value_desc("name=<int>"),
+ReflectList("nvvm-reflect-list", cl::value_desc("name=<int>"), cl::Hidden,
cl::desc("A list of string=num assignments"),
cl::ValueRequired);
diff --git a/lib/Target/PowerPC/AsmParser/LLVMBuild.txt b/lib/Target/PowerPC/AsmParser/LLVMBuild.txt
index bd08c132b7ec..02ebf1d3d3ed 100644
--- a/lib/Target/PowerPC/AsmParser/LLVMBuild.txt
+++ b/lib/Target/PowerPC/AsmParser/LLVMBuild.txt
@@ -19,5 +19,5 @@
type = Library
name = PowerPCAsmParser
parent = PowerPC
-required_libraries = PowerPCInfo MC MCParser Support
+required_libraries = PowerPCDesc PowerPCInfo MC MCParser Support
add_to_library_groups = PowerPC
diff --git a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
index f2cb8b84fce4..fe83fe1438ce 100644
--- a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
+++ b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
@@ -8,15 +8,18 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/PPCMCTargetDesc.h"
+#include "MCTargetDesc/PPCMCExpr.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
@@ -106,11 +109,73 @@ static unsigned CRRegs[8] = {
PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7
};
+// Evaluate an expression containing condition register
+// or condition register field symbols. Returns a non-negative
+// value on success, or -1 on error.
+static int64_t
+EvaluateCRExpr(const MCExpr *E) {
+ switch (E->getKind()) {
+ case MCExpr::Target:
+ return -1;
+
+ case MCExpr::Constant: {
+ int64_t Res = cast<MCConstantExpr>(E)->getValue();
+ return Res < 0 ? -1 : Res;
+ }
+
+ case MCExpr::SymbolRef: {
+ const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E);
+ StringRef Name = SRE->getSymbol().getName();
+
+ if (Name == "lt") return 0;
+ if (Name == "gt") return 1;
+ if (Name == "eq") return 2;
+ if (Name == "so") return 3;
+ if (Name == "un") return 3;
+
+ if (Name == "cr0") return 0;
+ if (Name == "cr1") return 1;
+ if (Name == "cr2") return 2;
+ if (Name == "cr3") return 3;
+ if (Name == "cr4") return 4;
+ if (Name == "cr5") return 5;
+ if (Name == "cr6") return 6;
+ if (Name == "cr7") return 7;
+
+ return -1;
+ }
+
+ case MCExpr::Unary:
+ return -1;
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(E);
+ int64_t LHSVal = EvaluateCRExpr(BE->getLHS());
+ int64_t RHSVal = EvaluateCRExpr(BE->getRHS());
+ int64_t Res;
+
+ if (LHSVal < 0 || RHSVal < 0)
+ return -1;
+
+ switch (BE->getOpcode()) {
+ default: return -1;
+ case MCBinaryExpr::Add: Res = LHSVal + RHSVal; break;
+ case MCBinaryExpr::Mul: Res = LHSVal * RHSVal; break;
+ }
+
+ return Res < 0 ? -1 : Res;
+ }
+ }
+
+ llvm_unreachable("Invalid expression kind!");
+}
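
This lets the parser accept the conventional PowerPC syntax for condition-register operands such as
4*cr3+eq: cr3 folds to 3 and eq to 2, so the expression evaluates to 4*3 + 2 = 14, the index of the
EQ bit within CR field 3. A trivial standalone check of that arithmetic:

    #include <cassert>

    int main() {
      const int cr3 = 3, eq = 2;        // the values EvaluateCRExpr assigns to these symbols
      assert(4 * cr3 + eq == 14);       // "4*cr3+eq" selects bit 14, CR3's EQ bit
      return 0;
    }
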
+
struct PPCOperand;
class PPCAsmParser : public MCTargetAsmParser {
MCSubtargetInfo &STI;
MCAsmParser &Parser;
+ const MCInstrInfo &MII;
bool IsPPC64;
MCAsmParser &getParser() const { return Parser; }
@@ -126,10 +191,16 @@ class PPCAsmParser : public MCTargetAsmParser {
virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
+ const MCExpr *ExtractModifierFromExpr(const MCExpr *E,
+ PPCMCExpr::VariantKind &Variant);
+ const MCExpr *FixupVariantKind(const MCExpr *E);
+ bool ParseExpression(const MCExpr *&EVal);
+
bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
bool ParseDirectiveWord(unsigned Size, SMLoc L);
bool ParseDirectiveTC(unsigned Size, SMLoc L);
+ bool ParseDirectiveMachine(SMLoc L);
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
@@ -149,11 +220,13 @@ class PPCAsmParser : public MCTargetAsmParser {
public:
- PPCAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
- : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
+ PPCAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
+ const MCInstrInfo &_MII)
+ : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(_MII) {
// Check for 64-bit vs. 32-bit pointer mode.
Triple TheTriple(STI.getTargetTriple());
- IsPPC64 = TheTriple.getArch() == Triple::ppc64;
+ IsPPC64 = (TheTriple.getArch() == Triple::ppc64 ||
+ TheTriple.getArch() == Triple::ppc64le);
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
}
@@ -163,6 +236,12 @@ public:
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
virtual bool ParseDirective(AsmToken DirectiveID);
+
+ unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
+
+ virtual const MCExpr *applyModifierToExpr(const MCExpr *E,
+ MCSymbolRefExpr::VariantKind,
+ MCContext &Ctx);
};
/// PPCOperand - Instances of this class represent a parsed PowerPC machine
@@ -171,7 +250,8 @@ struct PPCOperand : public MCParsedAsmOperand {
enum KindTy {
Token,
Immediate,
- Expression
+ Expression,
+ TLSRegister
} Kind;
SMLoc StartLoc, EndLoc;
@@ -188,12 +268,18 @@ struct PPCOperand : public MCParsedAsmOperand {
struct ExprOp {
const MCExpr *Val;
+ int64_t CRVal; // Cached result of EvaluateCRExpr(Val)
+ };
+
+ struct TLSRegOp {
+ const MCSymbolRefExpr *Sym;
};
union {
struct TokOp Tok;
struct ImmOp Imm;
struct ExprOp Expr;
+ struct TLSRegOp TLSReg;
};
PPCOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
@@ -213,6 +299,9 @@ public:
case Expression:
Expr = o.Expr;
break;
+ case TLSRegister:
+ TLSReg = o.TLSReg;
+ break;
}
}
@@ -235,6 +324,16 @@ public:
return Expr.Val;
}
+ int64_t getExprCRVal() const {
+ assert(Kind == Expression && "Invalid access!");
+ return Expr.CRVal;
+ }
+
+ const MCExpr *getTLSReg() const {
+ assert(Kind == TLSRegister && "Invalid access!");
+ return TLSReg.Sym;
+ }
+
unsigned getReg() const {
assert(isRegNumber() && "Invalid access!");
return (unsigned) Imm.Val;
@@ -242,12 +341,17 @@ public:
unsigned getCCReg() const {
assert(isCCRegNumber() && "Invalid access!");
- return (unsigned) Imm.Val;
+ return (unsigned) (Kind == Immediate ? Imm.Val : Expr.CRVal);
+ }
+
+ unsigned getCRBit() const {
+ assert(isCRBitNumber() && "Invalid access!");
+ return (unsigned) (Kind == Immediate ? Imm.Val : Expr.CRVal);
}
unsigned getCRBitMask() const {
assert(isCRBitMask() && "Invalid access!");
- return 7 - CountTrailingZeros_32(Imm.Val);
+ return 7 - countTrailingZeros<uint64_t>(Imm.Val);
}
bool isToken() const { return Kind == Token; }
@@ -262,9 +366,24 @@ public:
bool isS16ImmX4() const { return Kind == Expression ||
(Kind == Immediate && isInt<16>(getImm()) &&
(getImm() & 3) == 0); }
+ bool isS17Imm() const { return Kind == Expression ||
+ (Kind == Immediate && isInt<17>(getImm())); }
+ bool isTLSReg() const { return Kind == TLSRegister; }
+ bool isDirectBr() const { return Kind == Expression ||
+ (Kind == Immediate && isInt<26>(getImm()) &&
+ (getImm() & 3) == 0); }
+ bool isCondBr() const { return Kind == Expression ||
+ (Kind == Immediate && isInt<16>(getImm()) &&
+ (getImm() & 3) == 0); }
bool isRegNumber() const { return Kind == Immediate && isUInt<5>(getImm()); }
- bool isCCRegNumber() const { return Kind == Immediate &&
- isUInt<3>(getImm()); }
+ bool isCCRegNumber() const { return (Kind == Expression
+ && isUInt<3>(getExprCRVal())) ||
+ (Kind == Immediate
+ && isUInt<3>(getImm())); }
+ bool isCRBitNumber() const { return (Kind == Expression
+ && isUInt<5>(getExprCRVal())) ||
+ (Kind == Immediate
+ && isUInt<5>(getImm())); }
bool isCRBitMask() const { return Kind == Immediate && isUInt<8>(getImm()) &&
isPowerOf2_32(getImm()); }
bool isMem() const { return false; }
@@ -325,7 +444,7 @@ public:
void addRegCRBITRCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(CRBITRegs[getReg()]));
+ Inst.addOperand(MCOperand::CreateReg(CRBITRegs[getCRBit()]));
}
void addRegCRRCOperands(MCInst &Inst, unsigned N) const {
@@ -346,20 +465,17 @@ public:
Inst.addOperand(MCOperand::CreateExpr(getExpr()));
}
- void addDispRIOperands(MCInst &Inst, unsigned N) const {
+ void addBranchTargetOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
if (Kind == Immediate)
- Inst.addOperand(MCOperand::CreateImm(getImm()));
+ Inst.addOperand(MCOperand::CreateImm(getImm() / 4));
else
Inst.addOperand(MCOperand::CreateExpr(getExpr()));
}
- void addDispRIXOperands(MCInst &Inst, unsigned N) const {
+ void addTLSRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- if (Kind == Immediate)
- Inst.addOperand(MCOperand::CreateImm(getImm() / 4));
- else
- Inst.addOperand(MCOperand::CreateExpr(getExpr()));
+ Inst.addOperand(MCOperand::CreateExpr(getTLSReg()));
}
StringRef getToken() const {
@@ -379,6 +495,20 @@ public:
return Op;
}
+ static PPCOperand *CreateTokenWithStringCopy(StringRef Str, SMLoc S,
+ bool IsPPC64) {
+ // Allocate extra memory for the string and copy it.
+ void *Mem = ::operator new(sizeof(PPCOperand) + Str.size());
+ PPCOperand *Op = new (Mem) PPCOperand(Token);
+ Op->Tok.Data = (const char *)(Op + 1);
+ Op->Tok.Length = Str.size();
+ std::memcpy((char *)(Op + 1), Str.data(), Str.size());
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ Op->IsPPC64 = IsPPC64;
+ return Op;
+ }
+
static PPCOperand *CreateImm(int64_t Val, SMLoc S, SMLoc E, bool IsPPC64) {
PPCOperand *Op = new PPCOperand(Immediate);
Op->Imm.Val = Val;
@@ -392,11 +522,34 @@ public:
SMLoc S, SMLoc E, bool IsPPC64) {
PPCOperand *Op = new PPCOperand(Expression);
Op->Expr.Val = Val;
+ Op->Expr.CRVal = EvaluateCRExpr(Val);
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ Op->IsPPC64 = IsPPC64;
+ return Op;
+ }
+
+ static PPCOperand *CreateTLSReg(const MCSymbolRefExpr *Sym,
+ SMLoc S, SMLoc E, bool IsPPC64) {
+ PPCOperand *Op = new PPCOperand(TLSRegister);
+ Op->TLSReg.Sym = Sym;
Op->StartLoc = S;
Op->EndLoc = E;
Op->IsPPC64 = IsPPC64;
return Op;
}
+
+ static PPCOperand *CreateFromMCExpr(const MCExpr *Val,
+ SMLoc S, SMLoc E, bool IsPPC64) {
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val))
+ return CreateImm(CE->getValue(), S, E, IsPPC64);
+
+ if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(Val))
+ if (SRE->getKind() == MCSymbolRefExpr::VK_PPC_TLS)
+ return CreateTLSReg(SRE, S, E, IsPPC64);
+
+ return CreateExpr(Val, S, E, IsPPC64);
+ }
};
} // end anonymous namespace.
@@ -412,6 +565,9 @@ void PPCOperand::print(raw_ostream &OS) const {
case Expression:
getExpr()->print(OS);
break;
+ case TLSRegister:
+ getTLSReg()->print(OS);
+ break;
}
}
@@ -419,11 +575,133 @@ void PPCOperand::print(raw_ostream &OS) const {
void PPCAsmParser::
ProcessInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- switch (Inst.getOpcode()) {
- case PPC::SLWI: {
+ int Opcode = Inst.getOpcode();
+ switch (Opcode) {
+ case PPC::LAx: {
+ MCInst TmpInst;
+ TmpInst.setOpcode(PPC::LA);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(1));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::SUBI: {
MCInst TmpInst;
int64_t N = Inst.getOperand(2).getImm();
- TmpInst.setOpcode(PPC::RLWINM);
+ TmpInst.setOpcode(PPC::ADDI);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(-N));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::SUBIS: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ TmpInst.setOpcode(PPC::ADDIS);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(-N));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::SUBIC: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ TmpInst.setOpcode(PPC::ADDIC);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(-N));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::SUBICo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ TmpInst.setOpcode(PPC::ADDICo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(-N));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::EXTLWI:
+ case PPC::EXTLWIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ int64_t B = Inst.getOperand(3).getImm();
+ TmpInst.setOpcode(Opcode == PPC::EXTLWI? PPC::RLWINM : PPC::RLWINMo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(B));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ TmpInst.addOperand(MCOperand::CreateImm(N - 1));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::EXTRWI:
+ case PPC::EXTRWIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ int64_t B = Inst.getOperand(3).getImm();
+ TmpInst.setOpcode(Opcode == PPC::EXTRWI? PPC::RLWINM : PPC::RLWINMo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(B + N));
+ TmpInst.addOperand(MCOperand::CreateImm(32 - N));
+ TmpInst.addOperand(MCOperand::CreateImm(31));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::INSLWI:
+ case PPC::INSLWIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ int64_t B = Inst.getOperand(3).getImm();
+ TmpInst.setOpcode(Opcode == PPC::INSLWI? PPC::RLWIMI : PPC::RLWIMIo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(32 - B));
+ TmpInst.addOperand(MCOperand::CreateImm(B));
+ TmpInst.addOperand(MCOperand::CreateImm((B + N) - 1));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::INSRWI:
+ case PPC::INSRWIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ int64_t B = Inst.getOperand(3).getImm();
+ TmpInst.setOpcode(Opcode == PPC::INSRWI? PPC::RLWIMI : PPC::RLWIMIo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(32 - (B + N)));
+ TmpInst.addOperand(MCOperand::CreateImm(B));
+ TmpInst.addOperand(MCOperand::CreateImm((B + N) - 1));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::ROTRWI:
+ case PPC::ROTRWIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ TmpInst.setOpcode(Opcode == PPC::ROTRWI? PPC::RLWINM : PPC::RLWINMo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(32 - N));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ TmpInst.addOperand(MCOperand::CreateImm(31));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::SLWI:
+ case PPC::SLWIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ TmpInst.setOpcode(Opcode == PPC::SLWI? PPC::RLWINM : PPC::RLWINMo);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(MCOperand::CreateImm(N));
@@ -432,10 +710,11 @@ ProcessInstruction(MCInst &Inst,
Inst = TmpInst;
break;
}
- case PPC::SRWI: {
+ case PPC::SRWI:
+ case PPC::SRWIo: {
MCInst TmpInst;
int64_t N = Inst.getOperand(2).getImm();
- TmpInst.setOpcode(PPC::RLWINM);
+ TmpInst.setOpcode(Opcode == PPC::SRWI? PPC::RLWINM : PPC::RLWINMo);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(MCOperand::CreateImm(32 - N));
@@ -444,10 +723,90 @@ ProcessInstruction(MCInst &Inst,
Inst = TmpInst;
break;
}
- case PPC::SLDI: {
+ case PPC::CLRRWI:
+ case PPC::CLRRWIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ TmpInst.setOpcode(Opcode == PPC::CLRRWI? PPC::RLWINM : PPC::RLWINMo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ TmpInst.addOperand(MCOperand::CreateImm(31 - N));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::CLRLSLWI:
+ case PPC::CLRLSLWIo: {
+ MCInst TmpInst;
+ int64_t B = Inst.getOperand(2).getImm();
+ int64_t N = Inst.getOperand(3).getImm();
+ TmpInst.setOpcode(Opcode == PPC::CLRLSLWI? PPC::RLWINM : PPC::RLWINMo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(N));
+ TmpInst.addOperand(MCOperand::CreateImm(B - N));
+ TmpInst.addOperand(MCOperand::CreateImm(31 - N));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::EXTLDI:
+ case PPC::EXTLDIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ int64_t B = Inst.getOperand(3).getImm();
+ TmpInst.setOpcode(Opcode == PPC::EXTLDI? PPC::RLDICR : PPC::RLDICRo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(B));
+ TmpInst.addOperand(MCOperand::CreateImm(N - 1));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::EXTRDI:
+ case PPC::EXTRDIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ int64_t B = Inst.getOperand(3).getImm();
+ TmpInst.setOpcode(Opcode == PPC::EXTRDI? PPC::RLDICL : PPC::RLDICLo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(B + N));
+ TmpInst.addOperand(MCOperand::CreateImm(64 - N));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::INSRDI:
+ case PPC::INSRDIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ int64_t B = Inst.getOperand(3).getImm();
+ TmpInst.setOpcode(Opcode == PPC::INSRDI? PPC::RLDIMI : PPC::RLDIMIo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(64 - (B + N)));
+ TmpInst.addOperand(MCOperand::CreateImm(B));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::ROTRDI:
+ case PPC::ROTRDIo: {
MCInst TmpInst;
int64_t N = Inst.getOperand(2).getImm();
- TmpInst.setOpcode(PPC::RLDICR);
+ TmpInst.setOpcode(Opcode == PPC::ROTRDI? PPC::RLDICL : PPC::RLDICLo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(64 - N));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::SLDI:
+ case PPC::SLDIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ TmpInst.setOpcode(Opcode == PPC::SLDI? PPC::RLDICR : PPC::RLDICRo);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(MCOperand::CreateImm(N));
@@ -455,10 +814,11 @@ ProcessInstruction(MCInst &Inst,
Inst = TmpInst;
break;
}
- case PPC::SRDI: {
+ case PPC::SRDI:
+ case PPC::SRDIo: {
MCInst TmpInst;
int64_t N = Inst.getOperand(2).getImm();
- TmpInst.setOpcode(PPC::RLDICL);
+ TmpInst.setOpcode(Opcode == PPC::SRDI? PPC::RLDICL : PPC::RLDICLo);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(MCOperand::CreateImm(64 - N));
@@ -466,6 +826,31 @@ ProcessInstruction(MCInst &Inst,
Inst = TmpInst;
break;
}
+ case PPC::CLRRDI:
+ case PPC::CLRRDIo: {
+ MCInst TmpInst;
+ int64_t N = Inst.getOperand(2).getImm();
+ TmpInst.setOpcode(Opcode == PPC::CLRRDI? PPC::RLDICR : PPC::RLDICRo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ TmpInst.addOperand(MCOperand::CreateImm(63 - N));
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::CLRLSLDI:
+ case PPC::CLRLSLDIo: {
+ MCInst TmpInst;
+ int64_t B = Inst.getOperand(2).getImm();
+ int64_t N = Inst.getOperand(3).getImm();
+ TmpInst.setOpcode(Opcode == PPC::CLRLSLDI? PPC::RLDIC : PPC::RLDICo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateImm(N));
+ TmpInst.addOperand(MCOperand::CreateImm(B - N));
+ Inst = TmpInst;
+ break;
+ }
}
}
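
These cases expand the usual PowerPC extended mnemonics into the underlying rotate instructions;
for instance slwi rD, rA, n becomes rlwinm rD, rA, n, 0, 31-n (rotate left by n and keep bits
0..31-n, which is a plain left shift). A minimal standalone sketch of that equivalence, assuming the
standard rlwinm semantics of rotate-then-mask with IBM bit numbering (bit 0 is the MSB):

    #include <cassert>
    #include <cstdint>

    // Rotate RA left by SH, then keep bits MB..ME (bit 0 = most significant bit).
    static uint32_t rlwinm(uint32_t RA, unsigned SH, unsigned MB, unsigned ME) {
      uint32_t Rot = SH ? (RA << SH) | (RA >> (32 - SH)) : RA;
      uint32_t Mask = 0;
      for (unsigned i = MB; i <= ME; ++i)
        Mask |= 1u << (31 - i);
      return Rot & Mask;
    }

    int main() {
      uint32_t RA = 0x12345678;
      unsigned N = 4;
      // slwi rD, rA, 4  ==  rlwinm rD, rA, 4, 0, 27  ==  rA << 4
      assert(rlwinm(RA, N, 0, 31 - N) == (RA << N));
      return 0;
    }
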
@@ -518,19 +903,23 @@ MatchRegisterName(const AsmToken &Tok, unsigned &RegNo, int64_t &IntVal) {
RegNo = isPPC64()? PPC::CTR8 : PPC::CTR;
IntVal = 9;
return false;
- } else if (Name.substr(0, 1).equals_lower("r") &&
+ } else if (Name.equals_lower("vrsave")) {
+ RegNo = PPC::VRSAVE;
+ IntVal = 256;
+ return false;
+ } else if (Name.startswith_lower("r") &&
!Name.substr(1).getAsInteger(10, IntVal) && IntVal < 32) {
RegNo = isPPC64()? XRegs[IntVal] : RRegs[IntVal];
return false;
- } else if (Name.substr(0, 1).equals_lower("f") &&
+ } else if (Name.startswith_lower("f") &&
!Name.substr(1).getAsInteger(10, IntVal) && IntVal < 32) {
RegNo = FRegs[IntVal];
return false;
- } else if (Name.substr(0, 1).equals_lower("v") &&
+ } else if (Name.startswith_lower("v") &&
!Name.substr(1).getAsInteger(10, IntVal) && IntVal < 32) {
RegNo = VRegs[IntVal];
return false;
- } else if (Name.substr(0, 2).equals_lower("cr") &&
+ } else if (Name.startswith_lower("cr") &&
!Name.substr(2).getAsInteger(10, IntVal) && IntVal < 8) {
RegNo = CRRegs[IntVal];
return false;
@@ -556,6 +945,159 @@ ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
return Error(StartLoc, "invalid register name");
}
+/// Extract \code @l/@ha \endcode modifier from expression. Recursively scan
+/// the expression and check for VK_PPC_LO/HI/HA
+/// symbol variants. If all symbols with modifier use the same
+/// variant, return the corresponding PPCMCExpr::VariantKind,
+/// and a modified expression using the default symbol variant.
+/// Otherwise, return NULL.
+const MCExpr *PPCAsmParser::
+ExtractModifierFromExpr(const MCExpr *E,
+ PPCMCExpr::VariantKind &Variant) {
+ MCContext &Context = getParser().getContext();
+ Variant = PPCMCExpr::VK_PPC_None;
+
+ switch (E->getKind()) {
+ case MCExpr::Target:
+ case MCExpr::Constant:
+ return 0;
+
+ case MCExpr::SymbolRef: {
+ const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E);
+
+ switch (SRE->getKind()) {
+ case MCSymbolRefExpr::VK_PPC_LO:
+ Variant = PPCMCExpr::VK_PPC_LO;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HI:
+ Variant = PPCMCExpr::VK_PPC_HI;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HA:
+ Variant = PPCMCExpr::VK_PPC_HA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HIGHER:
+ Variant = PPCMCExpr::VK_PPC_HIGHER;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HIGHERA:
+ Variant = PPCMCExpr::VK_PPC_HIGHERA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HIGHEST:
+ Variant = PPCMCExpr::VK_PPC_HIGHEST;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HIGHESTA:
+ Variant = PPCMCExpr::VK_PPC_HIGHESTA;
+ break;
+ default:
+ return 0;
+ }
+
+ return MCSymbolRefExpr::Create(&SRE->getSymbol(), Context);
+ }
+
+ case MCExpr::Unary: {
+ const MCUnaryExpr *UE = cast<MCUnaryExpr>(E);
+ const MCExpr *Sub = ExtractModifierFromExpr(UE->getSubExpr(), Variant);
+ if (!Sub)
+ return 0;
+ return MCUnaryExpr::Create(UE->getOpcode(), Sub, Context);
+ }
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(E);
+ PPCMCExpr::VariantKind LHSVariant, RHSVariant;
+ const MCExpr *LHS = ExtractModifierFromExpr(BE->getLHS(), LHSVariant);
+ const MCExpr *RHS = ExtractModifierFromExpr(BE->getRHS(), RHSVariant);
+
+ if (!LHS && !RHS)
+ return 0;
+
+ if (!LHS) LHS = BE->getLHS();
+ if (!RHS) RHS = BE->getRHS();
+
+ if (LHSVariant == PPCMCExpr::VK_PPC_None)
+ Variant = RHSVariant;
+ else if (RHSVariant == PPCMCExpr::VK_PPC_None)
+ Variant = LHSVariant;
+ else if (LHSVariant == RHSVariant)
+ Variant = LHSVariant;
+ else
+ return 0;
+
+ return MCBinaryExpr::Create(BE->getOpcode(), LHS, RHS, Context);
+ }
+ }
+
+ llvm_unreachable("Invalid expression kind!");
+}
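
For example, with an operand written as foo@ha + 8 the @ha modifier is attached to the symbol
reference for foo; the scan above reports VK_PPC_HA in Variant and returns the plain expression
foo + 8, which ParseExpression (below) then wraps in a PPCMCExpr so that the high-adjusted
relocation applies to the evaluated sum rather than to the symbol alone.
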
+
+/// Find all VK_TLSGD/VK_TLSLD symbol references in expression and replace
+/// them by VK_PPC_TLSGD/VK_PPC_TLSLD. This is necessary to avoid having
+/// _GLOBAL_OFFSET_TABLE_ created via ELFObjectWriter::RelocNeedsGOT.
+/// FIXME: This is a hack.
+const MCExpr *PPCAsmParser::
+FixupVariantKind(const MCExpr *E) {
+ MCContext &Context = getParser().getContext();
+
+ switch (E->getKind()) {
+ case MCExpr::Target:
+ case MCExpr::Constant:
+ return E;
+
+ case MCExpr::SymbolRef: {
+ const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E);
+ MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
+
+ switch (SRE->getKind()) {
+ case MCSymbolRefExpr::VK_TLSGD:
+ Variant = MCSymbolRefExpr::VK_PPC_TLSGD;
+ break;
+ case MCSymbolRefExpr::VK_TLSLD:
+ Variant = MCSymbolRefExpr::VK_PPC_TLSLD;
+ break;
+ default:
+ return E;
+ }
+ return MCSymbolRefExpr::Create(&SRE->getSymbol(), Variant, Context);
+ }
+
+ case MCExpr::Unary: {
+ const MCUnaryExpr *UE = cast<MCUnaryExpr>(E);
+ const MCExpr *Sub = FixupVariantKind(UE->getSubExpr());
+ if (Sub == UE->getSubExpr())
+ return E;
+ return MCUnaryExpr::Create(UE->getOpcode(), Sub, Context);
+ }
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(E);
+ const MCExpr *LHS = FixupVariantKind(BE->getLHS());
+ const MCExpr *RHS = FixupVariantKind(BE->getRHS());
+ if (LHS == BE->getLHS() && RHS == BE->getRHS())
+ return E;
+ return MCBinaryExpr::Create(BE->getOpcode(), LHS, RHS, Context);
+ }
+ }
+
+ llvm_unreachable("Invalid expression kind!");
+}
+
+/// Parse an expression. This differs from the default "parseExpression"
+/// in that it handles complex \code @l/@ha \endcode modifiers.
+bool PPCAsmParser::
+ParseExpression(const MCExpr *&EVal) {
+ if (getParser().parseExpression(EVal))
+ return true;
+
+ EVal = FixupVariantKind(EVal);
+
+ PPCMCExpr::VariantKind Variant;
+ const MCExpr *E = ExtractModifierFromExpr(EVal, Variant);
+ if (E)
+ EVal = PPCMCExpr::Create(Variant, E, false, getParser().getContext());
+
+ return false;
+}
+
bool PPCAsmParser::
ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
SMLoc S = Parser.getTok().getLoc();
@@ -587,23 +1129,40 @@ ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
case AsmToken::Identifier:
case AsmToken::Dot:
case AsmToken::Dollar:
- if (!getParser().parseExpression(EVal))
+ if (!ParseExpression(EVal))
break;
/* fall through */
default:
return Error(S, "unknown operand");
}
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(EVal))
- Op = PPCOperand::CreateImm(CE->getValue(), S, E, isPPC64());
- else
- Op = PPCOperand::CreateExpr(EVal, S, E, isPPC64());
-
// Push the parsed operand into the list of operands
+ Op = PPCOperand::CreateFromMCExpr(EVal, S, E, isPPC64());
Operands.push_back(Op);
- // Check for D-form memory operands
- if (getLexer().is(AsmToken::LParen)) {
+ // Check whether this is a TLS call expression
+ bool TLSCall = false;
+ if (const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(EVal))
+ TLSCall = Ref->getSymbol().getName() == "__tls_get_addr";
+
+ if (TLSCall && getLexer().is(AsmToken::LParen)) {
+ const MCExpr *TLSSym;
+
+ Parser.Lex(); // Eat the '('.
+ S = Parser.getTok().getLoc();
+ if (ParseExpression(TLSSym))
+ return Error(S, "invalid TLS call expression");
+ if (getLexer().isNot(AsmToken::RParen))
+ return Error(Parser.getTok().getLoc(), "missing ')'");
+ E = Parser.getTok().getLoc();
+ Parser.Lex(); // Eat the ')'.
+
+ Op = PPCOperand::CreateFromMCExpr(TLSSym, S, E, isPPC64());
+ Operands.push_back(Op);
+ }
+
+ // Otherwise, check for D-form memory operands
+ if (!TLSCall && getLexer().is(AsmToken::LParen)) {
Parser.Lex(); // Eat the '('.
S = Parser.getTok().getLoc();
@@ -644,15 +1203,38 @@ bool PPCAsmParser::
ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// The first operand is the token for the instruction name.
+ // If the next character is a '+' or '-', we need to add it to the
+ // instruction name, to match what TableGen is doing.
+ std::string NewOpcode;
+ if (getLexer().is(AsmToken::Plus)) {
+ getLexer().Lex();
+ NewOpcode = Name;
+ NewOpcode += '+';
+ Name = NewOpcode;
+ }
+ if (getLexer().is(AsmToken::Minus)) {
+ getLexer().Lex();
+ NewOpcode = Name;
+ NewOpcode += '-';
+ Name = NewOpcode;
+ }
// If the instruction ends in a '.', we need to create a separate
// token for it, to match what TableGen is doing.
size_t Dot = Name.find('.');
StringRef Mnemonic = Name.slice(0, Dot);
- Operands.push_back(PPCOperand::CreateToken(Mnemonic, NameLoc, isPPC64()));
+ if (!NewOpcode.empty()) // Underlying memory for Name is volatile.
+ Operands.push_back(
+ PPCOperand::CreateTokenWithStringCopy(Mnemonic, NameLoc, isPPC64()));
+ else
+ Operands.push_back(PPCOperand::CreateToken(Mnemonic, NameLoc, isPPC64()));
if (Dot != StringRef::npos) {
SMLoc DotLoc = SMLoc::getFromPointer(NameLoc.getPointer() + Dot);
StringRef DotStr = Name.slice(Dot, StringRef::npos);
- Operands.push_back(PPCOperand::CreateToken(DotStr, DotLoc, isPPC64()));
+ if (!NewOpcode.empty()) // Underlying memory for Name is volatile.
+ Operands.push_back(
+ PPCOperand::CreateTokenWithStringCopy(DotStr, DotLoc, isPPC64()));
+ else
+ Operands.push_back(PPCOperand::CreateToken(DotStr, DotLoc, isPPC64()));
}
// If there are no more operands then finish
@@ -680,9 +1262,13 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
bool PPCAsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getIdentifier();
if (IDVal == ".word")
- return ParseDirectiveWord(4, DirectiveID.getLoc());
+ return ParseDirectiveWord(2, DirectiveID.getLoc());
+ if (IDVal == ".llong")
+ return ParseDirectiveWord(8, DirectiveID.getLoc());
if (IDVal == ".tc")
return ParseDirectiveTC(isPPC64()? 8 : 4, DirectiveID.getLoc());
+ if (IDVal == ".machine")
+ return ParseDirectiveMachine(DirectiveID.getLoc());
return true;
}
@@ -728,12 +1314,84 @@ bool PPCAsmParser::ParseDirectiveTC(unsigned Size, SMLoc L) {
return ParseDirectiveWord(Size, L);
}
+/// ParseDirectiveMachine
+/// ::= .machine [ cpu | "push" | "pop" ]
+bool PPCAsmParser::ParseDirectiveMachine(SMLoc L) {
+ if (getLexer().isNot(AsmToken::Identifier) &&
+ getLexer().isNot(AsmToken::String))
+ return Error(L, "unexpected token in directive");
+
+ StringRef CPU = Parser.getTok().getIdentifier();
+ Parser.Lex();
+
+ // FIXME: Right now, the parser always allows any available
+ // instruction, so the .machine directive is not useful.
+ // Implement ".machine any" (by doing nothing) for the benefit
+ // of existing assembler code. Likewise, we can then implement
+ // ".machine push" and ".machine pop" as no-op.
+ if (CPU != "any" && CPU != "push" && CPU != "pop")
+ return Error(L, "unrecognized machine type");
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return Error(L, "unexpected token in directive");
+
+ return false;
+}
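
In practice this means existing sources containing .machine any, .machine push, or .machine pop
assemble unchanged (the directive is parsed and discarded), while any other operand, for example
.machine ppc64, is rejected with the "unrecognized machine type" error above.
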
+
/// Force static initialization.
extern "C" void LLVMInitializePowerPCAsmParser() {
RegisterMCAsmParser<PPCAsmParser> A(ThePPC32Target);
RegisterMCAsmParser<PPCAsmParser> B(ThePPC64Target);
+ RegisterMCAsmParser<PPCAsmParser> C(ThePPC64LETarget);
}
#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "PPCGenAsmMatcher.inc"
+
+// Define this matcher function after the auto-generated include so we
+// have the match class enum definitions.
+unsigned PPCAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
+ unsigned Kind) {
+ // If the kind is a token for a literal immediate, check if our asm
+ // operand matches. This is for InstAliases which have a fixed-value
+ // immediate in the syntax.
+ int64_t ImmVal;
+ switch (Kind) {
+ case MCK_0: ImmVal = 0; break;
+ case MCK_1: ImmVal = 1; break;
+ case MCK_2: ImmVal = 2; break;
+ case MCK_3: ImmVal = 3; break;
+ default: return Match_InvalidOperand;
+ }
+
+ PPCOperand *Op = static_cast<PPCOperand*>(AsmOp);
+ if (Op->isImm() && Op->getImm() == ImmVal)
+ return Match_Success;
+
+ return Match_InvalidOperand;
+}
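
A standalone model of the literal-immediate matching above; MCK_0..MCK_3 and the Match_* results are generated by TableGen, so the enum here is only a stand-in:

    #include <cassert>
    #include <cstdint>

    enum MatchResult { Match_Success, Match_InvalidOperand };

    // Kind 0..3 stands for the generated MCK_0..MCK_3 classes, meaning "this
    // operand position requires the literal immediate 0..3".
    static MatchResult validateLiteralImm(unsigned Kind, bool IsImm, int64_t Imm) {
      if (Kind > 3)
        return Match_InvalidOperand; // not a literal-immediate class
      return (IsImm && Imm == int64_t(Kind)) ? Match_Success : Match_InvalidOperand;
    }

    int main() {
      assert(validateLiteralImm(2, /*IsImm=*/true, 2) == Match_Success);
      assert(validateLiteralImm(2, /*IsImm=*/true, 3) == Match_InvalidOperand);
      assert(validateLiteralImm(7, /*IsImm=*/true, 7) == Match_InvalidOperand);
      return 0;
    }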
+
+const MCExpr *
+PPCAsmParser::applyModifierToExpr(const MCExpr *E,
+ MCSymbolRefExpr::VariantKind Variant,
+ MCContext &Ctx) {
+ switch (Variant) {
+ case MCSymbolRefExpr::VK_PPC_LO:
+ return PPCMCExpr::Create(PPCMCExpr::VK_PPC_LO, E, false, Ctx);
+ case MCSymbolRefExpr::VK_PPC_HI:
+ return PPCMCExpr::Create(PPCMCExpr::VK_PPC_HI, E, false, Ctx);
+ case MCSymbolRefExpr::VK_PPC_HA:
+ return PPCMCExpr::Create(PPCMCExpr::VK_PPC_HA, E, false, Ctx);
+ case MCSymbolRefExpr::VK_PPC_HIGHER:
+ return PPCMCExpr::Create(PPCMCExpr::VK_PPC_HIGHER, E, false, Ctx);
+ case MCSymbolRefExpr::VK_PPC_HIGHERA:
+ return PPCMCExpr::Create(PPCMCExpr::VK_PPC_HIGHERA, E, false, Ctx);
+ case MCSymbolRefExpr::VK_PPC_HIGHEST:
+ return PPCMCExpr::Create(PPCMCExpr::VK_PPC_HIGHEST, E, false, Ctx);
+ case MCSymbolRefExpr::VK_PPC_HIGHESTA:
+ return PPCMCExpr::Create(PPCMCExpr::VK_PPC_HIGHESTA, E, false, Ctx);
+ default:
+ return 0;
+ }
+}
diff --git a/lib/Target/PowerPC/CMakeLists.txt b/lib/Target/PowerPC/CMakeLists.txt
index 71803cdac9cf..9a763f53a2d1 100644
--- a/lib/Target/PowerPC/CMakeLists.txt
+++ b/lib/Target/PowerPC/CMakeLists.txt
@@ -7,6 +7,7 @@ tablegen(LLVM PPCGenMCCodeEmitter.inc -gen-emitter -mc-emitter)
tablegen(LLVM PPCGenRegisterInfo.inc -gen-register-info)
tablegen(LLVM PPCGenInstrInfo.inc -gen-instr-info)
tablegen(LLVM PPCGenDAGISel.inc -gen-dag-isel)
+tablegen(LLVM PPCGenFastISel.inc -gen-fast-isel)
tablegen(LLVM PPCGenCallingConv.inc -gen-callingconv)
tablegen(LLVM PPCGenSubtargetInfo.inc -gen-subtarget)
add_public_tablegen_target(PowerPCCommonTableGen)
@@ -20,6 +21,7 @@ add_llvm_target(PowerPCCodeGen
PPCInstrInfo.cpp
PPCISelDAGToDAG.cpp
PPCISelLowering.cpp
+ PPCFastISel.cpp
PPCFrameLowering.cpp
PPCJITInfo.cpp
PPCMCInstLower.cpp
@@ -27,11 +29,12 @@ add_llvm_target(PowerPCCodeGen
PPCRegisterInfo.cpp
PPCSubtarget.cpp
PPCTargetMachine.cpp
+ PPCTargetObjectFile.cpp
PPCTargetTransformInfo.cpp
PPCSelectionDAGInfo.cpp
)
-add_dependencies(LLVMPowerPCCodeGen intrinsics_gen)
+add_dependencies(LLVMPowerPCCodeGen PowerPCCommonTableGen intrinsics_gen)
add_subdirectory(AsmParser)
add_subdirectory(InstPrinter)
diff --git a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
index 93fca00cca49..8281b5ca03c8 100644
--- a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
+++ b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
@@ -18,9 +18,17 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetOpcodes.h"
using namespace llvm;
+// FIXME: Once the integrated assembler supports full register names, tie this
+// to the verbose-asm setting.
+static cl::opt<bool>
+FullRegNames("ppc-asm-full-reg-names", cl::Hidden, cl::init(false),
+ cl::desc("Use full register names when printing assembly"));
+
#include "PPCGenAsmWriter.inc"
void PPCInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
@@ -78,6 +86,17 @@ void PPCInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
}
}
+ // For fast-isel, a COPY_TO_REGCLASS may survive this long. This is
+ // used when converting a 32-bit float to a 64-bit float as part of
+ // conversion to an integer (see PPCFastISel.cpp:SelectFPToI()),
+ // as otherwise we have problems with incorrect register classes
+ // in machine instruction verification. For now, just avoid trying
+ // to print it as such an instruction has no effect (a 32-bit float
+ // in a register is already in 64-bit form, just with lower
+ // precision). FIXME: Is there a better solution?
+ if (MI->getOpcode() == TargetOpcode::COPY_TO_REGCLASS)
+ return;
+
printInstruction(MI, O);
printAnnotation(O, Annot);
}
@@ -90,19 +109,87 @@ void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
if (StringRef(Modifier) == "cc") {
switch ((PPC::Predicate)Code) {
- case PPC::PRED_LT: O << "lt"; return;
- case PPC::PRED_LE: O << "le"; return;
- case PPC::PRED_EQ: O << "eq"; return;
- case PPC::PRED_GE: O << "ge"; return;
- case PPC::PRED_GT: O << "gt"; return;
- case PPC::PRED_NE: O << "ne"; return;
- case PPC::PRED_UN: O << "un"; return;
- case PPC::PRED_NU: O << "nu"; return;
+ case PPC::PRED_LT_MINUS:
+ case PPC::PRED_LT_PLUS:
+ case PPC::PRED_LT:
+ O << "lt";
+ return;
+ case PPC::PRED_LE_MINUS:
+ case PPC::PRED_LE_PLUS:
+ case PPC::PRED_LE:
+ O << "le";
+ return;
+ case PPC::PRED_EQ_MINUS:
+ case PPC::PRED_EQ_PLUS:
+ case PPC::PRED_EQ:
+ O << "eq";
+ return;
+ case PPC::PRED_GE_MINUS:
+ case PPC::PRED_GE_PLUS:
+ case PPC::PRED_GE:
+ O << "ge";
+ return;
+ case PPC::PRED_GT_MINUS:
+ case PPC::PRED_GT_PLUS:
+ case PPC::PRED_GT:
+ O << "gt";
+ return;
+ case PPC::PRED_NE_MINUS:
+ case PPC::PRED_NE_PLUS:
+ case PPC::PRED_NE:
+ O << "ne";
+ return;
+ case PPC::PRED_UN_MINUS:
+ case PPC::PRED_UN_PLUS:
+ case PPC::PRED_UN:
+ O << "un";
+ return;
+ case PPC::PRED_NU_MINUS:
+ case PPC::PRED_NU_PLUS:
+ case PPC::PRED_NU:
+ O << "nu";
+ return;
}
+ llvm_unreachable("Invalid predicate code");
+ }
+
+ if (StringRef(Modifier) == "pm") {
+ switch ((PPC::Predicate)Code) {
+ case PPC::PRED_LT:
+ case PPC::PRED_LE:
+ case PPC::PRED_EQ:
+ case PPC::PRED_GE:
+ case PPC::PRED_GT:
+ case PPC::PRED_NE:
+ case PPC::PRED_UN:
+ case PPC::PRED_NU:
+ return;
+ case PPC::PRED_LT_MINUS:
+ case PPC::PRED_LE_MINUS:
+ case PPC::PRED_EQ_MINUS:
+ case PPC::PRED_GE_MINUS:
+ case PPC::PRED_GT_MINUS:
+ case PPC::PRED_NE_MINUS:
+ case PPC::PRED_UN_MINUS:
+ case PPC::PRED_NU_MINUS:
+ O << "-";
+ return;
+ case PPC::PRED_LT_PLUS:
+ case PPC::PRED_LE_PLUS:
+ case PPC::PRED_EQ_PLUS:
+ case PPC::PRED_GE_PLUS:
+ case PPC::PRED_GT_PLUS:
+ case PPC::PRED_NE_PLUS:
+ case PPC::PRED_UN_PLUS:
+ case PPC::PRED_NU_PLUS:
+ O << "+";
+ return;
+ }
+ llvm_unreachable("Invalid predicate code");
}
assert(StringRef(Modifier) == "reg" &&
- "Need to specify 'cc' or 'reg' as predicate op modifier!");
+ "Need to specify 'cc', 'pm' or 'reg' as predicate op modifier!");
printOperand(MI, OpNo+1, O);
}
@@ -129,18 +216,16 @@ void PPCInstPrinter::printU6ImmOperand(const MCInst *MI, unsigned OpNo,
void PPCInstPrinter::printS16ImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- O << (short)MI->getOperand(OpNo).getImm();
+ if (MI->getOperand(OpNo).isImm())
+ O << (short)MI->getOperand(OpNo).getImm();
+ else
+ printOperand(MI, OpNo, O);
}
void PPCInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- O << (unsigned short)MI->getOperand(OpNo).getImm();
-}
-
-void PPCInstPrinter::printS16X4ImmOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
if (MI->getOperand(OpNo).isImm())
- O << (short)(MI->getOperand(OpNo).getImm()*4);
+ O << (unsigned short)MI->getOperand(OpNo).getImm();
else
printOperand(MI, OpNo, O);
}
@@ -153,11 +238,14 @@ void PPCInstPrinter::printBranchOperand(const MCInst *MI, unsigned OpNo,
// Branches can take an immediate operand. This is used by the branch
// selection pass to print .+8, an eight byte displacement from the PC.
O << ".+";
- printAbsAddrOperand(MI, OpNo, O);
+ printAbsBranchOperand(MI, OpNo, O);
}
-void PPCInstPrinter::printAbsAddrOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
+void PPCInstPrinter::printAbsBranchOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ if (!MI->getOperand(OpNo).isImm())
+ return printOperand(MI, OpNo, O);
+
O << (int)MI->getOperand(OpNo).getImm()*4;
}
@@ -182,7 +270,7 @@ void PPCInstPrinter::printcrbitm(const MCInst *MI, unsigned OpNo,
void PPCInstPrinter::printMemRegImm(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- printSymbolLo(MI, OpNo, O);
+ printS16ImmOperand(MI, OpNo, O);
O << '(';
if (MI->getOperand(OpNo+1).getReg() == PPC::R0)
O << "0";
@@ -191,22 +279,6 @@ void PPCInstPrinter::printMemRegImm(const MCInst *MI, unsigned OpNo,
O << ')';
}
-void PPCInstPrinter::printMemRegImmShifted(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
- if (MI->getOperand(OpNo).isImm())
- printS16X4ImmOperand(MI, OpNo, O);
- else
- printSymbolLo(MI, OpNo, O);
- O << '(';
-
- if (MI->getOperand(OpNo+1).getReg() == PPC::R0)
- O << "0";
- else
- printOperand(MI, OpNo+1, O);
- O << ')';
-}
-
-
void PPCInstPrinter::printMemRegReg(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
// When used as the base register, r0 reads constant zero rather than
@@ -220,11 +292,21 @@ void PPCInstPrinter::printMemRegReg(const MCInst *MI, unsigned OpNo,
printOperand(MI, OpNo+1, O);
}
+void PPCInstPrinter::printTLSCall(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ printBranchOperand(MI, OpNo, O);
+ O << '(';
+ printOperand(MI, OpNo+1, O);
+ O << ')';
+}
/// stripRegisterPrefix - This method strips the character prefix from a
/// register name so that only the number is left. Used for Linux asm.
static const char *stripRegisterPrefix(const char *RegName) {
+ if (FullRegNames)
+ return RegName;
+
switch (RegName[0]) {
case 'r':
case 'f':
@@ -256,39 +338,4 @@ void PPCInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
assert(Op.isExpr() && "unknown operand kind in printOperand");
O << *Op.getExpr();
}
-
-void PPCInstPrinter::printSymbolLo(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
- if (MI->getOperand(OpNo).isImm())
- return printS16ImmOperand(MI, OpNo, O);
-
- // FIXME: This is a terrible hack because we can't encode lo16() as an operand
- // flag of a subtraction. See the FIXME in GetSymbolRef in PPCMCInstLower.
- if (MI->getOperand(OpNo).isExpr() &&
- isa<MCBinaryExpr>(MI->getOperand(OpNo).getExpr())) {
- O << "lo16(";
- printOperand(MI, OpNo, O);
- O << ')';
- } else {
- printOperand(MI, OpNo, O);
- }
-}
-
-void PPCInstPrinter::printSymbolHi(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
- if (MI->getOperand(OpNo).isImm())
- return printS16ImmOperand(MI, OpNo, O);
-
- // FIXME: This is a terrible hack because we can't encode lo16() as an operand
- // flag of a subtraction. See the FIXME in GetSymbolRef in PPCMCInstLower.
- if (MI->getOperand(OpNo).isExpr() &&
- isa<MCBinaryExpr>(MI->getOperand(OpNo).getExpr())) {
- O << "ha16(";
- printOperand(MI, OpNo, O);
- O << ')';
- } else {
- printOperand(MI, OpNo, O);
- }
-}
-
diff --git a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
index 8f1e211c3e96..8a4c03d64540 100644
--- a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
+++ b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
@@ -21,15 +21,14 @@ namespace llvm {
class MCOperand;
class PPCInstPrinter : public MCInstPrinter {
- // 0 -> AIX, 1 -> Darwin.
- unsigned SyntaxVariant;
+ bool IsDarwin;
public:
PPCInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
- const MCRegisterInfo &MRI, unsigned syntaxVariant)
- : MCInstPrinter(MAI, MII, MRI), SyntaxVariant(syntaxVariant) {}
+ const MCRegisterInfo &MRI, bool isDarwin)
+ : MCInstPrinter(MAI, MII, MRI), IsDarwin(isDarwin) {}
bool isDarwinSyntax() const {
- return SyntaxVariant == 1;
+ return IsDarwin;
}
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
@@ -50,19 +49,14 @@ public:
void printU6ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printS16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printS16X4ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printAbsAddrOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printAbsBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printTLSCall(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printcrbitm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printMemRegImm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printMemRegImmShifted(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printMemRegReg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-
- // FIXME: Remove
- void printSymbolLo(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printSymbolHi(const MCInst *MI, unsigned OpNo, raw_ostream &O);
};
} // end namespace llvm
diff --git a/lib/Target/PowerPC/MCTargetDesc/CMakeLists.txt b/lib/Target/PowerPC/MCTargetDesc/CMakeLists.txt
index b674883db7de..3efa5ecf9096 100644
--- a/lib/Target/PowerPC/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/PowerPC/MCTargetDesc/CMakeLists.txt
@@ -3,7 +3,9 @@ add_llvm_library(LLVMPowerPCDesc
PPCMCTargetDesc.cpp
PPCMCAsmInfo.cpp
PPCMCCodeEmitter.cpp
+ PPCMCExpr.cpp
PPCPredicates.cpp
+ PPCMachObjectWriter.cpp
PPCELFObjectWriter.cpp
)
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
index ec2657403e0c..0d420815ea62 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
@@ -16,13 +16,13 @@
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCValue.h"
-#include "llvm/Object/MachOFormat.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
-static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
+static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
switch (Kind) {
default:
llvm_unreachable("Unknown fixup kind!");
@@ -30,40 +30,45 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case FK_Data_2:
case FK_Data_4:
case FK_Data_8:
- case PPC::fixup_ppc_tlsreg:
case PPC::fixup_ppc_nofixup:
return Value;
case PPC::fixup_ppc_brcond14:
+ case PPC::fixup_ppc_brcond14abs:
return Value & 0xfffc;
case PPC::fixup_ppc_br24:
+ case PPC::fixup_ppc_br24abs:
return Value & 0x3fffffc;
-#if 0
- case PPC::fixup_ppc_hi16:
- return (Value >> 16) & 0xffff;
-#endif
- case PPC::fixup_ppc_ha16:
- return ((Value >> 16) + ((Value & 0x8000) ? 1 : 0)) & 0xffff;
- case PPC::fixup_ppc_lo16:
+ case PPC::fixup_ppc_half16:
return Value & 0xffff;
- case PPC::fixup_ppc_lo16_ds:
+ case PPC::fixup_ppc_half16ds:
return Value & 0xfffc;
}
}
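
The masking above can be sanity-checked with plain integers; this standalone sketch just restates three of the cases, with the fixup kinds replaced by helper names:

    #include <cassert>
    #include <cstdint>

    // Same masks as adjustFixupValue uses for half16, half16ds and br24.
    static uint64_t maskHalf16(uint64_t V)   { return V & 0xffff; }
    static uint64_t maskHalf16DS(uint64_t V) { return V & 0xfffc; }
    static uint64_t maskBr24(uint64_t V)     { return V & 0x3fffffc; }

    int main() {
      assert(maskHalf16(0x12345678)   == 0x5678);     // low halfword only
      assert(maskHalf16DS(0x5679)     == 0x5678);     // DS form clears bits 1:0
      assert(maskBr24(0x12345678)     == 0x02345678); // 24-bit word-aligned field
      return 0;
    }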
-namespace {
-class PPCMachObjectWriter : public MCMachObjectTargetWriter {
-public:
- PPCMachObjectWriter(bool Is64Bit, uint32_t CPUType,
- uint32_t CPUSubtype)
- : MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype) {}
-
- void RecordRelocation(MachObjectWriter *Writer,
- const MCAssembler &Asm, const MCAsmLayout &Layout,
- const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, uint64_t &FixedValue) {
- llvm_unreachable("Relocation emission for MachO/PPC unimplemented!");
+static unsigned getFixupKindNumBytes(unsigned Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+ case FK_Data_1:
+ return 1;
+ case FK_Data_2:
+ case PPC::fixup_ppc_half16:
+ case PPC::fixup_ppc_half16ds:
+ return 2;
+ case FK_Data_4:
+ case PPC::fixup_ppc_brcond14:
+ case PPC::fixup_ppc_brcond14abs:
+ case PPC::fixup_ppc_br24:
+ case PPC::fixup_ppc_br24abs:
+ return 4;
+ case FK_Data_8:
+ return 8;
+ case PPC::fixup_ppc_nofixup:
+ return 0;
}
-};
+}
+
+namespace {
class PPCAsmBackend : public MCAsmBackend {
const Target &TheTarget;
@@ -77,10 +82,10 @@ public:
// name offset bits flags
{ "fixup_ppc_br24", 6, 24, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_ppc_brcond14", 16, 14, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_ppc_lo16", 16, 16, 0 },
- { "fixup_ppc_ha16", 16, 16, 0 },
- { "fixup_ppc_lo16_ds", 16, 14, 0 },
- { "fixup_ppc_tlsreg", 0, 0, 0 },
+ { "fixup_ppc_br24abs", 6, 24, 0 },
+ { "fixup_ppc_brcond14abs", 16, 14, 0 },
+ { "fixup_ppc_half16", 0, 16, 0 },
+ { "fixup_ppc_half16ds", 0, 14, 0 },
{ "fixup_ppc_nofixup", 0, 0, 0 }
};
@@ -98,12 +103,13 @@ public:
if (!Value) return; // Doesn't change encoding.
unsigned Offset = Fixup.getOffset();
+ unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
// For each byte of the fragment that the fixup touches, mask in the bits
// from the fixup value. The Value has been "split up" into the appropriate
// bitfields above.
- for (unsigned i = 0; i != 4; ++i)
- Data[Offset + i] |= uint8_t((Value >> ((4 - i - 1)*8)) & 0xff);
+ for (unsigned i = 0; i != NumBytes; ++i)
+ Data[Offset + i] |= uint8_t((Value >> ((NumBytes - i - 1)*8)) & 0xff);
}
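
The per-byte loop above ORs the masked value into the instruction stream most-significant byte first; a standalone restatement with example Offset/NumBytes values:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t Data[8] = {0};             // stand-in for the fragment contents
      uint64_t Value = 0x1234;           // already-masked fixup value
      unsigned Offset = 2, NumBytes = 2; // e.g. a half16 fixup at byte offset 2

      // Same loop shape as applyFixup: big-endian placement of the value.
      for (unsigned i = 0; i != NumBytes; ++i)
        Data[Offset + i] |= uint8_t((Value >> ((NumBytes - i - 1) * 8)) & 0xff);

      assert(Data[2] == 0x12 && Data[3] == 0x34);
      return 0;
    }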
bool mayNeedRelaxation(const MCInst &Inst) const {
@@ -126,16 +132,23 @@ public:
}
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const {
- // FIXME: Zero fill for now. That's not right, but at least will get the
- // section size right.
- for (uint64_t i = 0; i != Count; ++i)
- OW->Write8(0);
+ uint64_t NumNops = Count / 4;
+ for (uint64_t i = 0; i != NumNops; ++i)
+ OW->Write32(0x60000000);
+
+ switch (Count % 4) {
+ default: break; // No leftover bytes to write
+ case 1: OW->Write8(0); break;
+ case 2: OW->Write16(0); break;
+ case 3: OW->Write16(0); OW->Write8(0); break;
+ }
+
return true;
}
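
writeNopData now pads with whole PowerPC nops (ori 0,0,0 = 0x60000000) and zero-fills any remainder; a quick standalone check of how a padding request splits:

    #include <cassert>
    #include <cstdint>

    int main() {
      // For Count padding bytes: Count / 4 nop words, then Count % 4 zero bytes.
      uint64_t Count = 10;
      assert(Count / 4 == 2);  // two 0x60000000 words
      assert(Count % 4 == 2);  // followed by the Write16(0) case
      return 0;
    }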
unsigned getPointerSize() const {
StringRef Name = TheTarget.getName();
- if (Name == "ppc64") return 8;
+ if (Name == "ppc64" || Name == "ppc64le") return 8;
assert(Name == "ppc32" && "Unknown target name!");
return 4;
}
@@ -151,12 +164,11 @@ namespace {
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
bool is64 = getPointerSize() == 8;
- return createMachObjectWriter(new PPCMachObjectWriter(
- /*Is64Bit=*/is64,
- (is64 ? object::mach::CTM_PowerPC64 :
- object::mach::CTM_PowerPC),
- object::mach::CSPPC_ALL),
- OS, /*IsLittleEndian=*/false);
+ return createPPCMachObjectWriter(
+ OS,
+ /*Is64Bit=*/is64,
+ (is64 ? MachO::CPU_TYPE_POWERPC64 : MachO::CPU_TYPE_POWERPC),
+ MachO::CPU_SUBTYPE_POWERPC_ALL);
}
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
@@ -183,10 +195,9 @@ namespace {
} // end anonymous namespace
-
-
-
-MCAsmBackend *llvm::createPPCAsmBackend(const Target &T, StringRef TT, StringRef CPU) {
+MCAsmBackend *llvm::createPPCAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
if (Triple(TT).isOSDarwin())
return new DarwinPPCAsmBackend(T);
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
index 7a84723ed56a..54de70eff71a 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
@@ -30,29 +30,17 @@ namespace {
virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
bool IsPCRel, bool IsRelocWithSymbol,
int64_t Addend) const;
+ virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const;
virtual const MCSymbol *undefinedExplicitRelSym(const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const;
- virtual void adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset);
-
- virtual void sortRelocs(const MCAssembler &Asm,
- std::vector<ELFRelocationEntry> &Relocs);
- };
-
- class PPCELFRelocationEntry : public ELFRelocationEntry {
- public:
- PPCELFRelocationEntry(const ELFRelocationEntry &RE);
- bool operator<(const PPCELFRelocationEntry &RE) const {
- return (RE.r_offset < r_offset ||
- (RE.r_offset == r_offset && RE.Type > Type));
- }
};
}
-PPCELFRelocationEntry::PPCELFRelocationEntry(const ELFRelocationEntry &RE)
- : ELFRelocationEntry(RE.r_offset, RE.Index, RE.Type, RE.Symbol,
- RE.r_addend, *RE.Fixup) {}
-
PPCELFObjectWriter::PPCELFObjectWriter(bool Is64Bit, uint8_t OSABI)
: MCELFObjectTargetWriter(Is64Bit, OSABI,
Is64Bit ? ELF::EM_PPC64 : ELF::EM_PPC,
@@ -75,11 +63,30 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target,
default:
llvm_unreachable("Unimplemented");
case PPC::fixup_ppc_br24:
+ case PPC::fixup_ppc_br24abs:
Type = ELF::R_PPC_REL24;
break;
case PPC::fixup_ppc_brcond14:
+ case PPC::fixup_ppc_brcond14abs:
Type = ELF::R_PPC_REL14;
break;
+ case PPC::fixup_ppc_half16:
+ switch (Modifier) {
+ default: llvm_unreachable("Unsupported Modifier");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_PPC_REL16;
+ break;
+ case MCSymbolRefExpr::VK_PPC_LO:
+ Type = ELF::R_PPC_REL16_LO;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HI:
+ Type = ELF::R_PPC_REL16_HI;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HA:
+ Type = ELF::R_PPC_REL16_HA;
+ break;
+ }
+ break;
case FK_Data_4:
case FK_PCRel_4:
Type = ELF::R_PPC_REL32;
@@ -92,93 +99,216 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target,
} else {
switch ((unsigned)Fixup.getKind()) {
default: llvm_unreachable("invalid fixup kind!");
- case PPC::fixup_ppc_br24:
+ case PPC::fixup_ppc_br24abs:
Type = ELF::R_PPC_ADDR24;
break;
- case PPC::fixup_ppc_brcond14:
+ case PPC::fixup_ppc_brcond14abs:
Type = ELF::R_PPC_ADDR14; // XXX: or BRNTAKEN?_
break;
- case PPC::fixup_ppc_ha16:
+ case PPC::fixup_ppc_half16:
switch (Modifier) {
default: llvm_unreachable("Unsupported Modifier");
- case MCSymbolRefExpr::VK_PPC_TPREL16_HA:
- Type = ELF::R_PPC_TPREL16_HA;
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_PPC_ADDR16;
break;
- case MCSymbolRefExpr::VK_PPC_DTPREL16_HA:
- Type = ELF::R_PPC64_DTPREL16_HA;
+ case MCSymbolRefExpr::VK_PPC_LO:
+ Type = ELF::R_PPC_ADDR16_LO;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HI:
+ Type = ELF::R_PPC_ADDR16_HI;
break;
- case MCSymbolRefExpr::VK_PPC_GAS_HA16:
- case MCSymbolRefExpr::VK_PPC_DARWIN_HA16:
+ case MCSymbolRefExpr::VK_PPC_HA:
Type = ELF::R_PPC_ADDR16_HA;
- break;
- case MCSymbolRefExpr::VK_PPC_TOC16_HA:
- Type = ELF::R_PPC64_TOC16_HA;
break;
- case MCSymbolRefExpr::VK_PPC_GOT_TPREL16_HA:
- Type = ELF::R_PPC64_GOT_TPREL16_HA;
+ case MCSymbolRefExpr::VK_PPC_HIGHER:
+ Type = ELF::R_PPC64_ADDR16_HIGHER;
break;
- case MCSymbolRefExpr::VK_PPC_GOT_TLSGD16_HA:
- Type = ELF::R_PPC64_GOT_TLSGD16_HA;
+ case MCSymbolRefExpr::VK_PPC_HIGHERA:
+ Type = ELF::R_PPC64_ADDR16_HIGHERA;
break;
- case MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_HA:
- Type = ELF::R_PPC64_GOT_TLSLD16_HA;
+ case MCSymbolRefExpr::VK_PPC_HIGHEST:
+ Type = ELF::R_PPC64_ADDR16_HIGHEST;
break;
- }
- break;
- case PPC::fixup_ppc_lo16:
- switch (Modifier) {
- default: llvm_unreachable("Unsupported Modifier");
- case MCSymbolRefExpr::VK_PPC_TPREL16_LO:
- Type = ELF::R_PPC_TPREL16_LO;
+ case MCSymbolRefExpr::VK_PPC_HIGHESTA:
+ Type = ELF::R_PPC64_ADDR16_HIGHESTA;
break;
- case MCSymbolRefExpr::VK_PPC_DTPREL16_LO:
- Type = ELF::R_PPC64_DTPREL16_LO;
+ case MCSymbolRefExpr::VK_GOT:
+ Type = ELF::R_PPC_GOT16;
break;
- case MCSymbolRefExpr::VK_None:
- Type = ELF::R_PPC_ADDR16;
+ case MCSymbolRefExpr::VK_PPC_GOT_LO:
+ Type = ELF::R_PPC_GOT16_LO;
break;
- case MCSymbolRefExpr::VK_PPC_GAS_LO16:
- case MCSymbolRefExpr::VK_PPC_DARWIN_LO16:
- Type = ELF::R_PPC_ADDR16_LO;
- break;
- case MCSymbolRefExpr::VK_PPC_TOC_ENTRY:
+ case MCSymbolRefExpr::VK_PPC_GOT_HI:
+ Type = ELF::R_PPC_GOT16_HI;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_HA:
+ Type = ELF::R_PPC_GOT16_HA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TOC:
Type = ELF::R_PPC64_TOC16;
break;
- case MCSymbolRefExpr::VK_PPC_TOC16_LO:
+ case MCSymbolRefExpr::VK_PPC_TOC_LO:
Type = ELF::R_PPC64_TOC16_LO;
break;
- case MCSymbolRefExpr::VK_PPC_GOT_TLSGD16_LO:
+ case MCSymbolRefExpr::VK_PPC_TOC_HI:
+ Type = ELF::R_PPC64_TOC16_HI;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TOC_HA:
+ Type = ELF::R_PPC64_TOC16_HA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TPREL:
+ Type = ELF::R_PPC_TPREL16;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TPREL_LO:
+ Type = ELF::R_PPC_TPREL16_LO;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TPREL_HI:
+ Type = ELF::R_PPC_TPREL16_HI;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TPREL_HA:
+ Type = ELF::R_PPC_TPREL16_HA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TPREL_HIGHER:
+ Type = ELF::R_PPC64_TPREL16_HIGHER;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TPREL_HIGHERA:
+ Type = ELF::R_PPC64_TPREL16_HIGHERA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TPREL_HIGHEST:
+ Type = ELF::R_PPC64_TPREL16_HIGHEST;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TPREL_HIGHESTA:
+ Type = ELF::R_PPC64_TPREL16_HIGHESTA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL:
+ Type = ELF::R_PPC64_DTPREL16;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL_LO:
+ Type = ELF::R_PPC64_DTPREL16_LO;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL_HI:
+ Type = ELF::R_PPC64_DTPREL16_HI;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL_HA:
+ Type = ELF::R_PPC64_DTPREL16_HA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL_HIGHER:
+ Type = ELF::R_PPC64_DTPREL16_HIGHER;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL_HIGHERA:
+ Type = ELF::R_PPC64_DTPREL16_HIGHERA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL_HIGHEST:
+ Type = ELF::R_PPC64_DTPREL16_HIGHEST;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL_HIGHESTA:
+ Type = ELF::R_PPC64_DTPREL16_HIGHESTA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TLSGD:
+ Type = ELF::R_PPC64_GOT_TLSGD16;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO:
Type = ELF::R_PPC64_GOT_TLSGD16_LO;
break;
- case MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_LO:
+ case MCSymbolRefExpr::VK_PPC_GOT_TLSGD_HI:
+ Type = ELF::R_PPC64_GOT_TLSGD16_HI;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TLSGD_HA:
+ Type = ELF::R_PPC64_GOT_TLSGD16_HA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TLSLD:
+ Type = ELF::R_PPC64_GOT_TLSLD16;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO:
Type = ELF::R_PPC64_GOT_TLSLD16_LO;
break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TLSLD_HI:
+ Type = ELF::R_PPC64_GOT_TLSLD16_HI;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TLSLD_HA:
+ Type = ELF::R_PPC64_GOT_TLSLD16_HA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TPREL:
+ /* We don't have R_PPC64_GOT_TPREL16, but since GOT offsets
+ are always 4-aligned, we can use R_PPC64_GOT_TPREL16_DS. */
+ Type = ELF::R_PPC64_GOT_TPREL16_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TPREL_LO:
+ /* We don't have R_PPC64_GOT_TPREL16_LO, but since GOT offsets
+ are always 4-aligned, we can use R_PPC64_GOT_TPREL16_LO_DS. */
+ Type = ELF::R_PPC64_GOT_TPREL16_LO_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TPREL_HI:
+ Type = ELF::R_PPC64_GOT_TPREL16_HI;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_DTPREL:
+ /* We don't have R_PPC64_GOT_DTPREL16, but since GOT offsets
+ are always 4-aligned, we can use R_PPC64_GOT_DTPREL16_DS. */
+ Type = ELF::R_PPC64_GOT_DTPREL16_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_DTPREL_LO:
+ /* We don't have R_PPC64_GOT_DTPREL16_LO, but since GOT offsets
+ are always 4-aligned, we can use R_PPC64_GOT_DTPREL16_LO_DS. */
+ Type = ELF::R_PPC64_GOT_DTPREL16_LO_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TPREL_HA:
+ Type = ELF::R_PPC64_GOT_TPREL16_HA;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_DTPREL_HI:
+ Type = ELF::R_PPC64_GOT_DTPREL16_HI;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_DTPREL_HA:
+ Type = ELF::R_PPC64_GOT_DTPREL16_HA;
+ break;
}
break;
- case PPC::fixup_ppc_lo16_ds:
+ case PPC::fixup_ppc_half16ds:
switch (Modifier) {
default: llvm_unreachable("Unsupported Modifier");
case MCSymbolRefExpr::VK_None:
Type = ELF::R_PPC64_ADDR16_DS;
break;
- case MCSymbolRefExpr::VK_PPC_GAS_LO16:
- case MCSymbolRefExpr::VK_PPC_DARWIN_LO16:
+ case MCSymbolRefExpr::VK_PPC_LO:
Type = ELF::R_PPC64_ADDR16_LO_DS;
break;
- case MCSymbolRefExpr::VK_PPC_TOC_ENTRY:
+ case MCSymbolRefExpr::VK_GOT:
+ Type = ELF::R_PPC64_GOT16_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_LO:
+ Type = ELF::R_PPC64_GOT16_LO_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TOC:
Type = ELF::R_PPC64_TOC16_DS;
break;
- case MCSymbolRefExpr::VK_PPC_TOC16_LO:
+ case MCSymbolRefExpr::VK_PPC_TOC_LO:
Type = ELF::R_PPC64_TOC16_LO_DS;
break;
- case MCSymbolRefExpr::VK_PPC_GOT_TPREL16_LO:
+ case MCSymbolRefExpr::VK_PPC_TPREL:
+ Type = ELF::R_PPC64_TPREL16_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TPREL_LO:
+ Type = ELF::R_PPC64_TPREL16_LO_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL:
+ Type = ELF::R_PPC64_DTPREL16_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL_LO:
+ Type = ELF::R_PPC64_DTPREL16_LO_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TPREL:
+ Type = ELF::R_PPC64_GOT_TPREL16_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_TPREL_LO:
Type = ELF::R_PPC64_GOT_TPREL16_LO_DS;
break;
+ case MCSymbolRefExpr::VK_PPC_GOT_DTPREL:
+ Type = ELF::R_PPC64_GOT_DTPREL16_DS;
+ break;
+ case MCSymbolRefExpr::VK_PPC_GOT_DTPREL_LO:
+ Type = ELF::R_PPC64_GOT_DTPREL16_LO_DS;
+ break;
}
break;
- case PPC::fixup_ppc_tlsreg:
- Type = ELF::R_PPC64_TLS;
- break;
case PPC::fixup_ppc_nofixup:
switch (Modifier) {
default: llvm_unreachable("Unsupported Modifier");
@@ -188,17 +318,29 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target,
case MCSymbolRefExpr::VK_PPC_TLSLD:
Type = ELF::R_PPC64_TLSLD;
break;
+ case MCSymbolRefExpr::VK_PPC_TLS:
+ Type = ELF::R_PPC64_TLS;
+ break;
}
break;
case FK_Data_8:
switch (Modifier) {
default: llvm_unreachable("Unsupported Modifier");
- case MCSymbolRefExpr::VK_PPC_TOC:
+ case MCSymbolRefExpr::VK_PPC_TOCBASE:
Type = ELF::R_PPC64_TOC;
break;
case MCSymbolRefExpr::VK_None:
Type = ELF::R_PPC64_ADDR64;
break;
+ case MCSymbolRefExpr::VK_PPC_DTPMOD:
+ Type = ELF::R_PPC64_DTPMOD64;
+ break;
+ case MCSymbolRefExpr::VK_PPC_TPREL:
+ Type = ELF::R_PPC64_TPREL64;
+ break;
+ case MCSymbolRefExpr::VK_PPC_DTPREL:
+ Type = ELF::R_PPC64_DTPREL64;
+ break;
}
break;
case FK_Data_4:
@@ -220,6 +362,35 @@ unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
return getRelocTypeInner(Target, Fixup, IsPCRel);
}
+const MCSymbol *PPCELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ assert(Target.getSymA() && "SymA cannot be 0");
+ MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
+ MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+
+ bool EmitThisSym;
+ switch (Modifier) {
+ // GOT references always need a relocation, even if the
+ // target symbol is local.
+ case MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_PPC_GOT_LO:
+ case MCSymbolRefExpr::VK_PPC_GOT_HI:
+ case MCSymbolRefExpr::VK_PPC_GOT_HA:
+ EmitThisSym = true;
+ break;
+ default:
+ EmitThisSym = false;
+ break;
+ }
+
+ if (EmitThisSym)
+ return &Target.getSymA()->getSymbol().AliasedSymbol();
+ return NULL;
+}
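
The policy above reduces to: GOT-relative modifiers always keep the symbol in the relocation, everything else may be resolved against the section; a compact standalone restatement with stand-in modifier values:

    #include <cassert>

    enum Modifier { None, GOT, GOT_LO, GOT_HI, GOT_HA, TOC_LO };

    // Mirrors the switch in ExplicitRelSym: only GOT references force the
    // symbol itself to be emitted, even when the target is local.
    static bool emitThisSym(Modifier M) {
      switch (M) {
      case GOT: case GOT_LO: case GOT_HI: case GOT_HA:
        return true;
      default:
        return false;
      }
    }

    int main() {
      assert(emitThisSym(GOT_HA));
      assert(!emitThisSym(TOC_LO));
      return 0;
    }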
+
const MCSymbol *PPCELFObjectWriter::undefinedExplicitRelSym(const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
@@ -240,47 +411,6 @@ const MCSymbol *PPCELFObjectWriter::undefinedExplicitRelSym(const MCValue &Targe
return NULL;
}
-void PPCELFObjectWriter::
-adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset) {
- switch ((unsigned)Fixup.getKind()) {
- case PPC::fixup_ppc_ha16:
- case PPC::fixup_ppc_lo16:
- case PPC::fixup_ppc_lo16_ds:
- RelocOffset += 2;
- break;
- default:
- break;
- }
-}
-
-// The standard sorter only sorts on the r_offset field, but PowerPC can
-// have multiple relocations at the same offset. Sort secondarily on the
-// relocation type to avoid nondeterminism.
-void PPCELFObjectWriter::sortRelocs(const MCAssembler &Asm,
- std::vector<ELFRelocationEntry> &Relocs) {
-
- // Copy to a temporary vector of relocation entries having a different
- // sort function.
- std::vector<PPCELFRelocationEntry> TmpRelocs;
-
- for (std::vector<ELFRelocationEntry>::iterator R = Relocs.begin();
- R != Relocs.end(); ++R) {
- TmpRelocs.push_back(PPCELFRelocationEntry(*R));
- }
-
- // Sort in place by ascending r_offset and descending r_type.
- array_pod_sort(TmpRelocs.begin(), TmpRelocs.end());
-
- // Copy back to the original vector.
- unsigned I = 0;
- for (std::vector<PPCELFRelocationEntry>::iterator R = TmpRelocs.begin();
- R != TmpRelocs.end(); ++R, ++I) {
- Relocs[I] = ELFRelocationEntry(R->r_offset, R->Index, R->Type,
- R->Symbol, R->r_addend, *R->Fixup);
- }
-}
-
-
MCObjectWriter *llvm::createPPCELFObjectWriter(raw_ostream &OS,
bool Is64Bit,
uint8_t OSABI) {
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h b/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
index 86c44f57a5e2..68de8c1f27a5 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
@@ -25,23 +25,25 @@ enum Fixups {
/// branches.
fixup_ppc_brcond14,
- /// fixup_ppc_lo16 - A 16-bit fixup corresponding to lo16(_foo) for instrs
- /// like 'li'.
- fixup_ppc_lo16,
-
- /// fixup_ppc_ha16 - A 16-bit fixup corresponding to ha16(_foo) for instrs
- /// like 'lis'.
- fixup_ppc_ha16,
+ /// fixup_ppc_br24abs - 24-bit absolute relocation for direct branches
+ /// like 'ba' and 'bla'.
+ fixup_ppc_br24abs,
+
+ /// fixup_ppc_brcond14abs - 14-bit absolute relocation for conditional
+ /// branches.
+ fixup_ppc_brcond14abs,
+
+ /// fixup_ppc_half16 - A 16-bit fixup corresponding to lo16(_foo)
+ /// or ha16(_foo) for instrs like 'li' or 'addis'.
+ fixup_ppc_half16,
- /// fixup_ppc_lo16_ds - A 14-bit fixup corresponding to lo16(_foo) with
+ /// fixup_ppc_half16ds - A 14-bit fixup corresponding to lo16(_foo) with
/// implied 2 zero bits for instrs like 'std'.
- fixup_ppc_lo16_ds,
-
- /// fixup_ppc_tlsreg - Insert thread-pointer register number.
- fixup_ppc_tlsreg,
+ fixup_ppc_half16ds,
/// fixup_ppc_nofixup - Not a true fixup, but ties a symbol to a call
- /// to __tls_get_addr for the TLS general and local dynamic models.
+ /// to __tls_get_addr for the TLS general and local dynamic models,
+ /// or inserts the thread-pointer register number.
fixup_ppc_nofixup,
// Marker
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
index a25d7fe64f3a..f3dddce30120 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
@@ -22,7 +22,6 @@ PPCMCAsmInfoDarwin::PPCMCAsmInfoDarwin(bool is64Bit) {
}
IsLittleEndian = false;
- PCSymbol = ".";
CommentString = ";";
ExceptionsType = ExceptionHandling::DwarfCFI;
@@ -47,24 +46,24 @@ PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit) {
CommentString = "#";
GlobalPrefix = "";
PrivateGlobalPrefix = ".L";
- WeakRefDirective = "\t.weak\t";
-
+
// Uses '.section' before '.bss' directive
UsesELFSectionDirectiveForBSS = true;
// Debug Information
SupportsDebugInformation = true;
- PCSymbol = ".";
+ DollarIsPC = true;
// Set up DWARF directives
HasLEB128 = true; // Target asm supports leb128 directives (little-endian)
+ MinInstAlignment = 4;
// Exceptions handling
ExceptionsType = ExceptionHandling::DwarfCFI;
ZeroDirective = "\t.space\t";
Data64bitsDirective = is64Bit ? "\t.quad\t" : 0;
- AssemblerDialect = 0; // Old-Style mnemonics.
+ AssemblerDialect = 1; // New-Style mnemonics.
}
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
index 7b4ed9f14eb6..1530e774cfc7 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
@@ -15,6 +15,7 @@
#define PPCTARGETASMINFO_H
#include "llvm/MC/MCAsmInfoDarwin.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
@@ -24,7 +25,7 @@ namespace llvm {
explicit PPCMCAsmInfoDarwin(bool is64Bit);
};
- class PPCLinuxMCAsmInfo : public MCAsmInfo {
+ class PPCLinuxMCAsmInfo : public MCAsmInfoELF {
virtual void anchor();
public:
explicit PPCLinuxMCAsmInfo(bool is64Bit);
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
index 2223cd623cb5..346a9beada90 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
@@ -23,6 +23,7 @@
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetOpcodes.h"
using namespace llvm;
STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
@@ -48,16 +49,20 @@ public:
SmallVectorImpl<MCFixup> &Fixups) const;
unsigned getCondBrEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getHA16Encoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getLO16Encoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getAbsDirectBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getAbsCondBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getImm16Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
unsigned getMemRIEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
unsigned getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
unsigned getTLSRegEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getTLSCallEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
unsigned get_crbitm_encoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
@@ -72,13 +77,19 @@ public:
SmallVectorImpl<MCFixup> &Fixups) const;
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const {
+ // For fast-isel, a float COPY_TO_REGCLASS can survive this long.
+ // It's just a nop to keep the register classes happy, so don't
+ // generate anything.
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == TargetOpcode::COPY_TO_REGCLASS)
+ return;
+
uint64_t Bits = getBinaryCodeForInstr(MI, Fixups);
// BL8_NOP etc. all have a size of 8 because of the following 'nop'.
unsigned Size = 4; // FIXME: Have Desc.getSize() return the correct value!
- unsigned Opcode = MI.getOpcode();
if (Opcode == PPC::BL8_NOP || Opcode == PPC::BLA8_NOP ||
- Opcode == PPC::BL8_NOP_TLSGD || Opcode == PPC::BL8_NOP_TLSLD)
+ Opcode == PPC::BL8_NOP_TLS)
Size = 8;
// Output the constant in big endian byte order.
@@ -111,17 +122,6 @@ getDirectBrEncoding(const MCInst &MI, unsigned OpNo,
// Add a fixup for the branch target.
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_br24));
-
- // For special TLS calls, add another fixup for the symbol. Apparently
- // BL8_NOP, BL8_NOP_TLSGD, and BL8_NOP_TLSLD are sufficiently
- // similar that TblGen will not generate a separate case for the latter
- // two, so this is the only way to get the extra fixup generated.
- unsigned Opcode = MI.getOpcode();
- if (Opcode == PPC::BL8_NOP_TLSGD || Opcode == PPC::BL8_NOP_TLSLD) {
- const MCOperand &MO2 = MI.getOperand(OpNo+1);
- Fixups.push_back(MCFixup::Create(0, MO2.getExpr(),
- (MCFixupKind)PPC::fixup_ppc_nofixup));
- }
return 0;
}
@@ -136,25 +136,38 @@ unsigned PPCMCCodeEmitter::getCondBrEncoding(const MCInst &MI, unsigned OpNo,
return 0;
}
-unsigned PPCMCCodeEmitter::getHA16Encoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+unsigned PPCMCCodeEmitter::
+getAbsDirectBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
-
+
// Add a fixup for the branch target.
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
- (MCFixupKind)PPC::fixup_ppc_ha16));
+ (MCFixupKind)PPC::fixup_ppc_br24abs));
return 0;
}
-unsigned PPCMCCodeEmitter::getLO16Encoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+unsigned PPCMCCodeEmitter::
+getAbsCondBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
-
+
// Add a fixup for the branch target.
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
- (MCFixupKind)PPC::fixup_ppc_lo16));
+ (MCFixupKind)PPC::fixup_ppc_brcond14abs));
+ return 0;
+}
+
+unsigned PPCMCCodeEmitter::getImm16Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
+
+ // Add a fixup for the immediate field.
+ Fixups.push_back(MCFixup::Create(2, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_half16));
return 0;
}
@@ -170,8 +183,8 @@ unsigned PPCMCCodeEmitter::getMemRIEncoding(const MCInst &MI, unsigned OpNo,
return (getMachineOpValue(MI, MO, Fixups) & 0xFFFF) | RegBits;
// Add a fixup for the displacement field.
- Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
- (MCFixupKind)PPC::fixup_ppc_lo16));
+ Fixups.push_back(MCFixup::Create(2, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_half16));
return RegBits;
}
@@ -185,11 +198,11 @@ unsigned PPCMCCodeEmitter::getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isImm())
- return (getMachineOpValue(MI, MO, Fixups) & 0x3FFF) | RegBits;
+ return ((getMachineOpValue(MI, MO, Fixups) >> 2) & 0x3FFF) | RegBits;
// Add a fixup for the displacement field.
- Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
- (MCFixupKind)PPC::fixup_ppc_lo16_ds));
+ Fixups.push_back(MCFixup::Create(2, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_half16ds));
return RegBits;
}
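
DS-form instructions such as std store their displacement divided by four in a 14-bit field, which is what the >> 2 above implements; a standalone sketch, with the base-register placement in RegBits assumed from the surrounding (unchanged) code:

    #include <cassert>
    #include <cstdint>

    int main() {
      unsigned RegBits = 1u << 14;  // assumed: base register r1 in the upper bits
      int64_t Disp = 32;            // DS-form displacements must be 4-byte aligned
      unsigned Encoded = unsigned((uint64_t(Disp) >> 2) & 0x3FFF) | RegBits;
      assert((Encoded & 0x3FFF) == 8);  // 32 / 4 lands in the low 14 bits
      return 0;
    }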
@@ -203,19 +216,29 @@ unsigned PPCMCCodeEmitter::getTLSRegEncoding(const MCInst &MI, unsigned OpNo,
// hint to the linker that this statement is part of a relocation sequence.
// Return the thread-pointer register's encoding.
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
- (MCFixupKind)PPC::fixup_ppc_tlsreg));
- return CTX.getRegisterInfo().getEncodingValue(PPC::X13);
+ (MCFixupKind)PPC::fixup_ppc_nofixup));
+ return CTX.getRegisterInfo()->getEncodingValue(PPC::X13);
+}
+
+unsigned PPCMCCodeEmitter::getTLSCallEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // For special TLS calls, we need two fixups; one for the branch target
+ // (__tls_get_addr), which we create via getDirectBrEncoding as usual,
+ // and one for the TLSGD or TLSLD symbol, which is emitted here.
+ const MCOperand &MO = MI.getOperand(OpNo+1);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_nofixup));
+ return getDirectBrEncoding(MI, OpNo, Fixups);
}
unsigned PPCMCCodeEmitter::
get_crbitm_encoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const {
const MCOperand &MO = MI.getOperand(OpNo);
- assert((MI.getOpcode() == PPC::MTCRF ||
- MI.getOpcode() == PPC::MFOCRF ||
- MI.getOpcode() == PPC::MTCRF8) &&
+ assert((MI.getOpcode() == PPC::MTOCRF || MI.getOpcode() == PPC::MTOCRF8 ||
+ MI.getOpcode() == PPC::MFOCRF || MI.getOpcode() == PPC::MFOCRF8) &&
(MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7));
- return 0x80 >> CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ return 0x80 >> CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
}
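
The 0x80 >> enc expression above turns a condition-register field number into a one-hot FXM-style mask bit, with CR0 as the most significant bit; two spot checks:

    #include <cassert>

    int main() {
      assert((0x80 >> 0) == 0x80); // CR0 -> bit 7 of the mask
      assert((0x80 >> 7) == 0x01); // CR7 -> bit 0
      return 0;
    }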
@@ -223,11 +246,12 @@ unsigned PPCMCCodeEmitter::
getMachineOpValue(const MCInst &MI, const MCOperand &MO,
SmallVectorImpl<MCFixup> &Fixups) const {
if (MO.isReg()) {
- // MTCRF/MFOCRF should go through get_crbitm_encoding for the CR operand.
+ // MTOCRF/MFOCRF should go through get_crbitm_encoding for the CR operand.
// The GPR operand should come through here though.
- assert((MI.getOpcode() != PPC::MTCRF && MI.getOpcode() != PPC::MFOCRF) ||
+ assert((MI.getOpcode() != PPC::MTOCRF && MI.getOpcode() != PPC::MTOCRF8 &&
+ MI.getOpcode() != PPC::MFOCRF && MI.getOpcode() != PPC::MFOCRF8) ||
MO.getReg() < PPC::CR0 || MO.getReg() > PPC::CR7);
- return CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ return CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
}
assert(MO.isImm() &&
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
new file mode 100644
index 000000000000..d7e84021595e
--- /dev/null
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
@@ -0,0 +1,155 @@
+//===-- PPCMCExpr.cpp - PPC specific MC expression classes ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "ppcmcexpr"
+#include "PPCMCExpr.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCAsmInfo.h"
+
+using namespace llvm;
+
+const PPCMCExpr*
+PPCMCExpr::Create(VariantKind Kind, const MCExpr *Expr,
+ bool isDarwin, MCContext &Ctx) {
+ return new (Ctx) PPCMCExpr(Kind, Expr, isDarwin);
+}
+
+void PPCMCExpr::PrintImpl(raw_ostream &OS) const {
+ if (isDarwinSyntax()) {
+ switch (Kind) {
+ default: llvm_unreachable("Invalid kind!");
+ case VK_PPC_LO: OS << "lo16"; break;
+ case VK_PPC_HI: OS << "hi16"; break;
+ case VK_PPC_HA: OS << "ha16"; break;
+ }
+
+ OS << '(';
+ getSubExpr()->print(OS);
+ OS << ')';
+ } else {
+ getSubExpr()->print(OS);
+
+ switch (Kind) {
+ default: llvm_unreachable("Invalid kind!");
+ case VK_PPC_LO: OS << "@l"; break;
+ case VK_PPC_HI: OS << "@h"; break;
+ case VK_PPC_HA: OS << "@ha"; break;
+ case VK_PPC_HIGHER: OS << "@higher"; break;
+ case VK_PPC_HIGHERA: OS << "@highera"; break;
+ case VK_PPC_HIGHEST: OS << "@highest"; break;
+ case VK_PPC_HIGHESTA: OS << "@highesta"; break;
+ }
+ }
+}
+
+bool
+PPCMCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout) const {
+ MCValue Value;
+
+ if (!Layout || !getSubExpr()->EvaluateAsRelocatable(Value, *Layout))
+ return false;
+
+ if (Value.isAbsolute()) {
+ int64_t Result = Value.getConstant();
+ switch (Kind) {
+ default:
+ llvm_unreachable("Invalid kind!");
+ case VK_PPC_LO:
+ Result = Result & 0xffff;
+ break;
+ case VK_PPC_HI:
+ Result = (Result >> 16) & 0xffff;
+ break;
+ case VK_PPC_HA:
+ Result = ((Result + 0x8000) >> 16) & 0xffff;
+ break;
+ case VK_PPC_HIGHER:
+ Result = (Result >> 32) & 0xffff;
+ break;
+ case VK_PPC_HIGHERA:
+ Result = ((Result + 0x8000) >> 32) & 0xffff;
+ break;
+ case VK_PPC_HIGHEST:
+ Result = (Result >> 48) & 0xffff;
+ break;
+ case VK_PPC_HIGHESTA:
+ Result = ((Result + 0x8000) >> 48) & 0xffff;
+ break;
+ }
+ Res = MCValue::get(Result);
+ } else {
+ MCContext &Context = Layout->getAssembler().getContext();
+ const MCSymbolRefExpr *Sym = Value.getSymA();
+ MCSymbolRefExpr::VariantKind Modifier = Sym->getKind();
+ if (Modifier != MCSymbolRefExpr::VK_None)
+ return false;
+ switch (Kind) {
+ default:
+ llvm_unreachable("Invalid kind!");
+ case VK_PPC_LO:
+ Modifier = MCSymbolRefExpr::VK_PPC_LO;
+ break;
+ case VK_PPC_HI:
+ Modifier = MCSymbolRefExpr::VK_PPC_HI;
+ break;
+ case VK_PPC_HA:
+ Modifier = MCSymbolRefExpr::VK_PPC_HA;
+ break;
+ case VK_PPC_HIGHERA:
+ Modifier = MCSymbolRefExpr::VK_PPC_HIGHERA;
+ break;
+ case VK_PPC_HIGHER:
+ Modifier = MCSymbolRefExpr::VK_PPC_HIGHER;
+ break;
+ case VK_PPC_HIGHEST:
+ Modifier = MCSymbolRefExpr::VK_PPC_HIGHEST;
+ break;
+ case VK_PPC_HIGHESTA:
+ Modifier = MCSymbolRefExpr::VK_PPC_HIGHESTA;
+ break;
+ }
+ Sym = MCSymbolRefExpr::Create(&Sym->getSymbol(), Modifier, Context);
+ Res = MCValue::get(Sym, Value.getSymB(), Value.getConstant());
+ }
+
+ return true;
+}
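
The constant folding above implements the @l/@h/@ha/@higher/@highest family; a standalone worked example on one 64-bit address shows why @ha can be one larger than @h (the +0x8000 compensates for the sign-extension of the low halfword by instructions like addi):

    #include <cassert>
    #include <cstdint>

    static int64_t lo(int64_t V)      { return V & 0xffff; }
    static int64_t hi(int64_t V)      { return (V >> 16) & 0xffff; }
    static int64_t ha(int64_t V)      { return ((V + 0x8000) >> 16) & 0xffff; }
    static int64_t higher(int64_t V)  { return (V >> 32) & 0xffff; }
    static int64_t highest(int64_t V) { return (V >> 48) & 0xffff; }

    int main() {
      int64_t Addr = 0x0000123480009000;
      assert(lo(Addr) == 0x9000);
      assert(hi(Addr) == 0x8000);
      assert(ha(Addr) == 0x8001);    // carries because lo(Addr) >= 0x8000
      assert(higher(Addr) == 0x1234);
      assert(highest(Addr) == 0x0000);
      return 0;
    }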
+
+// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps
+// that method should be made public?
+static void AddValueSymbols_(const MCExpr *Value, MCAssembler *Asm) {
+ switch (Value->getKind()) {
+ case MCExpr::Target:
+ llvm_unreachable("Can't handle nested target expr!");
+
+ case MCExpr::Constant:
+ break;
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
+ AddValueSymbols_(BE->getLHS(), Asm);
+ AddValueSymbols_(BE->getRHS(), Asm);
+ break;
+ }
+
+ case MCExpr::SymbolRef:
+ Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
+ break;
+
+ case MCExpr::Unary:
+ AddValueSymbols_(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm);
+ break;
+ }
+}
+
+void PPCMCExpr::AddValueSymbols(MCAssembler *Asm) const {
+ AddValueSymbols_(getSubExpr(), Asm);
+}
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h b/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h
new file mode 100644
index 000000000000..e44c7c1adc67
--- /dev/null
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h
@@ -0,0 +1,96 @@
+//===-- PPCMCExpr.h - PPC specific MC expression classes --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PPCMCEXPR_H
+#define PPCMCEXPR_H
+
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCAsmLayout.h"
+
+namespace llvm {
+
+class PPCMCExpr : public MCTargetExpr {
+public:
+ enum VariantKind {
+ VK_PPC_None,
+ VK_PPC_LO,
+ VK_PPC_HI,
+ VK_PPC_HA,
+ VK_PPC_HIGHER,
+ VK_PPC_HIGHERA,
+ VK_PPC_HIGHEST,
+ VK_PPC_HIGHESTA
+ };
+
+private:
+ const VariantKind Kind;
+ const MCExpr *Expr;
+ bool IsDarwin;
+
+ explicit PPCMCExpr(VariantKind _Kind, const MCExpr *_Expr,
+ bool _IsDarwin)
+ : Kind(_Kind), Expr(_Expr), IsDarwin(_IsDarwin) {}
+
+public:
+ /// @name Construction
+ /// @{
+
+ static const PPCMCExpr *Create(VariantKind Kind, const MCExpr *Expr,
+ bool isDarwin, MCContext &Ctx);
+
+ static const PPCMCExpr *CreateLo(const MCExpr *Expr,
+ bool isDarwin, MCContext &Ctx) {
+ return Create(VK_PPC_LO, Expr, isDarwin, Ctx);
+ }
+
+ static const PPCMCExpr *CreateHi(const MCExpr *Expr,
+ bool isDarwin, MCContext &Ctx) {
+ return Create(VK_PPC_HI, Expr, isDarwin, Ctx);
+ }
+
+ static const PPCMCExpr *CreateHa(const MCExpr *Expr,
+ bool isDarwin, MCContext &Ctx) {
+ return Create(VK_PPC_HA, Expr, isDarwin, Ctx);
+ }
+
+ /// @}
+ /// @name Accessors
+ /// @{
+
+ /// getKind - Get the kind of this expression.
+ VariantKind getKind() const { return Kind; }
+
+ /// getSubExpr - Get the child of this expression.
+ const MCExpr *getSubExpr() const { return Expr; }
+
+ /// isDarwinSyntax - True if expression is to be printed using Darwin syntax.
+ bool isDarwinSyntax() const { return IsDarwin; }
+
+
+ /// @}
+
+ void PrintImpl(raw_ostream &OS) const;
+ bool EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout) const;
+ void AddValueSymbols(MCAssembler *) const;
+ const MCSection *FindAssociatedSection() const {
+ return getSubExpr()->FindAssociatedSection();
+ }
+
+ // There are no TLS PPCMCExprs at the moment.
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {}
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Target;
+ }
+};
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index 2209f936ec33..f18d095c6d02 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -14,13 +14,16 @@
#include "PPCMCTargetDesc.h"
#include "InstPrinter/PPCInstPrinter.h"
#include "PPCMCAsmInfo.h"
+#include "PPCTargetStreamer.h"
#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_INSTRINFO_MC_DESC
@@ -34,6 +37,9 @@
using namespace llvm;
+// Pin the vtable to this file.
+PPCTargetStreamer::~PPCTargetStreamer() {}
+
static MCInstrInfo *createPPCMCInstrInfo() {
MCInstrInfo *X = new MCInstrInfo();
InitPPCMCInstrInfo(X);
@@ -42,7 +48,8 @@ static MCInstrInfo *createPPCMCInstrInfo() {
static MCRegisterInfo *createPPCMCRegisterInfo(StringRef TT) {
Triple TheTriple(TT);
- bool isPPC64 = (TheTriple.getArch() == Triple::ppc64);
+ bool isPPC64 = (TheTriple.getArch() == Triple::ppc64 ||
+ TheTriple.getArch() == Triple::ppc64le);
unsigned Flavour = isPPC64 ? 0 : 1;
unsigned RA = isPPC64 ? PPC::LR8 : PPC::LR;
@@ -58,9 +65,10 @@ static MCSubtargetInfo *createPPCMCSubtargetInfo(StringRef TT, StringRef CPU,
return X;
}
-static MCAsmInfo *createPPCMCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createPPCMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) {
Triple TheTriple(TT);
- bool isPPC64 = TheTriple.getArch() == Triple::ppc64;
+ bool isPPC64 = (TheTriple.getArch() == Triple::ppc64 ||
+ TheTriple.getArch() == Triple::ppc64le);
MCAsmInfo *MAI;
if (TheTriple.isOSDarwin())
@@ -69,9 +77,10 @@ static MCAsmInfo *createPPCMCAsmInfo(const Target &T, StringRef TT) {
MAI = new PPCLinuxMCAsmInfo(isPPC64);
// Initial state of the frame pointer is R1.
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(isPPC64? PPC::X1 : PPC::R1, 0);
- MAI->addInitialFrameState(0, Dst, Src);
+ unsigned Reg = isPPC64 ? PPC::X1 : PPC::R1;
+ MCCFIInstruction Inst =
+ MCCFIInstruction::createDefCfa(0, MRI.getDwarfRegNum(Reg, true), 0);
+ MAI->addInitialFrameState(Inst);
return MAI;
}
@@ -90,13 +99,37 @@ static MCCodeGenInfo *createPPCMCCodeGenInfo(StringRef TT, Reloc::Model RM,
}
if (CM == CodeModel::Default) {
Triple T(TT);
- if (!T.isOSDarwin() && T.getArch() == Triple::ppc64)
+ if (!T.isOSDarwin() &&
+ (T.getArch() == Triple::ppc64 || T.getArch() == Triple::ppc64le))
CM = CodeModel::Medium;
}
X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
+namespace {
+class PPCTargetAsmStreamer : public PPCTargetStreamer {
+ formatted_raw_ostream &OS;
+
+public:
+ PPCTargetAsmStreamer(formatted_raw_ostream &OS) : OS(OS) {}
+ virtual void emitTCEntry(const MCSymbol &S) {
+ OS << "\t.tc ";
+ OS << S.getName();
+ OS << "[TC],";
+ OS << S.getName();
+ OS << '\n';
+ }
+};
+
+class PPCTargetELFStreamer : public PPCTargetStreamer {
+ virtual void emitTCEntry(const MCSymbol &S) {
+ // Creates a R_PPC64_TOC relocation
+ Streamer->EmitSymbolValue(&S, 8);
+ }
+};
+}
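
For a symbol named foo (an illustrative name, not from the patch), the textual streamer above emits ".tc foo[TC],foo"; a standalone sketch of the same formatting against a plain string stream:

    #include <cassert>
    #include <sstream>
    #include <string>

    // Same output shape as PPCTargetAsmStreamer::emitTCEntry, minus MCStreamer.
    static std::string tcEntry(const std::string &Name) {
      std::ostringstream OS;
      OS << "\t.tc " << Name << "[TC]," << Name << '\n';
      return OS.str();
    }

    int main() {
      assert(tcEntry("foo") == "\t.tc foo[TC],foo\n");
      return 0;
    }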
+
// This is duplicated code. Refactor this.
static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
MCContext &Ctx, MCAsmBackend &MAB,
@@ -107,7 +140,20 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
if (Triple(TT).isOSDarwin())
return createMachOStreamer(Ctx, MAB, OS, Emitter, RelaxAll);
- return createELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack);
+ PPCTargetStreamer *S = new PPCTargetELFStreamer();
+ return createELFStreamer(Ctx, S, MAB, OS, Emitter, RelaxAll, NoExecStack);
+}
+
+static MCStreamer *
+createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
+ bool isVerboseAsm, bool useLoc, bool useCFI,
+ bool useDwarfDirectory, MCInstPrinter *InstPrint,
+ MCCodeEmitter *CE, MCAsmBackend *TAB, bool ShowInst) {
+ PPCTargetStreamer *S = new PPCTargetAsmStreamer(OS);
+
+ return llvm::createAsmStreamer(Ctx, S, OS, isVerboseAsm, useLoc, useCFI,
+ useDwarfDirectory, InstPrint, CE, TAB,
+ ShowInst);
}
static MCInstPrinter *createPPCMCInstPrinter(const Target &T,
@@ -116,45 +162,65 @@ static MCInstPrinter *createPPCMCInstPrinter(const Target &T,
const MCInstrInfo &MII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
- return new PPCInstPrinter(MAI, MII, MRI, SyntaxVariant);
+ bool isDarwin = Triple(STI.getTargetTriple()).isOSDarwin();
+ return new PPCInstPrinter(MAI, MII, MRI, isDarwin);
}
extern "C" void LLVMInitializePowerPCTargetMC() {
// Register the MC asm info.
RegisterMCAsmInfoFn C(ThePPC32Target, createPPCMCAsmInfo);
RegisterMCAsmInfoFn D(ThePPC64Target, createPPCMCAsmInfo);
+ RegisterMCAsmInfoFn E(ThePPC64LETarget, createPPCMCAsmInfo);
// Register the MC codegen info.
TargetRegistry::RegisterMCCodeGenInfo(ThePPC32Target, createPPCMCCodeGenInfo);
TargetRegistry::RegisterMCCodeGenInfo(ThePPC64Target, createPPCMCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(ThePPC64LETarget,
+ createPPCMCCodeGenInfo);
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(ThePPC32Target, createPPCMCInstrInfo);
TargetRegistry::RegisterMCInstrInfo(ThePPC64Target, createPPCMCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(ThePPC64LETarget,
+ createPPCMCInstrInfo);
// Register the MC register info.
TargetRegistry::RegisterMCRegInfo(ThePPC32Target, createPPCMCRegisterInfo);
TargetRegistry::RegisterMCRegInfo(ThePPC64Target, createPPCMCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(ThePPC64LETarget, createPPCMCRegisterInfo);
// Register the MC subtarget info.
TargetRegistry::RegisterMCSubtargetInfo(ThePPC32Target,
createPPCMCSubtargetInfo);
TargetRegistry::RegisterMCSubtargetInfo(ThePPC64Target,
createPPCMCSubtargetInfo);
+ TargetRegistry::RegisterMCSubtargetInfo(ThePPC64LETarget,
+ createPPCMCSubtargetInfo);
// Register the MC Code Emitter
TargetRegistry::RegisterMCCodeEmitter(ThePPC32Target, createPPCMCCodeEmitter);
TargetRegistry::RegisterMCCodeEmitter(ThePPC64Target, createPPCMCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(ThePPC64LETarget,
+ createPPCMCCodeEmitter);
// Register the asm backend.
TargetRegistry::RegisterMCAsmBackend(ThePPC32Target, createPPCAsmBackend);
TargetRegistry::RegisterMCAsmBackend(ThePPC64Target, createPPCAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(ThePPC64LETarget, createPPCAsmBackend);
// Register the object streamer.
TargetRegistry::RegisterMCObjectStreamer(ThePPC32Target, createMCStreamer);
TargetRegistry::RegisterMCObjectStreamer(ThePPC64Target, createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(ThePPC64LETarget, createMCStreamer);
+
+ // Register the asm streamer.
+ TargetRegistry::RegisterAsmStreamer(ThePPC32Target, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(ThePPC64Target, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(ThePPC64LETarget, createMCAsmStreamer);
// Register the MCInstPrinter.
TargetRegistry::RegisterMCInstPrinter(ThePPC32Target, createPPCMCInstPrinter);
TargetRegistry::RegisterMCInstPrinter(ThePPC64Target, createPPCMCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(ThePPC64LETarget,
+ createPPCMCInstPrinter);
}
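
The two target streamers added above emit the same TOC entry in different ways: for a hypothetical symbol foo, the assembly path's emitTCEntry prints

    .tc foo[TC],foo

while the ELF path emits an 8-byte symbol value that the object writer later turns into an R_PPC64_TOC relocation.
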
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
index 38a7420d972d..0b0ca241e26b 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
@@ -33,18 +33,24 @@ class raw_ostream;
extern Target ThePPC32Target;
extern Target ThePPC64Target;
+extern Target ThePPC64LETarget;
MCCodeEmitter *createPPCMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx);
-MCAsmBackend *createPPCAsmBackend(const Target &T, StringRef TT, StringRef CPU);
+MCAsmBackend *createPPCAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
/// createPPCELFObjectWriter - Construct a PPC ELF object writer.
MCObjectWriter *createPPCELFObjectWriter(raw_ostream &OS,
bool Is64Bit,
uint8_t OSABI);
+/// createPPCMachObjectWriter - Construct a PPC Mach-O object writer.
+MCObjectWriter *createPPCMachObjectWriter(raw_ostream &OS, bool Is64Bit,
+ uint32_t CPUType,
+ uint32_t CPUSubtype);
} // End llvm namespace
// Generated files will use "namespace PPC". To avoid symbol clash,
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
new file mode 100644
index 000000000000..bbafe2e78955
--- /dev/null
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
@@ -0,0 +1,389 @@
+//===-- PPCMachObjectWriter.cpp - PPC Mach-O Writer -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/PPCMCTargetDesc.h"
+#include "MCTargetDesc/PPCFixupKinds.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MachO.h"
+
+using namespace llvm;
+
+namespace {
+class PPCMachObjectWriter : public MCMachObjectTargetWriter {
+ bool RecordScatteredRelocation(MachObjectWriter *Writer,
+ const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ unsigned Log2Size, uint64_t &FixedValue);
+
+ void RecordPPCRelocation(MachObjectWriter *Writer, const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, uint64_t &FixedValue);
+
+public:
+ PPCMachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype)
+ : MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype,
+ /*UseAggressiveSymbolFolding=*/Is64Bit) {}
+
+ void RecordRelocation(MachObjectWriter *Writer, const MCAssembler &Asm,
+ const MCAsmLayout &Layout, const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ if (Writer->is64Bit()) {
+ report_fatal_error("Relocation emission for MachO/PPC64 unimplemented.");
+ } else
+ RecordPPCRelocation(Writer, Asm, Layout, Fragment, Fixup, Target,
+ FixedValue);
+ }
+};
+}
+
+/// Computes the log2 of the size of the relocation,
+/// used for relocation_info::r_length.
+static unsigned getFixupKindLog2Size(unsigned Kind) {
+ switch (Kind) {
+ default:
+ report_fatal_error("log2size(FixupKind): Unhandled fixup kind!");
+ case FK_PCRel_1:
+ case FK_Data_1:
+ return 0;
+ case FK_PCRel_2:
+ case FK_Data_2:
+ return 1;
+ case FK_PCRel_4:
+ case PPC::fixup_ppc_brcond14:
+ case PPC::fixup_ppc_half16:
+ case PPC::fixup_ppc_br24:
+ case FK_Data_4:
+ return 2;
+ case FK_PCRel_8:
+ case FK_Data_8:
+ return 3;
+ }
+ return 0;
+}
+
+/// Translates generic PPC fixup kind to Mach-O/PPC relocation type enum.
+/// Outline based on PPCELFObjectWriter::getRelocTypeInner().
+static unsigned getRelocType(const MCValue &Target,
+ const MCFixupKind FixupKind, // from
+ // Fixup.getKind()
+ const bool IsPCRel) {
+ const MCSymbolRefExpr::VariantKind Modifier =
+ Target.isAbsolute() ? MCSymbolRefExpr::VK_None
+ : Target.getSymA()->getKind();
+ // determine the type of the relocation
+ unsigned Type = MachO::GENERIC_RELOC_VANILLA;
+ if (IsPCRel) { // relative to PC
+ switch ((unsigned)FixupKind) {
+ default:
+ report_fatal_error("Unimplemented fixup kind (relative)");
+ case PPC::fixup_ppc_br24:
+ Type = MachO::PPC_RELOC_BR24; // R_PPC_REL24
+ break;
+ case PPC::fixup_ppc_brcond14:
+ Type = MachO::PPC_RELOC_BR14;
+ break;
+ case PPC::fixup_ppc_half16:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unsupported modifier for half16 fixup");
+ case MCSymbolRefExpr::VK_PPC_HA:
+ Type = MachO::PPC_RELOC_HA16;
+ break;
+ case MCSymbolRefExpr::VK_PPC_LO:
+ Type = MachO::PPC_RELOC_LO16;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HI:
+ Type = MachO::PPC_RELOC_HI16;
+ break;
+ }
+ break;
+ }
+ } else {
+ switch ((unsigned)FixupKind) {
+ default:
+ report_fatal_error("Unimplemented fixup kind (absolute)!");
+ case PPC::fixup_ppc_half16:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unsupported modifier for half16 fixup");
+ case MCSymbolRefExpr::VK_PPC_HA:
+ Type = MachO::PPC_RELOC_HA16_SECTDIFF;
+ break;
+ case MCSymbolRefExpr::VK_PPC_LO:
+ Type = MachO::PPC_RELOC_LO16_SECTDIFF;
+ break;
+ case MCSymbolRefExpr::VK_PPC_HI:
+ Type = MachO::PPC_RELOC_HI16_SECTDIFF;
+ break;
+ }
+ break;
+ case FK_Data_4:
+ break;
+ case FK_Data_2:
+ break;
+ }
+ }
+ return Type;
+}
+
+static void makeRelocationInfo(MachO::any_relocation_info &MRE,
+ const uint32_t FixupOffset, const uint32_t Index,
+ const unsigned IsPCRel, const unsigned Log2Size,
+ const unsigned IsExtern, const unsigned Type) {
+ MRE.r_word0 = FixupOffset;
+ // The bitfield offsets that work (as determined by trial-and-error)
+ // are different from what is documented in the Mach-O manuals.
+ // This appears to be an endianness issue; reversing the order of the
+ // documented bitfields in <llvm/Support/MachO.h> fixes this (but
+ // breaks x86/ARM assembly).
+ MRE.r_word1 = ((Index << 8) | // was << 0
+ (IsPCRel << 7) | // was << 24
+ (Log2Size << 5) | // was << 25
+ (IsExtern << 4) | // was << 27
+ (Type << 0)); // was << 28
+}
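
The bit positions used by makeRelocationInfo differ from the documented relocation_info layout, so a decoding sketch (illustrative only, not part of the patch) makes the packing explicit:

    // Decode r_word1 as packed by makeRelocationInfo above.
    static void decodeRelocationWord1(unsigned Word1) {
      unsigned Type     =  Word1        & 0xF;       // bits 0-3
      unsigned IsExtern = (Word1 >> 4)  & 0x1;       // bit 4
      unsigned Log2Size = (Word1 >> 5)  & 0x3;       // bits 5-6
      unsigned IsPCRel  = (Word1 >> 7)  & 0x1;       // bit 7
      unsigned Index    = (Word1 >> 8)  & 0xFFFFFF;  // bits 8-31
      (void)Type; (void)IsExtern; (void)Log2Size; (void)IsPCRel; (void)Index;
    }
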
+
+static void
+makeScatteredRelocationInfo(MachO::any_relocation_info &MRE,
+ const uint32_t Addr, const unsigned Type,
+ const unsigned Log2Size, const unsigned IsPCRel,
+ const uint32_t Value2) {
+ // For notes on bitfield positions and endianness, see:
+ // https://developer.apple.com/library/mac/documentation/developertools/conceptual/MachORuntime/Reference/reference.html#//apple_ref/doc/uid/20001298-scattered_relocation_entry
+ MRE.r_word0 = ((Addr << 0) | (Type << 24) | (Log2Size << 28) |
+ (IsPCRel << 30) | MachO::R_SCATTERED);
+ MRE.r_word1 = Value2;
+}
+
+/// Compute fixup offset (address).
+static uint32_t getFixupOffset(const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup) {
+ uint32_t FixupOffset = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
+ // On Mach-O, fixup_ppc_half16 relocations must refer to the start of the
+ // instruction, not to the second halfword as they do on ELF.
+ if (unsigned(Fixup.getKind()) == PPC::fixup_ppc_half16)
+ FixupOffset &= ~uint32_t(3);
+ return FixupOffset;
+}
+
+/// \return false if falling back to using a non-scattered relocation,
+/// otherwise true for a normal scattered relocation.
+/// Based on X86MachObjectWriter::RecordScatteredRelocation and
+/// ARMMachObjectWriter::RecordScatteredRelocation.
+bool PPCMachObjectWriter::RecordScatteredRelocation(
+ MachObjectWriter *Writer, const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
+ unsigned Log2Size, uint64_t &FixedValue) {
+ // The caller already computes these; can we just pass them in and reuse them?
+ const uint32_t FixupOffset = getFixupOffset(Layout, Fragment, Fixup);
+ const MCFixupKind FK = Fixup.getKind();
+ const unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, FK);
+ const unsigned Type = getRelocType(Target, FK, IsPCRel);
+
+ // Is this a local or SECTDIFF relocation entry?
+ // SECTDIFF relocation entries have symbol subtractions,
+ // and require two entries, the first for the add-symbol value,
+ // the second for the subtract-symbol value.
+
+ // See <reloc.h>.
+ const MCSymbol *A = &Target.getSymA()->getSymbol();
+ MCSymbolData *A_SD = &Asm.getSymbolData(*A);
+
+ if (!A_SD->getFragment())
+ report_fatal_error("symbol '" + A->getName() +
+ "' can not be undefined in a subtraction expression");
+
+ uint32_t Value = Writer->getSymbolAddress(A_SD, Layout);
+ uint64_t SecAddr =
+ Writer->getSectionAddress(A_SD->getFragment()->getParent());
+ FixedValue += SecAddr;
+ uint32_t Value2 = 0;
+
+ if (const MCSymbolRefExpr *B = Target.getSymB()) {
+ MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
+
+ if (!B_SD->getFragment())
+ report_fatal_error("symbol '" + B->getSymbol().getName() +
+ "' can not be undefined in a subtraction expression");
+
+ // FIXME: is Type correct? see include/llvm/Support/MachO.h
+ Value2 = Writer->getSymbolAddress(B_SD, Layout);
+ FixedValue -= Writer->getSectionAddress(B_SD->getFragment()->getParent());
+ }
+ // FIXME: does FixedValue get used??
+
+ // Relocations are written out in reverse order, so the PAIR comes first.
+ if (Type == MachO::PPC_RELOC_SECTDIFF ||
+ Type == MachO::PPC_RELOC_HI16_SECTDIFF ||
+ Type == MachO::PPC_RELOC_LO16_SECTDIFF ||
+ Type == MachO::PPC_RELOC_HA16_SECTDIFF ||
+ Type == MachO::PPC_RELOC_LO14_SECTDIFF ||
+ Type == MachO::PPC_RELOC_LOCAL_SECTDIFF) {
+ // X86 had this piece, but ARM does not
+ // If the offset is too large to fit in a scattered relocation,
+ // we're hosed. It's an unfortunate limitation of the MachO format.
+ if (FixupOffset > 0xffffff) {
+ char Buffer[32];
+ format("0x%x", FixupOffset).print(Buffer, sizeof(Buffer));
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ Twine("Section too large, can't encode "
+ "r_address (") +
+ Buffer + ") into 24 bits of scattered "
+ "relocation entry.");
+ llvm_unreachable("fatal error returned?!");
+ }
+
+ // Is this supposed to follow MCTargetDesc/PPCAsmBackend.cpp:adjustFixupValue()?
+ // see PPCMCExpr::EvaluateAsRelocatableImpl()
+ uint32_t other_half = 0;
+ switch (Type) {
+ case MachO::PPC_RELOC_LO16_SECTDIFF:
+ other_half = (FixedValue >> 16) & 0xffff;
+ // applyFixupOffset no longer extracts the high part because it now assumes
+ // this was already done.  That does not hold for the FixedValue needed with
+ // Mach-O relocs, so we need to adjust FixedValue again here.
+ FixedValue &= 0xffff;
+ break;
+ case MachO::PPC_RELOC_HA16_SECTDIFF:
+ other_half = FixedValue & 0xffff;
+ FixedValue =
+ ((FixedValue >> 16) + ((FixedValue & 0x8000) ? 1 : 0)) & 0xffff;
+ break;
+ case MachO::PPC_RELOC_HI16_SECTDIFF:
+ other_half = FixedValue & 0xffff;
+ FixedValue = (FixedValue >> 16) & 0xffff;
+ break;
+ default:
+ llvm_unreachable("Invalid PPC scattered relocation type.");
+ break;
+ }
+
+ MachO::any_relocation_info MRE;
+ makeScatteredRelocationInfo(MRE, other_half, MachO::GENERIC_RELOC_PAIR,
+ Log2Size, IsPCRel, Value2);
+ Writer->addRelocation(Fragment->getParent(), MRE);
+ } else {
+ // If the offset is more than 24-bits, it won't fit in a scattered
+ // relocation offset field, so we fall back to using a non-scattered
+ // relocation. This is a bit risky, as if the offset reaches out of
+ // the block and the linker is doing scattered loading on this
+ // symbol, things can go badly.
+ //
+ // Required for 'as' compatibility.
+ if (FixupOffset > 0xffffff)
+ return false;
+ }
+ MachO::any_relocation_info MRE;
+ makeScatteredRelocationInfo(MRE, FixupOffset, Type, Log2Size, IsPCRel, Value);
+ Writer->addRelocation(Fragment->getParent(), MRE);
+ return true;
+}
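
The HA16/LO16 adjustments in the SECTDIFF cases above split FixedValue so that the high half compensates for the sign extension of the low half. A worked example with an arbitrary value (illustrative only):

    #include <cassert>
    #include <cstdint>
    int main() {
      uint32_t V  = 0x12348765;                                     // example value
      uint16_t Lo = V & 0xffff;                                     // 0x8765
      uint16_t Ha = ((V >> 16) + ((V & 0x8000) ? 1 : 0)) & 0xffff;  // 0x1235
      // Ha is rounded up because bit 15 of Lo is set; adding the
      // sign-extended low half then reconstructs the original value.
      assert(uint32_t((Ha << 16) + int16_t(Lo)) == V);
      return 0;
    }
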
+
+// see PPCELFObjectWriter for a general outline of cases
+void PPCMachObjectWriter::RecordPPCRelocation(
+ MachObjectWriter *Writer, const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ const MCFixupKind FK = Fixup.getKind(); // unsigned
+ const unsigned Log2Size = getFixupKindLog2Size(FK);
+ const bool IsPCRel = Writer->isFixupKindPCRel(Asm, FK);
+ const unsigned RelocType = getRelocType(Target, FK, IsPCRel);
+
+ // If this is a difference or a defined symbol plus an offset, then we need a
+ // scattered relocation entry. Differences always require scattered
+ // relocations.
+ if (Target.getSymB() &&
+ // Q: are branch targets ever scattered?
+ RelocType != MachO::PPC_RELOC_BR24 &&
+ RelocType != MachO::PPC_RELOC_BR14) {
+ RecordScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup, Target,
+ Log2Size, FixedValue);
+ return;
+ }
+
+ // this doesn't seem right for RIT_PPC_BR24
+ // Get the symbol data, if any.
+ MCSymbolData *SD = 0;
+ if (Target.getSymA())
+ SD = &Asm.getSymbolData(Target.getSymA()->getSymbol());
+
+ // See <reloc.h>.
+ const uint32_t FixupOffset = getFixupOffset(Layout, Fragment, Fixup);
+ unsigned Index = 0;
+ unsigned IsExtern = 0;
+ unsigned Type = RelocType;
+
+ if (Target.isAbsolute()) { // constant
+ // SymbolNum of 0 indicates the absolute section.
+ //
+ // FIXME: Currently, these are never generated (see code below). I cannot
+ // find a case where they are actually emitted.
+ report_fatal_error("FIXME: relocations to absolute targets "
+ "not yet implemented");
+ // the above line stolen from ARM, not sure
+ } else {
+ // Resolve constant variables.
+ if (SD->getSymbol().isVariable()) {
+ int64_t Res;
+ if (SD->getSymbol().getVariableValue()->EvaluateAsAbsolute(
+ Res, Layout, Writer->getSectionAddressMap())) {
+ FixedValue = Res;
+ return;
+ }
+ }
+
+ // Check whether we need an external or internal relocation.
+ if (Writer->doesSymbolRequireExternRelocation(SD)) {
+ IsExtern = 1;
+ Index = SD->getIndex();
+ // For external relocations, make sure to offset the fixup value to
+ // compensate for the addend of the symbol address, if it was
+ // undefined. This occurs with weak definitions, for example.
+ if (!SD->Symbol->isUndefined())
+ FixedValue -= Layout.getSymbolOffset(SD);
+ } else {
+ // The index is the section ordinal (1-based).
+ const MCSectionData &SymSD =
+ Asm.getSectionData(SD->getSymbol().getSection());
+ Index = SymSD.getOrdinal() + 1;
+ FixedValue += Writer->getSectionAddress(&SymSD);
+ }
+ if (IsPCRel)
+ FixedValue -= Writer->getSectionAddress(Fragment->getParent());
+ }
+
+ // struct relocation_info (8 bytes)
+ MachO::any_relocation_info MRE;
+ makeRelocationInfo(MRE, FixupOffset, Index, IsPCRel, Log2Size, IsExtern,
+ Type);
+ Writer->addRelocation(Fragment->getParent(), MRE);
+}
+
+MCObjectWriter *llvm::createPPCMachObjectWriter(raw_ostream &OS, bool Is64Bit,
+ uint32_t CPUType,
+ uint32_t CPUSubtype) {
+ return createMachObjectWriter(
+ new PPCMachObjectWriter(Is64Bit, CPUType, CPUSubtype), OS,
+ /*IsLittleEndian=*/false);
+}
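
This writer is only constructed through the factory declared in PPCMCTargetDesc.h. A minimal sketch of the assumed call site in a Darwin asm backend (not shown in this patch; the CPU type constants come from llvm/Support/MachO.h):

    static MCObjectWriter *makePPCDarwinObjectWriter(raw_ostream &OS, bool Is64Bit) {
      return createPPCMachObjectWriter(
          OS, Is64Bit,
          Is64Bit ? MachO::CPU_TYPE_POWERPC64 : MachO::CPU_TYPE_POWERPC,
          MachO::CPU_SUBTYPE_POWERPC_ALL);
    }
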
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp
index 853e5053956b..63facc5446d3 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp
@@ -26,6 +26,22 @@ PPC::Predicate PPC::InvertPredicate(PPC::Predicate Opcode) {
case PPC::PRED_LE: return PPC::PRED_GT;
case PPC::PRED_NU: return PPC::PRED_UN;
case PPC::PRED_UN: return PPC::PRED_NU;
+ case PPC::PRED_EQ_MINUS: return PPC::PRED_NE_PLUS;
+ case PPC::PRED_NE_MINUS: return PPC::PRED_EQ_PLUS;
+ case PPC::PRED_LT_MINUS: return PPC::PRED_GE_PLUS;
+ case PPC::PRED_GE_MINUS: return PPC::PRED_LT_PLUS;
+ case PPC::PRED_GT_MINUS: return PPC::PRED_LE_PLUS;
+ case PPC::PRED_LE_MINUS: return PPC::PRED_GT_PLUS;
+ case PPC::PRED_NU_MINUS: return PPC::PRED_UN_PLUS;
+ case PPC::PRED_UN_MINUS: return PPC::PRED_NU_PLUS;
+ case PPC::PRED_EQ_PLUS: return PPC::PRED_NE_MINUS;
+ case PPC::PRED_NE_PLUS: return PPC::PRED_EQ_MINUS;
+ case PPC::PRED_LT_PLUS: return PPC::PRED_GE_MINUS;
+ case PPC::PRED_GE_PLUS: return PPC::PRED_LT_MINUS;
+ case PPC::PRED_GT_PLUS: return PPC::PRED_LE_MINUS;
+ case PPC::PRED_LE_PLUS: return PPC::PRED_GT_MINUS;
+ case PPC::PRED_NU_PLUS: return PPC::PRED_UN_MINUS;
+ case PPC::PRED_UN_PLUS: return PPC::PRED_NU_MINUS;
}
llvm_unreachable("Unknown PPC branch opcode!");
}
@@ -40,6 +56,22 @@ PPC::Predicate PPC::getSwappedPredicate(PPC::Predicate Opcode) {
case PPC::PRED_LE: return PPC::PRED_GE;
case PPC::PRED_NU: return PPC::PRED_NU;
case PPC::PRED_UN: return PPC::PRED_UN;
+ case PPC::PRED_EQ_MINUS: return PPC::PRED_EQ_MINUS;
+ case PPC::PRED_NE_MINUS: return PPC::PRED_NE_MINUS;
+ case PPC::PRED_LT_MINUS: return PPC::PRED_GT_MINUS;
+ case PPC::PRED_GE_MINUS: return PPC::PRED_LE_MINUS;
+ case PPC::PRED_GT_MINUS: return PPC::PRED_LT_MINUS;
+ case PPC::PRED_LE_MINUS: return PPC::PRED_GE_MINUS;
+ case PPC::PRED_NU_MINUS: return PPC::PRED_NU_MINUS;
+ case PPC::PRED_UN_MINUS: return PPC::PRED_UN_MINUS;
+ case PPC::PRED_EQ_PLUS: return PPC::PRED_EQ_PLUS;
+ case PPC::PRED_NE_PLUS: return PPC::PRED_NE_PLUS;
+ case PPC::PRED_LT_PLUS: return PPC::PRED_GT_PLUS;
+ case PPC::PRED_GE_PLUS: return PPC::PRED_LE_PLUS;
+ case PPC::PRED_GT_PLUS: return PPC::PRED_LT_PLUS;
+ case PPC::PRED_LE_PLUS: return PPC::PRED_GE_PLUS;
+ case PPC::PRED_NU_PLUS: return PPC::PRED_NU_PLUS;
+ case PPC::PRED_UN_PLUS: return PPC::PRED_UN_PLUS;
}
llvm_unreachable("Unknown PPC branch opcode!");
}
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
index 444758cc8b6f..d498c2f8f446 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
@@ -25,14 +25,30 @@ namespace llvm {
namespace PPC {
/// Predicate - These are "(BI << 5) | BO" for various predicates.
enum Predicate {
- PRED_LT = (0 << 5) | 12,
- PRED_LE = (1 << 5) | 4,
- PRED_EQ = (2 << 5) | 12,
- PRED_GE = (0 << 5) | 4,
- PRED_GT = (1 << 5) | 12,
- PRED_NE = (2 << 5) | 4,
- PRED_UN = (3 << 5) | 12,
- PRED_NU = (3 << 5) | 4
+ PRED_LT = (0 << 5) | 12,
+ PRED_LE = (1 << 5) | 4,
+ PRED_EQ = (2 << 5) | 12,
+ PRED_GE = (0 << 5) | 4,
+ PRED_GT = (1 << 5) | 12,
+ PRED_NE = (2 << 5) | 4,
+ PRED_UN = (3 << 5) | 12,
+ PRED_NU = (3 << 5) | 4,
+ PRED_LT_MINUS = (0 << 5) | 14,
+ PRED_LE_MINUS = (1 << 5) | 6,
+ PRED_EQ_MINUS = (2 << 5) | 14,
+ PRED_GE_MINUS = (0 << 5) | 6,
+ PRED_GT_MINUS = (1 << 5) | 14,
+ PRED_NE_MINUS = (2 << 5) | 6,
+ PRED_UN_MINUS = (3 << 5) | 14,
+ PRED_NU_MINUS = (3 << 5) | 6,
+ PRED_LT_PLUS = (0 << 5) | 15,
+ PRED_LE_PLUS = (1 << 5) | 7,
+ PRED_EQ_PLUS = (2 << 5) | 15,
+ PRED_GE_PLUS = (0 << 5) | 7,
+ PRED_GT_PLUS = (1 << 5) | 15,
+ PRED_NE_PLUS = (2 << 5) | 7,
+ PRED_UN_PLUS = (3 << 5) | 15,
+ PRED_NU_PLUS = (3 << 5) | 7
};
/// Invert the specified predicate. != -> ==, < -> >=.
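
The new _MINUS and _PLUS predicates reuse the "(BI << 5) | BO" encoding and differ from the base forms only in the two BO branch-prediction hint bits ("-" marks a branch as unlikely, "+" as likely). A small check of the arithmetic (illustrative only):

    #include <cassert>
    int main() {
      const unsigned PRED_LT       = (0 << 5) | 12;
      const unsigned PRED_LT_MINUS = (0 << 5) | 14;
      const unsigned PRED_LT_PLUS  = (0 << 5) | 15;
      assert(PRED_LT_MINUS == (PRED_LT | 2));  // hint bits 0b10: unlikely
      assert(PRED_LT_PLUS  == (PRED_LT | 3));  // hint bits 0b11: likely
      return 0;
    }
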
diff --git a/lib/Target/PowerPC/Makefile b/lib/Target/PowerPC/Makefile
index 6666694ecc7b..21fdcd9350e1 100644
--- a/lib/Target/PowerPC/Makefile
+++ b/lib/Target/PowerPC/Makefile
@@ -16,7 +16,7 @@ BUILT_SOURCES = PPCGenRegisterInfo.inc PPCGenAsmMatcher.inc \
PPCGenAsmWriter.inc PPCGenCodeEmitter.inc \
PPCGenInstrInfo.inc PPCGenDAGISel.inc \
PPCGenSubtargetInfo.inc PPCGenCallingConv.inc \
- PPCGenMCCodeEmitter.inc
+ PPCGenMCCodeEmitter.inc PPCGenFastISel.inc
DIRS = AsmParser InstPrinter TargetInfo MCTargetDesc
diff --git a/lib/Target/PowerPC/PPC.h b/lib/Target/PowerPC/PPC.h
index b4be51a8caec..f0d5af24466e 100644
--- a/lib/Target/PowerPC/PPC.h
+++ b/lib/Target/PowerPC/PPC.h
@@ -30,7 +30,10 @@ namespace llvm {
class AsmPrinter;
class MCInst;
- FunctionPass *createPPCCTRLoops();
+ FunctionPass *createPPCCTRLoops(PPCTargetMachine &TM);
+#ifndef NDEBUG
+ FunctionPass *createPPCCTRLoopsVerify();
+#endif
FunctionPass *createPPCEarlyReturnPass();
FunctionPass *createPPCBranchSelectionPass();
FunctionPass *createPPCISelDag(PPCTargetMachine &TM);
@@ -71,18 +74,21 @@ namespace llvm {
/// The next are not flags but distinct values.
MO_ACCESS_MASK = 0xf0,
- /// MO_LO16, MO_HA16 - lo16(symbol) and ha16(symbol)
- MO_LO16 = 1 << 4,
- MO_HA16 = 2 << 4,
+ /// MO_LO, MO_HA - lo16(symbol) and ha16(symbol)
+ MO_LO = 1 << 4,
+ MO_HA = 2 << 4,
- MO_TPREL16_HA = 3 << 4,
- MO_TPREL16_LO = 4 << 4,
+ MO_TPREL_LO = 4 << 4,
+ MO_TPREL_HA = 3 << 4,
/// These values identify relocations on immediates folded
/// into memory operations.
- MO_DTPREL16_LO = 5 << 4,
- MO_TLSLD16_LO = 6 << 4,
- MO_TOC16_LO = 7 << 4
+ MO_DTPREL_LO = 5 << 4,
+ MO_TLSLD_LO = 6 << 4,
+ MO_TOC_LO = 7 << 4,
+
+ // Symbol for VK_PPC_TLS fixup attached to an ADD instruction
+ MO_TLS = 8 << 4
};
} // end namespace PPCII
diff --git a/lib/Target/PowerPC/PPC.td b/lib/Target/PowerPC/PPC.td
index eb73c676ad71..54e3d400a9d9 100644
--- a/lib/Target/PowerPC/PPC.td
+++ b/lib/Target/PowerPC/PPC.td
@@ -57,6 +57,8 @@ def FeatureMFOCRF : SubtargetFeature<"mfocrf","HasMFOCRF", "true",
"Enable the MFOCRF instruction">;
def FeatureFSqrt : SubtargetFeature<"fsqrt","HasFSQRT", "true",
"Enable the fsqrt instruction">;
+def FeatureFCPSGN : SubtargetFeature<"fcpsgn", "HasFCPSGN", "true",
+ "Enable the fcpsgn instruction">;
def FeatureFRE : SubtargetFeature<"fre", "HasFRE", "true",
"Enable the fre instruction">;
def FeatureFRES : SubtargetFeature<"fres", "HasFRES", "true",
@@ -85,6 +87,13 @@ def FeatureBookE : SubtargetFeature<"booke", "IsBookE", "true",
"Enable Book E instructions">;
def FeatureQPX : SubtargetFeature<"qpx","HasQPX", "true",
"Enable QPX instructions">;
+def FeatureVSX : SubtargetFeature<"vsx","HasVSX", "true",
+ "Enable VSX instructions">;
+
+def DeprecatedMFTB : SubtargetFeature<"", "DeprecatedMFTB", "true",
+ "Treat mftb as deprecated">;
+def DeprecatedDST : SubtargetFeature<"", "DeprecatedDST", "true",
+ "Treat vector data stream cache control instructions as deprecated">;
// Note: Future features to add when support is extended to more
// recent ISA levels:
@@ -146,10 +155,10 @@ include "PPCInstrInfo.td"
def : Processor<"generic", G3Itineraries, [Directive32]>;
def : Processor<"440", PPC440Itineraries, [Directive440, FeatureISEL,
FeatureFRES, FeatureFRSQRTE,
- FeatureBookE]>;
+ FeatureBookE, DeprecatedMFTB]>;
def : Processor<"450", PPC440Itineraries, [Directive440, FeatureISEL,
FeatureFRES, FeatureFRSQRTE,
- FeatureBookE]>;
+ FeatureBookE, DeprecatedMFTB]>;
def : Processor<"601", G3Itineraries, [Directive601]>;
def : Processor<"602", G3Itineraries, [Directive602]>;
def : Processor<"603", G3Itineraries, [Directive603,
@@ -185,29 +194,32 @@ def : ProcessorModel<"g5", G5Model,
[Directive970, FeatureAltivec,
FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX,
FeatureFRES, FeatureFRSQRTE,
- Feature64Bit /*, Feature64BitRegs */]>;
+ Feature64Bit /*, Feature64BitRegs */,
+ DeprecatedMFTB, DeprecatedDST]>;
def : ProcessorModel<"e500mc", PPCE500mcModel,
[DirectiveE500mc, FeatureMFOCRF,
- FeatureSTFIWX, FeatureBookE, FeatureISEL]>;
+ FeatureSTFIWX, FeatureBookE, FeatureISEL,
+ DeprecatedMFTB]>;
def : ProcessorModel<"e5500", PPCE5500Model,
[DirectiveE5500, FeatureMFOCRF, Feature64Bit,
- FeatureSTFIWX, FeatureBookE, FeatureISEL]>;
+ FeatureSTFIWX, FeatureBookE, FeatureISEL,
+ DeprecatedMFTB]>;
def : ProcessorModel<"a2", PPCA2Model,
[DirectiveA2, FeatureBookE, FeatureMFOCRF,
- FeatureFSqrt, FeatureFRE, FeatureFRES,
+ FeatureFCPSGN, FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec,
FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, FeatureFPCVT, FeatureISEL,
FeaturePOPCNTD, FeatureLDBRX, Feature64Bit
- /*, Feature64BitRegs */]>;
+ /*, Feature64BitRegs */, DeprecatedMFTB]>;
def : ProcessorModel<"a2q", PPCA2Model,
[DirectiveA2, FeatureBookE, FeatureMFOCRF,
- FeatureFSqrt, FeatureFRE, FeatureFRES,
+ FeatureFCPSGN, FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec,
FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, FeatureFPCVT, FeatureISEL,
FeaturePOPCNTD, FeatureLDBRX, Feature64Bit
- /*, Feature64BitRegs */, FeatureQPX]>;
+ /*, Feature64BitRegs */, FeatureQPX, DeprecatedMFTB]>;
def : ProcessorModel<"pwr3", G5Model,
[DirectivePwr3, FeatureAltivec,
FeatureFRES, FeatureFRSQRTE, FeatureMFOCRF,
@@ -220,38 +232,48 @@ def : ProcessorModel<"pwr5", G5Model,
[DirectivePwr5, FeatureAltivec, FeatureMFOCRF,
FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES,
- FeatureSTFIWX, Feature64Bit]>;
+ FeatureSTFIWX, Feature64Bit,
+ DeprecatedMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr5x", G5Model,
[DirectivePwr5x, FeatureAltivec, FeatureMFOCRF,
FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES,
- FeatureSTFIWX, FeatureFPRND, Feature64Bit]>;
+ FeatureSTFIWX, FeatureFPRND, Feature64Bit,
+ DeprecatedMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr6", G5Model,
[DirectivePwr6, FeatureAltivec,
- FeatureMFOCRF, FeatureFSqrt, FeatureFRE,
+ FeatureMFOCRF, FeatureFCPSGN, FeatureFSqrt, FeatureFRE,
FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES,
FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX,
- FeatureFPRND, Feature64Bit /*, Feature64BitRegs */]>;
+ FeatureFPRND, Feature64Bit /*, Feature64BitRegs */,
+ DeprecatedMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr6x", G5Model,
[DirectivePwr5x, FeatureAltivec, FeatureMFOCRF,
- FeatureFSqrt, FeatureFRE, FeatureFRES,
+ FeatureFCPSGN, FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec,
FeatureSTFIWX, FeatureLFIWAX,
- FeatureFPRND, Feature64Bit]>;
+ FeatureFPRND, Feature64Bit,
+ DeprecatedMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr7", G5Model,
[DirectivePwr7, FeatureAltivec,
- FeatureMFOCRF, FeatureFSqrt, FeatureFRE,
+ FeatureMFOCRF, FeatureFCPSGN, FeatureFSqrt, FeatureFRE,
FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES,
FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, FeatureFPCVT, FeatureISEL,
FeaturePOPCNTD, FeatureLDBRX,
- Feature64Bit /*, Feature64BitRegs */]>;
+ Feature64Bit /*, Feature64BitRegs */,
+ DeprecatedMFTB, DeprecatedDST]>;
def : Processor<"ppc", G3Itineraries, [Directive32]>;
def : ProcessorModel<"ppc64", G5Model,
[Directive64, FeatureAltivec,
FeatureMFOCRF, FeatureFSqrt, FeatureFRES,
FeatureFRSQRTE, FeatureSTFIWX,
Feature64Bit /*, Feature64BitRegs */]>;
+def : ProcessorModel<"ppc64le", G5Model,
+ [Directive64, FeatureAltivec,
+ FeatureMFOCRF, FeatureFSqrt, FeatureFRES,
+ FeatureFRSQRTE, FeatureSTFIWX,
+ Feature64Bit /*, Feature64BitRegs */]>;
//===----------------------------------------------------------------------===//
// Calling Conventions
@@ -272,10 +294,20 @@ def PPCAsmParser : AsmParser {
let ShouldEmitMatchRegisterName = 0;
}
+def PPCAsmParserVariant : AsmParserVariant {
+ int Variant = 0;
+
+ // We do not use hard coded registers in asm strings. However, some
+ // InstAlias definitions use immediate literals. Set RegisterPrefix
+ // so that those are not misinterpreted as registers.
+ string RegisterPrefix = "%";
+}
+
def PPC : Target {
// Information about the instructions.
let InstructionSet = PPCInstrInfo;
let AssemblyWriters = [PPCAsmWriter];
let AssemblyParsers = [PPCAsmParser];
+ let AssemblyParserVariants = [PPCAsmParserVariant];
}
diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 3c7cc4ed0eb3..ada34ed9e18a 100644
--- a/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -20,8 +20,10 @@
#include "PPC.h"
#include "InstPrinter/PPCInstPrinter.h"
#include "MCTargetDesc/PPCPredicates.h"
+#include "MCTargetDesc/PPCMCExpr.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
+#include "PPCTargetStreamer.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -85,18 +87,6 @@ namespace {
bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &O);
-
- MachineLocation getDebugValueLocation(const MachineInstr *MI) const {
- MachineLocation Location;
- assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
- // Frame address. Currently handles register +- offset only.
- if (MI->getOperand(0).isReg() && MI->getOperand(2).isImm())
- Location.set(MI->getOperand(0).getReg(), MI->getOperand(2).getImm());
- else {
- DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n");
- }
- return Location;
- }
};
/// PPCLinuxAsmPrinter - PowerPC assembly printer, customized for Linux
@@ -213,7 +203,7 @@ void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
.getGVStubEntry(SymToPrint);
if (StubSym.getPointer() == 0)
StubSym = MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
+ StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
} else if (GV->isDeclaration() || GV->hasCommonLinkage() ||
GV->hasAvailableExternallyLinkage()) {
SymToPrint = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
@@ -223,12 +213,12 @@ void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
getHiddenGVStubEntry(SymToPrint);
if (StubSym.getPointer() == 0)
StubSym = MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
+ StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
} else {
- SymToPrint = Mang->getSymbol(GV);
+ SymToPrint = getSymbol(GV);
}
} else {
- SymToPrint = Mang->getSymbol(GV);
+ SymToPrint = getSymbol(GV);
}
O << *SymToPrint;
@@ -339,28 +329,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Lower multi-instruction pseudo operations.
switch (MI->getOpcode()) {
default: break;
- case TargetOpcode::DBG_VALUE: {
- if (!isVerbose() || !OutStreamer.hasRawTextSupport()) return;
-
- SmallString<32> Str;
- raw_svector_ostream O(Str);
- unsigned NOps = MI->getNumOperands();
- assert(NOps==4);
- O << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
- // cast away const; DIetc do not take const operands for some reason.
- DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata()));
- O << V.getName();
- O << " <- ";
- // Frame address. Currently handles register +- offset only.
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
- O << '['; printOperand(MI, 0, O); O << '+'; printOperand(MI, 1, O);
- O << ']';
- O << "+";
- printOperand(MI, NOps-2, O);
- OutStreamer.EmitRawText(O.str());
- return;
- }
-
+ case TargetOpcode::DBG_VALUE:
+ llvm_unreachable("Should be handled target independently");
case PPC::MovePCtoLR:
case PPC::MovePCtoLR8: {
// Transform %LR = MovePCtoLR
@@ -394,7 +364,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
assert(MO.isGlobal() || MO.isCPI() || MO.isJTI());
MCSymbol *MOSymbol = 0;
if (MO.isGlobal())
- MOSymbol = Mang->getSymbol(MO.getGlobal());
+ MOSymbol = getSymbol(MO.getGlobal());
else if (MO.isCPI())
MOSymbol = GetCPISymbol(MO.getIndex());
else if (MO.isJTI())
@@ -403,7 +373,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbol *TOCEntry = lookUpOrCreateTOCEntry(MOSymbol);
const MCExpr *Exp =
- MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC_ENTRY,
+ MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC,
OutContext);
TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp);
OutStreamer.EmitInstruction(TmpInst);
@@ -433,7 +403,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const GlobalAlias *GAlias = dyn_cast<GlobalAlias>(GValue);
const GlobalValue *RealGValue = GAlias ?
GAlias->resolveAliasedGlobal(false) : GValue;
- MOSymbol = Mang->getSymbol(RealGValue);
+ MOSymbol = getSymbol(RealGValue);
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(RealGValue);
IsExternal = GVar && !GVar->hasInitializer();
IsCommon = GVar && RealGValue->hasCommonLinkage();
@@ -444,11 +414,12 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
else if (MO.isJTI())
MOSymbol = GetJTISymbol(MO.getIndex());
- if (IsExternal || IsFunction || IsCommon || IsAvailExt || MO.isJTI())
+ if (IsExternal || IsFunction || IsCommon || IsAvailExt || MO.isJTI() ||
+ TM.getCodeModel() == CodeModel::Large)
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol);
const MCExpr *Exp =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC16_HA,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_HA,
OutContext);
TmpInst.getOperand(2) = MCOperand::CreateExpr(Exp);
OutStreamer.EmitInstruction(TmpInst);
@@ -469,23 +440,27 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
if (MO.isJTI())
MOSymbol = lookUpOrCreateTOCEntry(GetJTISymbol(MO.getIndex()));
- else if (MO.isCPI())
+ else if (MO.isCPI()) {
MOSymbol = GetCPISymbol(MO.getIndex());
+ if (TM.getCodeModel() == CodeModel::Large)
+ MOSymbol = lookUpOrCreateTOCEntry(MOSymbol);
+ }
else if (MO.isGlobal()) {
const GlobalValue *GValue = MO.getGlobal();
const GlobalAlias *GAlias = dyn_cast<GlobalAlias>(GValue);
const GlobalValue *RealGValue = GAlias ?
GAlias->resolveAliasedGlobal(false) : GValue;
- MOSymbol = Mang->getSymbol(RealGValue);
+ MOSymbol = getSymbol(RealGValue);
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(RealGValue);
if (!GVar || !GVar->hasInitializer() || RealGValue->hasCommonLinkage() ||
- RealGValue->hasAvailableExternallyLinkage())
+ RealGValue->hasAvailableExternallyLinkage() ||
+ TM.getCodeModel() == CodeModel::Large)
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol);
}
const MCExpr *Exp =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC16_LO,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_LO,
OutContext);
TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp);
OutStreamer.EmitInstruction(TmpInst);
@@ -510,18 +485,18 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const GlobalAlias *GAlias = dyn_cast<GlobalAlias>(GValue);
const GlobalValue *RealGValue = GAlias ?
GAlias->resolveAliasedGlobal(false) : GValue;
- MOSymbol = Mang->getSymbol(RealGValue);
+ MOSymbol = getSymbol(RealGValue);
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(RealGValue);
IsExternal = GVar && !GVar->hasInitializer();
IsFunction = !GVar;
} else if (MO.isCPI())
MOSymbol = GetCPISymbol(MO.getIndex());
- if (IsFunction || IsExternal)
+ if (IsFunction || IsExternal || TM.getCodeModel() == CodeModel::Large)
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol);
const MCExpr *Exp =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC16_LO,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_LO,
OutContext);
TmpInst.getOperand(2) = MCOperand::CreateExpr(Exp);
OutStreamer.EmitInstruction(TmpInst);
@@ -533,9 +508,9 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
- MCSymbol *MOSymbol = Mang->getSymbol(GValue);
+ MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *SymGotTprel =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL16_HA,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL_HA,
OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8)
.addReg(MI->getOperand(0).getReg())
@@ -551,9 +526,9 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
TmpInst.setOpcode(PPC::LD);
const MachineOperand &MO = MI->getOperand(1);
const GlobalValue *GValue = MO.getGlobal();
- MCSymbol *MOSymbol = Mang->getSymbol(GValue);
+ MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *Exp =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL16_LO,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL_LO,
OutContext);
TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp);
OutStreamer.EmitInstruction(TmpInst);
@@ -565,9 +540,9 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
- MCSymbol *MOSymbol = Mang->getSymbol(GValue);
+ MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *SymGotTlsGD =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD16_HA,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD_HA,
OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8)
.addReg(MI->getOperand(0).getReg())
@@ -581,9 +556,9 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
- MCSymbol *MOSymbol = Mang->getSymbol(GValue);
+ MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *SymGotTlsGD =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD16_LO,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO,
OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8)
.addReg(MI->getOperand(0).getReg())
@@ -593,7 +568,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
case PPC::GETtlsADDR: {
// Transform: %X3 = GETtlsADDR %X3, <ga:@sym>
- // Into: BL8_NOP_TLSGD __tls_get_addr(sym@tlsgd)
+ // Into: BL8_NOP_TLS __tls_get_addr(sym@tlsgd)
assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
StringRef Name = "__tls_get_addr";
@@ -602,11 +577,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbolRefExpr::Create(TlsGetAddr, MCSymbolRefExpr::VK_None, OutContext);
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
- MCSymbol *MOSymbol = Mang->getSymbol(GValue);
+ MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *SymVar =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSGD,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLSGD)
+ OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLS)
.addExpr(TlsRef)
.addExpr(SymVar));
return;
@@ -617,9 +592,9 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
- MCSymbol *MOSymbol = Mang->getSymbol(GValue);
+ MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *SymGotTlsLD =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_HA,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD_HA,
OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8)
.addReg(MI->getOperand(0).getReg())
@@ -633,9 +608,9 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
- MCSymbol *MOSymbol = Mang->getSymbol(GValue);
+ MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *SymGotTlsLD =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_LO,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO,
OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8)
.addReg(MI->getOperand(0).getReg())
@@ -645,7 +620,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
case PPC::GETtlsldADDR: {
// Transform: %X3 = GETtlsldADDR %X3, <ga:@sym>
- // Into: BL8_NOP_TLSLD __tls_get_addr(sym@tlsld)
+ // Into: BL8_NOP_TLS __tls_get_addr(sym@tlsld)
assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
StringRef Name = "__tls_get_addr";
@@ -654,11 +629,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbolRefExpr::Create(TlsGetAddr, MCSymbolRefExpr::VK_None, OutContext);
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
- MCSymbol *MOSymbol = Mang->getSymbol(GValue);
+ MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *SymVar =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSLD,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLSLD)
+ OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLS)
.addExpr(TlsRef)
.addExpr(SymVar));
return;
@@ -669,9 +644,9 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
- MCSymbol *MOSymbol = Mang->getSymbol(GValue);
+ MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *SymDtprel =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL16_HA,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_HA,
OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8)
.addReg(MI->getOperand(0).getReg())
@@ -685,9 +660,9 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
- MCSymbol *MOSymbol = Mang->getSymbol(GValue);
+ MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *SymDtprel =
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL16_LO,
+ MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_LO,
OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8)
.addReg(MI->getOperand(0).getReg())
@@ -695,21 +670,63 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addExpr(SymDtprel));
return;
}
- case PPC::MFCRpseud:
- case PPC::MFCR8pseud:
- // Transform: %R3 = MFCRpseud %CR7
- // Into: %R3 = MFCR ;; cr7
- OutStreamer.AddComment(PPCInstPrinter::
- getRegisterName(MI->getOperand(1).getReg()));
- OutStreamer.EmitInstruction(MCInstBuilder(Subtarget.isPPC64() ? PPC::MFCR8 : PPC::MFCR)
- .addReg(MI->getOperand(0).getReg()));
- return;
+ case PPC::MFOCRF:
+ case PPC::MFOCRF8:
+ if (!Subtarget.hasMFOCRF()) {
+ // Transform: %R3 = MFOCRF %CR7
+ // Into: %R3 = MFCR ;; cr7
+ unsigned NewOpcode =
+ MI->getOpcode() == PPC::MFOCRF ? PPC::MFCR : PPC::MFCR8;
+ OutStreamer.AddComment(PPCInstPrinter::
+ getRegisterName(MI->getOperand(1).getReg()));
+ OutStreamer.EmitInstruction(MCInstBuilder(NewOpcode)
+ .addReg(MI->getOperand(0).getReg()));
+ return;
+ }
+ break;
+ case PPC::MTOCRF:
+ case PPC::MTOCRF8:
+ if (!Subtarget.hasMFOCRF()) {
+ // Transform: %CR7 = MTOCRF %R3
+ // Into: MTCRF mask, %R3 ;; cr7
+ unsigned NewOpcode =
+ MI->getOpcode() == PPC::MTOCRF ? PPC::MTCRF : PPC::MTCRF8;
+ unsigned Mask = 0x80 >> OutContext.getRegisterInfo()
+ ->getEncodingValue(MI->getOperand(0).getReg());
+ OutStreamer.AddComment(PPCInstPrinter::
+ getRegisterName(MI->getOperand(0).getReg()));
+ OutStreamer.EmitInstruction(MCInstBuilder(NewOpcode)
+ .addImm(Mask)
+ .addReg(MI->getOperand(1).getReg()));
+ return;
+ }
+ break;
case PPC::SYNC:
// In Book E sync is called msync, handle this special case here...
if (Subtarget.isBookE()) {
OutStreamer.EmitRawText(StringRef("\tmsync"));
return;
}
+ break;
+ case PPC::LD:
+ case PPC::STD:
+ case PPC::LWA_32:
+ case PPC::LWA: {
+ // Verify alignment is legal, so we don't create relocations
+ // that can't be supported.
+ // FIXME: This test is currently disabled for Darwin. The test
+ // suite shows a handful of test cases that fail this check for
+ // Darwin. Those need to be investigated before this sanity test
+ // can be enabled for those subtargets.
+ if (!Subtarget.isDarwin()) {
+ unsigned OpNum = (MI->getOpcode() == PPC::STD) ? 2 : 1;
+ const MachineOperand &MO = MI->getOperand(OpNum);
+ if (MO.isGlobal() && MO.getGlobal()->getAlignment() < 4)
+ llvm_unreachable("Global must be word-aligned for LD, STD, LWA!");
+ }
+ // Now process the instruction normally.
+ break;
+ }
}
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin());
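
The MTOCRF fallback in the hunk above rewrites a single-field move into MTCRF with a one-hot field mask, computed as 0x80 >> enc(CRn) so the most significant mask bit selects cr0. For example (illustrative only), cr0 gives Mask = 0x80 >> 0 = 0x80 and cr7 gives Mask = 0x80 >> 7 = 0x01, so the emitted instruction is effectively "mtcrf 1, rN" when only cr7 is written.
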
@@ -737,7 +754,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
MCSymbol *Symbol2 = OutContext.GetOrCreateSymbol(StringRef(".TOC."));
// Generates a R_PPC64_TOC relocation for TOC base insertion.
OutStreamer.EmitValue(MCSymbolRefExpr::Create(Symbol2,
- MCSymbolRefExpr::VK_PPC_TOC, OutContext),
+ MCSymbolRefExpr::VK_PPC_TOCBASE, OutContext),
8/*size*/);
// Emit a null environment pointer.
OutStreamer.EmitIntValue(0, 8 /* size */);
@@ -755,6 +772,9 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
bool isPPC64 = TD->getPointerSizeInBits() == 64;
+ PPCTargetStreamer &TS =
+ static_cast<PPCTargetStreamer &>(OutStreamer.getTargetStreamer());
+
if (isPPC64 && !TOC.empty()) {
const MCSectionELF *Section = OutStreamer.getContext().getELFSection(".toc",
ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC,
@@ -765,7 +785,7 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
E = TOC.end(); I != E; ++I) {
OutStreamer.EmitLabel(I->second);
MCSymbol *S = OutContext.GetOrCreateSymbol(I->first->getName());
- OutStreamer.EmitTCEntry(*S);
+ TS.emitTCEntry(*S);
}
}
@@ -781,7 +801,7 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
// .long _foo
OutStreamer.EmitValue(MCSymbolRefExpr::Create(Stubs[i].second.getPointer(),
OutContext),
- isPPC64 ? 8 : 4/*size*/, 0/*addrspace*/);
+ isPPC64 ? 8 : 4/*size*/);
}
Stubs.clear();
@@ -829,7 +849,8 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
"power6",
"power6x",
"power7",
- "ppc64"
+ "ppc64",
+ "ppc64le"
};
unsigned Directive = Subtarget.getDarwinDirective();
@@ -843,7 +864,7 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
// FIXME: This is a total hack, finish mc'izing the PPC backend.
if (OutStreamer.hasRawTextSupport()) {
- assert(Directive < sizeof(CPUDirectives) / sizeof(*CPUDirectives) &&
+ assert(Directive < array_lengthof(CPUDirectives) &&
"CPUDirectives[] might not be up-to-date!");
OutStreamer.EmitRawText("\t.machine " + Twine(CPUDirectives[Directive]));
}
@@ -883,6 +904,7 @@ static MCSymbol *GetAnonSym(MCSymbol *Sym, MCContext &Ctx) {
void PPCDarwinAsmPrinter::
EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
+ bool isDarwin = Subtarget.isDarwin();
const TargetLoweringObjectFileMachO &TLOFMacho =
static_cast<const TargetLoweringObjectFileMachO &>(getObjFileLowering());
@@ -910,6 +932,9 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
OutStreamer.EmitSymbolAttribute(RawSym, MCSA_IndirectSymbol);
const MCExpr *Anon = MCSymbolRefExpr::Create(AnonSymbol, OutContext);
+ const MCExpr *LazyPtrExpr = MCSymbolRefExpr::Create(LazyPtr, OutContext);
+ const MCExpr *Sub =
+ MCBinaryExpr::CreateSub(LazyPtrExpr, Anon, OutContext);
// mflr r0
OutStreamer.EmitInstruction(MCInstBuilder(PPC::MFLR).addReg(PPC::R0));
@@ -919,21 +944,20 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
// mflr r11
OutStreamer.EmitInstruction(MCInstBuilder(PPC::MFLR).addReg(PPC::R11));
// addis r11, r11, ha16(LazyPtr - AnonSymbol)
- const MCExpr *Sub =
- MCBinaryExpr::CreateSub(MCSymbolRefExpr::Create(LazyPtr, OutContext),
- Anon, OutContext);
+ const MCExpr *SubHa16 = PPCMCExpr::CreateHa(Sub, isDarwin, OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS)
.addReg(PPC::R11)
.addReg(PPC::R11)
- .addExpr(Sub));
+ .addExpr(SubHa16));
// mtlr r0
OutStreamer.EmitInstruction(MCInstBuilder(PPC::MTLR).addReg(PPC::R0));
// ldu r12, lo16(LazyPtr - AnonSymbol)(r11)
// lwzu r12, lo16(LazyPtr - AnonSymbol)(r11)
+ const MCExpr *SubLo16 = PPCMCExpr::CreateLo(Sub, isDarwin, OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU)
.addReg(PPC::R12)
- .addExpr(Sub).addExpr(Sub)
+ .addExpr(SubLo16).addExpr(SubLo16)
.addReg(PPC::R11));
// mtctr r12
OutStreamer.EmitInstruction(MCInstBuilder(PPC::MTCTR).addReg(PPC::R12));
@@ -967,24 +991,24 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
MCSymbol *Stub = Stubs[i].first;
MCSymbol *RawSym = Stubs[i].second.getPointer();
MCSymbol *LazyPtr = GetLazyPtr(Stub, OutContext);
+ const MCExpr *LazyPtrExpr = MCSymbolRefExpr::Create(LazyPtr, OutContext);
OutStreamer.SwitchSection(StubSection);
EmitAlignment(4);
OutStreamer.EmitLabel(Stub);
OutStreamer.EmitSymbolAttribute(RawSym, MCSA_IndirectSymbol);
+
// lis r11, ha16(LazyPtr)
const MCExpr *LazyPtrHa16 =
- MCSymbolRefExpr::Create(LazyPtr, MCSymbolRefExpr::VK_PPC_DARWIN_HA16,
- OutContext);
+ PPCMCExpr::CreateHa(LazyPtrExpr, isDarwin, OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(PPC::LIS)
.addReg(PPC::R11)
.addExpr(LazyPtrHa16));
- const MCExpr *LazyPtrLo16 =
- MCSymbolRefExpr::Create(LazyPtr, MCSymbolRefExpr::VK_PPC_DARWIN_LO16,
- OutContext);
// ldu r12, lo16(LazyPtr)(r11)
// lwzu r12, lo16(LazyPtr)(r11)
+ const MCExpr *LazyPtrLo16 =
+ PPCMCExpr::CreateLo(LazyPtrExpr, isDarwin, OutContext);
OutStreamer.EmitInstruction(MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU)
.addReg(PPC::R12)
.addExpr(LazyPtrLo16).addExpr(LazyPtrLo16)
@@ -1037,7 +1061,7 @@ bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
MCSymbol *NLPSym = GetSymbolWithGlobalValueBase(*I, "$non_lazy_ptr");
MachineModuleInfoImpl::StubValueTy &StubSym =
MMIMacho.getGVStubEntry(NLPSym);
- StubSym = MachineModuleInfoImpl::StubValueTy(Mang->getSymbol(*I), true);
+ StubSym = MachineModuleInfoImpl::StubValueTy(getSymbol(*I), true);
}
}
}
diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp
index 81a54d7015b0..4224ae2d273c 100644
--- a/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -9,767 +9,641 @@
//
// This pass identifies loops where we can generate the PPC branch instructions
// that decrement and test the count register (CTR) (bdnz and friends).
-// This pass is based on the HexagonHardwareLoops pass.
//
// The pattern that defines the induction variable can change depending on
// prior optimizations. For example, the IndVarSimplify phase run by 'opt'
// normalizes induction variables, and the Loop Strength Reduction pass
// run by 'llc' may also make changes to the induction variable.
-// The pattern detected by this phase is due to running Strength Reduction.
//
// Criteria for CTR loops:
// - Countable loops (w/ ind. var for a trip count)
-// - Assumes loops are normalized by IndVarSimplify
// - Try inner-most loops first
// - No nested CTR loops.
// - No function calls in loops.
//
-// Note: As with unconverted loops, PPCBranchSelector must be run after this
-// pass in order to convert long-displacement jumps into jump pairs.
-//
//===----------------------------------------------------------------------===//
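
An example of a loop the rewritten pass targets (illustrative only, not from this change): the trip count is computable and the body contains no calls, so the backend can later turn the latch into mtctr/bdnz.

    int sum_array(const int *A, unsigned N) {
      int Sum = 0;
      for (unsigned i = 0; i != N; ++i)  // countable loop: trip count is N
        Sum += A[i];
      return Sum;
    }
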
#define DEBUG_TYPE "ctrloops"
-#include "PPC.h"
-#include "MCTargetDesc/PPCPredicates.h"
-#include "PPCTargetMachine.h"
-#include "llvm/ADT/DenseMap.h"
+
+#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
#include "llvm/PassSupport.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "PPCTargetMachine.h"
+#include "PPC.h"
+
+#ifndef NDEBUG
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#endif
+
#include <algorithm>
+#include <vector>
using namespace llvm;
+#ifndef NDEBUG
+static cl::opt<int> CTRLoopLimit("ppc-max-ctrloop", cl::Hidden, cl::init(-1));
+#endif
+
STATISTIC(NumCTRLoops, "Number of loops converted to CTR loops");
namespace llvm {
void initializePPCCTRLoopsPass(PassRegistry&);
+#ifndef NDEBUG
+ void initializePPCCTRLoopsVerifyPass(PassRegistry&);
+#endif
}
namespace {
- class CountValue;
- struct PPCCTRLoops : public MachineFunctionPass {
- MachineLoopInfo *MLI;
- MachineRegisterInfo *MRI;
- const TargetInstrInfo *TII;
+ struct PPCCTRLoops : public FunctionPass {
+
+#ifndef NDEBUG
+ static int Counter;
+#endif
public:
- static char ID; // Pass identification, replacement for typeid
+ static char ID;
- PPCCTRLoops() : MachineFunctionPass(ID) {
+ PPCCTRLoops() : FunctionPass(ID), TM(0) {
+ initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
+ }
+ PPCCTRLoops(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) {
initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- const char *getPassName() const { return "PPC CTR Loops"; }
+ virtual bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequired<MachineDominatorTree>();
- AU.addPreserved<MachineDominatorTree>();
- AU.addRequired<MachineLoopInfo>();
- AU.addPreserved<MachineLoopInfo>();
- MachineFunctionPass::getAnalysisUsage(AU);
+ AU.addRequired<LoopInfo>();
+ AU.addPreserved<LoopInfo>();
+ AU.addRequired<DominatorTree>();
+ AU.addPreserved<DominatorTree>();
+ AU.addRequired<ScalarEvolution>();
}
private:
- /// getCanonicalInductionVariable - Check to see if the loop has a canonical
- /// induction variable.
- /// Should be defined in MachineLoop. Based upon version in class Loop.
- void getCanonicalInductionVariable(MachineLoop *L,
- SmallVector<MachineInstr *, 4> &IVars,
- SmallVector<MachineInstr *, 4> &IOps) const;
-
- /// getTripCount - Return a loop-invariant LLVM register indicating the
- /// number of times the loop will be executed. If the trip-count cannot
- /// be determined, this return null.
- CountValue *getTripCount(MachineLoop *L,
- SmallVector<MachineInstr *, 2> &OldInsts) const;
-
- /// isInductionOperation - Return true if the instruction matches the
- /// pattern for an opertion that defines an induction variable.
- bool isInductionOperation(const MachineInstr *MI, unsigned IVReg) const;
-
- /// isInvalidOperation - Return true if the instruction is not valid within
- /// a CTR loop.
- bool isInvalidLoopOperation(const MachineInstr *MI) const;
-
- /// containsInavlidInstruction - Return true if the loop contains an
- /// instruction that inhibits using the CTR loop.
- bool containsInvalidInstruction(MachineLoop *L) const;
-
- /// converToCTRLoop - Given a loop, check if we can convert it to a
- /// CTR loop. If so, then perform the conversion and return true.
- bool convertToCTRLoop(MachineLoop *L);
-
- /// isDead - Return true if the instruction is now dead.
- bool isDead(const MachineInstr *MI,
- SmallVector<MachineInstr *, 1> &DeadPhis) const;
-
- /// removeIfDead - Remove the instruction if it is now dead.
- void removeIfDead(MachineInstr *MI);
+ bool mightUseCTR(const Triple &TT, BasicBlock *BB);
+ bool convertToCTRLoop(Loop *L);
+
+ private:
+ PPCTargetMachine *TM;
+ LoopInfo *LI;
+ ScalarEvolution *SE;
+ DataLayout *TD;
+ DominatorTree *DT;
+ const TargetLibraryInfo *LibInfo;
};
char PPCCTRLoops::ID = 0;
+#ifndef NDEBUG
+ int PPCCTRLoops::Counter = 0;
+#endif
-
- // CountValue class - Abstraction for a trip count of a loop. A
- // smaller vesrsion of the MachineOperand class without the concerns
- // of changing the operand representation.
- class CountValue {
+#ifndef NDEBUG
+ struct PPCCTRLoopsVerify : public MachineFunctionPass {
public:
- enum CountValueType {
- CV_Register,
- CV_Immediate
- };
- private:
- CountValueType Kind;
- union Values {
- unsigned RegNum;
- int64_t ImmVal;
- Values(unsigned r) : RegNum(r) {}
- Values(int64_t i) : ImmVal(i) {}
- } Contents;
- bool isNegative;
+ static char ID;
- public:
- CountValue(unsigned r, bool neg) : Kind(CV_Register), Contents(r),
- isNegative(neg) {}
- explicit CountValue(int64_t i) : Kind(CV_Immediate), Contents(i),
- isNegative(i < 0) {}
- CountValueType getType() const { return Kind; }
- bool isReg() const { return Kind == CV_Register; }
- bool isImm() const { return Kind == CV_Immediate; }
- bool isNeg() const { return isNegative; }
-
- unsigned getReg() const {
- assert(isReg() && "Wrong CountValue accessor");
- return Contents.RegNum;
- }
- void setReg(unsigned Val) {
- Contents.RegNum = Val;
- }
- int64_t getImm() const {
- assert(isImm() && "Wrong CountValue accessor");
- if (isNegative) {
- return -Contents.ImmVal;
- }
- return Contents.ImmVal;
- }
- void setImm(int64_t Val) {
- Contents.ImmVal = Val;
+ PPCCTRLoopsVerify() : MachineFunctionPass(ID) {
+ initializePPCCTRLoopsVerifyPass(*PassRegistry::getPassRegistry());
}
- void print(raw_ostream &OS, const TargetMachine *TM = 0) const {
- if (isReg()) { OS << PrintReg(getReg()); }
- if (isImm()) { OS << getImm(); }
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ private:
+ MachineDominatorTree *MDT;
};
+
+ char PPCCTRLoopsVerify::ID = 0;
+#endif // NDEBUG
} // end anonymous namespace
INITIALIZE_PASS_BEGIN(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
-INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
false, false)
-/// isCompareEqualsImm - Returns true if the instruction is a compare equals
-/// instruction with an immediate operand.
-static bool isCompareEqualsImm(const MachineInstr *MI, bool &SignedCmp,
- bool &Int64Cmp) {
- if (MI->getOpcode() == PPC::CMPWI) {
- SignedCmp = true;
- Int64Cmp = false;
- return true;
- } else if (MI->getOpcode() == PPC::CMPDI) {
- SignedCmp = true;
- Int64Cmp = true;
- return true;
- } else if (MI->getOpcode() == PPC::CMPLWI) {
- SignedCmp = false;
- Int64Cmp = false;
- return true;
- } else if (MI->getOpcode() == PPC::CMPLDI) {
- SignedCmp = false;
- Int64Cmp = true;
- return true;
- }
-
- return false;
+FunctionPass *llvm::createPPCCTRLoops(PPCTargetMachine &TM) {
+ return new PPCCTRLoops(TM);
}
+#ifndef NDEBUG
+INITIALIZE_PASS_BEGIN(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
+ "PowerPC CTR Loops Verify", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_END(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
+ "PowerPC CTR Loops Verify", false, false)
-/// createPPCCTRLoops - Factory for creating
-/// the CTR loop phase.
-FunctionPass *llvm::createPPCCTRLoops() {
- return new PPCCTRLoops();
+FunctionPass *llvm::createPPCCTRLoopsVerify() {
+ return new PPCCTRLoopsVerify();
}
+#endif // NDEBUG
+bool PPCCTRLoops::runOnFunction(Function &F) {
+ LI = &getAnalysis<LoopInfo>();
+ SE = &getAnalysis<ScalarEvolution>();
+ DT = &getAnalysis<DominatorTree>();
+ TD = getAnalysisIfAvailable<DataLayout>();
+ LibInfo = getAnalysisIfAvailable<TargetLibraryInfo>();
-bool PPCCTRLoops::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********* PPC CTR Loops *********\n");
-
- bool Changed = false;
+ bool MadeChange = false;
- // get the loop information
- MLI = &getAnalysis<MachineLoopInfo>();
- // get the register information
- MRI = &MF.getRegInfo();
- // the target-specific instruction info.
- TII = MF.getTarget().getInstrInfo();
-
- for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end();
+ for (LoopInfo::iterator I = LI->begin(), E = LI->end();
I != E; ++I) {
- MachineLoop *L = *I;
- if (!L->getParentLoop()) {
- Changed |= convertToCTRLoop(L);
- }
+ Loop *L = *I;
+ if (!L->getParentLoop())
+ MadeChange |= convertToCTRLoop(L);
}
- return Changed;
+ return MadeChange;
}
-/// getCanonicalInductionVariable - Check to see if the loop has a canonical
-/// induction variable. We check for a simple recurrence pattern - an
-/// integer recurrence that decrements by one each time through the loop and
-/// ends at zero. If so, return the phi node that corresponds to it.
-///
-/// Based upon the similar code in LoopInfo except this code is specific to
-/// the machine.
-/// This method assumes that the IndVarSimplify pass has been run by 'opt'.
-///
-void
-PPCCTRLoops::getCanonicalInductionVariable(MachineLoop *L,
- SmallVector<MachineInstr *, 4> &IVars,
- SmallVector<MachineInstr *, 4> &IOps) const {
- MachineBasicBlock *TopMBB = L->getTopBlock();
- MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin();
- assert(PI != TopMBB->pred_end() &&
- "Loop must have more than one incoming edge!");
- MachineBasicBlock *Backedge = *PI++;
- if (PI == TopMBB->pred_end()) return; // dead loop
- MachineBasicBlock *Incoming = *PI++;
- if (PI != TopMBB->pred_end()) return; // multiple backedges?
-
- // make sure there is one incoming and one backedge and determine which
- // is which.
- if (L->contains(Incoming)) {
- if (L->contains(Backedge))
- return;
- std::swap(Incoming, Backedge);
- } else if (!L->contains(Backedge))
- return;
-
- // Loop over all of the PHI nodes, looking for a canonical induction variable:
- // - The PHI node is "reg1 = PHI reg2, BB1, reg3, BB2".
- // - The recurrence comes from the backedge.
- // - The definition is an induction operation.
- for (MachineBasicBlock::iterator I = TopMBB->begin(), E = TopMBB->end();
- I != E && I->isPHI(); ++I) {
- MachineInstr *MPhi = &*I;
- unsigned DefReg = MPhi->getOperand(0).getReg();
- for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2) {
- // Check each operand for the value from the backedge.
- MachineBasicBlock *MBB = MPhi->getOperand(i+1).getMBB();
- if (L->contains(MBB)) { // operand comes from the backedge
- // Check if the definition is an induction operation.
- MachineInstr *DI = MRI->getVRegDef(MPhi->getOperand(i).getReg());
- if (isInductionOperation(DI, DefReg)) {
- IOps.push_back(DI);
- IVars.push_back(MPhi);
+bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
+ for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
+ J != JE; ++J) {
+ if (CallInst *CI = dyn_cast<CallInst>(J)) {
+ if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
+ // Inline ASM is okay, unless it clobbers the ctr register.
+ InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
+ for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
+ InlineAsm::ConstraintInfo &C = CIV[i];
+ if (C.Type != InlineAsm::isInput)
+ for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
+ if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
+ return true;
}
- }
- }
- }
- return;
-}
-/// getTripCount - Return a loop-invariant LLVM value indicating the
-/// number of times the loop will be executed. The trip count can
-/// be either a register or a constant value. If the trip-count
-/// cannot be determined, this returns null.
-///
-/// We find the trip count from the phi instruction that defines the
-/// induction variable. We follow the links to the CMP instruction
-/// to get the trip count.
-///
-/// Based upon getTripCount in LoopInfo.
-///
-CountValue *PPCCTRLoops::getTripCount(MachineLoop *L,
- SmallVector<MachineInstr *, 2> &OldInsts) const {
- MachineBasicBlock *LastMBB = L->getExitingBlock();
- // Don't generate a CTR loop if the loop has more than one exit.
- if (LastMBB == 0)
- return 0;
-
- MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator();
- if (LastI->getOpcode() != PPC::BCC)
- return 0;
-
- // We need to make sure that this compare is defining the condition
- // register actually used by the terminating branch.
-
- unsigned PredReg = LastI->getOperand(1).getReg();
- DEBUG(dbgs() << "Examining loop with first terminator: " << *LastI);
-
- unsigned PredCond = LastI->getOperand(0).getImm();
- if (PredCond != PPC::PRED_EQ && PredCond != PPC::PRED_NE)
- return 0;
-
- // Check that the loop has an induction variable.
- SmallVector<MachineInstr *, 4> IVars, IOps;
- getCanonicalInductionVariable(L, IVars, IOps);
- for (unsigned i = 0; i < IVars.size(); ++i) {
- MachineInstr *IOp = IOps[i];
- MachineInstr *IV_Inst = IVars[i];
-
- // Canonical loops will end with a 'cmpwi/cmpdi cr, IV, Imm',
- // if Imm is 0, get the count from the PHI opnd
- // if Imm is -M, then M is the count
- // Otherwise, Imm is the count
- MachineOperand *IV_Opnd;
- const MachineOperand *InitialValue;
- if (!L->contains(IV_Inst->getOperand(2).getMBB())) {
- InitialValue = &IV_Inst->getOperand(1);
- IV_Opnd = &IV_Inst->getOperand(3);
- } else {
- InitialValue = &IV_Inst->getOperand(3);
- IV_Opnd = &IV_Inst->getOperand(1);
- }
+ continue;
+ }
- DEBUG(dbgs() << "Considering:\n");
- DEBUG(dbgs() << " induction operation: " << *IOp);
- DEBUG(dbgs() << " induction variable: " << *IV_Inst);
- DEBUG(dbgs() << " initial value: " << *InitialValue << "\n");
-
- // Look for the cmp instruction to determine if we
- // can get a useful trip count. The trip count can
- // be either a register or an immediate. The location
- // of the value depends upon the type (reg or imm).
- for (MachineRegisterInfo::reg_iterator
- RI = MRI->reg_begin(IV_Opnd->getReg()), RE = MRI->reg_end();
- RI != RE; ++RI) {
- IV_Opnd = &RI.getOperand();
- bool SignedCmp, Int64Cmp;
- MachineInstr *MI = IV_Opnd->getParent();
- if (L->contains(MI) && isCompareEqualsImm(MI, SignedCmp, Int64Cmp) &&
- MI->getOperand(0).getReg() == PredReg) {
-
- OldInsts.push_back(MI);
- OldInsts.push_back(IOp);
-
- DEBUG(dbgs() << " compare: " << *MI);
-
- const MachineOperand &MO = MI->getOperand(2);
- assert(MO.isImm() && "IV Cmp Operand should be an immediate");
-
- int64_t ImmVal;
- if (SignedCmp)
- ImmVal = (short) MO.getImm();
- else
- ImmVal = MO.getImm();
-
- const MachineInstr *IV_DefInstr = MRI->getVRegDef(IV_Opnd->getReg());
- assert(L->contains(IV_DefInstr->getParent()) &&
- "IV definition should occurs in loop");
- int64_t iv_value = (short) IV_DefInstr->getOperand(2).getImm();
-
- assert(InitialValue->isReg() && "Expecting register for init value");
- unsigned InitialValueReg = InitialValue->getReg();
-
- MachineInstr *DefInstr = MRI->getVRegDef(InitialValueReg);
-
- // Here we need to look for an immediate load (an li or lis/ori pair).
- if (DefInstr && (DefInstr->getOpcode() == PPC::ORI8 ||
- DefInstr->getOpcode() == PPC::ORI)) {
- int64_t start = DefInstr->getOperand(2).getImm();
- MachineInstr *DefInstr2 =
- MRI->getVRegDef(DefInstr->getOperand(1).getReg());
- if (DefInstr2 && (DefInstr2->getOpcode() == PPC::LIS8 ||
- DefInstr2->getOpcode() == PPC::LIS)) {
- DEBUG(dbgs() << " initial constant: " << *DefInstr);
- DEBUG(dbgs() << " initial constant: " << *DefInstr2);
-
- start |= int64_t(short(DefInstr2->getOperand(1).getImm())) << 16;
-
- int64_t count = ImmVal - start;
- if ((count % iv_value) != 0) {
- return 0;
- }
-
- OldInsts.push_back(DefInstr);
- OldInsts.push_back(DefInstr2);
-
- // count/iv_value, the trip count, should be positive here. If it
- // is negative, that indicates that the counter will wrap.
- if (Int64Cmp)
- return new CountValue(count/iv_value);
+ if (!TM)
+ return true;
+ const TargetLowering *TLI = TM->getTargetLowering();
+
+ if (Function *F = CI->getCalledFunction()) {
+ // Most intrinsics don't become function calls, but some might.
+ // sin, cos, exp and log are always calls.
+ unsigned Opcode;
+ if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
+ switch (F->getIntrinsicID()) {
+ default: continue;
+
+// VisualStudio defines setjmp as _setjmp
+#if defined(_MSC_VER) && defined(setjmp) && \
+ !defined(setjmp_undefined_for_msvc)
+# pragma push_macro("setjmp")
+# undef setjmp
+# define setjmp_undefined_for_msvc
+#endif
+
+ case Intrinsic::setjmp:
+
+#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
+ // let's return it to _setjmp state
+# pragma pop_macro("setjmp")
+# undef setjmp_undefined_for_msvc
+#endif
+
+ case Intrinsic::longjmp:
+
+ // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
+ // because, although it does clobber the counter register, the
+ // control can't then return to inside the loop unless there is also
+ // an eh_sjlj_setjmp.
+ case Intrinsic::eh_sjlj_setjmp:
+
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::memset:
+ case Intrinsic::powi:
+ case Intrinsic::log:
+ case Intrinsic::log2:
+ case Intrinsic::log10:
+ case Intrinsic::exp:
+ case Intrinsic::exp2:
+ case Intrinsic::pow:
+ case Intrinsic::sin:
+ case Intrinsic::cos:
+ return true;
+ case Intrinsic::copysign:
+ if (CI->getArgOperand(0)->getType()->getScalarType()->
+ isPPC_FP128Ty())
+ return true;
else
- return new CountValue(uint32_t(count/iv_value));
- }
- } else if (DefInstr && (DefInstr->getOpcode() == PPC::LI8 ||
- DefInstr->getOpcode() == PPC::LI)) {
- DEBUG(dbgs() << " initial constant: " << *DefInstr);
-
- int64_t count = ImmVal -
- int64_t(short(DefInstr->getOperand(1).getImm()));
- if ((count % iv_value) != 0) {
- return 0;
+ continue; // ISD::FCOPYSIGN is never a library call.
+ case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
+ case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
+ case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
+ case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
+ case Intrinsic::rint: Opcode = ISD::FRINT; break;
+ case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
+ case Intrinsic::round: Opcode = ISD::FROUND; break;
}
+ }
- OldInsts.push_back(DefInstr);
-
- if (Int64Cmp)
- return new CountValue(count/iv_value);
- else
- return new CountValue(uint32_t(count/iv_value));
- } else if (iv_value == 1 || iv_value == -1) {
- // We can't determine a constant starting value.
- if (ImmVal == 0) {
- return new CountValue(InitialValueReg, iv_value > 0);
+ // PowerPC does not use [US]DIVREM or other library calls for
+ // operations on regular types which are not otherwise library calls
+ // (i.e. soft float or atomics). If adapting for targets that do,
+ // additional care is required here.
+
+ LibFunc::Func Func;
+ if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
+ LibInfo->getLibFunc(F->getName(), Func) &&
+ LibInfo->hasOptimizedCodeGen(Func)) {
+ // Non-read-only functions are never treated as intrinsics.
+ if (!CI->onlyReadsMemory())
+ return true;
+
+ // Conversion happens only for FP calls.
+ if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
+ return true;
+
+ switch (Func) {
+ default: return true;
+ case LibFunc::copysign:
+ case LibFunc::copysignf:
+ continue; // ISD::FCOPYSIGN is never a library call.
+ case LibFunc::copysignl:
+ return true;
+ case LibFunc::fabs:
+ case LibFunc::fabsf:
+ case LibFunc::fabsl:
+ continue; // ISD::FABS is never a library call.
+ case LibFunc::sqrt:
+ case LibFunc::sqrtf:
+ case LibFunc::sqrtl:
+ Opcode = ISD::FSQRT; break;
+ case LibFunc::floor:
+ case LibFunc::floorf:
+ case LibFunc::floorl:
+ Opcode = ISD::FFLOOR; break;
+ case LibFunc::nearbyint:
+ case LibFunc::nearbyintf:
+ case LibFunc::nearbyintl:
+ Opcode = ISD::FNEARBYINT; break;
+ case LibFunc::ceil:
+ case LibFunc::ceilf:
+ case LibFunc::ceill:
+ Opcode = ISD::FCEIL; break;
+ case LibFunc::rint:
+ case LibFunc::rintf:
+ case LibFunc::rintl:
+ Opcode = ISD::FRINT; break;
+ case LibFunc::round:
+ case LibFunc::roundf:
+ case LibFunc::roundl:
+ Opcode = ISD::FROUND; break;
+ case LibFunc::trunc:
+ case LibFunc::truncf:
+ case LibFunc::truncl:
+ Opcode = ISD::FTRUNC; break;
}
- // FIXME: handle non-zero end value.
+
+ MVT VTy =
+ TLI->getSimpleValueType(CI->getArgOperand(0)->getType(), true);
+ if (VTy == MVT::Other)
+ return true;
+
+ if (TLI->isOperationLegalOrCustom(Opcode, VTy))
+ continue;
+ else if (VTy.isVector() &&
+ TLI->isOperationLegalOrCustom(Opcode, VTy.getScalarType()))
+ continue;
+
+ return true;
}
- // FIXME: handle non-unit increments (we might not want to introduce
- // division but we can handle some 2^n cases with shifts).
-
}
- }
- }
- return 0;
-}
-
-/// isInductionOperation - Return true if the operation matches the
-/// pattern that defines an induction variable:
-/// addi iv, c
-///
-bool
-PPCCTRLoops::isInductionOperation(const MachineInstr *MI,
- unsigned IVReg) const {
- return ((MI->getOpcode() == PPC::ADDI || MI->getOpcode() == PPC::ADDI8) &&
- MI->getOperand(1).isReg() && // could be a frame index instead
- MI->getOperand(1).getReg() == IVReg);
-}
-/// isInvalidOperation - Return true if the operation is invalid within
-/// a CTR loop.
-bool
-PPCCTRLoops::isInvalidLoopOperation(const MachineInstr *MI) const {
-
- // call is not allowed because the callee may use a CTR loop
- if (MI->getDesc().isCall()) {
- return true;
- }
- // check if the instruction defines a CTR loop register
- // (this will also catch nested CTR loops)
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.isDef() &&
- (MO.getReg() == PPC::CTR || MO.getReg() == PPC::CTR8)) {
return true;
- }
- }
- return false;
-}
+ } else if (isa<BinaryOperator>(J) &&
+ J->getType()->getScalarType()->isPPC_FP128Ty()) {
+ // Most operations on ppc_f128 values become calls.
+ return true;
+ } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
+ isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
+ CastInst *CI = cast<CastInst>(J);
+ if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
+ CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
+ (TT.isArch32Bit() &&
+ (CI->getSrcTy()->getScalarType()->isIntegerTy(64) ||
+ CI->getDestTy()->getScalarType()->isIntegerTy(64))
+ ))
+ return true;
+ } else if (TT.isArch32Bit() &&
+ J->getType()->getScalarType()->isIntegerTy(64) &&
+ (J->getOpcode() == Instruction::UDiv ||
+ J->getOpcode() == Instruction::SDiv ||
+ J->getOpcode() == Instruction::URem ||
+ J->getOpcode() == Instruction::SRem)) {
+ return true;
+ } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
+ // On PowerPC, indirect jumps use the counter register.
+ return true;
+ } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
+ if (!TM)
+ return true;
+ const TargetLowering *TLI = TM->getTargetLowering();
-/// containsInvalidInstruction - Return true if the loop contains
-/// an instruction that inhibits the use of the CTR loop function.
-///
-bool PPCCTRLoops::containsInvalidInstruction(MachineLoop *L) const {
- const std::vector<MachineBasicBlock*> Blocks = L->getBlocks();
- for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
- MachineBasicBlock *MBB = Blocks[i];
- for (MachineBasicBlock::iterator
- MII = MBB->begin(), E = MBB->end(); MII != E; ++MII) {
- const MachineInstr *MI = &*MII;
- if (isInvalidLoopOperation(MI)) {
+ if (TLI->supportJumpTables() &&
+ SI->getNumCases()+1 >= (unsigned) TLI->getMinimumJumpTableEntries())
return true;
- }
}
}
+
return false;
}
-/// isDead returns true if the instruction is dead
-/// (this was essentially copied from DeadMachineInstructionElim::isDead, but
-/// with special cases for inline asm, physical registers and instructions with
-/// side effects removed)
-bool PPCCTRLoops::isDead(const MachineInstr *MI,
- SmallVector<MachineInstr *, 1> &DeadPhis) const {
- // Examine each operand.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.isDef()) {
- unsigned Reg = MO.getReg();
- if (!MRI->use_nodbg_empty(Reg)) {
- // This instruction has users, but if the only user is the phi node for
- // the parent block, and the only use of that phi node is this
- // instruction, then this instruction is dead: both it (and the phi
- // node) can be removed.
- MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg);
- if (llvm::next(I) == MRI->use_end() &&
- I.getOperand().getParent()->isPHI()) {
- MachineInstr *OnePhi = I.getOperand().getParent();
-
- for (unsigned j = 0, f = OnePhi->getNumOperands(); j != f; ++j) {
- const MachineOperand &OPO = OnePhi->getOperand(j);
- if (OPO.isReg() && OPO.isDef()) {
- unsigned OPReg = OPO.getReg();
-
- MachineRegisterInfo::use_iterator nextJ;
- for (MachineRegisterInfo::use_iterator J = MRI->use_begin(OPReg),
- E = MRI->use_end(); J!=E; J=nextJ) {
- nextJ = llvm::next(J);
- MachineOperand& Use = J.getOperand();
- MachineInstr *UseMI = Use.getParent();
-
- if (MI != UseMI) {
- // The phi node has a user that is not MI, bail...
- return false;
- }
- }
- }
- }
+bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
+ bool MadeChange = false;
- DeadPhis.push_back(OnePhi);
- } else {
- // This def has a non-debug use. Don't delete the instruction!
- return false;
- }
- }
- }
+ Triple TT = Triple(L->getHeader()->getParent()->getParent()->
+ getTargetTriple());
+ if (!TT.isArch32Bit() && !TT.isArch64Bit())
+ return MadeChange; // Unknown arch. type.
+
+ // Process nested loops first.
+ for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
+ MadeChange |= convertToCTRLoop(*I);
}
- // If there are no defs with uses, the instruction is dead.
- return true;
-}
+ // If a nested loop has been converted, then we can't convert this loop.
+ if (MadeChange)
+ return MadeChange;
+
+#ifndef NDEBUG
+ // Stop trying after reaching the limit (if any).
+ int Limit = CTRLoopLimit;
+ if (Limit >= 0) {
+ if (Counter >= CTRLoopLimit)
+ return false;
+ Counter++;
+ }
+#endif
+
+ // We don't want to spill/restore the counter register, and so we don't
+ // want to use the counter register if the loop contains calls.
+ for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
+ I != IE; ++I)
+ if (mightUseCTR(TT, *I))
+ return MadeChange;
+
+ SmallVector<BasicBlock*, 4> ExitingBlocks;
+ L->getExitingBlocks(ExitingBlocks);
+
+ BasicBlock *CountedExitBlock = 0;
+ const SCEV *ExitCount = 0;
+ BranchInst *CountedExitBranch = 0;
+ for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
+ IE = ExitingBlocks.end(); I != IE; ++I) {
+ const SCEV *EC = SE->getExitCount(L, *I);
+ DEBUG(dbgs() << "Exit Count for " << *L << " from block " <<
+ (*I)->getName() << ": " << *EC << "\n");
+ if (isa<SCEVCouldNotCompute>(EC))
+ continue;
+ if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
+ if (ConstEC->getValue()->isZero())
+ continue;
+ } else if (!SE->isLoopInvariant(EC, L))
+ continue;
+
+ if (SE->getTypeSizeInBits(EC->getType()) > (TT.isArch64Bit() ? 64 : 32))
+ continue;
+
+ // We now have a loop-invariant count of loop iterations (which is not the
+ // constant zero) for which we know that this loop will not exit via this
+ // exiting block.
+
+ // We need to make sure that this block will run on every loop iteration.
+ // For this to be true, we must dominate all blocks with backedges. Such
+ // blocks are in-loop predecessors to the header block.
+ bool NotAlways = false;
+ for (pred_iterator PI = pred_begin(L->getHeader()),
+ PIE = pred_end(L->getHeader()); PI != PIE; ++PI) {
+ if (!L->contains(*PI))
+ continue;
-void PPCCTRLoops::removeIfDead(MachineInstr *MI) {
- // This procedure was essentially copied from DeadMachineInstructionElim
+ if (!DT->dominates(*I, *PI)) {
+ NotAlways = true;
+ break;
+ }
+ }
- SmallVector<MachineInstr *, 1> DeadPhis;
- if (isDead(MI, DeadPhis)) {
- DEBUG(dbgs() << "CTR looping will remove: " << *MI);
+ if (NotAlways)
+ continue;
- // It is possible that some DBG_VALUE instructions refer to this
- // instruction. Examine each def operand for such references;
- // if found, mark the DBG_VALUE as undef (but don't delete it).
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef())
+ // Make sure this block ends with a conditional branch.
+ Instruction *TI = (*I)->getTerminator();
+ if (!TI)
+ continue;
+
+ if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
+ if (!BI->isConditional())
continue;
- unsigned Reg = MO.getReg();
- MachineRegisterInfo::use_iterator nextI;
- for (MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg),
- E = MRI->use_end(); I!=E; I=nextI) {
- nextI = llvm::next(I); // I is invalidated by the setReg
- MachineOperand& Use = I.getOperand();
- MachineInstr *UseMI = Use.getParent();
- if (UseMI==MI)
- continue;
- if (Use.isDebug()) // this might also be an instr -> phi -> instr case
- // which can also be removed.
- UseMI->getOperand(0).setReg(0U);
- }
- }
- MI->eraseFromParent();
- for (unsigned i = 0; i < DeadPhis.size(); ++i) {
- DeadPhis[i]->eraseFromParent();
- }
+ CountedExitBranch = BI;
+ } else
+ continue;
+
+ // Note that this block may not be the loop latch block, even if the loop
+ // has a latch block.
+ CountedExitBlock = *I;
+ ExitCount = EC;
+ break;
}
+
+ if (!CountedExitBlock)
+ return MadeChange;
+
+ BasicBlock *Preheader = L->getLoopPreheader();
+
+ // If we don't have a preheader, then insert one. If we already have a
+ // preheader, then we can use it (except if the preheader contains a use of
+ // the CTR register because some such uses might be reordered by the
+ // selection DAG after the mtctr instruction).
+ if (!Preheader || mightUseCTR(TT, Preheader))
+ Preheader = InsertPreheaderForLoop(L, this);
+ if (!Preheader)
+ return MadeChange;
+
+ DEBUG(dbgs() << "Preheader for exit count: " << Preheader->getName() << "\n");
+
+ // Insert the count into the preheader and replace the condition used by the
+ // selected branch.
+ MadeChange = true;
+
+ SCEVExpander SCEVE(*SE, "loopcnt");
+ LLVMContext &C = SE->getContext();
+ Type *CountType = TT.isArch64Bit() ? Type::getInt64Ty(C) :
+ Type::getInt32Ty(C);
+ if (!ExitCount->getType()->isPointerTy() &&
+ ExitCount->getType() != CountType)
+ ExitCount = SE->getZeroExtendExpr(ExitCount, CountType);
+ ExitCount = SE->getAddExpr(ExitCount,
+ SE->getConstant(CountType, 1));
+ Value *ECValue = SCEVE.expandCodeFor(ExitCount, CountType,
+ Preheader->getTerminator());
+
+ IRBuilder<> CountBuilder(Preheader->getTerminator());
+ Module *M = Preheader->getParent()->getParent();
+ Value *MTCTRFunc = Intrinsic::getDeclaration(M, Intrinsic::ppc_mtctr,
+ CountType);
+ CountBuilder.CreateCall(MTCTRFunc, ECValue);
+
+ IRBuilder<> CondBuilder(CountedExitBranch);
+ Value *DecFunc =
+ Intrinsic::getDeclaration(M, Intrinsic::ppc_is_decremented_ctr_nonzero);
+ Value *NewCond = CondBuilder.CreateCall(DecFunc);
+ Value *OldCond = CountedExitBranch->getCondition();
+ CountedExitBranch->setCondition(NewCond);
+
+ // The false branch must exit the loop.
+ if (!L->contains(CountedExitBranch->getSuccessor(0)))
+ CountedExitBranch->swapSuccessors();
+
+ // The old condition may be dead now, and may have even created a dead PHI
+ // (the original induction variable).
+ RecursivelyDeleteTriviallyDeadInstructions(OldCond);
+ DeleteDeadPHIs(CountedExitBlock);
+
+ ++NumCTRLoops;
+ return MadeChange;
}
-/// convertToCTRLoop - Check if the loop is a candidate for
-/// converting to a CTR loop. If so, then perform the
-/// transformation.
-///
-/// This function works on innermost loops first. A loop can
-/// be converted if it is a counting loop; either a register
-/// value or an immediate.
-///
-/// The code makes several assumptions about the representation
-/// of the loop in llvm.
-bool PPCCTRLoops::convertToCTRLoop(MachineLoop *L) {
- bool Changed = false;
- // Process nested loops first.
- for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
- Changed |= convertToCTRLoop(*I);
- }
- // If a nested loop has been converted, then we can't convert this loop.
- if (Changed) {
- return Changed;
+#ifndef NDEBUG
+static bool clobbersCTR(const MachineInstr *MI) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg()) {
+ if (MO.isDef() && (MO.getReg() == PPC::CTR || MO.getReg() == PPC::CTR8))
+ return true;
+ } else if (MO.isRegMask()) {
+ if (MO.clobbersPhysReg(PPC::CTR) || MO.clobbersPhysReg(PPC::CTR8))
+ return true;
+ }
}
- SmallVector<MachineInstr *, 2> OldInsts;
- // Are we able to determine the trip count for the loop?
- CountValue *TripCount = getTripCount(L, OldInsts);
- if (TripCount == 0) {
- DEBUG(dbgs() << "failed to get trip count!\n");
- return false;
- }
+ return false;
+}
- if (TripCount->isImm()) {
- DEBUG(dbgs() << "constant trip count: " << TripCount->getImm() << "\n");
+static bool verifyCTRBranch(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I) {
+ MachineBasicBlock::iterator BI = I;
+ SmallSet<MachineBasicBlock *, 16> Visited;
+ SmallVector<MachineBasicBlock *, 8> Preds;
+ bool CheckPreds;
+
+ if (I == MBB->begin()) {
+ Visited.insert(MBB);
+ goto queue_preds;
+ } else
+ --I;
+
+check_block:
+ Visited.insert(MBB);
+ if (I == MBB->end())
+ goto queue_preds;
+
+ CheckPreds = true;
+ for (MachineBasicBlock::iterator IE = MBB->begin();; --I) {
+ unsigned Opc = I->getOpcode();
+ if (Opc == PPC::MTCTRloop || Opc == PPC::MTCTR8loop) {
+ CheckPreds = false;
+ break;
+ }
- // FIXME: We currently can't form 64-bit constants
- // (including 32-bit unsigned constants)
- if (!isInt<32>(TripCount->getImm()))
+ if (I != BI && clobbersCTR(I)) {
+ DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" <<
+ MBB->getFullName() << ") instruction " << *I <<
+ " clobbers CTR, invalidating " << "BB#" <<
+ BI->getParent()->getNumber() << " (" <<
+ BI->getParent()->getFullName() << ") instruction " <<
+ *BI << "\n");
return false;
- }
+ }
- // Does the loop contain any invalid instructions?
- if (containsInvalidInstruction(L)) {
- return false;
+ if (I == IE)
+ break;
}
- MachineBasicBlock *Preheader = L->getLoopPreheader();
- // No preheader means there's no place for the loop instruction.
- if (Preheader == 0) {
- return false;
- }
- MachineBasicBlock::iterator InsertPos = Preheader->getFirstTerminator();
- DebugLoc dl;
- if (InsertPos != Preheader->end())
- dl = InsertPos->getDebugLoc();
+ if (!CheckPreds && Preds.empty())
+ return true;
- MachineBasicBlock *LastMBB = L->getExitingBlock();
- // Don't generate CTR loop if the loop has more than one exit.
- if (LastMBB == 0) {
- return false;
- }
- MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator();
-
- // Determine the loop start.
- MachineBasicBlock *LoopStart = L->getTopBlock();
- if (L->getLoopLatch() != LastMBB) {
- // When the exit and latch are not the same, use the latch block as the
- // start.
- // The loop start address is used only after the 1st iteration, and the loop
- // latch may contain instrs. that need to be executed after the 1st iter.
- LoopStart = L->getLoopLatch();
- // Make sure the latch is a successor of the exit, otherwise it won't work.
- if (!LastMBB->isSuccessor(LoopStart)) {
+ if (CheckPreds) {
+queue_preds:
+ if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
+ DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" <<
+ BI->getParent()->getNumber() << " (" <<
+ BI->getParent()->getFullName() << ") instruction " <<
+ *BI << "\n");
return false;
}
+
+ for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
+ PIE = MBB->pred_end(); PI != PIE; ++PI)
+ Preds.push_back(*PI);
}
- // Convert the loop to a CTR loop
- DEBUG(dbgs() << "Change to CTR loop at "; L->dump());
-
- MachineFunction *MF = LastMBB->getParent();
- const PPCSubtarget &Subtarget = MF->getTarget().getSubtarget<PPCSubtarget>();
- bool isPPC64 = Subtarget.isPPC64();
-
- const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
- const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
- const TargetRegisterClass *RC = isPPC64 ? G8RC : GPRC;
-
- unsigned CountReg;
- if (TripCount->isReg()) {
- // Create a copy of the loop count register.
- const TargetRegisterClass *SrcRC =
- MF->getRegInfo().getRegClass(TripCount->getReg());
- CountReg = MF->getRegInfo().createVirtualRegister(RC);
- unsigned CopyOp = (isPPC64 && GPRC->hasSubClassEq(SrcRC)) ?
- (unsigned) PPC::EXTSW_32_64 :
- (unsigned) TargetOpcode::COPY;
- BuildMI(*Preheader, InsertPos, dl,
- TII->get(CopyOp), CountReg).addReg(TripCount->getReg());
- if (TripCount->isNeg()) {
- unsigned CountReg1 = CountReg;
- CountReg = MF->getRegInfo().createVirtualRegister(RC);
- BuildMI(*Preheader, InsertPos, dl,
- TII->get(isPPC64 ? PPC::NEG8 : PPC::NEG),
- CountReg).addReg(CountReg1);
+ do {
+ MBB = Preds.pop_back_val();
+ if (!Visited.count(MBB)) {
+ I = MBB->getLastNonDebugInstr();
+ goto check_block;
}
- } else {
- assert(TripCount->isImm() && "Expecting immediate value for trip count");
- // Put the trip count in a register for transfer into the count register.
-
- int64_t CountImm = TripCount->getImm();
- if (TripCount->isNeg())
- CountImm = -CountImm;
-
- CountReg = MF->getRegInfo().createVirtualRegister(RC);
- if (abs64(CountImm) > 0x7FFF) {
- BuildMI(*Preheader, InsertPos, dl,
- TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS),
- CountReg).addImm((CountImm >> 16) & 0xFFFF);
- unsigned CountReg1 = CountReg;
- CountReg = MF->getRegInfo().createVirtualRegister(RC);
- BuildMI(*Preheader, InsertPos, dl,
- TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
- CountReg).addReg(CountReg1).addImm(CountImm & 0xFFFF);
- } else {
- BuildMI(*Preheader, InsertPos, dl,
- TII->get(isPPC64 ? PPC::LI8 : PPC::LI),
- CountReg).addImm(CountImm);
- }
- }
+ } while (!Preds.empty());
- // Add the mtctr instruction to the beginning of the loop.
- BuildMI(*Preheader, InsertPos, dl,
- TII->get(isPPC64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(CountReg,
- TripCount->isImm() ? RegState::Kill : 0);
-
- // Make sure the loop start always has a reference in the CFG. We need to
- // create a BlockAddress operand to get this mechanism to work; both the
- // MachineBasicBlock and BasicBlock objects need the flag set.
- LoopStart->setHasAddressTaken();
- // This line is needed to set the hasAddressTaken flag on the BasicBlock
- // object
- BlockAddress::get(const_cast<BasicBlock *>(LoopStart->getBasicBlock()));
-
- // Replace the loop branch with a bdnz instruction.
- dl = LastI->getDebugLoc();
- const std::vector<MachineBasicBlock*> Blocks = L->getBlocks();
- for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
- MachineBasicBlock *MBB = Blocks[i];
- if (MBB != Preheader)
- MBB->addLiveIn(isPPC64 ? PPC::CTR8 : PPC::CTR);
- }
+ return true;
+}
- // The loop ends with either:
- // - a conditional branch followed by an unconditional branch, or
- // - a conditional branch to the loop start.
- assert(LastI->getOpcode() == PPC::BCC &&
- "loop end must start with a BCC instruction");
- // Either the BCC branches to the beginning of the loop, or it
- // branches out of the loop and there is an unconditional branch
- // to the start of the loop.
- MachineBasicBlock *BranchTarget = LastI->getOperand(2).getMBB();
- BuildMI(*LastMBB, LastI, dl,
- TII->get((BranchTarget == LoopStart) ?
- (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
- (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(BranchTarget);
-
- // Conditional branch; just delete it.
- DEBUG(dbgs() << "Removing old branch: " << *LastI);
- LastMBB->erase(LastI);
-
- delete TripCount;
-
- // The induction operation (add) and the comparison (cmpwi) may now be
- // unneeded. If these are unneeded, then remove them.
- for (unsigned i = 0; i < OldInsts.size(); ++i)
- removeIfDead(OldInsts[i]);
+bool PPCCTRLoopsVerify::runOnMachineFunction(MachineFunction &MF) {
+ MDT = &getAnalysis<MachineDominatorTree>();
+
+ // Verify that all bdnz/bdz instructions are dominated by a loop mtctr before
+ // any other instructions that might clobber the ctr register.
+ for (MachineFunction::iterator I = MF.begin(), IE = MF.end();
+ I != IE; ++I) {
+ MachineBasicBlock *MBB = I;
+ if (!MDT->isReachableFromEntry(MBB))
+ continue;
+
+ for (MachineBasicBlock::iterator MII = MBB->getFirstTerminator(),
+ MIIE = MBB->end(); MII != MIIE; ++MII) {
+ unsigned Opc = MII->getOpcode();
+ if (Opc == PPC::BDNZ8 || Opc == PPC::BDNZ ||
+ Opc == PPC::BDZ8 || Opc == PPC::BDZ)
+ if (!verifyCTRBranch(MBB, MII))
+ llvm_unreachable("Invalid PPC CTR loop!");
+ }
+ }
- ++NumCTRLoops;
- return true;
+ return false;
}
+#endif // NDEBUG
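The new pass pair is exposed only through the two factory functions above (createPPCCTRLoops and createPPCCTRLoopsVerify), so the target's pass configuration is responsible for scheduling the IR-level rewrite before instruction selection and the NDEBUG-only machine verifier late in codegen. The sketch below shows one plausible wiring; the PPCPassConfig hook names, the EnableCTRLoops option, and the exact insertion points are illustrative assumptions, not part of this patch.

// Sketch only: scheduling the passes defined above from the target's
// TargetPassConfig (hook names and the EnableCTRLoops option are assumed).
bool PPCPassConfig::addPreISel() {
  // Rewrite countable loops on IR before ISel, so the ppc_mtctr and
  // ppc_is_decremented_ctr_nonzero intrinsics can be matched to the
  // CTR-setting and CTR-decrementing branch instructions.
  if (EnableCTRLoops && getOptLevel() != CodeGenOpt::None)
    addPass(createPPCCTRLoops(getPPCTargetMachine()));
  return false;
}

bool PPCPassConfig::addPreEmitPass() {
#ifndef NDEBUG
  // Late sanity check: every BDNZ/BDZ must be dominated by a loop MTCTR with
  // no intervening CTR clobber (see PPCCTRLoopsVerify above).
  if (EnableCTRLoops && getOptLevel() != CodeGenOpt::None)
    addPass(createPPCCTRLoopsVerify());
#endif
  return false;
}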
diff --git a/lib/Target/PowerPC/PPCCallingConv.td b/lib/Target/PowerPC/PPCCallingConv.td
index c8a29a3d2cfe..e8e7f4c2d226 100644
--- a/lib/Target/PowerPC/PPCCallingConv.td
+++ b/lib/Target/PowerPC/PPCCallingConv.td
@@ -37,6 +37,37 @@ def RetCC_PPC : CallingConv<[
]>;
+// Note that we don't currently have calling conventions for 64-bit
+// PowerPC, but handle all the complexities of the ABI in the lowering
+// logic. FIXME: See if the logic can be simplified with use of CCs.
+// This may require some extensions to current table generation.
+
+// Simple calling convention for 64-bit ELF PowerPC fast isel.
+// Only handle ints and floats. All ints are promoted to i64.
+// Vector types and quadword ints are not handled.
+def CC_PPC64_ELF_FIS : CallingConv<[
+ CCIfType<[i8], CCPromoteToType<i64>>,
+ CCIfType<[i16], CCPromoteToType<i64>>,
+ CCIfType<[i32], CCPromoteToType<i64>>,
+ CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,
+ CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>
+]>;
+
+// Simple return-value convention for 64-bit ELF PowerPC fast isel.
+// All small ints are promoted to i64. Vector types, quadword ints,
+// and multiple register returns are "supported" to avoid compile
+// errors, but none are handled by the fast selector.
+def RetCC_PPC64_ELF_FIS : CallingConv<[
+ CCIfType<[i8], CCPromoteToType<i64>>,
+ CCIfType<[i16], CCPromoteToType<i64>>,
+ CCIfType<[i32], CCPromoteToType<i64>>,
+ CCIfType<[i64], CCAssignToReg<[X3, X4]>>,
+ CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
+ CCIfType<[f32], CCAssignToReg<[F1, F2]>>,
+ CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4]>>,
+ CCIfType<[v16i8, v8i16, v4i32, v4f32], CCAssignToReg<[V2]>>
+]>;
+
//===----------------------------------------------------------------------===//
// PowerPC System V Release 4 32-bit ABI
//===----------------------------------------------------------------------===//
@@ -105,40 +136,45 @@ def CC_PPC32_SVR4_ByVal : CallingConv<[
CCCustom<"CC_PPC32_SVR4_Custom_Dummy">
]>;
+def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27,
+ V28, V29, V30, V31)>;
+
def CSR_Darwin32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
R21, R22, R23, R24, R25, R26, R27, R28,
R29, R30, R31, F14, F15, F16, F17, F18,
F19, F20, F21, F22, F23, F24, F25, F26,
- F27, F28, F29, F30, F31, CR2, CR3, CR4,
- V20, V21, V22, V23, V24, V25, V26, V27,
- V28, V29, V30, V31)>;
+ F27, F28, F29, F30, F31, CR2, CR3, CR4
+ )>;
+
+def CSR_Darwin32_Altivec : CalleeSavedRegs<(add CSR_Darwin32, CSR_Altivec)>;
-def CSR_SVR432 : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20, VRSAVE,
+def CSR_SVR432 : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20,
R21, R22, R23, R24, R25, R26, R27, R28,
R29, R30, R31, F14, F15, F16, F17, F18,
F19, F20, F21, F22, F23, F24, F25, F26,
- F27, F28, F29, F30, F31, CR2, CR3, CR4,
- V20, V21, V22, V23, V24, V25, V26, V27,
- V28, V29, V30, V31)>;
+ F27, F28, F29, F30, F31, CR2, CR3, CR4
+ )>;
+
+def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>;
def CSR_Darwin64 : CalleeSavedRegs<(add X13, X14, X15, X16, X17, X18, X19, X20,
X21, X22, X23, X24, X25, X26, X27, X28,
X29, X30, X31, F14, F15, F16, F17, F18,
F19, F20, F21, F22, F23, F24, F25, F26,
- F27, F28, F29, F30, F31, CR2, CR3, CR4,
- V20, V21, V22, V23, V24, V25, V26, V27,
- V28, V29, V30, V31)>;
+ F27, F28, F29, F30, F31, CR2, CR3, CR4
+ )>;
-def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20, VRSAVE,
+def CSR_Darwin64_Altivec : CalleeSavedRegs<(add CSR_Darwin64, CSR_Altivec)>;
+
+def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
X21, X22, X23, X24, X25, X26, X27, X28,
X29, X30, X31, F14, F15, F16, F17, F18,
F19, F20, F21, F22, F23, F24, F25, F26,
- F27, F28, F29, F30, F31, CR2, CR3, CR4,
- V20, V21, V22, V23, V24, V25, V26, V27,
- V28, V29, V30, V31)>;
+ F27, F28, F29, F30, F31, CR2, CR3, CR4
+ )>;
+
-def CSR_NoRegs : CalleeSavedRegs<(add VRSAVE)>;
-def CSR_NoRegs_Darwin : CalleeSavedRegs<(add)>;
+def CSR_SVR464_Altivec : CalleeSavedRegs<(add CSR_SVR464, CSR_Altivec)>;
-def CSR_NoRegs_Altivec : CalleeSavedRegs<(add (sequence "V%u", 0, 31), VRSAVE)>;
+def CSR_NoRegs : CalleeSavedRegs<(add)>;
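Factoring the Altivec registers into a separate CSR_Altivec set (and dropping VRSAVE from the base sets) lets the register-info code choose a callee-saved list that matches the subtarget instead of unconditionally treating V20-V31 as callee-saved. A rough sketch of the intended consumer follows; it relies on the CSR_<name>_SaveList arrays that TableGen generates from these definitions, and the exact PPCRegisterInfo code is not part of this hunk.

// Sketch: selecting between the plain and _Altivec callee-saved lists
// (generated as CSR_*_SaveList) based on ABI and subtarget features.
const uint16_t *
PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  if (Subtarget.isDarwinABI())
    return Subtarget.isPPC64()
               ? (Subtarget.hasAltivec() ? CSR_Darwin64_Altivec_SaveList
                                         : CSR_Darwin64_SaveList)
               : (Subtarget.hasAltivec() ? CSR_Darwin32_Altivec_SaveList
                                         : CSR_Darwin32_SaveList);

  return Subtarget.isPPC64()
             ? (Subtarget.hasAltivec() ? CSR_SVR464_Altivec_SaveList
                                       : CSR_SVR464_SaveList)
             : (Subtarget.hasAltivec() ? CSR_SVR432_Altivec_SaveList
                                       : CSR_SVR432_SaveList);
}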
diff --git a/lib/Target/PowerPC/PPCCodeEmitter.cpp b/lib/Target/PowerPC/PPCCodeEmitter.cpp
index 64787185138b..418736e21e86 100644
--- a/lib/Target/PowerPC/PPCCodeEmitter.cpp
+++ b/lib/Target/PowerPC/PPCCodeEmitter.cpp
@@ -63,12 +63,15 @@ namespace {
unsigned get_crbitm_encoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getDirectBrEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getCondBrEncoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getAbsDirectBrEncoding(const MachineInstr &MI,
+ unsigned OpNo) const;
+ unsigned getAbsCondBrEncoding(const MachineInstr &MI, unsigned OpNo) const;
- unsigned getHA16Encoding(const MachineInstr &MI, unsigned OpNo) const;
- unsigned getLO16Encoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getImm16Encoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getMemRIEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getMemRIXEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getTLSRegEncoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getTLSCallEncoding(const MachineInstr &MI, unsigned OpNo) const;
const char *getPassName() const { return "PowerPC Machine Code Emitter"; }
@@ -139,8 +142,8 @@ void PPCCodeEmitter::emitBasicBlock(MachineBasicBlock &MBB) {
unsigned PPCCodeEmitter::get_crbitm_encoding(const MachineInstr &MI,
unsigned OpNo) const {
const MachineOperand &MO = MI.getOperand(OpNo);
- assert((MI.getOpcode() == PPC::MTCRF || MI.getOpcode() == PPC::MTCRF8 ||
- MI.getOpcode() == PPC::MFOCRF) &&
+ assert((MI.getOpcode() == PPC::MTOCRF || MI.getOpcode() == PPC::MTOCRF8 ||
+ MI.getOpcode() == PPC::MFOCRF || MI.getOpcode() == PPC::MFOCRF8) &&
(MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7));
return 0x80 >> TM.getRegisterInfo()->getEncodingValue(MO.getReg());
}
@@ -194,21 +197,32 @@ unsigned PPCCodeEmitter::getCondBrEncoding(const MachineInstr &MI,
return 0;
}
-unsigned PPCCodeEmitter::getHA16Encoding(const MachineInstr &MI,
- unsigned OpNo) const {
+unsigned PPCCodeEmitter::getAbsDirectBrEncoding(const MachineInstr &MI,
+ unsigned OpNo) const {
const MachineOperand &MO = MI.getOperand(OpNo);
if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO);
- MCE.addRelocation(GetRelocation(MO, PPC::reloc_absolute_high));
- return 0;
+ llvm_unreachable("Absolute branch relocations unsupported on the old JIT.");
+}
+
+unsigned PPCCodeEmitter::getAbsCondBrEncoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Absolute branch relocations unsupported on the old JIT.");
}
-unsigned PPCCodeEmitter::getLO16Encoding(const MachineInstr &MI,
- unsigned OpNo) const {
+unsigned PPCCodeEmitter::getImm16Encoding(const MachineInstr &MI,
+ unsigned OpNo) const {
const MachineOperand &MO = MI.getOperand(OpNo);
if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO);
-
- MCE.addRelocation(GetRelocation(MO, PPC::reloc_absolute_low));
+
+ unsigned RelocID;
+ switch (MO.getTargetFlags() & PPCII::MO_ACCESS_MASK) {
+ default: llvm_unreachable("Unsupported target operand flags!");
+ case PPCII::MO_LO: RelocID = PPC::reloc_absolute_low; break;
+ case PPCII::MO_HA: RelocID = PPC::reloc_absolute_high; break;
+ }
+
+ MCE.addRelocation(GetRelocation(MO, RelocID));
return 0;
}
@@ -237,7 +251,7 @@ unsigned PPCCodeEmitter::getMemRIXEncoding(const MachineInstr &MI,
const MachineOperand &MO = MI.getOperand(OpNo);
if (MO.isImm())
- return (getMachineOpValue(MI, MO) & 0x3FFF) | RegBits;
+ return ((getMachineOpValue(MI, MO) >> 2) & 0x3FFF) | RegBits;
MCE.addRelocation(GetRelocation(MO, PPC::reloc_absolute_low_ix));
return RegBits;
@@ -250,15 +264,20 @@ unsigned PPCCodeEmitter::getTLSRegEncoding(const MachineInstr &MI,
return 0;
}
+unsigned PPCCodeEmitter::getTLSCallEncoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("TLS not supported on the old JIT.");
+ return 0;
+}
unsigned PPCCodeEmitter::getMachineOpValue(const MachineInstr &MI,
const MachineOperand &MO) const {
if (MO.isReg()) {
- // MTCRF/MFOCRF should go through get_crbitm_encoding for the CR operand.
+ // MTOCRF/MFOCRF should go through get_crbitm_encoding for the CR operand.
// The GPR operand should come through here though.
- assert((MI.getOpcode() != PPC::MTCRF && MI.getOpcode() != PPC::MTCRF8 &&
- MI.getOpcode() != PPC::MFOCRF) ||
+ assert((MI.getOpcode() != PPC::MTOCRF && MI.getOpcode() != PPC::MTOCRF8 &&
+ MI.getOpcode() != PPC::MFOCRF && MI.getOpcode() != PPC::MFOCRF8) ||
MO.getReg() < PPC::CR0 || MO.getReg() > PPC::CR7);
return TM.getRegisterInfo()->getEncodingValue(MO.getReg());
}
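One behavioural detail worth noting in the hunk above: getMemRIXEncoding now shifts the immediate right by two, i.e. DS-form memory operands are carried in the MachineInstr as byte offsets and only converted to the word-scaled 14-bit DS field at encoding time. A small illustrative helper (not part of the patch) makes the arithmetic explicit.

#include <cassert>
#include <cstdint>

// Sketch: DS-form displacement encoding with a byte offset as input.
static unsigned encodeDSDisplacement(int64_t ByteOffset) {
  assert((ByteOffset & 3) == 0 && "DS-form offsets must be 4-byte aligned");
  return (static_cast<uint64_t>(ByteOffset) >> 2) & 0x3FFF;
}
// For example, "ld r3, 8(r1)" carries an immediate of 8 and encodes
// encodeDSDisplacement(8) == 2 in the DS field.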
diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp
new file mode 100644
index 000000000000..09117e7ded49
--- /dev/null
+++ b/lib/Target/PowerPC/PPCFastISel.cpp
@@ -0,0 +1,2236 @@
+//===-- PPCFastISel.cpp - PowerPC FastISel implementation -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PowerPC-specific support for the FastISel class. Some
+// of the target-specific code is generated by tablegen in the file
+// PPCGenFastISel.inc, which is #included here.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "ppcfastisel"
+#include "PPC.h"
+#include "PPCISelLowering.h"
+#include "PPCSubtarget.h"
+#include "PPCTargetMachine.h"
+#include "MCTargetDesc/PPCPredicates.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+
+//===----------------------------------------------------------------------===//
+//
+// TBD:
+// FastLowerArguments: Handle simple cases.
+// PPCMaterializeGV: Handle TLS.
+// SelectCall: Handle function pointers.
+// SelectCall: Handle multi-register return values.
+// SelectCall: Optimize away nops for local calls.
+// processCallArgs: Handle bit-converted arguments.
+// finishCall: Handle multi-register return values.
+// PPCComputeAddress: Handle parameter references as FrameIndex's.
+// PPCEmitCmp: Handle immediate as operand 1.
+// SelectCall: Handle small byval arguments.
+// SelectIntrinsicCall: Implement.
+// SelectSelect: Implement.
+// Consider factoring isTypeLegal into the base class.
+// Implement switches and jump tables.
+//
+//===----------------------------------------------------------------------===//
+using namespace llvm;
+
+namespace {
+
+typedef struct Address {
+ enum {
+ RegBase,
+ FrameIndexBase
+ } BaseType;
+
+ union {
+ unsigned Reg;
+ int FI;
+ } Base;
+
+ long Offset;
+
+ // Innocuous defaults for our address.
+ Address()
+ : BaseType(RegBase), Offset(0) {
+ Base.Reg = 0;
+ }
+} Address;
+
+class PPCFastISel : public FastISel {
+
+ const TargetMachine &TM;
+ const TargetInstrInfo &TII;
+ const TargetLowering &TLI;
+ const PPCSubtarget &PPCSubTarget;
+ LLVMContext *Context;
+
+ public:
+ explicit PPCFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo)
+ : FastISel(FuncInfo, LibInfo),
+ TM(FuncInfo.MF->getTarget()),
+ TII(*TM.getInstrInfo()),
+ TLI(*TM.getTargetLowering()),
+ PPCSubTarget(
+ *((static_cast<const PPCTargetMachine *>(&TM))->getSubtargetImpl())
+ ),
+ Context(&FuncInfo.Fn->getContext()) { }
+
+ // Backend specific FastISel code.
+ private:
+ virtual bool TargetSelectInstruction(const Instruction *I);
+ virtual unsigned TargetMaterializeConstant(const Constant *C);
+ virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
+ virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI);
+ virtual bool FastLowerArguments();
+ virtual unsigned FastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm);
+ virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ uint64_t Imm);
+ virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill);
+ virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill);
+
+ // Instruction selection routines.
+ private:
+ bool SelectLoad(const Instruction *I);
+ bool SelectStore(const Instruction *I);
+ bool SelectBranch(const Instruction *I);
+ bool SelectIndirectBr(const Instruction *I);
+ bool SelectCmp(const Instruction *I);
+ bool SelectFPExt(const Instruction *I);
+ bool SelectFPTrunc(const Instruction *I);
+ bool SelectIToFP(const Instruction *I, bool IsSigned);
+ bool SelectFPToI(const Instruction *I, bool IsSigned);
+ bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
+ bool SelectCall(const Instruction *I);
+ bool SelectRet(const Instruction *I);
+ bool SelectTrunc(const Instruction *I);
+ bool SelectIntExt(const Instruction *I);
+
+ // Utility routines.
+ private:
+ bool isTypeLegal(Type *Ty, MVT &VT);
+ bool isLoadTypeLegal(Type *Ty, MVT &VT);
+ bool PPCEmitCmp(const Value *Src1Value, const Value *Src2Value,
+ bool isZExt, unsigned DestReg);
+ bool PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
+ const TargetRegisterClass *RC, bool IsZExt = true,
+ unsigned FP64LoadOpc = PPC::LFD);
+ bool PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr);
+ bool PPCComputeAddress(const Value *Obj, Address &Addr);
+ void PPCSimplifyAddress(Address &Addr, MVT VT, bool &UseOffset,
+ unsigned &IndexReg);
+ bool PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
+ unsigned DestReg, bool IsZExt);
+ unsigned PPCMaterializeFP(const ConstantFP *CFP, MVT VT);
+ unsigned PPCMaterializeGV(const GlobalValue *GV, MVT VT);
+ unsigned PPCMaterializeInt(const Constant *C, MVT VT);
+ unsigned PPCMaterialize32BitInt(int64_t Imm,
+ const TargetRegisterClass *RC);
+ unsigned PPCMaterialize64BitInt(int64_t Imm,
+ const TargetRegisterClass *RC);
+ unsigned PPCMoveToIntReg(const Instruction *I, MVT VT,
+ unsigned SrcReg, bool IsSigned);
+ unsigned PPCMoveToFPReg(MVT VT, unsigned SrcReg, bool IsSigned);
+
+ // Call handling routines.
+ private:
+ bool processCallArgs(SmallVectorImpl<Value*> &Args,
+ SmallVectorImpl<unsigned> &ArgRegs,
+ SmallVectorImpl<MVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
+ SmallVectorImpl<unsigned> &RegArgs,
+ CallingConv::ID CC,
+ unsigned &NumBytes,
+ bool IsVarArg);
+ void finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
+ const Instruction *I, CallingConv::ID CC,
+ unsigned &NumBytes, bool IsVarArg);
+ CCAssignFn *usePPC32CCs(unsigned Flag);
+
+ private:
+ #include "PPCGenFastISel.inc"
+
+};
+
+} // end anonymous namespace
+
+#include "PPCGenCallingConv.inc"
+
+// Function whose sole purpose is to kill compiler warnings
+// stemming from unused functions included from PPCGenCallingConv.inc.
+CCAssignFn *PPCFastISel::usePPC32CCs(unsigned Flag) {
+ if (Flag == 1)
+ return CC_PPC32_SVR4;
+ else if (Flag == 2)
+ return CC_PPC32_SVR4_ByVal;
+ else if (Flag == 3)
+ return CC_PPC32_SVR4_VarArg;
+ else
+ return RetCC_PPC;
+}
+
+static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
+ switch (Pred) {
+ // These are not representable with any single compare.
+ case CmpInst::FCMP_FALSE:
+ case CmpInst::FCMP_UEQ:
+ case CmpInst::FCMP_UGT:
+ case CmpInst::FCMP_UGE:
+ case CmpInst::FCMP_ULT:
+ case CmpInst::FCMP_ULE:
+ case CmpInst::FCMP_UNE:
+ case CmpInst::FCMP_TRUE:
+ default:
+ return Optional<PPC::Predicate>();
+
+ case CmpInst::FCMP_OEQ:
+ case CmpInst::ICMP_EQ:
+ return PPC::PRED_EQ;
+
+ case CmpInst::FCMP_OGT:
+ case CmpInst::ICMP_UGT:
+ case CmpInst::ICMP_SGT:
+ return PPC::PRED_GT;
+
+ case CmpInst::FCMP_OGE:
+ case CmpInst::ICMP_UGE:
+ case CmpInst::ICMP_SGE:
+ return PPC::PRED_GE;
+
+ case CmpInst::FCMP_OLT:
+ case CmpInst::ICMP_ULT:
+ case CmpInst::ICMP_SLT:
+ return PPC::PRED_LT;
+
+ case CmpInst::FCMP_OLE:
+ case CmpInst::ICMP_ULE:
+ case CmpInst::ICMP_SLE:
+ return PPC::PRED_LE;
+
+ case CmpInst::FCMP_ONE:
+ case CmpInst::ICMP_NE:
+ return PPC::PRED_NE;
+
+ case CmpInst::FCMP_ORD:
+ return PPC::PRED_NU;
+
+ case CmpInst::FCMP_UNO:
+ return PPC::PRED_UN;
+ }
+}
+
+// Determine whether the type Ty is simple enough to be handled by
+// fast-isel, and return its equivalent machine type in VT.
+// FIXME: Copied directly from ARM -- factor into base class?
+bool PPCFastISel::isTypeLegal(Type *Ty, MVT &VT) {
+ EVT Evt = TLI.getValueType(Ty, true);
+
+ // Only handle simple types.
+ if (Evt == MVT::Other || !Evt.isSimple()) return false;
+ VT = Evt.getSimpleVT();
+
+ // Handle all legal types, i.e. a register that will directly hold this
+ // value.
+ return TLI.isTypeLegal(VT);
+}
+
+// Determine whether the type Ty is simple enough to be handled by
+// fast-isel as a load target, and return its equivalent machine type in VT.
+bool PPCFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
+ if (isTypeLegal(Ty, VT)) return true;
+
+ // If this is a type that can be sign or zero-extended to a basic operation
+ // go ahead and accept it now.
+ if (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) {
+ return true;
+ }
+
+ return false;
+}
+
+// Given a value Obj, create an Address object Addr that represents its
+// address. Return false if we can't handle it.
+bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
+ const User *U = NULL;
+ unsigned Opcode = Instruction::UserOp1;
+ if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
+ // Don't walk into other basic blocks unless the object is an alloca from
+ // another block, otherwise it may not have a virtual register assigned.
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
+ Opcode = C->getOpcode();
+ U = C;
+ }
+
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::BitCast:
+ // Look through bitcasts.
+ return PPCComputeAddress(U->getOperand(0), Addr);
+ case Instruction::IntToPtr:
+ // Look past no-op inttoptrs.
+ if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
+ return PPCComputeAddress(U->getOperand(0), Addr);
+ break;
+ case Instruction::PtrToInt:
+ // Look past no-op ptrtoints.
+ if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
+ return PPCComputeAddress(U->getOperand(0), Addr);
+ break;
+ case Instruction::GetElementPtr: {
+ Address SavedAddr = Addr;
+ long TmpOffset = Addr.Offset;
+
+ // Iterate through the GEP folding the constants into offsets where
+ // we can.
+ gep_type_iterator GTI = gep_type_begin(U);
+ for (User::const_op_iterator II = U->op_begin() + 1, IE = U->op_end();
+ II != IE; ++II, ++GTI) {
+ const Value *Op = *II;
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ const StructLayout *SL = TD.getStructLayout(STy);
+ unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
+ TmpOffset += SL->getElementOffset(Idx);
+ } else {
+ uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
+ for (;;) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ TmpOffset += CI->getSExtValue() * S;
+ break;
+ }
+ if (canFoldAddIntoGEP(U, Op)) {
+ // A compatible add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ TmpOffset += CI->getSExtValue() * S;
+ // Iterate on the other operand.
+ Op = cast<AddOperator>(Op)->getOperand(0);
+ continue;
+ }
+ // Unsupported
+ goto unsupported_gep;
+ }
+ }
+ }
+
+ // Try to grab the base operand now.
+ Addr.Offset = TmpOffset;
+ if (PPCComputeAddress(U->getOperand(0), Addr)) return true;
+
+ // We failed, restore everything and try the other options.
+ Addr = SavedAddr;
+
+ unsupported_gep:
+ break;
+ }
+ case Instruction::Alloca: {
+ const AllocaInst *AI = cast<AllocaInst>(Obj);
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ Addr.BaseType = Address::FrameIndexBase;
+ Addr.Base.FI = SI->second;
+ return true;
+ }
+ break;
+ }
+ }
+
+ // FIXME: References to parameters fall through to the behavior
+ // below. They should be able to reference a frame index since
+ // they are stored to the stack, so we can get "ld rx, offset(r1)"
+ // instead of "addi ry, r1, offset / ld rx, 0(ry)". Obj will
+ // just contain the parameter. Try to handle this with a FI.
+
+ // Try to get this in a register if nothing else has worked.
+ if (Addr.Base.Reg == 0)
+ Addr.Base.Reg = getRegForValue(Obj);
+
+ // Prevent assignment of base register to X0, which is inappropriate
+ // for loads and stores alike.
+ if (Addr.Base.Reg != 0)
+ MRI.setRegClass(Addr.Base.Reg, &PPC::G8RC_and_G8RC_NOX0RegClass);
+
+ return Addr.Base.Reg != 0;
+}
+
+// Fix up some addresses that can't be used directly. For example, if
+// an offset won't fit in an instruction field, we may need to move it
+// into an index register.
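+ // For example, an offset of 0x12345 does not fit in the signed 16-bit
+ // displacement of a D-form instruction, so it is materialized into IndexReg
+ // and the caller switches to the corresponding X-form (e.g. LD -> LDX).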
+void PPCFastISel::PPCSimplifyAddress(Address &Addr, MVT VT, bool &UseOffset,
+ unsigned &IndexReg) {
+
+ // Check whether the offset fits in the instruction field.
+ if (!isInt<16>(Addr.Offset))
+ UseOffset = false;
+
+ // If this is a stack pointer and the offset needs to be simplified then
+ // put the alloca address into a register, set the base type back to
+ // register and continue. This should almost never happen.
+ if (!UseOffset && Addr.BaseType == Address::FrameIndexBase) {
+ unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDI8),
+ ResultReg).addFrameIndex(Addr.Base.FI).addImm(0);
+ Addr.Base.Reg = ResultReg;
+ Addr.BaseType = Address::RegBase;
+ }
+
+ if (!UseOffset) {
+ IntegerType *OffsetTy = ((VT == MVT::i32) ? Type::getInt32Ty(*Context)
+ : Type::getInt64Ty(*Context));
+ const ConstantInt *Offset =
+ ConstantInt::getSigned(OffsetTy, (int64_t)(Addr.Offset));
+ IndexReg = PPCMaterializeInt(Offset, MVT::i64);
+ assert(IndexReg && "Unexpected error in PPCMaterializeInt!");
+ }
+}
+
+// Emit a load instruction if possible, returning true if we succeeded,
+// otherwise false. See commentary below for how the register class of
+// the load is determined.
+bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
+ const TargetRegisterClass *RC,
+ bool IsZExt, unsigned FP64LoadOpc) {
+ unsigned Opc;
+ bool UseOffset = true;
+
+ // If ResultReg is given, it determines the register class of the load.
+ // Otherwise, RC is the register class to use. If the result of the
+ // load isn't anticipated in this block, both may be zero, in which
+ // case we must make a conservative guess. In particular, don't assign
+ // R0 or X0 to the result register, as the result may be used in a load,
+ // store, add-immediate, or isel that won't permit this. (Though
+ // perhaps the spill and reload of live-exit values would handle this?)
+ const TargetRegisterClass *UseRC =
+ (ResultReg ? MRI.getRegClass(ResultReg) :
+ (RC ? RC :
+ (VT == MVT::f64 ? &PPC::F8RCRegClass :
+ (VT == MVT::f32 ? &PPC::F4RCRegClass :
+ (VT == MVT::i64 ? &PPC::G8RC_and_G8RC_NOX0RegClass :
+ &PPC::GPRC_and_GPRC_NOR0RegClass)))));
+
+ bool Is32BitInt = UseRC->hasSuperClassEq(&PPC::GPRCRegClass);
+
+ switch (VT.SimpleTy) {
+ default: // e.g., vector types not handled
+ return false;
+ case MVT::i8:
+ Opc = Is32BitInt ? PPC::LBZ : PPC::LBZ8;
+ break;
+ case MVT::i16:
+ Opc = (IsZExt ?
+ (Is32BitInt ? PPC::LHZ : PPC::LHZ8) :
+ (Is32BitInt ? PPC::LHA : PPC::LHA8));
+ break;
+ case MVT::i32:
+ Opc = (IsZExt ?
+ (Is32BitInt ? PPC::LWZ : PPC::LWZ8) :
+ (Is32BitInt ? PPC::LWA_32 : PPC::LWA));
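+ // LWA is a DS-form instruction; its 16-bit displacement must be a
+ // multiple of 4, so fall back to the indexed form otherwise.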
+ if ((Opc == PPC::LWA || Opc == PPC::LWA_32) && ((Addr.Offset & 3) != 0))
+ UseOffset = false;
+ break;
+ case MVT::i64:
+ Opc = PPC::LD;
+ assert(UseRC->hasSuperClassEq(&PPC::G8RCRegClass) &&
+ "64-bit load with 32-bit target??");
+ UseOffset = ((Addr.Offset & 3) == 0);
+ break;
+ case MVT::f32:
+ Opc = PPC::LFS;
+ break;
+ case MVT::f64:
+ Opc = FP64LoadOpc;
+ break;
+ }
+
+ // If necessary, materialize the offset into a register and use
+ // the indexed form. Also handle stack pointers with special needs.
+ unsigned IndexReg = 0;
+ PPCSimplifyAddress(Addr, VT, UseOffset, IndexReg);
+ if (ResultReg == 0)
+ ResultReg = createResultReg(UseRC);
+
+ // Note: If we still have a frame index here, we know the offset is
+ // in range, as otherwise PPCSimplifyAddress would have converted it
+ // into a RegBase.
+ if (Addr.BaseType == Address::FrameIndexBase) {
+
+ MachineMemOperand *MMO =
+ FuncInfo.MF->getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(Addr.Base.FI, Addr.Offset),
+ MachineMemOperand::MOLoad, MFI.getObjectSize(Addr.Base.FI),
+ MFI.getObjectAlignment(Addr.Base.FI));
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO);
+
+ // Base reg with offset in range.
+ } else if (UseOffset) {
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ .addImm(Addr.Offset).addReg(Addr.Base.Reg);
+
+ // Indexed form.
+ } else {
+ // Get the RR opcode corresponding to the RI one. FIXME: It would be
+ // preferable to use the ImmToIdxMap from PPCRegisterInfo.cpp, but it
+ // is hard to get at.
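+ // (A D-form such as LWZ takes a register plus a 16-bit immediate; the
+ // corresponding X-form LWZX takes two registers instead.)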
+ switch (Opc) {
+ default: llvm_unreachable("Unexpected opcode!");
+ case PPC::LBZ: Opc = PPC::LBZX; break;
+ case PPC::LBZ8: Opc = PPC::LBZX8; break;
+ case PPC::LHZ: Opc = PPC::LHZX; break;
+ case PPC::LHZ8: Opc = PPC::LHZX8; break;
+ case PPC::LHA: Opc = PPC::LHAX; break;
+ case PPC::LHA8: Opc = PPC::LHAX8; break;
+ case PPC::LWZ: Opc = PPC::LWZX; break;
+ case PPC::LWZ8: Opc = PPC::LWZX8; break;
+ case PPC::LWA: Opc = PPC::LWAX; break;
+ case PPC::LWA_32: Opc = PPC::LWAX_32; break;
+ case PPC::LD: Opc = PPC::LDX; break;
+ case PPC::LFS: Opc = PPC::LFSX; break;
+ case PPC::LFD: Opc = PPC::LFDX; break;
+ }
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ .addReg(Addr.Base.Reg).addReg(IndexReg);
+ }
+
+ return true;
+}
+
+// Attempt to fast-select a load instruction.
+bool PPCFastISel::SelectLoad(const Instruction *I) {
+ // FIXME: No atomic loads are supported.
+ if (cast<LoadInst>(I)->isAtomic())
+ return false;
+
+ // Verify we have a legal type before going any further.
+ MVT VT;
+ if (!isLoadTypeLegal(I->getType(), VT))
+ return false;
+
+ // See if we can handle this address.
+ Address Addr;
+ if (!PPCComputeAddress(I->getOperand(0), Addr))
+ return false;
+
+ // Look at the currently assigned register for this instruction
+ // to determine the required register class. This is necessary
+ // to constrain RA from using R0/X0 when this is not legal.
+ unsigned AssignedReg = FuncInfo.ValueMap[I];
+ const TargetRegisterClass *RC =
+ AssignedReg ? MRI.getRegClass(AssignedReg) : 0;
+
+ unsigned ResultReg = 0;
+ if (!PPCEmitLoad(VT, ResultReg, Addr, RC))
+ return false;
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+// Emit a store instruction to store SrcReg at Addr.
+bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
+ assert(SrcReg && "Nothing to store!");
+ unsigned Opc;
+ bool UseOffset = true;
+
+ const TargetRegisterClass *RC = MRI.getRegClass(SrcReg);
+ bool Is32BitInt = RC->hasSuperClassEq(&PPC::GPRCRegClass);
+
+ switch (VT.SimpleTy) {
+ default: // e.g., vector types not handled
+ return false;
+ case MVT::i8:
+ Opc = Is32BitInt ? PPC::STB : PPC::STB8;
+ break;
+ case MVT::i16:
+ Opc = Is32BitInt ? PPC::STH : PPC::STH8;
+ break;
+ case MVT::i32:
+ assert(Is32BitInt && "Not GPRC for i32??");
+ Opc = PPC::STW;
+ break;
+ case MVT::i64:
+ Opc = PPC::STD;
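+ // STD is a DS-form instruction; its displacement must be a multiple of 4.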
+ UseOffset = ((Addr.Offset & 3) == 0);
+ break;
+ case MVT::f32:
+ Opc = PPC::STFS;
+ break;
+ case MVT::f64:
+ Opc = PPC::STFD;
+ break;
+ }
+
+ // If necessary, materialize the offset into a register and use
+ // the indexed form. Also handle stack pointers with special needs.
+ unsigned IndexReg = 0;
+ PPCSimplifyAddress(Addr, VT, UseOffset, IndexReg);
+
+ // Note: If we still have a frame index here, we know the offset is
+ // in range, as otherwise PPCSimplifyAddress would have converted it
+ // into a RegBase.
+ if (Addr.BaseType == Address::FrameIndexBase) {
+ MachineMemOperand *MMO =
+ FuncInfo.MF->getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(Addr.Base.FI, Addr.Offset),
+ MachineMemOperand::MOStore, MFI.getObjectSize(Addr.Base.FI),
+ MFI.getObjectAlignment(Addr.Base.FI));
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc)).addReg(SrcReg)
+ .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO);
+
+ // Base reg with offset in range.
+ } else if (UseOffset)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
+ .addReg(SrcReg).addImm(Addr.Offset).addReg(Addr.Base.Reg);
+
+ // Indexed form.
+ else {
+ // Get the RR opcode corresponding to the RI one. FIXME: It would be
+ // preferable to use the ImmToIdxMap from PPCRegisterInfo.cpp, but it
+ // is hard to get at.
+ switch (Opc) {
+ default: llvm_unreachable("Unexpected opcode!");
+ case PPC::STB: Opc = PPC::STBX; break;
+ case PPC::STH : Opc = PPC::STHX; break;
+ case PPC::STW : Opc = PPC::STWX; break;
+ case PPC::STB8: Opc = PPC::STBX8; break;
+ case PPC::STH8: Opc = PPC::STHX8; break;
+ case PPC::STW8: Opc = PPC::STWX8; break;
+ case PPC::STD: Opc = PPC::STDX; break;
+ case PPC::STFS: Opc = PPC::STFSX; break;
+ case PPC::STFD: Opc = PPC::STFDX; break;
+ }
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
+ .addReg(SrcReg).addReg(Addr.Base.Reg).addReg(IndexReg);
+ }
+
+ return true;
+}
+
+// Attempt to fast-select a store instruction.
+bool PPCFastISel::SelectStore(const Instruction *I) {
+ Value *Op0 = I->getOperand(0);
+ unsigned SrcReg = 0;
+
+ // FIXME: No atomic stores are supported.
+ if (cast<StoreInst>(I)->isAtomic())
+ return false;
+
+ // Verify we have a legal type before going any further.
+ MVT VT;
+ if (!isLoadTypeLegal(Op0->getType(), VT))
+ return false;
+
+ // Get the value to be stored into a register.
+ SrcReg = getRegForValue(Op0);
+ if (SrcReg == 0)
+ return false;
+
+ // See if we can handle this address.
+ Address Addr;
+ if (!PPCComputeAddress(I->getOperand(1), Addr))
+ return false;
+
+ if (!PPCEmitStore(VT, SrcReg, Addr))
+ return false;
+
+ return true;
+}
+
+// Attempt to fast-select a branch instruction.
+bool PPCFastISel::SelectBranch(const Instruction *I) {
+ const BranchInst *BI = cast<BranchInst>(I);
+ MachineBasicBlock *BrBB = FuncInfo.MBB;
+ MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
+ MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
+
+ // For now, just try the simplest case where it's fed by a compare.
+ if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
+ Optional<PPC::Predicate> OptPPCPred = getComparePred(CI->getPredicate());
+ if (!OptPPCPred)
+ return false;
+
+ PPC::Predicate PPCPred = OptPPCPred.getValue();
+
+ // Take advantage of fall-through opportunities.
+ if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
+ std::swap(TBB, FBB);
+ PPCPred = PPC::InvertPredicate(PPCPred);
+ }
+
+ unsigned CondReg = createResultReg(&PPC::CRRCRegClass);
+
+ if (!PPCEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
+ CondReg))
+ return false;
+
+ BuildMI(*BrBB, FuncInfo.InsertPt, DL, TII.get(PPC::BCC))
+ .addImm(PPCPred).addReg(CondReg).addMBB(TBB);
+ FastEmitBranch(FBB, DL);
+ FuncInfo.MBB->addSuccessor(TBB);
+ return true;
+
+ } else if (const ConstantInt *CI =
+ dyn_cast<ConstantInt>(BI->getCondition())) {
+ uint64_t Imm = CI->getZExtValue();
+ MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
+ FastEmitBranch(Target, DL);
+ return true;
+ }
+
+ // FIXME: ARM looks for a case where the block containing the compare
+ // has been split from the block containing the branch. If this happens,
+ // there is a vreg available containing the result of the compare. I'm
+ // not sure we can do much, as we've lost the predicate information with
+ // the compare instruction -- we have a 4-bit CR but don't know which bit
+ // to test here.
+ return false;
+}
+
+// Attempt to emit a compare of the two source values. Signed and unsigned
+// comparisons are supported. Return false if we can't handle it.
+bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
+ bool IsZExt, unsigned DestReg) {
+ Type *Ty = SrcValue1->getType();
+ EVT SrcEVT = TLI.getValueType(Ty, true);
+ if (!SrcEVT.isSimple())
+ return false;
+ MVT SrcVT = SrcEVT.getSimpleVT();
+
+ // See if operand 2 is an immediate encodeable in the compare.
+ // FIXME: Operands are not in canonical order at -O0, so an immediate
+ // operand in position 1 is a lost opportunity for now. We are
+ // similar to ARM in this regard.
+ long Imm = 0;
+ bool UseImm = false;
+
+ // Only 16-bit integer constants can be represented in compares for
+ // PowerPC. Others will be materialized into a register.
+ if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(SrcValue2)) {
+ if (SrcVT == MVT::i64 || SrcVT == MVT::i32 || SrcVT == MVT::i16 ||
+ SrcVT == MVT::i8 || SrcVT == MVT::i1) {
+ const APInt &CIVal = ConstInt->getValue();
+ Imm = (IsZExt) ? (long)CIVal.getZExtValue() : (long)CIVal.getSExtValue();
+ if ((IsZExt && isUInt<16>(Imm)) || (!IsZExt && isInt<16>(Imm)))
+ UseImm = true;
+ }
+ }
+
+ unsigned CmpOpc;
+ bool NeedsExt = false;
+ switch (SrcVT.SimpleTy) {
+ default: return false;
+ case MVT::f32:
+ CmpOpc = PPC::FCMPUS;
+ break;
+ case MVT::f64:
+ CmpOpc = PPC::FCMPUD;
+ break;
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ NeedsExt = true;
+ // Intentional fall-through.
+ case MVT::i32:
+ if (!UseImm)
+ CmpOpc = IsZExt ? PPC::CMPLW : PPC::CMPW;
+ else
+ CmpOpc = IsZExt ? PPC::CMPLWI : PPC::CMPWI;
+ break;
+ case MVT::i64:
+ if (!UseImm)
+ CmpOpc = IsZExt ? PPC::CMPLD : PPC::CMPD;
+ else
+ CmpOpc = IsZExt ? PPC::CMPLDI : PPC::CMPDI;
+ break;
+ }
+
+ unsigned SrcReg1 = getRegForValue(SrcValue1);
+ if (SrcReg1 == 0)
+ return false;
+
+ unsigned SrcReg2 = 0;
+ if (!UseImm) {
+ SrcReg2 = getRegForValue(SrcValue2);
+ if (SrcReg2 == 0)
+ return false;
+ }
+
+ if (NeedsExt) {
+ unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
+ if (!PPCEmitIntExt(SrcVT, SrcReg1, MVT::i32, ExtReg, IsZExt))
+ return false;
+ SrcReg1 = ExtReg;
+
+ if (!UseImm) {
+ unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
+ if (!PPCEmitIntExt(SrcVT, SrcReg2, MVT::i32, ExtReg, IsZExt))
+ return false;
+ SrcReg2 = ExtReg;
+ }
+ }
+
+ if (!UseImm)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc), DestReg)
+ .addReg(SrcReg1).addReg(SrcReg2);
+ else
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc), DestReg)
+ .addReg(SrcReg1).addImm(Imm);
+
+ return true;
+}
+
+// Attempt to fast-select a floating-point extend instruction.
+bool PPCFastISel::SelectFPExt(const Instruction *I) {
+ Value *Src = I->getOperand(0);
+ EVT SrcVT = TLI.getValueType(Src->getType(), true);
+ EVT DestVT = TLI.getValueType(I->getType(), true);
+
+ if (SrcVT != MVT::f32 || DestVT != MVT::f64)
+ return false;
+
+ unsigned SrcReg = getRegForValue(Src);
+ if (!SrcReg)
+ return false;
+
+ // No code is generated for a FP extend.
+ UpdateValueMap(I, SrcReg);
+ return true;
+}
+
+// Attempt to fast-select a floating-point truncate instruction.
+bool PPCFastISel::SelectFPTrunc(const Instruction *I) {
+ Value *Src = I->getOperand(0);
+ EVT SrcVT = TLI.getValueType(Src->getType(), true);
+ EVT DestVT = TLI.getValueType(I->getType(), true);
+
+ if (SrcVT != MVT::f64 || DestVT != MVT::f32)
+ return false;
+
+ unsigned SrcReg = getRegForValue(Src);
+ if (!SrcReg)
+ return false;
+
+ // Round the result to single precision.
+ unsigned DestReg = createResultReg(&PPC::F4RCRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::FRSP), DestReg)
+ .addReg(SrcReg);
+
+ UpdateValueMap(I, DestReg);
+ return true;
+}
+
+// Move an i32 or i64 value in a GPR to an f64 value in an FPR.
+// FIXME: When direct register moves are implemented (see PowerISA 2.08),
+// those should be used instead of moving via a stack slot when the
+// subtarget permits.
+ // FIXME: The code here is sloppy for the 4-byte case. It could use a 4-byte
+ // stack slot with a 4-byte store/load sequence, or just sign-extend the
+ // 4-byte case to 8 bytes, which produces tighter code but wastes stack space.
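+ // The sequence generated below is roughly "std SrcReg, slot(r1)" followed by
+ // a reload of the same stack slot with lfd, lfiwax, or lfiwzx.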
+unsigned PPCFastISel::PPCMoveToFPReg(MVT SrcVT, unsigned SrcReg,
+ bool IsSigned) {
+
+ // If necessary, extend 32-bit int to 64-bit.
+ if (SrcVT == MVT::i32) {
+ unsigned TmpReg = createResultReg(&PPC::G8RCRegClass);
+ if (!PPCEmitIntExt(MVT::i32, SrcReg, MVT::i64, TmpReg, !IsSigned))
+ return 0;
+ SrcReg = TmpReg;
+ }
+
+ // Get a stack slot 8 bytes wide, aligned on an 8-byte boundary.
+ Address Addr;
+ Addr.BaseType = Address::FrameIndexBase;
+ Addr.Base.FI = MFI.CreateStackObject(8, 8, false);
+
+ // Store the value from the GPR.
+ if (!PPCEmitStore(MVT::i64, SrcReg, Addr))
+ return 0;
+
+ // Load the integer value into an FPR. The kind of load used depends
+ // on a number of conditions.
+ unsigned LoadOpc = PPC::LFD;
+
+ if (SrcVT == MVT::i32) {
+ Addr.Offset = 4;
+ if (!IsSigned)
+ LoadOpc = PPC::LFIWZX;
+ else if (PPCSubTarget.hasLFIWAX())
+ LoadOpc = PPC::LFIWAX;
+ }
+
+ const TargetRegisterClass *RC = &PPC::F8RCRegClass;
+ unsigned ResultReg = 0;
+ if (!PPCEmitLoad(MVT::f64, ResultReg, Addr, RC, !IsSigned, LoadOpc))
+ return 0;
+
+ return ResultReg;
+}
+
+// Attempt to fast-select an integer-to-floating-point conversion.
+bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
+ MVT DstVT;
+ Type *DstTy = I->getType();
+ if (!isTypeLegal(DstTy, DstVT))
+ return false;
+
+ if (DstVT != MVT::f32 && DstVT != MVT::f64)
+ return false;
+
+ Value *Src = I->getOperand(0);
+ EVT SrcEVT = TLI.getValueType(Src->getType(), true);
+ if (!SrcEVT.isSimple())
+ return false;
+
+ MVT SrcVT = SrcEVT.getSimpleVT();
+
+ if (SrcVT != MVT::i8 && SrcVT != MVT::i16 &&
+ SrcVT != MVT::i32 && SrcVT != MVT::i64)
+ return false;
+
+ unsigned SrcReg = getRegForValue(Src);
+ if (SrcReg == 0)
+ return false;
+
+ // We can only lower an unsigned convert if we have the newer
+ // floating-point conversion operations.
+ if (!IsSigned && !PPCSubTarget.hasFPCVT())
+ return false;
+
+ // FIXME: For now we require the newer floating-point conversion operations
+ // (which are present only on P7 and A2 server models) when converting
+ // to single-precision float. Otherwise we have to generate a lot of
+ // fiddly code to avoid double rounding. If necessary, the fiddly code
+ // can be found in PPCTargetLowering::LowerINT_TO_FP().
+ if (DstVT == MVT::f32 && !PPCSubTarget.hasFPCVT())
+ return false;
+
+ // Extend the input if necessary.
+ if (SrcVT == MVT::i8 || SrcVT == MVT::i16) {
+ unsigned TmpReg = createResultReg(&PPC::G8RCRegClass);
+ if (!PPCEmitIntExt(SrcVT, SrcReg, MVT::i64, TmpReg, !IsSigned))
+ return false;
+ SrcVT = MVT::i64;
+ SrcReg = TmpReg;
+ }
+
+ // Move the integer value to an FPR.
+ unsigned FPReg = PPCMoveToFPReg(SrcVT, SrcReg, IsSigned);
+ if (FPReg == 0)
+ return false;
+
+ // Determine the opcode for the conversion.
+ const TargetRegisterClass *RC = &PPC::F8RCRegClass;
+ unsigned DestReg = createResultReg(RC);
+ unsigned Opc;
+
+ if (DstVT == MVT::f32)
+ Opc = IsSigned ? PPC::FCFIDS : PPC::FCFIDUS;
+ else
+ Opc = IsSigned ? PPC::FCFID : PPC::FCFIDU;
+
+ // Generate the convert.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ .addReg(FPReg);
+
+ UpdateValueMap(I, DestReg);
+ return true;
+}
+
+// Move the floating-point value in SrcReg into an integer destination
+// register, and return the register (or zero if we can't handle it).
+// FIXME: When direct register moves are implemented (see PowerISA 2.08),
+// those should be used instead of moving via a stack slot when the
+// subtarget permits.
+unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT,
+ unsigned SrcReg, bool IsSigned) {
+ // Get a stack slot 8 bytes wide, aligned on an 8-byte boundary.
+ // Note that if we have STFIWX available, we could use a 4-byte stack
+ // slot for i32, but this being fast-isel we'll just go with the
+ // easiest code gen possible.
+ Address Addr;
+ Addr.BaseType = Address::FrameIndexBase;
+ Addr.Base.FI = MFI.CreateStackObject(8, 8, false);
+
+ // Store the value from the FPR.
+ if (!PPCEmitStore(MVT::f64, SrcReg, Addr))
+ return 0;
+
+ // Reload it into a GPR. If we want an i32, modify the address
+ // to have a 4-byte offset so we load from the right place.
+ if (VT == MVT::i32)
+ Addr.Offset = 4;
+
+ // Look at the currently assigned register for this instruction
+ // to determine the required register class.
+ unsigned AssignedReg = FuncInfo.ValueMap[I];
+ const TargetRegisterClass *RC =
+ AssignedReg ? MRI.getRegClass(AssignedReg) : 0;
+
+ unsigned ResultReg = 0;
+ if (!PPCEmitLoad(VT, ResultReg, Addr, RC, !IsSigned))
+ return 0;
+
+ return ResultReg;
+}
+
+// Attempt to fast-select a floating-point-to-integer conversion.
+bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
+ MVT DstVT, SrcVT;
+ Type *DstTy = I->getType();
+ if (!isTypeLegal(DstTy, DstVT))
+ return false;
+
+ if (DstVT != MVT::i32 && DstVT != MVT::i64)
+ return false;
+
+ Value *Src = I->getOperand(0);
+ Type *SrcTy = Src->getType();
+ if (!isTypeLegal(SrcTy, SrcVT))
+ return false;
+
+ if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
+ return false;
+
+ unsigned SrcReg = getRegForValue(Src);
+ if (SrcReg == 0)
+ return false;
+
+ // Convert f32 to f64 if necessary. This is just a meaningless copy
+ // to get the register class right. COPY_TO_REGCLASS is needed since
+ // a COPY from F4RC to F8RC is converted to a F4RC-F4RC copy downstream.
+ const TargetRegisterClass *InRC = MRI.getRegClass(SrcReg);
+ if (InRC == &PPC::F4RCRegClass) {
+ unsigned TmpReg = createResultReg(&PPC::F8RCRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::COPY_TO_REGCLASS), TmpReg)
+ .addReg(SrcReg).addImm(PPC::F8RCRegClassID);
+ SrcReg = TmpReg;
+ }
+
+ // Determine the opcode for the conversion, which takes place
+ // entirely within FPRs.
+ unsigned DestReg = createResultReg(&PPC::F8RCRegClass);
+ unsigned Opc;
+
+ if (DstVT == MVT::i32)
+ if (IsSigned)
+ Opc = PPC::FCTIWZ;
+ else
+ Opc = PPCSubTarget.hasFPCVT() ? PPC::FCTIWUZ : PPC::FCTIDZ;
+ else
+ Opc = IsSigned ? PPC::FCTIDZ : PPC::FCTIDUZ;
+
+ // Generate the convert.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ .addReg(SrcReg);
+
+ // Now move the integer value from a float register to an integer register.
+ unsigned IntReg = PPCMoveToIntReg(I, DstVT, DestReg, IsSigned);
+ if (IntReg == 0)
+ return false;
+
+ UpdateValueMap(I, IntReg);
+ return true;
+}
+
+// Attempt to fast-select a binary integer operation that isn't already
+// handled automatically.
+bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
+ EVT DestVT = TLI.getValueType(I->getType(), true);
+
+ // We can get here in the case when we have a binary operation on a non-legal
+ // type and the target independent selector doesn't know how to handle it.
+ if (DestVT != MVT::i16 && DestVT != MVT::i8)
+ return false;
+
+ // Look at the currently assigned register for this instruction
+ // to determine the required register class. If there is no register,
+ // make a conservative choice (don't assign R0).
+ unsigned AssignedReg = FuncInfo.ValueMap[I];
+ const TargetRegisterClass *RC =
+ (AssignedReg ? MRI.getRegClass(AssignedReg) :
+ &PPC::GPRC_and_GPRC_NOR0RegClass);
+ bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);
+
+ unsigned Opc;
+ switch (ISDOpcode) {
+ default: return false;
+ case ISD::ADD:
+ Opc = IsGPRC ? PPC::ADD4 : PPC::ADD8;
+ break;
+ case ISD::OR:
+ Opc = IsGPRC ? PPC::OR : PPC::OR8;
+ break;
+ case ISD::SUB:
+ Opc = IsGPRC ? PPC::SUBF : PPC::SUBF8;
+ break;
+ }
+
+ unsigned ResultReg = createResultReg(RC ? RC : &PPC::G8RCRegClass);
+ unsigned SrcReg1 = getRegForValue(I->getOperand(0));
+ if (SrcReg1 == 0) return false;
+
+ // Handle case of small immediate operand.
+ if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ const APInt &CIVal = ConstInt->getValue();
+ int Imm = (int)CIVal.getSExtValue();
+ bool UseImm = true;
+ if (isInt<16>(Imm)) {
+ switch (Opc) {
+ default:
+ llvm_unreachable("Missing case!");
+ case PPC::ADD4:
+ Opc = PPC::ADDI;
+ MRI.setRegClass(SrcReg1, &PPC::GPRC_and_GPRC_NOR0RegClass);
+ break;
+ case PPC::ADD8:
+ Opc = PPC::ADDI8;
+ MRI.setRegClass(SrcReg1, &PPC::G8RC_and_G8RC_NOX0RegClass);
+ break;
+ case PPC::OR:
+ Opc = PPC::ORI;
+ break;
+ case PPC::OR8:
+ Opc = PPC::ORI8;
+ break;
+ case PPC::SUBF:
+ if (Imm == -32768)
+ UseImm = false;
+ else {
+ Opc = PPC::ADDI;
+ MRI.setRegClass(SrcReg1, &PPC::GPRC_and_GPRC_NOR0RegClass);
+ Imm = -Imm;
+ }
+ break;
+ case PPC::SUBF8:
+ if (Imm == -32768)
+ UseImm = false;
+ else {
+ Opc = PPC::ADDI8;
+ MRI.setRegClass(SrcReg1, &PPC::G8RC_and_G8RC_NOX0RegClass);
+ Imm = -Imm;
+ }
+ break;
+ }
+
+ if (UseImm) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ .addReg(SrcReg1).addImm(Imm);
+ UpdateValueMap(I, ResultReg);
+ return true;
+ }
+ }
+ }
+
+ // Reg-reg case.
+ unsigned SrcReg2 = getRegForValue(I->getOperand(1));
+ if (SrcReg2 == 0) return false;
+
+ // Reverse operands for subtract-from.
+ if (ISDOpcode == ISD::SUB)
+ std::swap(SrcReg1, SrcReg2);
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ .addReg(SrcReg1).addReg(SrcReg2);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+// Handle arguments to a call that we're attempting to fast-select.
+// Return false if the arguments are too complex for us at the moment.
+bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
+ SmallVectorImpl<unsigned> &ArgRegs,
+ SmallVectorImpl<MVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
+ SmallVectorImpl<unsigned> &RegArgs,
+ CallingConv::ID CC,
+ unsigned &NumBytes,
+ bool IsVarArg) {
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
+ CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);
+
+ // Bail out if we can't handle any of the arguments.
+ for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
+ CCValAssign &VA = ArgLocs[I];
+ MVT ArgVT = ArgVTs[VA.getValNo()];
+
+ // Skip vector arguments for now, as well as long double and
+ // uint128_t, and anything that isn't passed in a register.
+ if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64 ||
+ !VA.isRegLoc() || VA.needsCustom())
+ return false;
+
+ // Skip bit-converted arguments for now.
+ if (VA.getLocInfo() == CCValAssign::BCvt)
+ return false;
+ }
+
+ // Get a count of how many bytes are to be pushed onto the stack.
+ NumBytes = CCInfo.getNextStackOffset();
+
+ // Issue CALLSEQ_START.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TII.getCallFrameSetupOpcode()))
+ .addImm(NumBytes);
+
+ // Prepare to assign register arguments. Every argument uses up a
+ // GPR protocol register even if it's passed in a floating-point
+ // register.
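+ // (For example, a double passed in F1 still consumes X3 under the 64-bit
+ // ELF parameter passing rules.)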
+ unsigned NextGPR = PPC::X3;
+ unsigned NextFPR = PPC::F1;
+
+ // Process arguments.
+ for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
+ CCValAssign &VA = ArgLocs[I];
+ unsigned Arg = ArgRegs[VA.getValNo()];
+ MVT ArgVT = ArgVTs[VA.getValNo()];
+
+ // Handle argument promotion and bitcasts.
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::SExt: {
+ MVT DestVT = VA.getLocVT();
+ const TargetRegisterClass *RC =
+ (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
+ unsigned TmpReg = createResultReg(RC);
+ if (!PPCEmitIntExt(ArgVT, Arg, DestVT, TmpReg, /*IsZExt*/false))
+ llvm_unreachable("Failed to emit a sext!");
+ ArgVT = DestVT;
+ Arg = TmpReg;
+ break;
+ }
+ case CCValAssign::AExt:
+ case CCValAssign::ZExt: {
+ MVT DestVT = VA.getLocVT();
+ const TargetRegisterClass *RC =
+ (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
+ unsigned TmpReg = createResultReg(RC);
+ if (!PPCEmitIntExt(ArgVT, Arg, DestVT, TmpReg, /*IsZExt*/true))
+ llvm_unreachable("Failed to emit a zext!");
+ ArgVT = DestVT;
+ Arg = TmpReg;
+ break;
+ }
+ case CCValAssign::BCvt: {
+ // FIXME: Not yet handled.
+ llvm_unreachable("Should have bailed before getting here!");
+ break;
+ }
+ }
+
+ // Copy this argument to the appropriate register.
+ unsigned ArgReg;
+ if (ArgVT == MVT::f32 || ArgVT == MVT::f64) {
+ ArgReg = NextFPR++;
+ ++NextGPR;
+ } else
+ ArgReg = NextGPR++;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ArgReg).addReg(Arg);
+ RegArgs.push_back(ArgReg);
+ }
+
+ return true;
+}
+
+// For a call that we've determined we can fast-select, finish the
+// call sequence and generate a copy to obtain the return value (if any).
+void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
+ const Instruction *I, CallingConv::ID CC,
+ unsigned &NumBytes, bool IsVarArg) {
+ // Issue CALLSEQ_END.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TII.getCallFrameDestroyOpcode()))
+ .addImm(NumBytes).addImm(0);
+
+ // Next, generate a copy to obtain the return value.
+ // FIXME: No multi-register return values yet, though I don't foresee
+ // any real difficulties there.
+ if (RetVT != MVT::isVoid) {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
+ CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
+ CCValAssign &VA = RVLocs[0];
+ assert(RVLocs.size() == 1 && "No support for multi-reg return values!");
+ assert(VA.isRegLoc() && "Can only return in registers!");
+
+ MVT DestVT = VA.getValVT();
+ MVT CopyVT = DestVT;
+
+ // Ints smaller than a register still arrive in a full 64-bit
+ // register, so make sure we recognize this.
+ if (RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32)
+ CopyVT = MVT::i64;
+
+ unsigned SourcePhysReg = VA.getLocReg();
+ unsigned ResultReg = 0;
+
+ if (RetVT == CopyVT) {
+ const TargetRegisterClass *CpyRC = TLI.getRegClassFor(CopyVT);
+ ResultReg = createResultReg(CpyRC);
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(SourcePhysReg);
+
+ // If necessary, round the floating result to single precision.
+ } else if (CopyVT == MVT::f64) {
+ ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::FRSP),
+ ResultReg).addReg(SourcePhysReg);
+
+ // If only the low half of a general register is needed, generate
+ // a GPRC copy instead of a G8RC copy. (EXTRACT_SUBREG can't be
+ // used along the fast-isel path (not lowered), and downstream logic
+ // also doesn't like a direct subreg copy on a physical reg.)
+ } else if (RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32) {
+ ResultReg = createResultReg(&PPC::GPRCRegClass);
+ // Convert physical register from G8RC to GPRC.
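+ // (This assumes the X<n> and R<n> register enumerators are laid out in
+ // parallel, so subtracting the distance from X0 to R0 maps X<n> onto R<n>.)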
+ SourcePhysReg -= PPC::X0 - PPC::R0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(SourcePhysReg);
+ }
+
+ assert(ResultReg && "ResultReg unset!");
+ UsedRegs.push_back(SourcePhysReg);
+ UpdateValueMap(I, ResultReg);
+ }
+}
+
+// Attempt to fast-select a call instruction.
+bool PPCFastISel::SelectCall(const Instruction *I) {
+ const CallInst *CI = cast<CallInst>(I);
+ const Value *Callee = CI->getCalledValue();
+
+ // Can't handle inline asm.
+ if (isa<InlineAsm>(Callee))
+ return false;
+
+ // Allow SelectionDAG isel to handle tail calls.
+ if (CI->isTailCall())
+ return false;
+
+ // Obtain calling convention.
+ ImmutableCallSite CS(CI);
+ CallingConv::ID CC = CS.getCallingConv();
+
+ PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ bool IsVarArg = FTy->isVarArg();
+
+ // Not ready for varargs yet.
+ if (IsVarArg)
+ return false;
+
+ // Handle simple calls for now, with legal return types and
+ // those that can be extended.
+ Type *RetTy = I->getType();
+ MVT RetVT;
+ if (RetTy->isVoidTy())
+ RetVT = MVT::isVoid;
+ else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
+ RetVT != MVT::i8)
+ return false;
+
+ // FIXME: No multi-register return values yet.
+ if (RetVT != MVT::isVoid && RetVT != MVT::i8 && RetVT != MVT::i16 &&
+ RetVT != MVT::i32 && RetVT != MVT::i64 && RetVT != MVT::f32 &&
+ RetVT != MVT::f64) {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
+ CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
+ if (RVLocs.size() > 1)
+ return false;
+ }
+
+ // Bail early if more than 8 arguments, as we only currently
+ // handle arguments passed in registers.
+ unsigned NumArgs = CS.arg_size();
+ if (NumArgs > 8)
+ return false;
+
+ // Set up the argument vectors.
+ SmallVector<Value*, 8> Args;
+ SmallVector<unsigned, 8> ArgRegs;
+ SmallVector<MVT, 8> ArgVTs;
+ SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
+
+ Args.reserve(NumArgs);
+ ArgRegs.reserve(NumArgs);
+ ArgVTs.reserve(NumArgs);
+ ArgFlags.reserve(NumArgs);
+
+ for (ImmutableCallSite::arg_iterator II = CS.arg_begin(), IE = CS.arg_end();
+ II != IE; ++II) {
+ // FIXME: ARM does something for intrinsic calls here, check into that.
+
+ unsigned AttrIdx = II - CS.arg_begin() + 1;
+
+ // Only handle easy calls for now. It would be reasonably easy
+ // to handle <= 8-byte structures passed ByVal in registers, but we
+ // have to ensure they are right-justified in the register.
+ if (CS.paramHasAttr(AttrIdx, Attribute::InReg) ||
+ CS.paramHasAttr(AttrIdx, Attribute::StructRet) ||
+ CS.paramHasAttr(AttrIdx, Attribute::Nest) ||
+ CS.paramHasAttr(AttrIdx, Attribute::ByVal))
+ return false;
+
+ ISD::ArgFlagsTy Flags;
+ if (CS.paramHasAttr(AttrIdx, Attribute::SExt))
+ Flags.setSExt();
+ if (CS.paramHasAttr(AttrIdx, Attribute::ZExt))
+ Flags.setZExt();
+
+ Type *ArgTy = (*II)->getType();
+ MVT ArgVT;
+ if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8)
+ return false;
+
+ if (ArgVT.isVector())
+ return false;
+
+ unsigned Arg = getRegForValue(*II);
+ if (Arg == 0)
+ return false;
+
+ unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ Flags.setOrigAlign(OriginalAlignment);
+
+ Args.push_back(*II);
+ ArgRegs.push_back(Arg);
+ ArgVTs.push_back(ArgVT);
+ ArgFlags.push_back(Flags);
+ }
+
+ // Process the arguments.
+ SmallVector<unsigned, 8> RegArgs;
+ unsigned NumBytes;
+
+ if (!processCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
+ RegArgs, CC, NumBytes, IsVarArg))
+ return false;
+
+ // FIXME: No handling for function pointers yet. This requires
+ // implementing the function descriptor (OPD) setup.
+ const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
+ if (!GV)
+ return false;
+
+ // Build direct call with NOP for TOC restore.
+ // FIXME: We can and should optimize away the NOP for local calls.
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(PPC::BL8_NOP));
+ // Add callee.
+ MIB.addGlobalAddress(GV);
+
+ // Add implicit physical register uses to the call.
+ for (unsigned II = 0, IE = RegArgs.size(); II != IE; ++II)
+ MIB.addReg(RegArgs[II], RegState::Implicit);
+
+ // Add a register mask with the call-preserved registers. Proper
+ // defs for return values will be added by setPhysRegsDeadExcept().
+ MIB.addRegMask(TRI.getCallPreservedMask(CC));
+
+ // Finish off the call including any return values.
+ SmallVector<unsigned, 4> UsedRegs;
+ finishCall(RetVT, UsedRegs, I, CC, NumBytes, IsVarArg);
+
+ // Set all unused physregs defs as dead.
+ static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
+
+ return true;
+}
+
+// Attempt to fast-select a return instruction.
+bool PPCFastISel::SelectRet(const Instruction *I) {
+
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+ const Function &F = *I->getParent()->getParent();
+
+ // Build a list of return value registers.
+ SmallVector<unsigned, 4> RetRegs;
+ CallingConv::ID CC = F.getCallingConv();
+
+ if (Ret->getNumOperands() > 0) {
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ValLocs;
+ CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, *Context);
+ CCInfo.AnalyzeReturn(Outs, RetCC_PPC64_ELF_FIS);
+ const Value *RV = Ret->getOperand(0);
+
+ // FIXME: Only one output register for now.
+ if (ValLocs.size() > 1)
+ return false;
+
+ // Special case for returning a constant integer of any size.
+ // Materialize the constant as an i64 and copy it to the return
+ // register. This avoids an unnecessary extend or truncate.
+ if (isa<ConstantInt>(*RV)) {
+ const Constant *C = cast<Constant>(RV);
+ unsigned SrcReg = PPCMaterializeInt(C, MVT::i64);
+ unsigned RetReg = ValLocs[0].getLocReg();
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ RetReg).addReg(SrcReg);
+ RetRegs.push_back(RetReg);
+
+ } else {
+ unsigned Reg = getRegForValue(RV);
+
+ if (Reg == 0)
+ return false;
+
+ // Copy the result values into the output registers.
+ for (unsigned i = 0; i < ValLocs.size(); ++i) {
+
+ CCValAssign &VA = ValLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+ RetRegs.push_back(VA.getLocReg());
+ unsigned SrcReg = Reg + VA.getValNo();
+
+ EVT RVEVT = TLI.getValueType(RV->getType());
+ if (!RVEVT.isSimple())
+ return false;
+ MVT RVVT = RVEVT.getSimpleVT();
+ MVT DestVT = VA.getLocVT();
+
+ if (RVVT != DestVT && RVVT != MVT::i8 &&
+ RVVT != MVT::i16 && RVVT != MVT::i32)
+ return false;
+
+ if (RVVT != DestVT) {
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
+ llvm_unreachable("Full value assign but types don't match?");
+ case CCValAssign::AExt:
+ case CCValAssign::ZExt: {
+ const TargetRegisterClass *RC =
+ (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
+ unsigned TmpReg = createResultReg(RC);
+ if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, true))
+ return false;
+ SrcReg = TmpReg;
+ break;
+ }
+ case CCValAssign::SExt: {
+ const TargetRegisterClass *RC =
+ (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
+ unsigned TmpReg = createResultReg(RC);
+ if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, false))
+ return false;
+ SrcReg = TmpReg;
+ break;
+ }
+ }
+ }
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::COPY), RetRegs[i])
+ .addReg(SrcReg);
+ }
+ }
+ }
+
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(PPC::BLR));
+
+ for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
+ MIB.addReg(RetRegs[i], RegState::Implicit);
+
+ return true;
+}
+
+// Attempt to emit an integer extend of SrcReg into DestReg. Both
+// signed and zero extensions are supported. Return false if we
+// can't handle it.
+bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
+ unsigned DestReg, bool IsZExt) {
+ if (DestVT != MVT::i32 && DestVT != MVT::i64)
+ return false;
+ if (SrcVT != MVT::i8 && SrcVT != MVT::i16 && SrcVT != MVT::i32)
+ return false;
+
+ // Signed extensions use EXTSB, EXTSH, EXTSW.
+ if (!IsZExt) {
+ unsigned Opc;
+ if (SrcVT == MVT::i8)
+ Opc = (DestVT == MVT::i32) ? PPC::EXTSB : PPC::EXTSB8_32_64;
+ else if (SrcVT == MVT::i16)
+ Opc = (DestVT == MVT::i32) ? PPC::EXTSH : PPC::EXTSH8_32_64;
+ else {
+ assert(DestVT == MVT::i64 && "Signed extend from i32 to i32??");
+ Opc = PPC::EXTSW_32_64;
+ }
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ .addReg(SrcReg);
+
+ // Unsigned 32-bit extensions use RLWINM.
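+ // For example, with MB = 24 this emits "rlwinm Rd, Rs, 0, 24, 31", which
+ // keeps only the low byte (bits 24-31 in PPC bit numbering) and clears the
+ // rest, i.e. a zero extension from i8.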
+ } else if (DestVT == MVT::i32) {
+ unsigned MB;
+ if (SrcVT == MVT::i8)
+ MB = 24;
+ else {
+ assert(SrcVT == MVT::i16 && "Unsigned extend from i32 to i32??");
+ MB = 16;
+ }
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLWINM),
+ DestReg)
+ .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB).addImm(/*ME=*/31);
+
+ // Unsigned 64-bit extensions use RLDICL (with a 32-bit source).
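+ // For example, with MB = 48 this emits the equivalent of
+ // "rldicl Rd, Rs, 0, 48", keeping only the low halfword and clearing
+ // bits 0-47.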
+ } else {
+ unsigned MB;
+ if (SrcVT == MVT::i8)
+ MB = 56;
+ else if (SrcVT == MVT::i16)
+ MB = 48;
+ else
+ MB = 32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(PPC::RLDICL_32_64), DestReg)
+ .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB);
+ }
+
+ return true;
+}
+
+// Attempt to fast-select an indirect branch instruction.
+bool PPCFastISel::SelectIndirectBr(const Instruction *I) {
+ unsigned AddrReg = getRegForValue(I->getOperand(0));
+ if (AddrReg == 0)
+ return false;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::MTCTR8))
+ .addReg(AddrReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::BCTR8));
+
+ const IndirectBrInst *IB = cast<IndirectBrInst>(I);
+ for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
+ FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);
+
+ return true;
+}
+
+// Attempt to fast-select an integer truncate instruction.
+bool PPCFastISel::SelectTrunc(const Instruction *I) {
+ Value *Src = I->getOperand(0);
+ EVT SrcVT = TLI.getValueType(Src->getType(), true);
+ EVT DestVT = TLI.getValueType(I->getType(), true);
+
+ if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16)
+ return false;
+
+ if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
+ return false;
+
+ unsigned SrcReg = getRegForValue(Src);
+ if (!SrcReg)
+ return false;
+
+ // The only interesting case is when we need to switch register classes.
+ if (SrcVT == MVT::i64) {
+ unsigned ResultReg = createResultReg(&PPC::GPRCRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(SrcReg, 0, PPC::sub_32);
+ SrcReg = ResultReg;
+ }
+
+ UpdateValueMap(I, SrcReg);
+ return true;
+}
+
+// Attempt to fast-select an integer extend instruction.
+bool PPCFastISel::SelectIntExt(const Instruction *I) {
+ Type *DestTy = I->getType();
+ Value *Src = I->getOperand(0);
+ Type *SrcTy = Src->getType();
+
+ bool IsZExt = isa<ZExtInst>(I);
+ unsigned SrcReg = getRegForValue(Src);
+ if (!SrcReg) return false;
+
+ EVT SrcEVT, DestEVT;
+ SrcEVT = TLI.getValueType(SrcTy, true);
+ DestEVT = TLI.getValueType(DestTy, true);
+ if (!SrcEVT.isSimple())
+ return false;
+ if (!DestEVT.isSimple())
+ return false;
+
+ MVT SrcVT = SrcEVT.getSimpleVT();
+ MVT DestVT = DestEVT.getSimpleVT();
+
+ // If we know the register class needed for the result of this
+ // instruction, use it. Otherwise pick the register class of the
+ // correct size that does not contain X0/R0, since we don't know
+ // whether downstream uses permit that assignment.
+ unsigned AssignedReg = FuncInfo.ValueMap[I];
+ const TargetRegisterClass *RC =
+ (AssignedReg ? MRI.getRegClass(AssignedReg) :
+ (DestVT == MVT::i64 ? &PPC::G8RC_and_G8RC_NOX0RegClass :
+ &PPC::GPRC_and_GPRC_NOR0RegClass));
+ unsigned ResultReg = createResultReg(RC);
+
+ if (!PPCEmitIntExt(SrcVT, SrcReg, DestVT, ResultReg, IsZExt))
+ return false;
+
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+// Attempt to fast-select an instruction that wasn't handled by
+// the table-generated machinery.
+bool PPCFastISel::TargetSelectInstruction(const Instruction *I) {
+
+ switch (I->getOpcode()) {
+ case Instruction::Load:
+ return SelectLoad(I);
+ case Instruction::Store:
+ return SelectStore(I);
+ case Instruction::Br:
+ return SelectBranch(I);
+ case Instruction::IndirectBr:
+ return SelectIndirectBr(I);
+ case Instruction::FPExt:
+ return SelectFPExt(I);
+ case Instruction::FPTrunc:
+ return SelectFPTrunc(I);
+ case Instruction::SIToFP:
+ return SelectIToFP(I, /*IsSigned*/ true);
+ case Instruction::UIToFP:
+ return SelectIToFP(I, /*IsSigned*/ false);
+ case Instruction::FPToSI:
+ return SelectFPToI(I, /*IsSigned*/ true);
+ case Instruction::FPToUI:
+ return SelectFPToI(I, /*IsSigned*/ false);
+ case Instruction::Add:
+ return SelectBinaryIntOp(I, ISD::ADD);
+ case Instruction::Or:
+ return SelectBinaryIntOp(I, ISD::OR);
+ case Instruction::Sub:
+ return SelectBinaryIntOp(I, ISD::SUB);
+ case Instruction::Call:
+ if (isa<IntrinsicInst>(I))
+ return false;
+ return SelectCall(I);
+ case Instruction::Ret:
+ return SelectRet(I);
+ case Instruction::Trunc:
+ return SelectTrunc(I);
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ return SelectIntExt(I);
+ // Here add other flavors of Instruction::XXX that automated
+ // cases don't catch. For example, switches are terminators
+ // that aren't yet handled.
+ default:
+ break;
+ }
+ return false;
+}
+
+// Materialize a floating-point constant into a register, and return
+// the register number (or zero if we failed to handle it).
+unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
+ // No plans to handle long double here.
+ if (VT != MVT::f32 && VT != MVT::f64)
+ return 0;
+
+ // All FP constants are loaded from the constant pool.
+ unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
+ assert(Align > 0 && "Unexpectedly missing alignment information!");
+ unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
+ unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ CodeModel::Model CModel = TM.getCodeModel();
+
+ MachineMemOperand *MMO =
+ FuncInfo.MF->getMachineMemOperand(
+ MachinePointerInfo::getConstantPool(), MachineMemOperand::MOLoad,
+ (VT == MVT::f32) ? 4 : 8, Align);
+
+ unsigned Opc = (VT == MVT::f32) ? PPC::LFS : PPC::LFD;
+ unsigned TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+
+ // For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)).
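+ // (Roughly "ld Rt, .LCPIn@toc(r2)" followed by "lfs/lfd Fd, 0(Rt)".)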
+ if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocCPT),
+ TmpReg)
+ .addConstantPoolIndex(Idx).addReg(PPC::X2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ .addImm(0).addReg(TmpReg).addMemOperand(MMO);
+ } else {
+ // Otherwise we generate LF[SD](Idx[lo], ADDIStocHA(X2, Idx)).
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDIStocHA),
+ TmpReg).addReg(PPC::X2).addConstantPoolIndex(Idx);
+ // But for large code model, we must generate a LDtocL followed
+ // by the LF[SD].
+ if (CModel == CodeModel::Large) {
+ unsigned TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocL),
+ TmpReg2).addConstantPoolIndex(Idx).addReg(TmpReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ .addImm(0).addReg(TmpReg2);
+ } else
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ .addConstantPoolIndex(Idx, 0, PPCII::MO_TOC_LO)
+ .addReg(TmpReg)
+ .addMemOperand(MMO);
+ }
+
+ return DestReg;
+}
+
+// Materialize the address of a global value into a register, and return
+// the register number (or zero if we failed to handle it).
+unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
+ assert(VT == MVT::i64 && "Non-address!");
+ const TargetRegisterClass *RC = &PPC::G8RC_and_G8RC_NOX0RegClass;
+ unsigned DestReg = createResultReg(RC);
+
+ // Global values may be plain old object addresses, TLS object
+ // addresses, constant pool entries, or jump tables. How we generate
+ // code for these may depend on small, medium, or large code model.
+ CodeModel::Model CModel = TM.getCodeModel();
+
+ // FIXME: Jump tables are not yet required because fast-isel doesn't
+ // handle switches; if that changes, we need them as well. For now,
+ // what follows assumes everything's a generic (or TLS) global address.
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
+ if (!GVar) {
+ // If GV is an alias, use the aliasee for determining thread-locality.
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
+ GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
+ }
+
+ // FIXME: We don't yet handle the complexity of TLS.
+ bool IsTLS = GVar && GVar->isThreadLocal();
+ if (IsTLS)
+ return 0;
+
+ // For small code model, generate a simple TOC load.
+ if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtoc), DestReg)
+ .addGlobalAddress(GV).addReg(PPC::X2);
+ else {
+ // If the address is an externally defined symbol, a symbol with
+ // common or externally available linkage, a function address, or a
+ // jump table address (not yet needed), or if we are generating code
+ // for large code model, we generate:
+ // LDtocL(GV, ADDIStocHA(%X2, GV))
+ // Otherwise we generate:
+ // ADDItocL(ADDIStocHA(%X2, GV), GV)
+ // Either way, start with the ADDIStocHA:
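+ // (In assembly this is roughly "addis Rt, r2, gv@toc@ha" followed by either
+ // "ld Rd, gv@toc@l(Rt)" or "addi Rd, Rt, gv@toc@l".)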
+ unsigned HighPartReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDIStocHA),
+ HighPartReg).addReg(PPC::X2).addGlobalAddress(GV);
+
+ // !GVar implies a function address. An external variable is one
+ // without an initializer.
+ // If/when switches are implemented, jump tables should be handled
+ // on the "if" path here.
+ if (CModel == CodeModel::Large || !GVar || !GVar->hasInitializer() ||
+ GVar->hasCommonLinkage() || GVar->hasAvailableExternallyLinkage())
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocL),
+ DestReg).addGlobalAddress(GV).addReg(HighPartReg);
+ else
+ // Otherwise generate the ADDItocL.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDItocL),
+ DestReg).addReg(HighPartReg).addGlobalAddress(GV);
+ }
+
+ return DestReg;
+}
+
+// Materialize a 32-bit integer constant into a register, and return
+// the register number (or zero if we failed to handle it).
+unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm,
+ const TargetRegisterClass *RC) {
+ unsigned Lo = Imm & 0xFFFF;
+ unsigned Hi = (Imm >> 16) & 0xFFFF;
+
+ unsigned ResultReg = createResultReg(RC);
+ bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);
+
+ if (isInt<16>(Imm))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(IsGPRC ? PPC::LI : PPC::LI8), ResultReg)
+ .addImm(Imm);
+ else if (Lo) {
+ // Both Lo and Hi have nonzero bits.
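+ // For example, 0x12345678 becomes "lis Tmp, 0x1234" followed by
+ // "ori Result, Tmp, 0x5678".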
+ unsigned TmpReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg)
+ .addImm(Hi);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(IsGPRC ? PPC::ORI : PPC::ORI8), ResultReg)
+ .addReg(TmpReg).addImm(Lo);
+ } else
+ // Just Hi bits.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), ResultReg)
+ .addImm(Hi);
+
+ return ResultReg;
+}
+
+// Materialize a 64-bit integer constant into a register, and return
+// the register number (or zero if we failed to handle it).
+unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
+ const TargetRegisterClass *RC) {
+ unsigned Remainder = 0;
+ unsigned Shift = 0;
+
+ // If the value doesn't fit in 32 bits, see if we can shift it
+ // so that it fits in 32 bits.
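+ // For example, 0x100000000 has 32 trailing zero bits: the value 1 is
+ // materialized first and then shifted back into place by the RLDICR below.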
+ if (!isInt<32>(Imm)) {
+ Shift = countTrailingZeros<uint64_t>(Imm);
+ int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
+
+ if (isInt<32>(ImmSh))
+ Imm = ImmSh;
+ else {
+ Remainder = Imm;
+ Shift = 32;
+ Imm >>= 32;
+ }
+ }
+
+ // Handle the high-order 32 bits (if shifted) or the whole 32 bits
+ // (if not shifted).
+ unsigned TmpReg1 = PPCMaterialize32BitInt(Imm, RC);
+ if (!Shift)
+ return TmpReg1;
+
+ // If the upper 32 bits were not zero, we've built them and need to shift
+ // them into place.
+ unsigned TmpReg2;
+ if (Imm) {
+ TmpReg2 = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLDICR),
+ TmpReg2).addReg(TmpReg1).addImm(Shift).addImm(63 - Shift);
+ } else
+ TmpReg2 = TmpReg1;
+
+ unsigned TmpReg3, Hi, Lo;
+ if ((Hi = (Remainder >> 16) & 0xFFFF)) {
+ TmpReg3 = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORIS8),
+ TmpReg3).addReg(TmpReg2).addImm(Hi);
+ } else
+ TmpReg3 = TmpReg2;
+
+ if ((Lo = Remainder & 0xFFFF)) {
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORI8),
+ ResultReg).addReg(TmpReg3).addImm(Lo);
+ return ResultReg;
+ }
+
+ return TmpReg3;
+}
+
+
+// Materialize an integer constant into a register, and return
+// the register number (or zero if we failed to handle it).
+unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT) {
+
+ if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 &&
+ VT != MVT::i8 && VT != MVT::i1)
+ return 0;
+
+ const TargetRegisterClass *RC = ((VT == MVT::i64) ? &PPC::G8RCRegClass :
+ &PPC::GPRCRegClass);
+
+ // If the constant is in range, use a load-immediate.
+ const ConstantInt *CI = cast<ConstantInt>(C);
+ if (isInt<16>(CI->getSExtValue())) {
+ unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
+ unsigned ImmReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ImmReg)
+ .addImm(CI->getSExtValue());
+ return ImmReg;
+ }
+
+ // Construct the constant piecewise.
+ int64_t Imm = CI->getZExtValue();
+
+ if (VT == MVT::i64)
+ return PPCMaterialize64BitInt(Imm, RC);
+ else if (VT == MVT::i32)
+ return PPCMaterialize32BitInt(Imm, RC);
+
+ return 0;
+}
+
+// Materialize a constant into a register, and return the register
+// number (or zero if we failed to handle it).
+unsigned PPCFastISel::TargetMaterializeConstant(const Constant *C) {
+ EVT CEVT = TLI.getValueType(C->getType(), true);
+
+ // Only handle simple types.
+ if (!CEVT.isSimple()) return 0;
+ MVT VT = CEVT.getSimpleVT();
+
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+ return PPCMaterializeFP(CFP, VT);
+ else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+ return PPCMaterializeGV(GV, VT);
+ else if (isa<ConstantInt>(C))
+ return PPCMaterializeInt(C, VT);
+
+ return 0;
+}
+
+// Materialize the address created by an alloca into a register, and
+// return the register number (or zero if we failed to handle it).
+unsigned PPCFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
+ // Don't handle dynamic allocas.
+ if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
+
+ MVT VT;
+ if (!isLoadTypeLegal(AI->getType(), VT)) return 0;
+
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDI8),
+ ResultReg).addFrameIndex(SI->second).addImm(0);
+ return ResultReg;
+ }
+
+ return 0;
+}
+
+// Fold loads into extends when possible.
+// FIXME: We can have multiple redundant extend/trunc instructions
+// following a load. The folding only picks up one. Extend this
+// to check subsequent instructions for the same pattern and remove
+// them. Thus ResultReg should be the def reg for the last redundant
+// instruction in a chain, and all intervening instructions can be
+ // removed from their parent block. Change test/CodeGen/PowerPC/fast-isel-fold.ll
+// to add ELF64-NOT: rldicl to the appropriate tests when this works.
+bool PPCFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) {
+ // Verify we have a legal type before going any further.
+ MVT VT;
+ if (!isLoadTypeLegal(LI->getType(), VT))
+ return false;
+
+ // Combine load followed by zero- or sign-extend.
+ bool IsZExt = false;
+ switch(MI->getOpcode()) {
+ default:
+ return false;
+
+ case PPC::RLDICL:
+ case PPC::RLDICL_32_64: {
+ IsZExt = true;
+ unsigned MB = MI->getOperand(3).getImm();
+ if ((VT == MVT::i8 && MB <= 56) ||
+ (VT == MVT::i16 && MB <= 48) ||
+ (VT == MVT::i32 && MB <= 32))
+ break;
+ return false;
+ }
+
+ case PPC::RLWINM:
+ case PPC::RLWINM8: {
+ IsZExt = true;
+ unsigned MB = MI->getOperand(3).getImm();
+ if ((VT == MVT::i8 && MB <= 24) ||
+ (VT == MVT::i16 && MB <= 16))
+ break;
+ return false;
+ }
+
+ case PPC::EXTSB:
+ case PPC::EXTSB8:
+ case PPC::EXTSB8_32_64:
+ /* There is no sign-extending load-byte instruction. */
+ return false;
+
+ case PPC::EXTSH:
+ case PPC::EXTSH8:
+ case PPC::EXTSH8_32_64: {
+ if (VT != MVT::i16 && VT != MVT::i8)
+ return false;
+ break;
+ }
+
+ case PPC::EXTSW:
+ case PPC::EXTSW_32_64: {
+ if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8)
+ return false;
+ break;
+ }
+ }
+
+ // See if we can handle this address.
+ Address Addr;
+ if (!PPCComputeAddress(LI->getOperand(0), Addr))
+ return false;
+
+ unsigned ResultReg = MI->getOperand(0).getReg();
+
+ if (!PPCEmitLoad(VT, ResultReg, Addr, 0, IsZExt))
+ return false;
+
+ MI->eraseFromParent();
+ return true;
+}
+
+// Attempt to lower call arguments in a faster way than done by
+// the selection DAG code.
+bool PPCFastISel::FastLowerArguments() {
+ // Defer to normal argument lowering for now. It's reasonably
+ // efficient. Consider doing something like ARM to handle the
+ // case where all args fit in registers, no varargs, no float
+ // or vector args.
+ return false;
+}
+
+// Handle materializing integer constants into a register. This is not
+// automatically generated for PowerPC, so must be explicitly created here.
+unsigned PPCFastISel::FastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
+
+ if (Opc != ISD::Constant)
+ return 0;
+
+ if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 &&
+ VT != MVT::i8 && VT != MVT::i1)
+ return 0;
+
+ const TargetRegisterClass *RC = ((VT == MVT::i64) ? &PPC::G8RCRegClass :
+ &PPC::GPRCRegClass);
+ if (VT == MVT::i64)
+ return PPCMaterialize64BitInt(Imm, RC);
+ else
+ return PPCMaterialize32BitInt(Imm, RC);
+}
+
+// Override for ADDI and ADDI8 to set the correct register class
+// on RHS operand 0. The automatic infrastructure naively assumes
+// GPRC for i32 and G8RC for i64; the concept of "no R0" is lost
+// for these cases. At the moment, none of the other automatically
+// generated RI instructions require special treatment. However, once
+// SelectSelect is implemented, "isel" requires similar handling.
+//
+// Also be conservative about the output register class. Avoid
+// assigning R0 or X0 to the output register for GPRC and G8RC
+// register classes, as any such result could be used in ADDI, etc.,
+// where those regs have another meaning.
+unsigned PPCFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ uint64_t Imm) {
+ if (MachineInstOpcode == PPC::ADDI)
+ MRI.setRegClass(Op0, &PPC::GPRC_and_GPRC_NOR0RegClass);
+ else if (MachineInstOpcode == PPC::ADDI8)
+ MRI.setRegClass(Op0, &PPC::G8RC_and_G8RC_NOX0RegClass);
+
+ const TargetRegisterClass *UseRC =
+ (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
+ (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
+
+ return FastISel::FastEmitInst_ri(MachineInstOpcode, UseRC,
+ Op0, Op0IsKill, Imm);
+}
+
+// Override for instructions with one register operand to avoid use of
+// R0/X0. The automatic infrastructure isn't aware of the context so
+// we must be conservative.
+unsigned PPCFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
+ const TargetRegisterClass* RC,
+ unsigned Op0, bool Op0IsKill) {
+ const TargetRegisterClass *UseRC =
+ (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
+ (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
+
+ return FastISel::FastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill);
+}
+
+// Override for instructions with two register operands to avoid use
+// of R0/X0. The automatic infrastructure isn't aware of the context
+// so we must be conservative.
+unsigned PPCFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
+ const TargetRegisterClass* RC,
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill) {
+ const TargetRegisterClass *UseRC =
+ (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
+ (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
+
+ return FastISel::FastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill,
+ Op1, Op1IsKill);
+}
+
+namespace llvm {
+ // Create the fast instruction selector for PowerPC64 ELF.
+ FastISel *PPC::createFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo) {
+ const TargetMachine &TM = FuncInfo.MF->getTarget();
+
+ // Only available on 64-bit ELF for now.
+ const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();
+ if (Subtarget->isPPC64() && Subtarget->isSVR4ABI())
+ return new PPCFastISel(FuncInfo, LibInfo);
+
+ return 0;
+ }
+}
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index c8459090d97d..0ac2ceddcc9e 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -26,17 +26,6 @@
using namespace llvm;
-// FIXME This disables some code that aligns the stack to a boundary bigger than
-// the default (16 bytes on Darwin) when there is a stack local of greater
-// alignment. This does not currently work, because the delta between old and
-// new stack pointers is added to offsets that reference incoming parameters
-// after the prolog is generated, and the code that does that doesn't handle a
-// variable delta. You don't want to do that anyway; a better approach is to
-// reserve another register that retains to the incoming stack pointer, and
-// reference parameters relative to that.
-#define ALIGN_STACK 0
-
-
/// VRRegNo - Map from a numbered VR register to its enum value.
///
static const uint16_t VRRegNo[] = {
@@ -215,11 +204,13 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF,
unsigned FrameSize =
UseEstimate ? MFI->estimateStackSize(MF) : MFI->getStackSize();
- // Get the alignments provided by the target, and the maximum alignment
- // (if any) of the fixed frame objects.
- unsigned MaxAlign = MFI->getMaxAlignment();
- unsigned TargetAlign = getStackAlignment();
- unsigned AlignMask = TargetAlign - 1; //
+ // Get stack alignments. The frame must be aligned to the greatest of these:
+ unsigned TargetAlign = getStackAlignment(); // alignment required per the ABI
+ unsigned MaxAlign = MFI->getMaxAlignment(); // algmt required by data in frame
+ unsigned AlignMask = std::max(MaxAlign, TargetAlign) - 1;
+
+ const PPCRegisterInfo *RegInfo =
+ static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo());
// If we are a leaf function, and use up to 224 bytes of stack space,
// don't have a frame pointer, calls, or dynamic alloca then we do not need
@@ -235,7 +226,7 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF,
FrameSize <= 224 && // Fits in red zone.
!MFI->hasVarSizedObjects() && // No dynamic alloca.
!MFI->adjustsStack() && // No calls.
- (!ALIGN_STACK || MaxAlign <= TargetAlign)) { // No special alignment.
+ !RegInfo->hasBasePointer(MF)) { // No special alignment.
// No need for frame
if (UpdateMF)
MFI->setStackSize(0);
@@ -305,6 +296,12 @@ void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const {
unsigned FPReg = is31 ? PPC::R31 : PPC::R1;
unsigned FP8Reg = is31 ? PPC::X31 : PPC::X1;
+ const PPCRegisterInfo *RegInfo =
+ static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ bool HasBP = RegInfo->hasBasePointer(MF);
+ unsigned BPReg = HasBP ? (unsigned) PPC::R30 : FPReg;
+ unsigned BP8Reg = HasBP ? (unsigned) PPC::X30 : FPReg;
+
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
BI != BE; ++BI)
for (MachineBasicBlock::iterator MBBI = BI->end(); MBBI != BI->begin(); ) {
@@ -321,6 +318,13 @@ void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const {
case PPC::FP8:
MO.setReg(FP8Reg);
break;
+ case PPC::BP:
+ MO.setReg(BPReg);
+ break;
+ case PPC::BP8:
+ MO.setReg(BP8Reg);
+ break;
+
}
}
}
@@ -332,18 +336,29 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
const PPCInstrInfo &TII =
*static_cast<const PPCInstrInfo*>(MF.getTarget().getInstrInfo());
+ const PPCRegisterInfo *RegInfo =
+ static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo());
MachineModuleInfo &MMI = MF.getMMI();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
DebugLoc dl;
bool needsFrameMoves = MMI.hasDebugInfo() ||
MF.getFunction()->needsUnwindTableEntry();
+ // Get processor type.
+ bool isPPC64 = Subtarget.isPPC64();
+ // Get the ABI.
+ bool isDarwinABI = Subtarget.isDarwinABI();
+ bool isSVR4ABI = Subtarget.isSVR4ABI();
+ assert((isDarwinABI || isSVR4ABI) &&
+ "Currently only Darwin and SVR4 ABIs are supported for PowerPC.");
+
// Prepare for frame info.
MCSymbol *FrameLabel = 0;
// Scan the prolog, looking for an UPDATE_VRSAVE instruction. If we find it,
// process it.
- if (!Subtarget.isSVR4ABI())
+ if (!isSVR4ABI)
for (unsigned i = 0; MBBI != MBB.end(); ++i, ++MBBI) {
if (MBBI->getOpcode() == PPC::UPDATE_VRSAVE) {
HandleVRSaveUpdate(MBBI, TII);
@@ -357,26 +372,58 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// Work out frame sizes.
unsigned FrameSize = determineFrameLayout(MF);
int NegFrameSize = -FrameSize;
+ if (!isInt<32>(NegFrameSize))
+ llvm_unreachable("Unhandled stack size!");
if (MFI->isFrameAddressTaken())
replaceFPWithRealFP(MF);
- // Get processor type.
- bool isPPC64 = Subtarget.isPPC64();
- // Get operating system
- bool isDarwinABI = Subtarget.isDarwinABI();
// Check if the link register (LR) must be saved.
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
bool MustSaveLR = FI->mustSaveLR();
- const SmallVector<unsigned, 3> &MustSaveCRs = FI->getMustSaveCRs();
- // Do we have a frame pointer for this function?
+ const SmallVectorImpl<unsigned> &MustSaveCRs = FI->getMustSaveCRs();
+ // Do we have a frame pointer and/or base pointer for this function?
bool HasFP = hasFP(MF);
+ bool HasBP = RegInfo->hasBasePointer(MF);
+
+ unsigned SPReg = isPPC64 ? PPC::X1 : PPC::R1;
+ unsigned BPReg = isPPC64 ? PPC::X30 : PPC::R30;
+ unsigned FPReg = isPPC64 ? PPC::X31 : PPC::R31;
+ unsigned LRReg = isPPC64 ? PPC::LR8 : PPC::LR;
+ unsigned ScratchReg = isPPC64 ? PPC::X0 : PPC::R0;
+ unsigned TempReg = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg
+ // ...(R12/X12 is volatile in both Darwin & SVR4, & can't be a function arg.)
+ const MCInstrDesc& MFLRInst = TII.get(isPPC64 ? PPC::MFLR8
+ : PPC::MFLR );
+ const MCInstrDesc& StoreInst = TII.get(isPPC64 ? PPC::STD
+ : PPC::STW );
+ const MCInstrDesc& StoreUpdtInst = TII.get(isPPC64 ? PPC::STDU
+ : PPC::STWU );
+ const MCInstrDesc& StoreUpdtIdxInst = TII.get(isPPC64 ? PPC::STDUX
+ : PPC::STWUX);
+ const MCInstrDesc& LoadImmShiftedInst = TII.get(isPPC64 ? PPC::LIS8
+ : PPC::LIS );
+ const MCInstrDesc& OrImmInst = TII.get(isPPC64 ? PPC::ORI8
+ : PPC::ORI );
+ const MCInstrDesc& OrInst = TII.get(isPPC64 ? PPC::OR8
+ : PPC::OR );
+ const MCInstrDesc& SubtractCarryingInst = TII.get(isPPC64 ? PPC::SUBFC8
+ : PPC::SUBFC);
+ const MCInstrDesc& SubtractImmCarryingInst = TII.get(isPPC64 ? PPC::SUBFIC8
+ : PPC::SUBFIC);
+
+ // Regarding this assert: Even though LR is saved in the caller's frame (i.e.,
+ // LROffset is positive), that slot is callee-owned. Because PPC32 SVR4 has no
+ // Red Zone, an asynchronous event (a form of "callee") could claim a frame &
+ // overwrite it, so PPC32 SVR4 must claim at least a minimal frame to save LR.
+ assert((isPPC64 || !isSVR4ABI || !(!FrameSize && (MustSaveLR || HasFP))) &&
+ "FrameSize must be >0 to save/restore the FP or LR for 32-bit SVR4.");
int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
int FPOffset = 0;
if (HasFP) {
- if (Subtarget.isSVR4ABI()) {
+ if (isSVR4ABI) {
MachineFrameInfo *FFI = MF.getFrameInfo();
int FPIndex = FI->getFramePointerSaveIndex();
assert(FPIndex && "No Frame Pointer Save Slot!");
@@ -386,136 +433,130 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
}
}
- if (isPPC64) {
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MFLR8), PPC::X0);
-
- if (!MustSaveCRs.empty()) {
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MFCR8), PPC::X12);
- for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
- MIB.addReg(MustSaveCRs[i], RegState::ImplicitKill);
+ int BPOffset = 0;
+ if (HasBP) {
+ if (isSVR4ABI) {
+ MachineFrameInfo *FFI = MF.getFrameInfo();
+ int BPIndex = FI->getBasePointerSaveIndex();
+ assert(BPIndex && "No Base Pointer Save Slot!");
+ BPOffset = FFI->getObjectOffset(BPIndex);
+ } else {
+ BPOffset =
+ PPCFrameLowering::getBasePointerSaveOffset(isPPC64, isDarwinABI);
}
-
- if (HasFP)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STD))
- .addReg(PPC::X31)
- .addImm(FPOffset/4)
- .addReg(PPC::X1);
-
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STD))
- .addReg(PPC::X0)
- .addImm(LROffset / 4)
- .addReg(PPC::X1);
-
- if (!MustSaveCRs.empty())
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STW8))
- .addReg(PPC::X12, getKillRegState(true))
- .addImm(8)
- .addReg(PPC::X1);
- } else {
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MFLR), PPC::R0);
-
- if (HasFP)
- // FIXME: On PPC32 SVR4, FPOffset is negative and access to negative
- // offsets of R1 is not allowed.
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STW))
- .addReg(PPC::R31)
- .addImm(FPOffset)
- .addReg(PPC::R1);
-
- assert(MustSaveCRs.empty() &&
- "Prologue CR saving supported only in 64-bit mode");
-
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STW))
- .addReg(PPC::R0)
- .addImm(LROffset)
- .addReg(PPC::R1);
}
- // Skip if a leaf routine.
- if (!FrameSize) return;
-
// Get stack alignments.
- unsigned TargetAlign = getStackAlignment();
unsigned MaxAlign = MFI->getMaxAlignment();
+ if (HasBP && MaxAlign > 1)
+ assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
+ "Invalid alignment!");
+
+ // Frames of 32KB & larger require special handling because they cannot be
+ // indexed into with a simple STDU/STWU/STD/STW immediate offset operand.
+ bool isLargeFrame = !isInt<16>(NegFrameSize);
+
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, MFLRInst, ScratchReg);
+
+ assert((isPPC64 || MustSaveCRs.empty()) &&
+ "Prologue CR saving supported only in 64-bit mode");
+
+ if (!MustSaveCRs.empty()) { // will only occur for PPC64
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::MFCR8), TempReg);
+ for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
+ MIB.addReg(MustSaveCRs[i], RegState::ImplicitKill);
+ }
+
+ if (HasFP)
+ // FIXME: On PPC32 SVR4, we must not spill before claiming the stackframe.
+ BuildMI(MBB, MBBI, dl, StoreInst)
+ .addReg(FPReg)
+ .addImm(FPOffset)
+ .addReg(SPReg);
+
+ if (HasBP)
+ // FIXME: On PPC32 SVR4, we must not spill before claiming the stackframe.
+ BuildMI(MBB, MBBI, dl, StoreInst)
+ .addReg(BPReg)
+ .addImm(BPOffset)
+ .addReg(SPReg);
+
+ if (MustSaveLR)
+ // FIXME: On PPC32 SVR4, we must not spill before claiming the stackframe.
+ BuildMI(MBB, MBBI, dl, StoreInst)
+ .addReg(ScratchReg)
+ .addImm(LROffset)
+ .addReg(SPReg);
+
+ if (!MustSaveCRs.empty()) // will only occur for PPC64
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STW8))
+ .addReg(TempReg, getKillRegState(true))
+ .addImm(8)
+ .addReg(SPReg);
+
+ // Skip the rest if this is a leaf function & all spills fit in the Red Zone.
+ if (!FrameSize) return;
// Adjust stack pointer: r1 += NegFrameSize.
// If there is a preferred stack alignment, align R1 now
- if (!isPPC64) {
- // PPC32.
- if (ALIGN_STACK && MaxAlign > TargetAlign) {
- assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
- "Invalid alignment!");
- assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!");
-
- BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), PPC::R0)
- .addReg(PPC::R1)
+
+ if (HasBP) {
+ // Save a copy of r1 as the base pointer.
+ BuildMI(MBB, MBBI, dl, OrInst, BPReg)
+ .addReg(SPReg)
+ .addReg(SPReg);
+ }
+
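+  // For over-aligned frames, fold the current misalignment of SP (SP modulo
+  // MaxAlign) into the SP decrement so the updated SP is MaxAlign-aligned.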
+ if (HasBP && MaxAlign > 1) {
+ if (isPPC64)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), ScratchReg)
+ .addReg(SPReg)
+ .addImm(0)
+ .addImm(64 - Log2_32(MaxAlign));
+ else // PPC32...
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), ScratchReg)
+ .addReg(SPReg)
.addImm(0)
.addImm(32 - Log2_32(MaxAlign))
.addImm(31);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC) ,PPC::R0)
- .addReg(PPC::R0, RegState::Kill)
+ if (!isLargeFrame) {
+ BuildMI(MBB, MBBI, dl, SubtractImmCarryingInst, ScratchReg)
+ .addReg(ScratchReg, RegState::Kill)
.addImm(NegFrameSize);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX), PPC::R1)
- .addReg(PPC::R1, RegState::Kill)
- .addReg(PPC::R1)
- .addReg(PPC::R0);
- } else if (isInt<16>(NegFrameSize)) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STWU), PPC::R1)
- .addReg(PPC::R1)
- .addImm(NegFrameSize)
- .addReg(PPC::R1);
} else {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS), PPC::R0)
+ BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, TempReg)
.addImm(NegFrameSize >> 16);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI), PPC::R0)
- .addReg(PPC::R0, RegState::Kill)
+ BuildMI(MBB, MBBI, dl, OrImmInst, TempReg)
+ .addReg(TempReg, RegState::Kill)
.addImm(NegFrameSize & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX), PPC::R1)
- .addReg(PPC::R1, RegState::Kill)
- .addReg(PPC::R1)
- .addReg(PPC::R0);
+ BuildMI(MBB, MBBI, dl, SubtractCarryingInst, ScratchReg)
+ .addReg(ScratchReg, RegState::Kill)
+ .addReg(TempReg, RegState::Kill);
}
- } else { // PPC64.
- if (ALIGN_STACK && MaxAlign > TargetAlign) {
- assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
- "Invalid alignment!");
- assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!");
-
- BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), PPC::X0)
- .addReg(PPC::X1)
- .addImm(0)
- .addImm(64 - Log2_32(MaxAlign));
- BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC8), PPC::X0)
- .addReg(PPC::X0)
- .addImm(NegFrameSize);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX), PPC::X1)
- .addReg(PPC::X1, RegState::Kill)
- .addReg(PPC::X1)
- .addReg(PPC::X0);
- } else if (isInt<16>(NegFrameSize)) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STDU), PPC::X1)
- .addReg(PPC::X1)
- .addImm(NegFrameSize / 4)
- .addReg(PPC::X1);
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS8), PPC::X0)
- .addImm(NegFrameSize >> 16);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI8), PPC::X0)
- .addReg(PPC::X0, RegState::Kill)
- .addImm(NegFrameSize & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX), PPC::X1)
- .addReg(PPC::X1, RegState::Kill)
- .addReg(PPC::X1)
- .addReg(PPC::X0);
- }
- }
+ BuildMI(MBB, MBBI, dl, StoreUpdtIdxInst, SPReg)
+ .addReg(SPReg, RegState::Kill)
+ .addReg(SPReg)
+ .addReg(ScratchReg);
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+ } else if (!isLargeFrame) {
+ BuildMI(MBB, MBBI, dl, StoreUpdtInst, SPReg)
+ .addReg(SPReg)
+ .addImm(NegFrameSize)
+ .addReg(SPReg);
+
+ } else {
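+    // NegFrameSize does not fit in a signed 16-bit immediate: materialize it
+    // with lis/ori and use the indexed store-with-update form instead.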
+ BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
+ .addImm(NegFrameSize >> 16);
+ BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
+ .addReg(ScratchReg, RegState::Kill)
+ .addImm(NegFrameSize & 0xFFFF);
+ BuildMI(MBB, MBBI, dl, StoreUpdtIdxInst, SPReg)
+ .addReg(SPReg, RegState::Kill)
+ .addReg(SPReg)
+ .addReg(ScratchReg);
+ }
// Add the "machine moves" for the instructions we generated above, but in
// reverse order.
@@ -525,25 +566,26 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(PPC::PROLOG_LABEL)).addSym(FrameLabel);
// Show update of SP.
- if (NegFrameSize) {
- MachineLocation SPDst(MachineLocation::VirtualFP);
- MachineLocation SPSrc(MachineLocation::VirtualFP, NegFrameSize);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
- } else {
- MachineLocation SP(isPPC64 ? PPC::X31 : PPC::R31);
- Moves.push_back(MachineMove(FrameLabel, SP, SP));
- }
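+  // Describe the prologue for the unwinder: the CFA now sits FrameSize bytes
+  // above SP, and FP/BP/LR (when saved) live at fixed offsets from the CFA.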
+ assert(NegFrameSize);
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(FrameLabel, NegFrameSize));
if (HasFP) {
- MachineLocation FPDst(MachineLocation::VirtualFP, FPOffset);
- MachineLocation FPSrc(isPPC64 ? PPC::X31 : PPC::R31);
- Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
+ unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
+ MMI.addFrameInst(
+ MCCFIInstruction::createOffset(FrameLabel, Reg, FPOffset));
+ }
+
+ if (HasBP) {
+ unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
+ MMI.addFrameInst(
+ MCCFIInstruction::createOffset(FrameLabel, Reg, BPOffset));
}
if (MustSaveLR) {
- MachineLocation LRDst(MachineLocation::VirtualFP, LROffset);
- MachineLocation LRSrc(isPPC64 ? PPC::LR8 : PPC::LR);
- Moves.push_back(MachineMove(FrameLabel, LRDst, LRSrc));
+ unsigned Reg = MRI->getDwarfRegNum(LRReg, true);
+ MMI.addFrameInst(
+ MCCFIInstruction::createOffset(FrameLabel, Reg, LROffset));
}
}
@@ -551,15 +593,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// If there is a frame pointer, copy R1 into R31
if (HasFP) {
- if (!isPPC64) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::OR), PPC::R31)
- .addReg(PPC::R1)
- .addReg(PPC::R1);
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::OR8), PPC::X31)
- .addReg(PPC::X1)
- .addReg(PPC::X1);
- }
+ BuildMI(MBB, MBBI, dl, OrInst, FPReg)
+ .addReg(SPReg)
+ .addReg(SPReg);
if (needsFrameMoves) {
ReadyLabel = MMI.getContext().CreateTempSymbol();
@@ -567,10 +603,8 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// Mark effective beginning of when frame pointer is ready.
BuildMI(MBB, MBBI, dl, TII.get(PPC::PROLOG_LABEL)).addSym(ReadyLabel);
- MachineLocation FPDst(HasFP ? (isPPC64 ? PPC::X31 : PPC::R31) :
- (isPPC64 ? PPC::X1 : PPC::R1));
- MachineLocation FPSrc(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(ReadyLabel, FPDst, FPSrc));
+ unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(ReadyLabel, Reg));
}
}
@@ -590,26 +624,21 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// For SVR4, don't emit a move for the CR spill slot if we haven't
// spilled CRs.
- if (Subtarget.isSVR4ABI()
- && (PPC::CR2 <= Reg && Reg <= PPC::CR4)
- && MustSaveCRs.empty())
- continue;
+ if (isSVR4ABI && (PPC::CR2 <= Reg && Reg <= PPC::CR4)
+ && MustSaveCRs.empty())
+ continue;
// For 64-bit SVR4 when we have spilled CRs, the spill location
// is SP+8, not a frame-relative slot.
- if (Subtarget.isSVR4ABI()
- && Subtarget.isPPC64()
- && (PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
- MachineLocation CSDst(PPC::X1, 8);
- MachineLocation CSSrc(PPC::CR2);
- Moves.push_back(MachineMove(Label, CSDst, CSSrc));
- continue;
+ if (isSVR4ABI && isPPC64 && (PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
+ MMI.addFrameInst(MCCFIInstruction::createOffset(
+ Label, MRI->getDwarfRegNum(PPC::CR2, true), 8));
+ continue;
}
int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
- MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
- MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(Label, CSDst, CSSrc));
+ MMI.addFrameInst(MCCFIInstruction::createOffset(
+ Label, MRI->getDwarfRegNum(Reg, true), Offset));
}
}
}
@@ -620,6 +649,8 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
assert(MBBI != MBB.end() && "Returning block has no terminator");
const PPCInstrInfo &TII =
*static_cast<const PPCInstrInfo*>(MF.getTarget().getInstrInfo());
+ const PPCRegisterInfo *RegInfo =
+ static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo());
unsigned RetOpcode = MBBI->getOpcode();
DebugLoc dl;
@@ -633,30 +664,49 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
RetOpcode == PPC::TCRETURNai8) &&
"Can only insert epilog into returning blocks");
- // Get alignment info so we know how to restore r1
+ // Get alignment info so we know how to restore the SP.
const MachineFrameInfo *MFI = MF.getFrameInfo();
- unsigned TargetAlign = getStackAlignment();
- unsigned MaxAlign = MFI->getMaxAlignment();
// Get the number of bytes allocated from the FrameInfo.
int FrameSize = MFI->getStackSize();
// Get processor type.
bool isPPC64 = Subtarget.isPPC64();
- // Get operating system
+ // Get the ABI.
bool isDarwinABI = Subtarget.isDarwinABI();
+ bool isSVR4ABI = Subtarget.isSVR4ABI();
+
// Check if the link register (LR) has been saved.
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
bool MustSaveLR = FI->mustSaveLR();
- const SmallVector<unsigned, 3> &MustSaveCRs = FI->getMustSaveCRs();
- // Do we have a frame pointer for this function?
+ const SmallVectorImpl<unsigned> &MustSaveCRs = FI->getMustSaveCRs();
+ // Do we have a frame pointer and/or base pointer for this function?
bool HasFP = hasFP(MF);
+ bool HasBP = RegInfo->hasBasePointer(MF);
+
+ unsigned SPReg = isPPC64 ? PPC::X1 : PPC::R1;
+ unsigned BPReg = isPPC64 ? PPC::X30 : PPC::R30;
+ unsigned FPReg = isPPC64 ? PPC::X31 : PPC::R31;
+ unsigned ScratchReg = isPPC64 ? PPC::X0 : PPC::R0;
+ unsigned TempReg = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg
+ const MCInstrDesc& MTLRInst = TII.get( isPPC64 ? PPC::MTLR8
+ : PPC::MTLR );
+ const MCInstrDesc& LoadInst = TII.get( isPPC64 ? PPC::LD
+ : PPC::LWZ );
+ const MCInstrDesc& LoadImmShiftedInst = TII.get( isPPC64 ? PPC::LIS8
+ : PPC::LIS );
+ const MCInstrDesc& OrImmInst = TII.get( isPPC64 ? PPC::ORI8
+ : PPC::ORI );
+ const MCInstrDesc& AddImmInst = TII.get( isPPC64 ? PPC::ADDI8
+ : PPC::ADDI );
+ const MCInstrDesc& AddInst = TII.get( isPPC64 ? PPC::ADD8
+ : PPC::ADD4 );
int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
int FPOffset = 0;
if (HasFP) {
- if (Subtarget.isSVR4ABI()) {
+ if (isSVR4ABI) {
MachineFrameInfo *FFI = MF.getFrameInfo();
int FPIndex = FI->getFramePointerSaveIndex();
assert(FPIndex && "No Frame Pointer Save Slot!");
@@ -666,6 +716,19 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
}
}
+ int BPOffset = 0;
+ if (HasBP) {
+ if (isSVR4ABI) {
+ MachineFrameInfo *FFI = MF.getFrameInfo();
+ int BPIndex = FI->getBasePointerSaveIndex();
+ assert(BPIndex && "No Base Pointer Save Slot!");
+ BPOffset = FFI->getObjectOffset(BPIndex);
+ } else {
+ BPOffset =
+ PPCFrameLowering::getBasePointerSaveOffset(isPPC64, isDarwinABI);
+ }
+ }
+
bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
RetOpcode == PPC::TCRETURNdi ||
RetOpcode == PPC::TCRETURNai ||
@@ -687,98 +750,76 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
FrameSize += StackAdj;
}
+ // Frames of 32KB & larger require special handling because they cannot be
+ // indexed into with a simple LD/LWZ immediate offset operand.
+ bool isLargeFrame = !isInt<16>(FrameSize);
+
if (FrameSize) {
- // The loaded (or persistent) stack pointer value is offset by the 'stwu'
- // on entry to the function. Add this offset back now.
- if (!isPPC64) {
- // If this function contained a fastcc call and GuaranteedTailCallOpt is
- // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
- // call which invalidates the stack pointer value in SP(0). So we use the
- // value of R31 in this case.
- if (FI->hasFastCall() && isInt<16>(FrameSize)) {
- assert(hasFP(MF) && "Expecting a valid the frame pointer.");
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1)
- .addReg(PPC::R31).addImm(FrameSize);
- } else if(FI->hasFastCall()) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS), PPC::R0)
- .addImm(FrameSize >> 16);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI), PPC::R0)
- .addReg(PPC::R0, RegState::Kill)
- .addImm(FrameSize & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADD4))
- .addReg(PPC::R1)
- .addReg(PPC::R31)
- .addReg(PPC::R0);
- } else if (isInt<16>(FrameSize) &&
- (!ALIGN_STACK || TargetAlign >= MaxAlign) &&
- !MFI->hasVarSizedObjects()) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1)
- .addReg(PPC::R1).addImm(FrameSize);
+ // In the prologue, the loaded (or persistent) stack pointer value is offset
+ // by the STDU/STDUX/STWU/STWUX instruction. Add this offset back now.
+
+ // If this function contained a fastcc call and GuaranteedTailCallOpt is
+ // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
+ // call which invalidates the stack pointer value in SP(0). So we use the
+ // value of R31 in this case.
+ if (FI->hasFastCall()) {
+ assert(HasFP && "Expecting a valid frame pointer.");
+ if (!isLargeFrame) {
+ BuildMI(MBB, MBBI, dl, AddImmInst, SPReg)
+ .addReg(FPReg).addImm(FrameSize);
} else {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ),PPC::R1)
- .addImm(0).addReg(PPC::R1);
- }
- } else {
- if (FI->hasFastCall() && isInt<16>(FrameSize)) {
- assert(hasFP(MF) && "Expecting a valid the frame pointer.");
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1)
- .addReg(PPC::X31).addImm(FrameSize);
- } else if(FI->hasFastCall()) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS8), PPC::X0)
+ BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
.addImm(FrameSize >> 16);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI8), PPC::X0)
- .addReg(PPC::X0, RegState::Kill)
+ BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
+ .addReg(ScratchReg, RegState::Kill)
.addImm(FrameSize & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADD8))
- .addReg(PPC::X1)
- .addReg(PPC::X31)
- .addReg(PPC::X0);
- } else if (isInt<16>(FrameSize) && TargetAlign >= MaxAlign &&
- !MFI->hasVarSizedObjects()) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1)
- .addReg(PPC::X1).addImm(FrameSize);
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X1)
- .addImm(0).addReg(PPC::X1);
+ BuildMI(MBB, MBBI, dl, AddInst)
+ .addReg(SPReg)
+ .addReg(FPReg)
+ .addReg(ScratchReg);
}
+ } else if (!isLargeFrame && !HasBP && !MFI->hasVarSizedObjects()) {
+ BuildMI(MBB, MBBI, dl, AddImmInst, SPReg)
+ .addReg(SPReg)
+ .addImm(FrameSize);
+ } else {
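+      // Otherwise reload SP from the back-chain word at SP(0), which the
+      // prologue's store-with-update filled with the previous stack pointer.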
+ BuildMI(MBB, MBBI, dl, LoadInst, SPReg)
+ .addImm(0)
+ .addReg(SPReg);
}
- }
- if (isPPC64) {
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X0)
- .addImm(LROffset/4).addReg(PPC::X1);
+ }
- if (!MustSaveCRs.empty())
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ8), PPC::X12)
- .addImm(8).addReg(PPC::X1);
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, LoadInst, ScratchReg)
+ .addImm(LROffset)
+ .addReg(SPReg);
- if (HasFP)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X31)
- .addImm(FPOffset/4).addReg(PPC::X1);
+ assert((isPPC64 || MustSaveCRs.empty()) &&
+ "Epilogue CR restoring supported only in 64-bit mode");
- if (!MustSaveCRs.empty())
- for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MTCRF8), MustSaveCRs[i])
- .addReg(PPC::X12, getKillRegState(i == e-1));
+ if (!MustSaveCRs.empty()) // will only occur for PPC64
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ8), TempReg)
+ .addImm(8)
+ .addReg(SPReg);
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MTLR8)).addReg(PPC::X0);
- } else {
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ), PPC::R0)
- .addImm(LROffset).addReg(PPC::R1);
+ if (HasFP)
+ BuildMI(MBB, MBBI, dl, LoadInst, FPReg)
+ .addImm(FPOffset)
+ .addReg(SPReg);
- assert(MustSaveCRs.empty() &&
- "Epilogue CR restoring supported only in 64-bit mode");
+ if (HasBP)
+ BuildMI(MBB, MBBI, dl, LoadInst, BPReg)
+ .addImm(BPOffset)
+ .addReg(SPReg);
- if (HasFP)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ), PPC::R31)
- .addImm(FPOffset).addReg(PPC::R1);
+ if (!MustSaveCRs.empty()) // will only occur for PPC64
+ for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::MTOCRF8), MustSaveCRs[i])
+ .addReg(TempReg, getKillRegState(i == e-1));
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MTLR)).addReg(PPC::R0);
- }
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, MTLRInst).addReg(ScratchReg);
// Callee pop calling convention. Pop parameter/linkage area. Used for tail
// call optimization
@@ -786,27 +827,20 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
MF.getFunction()->getCallingConv() == CallingConv::Fast) {
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
unsigned CallerAllocatedAmt = FI->getMinReservedArea();
- unsigned StackReg = isPPC64 ? PPC::X1 : PPC::R1;
- unsigned FPReg = isPPC64 ? PPC::X31 : PPC::R31;
- unsigned TmpReg = isPPC64 ? PPC::X0 : PPC::R0;
- unsigned ADDIInstr = isPPC64 ? PPC::ADDI8 : PPC::ADDI;
- unsigned ADDInstr = isPPC64 ? PPC::ADD8 : PPC::ADD4;
- unsigned LISInstr = isPPC64 ? PPC::LIS8 : PPC::LIS;
- unsigned ORIInstr = isPPC64 ? PPC::ORI8 : PPC::ORI;
if (CallerAllocatedAmt && isInt<16>(CallerAllocatedAmt)) {
- BuildMI(MBB, MBBI, dl, TII.get(ADDIInstr), StackReg)
- .addReg(StackReg).addImm(CallerAllocatedAmt);
+ BuildMI(MBB, MBBI, dl, AddImmInst, SPReg)
+ .addReg(SPReg).addImm(CallerAllocatedAmt);
} else {
- BuildMI(MBB, MBBI, dl, TII.get(LISInstr), TmpReg)
+ BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
.addImm(CallerAllocatedAmt >> 16);
- BuildMI(MBB, MBBI, dl, TII.get(ORIInstr), TmpReg)
- .addReg(TmpReg, RegState::Kill)
+ BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
+ .addReg(ScratchReg, RegState::Kill)
.addImm(CallerAllocatedAmt & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(ADDInstr))
- .addReg(StackReg)
+ BuildMI(MBB, MBBI, dl, AddInst)
+ .addReg(SPReg)
.addReg(FPReg)
- .addReg(TmpReg);
+ .addReg(ScratchReg);
}
} else if (RetOpcode == PPC::TCRETURNdi) {
MBBI = MBB.getLastNonDebugInstr();
@@ -854,7 +888,8 @@ static bool MustSaveLR(const MachineFunction &MF, unsigned LR) {
void
PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *) const {
- const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
+ const PPCRegisterInfo *RegInfo =
+ static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo());
// Save and clear the LR state.
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
@@ -879,6 +914,15 @@ PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
FI->setFramePointerSaveIndex(FPSI);
}
+ int BPSI = FI->getBasePointerSaveIndex();
+ if (!BPSI && RegInfo->hasBasePointer(MF)) {
+ int BPOffset = getBasePointerSaveOffset(isPPC64, isDarwinABI);
+ // Allocate the frame index for the base pointer save area.
+ BPSI = MFI->CreateFixedObject(isPPC64? 8 : 4, BPOffset, true);
+ // Save the result.
+ FI->setBasePointerSaveIndex(BPSI);
+ }
+
// Reserve stack space to move the linkage area to in case of a tail call.
int TCSPDelta = 0;
if (MF.getTarget().Options.GuaranteedTailCallOpt &&
@@ -1010,6 +1054,17 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
}
+ const PPCRegisterInfo *RegInfo =
+ static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ if (RegInfo->hasBasePointer(MF)) {
+ HasGPSaveArea = true;
+
+ int FI = PFI->getBasePointerSaveIndex();
+ assert(FI && "No Base Pointer Save Slot!");
+
+ FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+ }
+
// General register save area starts right below the Floating-point
// register save area.
if (HasGPSaveArea || HasG8SaveArea) {
@@ -1122,8 +1177,12 @@ PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF,
RC->getAlignment(),
false));
+ // Might we have over-aligned allocas?
+ bool HasAlVars = MFI->hasVarSizedObjects() &&
+ MFI->getMaxAlignment() > getStackAlignment();
+
// These kinds of spills might need two registers.
- if (spillsCR(MF) || spillsVRSAVE(MF))
+ if (spillsCR(MF) || spillsVRSAVE(MF) || HasAlVars)
RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
RC->getAlignment(),
false));
@@ -1151,6 +1210,12 @@ PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
+ // Only Darwin actually uses the VRSAVE register, but it can still appear
+ // here if, for example, @llvm.eh.unwind.init() is used. If we're not on
+ // Darwin, ignore it.
+ if (Reg == PPC::VRSAVE && !Subtarget.isDarwinABI())
+ continue;
+
// CR2 through CR4 are the nonvolatile CR fields.
bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4;
@@ -1212,7 +1277,7 @@ restoreCRs(bool isPPC64, bool is31,
MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::LWZ),
PPC::R12),
CSI[CSIIndex].getFrameIdx()));
- RestoreOp = PPC::MTCRF;
+ RestoreOp = PPC::MTOCRF;
MoveReg = PPC::R12;
}
@@ -1300,6 +1365,12 @@ PPCFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
+ // Only Darwin actually uses the VRSAVE register, but it can still appear
+ // here if, for example, @llvm.eh.unwind.init() is used. If we're not on
+ // Darwin, ignore it.
+ if (Reg == PPC::VRSAVE && !Subtarget.isDarwinABI())
+ continue;
+
if (Reg == PPC::CR2) {
CR2Spilled = true;
// The spill slot is associated only with CR2, which is the
diff --git a/lib/Target/PowerPC/PPCFrameLowering.h b/lib/Target/PowerPC/PPCFrameLowering.h
index 6f5f9368c6c6..7aab37e188fe 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.h
+++ b/lib/Target/PowerPC/PPCFrameLowering.h
@@ -94,6 +94,16 @@ public:
return isPPC64 ? -8U : -4U;
}
+ /// getBasePointerSaveOffset - Return the previous frame offset to save the
+ /// base pointer.
+ static unsigned getBasePointerSaveOffset(bool isPPC64, bool isDarwinABI) {
+ if (isDarwinABI)
+ return isPPC64 ? -16U : -8U;
+
+ // SVR4 ABI: First slot in the general register save area.
+ return isPPC64 ? -16U : -8U;
+ }
+
/// getLinkageSize - Return the size of the PowerPC ABI linkage area.
///
static unsigned getLinkageSize(bool isPPC64, bool isDarwinABI) {
diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index 4bf1e3396429..0df50e17dd9d 100644
--- a/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -71,8 +71,8 @@ void PPCScoreboardHazardRecognizer::Reset() {
// 3. Handling of the esoteric cases in "Resource-based Instruction Grouping".
//
-PPCHazardRecognizer970::PPCHazardRecognizer970(const TargetInstrInfo &tii)
- : TII(tii) {
+PPCHazardRecognizer970::PPCHazardRecognizer970(const TargetMachine &TM)
+ : TM(TM) {
EndDispatchGroup();
}
@@ -91,7 +91,7 @@ PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
bool &isFirst, bool &isSingle,
bool &isCracked,
bool &isLoad, bool &isStore) {
- const MCInstrDesc &MCID = TII.get(Opcode);
+ const MCInstrDesc &MCID = TM.getInstrInfo()->get(Opcode);
isLoad = MCID.mayLoad();
isStore = MCID.mayStore();
diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.h b/lib/Target/PowerPC/PPCHazardRecognizers.h
index 55b45d01b20e..84b8e6de4579 100644
--- a/lib/Target/PowerPC/PPCHazardRecognizers.h
+++ b/lib/Target/PowerPC/PPCHazardRecognizers.h
@@ -43,7 +43,7 @@ public:
/// setting the CTR register then branching through it within a dispatch group),
/// or storing then loading from the same address within a dispatch group.
class PPCHazardRecognizer970 : public ScheduleHazardRecognizer {
- const TargetInstrInfo &TII;
+ const TargetMachine &TM;
unsigned NumIssued; // Number of insts issued, including advanced cycles.
@@ -64,7 +64,7 @@ class PPCHazardRecognizer970 : public ScheduleHazardRecognizer {
unsigned NumStores;
public:
- PPCHazardRecognizer970(const TargetInstrInfo &TII);
+ PPCHazardRecognizer970(const TargetMachine &TM);
virtual HazardType getHazardType(SUnit *SU, int Stalls);
virtual void EmitInstruction(SUnit *SU);
virtual void AdvanceCycle();
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index aed0fbb6c84e..6ba6af6446e5 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -110,13 +110,13 @@ namespace {
/// SelectCC - Select a comparison of the specified values with the
/// specified condition code, returning the CR# of the expression.
- SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, DebugLoc dl);
+ SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDLoc dl);
/// SelectAddrImm - Returns true if the address N can be represented by
/// a base register plus a signed 16-bit displacement [r+imm].
bool SelectAddrImm(SDValue N, SDValue &Disp,
SDValue &Base) {
- return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG);
+ return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG, false);
}
/// SelectAddrImmOffs - Return true if the operand is valid for a preinc
@@ -145,11 +145,11 @@ namespace {
return PPCLowering.SelectAddressRegRegOnly(N, Base, Index, *CurDAG);
}
- /// SelectAddrImmShift - Returns true if the address N can be represented by
- /// a base register plus a signed 14-bit displacement [r+imm*4]. Suitable
- /// for use by STD and friends.
- bool SelectAddrImmShift(SDValue N, SDValue &Disp, SDValue &Base) {
- return PPCLowering.SelectAddressRegImmShift(N, Disp, Base, *CurDAG);
+ /// SelectAddrImmX4 - Returns true if the address N can be represented by
+ /// a base register plus a signed 16-bit displacement that is a multiple of 4.
+ /// Suitable for use by STD and friends.
+ bool SelectAddrImmX4(SDValue N, SDValue &Disp, SDValue &Base) {
+ return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG, true);
}
// Select an address into a single register.
@@ -330,19 +330,22 @@ static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
}
bool PPCDAGToDAGISel::isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME) {
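+  // An all-zero value has no run of ones and would confuse the leading-zero
+  // arithmetic below, so reject it up front.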
+ if (!Val)
+ return false;
+
if (isShiftedMask_32(Val)) {
// look for the first non-zero bit
- MB = CountLeadingZeros_32(Val);
+ MB = countLeadingZeros(Val);
// look for the first zero bit after the run of ones
- ME = CountLeadingZeros_32((Val - 1) ^ Val);
+ ME = countLeadingZeros((Val - 1) ^ Val);
return true;
} else {
Val = ~Val; // invert mask
if (isShiftedMask_32(Val)) {
// effectively look for the first zero bit
- ME = CountLeadingZeros_32(Val) - 1;
+ ME = countLeadingZeros(Val) - 1;
// effectively look for the first one bit after the run of zeros
- MB = CountLeadingZeros_32((Val - 1) ^ Val) + 1;
+ MB = countLeadingZeros((Val - 1) ^ Val) + 1;
return true;
}
}
@@ -397,7 +400,7 @@ bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
APInt LKZ, LKO, RKZ, RKO;
CurDAG->ComputeMaskedBits(Op0, LKZ, LKO);
@@ -435,7 +438,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
}
unsigned MB, ME;
- if (InsertMask && isRunOfOnes(InsertMask, MB, ME)) {
+ if (isRunOfOnes(InsertMask, MB, ME)) {
SDValue Tmp1, Tmp2;
if ((Op1Opc == ISD::SHL || Op1Opc == ISD::SRL) &&
@@ -447,10 +450,10 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
unsigned SHOpc = Op1.getOperand(0).getOpcode();
if ((SHOpc == ISD::SHL || SHOpc == ISD::SRL) &&
isInt32Immediate(Op1.getOperand(0).getOperand(1), Value)) {
+ // Note that Value must be in range here (less than 32) because
+ // otherwise there would not be any bits set in InsertMask.
Op1 = Op1.getOperand(0).getOperand(0);
SH = (SHOpc == ISD::SHL) ? Value : 32 - Value;
- } else {
- Op1 = Op1.getOperand(0);
}
}
@@ -466,7 +469,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
/// SelectCC - Select a comparison of the specified values with the specified
/// condition code, returning the CR# of the expression.
SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
- ISD::CondCode CC, DebugLoc dl) {
+ ISD::CondCode CC, SDLoc dl) {
// Always select the LHS.
unsigned Opc;
@@ -594,12 +597,8 @@ static PPC::Predicate getPredicateForSetCC(ISD::CondCode CC) {
/// getCRIdxForSetCC - Return the index of the condition register field
/// associated with the SetCC condition, and whether or not the field is
/// treated as inverted. That is, lt = 0; ge = 0 inverted.
-///
-/// If this returns with Other != -1, then the returned comparison is an or of
-/// two simpler comparisons. In this case, Invert is guaranteed to be false.
-static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert, int &Other) {
+static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert) {
Invert = false;
- Other = -1;
switch (CC) {
default: llvm_unreachable("Unknown condition!");
case ISD::SETOLT:
@@ -710,7 +709,7 @@ static unsigned int getVCmpEQInst(MVT::SimpleValueType VecVT) {
SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
unsigned Imm;
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
EVT PtrVT = CurDAG->getTargetLoweringInfo().getPointerTy();
@@ -847,8 +846,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
}
bool Inv;
- int OtherCondIdx;
- unsigned Idx = getCRIdxForSetCC(CC, Inv, OtherCondIdx);
+ unsigned Idx = getCRIdxForSetCC(CC, Inv);
SDValue CCReg = SelectCC(LHS, RHS, CC, dl);
SDValue IntCR;
@@ -859,44 +857,29 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg,
InFlag).getValue(1);
- if (PPCSubTarget.hasMFOCRF() && OtherCondIdx == -1)
- IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
- CCReg), 0);
- else
- IntCR = SDValue(CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32,
- CR7Reg, CCReg), 0);
+ IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
+ CCReg), 0);
SDValue Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31),
getI32Imm(31), getI32Imm(31) };
- if (OtherCondIdx == -1 && !Inv)
+ if (!Inv)
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
// Get the specified bit.
SDValue Tmp =
SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
- if (Inv) {
- assert(OtherCondIdx == -1 && "Can't have split plus negation");
- return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1));
- }
-
- // Otherwise, we have to turn an operation like SETONE -> SETOLT | SETOGT.
- // We already got the bit for the first part of the comparison (e.g. SETULE).
-
- // Get the other bit of the comparison.
- Ops[1] = getI32Imm((32-(3-OtherCondIdx)) & 31);
- SDValue OtherCond =
- SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
-
- return CurDAG->SelectNodeTo(N, PPC::OR, MVT::i32, Tmp, OtherCond);
+ return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1));
}
// Select - Convert the specified operand from a target-independent to a
// target-specific node if it hasn't already been changed.
SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
- if (N->isMachineOpcode())
+ SDLoc dl(N);
+ if (N->isMachineOpcode()) {
+ N->setNodeId(-1);
return NULL; // Already selected.
+ }
switch (N->getOpcode()) {
default: break;
@@ -912,7 +895,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// If it can't be represented as a 32 bit value.
if (!isInt<32>(Imm)) {
- Shift = CountTrailingZeros_64(Imm);
+ Shift = countTrailingZeros<uint64_t>(Imm);
int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
// If the shifted value fits 32 bits.
@@ -992,15 +975,10 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
getSmallIPtrImm(0));
}
- case PPCISD::MFCR: {
+ case PPCISD::MFOCRF: {
SDValue InFlag = N->getOperand(1);
- // Use MFOCRF if supported.
- if (PPCSubTarget.hasMFOCRF())
- return CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32,
- N->getOperand(0), InFlag);
- else
- return CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32,
- N->getOperand(0), InFlag);
+ return CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32,
+ N->getOperand(0), InFlag);
}
case ISD::SDIV: {
@@ -1242,6 +1220,15 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
getI32Imm(BROpc) };
return CurDAG->SelectNodeTo(N, SelectCCOp, N->getValueType(0), Ops, 4);
}
+ case PPCISD::BDNZ:
+ case PPCISD::BDZ: {
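+    // Counter-based loop branches: select bdnz/bdz (the 64-bit CTR forms on
+    // ppc64), passing the branch target first and the chain second.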
+ bool IsPPC64 = PPCSubTarget.isPPC64();
+ SDValue Ops[] = { N->getOperand(1), N->getOperand(0) };
+ return CurDAG->SelectNodeTo(N, N->getOpcode() == PPCISD::BDNZ ?
+ (IsPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
+ (IsPPC64 ? PPC::BDZ8 : PPC::BDZ),
+ MVT::Other, Ops, 2);
+ }
case PPCISD::COND_BRANCH: {
// Op #0 is the Chain.
// Op #1 is the PPC::PRED_* number.
@@ -1493,13 +1480,13 @@ void PPCDAGToDAGISel::PostprocessISelDAG() {
continue;
break;
case PPC::ADDIdtprelL:
- Flags = PPCII::MO_DTPREL16_LO;
+ Flags = PPCII::MO_DTPREL_LO;
break;
case PPC::ADDItlsldL:
- Flags = PPCII::MO_TLSLD16_LO;
+ Flags = PPCII::MO_TLSLD_LO;
break;
case PPC::ADDItocL:
- Flags = PPCII::MO_TOC16_LO;
+ Flags = PPCII::MO_TOC_LO;
break;
}
@@ -1519,8 +1506,16 @@ void PPCDAGToDAGISel::PostprocessISelDAG() {
// immediate operand, add it now.
if (ReplaceFlags) {
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd)) {
- DebugLoc dl = GA->getDebugLoc();
+ SDLoc dl(GA);
const GlobalValue *GV = GA->getGlobal();
+ // We can't perform this optimization for data whose alignment
+ // is insufficient for the instruction encoding.
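+        // (LD, STD and LWA are DS-form instructions whose displacement must
+        // be a multiple of 4, so sufficiently aligned data is required.)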
+ if (GV->getAlignment() < 4 &&
+ (StorageOpcode == PPC::LD || StorageOpcode == PPC::STD ||
+ StorageOpcode == PPC::LWA)) {
+ DEBUG(dbgs() << "Rejected this candidate for alignment.\n\n");
+ continue;
+ }
ImmOpnd = CurDAG->getTargetGlobalAddress(GV, dl, MVT::i64, 0, Flags);
} else if (ConstantPoolSDNode *CP =
dyn_cast<ConstantPoolSDNode>(ImmOpnd)) {
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 3fcafdcbca44..8da5f0563c6a 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -16,6 +16,7 @@
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
+#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -36,21 +37,6 @@
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
-static bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-static bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
- MVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-static bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
- MVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-
static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
@@ -64,14 +50,15 @@ static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
if (TM.getSubtargetImpl()->isDarwin())
return new TargetLoweringObjectFileMachO();
+ if (TM.getSubtargetImpl()->isSVR4ABI())
+ return new PPC64LinuxTargetObjectFile();
+
return new TargetLoweringObjectFileELF();
}
PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
: TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {
const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();
- PPCRegInfo = TM.getRegisterInfo();
- PPCII = TM.getInstrInfo();
setPow2DivIsCheap();
@@ -162,28 +149,24 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
Subtarget->hasFRSQRTES() && Subtarget->hasFRES()))
setOperationAction(ISD::FSQRT, MVT::f32, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ if (Subtarget->hasFCPSGN()) {
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
+ } else {
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ }
if (Subtarget->hasFPRND()) {
setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
setOperationAction(ISD::FCEIL, MVT::f64, Legal);
setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
+ setOperationAction(ISD::FROUND, MVT::f64, Legal);
setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
setOperationAction(ISD::FCEIL, MVT::f32, Legal);
setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
-
- // frin does not implement "ties to even." Thus, this is safe only in
- // fast-math mode.
- if (TM.Options.UnsafeFPMath) {
- setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
-
- // These need to set FE_INEXACT, and use a custom inserter.
- setOperationAction(ISD::FRINT, MVT::f64, Legal);
- setOperationAction(ISD::FRINT, MVT::f32, Legal);
- }
+ setOperationAction(ISD::FROUND, MVT::f32, Legal);
}
// PowerPC does not have BSWAP, CTPOP or CTTZ
@@ -241,11 +224,6 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// We cannot sextinreg(i1). Expand to shifts.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
-
// NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
// SjLj exception handling but a light-weight setjmp/longjmp replacement to
// support continuation, user-level threading, and etc.. As a result, no
@@ -298,8 +276,13 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
} else
setOperationAction(ISD::VAARG, MVT::Other, Expand);
+ if (Subtarget->isSVR4ABI() && !isPPC64)
+ // VACOPY is custom lowered with the 32-bit SVR4 ABI.
+ setOperationAction(ISD::VACOPY , MVT::Other, Custom);
+ else
+ setOperationAction(ISD::VACOPY , MVT::Other, Expand);
+
// Use the default implementation.
- setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
@@ -309,6 +292,9 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ // To handle counter-based loop conditions.
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
+
// Comparisons that require checking two conditions.
setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
@@ -407,6 +393,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::UDIV, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
setOperationAction(ISD::FDIV, VT, Expand);
+ setOperationAction(ISD::FREM, VT, Expand);
setOperationAction(ISD::FNEG, VT, Expand);
setOperationAction(ISD::FSQRT, VT, Expand);
setOperationAction(ISD::FLOG, VT, Expand);
@@ -501,6 +488,9 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand);
setCondCodeAction(ISD::SETULT, MVT::v4f32, Expand);
setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand);
+
+ setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
+ setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
}
if (Subtarget->has64BitSupport()) {
@@ -529,9 +519,11 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::SINT_TO_FP);
+ setTargetDAGCombine(ISD::LOAD);
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::BR_CC);
setTargetDAGCombine(ISD::BSWAP);
+ setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
// Use reciprocal estimates.
if (TM.Options.UnsafeFPMath) {
@@ -564,7 +556,10 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setInsertFencesForAtomic(true);
- setSchedulingPreference(Sched::Hybrid);
+ if (Subtarget->enableMachineScheduler())
+ setSchedulingPreference(Sched::Source);
+ else
+ setSchedulingPreference(Sched::Hybrid);
computeRegisterProperties();
@@ -583,24 +578,47 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
}
}
+/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
+/// the desired ByVal argument alignment.
+static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
+ unsigned MaxMaxAlign) {
+ if (MaxAlign == MaxMaxAlign)
+ return;
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
+ MaxAlign = 32;
+ else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
+ MaxAlign = 16;
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ unsigned EltAlign = 0;
+ getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
+ if (EltAlign > MaxAlign)
+ MaxAlign = EltAlign;
+ } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ unsigned EltAlign = 0;
+ getMaxByValAlign(STy->getElementType(i), EltAlign, MaxMaxAlign);
+ if (EltAlign > MaxAlign)
+ MaxAlign = EltAlign;
+ if (MaxAlign == MaxMaxAlign)
+ break;
+ }
+ }
+}
+
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
- const TargetMachine &TM = getTargetMachine();
// Darwin passes everything on 4 byte boundary.
- if (TM.getSubtarget<PPCSubtarget>().isDarwin())
+ if (PPCSubTarget.isDarwin())
return 4;
// 16byte and wider vectors are passed on 16byte boundary.
- if (VectorType *VTy = dyn_cast<VectorType>(Ty))
- if (VTy->getBitWidth() >= 128)
- return 16;
-
// The rest is 8 on PPC64 and 4 on PPC32 boundary.
- if (PPCSubTarget.isPPC64())
- return 8;
-
- return 4;
+ unsigned Align = PPCSubTarget.isPPC64() ? 8 : 4;
+ if (PPCSubTarget.hasAltivec() || PPCSubTarget.hasQPX())
+ getMaxByValAlign(Ty, Align, PPCSubTarget.hasQPX() ? 32 : 16);
+ return Align;
}
const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -634,7 +652,7 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
- case PPCISD::MFCR: return "PPCISD::MFCR";
+ case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
case PPCISD::VCMP: return "PPCISD::VCMP";
case PPCISD::VCMPo: return "PPCISD::VCMPo";
case PPCISD::LBRX: return "PPCISD::LBRX";
@@ -642,6 +660,8 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::LARX: return "PPCISD::LARX";
case PPCISD::STCX: return "PPCISD::STCX";
case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
+ case PPCISD::BDNZ: return "PPCISD::BDNZ";
+ case PPCISD::BDZ: return "PPCISD::BDZ";
case PPCISD::MFFS: return "PPCISD::MFFS";
case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
@@ -662,10 +682,11 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
+ case PPCISD::SC: return "PPCISD::SC";
}
}
-EVT PPCTargetLowering::getSetCCResultType(EVT VT) const {
+EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
if (!VT.isVector())
return MVT::i32;
return VT.changeVectorElementTypeToInteger();
@@ -1036,24 +1057,68 @@ bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
return false;
}
+// If we happen to be doing an i64 load or store into a stack slot that has
+// less than a 4-byte alignment, then the frame-index elimination may need to
+// use an indexed load or store instruction (because the offset may not be a
+// multiple of 4). The extra register needed to hold the offset comes from the
+// register scavenger, and it is possible that the scavenger will need to use
+// an emergency spill slot. As a result, we need to make sure that a spill slot
+// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
+// stack slot.
+static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
+ // FIXME: This does not handle the LWA case.
+ if (VT != MVT::i64)
+ return;
+
+ // NOTE: We'll exclude negative FIs here, which come from argument
+ // lowering, because there are no known test cases triggering this problem
+ // using packed structures (or similar). We can remove this exclusion if
+ // we find such a test case. The reason why this is so test-case driven is
+ // because this entire 'fixup' is only to prevent crashes (from the
+ // register scavenger) on not-really-valid inputs. For example, if we have:
+ // %a = alloca i1
+ // %b = bitcast i1* %a to i64*
+ // store i64 0, i64* %b
+ // then the store should really be marked as 'align 1', but is not. If it
+ // were marked as 'align 1' then the indexed form would have been
+ // instruction-selected initially, and the problem this 'fixup' is preventing
+ // won't happen regardless.
+ if (FrameIdx < 0)
+ return;
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ unsigned Align = MFI->getObjectAlignment(FrameIdx);
+ if (Align >= 4)
+ return;
+
+ PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ FuncInfo->setHasNonRISpills();
+}
+
/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
-/// represented as reg+reg.
+/// represented as reg+reg. If Aligned is true, only accept displacements
+/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
SDValue &Base,
- SelectionDAG &DAG) const {
+ SelectionDAG &DAG,
+ bool Aligned) const {
// FIXME dl should come from parent load or store, not from address
- DebugLoc dl = N.getDebugLoc();
+ SDLoc dl(N);
// If this can be more profitably realized as r+r, fail.
if (SelectAddressRegReg(N, Disp, Base, DAG))
return false;
if (N.getOpcode() == ISD::ADD) {
short imm = 0;
- if (isIntS16Immediate(N.getOperand(1), imm)) {
- Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
+ if (isIntS16Immediate(N.getOperand(1), imm) &&
+ (!Aligned || (imm & 3) == 0)) {
+ Disp = DAG.getTargetConstant(imm, N.getValueType());
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
+ fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
} else {
Base = N.getOperand(0);
}
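[Editor's note] The Aligned flag added to SelectAddressRegImm exists because LD, STD and friends use the DS instruction form, whose 16-bit displacement drops its two low bits, so only offsets that are multiples of 4 can be encoded. A hedged illustration of the (imm & 3) == 0 guards introduced above; fitsDSForm is an invented name, not part of the patch:

  static bool fitsDSForm(short Imm) { return (Imm & 3) == 0; }
  // fitsDSForm(12) -> true  (encodable; the instruction stores 12 >> 2 == 3)
  // fitsDSForm(10) -> false (when Aligned, selection falls back to reg+reg)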
@@ -1072,7 +1137,8 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
}
} else if (N.getOpcode() == ISD::OR) {
short imm = 0;
- if (isIntS16Immediate(N.getOperand(1), imm)) {
+ if (isIntS16Immediate(N.getOperand(1), imm) &&
+ (!Aligned || (imm & 3) == 0)) {
// If this is an or of disjoint bitfields, we can codegen this as an add
// (for better address arithmetic) if the LHS and RHS of the OR are
// provably disjoint.
@@ -1083,7 +1149,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
Base = N.getOperand(0);
- Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
+ Disp = DAG.getTargetConstant(imm, N.getValueType());
return true;
}
}
@@ -1093,7 +1159,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
// If this address fits entirely in a 16-bit sext immediate field, codegen
// this as "d, 0"
short Imm;
- if (isIntS16Immediate(CN, Imm)) {
+ if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
CN->getValueType(0));
@@ -1101,8 +1167,9 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
}
// Handle 32-bit sext immediates with LIS + addr mode.
- if (CN->getValueType(0) == MVT::i32 ||
- (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
+ if ((CN->getValueType(0) == MVT::i32 ||
+ (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
+ (!Aligned || (CN->getZExtValue() & 3) == 0)) {
int Addr = (int)CN->getZExtValue();
// Otherwise, break this down into an LIS + disp.
@@ -1116,9 +1183,10 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
}
Disp = DAG.getTargetConstant(0, getPointerTy());
- if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
- else
+ fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
+ } else
Base = N;
return true; // [r+0]
}
@@ -1150,92 +1218,6 @@ bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
return true;
}
-/// SelectAddressRegImmShift - Returns true if the address N can be
-/// represented by a base register plus a signed 14-bit displacement
-/// [r+imm*4]. Suitable for use by STD and friends.
-bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
- SDValue &Base,
- SelectionDAG &DAG) const {
- // FIXME dl should come from the parent load or store, not the address
- DebugLoc dl = N.getDebugLoc();
- // If this can be more profitably realized as r+r, fail.
- if (SelectAddressRegReg(N, Disp, Base, DAG))
- return false;
-
- if (N.getOpcode() == ISD::ADD) {
- short imm = 0;
- if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
- Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
- if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
- Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
- } else {
- Base = N.getOperand(0);
- }
- return true; // [r+i]
- } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
- // Match LOAD (ADD (X, Lo(G))).
- assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
- && "Cannot handle constant offsets yet!");
- Disp = N.getOperand(1).getOperand(0); // The global address.
- assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
- Disp.getOpcode() == ISD::TargetConstantPool ||
- Disp.getOpcode() == ISD::TargetJumpTable);
- Base = N.getOperand(0);
- return true; // [&g+r]
- }
- } else if (N.getOpcode() == ISD::OR) {
- short imm = 0;
- if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
- // If this is an or of disjoint bitfields, we can codegen this as an add
- // (for better address arithmetic) if the LHS and RHS of the OR are
- // provably disjoint.
- APInt LHSKnownZero, LHSKnownOne;
- DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
- if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
- // If all of the bits are known zero on the LHS or RHS, the add won't
- // carry.
- Base = N.getOperand(0);
- Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
- return true;
- }
- }
- } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
- // Loading from a constant address. Verify low two bits are clear.
- if ((CN->getZExtValue() & 3) == 0) {
- // If this address fits entirely in a 14-bit sext immediate field, codegen
- // this as "d, 0"
- short Imm;
- if (isIntS16Immediate(CN, Imm)) {
- Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
- Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
- CN->getValueType(0));
- return true;
- }
-
- // Fold the low-part of 32-bit absolute addresses into addr mode.
- if (CN->getValueType(0) == MVT::i32 ||
- (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
- int Addr = (int)CN->getZExtValue();
-
- // Otherwise, break this down into an LIS + disp.
- Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
- Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
- unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
- Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base),0);
- return true;
- }
- }
- }
-
- Disp = DAG.getTargetConstant(0, getPointerTy());
- if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
- Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
- else
- Base = N;
- return true; // [r+0]
-}
-
-
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
@@ -1288,18 +1270,16 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
return true;
}
- // LDU/STU use reg+imm*4, others use reg+imm.
+ // LDU/STU can only handle immediates that are a multiple of 4.
if (VT != MVT::i64) {
- // reg + imm
- if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
+ if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
return false;
} else {
// LDU/STU need an address with at least 4-byte alignment.
if (Alignment < 4)
return false;
- // reg + imm * 4.
- if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
+ if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
return false;
}
@@ -1324,8 +1304,8 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
/// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags.
static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
unsigned &LoOpFlags, const GlobalValue *GV = 0) {
- HiOpFlags = PPCII::MO_HA16;
- LoOpFlags = PPCII::MO_LO16;
+ HiOpFlags = PPCII::MO_HA;
+ LoOpFlags = PPCII::MO_LO;
// Don't use the pic base if not in PIC relocation model. Or if we are on a
// non-darwin platform. We don't support PIC on other platforms yet.
@@ -1355,7 +1335,7 @@ static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
SelectionDAG &DAG) {
EVT PtrVT = HiPart.getValueType();
SDValue Zero = DAG.getConstant(0, PtrVT);
- DebugLoc DL = HiPart.getDebugLoc();
+ SDLoc DL(HiPart);
SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
@@ -1380,7 +1360,7 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
// The actual address of the GlobalValue is stored in the TOC.
if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
- return DAG.getNode(PPCISD::TOC_ENTRY, CP->getDebugLoc(), MVT::i64, GA,
+ return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA,
DAG.getRegister(PPC::X2, MVT::i64));
}
@@ -1401,7 +1381,7 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
// The actual address of the GlobalValue is stored in the TOC.
if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
- return DAG.getNode(PPCISD::TOC_ENTRY, JT->getDebugLoc(), MVT::i64, GA,
+ return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA,
DAG.getRegister(PPC::X2, MVT::i64));
}
@@ -1428,8 +1408,12 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
+ // FIXME: TLS addresses currently use medium model code sequences,
+ // which is the most useful form. Eventually support for small and
+ // large models could be added if users need it, at the cost of
+ // additional complexity.
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
- DebugLoc dl = GA->getDebugLoc();
+ SDLoc dl(GA);
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy();
bool is64bit = PPCSubTarget.isPPC64();
@@ -1438,9 +1422,9 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
if (Model == TLSModel::LocalExec) {
SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
- PPCII::MO_TPREL16_HA);
+ PPCII::MO_TPREL_HA);
SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
- PPCII::MO_TPREL16_LO);
+ PPCII::MO_TPREL_LO);
SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
is64bit ? MVT::i64 : MVT::i32);
SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
@@ -1452,12 +1436,14 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
if (Model == TLSModel::InitialExec) {
SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
+ SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
+ PPCII::MO_TLS);
SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
SDValue TPOffsetHi = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
PtrVT, GOTReg, TGA);
SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
PtrVT, TGA, TPOffsetHi);
- return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGA);
+ return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
}
if (Model == TLSModel::GeneralDynamic) {
@@ -1515,7 +1501,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
- DebugLoc DL = GSDN->getDebugLoc();
+ SDLoc DL(GSDN);
const GlobalValue *GV = GSDN->getGlobal();
// 64-bit SVR4 ABI code is always position-independent.
@@ -1546,7 +1532,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
@@ -1595,7 +1581,7 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
SDValue InChain = Node->getOperand(0);
SDValue VAListPtr = Node->getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
@@ -1695,6 +1681,18 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
false, false, false, 0);
}
+SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG,
+ const PPCSubtarget &Subtarget) const {
+ assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
+
+ // We have to copy the entire va_list struct:
+ // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
+ return DAG.getMemcpy(Op.getOperand(0), Op,
+ Op.getOperand(1), Op.getOperand(2),
+ DAG.getConstant(12, MVT::i32), 8, false, true,
+ MachinePointerInfo(), MachinePointerInfo());
+}
+
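[Editor's note] The 12-byte size used by the memcpy above corresponds to the 32-bit SVR4 va_list record. A sketch of the layout being copied, assuming 4-byte pointers; the struct and field names are illustrative, not taken from the patch:

  struct PPC32SVR4VAList {
    unsigned char GPRIndex;   // next general-purpose argument register
    unsigned char FPRIndex;   // next floating-point argument register
                              // plus 2 bytes of padding for pointer alignment
    char *OverflowArgArea;    // next stack-passed argument
    char *RegSaveArea;        // spilled argument registers
  };                          // 1 + 1 + 2 + 4 + 4 = 12 bytes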
SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
SelectionDAG &DAG) const {
return Op.getOperand(0);
@@ -1706,7 +1704,7 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SDValue Trmp = Op.getOperand(1); // trampoline
SDValue FPtr = Op.getOperand(2); // nested function
SDValue Nest = Op.getOperand(3); // 'nest' parameter value
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = (PtrVT == MVT::i64);
@@ -1748,7 +1746,7 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
MachineFunction &MF = DAG.getMachineFunction();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
@@ -1842,18 +1840,24 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
#include "PPCGenCallingConv.inc"
-static bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
+// Function whose sole purpose is to kill compiler warnings
+// stemming from unused functions included from PPCGenCallingConv.inc.
+CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
+ return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
+}
+
+bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
return true;
}
-static bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
- MVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
+bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
static const uint16_t ArgRegs[] = {
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
@@ -1876,11 +1880,11 @@ static bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
return false;
}
-static bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
- MVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
+bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
static const uint16_t ArgRegs[] = {
PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8
@@ -1931,7 +1935,7 @@ PPCTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
if (PPCSubTarget.isSVR4ABI()) {
@@ -1953,7 +1957,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// 32-bit SVR4 ABI Stack Frame Layout:
@@ -2170,14 +2174,14 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
SDValue
PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
SelectionDAG &DAG, SDValue ArgVal,
- DebugLoc dl) const {
+ SDLoc dl) const {
if (Flags.isSExt())
ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
DAG.getValueType(ObjectVT));
else if (Flags.isZExt())
ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
DAG.getValueType(ObjectVT));
-
+
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
}
@@ -2213,7 +2217,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// TODO: add description of PPC stack frame format, or at least some docs.
//
@@ -2304,6 +2308,13 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
InVals.push_back(FIN);
continue;
}
+
+ unsigned BVAlign = Flags.getByValAlign();
+ if (BVAlign > 8) {
+ ArgOffset = ((ArgOffset+BVAlign-1)/BVAlign)*BVAlign;
+ CurArgOffset = ArgOffset;
+ }
+
// All aggregates smaller than 8 bytes must be passed right-justified.
if (ObjSize < PtrByteSize)
CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize);
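[Editor's note] The ArgOffset adjustment added above is the standard round-up-to-a-multiple idiom; the same expression reappears in LowerCall_64SVR4 later in this patch. A worked sketch, where roundUpTo is an invented helper:

  constexpr unsigned roundUpTo(unsigned Offset, unsigned Align) {
    return ((Offset + Align - 1) / Align) * Align;
  }
  static_assert(roundUpTo(52, 16) == 64, "52 rounds up to the next multiple of 16");
  static_assert(roundUpTo(64, 16) == 64, "already-aligned offsets are unchanged");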
@@ -2502,7 +2513,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// TODO: add description of PPC stack frame format, or at least some docs.
//
@@ -2600,17 +2611,17 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
SmallVector<SDValue, 8> MemOps;
unsigned nAltivecParamsAtEnd = 0;
- // FIXME: FuncArg and Ins[ArgNo] must reference the same argument.
- // When passing anonymous aggregates, this is currently not true.
- // See LowerFormalArguments_64SVR4 for a fix.
Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
- for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo, ++FuncArg) {
+ unsigned CurArgIdx = 0;
+ for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
SDValue ArgVal;
bool needsLoad = false;
EVT ObjectVT = Ins[ArgNo].VT;
unsigned ObjSize = ObjectVT.getSizeInBits()/8;
unsigned ArgSize = ObjSize;
ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
+ std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
+ CurArgIdx = Ins[ArgNo].OrigArgIndex;
unsigned CurArgOffset = ArgOffset;
@@ -3002,9 +3013,9 @@ struct TailCallArgumentInfo {
static void
StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
SDValue Chain,
- const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
- SmallVector<SDValue, 8> &MemOpChains,
- DebugLoc dl) {
+ const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
+ SmallVectorImpl<SDValue> &MemOpChains,
+ SDLoc dl) {
for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
SDValue Arg = TailCallArgs[i].Arg;
SDValue FIN = TailCallArgs[i].FrameIdxOp;
@@ -3026,7 +3037,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
int SPDiff,
bool isPPC64,
bool isDarwinABI,
- DebugLoc dl) {
+ SDLoc dl) {
if (SPDiff) {
// Calculate the new stack slot for the return address.
int SlotSize = isPPC64 ? 8 : 4;
@@ -3061,7 +3072,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
SDValue Arg, int SPDiff, unsigned ArgOffset,
- SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
+ SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
int Offset = ArgOffset + SPDiff;
uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
@@ -3083,7 +3094,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
SDValue &LROpOut,
SDValue &FPOpOut,
bool isDarwinABI,
- DebugLoc dl) const {
+ SDLoc dl) const {
if (SPDiff) {
// Load the LR and FP stack slot for later adjusting.
EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
@@ -3113,7 +3124,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
- DebugLoc dl) {
+ SDLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
false, false, MachinePointerInfo(0),
@@ -3126,9 +3137,9 @@ static void
LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
SDValue Arg, SDValue PtrOff, int SPDiff,
unsigned ArgOffset, bool isPPC64, bool isTailCall,
- bool isVector, SmallVector<SDValue, 8> &MemOpChains,
- SmallVector<TailCallArgumentInfo, 8> &TailCallArguments,
- DebugLoc dl) {
+ bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
+ SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments,
+ SDLoc dl) {
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
if (!isTailCall) {
if (isVector) {
@@ -3149,9 +3160,9 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
static
void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
- DebugLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
+ SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
SDValue LROp, SDValue FPOp, bool isDarwinABI,
- SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) {
+ SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
MachineFunction &MF = DAG.getMachineFunction();
// Emit a sequence of copyto/copyfrom virtual registers for arguments that
@@ -3171,15 +3182,15 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
// Emit callseq_end just before tailcall node.
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag);
+ DAG.getIntPtrConstant(0, true), InFlag, dl);
InFlag = Chain.getValue(1);
}
static
unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
- SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall,
- SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
- SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys,
+ SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall,
+ SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
+ SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
const PPCSubtarget &PPCSubTarget) {
bool isPPC64 = PPCSubTarget.isPPC64();
@@ -3363,7 +3374,7 @@ SDValue
PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
SmallVector<CCValAssign, 16> RVLocs;
@@ -3406,7 +3417,7 @@ PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
}
SDValue
-PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
+PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
bool isTailCall, bool isVarArg,
SelectionDAG &DAG,
SmallVector<std::pair<unsigned, SDValue>, 8>
@@ -3476,7 +3487,9 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
// from allocating it), resulting in an additional register being
// allocated and an unnecessary move instruction being generated.
needsTOCRestore = true;
- } else if ((CallOpc == PPCISD::CALL) && !isLocalCall(Callee)) {
+ } else if ((CallOpc == PPCISD::CALL) &&
+ (!isLocalCall(Callee) ||
+ DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
// Otherwise insert NOP for non-local calls.
CallOpc = PPCISD::CALL_NOP;
}
@@ -3493,7 +3506,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(BytesCalleePops, true),
- InFlag);
+ InFlag, dl);
if (!Ins.empty())
InFlag = Chain.getValue(1);
@@ -3505,10 +3518,10 @@ SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
@@ -3542,7 +3555,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
// of the 32-bit SVR4 ABI stack frame layout.
@@ -3628,7 +3641,8 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ dl);
SDValue CallSeqStart = Chain;
// Load the return address and frame pointer so it can be moved somewhere else
@@ -3679,7 +3693,8 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
// This must go outside the CALLSEQ_START..END.
SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
- CallSeqStart.getNode()->getOperand(1));
+ CallSeqStart.getNode()->getOperand(1),
+ SDLoc(MemcpyCall));
DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
NewCallSeqStart.getNode());
Chain = CallSeqStart = NewCallSeqStart;
@@ -3755,13 +3770,14 @@ PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
SDValue CallSeqStart,
ISD::ArgFlagsTy Flags,
SelectionDAG &DAG,
- DebugLoc dl) const {
+ SDLoc dl) const {
SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
CallSeqStart.getNode()->getOperand(0),
Flags, DAG, dl);
// The MEMCPY must go outside the CALLSEQ_START..END.
SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
- CallSeqStart.getNode()->getOperand(1));
+ CallSeqStart.getNode()->getOperand(1),
+ SDLoc(MemcpyCall));
DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
NewCallSeqStart.getNode());
return NewCallSeqStart;
@@ -3774,7 +3790,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
unsigned NumOps = Outs.size();
@@ -3815,7 +3831,8 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ dl);
SDValue CallSeqStart = Chain;
  // Load the return address and frame pointer so it can be moved somewhere else
@@ -3889,6 +3906,15 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
if (Size == 0)
continue;
+ unsigned BVAlign = Flags.getByValAlign();
+ if (BVAlign > 8) {
+ if (BVAlign % PtrByteSize != 0)
+ llvm_unreachable(
+ "ByVal alignment is not a multiple of the pointer size");
+
+ ArgOffset = ((ArgOffset+BVAlign-1)/BVAlign)*BVAlign;
+ }
+
// All aggregates smaller than 8 bytes must be passed right-justified.
if (Size==1 || Size==2 || Size==4) {
EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
@@ -3940,7 +3966,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// register.
// FIXME: The memcpy seems to produce pretty awful code for
// small aggregates, particularly for packed ones.
- // FIXME: It would be preferable to use the slot in the
+ // FIXME: It would be preferable to use the slot in the
// parameter save area instead of a new local variable.
SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType());
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
@@ -3980,7 +4006,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
continue;
}
- switch (Arg.getValueType().getSimpleVT().SimpleTy) {
+ switch (Arg.getSimpleValueType().SimpleTy) {
default: llvm_unreachable("Unexpected ValueType for argument!");
case MVT::i32:
case MVT::i64:
@@ -4003,7 +4029,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// must be passed right-justified in the stack doubleword, and
// in the GPR, if one is available.
SDValue StoreOff;
- if (Arg.getValueType().getSimpleVT().SimpleTy == MVT::f32) {
+ if (Arg.getSimpleValueType().SimpleTy == MVT::f32) {
SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
StoreOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
} else
@@ -4145,7 +4171,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
unsigned NumOps = Outs.size();
@@ -4186,7 +4212,8 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ dl);
SDValue CallSeqStart = Chain;
  // Load the return address and frame pointer so it can be moved somewhere else
@@ -4310,7 +4337,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
continue;
}
- switch (Arg.getValueType().getSimpleVT().SimpleTy) {
+ switch (Arg.getSimpleValueType().SimpleTy) {
default: llvm_unreachable("Unexpected ValueType for argument!");
case MVT::i32:
case MVT::i64:
@@ -4502,7 +4529,7 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+ SDLoc dl, SelectionDAG &DAG) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
@@ -4551,7 +4578,7 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) const {
// When we pop the dynamic allocation we need to restore the SP link.
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
  // Get the correct type for pointers.
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
@@ -4636,7 +4663,7 @@ SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
// Get the inputs.
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
  // Get the correct type for pointers.
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
@@ -4653,7 +4680,7 @@ SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
DAG.getVTList(MVT::i32, MVT::Other),
Op.getOperand(0), Op.getOperand(1));
@@ -4661,7 +4688,7 @@ SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
Op.getOperand(0), Op.getOperand(1));
}
@@ -4687,7 +4714,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
EVT CmpVT = Op.getOperand(0).getValueType();
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// If the RHS of the comparison is a 0.0, we don't need to do the
// subtraction at all.
@@ -4768,14 +4795,14 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
// FIXME: Split this code up when LegalizeDAGTypes lands.
SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
- DebugLoc dl) const {
+ SDLoc dl) const {
assert(Op.getOperand(0).getValueType().isFloatingPoint());
SDValue Src = Op.getOperand(0);
if (Src.getValueType() == MVT::f32)
Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
SDValue Tmp;
- switch (Op.getValueType().getSimpleVT().SimpleTy) {
+ switch (Op.getSimpleValueType().SimpleTy) {
default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
case MVT::i32:
Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
@@ -4827,7 +4854,7 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// Don't handle ppc_fp128 here; let it be lowered to a libcall.
if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
return SDValue();
@@ -4961,7 +4988,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
/*
The rounding mode is in bits 30:31 of FPSR, and has the following
settings:
@@ -5027,7 +5054,7 @@ SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
assert(Op.getNumOperands() == 3 &&
VT == Op.getOperand(1).getValueType() &&
"Unexpected SHL!");
@@ -5055,7 +5082,7 @@ SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned BitWidth = VT.getSizeInBits();
assert(Op.getNumOperands() == 3 &&
VT == Op.getOperand(1).getValueType() &&
@@ -5083,7 +5110,7 @@ SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
}
SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
assert(Op.getNumOperands() == 3 &&
@@ -5118,7 +5145,7 @@ SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize. Cast the result to VT.
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
- SelectionDAG &DAG, DebugLoc dl) {
+ SelectionDAG &DAG, SDLoc dl) {
assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
static const EVT VTys[] = { // canonical VT to use for each size.
@@ -5142,10 +5169,20 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res);
}
+/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
+/// specified intrinsic ID.
+static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op,
+ SelectionDAG &DAG, SDLoc dl,
+ EVT DestVT = MVT::Other) {
+ if (DestVT == MVT::Other) DestVT = Op.getValueType();
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
+ DAG.getConstant(IID, MVT::i32), Op);
+}
+
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
- SelectionDAG &DAG, DebugLoc dl,
+ SelectionDAG &DAG, SDLoc dl,
EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = LHS.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
@@ -5156,7 +5193,7 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
SDValue Op2, SelectionDAG &DAG,
- DebugLoc dl, EVT DestVT = MVT::Other) {
+ SDLoc dl, EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = Op0.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
@@ -5166,7 +5203,7 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount. The result has the specified value type.
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
- EVT VT, SelectionDAG &DAG, DebugLoc dl) {
+ EVT VT, SelectionDAG &DAG, SDLoc dl) {
// Force LHS/RHS to be the right type.
LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
@@ -5185,7 +5222,7 @@ static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
// sequence of ops that should be used.
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
@@ -5341,7 +5378,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
SDValue RHS, SelectionDAG &DAG,
- DebugLoc dl) {
+ SDLoc dl) {
unsigned OpNum = (PFEntry >> 26) & 0x0F;
unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
@@ -5420,7 +5457,7 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
@@ -5587,7 +5624,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
// If this is a lowered altivec predicate compare, CompareOpc is set to the
// opcode number of the comparison.
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
int CompareOpc;
bool isDot;
if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
@@ -5612,7 +5649,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
// Now that we have the comparison, emit a copy from the CR to a GPR.
// This is flagged to the above dot comparison.
- SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32,
+ SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
DAG.getRegister(PPC::CR6, MVT::i32),
CompNode.getValue(1));
@@ -5651,7 +5688,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// Create a stack slot that is 16-byte aligned.
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
@@ -5668,7 +5705,7 @@ SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
}
SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (Op.getValueType() == MVT::v4i32) {
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
@@ -5745,6 +5782,9 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::VAARG:
return LowerVAARG(Op, DAG, PPCSubTarget);
+ case ISD::VACOPY:
+ return LowerVACOPY(Op, DAG, PPCSubTarget);
+
case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
case ISD::DYNAMIC_STACKALLOC:
return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
@@ -5755,7 +5795,7 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::FP_TO_UINT:
case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG,
- Op.getDebugLoc());
+ SDLoc(Op));
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
@@ -5772,6 +5812,9 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
+ // For counter-based loop handling.
+ case ISD::INTRINSIC_W_CHAIN: return SDValue();
+
// Frame & Return address.
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
@@ -5782,10 +5825,26 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const {
const TargetMachine &TM = getTargetMachine();
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
switch (N->getOpcode()) {
default:
llvm_unreachable("Do not know how to custom type legalize this operation!");
+ case ISD::INTRINSIC_W_CHAIN: {
+ if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
+ Intrinsic::ppc_is_decremented_ctr_nonzero)
+ break;
+
+ assert(N->getValueType(0) == MVT::i1 &&
+ "Unexpected result type for CTR decrement intrinsic");
+ EVT SVT = getSetCCResultType(*DAG.getContext(), N->getValueType(0));
+ SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
+ SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
+ N->getOperand(1));
+
+ Results.push_back(NewInt);
+ Results.push_back(NewInt.getValue(1));
+ break;
+ }
case ISD::VAARG: {
if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI()
|| TM.getSubtarget<PPCSubtarget>().isPPC64())
@@ -5821,6 +5880,9 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
return;
}
case ISD::FP_TO_SINT:
+ // LowerFP_TO_INT() can only handle f32 and f64.
+ if (N->getOperand(0).getValueType() == MVT::ppcf128)
+ return;
Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
return;
}
@@ -6092,6 +6154,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// thisMBB:
const int64_t LabelOffset = 1 * PVT.getStoreSize();
const int64_t TOCOffset = 3 * PVT.getStoreSize();
+ const int64_t BPOffset = 4 * PVT.getStoreSize();
// Prepare IP either in reg.
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
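[Editor's note] Read together with emitEHSjLjLongJmp below, these offsets imply one pointer-sized slot per saved register in the setjmp buffer. A sketch of the layout; treating slot 0 as the frame pointer is an assumption, since that slot is written outside this hunk:

  enum SjLjBufSlot {
    SJLJ_FP    = 0,  // frame pointer (assumed)
    SJLJ_LABEL = 1,  // resume address       -> LabelOffset
    SJLJ_SP    = 2,  // stack pointer        -> SPOffset (see emitEHSjLjLongJmp)
    SJLJ_TOC   = 3,  // TOC pointer on PPC64 -> TOCOffset
    SJLJ_BP    = 4   // base pointer         -> BPOffset, added by this change
  };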
@@ -6101,15 +6164,32 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
if (PPCSubTarget.isPPC64() && PPCSubTarget.isSVR4ABI()) {
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
.addReg(PPC::X2)
- .addImm(TOCOffset / 4)
+ .addImm(TOCOffset)
.addReg(BufReg);
-
MIB.setMemRefs(MMOBegin, MMOEnd);
}
+ // Naked functions never have a base pointer, and so we use r1. For all
+ // other functions, this decision must be delayed until PEI.
+ unsigned BaseReg;
+ if (MF->getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::Naked))
+ BaseReg = PPCSubTarget.isPPC64() ? PPC::X1 : PPC::R1;
+ else
+ BaseReg = PPCSubTarget.isPPC64() ? PPC::BP8 : PPC::BP;
+
+ MIB = BuildMI(*thisMBB, MI, DL,
+ TII->get(PPCSubTarget.isPPC64() ? PPC::STD : PPC::STW))
+ .addReg(BaseReg)
+ .addImm(BPOffset)
+ .addReg(BufReg);
+ MIB.setMemRefs(MMOBegin, MMOEnd);
+
// Setup
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
- MIB.addRegMask(PPCRegInfo->getNoPreservedMask());
+ const PPCRegisterInfo *TRI =
+ static_cast<const PPCRegisterInfo*>(getTargetMachine().getRegisterInfo());
+ MIB.addRegMask(TRI->getNoPreservedMask());
BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
@@ -6129,7 +6209,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
if (PPCSubTarget.isPPC64()) {
MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
.addReg(LabelReg)
- .addImm(LabelOffset / 4)
+ .addImm(LabelOffset)
.addReg(BufReg);
} else {
MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
@@ -6176,12 +6256,14 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
// Since FP is only updated here but NOT referenced, it's treated as GPR.
unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
+ unsigned BP = (PVT == MVT::i64) ? PPC::X30 : PPC::R30;
MachineInstrBuilder MIB;
const int64_t LabelOffset = 1 * PVT.getStoreSize();
const int64_t SPOffset = 2 * PVT.getStoreSize();
const int64_t TOCOffset = 3 * PVT.getStoreSize();
+ const int64_t BPOffset = 4 * PVT.getStoreSize();
unsigned BufReg = MI->getOperand(0).getReg();
@@ -6202,7 +6284,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
// Reload IP
if (PVT == MVT::i64) {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
- .addImm(LabelOffset / 4)
+ .addImm(LabelOffset)
.addReg(BufReg);
} else {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
@@ -6214,7 +6296,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
// Reload SP
if (PVT == MVT::i64) {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
- .addImm(SPOffset / 4)
+ .addImm(SPOffset)
.addReg(BufReg);
} else {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
@@ -6223,13 +6305,22 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
}
MIB.setMemRefs(MMOBegin, MMOEnd);
- // FIXME: When we also support base pointers, that register must also be
- // restored here.
+ // Reload BP
+ if (PVT == MVT::i64) {
+ MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
+ .addImm(BPOffset)
+ .addReg(BufReg);
+ } else {
+ MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
+ .addImm(BPOffset)
+ .addReg(BufReg);
+ }
+ MIB.setMemRefs(MMOBegin, MMOEnd);
// Reload TOC
if (PVT == MVT::i64 && PPCSubTarget.isSVR4ABI()) {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
- .addImm(TOCOffset / 4)
+ .addImm(TOCOffset)
.addReg(BufReg);
MIB.setMemRefs(MMOBegin, MMOEnd);
@@ -6272,8 +6363,10 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
Cond.push_back(MI->getOperand(1));
DebugLoc dl = MI->getDebugLoc();
- PPCII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), Cond,
- MI->getOperand(2).getReg(), MI->getOperand(3).getReg());
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(),
+ Cond, MI->getOperand(2).getReg(),
+ MI->getOperand(3).getReg());
} else if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
MI->getOpcode() == PPC::SELECT_CC_I8 ||
MI->getOpcode() == PPC::SELECT_CC_F4 ||
@@ -6633,51 +6726,6 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Restore FPSCR value.
BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)).addImm(1).addReg(MFFSReg);
- } else if (MI->getOpcode() == PPC::FRINDrint ||
- MI->getOpcode() == PPC::FRINSrint) {
- bool isf32 = MI->getOpcode() == PPC::FRINSrint;
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
- DebugLoc dl = MI->getDebugLoc();
-
- MachineRegisterInfo &RegInfo = F->getRegInfo();
- unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
-
- // Perform the rounding.
- BuildMI(*BB, MI, dl, TII->get(isf32 ? PPC::FRINS : PPC::FRIND), Dest)
- .addReg(Src);
-
- // Compare the results.
- BuildMI(*BB, MI, dl, TII->get(isf32 ? PPC::FCMPUS : PPC::FCMPUD), CRReg)
- .addReg(Dest).addReg(Src);
-
- // If the results were not equal, then set the FPSCR XX bit.
- MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
- F->insert(It, midMBB);
- F->insert(It, exitMBB);
- exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- exitMBB->transferSuccessorsAndUpdatePHIs(BB);
-
- BuildMI(*BB, MI, dl, TII->get(PPC::BCC))
- .addImm(PPC::PRED_EQ).addReg(CRReg).addMBB(exitMBB);
-
- BB->addSuccessor(midMBB);
- BB->addSuccessor(exitMBB);
-
- BB = midMBB;
-
- // Set the FPSCR XX bit (FE_INEXACT). Note that we cannot just set
- // the FI bit here because that will not automatically set XX also,
- // and XX is what libm interprets as the FE_INEXACT flag.
- BuildMI(BB, dl, TII->get(PPC::MTFSB1)).addImm(/* 38 - 32 = */ 6);
- BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
-
- BB->addSuccessor(exitMBB);
-
- BB = exitMBB;
} else {
llvm_unreachable("Unexpected instr type to insert");
}
@@ -6717,7 +6765,7 @@ SDValue PPCTargetLowering::DAGCombineFastRecip(SDValue Op,
++Iterations;
SelectionDAG &DAG = DCI.DAG;
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue FPOne =
DAG.getConstantFP(1.0, VT.getScalarType());
@@ -6779,7 +6827,7 @@ SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op,
++Iterations;
SelectionDAG &DAG = DCI.DAG;
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue FPThreeHalves =
DAG.getConstantFP(1.5, VT.getScalarType());
@@ -6823,11 +6871,120 @@ SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op,
return SDValue();
}
+// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
+// not enforce equality of the chain operands.
+static bool isConsecutiveLS(LSBaseSDNode *LS, LSBaseSDNode *Base,
+ unsigned Bytes, int Dist,
+ SelectionDAG &DAG) {
+ EVT VT = LS->getMemoryVT();
+ if (VT.getSizeInBits() / 8 != Bytes)
+ return false;
+
+ SDValue Loc = LS->getBasePtr();
+ SDValue BaseLoc = Base->getBasePtr();
+ if (Loc.getOpcode() == ISD::FrameIndex) {
+ if (BaseLoc.getOpcode() != ISD::FrameIndex)
+ return false;
+ const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
+ int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
+ int FS = MFI->getObjectSize(FI);
+ int BFS = MFI->getObjectSize(BFI);
+ if (FS != BFS || FS != (int)Bytes) return false;
+ return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
+ }
+
+ // Handle X+C
+ if (DAG.isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
+ cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
+ return true;
+
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ const GlobalValue *GV1 = NULL;
+ const GlobalValue *GV2 = NULL;
+ int64_t Offset1 = 0;
+ int64_t Offset2 = 0;
+ bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
+ bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
+ if (isGA1 && isGA2 && GV1 == GV2)
+ return Offset1 == (Offset2 + Dist*Bytes);
+ return false;
+}
+
+// Return true if there is a nearby consecutive load to the one provided
+// (regardless of alignment). We search up and down the chain, looking through
+// token factors and other loads (but nothing else). As a result, a true
+// result indicates that it is safe to create a new consecutive load adjacent
+// to the load provided.
+static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
+ SDValue Chain = LD->getChain();
+ EVT VT = LD->getMemoryVT();
+
+ SmallSet<SDNode *, 16> LoadRoots;
+ SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
+ SmallSet<SDNode *, 16> Visited;
+
+ // First, search up the chain, branching to follow all token-factor operands.
+ // If we find a consecutive load, then we're done; otherwise, record all
+ // nodes just above the top-level loads and token factors.
+ while (!Queue.empty()) {
+ SDNode *ChainNext = Queue.pop_back_val();
+ if (!Visited.insert(ChainNext))
+ continue;
+
+ if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(ChainNext)) {
+ if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
+ return true;
+
+ if (!Visited.count(ChainLD->getChain().getNode()))
+ Queue.push_back(ChainLD->getChain().getNode());
+ } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
+ for (SDNode::op_iterator O = ChainNext->op_begin(),
+ OE = ChainNext->op_end(); O != OE; ++O)
+ if (!Visited.count(O->getNode()))
+ Queue.push_back(O->getNode());
+ } else
+ LoadRoots.insert(ChainNext);
+ }
+
+ // Second, search down the chain, starting from the top-level nodes recorded
+ // in the first phase. These top-level nodes are the nodes just above all
+ // loads and token factors. Starting with their uses, recursively look through
+ // all loads (just the chain uses) and token factors to find a consecutive
+ // load.
+ Visited.clear();
+ Queue.clear();
+
+ for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
+ IE = LoadRoots.end(); I != IE; ++I) {
+ Queue.push_back(*I);
+
+ while (!Queue.empty()) {
+ SDNode *LoadRoot = Queue.pop_back_val();
+ if (!Visited.insert(LoadRoot))
+ continue;
+
+ if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(LoadRoot))
+ if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
+ return true;
+
+ for (SDNode::use_iterator UI = LoadRoot->use_begin(),
+ UE = LoadRoot->use_end(); UI != UE; ++UI)
+ if (((isa<LoadSDNode>(*UI) &&
+ cast<LoadSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
+ UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
+ Queue.push_back(*UI);
+ }
+ }
+
+ return false;
+}
+
SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
const TargetMachine &TM = getTargetMachine();
SelectionDAG &DAG = DCI.DAG;
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
switch (N->getOpcode()) {
default: break;
case PPCISD::SHL:
@@ -6868,7 +7025,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
DCI);
if (RV.getNode() != 0) {
DCI.AddToWorklist(RV.getNode());
- RV = DAG.getNode(ISD::FP_EXTEND, N->getOperand(1).getDebugLoc(),
+ RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N->getOperand(1)),
N->getValueType(0), RV);
DCI.AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, dl, N->getValueType(0),
@@ -6881,7 +7038,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
DCI);
if (RV.getNode() != 0) {
DCI.AddToWorklist(RV.getNode());
- RV = DAG.getNode(ISD::FP_ROUND, N->getOperand(1).getDebugLoc(),
+ RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N->getOperand(1)),
N->getValueType(0), RV,
N->getOperand(1).getOperand(1));
DCI.AddToWorklist(RV.getNode());
@@ -6909,8 +7066,28 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
if (RV.getNode() != 0) {
DCI.AddToWorklist(RV.getNode());
RV = DAGCombineFastRecip(RV, DCI);
- if (RV.getNode() != 0)
+ if (RV.getNode() != 0) {
+ // Unfortunately, RV is now NaN if the input was exactly 0. Select out
+ // this case and force the answer to 0.
+
+ EVT VT = RV.getValueType();
+
+ SDValue Zero = DAG.getConstantFP(0.0, VT.getScalarType());
+ if (VT.isVector()) {
+ assert(VT.getVectorNumElements() == 4 && "Unknown vector type");
+ Zero = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Zero, Zero, Zero, Zero);
+ }
+
+ SDValue ZeroCmp =
+ DAG.getSetCC(dl, getSetCCResultType(*DAG.getContext(), VT),
+ N->getOperand(0), Zero, ISD::SETEQ);
+ DCI.AddToWorklist(ZeroCmp.getNode());
+ DCI.AddToWorklist(RV.getNode());
+
+ RV = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, dl, VT,
+ ZeroCmp, Zero, RV);
return RV;
+ }
}
}
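// A minimal standalone sketch of the hazard the select above guards against:
// building sqrt(x) from a reciprocal-square-root estimate produces NaN when
// x is exactly 0, because the estimate is +inf and the later multiply (or
// reciprocal refinement) mixes 0 with inf. The x * rsqrt(x) formulation below
// is only illustrative; the combine uses a reciprocal refinement instead, but
// the zero-input failure and the select-to-zero fix are the same.
#include <cmath>
#include <cstdio>

static double fastSqrt(double x) {
  double est = 1.0 / std::sqrt(x);     // stands in for the hardware estimate
  double unguarded = x * est;          // NaN when x == 0 (0 * inf)
  return (x == 0.0) ? 0.0 : unguarded; // the SELECT/VSELECT added above
}

int main() {
  std::printf("unguarded: %f\n", 0.0 * (1.0 / std::sqrt(0.0))); // nan
  std::printf("guarded:   %f\n", fastSqrt(0.0));                // 0.000000
  std::printf("guarded:   %f\n", fastSqrt(2.0));                // ~1.414214
}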
@@ -6999,6 +7176,160 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
cast<StoreSDNode>(N)->getMemOperand());
}
break;
+ case ISD::LOAD: {
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ EVT VT = LD->getValueType(0);
+ Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
+ if (ISD::isNON_EXTLoad(N) && VT.isVector() &&
+ TM.getSubtarget<PPCSubtarget>().hasAltivec() &&
+ (VT == MVT::v16i8 || VT == MVT::v8i16 ||
+ VT == MVT::v4i32 || VT == MVT::v4f32) &&
+ LD->getAlignment() < ABIAlignment) {
+ // This is a type-legal unaligned Altivec load.
+ SDValue Chain = LD->getChain();
+ SDValue Ptr = LD->getBasePtr();
+
+ // This implements the loading of unaligned vectors as described in
+ // the venerable Apple Velocity Engine overview. Specifically:
+ // https://developer.apple.com/hardwaredrivers/ve/alignment.html
+ // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
+ //
+ // The general idea is to expand a sequence of one or more unaligned
+ // loads into an alignment-based permutation-control instruction (lvsl),
+ // a series of regular vector loads (which always truncate their
+ // input address to an aligned address), and a series of permutations.
+ // The results of these permutations are the requested loaded values.
+ // The trick is that the last "extra" load is not taken from the address
+ // you might suspect (sizeof(vector) bytes after the last requested
+ // load), but rather sizeof(vector) - 1 bytes after the last
+ // requested vector. The point of this is to avoid a page fault if the
+ // base address happened to be aligned. This works because if the base
+ // address is aligned, then adding less than a full vector length will
+ // cause the last vector in the sequence to be (re)loaded. Otherwise,
+ // the next vector will be fetched as you might suspect was necessary.
+
+ // We might be able to reuse the permutation generation from
+ // a different base address offset from this one by an aligned amount.
+ // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
+ // optimization later.
+ SDValue PermCntl = BuildIntrinsicOp(Intrinsic::ppc_altivec_lvsl, Ptr,
+ DAG, dl, MVT::v16i8);
+
+ // Refine the alignment of the original load (a "new" load created here
+ // which was identical to the first except for the alignment would be
+ // merged with the existing node regardless).
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(LD->getPointerInfo(),
+ LD->getMemOperand()->getFlags(),
+ LD->getMemoryVT().getStoreSize(),
+ ABIAlignment);
+ LD->refineAlignment(MMO);
+ SDValue BaseLoad = SDValue(LD, 0);
+
+ // Note that the value of IncOffset (which is provided to the next
+ // load's pointer info offset value, and thus used to calculate the
+ // alignment), and the value of IncValue (which is actually used to
+ // increment the pointer value) are different! This is because we
+ // require the next load to appear to be aligned, even though it
+ // is actually offset from the base pointer by a lesser amount.
+ int IncOffset = VT.getSizeInBits() / 8;
+ int IncValue = IncOffset;
+
+ // Walk (both up and down) the chain looking for another load at the real
+ // (aligned) offset (the alignment of the other load does not matter in
+ // this case). If found, then do not use the offset reduction trick, as
+ // that will prevent the loads from being later combined (as they would
+ // otherwise be duplicates).
+ if (!findConsecutiveLoad(LD, DAG))
+ --IncValue;
+
+ SDValue Increment = DAG.getConstant(IncValue, getPointerTy());
+ Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
+
+ SDValue ExtraLoad =
+ DAG.getLoad(VT, dl, Chain, Ptr,
+ LD->getPointerInfo().getWithOffset(IncOffset),
+ LD->isVolatile(), LD->isNonTemporal(),
+ LD->isInvariant(), ABIAlignment);
+
+ SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ BaseLoad.getValue(1), ExtraLoad.getValue(1));
+
+ if (BaseLoad.getValueType() != MVT::v4i32)
+ BaseLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, BaseLoad);
+
+ if (ExtraLoad.getValueType() != MVT::v4i32)
+ ExtraLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ExtraLoad);
+
+ SDValue Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm,
+ BaseLoad, ExtraLoad, PermCntl, DAG, dl);
+
+ if (VT != MVT::v4i32)
+ Perm = DAG.getNode(ISD::BITCAST, dl, VT, Perm);
+
+ // Now we need to be really careful about how we update the users of the
+ // original load. We cannot just call DCI.CombineTo (or
+ // DAG.ReplaceAllUsesWith for that matter), because the load still has
+ // uses created here (the permutation for example) that need to stay.
+ SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
+ while (UI != UE) {
+ SDUse &Use = UI.getUse();
+ SDNode *User = *UI;
+ // Note: BaseLoad is checked here because it might not be N, but a
+ // bitcast of N.
+ if (User == Perm.getNode() || User == BaseLoad.getNode() ||
+ User == TF.getNode() || Use.getResNo() > 1) {
+ ++UI;
+ continue;
+ }
+
+ SDValue To = Use.getResNo() ? TF : Perm;
+ ++UI;
+
+ SmallVector<SDValue, 8> Ops;
+ for (SDNode::op_iterator O = User->op_begin(),
+ OE = User->op_end(); O != OE; ++O) {
+ if (*O == Use)
+ Ops.push_back(To);
+ else
+ Ops.push_back(*O);
+ }
+
+ DAG.UpdateNodeOperands(User, Ops.data(), Ops.size());
+ }
+
+ return SDValue(N, 0);
+ }
+ }
+ break;
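// A standalone sketch (not the DAG combine itself) of the address arithmetic
// behind the lvsl/lvx/vperm expansion described in the comments above: the
// vector loads ignore the low four address bits, and the "extra" load uses
// base + 15 rather than base + 16, so an already-aligned base never reads
// past the requested 16 bytes (and so cannot fault on a following page).
#include <cstdio>

int main() {
  const unsigned long bases[] = {0x1000, 0x1007}; // aligned and misaligned
  for (unsigned long base : bases) {
    unsigned long firstLoad = base & ~15UL;        // lvx truncates the address
    unsigned long extraLoad = (base + 15) & ~15UL; // the trailing lvx
    unsigned shift = (unsigned)(base & 15);        // what lvsl turns into a permute
    std::printf("base=%#lx first=%#lx extra=%#lx shift=%u\n",
                base, firstLoad, extraLoad, shift);
  }
  // base=0x1000: first == extra == 0x1000, nothing read past the requested vector
  // base=0x1007: first == 0x1000, extra == 0x1010, vperm selects bytes 7..22
  //              of the 32-byte concatenation, i.e. memory 0x1007..0x1016
}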
+ case ISD::INTRINSIC_WO_CHAIN:
+ if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() ==
+ Intrinsic::ppc_altivec_lvsl &&
+ N->getOperand(1)->getOpcode() == ISD::ADD) {
+ SDValue Add = N->getOperand(1);
+
+ if (DAG.MaskedValueIsZero(Add->getOperand(1),
+ APInt::getAllOnesValue(4 /* 16 byte alignment */).zext(
+ Add.getValueType().getScalarType().getSizeInBits()))) {
+ SDNode *BasePtr = Add->getOperand(0).getNode();
+ for (SDNode::use_iterator UI = BasePtr->use_begin(),
+ UE = BasePtr->use_end(); UI != UE; ++UI) {
+ if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
+ cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
+ Intrinsic::ppc_altivec_lvsl) {
+ // We've found another LVSL, and this address is an aligned
+ // multiple of that one. The results will be the same, so use the
+ // one we've just found instead.
+
+ return SDValue(*UI, 0);
+ }
+ }
+ }
+ }
+
+ break;
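// Illustrative only: lvsl's permute control depends only on the low four
// address bits, so adding any multiple of 16 to the base pointer leaves it
// unchanged. That is what the MaskedValueIsZero check with a low-four-bit
// mask establishes above before reusing a previously computed lvsl result.
#include <cassert>
#include <cstdint>

static unsigned lvslShift(uint64_t addr) { return addr & 15; } // models lvsl

int main() {
  uint64_t base = 0x2003;
  assert(lvslShift(base) == lvslShift(base + 16)); // aligned offset: same control
  assert(lvslShift(base) == lvslShift(base + 64));
  assert(lvslShift(base) != lvslShift(base + 8));  // unaligned offset: differs
}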
case ISD::BSWAP:
// Turn BSWAP (LOAD) -> lhbrx/lwbrx.
if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
@@ -7083,20 +7414,53 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
}
}
- // If the user is a MFCR instruction, we know this is safe. Otherwise we
- // give up for right now.
- if (FlagUser->getOpcode() == PPCISD::MFCR)
+ // If the user is a MFOCRF instruction, we know this is safe.
+ // Otherwise we give up for right now.
+ if (FlagUser->getOpcode() == PPCISD::MFOCRF)
return SDValue(VCMPoNode, 0);
}
break;
}
case ISD::BR_CC: {
// If this is a branch on an altivec predicate comparison, lower this so
- // that we don't have to do a MFCR: instead, branch directly on CR6. This
+ // that we don't have to do a MFOCRF: instead, branch directly on CR6. This
// lowering is done pre-legalize, because the legalizer lowers the predicate
// compare down to code that is difficult to reassemble.
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
+
+ // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
+ // value. If so, pass-through the AND to get to the intrinsic.
+ if (LHS.getOpcode() == ISD::AND &&
+ LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
+ cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
+ Intrinsic::ppc_is_decremented_ctr_nonzero &&
+ isa<ConstantSDNode>(LHS.getOperand(1)) &&
+ !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()->
+ isZero())
+ LHS = LHS.getOperand(0);
+
+ if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
+ cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
+ Intrinsic::ppc_is_decremented_ctr_nonzero &&
+ isa<ConstantSDNode>(RHS)) {
+ assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
+ "Counter decrement comparison is not EQ or NE");
+
+ unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
+ bool isBDNZ = (CC == ISD::SETEQ && Val) ||
+ (CC == ISD::SETNE && !Val);
+
+ // We now need to make the intrinsic dead (it cannot be instruction
+ // selected).
+ DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
+ assert(LHS.getNode()->hasOneUse() &&
+ "Counter decrement has more than one use");
+
+ return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
+ N->getOperand(0), N->getOperand(4));
+ }
+
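// Restating the branch selection above as a standalone truth table: the
// intrinsic yields "counter is non-zero after decrement", so comparing it
// equal to a non-zero constant, or not-equal to zero, is a branch-while-
// non-zero (bdnz); the remaining combinations become bdz.
#include <cassert>

enum CC { SETEQ, SETNE };
static bool isBDNZ(CC cc, unsigned rhsVal) {
  return (cc == SETEQ && rhsVal) || (cc == SETNE && !rhsVal);
}

int main() {
  assert(isBDNZ(SETEQ, 1));  // (ctr-nonzero == 1) -> bdnz
  assert(isBDNZ(SETNE, 0));  // (ctr-nonzero != 0) -> bdnz
  assert(!isBDNZ(SETEQ, 0)); // (ctr-nonzero == 0) -> bdz
  assert(!isBDNZ(SETNE, 1)); // (ctr-nonzero != 1) -> bdz
}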
int CompareOpc;
bool isDot;
@@ -7271,7 +7635,7 @@ PPCTargetLowering::getSingleConstraintMatchWeight(
std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
// GCC RS6000 Constraint Letters
switch (Constraint[0]) {
@@ -7296,7 +7660,24 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
}
}
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ std::pair<unsigned, const TargetRegisterClass*> R =
+ TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+
+ // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
+ // (which we call X[0-9]+). If a 64-bit value has been requested, and a
+ // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
+ // register.
+ // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
+ // the AsmName field from *RegisterInfo.td, then this would not be necessary.
+ if (R.first && VT == MVT::i64 && PPCSubTarget.isPPC64() &&
+ PPC::GPRCRegClass.contains(R.first)) {
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ return std::make_pair(TRI->getMatchingSuperReg(R.first,
+ PPC::sub_32, &PPC::G8RCRegClass),
+ &PPC::G8RCRegClass);
+ }
+
+ return R;
}
@@ -7406,25 +7787,13 @@ bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
return true;
}
-/// isLegalAddressImmediate - Return true if the integer value can be used
-/// as the offset of the target addressing mode for load / store of the
-/// given type.
-bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,Type *Ty) const{
- // PPC allows a sign-extended 16-bit immediate field.
- return (V > -(1 << 16) && V < (1 << 16)-1);
-}
-
-bool PPCTargetLowering::isLegalAddressImmediate(GlobalValue* GV) const {
- return false;
-}
-
SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setReturnAddressIsTaken(true);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
// Make sure the function does not optimize away the store of the RA to
@@ -7454,7 +7823,7 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
@@ -7537,18 +7906,15 @@ bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
return true;
}
-/// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
-/// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
-/// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
-/// is expanded to mul + add.
-bool PPCTargetLowering::isFMAFasterThanMulAndAdd(EVT VT) const {
+bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
+ VT = VT.getScalarType();
+
if (!VT.isSimple())
return false;
switch (VT.getSimpleVT().SimpleTy) {
case MVT::f32:
case MVT::f64:
- case MVT::v4f32:
return true;
default:
break;
@@ -7558,9 +7924,15 @@ bool PPCTargetLowering::isFMAFasterThanMulAndAdd(EVT VT) const {
}
Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
- if (DisableILPPref)
+ if (DisableILPPref || PPCSubTarget.enableMachineScheduler())
return TargetLowering::getSchedulingPreference(N);
return Sched::ILP;
}
+// Create a fast isel object.
+FastISel *
+PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo) const {
+ return PPC::createFastISel(FuncInfo, LibInfo);
+}
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index 423e9839807b..df3af35761ee 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -20,6 +20,7 @@
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/Target/TargetLowering.h"
namespace llvm {
@@ -115,11 +116,10 @@ namespace llvm {
/// Return with a flag operand, matched by 'blr'
RET_FLAG,
- /// R32 = MFCR(CRREG, INFLAG) - Represents the MFCRpseud/MFOCRF
- /// instructions. This copies the bits corresponding to the specified
- /// CRREG into the resultant GPR. Bits corresponding to other CR regs
- /// are undefined.
- MFCR,
+ /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
+ /// This copies the bits corresponding to the specified CRREG into the
+ /// resultant GPR. Bits corresponding to other CR regs are undefined.
+ MFOCRF,
// EH_SJLJ_SETJMP - SjLj exception handling setjmp.
EH_SJLJ_SETJMP,
@@ -146,6 +146,10 @@ namespace llvm {
/// an optional input flag argument.
COND_BRANCH,
+ /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
+ /// loops.
+ BDNZ, BDZ,
+
/// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
/// towards zero. Used only as part of the long double-to-int
/// conversion sequence.
@@ -175,61 +179,61 @@ namespace llvm {
/// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec
/// TLS model, produces an ADDIS8 instruction that adds the GOT
- /// base to sym@got@tprel@ha.
+ /// base to sym\@got\@tprel\@ha.
ADDIS_GOT_TPREL_HA,
/// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
/// TLS model, produces a LD instruction with base register G8RReg
- /// and offset sym@got@tprel@l. This completes the addition that
+ /// and offset sym\@got\@tprel\@l. This completes the addition that
/// finds the offset of "sym" relative to the thread pointer.
LD_GOT_TPREL_L,
/// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
/// model, produces an ADD instruction that adds the contents of
/// G8RReg to the thread pointer. Symbol contains a relocation
- /// sym@tls which is to be replaced by the thread pointer and
+ /// sym\@tls which is to be replaced by the thread pointer and
/// identifies to the linker that the instruction is part of a
/// TLS sequence.
ADD_TLS,
/// G8RC = ADDIS_TLSGD_HA %X2, Symbol - For the general-dynamic TLS
/// model, produces an ADDIS8 instruction that adds the GOT base
- /// register to sym@got@tlsgd@ha.
+ /// register to sym\@got\@tlsgd\@ha.
ADDIS_TLSGD_HA,
/// G8RC = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
/// model, produces an ADDI8 instruction that adds G8RReg to
- /// sym@got@tlsgd@l.
+ /// sym\@got\@tlsgd\@l.
ADDI_TLSGD_L,
/// G8RC = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
- /// model, produces a call to __tls_get_addr(sym@tlsgd).
+ /// model, produces a call to __tls_get_addr(sym\@tlsgd).
GET_TLS_ADDR,
/// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
/// model, produces an ADDIS8 instruction that adds the GOT base
- /// register to sym@got@tlsld@ha.
+ /// register to sym\@got\@tlsld\@ha.
ADDIS_TLSLD_HA,
/// G8RC = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
/// model, produces an ADDI8 instruction that adds G8RReg to
- /// sym@got@tlsld@l.
+ /// sym\@got\@tlsld\@l.
ADDI_TLSLD_L,
/// G8RC = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
- /// model, produces a call to __tls_get_addr(sym@tlsld).
+ /// model, produces a call to __tls_get_addr(sym\@tlsld).
GET_TLSLD_ADDR,
/// G8RC = ADDIS_DTPREL_HA %X3, Symbol, Chain - For the
/// local-dynamic TLS model, produces an ADDIS8 instruction
- /// that adds X3 to sym@dtprel@ha. The Chain operand is needed
+ /// that adds X3 to sym\@dtprel\@ha. The Chain operand is needed
/// to tie this in place following a copy to %X3 from the result
/// of a GET_TLSLD_ADDR.
ADDIS_DTPREL_HA,
/// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
/// model, produces an ADDI8 instruction that adds G8RReg to
- /// sym@got@dtprel@l.
+ /// sym\@got\@dtprel\@l.
ADDI_DTPREL_L,
/// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
@@ -238,6 +242,10 @@ namespace llvm {
/// optimizations due to constant folding.
VADD_SPLAT,
+ /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
+ /// operand identifies the operating system entry point.
+ SC,
+
/// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
/// byte-swapping store instruction. It byte-swaps the low "Type" bits of
/// the GPRC input, then stores it through Ptr. Type can be either i16 or
@@ -266,16 +274,16 @@ namespace llvm {
/// G8RC = ADDIS_TOC_HA %X2, Symbol - For medium and large code model,
/// produces an ADDIS8 instruction that adds the TOC base register to
- /// sym@toc@ha.
+ /// sym\@toc\@ha.
ADDIS_TOC_HA,
/// G8RC = LD_TOC_L Symbol, G8RReg - For medium and large code model,
/// produces a LD instruction with base register G8RReg and offset
- /// sym@toc@l. Preceded by an ADDIS_TOC_HA to form a full 32-bit offset.
+ /// sym\@toc\@l. Preceded by an ADDIS_TOC_HA to form a full 32-bit offset.
LD_TOC_L,
/// G8RC = ADDI_TOC_L G8RReg, Symbol - For medium code model, produces
- /// an ADDI8 instruction that adds G8RReg to sym@toc@l.
+ /// an ADDI8 instruction that adds G8RReg to sym\@toc\@l.
/// Preceded by an ADDIS_TOC_HA to form a full 32-bit offset.
ADDI_TOC_L
};
@@ -327,8 +335,6 @@ namespace llvm {
class PPCTargetLowering : public TargetLowering {
const PPCSubtarget &PPCSubTarget;
- const PPCRegisterInfo *PPCRegInfo;
- const PPCInstrInfo *PPCII;
public:
explicit PPCTargetLowering(PPCTargetMachine &TM);
@@ -340,7 +346,7 @@ namespace llvm {
virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
/// getSetCCResultType - Return the ISD::SETCC ValueType
- virtual EVT getSetCCResultType(EVT VT) const;
+ virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
@@ -358,21 +364,16 @@ namespace llvm {
/// SelectAddressRegImm - Returns true if the address N can be represented
/// by a base register plus a signed 16-bit displacement [r+imm], and if it
- /// is not better represented as reg+reg.
+ /// is not better represented as reg+reg. If Aligned is true, only accept
+ /// displacements suitable for STD and friends, i.e. multiples of 4.
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
- SelectionDAG &DAG) const;
+ SelectionDAG &DAG, bool Aligned) const;
/// SelectAddressRegRegOnly - Given the specified addressed, force it to be
/// represented as an indexed [r+r] operation.
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
SelectionDAG &DAG) const;
- /// SelectAddressRegImmShift - Returns true if the address N can be
- /// represented by a base register plus a signed 14-bit displacement
- /// [r+imm*4]. Suitable for use by STD and friends.
- bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base,
- SelectionDAG &DAG) const;
-
Sched::Preference getSchedulingPreference(SDNode *N) const;
/// LowerOperation - Provide custom lowering hooks for some operations.
@@ -418,7 +419,7 @@ namespace llvm {
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
+ MVT VT) const;
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
@@ -436,15 +437,6 @@ namespace llvm {
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
- /// isLegalAddressImmediate - Return true if the integer value can be used
- /// as the offset of the target addressing mode for load / store of the
- /// given type.
- virtual bool isLegalAddressImmediate(int64_t V, Type *Ty) const;
-
- /// isLegalAddressImmediate - Return true if the GlobalValue can be used as
- /// the offset of the target addressing mode.
- virtual bool isLegalAddressImmediate(GlobalValue *GV) const;
-
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
/// getOptimalMemOpType - Returns the target specific optimal type for load
@@ -459,7 +451,7 @@ namespace llvm {
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
virtual EVT
- getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+ getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
MachineFunction &MF) const;
@@ -467,11 +459,16 @@ namespace llvm {
/// relative to software emulation.
virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast = 0) const;
- /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
- /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
- /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
- /// is expanded to mul + add.
- virtual bool isFMAFasterThanMulAndAdd(EVT VT) const;
+ /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
+ /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
+ /// expanded to FMAs when this method returns true, otherwise fmuladd is
+ /// expanded to fmul + fadd.
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
+
+ /// createFastISel - This method returns a target-specific FastISel object,
+ /// or null if the target does not support "fast" instruction selection.
+ virtual FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo) const;
private:
SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
@@ -490,7 +487,7 @@ namespace llvm {
SDValue &LROpOut,
SDValue &FPOpOut,
bool isDarwinABI,
- DebugLoc dl) const;
+ SDLoc dl) const;
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
@@ -506,12 +503,14 @@ namespace llvm {
const PPCSubtarget &Subtarget) const;
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) const;
+ SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG,
+ const PPCSubtarget &Subtarget) const;
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) const;
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, DebugLoc dl) const;
+ SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, SDLoc dl) const;
SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
@@ -526,9 +525,9 @@ namespace llvm {
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- SDValue FinishCall(CallingConv::ID CallConv, DebugLoc dl, bool isTailCall,
+ SDValue FinishCall(CallingConv::ID CallConv, SDLoc dl, bool isTailCall,
bool isVarArg,
SelectionDAG &DAG,
SmallVector<std::pair<unsigned, SDValue>, 8>
@@ -543,7 +542,7 @@ namespace llvm {
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
@@ -561,11 +560,11 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
SDValue
extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, SelectionDAG &DAG,
- SDValue ArgVal, DebugLoc dl) const;
+ SDValue ArgVal, SDLoc dl) const;
void
setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG,
@@ -576,25 +575,25 @@ namespace llvm {
LowerFormalArguments_Darwin(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
LowerFormalArguments_64SVR4(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
LowerFormalArguments_32SVR4(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
- SelectionDAG &DAG, DebugLoc dl) const;
+ SelectionDAG &DAG, SDLoc dl) const;
SDValue
LowerCall_Darwin(SDValue Chain, SDValue Callee,
@@ -603,7 +602,7 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
LowerCall_64SVR4(SDValue Chain, SDValue Callee,
@@ -612,7 +611,7 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
@@ -620,7 +619,7 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
@@ -628,7 +627,31 @@ namespace llvm {
SDValue DAGCombineFastRecip(SDValue Op, DAGCombinerInfo &DCI) const;
SDValue DAGCombineFastRecipFSQRT(SDValue Op, DAGCombinerInfo &DCI) const;
+
+ CCAssignFn *useFastISelCCs(unsigned Flag) const;
};
+
+ namespace PPC {
+ FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo);
+ }
+
+ bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State);
+
+ bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State);
+
+ bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State);
}
#endif // LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td
index bff4c230ce68..46db4fe91308 100644
--- a/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -17,29 +17,39 @@
//
def s16imm64 : Operand<i64> {
let PrintMethod = "printS16ImmOperand";
+ let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCS16ImmAsmOperand;
}
def u16imm64 : Operand<i64> {
let PrintMethod = "printU16ImmOperand";
+ let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCU16ImmAsmOperand;
}
-def symbolHi64 : Operand<i64> {
- let PrintMethod = "printSymbolHi";
- let EncoderMethod = "getHA16Encoding";
- let ParserMatchClass = PPCS16ImmAsmOperand;
-}
-def symbolLo64 : Operand<i64> {
- let PrintMethod = "printSymbolLo";
- let EncoderMethod = "getLO16Encoding";
- let ParserMatchClass = PPCS16ImmAsmOperand;
+def s17imm64 : Operand<i64> {
+ // This operand type is used for addis/lis to allow the assembler parser
+ // to accept immediates in the range -65536..65535 for compatibility with
+ // the GNU assembler. The operand is treated as 16-bit otherwise.
+ let PrintMethod = "printS16ImmOperand";
+ let EncoderMethod = "getImm16Encoding";
+ let ParserMatchClass = PPCS17ImmAsmOperand;
}
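// A standalone check mirroring the widened range described in the comment
// above (illustrative; the accepted range is enforced by the parser match
// class): addis/lis take both the signed and the unsigned spelling of the
// same 16-bit field, so anything in [-65536, 65535] is accepted and truncated.
#include <cassert>
#include <cstdint>

static bool fitsS17(int64_t imm) { return imm >= -65536 && imm <= 65535; }

int main() {
  assert(fitsS17(65535) && fitsS17(-65536));
  assert(!fitsS17(65536) && !fitsS17(-65537));
  assert((uint16_t)65535 == (uint16_t)-1); // both spellings encode 0xffff
}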
def tocentry : Operand<iPTR> {
let MIOperandInfo = (ops i64imm:$imm);
}
+def PPCTLSRegOperand : AsmOperandClass {
+ let Name = "TLSReg"; let PredicateMethod = "isTLSReg";
+ let RenderMethod = "addTLSRegOperands";
+}
def tlsreg : Operand<i64> {
let EncoderMethod = "getTLSRegEncoding";
+ let ParserMatchClass = PPCTLSRegOperand;
}
def tlsgd : Operand<i64> {}
+def tlscall : Operand<i64> {
+ let PrintMethod = "printTLSCall";
+ let MIOperandInfo = (ops calltarget:$func, tlsgd:$sym);
+ let EncoderMethod = "getTLSCallEncoding";
+}
//===----------------------------------------------------------------------===//
// 64-bit transformation functions.
@@ -78,7 +88,7 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
let isCodeGenOnly = 1 in
def BCCTR8 : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond),
- "b${cond:cc}ctr ${cond:reg}", BrB, []>,
+ "b${cond:cc}ctr${cond:pm} ${cond:reg}", BrB, []>,
Requires<[In64BitMode]>;
}
}
@@ -111,7 +121,10 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in {
def BL8 : IForm<18, 0, 1, (outs), (ins calltarget:$func),
"bl $func", BrB, []>; // See Pat patterns below.
- def BLA8 : IForm<18, 1, 1, (outs), (ins aaddr:$func),
+ def BL8_TLS : IForm<18, 0, 1, (outs), (ins tlscall:$func),
+ "bl $func", BrB, []>;
+
+ def BLA8 : IForm<18, 1, 1, (outs), (ins abscalltarget:$func),
"bla $func", BrB, [(PPCcall (i64 imm:$func))]>;
}
let Uses = [RM], isCodeGenOnly = 1 in {
@@ -119,16 +132,12 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in {
(outs), (ins calltarget:$func),
"bl $func\n\tnop", BrB, []>;
- def BL8_NOP_TLSGD : IForm_and_DForm_4_zero<18, 0, 1, 24,
- (outs), (ins calltarget:$func, tlsgd:$sym),
- "bl $func($sym)\n\tnop", BrB, []>;
-
- def BL8_NOP_TLSLD : IForm_and_DForm_4_zero<18, 0, 1, 24,
- (outs), (ins calltarget:$func, tlsgd:$sym),
- "bl $func($sym)\n\tnop", BrB, []>;
+ def BL8_NOP_TLS : IForm_and_DForm_4_zero<18, 0, 1, 24,
+ (outs), (ins tlscall:$func),
+ "bl $func\n\tnop", BrB, []>;
def BLA8_NOP : IForm_and_DForm_4_zero<18, 1, 1, 24,
- (outs), (ins aaddr:$func),
+ (outs), (ins abscalltarget:$func),
"bla $func\n\tnop", BrB,
[(PPCcall_nop (i64 imm:$func))]>;
}
@@ -139,7 +148,7 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in {
let isCodeGenOnly = 1 in
def BCCTRL8 : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond),
- "b${cond:cc}ctrl ${cond:reg}", BrB, []>,
+ "b${cond:cc}ctrl${cond:pm} ${cond:reg}", BrB, []>,
Requires<[In64BitMode]>;
}
}
@@ -207,7 +216,7 @@ def TCRETURNdi8 :Pseudo< (outs),
[]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
-def TCRETURNai8 :Pseudo<(outs), (ins aaddr:$func, i32imm:$offset),
+def TCRETURNai8 :Pseudo<(outs), (ins abscalltarget:$func, i32imm:$offset),
"#TC_RETURNa8 $func $offset",
[(PPCtc_return (i64 imm:$func), imm:$offset)]>;
@@ -233,7 +242,7 @@ def TAILB8 : IForm<18, 0, 0, (outs), (ins calltarget:$dst),
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
-def TAILBA8 : IForm<18, 0, 0, (outs), (ins aaddr:$dst),
+def TAILBA8 : IForm<18, 0, 0, (outs), (ins abscalltarget:$dst),
"ba $dst", BrB,
[]>;
@@ -253,22 +262,26 @@ def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm),
// 64-bit CR instructions
let Interpretation64Bit = 1 in {
let neverHasSideEffects = 1 in {
-def MTCRF8 : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins g8rc:$rS),
+def MTOCRF8: XFXForm_5a<31, 144, (outs crbitm:$FXM), (ins g8rc:$ST),
+ "mtocrf $FXM, $ST", BrMCRX>,
+ PPC970_DGroup_First, PPC970_Unit_CRU;
+
+def MTCRF8 : XFXForm_5<31, 144, (outs), (ins i32imm:$FXM, g8rc:$rS),
"mtcrf $FXM, $rS", BrMCRX>,
PPC970_MicroCode, PPC970_Unit_CRU;
-let isCodeGenOnly = 1 in
-def MFCR8pseud: XFXForm_3<31, 19, (outs g8rc:$rT), (ins crbitm:$FXM),
- "#MFCR8pseud", SprMFCR>,
- PPC970_MicroCode, PPC970_Unit_CRU;
-} // neverHasSideEffects = 1
+let hasExtraSrcRegAllocReq = 1 in // to enable post-ra anti-dep breaking.
+def MFOCRF8: XFXForm_5a<31, 19, (outs g8rc:$rT), (ins crbitm:$FXM),
+ "mfocrf $rT, $FXM", SprMFCR>,
+ PPC970_DGroup_First, PPC970_Unit_CRU;
-let neverHasSideEffects = 1 in
def MFCR8 : XFXForm_3<31, 19, (outs g8rc:$rT), (ins),
"mfcr $rT", SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
+} // neverHasSideEffects = 1
let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
+ let Defs = [CTR8] in
def EH_SjLj_SetJmp64 : Pseudo<(outs gprc:$dst), (ins memr:$buf),
"#EH_SJLJ_SETJMP64",
[(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>,
@@ -293,8 +306,14 @@ def MTCTR8 : XFXForm_7_ext<31, 467, 9, (outs), (ins g8rc:$rS),
"mtctr $rS", SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
+let hasSideEffects = 1, isCodeGenOnly = 1, Defs = [CTR8] in {
+let Pattern = [(int_ppc_mtctr i64:$rS)] in
+def MTCTR8loop : XFXForm_7_ext<31, 467, 9, (outs), (ins g8rc:$rS),
+ "mtctr $rS", SprMTSPR>,
+ PPC970_DGroup_First, PPC970_Unit_FXU;
+}
-let Pattern = [(set i64:$rT, readcyclecounter)] in
+let isCodeGenOnly = 1, Pattern = [(set i64:$rT, readcyclecounter)] in
def MFTB8 : XFXForm_1_ext<31, 339, 268, (outs g8rc:$rT), (ins),
"mfspr $rT, 268", SprMFTB>,
PPC970_DGroup_First, PPC970_Unit_FXU;
@@ -329,10 +348,10 @@ let Interpretation64Bit = 1 in {
let neverHasSideEffects = 1 in {
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
-def LI8 : DForm_2_r0<14, (outs g8rc:$rD), (ins symbolLo64:$imm),
+def LI8 : DForm_2_r0<14, (outs g8rc:$rD), (ins s16imm64:$imm),
"li $rD, $imm", IntSimple,
- [(set i64:$rD, immSExt16:$imm)]>;
-def LIS8 : DForm_2_r0<15, (outs g8rc:$rD), (ins symbolHi64:$imm),
+ [(set i64:$rD, imm64SExt16:$imm)]>;
+def LIS8 : DForm_2_r0<15, (outs g8rc:$rD), (ins s17imm64:$imm),
"lis $rD, $imm", IntSimple,
[(set i64:$rD, imm16ShiftedSExt:$imm)]>;
}
@@ -392,9 +411,8 @@ defm ADD8 : XOForm_1r<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
[(set i64:$rT, (add i64:$rA, i64:$rB))]>;
// ADD8 has a special form: reg = ADD8(reg, sym@tls) for use by the
// initial-exec thread-local storage model.
-let isCodeGenOnly = 1 in
def ADD8TLS : XOForm_1<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, tlsreg:$rB),
- "add $rT, $rA, $rB@tls", IntSimple,
+ "add $rT, $rA, $rB", IntSimple,
[(set i64:$rT, (add i64:$rA, tglobaltlsaddr:$rB))]>;
defm ADDC8 : XOForm_1rc<31, 10, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
@@ -404,18 +422,18 @@ defm ADDC8 : XOForm_1rc<31, 10, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
let Defs = [CARRY] in
def ADDIC8 : DForm_2<12, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
"addic $rD, $rA, $imm", IntGeneral,
- [(set i64:$rD, (addc i64:$rA, immSExt16:$imm))]>;
-def ADDI8 : DForm_2<14, (outs g8rc:$rD), (ins g8rc_nox0:$rA, symbolLo64:$imm),
+ [(set i64:$rD, (addc i64:$rA, imm64SExt16:$imm))]>;
+def ADDI8 : DForm_2<14, (outs g8rc:$rD), (ins g8rc_nox0:$rA, s16imm64:$imm),
"addi $rD, $rA, $imm", IntSimple,
- [(set i64:$rD, (add i64:$rA, immSExt16:$imm))]>;
-def ADDIS8 : DForm_2<15, (outs g8rc:$rD), (ins g8rc_nox0:$rA, symbolHi64:$imm),
+ [(set i64:$rD, (add i64:$rA, imm64SExt16:$imm))]>;
+def ADDIS8 : DForm_2<15, (outs g8rc:$rD), (ins g8rc_nox0:$rA, s17imm64:$imm),
"addis $rD, $rA, $imm", IntSimple,
[(set i64:$rD, (add i64:$rA, imm16ShiftedSExt:$imm))]>;
let Defs = [CARRY] in {
def SUBFIC8: DForm_2< 8, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
"subfic $rD, $rA, $imm", IntGeneral,
- [(set i64:$rD, (subc immSExt16:$imm, i64:$rA))]>;
+ [(set i64:$rD, (subc imm64SExt16:$imm, i64:$rA))]>;
defm SUBFC8 : XOForm_1r<31, 8, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"subfc", "$rT, $rA, $rB", IntGeneral,
[(set i64:$rT, (subc i64:$rB, i64:$rA))]>,
@@ -489,6 +507,14 @@ defm EXTSH8 : XForm_11r<31, 922, (outs g8rc:$rA), (ins g8rc:$rS),
[(set i64:$rA, (sext_inreg i64:$rS, i16))]>;
} // Interpretation64Bit
+// For fast-isel:
+let isCodeGenOnly = 1 in {
+def EXTSB8_32_64 : XForm_11<31, 954, (outs g8rc:$rA), (ins gprc:$rS),
+ "extsb $rA, $rS", IntSimple, []>, isPPC64;
+def EXTSH8_32_64 : XForm_11<31, 922, (outs g8rc:$rA), (ins gprc:$rS),
+ "extsh $rA, $rS", IntSimple, []>, isPPC64;
+} // isCodeGenOnly for fast-isel
+
defm EXTSW : XForm_11r<31, 986, (outs g8rc:$rA), (ins g8rc:$rS),
"extsw", "$rA, $rS", IntSimple,
[(set i64:$rA, (sext_inreg i64:$rS, i32))]>, isPPC64;
@@ -503,16 +529,16 @@ defm SRADI : XSForm_1rc<31, 413, (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH),
defm CNTLZD : XForm_11r<31, 58, (outs g8rc:$rA), (ins g8rc:$rS),
"cntlzd", "$rA, $rS", IntGeneral,
[(set i64:$rA, (ctlz i64:$rS))]>;
-defm POPCNTD : XForm_11r<31, 506, (outs g8rc:$rA), (ins g8rc:$rS),
- "popcntd", "$rA, $rS", IntGeneral,
- [(set i64:$rA, (ctpop i64:$rS))]>;
+def POPCNTD : XForm_11<31, 506, (outs g8rc:$rA), (ins g8rc:$rS),
+ "popcntd $rA, $rS", IntGeneral,
+ [(set i64:$rA, (ctpop i64:$rS))]>;
// popcntw also does a population count on the high 32 bits (storing the
// results in the high 32-bits of the output). We'll ignore that here (which is
// safe because we never separately use the high part of the 64-bit registers).
-defm POPCNTW : XForm_11r<31, 378, (outs gprc:$rA), (ins gprc:$rS),
- "popcntw", "$rA, $rS", IntGeneral,
- [(set i32:$rA, (ctpop i32:$rS))]>;
+def POPCNTW : XForm_11<31, 378, (outs gprc:$rA), (ins gprc:$rS),
+ "popcntw $rA, $rS", IntGeneral,
+ [(set i32:$rA, (ctpop i32:$rS))]>;
defm DIVD : XOForm_1r<31, 489, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"divd", "$rT, $rA, $rB", IntDivD,
@@ -525,6 +551,9 @@ defm DIVDU : XOForm_1r<31, 457, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
defm MULLD : XOForm_1r<31, 233, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"mulld", "$rT, $rA, $rB", IntMulHD,
[(set i64:$rT, (mul i64:$rA, i64:$rB))]>, isPPC64;
+def MULLI8 : DForm_2<7, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
+ "mulli $rD, $rA, $imm", IntMulLI,
+ [(set i64:$rD, (mul i64:$rA, imm64SExt16:$imm))]>;
}
let neverHasSideEffects = 1 in {
@@ -541,14 +570,30 @@ defm RLDCL : MDSForm_1r<30, 8,
(outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB, u6imm:$MBE),
"rldcl", "$rA, $rS, $rB, $MBE", IntRotateD,
[]>, isPPC64;
+defm RLDCR : MDSForm_1r<30, 9,
+ (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB, u6imm:$MBE),
+ "rldcr", "$rA, $rS, $rB, $MBE", IntRotateD,
+ []>, isPPC64;
defm RLDICL : MDForm_1r<30, 0,
(outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE),
"rldicl", "$rA, $rS, $SH, $MBE", IntRotateDI,
[]>, isPPC64;
+// For fast-isel:
+let isCodeGenOnly = 1 in
+def RLDICL_32_64 : MDForm_1<30, 0,
+ (outs g8rc:$rA),
+ (ins gprc:$rS, u6imm:$SH, u6imm:$MBE),
+ "rldicl $rA, $rS, $SH, $MBE", IntRotateDI,
+ []>, isPPC64;
+// End fast-isel.
defm RLDICR : MDForm_1r<30, 1,
(outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE),
"rldicr", "$rA, $rS, $SH, $MBE", IntRotateDI,
[]>, isPPC64;
+defm RLDIC : MDForm_1r<30, 2,
+ (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE),
+ "rldic", "$rA, $rS, $SH, $MBE", IntRotateDI,
+ []>, isPPC64;
let Interpretation64Bit = 1 in {
defm RLWINM8 : MForm_2r<21, (outs g8rc:$rA),
@@ -592,6 +637,15 @@ def LWAX : XForm_1<31, 341, (outs g8rc:$rD), (ins memrr:$src),
"lwax $rD, $src", LdStLHA,
[(set i64:$rD, (sextloadi32 xaddr:$src))]>, isPPC64,
PPC970_DGroup_Cracked;
+// For fast-isel:
+let isCodeGenOnly = 1, mayLoad = 1 in {
+def LWA_32 : DSForm_1<58, 2, (outs gprc:$rD), (ins memrix:$src),
+ "lwa $rD, $src", LdStLWA, []>, isPPC64,
+ PPC970_DGroup_Cracked;
+def LWAX_32 : XForm_1<31, 341, (outs gprc:$rD), (ins memrr:$src),
+ "lwax $rD, $src", LdStLHA, []>, isPPC64,
+ PPC970_DGroup_Cracked;
+} // end fast-isel isCodeGenOnly
// Update forms.
let mayLoad = 1, neverHasSideEffects = 1 in {
@@ -750,25 +804,25 @@ def ADDItocL: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, tocentry:$disp),
(PPCaddiTocL i64:$reg, tglobaladdr:$disp))]>, isPPC64;
// Support for thread-local storage.
-def ADDISgotTprelHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolHi64:$disp),
+def ADDISgotTprelHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDISgotTprelHA",
[(set i64:$rD,
(PPCaddisGotTprelHA i64:$reg,
tglobaltlsaddr:$disp))]>,
isPPC64;
-def LDgotTprelL: Pseudo<(outs g8rc:$rD), (ins symbolLo64:$disp, g8rc_nox0:$reg),
+def LDgotTprelL: Pseudo<(outs g8rc:$rD), (ins s16imm64:$disp, g8rc_nox0:$reg),
"#LDgotTprelL",
[(set i64:$rD,
(PPCldGotTprelL tglobaltlsaddr:$disp, i64:$reg))]>,
isPPC64;
def : Pat<(PPCaddTls i64:$in, tglobaltlsaddr:$g),
(ADD8TLS $in, tglobaltlsaddr:$g)>;
-def ADDIStlsgdHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolHi64:$disp),
+def ADDIStlsgdHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDIStlsgdHA",
[(set i64:$rD,
(PPCaddisTlsgdHA i64:$reg, tglobaltlsaddr:$disp))]>,
isPPC64;
-def ADDItlsgdL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolLo64:$disp),
+def ADDItlsgdL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDItlsgdL",
[(set i64:$rD,
(PPCaddiTlsgdL i64:$reg, tglobaltlsaddr:$disp))]>,
@@ -778,12 +832,12 @@ def GETtlsADDR : Pseudo<(outs g8rc:$rD), (ins g8rc:$reg, tlsgd:$sym),
[(set i64:$rD,
(PPCgetTlsAddr i64:$reg, tglobaltlsaddr:$sym))]>,
isPPC64;
-def ADDIStlsldHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolHi64:$disp),
+def ADDIStlsldHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDIStlsldHA",
[(set i64:$rD,
(PPCaddisTlsldHA i64:$reg, tglobaltlsaddr:$disp))]>,
isPPC64;
-def ADDItlsldL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolLo64:$disp),
+def ADDItlsldL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDItlsldL",
[(set i64:$rD,
(PPCaddiTlsldL i64:$reg, tglobaltlsaddr:$disp))]>,
@@ -793,13 +847,13 @@ def GETtlsldADDR : Pseudo<(outs g8rc:$rD), (ins g8rc:$reg, tlsgd:$sym),
[(set i64:$rD,
(PPCgetTlsldAddr i64:$reg, tglobaltlsaddr:$sym))]>,
isPPC64;
-def ADDISdtprelHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolHi64:$disp),
+def ADDISdtprelHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDISdtprelHA",
[(set i64:$rD,
(PPCaddisDtprelHA i64:$reg,
tglobaltlsaddr:$disp))]>,
isPPC64;
-def ADDIdtprelL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolLo64:$disp),
+def ADDIdtprelL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDIdtprelL",
[(set i64:$rD,
(PPCaddiDtprelL i64:$reg, tglobaltlsaddr:$disp))]>,
@@ -914,6 +968,9 @@ let PPC970_Unit = 3, neverHasSideEffects = 1,
defm FCFID : XForm_26r<63, 846, (outs f8rc:$frD), (ins f8rc:$frB),
"fcfid", "$frD, $frB", FPGeneral,
[(set f64:$frD, (PPCfcfid f64:$frB))]>, isPPC64;
+defm FCTID : XForm_26r<63, 814, (outs f8rc:$frD), (ins f8rc:$frB),
+ "fctid", "$frD, $frB", FPGeneral,
+ []>, isPPC64;
defm FCTIDZ : XForm_26r<63, 815, (outs f8rc:$frD), (ins f8rc:$frB),
"fctidz", "$frD, $frB", FPGeneral,
[(set f64:$frD, (PPCfctidz f64:$frB))]>, isPPC64;
diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td
index cc9cf0abfc1a..a55abe373556 100644
--- a/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -229,35 +229,45 @@ let Predicates = [HasAltivec] in {
let isCodeGenOnly = 1 in {
def DSS : DSS_Form<822, (outs),
(ins u5imm:$ZERO0, u5imm:$STRM,u5imm:$ZERO1,u5imm:$ZERO2),
- "dss $STRM", LdStLoad /*FIXME*/, []>;
+ "dss $STRM", LdStLoad /*FIXME*/, []>,
+ Deprecated<DeprecatedDST>;
def DSSALL : DSS_Form<822, (outs),
(ins u5imm:$ONE, u5imm:$ZERO0,u5imm:$ZERO1,u5imm:$ZERO2),
- "dssall", LdStLoad /*FIXME*/, []>;
+ "dssall", LdStLoad /*FIXME*/, []>,
+ Deprecated<DeprecatedDST>;
def DST : DSS_Form<342, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, gprc:$rA, gprc:$rB),
- "dst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
+ "dst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ Deprecated<DeprecatedDST>;
def DSTT : DSS_Form<342, (outs),
(ins u5imm:$ONE, u5imm:$STRM, gprc:$rA, gprc:$rB),
- "dstt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
+ "dstt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ Deprecated<DeprecatedDST>;
def DSTST : DSS_Form<374, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, gprc:$rA, gprc:$rB),
- "dstst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
+ "dstst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ Deprecated<DeprecatedDST>;
def DSTSTT : DSS_Form<374, (outs),
(ins u5imm:$ONE, u5imm:$STRM, gprc:$rA, gprc:$rB),
- "dststt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
+ "dststt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ Deprecated<DeprecatedDST>;
def DST64 : DSS_Form<342, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, g8rc:$rA, gprc:$rB),
- "dst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
+ "dst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ Deprecated<DeprecatedDST>;
def DSTT64 : DSS_Form<342, (outs),
(ins u5imm:$ONE, u5imm:$STRM, g8rc:$rA, gprc:$rB),
- "dstt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
+ "dstt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ Deprecated<DeprecatedDST>;
def DSTST64 : DSS_Form<374, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, g8rc:$rA, gprc:$rB),
- "dstst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
+ "dstst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ Deprecated<DeprecatedDST>;
def DSTSTT64 : DSS_Form<374, (outs),
(ins u5imm:$ONE, u5imm:$STRM, g8rc:$rA, gprc:$rB),
- "dststt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
+ "dststt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ Deprecated<DeprecatedDST>;
}
def MFVSCR : VXForm_4<1540, (outs vrrc:$vD), (ins),
@@ -392,7 +402,7 @@ def VCTUXS : VXForm_1<906, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
// Defines with the UIM field set to 0 for floating-point
// to integer (fp_to_sint/fp_to_uint) conversions and integer
// to floating-point (sint_to_fp/uint_to_fp) conversions.
-let VA = 0 in {
+let isCodeGenOnly = 1, VA = 0 in {
def VCFSX_0 : VXForm_1<842, (outs vrrc:$vD), (ins vrrc:$vB),
"vcfsx $vD, $vB, 0", VecFP,
[(set v4f32:$vD,
@@ -664,15 +674,29 @@ def VCMPGTSWo : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>;
def VCMPGTUW : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>;
def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;
-let isCodeGenOnly = 1 in
-def V_SET0 : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
+let isCodeGenOnly = 1 in {
+def V_SET0B : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
+ "vxor $vD, $vD, $vD", VecFP,
+ [(set v16i8:$vD, (v16i8 immAllZerosV))]>;
+def V_SET0H : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
+ "vxor $vD, $vD, $vD", VecFP,
+ [(set v8i16:$vD, (v8i16 immAllZerosV))]>;
+def V_SET0 : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
"vxor $vD, $vD, $vD", VecFP,
[(set v4i32:$vD, (v4i32 immAllZerosV))]>;
+
let IMM=-1 in {
-def V_SETALLONES : VXForm_3<908, (outs vrrc:$vD), (ins),
+def V_SETALLONESB : VXForm_3<908, (outs vrrc:$vD), (ins),
+ "vspltisw $vD, -1", VecFP,
+ [(set v16i8:$vD, (v16i8 immAllOnesV))]>;
+def V_SETALLONESH : VXForm_3<908, (outs vrrc:$vD), (ins),
+ "vspltisw $vD, -1", VecFP,
+ [(set v8i16:$vD, (v8i16 immAllOnesV))]>;
+def V_SETALLONES : VXForm_3<908, (outs vrrc:$vD), (ins),
"vspltisw $vD, -1", VecFP,
[(set v4i32:$vD, (v4i32 immAllOnesV))]>;
}
+}
} // VALU Operations.
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/PowerPC/PPCInstrFormats.td b/lib/Target/PowerPC/PPCInstrFormats.td
index b6f4e852154f..29233d49148d 100644
--- a/lib/Target/PowerPC/PPCInstrFormats.td
+++ b/lib/Target/PowerPC/PPCInstrFormats.td
@@ -145,6 +145,33 @@ class BForm_2<bits<6> opcode, bits<5> bo, bits<5> bi, bit aa, bit lk,
let Inst{31} = lk;
}
+class BForm_3<bits<6> opcode, bit aa, bit lk,
+ dag OOL, dag IOL, string asmstr>
+ : I<opcode, OOL, IOL, asmstr, BrB> {
+ bits<5> BO;
+ bits<5> BI;
+ bits<14> BD;
+
+ let Inst{6-10} = BO;
+ let Inst{11-15} = BI;
+ let Inst{16-29} = BD;
+ let Inst{30} = aa;
+ let Inst{31} = lk;
+}
+
+// 1.7.3 SC-Form
+class SCForm<bits<6> opcode, bits<1> xo,
+ dag OOL, dag IOL, string asmstr, InstrItinClass itin,
+ list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<7> LEV;
+
+ let Pattern = pattern;
+
+ let Inst{20-26} = LEV;
+ let Inst{30} = xo;
+}
+
// 1.7.4 D-Form
class DForm_base<bits<6> opcode, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
@@ -371,6 +398,13 @@ class XForm_1a<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
let RST = 0;
}
+class XForm_rs<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : XForm_base_r3xo<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
+ let A = 0;
+ let B = 0;
+}
+
class XForm_6<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
: XForm_base_r3xo_swapped<opcode, xo, OOL, IOL, asmstr, itin> {
@@ -411,6 +445,17 @@ class XForm_16<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
let Inst{31} = 0;
}
+class XForm_mtmsr<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<5> RS;
+ bits<1> L;
+
+ let Inst{6-10} = RS;
+ let Inst{15} = L;
+ let Inst{21-30} = xo;
+}
+
class XForm_16_ext<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin>
: XForm_16<opcode, xo, OOL, IOL, asmstr, itin> {
@@ -446,14 +491,23 @@ class XForm_24<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
class XForm_24_sync<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
string asmstr, InstrItinClass itin, list<dag> pattern>
: I<opcode, OOL, IOL, asmstr, itin> {
+ bits<2> L;
+
let Pattern = pattern;
- let Inst{6-10} = 0;
+ let Inst{6-8} = 0;
+ let Inst{9-10} = L;
let Inst{11-15} = 0;
let Inst{16-20} = 0;
let Inst{21-30} = xo;
let Inst{31} = 0;
}
+class XForm_24_eieio<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : XForm_24_sync<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
+ let L = 0;
+}
+
class XForm_25<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
: XForm_base_r3xo<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
@@ -498,6 +552,21 @@ class XForm_43<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
let Inst{31} = RC;
}
+class XForm_0<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : XForm_base_r3xo<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
+ let RST = 0;
+ let A = 0;
+ let B = 0;
+}
+
+class XForm_16b<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : XForm_base_r3xo<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
+ let RST = 0;
+ let A = 0;
+}
+
// DCB_Form - Form X instruction, used for dcb* instructions.
class DCB_Form<bits<10> xo, bits<5> immfield, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index 1fb17eb50159..315ad04ebe3e 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -33,7 +33,7 @@
#include "llvm/Support/raw_ostream.h"
#define GET_INSTRMAP_INFO
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "PPCGenInstrInfo.inc"
using namespace llvm;
@@ -45,9 +45,12 @@ opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
static cl::opt<bool> DisableCmpOpt("disable-ppc-cmp-opt",
cl::desc("Disable compare instruction optimization"), cl::Hidden);
+// Pin the vtable to this file.
+void PPCInstrInfo::anchor() {}
+
PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
: PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
- TM(tm), RI(*TM.getSubtargetImpl(), *this) {}
+ TM(tm), RI(*TM.getSubtargetImpl()) {}
/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
/// this target when scheduling the DAG.
@@ -74,10 +77,9 @@ ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetPostRAHazardRecognizer(
// Most subtargets use a PPC970 recognizer.
if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 &&
Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) {
- const TargetInstrInfo *TII = TM.getInstrInfo();
- assert(TII && "No InstrInfo?");
+ assert(TM.getInstrInfo() && "No InstrInfo?");
- return new PPCHazardRecognizer970(*TII);
+ return new PPCHazardRecognizer970(TM);
}
return new PPCScoreboardHazardRecognizer(II, DAG);
@@ -449,7 +451,9 @@ bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
// isel is for regular integer GPRs only.
if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
- !PPC::G8RCRegClass.hasSubClassEq(RC))
+ !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
+ !PPC::G8RCRegClass.hasSubClassEq(RC) &&
+ !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))
return false;
// FIXME: These numbers are for the A2, how well they work for other cores is
@@ -479,12 +483,15 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
const TargetRegisterClass *RC =
RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
assert(RC && "TrueReg and FalseReg must have overlapping register classes");
- assert((PPC::GPRCRegClass.hasSubClassEq(RC) ||
- PPC::G8RCRegClass.hasSubClassEq(RC)) &&
+
+ bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
+ PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
+ assert((Is64Bit ||
+ PPC::GPRCRegClass.hasSubClassEq(RC) ||
+ PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
"isel is for regular integer GPRs only");
- unsigned OpCode =
- PPC::GPRCRegClass.hasSubClassEq(RC) ? PPC::ISEL : PPC::ISEL8;
+ unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
unsigned SelectPred = Cond[0].getImm();
unsigned SubIdx;
@@ -792,16 +799,6 @@ PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
NewMIs.back()->addMemOperand(MF, MMO);
}
-MachineInstr*
-PPCInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx, uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const {
- MachineInstrBuilder MIB = BuildMI(MF, DL, get(PPC::DBG_VALUE));
- addFrameReference(MIB, FrameIx, 0, false).addImm(Offset).addMetadata(MDPtr);
- return &*MIB;
-}
-
bool PPCInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
@@ -991,6 +988,10 @@ bool PPCInstrInfo::SubsumesPredicate(
if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR)
return false;
+ // P1 can only subsume P2 if they test the same condition register.
+ if (Pred1[1].getReg() != Pred2[1].getReg())
+ return false;
+
PPC::Predicate P1 = (PPC::Predicate) Pred1[0].getImm();
PPC::Predicate P2 = (PPC::Predicate) Pred2[0].getImm();
@@ -1156,25 +1157,19 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
MachineInstr *UseMI = &*I;
if (UseMI->getOpcode() == PPC::BCC) {
unsigned Pred = UseMI->getOperand(0).getImm();
- if (Pred == PPC::PRED_EQ || Pred == PPC::PRED_NE)
- continue;
-
- return false;
+ if (Pred != PPC::PRED_EQ && Pred != PPC::PRED_NE)
+ return false;
} else if (UseMI->getOpcode() == PPC::ISEL ||
UseMI->getOpcode() == PPC::ISEL8) {
unsigned SubIdx = UseMI->getOperand(3).getSubReg();
- if (SubIdx == PPC::sub_eq)
- continue;
-
- return false;
+ if (SubIdx != PPC::sub_eq)
+ return false;
} else
return false;
}
}
- // Get ready to iterate backward from CmpInstr.
- MachineBasicBlock::iterator I = CmpInstr, E = MI,
- B = CmpInstr->getParent()->begin();
+ MachineBasicBlock::iterator I = CmpInstr;
// Scan forward to find the first use of the compare.
for (MachineBasicBlock::iterator EL = CmpInstr->getParent()->end();
@@ -1191,9 +1186,6 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
break;
}
- // Early exit if we're at the beginning of the BB.
- if (I == B) return false;
-
// There are two possible candidates which can be changed to set CR[01].
// One is MI, the other is a SUB instruction.
// For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
@@ -1213,6 +1205,11 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
// Search for Sub.
const TargetRegisterInfo *TRI = &getRegisterInfo();
--I;
+
+ // Get ready to iterate backward from CmpInstr.
+ MachineBasicBlock::iterator E = MI,
+ B = CmpInstr->getParent()->begin();
+
for (; I != E && !noSub; --I) {
const MachineInstr &Instr = *I;
unsigned IOpC = Instr.getOpcode();
diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h
index 34a1a73a1804..f140c41a2a89 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/lib/Target/PowerPC/PPCInstrInfo.h
@@ -78,6 +78,7 @@ class PPCInstrInfo : public PPCGenInstrInfo {
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs,
bool &NonRI, bool &SpillsVRS) const;
+ virtual void anchor();
public:
explicit PPCInstrInfo(PPCTargetMachine &TM);
@@ -148,12 +149,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx,
- uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const;
-
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td
index 4763069f25ab..2bd3aadc798d 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/lib/Target/PowerPC/PPCInstrInfo.td
@@ -162,6 +162,10 @@ def PPCeh_sjlj_longjmp : SDNode<"PPCISD::EH_SJLJ_LONGJMP",
SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
[SDNPHasChain, SDNPSideEffect]>;
+def SDT_PPCsc : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def PPCsc : SDNode<"PPCISD::SC", SDT_PPCsc,
+ [SDNPHasChain, SDNPSideEffect]>;
+
def PPCvcmp : SDNode<"PPCISD::VCMP" , SDT_PPCvcmp, []>;
def PPCvcmp_o : SDNode<"PPCISD::VCMPo", SDT_PPCvcmp, [SDNPOutGlue]>;
@@ -246,13 +250,15 @@ def maskimm32 : PatLeaf<(imm), [{
return false;
}]>;
-def immSExt16 : PatLeaf<(imm), [{
- // immSExt16 predicate - True if the immediate fits in a 16-bit sign extended
- // field. Used by instructions like 'addi'.
- if (N->getValueType(0) == MVT::i32)
- return (int32_t)N->getZExtValue() == (short)N->getZExtValue();
- else
- return (int64_t)N->getZExtValue() == (short)N->getZExtValue();
+def imm32SExt16 : Operand<i32>, ImmLeaf<i32, [{
+ // imm32SExt16 predicate - True if the i32 immediate fits in a 16-bit
+ // sign extended field. Used by instructions like 'addi'.
+ return (int32_t)Imm == (short)Imm;
+}]>;
+def imm64SExt16 : Operand<i64>, ImmLeaf<i64, [{
+ // imm64SExt16 predicate - True if the i64 immediate fits in a 16-bit
+ // sign extended field. Used by instructions like 'addi'.
+ return (int64_t)Imm == (short)Imm;
}]>;
def immZExt16 : PatLeaf<(imm), [{
// immZExt16 predicate - True if the immediate fits in a 16-bit zero extended
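
The new imm32SExt16/imm64SExt16 predicates above both reduce to the same test: the value is unchanged by truncating to 16 bits and sign-extending back. A small standalone check of that identity (purely illustrative, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Does Imm fit in a 16-bit sign-extended field, as used by addi, mulli,
    // subfic?  Mirrors the "(int32_t)Imm == (short)Imm" trick above; the
    // narrowing conversion truncates modulo 2^16 on common compilers.
    static bool fitsInSExt16(int64_t Imm) {
      return Imm == (int16_t)Imm;
    }

    int main() {
      assert(fitsInSExt16(32767) && fitsInSExt16(-32768));
      assert(!fitsInSExt16(32768) && !fitsInSExt16(-32769));
    }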
@@ -283,7 +289,7 @@ def imm16ShiftedSExt : PatLeaf<(imm), [{
}], HI16>;
// Some r+i load/store instructions (such as LD, STD, LDU, etc.) that require
-// restricted memrix (offset/4) constants are alignment sensitive. If these
+// restricted memrix (4-aligned) constants are alignment sensitive. If these
// offsets are hidden behind TOC entries then the values of the lower-order
// bits cannot be checked directly. As a result, we need to also incorporate
// an alignment check into the relevant patterns.
@@ -386,7 +392,7 @@ def vrrc : RegisterOperand<VRRC> {
let ParserMatchClass = PPCRegVRRCAsmOperand;
}
def PPCRegCRBITRCAsmOperand : AsmOperandClass {
- let Name = "RegCRBITRC"; let PredicateMethod = "isRegNumber";
+ let Name = "RegCRBITRC"; let PredicateMethod = "isCRBitNumber";
}
def crbitrc : RegisterOperand<CRBITRC> {
let ParserMatchClass = PPCRegCRBITRCAsmOperand;
@@ -428,6 +434,7 @@ def PPCS16ImmAsmOperand : AsmOperandClass {
}
def s16imm : Operand<i32> {
let PrintMethod = "printS16ImmOperand";
+ let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCS16ImmAsmOperand;
}
def PPCU16ImmAsmOperand : AsmOperandClass {
@@ -436,31 +443,58 @@ def PPCU16ImmAsmOperand : AsmOperandClass {
}
def u16imm : Operand<i32> {
let PrintMethod = "printU16ImmOperand";
+ let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCU16ImmAsmOperand;
}
+def PPCS17ImmAsmOperand : AsmOperandClass {
+ let Name = "S17Imm"; let PredicateMethod = "isS17Imm";
+ let RenderMethod = "addImmOperands";
+}
+def s17imm : Operand<i32> {
+ // This operand type is used for addis/lis to allow the assembler parser
+ // to accept immediates in the range -65536..65535 for compatibility with
+ // the GNU assembler. The operand is treated as 16-bit otherwise.
+ let PrintMethod = "printS16ImmOperand";
+ let EncoderMethod = "getImm16Encoding";
+ let ParserMatchClass = PPCS17ImmAsmOperand;
+}
+def PPCDirectBrAsmOperand : AsmOperandClass {
+ let Name = "DirectBr"; let PredicateMethod = "isDirectBr";
+ let RenderMethod = "addBranchTargetOperands";
+}
def directbrtarget : Operand<OtherVT> {
let PrintMethod = "printBranchOperand";
let EncoderMethod = "getDirectBrEncoding";
+ let ParserMatchClass = PPCDirectBrAsmOperand;
+}
+def absdirectbrtarget : Operand<OtherVT> {
+ let PrintMethod = "printAbsBranchOperand";
+ let EncoderMethod = "getAbsDirectBrEncoding";
+ let ParserMatchClass = PPCDirectBrAsmOperand;
+}
+def PPCCondBrAsmOperand : AsmOperandClass {
+ let Name = "CondBr"; let PredicateMethod = "isCondBr";
+ let RenderMethod = "addBranchTargetOperands";
}
def condbrtarget : Operand<OtherVT> {
let PrintMethod = "printBranchOperand";
let EncoderMethod = "getCondBrEncoding";
+ let ParserMatchClass = PPCCondBrAsmOperand;
+}
+def abscondbrtarget : Operand<OtherVT> {
+ let PrintMethod = "printAbsBranchOperand";
+ let EncoderMethod = "getAbsCondBrEncoding";
+ let ParserMatchClass = PPCCondBrAsmOperand;
}
def calltarget : Operand<iPTR> {
+ let PrintMethod = "printBranchOperand";
let EncoderMethod = "getDirectBrEncoding";
+ let ParserMatchClass = PPCDirectBrAsmOperand;
}
-def aaddr : Operand<iPTR> {
- let PrintMethod = "printAbsAddrOperand";
-}
-def symbolHi: Operand<i32> {
- let PrintMethod = "printSymbolHi";
- let EncoderMethod = "getHA16Encoding";
- let ParserMatchClass = PPCS16ImmAsmOperand;
-}
-def symbolLo: Operand<i32> {
- let PrintMethod = "printSymbolLo";
- let EncoderMethod = "getLO16Encoding";
- let ParserMatchClass = PPCS16ImmAsmOperand;
+def abscalltarget : Operand<iPTR> {
+ let PrintMethod = "printAbsBranchOperand";
+ let EncoderMethod = "getAbsDirectBrEncoding";
+ let ParserMatchClass = PPCDirectBrAsmOperand;
}
def PPCCRBitMaskOperand : AsmOperandClass {
let Name = "CRBitMask"; let PredicateMethod = "isCRBitMask";
@@ -488,12 +522,14 @@ def ptr_rc_idx : Operand<iPTR>, PointerLikeRegClass<0> {
def PPCDispRIOperand : AsmOperandClass {
let Name = "DispRI"; let PredicateMethod = "isS16Imm";
+ let RenderMethod = "addImmOperands";
}
def dispRI : Operand<iPTR> {
let ParserMatchClass = PPCDispRIOperand;
}
def PPCDispRIXOperand : AsmOperandClass {
let Name = "DispRIX"; let PredicateMethod = "isS16ImmX4";
+ let RenderMethod = "addImmOperands";
}
def dispRIX : Operand<iPTR> {
let ParserMatchClass = PPCDispRIXOperand;
@@ -508,8 +544,8 @@ def memrr : Operand<iPTR> {
let PrintMethod = "printMemRegReg";
let MIOperandInfo = (ops ptr_rc_nor0:$ptrreg, ptr_rc_idx:$offreg);
}
-def memrix : Operand<iPTR> { // memri where the imm is shifted 2 bits.
- let PrintMethod = "printMemRegImmShifted";
+def memrix : Operand<iPTR> { // memri where the imm is 4-aligned.
+ let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops dispRIX:$imm, ptr_rc_nor0:$reg);
let EncoderMethod = "getMemRIXEncoding";
}
@@ -530,7 +566,7 @@ def pred : Operand<OtherVT> {
def iaddr : ComplexPattern<iPTR, 2, "SelectAddrImm", [], []>;
def xaddr : ComplexPattern<iPTR, 2, "SelectAddrIdx", [], []>;
def xoaddr : ComplexPattern<iPTR, 2, "SelectAddrIdxOnly",[], []>;
-def ixaddr : ComplexPattern<iPTR, 2, "SelectAddrImmShift", [], []>; // "std"
+def ixaddr : ComplexPattern<iPTR, 2, "SelectAddrImmX4", [], []>; // "std"
// The address in a single register. This is used with the SjLj
// pseudo-instructions.
@@ -749,6 +785,20 @@ multiclass XForm_26r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
}
}
+multiclass XForm_28r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
+ string asmbase, string asmstr, InstrItinClass itin,
+ list<dag> pattern> {
+ let BaseName = asmbase in {
+ def NAME : XForm_28<opcode, xo, OOL, IOL,
+ !strconcat(asmbase, !strconcat(" ", asmstr)), itin,
+ pattern>, RecFormRel;
+ let Defs = [CR1] in
+ def o : XForm_28<opcode, xo, OOL, IOL,
+ !strconcat(asmbase, !strconcat(". ", asmstr)), itin,
+ []>, isDOT, RecFormRel;
+ }
+}
+
multiclass AForm_1r<bits<6> opcode, bits<5> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
@@ -860,7 +910,7 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
let isCodeGenOnly = 1 in
def BCCTR : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond),
- "b${cond:cc}ctr ${cond:reg}", BrB, []>;
+ "b${cond:cc}ctr${cond:pm} ${cond:reg}", BrB, []>;
}
}
@@ -873,6 +923,8 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
def B : IForm<18, 0, 0, (outs), (ins directbrtarget:$dst),
"b $dst", BrB,
[(br bb:$dst)]>;
+ def BA : IForm<18, 1, 0, (outs), (ins absdirectbrtarget:$dst),
+ "ba $dst", BrB, []>;
}
// BCC represents an arbitrary conditional branch on a predicate.
@@ -880,18 +932,29 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
// a two-value operand where a dag node expects two operands. :(
let isCodeGenOnly = 1 in {
def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst),
- "b${cond:cc} ${cond:reg}, $dst"
+ "b${cond:cc}${cond:pm} ${cond:reg}, $dst"
/*[(PPCcondbranch crrc:$crS, imm:$opc, bb:$dst)]*/>;
+ def BCCA : BForm<16, 1, 0, (outs), (ins pred:$cond, abscondbrtarget:$dst),
+ "b${cond:cc}a${cond:pm} ${cond:reg}, $dst">;
+
let isReturn = 1, Uses = [LR, RM] in
def BCLR : XLForm_2_br<19, 16, 0, (outs), (ins pred:$cond),
- "b${cond:cc}lr ${cond:reg}", BrB, []>;
+ "b${cond:cc}lr${cond:pm} ${cond:reg}", BrB, []>;
+ }
- let isReturn = 1, Defs = [CTR], Uses = [CTR, LR, RM] in {
- def BDZLR : XLForm_2_ext<19, 16, 18, 0, 0, (outs), (ins),
+ let isReturn = 1, Defs = [CTR], Uses = [CTR, LR, RM] in {
+ def BDZLR : XLForm_2_ext<19, 16, 18, 0, 0, (outs), (ins),
"bdzlr", BrB, []>;
- def BDNZLR : XLForm_2_ext<19, 16, 16, 0, 0, (outs), (ins),
+ def BDNZLR : XLForm_2_ext<19, 16, 16, 0, 0, (outs), (ins),
"bdnzlr", BrB, []>;
- }
+ def BDZLRp : XLForm_2_ext<19, 16, 27, 0, 0, (outs), (ins),
+ "bdzlr+", BrB, []>;
+ def BDNZLRp: XLForm_2_ext<19, 16, 25, 0, 0, (outs), (ins),
+ "bdnzlr+", BrB, []>;
+ def BDZLRm : XLForm_2_ext<19, 16, 26, 0, 0, (outs), (ins),
+ "bdzlr-", BrB, []>;
+ def BDNZLRm: XLForm_2_ext<19, 16, 24, 0, 0, (outs), (ins),
+ "bdnzlr-", BrB, []>;
}
let Defs = [CTR], Uses = [CTR] in {
@@ -899,6 +962,26 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
"bdz $dst">;
def BDNZ : BForm_1<16, 16, 0, 0, (outs), (ins condbrtarget:$dst),
"bdnz $dst">;
+ def BDZA : BForm_1<16, 18, 1, 0, (outs), (ins abscondbrtarget:$dst),
+ "bdza $dst">;
+ def BDNZA : BForm_1<16, 16, 1, 0, (outs), (ins abscondbrtarget:$dst),
+ "bdnza $dst">;
+ def BDZp : BForm_1<16, 27, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdz+ $dst">;
+ def BDNZp: BForm_1<16, 25, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdnz+ $dst">;
+ def BDZAp : BForm_1<16, 27, 1, 0, (outs), (ins abscondbrtarget:$dst),
+ "bdza+ $dst">;
+ def BDNZAp: BForm_1<16, 25, 1, 0, (outs), (ins abscondbrtarget:$dst),
+ "bdnza+ $dst">;
+ def BDZm : BForm_1<16, 26, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdz- $dst">;
+ def BDNZm: BForm_1<16, 24, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdnz- $dst">;
+ def BDZAm : BForm_1<16, 26, 1, 0, (outs), (ins abscondbrtarget:$dst),
+ "bdza- $dst">;
+ def BDNZAm: BForm_1<16, 24, 1, 0, (outs), (ins abscondbrtarget:$dst),
+ "bdnza- $dst">;
}
}
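
The new +/- forms (BDZp, BDNZm, BDZAp, ...) differ from the plain bdz/bdnz definitions only in the BO field: 16 means "decrement CTR, branch if CTR != 0" (bdnz), 18 means "branch if CTR == 0" (bdz), and the prediction hint sets the 'a' bit (value 8) plus the 't' bit (value 1) for a taken hint. A quick arithmetic check of the encodings used above, assuming that standard Power ISA reading of the hint bits:

    #include <cassert>

    enum { BO_BDNZ = 16, BO_BDZ = 18, HintEnable = 8, HintTaken = 1 };

    int main() {
      assert(BO_BDNZ + HintEnable             == 24);  // bdnz-
      assert(BO_BDNZ + HintEnable + HintTaken == 25);  // bdnz+
      assert(BO_BDZ  + HintEnable             == 26);  // bdz-
      assert(BO_BDZ  + HintEnable + HintTaken == 27);  // bdz+
    }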
@@ -915,8 +998,15 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
let Uses = [RM] in {
def BL : IForm<18, 0, 1, (outs), (ins calltarget:$func),
"bl $func", BrB, []>; // See Pat patterns below.
- def BLA : IForm<18, 1, 1, (outs), (ins aaddr:$func),
+ def BLA : IForm<18, 1, 1, (outs), (ins abscalltarget:$func),
"bla $func", BrB, [(PPCcall (i32 imm:$func))]>;
+
+ let isCodeGenOnly = 1 in {
+ def BCCL : BForm<16, 0, 1, (outs), (ins pred:$cond, condbrtarget:$dst),
+ "b${cond:cc}l${cond:pm} ${cond:reg}, $dst">;
+ def BCCLA : BForm<16, 1, 1, (outs), (ins pred:$cond, abscondbrtarget:$dst),
+ "b${cond:cc}la${cond:pm} ${cond:reg}, $dst">;
+ }
}
let Uses = [CTR, RM] in {
def BCTRL : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins),
@@ -925,7 +1015,55 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
let isCodeGenOnly = 1 in
def BCCTRL : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond),
- "b${cond:cc}ctrl ${cond:reg}", BrB, []>;
+ "b${cond:cc}ctrl${cond:pm} ${cond:reg}", BrB, []>;
+ }
+ let Uses = [LR, RM] in {
+ def BLRL : XLForm_2_ext<19, 16, 20, 0, 1, (outs), (ins),
+ "blrl", BrB, []>;
+
+ let isCodeGenOnly = 1 in
+ def BCLRL : XLForm_2_br<19, 16, 1, (outs), (ins pred:$cond),
+ "b${cond:cc}lrl${cond:pm} ${cond:reg}", BrB, []>;
+ }
+ let Defs = [CTR], Uses = [CTR, RM] in {
+ def BDZL : BForm_1<16, 18, 0, 1, (outs), (ins condbrtarget:$dst),
+ "bdzl $dst">;
+ def BDNZL : BForm_1<16, 16, 0, 1, (outs), (ins condbrtarget:$dst),
+ "bdnzl $dst">;
+ def BDZLA : BForm_1<16, 18, 1, 1, (outs), (ins abscondbrtarget:$dst),
+ "bdzla $dst">;
+ def BDNZLA : BForm_1<16, 16, 1, 1, (outs), (ins abscondbrtarget:$dst),
+ "bdnzla $dst">;
+ def BDZLp : BForm_1<16, 27, 0, 1, (outs), (ins condbrtarget:$dst),
+ "bdzl+ $dst">;
+ def BDNZLp: BForm_1<16, 25, 0, 1, (outs), (ins condbrtarget:$dst),
+ "bdnzl+ $dst">;
+ def BDZLAp : BForm_1<16, 27, 1, 1, (outs), (ins abscondbrtarget:$dst),
+ "bdzla+ $dst">;
+ def BDNZLAp: BForm_1<16, 25, 1, 1, (outs), (ins abscondbrtarget:$dst),
+ "bdnzla+ $dst">;
+ def BDZLm : BForm_1<16, 26, 0, 1, (outs), (ins condbrtarget:$dst),
+ "bdzl- $dst">;
+ def BDNZLm: BForm_1<16, 24, 0, 1, (outs), (ins condbrtarget:$dst),
+ "bdnzl- $dst">;
+ def BDZLAm : BForm_1<16, 26, 1, 1, (outs), (ins abscondbrtarget:$dst),
+ "bdzla- $dst">;
+ def BDNZLAm: BForm_1<16, 24, 1, 1, (outs), (ins abscondbrtarget:$dst),
+ "bdnzla- $dst">;
+ }
+ let Defs = [CTR], Uses = [CTR, LR, RM] in {
+ def BDZLRL : XLForm_2_ext<19, 16, 18, 0, 1, (outs), (ins),
+ "bdzlrl", BrB, []>;
+ def BDNZLRL : XLForm_2_ext<19, 16, 16, 0, 1, (outs), (ins),
+ "bdnzlrl", BrB, []>;
+ def BDZLRLp : XLForm_2_ext<19, 16, 27, 0, 1, (outs), (ins),
+ "bdzlrl+", BrB, []>;
+ def BDNZLRLp: XLForm_2_ext<19, 16, 25, 0, 1, (outs), (ins),
+ "bdnzlrl+", BrB, []>;
+ def BDZLRLm : XLForm_2_ext<19, 16, 26, 0, 1, (outs), (ins),
+ "bdzlrl-", BrB, []>;
+ def BDNZLRLm: XLForm_2_ext<19, 16, 24, 0, 1, (outs), (ins),
+ "bdnzlrl-", BrB, []>;
}
}
@@ -937,7 +1075,7 @@ def TCRETURNdi :Pseudo< (outs),
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
-def TCRETURNai :Pseudo<(outs), (ins aaddr:$func, i32imm:$offset),
+def TCRETURNai :Pseudo<(outs), (ins abscalltarget:$func, i32imm:$offset),
"#TC_RETURNa $func $offset",
[(PPCtc_return (i32 imm:$func), imm:$offset)]>;
@@ -954,23 +1092,22 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1,
def TAILBCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>,
Requires<[In32BitMode]>;
-
-
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
def TAILB : IForm<18, 0, 0, (outs), (ins calltarget:$dst),
"b $dst", BrB,
[]>;
-}
-
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
-def TAILBA : IForm<18, 0, 0, (outs), (ins aaddr:$dst),
+def TAILBA : IForm<18, 0, 0, (outs), (ins abscalltarget:$dst),
"ba $dst", BrB,
[]>;
+}
+
let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
+ let Defs = [CTR] in
def EH_SjLj_SetJmp32 : Pseudo<(outs gprc:$dst), (ins memr:$buf),
"#EH_SJLJ_SETJMP32",
[(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>,
@@ -987,6 +1124,12 @@ let isBranch = 1, isTerminator = 1 in {
"#EH_SjLj_Setup\t$dst", []>;
}
+// System call.
+let PPC970_Unit = 7 in {
+ def SC : SCForm<17, 1, (outs), (ins i32imm:$lev),
+ "sc $lev", BrB, [(PPCsc (i32 imm:$lev))]>;
+}
+
// DCB* instructions.
def DCBA : DCB_Form<758, 0, (outs), (ins memrr:$dst),
"dcba $dst", LdStDCBF, [(int_ppc_dcba xoaddr:$dst)]>,
@@ -1110,6 +1253,15 @@ def STWCX : XForm_1<31, 150, (outs), (ins gprc:$rS, memrr:$dst),
let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
def TRAP : XForm_24<31, 4, (outs), (ins), "trap", LdStLoad, [(trap)]>;
+def TWI : DForm_base<3, (outs), (ins u5imm:$to, gprc:$rA, s16imm:$imm),
+ "twi $to, $rA, $imm", IntTrapW, []>;
+def TW : XForm_1<31, 4, (outs), (ins u5imm:$to, gprc:$rA, gprc:$rB),
+ "tw $to, $rA, $rB", IntTrapW, []>;
+def TDI : DForm_base<2, (outs), (ins u5imm:$to, g8rc:$rA, s16imm:$imm),
+ "tdi $to, $rA, $imm", IntTrapD, []>;
+def TD : XForm_1<31, 68, (outs), (ins u5imm:$to, g8rc:$rA, g8rc:$rB),
+ "td $to, $rA, $rB", IntTrapD, []>;
+
//===----------------------------------------------------------------------===//
// PPC32 Load Instructions.
//
@@ -1250,6 +1402,10 @@ def LFIWZX : XForm_25<31, 887, (outs f8rc:$frD), (ins memrr:$src),
[(set f64:$frD, (PPClfiwzx xoaddr:$src))]>;
}
+// Load Multiple
+def LMW : DForm_1<46, (outs gprc:$rD), (ins memri:$src),
+ "lmw $rD, $src", LdStLMW, []>;
+
//===----------------------------------------------------------------------===//
// PPC32 Store Instructions.
//
@@ -1380,50 +1536,54 @@ def : Pat<(pre_store f32:$rS, iPTR:$ptrreg, iPTR:$ptroff),
def : Pat<(pre_store f64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
(STFDUX $rS, $ptrreg, $ptroff)>;
-def SYNC : XForm_24_sync<31, 598, (outs), (ins),
- "sync", LdStSync,
- [(int_ppc_sync)]>;
+// Store Multiple
+def STMW : DForm_1<47, (outs), (ins gprc:$rS, memri:$dst),
+ "stmw $rS, $dst", LdStLMW, []>;
+
+def SYNC : XForm_24_sync<31, 598, (outs), (ins i32imm:$L),
+ "sync $L", LdStSync, []>;
+def : Pat<(int_ppc_sync), (SYNC 0)>;
//===----------------------------------------------------------------------===//
// PPC32 Arithmetic Instructions.
//
let PPC970_Unit = 1 in { // FXU Operations.
-def ADDI : DForm_2<14, (outs gprc:$rD), (ins gprc_nor0:$rA, symbolLo:$imm),
+def ADDI : DForm_2<14, (outs gprc:$rD), (ins gprc_nor0:$rA, s16imm:$imm),
"addi $rD, $rA, $imm", IntSimple,
- [(set i32:$rD, (add i32:$rA, immSExt16:$imm))]>;
+ [(set i32:$rD, (add i32:$rA, imm32SExt16:$imm))]>;
let BaseName = "addic" in {
let Defs = [CARRY] in
def ADDIC : DForm_2<12, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
"addic $rD, $rA, $imm", IntGeneral,
- [(set i32:$rD, (addc i32:$rA, immSExt16:$imm))]>,
+ [(set i32:$rD, (addc i32:$rA, imm32SExt16:$imm))]>,
RecFormRel, PPC970_DGroup_Cracked;
let Defs = [CARRY, CR0] in
def ADDICo : DForm_2<13, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
"addic. $rD, $rA, $imm", IntGeneral,
[]>, isDOT, RecFormRel;
}
-def ADDIS : DForm_2<15, (outs gprc:$rD), (ins gprc_nor0:$rA, symbolHi:$imm),
+def ADDIS : DForm_2<15, (outs gprc:$rD), (ins gprc_nor0:$rA, s17imm:$imm),
"addis $rD, $rA, $imm", IntSimple,
[(set i32:$rD, (add i32:$rA, imm16ShiftedSExt:$imm))]>;
let isCodeGenOnly = 1 in
-def LA : DForm_2<14, (outs gprc:$rD), (ins gprc_nor0:$rA, symbolLo:$sym),
+def LA : DForm_2<14, (outs gprc:$rD), (ins gprc_nor0:$rA, s16imm:$sym),
"la $rD, $sym($rA)", IntGeneral,
[(set i32:$rD, (add i32:$rA,
(PPClo tglobaladdr:$sym, 0)))]>;
def MULLI : DForm_2< 7, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
"mulli $rD, $rA, $imm", IntMulLI,
- [(set i32:$rD, (mul i32:$rA, immSExt16:$imm))]>;
+ [(set i32:$rD, (mul i32:$rA, imm32SExt16:$imm))]>;
let Defs = [CARRY] in
def SUBFIC : DForm_2< 8, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
"subfic $rD, $rA, $imm", IntGeneral,
- [(set i32:$rD, (subc immSExt16:$imm, i32:$rA))]>;
+ [(set i32:$rD, (subc imm32SExt16:$imm, i32:$rA))]>;
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
- def LI : DForm_2_r0<14, (outs gprc:$rD), (ins symbolLo:$imm),
+ def LI : DForm_2_r0<14, (outs gprc:$rD), (ins s16imm:$imm),
"li $rD, $imm", IntSimple,
- [(set i32:$rD, immSExt16:$imm)]>;
- def LIS : DForm_2_r0<15, (outs gprc:$rD), (ins symbolHi:$imm),
+ [(set i32:$rD, imm32SExt16:$imm)]>;
+ def LIS : DForm_2_r0<15, (outs gprc:$rD), (ins s17imm:$imm),
"lis $rD, $imm", IntSimple,
[(set i32:$rD, imm16ShiftedSExt:$imm)]>;
}
@@ -1532,6 +1692,9 @@ let isCompare = 1, neverHasSideEffects = 1 in {
let Uses = [RM] in {
let neverHasSideEffects = 1 in {
+ defm FCTIW : XForm_26r<63, 14, (outs f8rc:$frD), (ins f8rc:$frB),
+ "fctiw", "$frD, $frB", FPGeneral,
+ []>;
defm FCTIWZ : XForm_26r<63, 15, (outs f8rc:$frD), (ins f8rc:$frB),
"fctiwz", "$frD, $frB", FPGeneral,
[(set f64:$frD, (PPCfctiwz f64:$frB))]>;
@@ -1540,23 +1703,13 @@ let Uses = [RM] in {
"frsp", "$frD, $frB", FPGeneral,
[(set f32:$frD, (fround f64:$frB))]>;
- // The frin -> nearbyint mapping is valid only in fast-math mode.
let Interpretation64Bit = 1 in
defm FRIND : XForm_26r<63, 392, (outs f8rc:$frD), (ins f8rc:$frB),
"frin", "$frD, $frB", FPGeneral,
- [(set f64:$frD, (fnearbyint f64:$frB))]>;
+ [(set f64:$frD, (frnd f64:$frB))]>;
defm FRINS : XForm_26r<63, 392, (outs f4rc:$frD), (ins f4rc:$frB),
"frin", "$frD, $frB", FPGeneral,
- [(set f32:$frD, (fnearbyint f32:$frB))]>;
- }
-
- // These pseudos expand to rint but also set FE_INEXACT when the result does
- // not equal the argument.
- let usesCustomInserter = 1, Defs = [RM] in { // FIXME: Model FPSCR!
- def FRINDrint : Pseudo<(outs f8rc:$frD), (ins f8rc:$frB),
- "#FRINDrint", [(set f64:$frD, (frint f64:$frB))]>;
- def FRINSrint : Pseudo<(outs f4rc:$frD), (ins f4rc:$frB),
- "#FRINSrint", [(set f32:$frD, (frint f32:$frB))]>;
+ [(set f32:$frD, (frnd f32:$frB))]>;
}
let neverHasSideEffects = 1 in {
@@ -1626,6 +1779,14 @@ defm FNEGD : XForm_26r<63, 40, (outs f8rc:$frD), (ins f8rc:$frB),
"fneg", "$frD, $frB", FPGeneral,
[(set f64:$frD, (fneg f64:$frB))]>;
+defm FCPSGNS : XForm_28r<63, 8, (outs f4rc:$frD), (ins f4rc:$frA, f4rc:$frB),
+ "fcpsgn", "$frD, $frA, $frB", FPGeneral,
+ [(set f32:$frD, (fcopysign f32:$frB, f32:$frA))]>;
+let Interpretation64Bit = 1 in
+defm FCPSGND : XForm_28r<63, 8, (outs f8rc:$frD), (ins f8rc:$frA, f8rc:$frB),
+ "fcpsgn", "$frD, $frA, $frB", FPGeneral,
+ [(set f64:$frD, (fcopysign f64:$frB, f64:$frA))]>;
+
// Reciprocal estimates.
defm FRE : XForm_26r<63, 24, (outs f8rc:$frD), (ins f8rc:$frB),
"fre", "$frD, $frB", FPGeneral,
@@ -1648,15 +1809,37 @@ def MCRF : XLForm_3<19, 0, (outs crrc:$BF), (ins crrc:$BFA),
"mcrf $BF, $BFA", BrMCR>,
PPC970_DGroup_First, PPC970_Unit_CRU;
+def CRAND : XLForm_1<19, 257, (outs crbitrc:$CRD),
+ (ins crbitrc:$CRA, crbitrc:$CRB),
+ "crand $CRD, $CRA, $CRB", BrCR, []>;
+
+def CRNAND : XLForm_1<19, 225, (outs crbitrc:$CRD),
+ (ins crbitrc:$CRA, crbitrc:$CRB),
+ "crnand $CRD, $CRA, $CRB", BrCR, []>;
+
+def CROR : XLForm_1<19, 449, (outs crbitrc:$CRD),
+ (ins crbitrc:$CRA, crbitrc:$CRB),
+ "cror $CRD, $CRA, $CRB", BrCR, []>;
+
+def CRXOR : XLForm_1<19, 193, (outs crbitrc:$CRD),
+ (ins crbitrc:$CRA, crbitrc:$CRB),
+ "crxor $CRD, $CRA, $CRB", BrCR, []>;
+
+def CRNOR : XLForm_1<19, 33, (outs crbitrc:$CRD),
+ (ins crbitrc:$CRA, crbitrc:$CRB),
+ "crnor $CRD, $CRA, $CRB", BrCR, []>;
+
def CREQV : XLForm_1<19, 289, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
- "creqv $CRD, $CRA, $CRB", BrCR,
- []>;
+ "creqv $CRD, $CRA, $CRB", BrCR, []>;
+
+def CRANDC : XLForm_1<19, 129, (outs crbitrc:$CRD),
+ (ins crbitrc:$CRA, crbitrc:$CRB),
+ "crandc $CRD, $CRA, $CRB", BrCR, []>;
-def CROR : XLForm_1<19, 449, (outs crbitrc:$CRD),
+def CRORC : XLForm_1<19, 417, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
- "cror $CRD, $CRA, $CRB", BrCR,
- []>;
+ "crorc $CRD, $CRA, $CRB", BrCR, []>;
let isCodeGenOnly = 1 in {
def CRSET : XLForm_1_ext<19, 289, (outs crbitrc:$dst), (ins),
@@ -1680,6 +1863,15 @@ def CR6UNSET: XLForm_1_ext<19, 193, (outs), (ins),
// XFX-Form instructions. Instructions that deal with SPRs.
//
+
+def MFSPR : XFXForm_1<31, 339, (outs gprc:$RT), (ins i32imm:$SPR),
+ "mfspr $RT, $SPR", SprMFSPR>;
+def MTSPR : XFXForm_1<31, 467, (outs), (ins i32imm:$SPR, gprc:$RT),
+ "mtspr $SPR, $RT", SprMTSPR>;
+
+def MFTB : XFXForm_1<31, 371, (outs gprc:$RT), (ins i32imm:$SPR),
+ "mftb $RT, $SPR", SprMFTB>, Deprecated<DeprecatedMFTB>;
+
let Uses = [CTR] in {
def MFCTR : XFXForm_1_ext<31, 339, 9, (outs gprc:$rT), (ins),
"mfctr $rT", SprMFSPR>,
@@ -1690,6 +1882,12 @@ def MTCTR : XFXForm_7_ext<31, 467, 9, (outs), (ins gprc:$rS),
"mtctr $rS", SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
+let hasSideEffects = 1, isCodeGenOnly = 1, Defs = [CTR] in {
+let Pattern = [(int_ppc_mtctr i32:$rS)] in
+def MTCTRloop : XFXForm_7_ext<31, 467, 9, (outs), (ins gprc:$rS),
+ "mtctr $rS", SprMTSPR>,
+ PPC970_DGroup_First, PPC970_Unit_FXU;
+}
let Defs = [LR] in {
def MTLR : XFXForm_7_ext<31, 467, 8, (outs), (ins gprc:$rS),
@@ -1702,17 +1900,17 @@ def MFLR : XFXForm_1_ext<31, 339, 8, (outs gprc:$rT), (ins),
PPC970_DGroup_First, PPC970_Unit_FXU;
}
-// Move to/from VRSAVE: despite being a SPR, the VRSAVE register is renamed like
-// a GPR on the PPC970. As such, copies in and out have the same performance
-// characteristics as an OR instruction.
-def MTVRSAVE : XFXForm_7_ext<31, 467, 256, (outs), (ins gprc:$rS),
- "mtspr 256, $rS", IntGeneral>,
- PPC970_DGroup_Single, PPC970_Unit_FXU;
-def MFVRSAVE : XFXForm_1_ext<31, 339, 256, (outs gprc:$rT), (ins),
- "mfspr $rT, 256", IntGeneral>,
- PPC970_DGroup_First, PPC970_Unit_FXU;
-
let isCodeGenOnly = 1 in {
+ // Move to/from VRSAVE: despite being a SPR, the VRSAVE register is renamed
+ // like a GPR on the PPC970. As such, copies in and out have the same
+ // performance characteristics as an OR instruction.
+ def MTVRSAVE : XFXForm_7_ext<31, 467, 256, (outs), (ins gprc:$rS),
+ "mtspr 256, $rS", IntGeneral>,
+ PPC970_DGroup_Single, PPC970_Unit_FXU;
+ def MFVRSAVE : XFXForm_1_ext<31, 339, 256, (outs gprc:$rT), (ins),
+ "mfspr $rT, 256", IntGeneral>,
+ PPC970_DGroup_First, PPC970_Unit_FXU;
+
def MTVRSAVEv : XFXForm_7_ext<31, 467, 256,
(outs VRSAVERC:$reg), (ins gprc:$rS),
"mtspr 256, $rS", IntGeneral>,
@@ -1736,34 +1934,23 @@ def RESTORE_VRSAVE : Pseudo<(outs VRSAVERC:$vrsave), (ins memri:$F),
"#RESTORE_VRSAVE", []>;
let neverHasSideEffects = 1 in {
-def MTCRF : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins gprc:$rS),
- "mtcrf $FXM, $rS", BrMCRX>,
- PPC970_MicroCode, PPC970_Unit_CRU;
+def MTOCRF: XFXForm_5a<31, 144, (outs crbitm:$FXM), (ins gprc:$ST),
+ "mtocrf $FXM, $ST", BrMCRX>,
+ PPC970_DGroup_First, PPC970_Unit_CRU;
-// This is a pseudo for MFCR, which implicitly uses all 8 of its subregisters;
-// declaring that here gives the local register allocator problems with this:
-// vreg = MCRF CR0
-// MFCR <kill of whatever preg got assigned to vreg>
-// while not declaring it breaks DeadMachineInstructionElimination.
-// As it turns out, in all cases where we currently use this,
-// we're only interested in one subregister of it. Represent this in the
-// instruction to keep the register allocator from becoming confused.
-//
-// FIXME: Make this a real Pseudo instruction when the JIT switches to MC.
-let isCodeGenOnly = 1 in
-def MFCRpseud: XFXForm_3<31, 19, (outs gprc:$rT), (ins crbitm:$FXM),
- "#MFCRpseud", SprMFCR>,
+def MTCRF : XFXForm_5<31, 144, (outs), (ins i32imm:$FXM, gprc:$rS),
+ "mtcrf $FXM, $rS", BrMCRX>,
PPC970_MicroCode, PPC970_Unit_CRU;
+let hasExtraSrcRegAllocReq = 1 in // to enable post-ra anti-dep breaking.
def MFOCRF: XFXForm_5a<31, 19, (outs gprc:$rT), (ins crbitm:$FXM),
"mfocrf $rT, $FXM", SprMFCR>,
PPC970_DGroup_First, PPC970_Unit_CRU;
-} // neverHasSideEffects = 1
-let neverHasSideEffects = 1 in
def MFCR : XFXForm_3<31, 19, (outs gprc:$rT), (ins),
"mfcr $rT", SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
+} // neverHasSideEffects = 1
// Pseudo instruction to perform FADD in round-to-zero mode.
let usesCustomInserter = 1, Uses = [RM] in {
@@ -2004,7 +2191,7 @@ def : Pat<(or i32:$in, imm:$imm),
def : Pat<(xor i32:$in, imm:$imm),
(XORIS (XORI $in, (LO16 imm:$imm)), (HI16 imm:$imm))>;
// SUBFIC
-def : Pat<(sub immSExt16:$imm, i32:$in),
+def : Pat<(sub imm32SExt16:$imm, i32:$in),
(SUBFIC $in, imm:$imm)>;
// SHL/SRL
@@ -2097,7 +2284,7 @@ def : Pat<(f64 (extloadf32 xaddr:$src)),
def : Pat<(f64 (fextend f32:$src)),
(COPY_TO_REGCLASS $src, F8RC)>;
-def : Pat<(atomic_fence (imm), (imm)), (SYNC)>;
+def : Pat<(atomic_fence (imm), (imm)), (SYNC 0)>;
// Additional FNMSUB patterns: -a*c + b == -(a*c - b)
def : Pat<(fma (fneg f64:$A), f64:$C, f64:$B),
@@ -2109,6 +2296,12 @@ def : Pat<(fma (fneg f32:$A), f32:$C, f32:$B),
def : Pat<(fma f32:$A, (fneg f32:$C), f32:$B),
(FNMSUBS $A, $C, $B)>;
+// FCOPYSIGN's operand types need not agree.
+def : Pat<(fcopysign f64:$frB, f32:$frA),
+ (FCPSGND (COPY_TO_REGCLASS $frA, F8RC), $frB)>;
+def : Pat<(fcopysign f32:$frB, f64:$frA),
+ (FCPSGNS (COPY_TO_REGCLASS $frA, F4RC), $frB)>;
+
include "PPCInstrAltivec.td"
include "PPCInstr64Bit.td"
@@ -2123,6 +2316,41 @@ def ISYNC : XLForm_2_ext<19, 150, 0, 0, 0, (outs), (ins),
def ICBI : XForm_1a<31, 982, (outs), (ins memrr:$src),
"icbi $src", LdStICBI, []>;
+def EIEIO : XForm_24_eieio<31, 854, (outs), (ins),
+ "eieio", LdStLoad, []>;
+
+def WAIT : XForm_24_sync<31, 62, (outs), (ins i32imm:$L),
+ "wait $L", LdStLoad, []>;
+
+def MTMSR: XForm_mtmsr<31, 146, (outs), (ins gprc:$RS, i32imm:$L),
+ "mtmsr $RS, $L", SprMTMSR>;
+
+def MFMSR : XForm_rs<31, 83, (outs gprc:$RT), (ins),
+ "mfmsr $RT", SprMFMSR, []>;
+
+def MTMSRD : XForm_mtmsr<31, 178, (outs), (ins gprc:$RS, i32imm:$L),
+ "mtmsrd $RS, $L", SprMTMSRD>;
+
+def SLBIE : XForm_16b<31, 434, (outs), (ins gprc:$RB),
+ "slbie $RB", SprSLBIE, []>;
+
+def SLBMTE : XForm_26<31, 402, (outs), (ins gprc:$RS, gprc:$RB),
+ "slbmte $RS, $RB", SprSLBMTE, []>;
+
+def SLBMFEE : XForm_26<31, 915, (outs gprc:$RT), (ins gprc:$RB),
+ "slbmfee $RT, $RB", SprSLBMFEE, []>;
+
+def SLBIA : XForm_0<31, 498, (outs), (ins), "slbia", SprSLBIA, []>;
+
+def TLBSYNC : XForm_0<31, 566, (outs), (ins),
+ "tlbsync", SprTLBSYNC, []>;
+
+def TLBIEL : XForm_16b<31, 274, (outs), (ins gprc:$RB),
+ "tlbiel $RB", SprTLBIEL, []>;
+
+def TLBIE : XForm_26<31, 306, (outs), (ins gprc:$RS, gprc:$RB),
+ "tlbie $RB,$RS", SprTLBIE, []>;
+
//===----------------------------------------------------------------------===//
// PowerPC Assembler Instruction Aliases
//
@@ -2143,66 +2371,339 @@ class PPCAsmPseudo<string asm, dag iops>
let isPseudo = 1;
}
-def : InstAlias<"mr $rA, $rB", (OR8 g8rc:$rA, g8rc:$rB, g8rc:$rB)>;
+def : InstAlias<"sc", (SC 0)>;
+
+def : InstAlias<"sync", (SYNC 0)>;
+def : InstAlias<"msync", (SYNC 0)>;
+def : InstAlias<"lwsync", (SYNC 1)>;
+def : InstAlias<"ptesync", (SYNC 2)>;
+
+def : InstAlias<"wait", (WAIT 0)>;
+def : InstAlias<"waitrsv", (WAIT 1)>;
+def : InstAlias<"waitimpl", (WAIT 2)>;
+
+def : InstAlias<"crset $bx", (CREQV crbitrc:$bx, crbitrc:$bx, crbitrc:$bx)>;
+def : InstAlias<"crclr $bx", (CRXOR crbitrc:$bx, crbitrc:$bx, crbitrc:$bx)>;
+def : InstAlias<"crmove $bx, $by", (CROR crbitrc:$bx, crbitrc:$by, crbitrc:$by)>;
+def : InstAlias<"crnot $bx, $by", (CRNOR crbitrc:$bx, crbitrc:$by, crbitrc:$by)>;
+
+def : InstAlias<"mtxer $Rx", (MTSPR 1, gprc:$Rx)>;
+def : InstAlias<"mfxer $Rx", (MFSPR gprc:$Rx, 1)>;
+def : InstAlias<"mftb $Rx", (MFTB gprc:$Rx, 268)>;
+def : InstAlias<"mftbu $Rx", (MFTB gprc:$Rx, 269)>;
+
+def : InstAlias<"xnop", (XORI R0, R0, 0)>;
+
+def : InstAlias<"mr $rA, $rB", (OR8 g8rc:$rA, g8rc:$rB, g8rc:$rB)>;
+def : InstAlias<"mr. $rA, $rB", (OR8o g8rc:$rA, g8rc:$rB, g8rc:$rB)>;
+
+def : InstAlias<"not $rA, $rB", (NOR8 g8rc:$rA, g8rc:$rB, g8rc:$rB)>;
+def : InstAlias<"not. $rA, $rB", (NOR8o g8rc:$rA, g8rc:$rB, g8rc:$rB)>;
+
+def : InstAlias<"mtcr $rA", (MTCRF8 255, g8rc:$rA)>;
+
+def LAx : PPCAsmPseudo<"la $rA, $addr", (ins gprc:$rA, memri:$addr)>;
+
+def SUBI : PPCAsmPseudo<"subi $rA, $rB, $imm",
+ (ins gprc:$rA, gprc:$rB, s16imm:$imm)>;
+def SUBIS : PPCAsmPseudo<"subis $rA, $rB, $imm",
+ (ins gprc:$rA, gprc:$rB, s16imm:$imm)>;
+def SUBIC : PPCAsmPseudo<"subic $rA, $rB, $imm",
+ (ins gprc:$rA, gprc:$rB, s16imm:$imm)>;
+def SUBICo : PPCAsmPseudo<"subic. $rA, $rB, $imm",
+ (ins gprc:$rA, gprc:$rB, s16imm:$imm)>;
+
+def : InstAlias<"sub $rA, $rB, $rC", (SUBF8 g8rc:$rA, g8rc:$rC, g8rc:$rB)>;
+def : InstAlias<"sub. $rA, $rB, $rC", (SUBF8o g8rc:$rA, g8rc:$rC, g8rc:$rB)>;
+def : InstAlias<"subc $rA, $rB, $rC", (SUBFC8 g8rc:$rA, g8rc:$rC, g8rc:$rB)>;
+def : InstAlias<"subc. $rA, $rB, $rC", (SUBFC8o g8rc:$rA, g8rc:$rC, g8rc:$rB)>;
+
+def : InstAlias<"mtmsrd $RS", (MTMSRD gprc:$RS, 0)>;
+def : InstAlias<"mtmsr $RS", (MTMSR gprc:$RS, 0)>;
+
+def : InstAlias<"mfsprg $RT, 0", (MFSPR gprc:$RT, 272)>;
+def : InstAlias<"mfsprg $RT, 1", (MFSPR gprc:$RT, 273)>;
+def : InstAlias<"mfsprg $RT, 2", (MFSPR gprc:$RT, 274)>;
+def : InstAlias<"mfsprg $RT, 3", (MFSPR gprc:$RT, 275)>;
+
+def : InstAlias<"mfsprg0 $RT", (MFSPR gprc:$RT, 272)>;
+def : InstAlias<"mfsprg1 $RT", (MFSPR gprc:$RT, 273)>;
+def : InstAlias<"mfsprg2 $RT", (MFSPR gprc:$RT, 274)>;
+def : InstAlias<"mfsprg3 $RT", (MFSPR gprc:$RT, 275)>;
+
+def : InstAlias<"mtsprg 0, $RT", (MTSPR 272, gprc:$RT)>;
+def : InstAlias<"mtsprg 1, $RT", (MTSPR 273, gprc:$RT)>;
+def : InstAlias<"mtsprg 2, $RT", (MTSPR 274, gprc:$RT)>;
+def : InstAlias<"mtsprg 3, $RT", (MTSPR 275, gprc:$RT)>;
+
+def : InstAlias<"mtsprg0 $RT", (MTSPR 272, gprc:$RT)>;
+def : InstAlias<"mtsprg1 $RT", (MTSPR 273, gprc:$RT)>;
+def : InstAlias<"mtsprg2 $RT", (MTSPR 274, gprc:$RT)>;
+def : InstAlias<"mtsprg3 $RT", (MTSPR 275, gprc:$RT)>;
+
+def : InstAlias<"mtasr $RS", (MTSPR 280, gprc:$RS)>;
+
+def : InstAlias<"mfdec $RT", (MFSPR gprc:$RT, 22)>;
+def : InstAlias<"mtdec $RT", (MTSPR 22, gprc:$RT)>;
+
+def : InstAlias<"mfpvr $RT", (MFSPR gprc:$RT, 287)>;
+
+def : InstAlias<"mfsdr1 $RT", (MFSPR gprc:$RT, 25)>;
+def : InstAlias<"mtsdr1 $RT", (MTSPR 25, gprc:$RT)>;
+
+def : InstAlias<"mfsrr0 $RT", (MFSPR gprc:$RT, 26)>;
+def : InstAlias<"mfsrr1 $RT", (MFSPR gprc:$RT, 27)>;
+def : InstAlias<"mtsrr0 $RT", (MTSPR 26, gprc:$RT)>;
+def : InstAlias<"mtsrr1 $RT", (MTSPR 27, gprc:$RT)>;
+
+def : InstAlias<"tlbie $RB", (TLBIE R0, gprc:$RB)>;
+
+def EXTLWI : PPCAsmPseudo<"extlwi $rA, $rS, $n, $b",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
+def EXTLWIo : PPCAsmPseudo<"extlwi. $rA, $rS, $n, $b",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
+def EXTRWI : PPCAsmPseudo<"extrwi $rA, $rS, $n, $b",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
+def EXTRWIo : PPCAsmPseudo<"extrwi. $rA, $rS, $n, $b",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
+def INSLWI : PPCAsmPseudo<"inslwi $rA, $rS, $n, $b",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
+def INSLWIo : PPCAsmPseudo<"inslwi. $rA, $rS, $n, $b",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
+def INSRWI : PPCAsmPseudo<"insrwi $rA, $rS, $n, $b",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
+def INSRWIo : PPCAsmPseudo<"insrwi. $rA, $rS, $n, $b",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
+def ROTRWI : PPCAsmPseudo<"rotrwi $rA, $rS, $n",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n)>;
+def ROTRWIo : PPCAsmPseudo<"rotrwi. $rA, $rS, $n",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n)>;
def SLWI : PPCAsmPseudo<"slwi $rA, $rS, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$n)>;
+def SLWIo : PPCAsmPseudo<"slwi. $rA, $rS, $n",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n)>;
def SRWI : PPCAsmPseudo<"srwi $rA, $rS, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$n)>;
+def SRWIo : PPCAsmPseudo<"srwi. $rA, $rS, $n",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n)>;
+def CLRRWI : PPCAsmPseudo<"clrrwi $rA, $rS, $n",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n)>;
+def CLRRWIo : PPCAsmPseudo<"clrrwi. $rA, $rS, $n",
+ (ins gprc:$rA, gprc:$rS, u5imm:$n)>;
+def CLRLSLWI : PPCAsmPseudo<"clrlslwi $rA, $rS, $b, $n",
+ (ins gprc:$rA, gprc:$rS, u5imm:$b, u5imm:$n)>;
+def CLRLSLWIo : PPCAsmPseudo<"clrlslwi. $rA, $rS, $b, $n",
+ (ins gprc:$rA, gprc:$rS, u5imm:$b, u5imm:$n)>;
+
+def : InstAlias<"rotlwi $rA, $rS, $n", (RLWINM gprc:$rA, gprc:$rS, u5imm:$n, 0, 31)>;
+def : InstAlias<"rotlwi. $rA, $rS, $n", (RLWINMo gprc:$rA, gprc:$rS, u5imm:$n, 0, 31)>;
+def : InstAlias<"rotlw $rA, $rS, $rB", (RLWNM gprc:$rA, gprc:$rS, gprc:$rB, 0, 31)>;
+def : InstAlias<"rotlw. $rA, $rS, $rB", (RLWNMo gprc:$rA, gprc:$rS, gprc:$rB, 0, 31)>;
+def : InstAlias<"clrlwi $rA, $rS, $n", (RLWINM gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>;
+def : InstAlias<"clrlwi. $rA, $rS, $n", (RLWINMo gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>;
+
+def EXTLDI : PPCAsmPseudo<"extldi $rA, $rS, $n, $b",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
+def EXTLDIo : PPCAsmPseudo<"extldi. $rA, $rS, $n, $b",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
+def EXTRDI : PPCAsmPseudo<"extrdi $rA, $rS, $n, $b",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
+def EXTRDIo : PPCAsmPseudo<"extrdi. $rA, $rS, $n, $b",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
+def INSRDI : PPCAsmPseudo<"insrdi $rA, $rS, $n, $b",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
+def INSRDIo : PPCAsmPseudo<"insrdi. $rA, $rS, $n, $b",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
+def ROTRDI : PPCAsmPseudo<"rotrdi $rA, $rS, $n",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
+def ROTRDIo : PPCAsmPseudo<"rotrdi. $rA, $rS, $n",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
def SLDI : PPCAsmPseudo<"sldi $rA, $rS, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
+def SLDIo : PPCAsmPseudo<"sldi. $rA, $rS, $n",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
def SRDI : PPCAsmPseudo<"srdi $rA, $rS, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
-
-def : InstAlias<"blt $cc, $dst", (BCC 12, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"bgt $cc, $dst", (BCC 44, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"beq $cc, $dst", (BCC 76, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"bun $cc, $dst", (BCC 108, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"bso $cc, $dst", (BCC 108, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"bge $cc, $dst", (BCC 4, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"bnl $cc, $dst", (BCC 4, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"ble $cc, $dst", (BCC 36, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"bng $cc, $dst", (BCC 36, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"bne $cc, $dst", (BCC 68, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"bnu $cc, $dst", (BCC 100, crrc:$cc, condbrtarget:$dst)>;
-def : InstAlias<"bns $cc, $dst", (BCC 100, crrc:$cc, condbrtarget:$dst)>;
-
-def : InstAlias<"bltlr $cc", (BCLR 12, crrc:$cc)>;
-def : InstAlias<"bgtlr $cc", (BCLR 44, crrc:$cc)>;
-def : InstAlias<"beqlr $cc", (BCLR 76, crrc:$cc)>;
-def : InstAlias<"bunlr $cc", (BCLR 108, crrc:$cc)>;
-def : InstAlias<"bsolr $cc", (BCLR 108, crrc:$cc)>;
-def : InstAlias<"bgelr $cc", (BCLR 4, crrc:$cc)>;
-def : InstAlias<"bnllr $cc", (BCLR 4, crrc:$cc)>;
-def : InstAlias<"blelr $cc", (BCLR 36, crrc:$cc)>;
-def : InstAlias<"bnglr $cc", (BCLR 36, crrc:$cc)>;
-def : InstAlias<"bnelr $cc", (BCLR 68, crrc:$cc)>;
-def : InstAlias<"bnulr $cc", (BCLR 100, crrc:$cc)>;
-def : InstAlias<"bnslr $cc", (BCLR 100, crrc:$cc)>;
-
-def : InstAlias<"bltctr $cc", (BCCTR 12, crrc:$cc)>;
-def : InstAlias<"bgtctr $cc", (BCCTR 44, crrc:$cc)>;
-def : InstAlias<"beqctr $cc", (BCCTR 76, crrc:$cc)>;
-def : InstAlias<"bunctr $cc", (BCCTR 108, crrc:$cc)>;
-def : InstAlias<"bsoctr $cc", (BCCTR 108, crrc:$cc)>;
-def : InstAlias<"bgectr $cc", (BCCTR 4, crrc:$cc)>;
-def : InstAlias<"bnlctr $cc", (BCCTR 4, crrc:$cc)>;
-def : InstAlias<"blectr $cc", (BCCTR 36, crrc:$cc)>;
-def : InstAlias<"bngctr $cc", (BCCTR 36, crrc:$cc)>;
-def : InstAlias<"bnectr $cc", (BCCTR 68, crrc:$cc)>;
-def : InstAlias<"bnuctr $cc", (BCCTR 100, crrc:$cc)>;
-def : InstAlias<"bnsctr $cc", (BCCTR 100, crrc:$cc)>;
-
-def : InstAlias<"bltctrl $cc", (BCCTRL 12, crrc:$cc)>;
-def : InstAlias<"bgtctrl $cc", (BCCTRL 44, crrc:$cc)>;
-def : InstAlias<"beqctrl $cc", (BCCTRL 76, crrc:$cc)>;
-def : InstAlias<"bunctrl $cc", (BCCTRL 108, crrc:$cc)>;
-def : InstAlias<"bsoctrl $cc", (BCCTRL 108, crrc:$cc)>;
-def : InstAlias<"bgectrl $cc", (BCCTRL 4, crrc:$cc)>;
-def : InstAlias<"bnlctrl $cc", (BCCTRL 4, crrc:$cc)>;
-def : InstAlias<"blectrl $cc", (BCCTRL 36, crrc:$cc)>;
-def : InstAlias<"bngctrl $cc", (BCCTRL 36, crrc:$cc)>;
-def : InstAlias<"bnectrl $cc", (BCCTRL 68, crrc:$cc)>;
-def : InstAlias<"bnuctrl $cc", (BCCTRL 100, crrc:$cc)>;
-def : InstAlias<"bnsctrl $cc", (BCCTRL 100, crrc:$cc)>;
+def SRDIo : PPCAsmPseudo<"srdi. $rA, $rS, $n",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
+def CLRRDI : PPCAsmPseudo<"clrrdi $rA, $rS, $n",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
+def CLRRDIo : PPCAsmPseudo<"clrrdi. $rA, $rS, $n",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
+def CLRLSLDI : PPCAsmPseudo<"clrlsldi $rA, $rS, $b, $n",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$b, u6imm:$n)>;
+def CLRLSLDIo : PPCAsmPseudo<"clrlsldi. $rA, $rS, $b, $n",
+ (ins g8rc:$rA, g8rc:$rS, u6imm:$b, u6imm:$n)>;
+
+def : InstAlias<"rotldi $rA, $rS, $n", (RLDICL g8rc:$rA, g8rc:$rS, u6imm:$n, 0)>;
+def : InstAlias<"rotldi. $rA, $rS, $n", (RLDICLo g8rc:$rA, g8rc:$rS, u6imm:$n, 0)>;
+def : InstAlias<"rotld $rA, $rS, $rB", (RLDCL g8rc:$rA, g8rc:$rS, gprc:$rB, 0)>;
+def : InstAlias<"rotld. $rA, $rS, $rB", (RLDCLo g8rc:$rA, g8rc:$rS, gprc:$rB, 0)>;
+def : InstAlias<"clrldi $rA, $rS, $n", (RLDICL g8rc:$rA, g8rc:$rS, 0, u6imm:$n)>;
+def : InstAlias<"clrldi. $rA, $rS, $n", (RLDICLo g8rc:$rA, g8rc:$rS, 0, u6imm:$n)>;
+
+// These generic branch instruction forms are used for the assembler parser only.
+// Defs and Uses are conservative, since we don't know the BO value.
+let PPC970_Unit = 7 in {
+ let Defs = [CTR], Uses = [CTR, RM] in {
+ def gBC : BForm_3<16, 0, 0, (outs),
+ (ins u5imm:$bo, crbitrc:$bi, condbrtarget:$dst),
+ "bc $bo, $bi, $dst">;
+ def gBCA : BForm_3<16, 1, 0, (outs),
+ (ins u5imm:$bo, crbitrc:$bi, abscondbrtarget:$dst),
+ "bca $bo, $bi, $dst">;
+ }
+ let Defs = [LR, CTR], Uses = [CTR, RM] in {
+ def gBCL : BForm_3<16, 0, 1, (outs),
+ (ins u5imm:$bo, crbitrc:$bi, condbrtarget:$dst),
+ "bcl $bo, $bi, $dst">;
+ def gBCLA : BForm_3<16, 1, 1, (outs),
+ (ins u5imm:$bo, crbitrc:$bi, abscondbrtarget:$dst),
+ "bcla $bo, $bi, $dst">;
+ }
+ let Defs = [CTR], Uses = [CTR, LR, RM] in
+ def gBCLR : XLForm_2<19, 16, 0, (outs),
+ (ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
+ "bclr $bo, $bi, $bh", BrB, []>;
+ let Defs = [LR, CTR], Uses = [CTR, LR, RM] in
+ def gBCLRL : XLForm_2<19, 16, 1, (outs),
+ (ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
+ "bclrl $bo, $bi, $bh", BrB, []>;
+ let Defs = [CTR], Uses = [CTR, LR, RM] in
+ def gBCCTR : XLForm_2<19, 528, 0, (outs),
+ (ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
+ "bcctr $bo, $bi, $bh", BrB, []>;
+ let Defs = [LR, CTR], Uses = [CTR, LR, RM] in
+ def gBCCTRL : XLForm_2<19, 528, 1, (outs),
+ (ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
+ "bcctrl $bo, $bi, $bh", BrB, []>;
+}
+def : InstAlias<"bclr $bo, $bi", (gBCLR u5imm:$bo, crbitrc:$bi, 0)>;
+def : InstAlias<"bclrl $bo, $bi", (gBCLRL u5imm:$bo, crbitrc:$bi, 0)>;
+def : InstAlias<"bcctr $bo, $bi", (gBCCTR u5imm:$bo, crbitrc:$bi, 0)>;
+def : InstAlias<"bcctrl $bo, $bi", (gBCCTRL u5imm:$bo, crbitrc:$bi, 0)>;
+
+multiclass BranchSimpleMnemonic1<string name, string pm, int bo> {
+ def : InstAlias<"b"#name#pm#" $bi, $dst", (gBC bo, crbitrc:$bi, condbrtarget:$dst)>;
+ def : InstAlias<"b"#name#"a"#pm#" $bi, $dst", (gBCA bo, crbitrc:$bi, abscondbrtarget:$dst)>;
+ def : InstAlias<"b"#name#"lr"#pm#" $bi", (gBCLR bo, crbitrc:$bi, 0)>;
+ def : InstAlias<"b"#name#"l"#pm#" $bi, $dst", (gBCL bo, crbitrc:$bi, condbrtarget:$dst)>;
+ def : InstAlias<"b"#name#"la"#pm#" $bi, $dst", (gBCLA bo, crbitrc:$bi, abscondbrtarget:$dst)>;
+ def : InstAlias<"b"#name#"lrl"#pm#" $bi", (gBCLRL bo, crbitrc:$bi, 0)>;
+}
+multiclass BranchSimpleMnemonic2<string name, string pm, int bo>
+ : BranchSimpleMnemonic1<name, pm, bo> {
+ def : InstAlias<"b"#name#"ctr"#pm#" $bi", (gBCCTR bo, crbitrc:$bi, 0)>;
+ def : InstAlias<"b"#name#"ctrl"#pm#" $bi", (gBCCTRL bo, crbitrc:$bi, 0)>;
+}
+defm : BranchSimpleMnemonic2<"t", "", 12>;
+defm : BranchSimpleMnemonic2<"f", "", 4>;
+defm : BranchSimpleMnemonic2<"t", "-", 14>;
+defm : BranchSimpleMnemonic2<"f", "-", 6>;
+defm : BranchSimpleMnemonic2<"t", "+", 15>;
+defm : BranchSimpleMnemonic2<"f", "+", 7>;
+defm : BranchSimpleMnemonic1<"dnzt", "", 8>;
+defm : BranchSimpleMnemonic1<"dnzf", "", 0>;
+defm : BranchSimpleMnemonic1<"dzt", "", 10>;
+defm : BranchSimpleMnemonic1<"dzf", "", 2>;
+
+multiclass BranchExtendedMnemonicPM<string name, string pm, int bibo> {
+ def : InstAlias<"b"#name#pm#" $cc, $dst",
+ (BCC bibo, crrc:$cc, condbrtarget:$dst)>;
+ def : InstAlias<"b"#name#pm#" $dst",
+ (BCC bibo, CR0, condbrtarget:$dst)>;
+
+ def : InstAlias<"b"#name#"a"#pm#" $cc, $dst",
+ (BCCA bibo, crrc:$cc, abscondbrtarget:$dst)>;
+ def : InstAlias<"b"#name#"a"#pm#" $dst",
+ (BCCA bibo, CR0, abscondbrtarget:$dst)>;
+
+ def : InstAlias<"b"#name#"lr"#pm#" $cc",
+ (BCLR bibo, crrc:$cc)>;
+ def : InstAlias<"b"#name#"lr"#pm,
+ (BCLR bibo, CR0)>;
+
+ def : InstAlias<"b"#name#"ctr"#pm#" $cc",
+ (BCCTR bibo, crrc:$cc)>;
+ def : InstAlias<"b"#name#"ctr"#pm,
+ (BCCTR bibo, CR0)>;
+
+ def : InstAlias<"b"#name#"l"#pm#" $cc, $dst",
+ (BCCL bibo, crrc:$cc, condbrtarget:$dst)>;
+ def : InstAlias<"b"#name#"l"#pm#" $dst",
+ (BCCL bibo, CR0, condbrtarget:$dst)>;
+
+ def : InstAlias<"b"#name#"la"#pm#" $cc, $dst",
+ (BCCLA bibo, crrc:$cc, abscondbrtarget:$dst)>;
+ def : InstAlias<"b"#name#"la"#pm#" $dst",
+ (BCCLA bibo, CR0, abscondbrtarget:$dst)>;
+
+ def : InstAlias<"b"#name#"lrl"#pm#" $cc",
+ (BCLRL bibo, crrc:$cc)>;
+ def : InstAlias<"b"#name#"lrl"#pm,
+ (BCLRL bibo, CR0)>;
+
+ def : InstAlias<"b"#name#"ctrl"#pm#" $cc",
+ (BCCTRL bibo, crrc:$cc)>;
+ def : InstAlias<"b"#name#"ctrl"#pm,
+ (BCCTRL bibo, CR0)>;
+}
+multiclass BranchExtendedMnemonic<string name, int bibo> {
+ defm : BranchExtendedMnemonicPM<name, "", bibo>;
+ defm : BranchExtendedMnemonicPM<name, "-", !add(bibo, 2)>;
+ defm : BranchExtendedMnemonicPM<name, "+", !add(bibo, 3)>;
+}
+defm : BranchExtendedMnemonic<"lt", 12>;
+defm : BranchExtendedMnemonic<"gt", 44>;
+defm : BranchExtendedMnemonic<"eq", 76>;
+defm : BranchExtendedMnemonic<"un", 108>;
+defm : BranchExtendedMnemonic<"so", 108>;
+defm : BranchExtendedMnemonic<"ge", 4>;
+defm : BranchExtendedMnemonic<"nl", 4>;
+defm : BranchExtendedMnemonic<"le", 36>;
+defm : BranchExtendedMnemonic<"ng", 36>;
+defm : BranchExtendedMnemonic<"ne", 68>;
+defm : BranchExtendedMnemonic<"nu", 100>;
+defm : BranchExtendedMnemonic<"ns", 100>;
+
+def : InstAlias<"cmpwi $rA, $imm", (CMPWI CR0, gprc:$rA, s16imm:$imm)>;
+def : InstAlias<"cmpw $rA, $rB", (CMPW CR0, gprc:$rA, gprc:$rB)>;
+def : InstAlias<"cmplwi $rA, $imm", (CMPLWI CR0, gprc:$rA, u16imm:$imm)>;
+def : InstAlias<"cmplw $rA, $rB", (CMPLW CR0, gprc:$rA, gprc:$rB)>;
+def : InstAlias<"cmpdi $rA, $imm", (CMPDI CR0, g8rc:$rA, s16imm:$imm)>;
+def : InstAlias<"cmpd $rA, $rB", (CMPD CR0, g8rc:$rA, g8rc:$rB)>;
+def : InstAlias<"cmpldi $rA, $imm", (CMPLDI CR0, g8rc:$rA, u16imm:$imm)>;
+def : InstAlias<"cmpld $rA, $rB", (CMPLD CR0, g8rc:$rA, g8rc:$rB)>;
+
+def : InstAlias<"cmpi $bf, 0, $rA, $imm", (CMPWI crrc:$bf, gprc:$rA, s16imm:$imm)>;
+def : InstAlias<"cmp $bf, 0, $rA, $rB", (CMPW crrc:$bf, gprc:$rA, gprc:$rB)>;
+def : InstAlias<"cmpli $bf, 0, $rA, $imm", (CMPLWI crrc:$bf, gprc:$rA, u16imm:$imm)>;
+def : InstAlias<"cmpl $bf, 0, $rA, $rB", (CMPLW crrc:$bf, gprc:$rA, gprc:$rB)>;
+def : InstAlias<"cmpi $bf, 1, $rA, $imm", (CMPDI crrc:$bf, g8rc:$rA, s16imm:$imm)>;
+def : InstAlias<"cmp $bf, 1, $rA, $rB", (CMPD crrc:$bf, g8rc:$rA, g8rc:$rB)>;
+def : InstAlias<"cmpli $bf, 1, $rA, $imm", (CMPLDI crrc:$bf, g8rc:$rA, u16imm:$imm)>;
+def : InstAlias<"cmpl $bf, 1, $rA, $rB", (CMPLD crrc:$bf, g8rc:$rA, g8rc:$rB)>;
+
+multiclass TrapExtendedMnemonic<string name, int to> {
+ def : InstAlias<"td"#name#"i $rA, $imm", (TDI to, g8rc:$rA, s16imm:$imm)>;
+ def : InstAlias<"td"#name#" $rA, $rB", (TD to, g8rc:$rA, g8rc:$rB)>;
+ def : InstAlias<"tw"#name#"i $rA, $imm", (TWI to, gprc:$rA, s16imm:$imm)>;
+ def : InstAlias<"tw"#name#" $rA, $rB", (TW to, gprc:$rA, gprc:$rB)>;
+}
+defm : TrapExtendedMnemonic<"lt", 16>;
+defm : TrapExtendedMnemonic<"le", 20>;
+defm : TrapExtendedMnemonic<"eq", 4>;
+defm : TrapExtendedMnemonic<"ge", 12>;
+defm : TrapExtendedMnemonic<"gt", 8>;
+defm : TrapExtendedMnemonic<"nl", 12>;
+defm : TrapExtendedMnemonic<"ne", 24>;
+defm : TrapExtendedMnemonic<"ng", 20>;
+defm : TrapExtendedMnemonic<"llt", 2>;
+defm : TrapExtendedMnemonic<"lle", 6>;
+defm : TrapExtendedMnemonic<"lge", 5>;
+defm : TrapExtendedMnemonic<"lgt", 1>;
+defm : TrapExtendedMnemonic<"lnl", 5>;
+defm : TrapExtendedMnemonic<"lng", 6>;
+defm : TrapExtendedMnemonic<"u", 31>;
diff --git a/lib/Target/PowerPC/PPCJITInfo.cpp b/lib/Target/PowerPC/PPCJITInfo.cpp
index cfcd7490ed0d..5e3a48d8bbdb 100644
--- a/lib/Target/PowerPC/PPCJITInfo.cpp
+++ b/lib/Target/PowerPC/PPCJITInfo.cpp
@@ -71,8 +71,13 @@ static void EmitBranchToAt(uint64_t At, uint64_t To, bool isCall, bool is64Bit){
extern "C" void PPC32CompilationCallback();
extern "C" void PPC64CompilationCallback();
-#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
- !(defined(__ppc64__) || defined(__FreeBSD__))
+// The first clause of the preprocessor directive looks wrong, but it is
+// necessary when compiling this code on non-PowerPC hosts.
+#if (!defined(__ppc__) && !defined(__powerpc__)) || defined(__powerpc64__) || defined(__ppc64__)
+void PPC32CompilationCallback() {
+ llvm_unreachable("This is not a 32bit PowerPC, you can't execute this!");
+}
+#elif !defined(__ELF__)
// CompilationCallback stub - We can't use a C function with inline assembly in
// it, because the prolog/epilog inserted by GCC won't work for us. Instead,
// write our own wrapper, which does things our way, so we have complete control
@@ -137,8 +142,8 @@ asm(
"bctr\n"
);
-#elif defined(__PPC__) && !defined(__ppc64__)
-// Linux & FreeBSD / PPC 32 support
+#else
+// ELF PPC 32 support
// CompilationCallback stub - We can't use a C function with inline assembly in
// it, because the prolog/epilog inserted by GCC won't work for us. Instead,
@@ -197,15 +202,14 @@ asm(
"mtlr 0\n"
"bctr\n"
);
-#else
-void PPC32CompilationCallback() {
- llvm_unreachable("This is not a power pc, you can't execute this!");
-}
#endif
-#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
- defined(__ppc64__)
-#ifdef __ELF__
+#if !defined(__powerpc64__) && !defined(__ppc64__)
+void PPC64CompilationCallback() {
+ llvm_unreachable("This is not a 64bit PowerPC, you can't execute this!");
+}
+#else
+# ifdef __ELF__
asm(
".text\n"
".align 2\n"
@@ -219,13 +223,13 @@ asm(
".align 4\n"
".type PPC64CompilationCallback,@function\n"
".L.PPC64CompilationCallback:\n"
-#else
+# else
asm(
".text\n"
".align 2\n"
".globl _PPC64CompilationCallback\n"
"_PPC64CompilationCallback:\n"
-#endif
+# endif
// Make space for 8 ints r[3-10] and 13 doubles f[1-13] and the
// FIXME: need to save v[0-19] for altivec?
// Set up a proper stack frame
@@ -258,12 +262,12 @@ asm(
"ld 5, 280(1)\n" // stub's frame
"ld 4, 16(5)\n" // stub's lr
"li 5, 1\n" // 1 == 64 bit
-#ifdef __ELF__
+# ifdef __ELF__
"bl LLVMPPCCompilationCallback\n"
"nop\n"
-#else
+# else
"bl _LLVMPPCCompilationCallback\n"
-#endif
+# endif
"mtctr 3\n"
// Restore all int arg registers
"ld 10, 272(1)\n" "ld 9, 264(1)\n"
@@ -285,10 +289,6 @@ asm(
// XXX: any special TOC handling in the ELF case for JIT?
"bctr\n"
);
-#else
-void PPC64CompilationCallback() {
- llvm_unreachable("This is not a power pc, you can't execute this!");
-}
#endif
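
The reworked guards above follow one pattern: if the host cannot run the hand-written assembly, compile an unreachable stub instead so the symbol still exists. A minimal sketch of that pattern, using the same predefined macros as the patch but an invented function name:

    #include <cstdio>

    #if (!defined(__ppc__) && !defined(__powerpc__)) || \
        defined(__powerpc64__) || defined(__ppc64__)
    // Not a 32-bit PowerPC host: provide a stub so the symbol still links.
    extern "C" void Demo32BitOnlyEntryPoint() {
      std::fprintf(stderr, "not reachable on this host\n");
    }
    #else
    // 32-bit PowerPC host: the real implementation (inline assembly in the
    // patch) would live here.
    extern "C" void Demo32BitOnlyEntryPoint() {
      std::puts("32-bit PowerPC host path");
    }
    #endif

    int main() { Demo32BitOnlyEntryPoint(); }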
extern "C" {
diff --git a/lib/Target/PowerPC/PPCMCInstLower.cpp b/lib/Target/PowerPC/PPCMCInstLower.cpp
index f8cf3a589c1e..f61c8bf0216e 100644
--- a/lib/Target/PowerPC/PPCMCInstLower.cpp
+++ b/lib/Target/PowerPC/PPCMCInstLower.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "PPC.h"
+#include "MCTargetDesc/PPCMCExpr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/AsmPrinter.h"
@@ -68,7 +69,7 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
if (MO.isGlobal()) {
StubSym =
MachineModuleInfoImpl::
- StubValueTy(AP.Mang->getSymbol(MO.getGlobal()),
+ StubValueTy(AP.getSymbol(MO.getGlobal()),
!MO.getGlobal()->hasInternalLinkage());
} else {
Name.erase(Name.end()-5, Name.end());
@@ -94,7 +95,7 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
if (StubSym.getPointer() == 0) {
assert(MO.isGlobal() && "Extern symbol not handled yet");
StubSym = MachineModuleInfoImpl::
- StubValueTy(AP.Mang->getSymbol(MO.getGlobal()),
+ StubValueTy(AP.getSymbol(MO.getGlobal()),
!MO.getGlobal()->hasInternalLinkage());
}
return Sym;
@@ -111,31 +112,26 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
unsigned access = MO.getTargetFlags() & PPCII::MO_ACCESS_MASK;
switch (access) {
- case PPCII::MO_HA16: RefKind = isDarwin ?
- MCSymbolRefExpr::VK_PPC_DARWIN_HA16 :
- MCSymbolRefExpr::VK_PPC_GAS_HA16;
- break;
- case PPCII::MO_LO16: RefKind = isDarwin ?
- MCSymbolRefExpr::VK_PPC_DARWIN_LO16 :
- MCSymbolRefExpr::VK_PPC_GAS_LO16;
- break;
- case PPCII::MO_TPREL16_HA: RefKind = MCSymbolRefExpr::VK_PPC_TPREL16_HA;
- break;
- case PPCII::MO_TPREL16_LO: RefKind = MCSymbolRefExpr::VK_PPC_TPREL16_LO;
- break;
- case PPCII::MO_DTPREL16_LO: RefKind = MCSymbolRefExpr::VK_PPC_DTPREL16_LO;
- break;
- case PPCII::MO_TLSLD16_LO: RefKind = MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_LO;
- break;
- case PPCII::MO_TOC16_LO: RefKind = MCSymbolRefExpr::VK_PPC_TOC16_LO;
- break;
- }
+ case PPCII::MO_TPREL_LO:
+ RefKind = MCSymbolRefExpr::VK_PPC_TPREL_LO;
+ break;
+ case PPCII::MO_TPREL_HA:
+ RefKind = MCSymbolRefExpr::VK_PPC_TPREL_HA;
+ break;
+ case PPCII::MO_DTPREL_LO:
+ RefKind = MCSymbolRefExpr::VK_PPC_DTPREL_LO;
+ break;
+ case PPCII::MO_TLSLD_LO:
+ RefKind = MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO;
+ break;
+ case PPCII::MO_TOC_LO:
+ RefKind = MCSymbolRefExpr::VK_PPC_TOC_LO;
+ break;
+ case PPCII::MO_TLS:
+ RefKind = MCSymbolRefExpr::VK_PPC_TLS;
+ break;
+ }
- // FIXME: This isn't right, but we don't have a good way to express this in
- // the MC Level, see below.
- if (MO.getTargetFlags() & PPCII::MO_PIC_FLAG)
- RefKind = MCSymbolRefExpr::VK_None;
-
const MCExpr *Expr = MCSymbolRefExpr::Create(Symbol, RefKind, Ctx);
if (!MO.isJTI() && MO.getOffset())
@@ -149,10 +145,18 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
const MCExpr *PB = MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
Expr = MCBinaryExpr::CreateSub(Expr, PB, Ctx);
- // FIXME: We have no way to make the result be VK_PPC_LO16/VK_PPC_HA16,
- // since it is not a symbol!
}
-
+
+ // Add ha16() / lo16() markers if required.
+ switch (access) {
+ case PPCII::MO_LO:
+ Expr = PPCMCExpr::CreateLo(Expr, isDarwin, Ctx);
+ break;
+ case PPCII::MO_HA:
+ Expr = PPCMCExpr::CreateHa(Expr, isDarwin, Ctx);
+ break;
+ }
+
return MCOperand::CreateExpr(Expr);
}
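
The MO_HA / MO_LO markers emitted above stand for the usual PowerPC @ha/@l address split: the low half is consumed sign-extended by D-form instructions, so the high half must be "high-adjusted" to compensate. A standalone demonstration of that identity (illustrative only, not the MC API):

    #include <cassert>
    #include <cstdint>

    // Sign-extended low half, as addi/lwz D-form immediates consume it.
    static int32_t lo(uint32_t Addr) { return (int16_t)(Addr & 0xFFFF); }
    // High-adjusted half: the +0x8000 compensates for lo()'s sign extension.
    static uint32_t ha(uint32_t Addr) { return (Addr + 0x8000) >> 16; }

    int main() {
      uint32_t Addr = 0x1234ABCD;
      // addis + addi style reconstruction: (ha << 16) + sign-extended lo == Addr.
      assert((ha(Addr) << 16) + lo(Addr) == Addr);
    }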
diff --git a/lib/Target/PowerPC/PPCMachineFunctionInfo.h b/lib/Target/PowerPC/PPCMachineFunctionInfo.h
index 40d1f3a5fc27..33f843dfb432 100644
--- a/lib/Target/PowerPC/PPCMachineFunctionInfo.h
+++ b/lib/Target/PowerPC/PPCMachineFunctionInfo.h
@@ -32,6 +32,9 @@ class PPCFunctionInfo : public MachineFunctionInfo {
///
int ReturnAddrSaveIndex;
+ /// Frame index where the old base pointer is stored.
+ int BasePointerSaveIndex;
+
/// MustSaveLR - Indicates whether LR is defined (or clobbered) in the current
/// function. This is only valid after the initial scan of the function by
/// PEI.
@@ -93,6 +96,7 @@ public:
explicit PPCFunctionInfo(MachineFunction &MF)
: FramePointerSaveIndex(0),
ReturnAddrSaveIndex(0),
+ BasePointerSaveIndex(0),
HasSpills(false),
HasNonRISpills(false),
SpillsCR(false),
@@ -113,6 +117,9 @@ public:
int getReturnAddrSaveIndex() const { return ReturnAddrSaveIndex; }
void setReturnAddrSaveIndex(int idx) { ReturnAddrSaveIndex = idx; }
+ int getBasePointerSaveIndex() const { return BasePointerSaveIndex; }
+ void setBasePointerSaveIndex(int Idx) { BasePointerSaveIndex = Idx; }
+
unsigned getMinReservedArea() const { return MinReservedArea; }
void setMinReservedArea(unsigned size) { MinReservedArea = size; }
@@ -160,7 +167,7 @@ public:
int getCRSpillFrameIndex() const { return CRSpillFrameIndex; }
void setCRSpillFrameIndex(int idx) { CRSpillFrameIndex = idx; }
- const SmallVector<unsigned, 3> &
+ const SmallVectorImpl<unsigned> &
getMustSaveCRs() const { return MustSaveCRs; }
void addMustSaveCR(unsigned Reg) { MustSaveCRs.push_back(Reg); }
};
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 2be6324fd7be..19ccbfcdb169 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -48,12 +48,19 @@
using namespace llvm;
-PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST,
- const TargetInstrInfo &tii)
+static cl::opt<bool>
+EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true),
+ cl::desc("Enable use of a base pointer for complex stack frames"));
+
+static cl::opt<bool>
+AlwaysBasePointer("ppc-always-use-base-pointer", cl::Hidden, cl::init(false),
+ cl::desc("Force the use of a base pointer in every function"));
+
+PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST)
: PPCGenRegisterInfo(ST.isPPC64() ? PPC::LR8 : PPC::LR,
ST.isPPC64() ? 0 : 1,
ST.isPPC64() ? 0 : 1),
- Subtarget(ST), TII(tii) {
+ Subtarget(ST) {
ImmToIdxMap[PPC::LD] = PPC::LDX; ImmToIdxMap[PPC::STD] = PPC::STDX;
ImmToIdxMap[PPC::LBZ] = PPC::LBZX; ImmToIdxMap[PPC::STB] = PPC::STBX;
ImmToIdxMap[PPC::LHZ] = PPC::LHZX; ImmToIdxMap[PPC::LHA] = PPC::LHAX;
@@ -62,6 +69,7 @@ PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST,
ImmToIdxMap[PPC::STH] = PPC::STHX; ImmToIdxMap[PPC::STW] = PPC::STWX;
ImmToIdxMap[PPC::STFS] = PPC::STFSX; ImmToIdxMap[PPC::STFD] = PPC::STFDX;
ImmToIdxMap[PPC::ADDI] = PPC::ADD4;
+ ImmToIdxMap[PPC::LWA_32] = PPC::LWAX_32;
// 64-bit
ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8;
@@ -92,32 +100,41 @@ PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
const uint16_t*
PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
if (Subtarget.isDarwinABI())
- return Subtarget.isPPC64() ? CSR_Darwin64_SaveList :
- CSR_Darwin32_SaveList;
-
- return Subtarget.isPPC64() ? CSR_SVR464_SaveList : CSR_SVR432_SaveList;
+ return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ?
+ CSR_Darwin64_Altivec_SaveList :
+ CSR_Darwin64_SaveList) :
+ (Subtarget.hasAltivec() ?
+ CSR_Darwin32_Altivec_SaveList :
+ CSR_Darwin32_SaveList);
+
+ return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ?
+ CSR_SVR464_Altivec_SaveList :
+ CSR_SVR464_SaveList) :
+ (Subtarget.hasAltivec() ?
+ CSR_SVR432_Altivec_SaveList :
+ CSR_SVR432_SaveList);
}
const uint32_t*
PPCRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
if (Subtarget.isDarwinABI())
- return Subtarget.isPPC64() ? CSR_Darwin64_RegMask :
- CSR_Darwin32_RegMask;
-
- return Subtarget.isPPC64() ? CSR_SVR464_RegMask : CSR_SVR432_RegMask;
+ return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ?
+ CSR_Darwin64_Altivec_RegMask :
+ CSR_Darwin64_RegMask) :
+ (Subtarget.hasAltivec() ?
+ CSR_Darwin32_Altivec_RegMask :
+ CSR_Darwin32_RegMask);
+
+ return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ?
+ CSR_SVR464_Altivec_RegMask :
+ CSR_SVR464_RegMask) :
+ (Subtarget.hasAltivec() ?
+ CSR_SVR432_Altivec_RegMask :
+ CSR_SVR432_RegMask);
}
const uint32_t*
PPCRegisterInfo::getNoPreservedMask() const {
- // The naming here is inverted: The CSR_NoRegs_Altivec has the
- // Altivec registers masked so that they're not saved and restored around
- // instructions with this preserved mask.
-
- if (!Subtarget.hasAltivec())
- return CSR_NoRegs_Altivec_RegMask;
-
- if (Subtarget.isDarwin())
- return CSR_NoRegs_Darwin_RegMask;
return CSR_NoRegs_RegMask;
}
@@ -136,11 +153,24 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(PPC::FP);
Reserved.set(PPC::FP8);
+ // The BP register is also not really a register, but is the representation
+ // of the base pointer register used by setjmp.
+ Reserved.set(PPC::BP);
+ Reserved.set(PPC::BP8);
+
+ // The counter registers must be reserved so that counter-based loops can
+ // be correctly formed (and the mtctr instructions are not DCE'd).
+ Reserved.set(PPC::CTR);
+ Reserved.set(PPC::CTR8);
+
Reserved.set(PPC::R1);
Reserved.set(PPC::LR);
Reserved.set(PPC::LR8);
Reserved.set(PPC::RM);
+ if (!Subtarget.isDarwinABI() || !Subtarget.hasAltivec())
+ Reserved.set(PPC::VRSAVE);
+
// The SVR4 ABI reserves r2 and r13
if (Subtarget.isSVR4ABI()) {
Reserved.set(PPC::R2); // System-reserved register
@@ -157,6 +187,9 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
if (PPCFI->needsFP(MF))
Reserved.set(PPC::X31);
+ if (hasBasePointer(MF))
+ Reserved.set(PPC::X30);
+
// The 64-bit SVR4 ABI reserves r2 for the TOC pointer.
if (Subtarget.isSVR4ABI()) {
Reserved.set(PPC::X2);
@@ -166,6 +199,15 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
if (PPCFI->needsFP(MF))
Reserved.set(PPC::R31);
+ if (hasBasePointer(MF))
+ Reserved.set(PPC::R30);
+
+ // Reserve Altivec registers when Altivec is unavailable.
+ if (!Subtarget.hasAltivec())
+ for (TargetRegisterClass::iterator I = PPC::VRRCRegClass.begin(),
+ IE = PPC::VRRCRegClass.end(); I != IE; ++I)
+ Reserved.set(*I);
+
return Reserved;
}
@@ -214,6 +256,8 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
MachineFunction &MF = *MBB.getParent();
// Get the frame info.
MachineFrameInfo *MFI = MF.getFrameInfo();
+ // Get the instruction info.
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
// Determine whether 64-bit pointers are used.
bool LP64 = Subtarget.isPPC64();
DebugLoc dl = MI.getDebugLoc();
@@ -226,8 +270,8 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
// Get stack alignments.
unsigned TargetAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
unsigned MaxAlign = MFI->getMaxAlignment();
- if (MaxAlign > TargetAlign)
- report_fatal_error("Dynamic alloca with large aligns not supported");
+ assert((maxCallFrameSize & (MaxAlign-1)) == 0 &&
+ "Maximum call-frame size not sufficiently aligned");
// Determine the previous frame's address. If FrameSize can't be
// represented as 16 bits or we need special alignment, then we load the
@@ -252,40 +296,62 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
.addImm(0)
.addReg(PPC::R1);
}
-
+
+ bool KillNegSizeReg = MI.getOperand(1).isKill();
+ unsigned NegSizeReg = MI.getOperand(1).getReg();
+
// Grow the stack and update the stack pointer link, then determine the
// address of new allocated space.
if (LP64) {
+ if (MaxAlign > TargetAlign) {
+ unsigned UnalNegSizeReg = NegSizeReg;
+ NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
+
+ // Unfortunately, there is no andi, only andi., and we can't insert that
+ // here because we might clobber cr0 while it is live.
+ BuildMI(MBB, II, dl, TII.get(PPC::LI8), NegSizeReg)
+ .addImm(~(MaxAlign-1));
+
+ unsigned NegSizeReg1 = NegSizeReg;
+ NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
+ BuildMI(MBB, II, dl, TII.get(PPC::AND8), NegSizeReg)
+ .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
+ .addReg(NegSizeReg1, RegState::Kill);
+ KillNegSizeReg = true;
+ }
+
BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1)
.addReg(Reg, RegState::Kill)
.addReg(PPC::X1)
- .addReg(MI.getOperand(1).getReg());
- if (!MI.getOperand(1).isKill())
- BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
- .addReg(PPC::X1)
- .addImm(maxCallFrameSize);
- else
- // Implicitly kill the register.
- BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
- .addReg(PPC::X1)
- .addImm(maxCallFrameSize)
- .addReg(MI.getOperand(1).getReg(), RegState::ImplicitKill);
+ .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
+ BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
+ .addReg(PPC::X1)
+ .addImm(maxCallFrameSize);
} else {
+ if (MaxAlign > TargetAlign) {
+ unsigned UnalNegSizeReg = NegSizeReg;
+ NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
+
+ // Unfortunately, there is no andi, only andi., and we can't insert that
+ // here because we might clobber cr0 while it is live.
+ BuildMI(MBB, II, dl, TII.get(PPC::LI), NegSizeReg)
+ .addImm(~(MaxAlign-1));
+
+ unsigned NegSizeReg1 = NegSizeReg;
+ NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
+ BuildMI(MBB, II, dl, TII.get(PPC::AND), NegSizeReg)
+ .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
+ .addReg(NegSizeReg1, RegState::Kill);
+ KillNegSizeReg = true;
+ }
+
BuildMI(MBB, II, dl, TII.get(PPC::STWUX), PPC::R1)
.addReg(Reg, RegState::Kill)
.addReg(PPC::R1)
- .addReg(MI.getOperand(1).getReg());
-
- if (!MI.getOperand(1).isKill())
- BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
- .addReg(PPC::R1)
- .addImm(maxCallFrameSize);
- else
- // Implicitly kill the register.
- BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
- .addReg(PPC::R1)
- .addImm(maxCallFrameSize)
- .addReg(MI.getOperand(1).getReg(), RegState::ImplicitKill);
+ .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
+ BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
+ .addReg(PPC::R1)
+ .addImm(maxCallFrameSize);
}
// Discard the DYNALLOC instruction.
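
The MaxAlign > TargetAlign path above rounds the (negative) allocation size with an LI of ~(MaxAlign-1) followed by AND, since andi. would clobber cr0. A standalone model of that mask arithmetic (not the in-tree code):

    // Illustrative sketch: AND-ing a negative size with ~(MaxAlign-1) rounds the
    // stack-pointer decrement down so the updated SP stays MaxAlign-aligned.
    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    static int64_t alignNegSize(int64_t NegSize, uint64_t MaxAlign) {
      return NegSize & ~static_cast<int64_t>(MaxAlign - 1);
    }

    int main() {
      const uint64_t MaxAlign = 64;
      uint64_t SP = 0x7fff0000;                      // already MaxAlign-aligned
      for (int64_t Size : {1, 63, 64, 65, 1000}) {
        int64_t Dec = alignNegSize(-Size, MaxAlign); // value fed to STDUX/STWUX
        assert(Dec <= -Size);                        // grows by at least Size
        assert(((SP + Dec) & (MaxAlign - 1)) == 0);  // new SP stays aligned
      }
      return 0;
    }
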
@@ -307,6 +373,7 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
bool LP64 = Subtarget.isPPC64();
@@ -317,8 +384,8 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
unsigned SrcReg = MI.getOperand(0).getReg();
// We need to store the CR in the low 4-bits of the saved value. First, issue
- // an MFCRpsued to save all of the CRBits and, if needed, kill the SrcReg.
- BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFCR8pseud : PPC::MFCRpseud), Reg)
+ // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg.
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
.addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));
// If the saved register wasn't CR0, shift the bits left so that they are in
@@ -350,6 +417,7 @@ void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
bool LP64 = Subtarget.isPPC64();
@@ -377,7 +445,7 @@ void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
.addImm(31);
}
- BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTCRF8 : PPC::MTCRF), DestReg)
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg)
.addReg(Reg, RegState::Kill);
// Discard the pseudo instruction.
@@ -391,6 +459,7 @@ void PPCRegisterInfo::lowerVRSAVESpilling(MachineBasicBlock::iterator II,
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
@@ -415,6 +484,7 @@ void PPCRegisterInfo::lowerVRSAVERestore(MachineBasicBlock::iterator II,
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
@@ -454,9 +524,8 @@ PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
return false;
}
-// Figure out if the offset in the instruction is shifted right two bits. This
-// is true for instructions like "STD", which the machine implicitly adds two
-// low zeros to.
+// Figure out if the offset in the instruction must be a multiple of 4.
+// This is true for instructions like "STD".
static bool usesIXAddr(const MachineInstr &MI) {
unsigned OpC = MI.getOpcode();
@@ -464,6 +533,7 @@ static bool usesIXAddr(const MachineInstr &MI) {
default:
return false;
case PPC::LWA:
+ case PPC::LWA_32:
case PPC::LD:
case PPC::STD:
return true;
@@ -493,9 +563,10 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MachineBasicBlock &MBB = *MI.getParent();
// Get the basic block's function.
MachineFunction &MF = *MBB.getParent();
+ // Get the instruction info.
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
// Get the frame info.
MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
DebugLoc dl = MI.getDebugLoc();
unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
@@ -533,12 +604,8 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
// Replace the FrameIndex with base register with GPR1 (SP) or GPR31 (FP).
-
- bool is64Bit = Subtarget.isPPC64();
- MI.getOperand(FIOperandNum).ChangeToRegister(TFI->hasFP(MF) ?
- (is64Bit ? PPC::X31 : PPC::R31) :
- (is64Bit ? PPC::X1 : PPC::R1),
- false);
+ MI.getOperand(FIOperandNum).ChangeToRegister(
+ FrameIndex < 0 ? getBaseRegister(MF) : getFrameRegister(MF), false);
// Figure out if the offset in the instruction is shifted right two bits.
bool isIXAddr = usesIXAddr(MI);
@@ -549,10 +616,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// Now add the frame object offset to the offset from r1.
int Offset = MFI->getObjectOffset(FrameIndex);
- if (!isIXAddr)
- Offset += MI.getOperand(OffsetOperandNo).getImm();
- else
- Offset += MI.getOperand(OffsetOperandNo).getImm() << 2;
+ Offset += MI.getOperand(OffsetOperandNo).getImm();
// If we're not using a Frame Pointer that has been set to the value of the
// SP before having the stack size subtracted from it, then add the stack size
@@ -560,8 +624,10 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// Naked functions have stack size 0, although getStackSize may not reflect that
// because we didn't call all the pieces that compute it for naked functions.
if (!MF.getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked))
- Offset += MFI->getStackSize();
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked)) {
+ if (!(hasBasePointer(MF) && FrameIndex < 0))
+ Offset += MFI->getStackSize();
+ }
// If we can, encode the offset directly into the instruction. If this is a
// normal PPC "ri" instruction, any 16-bit value can be safely encoded. If
@@ -569,11 +635,9 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// clear can be encoded. This is extremely uncommon, because normally you
// only "std" to a stack slot that is at least 4-byte aligned, but it can
// happen in invalid code.
- if (OpC == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
- (!noImmForm &&
- isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0))) {
- if (isIXAddr)
- Offset >>= 2; // The actual encoded value has the low two bits zero.
+ assert(OpC != PPC::DBG_VALUE &&
+         "This should be handled in a target independent way");
+ if (!noImmForm && isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0)) {
MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
return;
}
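
With the rescaling removed, the immediate operand now always holds a byte offset; the check above only has to verify that it fits the encoding. Restated as a standalone predicate (illustrative, not the in-tree helper):

    // Illustrative sketch: a D-form ("ri") offset must fit in 16 signed bits; a
    // DS-form instruction (LWA/LWA_32/LD/STD, the usesIXAddr() cases) also needs
    // the low two bits clear because they are not encoded.
    #include <cstdint>

    static bool fitsSigned16(int64_t V) { return V >= -32768 && V <= 32767; }

    static bool canEncodeOffsetDirectly(int64_t Offset, bool NoImmForm,
                                        bool IsIXAddr) {
      if (NoImmForm)                     // X-form only: offset must go via a register
        return false;
      return fitsSigned16(Offset) && (!IsIXAddr || (Offset & 3) == 0);
    }
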
@@ -581,6 +645,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// The offset doesn't fit into a single register, scavenge one to build the
// offset in.
+ bool is64Bit = Subtarget.isPPC64();
const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC;
@@ -626,12 +691,42 @@ unsigned PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;
}
-unsigned PPCRegisterInfo::getEHExceptionRegister() const {
- return !Subtarget.isPPC64() ? PPC::R3 : PPC::X3;
+unsigned PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
+ if (!hasBasePointer(MF))
+ return getFrameRegister(MF);
+
+ return Subtarget.isPPC64() ? PPC::X30 : PPC::R30;
}
-unsigned PPCRegisterInfo::getEHHandlerRegister() const {
- return !Subtarget.isPPC64() ? PPC::R4 : PPC::X4;
+bool PPCRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
+ if (!EnableBasePointer)
+ return false;
+ if (AlwaysBasePointer)
+ return true;
+
+ // If we need to realign the stack, then the stack pointer can no longer
+ // serve as an offset into the caller's stack space. As a result, we need a
+ // base pointer.
+ return needsStackRealignment(MF);
+}
+
+bool PPCRegisterInfo::canRealignStack(const MachineFunction &MF) const {
+ if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
+ return false;
+
+ return true;
+}
+
+bool PPCRegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const Function *F = MF.getFunction();
+ unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
+ bool requiresRealignment =
+ ((MFI->getMaxAlignment() > StackAlign) ||
+ F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::StackAlignment));
+
+ return requiresRealignment && canRealignStack(MF);
}
/// Returns true if the instruction's frame index
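
Taken together, hasBasePointer(), canRealignStack() and needsStackRealignment() above form a small decision chain; a standalone restatement of it (illustrative only):

    // Illustrative sketch: realignment is requested when an object needs more
    // than the target stack alignment (or the function carries a StackAlignment
    // attribute); a realigned frame then forces a base pointer, because r1 no
    // longer addresses the caller's fixed (negative frame index) stack objects.
    static bool needsRealignment(unsigned MaxObjectAlign, unsigned StackAlign,
                                 bool HasStackAlignAttr, bool CanRealign) {
      bool Requires = (MaxObjectAlign > StackAlign) || HasStackAlignAttr;
      return Requires && CanRealign;
    }

    static bool usesBasePointer(bool EnableBP, bool AlwaysBP, bool NeedsRealign) {
      if (!EnableBP)                     // -ppc-use-base-pointer=false
        return false;
      return AlwaysBP || NeedsRealign;
    }
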
@@ -650,11 +745,7 @@ needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
}
unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum);
-
- if (!usesIXAddr(*MI))
- Offset += MI->getOperand(OffsetOperandNo).getImm();
- else
- Offset += MI->getOperand(OffsetOperandNo).getImm() << 2;
+ Offset += MI->getOperand(OffsetOperandNo).getImm();
// It's the load/store FI references that cause issues, as it can be difficult
// to materialize the offset if it won't fit in the literal field. Estimate
@@ -711,9 +802,10 @@ materializeFrameBaseRegister(MachineBasicBlock *MBB,
if (Ins != MBB->end())
DL = Ins->getDebugLoc();
+ const MachineFunction &MF = *MBB->getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
const MCInstrDesc &MCID = TII.get(ADDriOpc);
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
- const MachineFunction &MF = *MBB->getParent();
MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));
BuildMI(*MBB, Ins, DL, MCID, BaseReg)
@@ -734,17 +826,7 @@ PPCRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
-
- bool isIXAddr = usesIXAddr(MI);
- if (!isIXAddr)
- Offset += MI.getOperand(OffsetOperandNo).getImm();
- else
- Offset += MI.getOperand(OffsetOperandNo).getImm() << 2;
-
- // Figure out if the offset in the instruction is shifted right two bits.
- if (isIXAddr)
- Offset >>= 2; // The actual encoded value has the low two bits zero.
-
+ Offset += MI.getOperand(OffsetOperandNo).getImm();
MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
}
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.h b/lib/Target/PowerPC/PPCRegisterInfo.h
index 7a48b4b9799e..dd3bb405dac3 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -29,9 +29,8 @@ class Type;
class PPCRegisterInfo : public PPCGenRegisterInfo {
DenseMap<unsigned, unsigned> ImmToIdxMap;
const PPCSubtarget &Subtarget;
- const TargetInstrInfo &TII;
public:
- PPCRegisterInfo(const PPCSubtarget &SubTarget, const TargetInstrInfo &tii);
+ PPCRegisterInfo(const PPCSubtarget &SubTarget);
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
@@ -93,9 +92,11 @@ public:
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const;
- // Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
+ // Base pointer (stack realignment) support.
+ unsigned getBaseRegister(const MachineFunction &MF) const;
+ bool hasBasePointer(const MachineFunction &MF) const;
+ bool canRealignStack(const MachineFunction &MF) const;
+ bool needsStackRealignment(const MachineFunction &MF) const;
};
} // end namespace llvm
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.td b/lib/Target/PowerPC/PPCRegisterInfo.td
index 57a25f5143fa..d566e2c3e52d 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -11,11 +11,11 @@
//===----------------------------------------------------------------------===//
let Namespace = "PPC" in {
-def sub_lt : SubRegIndex;
-def sub_gt : SubRegIndex;
-def sub_eq : SubRegIndex;
-def sub_un : SubRegIndex;
-def sub_32 : SubRegIndex;
+def sub_lt : SubRegIndex<1>;
+def sub_gt : SubRegIndex<1, 1>;
+def sub_eq : SubRegIndex<1, 2>;
+def sub_un : SubRegIndex<1, 3>;
+def sub_32 : SubRegIndex<32>;
}
@@ -94,6 +94,10 @@ def ZERO8 : GP8<ZERO, "0">;
def FP : GPR<0 /* arbitrary */, "**FRAME POINTER**">;
def FP8 : GP8<FP, "**FRAME POINTER**">;
+// Representations of the base pointer used by setjmp.
+def BP : GPR<0 /* arbitrary */, "**BASE POINTER**">;
+def BP8 : GP8<BP, "**BASE POINTER**">;
+
// Condition register bits
def CR0LT : CRBIT< 0, "0">;
def CR0GT : CRBIT< 1, "1">;
@@ -150,7 +154,7 @@ def CTR : SPR<9, "ctr">, DwarfRegNum<[-2, 66]>;
def CTR8 : SPR<9, "ctr">, DwarfRegNum<[66, -2]>;
// VRsave register
-def VRSAVE: SPR<256, "VRsave">, DwarfRegNum<[109]>;
+def VRSAVE: SPR<256, "vrsave">, DwarfRegNum<[109]>;
// Carry bit. In the architecture this is really bit 0 of the XER register
// (which really is SPR register 1); this is the only bit interesting to a
@@ -172,11 +176,11 @@ def RM: SPR<512, "**ROUNDING MODE**">;
// then nonvolatiles in reverse order since stmw/lmw save from rN to r31
def GPRC : RegisterClass<"PPC", [i32], 32, (add (sequence "R%u", 2, 12),
(sequence "R%u", 30, 13),
- R31, R0, R1, FP)>;
+ R31, R0, R1, FP, BP)>;
def G8RC : RegisterClass<"PPC", [i64], 64, (add (sequence "X%u", 2, 12),
(sequence "X%u", 30, 14),
- X31, X13, X0, X1, FP8)>;
+ X31, X13, X0, X1, FP8, BP8)>;
// For some instructions r0 is special (representing the value 0 instead of
// the value in the r0 register), and we use these register subclasses to
diff --git a/lib/Target/PowerPC/PPCSchedule.td b/lib/Target/PowerPC/PPCSchedule.td
index 660c0c3b6359..92ba69c2c6b8 100644
--- a/lib/Target/PowerPC/PPCSchedule.td
+++ b/lib/Target/PowerPC/PPCSchedule.td
@@ -108,6 +108,14 @@ def VecPerm : InstrItinClass;
def VecFPRound : InstrItinClass;
def VecVSL : InstrItinClass;
def VecVSR : InstrItinClass;
+def SprMTMSRD : InstrItinClass;
+def SprSLIE : InstrItinClass;
+def SprSLBIE : InstrItinClass;
+def SprSLBMTE : InstrItinClass;
+def SprSLBMFEE : InstrItinClass;
+def SprSLBIA : InstrItinClass;
+def SprTLBIEL : InstrItinClass;
+def SprTLBIE : InstrItinClass;
//===----------------------------------------------------------------------===//
// Processor instruction itineraries.
diff --git a/lib/Target/PowerPC/PPCScheduleA2.td b/lib/Target/PowerPC/PPCScheduleA2.td
index 8d5838e54e15..1612cd2a0b84 100644
--- a/lib/Target/PowerPC/PPCScheduleA2.td
+++ b/lib/Target/PowerPC/PPCScheduleA2.td
@@ -14,39 +14,8 @@
//===----------------------------------------------------------------------===//
// Functional units on the PowerPC A2 chip sets
//
-def IU0to3_0 : FuncUnit; // Fetch unit 1 to 4 slot 1
-def IU0to3_1 : FuncUnit; // Fetch unit 1 to 4 slot 2
-def IU0to3_2 : FuncUnit; // Fetch unit 1 to 4 slot 3
-def IU0to3_3 : FuncUnit; // Fetch unit 1 to 4 slot 4
-def IU4_0 : FuncUnit; // Instruction buffer slot 1
-def IU4_1 : FuncUnit; // Instruction buffer slot 2
-def IU4_2 : FuncUnit; // Instruction buffer slot 3
-def IU4_3 : FuncUnit; // Instruction buffer slot 4
-def IU4_4 : FuncUnit; // Instruction buffer slot 5
-def IU4_5 : FuncUnit; // Instruction buffer slot 6
-def IU4_6 : FuncUnit; // Instruction buffer slot 7
-def IU4_7 : FuncUnit; // Instruction buffer slot 8
-def IU5 : FuncUnit; // Dependency resolution
-def IU6 : FuncUnit; // Instruction issue
-def RF0 : FuncUnit;
-def XRF1 : FuncUnit;
-def XEX1 : FuncUnit; // Execution stage 1 for the XU pipeline
-def XEX2 : FuncUnit; // Execution stage 2 for the XU pipeline
-def XEX3 : FuncUnit; // Execution stage 3 for the XU pipeline
-def XEX4 : FuncUnit; // Execution stage 4 for the XU pipeline
-def XEX5 : FuncUnit; // Execution stage 5 for the XU pipeline
-def XEX6 : FuncUnit; // Execution stage 6 for the XU pipeline
-def FRF1 : FuncUnit;
-def FEX1 : FuncUnit; // Execution stage 1 for the FU pipeline
-def FEX2 : FuncUnit; // Execution stage 2 for the FU pipeline
-def FEX3 : FuncUnit; // Execution stage 3 for the FU pipeline
-def FEX4 : FuncUnit; // Execution stage 4 for the FU pipeline
-def FEX5 : FuncUnit; // Execution stage 5 for the FU pipeline
-def FEX6 : FuncUnit; // Execution stage 6 for the FU pipeline
-
-def CR_Bypass : Bypass; // The bypass for condition regs.
-//def GPR_Bypass : Bypass; // The bypass for general-purpose regs.
-//def FPR_Bypass : Bypass; // The bypass for floating-point regs.
+def XU : FuncUnit; // XU pipeline
+def FU : FuncUnit; // FI pipeline
//
// This file defines the itinerary class data for the PPC A2 processor.
@@ -55,699 +24,119 @@ def CR_Bypass : Bypass; // The bypass for condition regs.
def PPCA2Itineraries : ProcessorItineraries<
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3,
- IU4_0, IU4_1, IU4_2, IU4_3, IU4_4, IU4_5, IU4_6, IU4_7,
- IU5, IU6, RF0, XRF1, XEX1, XEX2, XEX3, XEX4, XEX5, XEX6,
- FRF1, FEX1, FEX2, FEX3, FEX4, FEX5, FEX6],
- [CR_Bypass, GPR_Bypass, FPR_Bypass], [
- InstrItinData<IntSimple , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntGeneral , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntCompare , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [CR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntDivW , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<38, [XEX6]>],
- [53, 7, 7],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMFFS , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMTFSB0 , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulHW , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulHWU , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulLI , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [15, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntRotate , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntRotateD , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntRotateDI , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntShift , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntTrapW , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntTrapD , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<BrB , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [15, 7, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<BrCR , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [CR_Bypass, CR_Bypass, CR_Bypass]>,
- InstrItinData<BrMCR , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [CR_Bypass, CR_Bypass, CR_Bypass]>,
- InstrItinData<BrMCRX , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7, 7],
- [CR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStDCBA , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [13, 11],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStDCBF , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [13, 11],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStDCBI , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [13, 11],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStLoad , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLoadUpd , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLDU , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStStore , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [13, 7],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStStoreUpd, [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [13, 7],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStICBI , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTFD , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7, 7],
- [NoBypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<LdStSTFDU , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7, 7],
- [NoBypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<LdStLFD , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7, 7],
- [FPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLFDU , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7, 7],
- [FPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLHA , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStLHAU , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStLMW , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [14, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStLWARX , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<13, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [26, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTD , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [13, 7],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStSTDU , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [13, 7],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStSTDCX , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<13, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [26, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTWCX , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<13, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [26, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSync , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<12, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>]>,
- InstrItinData<SprISYNC , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>]>,
- InstrItinData<SprMFSR , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [15, 7],
- [GPR_Bypass, NoBypass]>,
- InstrItinData<SprMTMSR , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [15, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprMTSR , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [15, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprTLBSYNC , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>]>,
- InstrItinData<SprMFCR , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [10, 7],
- [GPR_Bypass, CR_Bypass]>,
- InstrItinData<SprMFMSR , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [15, 7],
- [GPR_Bypass, NoBypass]>,
- InstrItinData<SprMFSPR , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [15, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprMFTB , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>],
- [29, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprMTSPR , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [15, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprMTSRIN , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>],
- [29, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprRFI , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>],
- [29, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprSC , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>],
- [29, 7],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<FPGeneral , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>,
- InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>,
- InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>,
- InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
- [15, 7, 7],
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPAddSub , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>,
- InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>,
- InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>,
- InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
- [15, 7, 7],
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPCompare , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>,
- InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>,
- InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>,
- InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
- [13, 7, 7],
- [CR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPDivD , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<71, [FRF1], 0>,
- InstrStage<71, [FEX1], 0>,
- InstrStage<71, [FEX2], 0>,
- InstrStage<71, [FEX3], 0>,
- InstrStage<71, [FEX4], 0>,
- InstrStage<71, [FEX5], 0>,
- InstrStage<71, [FEX6]>],
- [86, 7, 7],
- [NoBypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPDivS , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<58, [FRF1], 0>,
- InstrStage<58, [FEX1], 0>,
- InstrStage<58, [FEX2], 0>,
- InstrStage<58, [FEX3], 0>,
- InstrStage<58, [FEX4], 0>,
- InstrStage<58, [FEX5], 0>,
- InstrStage<58, [FEX6]>],
- [73, 7, 7],
- [NoBypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPSqrt , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<68, [FRF1], 0>,
- InstrStage<68, [FEX1], 0>,
- InstrStage<68, [FEX2], 0>,
- InstrStage<68, [FEX3], 0>,
- InstrStage<68, [FEX4], 0>,
- InstrStage<68, [FEX5], 0>,
- InstrStage<68, [FEX6]>],
- [86, 7], // FIXME: should be [86, 7] for double
- // and [82, 7] for single. Likewise,
- // the FEX? cycle count should be 68
- // for double and 64 for single.
- [NoBypass, FPR_Bypass]>,
- InstrItinData<FPFused , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>,
- InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>,
- InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>,
- InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
- [15, 7, 7, 7],
- [FPR_Bypass, FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPRes , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>,
- InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>,
- InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>,
- InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
- [15, 7],
- [FPR_Bypass, FPR_Bypass]>
+ [XU, FU], [], [
+ InstrItinData<IntSimple , [InstrStage<1, [XU]>],
+ [1, 1, 1]>,
+ InstrItinData<IntGeneral , [InstrStage<1, [XU]>],
+ [2, 1, 1]>,
+ InstrItinData<IntCompare , [InstrStage<1, [XU]>],
+ [2, 1, 1]>,
+ InstrItinData<IntDivW , [InstrStage<1, [XU]>],
+ [39, 1, 1]>,
+ InstrItinData<IntDivD , [InstrStage<1, [XU]>],
+ [71, 1, 1]>,
+ InstrItinData<IntMulHW , [InstrStage<1, [XU]>],
+ [5, 1, 1]>,
+ InstrItinData<IntMulHWU , [InstrStage<1, [XU]>],
+ [5, 1, 1]>,
+ InstrItinData<IntMulLI , [InstrStage<1, [XU]>],
+ [6, 1, 1]>,
+ InstrItinData<IntRotate , [InstrStage<1, [XU]>],
+ [2, 1, 1]>,
+ InstrItinData<IntRotateD , [InstrStage<1, [XU]>],
+ [2, 1, 1]>,
+ InstrItinData<IntRotateDI , [InstrStage<1, [XU]>],
+ [2, 1, 1]>,
+ InstrItinData<IntShift , [InstrStage<1, [XU]>],
+ [2, 1, 1]>,
+ InstrItinData<IntTrapW , [InstrStage<1, [XU]>],
+ [2, 1]>,
+ InstrItinData<IntTrapD , [InstrStage<1, [XU]>],
+ [2, 1]>,
+ InstrItinData<BrB , [InstrStage<1, [XU]>],
+ [6, 1, 1]>,
+ InstrItinData<BrCR , [InstrStage<1, [XU]>],
+ [1, 1, 1]>,
+ InstrItinData<BrMCR , [InstrStage<1, [XU]>],
+ [5, 1, 1]>,
+ InstrItinData<BrMCRX , [InstrStage<1, [XU]>],
+ [1, 1, 1]>,
+ InstrItinData<LdStDCBA , [InstrStage<1, [XU]>],
+ [1, 1, 1]>,
+ InstrItinData<LdStDCBF , [InstrStage<1, [XU]>],
+ [1, 1, 1]>,
+ InstrItinData<LdStDCBI , [InstrStage<1, [XU]>],
+ [1, 1, 1]>,
+ InstrItinData<LdStLoad , [InstrStage<1, [XU]>],
+ [6, 1, 1]>,
+ InstrItinData<LdStLoadUpd , [InstrStage<1, [XU]>],
+ [6, 8, 1, 1]>,
+ InstrItinData<LdStLDU , [InstrStage<1, [XU]>],
+ [6, 1, 1]>,
+ InstrItinData<LdStStore , [InstrStage<1, [XU]>],
+ [1, 1, 1]>,
+ InstrItinData<LdStStoreUpd, [InstrStage<1, [XU]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<LdStICBI, [InstrStage<1, [XU]>],
+ [16, 1, 1]>,
+ InstrItinData<LdStSTFD , [InstrStage<1, [XU]>],
+ [1, 1, 1]>,
+ InstrItinData<LdStSTFDU , [InstrStage<1, [XU]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<LdStLFD , [InstrStage<1, [XU]>],
+ [7, 1, 1]>,
+ InstrItinData<LdStLFDU , [InstrStage<1, [XU]>],
+ [7, 9, 1, 1]>,
+ InstrItinData<LdStLHA , [InstrStage<1, [XU]>],
+ [6, 1, 1]>,
+ InstrItinData<LdStLHAU , [InstrStage<1, [XU]>],
+ [6, 8, 1, 1]>,
+ InstrItinData<LdStLWARX , [InstrStage<1, [XU]>],
+ [82, 1, 1]>, // L2 latency
+ InstrItinData<LdStSTD , [InstrStage<1, [XU]>],
+ [1, 1, 1]>,
+ InstrItinData<LdStSTDU , [InstrStage<1, [XU]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<LdStSTDCX , [InstrStage<1, [XU]>],
+ [82, 1, 1]>, // L2 latency
+ InstrItinData<LdStSTWCX , [InstrStage<1, [XU]>],
+ [82, 1, 1]>, // L2 latency
+ InstrItinData<LdStSync , [InstrStage<1, [XU]>],
+ [6]>,
+ InstrItinData<SprISYNC , [InstrStage<1, [XU]>],
+ [16]>,
+ InstrItinData<SprMTMSR , [InstrStage<1, [XU]>],
+ [16, 1]>,
+ InstrItinData<SprMFCR , [InstrStage<1, [XU]>],
+ [6, 1]>,
+ InstrItinData<SprMFMSR , [InstrStage<1, [XU]>],
+ [4, 1]>,
+ InstrItinData<SprMFSPR , [InstrStage<1, [XU]>],
+ [6, 1]>,
+ InstrItinData<SprMFTB , [InstrStage<1, [XU]>],
+ [4, 1]>,
+ InstrItinData<SprMTSPR , [InstrStage<1, [XU]>],
+ [6, 1]>,
+ InstrItinData<SprRFI , [InstrStage<1, [XU]>],
+ [16]>,
+ InstrItinData<SprSC , [InstrStage<1, [XU]>],
+ [16]>,
+ InstrItinData<FPGeneral , [InstrStage<1, [FU]>],
+ [6, 1, 1]>,
+ InstrItinData<FPAddSub , [InstrStage<1, [FU]>],
+ [6, 1, 1]>,
+ InstrItinData<FPCompare , [InstrStage<1, [FU]>],
+ [5, 1, 1]>,
+ InstrItinData<FPDivD , [InstrStage<1, [FU]>],
+ [72, 1, 1]>,
+ InstrItinData<FPDivS , [InstrStage<1, [FU]>],
+ [59, 1, 1]>,
+ InstrItinData<FPSqrt , [InstrStage<1, [FU]>],
+ [69, 1, 1]>,
+ InstrItinData<FPFused , [InstrStage<1, [FU]>],
+ [6, 1, 1, 1]>,
+ InstrItinData<FPRes , [InstrStage<1, [FU]>],
+ [6, 1]>
]>;
// ===---------------------------------------------------------------------===//
diff --git a/lib/Target/PowerPC/PPCScheduleE500mc.td b/lib/Target/PowerPC/PPCScheduleE500mc.td
index 9bb779a0e62b..c189b9ed9a6c 100644
--- a/lib/Target/PowerPC/PPCScheduleE500mc.td
+++ b/lib/Target/PowerPC/PPCScheduleE500mc.td
@@ -36,6 +36,8 @@ def CFX_0 : FuncUnit; // CFX pipeline
def LSU_0 : FuncUnit; // LSU pipeline
def FPU_0 : FuncUnit; // FPU pipeline
+def CR_Bypass : Bypass;
+
def PPCE500mcItineraries : ProcessorItineraries<
[DIS0, DIS1, SFX0, SFX1, BU, CFX_DivBypass, CFX_0, LSU_0, FPU_0],
[CR_Bypass, GPR_Bypass, FPR_Bypass], [
diff --git a/lib/Target/PowerPC/PPCScheduleE5500.td b/lib/Target/PowerPC/PPCScheduleE5500.td
index d7e11acd9fc7..7a24d20323da 100644
--- a/lib/Target/PowerPC/PPCScheduleE5500.td
+++ b/lib/Target/PowerPC/PPCScheduleE5500.td
@@ -39,6 +39,7 @@ def CFX_1 : FuncUnit; // CFX pipeline stage 1
// def LSU_0 : FuncUnit; // LSU pipeline
// def FPU_0 : FuncUnit; // FPU pipeline
+// def CR_Bypass : Bypass;
def PPCE5500Itineraries : ProcessorItineraries<
[DIS0, DIS1, SFX0, SFX1, BU, CFX_DivBypass, CFX_0, CFX_1,
diff --git a/lib/Target/PowerPC/PPCSubtarget.cpp b/lib/Target/PowerPC/PPCSubtarget.cpp
index a8f2b3f47d1b..7231ab101a26 100644
--- a/lib/Target/PowerPC/PPCSubtarget.cpp
+++ b/lib/Target/PowerPC/PPCSubtarget.cpp
@@ -14,7 +14,11 @@
#include "PPCSubtarget.h"
#include "PPC.h"
#include "PPCRegisterInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineScheduler.h"
+#include "llvm/IR/Attributes.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Function.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
@@ -29,32 +33,70 @@ using namespace llvm;
PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool is64Bit)
: PPCGenSubtargetInfo(TT, CPU, FS)
- , StackAlignment(16)
- , DarwinDirective(PPC::DIR_NONE)
- , HasMFOCRF(false)
- , Has64BitSupport(false)
- , Use64BitRegs(false)
, IsPPC64(is64Bit)
- , HasAltivec(false)
- , HasQPX(false)
- , HasFSQRT(false)
- , HasFRE(false)
- , HasFRES(false)
- , HasFRSQRTE(false)
- , HasFRSQRTES(false)
- , HasRecipPrec(false)
- , HasSTFIWX(false)
- , HasLFIWAX(false)
- , HasFPRND(false)
- , HasFPCVT(false)
- , HasISEL(false)
- , HasPOPCNTD(false)
- , HasLDBRX(false)
- , IsBookE(false)
- , HasLazyResolverStubs(false)
- , IsJITCodeModel(false)
, TargetTriple(TT) {
+ initializeEnvironment();
+ resetSubtargetFeatures(CPU, FS);
+}
+
+/// SetJITMode - This is called to inform the subtarget info that we are
+/// producing code for the JIT.
+void PPCSubtarget::SetJITMode() {
+ // JIT mode doesn't want lazy resolver stubs, it knows exactly where
+ // everything is. This matters for PPC64, which codegens in PIC mode without
+ // stubs.
+ HasLazyResolverStubs = false;
+
+ // Calls to external functions need to use indirect calls
+ IsJITCodeModel = true;
+}
+
+void PPCSubtarget::resetSubtargetFeatures(const MachineFunction *MF) {
+ AttributeSet FnAttrs = MF->getFunction()->getAttributes();
+ Attribute CPUAttr = FnAttrs.getAttribute(AttributeSet::FunctionIndex,
+ "target-cpu");
+ Attribute FSAttr = FnAttrs.getAttribute(AttributeSet::FunctionIndex,
+ "target-features");
+ std::string CPU =
+ !CPUAttr.hasAttribute(Attribute::None) ? CPUAttr.getValueAsString() : "";
+ std::string FS =
+ !FSAttr.hasAttribute(Attribute::None) ? FSAttr.getValueAsString() : "";
+ if (!FS.empty()) {
+ initializeEnvironment();
+ resetSubtargetFeatures(CPU, FS);
+ }
+}
+void PPCSubtarget::initializeEnvironment() {
+ StackAlignment = 16;
+ DarwinDirective = PPC::DIR_NONE;
+ HasMFOCRF = false;
+ Has64BitSupport = false;
+ Use64BitRegs = false;
+ HasAltivec = false;
+ HasQPX = false;
+ HasFCPSGN = false;
+ HasFSQRT = false;
+ HasFRE = false;
+ HasFRES = false;
+ HasFRSQRTE = false;
+ HasFRSQRTES = false;
+ HasRecipPrec = false;
+ HasSTFIWX = false;
+ HasLFIWAX = false;
+ HasFPRND = false;
+ HasFPCVT = false;
+ HasISEL = false;
+ HasPOPCNTD = false;
+ HasLDBRX = false;
+ IsBookE = false;
+ DeprecatedMFTB = false;
+ DeprecatedDST = false;
+ HasLazyResolverStubs = false;
+ IsJITCodeModel = false;
+}
+
+void PPCSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
// Determine default and user specified characteristics
std::string CPUName = CPU;
if (CPUName.empty())
@@ -72,7 +114,7 @@ PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
std::string FullFS = FS;
// If we are generating code for ppc64, verify that options make sense.
- if (is64Bit) {
+ if (IsPPC64) {
Has64BitSupport = true;
// Silently force 64-bit register use on ppc64.
Use64BitRegs = true;
@@ -99,21 +141,11 @@ PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
// is enabled because external functions will assume this alignment.
if (hasQPX() || isBGQ())
StackAlignment = 32;
-}
-
-/// SetJITMode - This is called to inform the subtarget info that we are
-/// producing code for the JIT.
-void PPCSubtarget::SetJITMode() {
- // JIT mode doesn't want lazy resolver stubs, it knows exactly where
- // everything is. This matters for PPC64, which codegens in PIC mode without
- // stubs.
- HasLazyResolverStubs = false;
- // Calls to external functions need to use indirect calls
- IsJITCodeModel = true;
+ // Determine endianness.
+ IsLittleEndian = (TargetTriple.getArch() == Triple::ppc64le);
}
-
/// hasLazyResolverStub - Return true if accesses to the specified global have
/// to go through a dyld lazy resolution stub. This means that an extra load
/// is required to get the address of the global.
@@ -135,14 +167,7 @@ bool PPCSubtarget::enablePostRAScheduler(
CodeGenOpt::Level OptLevel,
TargetSubtargetInfo::AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const {
- // FIXME: It would be best to use TargetSubtargetInfo::ANTIDEP_ALL here,
- // but we can't because we can't reassign the cr registers. There is a
- // dependence between the cr register and the RLWINM instruction used
- // to extract its value which the anti-dependency breaker can't currently
- // see. Maybe we should make a late-expanded pseudo to encode this dependency.
- // (the relevant code is in PPCDAGToDAGISel::SelectSETCC)
-
- Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
+ Mode = TargetSubtargetInfo::ANTIDEP_ALL;
CriticalPathRCs.clear();
@@ -151,9 +176,44 @@ bool PPCSubtarget::enablePostRAScheduler(
else
CriticalPathRCs.push_back(&PPC::GPRCRegClass);
- CriticalPathRCs.push_back(&PPC::F8RCRegClass);
- CriticalPathRCs.push_back(&PPC::VRRCRegClass);
-
return OptLevel >= CodeGenOpt::Default;
}
+// Embedded cores need aggressive scheduling.
+static bool needsAggressiveScheduling(unsigned Directive) {
+ switch (Directive) {
+ default: return false;
+ case PPC::DIR_440:
+ case PPC::DIR_A2:
+ case PPC::DIR_E500mc:
+ case PPC::DIR_E5500:
+ return true;
+ }
+}
+
+bool PPCSubtarget::enableMachineScheduler() const {
+ // Enable MI scheduling for the embedded cores.
+ // FIXME: Enable this for all cores (some additional modeling
+ // may be necessary).
+ return needsAggressiveScheduling(DarwinDirective);
+}
+
+void PPCSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
+ MachineInstr *begin,
+ MachineInstr *end,
+ unsigned NumRegionInstrs) const {
+ if (needsAggressiveScheduling(DarwinDirective)) {
+ Policy.OnlyTopDown = false;
+ Policy.OnlyBottomUp = false;
+ }
+
+ // Spilling is generally expensive on all PPC cores, so always enable
+ // register-pressure tracking.
+ Policy.ShouldTrackPressure = true;
+}
+
+bool PPCSubtarget::useAA() const {
+ // Use AA during code generation for the embedded cores.
+ return needsAggressiveScheduling(DarwinDirective);
+}
+
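The PPCSubtarget changes above split feature setup into initializeEnvironment(), which restores every flag to its default, and resetSubtargetFeatures(), which re-derives the flags either from explicit CPU/FS strings or from a function's "target-cpu"/"target-features" attributes. Below is a minimal, self-contained C++ sketch of that reset-per-function pattern; ToySubtarget and the feature names "64bit"/"altivec" are invented for illustration and are not the PPC backend's real definitions.

  #include <sstream>
  #include <string>

  // Hypothetical stand-in for a subtarget: a handful of boolean features.
  struct ToySubtarget {
    bool Has64Bit = false;
    bool HasAltivec = false;

    // Restore every feature to its default before re-parsing.
    void initializeEnvironment() { Has64Bit = false; HasAltivec = false; }

    // Re-derive features from a comma-separated "+feat,-feat" string,
    // mirroring the initializeEnvironment()/resetSubtargetFeatures() split.
    void resetSubtargetFeatures(const std::string &FS) {
      initializeEnvironment();
      std::stringstream SS(FS);
      std::string Tok;
      while (std::getline(SS, Tok, ',')) {
        if (Tok.size() < 2)
          continue;
        bool Enable = Tok[0] == '+';
        std::string Name = Tok.substr(1);
        if (Name == "64bit")   Has64Bit = Enable;
        if (Name == "altivec") HasAltivec = Enable;
      }
    }
  };

  int main() {
    ToySubtarget ST;
    ST.resetSubtargetFeatures("+64bit,+altivec");   // module-level default
    ST.resetSubtargetFeatures("+64bit,-altivec");   // per-function override
    return ST.HasAltivec;                           // 0: the override took effect
  }

Re-initializing before each parse keeps a function compiled with a narrower feature string from inheriting flags left over from the previously compiled function.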
diff --git a/lib/Target/PowerPC/PPCSubtarget.h b/lib/Target/PowerPC/PPCSubtarget.h
index 65b4d211fc6a..c863a6ecc777 100644
--- a/lib/Target/PowerPC/PPCSubtarget.h
+++ b/lib/Target/PowerPC/PPCSubtarget.h
@@ -76,6 +76,8 @@ protected:
bool IsPPC64;
bool HasAltivec;
bool HasQPX;
+ bool HasVSX;
+ bool HasFCPSGN;
bool HasFSQRT;
bool HasFRE, HasFRES, HasFRSQRTE, HasFRSQRTES;
bool HasRecipPrec;
@@ -87,8 +89,11 @@ protected:
bool HasPOPCNTD;
bool HasLDBRX;
bool IsBookE;
+ bool DeprecatedMFTB;
+ bool DeprecatedDST;
bool HasLazyResolverStubs;
bool IsJITCodeModel;
+ bool IsLittleEndian;
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
@@ -128,7 +133,7 @@ public:
// documentation are wrong; these are correct (i.e. "what gcc does").
if (isPPC64() && isSVR4ABI()) {
if (TargetTriple.getOS() == llvm::Triple::FreeBSD)
- return "E-p:64:64-f64:64:64-i64:64:64-f128:64:64-v128:128:128-n32:64";
+ return "E-p:64:64-f64:64:64-i64:64:64-v128:128:128-n32:64";
else
return "E-p:64:64-f64:64:64-i64:64:64-f128:128:128-v128:128:128-n32:64";
}
@@ -137,6 +142,13 @@ public:
: "E-p:32:32-f64:64:64-i64:64:64-f128:64:128-n32";
}
+ /// \brief Reset the features for the PowerPC target.
+ virtual void resetSubtargetFeatures(const MachineFunction *MF);
+private:
+ void initializeEnvironment();
+ void resetSubtargetFeatures(StringRef CPU, StringRef FS);
+
+public:
/// isPPC64 - Return true if we are generating code for 64-bit pointer mode.
///
bool isPPC64() const { return IsPPC64; }
@@ -159,7 +171,11 @@ public:
// isJITCodeModel - True if we're generating code for the JIT
bool isJITCodeModel() const { return IsJITCodeModel; }
+ // isLittleEndian - True if generating little-endian code
+ bool isLittleEndian() const { return IsLittleEndian; }
+
// Specific obvious features.
+ bool hasFCPSGN() const { return HasFCPSGN; }
bool hasFSQRT() const { return HasFSQRT; }
bool hasFRE() const { return HasFRE; }
bool hasFRES() const { return HasFRES; }
@@ -177,6 +193,8 @@ public:
bool hasPOPCNTD() const { return HasPOPCNTD; }
bool hasLDBRX() const { return HasLDBRX; }
bool isBookE() const { return IsBookE; }
+ bool isDeprecatedMFTB() const { return DeprecatedMFTB; }
+ bool isDeprecatedDST() const { return DeprecatedDST; }
const Triple &getTargetTriple() const { return TargetTriple; }
@@ -194,6 +212,14 @@ public:
bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
TargetSubtargetInfo::AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const;
+
+ // Scheduling customization.
+ bool enableMachineScheduler() const;
+ void overrideSchedPolicy(MachineSchedPolicy &Policy,
+ MachineInstr *begin,
+ MachineInstr *end,
+ unsigned NumRegionInstrs) const;
+ bool useAA() const;
};
} // End llvm namespace
diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp
index 14dc794195da..9acefe53ce4a 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -30,6 +30,7 @@ extern "C" void LLVMInitializePowerPCTarget() {
// Register the targets
RegisterTargetMachine<PPC32TargetMachine> A(ThePPC32Target);
RegisterTargetMachine<PPC64TargetMachine> B(ThePPC64Target);
+ RegisterTargetMachine<PPC64TargetMachine> C(ThePPC64LETarget);
}
PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT,
@@ -48,6 +49,7 @@ PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT,
// The binutils for the BG/P are too old for CFI.
if (Subtarget.isBGP())
setMCUseCFI(false);
+ initAsmInfo();
}
void PPC32TargetMachine::anchor() { }
@@ -90,7 +92,7 @@ public:
return *getPPCTargetMachine().getSubtargetImpl();
}
- virtual bool addPreRegAlloc();
+ virtual bool addPreISel();
virtual bool addILPOpts();
virtual bool addInstSelector();
virtual bool addPreSched2();
@@ -102,9 +104,9 @@ TargetPassConfig *PPCTargetMachine::createPassConfig(PassManagerBase &PM) {
return new PPCPassConfig(this, PM);
}
-bool PPCPassConfig::addPreRegAlloc() {
+bool PPCPassConfig::addPreISel() {
if (!DisableCTRLoops && getOptLevel() != CodeGenOpt::None)
- addPass(createPPCCTRLoops());
+ addPass(createPPCCTRLoops(getPPCTargetMachine()));
return false;
}
@@ -121,6 +123,12 @@ bool PPCPassConfig::addILPOpts() {
bool PPCPassConfig::addInstSelector() {
// Install an instruction selector.
addPass(createPPCISelDag(getPPCTargetMachine()));
+
+#ifndef NDEBUG
+ if (!DisableCTRLoops && getOptLevel() != CodeGenOpt::None)
+ addPass(createPPCCTRLoopsVerify());
+#endif
+
return false;
}
@@ -155,7 +163,7 @@ void PPCTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
// Add first the target-independent BasicTTI pass, then our PPC pass. This
// allows the PPC pass to delegate to the target independent layer when
// appropriate.
- PM.add(createBasicTargetTransformInfoPass(getTargetLowering()));
+ PM.add(createBasicTargetTransformInfoPass(this));
PM.add(createPPCTargetTransformInfoPass(this));
}
diff --git a/lib/Target/PowerPC/PPCTargetObjectFile.cpp b/lib/Target/PowerPC/PPCTargetObjectFile.cpp
new file mode 100644
index 000000000000..ec1e606eee56
--- /dev/null
+++ b/lib/Target/PowerPC/PPCTargetObjectFile.cpp
@@ -0,0 +1,67 @@
+//===-- PPCTargetObjectFile.cpp - PPC Object Info -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCTargetObjectFile.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/Target/Mangler.h"
+
+using namespace llvm;
+
+void
+PPC64LinuxTargetObjectFile::
+Initialize(MCContext &Ctx, const TargetMachine &TM) {
+ TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+ InitializeELF(TM.Options.UseInitArray);
+}
+
+const MCSection * PPC64LinuxTargetObjectFile::
+SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler *Mang, const TargetMachine &TM) const {
+
+ const MCSection *DefaultSection =
+ TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind, Mang, TM);
+
+ if (DefaultSection != ReadOnlySection)
+ return DefaultSection;
+
+  // Here we override ReadOnlySection to DataRelROSection for PPC64 SVR4 ABI
+ // when we have a constant that contains global relocations. This is
+ // necessary because of this ABI's handling of pointers to functions in
+ // a shared library. The address of a function is actually the address
+ // of a function descriptor, which resides in the .opd section. Generated
+ // code uses the descriptor directly rather than going via the GOT as some
+ // other ABIs do, which means that initialized function pointers must
+ // reference the descriptor. The linker must convert copy relocs of
+ // pointers to functions in shared libraries into dynamic relocations,
+ // because of an ordering problem with initialization of copy relocs and
+ // PLT entries. The dynamic relocation will be initialized by the dynamic
+ // linker, so we must use DataRelROSection instead of ReadOnlySection.
+ // For more information, see the description of ELIMINATE_COPY_RELOCS in
+ // GNU ld.
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
+
+ if (GVar && GVar->isConstant() &&
+ (GVar->getInitializer()->getRelocationInfo() ==
+ Constant::GlobalRelocations))
+ return DataRelROSection;
+
+ return DefaultSection;
+}
+
+const MCExpr *PPC64LinuxTargetObjectFile::
+getDebugThreadLocalSymbol(const MCSymbol *Sym) const {
+ const MCExpr *Expr =
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_PPC_DTPREL, getContext());
+ return MCBinaryExpr::CreateAdd(Expr,
+ MCConstantExpr::Create(0x8000, getContext()),
+ getContext());
+}
+
diff --git a/lib/Target/PowerPC/PPCTargetObjectFile.h b/lib/Target/PowerPC/PPCTargetObjectFile.h
new file mode 100644
index 000000000000..262c52213d29
--- /dev/null
+++ b/lib/Target/PowerPC/PPCTargetObjectFile.h
@@ -0,0 +1,35 @@
+//===-- PPCTargetObjectFile.h - PPC Object Info -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_PPC_TARGETOBJECTFILE_H
+#define LLVM_TARGET_PPC_TARGETOBJECTFILE_H
+
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+ /// PPC64LinuxTargetObjectFile - This implementation is used for
+ /// 64-bit PowerPC Linux.
+ class PPC64LinuxTargetObjectFile : public TargetLoweringObjectFileELF {
+
+ virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+
+ virtual const MCSection *
+ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler *Mang, const TargetMachine &TM) const;
+
+ /// \brief Describe a TLS variable address within debug info.
+ virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;
+ };
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/PowerPC/PPCTargetStreamer.h b/lib/Target/PowerPC/PPCTargetStreamer.h
new file mode 100644
index 000000000000..e876be16a9b3
--- /dev/null
+++ b/lib/Target/PowerPC/PPCTargetStreamer.h
@@ -0,0 +1,23 @@
+//===-- PPCTargetStreamer.h - PPC Target Streamer ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PPCTARGETSTREAMER_H
+#define PPCTARGETSTREAMER_H
+
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class PPCTargetStreamer : public MCTargetStreamer {
+public:
+ virtual ~PPCTargetStreamer();
+ virtual void emitTCEntry(const MCSymbol &S) = 0;
+};
+}
+
+#endif
diff --git a/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 2504ba70c25a..8879630270e2 100644
--- a/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -77,6 +77,7 @@ public:
/// \name Scalar TTI Implementations
/// @{
virtual PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
+ virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const;
/// @}
@@ -129,6 +130,14 @@ PPCTTI::PopcntSupportKind PPCTTI::getPopcntSupport(unsigned TyWidth) const {
return PSK_Software;
}
+void PPCTTI::getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const {
+ if (ST->getDarwinDirective() == PPC::DIR_A2) {
+ // The A2 is in-order with a deep pipeline, and concatenation unrolling
+ // helps expose latency-hiding opportunities to the instruction scheduler.
+ UP.Partial = UP.Runtime = true;
+ }
+}
+
unsigned PPCTTI::getNumberOfRegisters(bool Vector) const {
if (Vector && !ST->hasAltivec())
return 0;
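getUnrollingPreferences() above turns on partial and runtime unrolling when targeting the A2, on the theory that an in-order, deep-pipeline core benefits from more independent work per iteration. The sketch below shows what that transformation looks like on a plain C++ loop; the loop body, the factor of 2, and the function names are hypothetical, and the real unroller chooses its own factors.

  #include <cstddef>

  // Original loop: one multiply per iteration, so little work is in flight
  // at any time on a deeply pipelined, in-order core.
  void scale(float *a, float k, std::size_t n) {
    for (std::size_t i = 0; i < n; ++i)
      a[i] *= k;
  }

  // Conceptually what partial unrolling by 2 produces: two independent
  // load/multiply/store chains per iteration that the scheduler can overlap.
  // The trailing loop handles the remainder; runtime unrolling inserts it
  // when the trip count is not known at compile time.
  void scale_unrolled2(float *a, float k, std::size_t n) {
    std::size_t i = 0;
    for (; i + 1 < n; i += 2) {
      a[i]     *= k;
      a[i + 1] *= k;
    }
    for (; i < n; ++i)   // remainder
      a[i] *= k;
  }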
diff --git a/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp b/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp
index fa44331b8af6..5727dbc4170d 100644
--- a/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp
+++ b/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp
@@ -12,7 +12,7 @@
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
-Target llvm::ThePPC32Target, llvm::ThePPC64Target;
+Target llvm::ThePPC32Target, llvm::ThePPC64Target, llvm::ThePPC64LETarget;
extern "C" void LLVMInitializePowerPCTargetInfo() {
RegisterTarget<Triple::ppc, /*HasJIT=*/true>
@@ -20,4 +20,7 @@ extern "C" void LLVMInitializePowerPCTargetInfo() {
RegisterTarget<Triple::ppc64, /*HasJIT=*/true>
Y(ThePPC64Target, "ppc64", "PowerPC 64");
+
+ RegisterTarget<Triple::ppc64le, /*HasJIT=*/true>
+ Z(ThePPC64LETarget, "ppc64le", "PowerPC 64 LE");
}
diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h
index 9792bd86f298..025b28e32bfe 100644
--- a/lib/Target/R600/AMDGPU.h
+++ b/lib/Target/R600/AMDGPU.h
@@ -11,32 +11,47 @@
#ifndef AMDGPU_H
#define AMDGPU_H
-#include "AMDGPUTargetMachine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
-class FunctionPass;
+class AMDGPUInstrPrinter;
class AMDGPUTargetMachine;
+class FunctionPass;
+class MCAsmInfo;
+class raw_ostream;
+class Target;
+class TargetMachine;
// R600 Passes
-FunctionPass* createR600KernelParametersPass(const DataLayout *TD);
+FunctionPass *createR600VectorRegMerger(TargetMachine &tm);
+FunctionPass *createR600TextureIntrinsicsReplacer();
FunctionPass *createR600ExpandSpecialInstrsPass(TargetMachine &tm);
FunctionPass *createR600EmitClauseMarkers(TargetMachine &tm);
+FunctionPass *createR600ClauseMergePass(TargetMachine &tm);
FunctionPass *createR600Packetizer(TargetMachine &tm);
FunctionPass *createR600ControlFlowFinalizer(TargetMachine &tm);
+FunctionPass *createAMDGPUCFGStructurizerPass(TargetMachine &tm);
// SI Passes
+FunctionPass *createSITypeRewriter();
FunctionPass *createSIAnnotateControlFlowPass();
FunctionPass *createSILowerControlFlowPass(TargetMachine &tm);
+FunctionPass *createSIFixSGPRCopiesPass(TargetMachine &tm);
FunctionPass *createSICodeEmitterPass(formatted_raw_ostream &OS);
FunctionPass *createSIInsertWaits(TargetMachine &tm);
// Passes common to R600 and SI
Pass *createAMDGPUStructurizeCFGPass();
FunctionPass *createAMDGPUConvertToISAPass(TargetMachine &tm);
-FunctionPass* createAMDGPUIndirectAddressingPass(TargetMachine &tm);
+FunctionPass *createAMDGPUISelDag(TargetMachine &tm);
+
+/// \brief Creates an AMDGPU-specific Target Transformation Info pass.
+ImmutablePass *
+createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM);
+
+extern Target TheAMDGPUTarget;
} // End namespace llvm
@@ -49,4 +64,47 @@ namespace ShaderType {
};
}
+/// OpenCL uses address spaces to differentiate between
+/// various memory regions on the hardware. On the CPU
+/// all of the address spaces point to the same memory,
+/// however on the GPU, each address space points to
+/// a separate piece of memory that is distinct from other
+/// memory locations.
+namespace AMDGPUAS {
+enum AddressSpaces {
+ PRIVATE_ADDRESS = 0, ///< Address space for private memory.
+ GLOBAL_ADDRESS = 1, ///< Address space for global memory (RAT0, VTX0).
+ CONSTANT_ADDRESS = 2, ///< Address space for constant memory
+ LOCAL_ADDRESS = 3, ///< Address space for local memory.
+ REGION_ADDRESS = 4, ///< Address space for region memory.
+ ADDRESS_NONE = 5, ///< Address space for unknown memory.
+  PARAM_D_ADDRESS  = 6, ///< Address space for direct addressable parameter memory (CONST0)
+  PARAM_I_ADDRESS  = 7, ///< Address space for indirect addressable parameter memory (VTX1)
+
+ // Do not re-order the CONSTANT_BUFFER_* enums. Several places depend on this
+ // order to be able to dynamically index a constant buffer, for example:
+ //
+ // ConstantBufferAS = CONSTANT_BUFFER_0 + CBIdx
+
+ CONSTANT_BUFFER_0 = 8,
+ CONSTANT_BUFFER_1 = 9,
+ CONSTANT_BUFFER_2 = 10,
+ CONSTANT_BUFFER_3 = 11,
+ CONSTANT_BUFFER_4 = 12,
+ CONSTANT_BUFFER_5 = 13,
+ CONSTANT_BUFFER_6 = 14,
+ CONSTANT_BUFFER_7 = 15,
+ CONSTANT_BUFFER_8 = 16,
+ CONSTANT_BUFFER_9 = 17,
+ CONSTANT_BUFFER_10 = 18,
+ CONSTANT_BUFFER_11 = 19,
+ CONSTANT_BUFFER_12 = 20,
+ CONSTANT_BUFFER_13 = 21,
+ CONSTANT_BUFFER_14 = 22,
+ CONSTANT_BUFFER_15 = 23,
+ LAST_ADDRESS = 24
+};
+
+} // namespace AMDGPUAS
+
#endif // AMDGPU_H
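The comment above the CONSTANT_BUFFER_* enumerators matters because dynamic indexing of a constant buffer is done with plain integer arithmetic on the address-space number. A minimal sketch of that computation follows; the helper name is made up, and only the enumerator values come from the header.

  // Mirrors the AMDGPUAS enum above; only the entries needed for the example.
  enum AddressSpaces {
    CONSTANT_BUFFER_0  = 8,
    CONSTANT_BUFFER_1  = 9,
    // ... CONSTANT_BUFFER_2..14 follow consecutively ...
    CONSTANT_BUFFER_15 = 23,
  };

  // Dynamic indexing only works because the enumerators are consecutive:
  // address space = CONSTANT_BUFFER_0 + CBIdx.
  unsigned constantBufferAddressSpace(unsigned CBIdx) {
    return CONSTANT_BUFFER_0 + CBIdx;   // e.g. CBIdx = 3 -> 11 (CONSTANT_BUFFER_3)
  }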
diff --git a/lib/Target/R600/AMDGPU.td b/lib/Target/R600/AMDGPU.td
index 1a26c77d6bb2..182235b27c48 100644
--- a/lib/Target/R600/AMDGPU.td
+++ b/lib/Target/R600/AMDGPU.td
@@ -10,6 +10,91 @@
// Include AMDIL TD files
include "AMDILBase.td"
+//===----------------------------------------------------------------------===//
+// Subtarget Features
+//===----------------------------------------------------------------------===//
+
+// Debugging Features
+
+def FeatureDumpCode : SubtargetFeature <"DumpCode",
+ "DumpCode",
+ "true",
+ "Dump MachineInstrs in the CodeEmitter">;
+
+def FeatureIRStructurizer : SubtargetFeature <"disable-irstructurizer",
+ "EnableIRStructurizer",
+ "false",
+ "Disable IR Structurizer">;
+
+// Target features
+
+def FeatureIfCvt : SubtargetFeature <"disable-ifcvt",
+ "EnableIfCvt",
+ "false",
+ "Disable the if conversion pass">;
+
+def FeatureFP64 : SubtargetFeature<"fp64",
+ "FP64",
+ "true",
+ "Enable 64bit double precision operations">;
+
+def Feature64BitPtr : SubtargetFeature<"64BitPtr",
+ "Is64bit",
+ "true",
+ "Specify if 64bit addressing should be used.">;
+
+def Feature32on64BitPtr : SubtargetFeature<"64on32BitPtr",
+ "Is32on64bit",
+ "false",
+ "Specify if 64bit sized pointers with 32bit addressing should be used.">;
+
+def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst",
+ "R600ALUInst",
+ "false",
+ "Older version of ALU instructions encoding.">;
+
+def FeatureVertexCache : SubtargetFeature<"HasVertexCache",
+ "HasVertexCache",
+ "true",
+ "Specify use of dedicated vertex cache.">;
+
+def FeatureCaymanISA : SubtargetFeature<"caymanISA",
+ "CaymanISA",
+ "true",
+ "Use Cayman ISA">;
+
+class SubtargetFeatureFetchLimit <string Value> :
+ SubtargetFeature <"fetch"#Value,
+ "TexVTXClauseSize",
+ Value,
+ "Limit the maximum number of fetches in a clause to "#Value>;
+
+def FeatureFetchLimit8 : SubtargetFeatureFetchLimit <"8">;
+def FeatureFetchLimit16 : SubtargetFeatureFetchLimit <"16">;
+
+class SubtargetFeatureGeneration <string Value,
+ list<SubtargetFeature> Implies> :
+ SubtargetFeature <Value, "Gen", "AMDGPUSubtarget::"#Value,
+ Value#" GPU generation", Implies>;
+
+def FeatureR600 : SubtargetFeatureGeneration<"R600",
+ [FeatureR600ALUInst, FeatureFetchLimit8]>;
+
+def FeatureR700 : SubtargetFeatureGeneration<"R700",
+ [FeatureFetchLimit16]>;
+
+def FeatureEvergreen : SubtargetFeatureGeneration<"EVERGREEN",
+ [FeatureFetchLimit16]>;
+
+def FeatureNorthernIslands : SubtargetFeatureGeneration<"NORTHERN_ISLANDS",
+ [FeatureFetchLimit16]>;
+
+def FeatureSouthernIslands : SubtargetFeatureGeneration<"SOUTHERN_ISLANDS",
+ [Feature64BitPtr, FeatureFP64]>;
+
+def FeatureSeaIslands : SubtargetFeatureGeneration<"SEA_ISLANDS",
+ [Feature64BitPtr, FeatureFP64]>;
+//===----------------------------------------------------------------------===//
def AMDGPUInstrInfo : InstrInfo {
let guessInstructionProperties = 1;
diff --git a/lib/Target/R600/AMDGPUAsmPrinter.cpp b/lib/Target/R600/AMDGPUAsmPrinter.cpp
index 4c35ecf5c9b5..67bdba28787a 100644
--- a/lib/Target/R600/AMDGPUAsmPrinter.cpp
+++ b/lib/Target/R600/AMDGPUAsmPrinter.cpp
@@ -19,16 +19,17 @@
#include "AMDGPUAsmPrinter.h"
#include "AMDGPU.h"
-#include "SIDefines.h"
-#include "SIMachineFunctionInfo.h"
-#include "SIRegisterInfo.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
+#include "SIDefines.h"
+#include "SIMachineFunctionInfo.h"
+#include "SIRegisterInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ELF.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
@@ -44,32 +45,60 @@ extern "C" void LLVMInitializeR600AsmPrinter() {
TargetRegistry::RegisterAsmPrinter(TheAMDGPUTarget, createAMDGPUAsmPrinterPass);
}
+AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer)
+{
+ DisasmEnabled = TM.getSubtarget<AMDGPUSubtarget>().dumpCode() &&
+ ! Streamer.hasRawTextSupport();
+}
+
/// We need to override this function so we can avoid
/// the call to EmitFunctionHeader(), which the MCPureStreamer can't handle.
bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
- const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
- if (STM.dumpCode()) {
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- MF.dump();
-#endif
- }
SetupMachineFunction(MF);
if (OutStreamer.hasRawTextSupport()) {
OutStreamer.EmitRawText("@" + MF.getName() + ":");
}
- const MCSectionELF *ConfigSection = getObjFileLowering().getContext()
- .getELFSection(".AMDGPU.config",
+ MCContext &Context = getObjFileLowering().getContext();
+ const MCSectionELF *ConfigSection = Context.getELFSection(".AMDGPU.config",
ELF::SHT_PROGBITS, 0,
SectionKind::getReadOnly());
OutStreamer.SwitchSection(ConfigSection);
- if (STM.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
+ const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
+ if (STM.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
EmitProgramInfoSI(MF);
} else {
EmitProgramInfoR600(MF);
}
+
+ DisasmLines.clear();
+ HexLines.clear();
+ DisasmLineMaxLen = 0;
+
OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
EmitFunctionBody();
+
+ if (STM.dumpCode()) {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ MF.dump();
+#endif
+
+ if (DisasmEnabled) {
+ OutStreamer.SwitchSection(Context.getELFSection(".AMDGPU.disasm",
+ ELF::SHT_NOTE, 0,
+ SectionKind::getReadOnly()));
+
+ for (size_t i = 0; i < DisasmLines.size(); ++i) {
+ std::string Comment(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
+ Comment += " ; " + HexLines[i] + "\n";
+
+ OutStreamer.EmitBytes(StringRef(DisasmLines[i]));
+ OutStreamer.EmitBytes(StringRef(Comment));
+ }
+ }
+ }
+
return false;
}
@@ -105,7 +134,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) {
}
unsigned RsrcReg;
- if (STM.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX) {
+ if (STM.getGeneration() >= AMDGPUSubtarget::EVERGREEN) {
// Evergreen / Northern Islands
switch (MFI->ShaderType) {
default: // Fall through
@@ -130,9 +159,15 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) {
S_STACK_SIZE(MFI->StackSize), 4);
OutStreamer.EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
OutStreamer.EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);
+
+ if (MFI->ShaderType == ShaderType::COMPUTE) {
+ OutStreamer.EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
+ OutStreamer.EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4);
+ }
}
void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
+ const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
unsigned MaxSGPR = 0;
unsigned MaxVGPR = 0;
bool VCCUsed = false;
@@ -148,7 +183,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
unsigned numOperands = MI.getNumOperands();
for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
- MachineOperand & MO = MI.getOperand(op_idx);
+ MachineOperand &MO = MI.getOperand(op_idx);
unsigned maxUsed;
unsigned width = 0;
bool isSGPR = false;
@@ -162,8 +197,10 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
VCCUsed = true;
continue;
}
+
switch (reg) {
default: break;
+ case AMDGPU::SCC:
case AMDGPU::EXEC:
case AMDGPU::M0:
continue;
@@ -196,6 +233,9 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
} else if (AMDGPU::VReg_256RegClass.contains(reg)) {
isSGPR = false;
width = 8;
+ } else if (AMDGPU::SReg_512RegClass.contains(reg)) {
+ isSGPR = true;
+ width = 16;
} else if (AMDGPU::VReg_512RegClass.contains(reg)) {
isSGPR = false;
width = 16;
@@ -227,7 +267,25 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
OutStreamer.EmitIntValue(RsrcReg, 4);
OutStreamer.EmitIntValue(S_00B028_VGPRS(MaxVGPR / 4) | S_00B028_SGPRS(MaxSGPR / 8), 4);
+
+ unsigned LDSAlignShift;
+ if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
+ // LDS is allocated in 64 dword blocks
+ LDSAlignShift = 8;
+ } else {
+ // LDS is allocated in 128 dword blocks
+ LDSAlignShift = 9;
+ }
+ unsigned LDSBlocks =
+ RoundUpToAlignment(MFI->LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
+
+ if (MFI->ShaderType == ShaderType::COMPUTE) {
+ OutStreamer.EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4);
+ OutStreamer.EmitIntValue(S_00B84C_LDS_SIZE(LDSBlocks), 4);
+ }
if (MFI->ShaderType == ShaderType::PIXEL) {
+ OutStreamer.EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4);
+ OutStreamer.EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(LDSBlocks), 4);
OutStreamer.EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
OutStreamer.EmitIntValue(MFI->PSInputAddr, 4);
}
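EmitProgramInfoSI() above reports LDS usage in hardware allocation blocks: 64 dwords (256 bytes, shift 8) before Sea Islands and 128 dwords (512 bytes, shift 9) from Sea Islands onward. Below is a self-contained sketch of the rounding for a hypothetical kernel using 1000 bytes of LDS; the helper only mirrors what RoundUpToAlignment does in the hunk, and the numbers are illustrative.

  #include <cstdint>
  #include <cstdio>

  // Round Value up to the next multiple of Align (a power of two).
  static uint64_t roundUpToAlignment(uint64_t Value, uint64_t Align) {
    return (Value + Align - 1) & ~(Align - 1);
  }

  int main() {
    const unsigned LDSSize = 1000;      // hypothetical LDS bytes used by a kernel
    const unsigned LDSAlignShift = 8;   // pre-Sea-Islands: 64-dword (256-byte) blocks

    unsigned LDSBlocks =
        roundUpToAlignment(LDSSize, 1u << LDSAlignShift) >> LDSAlignShift;

    std::printf("%u\n", LDSBlocks);     // 1000 -> rounded to 1024 -> 4 blocks
    return 0;
  }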
diff --git a/lib/Target/R600/AMDGPUAsmPrinter.h b/lib/Target/R600/AMDGPUAsmPrinter.h
index f425ef419463..05dc9bb672d7 100644
--- a/lib/Target/R600/AMDGPUAsmPrinter.h
+++ b/lib/Target/R600/AMDGPUAsmPrinter.h
@@ -1,4 +1,4 @@
-//===-- AMDGPUAsmPrinter.h - Print AMDGPU assembly code -------------------===//
+//===-- AMDGPUAsmPrinter.h - Print AMDGPU assembly code ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,14 +16,15 @@
#define AMDGPU_ASMPRINTER_H
#include "llvm/CodeGen/AsmPrinter.h"
+#include <string>
+#include <vector>
namespace llvm {
class AMDGPUAsmPrinter : public AsmPrinter {
public:
- explicit AMDGPUAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) { }
+ explicit AMDGPUAsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -38,6 +39,11 @@ public:
/// Implemented in AMDGPUMCInstLower.cpp
virtual void EmitInstruction(const MachineInstr *MI);
+
+protected:
+ bool DisasmEnabled;
+ std::vector<std::string> DisasmLines, HexLines;
+ size_t DisasmLineMaxLen;
};
} // End anonymous llvm
diff --git a/lib/Target/R600/AMDGPUCallingConv.td b/lib/Target/R600/AMDGPUCallingConv.td
index 9c30515a3420..65cdb2467336 100644
--- a/lib/Target/R600/AMDGPUCallingConv.td
+++ b/lib/Target/R600/AMDGPUCallingConv.td
@@ -19,12 +19,13 @@ def CC_SI : CallingConv<[
CCIfInReg<CCIfType<[f32, i32] , CCAssignToReg<[
SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
- SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15
+ SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
+ SGPR16
]>>>,
CCIfInReg<CCIfType<[i64] , CCAssignToRegWithShadow<
[ SGPR0, SGPR2, SGPR4, SGPR6, SGPR8, SGPR10, SGPR12, SGPR14 ],
- [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR12, SGPR15 ]
+ [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR13, SGPR15 ]
>>>,
CCIfNotInReg<CCIfType<[f32, i32] , CCAssignToReg<[
@@ -34,15 +35,40 @@ def CC_SI : CallingConv<[
VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31
]>>>,
- // This is the default for i64 values.
- // XXX: We should change this once clang understands the CC_AMDGPU.
- CCIfType<[i64], CCAssignToRegWithShadow<
- [ SGPR0, SGPR2, SGPR4, SGPR6, SGPR8, SGPR10, SGPR12, SGPR14 ],
- [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR13, SGPR15 ]
- >>
+ CCIfByVal<CCIfType<[i64] , CCAssignToRegWithShadow<
+ [ SGPR0, SGPR2, SGPR4, SGPR6, SGPR8, SGPR10, SGPR12, SGPR14 ],
+ [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR13, SGPR15 ]
+ >>>
+
+]>;
+
+// Calling convention for R600
+def CC_R600 : CallingConv<[
+ CCIfInReg<CCIfType<[v4f32, v4i32] , CCAssignToReg<[
+ T0_XYZW, T1_XYZW, T2_XYZW, T3_XYZW, T4_XYZW, T5_XYZW, T6_XYZW, T7_XYZW,
+ T8_XYZW, T9_XYZW, T10_XYZW, T11_XYZW, T12_XYZW, T13_XYZW, T14_XYZW, T15_XYZW,
+ T16_XYZW, T17_XYZW, T18_XYZW, T19_XYZW, T20_XYZW, T21_XYZW, T22_XYZW,
+ T23_XYZW, T24_XYZW, T25_XYZW, T26_XYZW, T27_XYZW, T28_XYZW, T29_XYZW,
+ T30_XYZW, T31_XYZW, T32_XYZW
+ ]>>>
+]>;
+
+// Calling convention for compute kernels
+def CC_AMDGPU_Kernel : CallingConv<[
+ CCCustom<"allocateStack">
]>;
def CC_AMDGPU : CallingConv<[
- CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().device()"#
- "->getGeneration() == AMDGPUDeviceInfo::HD7XXX", CCDelegateTo<CC_SI>>
+ CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().getGeneration() >= "
+ "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+ "State.getMachineFunction().getInfo<SIMachineFunctionInfo>()->"#
+ "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>,
+ CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().getGeneration() < "
+ "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+ "State.getMachineFunction().getInfo<R600MachineFunctionInfo>()->"
+ "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>,
+ CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>()"#
+ ".getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS", CCDelegateTo<CC_SI>>,
+ CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>()"#
+ ".getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS", CCDelegateTo<CC_R600>>
]>;
diff --git a/lib/Target/R600/AMDGPUFrameLowering.cpp b/lib/Target/R600/AMDGPUFrameLowering.cpp
index 815d6f71c3be..40f14d2f67c5 100644
--- a/lib/Target/R600/AMDGPUFrameLowering.cpp
+++ b/lib/Target/R600/AMDGPUFrameLowering.cpp
@@ -78,27 +78,8 @@ int AMDGPUFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
int UpperBound = FI == -1 ? MFI->getNumObjects() : FI;
for (int i = MFI->getObjectIndexBegin(); i < UpperBound; ++i) {
- const AllocaInst *Alloca = MFI->getObjectAllocation(i);
- unsigned ArrayElements;
- const Type *AllocaType = Alloca->getAllocatedType();
- const Type *ElementType;
-
- if (AllocaType->isArrayTy()) {
- ArrayElements = AllocaType->getArrayNumElements();
- ElementType = AllocaType->getArrayElementType();
- } else {
- ArrayElements = 1;
- ElementType = AllocaType;
- }
-
- unsigned VectorElements;
- if (ElementType->isVectorTy()) {
- VectorElements = ElementType->getVectorNumElements();
- } else {
- VectorElements = 1;
- }
-
- Offset += (VectorElements / getStackWidth(MF)) * ArrayElements;
+ unsigned Size = MFI->getObjectSize(i);
+ Offset += (Size / (getStackWidth(MF) * 4));
}
return Offset;
}
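The frame-lowering change above replaces the alloca-type walk with a size-based formula: each stack object advances the offset by its byte size divided by the width of one stack entry (getStackWidth() 32-bit channels of 4 bytes each). A small sketch of that arithmetic with invented sizes:

  // Illustrates the new offset formula from the hunk above: each stack object
  // advances the offset by Size / (StackWidth * 4), where StackWidth is the
  // number of 32-bit channels per stack entry. Names and values are hypothetical.
  unsigned frameEntriesFor(unsigned SizeBytes, unsigned StackWidth) {
    return SizeBytes / (StackWidth * 4);
  }

  // Examples (illustrative only):
  //   frameEntriesFor(16, 1) == 4   // four scalar entries
  //   frameEntriesFor(32, 4) == 2   // two 4-channel entries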
diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
new file mode 100644
index 000000000000..a9891350e570
--- /dev/null
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -0,0 +1,585 @@
+//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief Defines an instruction selector for the AMDGPU target.
+//
+//===----------------------------------------------------------------------===//
+#include "AMDGPUInstrInfo.h"
+#include "AMDGPUISelLowering.h" // For AMDGPUISD
+#include "AMDGPURegisterInfo.h"
+#include "R600InstrInfo.h"
+#include "SIISelLowering.h"
+#include "llvm/ADT/ValueMap.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Support/Compiler.h"
+#include <list>
+#include <queue>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Instruction Selector Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// AMDGPU specific code to select AMDGPU machine instructions for
+/// SelectionDAG operations.
+class AMDGPUDAGToDAGISel : public SelectionDAGISel {
+ // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
+ // make the right decision when generating code for different targets.
+ const AMDGPUSubtarget &Subtarget;
+public:
+ AMDGPUDAGToDAGISel(TargetMachine &TM);
+ virtual ~AMDGPUDAGToDAGISel();
+
+ SDNode *Select(SDNode *N);
+ virtual const char *getPassName() const;
+ virtual void PostprocessISelDAG();
+
+private:
+ inline SDValue getSmallIPtrImm(unsigned Imm);
+ bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
+ const R600InstrInfo *TII);
+ bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
+ bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
+
+ // Complex pattern selectors
+ bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
+ bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
+ bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
+ SDValue SimplifyI24(SDValue &Op);
+ bool SelectI24(SDValue Addr, SDValue &Op);
+ bool SelectU24(SDValue Addr, SDValue &Op);
+
+ static bool checkType(const Value *ptr, unsigned int addrspace);
+
+ static bool isGlobalStore(const StoreSDNode *N);
+ static bool isPrivateStore(const StoreSDNode *N);
+ static bool isLocalStore(const StoreSDNode *N);
+ static bool isRegionStore(const StoreSDNode *N);
+
+ bool isCPLoad(const LoadSDNode *N) const;
+ bool isConstantLoad(const LoadSDNode *N, int cbID) const;
+ bool isGlobalLoad(const LoadSDNode *N) const;
+ bool isParamLoad(const LoadSDNode *N) const;
+ bool isPrivateLoad(const LoadSDNode *N) const;
+ bool isLocalLoad(const LoadSDNode *N) const;
+ bool isRegionLoad(const LoadSDNode *N) const;
+
+ const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
+ bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
+ bool SelectGlobalValueVariableOffset(SDValue Addr,
+ SDValue &BaseReg, SDValue& Offset);
+ bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
+ bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
+
+ // Include the pieces autogenerated from the target description.
+#include "AMDGPUGenDAGISel.inc"
+};
+} // end anonymous namespace
+
+/// \brief This pass converts a legalized DAG into an AMDGPU-specific
+/// DAG, ready for instruction scheduling.
+FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM
+ ) {
+ return new AMDGPUDAGToDAGISel(TM);
+}
+
+AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
+ : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
+}
+
+AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
+}
+
+/// \brief Determine the register class for \p OpNo
+/// \returns The register class of the virtual register that will be used for
+/// the given operand number \p OpNo, or NULL if the register class cannot be
+/// determined.
+const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
+ unsigned OpNo) const {
+ if (!N->isMachineOpcode()) {
+ return NULL;
+ }
+ switch (N->getMachineOpcode()) {
+ default: {
+ const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
+ unsigned OpIdx = Desc.getNumDefs() + OpNo;
+ if (OpIdx >= Desc.getNumOperands())
+ return NULL;
+ int RegClass = Desc.OpInfo[OpIdx].RegClass;
+ if (RegClass == -1) {
+ return NULL;
+ }
+ return TM.getRegisterInfo()->getRegClass(RegClass);
+ }
+ case AMDGPU::REG_SEQUENCE: {
+ const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(
+ cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
+ unsigned SubRegIdx =
+ dyn_cast<ConstantSDNode>(N->getOperand(OpNo + 1))->getZExtValue();
+ return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
+ }
+ }
+}
+
+SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
+ return CurDAG->getTargetConstant(Imm, MVT::i32);
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDRParam(
+ SDValue Addr, SDValue& R1, SDValue& R2) {
+
+ if (Addr.getOpcode() == ISD::FrameIndex) {
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ R2 = CurDAG->getTargetConstant(0, MVT::i32);
+ } else {
+ R1 = Addr;
+ R2 = CurDAG->getTargetConstant(0, MVT::i32);
+ }
+ } else if (Addr.getOpcode() == ISD::ADD) {
+ R1 = Addr.getOperand(0);
+ R2 = Addr.getOperand(1);
+ } else {
+ R1 = Addr;
+ R2 = CurDAG->getTargetConstant(0, MVT::i32);
+ }
+ return true;
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress) {
+ return false;
+ }
+ return SelectADDRParam(Addr, R1, R2);
+}
+
+
+bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress) {
+ return false;
+ }
+
+ if (Addr.getOpcode() == ISD::FrameIndex) {
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
+ R2 = CurDAG->getTargetConstant(0, MVT::i64);
+ } else {
+ R1 = Addr;
+ R2 = CurDAG->getTargetConstant(0, MVT::i64);
+ }
+ } else if (Addr.getOpcode() == ISD::ADD) {
+ R1 = Addr.getOperand(0);
+ R2 = Addr.getOperand(1);
+ } else {
+ R1 = Addr;
+ R2 = CurDAG->getTargetConstant(0, MVT::i64);
+ }
+ return true;
+}
+
+SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
+ unsigned int Opc = N->getOpcode();
+ if (N->isMachineOpcode()) {
+ N->setNodeId(-1);
+ return NULL; // Already selected.
+ }
+ switch (Opc) {
+ default: break;
+ case ISD::BUILD_VECTOR: {
+ unsigned RegClassID;
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ const AMDGPURegisterInfo *TRI =
+ static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
+ const SIRegisterInfo *SIRI =
+ static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
+ EVT VT = N->getValueType(0);
+ unsigned NumVectorElts = VT.getVectorNumElements();
+ assert(VT.getVectorElementType().bitsEq(MVT::i32));
+ if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ bool UseVReg = true;
+ for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
+ U != E; ++U) {
+ if (!U->isMachineOpcode()) {
+ continue;
+ }
+ const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
+ if (!RC) {
+ continue;
+ }
+ if (SIRI->isSGPRClass(RC)) {
+ UseVReg = false;
+ }
+ }
+ switch(NumVectorElts) {
+ case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
+ AMDGPU::SReg_32RegClassID;
+ break;
+ case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
+ AMDGPU::SReg_64RegClassID;
+ break;
+ case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
+ AMDGPU::SReg_128RegClassID;
+ break;
+ case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
+ AMDGPU::SReg_256RegClassID;
+ break;
+ case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
+ AMDGPU::SReg_512RegClassID;
+ break;
+ default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
+ }
+ } else {
+      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
+      // sequence, which adds a 128-bit register copy when going through the
+      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
+      // as possible because they can't be bundled by our scheduler.
+ switch(NumVectorElts) {
+ case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
+ case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
+ default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
+ }
+ }
+
+ SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);
+
+ if (NumVectorElts == 1) {
+ return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS,
+ VT.getVectorElementType(),
+ N->getOperand(0), RegClass);
+ }
+
+ assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
+ "supported yet");
+ // 16 = Max Num Vector Elements
+ // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
+ // 1 = Vector Register Class
+ SDValue RegSeqArgs[16 * 2 + 1];
+
+ RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
+ bool IsRegSeq = true;
+ for (unsigned i = 0; i < N->getNumOperands(); i++) {
+ // XXX: Why is this here?
+ if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
+ IsRegSeq = false;
+ break;
+ }
+ RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
+ RegSeqArgs[1 + (2 * i) + 1] =
+ CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
+ }
+ if (!IsRegSeq)
+ break;
+ return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
+ RegSeqArgs, 2 * N->getNumOperands() + 1);
+ }
+ case ISD::BUILD_PAIR: {
+ SDValue RC, SubReg0, SubReg1;
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ break;
+ }
+ if (N->getValueType(0) == MVT::i128) {
+ RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
+ SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
+ SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
+ } else if (N->getValueType(0) == MVT::i64) {
+ RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
+ SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
+ SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
+ } else {
+ llvm_unreachable("Unhandled value type for BUILD_PAIR");
+ }
+ const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
+ N->getOperand(1), SubReg1 };
+ return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
+ SDLoc(N), N->getValueType(0), Ops);
+ }
+ case AMDGPUISD::REGISTER_LOAD: {
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ break;
+ SDValue Addr, Offset;
+
+ SelectADDRIndirect(N->getOperand(1), Addr, Offset);
+ const SDValue Ops[] = {
+ Addr,
+ Offset,
+ CurDAG->getTargetConstant(0, MVT::i32),
+ N->getOperand(0),
+ };
+ return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
+ CurDAG->getVTList(MVT::i32, MVT::i64, MVT::Other),
+ Ops);
+ }
+ case AMDGPUISD::REGISTER_STORE: {
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ break;
+ SDValue Addr, Offset;
+ SelectADDRIndirect(N->getOperand(2), Addr, Offset);
+ const SDValue Ops[] = {
+ N->getOperand(1),
+ Addr,
+ Offset,
+ CurDAG->getTargetConstant(0, MVT::i32),
+ N->getOperand(0),
+ };
+ return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
+ CurDAG->getVTList(MVT::Other),
+ Ops);
+ }
+ }
+ return SelectCode(N);
+}
+
+
+bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
+ if (!ptr) {
+ return false;
+ }
+ Type *ptrType = ptr->getType();
+ return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
+}
+
+bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
+ return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
+ return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
+}
+
+bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
+ return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
+ return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
+ if (CbId == -1) {
+ return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
+ }
+ return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
+}
+
+bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
+ if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
+ N->getMemoryVT().bitsLT(MVT::i32)) {
+ return true;
+ }
+ }
+ return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
+ return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
+ return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
+ return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
+ MachineMemOperand *MMO = N->getMemOperand();
+ if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
+ if (MMO) {
+ const Value *V = MMO->getValue();
+ const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
+ if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
+ if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
+ // Check to make sure we are not a constant pool load or a constant load
+ // that is marked as a private load
+ if (isCPLoad(N) || isConstantLoad(N, -1)) {
+ return false;
+ }
+ }
+ if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
+ return true;
+ }
+ return false;
+}
+
+const char *AMDGPUDAGToDAGISel::getPassName() const {
+ return "AMDGPU DAG->DAG Pattern Instruction Selection";
+}
+
+#ifdef DEBUGTMP
+#undef INT64_C
+#endif
+#undef DEBUGTMP
+
+//===----------------------------------------------------------------------===//
+// Complex Patterns
+//===----------------------------------------------------------------------===//
+
+bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
+ SDValue& IntPtr) {
+ if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
+ IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
+ return true;
+ }
+ return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
+ SDValue& BaseReg, SDValue &Offset) {
+ if (!dyn_cast<ConstantSDNode>(Addr)) {
+ BaseReg = Addr;
+ Offset = CurDAG->getIntPtrConstant(0, true);
+ return true;
+ }
+ return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
+ ConstantSDNode * IMMOffset;
+
+ if (Addr.getOpcode() == ISD::ADD
+ && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+ && isInt<16>(IMMOffset->getZExtValue())) {
+
+ Base = Addr.getOperand(0);
+ Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
+ return true;
+ // If the pointer address is constant, we can move it to the offset field.
+ } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
+ && isInt<16>(IMMOffset->getZExtValue())) {
+ Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
+ SDLoc(CurDAG->getEntryNode()),
+ AMDGPU::ZERO, MVT::i32);
+ Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
+ return true;
+ }
+
+ // Default case, no offset
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
+ ConstantSDNode *C;
+
+ if ((C = dyn_cast<ConstantSDNode>(Addr))) {
+ Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
+ Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
+ } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
+ (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
+ Base = Addr.getOperand(0);
+ Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
+ } else {
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ }
+
+ return true;
+}
+
+SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
+ APInt Demanded = APInt(32, 0x00FFFFFF);
+ APInt KnownZero, KnownOne;
+ TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
+ const TargetLowering *TLI = getTargetLowering();
+ if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
+ CurDAG->ReplaceAllUsesWith(Op, TLO.New);
+ CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
+ return SimplifyI24(TLO.New);
+ } else {
+ return Op;
+ }
+}
+
+bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {
+
+ assert(Op.getValueType() == MVT::i32);
+
+ if (CurDAG->ComputeNumSignBits(Op) == 9) {
+ I24 = SimplifyI24(Op);
+ return true;
+ }
+ return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
+ APInt KnownZero;
+ APInt KnownOne;
+ CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);
+
+ assert (Op.getValueType() == MVT::i32);
+
+ // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
+ // i32. These smaller types are legal to use with the i24 instructions.
+ if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
+ Op.getOpcode() == ISD::ANY_EXTEND ||
+ ISD::isEXTLoad(Op.getNode())) {
+ U24 = SimplifyI24(Op);
+ return true;
+ }
+ return false;
+}
+
+void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
+ const AMDGPUTargetLowering& Lowering =
+ (*(const AMDGPUTargetLowering*)getTargetLowering());
+ bool IsModified = false;
+ do {
+ IsModified = false;
+ // Go over all selected nodes and try to fold them a bit more
+ for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
+ E = CurDAG->allnodes_end(); I != E; ++I) {
+
+ SDNode *Node = I;
+
+ MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
+ if (!MachineNode)
+ continue;
+
+ SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
+ if (ResNode != Node) {
+ ReplaceUses(Node, ResNode);
+ IsModified = true;
+ }
+ }
+ CurDAG->RemoveDeadNodes();
+ } while (IsModified);
+}
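SelectI24() above relies on ComputeNumSignBits(): for a 32-bit value, 32 - 24 + 1 = 9 redundant sign bits mean the value is representable as a signed 24-bit operand (the selector tests for exactly 9, i.e. values that need the full 24 bits). Below is a standalone sketch of that counting that assumes nothing about the SelectionDAG APIs; the function names are made up.

  #include <cstdint>

  // Count how many high-order bits of a 32-bit value are copies of the sign bit
  // (the sign bit itself included), i.e. what ComputeNumSignBits reports for a
  // known constant.
  static unsigned numSignBits(int32_t V) {
    const uint32_t U = (uint32_t)V;
    const uint32_t Sign = U >> 31;
    unsigned N = 1;
    for (int Bit = 30; Bit >= 0 && ((U >> Bit) & 1u) == Sign; --Bit)
      ++N;
    return N;
  }

  // A value fits a signed 24-bit operand when at least 32 - 24 + 1 = 9 of its
  // bits are sign bits; e.g. 0x007FFFFF and -0x00800000 fit, 0x00800000 does not.
  static bool fitsSigned24(int32_t V) { return numSignBits(V) >= 9; }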
diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index a266df535d56..c4d75ffa0d06 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -14,16 +14,29 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUISelLowering.h"
+#include "AMDGPU.h"
+#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
-#include "AMDILIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
+#include "AMDILIntrinsicInfo.h"
+#include "R600MachineFunctionInfo.h"
+#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/DataLayout.h"
using namespace llvm;
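+// Custom calling-convention handler (referenced from the generated
+// AMDGPUGenCallingConv.inc below): allocate a stack slot for the argument and
+// record its memory location.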
+static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ unsigned Offset = State.AllocateStack(ValVT.getSizeInBits() / 8, ArgFlags.getOrigAlign());
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+
+ return true;
+}
#include "AMDGPUGenCallingConv.inc"
@@ -45,26 +58,171 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::FABS, MVT::f32, Legal);
setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
setOperationAction(ISD::FRINT, MVT::f32, Legal);
+ setOperationAction(ISD::FROUND, MVT::f32, Legal);
+
+ // The hardware supports ROTR, but not ROTL
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
// Lower floating point store/load to integer store/load to reduce the number
// of patterns in tablegen.
setOperationAction(ISD::STORE, MVT::f32, Promote);
AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
+ setOperationAction(ISD::STORE, MVT::v2f32, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
+
setOperationAction(ISD::STORE, MVT::v4f32, Promote);
AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
+ setOperationAction(ISD::STORE, MVT::v8f32, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
+
+ setOperationAction(ISD::STORE, MVT::v16f32, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
+
+ setOperationAction(ISD::STORE, MVT::f64, Promote);
+ AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);
+
+ // Custom lowering of vector stores is required for local address space
+ // stores.
+ setOperationAction(ISD::STORE, MVT::v4i32, Custom);
+ // XXX: Native v2i32 local address space stores are possible, but not
+ // currently implemented.
+ setOperationAction(ISD::STORE, MVT::v2i32, Custom);
+
+ setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
+ setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
+ setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
+ // XXX: This can be changed to Custom once ExpandVectorStores can
+ // handle 64-bit stores.
+ setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
+
setOperationAction(ISD::LOAD, MVT::f32, Promote);
AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
+ setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
+
setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
+ setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
+
+ setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
+
+ setOperationAction(ISD::LOAD, MVT::f64, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);
+
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);
+
+ setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
+ setOperationAction(ISD::FNEG, MVT::v4f32, Expand);
+
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
+
setOperationAction(ISD::MUL, MVT::i64, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
setOperationAction(ISD::UREM, MVT::i32, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);
+
+ static const MVT::SimpleValueType IntTypes[] = {
+ MVT::v2i32, MVT::v4i32
+ };
+ const size_t NumIntTypes = array_lengthof(IntTypes);
+
+ for (unsigned int x = 0; x < NumIntTypes; ++x) {
+ MVT::SimpleValueType VT = IntTypes[x];
+ // Expand the following operations for the current type by default.
+ setOperationAction(ISD::ADD, VT, Expand);
+ setOperationAction(ISD::AND, VT, Expand);
+ setOperationAction(ISD::FP_TO_SINT, VT, Expand);
+ setOperationAction(ISD::FP_TO_UINT, VT, Expand);
+ setOperationAction(ISD::MUL, VT, Expand);
+ setOperationAction(ISD::OR, VT, Expand);
+ setOperationAction(ISD::SHL, VT, Expand);
+ setOperationAction(ISD::SINT_TO_FP, VT, Expand);
+ setOperationAction(ISD::SRL, VT, Expand);
+ setOperationAction(ISD::SRA, VT, Expand);
+ setOperationAction(ISD::SUB, VT, Expand);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::UINT_TO_FP, VT, Expand);
+ setOperationAction(ISD::UREM, VT, Expand);
+ setOperationAction(ISD::VSELECT, VT, Expand);
+ setOperationAction(ISD::XOR, VT, Expand);
+ }
+
+ static const MVT::SimpleValueType FloatTypes[] = {
+ MVT::v2f32, MVT::v4f32
+ };
+ const size_t NumFloatTypes = array_lengthof(FloatTypes);
+
+ for (unsigned int x = 0; x < NumFloatTypes; ++x) {
+ MVT::SimpleValueType VT = FloatTypes[x];
+ setOperationAction(ISD::FABS, VT, Expand);
+ setOperationAction(ISD::FADD, VT, Expand);
+ setOperationAction(ISD::FDIV, VT, Expand);
+ setOperationAction(ISD::FFLOOR, VT, Expand);
+ setOperationAction(ISD::FMUL, VT, Expand);
+ setOperationAction(ISD::FRINT, VT, Expand);
+ setOperationAction(ISD::FSQRT, VT, Expand);
+ setOperationAction(ISD::FSUB, VT, Expand);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Target Information
+//===----------------------------------------------------------------------===//
+
+MVT AMDGPUTargetLowering::getVectorIdxTy() const {
+ return MVT::i32;
+}
+
+bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
+ EVT CastTy) const {
+ if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
+ return true;
+
+ unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
+ unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();
+
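+  // Reject the combine only when it would narrow elements of at least 32 bits
+  // down to sub-32-bit elements; everything else is considered beneficial.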
+ return ((LScalarSize <= CastScalarSize) ||
+ (CastScalarSize >= 32) ||
+ (LScalarSize < 32));
+}
+
+//===---------------------------------------------------------------------===//
+// Target Properties
+//===---------------------------------------------------------------------===//
+
+bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
+ assert(VT.isFloatingPoint());
+ return VT == MVT::f32;
+}
+
+bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
+ assert(VT.isFloatingPoint());
+ return VT == MVT::f32;
}
//===---------------------------------------------------------------------===//
@@ -83,7 +241,7 @@ SDValue AMDGPUTargetLowering::LowerReturn(
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const {
+ SDLoc DL, SelectionDAG &DAG) const {
return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}
@@ -105,16 +263,104 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
// AMDGPU DAG lowering
+ case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
+ case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
+ case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
+ case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
}
return Op;
}
+SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
+ SDValue Op,
+ SelectionDAG &DAG) const {
+
+ const DataLayout *TD = getTargetMachine().getDataLayout();
+ GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
+
+ assert(G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS);
+ // XXX: What does the value of G->getOffset() mean?
+ assert(G->getOffset() == 0 &&
+ "Do not know what to do with an non-zero offset");
+
+ const GlobalValue *GV = G->getGlobal();
+
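+  // Assign each local-address global a byte offset within the LDS allocation
+  // the first time it is seen; later references reuse the recorded offset.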
+ unsigned Offset;
+ if (MFI->LocalMemoryObjects.count(GV) == 0) {
+ uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
+ Offset = MFI->LDSSize;
+ MFI->LocalMemoryObjects[GV] = Offset;
+ // XXX: Account for alignment?
+ MFI->LDSSize += Size;
+ } else {
+ Offset = MFI->LocalMemoryObjects[GV];
+ }
+
+ return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
+}
+
+void AMDGPUTargetLowering::ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Args,
+ unsigned Start,
+ unsigned Count) const {
+ EVT VT = Op.getValueType();
+ for (unsigned i = Start, e = Start + Count; i != e; ++i) {
+ Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
+ VT.getVectorElementType(),
+ Op, DAG.getConstant(i, MVT::i32)));
+ }
+}
+
+SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
+ SelectionDAG &DAG) const {
+ SmallVector<SDValue, 8> Args;
+ SDValue A = Op.getOperand(0);
+ SDValue B = Op.getOperand(1);
+
+ ExtractVectorElements(A, DAG, Args, 0,
+ A.getValueType().getVectorNumElements());
+ ExtractVectorElements(B, DAG, Args, 0,
+ B.getValueType().getVectorNumElements());
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
+ &Args[0], Args.size());
+}
+
+SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
+
+ SmallVector<SDValue, 8> Args;
+ EVT VT = Op.getValueType();
+ unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ ExtractVectorElements(Op.getOperand(0), DAG, Args, Start,
+ VT.getVectorNumElements());
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
+ &Args[0], Args.size());
+}
+
+SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
+ SelectionDAG &DAG) const {
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ const AMDGPUFrameLowering *TFL =
+ static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());
+
+ FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
+ assert(FIN);
+
+ unsigned FrameIndex = FIN->getIndex();
+ unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
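+ // Scale the slot offset by 4 * the per-thread stack width to produce the
+ // final constant offset used for private-memory accesses.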
+ return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
+ Op.getValueType());
+}
+
SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
switch (IntrinsicID) {
@@ -154,7 +400,7 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
Op.getOperand(1));
@@ -166,7 +412,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
DAG.getConstantFP(1.0f, MVT::f32),
@@ -181,7 +427,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
@@ -238,11 +484,126 @@ SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
return Op;
}
+SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
+ SelectionDAG &DAG) const {
+ LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
+ EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
+ EVT EltVT = Op.getValueType().getVectorElementType();
+ EVT PtrVT = Load->getBasePtr().getValueType();
+ unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
+ SmallVector<SDValue, 8> Loads;
+ SDLoc SL(Op);
+
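+  // Emit one scalar extending load per element at BasePtr + i * EltSize and
+  // reassemble the results with a BUILD_VECTOR.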
+ for (unsigned i = 0, e = NumElts; i != e; ++i) {
+ SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
+ DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
+ Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
+ Load->getChain(), Ptr,
+ MachinePointerInfo(Load->getMemOperand()->getValue()),
+ MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
+ Load->getAlignment()));
+ }
+ return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(), &Loads[0],
+ Loads.size());
+}
+
+SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
+ SelectionDAG &DAG) const {
+ StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
+ EVT MemVT = Store->getMemoryVT();
+ unsigned MemBits = MemVT.getSizeInBits();
+ // Byte stores are really expensive, so if possible, try to pack a
+ // 32-bit vector truncating store into a single i32 store.
+ // XXX: We could also optimize other vector bitwidths.
+ if (!MemVT.isVector() || MemBits > 32) {
+ return SDValue();
+ }
+
+ SDLoc DL(Op);
+ const SDValue &Value = Store->getValue();
+ EVT VT = Value.getValueType();
+ const SDValue &Ptr = Store->getBasePtr();
+ EVT MemEltVT = MemVT.getVectorElementType();
+ unsigned MemEltBits = MemEltVT.getSizeInBits();
+ unsigned MemNumElements = MemVT.getVectorNumElements();
+ EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
+ SDValue Mask;
+ switch(MemEltBits) {
+ case 8:
+ Mask = DAG.getConstant(0xFF, PackedVT);
+ break;
+ case 16:
+ Mask = DAG.getConstant(0xFFFF, PackedVT);
+ break;
+ default:
+ llvm_unreachable("Cannot lower this vector store");
+ }
+ SDValue PackedValue;
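+  // Mask each element down to its memory width, shift it into its lane, and OR
+  // the lanes together into one packed integer value.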
+ for (unsigned i = 0; i < MemNumElements; ++i) {
+ EVT ElemVT = VT.getVectorElementType();
+ SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
+ DAG.getConstant(i, MVT::i32));
+ Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
+ Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
+ SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
+ Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
+ if (i == 0) {
+ PackedValue = Elt;
+ } else {
+ PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
+ }
+ }
+ return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
+ MachinePointerInfo(Store->getMemOperand()->getValue()),
+ Store->isVolatile(), Store->isNonTemporal(),
+ Store->getAlignment());
+}
+
+SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
+ SelectionDAG &DAG) const {
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
+ EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
+ EVT EltVT = Store->getValue().getValueType().getVectorElementType();
+ EVT PtrVT = Store->getBasePtr().getValueType();
+ unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
+ SDLoc SL(Op);
+
+ SmallVector<SDValue, 8> Chains;
+
+ for (unsigned i = 0, e = NumElts; i != e; ++i) {
+ SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
+ Store->getValue(), DAG.getConstant(i, MVT::i32));
+ SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
+ Store->getBasePtr(),
+ DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
+ PtrVT));
+ Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
+ MachinePointerInfo(Store->getMemOperand()->getValue()),
+ MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
+ Store->getAlignment()));
+ }
+ return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, &Chains[0], NumElts);
+}
+
+SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
+ if (Result.getNode()) {
+ return Result;
+ }
+
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
+ if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
+ Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
+ Store->getValue().getValueType().isVector()) {
+ return SplitVectorStore(Op, DAG);
+ }
+ return SDValue();
+}
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Num = Op.getOperand(0);
@@ -295,13 +656,13 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
DAG.getConstant(-1, VT),
DAG.getConstant(0, VT),
- ISD::SETGE);
- // Remainder_GE_Zero = (Remainder >= 0 ? -1 : 0)
- SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Remainder,
- DAG.getConstant(0, VT),
+ ISD::SETUGE);
+ // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
+ SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
+ Num_S_Remainder,
DAG.getConstant(-1, VT),
DAG.getConstant(0, VT),
- ISD::SETGE);
+ ISD::SETUGE);
// Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
Remainder_GE_Zero);
@@ -345,10 +706,62 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
return DAG.getMergeValues(Ops, 2, DL);
}
+SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue S0 = Op.getOperand(0);
+ SDLoc DL(Op);
+ if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
+ return SDValue();
+
+ // f32 uint_to_fp i64
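+  // Convert the low and high 32-bit halves separately, then recombine as
+  // (float)Hi * 2^32 + (float)Lo.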
+ SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
+ DAG.getConstant(0, MVT::i32));
+ SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
+ DAG.getConstant(1, MVT::i32));
+ SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
+ FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
+ DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
+ return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
+
+}
+
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
+void AMDGPUTargetLowering::getOriginalFunctionArgs(
+ SelectionDAG &DAG,
+ const Function *F,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SmallVectorImpl<ISD::InputArg> &OrigIns) const {
+
+ for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
+ if (Ins[i].ArgVT == Ins[i].VT) {
+ OrigIns.push_back(Ins[i]);
+ continue;
+ }
+
+ EVT VT;
+ if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
+ // Vector has been split into scalars.
+ VT = Ins[i].ArgVT.getVectorElementType();
+ } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
+ Ins[i].ArgVT.getVectorElementType() !=
+ Ins[i].VT.getVectorElementType()) {
+ // Vector elements have been promoted
+ VT = Ins[i].ArgVT;
+ } else {
+ // Vector has been split into smaller vectors.
+ VT = Ins[i].VT;
+ }
+
+ ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
+ Ins[i].OrigArgIndex, Ins[i].PartOffset);
+ OrigIns.push_back(Arg);
+ }
+}
+
bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
return CFP->isExactlyValue(1.0);
@@ -410,5 +823,13 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(CONST_ADDRESS)
NODE_NAME_CASE(REGISTER_LOAD)
NODE_NAME_CASE(REGISTER_STORE)
+ NODE_NAME_CASE(LOAD_CONSTANT)
+ NODE_NAME_CASE(LOAD_INPUT)
+ NODE_NAME_CASE(SAMPLE)
+ NODE_NAME_CASE(SAMPLEB)
+ NODE_NAME_CASE(SAMPLED)
+ NODE_NAME_CASE(SAMPLEL)
+ NODE_NAME_CASE(STORE_MSKOR)
+ NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
}
}
diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h
index c2a79ea99959..2dfd3cf492a9 100644
--- a/lib/Target/R600/AMDGPUISelLowering.h
+++ b/lib/Target/R600/AMDGPUISelLowering.h
@@ -20,12 +20,25 @@
namespace llvm {
+class AMDGPUMachineFunction;
class MachineRegisterInfo;
class AMDGPUTargetLowering : public TargetLowering {
private:
+ void ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Args,
+ unsigned Start, unsigned Count) const;
+ SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ /// \brief Lower vector stores by merging the vector elements into an integer
+ /// of the same bitwidth.
+ SDValue MergeVectorStore(const SDValue &Op, SelectionDAG &DAG) const;
+ /// \brief Split a vector store into multiple scalar stores.
+ /// \returns The resulting chain.
SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
protected:
@@ -33,23 +46,43 @@ protected:
/// MachineFunction.
///
/// \returns a RegisterSDNode representing Reg.
- SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC,
- unsigned Reg, EVT VT) const;
-
+ virtual SDValue CreateLiveInRegister(SelectionDAG &DAG,
+ const TargetRegisterClass *RC,
+ unsigned Reg, EVT VT) const;
+ SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
+ SelectionDAG &DAG) const;
+ /// \brief Split a vector load into multiple scalar loads.
+ SDValue SplitVectorLoad(const SDValue &Op, SelectionDAG &DAG) const;
+ SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
bool isHWTrueValue(SDValue Op) const;
bool isHWFalseValue(SDValue Op) const;
+ /// The SelectionDAGBuilder will automatically promote function arguments
+ /// with illegal types. However, this does not work for the AMDGPU targets
+ /// since the function arguments are stored in memory as these illegal types.
+ /// In order to handle this properly we need to get the original types' sizes
+ /// from the LLVM IR Function and fix up the ISD::InputArg values before
+ /// passing them to AnalyzeFormalArguments().
+ void getOriginalFunctionArgs(SelectionDAG &DAG,
+ const Function *F,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SmallVectorImpl<ISD::InputArg> &OrigIns) const;
void AnalyzeFormalArguments(CCState &State,
const SmallVectorImpl<ISD::InputArg> &Ins) const;
public:
AMDGPUTargetLowering(TargetMachine &TM);
+ virtual bool isFAbsFree(EVT VT) const;
+ virtual bool isFNegFree(EVT VT) const;
+ virtual MVT getVectorIdxTy() const;
+ virtual bool isLoadBitCastBeneficial(EVT, EVT) const LLVM_OVERRIDE;
virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const;
+ SDLoc DL, SelectionDAG &DAG) const;
virtual SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
CLI.Callee.dump();
@@ -115,10 +148,10 @@ enum {
RET_FLAG,
BRANCH_COND,
// End AMDIL ISD Opcodes
- BITALIGN,
- BUFFER_STORE,
DWORDADDR,
FRACT,
+ COS_HW,
+ SIN_HW,
FMAX,
SMAX,
UMAX,
@@ -126,10 +159,21 @@ enum {
SMIN,
UMIN,
URECIP,
+ DOT4,
+ TEXTURE_FETCH,
EXPORT,
CONST_ADDRESS,
REGISTER_LOAD,
REGISTER_STORE,
+ LOAD_INPUT,
+ SAMPLE,
+ SAMPLEB,
+ SAMPLED,
+ SAMPLEL,
+ FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ STORE_MSKOR,
+ LOAD_CONSTANT,
+ TBUFFER_STORE_FORMAT,
LAST_AMDGPU_ISD_NUMBER
};
diff --git a/lib/Target/R600/AMDGPUIndirectAddressing.cpp b/lib/Target/R600/AMDGPUIndirectAddressing.cpp
deleted file mode 100644
index ed6c8ec55dd2..000000000000
--- a/lib/Target/R600/AMDGPUIndirectAddressing.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
-//===-- AMDGPUIndirectAddressing.cpp - Indirect Adressing Support ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-///
-/// Instructions can use indirect addressing to index the register file as if it
-/// were memory. This pass lowers RegisterLoad and RegisterStore instructions
-/// to either a COPY or a MOV that uses indirect addressing.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "R600InstrInfo.h"
-#include "R600MachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-
-using namespace llvm;
-
-namespace {
-
-class AMDGPUIndirectAddressingPass : public MachineFunctionPass {
-
-private:
- static char ID;
- const AMDGPUInstrInfo *TII;
-
- bool regHasExplicitDef(MachineRegisterInfo &MRI, unsigned Reg) const;
-
-public:
- AMDGPUIndirectAddressingPass(TargetMachine &tm) :
- MachineFunctionPass(ID),
- TII(static_cast<const AMDGPUInstrInfo*>(tm.getInstrInfo()))
- { }
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- const char *getPassName() const { return "R600 Handle indirect addressing"; }
-
-};
-
-} // End anonymous namespace
-
-char AMDGPUIndirectAddressingPass::ID = 0;
-
-FunctionPass *llvm::createAMDGPUIndirectAddressingPass(TargetMachine &tm) {
- return new AMDGPUIndirectAddressingPass(tm);
-}
-
-bool AMDGPUIndirectAddressingPass::runOnMachineFunction(MachineFunction &MF) {
- MachineRegisterInfo &MRI = MF.getRegInfo();
-
- int IndirectBegin = TII->getIndirectIndexBegin(MF);
- int IndirectEnd = TII->getIndirectIndexEnd(MF);
-
- if (IndirectBegin == -1) {
- // No indirect addressing, we can skip this pass
- assert(IndirectEnd == -1);
- return false;
- }
-
- // The map keeps track of the indirect address that is represented by
- // each virtual register. The key is the register and the value is the
- // indirect address it uses.
- std::map<unsigned, unsigned> RegisterAddressMap;
-
- // First pass - Lower all of the RegisterStore instructions and track which
- // registers are live.
- for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
- BB != BB_E; ++BB) {
- // This map keeps track of the current live indirect registers.
- // The key is the address and the value is the register
- std::map<unsigned, unsigned> LiveAddressRegisterMap;
- MachineBasicBlock &MBB = *BB;
-
- for (MachineBasicBlock::iterator I = MBB.begin(), Next = llvm::next(I);
- I != MBB.end(); I = Next) {
- Next = llvm::next(I);
- MachineInstr &MI = *I;
-
- if (!TII->isRegisterStore(MI)) {
- continue;
- }
-
- // Lower RegisterStore
-
- unsigned RegIndex = MI.getOperand(2).getImm();
- unsigned Channel = MI.getOperand(3).getImm();
- unsigned Address = TII->calculateIndirectAddress(RegIndex, Channel);
- const TargetRegisterClass *IndirectStoreRegClass =
- TII->getIndirectAddrStoreRegClass(MI.getOperand(0).getReg());
-
- if (MI.getOperand(1).getReg() == AMDGPU::INDIRECT_BASE_ADDR) {
- // Direct register access.
- unsigned DstReg = MRI.createVirtualRegister(IndirectStoreRegClass);
-
- BuildMI(MBB, I, MBB.findDebugLoc(I), TII->get(AMDGPU::COPY), DstReg)
- .addOperand(MI.getOperand(0));
-
- RegisterAddressMap[DstReg] = Address;
- LiveAddressRegisterMap[Address] = DstReg;
- } else {
- // Indirect register access.
- MachineInstrBuilder MOV = TII->buildIndirectWrite(BB, I,
- MI.getOperand(0).getReg(), // Value
- Address,
- MI.getOperand(1).getReg()); // Offset
- for (int i = IndirectBegin; i <= IndirectEnd; ++i) {
- unsigned Addr = TII->calculateIndirectAddress(i, Channel);
- unsigned DstReg = MRI.createVirtualRegister(IndirectStoreRegClass);
- MOV.addReg(DstReg, RegState::Define | RegState::Implicit);
- RegisterAddressMap[DstReg] = Addr;
- LiveAddressRegisterMap[Addr] = DstReg;
- }
- }
- MI.eraseFromParent();
- }
-
- // Update the live-ins of the succesor blocks
- for (MachineBasicBlock::succ_iterator Succ = MBB.succ_begin(),
- SuccEnd = MBB.succ_end();
- SuccEnd != Succ; ++Succ) {
- std::map<unsigned, unsigned>::const_iterator Key, KeyEnd;
- for (Key = LiveAddressRegisterMap.begin(),
- KeyEnd = LiveAddressRegisterMap.end(); KeyEnd != Key; ++Key) {
- (*Succ)->addLiveIn(Key->second);
- }
- }
- }
-
- // Second pass - Lower the RegisterLoad instructions
- for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
- BB != BB_E; ++BB) {
- // Key is the address and the value is the register
- std::map<unsigned, unsigned> LiveAddressRegisterMap;
- MachineBasicBlock &MBB = *BB;
-
- MachineBasicBlock::livein_iterator LI = MBB.livein_begin();
- while (LI != MBB.livein_end()) {
- std::vector<unsigned> PhiRegisters;
-
- // Make sure this live in is used for indirect addressing
- if (RegisterAddressMap.find(*LI) == RegisterAddressMap.end()) {
- ++LI;
- continue;
- }
-
- unsigned Address = RegisterAddressMap[*LI];
- LiveAddressRegisterMap[Address] = *LI;
- PhiRegisters.push_back(*LI);
-
- // Check if there are other live in registers which map to the same
- // indirect address.
- for (MachineBasicBlock::livein_iterator LJ = llvm::next(LI),
- LE = MBB.livein_end();
- LJ != LE; ++LJ) {
- unsigned Reg = *LJ;
- if (RegisterAddressMap.find(Reg) == RegisterAddressMap.end()) {
- continue;
- }
-
- if (RegisterAddressMap[Reg] == Address) {
- PhiRegisters.push_back(Reg);
- }
- }
-
- if (PhiRegisters.size() == 1) {
- // We don't need to insert a Phi instruction, so we can just add the
- // registers to the live list for the block.
- LiveAddressRegisterMap[Address] = *LI;
- MBB.removeLiveIn(*LI);
- } else {
- // We need to insert a PHI, because we have the same address being
- // written in multiple predecessor blocks.
- const TargetRegisterClass *PhiDstClass =
- TII->getIndirectAddrStoreRegClass(*(PhiRegisters.begin()));
- unsigned PhiDstReg = MRI.createVirtualRegister(PhiDstClass);
- MachineInstrBuilder Phi = BuildMI(MBB, MBB.begin(),
- MBB.findDebugLoc(MBB.begin()),
- TII->get(AMDGPU::PHI), PhiDstReg);
-
- for (std::vector<unsigned>::const_iterator RI = PhiRegisters.begin(),
- RE = PhiRegisters.end();
- RI != RE; ++RI) {
- unsigned Reg = *RI;
- MachineInstr *DefInst = MRI.getVRegDef(Reg);
- assert(DefInst);
- MachineBasicBlock *RegBlock = DefInst->getParent();
- Phi.addReg(Reg);
- Phi.addMBB(RegBlock);
- MBB.removeLiveIn(Reg);
- }
- RegisterAddressMap[PhiDstReg] = Address;
- LiveAddressRegisterMap[Address] = PhiDstReg;
- }
- LI = MBB.livein_begin();
- }
-
- for (MachineBasicBlock::iterator I = MBB.begin(), Next = llvm::next(I);
- I != MBB.end(); I = Next) {
- Next = llvm::next(I);
- MachineInstr &MI = *I;
-
- if (!TII->isRegisterLoad(MI)) {
- if (MI.getOpcode() == AMDGPU::PHI) {
- continue;
- }
- // Check for indirect register defs
- for (unsigned OpIdx = 0, NumOperands = MI.getNumOperands();
- OpIdx < NumOperands; ++OpIdx) {
- MachineOperand &MO = MI.getOperand(OpIdx);
- if (MO.isReg() && MO.isDef() &&
- RegisterAddressMap.find(MO.getReg()) != RegisterAddressMap.end()) {
- unsigned Reg = MO.getReg();
- unsigned LiveAddress = RegisterAddressMap[Reg];
- // Chain the live-ins
- if (LiveAddressRegisterMap.find(LiveAddress) !=
- RegisterAddressMap.end()) {
- MI.addOperand(MachineOperand::CreateReg(
- LiveAddressRegisterMap[LiveAddress],
- false, // isDef
- true, // isImp
- true)); // isKill
- }
- LiveAddressRegisterMap[LiveAddress] = Reg;
- }
- }
- continue;
- }
-
- const TargetRegisterClass *SuperIndirectRegClass =
- TII->getSuperIndirectRegClass();
- const TargetRegisterClass *IndirectLoadRegClass =
- TII->getIndirectAddrLoadRegClass();
- unsigned IndirectReg = MRI.createVirtualRegister(SuperIndirectRegClass);
-
- unsigned RegIndex = MI.getOperand(2).getImm();
- unsigned Channel = MI.getOperand(3).getImm();
- unsigned Address = TII->calculateIndirectAddress(RegIndex, Channel);
-
- if (MI.getOperand(1).getReg() == AMDGPU::INDIRECT_BASE_ADDR) {
- // Direct register access
- unsigned Reg = LiveAddressRegisterMap[Address];
- unsigned AddrReg = IndirectLoadRegClass->getRegister(Address);
-
- if (regHasExplicitDef(MRI, Reg)) {
- // If the register we are reading from has an explicit def, then that
- // means it was written via a direct register access (i.e. COPY
- // or other instruction that doesn't use indirect addressing). In
- // this case we know where the value has been stored, so we can just
- // issue a copy.
- BuildMI(MBB, I, MBB.findDebugLoc(I), TII->get(AMDGPU::COPY),
- MI.getOperand(0).getReg())
- .addReg(Reg);
- } else {
- // If the register we are reading has an implicit def, then that
- // means it was written by an indirect register access (i.e. An
- // instruction that uses indirect addressing.
- BuildMI(MBB, I, MBB.findDebugLoc(I), TII->get(AMDGPU::COPY),
- MI.getOperand(0).getReg())
- .addReg(AddrReg)
- .addReg(Reg, RegState::Implicit);
- }
- } else {
- // Indirect register access
-
- // Note on REQ_SEQUENCE instructons: You can't actually use the register
- // it defines unless you have an instruction that takes the defined
- // register class as an operand.
-
- MachineInstrBuilder Sequence = BuildMI(MBB, I, MBB.findDebugLoc(I),
- TII->get(AMDGPU::REG_SEQUENCE),
- IndirectReg);
- for (int i = IndirectBegin; i <= IndirectEnd; ++i) {
- unsigned Addr = TII->calculateIndirectAddress(i, Channel);
- if (LiveAddressRegisterMap.find(Addr) == LiveAddressRegisterMap.end()) {
- continue;
- }
- unsigned Reg = LiveAddressRegisterMap[Addr];
-
- // We only need to use REG_SEQUENCE for explicit defs, since the
- // register coalescer won't do anything with the implicit defs.
- if (!regHasExplicitDef(MRI, Reg)) {
- continue;
- }
-
- // Insert a REQ_SEQUENCE instruction to force the register allocator
- // to allocate the virtual register to the correct physical register.
- Sequence.addReg(LiveAddressRegisterMap[Addr]);
- Sequence.addImm(TII->getRegisterInfo().getIndirectSubReg(Addr));
- }
- MachineInstrBuilder Mov = TII->buildIndirectRead(BB, I,
- MI.getOperand(0).getReg(), // Value
- Address,
- MI.getOperand(1).getReg()); // Offset
-
-
-
- Mov.addReg(IndirectReg, RegState::Implicit | RegState::Kill);
- Mov.addReg(LiveAddressRegisterMap[Address], RegState::Implicit);
-
- }
- MI.eraseFromParent();
- }
- }
- return false;
-}
-
-bool AMDGPUIndirectAddressingPass::regHasExplicitDef(MachineRegisterInfo &MRI,
- unsigned Reg) const {
- MachineInstr *DefInstr = MRI.getVRegDef(Reg);
-
- if (!DefInstr) {
- return false;
- }
-
- if (DefInstr->getOpcode() == AMDGPU::PHI) {
- bool Explicit = false;
- for (MachineInstr::const_mop_iterator I = DefInstr->operands_begin(),
- E = DefInstr->operands_end();
- I != E; ++I) {
- const MachineOperand &MO = *I;
- if (!MO.isReg() || MO.isDef()) {
- continue;
- }
-
- Explicit = Explicit || regHasExplicitDef(MRI, MO.getReg());
- }
- return Explicit;
- }
-
- return DefInstr->getOperand(0).isReg() &&
- DefInstr->getOperand(0).getReg() == Reg;
-}
diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/R600/AMDGPUInstrInfo.cpp
index 30f736c84c25..4f7084b9202f 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.cpp
+++ b/lib/Target/R600/AMDGPUInstrInfo.cpp
@@ -16,19 +16,23 @@
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
-#include "AMDIL.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
+#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
using namespace llvm;
+
+// Pin the vtable to this file.
+void AMDGPUInstrInfo::anchor() {}
+
AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
- : AMDGPUGenInstrInfo(0,0), RI(tm, *this), TM(tm) { }
+ : AMDGPUGenInstrInfo(-1,-1), RI(tm), TM(tm) { }
const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
return RI;
@@ -99,27 +103,6 @@ bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
return false;
}
-MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
- MachineBasicBlock::iterator tmp = MBB->end();
- if (!MBB->size()) {
- return MBB->end();
- }
- while (--tmp) {
- if (tmp->getOpcode() == AMDGPU::ENDLOOP
- || tmp->getOpcode() == AMDGPU::ENDIF
- || tmp->getOpcode() == AMDGPU::ELSE) {
- if (tmp == MBB->begin()) {
- return tmp;
- } else {
- continue;
- }
- } else {
- return ++tmp;
- }
- }
- return MBB->end();
-}
-
void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
@@ -139,6 +122,55 @@ AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
assert(!"Not Implemented");
}
+bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const {
+ MachineBasicBlock *MBB = MI->getParent();
+ int OffsetOpIdx =
+ AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::addr);
+ // addr is a custom operand with multiple MI operands, and only the
+ // first MI operand is given a name.
+ int RegOpIdx = OffsetOpIdx + 1;
+ int ChanOpIdx =
+ AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::chan);
+
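+  // RegisterLoad/RegisterStore pseudos are lowered here: a direct access
+  // through INDIRECT_BASE_ADDR becomes a plain MOV, anything else becomes an
+  // indirect read or write.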
+ if (isRegisterLoad(*MI)) {
+ int DstOpIdx =
+ AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
+ unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
+ unsigned Address = calculateIndirectAddress(RegIndex, Channel);
+ unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
+ if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
+ buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
+ getIndirectAddrRegClass()->getRegister(Address));
+ } else {
+ buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
+ Address, OffsetReg);
+ }
+ } else if (isRegisterStore(*MI)) {
+ int ValOpIdx =
+ AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::val);
+ AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
+ unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
+ unsigned Address = calculateIndirectAddress(RegIndex, Channel);
+ unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
+ if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
+ buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
+ MI->getOperand(ValOpIdx).getReg());
+ } else {
+ buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
+ calculateIndirectAddress(RegIndex, Channel),
+ OffsetReg);
+ }
+ } else {
+ return false;
+ }
+
+ MBB->erase(MI);
+ return true;
+}
+
+
MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI,
@@ -244,6 +276,57 @@ bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}
+int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ int Offset = -1;
+
+ if (MFI->getNumObjects() == 0) {
+ return -1;
+ }
+
+ if (MRI.livein_empty()) {
+ return 0;
+ }
+
+ const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
+ for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
+ LE = MRI.livein_end();
+ LI != LE; ++LI) {
+ unsigned Reg = LI->first;
+ if (TargetRegisterInfo::isVirtualRegister(Reg) ||
+ !IndirectRC->contains(Reg))
+ continue;
+
+ unsigned RegIndex;
+ unsigned RegEnd;
+ for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
+ ++RegIndex) {
+ if (IndirectRC->getRegister(RegIndex) == Reg)
+ break;
+ }
+ Offset = std::max(Offset, (int)RegIndex);
+ }
+
+ return Offset + 1;
+}
+
+int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
+ int Offset = 0;
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Variable sized objects are not supported
+ assert(!MFI->hasVarSizedObjects());
+
+ if (MFI->getNumObjects() == 0) {
+ return -1;
+ }
+
+ Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);
+
+ return getIndirectIndexBegin(MF) + Offset;
+}
+
void AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
DebugLoc DL) const {
@@ -265,3 +348,12 @@ void AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
}
}
}
+
+int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
+ switch (Channels) {
+ default: return Opcode;
+ case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
+ case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
+ case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
+ }
+}
diff --git a/lib/Target/R600/AMDGPUInstrInfo.h b/lib/Target/R600/AMDGPUInstrInfo.h
index 3909e4e105ee..ce5b58c6923f 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.h
+++ b/lib/Target/R600/AMDGPUInstrInfo.h
@@ -23,6 +23,7 @@
#define GET_INSTRINFO_HEADER
#define GET_INSTRINFO_ENUM
+#define GET_INSTRINFO_OPERAND_ENUM
#include "AMDGPUGenInstrInfo.inc"
#define OPCODE_IS_ZERO_INT AMDGPU::PRED_SETE_INT
@@ -42,6 +43,7 @@ private:
const AMDGPURegisterInfo RI;
bool getNextBranchInstr(MachineBasicBlock::iterator &iter,
MachineBasicBlock &MBB) const;
+ virtual void anchor();
protected:
TargetMachine &TM;
public:
@@ -86,6 +88,8 @@ public:
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
+ virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
+
protected:
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
@@ -96,6 +100,14 @@ protected:
MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr *LoadMI) const;
+ /// \returns the smallest register index that will be accessed by an indirect
+ /// read or write or -1 if indirect addressing is not used by this program.
+ virtual int getIndirectIndexBegin(const MachineFunction &MF) const;
+
+ /// \returns the largest register index that will be accessed by an indirect
+ /// read or write or -1 if indirect addressing is not used by this program.
+ virtual int getIndirectIndexEnd(const MachineFunction &MF) const;
+
public:
bool canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const;
@@ -138,19 +150,9 @@ public:
  // Pure virtual functions to be implemented by sub-classes.
//===---------------------------------------------------------------------===//
- virtual MachineInstr* getMovImmInstr(MachineFunction *MF, unsigned DstReg,
- int64_t Imm) const = 0;
virtual unsigned getIEQOpcode() const = 0;
virtual bool isMov(unsigned opcode) const = 0;
- /// \returns the smallest register index that will be accessed by an indirect
- /// read or write or -1 if indirect addressing is not used by this program.
- virtual int getIndirectIndexBegin(const MachineFunction &MF) const = 0;
-
- /// \returns the largest register index that will be accessed by an indirect
- /// read or write or -1 if indirect addressing is not used by this program.
- virtual int getIndirectIndexEnd(const MachineFunction &MF) const = 0;
-
/// \brief Calculate the "Indirect Address" for the given \p RegIndex and
/// \p Channel
///
@@ -161,14 +163,9 @@ public:
virtual unsigned calculateIndirectAddress(unsigned RegIndex,
unsigned Channel) const = 0;
- /// \returns The register class to be used for storing values to an
- /// "Indirect Address" .
- virtual const TargetRegisterClass *getIndirectAddrStoreRegClass(
- unsigned SourceReg) const = 0;
-
- /// \returns The register class to be used for loading values from
- /// an "Indirect Address" .
- virtual const TargetRegisterClass *getIndirectAddrLoadRegClass() const = 0;
+ /// \returns The register class to be used for loading and storing values
+ /// from an "Indirect Address" .
+ virtual const TargetRegisterClass *getIndirectAddrRegClass() const = 0;
/// \brief Build instruction(s) for an indirect register write.
///
@@ -186,18 +183,27 @@ public:
unsigned ValueReg, unsigned Address,
unsigned OffsetReg) const = 0;
- /// \returns the register class whose sub registers are the set of all
- /// possible registers that can be used for indirect addressing.
- virtual const TargetRegisterClass *getSuperIndirectRegClass() const = 0;
-
/// \brief Convert the AMDIL MachineInstr to a supported ISA
/// MachineInstr
virtual void convertToISA(MachineInstr & MI, MachineFunction &MF,
DebugLoc DL) const;
+ /// \brief Build a MOV instruction.
+ virtual MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, unsigned SrcReg) const = 0;
+
+ /// \brief Given a MIMG \p Opcode that writes all 4 channels, return the
+ /// equivalent opcode that writes \p Channels channels.
+ int getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const;
+
};
+namespace AMDGPU {
+ int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
+} // End namespace AMDGPU
+
} // End llvm namespace
#define AMDGPU_FLAG_REGISTER_LOAD (UINT64_C(1) << 63)
diff --git a/lib/Target/R600/AMDGPUInstrInfo.td b/lib/Target/R600/AMDGPUInstrInfo.td
index b66ae879dc20..fccede01ab9b 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.td
+++ b/lib/Target/R600/AMDGPUInstrInfo.td
@@ -23,12 +23,6 @@ def AMDGPUDTIntTernaryOp : SDTypeProfile<1, 3, [
// AMDGPU DAG Nodes
//
-// out = ((a << 32) | b) >> c)
-//
-// Can be used to optimize rtol:
-// rotl(a, b) = bitalign(a, a, 32 - b)
-def AMDGPUbitalign : SDNode<"AMDGPUISD::BITALIGN", AMDGPUDTIntTernaryOp>;
-
// This argument to this node is a dword address.
def AMDGPUdwordaddr : SDNode<"AMDGPUISD::DWORDADDR", SDTIntUnaryOp>;
@@ -71,8 +65,6 @@ def AMDGPUumin : SDNode<"AMDGPUISD::UMIN", SDTIntBinOp,
// e is rounding error
def AMDGPUurecip : SDNode<"AMDGPUISD::URECIP", SDTIntUnaryOp>;
-def fpow : SDNode<"ISD::FPOW", SDTFPBinOp>;
-
def AMDGPUregister_load : SDNode<"AMDGPUISD::REGISTER_LOAD",
SDTypeProfile<1, 2, [SDTCisPtrTy<1>, SDTCisInt<2>]>,
[SDNPHasChain, SDNPMayLoad]>;
@@ -80,3 +72,17 @@ def AMDGPUregister_load : SDNode<"AMDGPUISD::REGISTER_LOAD",
def AMDGPUregister_store : SDNode<"AMDGPUISD::REGISTER_STORE",
SDTypeProfile<0, 3, [SDTCisPtrTy<1>, SDTCisInt<2>]>,
[SDNPHasChain, SDNPMayStore]>;
+
+// MSKOR instructions are atomic memory instructions used mainly for storing
+// 8-bit and 16-bit values. The definition is:
+//
+// MSKOR(dst, mask, src) MEM[dst] = ((MEM[dst] & ~mask) | src)
+//
+// src0: vec4(src, 0, 0, mask)
+// src1: dst - rat offset (aka pointer) in dwords
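+//
+// For example (illustrative, not the exact lowering), a byte store of value V
+// to byte address P becomes roughly:
+//   MSKOR(P >> 2, 0xff << (8 * (P & 3)), (V & 0xff) << (8 * (P & 3)))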
+def AMDGPUstore_mskor : SDNode<"AMDGPUISD::STORE_MSKOR",
+ SDTypeProfile<0, 2, []>,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+
+def AMDGPUround : SDNode<"ISD::FROUND",
+ SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>>;
diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td
index d2620b2877a5..3c5375d84ecf 100644
--- a/lib/Target/R600/AMDGPUInstructions.td
+++ b/lib/Target/R600/AMDGPUInstructions.td
@@ -35,46 +35,75 @@ class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
}
def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
+def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;
-def COND_EQ : PatLeaf <
+//===----------------------------------------------------------------------===//
+// PatLeafs for floating-point comparisons
+//===----------------------------------------------------------------------===//
+
+def COND_OEQ : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETOEQ: case ISD::SETUEQ:
- case ISD::SETEQ: return true;}}}]
+ [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;
-def COND_NE : PatLeaf <
+def COND_OGT : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETONE: case ISD::SETUNE:
- case ISD::SETNE: return true;}}}]
+ [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;
-def COND_GT : PatLeaf <
+
+def COND_OGE : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETOGT: case ISD::SETUGT:
- case ISD::SETGT: return true;}}}]
+ [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;
-def COND_GE : PatLeaf <
+def COND_OLT : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETOGE: case ISD::SETUGE:
- case ISD::SETGE: return true;}}}]
+ [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;
-def COND_LT : PatLeaf <
+def COND_OLE : PatLeaf <
+ (cond),
+ [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
+>;
+
+def COND_UNE : PatLeaf <
+ (cond),
+ [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
+>;
+
+def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
+def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;
+
+//===----------------------------------------------------------------------===//
+// PatLeafs for unsigned comparisons
+//===----------------------------------------------------------------------===//
+
+def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
+def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
+def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
+def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;
+
+//===----------------------------------------------------------------------===//
+// PatLeafs for signed comparisons
+//===----------------------------------------------------------------------===//
+
+def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
+def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
+def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
+def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;
+
+//===----------------------------------------------------------------------===//
+// PatLeafs for integer equality
+//===----------------------------------------------------------------------===//
+
+def COND_EQ : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETOLT: case ISD::SETULT:
- case ISD::SETLT: return true;}}}]
+ [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;
-def COND_LE : PatLeaf <
+def COND_NE : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETOLE: case ISD::SETULE:
- case ISD::SETLE: return true;}}}]
+ [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;
def COND_NULL : PatLeaf <
@@ -86,15 +115,131 @@ def COND_NULL : PatLeaf <
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//
-def zextloadi8_global : PatFrag<(ops node:$ptr), (zextloadi8 node:$ptr), [{
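+// An "az" extload matches either an any-extending or a zero-extending load;
+// both may be selected to the same zero-extending hardware load, so they
+// share the pattern fragments below.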
+def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ LoadSDNode *L = cast<LoadSDNode>(N);
+ return L->getExtensionType() == ISD::ZEXTLOAD ||
+ L->getExtensionType() == ISD::EXTLOAD;
+}]>;
+
+def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;
+def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
+ return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
+ return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
+ return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
+ return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
+ return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
+ return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
+ return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def az_extloadi32_global : PatFrag<(ops node:$ptr),
+ (az_extloadi32 node:$ptr), [{
+ return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi32_constant : PatFrag<(ops node:$ptr),
+ (az_extloadi32 node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
+ (truncstorei8 node:$val, node:$ptr), [{
+ return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
+ (truncstorei16 node:$val, node:$ptr), [{
+ return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def local_store : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
+ (truncstorei8 node:$val, node:$ptr), [{
+ return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
+ (truncstorei16 node:$val, node:$ptr), [{
+ return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def atomic_load_add_local : PatFrag<(ops node:$ptr, node:$value),
+ (atomic_load_add node:$ptr, node:$value), [{
+ return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
+}]>;
+
+def atomic_load_sub_local : PatFrag<(ops node:$ptr, node:$value),
+ (atomic_load_sub node:$ptr, node:$value), [{
+ return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
+}]>;
+
+def mskor_global : PatFrag<(ops node:$val, node:$ptr),
+ (AMDGPUstore_mskor node:$val, node:$ptr), [{
+ return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
+}]>;
+
class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
-int FP_UINT_MAX_PLUS_1 = 0x4f800000; // 1 << 32 in floating point encoding
+int FP_UINT_MAX_PLUS_1 = 0x4f800000; // 1 << 32 in floating point encoding
}
def CONST : Constants;
@@ -108,6 +253,9 @@ def FP_ONE : PatLeaf <
[{return N->isExactlyValue(1.0);}]
>;
+def U24 : ComplexPattern<i32, 1, "SelectU24", [], []>;
+def I24 : ComplexPattern<i32, 1, "SelectI24", [], []>;
+
let isCodeGenOnly = 1, isPseudo = 1 in {
let usesCustomInserter = 1 in {
@@ -137,6 +285,8 @@ class FNEG <RegisterClass rc> : AMDGPUShaderInst <
multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
ComplexPattern addrPat> {
+let UseNamedOperandTable = 1 in {
+
def RegisterLoad : AMDGPUShaderInst <
(outs dstClass:$dst),
(ins addrClass:$addr, i32imm:$chan),
@@ -155,6 +305,7 @@ multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
let isRegisterStore = 1;
}
}
+}
} // End isCodeGenOnly = 1, isPseudo = 1
@@ -186,61 +337,12 @@ class Insert_Element <ValueType elem_type, ValueType vec_type,
(INSERT_SUBREG $vec, $elem, sub_reg)
>;
-// Vector Build pattern
-class Vector1_Build <ValueType vecType, ValueType elemType,
- RegisterClass rc> : Pat <
- (vecType (build_vector elemType:$src)),
- (vecType (COPY_TO_REGCLASS $src, rc))
->;
-
-class Vector2_Build <ValueType vecType, ValueType elemType> : Pat <
- (vecType (build_vector elemType:$sub0, elemType:$sub1)),
- (INSERT_SUBREG (INSERT_SUBREG
- (vecType (IMPLICIT_DEF)), $sub0, sub0), $sub1, sub1)
->;
-
class Vector4_Build <ValueType vecType, ValueType elemType> : Pat <
(vecType (build_vector elemType:$x, elemType:$y, elemType:$z, elemType:$w)),
(INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
(vecType (IMPLICIT_DEF)), $x, sub0), $y, sub1), $z, sub2), $w, sub3)
>;
-class Vector8_Build <ValueType vecType, ValueType elemType> : Pat <
- (vecType (build_vector elemType:$sub0, elemType:$sub1,
- elemType:$sub2, elemType:$sub3,
- elemType:$sub4, elemType:$sub5,
- elemType:$sub6, elemType:$sub7)),
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (vecType (IMPLICIT_DEF)), $sub0, sub0), $sub1, sub1),
- $sub2, sub2), $sub3, sub3),
- $sub4, sub4), $sub5, sub5),
- $sub6, sub6), $sub7, sub7)
->;
-
-class Vector16_Build <ValueType vecType, ValueType elemType> : Pat <
- (vecType (build_vector elemType:$sub0, elemType:$sub1,
- elemType:$sub2, elemType:$sub3,
- elemType:$sub4, elemType:$sub5,
- elemType:$sub6, elemType:$sub7,
- elemType:$sub8, elemType:$sub9,
- elemType:$sub10, elemType:$sub11,
- elemType:$sub12, elemType:$sub13,
- elemType:$sub14, elemType:$sub15)),
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (vecType (IMPLICIT_DEF)), $sub0, sub0), $sub1, sub1),
- $sub2, sub2), $sub3, sub3),
- $sub4, sub4), $sub5, sub5),
- $sub6, sub6), $sub7, sub7),
- $sub8, sub8), $sub9, sub9),
- $sub10, sub10), $sub11, sub11),
- $sub12, sub12), $sub13, sub13),
- $sub14, sub14), $sub15, sub15)
->;
-
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
@@ -295,6 +397,22 @@ class BFEPattern <Instruction BFE> : Pat <
(BFE $x, $y, $z)
>;
+// rotr pattern
+class ROTRPattern <Instruction BIT_ALIGN> : Pat <
+ (rotr i32:$src0, i32:$src1),
+ (BIT_ALIGN $src0, $src0, $src1)
+>;
+
+// 24-bit arithmetic patterns
+def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;
+
+/*
+class UMUL24Pattern <Instruction UMUL24> : Pat <
+ (mul U24:$x, U24:$y),
+ (UMUL24 $x, $y)
+>;
+*/
+
include "R600Instructions.td"
include "SIInstrInfo.td"
diff --git a/lib/Target/R600/AMDGPUIntrinsics.td b/lib/Target/R600/AMDGPUIntrinsics.td
index eecb25b04f79..9f975bf9bf45 100644
--- a/lib/Target/R600/AMDGPUIntrinsics.td
+++ b/lib/Target/R600/AMDGPUIntrinsics.td
@@ -50,6 +50,8 @@ let TargetPrefix = "AMDGPU", isTarget = 1 in {
def int_AMDGPU_umax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_umin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_cube : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+ def int_AMDGPU_barrier_local : Intrinsic<[], [], []>;
}
let TargetPrefix = "TGSI", isTarget = 1 in {
diff --git a/lib/Target/R600/AMDGPUMCInstLower.cpp b/lib/Target/R600/AMDGPUMCInstLower.cpp
index 1dc1c657dfe5..0ed598ee8c06 100644
--- a/lib/Target/R600/AMDGPUMCInstLower.cpp
+++ b/lib/Target/R600/AMDGPUMCInstLower.cpp
@@ -15,14 +15,19 @@
#include "AMDGPUMCInstLower.h"
#include "AMDGPUAsmPrinter.h"
+#include "InstPrinter/AMDGPUInstPrinter.h"
#include "R600InstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Constants.h"
+#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
using namespace llvm;
@@ -69,15 +74,45 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MachineBasicBlock::const_instr_iterator I = MI;
++I;
while (I != MBB->end() && I->isInsideBundle()) {
- MCInst MCBundleInst;
- const MachineInstr *BundledInst = I;
- MCInstLowering.lower(BundledInst, MCBundleInst);
- OutStreamer.EmitInstruction(MCBundleInst);
+ EmitInstruction(I);
++I;
}
} else {
MCInst TmpInst;
MCInstLowering.lower(MI, TmpInst);
OutStreamer.EmitInstruction(TmpInst);
+
+ if (DisasmEnabled) {
+ // Disassemble instruction/operands to text.
+ DisasmLines.resize(DisasmLines.size() + 1);
+ std::string &DisasmLine = DisasmLines.back();
+ raw_string_ostream DisasmStream(DisasmLine);
+
+ AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(), *TM.getInstrInfo(),
+ *TM.getRegisterInfo());
+ InstPrinter.printInst(&TmpInst, DisasmStream, StringRef());
+
+ // Disassemble instruction/operands to hex representation.
+ SmallVector<MCFixup, 4> Fixups;
+ SmallVector<char, 16> CodeBytes;
+ raw_svector_ostream CodeStream(CodeBytes);
+
+ MCObjectStreamer &ObjStreamer = (MCObjectStreamer &)OutStreamer;
+ MCCodeEmitter &InstEmitter = ObjStreamer.getAssembler().getEmitter();
+ InstEmitter.EncodeInstruction(TmpInst, CodeStream, Fixups);
+ CodeStream.flush();
+
+ HexLines.resize(HexLines.size() + 1);
+ std::string &HexLine = HexLines.back();
+ raw_string_ostream HexStream(HexLine);
+
+ for (size_t i = 0; i < CodeBytes.size(); i += 4) {
+ unsigned int CodeDWord = *(unsigned int *)&CodeBytes[i];
+ HexStream << format("%s%08X", (i > 0 ? " " : ""), CodeDWord);
+ }
+
+ DisasmStream.flush();
+ DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLine.size());
+ }
}
}
diff --git a/lib/Target/R600/AMDGPUMachineFunction.cpp b/lib/Target/R600/AMDGPUMachineFunction.cpp
index 046102540299..14171f46020a 100644
--- a/lib/Target/R600/AMDGPUMachineFunction.cpp
+++ b/lib/Target/R600/AMDGPUMachineFunction.cpp
@@ -2,14 +2,17 @@
#include "AMDGPU.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
+using namespace llvm;
-namespace llvm {
+static const char *const ShaderTypeAttribute = "ShaderType";
-const char *AMDGPUMachineFunction::ShaderTypeAttribute = "ShaderType";
+// Pin the vtable to this file.
+void AMDGPUMachineFunction::anchor() {}
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
MachineFunctionInfo() {
ShaderType = ShaderType::COMPUTE;
+ LDSSize = 0;
AttributeSet Set = MF.getFunction()->getAttributes();
Attribute A = Set.getAttribute(AttributeSet::FunctionIndex,
ShaderTypeAttribute);
@@ -20,5 +23,3 @@ AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
llvm_unreachable("Can't parse shader type!");
}
}
-
-}
diff --git a/lib/Target/R600/AMDGPUMachineFunction.h b/lib/Target/R600/AMDGPUMachineFunction.h
index 21c8c51dae45..fea0b39e91e5 100644
--- a/lib/Target/R600/AMDGPUMachineFunction.h
+++ b/lib/Target/R600/AMDGPUMachineFunction.h
@@ -14,15 +14,20 @@
#define AMDGPUMACHINEFUNCTION_H
#include "llvm/CodeGen/MachineFunction.h"
+#include <map>
namespace llvm {
class AMDGPUMachineFunction : public MachineFunctionInfo {
-private:
- static const char *ShaderTypeAttribute;
+ virtual void anchor();
public:
AMDGPUMachineFunction(const MachineFunction &MF);
unsigned ShaderType;
+ /// A map to keep track of local memory objects and their offsets within
+ /// the local memory space.
+ std::map<const GlobalValue *, unsigned> LocalMemoryObjects;
+ /// Number of bytes in the LDS that are being used.
+ unsigned LDSSize;
};
}
diff --git a/lib/Target/R600/AMDGPURegisterInfo.cpp b/lib/Target/R600/AMDGPURegisterInfo.cpp
index fe994d2d05a1..47617a72990d 100644
--- a/lib/Target/R600/AMDGPURegisterInfo.cpp
+++ b/lib/Target/R600/AMDGPURegisterInfo.cpp
@@ -17,11 +17,9 @@
using namespace llvm;
-AMDGPURegisterInfo::AMDGPURegisterInfo(TargetMachine &tm,
- const TargetInstrInfo &tii)
+AMDGPURegisterInfo::AMDGPURegisterInfo(TargetMachine &tm)
: AMDGPUGenRegisterInfo(0),
- TM(tm),
- TII(tii)
+ TM(tm)
{ }
//===----------------------------------------------------------------------===//
@@ -48,27 +46,21 @@ unsigned AMDGPURegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return 0;
}
+unsigned AMDGPURegisterInfo::getSubRegFromChannel(unsigned Channel) const {
+ static const unsigned SubRegs[] = {
+ AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, AMDGPU::sub4,
+ AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, AMDGPU::sub8, AMDGPU::sub9,
+ AMDGPU::sub10, AMDGPU::sub11, AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14,
+ AMDGPU::sub15
+ };
+
+ assert (Channel < array_lengthof(SubRegs));
+ return SubRegs[Channel];
+}
+
unsigned AMDGPURegisterInfo::getIndirectSubReg(unsigned IndirectIndex) const {
- switch(IndirectIndex) {
- case 0: return AMDGPU::sub0;
- case 1: return AMDGPU::sub1;
- case 2: return AMDGPU::sub2;
- case 3: return AMDGPU::sub3;
- case 4: return AMDGPU::sub4;
- case 5: return AMDGPU::sub5;
- case 6: return AMDGPU::sub6;
- case 7: return AMDGPU::sub7;
- case 8: return AMDGPU::sub8;
- case 9: return AMDGPU::sub9;
- case 10: return AMDGPU::sub10;
- case 11: return AMDGPU::sub11;
- case 12: return AMDGPU::sub12;
- case 13: return AMDGPU::sub13;
- case 14: return AMDGPU::sub14;
- case 15: return AMDGPU::sub15;
- default: llvm_unreachable("indirect index out of range");
- }
+ return getSubRegFromChannel(IndirectIndex);
}
#define GET_REGINFO_TARGET_DESC
diff --git a/lib/Target/R600/AMDGPURegisterInfo.h b/lib/Target/R600/AMDGPURegisterInfo.h
index 1fc88e7455b9..688e1a02c124 100644
--- a/lib/Target/R600/AMDGPURegisterInfo.h
+++ b/lib/Target/R600/AMDGPURegisterInfo.h
@@ -30,10 +30,9 @@ class TargetInstrInfo;
struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo {
TargetMachine &TM;
- const TargetInstrInfo &TII;
static const uint16_t CalleeSavedReg;
- AMDGPURegisterInfo(TargetMachine &tm, const TargetInstrInfo &tii);
+ AMDGPURegisterInfo(TargetMachine &tm);
virtual BitVector getReservedRegs(const MachineFunction &MF) const {
assert(!"Unimplemented"); return BitVector();
@@ -51,6 +50,14 @@ struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo {
assert(!"Unimplemented"); return NULL;
}
+ virtual unsigned getHWRegIndex(unsigned Reg) const {
+ assert(!"Unimplemented"); return 0;
+ }
+
+ /// \returns the sub reg enum value for the given \p Channel
+ /// (e.g. getSubRegFromChannel(0) -> AMDGPU::sub0)
+ unsigned getSubRegFromChannel(unsigned Channel) const;
+
const uint16_t* getCalleeSavedRegs(const MachineFunction *MF) const;
void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
unsigned FIOperandNum,
diff --git a/lib/Target/R600/AMDGPURegisterInfo.td b/lib/Target/R600/AMDGPURegisterInfo.td
index b5aca0347fb0..835a1464395c 100644
--- a/lib/Target/R600/AMDGPURegisterInfo.td
+++ b/lib/Target/R600/AMDGPURegisterInfo.td
@@ -14,7 +14,8 @@
let Namespace = "AMDGPU" in {
foreach Index = 0-15 in {
- def sub#Index : SubRegIndex;
+ // Indices are used in a variety of ways here, so don't set a size/offset.
+ def sub#Index : SubRegIndex<-1, -1>;
}
def INDIRECT_BASE_ADDR : Register <"INDIRECT_BASE_ADDR">;
diff --git a/lib/Target/R600/AMDGPUStructurizeCFG.cpp b/lib/Target/R600/AMDGPUStructurizeCFG.cpp
deleted file mode 100644
index dea43b874c6f..000000000000
--- a/lib/Target/R600/AMDGPUStructurizeCFG.cpp
+++ /dev/null
@@ -1,896 +0,0 @@
-//===-- AMDGPUStructurizeCFG.cpp - ------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// The pass implemented in this file transforms the program's control flow
-/// graph into a form that's suitable for code generation on hardware that
-/// implements control flow by execution masking. This currently includes all
-/// AMD GPUs but may also be useful for other types of hardware.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "llvm/ADT/SCCIterator.h"
-#include "llvm/ADT/MapVector.h"
-#include "llvm/Analysis/RegionInfo.h"
-#include "llvm/Analysis/RegionIterator.h"
-#include "llvm/Analysis/RegionPass.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Transforms/Utils/SSAUpdater.h"
-#include "llvm/Support/PatternMatch.h"
-
-using namespace llvm;
-using namespace llvm::PatternMatch;
-
-namespace {
-
-// Definition of the complex types used in this pass.
-
-typedef std::pair<BasicBlock *, Value *> BBValuePair;
-
-typedef SmallVector<RegionNode*, 8> RNVector;
-typedef SmallVector<BasicBlock*, 8> BBVector;
-typedef SmallVector<BranchInst*, 8> BranchVector;
-typedef SmallVector<BBValuePair, 2> BBValueVector;
-
-typedef SmallPtrSet<BasicBlock *, 8> BBSet;
-
-typedef MapVector<PHINode *, BBValueVector> PhiMap;
-typedef MapVector<BasicBlock *, BBVector> BB2BBVecMap;
-
-typedef DenseMap<DomTreeNode *, unsigned> DTN2UnsignedMap;
-typedef DenseMap<BasicBlock *, PhiMap> BBPhiMap;
-typedef DenseMap<BasicBlock *, Value *> BBPredicates;
-typedef DenseMap<BasicBlock *, BBPredicates> PredMap;
-typedef DenseMap<BasicBlock *, BasicBlock*> BB2BBMap;
-
-// The name for newly created blocks.
-
-static const char *FlowBlockName = "Flow";
-
-/// @brief Find the nearest common dominator for multiple BasicBlocks
-///
-/// Helper class for AMDGPUStructurizeCFG
-/// TODO: Maybe move into common code
-class NearestCommonDominator {
-
- DominatorTree *DT;
-
- DTN2UnsignedMap IndexMap;
-
- BasicBlock *Result;
- unsigned ResultIndex;
- bool ExplicitMentioned;
-
-public:
- /// \brief Start a new query
- NearestCommonDominator(DominatorTree *DomTree) {
- DT = DomTree;
- Result = 0;
- }
-
- /// \brief Add BB to the resulting dominator
- void addBlock(BasicBlock *BB, bool Remember = true) {
-
- DomTreeNode *Node = DT->getNode(BB);
-
- if (Result == 0) {
- unsigned Numbering = 0;
- for (;Node;Node = Node->getIDom())
- IndexMap[Node] = ++Numbering;
- Result = BB;
- ResultIndex = 1;
- ExplicitMentioned = Remember;
- return;
- }
-
- for (;Node;Node = Node->getIDom())
- if (IndexMap.count(Node))
- break;
- else
- IndexMap[Node] = 0;
-
- assert(Node && "Dominator tree invalid!");
-
- unsigned Numbering = IndexMap[Node];
- if (Numbering > ResultIndex) {
- Result = Node->getBlock();
- ResultIndex = Numbering;
- ExplicitMentioned = Remember && (Result == BB);
- } else if (Numbering == ResultIndex) {
- ExplicitMentioned |= Remember;
- }
- }
-
- /// \brief Is "Result" one of the BBs added with "Remember" = True?
- bool wasResultExplicitMentioned() {
- return ExplicitMentioned;
- }
-
- /// \brief Get the query result
- BasicBlock *getResult() {
- return Result;
- }
-};
-
-/// @brief Transforms the control flow graph on one single entry/exit region
-/// at a time.
-///
-/// After the transform all "If"/"Then"/"Else" style control flow looks like
-/// this:
-///
-/// \verbatim
-/// 1
-/// ||
-/// | |
-/// 2 |
-/// | /
-/// |/
-/// 3
-/// || Where:
-/// | | 1 = "If" block, calculates the condition
-/// 4 | 2 = "Then" subregion, runs if the condition is true
-/// | / 3 = "Flow" blocks, newly inserted flow blocks, rejoins the flow
-/// |/ 4 = "Else" optional subregion, runs if the condition is false
-/// 5 5 = "End" block, also rejoins the control flow
-/// \endverbatim
-///
-/// Control flow is expressed as a branch where the true exit goes into the
-/// "Then"/"Else" region, while the false exit skips the region
-/// The condition for the optional "Else" region is expressed as a PHI node.
-/// The incomming values of the PHI node are true for the "If" edge and false
-/// for the "Then" edge.
-///
-/// In addition, even complicated loops look like this:
-///
-/// \verbatim
-/// 1
-/// ||
-/// | |
-/// 2 ^ Where:
-/// | / 1 = "Entry" block
-/// |/ 2 = "Loop" optional subregion, with all exits at "Flow" block
-/// 3 3 = "Flow" block, with back edge to entry block
-/// |
-/// \endverbatim
-///
-/// The back edge of the "Flow" block is always on the false side of the branch
-/// while the true side continues the general flow. So the loop condition
-/// consists of a network of PHI nodes where the true incoming values express
-/// breaks and the false values express continue states.
-class AMDGPUStructurizeCFG : public RegionPass {
-
- static char ID;
-
- Type *Boolean;
- ConstantInt *BoolTrue;
- ConstantInt *BoolFalse;
- UndefValue *BoolUndef;
-
- Function *Func;
- Region *ParentRegion;
-
- DominatorTree *DT;
-
- RNVector Order;
- BBSet Visited;
-
- BBPhiMap DeletedPhis;
- BB2BBVecMap AddedPhis;
-
- PredMap Predicates;
- BranchVector Conditions;
-
- BB2BBMap Loops;
- PredMap LoopPreds;
- BranchVector LoopConds;
-
- RegionNode *PrevNode;
-
- void orderNodes();
-
- void analyzeLoops(RegionNode *N);
-
- Value *invert(Value *Condition);
-
- Value *buildCondition(BranchInst *Term, unsigned Idx, bool Invert);
-
- void gatherPredicates(RegionNode *N);
-
- void collectInfos();
-
- void insertConditions(bool Loops);
-
- void delPhiValues(BasicBlock *From, BasicBlock *To);
-
- void addPhiValues(BasicBlock *From, BasicBlock *To);
-
- void setPhiValues();
-
- void killTerminator(BasicBlock *BB);
-
- void changeExit(RegionNode *Node, BasicBlock *NewExit,
- bool IncludeDominator);
-
- BasicBlock *getNextFlow(BasicBlock *Dominator);
-
- BasicBlock *needPrefix(bool NeedEmpty);
-
- BasicBlock *needPostfix(BasicBlock *Flow, bool ExitUseAllowed);
-
- void setPrevNode(BasicBlock *BB);
-
- bool dominatesPredicates(BasicBlock *BB, RegionNode *Node);
-
- bool isPredictableTrue(RegionNode *Node);
-
- void wireFlow(bool ExitUseAllowed, BasicBlock *LoopEnd);
-
- void handleLoops(bool ExitUseAllowed, BasicBlock *LoopEnd);
-
- void createFlow();
-
- void rebuildSSA();
-
-public:
- AMDGPUStructurizeCFG():
- RegionPass(ID) {
-
- initializeRegionInfoPass(*PassRegistry::getPassRegistry());
- }
-
- using Pass::doInitialization;
- virtual bool doInitialization(Region *R, RGPassManager &RGM);
-
- virtual bool runOnRegion(Region *R, RGPassManager &RGM);
-
- virtual const char *getPassName() const {
- return "AMDGPU simplify control flow";
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
-
- AU.addRequired<DominatorTree>();
- AU.addPreserved<DominatorTree>();
- RegionPass::getAnalysisUsage(AU);
- }
-
-};
-
-} // end anonymous namespace
-
-char AMDGPUStructurizeCFG::ID = 0;
-
-/// \brief Initialize the types and constants used in the pass
-bool AMDGPUStructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) {
- LLVMContext &Context = R->getEntry()->getContext();
-
- Boolean = Type::getInt1Ty(Context);
- BoolTrue = ConstantInt::getTrue(Context);
- BoolFalse = ConstantInt::getFalse(Context);
- BoolUndef = UndefValue::get(Boolean);
-
- return false;
-}
-
-/// \brief Build up the general order of nodes
-void AMDGPUStructurizeCFG::orderNodes() {
- scc_iterator<Region *> I = scc_begin(ParentRegion),
- E = scc_end(ParentRegion);
- for (Order.clear(); I != E; ++I) {
- std::vector<RegionNode *> &Nodes = *I;
- Order.append(Nodes.begin(), Nodes.end());
- }
-}
-
-/// \brief Determine the end of the loops
-void AMDGPUStructurizeCFG::analyzeLoops(RegionNode *N) {
-
- if (N->isSubRegion()) {
- // Test for exit as back edge
- BasicBlock *Exit = N->getNodeAs<Region>()->getExit();
- if (Visited.count(Exit))
- Loops[Exit] = N->getEntry();
-
- } else {
- // Test for successors as back edge
- BasicBlock *BB = N->getNodeAs<BasicBlock>();
- BranchInst *Term = cast<BranchInst>(BB->getTerminator());
-
- for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
- BasicBlock *Succ = Term->getSuccessor(i);
-
- if (Visited.count(Succ))
- Loops[Succ] = BB;
- }
- }
-}
-
-/// \brief Invert the given condition
-Value *AMDGPUStructurizeCFG::invert(Value *Condition) {
-
- // First: Check if it's a constant
- if (Condition == BoolTrue)
- return BoolFalse;
-
- if (Condition == BoolFalse)
- return BoolTrue;
-
- if (Condition == BoolUndef)
- return BoolUndef;
-
- // Second: If the condition is already inverted, return the original value
- if (match(Condition, m_Not(m_Value(Condition))))
- return Condition;
-
- // Third: Check all the users for an invert
- BasicBlock *Parent = cast<Instruction>(Condition)->getParent();
- for (Value::use_iterator I = Condition->use_begin(),
- E = Condition->use_end(); I != E; ++I) {
-
- Instruction *User = dyn_cast<Instruction>(*I);
- if (!User || User->getParent() != Parent)
- continue;
-
- if (match(*I, m_Not(m_Specific(Condition))))
- return *I;
- }
-
- // Last option: Create a new instruction
- return BinaryOperator::CreateNot(Condition, "", Parent->getTerminator());
-}
-
-/// \brief Build the condition for one edge
-Value *AMDGPUStructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx,
- bool Invert) {
- Value *Cond = Invert ? BoolFalse : BoolTrue;
- if (Term->isConditional()) {
- Cond = Term->getCondition();
-
- if (Idx != Invert)
- Cond = invert(Cond);
- }
- return Cond;
-}
-
-/// \brief Analyze the predecessors of each block and build up predicates
-void AMDGPUStructurizeCFG::gatherPredicates(RegionNode *N) {
-
- RegionInfo *RI = ParentRegion->getRegionInfo();
- BasicBlock *BB = N->getEntry();
- BBPredicates &Pred = Predicates[BB];
- BBPredicates &LPred = LoopPreds[BB];
-
- for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
- PI != PE; ++PI) {
-
- // Ignore it if it's a branch from outside into our region entry
- if (!ParentRegion->contains(*PI))
- continue;
-
- Region *R = RI->getRegionFor(*PI);
- if (R == ParentRegion) {
-
- // It's a top level block in our region
- BranchInst *Term = cast<BranchInst>((*PI)->getTerminator());
- for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
- BasicBlock *Succ = Term->getSuccessor(i);
- if (Succ != BB)
- continue;
-
- if (Visited.count(*PI)) {
- // Normal forward edge
- if (Term->isConditional()) {
- // Try to treat it like an ELSE block
- BasicBlock *Other = Term->getSuccessor(!i);
- if (Visited.count(Other) && !Loops.count(Other) &&
- !Pred.count(Other) && !Pred.count(*PI)) {
-
- Pred[Other] = BoolFalse;
- Pred[*PI] = BoolTrue;
- continue;
- }
- }
- Pred[*PI] = buildCondition(Term, i, false);
-
- } else {
- // Back edge
- LPred[*PI] = buildCondition(Term, i, true);
- }
- }
-
- } else {
-
- // It's an exit from a sub region
- while(R->getParent() != ParentRegion)
- R = R->getParent();
-
- // Edge from inside a subregion to its entry, ignore it
- if (R == N)
- continue;
-
- BasicBlock *Entry = R->getEntry();
- if (Visited.count(Entry))
- Pred[Entry] = BoolTrue;
- else
- LPred[Entry] = BoolFalse;
- }
- }
-}
-
-/// \brief Collect various loop and predicate infos
-void AMDGPUStructurizeCFG::collectInfos() {
-
- // Reset predicate
- Predicates.clear();
-
- // and loop infos
- Loops.clear();
- LoopPreds.clear();
-
- // Reset the visited nodes
- Visited.clear();
-
- for (RNVector::reverse_iterator OI = Order.rbegin(), OE = Order.rend();
- OI != OE; ++OI) {
-
- // Analyze all the conditions leading to a node
- gatherPredicates(*OI);
-
- // Remember that we've seen this node
- Visited.insert((*OI)->getEntry());
-
- // Find the last back edges
- analyzeLoops(*OI);
- }
-}
-
-/// \brief Insert the missing branch conditions
-void AMDGPUStructurizeCFG::insertConditions(bool Loops) {
- BranchVector &Conds = Loops ? LoopConds : Conditions;
- Value *Default = Loops ? BoolTrue : BoolFalse;
- SSAUpdater PhiInserter;
-
- for (BranchVector::iterator I = Conds.begin(),
- E = Conds.end(); I != E; ++I) {
-
- BranchInst *Term = *I;
- assert(Term->isConditional());
-
- BasicBlock *Parent = Term->getParent();
- BasicBlock *SuccTrue = Term->getSuccessor(0);
- BasicBlock *SuccFalse = Term->getSuccessor(1);
-
- PhiInserter.Initialize(Boolean, "");
- PhiInserter.AddAvailableValue(&Func->getEntryBlock(), Default);
- PhiInserter.AddAvailableValue(Loops ? SuccFalse : Parent, Default);
-
- BBPredicates &Preds = Loops ? LoopPreds[SuccFalse] : Predicates[SuccTrue];
-
- NearestCommonDominator Dominator(DT);
- Dominator.addBlock(Parent, false);
-
- Value *ParentValue = 0;
- for (BBPredicates::iterator PI = Preds.begin(), PE = Preds.end();
- PI != PE; ++PI) {
-
- if (PI->first == Parent) {
- ParentValue = PI->second;
- break;
- }
- PhiInserter.AddAvailableValue(PI->first, PI->second);
- Dominator.addBlock(PI->first);
- }
-
- if (ParentValue) {
- Term->setCondition(ParentValue);
- } else {
- if (!Dominator.wasResultExplicitMentioned())
- PhiInserter.AddAvailableValue(Dominator.getResult(), Default);
-
- Term->setCondition(PhiInserter.GetValueInMiddleOfBlock(Parent));
- }
- }
-}
-
-/// \brief Remove all PHI values coming from "From" into "To" and remember
-/// them in DeletedPhis
-void AMDGPUStructurizeCFG::delPhiValues(BasicBlock *From, BasicBlock *To) {
- PhiMap &Map = DeletedPhis[To];
- for (BasicBlock::iterator I = To->begin(), E = To->end();
- I != E && isa<PHINode>(*I);) {
-
- PHINode &Phi = cast<PHINode>(*I++);
- while (Phi.getBasicBlockIndex(From) != -1) {
- Value *Deleted = Phi.removeIncomingValue(From, false);
- Map[&Phi].push_back(std::make_pair(From, Deleted));
- }
- }
-}
-
-/// \brief Add a dummy PHI value as soon as we know the new predecessor
-void AMDGPUStructurizeCFG::addPhiValues(BasicBlock *From, BasicBlock *To) {
- for (BasicBlock::iterator I = To->begin(), E = To->end();
- I != E && isa<PHINode>(*I);) {
-
- PHINode &Phi = cast<PHINode>(*I++);
- Value *Undef = UndefValue::get(Phi.getType());
- Phi.addIncoming(Undef, From);
- }
- AddedPhis[To].push_back(From);
-}
-
-/// \brief Add the real PHI value as soon as everything is set up
-void AMDGPUStructurizeCFG::setPhiValues() {
-
- SSAUpdater Updater;
- for (BB2BBVecMap::iterator AI = AddedPhis.begin(), AE = AddedPhis.end();
- AI != AE; ++AI) {
-
- BasicBlock *To = AI->first;
- BBVector &From = AI->second;
-
- if (!DeletedPhis.count(To))
- continue;
-
- PhiMap &Map = DeletedPhis[To];
- for (PhiMap::iterator PI = Map.begin(), PE = Map.end();
- PI != PE; ++PI) {
-
- PHINode *Phi = PI->first;
- Value *Undef = UndefValue::get(Phi->getType());
- Updater.Initialize(Phi->getType(), "");
- Updater.AddAvailableValue(&Func->getEntryBlock(), Undef);
- Updater.AddAvailableValue(To, Undef);
-
- NearestCommonDominator Dominator(DT);
- Dominator.addBlock(To, false);
- for (BBValueVector::iterator VI = PI->second.begin(),
- VE = PI->second.end(); VI != VE; ++VI) {
-
- Updater.AddAvailableValue(VI->first, VI->second);
- Dominator.addBlock(VI->first);
- }
-
- if (!Dominator.wasResultExplicitMentioned())
- Updater.AddAvailableValue(Dominator.getResult(), Undef);
-
- for (BBVector::iterator FI = From.begin(), FE = From.end();
- FI != FE; ++FI) {
-
- int Idx = Phi->getBasicBlockIndex(*FI);
- assert(Idx != -1);
- Phi->setIncomingValue(Idx, Updater.GetValueAtEndOfBlock(*FI));
- }
- }
-
- DeletedPhis.erase(To);
- }
- assert(DeletedPhis.empty());
-}
-
-/// \brief Remove phi values from all successors and then remove the terminator.
-void AMDGPUStructurizeCFG::killTerminator(BasicBlock *BB) {
- TerminatorInst *Term = BB->getTerminator();
- if (!Term)
- return;
-
- for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB);
- SI != SE; ++SI) {
-
- delPhiValues(BB, *SI);
- }
-
- Term->eraseFromParent();
-}
-
-/// \brief Let node exit(s) point to NewExit
-void AMDGPUStructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit,
- bool IncludeDominator) {
-
- if (Node->isSubRegion()) {
- Region *SubRegion = Node->getNodeAs<Region>();
- BasicBlock *OldExit = SubRegion->getExit();
- BasicBlock *Dominator = 0;
-
- // Find all the edges from the sub region to the exit
- for (pred_iterator I = pred_begin(OldExit), E = pred_end(OldExit);
- I != E;) {
-
- BasicBlock *BB = *I++;
- if (!SubRegion->contains(BB))
- continue;
-
- // Modify the edges to point to the new exit
- delPhiValues(BB, OldExit);
- BB->getTerminator()->replaceUsesOfWith(OldExit, NewExit);
- addPhiValues(BB, NewExit);
-
- // Find the new dominator (if requested)
- if (IncludeDominator) {
- if (!Dominator)
- Dominator = BB;
- else
- Dominator = DT->findNearestCommonDominator(Dominator, BB);
- }
- }
-
- // Change the dominator (if requested)
- if (Dominator)
- DT->changeImmediateDominator(NewExit, Dominator);
-
- // Update the region info
- SubRegion->replaceExit(NewExit);
-
- } else {
- BasicBlock *BB = Node->getNodeAs<BasicBlock>();
- killTerminator(BB);
- BranchInst::Create(NewExit, BB);
- addPhiValues(BB, NewExit);
- if (IncludeDominator)
- DT->changeImmediateDominator(NewExit, BB);
- }
-}
-
-/// \brief Create a new flow node and update dominator tree and region info
-BasicBlock *AMDGPUStructurizeCFG::getNextFlow(BasicBlock *Dominator) {
- LLVMContext &Context = Func->getContext();
- BasicBlock *Insert = Order.empty() ? ParentRegion->getExit() :
- Order.back()->getEntry();
- BasicBlock *Flow = BasicBlock::Create(Context, FlowBlockName,
- Func, Insert);
- DT->addNewBlock(Flow, Dominator);
- ParentRegion->getRegionInfo()->setRegionFor(Flow, ParentRegion);
- return Flow;
-}
-
-/// \brief Create a new or reuse the previous node as flow node
-BasicBlock *AMDGPUStructurizeCFG::needPrefix(bool NeedEmpty) {
-
- BasicBlock *Entry = PrevNode->getEntry();
-
- if (!PrevNode->isSubRegion()) {
- killTerminator(Entry);
- if (!NeedEmpty || Entry->getFirstInsertionPt() == Entry->end())
- return Entry;
-
- }
-
- // create a new flow node
- BasicBlock *Flow = getNextFlow(Entry);
-
- // and wire it up
- changeExit(PrevNode, Flow, true);
- PrevNode = ParentRegion->getBBNode(Flow);
- return Flow;
-}
-
-/// \brief Returns the region exit if possible, otherwise just a new flow node
-BasicBlock *AMDGPUStructurizeCFG::needPostfix(BasicBlock *Flow,
- bool ExitUseAllowed) {
-
- if (Order.empty() && ExitUseAllowed) {
- BasicBlock *Exit = ParentRegion->getExit();
- DT->changeImmediateDominator(Exit, Flow);
- addPhiValues(Flow, Exit);
- return Exit;
- }
- return getNextFlow(Flow);
-}
-
-/// \brief Set the previous node
-void AMDGPUStructurizeCFG::setPrevNode(BasicBlock *BB) {
- PrevNode = ParentRegion->contains(BB) ? ParentRegion->getBBNode(BB) : 0;
-}
-
-/// \brief Does BB dominate all the predicates of Node?
-bool AMDGPUStructurizeCFG::dominatesPredicates(BasicBlock *BB, RegionNode *Node) {
- BBPredicates &Preds = Predicates[Node->getEntry()];
- for (BBPredicates::iterator PI = Preds.begin(), PE = Preds.end();
- PI != PE; ++PI) {
-
- if (!DT->dominates(BB, PI->first))
- return false;
- }
- return true;
-}
-
-/// \brief Can we predict that this node will always be called?
-bool AMDGPUStructurizeCFG::isPredictableTrue(RegionNode *Node) {
-
- BBPredicates &Preds = Predicates[Node->getEntry()];
- bool Dominated = false;
-
- // Region entry is always true
- if (PrevNode == 0)
- return true;
-
- for (BBPredicates::iterator I = Preds.begin(), E = Preds.end();
- I != E; ++I) {
-
- if (I->second != BoolTrue)
- return false;
-
- if (!Dominated && DT->dominates(I->first, PrevNode->getEntry()))
- Dominated = true;
- }
-
- // TODO: The dominator check is too strict
- return Dominated;
-}
-
-/// Take one node from the order vector and wire it up
-void AMDGPUStructurizeCFG::wireFlow(bool ExitUseAllowed,
- BasicBlock *LoopEnd) {
-
- RegionNode *Node = Order.pop_back_val();
- Visited.insert(Node->getEntry());
-
- if (isPredictableTrue(Node)) {
- // Just a linear flow
- if (PrevNode) {
- changeExit(PrevNode, Node->getEntry(), true);
- }
- PrevNode = Node;
-
- } else {
- // Insert extra prefix node (or reuse last one)
- BasicBlock *Flow = needPrefix(false);
-
- // Insert extra postfix node (or use exit instead)
- BasicBlock *Entry = Node->getEntry();
- BasicBlock *Next = needPostfix(Flow, ExitUseAllowed);
-
- // let it point to entry and next block
- Conditions.push_back(BranchInst::Create(Entry, Next, BoolUndef, Flow));
- addPhiValues(Flow, Entry);
- DT->changeImmediateDominator(Entry, Flow);
-
- PrevNode = Node;
- while (!Order.empty() && !Visited.count(LoopEnd) &&
- dominatesPredicates(Entry, Order.back())) {
- handleLoops(false, LoopEnd);
- }
-
- changeExit(PrevNode, Next, false);
- setPrevNode(Next);
- }
-}
-
-void AMDGPUStructurizeCFG::handleLoops(bool ExitUseAllowed,
- BasicBlock *LoopEnd) {
- RegionNode *Node = Order.back();
- BasicBlock *LoopStart = Node->getEntry();
-
- if (!Loops.count(LoopStart)) {
- wireFlow(ExitUseAllowed, LoopEnd);
- return;
- }
-
- if (!isPredictableTrue(Node))
- LoopStart = needPrefix(true);
-
- LoopEnd = Loops[Node->getEntry()];
- wireFlow(false, LoopEnd);
- while (!Visited.count(LoopEnd)) {
- handleLoops(false, LoopEnd);
- }
-
- // Create an extra loop end node
- LoopEnd = needPrefix(false);
- BasicBlock *Next = needPostfix(LoopEnd, ExitUseAllowed);
- LoopConds.push_back(BranchInst::Create(Next, LoopStart,
- BoolUndef, LoopEnd));
- addPhiValues(LoopEnd, LoopStart);
- setPrevNode(Next);
-}
-
-/// After this function control flow looks like it should be, but
-/// branches and PHI nodes only have undefined conditions.
-void AMDGPUStructurizeCFG::createFlow() {
-
- BasicBlock *Exit = ParentRegion->getExit();
- bool EntryDominatesExit = DT->dominates(ParentRegion->getEntry(), Exit);
-
- DeletedPhis.clear();
- AddedPhis.clear();
- Conditions.clear();
- LoopConds.clear();
-
- PrevNode = 0;
- Visited.clear();
-
- while (!Order.empty()) {
- handleLoops(EntryDominatesExit, 0);
- }
-
- if (PrevNode)
- changeExit(PrevNode, Exit, EntryDominatesExit);
- else
- assert(EntryDominatesExit);
-}
-
-/// Handle a rare case where the disintegrated nodes' instructions
-/// no longer dominate all their uses. Not sure if this is really necessary.
-void AMDGPUStructurizeCFG::rebuildSSA() {
- SSAUpdater Updater;
- for (Region::block_iterator I = ParentRegion->block_begin(),
- E = ParentRegion->block_end();
- I != E; ++I) {
-
- BasicBlock *BB = *I;
- for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
- II != IE; ++II) {
-
- bool Initialized = false;
- for (Use *I = &II->use_begin().getUse(), *Next; I; I = Next) {
-
- Next = I->getNext();
-
- Instruction *User = cast<Instruction>(I->getUser());
- if (User->getParent() == BB) {
- continue;
-
- } else if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
- if (UserPN->getIncomingBlock(*I) == BB)
- continue;
- }
-
- if (DT->dominates(II, User))
- continue;
-
- if (!Initialized) {
- Value *Undef = UndefValue::get(II->getType());
- Updater.Initialize(II->getType(), "");
- Updater.AddAvailableValue(&Func->getEntryBlock(), Undef);
- Updater.AddAvailableValue(BB, II);
- Initialized = true;
- }
- Updater.RewriteUseAfterInsertions(*I);
- }
- }
- }
-}
-
-/// \brief Run the transformation for each region found
-bool AMDGPUStructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) {
- if (R->isTopLevelRegion())
- return false;
-
- Func = R->getEntry()->getParent();
- ParentRegion = R;
-
- DT = &getAnalysis<DominatorTree>();
-
- orderNodes();
- collectInfos();
- createFlow();
- insertConditions(false);
- insertConditions(true);
- setPhiValues();
- rebuildSSA();
-
- // Cleanup
- Order.clear();
- Visited.clear();
- DeletedPhis.clear();
- AddedPhis.clear();
- Predicates.clear();
- Conditions.clear();
- Loops.clear();
- LoopPreds.clear();
- LoopConds.clear();
-
- return true;
-}
-
-/// \brief Create the pass
-Pass *llvm::createAMDGPUStructurizeCFGPass() {
- return new AMDGPUStructurizeCFG();
-}
diff --git a/lib/Target/R600/AMDGPUSubtarget.cpp b/lib/Target/R600/AMDGPUSubtarget.cpp
index a7e1d7b6d54c..061793a68b9d 100644
--- a/lib/Target/R600/AMDGPUSubtarget.cpp
+++ b/lib/Target/R600/AMDGPUSubtarget.cpp
@@ -25,8 +25,6 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS) :
AMDGPUGenSubtargetInfo(TT, CPU, FS), DumpCode(false) {
InstrItins = getInstrItineraryForCPU(CPU);
- memset(CapsOverride, 0, sizeof(*CapsOverride)
- * AMDGPUDeviceInfo::MaxNumberCapabilities);
// Default card
StringRef GPU = CPU;
Is64bit = false;
@@ -34,21 +32,16 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS) :
DefaultSize[1] = 1;
DefaultSize[2] = 1;
HasVertexCache = false;
+ TexVTXClauseSize = 0;
+ Gen = AMDGPUSubtarget::R600;
+ FP64 = false;
+ CaymanISA = false;
+ EnableIRStructurizer = true;
+ EnableIfCvt = true;
ParseSubtargetFeatures(GPU, FS);
DevName = GPU;
- Device = AMDGPUDeviceInfo::getDeviceFromName(DevName, this, Is64bit);
}
-AMDGPUSubtarget::~AMDGPUSubtarget() {
- delete Device;
-}
-
-bool
-AMDGPUSubtarget::isOverride(AMDGPUDeviceInfo::Caps caps) const {
- assert(caps < AMDGPUDeviceInfo::MaxNumberCapabilities &&
- "Caps index is out of bounds!");
- return CapsOverride[caps];
-}
bool
AMDGPUSubtarget::is64bit() const {
return Is64bit;
@@ -57,6 +50,30 @@ bool
AMDGPUSubtarget::hasVertexCache() const {
return HasVertexCache;
}
+short
+AMDGPUSubtarget::getTexVTXClauseSize() const {
+ return TexVTXClauseSize;
+}
+enum AMDGPUSubtarget::Generation
+AMDGPUSubtarget::getGeneration() const {
+ return Gen;
+}
+bool
+AMDGPUSubtarget::hasHWFP64() const {
+ return FP64;
+}
+bool
+AMDGPUSubtarget::hasCaymanISA() const {
+ return CaymanISA;
+}
+bool
+AMDGPUSubtarget::IsIRStructurizerEnabled() const {
+ return EnableIRStructurizer;
+}
+bool
+AMDGPUSubtarget::isIfCvtEnabled() const {
+ return EnableIfCvt;
+}
bool
AMDGPUSubtarget::isTargetELF() const {
return false;
@@ -72,21 +89,32 @@ AMDGPUSubtarget::getDefaultSize(uint32_t dim) const {
std::string
AMDGPUSubtarget::getDataLayout() const {
- if (!Device) {
- return std::string("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16"
- "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32"
- "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64"
- "-v96:128:128-v128:128:128-v192:256:256-v256:256:256"
- "-v512:512:512-v1024:1024:1024-v2048:2048:2048-a0:0:64");
- }
- return Device->getDataLayout();
+ std::string DataLayout = std::string(
+ "e"
+ "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32"
+ "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128"
+ "-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+ "-n32:64"
+ );
+
+ if (hasHWFP64()) {
+ DataLayout.append("-f64:64:64");
+ }
+
+ if (is64bit()) {
+ DataLayout.append("-p:64:64:64");
+ } else {
+ DataLayout.append("-p:32:32:32");
+ }
+
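+  // Local (address space 3) pointers are 32-bit on Southern Islands and later.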
+ if (Gen >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ DataLayout.append("-p3:32:32:32");
+ }
+
+ return DataLayout;
}
std::string
AMDGPUSubtarget::getDeviceName() const {
return DevName;
}
-const AMDGPUDevice *
-AMDGPUSubtarget::device() const {
- return Device;
-}
diff --git a/lib/Target/R600/AMDGPUSubtarget.h b/lib/Target/R600/AMDGPUSubtarget.h
index b6501a456283..4288d275c99e 100644
--- a/lib/Target/R600/AMDGPUSubtarget.h
+++ b/lib/Target/R600/AMDGPUSubtarget.h
@@ -14,7 +14,7 @@
#ifndef AMDGPUSUBTARGET_H
#define AMDGPUSUBTARGET_H
-#include "AMDILDevice.h"
+#include "AMDGPU.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Target/TargetSubtargetInfo.h"
@@ -27,9 +27,17 @@
namespace llvm {
class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo {
+public:
+ enum Generation {
+ R600 = 0,
+ R700,
+ EVERGREEN,
+ NORTHERN_ISLANDS,
+ SOUTHERN_ISLANDS,
+ SEA_ISLANDS
+ };
+
private:
- bool CapsOverride[AMDGPUDeviceInfo::MaxNumberCapabilities];
- const AMDGPUDevice *Device;
size_t DefaultSize[3];
std::string DevName;
bool Is64bit;
@@ -37,23 +45,36 @@ private:
bool DumpCode;
bool R600ALUInst;
bool HasVertexCache;
+ short TexVTXClauseSize;
+ enum Generation Gen;
+ bool FP64;
+ bool CaymanISA;
+ bool EnableIRStructurizer;
+ bool EnableIfCvt;
InstrItineraryData InstrItins;
public:
AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS);
- virtual ~AMDGPUSubtarget();
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
virtual void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
- bool isOverride(AMDGPUDeviceInfo::Caps) const;
bool is64bit() const;
bool hasVertexCache() const;
+ short getTexVTXClauseSize() const;
+ enum Generation getGeneration() const;
+ bool hasHWFP64() const;
+ bool hasCaymanISA() const;
+ bool IsIRStructurizerEnabled() const;
+ bool isIfCvtEnabled() const;
+
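+  // Enable the MachineScheduler pass only for R600-family (pre-SI) subtargets.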
+ virtual bool enableMachineScheduler() const {
+ return getGeneration() <= NORTHERN_ISLANDS;
+ }
// Helper functions to simplify if statements
bool isTargetELF() const;
- const AMDGPUDevice* device() const;
std::string getDataLayout() const;
std::string getDeviceName() const;
virtual size_t getDefaultSize(uint32_t dim) const;
diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp
index 31fbf32d0c9a..bc4f5d720ae2 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.cpp
+++ b/lib/Target/R600/AMDGPUTargetMachine.cpp
@@ -33,6 +33,7 @@
#include "llvm/Transforms/Scalar.h"
#include <llvm/CodeGen/Passes.h>
+
using namespace llvm;
extern "C" void LLVMInitializeR600Target() {
@@ -59,17 +60,19 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
Subtarget(TT, CPU, FS),
Layout(Subtarget.getDataLayout()),
FrameLowering(TargetFrameLowering::StackGrowsUp,
- Subtarget.device()->getStackAlignment(), 0),
+ 64 * 16 // Maximum stack alignment (long16)
+ , 0),
IntrinsicInfo(this),
InstrItins(&Subtarget.getInstrItineraryData()) {
// TLInfo uses InstrInfo so it must be initialized after.
- if (Subtarget.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- InstrInfo = new R600InstrInfo(*this);
- TLInfo = new R600TargetLowering(*this);
+ if (Subtarget.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ InstrInfo.reset(new R600InstrInfo(*this));
+ TLInfo.reset(new R600TargetLowering(*this));
} else {
- InstrInfo = new SIInstrInfo(*this);
- TLInfo = new SITargetLowering(*this);
+ InstrInfo.reset(new SIInstrInfo(*this));
+ TLInfo.reset(new SITargetLowering(*this));
}
+ initAsmInfo();
}
AMDGPUTargetMachine::~AMDGPUTargetMachine() {
@@ -79,18 +82,20 @@ namespace {
class AMDGPUPassConfig : public TargetPassConfig {
public:
AMDGPUPassConfig(AMDGPUTargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {
- const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- enablePass(&MachineSchedulerID);
- MachineSchedRegistry::setDefault(createR600MachineScheduler);
- }
- }
+ : TargetPassConfig(TM, PM) {}
AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
return getTM<AMDGPUTargetMachine>();
}
+ virtual ScheduleDAGInstrs *
+ createMachineScheduler(MachineSchedContext *C) const {
+ const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ return createR600MachineScheduler(C);
+ return 0;
+ }
+
virtual bool addPreISel();
virtual bool addInstSelector();
virtual bool addPreRegAlloc();
@@ -104,53 +109,76 @@ TargetPassConfig *AMDGPUTargetMachine::createPassConfig(PassManagerBase &PM) {
return new AMDGPUPassConfig(this, PM);
}
+//===----------------------------------------------------------------------===//
+// AMDGPU Analysis Pass Setup
+//===----------------------------------------------------------------------===//
+
+void AMDGPUTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
+ // First add the target-independent BasicTTI pass, then our AMDGPU pass. This
+ // allows the AMDGPU pass to delegate to the target-independent layer when
+ // appropriate.
+ PM.add(createBasicTargetTransformInfoPass(this));
+ PM.add(createAMDGPUTargetTransformInfoPass(this));
+}
+
bool
AMDGPUPassConfig::addPreISel() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
- addPass(createAMDGPUStructurizeCFGPass());
+ addPass(createFlattenCFGPass());
+ if (ST.IsIRStructurizerEnabled())
+ addPass(createStructurizeCFGPass());
+ if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ addPass(createSinkingPass());
+ addPass(createSITypeRewriter());
addPass(createSIAnnotateControlFlowPass());
+ } else {
+ addPass(createR600TextureIntrinsicsReplacer());
}
return false;
}
bool AMDGPUPassConfig::addInstSelector() {
addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
-
- const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- // The callbacks this pass uses are not implemented yet on SI.
- addPass(createAMDGPUIndirectAddressingPass(*TM));
- }
return false;
}
bool AMDGPUPassConfig::addPreRegAlloc() {
addPass(createAMDGPUConvertToISAPass(*TM));
+ const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
+
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ addPass(createR600VectorRegMerger(*TM));
+ } else {
+ addPass(createSIFixSGPRCopiesPass(*TM));
+ }
return false;
}
bool AMDGPUPassConfig::addPostRegAlloc() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
+ if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
addPass(createSIInsertWaits(*TM));
}
return false;
}
bool AMDGPUPassConfig::addPreSched2() {
+ const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- addPass(&IfConverterID);
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ addPass(createR600EmitClauseMarkers(*TM));
+ if (ST.isIfCvtEnabled())
+ addPass(&IfConverterID);
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ addPass(createR600ClauseMergePass(*TM));
return false;
}
bool AMDGPUPassConfig::addPreEmitPass() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- addPass(createAMDGPUCFGPreparationPass(*TM));
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
addPass(createAMDGPUCFGStructurizerPass(*TM));
- addPass(createR600EmitClauseMarkers(*TM));
addPass(createR600ExpandSpecialInstrsPass(*TM));
addPass(&FinalizeMachineBundlesID);
addPass(createR600Packetizer(*TM));
@@ -161,4 +189,3 @@ bool AMDGPUPassConfig::addPreEmitPass() {
return false;
}
-
diff --git a/lib/Target/R600/AMDGPUTargetMachine.h b/lib/Target/R600/AMDGPUTargetMachine.h
index 2afe7873a90c..f942614fe764 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.h
+++ b/lib/Target/R600/AMDGPUTargetMachine.h
@@ -25,44 +25,45 @@
namespace llvm {
-MCAsmInfo* createMCAsmInfo(const Target &T, StringRef TT);
-
class AMDGPUTargetMachine : public LLVMTargetMachine {
AMDGPUSubtarget Subtarget;
const DataLayout Layout;
AMDGPUFrameLowering FrameLowering;
AMDGPUIntrinsicInfo IntrinsicInfo;
- const AMDGPUInstrInfo * InstrInfo;
- AMDGPUTargetLowering * TLInfo;
- const InstrItineraryData* InstrItins;
+ OwningPtr<AMDGPUInstrInfo> InstrInfo;
+ OwningPtr<AMDGPUTargetLowering> TLInfo;
+ const InstrItineraryData *InstrItins;
public:
- AMDGPUTargetMachine(const Target &T, StringRef TT, StringRef FS,
- StringRef CPU,
- TargetOptions Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
- ~AMDGPUTargetMachine();
- virtual const AMDGPUFrameLowering* getFrameLowering() const {
- return &FrameLowering;
- }
- virtual const AMDGPUIntrinsicInfo* getIntrinsicInfo() const {
- return &IntrinsicInfo;
- }
- virtual const AMDGPUInstrInfo *getInstrInfo() const {return InstrInfo;}
- virtual const AMDGPUSubtarget *getSubtargetImpl() const {return &Subtarget; }
- virtual const AMDGPURegisterInfo *getRegisterInfo() const {
- return &InstrInfo->getRegisterInfo();
- }
- virtual AMDGPUTargetLowering * getTargetLowering() const {
- return TLInfo;
- }
- virtual const InstrItineraryData* getInstrItineraryData() const {
- return InstrItins;
- }
- virtual const DataLayout* getDataLayout() const { return &Layout; }
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ AMDGPUTargetMachine(const Target &T, StringRef TT, StringRef FS,
+ StringRef CPU, TargetOptions Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
+ ~AMDGPUTargetMachine();
+ virtual const AMDGPUFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
+ virtual const AMDGPUIntrinsicInfo *getIntrinsicInfo() const {
+ return &IntrinsicInfo;
+ }
+ virtual const AMDGPUInstrInfo *getInstrInfo() const {
+ return InstrInfo.get();
+ }
+ virtual const AMDGPUSubtarget *getSubtargetImpl() const { return &Subtarget; }
+ virtual const AMDGPURegisterInfo *getRegisterInfo() const {
+ return &InstrInfo->getRegisterInfo();
+ }
+ virtual AMDGPUTargetLowering *getTargetLowering() const {
+ return TLInfo.get();
+ }
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return InstrItins;
+ }
+ virtual const DataLayout *getDataLayout() const { return &Layout; }
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+
+ /// \brief Register R600 analysis passes with a pass manager.
+ virtual void addAnalysisPasses(PassManagerBase &PM);
};
} // End namespace llvm
diff --git a/lib/Target/R600/AMDGPUTargetTransformInfo.cpp b/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
new file mode 100644
index 000000000000..8db319c73bb1
--- /dev/null
+++ b/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
@@ -0,0 +1,90 @@
+//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file implements a TargetTransformInfo analysis pass specific to the
+// AMDGPU target machine. It uses the target's detailed information to provide
+// more precise answers to certain TTI queries, while letting the target
+// independent and default TTI implementations handle the rest.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "AMDGPUtti"
+#include "AMDGPU.h"
+#include "AMDGPUTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/CostTable.h"
+using namespace llvm;
+
+// Declare the pass initialization routine locally as target-specific passes
+// don't have a target-wide initialization entry point, and so we rely on the
+// pass constructor initialization.
+namespace llvm {
+void initializeAMDGPUTTIPass(PassRegistry &);
+}
+
+namespace {
+
+class AMDGPUTTI : public ImmutablePass, public TargetTransformInfo {
+ const AMDGPUTargetMachine *TM;
+ const AMDGPUSubtarget *ST;
+ const AMDGPUTargetLowering *TLI;
+
+ /// Estimate the overhead of scalarizing an instruction. Insert and Extract
+ /// are set if the result needs to be inserted and/or extracted from vectors.
+ unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
+
+public:
+ AMDGPUTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
+ llvm_unreachable("This pass cannot be directly constructed");
+ }
+
+ AMDGPUTTI(const AMDGPUTargetMachine *TM)
+ : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
+ TLI(TM->getTargetLowering()) {
+ initializeAMDGPUTTIPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void initializePass() { pushTTIStack(this); }
+
+ virtual void finalizePass() { popTTIStack(); }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ TargetTransformInfo::getAnalysisUsage(AU);
+ }
+
+ /// Pass identification.
+ static char ID;
+
+ /// Provide necessary pointer adjustments for the two base classes.
+ virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ if (ID == &TargetTransformInfo::ID)
+ return (TargetTransformInfo *)this;
+ return this;
+ }
+
+ virtual bool hasBranchDivergence() const;
+
+ /// @}
+};
+
+} // end anonymous namespace
+
+INITIALIZE_AG_PASS(AMDGPUTTI, TargetTransformInfo, "AMDGPUtti",
+ "AMDGPU Target Transform Info", true, true, false)
+char AMDGPUTTI::ID = 0;
+
+ImmutablePass *
+llvm::createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM) {
+ return new AMDGPUTTI(TM);
+}
+
+bool AMDGPUTTI::hasBranchDivergence() const { return true; }
diff --git a/lib/Target/R600/AMDIL.h b/lib/Target/R600/AMDIL.h
deleted file mode 100644
index 39ab664d1018..000000000000
--- a/lib/Target/R600/AMDIL.h
+++ /dev/null
@@ -1,121 +0,0 @@
-//===-- AMDIL.h - Top-level interface for AMDIL representation --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// This file contains the entry points for global functions defined in the LLVM
-/// AMDGPU back-end.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef AMDIL_H
-#define AMDIL_H
-
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/Target/TargetMachine.h"
-
-#define ARENA_SEGMENT_RESERVED_UAVS 12
-#define DEFAULT_ARENA_UAV_ID 8
-#define DEFAULT_RAW_UAV_ID 7
-#define GLOBAL_RETURN_RAW_UAV_ID 11
-#define HW_MAX_NUM_CB 8
-#define MAX_NUM_UNIQUE_UAVS 8
-#define OPENCL_MAX_NUM_ATOMIC_COUNTERS 8
-#define OPENCL_MAX_READ_IMAGES 128
-#define OPENCL_MAX_WRITE_IMAGES 8
-#define OPENCL_MAX_SAMPLERS 16
-
-// The next two values can never be zero, as zero is the ID that is
-// used to assert against.
-#define DEFAULT_LDS_ID 1
-#define DEFAULT_GDS_ID 1
-#define DEFAULT_SCRATCH_ID 1
-#define DEFAULT_VEC_SLOTS 8
-
-#define OCL_DEVICE_RV710 0x0001
-#define OCL_DEVICE_RV730 0x0002
-#define OCL_DEVICE_RV770 0x0004
-#define OCL_DEVICE_CEDAR 0x0008
-#define OCL_DEVICE_REDWOOD 0x0010
-#define OCL_DEVICE_JUNIPER 0x0020
-#define OCL_DEVICE_CYPRESS 0x0040
-#define OCL_DEVICE_CAICOS 0x0080
-#define OCL_DEVICE_TURKS 0x0100
-#define OCL_DEVICE_BARTS 0x0200
-#define OCL_DEVICE_CAYMAN 0x0400
-#define OCL_DEVICE_ALL 0x3FFF
-
-/// The number of function ID's that are reserved for
-/// internal compiler usage.
-const unsigned int RESERVED_FUNCS = 1024;
-
-namespace llvm {
-class AMDGPUInstrPrinter;
-class FunctionPass;
-class MCAsmInfo;
-class raw_ostream;
-class Target;
-class TargetMachine;
-
-// Instruction selection passes.
-FunctionPass*
- createAMDGPUISelDag(TargetMachine &TM);
-FunctionPass*
- createAMDGPUPeepholeOpt(TargetMachine &TM);
-
-// Pre emit passes.
-FunctionPass*
- createAMDGPUCFGPreparationPass(TargetMachine &TM);
-FunctionPass*
- createAMDGPUCFGStructurizerPass(TargetMachine &TM);
-
-extern Target TheAMDGPUTarget;
-} // end namespace llvm;
-
-// Include device information enumerations
-#include "AMDILDeviceInfo.h"
-
-namespace llvm {
-/// OpenCL uses address spaces to differentiate between
-/// various memory regions on the hardware. On the CPU
-/// all of the address spaces point to the same memory,
-/// however on the GPU, each address space points to
-/// a separate piece of memory that is unique from other
-/// memory locations.
-namespace AMDGPUAS {
-enum AddressSpaces {
- PRIVATE_ADDRESS = 0, ///< Address space for private memory.
- GLOBAL_ADDRESS = 1, ///< Address space for global memory (RAT0, VTX0).
- CONSTANT_ADDRESS = 2, ///< Address space for constant memory
- LOCAL_ADDRESS = 3, ///< Address space for local memory.
- REGION_ADDRESS = 4, ///< Address space for region memory.
- ADDRESS_NONE = 5, ///< Address space for unknown memory.
- PARAM_D_ADDRESS = 6, ///< Address space for direct addressable parameter memory (CONST0)
- PARAM_I_ADDRESS = 7, ///< Address space for indirect addressable parameter memory (VTX1)
- CONSTANT_BUFFER_0 = 8,
- CONSTANT_BUFFER_1 = 9,
- CONSTANT_BUFFER_2 = 10,
- CONSTANT_BUFFER_3 = 11,
- CONSTANT_BUFFER_4 = 12,
- CONSTANT_BUFFER_5 = 13,
- CONSTANT_BUFFER_6 = 14,
- CONSTANT_BUFFER_7 = 15,
- CONSTANT_BUFFER_8 = 16,
- CONSTANT_BUFFER_9 = 17,
- CONSTANT_BUFFER_10 = 18,
- CONSTANT_BUFFER_11 = 19,
- CONSTANT_BUFFER_12 = 20,
- CONSTANT_BUFFER_13 = 21,
- CONSTANT_BUFFER_14 = 22,
- CONSTANT_BUFFER_15 = 23,
- LAST_ADDRESS = 24
-};
-
-} // namespace AMDGPUAS
-
-} // end namespace llvm
-#endif // AMDIL_H
diff --git a/lib/Target/R600/AMDIL7XXDevice.cpp b/lib/Target/R600/AMDIL7XXDevice.cpp
deleted file mode 100644
index ea6ac34f570c..000000000000
--- a/lib/Target/R600/AMDIL7XXDevice.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-//===-- AMDIL7XXDevice.cpp - Device Info for 7XX GPUs ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-// \file
-//==-----------------------------------------------------------------------===//
-#include "AMDIL7XXDevice.h"
-#include "AMDGPUSubtarget.h"
-#include "AMDILDevice.h"
-
-using namespace llvm;
-
-AMDGPU7XXDevice::AMDGPU7XXDevice(AMDGPUSubtarget *ST) : AMDGPUDevice(ST) {
- setCaps();
- std::string name = mSTM->getDeviceName();
- if (name == "rv710") {
- DeviceFlag = OCL_DEVICE_RV710;
- } else if (name == "rv730") {
- DeviceFlag = OCL_DEVICE_RV730;
- } else {
- DeviceFlag = OCL_DEVICE_RV770;
- }
-}
-
-AMDGPU7XXDevice::~AMDGPU7XXDevice() {
-}
-
-void AMDGPU7XXDevice::setCaps() {
- mSWBits.set(AMDGPUDeviceInfo::LocalMem);
-}
-
-size_t AMDGPU7XXDevice::getMaxLDSSize() const {
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return MAX_LDS_SIZE_700;
- }
- return 0;
-}
-
-size_t AMDGPU7XXDevice::getWavefrontSize() const {
- return AMDGPUDevice::HalfWavefrontSize;
-}
-
-uint32_t AMDGPU7XXDevice::getGeneration() const {
- return AMDGPUDeviceInfo::HD4XXX;
-}
-
-uint32_t AMDGPU7XXDevice::getResourceID(uint32_t DeviceID) const {
- switch (DeviceID) {
- default:
- assert(0 && "ID type passed in is unknown!");
- break;
- case GLOBAL_ID:
- case CONSTANT_ID:
- case RAW_UAV_ID:
- case ARENA_UAV_ID:
- break;
- case LDS_ID:
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return DEFAULT_LDS_ID;
- }
- break;
- case SCRATCH_ID:
- if (usesHardware(AMDGPUDeviceInfo::PrivateMem)) {
- return DEFAULT_SCRATCH_ID;
- }
- break;
- case GDS_ID:
- assert(0 && "GDS UAV ID is not supported on this chip");
- if (usesHardware(AMDGPUDeviceInfo::RegionMem)) {
- return DEFAULT_GDS_ID;
- }
- break;
- };
-
- return 0;
-}
-
-uint32_t AMDGPU7XXDevice::getMaxNumUAVs() const {
- return 1;
-}
-
-AMDGPU770Device::AMDGPU770Device(AMDGPUSubtarget *ST): AMDGPU7XXDevice(ST) {
- setCaps();
-}
-
-AMDGPU770Device::~AMDGPU770Device() {
-}
-
-void AMDGPU770Device::setCaps() {
- if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) {
- mSWBits.set(AMDGPUDeviceInfo::FMA);
- mHWBits.set(AMDGPUDeviceInfo::DoubleOps);
- }
- mSWBits.set(AMDGPUDeviceInfo::BarrierDetect);
- mHWBits.reset(AMDGPUDeviceInfo::LongOps);
- mSWBits.set(AMDGPUDeviceInfo::LongOps);
- mSWBits.set(AMDGPUDeviceInfo::LocalMem);
-}
-
-size_t AMDGPU770Device::getWavefrontSize() const {
- return AMDGPUDevice::WavefrontSize;
-}
-
-AMDGPU710Device::AMDGPU710Device(AMDGPUSubtarget *ST) : AMDGPU7XXDevice(ST) {
-}
-
-AMDGPU710Device::~AMDGPU710Device() {
-}
-
-size_t AMDGPU710Device::getWavefrontSize() const {
- return AMDGPUDevice::QuarterWavefrontSize;
-}
diff --git a/lib/Target/R600/AMDIL7XXDevice.h b/lib/Target/R600/AMDIL7XXDevice.h
deleted file mode 100644
index 1cf4ca415a4c..000000000000
--- a/lib/Target/R600/AMDIL7XXDevice.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//==-- AMDIL7XXDevice.h - Define 7XX Device Device for AMDIL ---*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-/// \file
-/// \brief Interface for the subtarget data classes.
-///
-/// This file will define the interface that each generation needs to
-/// implement in order to correctly answer queries on the capabilities of the
-/// specific hardware.
-//===----------------------------------------------------------------------===//
-#ifndef AMDIL7XXDEVICEIMPL_H
-#define AMDIL7XXDEVICEIMPL_H
-#include "AMDILDevice.h"
-
-namespace llvm {
-class AMDGPUSubtarget;
-
-//===----------------------------------------------------------------------===//
-// 7XX generation of devices and their respective sub classes
-//===----------------------------------------------------------------------===//
-
-/// \brief The AMDGPU7XXDevice class represents the generic 7XX device.
-///
-/// All 7XX devices are derived from this class. The AMDGPU7XX device will only
-/// support the minimal features that are required to be considered OpenCL 1.0
-/// compliant and nothing more.
-class AMDGPU7XXDevice : public AMDGPUDevice {
-public:
- AMDGPU7XXDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPU7XXDevice();
- virtual size_t getMaxLDSSize() const;
- virtual size_t getWavefrontSize() const;
- virtual uint32_t getGeneration() const;
- virtual uint32_t getResourceID(uint32_t DeviceID) const;
- virtual uint32_t getMaxNumUAVs() const;
-
-protected:
- virtual void setCaps();
-};
-
-/// \brief The AMDGPU770Device class represents the RV770 chip and its
-/// derivative cards.
-///
-/// The difference between this device and the base class is that this device
-/// adds support for double precision and has a larger wavefront size.
-class AMDGPU770Device : public AMDGPU7XXDevice {
-public:
- AMDGPU770Device(AMDGPUSubtarget *ST);
- virtual ~AMDGPU770Device();
- virtual size_t getWavefrontSize() const;
-private:
- virtual void setCaps();
-};
-
-/// \brief The AMDGPU710Device class derives from the 7XX base class.
-///
-/// This class is a smaller derivative, so we need to overload some of the
-/// functions in order to correctly specify this information.
-class AMDGPU710Device : public AMDGPU7XXDevice {
-public:
- AMDGPU710Device(AMDGPUSubtarget *ST);
- virtual ~AMDGPU710Device();
- virtual size_t getWavefrontSize() const;
-};
-
-} // namespace llvm
-#endif // AMDILDEVICEIMPL_H
diff --git a/lib/Target/R600/AMDILBase.td b/lib/Target/R600/AMDILBase.td
index e2211106f3f3..5dcd478fe5bc 100644
--- a/lib/Target/R600/AMDILBase.td
+++ b/lib/Target/R600/AMDILBase.td
@@ -16,70 +16,6 @@ def ALU_NULL : FuncUnit;
def NullALU : InstrItinClass;
//===----------------------------------------------------------------------===//
-// AMDIL Subtarget features.
-//===----------------------------------------------------------------------===//
-def FeatureFP64 : SubtargetFeature<"fp64",
- "CapsOverride[AMDGPUDeviceInfo::DoubleOps]",
- "true",
- "Enable 64bit double precision operations">;
-def FeatureByteAddress : SubtargetFeature<"byte_addressable_store",
- "CapsOverride[AMDGPUDeviceInfo::ByteStores]",
- "true",
- "Enable byte addressable stores">;
-def FeatureBarrierDetect : SubtargetFeature<"barrier_detect",
- "CapsOverride[AMDGPUDeviceInfo::BarrierDetect]",
- "true",
- "Enable duplicate barrier detection(HD5XXX or later).">;
-def FeatureImages : SubtargetFeature<"images",
- "CapsOverride[AMDGPUDeviceInfo::Images]",
- "true",
- "Enable image functions">;
-def FeatureMultiUAV : SubtargetFeature<"multi_uav",
- "CapsOverride[AMDGPUDeviceInfo::MultiUAV]",
- "true",
- "Generate multiple UAV code(HD5XXX family or later)">;
-def FeatureMacroDB : SubtargetFeature<"macrodb",
- "CapsOverride[AMDGPUDeviceInfo::MacroDB]",
- "true",
- "Use internal macrodb, instead of macrodb in driver">;
-def FeatureNoAlias : SubtargetFeature<"noalias",
- "CapsOverride[AMDGPUDeviceInfo::NoAlias]",
- "true",
- "assert that all kernel argument pointers are not aliased">;
-def FeatureNoInline : SubtargetFeature<"no-inline",
- "CapsOverride[AMDGPUDeviceInfo::NoInline]",
- "true",
- "specify whether to not inline functions">;
-
-def Feature64BitPtr : SubtargetFeature<"64BitPtr",
- "Is64bit",
- "false",
- "Specify if 64bit addressing should be used.">;
-
-def Feature32on64BitPtr : SubtargetFeature<"64on32BitPtr",
- "Is32on64bit",
- "false",
- "Specify if 64bit sized pointers with 32bit addressing should be used.">;
-def FeatureDebug : SubtargetFeature<"debug",
- "CapsOverride[AMDGPUDeviceInfo::Debug]",
- "true",
- "Debug mode is enabled, so disable hardware accelerated address spaces.">;
-def FeatureDumpCode : SubtargetFeature <"DumpCode",
- "DumpCode",
- "true",
- "Dump MachineInstrs in the CodeEmitter">;
-
-def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst",
- "R600ALUInst",
- "false",
- "Older version of ALU instructions encoding.">;
-
-def FeatureVertexCache : SubtargetFeature<"HasVertexCache",
- "HasVertexCache",
- "true",
- "Specify use of dedicated vertex cache.">;
-
-//===----------------------------------------------------------------------===//
// Register File, Calling Conv, Instruction Descriptions
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/R600/AMDILCFGStructurizer.cpp b/lib/Target/R600/AMDILCFGStructurizer.cpp
index b0cd0f9756a4..507570fdcaa2 100644
--- a/lib/Target/R600/AMDILCFGStructurizer.cpp
+++ b/lib/Target/R600/AMDILCFGStructurizer.cpp
@@ -8,14 +8,17 @@
/// \file
//==-----------------------------------------------------------------------===//
-#define DEBUGME 0
#define DEBUG_TYPE "structcfg"
+#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
-#include "AMDIL.h"
+#include "R600InstrInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/DominatorInternals.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -28,9 +31,12 @@
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEFAULT_VEC_SLOTS 8
+
// TODO: move-begin.
//===----------------------------------------------------------------------===//
@@ -43,12 +49,8 @@ STATISTIC(numSerialPatternMatch, "CFGStructurizer number of serial pattern "
"matched");
STATISTIC(numIfPatternMatch, "CFGStructurizer number of if pattern "
"matched");
-STATISTIC(numLoopbreakPatternMatch, "CFGStructurizer number of loop-break "
- "pattern matched");
STATISTIC(numLoopcontPatternMatch, "CFGStructurizer number of loop-continue "
"pattern matched");
-STATISTIC(numLoopPatternMatch, "CFGStructurizer number of loop pattern "
- "matched");
STATISTIC(numClonedBlock, "CFGStructurizer cloned blocks");
STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions");
@@ -57,39 +59,29 @@ STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions");
// Miscellaneous utility for CFGStructurizer.
//
//===----------------------------------------------------------------------===//
-namespace llvmCFGStruct {
+namespace {
#define SHOWNEWINSTR(i) \
- if (DEBUGME) errs() << "New instr: " << *i << "\n"
+ DEBUG(dbgs() << "New instr: " << *i << "\n");
#define SHOWNEWBLK(b, msg) \
-if (DEBUGME) { \
- errs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
- errs() << "\n"; \
-}
+DEBUG( \
+ dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
+ dbgs() << "\n"; \
+);
#define SHOWBLK_DETAIL(b, msg) \
-if (DEBUGME) { \
+DEBUG( \
if (b) { \
- errs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
- b->print(errs()); \
- errs() << "\n"; \
+ dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
+ b->print(dbgs()); \
+ dbgs() << "\n"; \
} \
-}
+);
#define INVALIDSCCNUM -1
-#define INVALIDREGNUM 0
-
-template<class LoopinfoT>
-void PrintLoopinfo(const LoopinfoT &LoopInfo, llvm::raw_ostream &OS) {
- for (typename LoopinfoT::iterator iter = LoopInfo.begin(),
- iterEnd = LoopInfo.end();
- iter != iterEnd; ++iter) {
- (*iter)->print(OS, 0);
- }
-}
template<class NodeT>
-void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) {
+void ReverseVector(SmallVectorImpl<NodeT *> &Src) {
size_t sz = Src.size();
for (size_t i = 0; i < sz/2; ++i) {
NodeT *t = Src[i];
@@ -98,7 +90,7 @@ void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) {
}
}
-} //end namespace llvmCFGStruct
+} // end anonymous namespace
//===----------------------------------------------------------------------===//
//
@@ -106,43 +98,17 @@ void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) {
//
//===----------------------------------------------------------------------===//
-namespace llvmCFGStruct {
-template<class PassT>
-struct CFGStructTraits {
-};
-template <class InstrT>
-class BlockInformation {
-public:
- bool isRetired;
- int sccNum;
- //SmallVector<InstrT*, DEFAULT_VEC_SLOTS> succInstr;
- //Instructions defining the corresponding successor.
- BlockInformation() : isRetired(false), sccNum(INVALIDSCCNUM) {}
-};
+namespace {
-template <class BlockT, class InstrT, class RegiT>
-class LandInformation {
+class BlockInformation {
public:
- BlockT *landBlk;
- std::set<RegiT> breakInitRegs; //Registers that need to "reg = 0", before
- //WHILELOOP(thisloop) init before entering
- //thisloop.
- std::set<RegiT> contInitRegs; //Registers that need to "reg = 0", after
- //WHILELOOP(thisloop) init after entering
- //thisloop.
- std::set<RegiT> endbranchInitRegs; //Init before entering this loop, at loop
- //land block, branch cond on this reg.
- std::set<RegiT> breakOnRegs; //registers that need to "if (reg) break
- //endif" after ENDLOOP(thisloop) break
- //outerLoopOf(thisLoop).
- std::set<RegiT> contOnRegs; //registers that need to "if (reg) continue
- //endif" after ENDLOOP(thisloop) continue on
- //outerLoopOf(thisLoop).
- LandInformation() : landBlk(NULL) {}
+ bool IsRetired;
+ int SccNum;
+ BlockInformation() : IsRetired(false), SccNum(INVALIDSCCNUM) {}
};
-} //end of namespace llvmCFGStruct
+} // end anonymous namespace
//===----------------------------------------------------------------------===//
//
@@ -150,1026 +116,1213 @@ public:
//
//===----------------------------------------------------------------------===//
-namespace llvmCFGStruct {
-// bixia TODO: port it to BasicBlock, not just MachineBasicBlock.
-template<class PassT>
-class CFGStructurizer {
+namespace {
+class AMDGPUCFGStructurizer : public MachineFunctionPass {
public:
- typedef enum {
+ typedef SmallVector<MachineBasicBlock *, 32> MBBVector;
+ typedef std::map<MachineBasicBlock *, BlockInformation *> MBBInfoMap;
+ typedef std::map<MachineLoop *, MachineBasicBlock *> LoopLandInfoMap;
+
+ enum PathToKind {
Not_SinglePath = 0,
SinglePath_InPath = 1,
SinglePath_NotInPath = 2
- } PathToKind;
+ };
-public:
- typedef typename PassT::InstructionType InstrT;
- typedef typename PassT::FunctionType FuncT;
- typedef typename PassT::DominatortreeType DomTreeT;
- typedef typename PassT::PostDominatortreeType PostDomTreeT;
- typedef typename PassT::DomTreeNodeType DomTreeNodeT;
- typedef typename PassT::LoopinfoType LoopInfoT;
-
- typedef GraphTraits<FuncT *> FuncGTraits;
- //typedef FuncGTraits::nodes_iterator BlockIterator;
- typedef typename FuncT::iterator BlockIterator;
-
- typedef typename FuncGTraits::NodeType BlockT;
- typedef GraphTraits<BlockT *> BlockGTraits;
- typedef GraphTraits<Inverse<BlockT *> > InvBlockGTraits;
- //typedef BlockGTraits::succ_iterator InstructionIterator;
- typedef typename BlockT::iterator InstrIterator;
-
- typedef CFGStructTraits<PassT> CFGTraits;
- typedef BlockInformation<InstrT> BlockInfo;
- typedef std::map<BlockT *, BlockInfo *> BlockInfoMap;
-
- typedef int RegiT;
- typedef typename PassT::LoopType LoopT;
- typedef LandInformation<BlockT, InstrT, RegiT> LoopLandInfo;
- typedef std::map<LoopT *, LoopLandInfo *> LoopLandInfoMap;
- //landing info for loop break
- typedef SmallVector<BlockT *, 32> BlockTSmallerVector;
+ static char ID;
-public:
- CFGStructurizer();
- ~CFGStructurizer();
+ AMDGPUCFGStructurizer(TargetMachine &tm) :
+ MachineFunctionPass(ID), TM(tm),
+ TII(static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
+ TRI(&TII->getRegisterInfo()) { }
+
+ const char *getPassName() const {
+ return "AMD IL Control Flow Graph structurizer Pass";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addPreserved<MachineFunctionAnalysis>();
+ AU.addRequired<MachineFunctionAnalysis>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<MachinePostDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ }
/// Perform the CFG structurization
- bool run(FuncT &Func, PassT &Pass, const AMDGPURegisterInfo *tri);
+ bool run();
/// Perform the CFG preparation
- bool prepare(FuncT &Func, PassT &Pass, const AMDGPURegisterInfo *tri);
+ /// This step will remove every unconditional/dead jump instruction and make
+ /// sure all loops have an exit block.
+ bool prepare();
+
+ bool runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(MF.dump(););
+ OrderedBlks.clear();
+ FuncRep = &MF;
+ MLI = &getAnalysis<MachineLoopInfo>();
+ DEBUG(dbgs() << "LoopInfo:\n"; PrintLoopinfo(*MLI););
+ MDT = &getAnalysis<MachineDominatorTree>();
+ DEBUG(MDT->print(dbgs(), (const llvm::Module*)0););
+ PDT = &getAnalysis<MachinePostDominatorTree>();
+ DEBUG(PDT->print(dbgs()););
+ prepare();
+ run();
+ DEBUG(MF.dump(););
+ return true;
+ }
-private:
- void reversePredicateSetter(typename BlockT::iterator);
- void orderBlocks();
- void printOrderedBlocks(llvm::raw_ostream &OS);
- int patternMatch(BlockT *CurBlock);
- int patternMatchGroup(BlockT *CurBlock);
-
- int serialPatternMatch(BlockT *CurBlock);
- int ifPatternMatch(BlockT *CurBlock);
- int switchPatternMatch(BlockT *CurBlock);
- int loopendPatternMatch(BlockT *CurBlock);
- int loopPatternMatch(BlockT *CurBlock);
-
- int loopbreakPatternMatch(LoopT *LoopRep, BlockT *LoopHeader);
- int loopcontPatternMatch(LoopT *LoopRep, BlockT *LoopHeader);
- //int loopWithoutBreak(BlockT *);
-
- void handleLoopbreak (BlockT *ExitingBlock, LoopT *ExitingLoop,
- BlockT *ExitBlock, LoopT *exitLoop, BlockT *landBlock);
- void handleLoopcontBlock(BlockT *ContingBlock, LoopT *contingLoop,
- BlockT *ContBlock, LoopT *contLoop);
- bool isSameloopDetachedContbreak(BlockT *Src1Block, BlockT *Src2Block);
- int handleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock,
- BlockT *FalseBlock);
- int handleJumpintoIfImp(BlockT *HeadBlock, BlockT *TrueBlock,
- BlockT *FalseBlock);
- int improveSimpleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock,
- BlockT *FalseBlock, BlockT **LandBlockPtr);
- void showImproveSimpleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock,
- BlockT *FalseBlock, BlockT *LandBlock,
- bool Detail = false);
- PathToKind singlePathTo(BlockT *SrcBlock, BlockT *DstBlock,
- bool AllowSideEntry = true);
- BlockT *singlePathEnd(BlockT *srcBlock, BlockT *DstBlock,
- bool AllowSideEntry = true);
- int cloneOnSideEntryTo(BlockT *PreBlock, BlockT *SrcBlock, BlockT *DstBlock);
- void mergeSerialBlock(BlockT *DstBlock, BlockT *srcBlock);
-
- void mergeIfthenelseBlock(InstrT *BranchInstr, BlockT *CurBlock,
- BlockT *TrueBlock, BlockT *FalseBlock,
- BlockT *LandBlock);
- void mergeLooplandBlock(BlockT *DstBlock, LoopLandInfo *LoopLand);
- void mergeLoopbreakBlock(BlockT *ExitingBlock, BlockT *ExitBlock,
- BlockT *ExitLandBlock, RegiT SetReg);
- void settleLoopcontBlock(BlockT *ContingBlock, BlockT *ContBlock,
- RegiT SetReg);
- BlockT *relocateLoopcontBlock(LoopT *ParentLoopRep, LoopT *LoopRep,
- std::set<BlockT*> &ExitBlockSet,
- BlockT *ExitLandBlk);
- BlockT *addLoopEndbranchBlock(LoopT *LoopRep,
- BlockTSmallerVector &ExitingBlocks,
- BlockTSmallerVector &ExitBlocks);
- BlockT *normalizeInfiniteLoopExit(LoopT *LoopRep);
- void removeUnconditionalBranch(BlockT *SrcBlock);
- void removeRedundantConditionalBranch(BlockT *SrcBlock);
- void addDummyExitBlock(SmallVector<BlockT *, DEFAULT_VEC_SLOTS> &RetBlocks);
-
- void removeSuccessor(BlockT *SrcBlock);
- BlockT *cloneBlockForPredecessor(BlockT *CurBlock, BlockT *PredBlock);
- BlockT *exitingBlock2ExitBlock (LoopT *LoopRep, BlockT *exitingBlock);
-
- void migrateInstruction(BlockT *SrcBlock, BlockT *DstBlock,
- InstrIterator InsertPos);
-
- void recordSccnum(BlockT *SrcBlock, int SCCNum);
- int getSCCNum(BlockT *srcBlk);
-
- void retireBlock(BlockT *DstBlock, BlockT *SrcBlock);
- bool isRetiredBlock(BlockT *SrcBlock);
- bool isActiveLoophead(BlockT *CurBlock);
- bool needMigrateBlock(BlockT *Block);
-
- BlockT *recordLoopLandBlock(LoopT *LoopRep, BlockT *LandBlock,
- BlockTSmallerVector &exitBlocks,
- std::set<BlockT*> &ExitBlockSet);
- void setLoopLandBlock(LoopT *LoopRep, BlockT *Block = NULL);
- BlockT *getLoopLandBlock(LoopT *LoopRep);
- LoopLandInfo *getLoopLandInfo(LoopT *LoopRep);
-
- void addLoopBreakOnReg(LoopT *LoopRep, RegiT RegNum);
- void addLoopContOnReg(LoopT *LoopRep, RegiT RegNum);
- void addLoopBreakInitReg(LoopT *LoopRep, RegiT RegNum);
- void addLoopContInitReg(LoopT *LoopRep, RegiT RegNum);
- void addLoopEndbranchInitReg(LoopT *LoopRep, RegiT RegNum);
-
- bool hasBackEdge(BlockT *curBlock);
- unsigned getLoopDepth (LoopT *LoopRep);
- int countActiveBlock(
- typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator IterStart,
- typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator IterEnd);
- BlockT *findNearestCommonPostDom(std::set<BlockT *>&);
- BlockT *findNearestCommonPostDom(BlockT *Block1, BlockT *Block2);
+protected:
+ TargetMachine &TM;
+ MachineDominatorTree *MDT;
+ MachinePostDominatorTree *PDT;
+ MachineLoopInfo *MLI;
+ const R600InstrInfo *TII;
+ const AMDGPURegisterInfo *TRI;
+
+ // PRINT FUNCTIONS
+ /// Print the ordered Blocks.
+ void printOrderedBlocks() const {
+ size_t i = 0;
+ for (MBBVector::const_iterator iterBlk = OrderedBlks.begin(),
+ iterBlkEnd = OrderedBlks.end(); iterBlk != iterBlkEnd; ++iterBlk, ++i) {
+ dbgs() << "BB" << (*iterBlk)->getNumber();
+ dbgs() << "(" << getSCCNum(*iterBlk) << "," << (*iterBlk)->size() << ")";
+ if (i != 0 && i % 10 == 0) {
+ dbgs() << "\n";
+ } else {
+ dbgs() << " ";
+ }
+ }
+ }
+ static void PrintLoopinfo(const MachineLoopInfo &LoopInfo) {
+ for (MachineLoop::iterator iter = LoopInfo.begin(),
+ iterEnd = LoopInfo.end(); iter != iterEnd; ++iter) {
+ (*iter)->print(dbgs(), 0);
+ }
+ }
+
+ // UTILITY FUNCTIONS
+ int getSCCNum(MachineBasicBlock *MBB) const;
+ MachineBasicBlock *getLoopLandInfo(MachineLoop *LoopRep) const;
+ bool hasBackEdge(MachineBasicBlock *MBB) const;
+ static unsigned getLoopDepth(MachineLoop *LoopRep);
+ bool isRetiredBlock(MachineBasicBlock *MBB) const;
+ bool isActiveLoophead(MachineBasicBlock *MBB) const;
+ PathToKind singlePathTo(MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
+ bool AllowSideEntry = true) const;
+ int countActiveBlock(MBBVector::const_iterator It,
+ MBBVector::const_iterator E) const;
+ bool needMigrateBlock(MachineBasicBlock *MBB) const;
+
+ // Utility Functions
+ void reversePredicateSetter(MachineBasicBlock::iterator I);
+ /// Compute the reversed DFS post order of Blocks
+ void orderBlocks(MachineFunction *MF);
+
+ // Functions originally from CFGStructTraits
+ void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
+ DebugLoc DL = DebugLoc());
+ MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
+ DebugLoc DL = DebugLoc());
+ MachineInstr *insertInstrBefore(MachineBasicBlock::iterator I, int NewOpcode);
+ void insertCondBranchBefore(MachineBasicBlock::iterator I, int NewOpcode,
+ DebugLoc DL);
+ void insertCondBranchBefore(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
+ DebugLoc DL);
+ void insertCondBranchEnd(MachineBasicBlock *MBB, int NewOpcode, int RegNum);
+ static int getBranchNzeroOpcode(int OldOpcode);
+ static int getBranchZeroOpcode(int OldOpcode);
+ static int getContinueNzeroOpcode(int OldOpcode);
+ static int getContinueZeroOpcode(int OldOpcode);
+ static MachineBasicBlock *getTrueBranch(MachineInstr *MI);
+ static void setTrueBranch(MachineInstr *MI, MachineBasicBlock *MBB);
+ static MachineBasicBlock *getFalseBranch(MachineBasicBlock *MBB,
+ MachineInstr *MI);
+ static bool isCondBranch(MachineInstr *MI);
+ static bool isUncondBranch(MachineInstr *MI);
+ static DebugLoc getLastDebugLocInBB(MachineBasicBlock *MBB);
+ static MachineInstr *getNormalBlockBranchInstr(MachineBasicBlock *MBB);
+ /// The correct naming for this is getPossibleLoopendBlockBranchInstr.
+ ///
+ /// A BB with a backward edge could have move instructions after the branch
+ /// instruction. Such move instructions "belong to" the loop backward edge.
+ MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *MBB);
+ static MachineInstr *getReturnInstr(MachineBasicBlock *MBB);
+ static MachineInstr *getContinueInstr(MachineBasicBlock *MBB);
+ static bool isReturnBlock(MachineBasicBlock *MBB);
+ static void cloneSuccessorList(MachineBasicBlock *DstMBB,
+ MachineBasicBlock *SrcMBB) ;
+ static MachineBasicBlock *clone(MachineBasicBlock *MBB);
+ /// MachineBasicBlock::ReplaceUsesOfBlockWith doesn't serve the purpose
+ /// because the AMDGPU instruction is not recognized as a terminator; fix
+ /// this and retire this routine.
+ void replaceInstrUseOfBlockWith(MachineBasicBlock *SrcMBB,
+ MachineBasicBlock *OldMBB, MachineBasicBlock *NewBlk);
+ static void wrapup(MachineBasicBlock *MBB);
+
+
+ int patternMatch(MachineBasicBlock *MBB);
+ int patternMatchGroup(MachineBasicBlock *MBB);
+ int serialPatternMatch(MachineBasicBlock *MBB);
+ int ifPatternMatch(MachineBasicBlock *MBB);
+ int loopendPatternMatch();
+ int mergeLoop(MachineLoop *LoopRep);
+ int loopcontPatternMatch(MachineLoop *LoopRep, MachineBasicBlock *LoopHeader);
+
+ void handleLoopcontBlock(MachineBasicBlock *ContingMBB,
+ MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
+ MachineLoop *ContLoop);
+ /// Return true iff src1Blk->succ_size() == 0 && src1Blk and src2Blk are in
+ /// the same loop with LoopLandInfo; without explicitly keeping track of
+ /// loopContBlks and loopBreakBlks, this is a way to get that information.
+ bool isSameloopDetachedContbreak(MachineBasicBlock *Src1MBB,
+ MachineBasicBlock *Src2MBB);
+ int handleJumpintoIf(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB);
+ int handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB);
+ int improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
+ MachineBasicBlock **LandMBBPtr);
+ void showImproveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
+ MachineBasicBlock *LandMBB, bool Detail = false);
+ int cloneOnSideEntryTo(MachineBasicBlock *PreMBB,
+ MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB);
+ void mergeSerialBlock(MachineBasicBlock *DstMBB,
+ MachineBasicBlock *SrcMBB);
+
+ void mergeIfthenelseBlock(MachineInstr *BranchMI,
+ MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
+ MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB);
+ void mergeLooplandBlock(MachineBasicBlock *DstMBB,
+ MachineBasicBlock *LandMBB);
+ void mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
+ MachineBasicBlock *LandMBB);
+ void settleLoopcontBlock(MachineBasicBlock *ContingMBB,
+ MachineBasicBlock *ContMBB);
+ /// normalizeInfiniteLoopExit changes
+ /// B1:
+ /// uncond_br LoopHeader
+ ///
+ /// to
+ /// B1:
+ /// cond_br 1 LoopHeader dummyExit
+ /// and returns the newly added dummy exit block.
+ MachineBasicBlock *normalizeInfiniteLoopExit(MachineLoop *LoopRep);
+ void removeUnconditionalBranch(MachineBasicBlock *MBB);
+ /// Remove duplicate branch instructions in a block.
+ /// For instance
+ /// B0:
+ /// cond_br X B1 B2
+ /// cond_br X B1 B2
+ /// is transformed to
+ /// B0:
+ /// cond_br X B1 B2
+ void removeRedundantConditionalBranch(MachineBasicBlock *MBB);
+ void addDummyExitBlock(SmallVectorImpl<MachineBasicBlock *> &RetMBB);
+ void removeSuccessor(MachineBasicBlock *MBB);
+ MachineBasicBlock *cloneBlockForPredecessor(MachineBasicBlock *MBB,
+ MachineBasicBlock *PredMBB);
+ void migrateInstruction(MachineBasicBlock *SrcMBB,
+ MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I);
+ void recordSccnum(MachineBasicBlock *MBB, int SCCNum);
+ void retireBlock(MachineBasicBlock *MBB);
+ void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = NULL);
+
+ MachineBasicBlock *findNearestCommonPostDom(std::set<MachineBasicBlock *>&);
+ /// This is a workaround for findNearestCommonDominator not being available
+ /// for post dominators; a proper fix should go into Dominators.h.
+ MachineBasicBlock *findNearestCommonPostDom(MachineBasicBlock *MBB1,
+ MachineBasicBlock *MBB2);
private:
- DomTreeT *domTree;
- PostDomTreeT *postDomTree;
- LoopInfoT *loopInfo;
- PassT *passRep;
- FuncT *funcRep;
-
- BlockInfoMap blockInfoMap;
- LoopLandInfoMap loopLandInfoMap;
- SmallVector<BlockT *, DEFAULT_VEC_SLOTS> orderedBlks;
- const AMDGPURegisterInfo *TRI;
+ MBBInfoMap BlockInfoMap;
+ LoopLandInfoMap LLInfoMap;
+ std::map<MachineLoop *, bool> Visited;
+ MachineFunction *FuncRep;
+ SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> OrderedBlks;
+};
+
+int AMDGPUCFGStructurizer::getSCCNum(MachineBasicBlock *MBB) const {
+ MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
+ if (It == BlockInfoMap.end())
+ return INVALIDSCCNUM;
+ return (*It).second->SccNum;
+}
-}; //template class CFGStructurizer
+MachineBasicBlock *AMDGPUCFGStructurizer::getLoopLandInfo(MachineLoop *LoopRep)
+ const {
+ LoopLandInfoMap::const_iterator It = LLInfoMap.find(LoopRep);
+ if (It == LLInfoMap.end())
+ return NULL;
+ return (*It).second;
+}
+
+bool AMDGPUCFGStructurizer::hasBackEdge(MachineBasicBlock *MBB) const {
+ MachineLoop *LoopRep = MLI->getLoopFor(MBB);
+ if (!LoopRep)
+ return false;
+ MachineBasicBlock *LoopHeader = LoopRep->getHeader();
+ return MBB->isSuccessor(LoopHeader);
+}
+
+unsigned AMDGPUCFGStructurizer::getLoopDepth(MachineLoop *LoopRep) {
+ return LoopRep ? LoopRep->getLoopDepth() : 0;
+}
-template<class PassT> CFGStructurizer<PassT>::CFGStructurizer()
- : domTree(NULL), postDomTree(NULL), loopInfo(NULL) {
+bool AMDGPUCFGStructurizer::isRetiredBlock(MachineBasicBlock *MBB) const {
+ MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
+ if (It == BlockInfoMap.end())
+ return false;
+ return (*It).second->IsRetired;
}
-template<class PassT> CFGStructurizer<PassT>::~CFGStructurizer() {
- for (typename BlockInfoMap::iterator I = blockInfoMap.begin(),
- E = blockInfoMap.end(); I != E; ++I) {
- delete I->second;
+bool AMDGPUCFGStructurizer::isActiveLoophead(MachineBasicBlock *MBB) const {
+ MachineLoop *LoopRep = MLI->getLoopFor(MBB);
+ while (LoopRep && LoopRep->getHeader() == MBB) {
+ MachineBasicBlock *LoopLand = getLoopLandInfo(LoopRep);
+ if(!LoopLand)
+ return true;
+ if (!isRetiredBlock(LoopLand))
+ return true;
+ LoopRep = LoopRep->getParentLoop();
}
+ return false;
+}
+AMDGPUCFGStructurizer::PathToKind AMDGPUCFGStructurizer::singlePathTo(
+ MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
+ bool AllowSideEntry) const {
+ assert(DstMBB);
+ if (SrcMBB == DstMBB)
+ return SinglePath_InPath;
+ while (SrcMBB && SrcMBB->succ_size() == 1) {
+ SrcMBB = *SrcMBB->succ_begin();
+ if (SrcMBB == DstMBB)
+ return SinglePath_InPath;
+ if (!AllowSideEntry && SrcMBB->pred_size() > 1)
+ return Not_SinglePath;
+ }
+ if (SrcMBB && SrcMBB->succ_size()==0)
+ return SinglePath_NotInPath;
+ return Not_SinglePath;
}
-template<class PassT>
-bool CFGStructurizer<PassT>::prepare(FuncT &func, PassT &pass,
- const AMDGPURegisterInfo * tri) {
- passRep = &pass;
- funcRep = &func;
- TRI = tri;
+int AMDGPUCFGStructurizer::countActiveBlock(MBBVector::const_iterator It,
+ MBBVector::const_iterator E) const {
+ int Count = 0;
+ while (It != E) {
+ if (!isRetiredBlock(*It))
+ ++Count;
+ ++It;
+ }
+ return Count;
+}
- bool changed = false;
+bool AMDGPUCFGStructurizer::needMigrateBlock(MachineBasicBlock *MBB) const {
+ unsigned BlockSizeThreshold = 30;
+ unsigned CloneInstrThreshold = 100;
+ bool MultiplePreds = MBB && (MBB->pred_size() > 1);
- //FIXME: if not reducible flow graph, make it so ???
+ if(!MultiplePreds)
+ return false;
+ unsigned BlkSize = MBB->size();
+ return ((BlkSize > BlockSizeThreshold) &&
+ (BlkSize * (MBB->pred_size() - 1) > CloneInstrThreshold));
+}
- if (DEBUGME) {
- errs() << "AMDGPUCFGStructurizer::prepare\n";
+void AMDGPUCFGStructurizer::reversePredicateSetter(
+ MachineBasicBlock::iterator I) {
+ while (I--) {
+ if (I->getOpcode() == AMDGPU::PRED_X) {
+ switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) {
+ case OPCODE_IS_ZERO_INT:
+ static_cast<MachineInstr *>(I)->getOperand(2)
+ .setImm(OPCODE_IS_NOT_ZERO_INT);
+ return;
+ case OPCODE_IS_NOT_ZERO_INT:
+ static_cast<MachineInstr *>(I)->getOperand(2)
+ .setImm(OPCODE_IS_ZERO_INT);
+ return;
+ case OPCODE_IS_ZERO:
+ static_cast<MachineInstr *>(I)->getOperand(2)
+ .setImm(OPCODE_IS_NOT_ZERO);
+ return;
+ case OPCODE_IS_NOT_ZERO:
+ static_cast<MachineInstr *>(I)->getOperand(2)
+ .setImm(OPCODE_IS_ZERO);
+ return;
+ default:
+ llvm_unreachable("PRED_X Opcode invalid!");
+ }
+ }
}
+}
+
+void AMDGPUCFGStructurizer::insertInstrEnd(MachineBasicBlock *MBB,
+ int NewOpcode, DebugLoc DL) {
+ MachineInstr *MI = MBB->getParent()
+ ->CreateMachineInstr(TII->get(NewOpcode), DL);
+ MBB->push_back(MI);
+ //assume the instruction doesn't take any reg operand ...
+ SHOWNEWINSTR(MI);
+}
+
+MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(MachineBasicBlock *MBB,
+ int NewOpcode, DebugLoc DL) {
+ MachineInstr *MI =
+ MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DL);
+ if (MBB->begin() != MBB->end())
+ MBB->insert(MBB->begin(), MI);
+ else
+ MBB->push_back(MI);
+ SHOWNEWINSTR(MI);
+ return MI;
+}
+
+MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(
+ MachineBasicBlock::iterator I, int NewOpcode) {
+ MachineInstr *OldMI = &(*I);
+ MachineBasicBlock *MBB = OldMI->getParent();
+ MachineInstr *NewMBB =
+ MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
+ MBB->insert(I, NewMBB);
+ //assume the instruction doesn't take any reg operand ...
+ SHOWNEWINSTR(NewMBB);
+ return NewMBB;
+}
+
+void AMDGPUCFGStructurizer::insertCondBranchBefore(
+ MachineBasicBlock::iterator I, int NewOpcode, DebugLoc DL) {
+ MachineInstr *OldMI = &(*I);
+ MachineBasicBlock *MBB = OldMI->getParent();
+ MachineFunction *MF = MBB->getParent();
+ MachineInstr *NewMI = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
+ MBB->insert(I, NewMI);
+ MachineInstrBuilder MIB(*MF, NewMI);
+ MIB.addReg(OldMI->getOperand(1).getReg(), false);
+ SHOWNEWINSTR(NewMI);
+ //erase later oldInstr->eraseFromParent();
+}
+
+void AMDGPUCFGStructurizer::insertCondBranchBefore(MachineBasicBlock *blk,
+ MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
+ DebugLoc DL) {
+ MachineFunction *MF = blk->getParent();
+ MachineInstr *NewInstr = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
+ //insert before
+ blk->insert(I, NewInstr);
+ MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
+ SHOWNEWINSTR(NewInstr);
+}
- loopInfo = CFGTraits::getLoopInfo(pass);
- if (DEBUGME) {
- errs() << "LoopInfo:\n";
- PrintLoopinfo(*loopInfo, errs());
+void AMDGPUCFGStructurizer::insertCondBranchEnd(MachineBasicBlock *MBB,
+ int NewOpcode, int RegNum) {
+ MachineFunction *MF = MBB->getParent();
+ MachineInstr *NewInstr =
+ MF->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
+ MBB->push_back(NewInstr);
+ MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
+ SHOWNEWINSTR(NewInstr);
+}
+
+int AMDGPUCFGStructurizer::getBranchNzeroOpcode(int OldOpcode) {
+ switch(OldOpcode) {
+ case AMDGPU::JUMP_COND:
+ case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
+ case AMDGPU::BRANCH_COND_i32:
+ case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALNZ_f32;
+ default: llvm_unreachable("internal error");
}
+ return -1;
+}
- orderBlocks();
- if (DEBUGME) {
- errs() << "Ordered blocks:\n";
- printOrderedBlocks(errs());
+int AMDGPUCFGStructurizer::getBranchZeroOpcode(int OldOpcode) {
+ switch(OldOpcode) {
+ case AMDGPU::JUMP_COND:
+ case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
+ case AMDGPU::BRANCH_COND_i32:
+ case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALZ_f32;
+ default: llvm_unreachable("internal error");
}
+ return -1;
+}
- SmallVector<BlockT *, DEFAULT_VEC_SLOTS> retBlks;
-
- for (typename LoopInfoT::iterator iter = loopInfo->begin(),
- iterEnd = loopInfo->end();
- iter != iterEnd; ++iter) {
- LoopT* loopRep = (*iter);
- BlockTSmallerVector exitingBlks;
- loopRep->getExitingBlocks(exitingBlks);
-
- if (exitingBlks.size() == 0) {
- BlockT* dummyExitBlk = normalizeInfiniteLoopExit(loopRep);
- if (dummyExitBlk != NULL)
- retBlks.push_back(dummyExitBlk);
- }
+int AMDGPUCFGStructurizer::getContinueNzeroOpcode(int OldOpcode) {
+ switch(OldOpcode) {
+ case AMDGPU::JUMP_COND:
+ case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALNZ_i32;
+ default: llvm_unreachable("internal error");
+ };
+ return -1;
+}
+
+int AMDGPUCFGStructurizer::getContinueZeroOpcode(int OldOpcode) {
+ switch(OldOpcode) {
+ case AMDGPU::JUMP_COND:
+ case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALZ_i32;
+ default: llvm_unreachable("internal error");
}
+ return -1;
+}
- // Remove unconditional branch instr.
- // Add dummy exit block iff there are multiple returns.
+MachineBasicBlock *AMDGPUCFGStructurizer::getTrueBranch(MachineInstr *MI) {
+ return MI->getOperand(0).getMBB();
+}
- for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
- iterBlk = orderedBlks.begin(), iterEndBlk = orderedBlks.end();
- iterBlk != iterEndBlk;
- ++iterBlk) {
- BlockT *curBlk = *iterBlk;
- removeUnconditionalBranch(curBlk);
- removeRedundantConditionalBranch(curBlk);
- if (CFGTraits::isReturnBlock(curBlk)) {
- retBlks.push_back(curBlk);
+void AMDGPUCFGStructurizer::setTrueBranch(MachineInstr *MI,
+ MachineBasicBlock *MBB) {
+ MI->getOperand(0).setMBB(MBB);
+}
+
+MachineBasicBlock *
+AMDGPUCFGStructurizer::getFalseBranch(MachineBasicBlock *MBB,
+ MachineInstr *MI) {
+ assert(MBB->succ_size() == 2);
+ MachineBasicBlock *TrueBranch = getTrueBranch(MI);
+ MachineBasicBlock::succ_iterator It = MBB->succ_begin();
+ MachineBasicBlock::succ_iterator Next = It;
+ ++Next;
+ return (*It == TrueBranch) ? *Next : *It;
+}
+
+bool AMDGPUCFGStructurizer::isCondBranch(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case AMDGPU::JUMP_COND:
+ case AMDGPU::BRANCH_COND_i32:
+ case AMDGPU::BRANCH_COND_f32: return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool AMDGPUCFGStructurizer::isUncondBranch(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case AMDGPU::JUMP:
+ case AMDGPU::BRANCH:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+DebugLoc AMDGPUCFGStructurizer::getLastDebugLocInBB(MachineBasicBlock *MBB) {
+ //get DebugLoc from the last MachineBasicBlock instruction with debug info
+ DebugLoc DL;
+ for (MachineBasicBlock::iterator It = MBB->begin(); It != MBB->end();
+ ++It) {
+ MachineInstr *instr = &(*It);
+ if (instr->getDebugLoc().isUnknown() == false)
+ DL = instr->getDebugLoc();
+ }
+ return DL;
+}
+
+MachineInstr *AMDGPUCFGStructurizer::getNormalBlockBranchInstr(
+ MachineBasicBlock *MBB) {
+ MachineBasicBlock::reverse_iterator It = MBB->rbegin();
+ MachineInstr *MI = &*It;
+ if (MI && (isCondBranch(MI) || isUncondBranch(MI)))
+ return MI;
+ return NULL;
+}
+
+MachineInstr *AMDGPUCFGStructurizer::getLoopendBlockBranchInstr(
+ MachineBasicBlock *MBB) {
+ for (MachineBasicBlock::reverse_iterator It = MBB->rbegin(), E = MBB->rend();
+ It != E; ++It) {
+ // FIXME: Simplify
+ MachineInstr *MI = &*It;
+ if (MI) {
+ if (isCondBranch(MI) || isUncondBranch(MI))
+ return MI;
+ else if (!TII->isMov(MI->getOpcode()))
+ break;
}
- assert(curBlk->succ_size() <= 2);
- } //for
+ }
+ return NULL;
+}
- if (retBlks.size() >= 2) {
- addDummyExitBlock(retBlks);
- changed = true;
+MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) {
+ MachineBasicBlock::reverse_iterator It = MBB->rbegin();
+ if (It != MBB->rend()) {
+ MachineInstr *instr = &(*It);
+ if (instr->getOpcode() == AMDGPU::RETURN)
+ return instr;
}
+ return NULL;
+}
- return changed;
-} //CFGStructurizer::prepare
+MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) {
+ MachineBasicBlock::reverse_iterator It = MBB->rbegin();
+ if (It != MBB->rend()) {
+ MachineInstr *MI = &(*It);
+ if (MI->getOpcode() == AMDGPU::CONTINUE)
+ return MI;
+ }
+ return NULL;
+}
-template<class PassT>
-bool CFGStructurizer<PassT>::run(FuncT &func, PassT &pass,
- const AMDGPURegisterInfo * tri) {
- passRep = &pass;
- funcRep = &func;
- TRI = tri;
+bool AMDGPUCFGStructurizer::isReturnBlock(MachineBasicBlock *MBB) {
+ MachineInstr *MI = getReturnInstr(MBB);
+ bool IsReturn = (MBB->succ_size() == 0);
+ if (MI)
+ assert(IsReturn);
+ else if (IsReturn)
+ DEBUG(
+ dbgs() << "BB" << MBB->getNumber()
+ <<" is return block without RETURN instr\n";);
+ return IsReturn;
+}
- //Assume reducible CFG...
- if (DEBUGME) {
- errs() << "AMDGPUCFGStructurizer::run\n";
- func.viewCFG();
+void AMDGPUCFGStructurizer::cloneSuccessorList(MachineBasicBlock *DstMBB,
+ MachineBasicBlock *SrcMBB) {
+ for (MachineBasicBlock::succ_iterator It = SrcMBB->succ_begin(),
+ iterEnd = SrcMBB->succ_end(); It != iterEnd; ++It)
+ DstMBB->addSuccessor(*It); // *iter's predecessor is also taken care of
+}
+
+MachineBasicBlock *AMDGPUCFGStructurizer::clone(MachineBasicBlock *MBB) {
+ MachineFunction *Func = MBB->getParent();
+ MachineBasicBlock *NewMBB = Func->CreateMachineBasicBlock();
+ Func->push_back(NewMBB); //insert to function
+ for (MachineBasicBlock::iterator It = MBB->begin(), E = MBB->end();
+ It != E; ++It) {
+ MachineInstr *MI = Func->CloneMachineInstr(It);
+ NewMBB->push_back(MI);
}
+ return NewMBB;
+}
+
+void AMDGPUCFGStructurizer::replaceInstrUseOfBlockWith(
+ MachineBasicBlock *SrcMBB, MachineBasicBlock *OldMBB,
+ MachineBasicBlock *NewBlk) {
+ MachineInstr *BranchMI = getLoopendBlockBranchInstr(SrcMBB);
+ if (BranchMI && isCondBranch(BranchMI) &&
+ getTrueBranch(BranchMI) == OldMBB)
+ setTrueBranch(BranchMI, NewBlk);
+}
+
+void AMDGPUCFGStructurizer::wrapup(MachineBasicBlock *MBB) {
+ assert((!MBB->getParent()->getJumpTableInfo()
+ || MBB->getParent()->getJumpTableInfo()->isEmpty())
+ && "found a jump table");
+
+ //collect continue right before endloop
+ SmallVector<MachineInstr *, DEFAULT_VEC_SLOTS> ContInstr;
+ MachineBasicBlock::iterator Pre = MBB->begin();
+ MachineBasicBlock::iterator E = MBB->end();
+ MachineBasicBlock::iterator It = Pre;
+ while (It != E) {
+ if (Pre->getOpcode() == AMDGPU::CONTINUE
+ && It->getOpcode() == AMDGPU::ENDLOOP)
+ ContInstr.push_back(Pre);
+ Pre = It;
+ ++It;
+ }
+
+ //delete continue right before endloop
+ for (unsigned i = 0; i < ContInstr.size(); ++i)
+ ContInstr[i]->eraseFromParent();
+
+ // TODO: Fix up the jump table so a later phase won't be confused. If
+ // jumpTableInfo->isEmpty() == false, we need to clean the jump table, but
+ // there isn't such an interface yet; alternatively, replace all the other
+ // blocks in the jump table with the entryBlk.
+
+}
+
+
+bool AMDGPUCFGStructurizer::prepare() {
+ bool Changed = false;
+
+ //FIXME: if not reducible flow graph, make it so ???
+
+ DEBUG(dbgs() << "AMDGPUCFGStructurizer::prepare\n";);
- domTree = CFGTraits::getDominatorTree(pass);
- if (DEBUGME) {
- domTree->print(errs(), (const llvm::Module*)0);
+ orderBlocks(FuncRep);
+
+ SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> RetBlks;
+
+ // Add an ExitBlk to loops that don't have one
+ for (MachineLoopInfo::iterator It = MLI->begin(),
+ E = MLI->end(); It != E; ++It) {
+ MachineLoop *LoopRep = (*It);
+ MBBVector ExitingMBBs;
+ LoopRep->getExitingBlocks(ExitingMBBs);
+
+ if (ExitingMBBs.size() == 0) {
+ MachineBasicBlock* DummyExitBlk = normalizeInfiniteLoopExit(LoopRep);
+ if (DummyExitBlk)
+ RetBlks.push_back(DummyExitBlk);
+ }
}
- postDomTree = CFGTraits::getPostDominatorTree(pass);
- if (DEBUGME) {
- postDomTree->print(errs());
+ // Remove unconditional branch instr.
+ // Add dummy exit block iff there are multiple returns.
+ for (SmallVectorImpl<MachineBasicBlock *>::const_iterator
+ It = OrderedBlks.begin(), E = OrderedBlks.end(); It != E; ++It) {
+ MachineBasicBlock *MBB = *It;
+ removeUnconditionalBranch(MBB);
+ removeRedundantConditionalBranch(MBB);
+ if (isReturnBlock(MBB)) {
+ RetBlks.push_back(MBB);
+ }
+ assert(MBB->succ_size() <= 2);
}
- loopInfo = CFGTraits::getLoopInfo(pass);
- if (DEBUGME) {
- errs() << "LoopInfo:\n";
- PrintLoopinfo(*loopInfo, errs());
+ if (RetBlks.size() >= 2) {
+ addDummyExitBlock(RetBlks);
+ Changed = true;
}
- orderBlocks();
+ return Changed;
+}
+
+bool AMDGPUCFGStructurizer::run() {
+
+ //Assume reducible CFG...
+ DEBUG(dbgs() << "AMDGPUCFGStructurizer::run\n";FuncRep->viewCFG(););
+
#ifdef STRESSTEST
//Use the worst block ordering to test the algorithm.
ReverseVector(orderedBlks);
#endif
- if (DEBUGME) {
- errs() << "Ordered blocks:\n";
- printOrderedBlocks(errs());
- }
- int numIter = 0;
- bool finish = false;
- BlockT *curBlk;
- bool makeProgress = false;
- int numRemainedBlk = countActiveBlock(orderedBlks.begin(),
- orderedBlks.end());
+ DEBUG(dbgs() << "Ordered blocks:\n"; printOrderedBlocks(););
+ int NumIter = 0;
+ bool Finish = false;
+ MachineBasicBlock *MBB;
+ bool MakeProgress = false;
+ int NumRemainedBlk = countActiveBlock(OrderedBlks.begin(),
+ OrderedBlks.end());
do {
- ++numIter;
- if (DEBUGME) {
- errs() << "numIter = " << numIter
- << ", numRemaintedBlk = " << numRemainedBlk << "\n";
- }
-
- typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
- iterBlk = orderedBlks.begin();
- typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
- iterBlkEnd = orderedBlks.end();
-
- typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
- sccBeginIter = iterBlk;
- BlockT *sccBeginBlk = NULL;
- int sccNumBlk = 0; // The number of active blocks, init to a
+ ++NumIter;
+ DEBUG(
+ dbgs() << "numIter = " << NumIter
+ << ", numRemaintedBlk = " << NumRemainedBlk << "\n";
+ );
+
+ SmallVectorImpl<MachineBasicBlock *>::const_iterator It =
+ OrderedBlks.begin();
+ SmallVectorImpl<MachineBasicBlock *>::const_iterator E =
+ OrderedBlks.end();
+
+ SmallVectorImpl<MachineBasicBlock *>::const_iterator SccBeginIter =
+ It;
+ MachineBasicBlock *SccBeginMBB = NULL;
+ int SccNumBlk = 0; // The number of active blocks, init to a
// maximum possible number.
- int sccNumIter; // Number of iteration in this SCC.
-
- while (iterBlk != iterBlkEnd) {
- curBlk = *iterBlk;
-
- if (sccBeginBlk == NULL) {
- sccBeginIter = iterBlk;
- sccBeginBlk = curBlk;
- sccNumIter = 0;
- sccNumBlk = numRemainedBlk; // Init to maximum possible number.
- if (DEBUGME) {
- errs() << "start processing SCC" << getSCCNum(sccBeginBlk);
- errs() << "\n";
- }
+ int SccNumIter; // Number of iteration in this SCC.
+
+ while (It != E) {
+ MBB = *It;
+
+ if (!SccBeginMBB) {
+ SccBeginIter = It;
+ SccBeginMBB = MBB;
+ SccNumIter = 0;
+ SccNumBlk = NumRemainedBlk; // Init to maximum possible number.
+ DEBUG(
+ dbgs() << "start processing SCC" << getSCCNum(SccBeginMBB);
+ dbgs() << "\n";
+ );
}
- if (!isRetiredBlock(curBlk)) {
- patternMatch(curBlk);
- }
+ if (!isRetiredBlock(MBB))
+ patternMatch(MBB);
- ++iterBlk;
+ ++It;
- bool contNextScc = true;
- if (iterBlk == iterBlkEnd
- || getSCCNum(sccBeginBlk) != getSCCNum(*iterBlk)) {
+ bool ContNextScc = true;
+ if (It == E
+ || getSCCNum(SccBeginMBB) != getSCCNum(*It)) {
// Just finish one scc.
- ++sccNumIter;
- int sccRemainedNumBlk = countActiveBlock(sccBeginIter, iterBlk);
- if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= sccNumBlk) {
- if (DEBUGME) {
- errs() << "Can't reduce SCC " << getSCCNum(curBlk)
- << ", sccNumIter = " << sccNumIter;
- errs() << "doesn't make any progress\n";
- }
- contNextScc = true;
- } else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < sccNumBlk) {
- sccNumBlk = sccRemainedNumBlk;
- iterBlk = sccBeginIter;
- contNextScc = false;
- if (DEBUGME) {
- errs() << "repeat processing SCC" << getSCCNum(curBlk)
- << "sccNumIter = " << sccNumIter << "\n";
- func.viewCFG();
- }
+ ++SccNumIter;
+ int sccRemainedNumBlk = countActiveBlock(SccBeginIter, It);
+ if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= SccNumBlk) {
+ DEBUG(
+ dbgs() << "Can't reduce SCC " << getSCCNum(MBB)
+ << ", sccNumIter = " << SccNumIter;
+ dbgs() << "doesn't make any progress\n";
+ );
+ ContNextScc = true;
+ } else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < SccNumBlk) {
+ SccNumBlk = sccRemainedNumBlk;
+ It = SccBeginIter;
+ ContNextScc = false;
+ DEBUG(
+ dbgs() << "repeat processing SCC" << getSCCNum(MBB)
+ << "sccNumIter = " << SccNumIter << "\n";
+ FuncRep->viewCFG();
+ );
} else {
// Finish the current scc.
- contNextScc = true;
+ ContNextScc = true;
}
} else {
// Continue on next component in the current scc.
- contNextScc = false;
+ ContNextScc = false;
}
- if (contNextScc) {
- sccBeginBlk = NULL;
- }
+ if (ContNextScc)
+ SccBeginMBB = NULL;
} //while, "one iteration" over the function.
- BlockT *entryBlk = FuncGTraits::nodes_begin(&func);
- if (entryBlk->succ_size() == 0) {
- finish = true;
- if (DEBUGME) {
- errs() << "Reduce to one block\n";
- }
+ MachineBasicBlock *EntryMBB =
+ GraphTraits<MachineFunction *>::nodes_begin(FuncRep);
+ if (EntryMBB->succ_size() == 0) {
+ Finish = true;
+ DEBUG(
+ dbgs() << "Reduce to one block\n";
+ );
} else {
- int newnumRemainedBlk
- = countActiveBlock(orderedBlks.begin(), orderedBlks.end());
+ int NewnumRemainedBlk
+ = countActiveBlock(OrderedBlks.begin(), OrderedBlks.end());
// consider cloned blocks ??
- if (newnumRemainedBlk == 1 || newnumRemainedBlk < numRemainedBlk) {
- makeProgress = true;
- numRemainedBlk = newnumRemainedBlk;
+ if (NewnumRemainedBlk == 1 || NewnumRemainedBlk < NumRemainedBlk) {
+ MakeProgress = true;
+ NumRemainedBlk = NewnumRemainedBlk;
} else {
- makeProgress = false;
- if (DEBUGME) {
- errs() << "No progress\n";
- }
+ MakeProgress = false;
+ DEBUG(
+ dbgs() << "No progress\n";
+ );
}
}
- } while (!finish && makeProgress);
+ } while (!Finish && MakeProgress);
// Misc wrap up to maintain the consistency of the Function representation.
- CFGTraits::wrapup(FuncGTraits::nodes_begin(&func));
+ wrapup(GraphTraits<MachineFunction *>::nodes_begin(FuncRep));
// Detach retired Block, release memory.
- for (typename BlockInfoMap::iterator iterMap = blockInfoMap.begin(),
- iterEndMap = blockInfoMap.end(); iterMap != iterEndMap; ++iterMap) {
- if ((*iterMap).second && (*iterMap).second->isRetired) {
- assert(((*iterMap).first)->getNumber() != -1);
- if (DEBUGME) {
- errs() << "Erase BB" << ((*iterMap).first)->getNumber() << "\n";
- }
- (*iterMap).first->eraseFromParent(); //Remove from the parent Function.
+ for (MBBInfoMap::iterator It = BlockInfoMap.begin(), E = BlockInfoMap.end();
+ It != E; ++It) {
+ if ((*It).second && (*It).second->IsRetired) {
+ assert(((*It).first)->getNumber() != -1);
+ DEBUG(
+ dbgs() << "Erase BB" << ((*It).first)->getNumber() << "\n";
+ );
+ (*It).first->eraseFromParent(); //Remove from the parent Function.
}
- delete (*iterMap).second;
+ delete (*It).second;
}
- blockInfoMap.clear();
+ BlockInfoMap.clear();
+ LLInfoMap.clear();
- // clear loopLandInfoMap
- for (typename LoopLandInfoMap::iterator iterMap = loopLandInfoMap.begin(),
- iterEndMap = loopLandInfoMap.end(); iterMap != iterEndMap; ++iterMap) {
- delete (*iterMap).second;
- }
- loopLandInfoMap.clear();
+ DEBUG(
+ FuncRep->viewCFG();
+ );
- if (DEBUGME) {
- func.viewCFG();
- }
-
- if (!finish) {
- assert(!"IRREDUCIBL_CF");
- }
+ if (!Finish)
+ llvm_unreachable("IRREDUCIBL_CF");
return true;
-} //CFGStructurizer::run
-
-/// Print the ordered Blocks.
-///
-template<class PassT>
-void CFGStructurizer<PassT>::printOrderedBlocks(llvm::raw_ostream &os) {
- size_t i = 0;
- for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
- iterBlk = orderedBlks.begin(), iterBlkEnd = orderedBlks.end();
- iterBlk != iterBlkEnd;
- ++iterBlk, ++i) {
- os << "BB" << (*iterBlk)->getNumber();
- os << "(" << getSCCNum(*iterBlk) << "," << (*iterBlk)->size() << ")";
- if (i != 0 && i % 10 == 0) {
- os << "\n";
- } else {
- os << " ";
- }
- }
-} //printOrderedBlocks
-
-/// Compute the reversed DFS post order of Blocks
-///
-template<class PassT> void CFGStructurizer<PassT>::orderBlocks() {
- int sccNum = 0;
- BlockT *bb;
- for (scc_iterator<FuncT *> sccIter = scc_begin(funcRep),
- sccEnd = scc_end(funcRep); sccIter != sccEnd; ++sccIter, ++sccNum) {
- std::vector<BlockT *> &sccNext = *sccIter;
- for (typename std::vector<BlockT *>::const_iterator
- blockIter = sccNext.begin(), blockEnd = sccNext.end();
+}
+
+
+
+void AMDGPUCFGStructurizer::orderBlocks(MachineFunction *MF) {
+ int SccNum = 0;
+ MachineBasicBlock *MBB;
+ for (scc_iterator<MachineFunction *> It = scc_begin(MF), E = scc_end(MF);
+ It != E; ++It, ++SccNum) {
+ std::vector<MachineBasicBlock *> &SccNext = *It;
+ for (std::vector<MachineBasicBlock *>::const_iterator
+ blockIter = SccNext.begin(), blockEnd = SccNext.end();
blockIter != blockEnd; ++blockIter) {
- bb = *blockIter;
- orderedBlks.push_back(bb);
- recordSccnum(bb, sccNum);
+ MBB = *blockIter;
+ OrderedBlks.push_back(MBB);
+ recordSccnum(MBB, SccNum);
}
}
//walk through all the blocks in func to check for unreachable blocks
- for (BlockIterator blockIter1 = FuncGTraits::nodes_begin(funcRep),
- blockEnd1 = FuncGTraits::nodes_end(funcRep);
- blockIter1 != blockEnd1; ++blockIter1) {
- BlockT *bb = &(*blockIter1);
- sccNum = getSCCNum(bb);
- if (sccNum == INVALIDSCCNUM) {
- errs() << "unreachable block BB" << bb->getNumber() << "\n";
- }
+ typedef GraphTraits<MachineFunction *> GTM;
+ MachineFunction::iterator It = GTM::nodes_begin(MF), E = GTM::nodes_end(MF);
+ for (; It != E; ++It) {
+ MachineBasicBlock *MBB = &(*It);
+ SccNum = getSCCNum(MBB);
+ if (SccNum == INVALIDSCCNUM)
+ dbgs() << "unreachable block BB" << MBB->getNumber() << "\n";
}
-} //orderBlocks
+}
-template<class PassT> int CFGStructurizer<PassT>::patternMatch(BlockT *curBlk) {
- int numMatch = 0;
- int curMatch;
+int AMDGPUCFGStructurizer::patternMatch(MachineBasicBlock *MBB) {
+ int NumMatch = 0;
+ int CurMatch;
- if (DEBUGME) {
- errs() << "Begin patternMatch BB" << curBlk->getNumber() << "\n";
- }
+ DEBUG(
+ dbgs() << "Begin patternMatch BB" << MBB->getNumber() << "\n";
+ );
- while ((curMatch = patternMatchGroup(curBlk)) > 0) {
- numMatch += curMatch;
- }
+ while ((CurMatch = patternMatchGroup(MBB)) > 0)
+ NumMatch += CurMatch;
- if (DEBUGME) {
- errs() << "End patternMatch BB" << curBlk->getNumber()
- << ", numMatch = " << numMatch << "\n";
- }
+ DEBUG(
+ dbgs() << "End patternMatch BB" << MBB->getNumber()
+ << ", numMatch = " << NumMatch << "\n";
+ );
+
+ return NumMatch;
+}
+
+int AMDGPUCFGStructurizer::patternMatchGroup(MachineBasicBlock *MBB) {
+ int NumMatch = 0;
+ NumMatch += loopendPatternMatch();
+ NumMatch += serialPatternMatch(MBB);
+ NumMatch += ifPatternMatch(MBB);
+ return NumMatch;
+}
- return numMatch;
-} //patternMatch
-
-template<class PassT>
-int CFGStructurizer<PassT>::patternMatchGroup(BlockT *curBlk) {
- int numMatch = 0;
- numMatch += serialPatternMatch(curBlk);
- numMatch += ifPatternMatch(curBlk);
- numMatch += loopendPatternMatch(curBlk);
- numMatch += loopPatternMatch(curBlk);
- return numMatch;
-}//patternMatchGroup
-
-template<class PassT>
-int CFGStructurizer<PassT>::serialPatternMatch(BlockT *curBlk) {
- if (curBlk->succ_size() != 1) {
+
+int AMDGPUCFGStructurizer::serialPatternMatch(MachineBasicBlock *MBB) {
+ if (MBB->succ_size() != 1)
return 0;
- }
- BlockT *childBlk = *curBlk->succ_begin();
- if (childBlk->pred_size() != 1 || isActiveLoophead(childBlk)) {
+ MachineBasicBlock *childBlk = *MBB->succ_begin();
+ if (childBlk->pred_size() != 1 || isActiveLoophead(childBlk))
return 0;
- }
- mergeSerialBlock(curBlk, childBlk);
+ mergeSerialBlock(MBB, childBlk);
++numSerialPatternMatch;
return 1;
-} //serialPatternMatch
+}
-template<class PassT>
-int CFGStructurizer<PassT>::ifPatternMatch(BlockT *curBlk) {
+int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) {
//two edges
- if (curBlk->succ_size() != 2) {
+ if (MBB->succ_size() != 2)
return 0;
- }
-
- if (hasBackEdge(curBlk)) {
+ if (hasBackEdge(MBB))
return 0;
- }
-
- InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(curBlk);
- if (branchInstr == NULL) {
+ MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
+ if (!BranchMI)
return 0;
- }
- assert(CFGTraits::isCondBranch(branchInstr));
+ assert(isCondBranch(BranchMI));
+ int NumMatch = 0;
- BlockT *trueBlk = CFGTraits::getTrueBranch(branchInstr);
- BlockT *falseBlk = CFGTraits::getFalseBranch(curBlk, branchInstr);
- BlockT *landBlk;
- int cloned = 0;
+ MachineBasicBlock *TrueMBB = getTrueBranch(BranchMI);
+ NumMatch += serialPatternMatch(TrueMBB);
+ NumMatch += ifPatternMatch(TrueMBB);
+ MachineBasicBlock *FalseMBB = getFalseBranch(MBB, BranchMI);
+ NumMatch += serialPatternMatch(FalseMBB);
+ NumMatch += ifPatternMatch(FalseMBB);
+ MachineBasicBlock *LandBlk;
+ int Cloned = 0;
+ assert (!TrueMBB->succ_empty() || !FalseMBB->succ_empty());
// TODO: Simplify
- if (trueBlk->succ_size() == 1 && falseBlk->succ_size() == 1
- && *trueBlk->succ_begin() == *falseBlk->succ_begin()) {
- landBlk = *trueBlk->succ_begin();
- } else if (trueBlk->succ_size() == 0 && falseBlk->succ_size() == 0) {
- landBlk = NULL;
- } else if (trueBlk->succ_size() == 1 && *trueBlk->succ_begin() == falseBlk) {
- landBlk = falseBlk;
- falseBlk = NULL;
- } else if (falseBlk->succ_size() == 1
- && *falseBlk->succ_begin() == trueBlk) {
- landBlk = trueBlk;
- trueBlk = NULL;
- } else if (falseBlk->succ_size() == 1
- && isSameloopDetachedContbreak(trueBlk, falseBlk)) {
- landBlk = *falseBlk->succ_begin();
- } else if (trueBlk->succ_size() == 1
- && isSameloopDetachedContbreak(falseBlk, trueBlk)) {
- landBlk = *trueBlk->succ_begin();
+ if (TrueMBB->succ_size() == 1 && FalseMBB->succ_size() == 1
+ && *TrueMBB->succ_begin() == *FalseMBB->succ_begin()) {
+ // Diamond pattern
+ LandBlk = *TrueMBB->succ_begin();
+ } else if (TrueMBB->succ_size() == 1 && *TrueMBB->succ_begin() == FalseMBB) {
+ // Triangle pattern, false is empty
+ LandBlk = FalseMBB;
+ FalseMBB = NULL;
+ } else if (FalseMBB->succ_size() == 1
+ && *FalseMBB->succ_begin() == TrueMBB) {
+ // Triangle pattern, true is empty
+ // We reverse the predicate to make a triangle, empty false pattern;
+ std::swap(TrueMBB, FalseMBB);
+ reversePredicateSetter(MBB->end());
+ LandBlk = FalseMBB;
+ FalseMBB = NULL;
+ } else if (FalseMBB->succ_size() == 1
+ && isSameloopDetachedContbreak(TrueMBB, FalseMBB)) {
+ LandBlk = *FalseMBB->succ_begin();
+ } else if (TrueMBB->succ_size() == 1
+ && isSameloopDetachedContbreak(FalseMBB, TrueMBB)) {
+ LandBlk = *TrueMBB->succ_begin();
} else {
- return handleJumpintoIf(curBlk, trueBlk, falseBlk);
+ return NumMatch + handleJumpintoIf(MBB, TrueMBB, FalseMBB);
}
// improveSimpleJumpintoIf can handle the case where landBlk == NULL but the
// new BB created for landBlk==NULL may introduce a new challenge to the
// reduction process.
- if (landBlk != NULL &&
- ((trueBlk && trueBlk->pred_size() > 1)
- || (falseBlk && falseBlk->pred_size() > 1))) {
- cloned += improveSimpleJumpintoIf(curBlk, trueBlk, falseBlk, &landBlk);
+ if (LandBlk &&
+ ((TrueMBB && TrueMBB->pred_size() > 1)
+ || (FalseMBB && FalseMBB->pred_size() > 1))) {
+ Cloned += improveSimpleJumpintoIf(MBB, TrueMBB, FalseMBB, &LandBlk);
}
- if (trueBlk && trueBlk->pred_size() > 1) {
- trueBlk = cloneBlockForPredecessor(trueBlk, curBlk);
- ++cloned;
+ if (TrueMBB && TrueMBB->pred_size() > 1) {
+ TrueMBB = cloneBlockForPredecessor(TrueMBB, MBB);
+ ++Cloned;
}
- if (falseBlk && falseBlk->pred_size() > 1) {
- falseBlk = cloneBlockForPredecessor(falseBlk, curBlk);
- ++cloned;
+ if (FalseMBB && FalseMBB->pred_size() > 1) {
+ FalseMBB = cloneBlockForPredecessor(FalseMBB, MBB);
+ ++Cloned;
}
- mergeIfthenelseBlock(branchInstr, curBlk, trueBlk, falseBlk, landBlk);
+ mergeIfthenelseBlock(BranchMI, MBB, TrueMBB, FalseMBB, LandBlk);
++numIfPatternMatch;
- numClonedBlock += cloned;
-
- return 1 + cloned;
-} //ifPatternMatch
+ numClonedBlock += Cloned;
-template<class PassT>
-int CFGStructurizer<PassT>::switchPatternMatch(BlockT *curBlk) {
- return 0;
-} //switchPatternMatch
+ return 1 + Cloned + NumMatch;
+}
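+
+// Roughly, the shapes handled by ifPatternMatch above are:
+//   diamond:  MBB branches to TrueMBB and FalseMBB, which both fall through
+//             to a common LandBlk.
+//   triangle: one side's single successor is the other side; the "true side
+//             empty" case is reduced to "false side empty" by swapping the
+//             successors and reversing the predicate.
+// Anything else is either the detached continue/break case or falls back to
+// handleJumpintoIf. A successful match folds both sides into MBB via
+// mergeIfthenelseBlock.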
-template<class PassT>
-int CFGStructurizer<PassT>::loopendPatternMatch(BlockT *curBlk) {
- LoopT *loopRep = loopInfo->getLoopFor(curBlk);
- typename std::vector<LoopT *> nestedLoops;
- while (loopRep) {
- nestedLoops.push_back(loopRep);
- loopRep = loopRep->getParentLoop();
+int AMDGPUCFGStructurizer::loopendPatternMatch() {
+ std::vector<MachineLoop *> NestedLoops;
+ for (MachineLoopInfo::iterator It = MLI->begin(), E = MLI->end();
+ It != E; ++It) {
+ df_iterator<MachineLoop *> LpIt = df_begin(*It),
+ LpE = df_end(*It);
+ for (; LpIt != LpE; ++LpIt)
+ NestedLoops.push_back(*LpIt);
}
-
- if (nestedLoops.size() == 0) {
+ if (NestedLoops.size() == 0)
return 0;
- }
  // Process nested loops outside->inside, so a "continue" to an outer loop
  // won't be mistaken for a "break" out of the current loop.
- int num = 0;
- for (typename std::vector<LoopT *>::reverse_iterator
- iter = nestedLoops.rbegin(), iterEnd = nestedLoops.rend();
- iter != iterEnd; ++iter) {
- loopRep = *iter;
-
- if (getLoopLandBlock(loopRep) != NULL) {
+ int Num = 0;
+ for (std::vector<MachineLoop *>::reverse_iterator It = NestedLoops.rbegin(),
+ E = NestedLoops.rend(); It != E; ++It) {
+ MachineLoop *ExaminedLoop = *It;
+ if (ExaminedLoop->getNumBlocks() == 0 || Visited[ExaminedLoop])
continue;
- }
-
- BlockT *loopHeader = loopRep->getHeader();
-
- int numBreak = loopbreakPatternMatch(loopRep, loopHeader);
-
- if (numBreak == -1) {
+ DEBUG(dbgs() << "Processing:\n"; ExaminedLoop->dump(););
+ int NumBreak = mergeLoop(ExaminedLoop);
+ if (NumBreak == -1)
break;
- }
-
- int numCont = loopcontPatternMatch(loopRep, loopHeader);
- num += numBreak + numCont;
- }
-
- return num;
-} //loopendPatternMatch
-
-template<class PassT>
-int CFGStructurizer<PassT>::loopPatternMatch(BlockT *curBlk) {
- if (curBlk->succ_size() != 0) {
- return 0;
- }
-
- int numLoop = 0;
- LoopT *loopRep = loopInfo->getLoopFor(curBlk);
- while (loopRep && loopRep->getHeader() == curBlk) {
- LoopLandInfo *loopLand = getLoopLandInfo(loopRep);
- if (loopLand) {
- BlockT *landBlk = loopLand->landBlk;
- assert(landBlk);
- if (!isRetiredBlock(landBlk)) {
- mergeLooplandBlock(curBlk, loopLand);
- ++numLoop;
- }
- }
- loopRep = loopRep->getParentLoop();
- }
-
- numLoopPatternMatch += numLoop;
-
- return numLoop;
-} //loopPatternMatch
-
-template<class PassT>
-int CFGStructurizer<PassT>::loopbreakPatternMatch(LoopT *loopRep,
- BlockT *loopHeader) {
- BlockTSmallerVector exitingBlks;
- loopRep->getExitingBlocks(exitingBlks);
-
- if (DEBUGME) {
- errs() << "Loop has " << exitingBlks.size() << " exiting blocks\n";
- }
-
- if (exitingBlks.size() == 0) {
- setLoopLandBlock(loopRep);
- return 0;
- }
-
- // Compute the corresponding exitBlks and exit block set.
- BlockTSmallerVector exitBlks;
- std::set<BlockT *> exitBlkSet;
- for (typename BlockTSmallerVector::const_iterator iter = exitingBlks.begin(),
- iterEnd = exitingBlks.end(); iter != iterEnd; ++iter) {
- BlockT *exitingBlk = *iter;
- BlockT *exitBlk = exitingBlock2ExitBlock(loopRep, exitingBlk);
- exitBlks.push_back(exitBlk);
- exitBlkSet.insert(exitBlk); //non-duplicate insert
- }
-
- assert(exitBlkSet.size() > 0);
- assert(exitBlks.size() == exitingBlks.size());
-
- if (DEBUGME) {
- errs() << "Loop has " << exitBlkSet.size() << " exit blocks\n";
+ Num += NumBreak;
}
+ return Num;
+}
- // Find exitLandBlk.
- BlockT *exitLandBlk = NULL;
- int numCloned = 0;
- int numSerial = 0;
-
- if (exitBlkSet.size() == 1) {
- exitLandBlk = *exitBlkSet.begin();
- } else {
- exitLandBlk = findNearestCommonPostDom(exitBlkSet);
-
- if (exitLandBlk == NULL) {
- return -1;
- }
-
- bool allInPath = true;
- bool allNotInPath = true;
- for (typename std::set<BlockT*>::const_iterator
- iter = exitBlkSet.begin(),
- iterEnd = exitBlkSet.end();
- iter != iterEnd; ++iter) {
- BlockT *exitBlk = *iter;
-
- PathToKind pathKind = singlePathTo(exitBlk, exitLandBlk, true);
- if (DEBUGME) {
- errs() << "BB" << exitBlk->getNumber()
- << " to BB" << exitLandBlk->getNumber() << " PathToKind="
- << pathKind << "\n";
- }
-
- allInPath = allInPath && (pathKind == SinglePath_InPath);
- allNotInPath = allNotInPath && (pathKind == SinglePath_NotInPath);
-
- if (!allInPath && !allNotInPath) {
- if (DEBUGME) {
- errs() << "singlePath check fail\n";
- }
- return -1;
- }
- } // check all exit blocks
-
- if (allNotInPath) {
-
- // TODO: Simplify, maybe separate function?
- LoopT *parentLoopRep = loopRep->getParentLoop();
- BlockT *parentLoopHeader = NULL;
- if (parentLoopRep)
- parentLoopHeader = parentLoopRep->getHeader();
-
- if (exitLandBlk == parentLoopHeader &&
- (exitLandBlk = relocateLoopcontBlock(parentLoopRep,
- loopRep,
- exitBlkSet,
- exitLandBlk)) != NULL) {
- if (DEBUGME) {
- errs() << "relocateLoopcontBlock success\n";
- }
- } else if ((exitLandBlk = addLoopEndbranchBlock(loopRep,
- exitingBlks,
- exitBlks)) != NULL) {
- if (DEBUGME) {
- errs() << "insertEndbranchBlock success\n";
- }
- } else {
- if (DEBUGME) {
- errs() << "loop exit fail\n";
- }
- return -1;
- }
- }
-
- // Handle side entry to exit path.
- exitBlks.clear();
- exitBlkSet.clear();
- for (typename BlockTSmallerVector::iterator iterExiting =
- exitingBlks.begin(),
- iterExitingEnd = exitingBlks.end();
- iterExiting != iterExitingEnd; ++iterExiting) {
- BlockT *exitingBlk = *iterExiting;
- BlockT *exitBlk = exitingBlock2ExitBlock(loopRep, exitingBlk);
- BlockT *newExitBlk = exitBlk;
-
- if (exitBlk != exitLandBlk && exitBlk->pred_size() > 1) {
- newExitBlk = cloneBlockForPredecessor(exitBlk, exitingBlk);
- ++numCloned;
- }
-
- numCloned += cloneOnSideEntryTo(exitingBlk, newExitBlk, exitLandBlk);
-
- exitBlks.push_back(newExitBlk);
- exitBlkSet.insert(newExitBlk);
- }
-
- for (typename BlockTSmallerVector::iterator iterExit = exitBlks.begin(),
- iterExitEnd = exitBlks.end();
- iterExit != iterExitEnd; ++iterExit) {
- BlockT *exitBlk = *iterExit;
- numSerial += serialPatternMatch(exitBlk);
- }
-
- for (typename BlockTSmallerVector::iterator iterExit = exitBlks.begin(),
- iterExitEnd = exitBlks.end();
- iterExit != iterExitEnd; ++iterExit) {
- BlockT *exitBlk = *iterExit;
- if (exitBlk->pred_size() > 1) {
- if (exitBlk != exitLandBlk) {
- return -1;
- }
- } else {
- if (exitBlk != exitLandBlk &&
- (exitBlk->succ_size() != 1 ||
- *exitBlk->succ_begin() != exitLandBlk)) {
- return -1;
- }
- }
- }
- } // else
-
- exitLandBlk = recordLoopLandBlock(loopRep, exitLandBlk, exitBlks, exitBlkSet);
-
- // Fold break into the breaking block. Leverage across level breaks.
- assert(exitingBlks.size() == exitBlks.size());
- for (typename BlockTSmallerVector::const_iterator iterExit = exitBlks.begin(),
- iterExiting = exitingBlks.begin(), iterExitEnd = exitBlks.end();
- iterExit != iterExitEnd; ++iterExit, ++iterExiting) {
- BlockT *exitBlk = *iterExit;
- BlockT *exitingBlk = *iterExiting;
- assert(exitBlk->pred_size() == 1 || exitBlk == exitLandBlk);
- LoopT *exitingLoop = loopInfo->getLoopFor(exitingBlk);
- handleLoopbreak(exitingBlk, exitingLoop, exitBlk, loopRep, exitLandBlk);
- }
+int AMDGPUCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
+ MachineBasicBlock *LoopHeader = LoopRep->getHeader();
+ MBBVector ExitingMBBs;
+ LoopRep->getExitingBlocks(ExitingMBBs);
+ assert(!ExitingMBBs.empty() && "Infinite Loop not supported");
+ DEBUG(dbgs() << "Loop has " << ExitingMBBs.size() << " exiting blocks\n";);
+ // We assume a single ExitBlk
+ MBBVector ExitBlks;
+ LoopRep->getExitBlocks(ExitBlks);
+ SmallPtrSet<MachineBasicBlock *, 2> ExitBlkSet;
+ for (unsigned i = 0, e = ExitBlks.size(); i < e; ++i)
+ ExitBlkSet.insert(ExitBlks[i]);
+ assert(ExitBlkSet.size() == 1);
+ MachineBasicBlock *ExitBlk = *ExitBlks.begin();
+ assert(ExitBlk && "Loop has several exit block");
+ MBBVector LatchBlks;
+ typedef GraphTraits<Inverse<MachineBasicBlock*> > InvMBBTraits;
+ InvMBBTraits::ChildIteratorType PI = InvMBBTraits::child_begin(LoopHeader),
+ PE = InvMBBTraits::child_end(LoopHeader);
+ for (; PI != PE; PI++) {
+ if (LoopRep->contains(*PI))
+ LatchBlks.push_back(*PI);
+ }
+
+ for (unsigned i = 0, e = ExitingMBBs.size(); i < e; ++i)
+ mergeLoopbreakBlock(ExitingMBBs[i], ExitBlk);
+ for (unsigned i = 0, e = LatchBlks.size(); i < e; ++i)
+ settleLoopcontBlock(LatchBlks[i], LoopHeader);
+ int Match = 0;
+ do {
+ Match = 0;
+ Match += serialPatternMatch(LoopHeader);
+ Match += ifPatternMatch(LoopHeader);
+ } while (Match > 0);
+ mergeLooplandBlock(LoopHeader, ExitBlk);
+ MachineLoop *ParentLoop = LoopRep->getParentLoop();
+ if (ParentLoop)
+ MLI->changeLoopFor(LoopHeader, ParentLoop);
+ else
+ MLI->removeBlock(LoopHeader);
+ Visited[LoopRep] = true;
+ return 1;
+}
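+
+// mergeLoop in outline: every exiting block gets a structured break
+// (mergeLoopbreakBlock), every latch gets a structured continue
+// (settleLoopcontBlock), the remaining body is folded into the header by
+// repeated serial/if pattern matching, and the header is finally wrapped in
+// WHILELOOP ... ENDLOOP with the single exit block as its successor
+// (mergeLooplandBlock).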
- int numBreak = static_cast<int>(exitingBlks.size());
- numLoopbreakPatternMatch += numBreak;
- numClonedBlock += numCloned;
- return numBreak + numSerial + numCloned;
-} //loopbreakPatternMatch
-
-template<class PassT>
-int CFGStructurizer<PassT>::loopcontPatternMatch(LoopT *loopRep,
- BlockT *loopHeader) {
- int numCont = 0;
- SmallVector<BlockT *, DEFAULT_VEC_SLOTS> contBlk;
- for (typename InvBlockGTraits::ChildIteratorType iter =
- InvBlockGTraits::child_begin(loopHeader),
- iterEnd = InvBlockGTraits::child_end(loopHeader);
- iter != iterEnd; ++iter) {
- BlockT *curBlk = *iter;
- if (loopRep->contains(curBlk)) {
- handleLoopcontBlock(curBlk, loopInfo->getLoopFor(curBlk),
- loopHeader, loopRep);
- contBlk.push_back(curBlk);
- ++numCont;
+int AMDGPUCFGStructurizer::loopcontPatternMatch(MachineLoop *LoopRep,
+ MachineBasicBlock *LoopHeader) {
+ int NumCont = 0;
+ SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> ContMBB;
+ typedef GraphTraits<Inverse<MachineBasicBlock *> > GTIM;
+ GTIM::ChildIteratorType It = GTIM::child_begin(LoopHeader),
+ E = GTIM::child_end(LoopHeader);
+ for (; It != E; ++It) {
+ MachineBasicBlock *MBB = *It;
+ if (LoopRep->contains(MBB)) {
+ handleLoopcontBlock(MBB, MLI->getLoopFor(MBB),
+ LoopHeader, LoopRep);
+ ContMBB.push_back(MBB);
+ ++NumCont;
}
}
- for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::iterator
- iter = contBlk.begin(), iterEnd = contBlk.end();
- iter != iterEnd; ++iter) {
- (*iter)->removeSuccessor(loopHeader);
+ for (SmallVectorImpl<MachineBasicBlock *>::iterator It = ContMBB.begin(),
+ E = ContMBB.end(); It != E; ++It) {
+ (*It)->removeSuccessor(LoopHeader);
}
- numLoopcontPatternMatch += numCont;
+ numLoopcontPatternMatch += NumCont;
- return numCont;
-} //loopcontPatternMatch
+ return NumCont;
+}
-template<class PassT>
-bool CFGStructurizer<PassT>::isSameloopDetachedContbreak(BlockT *src1Blk,
- BlockT *src2Blk) {
- // return true iff src1Blk->succ_size() == 0 && src1Blk and src2Blk are in the
- // same loop with LoopLandInfo without explicitly keeping track of
- // loopContBlks and loopBreakBlks, this is a method to get the information.
- //
- if (src1Blk->succ_size() == 0) {
- LoopT *loopRep = loopInfo->getLoopFor(src1Blk);
- if (loopRep != NULL && loopRep == loopInfo->getLoopFor(src2Blk)) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
- if (theEntry != NULL) {
- if (DEBUGME) {
- errs() << "isLoopContBreakBlock yes src1 = BB"
- << src1Blk->getNumber()
- << " src2 = BB" << src2Blk->getNumber() << "\n";
- }
+bool AMDGPUCFGStructurizer::isSameloopDetachedContbreak(
+ MachineBasicBlock *Src1MBB, MachineBasicBlock *Src2MBB) {
+ if (Src1MBB->succ_size() == 0) {
+ MachineLoop *LoopRep = MLI->getLoopFor(Src1MBB);
+    if (LoopRep && LoopRep == MLI->getLoopFor(Src2MBB)) {
+ MachineBasicBlock *&TheEntry = LLInfoMap[LoopRep];
+ if (TheEntry) {
+ DEBUG(
+ dbgs() << "isLoopContBreakBlock yes src1 = BB"
+ << Src1MBB->getNumber()
+ << " src2 = BB" << Src2MBB->getNumber() << "\n";
+ );
return true;
}
}
}
return false;
-} //isSameloopDetachedContbreak
-
-template<class PassT>
-int CFGStructurizer<PassT>::handleJumpintoIf(BlockT *headBlk,
- BlockT *trueBlk,
- BlockT *falseBlk) {
- int num = handleJumpintoIfImp(headBlk, trueBlk, falseBlk);
- if (num == 0) {
- if (DEBUGME) {
- errs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n";
- }
- num = handleJumpintoIfImp(headBlk, falseBlk, trueBlk);
- }
- return num;
}
-template<class PassT>
-int CFGStructurizer<PassT>::handleJumpintoIfImp(BlockT *headBlk,
- BlockT *trueBlk,
- BlockT *falseBlk) {
- int num = 0;
- BlockT *downBlk;
-
- //trueBlk could be the common post dominator
- downBlk = trueBlk;
-
- if (DEBUGME) {
- errs() << "handleJumpintoIfImp head = BB" << headBlk->getNumber()
- << " true = BB" << trueBlk->getNumber()
- << ", numSucc=" << trueBlk->succ_size()
- << " false = BB" << falseBlk->getNumber() << "\n";
+int AMDGPUCFGStructurizer::handleJumpintoIf(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
+ int Num = handleJumpintoIfImp(HeadMBB, TrueMBB, FalseMBB);
+ if (Num == 0) {
+ DEBUG(
+ dbgs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n";
+ );
+ Num = handleJumpintoIfImp(HeadMBB, FalseMBB, TrueMBB);
}
+ return Num;
+}
- while (downBlk) {
- if (DEBUGME) {
- errs() << "check down = BB" << downBlk->getNumber();
- }
-
- if (singlePathTo(falseBlk, downBlk) == SinglePath_InPath) {
- if (DEBUGME) {
- errs() << " working\n";
- }
-
- num += cloneOnSideEntryTo(headBlk, trueBlk, downBlk);
- num += cloneOnSideEntryTo(headBlk, falseBlk, downBlk);
+int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
+ int Num = 0;
+ MachineBasicBlock *DownBlk;
- numClonedBlock += num;
- num += serialPatternMatch(*headBlk->succ_begin());
- num += serialPatternMatch(*(++headBlk->succ_begin()));
- num += ifPatternMatch(headBlk);
- assert(num > 0);
+ //trueBlk could be the common post dominator
+ DownBlk = TrueMBB;
+
+ DEBUG(
+ dbgs() << "handleJumpintoIfImp head = BB" << HeadMBB->getNumber()
+ << " true = BB" << TrueMBB->getNumber()
+ << ", numSucc=" << TrueMBB->succ_size()
+ << " false = BB" << FalseMBB->getNumber() << "\n";
+ );
+
+ while (DownBlk) {
+ DEBUG(
+ dbgs() << "check down = BB" << DownBlk->getNumber();
+ );
+
+ if (singlePathTo(FalseMBB, DownBlk) == SinglePath_InPath) {
+ DEBUG(
+ dbgs() << " working\n";
+ );
+
+ Num += cloneOnSideEntryTo(HeadMBB, TrueMBB, DownBlk);
+ Num += cloneOnSideEntryTo(HeadMBB, FalseMBB, DownBlk);
+
+ numClonedBlock += Num;
+ Num += serialPatternMatch(*HeadMBB->succ_begin());
+ Num += serialPatternMatch(*llvm::next(HeadMBB->succ_begin()));
+ Num += ifPatternMatch(HeadMBB);
+ assert(Num > 0);
break;
}
- if (DEBUGME) {
- errs() << " not working\n";
- }
- downBlk = (downBlk->succ_size() == 1) ? (*downBlk->succ_begin()) : NULL;
+ DEBUG(
+ dbgs() << " not working\n";
+ );
+ DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : NULL;
} // walk down the postDomTree
- return num;
-} //handleJumpintoIf
-
-template<class PassT>
-void CFGStructurizer<PassT>::showImproveSimpleJumpintoIf(BlockT *headBlk,
- BlockT *trueBlk,
- BlockT *falseBlk,
- BlockT *landBlk,
- bool detail) {
- errs() << "head = BB" << headBlk->getNumber()
- << " size = " << headBlk->size();
- if (detail) {
- errs() << "\n";
- headBlk->print(errs());
- errs() << "\n";
+ return Num;
+}
+
+void AMDGPUCFGStructurizer::showImproveSimpleJumpintoIf(
+ MachineBasicBlock *HeadMBB, MachineBasicBlock *TrueMBB,
+ MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB, bool Detail) {
+ dbgs() << "head = BB" << HeadMBB->getNumber()
+ << " size = " << HeadMBB->size();
+ if (Detail) {
+ dbgs() << "\n";
+ HeadMBB->print(dbgs());
+ dbgs() << "\n";
}
- if (trueBlk) {
- errs() << ", true = BB" << trueBlk->getNumber() << " size = "
- << trueBlk->size() << " numPred = " << trueBlk->pred_size();
- if (detail) {
- errs() << "\n";
- trueBlk->print(errs());
- errs() << "\n";
+ if (TrueMBB) {
+ dbgs() << ", true = BB" << TrueMBB->getNumber() << " size = "
+ << TrueMBB->size() << " numPred = " << TrueMBB->pred_size();
+ if (Detail) {
+ dbgs() << "\n";
+ TrueMBB->print(dbgs());
+ dbgs() << "\n";
}
}
- if (falseBlk) {
- errs() << ", false = BB" << falseBlk->getNumber() << " size = "
- << falseBlk->size() << " numPred = " << falseBlk->pred_size();
- if (detail) {
- errs() << "\n";
- falseBlk->print(errs());
- errs() << "\n";
+ if (FalseMBB) {
+ dbgs() << ", false = BB" << FalseMBB->getNumber() << " size = "
+ << FalseMBB->size() << " numPred = " << FalseMBB->pred_size();
+ if (Detail) {
+ dbgs() << "\n";
+ FalseMBB->print(dbgs());
+ dbgs() << "\n";
}
}
- if (landBlk) {
- errs() << ", land = BB" << landBlk->getNumber() << " size = "
- << landBlk->size() << " numPred = " << landBlk->pred_size();
- if (detail) {
- errs() << "\n";
- landBlk->print(errs());
- errs() << "\n";
+ if (LandMBB) {
+ dbgs() << ", land = BB" << LandMBB->getNumber() << " size = "
+ << LandMBB->size() << " numPred = " << LandMBB->pred_size();
+ if (Detail) {
+ dbgs() << "\n";
+ LandMBB->print(dbgs());
+ dbgs() << "\n";
}
}
- errs() << "\n";
-} //showImproveSimpleJumpintoIf
+ dbgs() << "\n";
+}
-template<class PassT>
-int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk,
- BlockT *trueBlk,
- BlockT *falseBlk,
- BlockT **plandBlk) {
- bool migrateTrue = false;
- bool migrateFalse = false;
+int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
+ MachineBasicBlock **LandMBBPtr) {
+ bool MigrateTrue = false;
+ bool MigrateFalse = false;
- BlockT *landBlk = *plandBlk;
+ MachineBasicBlock *LandBlk = *LandMBBPtr;
- assert((trueBlk == NULL || trueBlk->succ_size() <= 1)
- && (falseBlk == NULL || falseBlk->succ_size() <= 1));
+ assert((!TrueMBB || TrueMBB->succ_size() <= 1)
+ && (!FalseMBB || FalseMBB->succ_size() <= 1));
- if (trueBlk == falseBlk) {
+ if (TrueMBB == FalseMBB)
return 0;
- }
- migrateTrue = needMigrateBlock(trueBlk);
- migrateFalse = needMigrateBlock(falseBlk);
+ MigrateTrue = needMigrateBlock(TrueMBB);
+ MigrateFalse = needMigrateBlock(FalseMBB);
- if (!migrateTrue && !migrateFalse) {
+ if (!MigrateTrue && !MigrateFalse)
return 0;
- }
  // If we need to migrate either trueBlk or falseBlk, also migrate any of them
  // that has more than one predecessor. Without doing this, its predecessor
  // rather than headBlk will have an undefined value in initReg.
- if (!migrateTrue && trueBlk && trueBlk->pred_size() > 1) {
- migrateTrue = true;
- }
- if (!migrateFalse && falseBlk && falseBlk->pred_size() > 1) {
- migrateFalse = true;
- }
+ if (!MigrateTrue && TrueMBB && TrueMBB->pred_size() > 1)
+ MigrateTrue = true;
+ if (!MigrateFalse && FalseMBB && FalseMBB->pred_size() > 1)
+ MigrateFalse = true;
- if (DEBUGME) {
- errs() << "before improveSimpleJumpintoIf: ";
- showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 0);
- }
+ DEBUG(
+ dbgs() << "before improveSimpleJumpintoIf: ";
+ showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
+ );
// org: headBlk => if () {trueBlk} else {falseBlk} => landBlk
//
@@ -1183,205 +1336,192 @@ int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk,
// add initReg = initVal to headBlk
const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
- unsigned initReg =
- funcRep->getRegInfo().createVirtualRegister(I32RC);
- if (!migrateTrue || !migrateFalse) {
- int initVal = migrateTrue ? 0 : 1;
- CFGTraits::insertAssignInstrBefore(headBlk, passRep, initReg, initVal);
+ if (!MigrateTrue || !MigrateFalse) {
+ // XXX: We have an opportunity here to optimize the "branch into if" case
+ // here. Branch into if looks like this:
+ // entry
+ // / |
+ // diamond_head branch_from
+ // / \ |
+ // diamond_false diamond_true
+ // \ /
+ // done
+ //
+ // The diamond_head block begins the "if" and the diamond_true block
+ // is the block being "branched into".
+ //
+ // If MigrateTrue is true, then TrueBB is the block being "branched into"
+ // and if MigrateFalse is true, then FalseBB is the block being
+ // "branched into"
+ //
+ // Here is the pseudo code for how I think the optimization should work:
+ // 1. Insert MOV GPR0, 0 before the branch instruction in diamond_head.
+ // 2. Insert MOV GPR0, 1 before the branch instruction in branch_from.
+ // 3. Move the branch instruction from diamond_head into its own basic
+ // block (new_block).
+ // 4. Add an unconditional branch from diamond_head to new_block
+ // 5. Replace the branch instruction in branch_from with an unconditional
+ // branch to new_block. If branch_from has multiple predecessors, then
+ // we need to replace the True/False block in the branch
+ // instruction instead of replacing it.
+ // 6. Change the condition of the branch instruction in new_block from
+ // COND to (COND || GPR0)
+ //
+    // In order to insert these MOV instructions, we will need to use the
+ // RegisterScavenger. Usually liveness stops being tracked during
+ // the late machine optimization passes, however if we implement
+ // bool TargetRegisterInfo::requiresRegisterScavenging(
+ // const MachineFunction &MF)
+ // and have it return true, liveness will be tracked correctly
+ // by generic optimization passes. We will also need to make sure that
+ // all of our target-specific passes that run after regalloc and before
+ // the CFGStructurizer track liveness and we will need to modify this pass
+ // to correctly track liveness.
+ //
+ // After the above changes, the new CFG should look like this:
+ // entry
+ // / |
+ // diamond_head branch_from
+ // \ /
+ // new_block
+ // / |
+ // diamond_false diamond_true
+ // \ /
+ // done
+ //
+ // Without this optimization, we are forced to duplicate the diamond_true
+ // block and we will end up with a CFG like this:
+ //
+ // entry
+ // / |
+ // diamond_head branch_from
+ // / \ |
+ // diamond_false diamond_true diamond_true (duplicate)
+ // \ / |
+ // done --------------------|
+ //
+ // Duplicating diamond_true can be very costly especially if it has a
+ // lot of instructions.
+ return 0;
}
- int numNewBlk = 0;
-
- if (landBlk == NULL) {
- landBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(landBlk); //insert to function
-
- if (trueBlk) {
- trueBlk->addSuccessor(landBlk);
- } else {
- headBlk->addSuccessor(landBlk);
- }
-
- if (falseBlk) {
- falseBlk->addSuccessor(landBlk);
- } else {
- headBlk->addSuccessor(landBlk);
- }
-
- numNewBlk ++;
- }
+ int NumNewBlk = 0;
- bool landBlkHasOtherPred = (landBlk->pred_size() > 2);
+ bool LandBlkHasOtherPred = (LandBlk->pred_size() > 2);
//insert AMDGPU::ENDIF to avoid special case "input landBlk == NULL"
- typename BlockT::iterator insertPos =
- CFGTraits::getInstrPos
- (landBlk, CFGTraits::insertInstrBefore(landBlk, AMDGPU::ENDIF, passRep));
-
- if (landBlkHasOtherPred) {
- unsigned immReg =
- funcRep->getRegInfo().createVirtualRegister(I32RC);
- CFGTraits::insertAssignInstrBefore(insertPos, passRep, immReg, 2);
- unsigned cmpResReg =
- funcRep->getRegInfo().createVirtualRegister(I32RC);
-
- CFGTraits::insertCompareInstrBefore(landBlk, insertPos, passRep, cmpResReg,
- initReg, immReg);
- CFGTraits::insertCondBranchBefore(landBlk, insertPos,
- AMDGPU::IF_PREDICATE_SET, passRep,
- cmpResReg, DebugLoc());
- }
-
- CFGTraits::insertCondBranchBefore(landBlk, insertPos, AMDGPU::IF_PREDICATE_SET,
- passRep, initReg, DebugLoc());
-
- if (migrateTrue) {
- migrateInstruction(trueBlk, landBlk, insertPos);
+ MachineBasicBlock::iterator I = insertInstrBefore(LandBlk, AMDGPU::ENDIF);
+
+ if (LandBlkHasOtherPred) {
+ llvm_unreachable("Extra register needed to handle CFG");
+ unsigned CmpResReg =
+ HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
+ llvm_unreachable("Extra compare instruction needed to handle CFG");
+ insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET,
+ CmpResReg, DebugLoc());
+ }
+
+ // XXX: We are running this after RA, so creating virtual registers will
+ // cause an assertion failure in the PostRA scheduling pass.
+ unsigned InitReg =
+ HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
+ insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET, InitReg,
+ DebugLoc());
+
+ if (MigrateTrue) {
+ migrateInstruction(TrueMBB, LandBlk, I);
    // need to unconditionally insert the assignment to ensure a path from its
    // predecessor rather than headBlk has a valid value in initReg if
    // (initVal != 1).
- CFGTraits::insertAssignInstrBefore(trueBlk, passRep, initReg, 1);
+ llvm_unreachable("Extra register needed to handle CFG");
}
- CFGTraits::insertInstrBefore(insertPos, AMDGPU::ELSE, passRep);
+ insertInstrBefore(I, AMDGPU::ELSE);
- if (migrateFalse) {
- migrateInstruction(falseBlk, landBlk, insertPos);
+ if (MigrateFalse) {
+ migrateInstruction(FalseMBB, LandBlk, I);
    // need to unconditionally insert the assignment to ensure a path from its
    // predecessor rather than headBlk has a valid value in initReg if
    // (initVal != 0)
- CFGTraits::insertAssignInstrBefore(falseBlk, passRep, initReg, 0);
+ llvm_unreachable("Extra register needed to handle CFG");
}
- if (landBlkHasOtherPred) {
+ if (LandBlkHasOtherPred) {
// add endif
- CFGTraits::insertInstrBefore(insertPos, AMDGPU::ENDIF, passRep);
+ insertInstrBefore(I, AMDGPU::ENDIF);
// put initReg = 2 to other predecessors of landBlk
- for (typename BlockT::pred_iterator predIter = landBlk->pred_begin(),
- predIterEnd = landBlk->pred_end(); predIter != predIterEnd;
- ++predIter) {
- BlockT *curBlk = *predIter;
- if (curBlk != trueBlk && curBlk != falseBlk) {
- CFGTraits::insertAssignInstrBefore(curBlk, passRep, initReg, 2);
- }
- } //for
- }
- if (DEBUGME) {
- errs() << "result from improveSimpleJumpintoIf: ";
- showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 0);
- }
-
- // update landBlk
- *plandBlk = landBlk;
-
- return numNewBlk;
-} //improveSimpleJumpintoIf
-
-template<class PassT>
-void CFGStructurizer<PassT>::handleLoopbreak(BlockT *exitingBlk,
- LoopT *exitingLoop,
- BlockT *exitBlk,
- LoopT *exitLoop,
- BlockT *landBlk) {
- if (DEBUGME) {
- errs() << "Trying to break loop-depth = " << getLoopDepth(exitLoop)
- << " from loop-depth = " << getLoopDepth(exitingLoop) << "\n";
- }
- const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
-
- RegiT initReg = INVALIDREGNUM;
- if (exitingLoop != exitLoop) {
- initReg = static_cast<int>
- (funcRep->getRegInfo().createVirtualRegister(I32RC));
- assert(initReg != INVALIDREGNUM);
- addLoopBreakInitReg(exitLoop, initReg);
- while (exitingLoop != exitLoop && exitingLoop) {
- addLoopBreakOnReg(exitingLoop, initReg);
- exitingLoop = exitingLoop->getParentLoop();
+ for (MachineBasicBlock::pred_iterator PI = LandBlk->pred_begin(),
+ PE = LandBlk->pred_end(); PI != PE; ++PI) {
+ MachineBasicBlock *MBB = *PI;
+ if (MBB != TrueMBB && MBB != FalseMBB)
+ llvm_unreachable("Extra register needed to handle CFG");
}
- assert(exitingLoop == exitLoop);
}
+ DEBUG(
+ dbgs() << "result from improveSimpleJumpintoIf: ";
+ showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
+ );
- mergeLoopbreakBlock(exitingBlk, exitBlk, landBlk, initReg);
+ // update landBlk
+ *LandMBBPtr = LandBlk;
-} //handleLoopbreak
+ return NumNewBlk;
+}
-template<class PassT>
-void CFGStructurizer<PassT>::handleLoopcontBlock(BlockT *contingBlk,
- LoopT *contingLoop,
- BlockT *contBlk,
- LoopT *contLoop) {
- if (DEBUGME) {
- errs() << "loopcontPattern cont = BB" << contingBlk->getNumber()
- << " header = BB" << contBlk->getNumber() << "\n";
+void AMDGPUCFGStructurizer::handleLoopcontBlock(MachineBasicBlock *ContingMBB,
+ MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
+ MachineLoop *ContLoop) {
+ DEBUG(dbgs() << "loopcontPattern cont = BB" << ContingMBB->getNumber()
+ << " header = BB" << ContMBB->getNumber() << "\n";
+ dbgs() << "Trying to continue loop-depth = "
+ << getLoopDepth(ContLoop)
+ << " from loop-depth = " << getLoopDepth(ContingLoop) << "\n";);
+ settleLoopcontBlock(ContingMBB, ContMBB);
+}
- errs() << "Trying to continue loop-depth = "
- << getLoopDepth(contLoop)
- << " from loop-depth = " << getLoopDepth(contingLoop) << "\n";
- }
+void AMDGPUCFGStructurizer::mergeSerialBlock(MachineBasicBlock *DstMBB,
+ MachineBasicBlock *SrcMBB) {
+ DEBUG(
+ dbgs() << "serialPattern BB" << DstMBB->getNumber()
+ << " <= BB" << SrcMBB->getNumber() << "\n";
+ );
+ DstMBB->splice(DstMBB->end(), SrcMBB, SrcMBB->begin(), SrcMBB->end());
- RegiT initReg = INVALIDREGNUM;
- const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
- if (contingLoop != contLoop) {
- initReg = static_cast<int>
- (funcRep->getRegInfo().createVirtualRegister(I32RC));
- assert(initReg != INVALIDREGNUM);
- addLoopContInitReg(contLoop, initReg);
- while (contingLoop && contingLoop->getParentLoop() != contLoop) {
- addLoopBreakOnReg(contingLoop, initReg); //not addLoopContOnReg
- contingLoop = contingLoop->getParentLoop();
- }
- assert(contingLoop && contingLoop->getParentLoop() == contLoop);
- addLoopContOnReg(contingLoop, initReg);
- }
+ DstMBB->removeSuccessor(SrcMBB);
+ cloneSuccessorList(DstMBB, SrcMBB);
- settleLoopcontBlock(contingBlk, contBlk, initReg);
-} //handleLoopcontBlock
+ removeSuccessor(SrcMBB);
+ MLI->removeBlock(SrcMBB);
+ retireBlock(SrcMBB);
+}
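+
+// Serial merge, roughly: for a DstMBB whose only successor is SrcMBB,
+//   DstMBB -> SrcMBB -> X   becomes   DstMBB (with SrcMBB's instructions
+//   appended) -> X
+// SrcMBB's successor list is copied onto DstMBB and SrcMBB is retired.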
-template<class PassT>
-void CFGStructurizer<PassT>::mergeSerialBlock(BlockT *dstBlk, BlockT *srcBlk) {
- if (DEBUGME) {
- errs() << "serialPattern BB" << dstBlk->getNumber()
- << " <= BB" << srcBlk->getNumber() << "\n";
- }
- dstBlk->splice(dstBlk->end(), srcBlk, srcBlk->begin(), srcBlk->end());
-
- dstBlk->removeSuccessor(srcBlk);
- CFGTraits::cloneSuccessorList(dstBlk, srcBlk);
-
- removeSuccessor(srcBlk);
- retireBlock(dstBlk, srcBlk);
-} //mergeSerialBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::mergeIfthenelseBlock(InstrT *branchInstr,
- BlockT *curBlk,
- BlockT *trueBlk,
- BlockT *falseBlk,
- BlockT *landBlk) {
- if (DEBUGME) {
- errs() << "ifPattern BB" << curBlk->getNumber();
- errs() << "{ ";
- if (trueBlk) {
- errs() << "BB" << trueBlk->getNumber();
- }
- errs() << " } else ";
- errs() << "{ ";
- if (falseBlk) {
- errs() << "BB" << falseBlk->getNumber();
- }
- errs() << " }\n ";
- errs() << "landBlock: ";
- if (landBlk == NULL) {
- errs() << "NULL";
+void AMDGPUCFGStructurizer::mergeIfthenelseBlock(MachineInstr *BranchMI,
+ MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
+ MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB) {
+ assert (TrueMBB);
+ DEBUG(
+ dbgs() << "ifPattern BB" << MBB->getNumber();
+ dbgs() << "{ ";
+ if (TrueMBB) {
+ dbgs() << "BB" << TrueMBB->getNumber();
+ }
+ dbgs() << " } else ";
+ dbgs() << "{ ";
+ if (FalseMBB) {
+ dbgs() << "BB" << FalseMBB->getNumber();
+ }
+ dbgs() << " }\n ";
+ dbgs() << "landBlock: ";
+ if (!LandMBB) {
+ dbgs() << "NULL";
} else {
- errs() << "BB" << landBlk->getNumber();
+ dbgs() << "BB" << LandMBB->getNumber();
}
- errs() << "\n";
- }
+ dbgs() << "\n";
+ );
- int oldOpcode = branchInstr->getOpcode();
- DebugLoc branchDL = branchInstr->getDebugLoc();
+ int OldOpcode = BranchMI->getOpcode();
+ DebugLoc BranchDL = BranchMI->getDebugLoc();
// transform to
// if cond
@@ -1391,1661 +1531,374 @@ void CFGStructurizer<PassT>::mergeIfthenelseBlock(InstrT *branchInstr,
// endif
// landBlk
- typename BlockT::iterator branchInstrPos =
- CFGTraits::getInstrPos(curBlk, branchInstr);
- CFGTraits::insertCondBranchBefore(branchInstrPos,
- CFGTraits::getBranchNzeroOpcode(oldOpcode),
- passRep,
- branchDL);
-
- if (trueBlk) {
- curBlk->splice(branchInstrPos, trueBlk, trueBlk->begin(), trueBlk->end());
- curBlk->removeSuccessor(trueBlk);
- if (landBlk && trueBlk->succ_size()!=0) {
- trueBlk->removeSuccessor(landBlk);
- }
- retireBlock(curBlk, trueBlk);
- }
- CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ELSE, passRep);
-
- if (falseBlk) {
- curBlk->splice(branchInstrPos, falseBlk, falseBlk->begin(),
- falseBlk->end());
- curBlk->removeSuccessor(falseBlk);
- if (landBlk && falseBlk->succ_size() != 0) {
- falseBlk->removeSuccessor(landBlk);
- }
- retireBlock(curBlk, falseBlk);
- }
- CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ENDIF, passRep);
-
- branchInstr->eraseFromParent();
+ MachineBasicBlock::iterator I = BranchMI;
+ insertCondBranchBefore(I, getBranchNzeroOpcode(OldOpcode),
+ BranchDL);
- if (landBlk && trueBlk && falseBlk) {
- curBlk->addSuccessor(landBlk);
+ if (TrueMBB) {
+ MBB->splice(I, TrueMBB, TrueMBB->begin(), TrueMBB->end());
+ MBB->removeSuccessor(TrueMBB);
+ if (LandMBB && TrueMBB->succ_size()!=0)
+ TrueMBB->removeSuccessor(LandMBB);
+ retireBlock(TrueMBB);
+ MLI->removeBlock(TrueMBB);
}
-} //mergeIfthenelseBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::mergeLooplandBlock(BlockT *dstBlk,
- LoopLandInfo *loopLand) {
- BlockT *landBlk = loopLand->landBlk;
-
- if (DEBUGME) {
- errs() << "loopPattern header = BB" << dstBlk->getNumber()
- << " land = BB" << landBlk->getNumber() << "\n";
+ if (FalseMBB) {
+ insertInstrBefore(I, AMDGPU::ELSE);
+ MBB->splice(I, FalseMBB, FalseMBB->begin(),
+ FalseMBB->end());
+ MBB->removeSuccessor(FalseMBB);
+ if (LandMBB && FalseMBB->succ_size() != 0)
+ FalseMBB->removeSuccessor(LandMBB);
+ retireBlock(FalseMBB);
+ MLI->removeBlock(FalseMBB);
}
+ insertInstrBefore(I, AMDGPU::ENDIF);
- // Loop contInitRegs are init at the beginning of the loop.
- for (typename std::set<RegiT>::const_iterator iter =
- loopLand->contInitRegs.begin(),
- iterEnd = loopLand->contInitRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0);
- }
+ BranchMI->eraseFromParent();
- /* we last inserterd the DebugLoc in the
- * BREAK_LOGICALZ_i32 or AMDGPU::BREAK_LOGICALNZ statement in the current dstBlk.
- * search for the DebugLoc in the that statement.
- * if not found, we have to insert the empty/default DebugLoc */
- InstrT *loopBreakInstr = CFGTraits::getLoopBreakInstr(dstBlk);
- DebugLoc DLBreak = (loopBreakInstr) ? loopBreakInstr->getDebugLoc() : DebugLoc();
-
- CFGTraits::insertInstrBefore(dstBlk, AMDGPU::WHILELOOP, passRep, DLBreak);
- // Loop breakInitRegs are init before entering the loop.
- for (typename std::set<RegiT>::const_iterator iter =
- loopLand->breakInitRegs.begin(),
- iterEnd = loopLand->breakInitRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0);
- }
- // Loop endbranchInitRegs are init before entering the loop.
- for (typename std::set<RegiT>::const_iterator iter =
- loopLand->endbranchInitRegs.begin(),
- iterEnd = loopLand->endbranchInitRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0);
- }
+ if (LandMBB && TrueMBB && FalseMBB)
+ MBB->addSuccessor(LandMBB);
- /* we last inserterd the DebugLoc in the continue statement in the current dstBlk
- * search for the DebugLoc in the continue statement.
- * if not found, we have to insert the empty/default DebugLoc */
- InstrT *continueInstr = CFGTraits::getContinueInstr(dstBlk);
- DebugLoc DLContinue = (continueInstr) ? continueInstr->getDebugLoc() : DebugLoc();
-
- CFGTraits::insertInstrEnd(dstBlk, AMDGPU::ENDLOOP, passRep, DLContinue);
- // Loop breakOnRegs are check after the ENDLOOP: break the loop outside this
- // loop.
- for (typename std::set<RegiT>::const_iterator iter =
- loopLand->breakOnRegs.begin(),
- iterEnd = loopLand->breakOnRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertCondBranchEnd(dstBlk, AMDGPU::PREDICATED_BREAK, passRep,
- *iter);
- }
-
- // Loop contOnRegs are check after the ENDLOOP: cont the loop outside this
- // loop.
- for (std::set<RegiT>::const_iterator iter = loopLand->contOnRegs.begin(),
- iterEnd = loopLand->contOnRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertCondBranchEnd(dstBlk, AMDGPU::CONTINUE_LOGICALNZ_i32,
- passRep, *iter);
- }
-
- dstBlk->splice(dstBlk->end(), landBlk, landBlk->begin(), landBlk->end());
-
- for (typename BlockT::succ_iterator iter = landBlk->succ_begin(),
- iterEnd = landBlk->succ_end(); iter != iterEnd; ++iter) {
- dstBlk->addSuccessor(*iter); // *iter's predecessor is also taken care of.
- }
-
- removeSuccessor(landBlk);
- retireBlock(dstBlk, landBlk);
-} //mergeLooplandBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::reversePredicateSetter(typename BlockT::iterator I) {
- while (I--) {
- if (I->getOpcode() == AMDGPU::PRED_X) {
- switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) {
- case OPCODE_IS_ZERO_INT:
- static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_NOT_ZERO_INT);
- return;
- case OPCODE_IS_NOT_ZERO_INT:
- static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_ZERO_INT);
- return;
- case OPCODE_IS_ZERO:
- static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_NOT_ZERO);
- return;
- case OPCODE_IS_NOT_ZERO:
- static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_ZERO);
- return;
- default:
- assert(0 && "PRED_X Opcode invalid!");
- }
- }
- }
}
-template<class PassT>
-void CFGStructurizer<PassT>::mergeLoopbreakBlock(BlockT *exitingBlk,
- BlockT *exitBlk,
- BlockT *exitLandBlk,
- RegiT setReg) {
- if (DEBUGME) {
- errs() << "loopbreakPattern exiting = BB" << exitingBlk->getNumber()
- << " exit = BB" << exitBlk->getNumber()
- << " land = BB" << exitLandBlk->getNumber() << "\n";
- }
+void AMDGPUCFGStructurizer::mergeLooplandBlock(MachineBasicBlock *DstBlk,
+ MachineBasicBlock *LandMBB) {
+ DEBUG(dbgs() << "loopPattern header = BB" << DstBlk->getNumber()
+ << " land = BB" << LandMBB->getNumber() << "\n";);
- InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(exitingBlk);
- assert(branchInstr && CFGTraits::isCondBranch(branchInstr));
-
- DebugLoc DL = branchInstr->getDebugLoc();
-
- BlockT *trueBranch = CFGTraits::getTrueBranch(branchInstr);
-
- // transform exitingBlk to
- // if ( ) {
- // exitBlk (if exitBlk != exitLandBlk)
- // setReg = 1
- // break
- // }endif
- // successor = {orgSuccessor(exitingBlk) - exitBlk}
-
- typename BlockT::iterator branchInstrPos =
- CFGTraits::getInstrPos(exitingBlk, branchInstr);
-
- if (exitBlk == exitLandBlk && setReg == INVALIDREGNUM) {
- //break_logical
+ insertInstrBefore(DstBlk, AMDGPU::WHILELOOP, DebugLoc());
+ insertInstrEnd(DstBlk, AMDGPU::ENDLOOP, DebugLoc());
+ DstBlk->addSuccessor(LandMBB);
+ DstBlk->removeSuccessor(DstBlk);
+}
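+
+// The resulting loop header is, roughly:
+//   WHILELOOP
+//     <original header body>
+//   ENDLOOP
+// with the header's self-edge replaced by an edge to LandMBB.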
- if (trueBranch != exitBlk) {
- reversePredicateSetter(branchInstrPos);
- }
- CFGTraits::insertCondBranchBefore(branchInstrPos, AMDGPU::PREDICATED_BREAK, passRep, DL);
- } else {
- if (trueBranch != exitBlk) {
- reversePredicateSetter(branchInstr);
- }
- CFGTraits::insertCondBranchBefore(branchInstrPos, AMDGPU::PREDICATED_BREAK, passRep, DL);
- if (exitBlk != exitLandBlk) {
- //splice is insert-before ...
- exitingBlk->splice(branchInstrPos, exitBlk, exitBlk->begin(),
- exitBlk->end());
- }
- if (setReg != INVALIDREGNUM) {
- CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1);
- }
- CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::BREAK, passRep);
- } //if_logical
+void AMDGPUCFGStructurizer::mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
+ MachineBasicBlock *LandMBB) {
+ DEBUG(dbgs() << "loopbreakPattern exiting = BB" << ExitingMBB->getNumber()
+ << " land = BB" << LandMBB->getNumber() << "\n";);
+ MachineInstr *BranchMI = getLoopendBlockBranchInstr(ExitingMBB);
+ assert(BranchMI && isCondBranch(BranchMI));
+ DebugLoc DL = BranchMI->getDebugLoc();
+ MachineBasicBlock *TrueBranch = getTrueBranch(BranchMI);
+ MachineBasicBlock::iterator I = BranchMI;
+ if (TrueBranch != LandMBB)
+ reversePredicateSetter(I);
+ insertCondBranchBefore(ExitingMBB, I, AMDGPU::IF_PREDICATE_SET, AMDGPU::PREDICATE_BIT, DL);
+ insertInstrBefore(I, AMDGPU::BREAK);
+ insertInstrBefore(I, AMDGPU::ENDIF);
  // now the branch instruction can be erased safely
- branchInstr->eraseFromParent();
-
+ BranchMI->eraseFromParent();
//now take care of successors, retire blocks
- exitingBlk->removeSuccessor(exitBlk);
- if (exitBlk != exitLandBlk) {
- //splice is insert-before ...
- exitBlk->removeSuccessor(exitLandBlk);
- retireBlock(exitingBlk, exitBlk);
- }
-
-} //mergeLoopbreakBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::settleLoopcontBlock(BlockT *contingBlk,
- BlockT *contBlk,
- RegiT setReg) {
- if (DEBUGME) {
- errs() << "settleLoopcontBlock conting = BB"
- << contingBlk->getNumber()
- << ", cont = BB" << contBlk->getNumber() << "\n";
- }
-
- InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(contingBlk);
- if (branchInstr) {
- assert(CFGTraits::isCondBranch(branchInstr));
- typename BlockT::iterator branchInstrPos =
- CFGTraits::getInstrPos(contingBlk, branchInstr);
- BlockT *trueBranch = CFGTraits::getTrueBranch(branchInstr);
- int oldOpcode = branchInstr->getOpcode();
- DebugLoc DL = branchInstr->getDebugLoc();
-
- // transform contingBlk to
- // if () {
- // move instr after branchInstr
- // continue
- // or
- // setReg = 1
- // break
- // }endif
- // successor = {orgSuccessor(contingBlk) - loopHeader}
-
- bool useContinueLogical =
- (setReg == INVALIDREGNUM && (&*contingBlk->rbegin()) == branchInstr);
-
- if (useContinueLogical == false) {
- int branchOpcode =
- trueBranch == contBlk ? CFGTraits::getBranchNzeroOpcode(oldOpcode)
- : CFGTraits::getBranchZeroOpcode(oldOpcode);
-
- CFGTraits::insertCondBranchBefore(branchInstrPos, branchOpcode, passRep, DL);
-
- if (setReg != INVALIDREGNUM) {
- CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1);
- // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::BREAK, passRep, DL);
- } else {
- // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::CONTINUE, passRep, DL);
- }
+ ExitingMBB->removeSuccessor(LandMBB);
+}
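+
+// After this, ExitingMBB ends in, roughly:
+//   IF_PREDICATE_SET PREDICATE_BIT   (predicate reversed first if the taken
+//     BREAK                           branch was not the exit edge)
+//   ENDIF
+// and the CFG edge ExitingMBB -> LandMBB has been removed.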
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::ENDIF, passRep, DL);
+void AMDGPUCFGStructurizer::settleLoopcontBlock(MachineBasicBlock *ContingMBB,
+ MachineBasicBlock *ContMBB) {
+ DEBUG(dbgs() << "settleLoopcontBlock conting = BB"
+ << ContingMBB->getNumber()
+ << ", cont = BB" << ContMBB->getNumber() << "\n";);
+
+ MachineInstr *MI = getLoopendBlockBranchInstr(ContingMBB);
+ if (MI) {
+ assert(isCondBranch(MI));
+ MachineBasicBlock::iterator I = MI;
+ MachineBasicBlock *TrueBranch = getTrueBranch(MI);
+ int OldOpcode = MI->getOpcode();
+ DebugLoc DL = MI->getDebugLoc();
+
+ bool UseContinueLogical = ((&*ContingMBB->rbegin()) == MI);
+
+ if (UseContinueLogical == false) {
+ int BranchOpcode =
+ TrueBranch == ContMBB ? getBranchNzeroOpcode(OldOpcode) :
+ getBranchZeroOpcode(OldOpcode);
+ insertCondBranchBefore(I, BranchOpcode, DL);
+      // insertEnd to ensure phi-moves, if they exist, go before the continue-instr.
+ insertInstrEnd(ContingMBB, AMDGPU::CONTINUE, DL);
+ insertInstrEnd(ContingMBB, AMDGPU::ENDIF, DL);
} else {
- int branchOpcode =
- trueBranch == contBlk ? CFGTraits::getContinueNzeroOpcode(oldOpcode)
- : CFGTraits::getContinueZeroOpcode(oldOpcode);
-
- CFGTraits::insertCondBranchBefore(branchInstrPos, branchOpcode, passRep, DL);
+ int BranchOpcode =
+ TrueBranch == ContMBB ? getContinueNzeroOpcode(OldOpcode) :
+ getContinueZeroOpcode(OldOpcode);
+ insertCondBranchBefore(I, BranchOpcode, DL);
}
- branchInstr->eraseFromParent();
+ MI->eraseFromParent();
} else {
// if we've arrived here then we've already erased the branch instruction
- // travel back up the basic block to see the last reference of our debug location
- // we've just inserted that reference here so it should be representative
- if (setReg != INVALIDREGNUM) {
- CFGTraits::insertAssignInstrBefore(contingBlk, passRep, setReg, 1);
- // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::BREAK, passRep, CFGTraits::getLastDebugLocInBB(contingBlk));
- } else {
- // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::CONTINUE, passRep, CFGTraits::getLastDebugLocInBB(contingBlk));
- }
- } //else
-
-} //settleLoopcontBlock
-
-// BBs in exitBlkSet are determined as in break-path for loopRep,
-// before we can put code for BBs as inside loop-body for loopRep
-// check whether those BBs are determined as cont-BB for parentLoopRep
-// earlier.
-// If so, generate a new BB newBlk
-// (1) set newBlk common successor of BBs in exitBlkSet
-// (2) change the continue-instr in BBs in exitBlkSet to break-instr
-// (3) generate continue-instr in newBlk
-//
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::relocateLoopcontBlock(LoopT *parentLoopRep,
- LoopT *loopRep,
- std::set<BlockT *> &exitBlkSet,
- BlockT *exitLandBlk) {
- std::set<BlockT *> endBlkSet;
-
-
-
- for (typename std::set<BlockT *>::const_iterator iter = exitBlkSet.begin(),
- iterEnd = exitBlkSet.end();
- iter != iterEnd; ++iter) {
- BlockT *exitBlk = *iter;
- BlockT *endBlk = singlePathEnd(exitBlk, exitLandBlk);
-
- if (endBlk == NULL || CFGTraits::getContinueInstr(endBlk) == NULL)
- return NULL;
-
- endBlkSet.insert(endBlk);
- }
-
- BlockT *newBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(newBlk); //insert to function
- CFGTraits::insertInstrEnd(newBlk, AMDGPU::CONTINUE, passRep);
- SHOWNEWBLK(newBlk, "New continue block: ");
-
- for (typename std::set<BlockT*>::const_iterator iter = endBlkSet.begin(),
- iterEnd = endBlkSet.end();
- iter != iterEnd; ++iter) {
- BlockT *endBlk = *iter;
- InstrT *contInstr = CFGTraits::getContinueInstr(endBlk);
- if (contInstr) {
- contInstr->eraseFromParent();
- }
- endBlk->addSuccessor(newBlk);
- if (DEBUGME) {
- errs() << "Add new continue Block to BB"
- << endBlk->getNumber() << " successors\n";
- }
- }
-
- return newBlk;
-} //relocateLoopcontBlock
-
-
-// LoopEndbranchBlock is a BB created by the CFGStructurizer to use as
-// LoopLandBlock. This BB branch on the loop endBranchInit register to the
-// pathes corresponding to the loop exiting branches.
-
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::addLoopEndbranchBlock(LoopT *loopRep,
- BlockTSmallerVector &exitingBlks,
- BlockTSmallerVector &exitBlks) {
- const AMDGPUInstrInfo *tii =
- static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
- const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
-
- RegiT endBranchReg = static_cast<int>
- (funcRep->getRegInfo().createVirtualRegister(I32RC));
- assert(endBranchReg >= 0);
-
- // reg = 0 before entering the loop
- addLoopEndbranchInitReg(loopRep, endBranchReg);
-
- uint32_t numBlks = static_cast<uint32_t>(exitingBlks.size());
- assert(numBlks >=2 && numBlks == exitBlks.size());
-
- BlockT *preExitingBlk = exitingBlks[0];
- BlockT *preExitBlk = exitBlks[0];
- BlockT *preBranchBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(preBranchBlk); //insert to function
- SHOWNEWBLK(preBranchBlk, "New loopEndbranch block: ");
-
- BlockT *newLandBlk = preBranchBlk;
-
- CFGTraits::replaceInstrUseOfBlockWith(preExitingBlk, preExitBlk,
- newLandBlk);
- preExitingBlk->removeSuccessor(preExitBlk);
- preExitingBlk->addSuccessor(newLandBlk);
-
- //it is redundant to add reg = 0 to exitingBlks[0]
-
- // For 1..n th exiting path (the last iteration handles two pathes) create the
- // branch to the previous path and the current path.
- for (uint32_t i = 1; i < numBlks; ++i) {
- BlockT *curExitingBlk = exitingBlks[i];
- BlockT *curExitBlk = exitBlks[i];
- BlockT *curBranchBlk;
-
- if (i == numBlks - 1) {
- curBranchBlk = curExitBlk;
- } else {
- curBranchBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(curBranchBlk); //insert to function
- SHOWNEWBLK(curBranchBlk, "New loopEndbranch block: ");
- }
-
- // Add reg = i to exitingBlks[i].
- CFGTraits::insertAssignInstrBefore(curExitingBlk, passRep,
- endBranchReg, i);
-
- // Remove the edge (exitingBlks[i] exitBlks[i]) add new edge
- // (exitingBlks[i], newLandBlk).
- CFGTraits::replaceInstrUseOfBlockWith(curExitingBlk, curExitBlk,
- newLandBlk);
- curExitingBlk->removeSuccessor(curExitBlk);
- curExitingBlk->addSuccessor(newLandBlk);
-
- // add to preBranchBlk the branch instruction:
- // if (endBranchReg == preVal)
- // preExitBlk
- // else
- // curBranchBlk
- //
- // preValReg = i - 1
-
- DebugLoc DL;
- RegiT preValReg = static_cast<int>
- (funcRep->getRegInfo().createVirtualRegister(I32RC));
-
- preBranchBlk->insert(preBranchBlk->begin(),
- tii->getMovImmInstr(preBranchBlk->getParent(), preValReg,
- i - 1));
-
- // condResReg = (endBranchReg == preValReg)
- RegiT condResReg = static_cast<int>
- (funcRep->getRegInfo().createVirtualRegister(I32RC));
- BuildMI(preBranchBlk, DL, tii->get(tii->getIEQOpcode()), condResReg)
- .addReg(endBranchReg).addReg(preValReg);
-
- BuildMI(preBranchBlk, DL, tii->get(AMDGPU::BRANCH_COND_i32))
- .addMBB(preExitBlk).addReg(condResReg);
-
- preBranchBlk->addSuccessor(preExitBlk);
- preBranchBlk->addSuccessor(curBranchBlk);
-
- // Update preExitingBlk, preExitBlk, preBranchBlk.
- preExitingBlk = curExitingBlk;
- preExitBlk = curExitBlk;
- preBranchBlk = curBranchBlk;
-
- } //end for 1 .. n blocks
-
- return newLandBlk;
-} //addLoopEndbranchBlock
-
-template<class PassT>
-typename CFGStructurizer<PassT>::PathToKind
-CFGStructurizer<PassT>::singlePathTo(BlockT *srcBlk, BlockT *dstBlk,
- bool allowSideEntry) {
- assert(dstBlk);
-
- if (srcBlk == dstBlk) {
- return SinglePath_InPath;
+    // travel back up the basic block to see the last reference of our debug
+    // location; we've just inserted that reference here, so it should be
+    // representative. insertEnd ensures phi-moves, if they exist, go before
+    // the continue-instr.
+ insertInstrEnd(ContingMBB, AMDGPU::CONTINUE,
+ getLastDebugLocInBB(ContingMBB));
}
+}
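+
+// In the common case the latch's conditional branch is rewritten into a
+// structured form, roughly:
+//   IF <cond>     (nzero/zero opcode chosen so that "taken" means "continue")
+//     CONTINUE
+//   ENDIF
+// When the branch is the last instruction of the block the single
+// CONTINUE_LOGICAL* form is used instead, and when the branch has already
+// been erased a plain CONTINUE is appended.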
- while (srcBlk && srcBlk->succ_size() == 1) {
- srcBlk = *srcBlk->succ_begin();
- if (srcBlk == dstBlk) {
- return SinglePath_InPath;
- }
-
- if (!allowSideEntry && srcBlk->pred_size() > 1) {
- return Not_SinglePath;
- }
- }
-
- if (srcBlk && srcBlk->succ_size()==0) {
- return SinglePath_NotInPath;
- }
-
- return Not_SinglePath;
-} //singlePathTo
-
-// If there is a single path from srcBlk to dstBlk, return the last block before
-// dstBlk If there is a single path from srcBlk->end without dstBlk, return the
-// last block in the path Otherwise, return NULL
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::singlePathEnd(BlockT *srcBlk, BlockT *dstBlk,
- bool allowSideEntry) {
- assert(dstBlk);
-
- if (srcBlk == dstBlk) {
- return srcBlk;
- }
-
- if (srcBlk->succ_size() == 0) {
- return srcBlk;
- }
-
- while (srcBlk && srcBlk->succ_size() == 1) {
- BlockT *preBlk = srcBlk;
-
- srcBlk = *srcBlk->succ_begin();
- if (srcBlk == NULL) {
- return preBlk;
- }
-
- if (!allowSideEntry && srcBlk->pred_size() > 1) {
- return NULL;
- }
- }
-
- if (srcBlk && srcBlk->succ_size()==0) {
- return srcBlk;
- }
-
- return NULL;
-
-} //singlePathEnd
-
-template<class PassT>
-int CFGStructurizer<PassT>::cloneOnSideEntryTo(BlockT *preBlk, BlockT *srcBlk,
- BlockT *dstBlk) {
- int cloned = 0;
- assert(preBlk->isSuccessor(srcBlk));
- while (srcBlk && srcBlk != dstBlk) {
- assert(srcBlk->succ_size() == 1);
- if (srcBlk->pred_size() > 1) {
- srcBlk = cloneBlockForPredecessor(srcBlk, preBlk);
- ++cloned;
+int AMDGPUCFGStructurizer::cloneOnSideEntryTo(MachineBasicBlock *PreMBB,
+ MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB) {
+ int Cloned = 0;
+ assert(PreMBB->isSuccessor(SrcMBB));
+ while (SrcMBB && SrcMBB != DstMBB) {
+ assert(SrcMBB->succ_size() == 1);
+ if (SrcMBB->pred_size() > 1) {
+ SrcMBB = cloneBlockForPredecessor(SrcMBB, PreMBB);
+ ++Cloned;
}
- preBlk = srcBlk;
- srcBlk = *srcBlk->succ_begin();
+ PreMBB = SrcMBB;
+ SrcMBB = *SrcMBB->succ_begin();
}
- return cloned;
-} //cloneOnSideEntryTo
+ return Cloned;
+}
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::cloneBlockForPredecessor(BlockT *curBlk,
- BlockT *predBlk) {
- assert(predBlk->isSuccessor(curBlk) &&
+MachineBasicBlock *
+AMDGPUCFGStructurizer::cloneBlockForPredecessor(MachineBasicBlock *MBB,
+ MachineBasicBlock *PredMBB) {
+ assert(PredMBB->isSuccessor(MBB) &&
"succBlk is not a prececessor of curBlk");
- BlockT *cloneBlk = CFGTraits::clone(curBlk); //clone instructions
- CFGTraits::replaceInstrUseOfBlockWith(predBlk, curBlk, cloneBlk);
+ MachineBasicBlock *CloneMBB = clone(MBB); //clone instructions
+ replaceInstrUseOfBlockWith(PredMBB, MBB, CloneMBB);
//srcBlk, oldBlk, newBlk
- predBlk->removeSuccessor(curBlk);
- predBlk->addSuccessor(cloneBlk);
+ PredMBB->removeSuccessor(MBB);
+ PredMBB->addSuccessor(CloneMBB);
// add all successor to cloneBlk
- CFGTraits::cloneSuccessorList(cloneBlk, curBlk);
+ cloneSuccessorList(CloneMBB, MBB);
- numClonedInstr += curBlk->size();
+ numClonedInstr += MBB->size();
- if (DEBUGME) {
- errs() << "Cloned block: " << "BB"
- << curBlk->getNumber() << "size " << curBlk->size() << "\n";
- }
+ DEBUG(
+ dbgs() << "Cloned block: " << "BB"
+ << MBB->getNumber() << "size " << MBB->size() << "\n";
+ );
- SHOWNEWBLK(cloneBlk, "result of Cloned block: ");
+ SHOWNEWBLK(CloneMBB, "result of Cloned block: ");
- return cloneBlk;
-} //cloneBlockForPredecessor
-
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::exitingBlock2ExitBlock(LoopT *loopRep,
- BlockT *exitingBlk) {
- BlockT *exitBlk = NULL;
-
- for (typename BlockT::succ_iterator iterSucc = exitingBlk->succ_begin(),
- iterSuccEnd = exitingBlk->succ_end();
- iterSucc != iterSuccEnd; ++iterSucc) {
- BlockT *curBlk = *iterSucc;
- if (!loopRep->contains(curBlk)) {
- assert(exitBlk == NULL);
- exitBlk = curBlk;
- }
- }
-
- assert(exitBlk != NULL);
-
- return exitBlk;
-} //exitingBlock2ExitBlock
+ return CloneMBB;
+}
-template<class PassT>
-void CFGStructurizer<PassT>::migrateInstruction(BlockT *srcBlk,
- BlockT *dstBlk,
- InstrIterator insertPos) {
- InstrIterator spliceEnd;
+void AMDGPUCFGStructurizer::migrateInstruction(MachineBasicBlock *SrcMBB,
+ MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I) {
+ MachineBasicBlock::iterator SpliceEnd;
//look for the input branchinstr, not the AMDGPU branchinstr
- InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(srcBlk);
- if (branchInstr == NULL) {
- if (DEBUGME) {
- errs() << "migrateInstruction don't see branch instr\n" ;
- }
- spliceEnd = srcBlk->end();
+ MachineInstr *BranchMI = getNormalBlockBranchInstr(SrcMBB);
+ if (!BranchMI) {
+ DEBUG(
+ dbgs() << "migrateInstruction don't see branch instr\n" ;
+ );
+ SpliceEnd = SrcMBB->end();
} else {
- if (DEBUGME) {
- errs() << "migrateInstruction see branch instr\n" ;
- branchInstr->dump();
- }
- spliceEnd = CFGTraits::getInstrPos(srcBlk, branchInstr);
- }
- if (DEBUGME) {
- errs() << "migrateInstruction before splice dstSize = " << dstBlk->size()
- << "srcSize = " << srcBlk->size() << "\n";
+ DEBUG(
+ dbgs() << "migrateInstruction see branch instr\n" ;
+ BranchMI->dump();
+ );
+ SpliceEnd = BranchMI;
}
+ DEBUG(
+ dbgs() << "migrateInstruction before splice dstSize = " << DstMBB->size()
+ << "srcSize = " << SrcMBB->size() << "\n";
+ );
//splice insert before insertPos
- dstBlk->splice(insertPos, srcBlk, srcBlk->begin(), spliceEnd);
+ DstMBB->splice(I, SrcMBB, SrcMBB->begin(), SpliceEnd);
- if (DEBUGME) {
- errs() << "migrateInstruction after splice dstSize = " << dstBlk->size()
- << "srcSize = " << srcBlk->size() << "\n";
- }
-} //migrateInstruction
+ DEBUG(
+ dbgs() << "migrateInstruction after splice dstSize = " << DstMBB->size()
+ << "srcSize = " << SrcMBB->size() << "\n";
+ );
+}
-// normalizeInfiniteLoopExit change
-// B1:
-// uncond_br LoopHeader
-//
-// to
-// B1:
-// cond_br 1 LoopHeader dummyExit
-// and return the newly added dummy exit block
-//
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::normalizeInfiniteLoopExit(LoopT* LoopRep) {
- BlockT *loopHeader;
- BlockT *loopLatch;
- loopHeader = LoopRep->getHeader();
- loopLatch = LoopRep->getLoopLatch();
- BlockT *dummyExitBlk = NULL;
+MachineBasicBlock *
+AMDGPUCFGStructurizer::normalizeInfiniteLoopExit(MachineLoop* LoopRep) {
+ MachineBasicBlock *LoopHeader = LoopRep->getHeader();
+ MachineBasicBlock *LoopLatch = LoopRep->getLoopLatch();
const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
- if (loopHeader!=NULL && loopLatch!=NULL) {
- InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(loopLatch);
- if (branchInstr!=NULL && CFGTraits::isUncondBranch(branchInstr)) {
- dummyExitBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(dummyExitBlk); //insert to function
- SHOWNEWBLK(dummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
-
- if (DEBUGME) errs() << "Old branch instr: " << *branchInstr << "\n";
-
- typename BlockT::iterator insertPos =
- CFGTraits::getInstrPos(loopLatch, branchInstr);
- unsigned immReg =
- funcRep->getRegInfo().createVirtualRegister(I32RC);
- CFGTraits::insertAssignInstrBefore(insertPos, passRep, immReg, 1);
- InstrT *newInstr =
- CFGTraits::insertInstrBefore(insertPos, AMDGPU::BRANCH_COND_i32, passRep);
- MachineInstrBuilder MIB(*funcRep, newInstr);
- MIB.addMBB(loopHeader);
- MIB.addReg(immReg, false);
-
- SHOWNEWINSTR(newInstr);
-
- branchInstr->eraseFromParent();
- loopLatch->addSuccessor(dummyExitBlk);
- }
- }
- return dummyExitBlk;
-} //normalizeInfiniteLoopExit
+ if (!LoopHeader || !LoopLatch)
+ return NULL;
+ MachineInstr *BranchMI = getLoopendBlockBranchInstr(LoopLatch);
+  // Is LoopRep an infinite loop?
+ if (!BranchMI || !isUncondBranch(BranchMI))
+ return NULL;
-template<class PassT>
-void CFGStructurizer<PassT>::removeUnconditionalBranch(BlockT *srcBlk) {
- InstrT *branchInstr;
+ MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
+ FuncRep->push_back(DummyExitBlk); //insert to function
+ SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
+ DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";);
+ MachineBasicBlock::iterator I = BranchMI;
+ unsigned ImmReg = FuncRep->getRegInfo().createVirtualRegister(I32RC);
+ llvm_unreachable("Extra register needed to handle CFG");
+ MachineInstr *NewMI = insertInstrBefore(I, AMDGPU::BRANCH_COND_i32);
+ MachineInstrBuilder MIB(*FuncRep, NewMI);
+ MIB.addMBB(LoopHeader);
+ MIB.addReg(ImmReg, false);
+ SHOWNEWINSTR(NewMI);
+ BranchMI->eraseFromParent();
+ LoopLatch->addSuccessor(DummyExitBlk);
+
+ return DummyExitBlk;
+}
+
+void AMDGPUCFGStructurizer::removeUnconditionalBranch(MachineBasicBlock *MBB) {
+ MachineInstr *BranchMI;
  // I saw two unconditional branches in one basic block in the example
  // test_fc_do_while_or.c; need to fix the upstream on this to remove the loop.
- while ((branchInstr = CFGTraits::getLoopendBlockBranchInstr(srcBlk))
- && CFGTraits::isUncondBranch(branchInstr)) {
- if (DEBUGME) {
- errs() << "Removing unconditional branch instruction" ;
- branchInstr->dump();
- }
- branchInstr->eraseFromParent();
- }
-} //removeUnconditionalBranch
-
-template<class PassT>
-void CFGStructurizer<PassT>::removeRedundantConditionalBranch(BlockT *srcBlk) {
- if (srcBlk->succ_size() == 2) {
- BlockT *blk1 = *srcBlk->succ_begin();
- BlockT *blk2 = *(++srcBlk->succ_begin());
-
- if (blk1 == blk2) {
- InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(srcBlk);
- assert(branchInstr && CFGTraits::isCondBranch(branchInstr));
- if (DEBUGME) {
- errs() << "Removing unneeded conditional branch instruction" ;
- branchInstr->dump();
- }
- branchInstr->eraseFromParent();
- SHOWNEWBLK(blk1, "Removing redundant successor");
- srcBlk->removeSuccessor(blk1);
- }
- }
-} //removeRedundantConditionalBranch
-
-template<class PassT>
-void CFGStructurizer<PassT>::addDummyExitBlock(SmallVector<BlockT*,
- DEFAULT_VEC_SLOTS> &retBlks) {
- BlockT *dummyExitBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(dummyExitBlk); //insert to function
- CFGTraits::insertInstrEnd(dummyExitBlk, AMDGPU::RETURN, passRep);
-
- for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::iterator iter =
- retBlks.begin(),
- iterEnd = retBlks.end(); iter != iterEnd; ++iter) {
- BlockT *curBlk = *iter;
- InstrT *curInstr = CFGTraits::getReturnInstr(curBlk);
- if (curInstr) {
- curInstr->eraseFromParent();
- }
- curBlk->addSuccessor(dummyExitBlk);
- if (DEBUGME) {
- errs() << "Add dummyExitBlock to BB" << curBlk->getNumber()
- << " successors\n";
- }
- } //for
-
- SHOWNEWBLK(dummyExitBlk, "DummyExitBlock: ");
-} //addDummyExitBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::removeSuccessor(BlockT *srcBlk) {
- while (srcBlk->succ_size()) {
- srcBlk->removeSuccessor(*srcBlk->succ_begin());
+ while ((BranchMI = getLoopendBlockBranchInstr(MBB))
+ && isUncondBranch(BranchMI)) {
+ DEBUG(dbgs() << "Removing uncond branch instr"; BranchMI->dump(););
+ BranchMI->eraseFromParent();
}
}
-template<class PassT>
-void CFGStructurizer<PassT>::recordSccnum(BlockT *srcBlk, int sccNum) {
- BlockInfo *&srcBlkInfo = blockInfoMap[srcBlk];
+void AMDGPUCFGStructurizer::removeRedundantConditionalBranch(
+ MachineBasicBlock *MBB) {
+ if (MBB->succ_size() != 2)
+ return;
+ MachineBasicBlock *MBB1 = *MBB->succ_begin();
+ MachineBasicBlock *MBB2 = *llvm::next(MBB->succ_begin());
+ if (MBB1 != MBB2)
+ return;
+
+ MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
+ assert(BranchMI && isCondBranch(BranchMI));
+ DEBUG(dbgs() << "Removing unneeded cond branch instr"; BranchMI->dump(););
+ BranchMI->eraseFromParent();
+ SHOWNEWBLK(MBB1, "Removing redundant successor");
+ MBB->removeSuccessor(MBB1);
+}
- if (srcBlkInfo == NULL) {
- srcBlkInfo = new BlockInfo();
+void AMDGPUCFGStructurizer::addDummyExitBlock(
+ SmallVectorImpl<MachineBasicBlock*> &RetMBB) {
+ MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
+ FuncRep->push_back(DummyExitBlk); //insert to function
+ insertInstrEnd(DummyExitBlk, AMDGPU::RETURN);
+
+ for (SmallVectorImpl<MachineBasicBlock *>::iterator It = RetMBB.begin(),
+ E = RetMBB.end(); It != E; ++It) {
+ MachineBasicBlock *MBB = *It;
+ MachineInstr *MI = getReturnInstr(MBB);
+ if (MI)
+ MI->eraseFromParent();
+ MBB->addSuccessor(DummyExitBlk);
+ DEBUG(
+ dbgs() << "Add dummyExitBlock to BB" << MBB->getNumber()
+ << " successors\n";
+ );
}
+ SHOWNEWBLK(DummyExitBlk, "DummyExitBlock: ");
+}
- srcBlkInfo->sccNum = sccNum;
+void AMDGPUCFGStructurizer::removeSuccessor(MachineBasicBlock *MBB) {
+ while (MBB->succ_size())
+ MBB->removeSuccessor(*MBB->succ_begin());
}
-template<class PassT>
-int CFGStructurizer<PassT>::getSCCNum(BlockT *srcBlk) {
- BlockInfo *srcBlkInfo = blockInfoMap[srcBlk];
- return srcBlkInfo ? srcBlkInfo->sccNum : INVALIDSCCNUM;
+void AMDGPUCFGStructurizer::recordSccnum(MachineBasicBlock *MBB,
+ int SccNum) {
+ BlockInformation *&srcBlkInfo = BlockInfoMap[MBB];
+ if (!srcBlkInfo)
+ srcBlkInfo = new BlockInformation();
+ srcBlkInfo->SccNum = SccNum;
}
-template<class PassT>
-void CFGStructurizer<PassT>::retireBlock(BlockT *dstBlk, BlockT *srcBlk) {
- if (DEBUGME) {
- errs() << "Retiring BB" << srcBlk->getNumber() << "\n";
- }
+void AMDGPUCFGStructurizer::retireBlock(MachineBasicBlock *MBB) {
+ DEBUG(
+ dbgs() << "Retiring BB" << MBB->getNumber() << "\n";
+ );
- BlockInfo *&srcBlkInfo = blockInfoMap[srcBlk];
+ BlockInformation *&SrcBlkInfo = BlockInfoMap[MBB];
- if (srcBlkInfo == NULL) {
- srcBlkInfo = new BlockInfo();
- }
+ if (!SrcBlkInfo)
+ SrcBlkInfo = new BlockInformation();
- srcBlkInfo->isRetired = true;
- assert(srcBlk->succ_size() == 0 && srcBlk->pred_size() == 0
+ SrcBlkInfo->IsRetired = true;
+ assert(MBB->succ_size() == 0 && MBB->pred_size() == 0
&& "can't retire block yet");
}
-template<class PassT>
-bool CFGStructurizer<PassT>::isRetiredBlock(BlockT *srcBlk) {
- BlockInfo *srcBlkInfo = blockInfoMap[srcBlk];
- return (srcBlkInfo && srcBlkInfo->isRetired);
-}
-
-template<class PassT>
-bool CFGStructurizer<PassT>::isActiveLoophead(BlockT *curBlk) {
- LoopT *loopRep = loopInfo->getLoopFor(curBlk);
- while (loopRep && loopRep->getHeader() == curBlk) {
- LoopLandInfo *loopLand = getLoopLandInfo(loopRep);
-
- if(loopLand == NULL)
- return true;
-
- BlockT *landBlk = loopLand->landBlk;
- assert(landBlk);
- if (!isRetiredBlock(landBlk)) {
- return true;
- }
-
- loopRep = loopRep->getParentLoop();
- }
-
- return false;
-} //isActiveLoophead
-
-template<class PassT>
-bool CFGStructurizer<PassT>::needMigrateBlock(BlockT *blk) {
- const unsigned blockSizeThreshold = 30;
- const unsigned cloneInstrThreshold = 100;
-
- bool multiplePreds = blk && (blk->pred_size() > 1);
-
- if(!multiplePreds)
- return false;
-
- unsigned blkSize = blk->size();
- return ((blkSize > blockSizeThreshold)
- && (blkSize * (blk->pred_size() - 1) > cloneInstrThreshold));
-} //needMigrateBlock
-
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::recordLoopLandBlock(LoopT *loopRep, BlockT *landBlk,
- BlockTSmallerVector &exitBlks,
- std::set<BlockT *> &exitBlkSet) {
- SmallVector<BlockT *, DEFAULT_VEC_SLOTS> inpathBlks; //in exit path blocks
-
- for (typename BlockT::pred_iterator predIter = landBlk->pred_begin(),
- predIterEnd = landBlk->pred_end();
- predIter != predIterEnd; ++predIter) {
- BlockT *curBlk = *predIter;
- if (loopRep->contains(curBlk) || exitBlkSet.count(curBlk)) {
- inpathBlks.push_back(curBlk);
- }
- } //for
-
- //if landBlk has predecessors that are not in the given loop,
- //create a new block
- BlockT *newLandBlk = landBlk;
- if (inpathBlks.size() != landBlk->pred_size()) {
- newLandBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(newLandBlk); //insert to function
- newLandBlk->addSuccessor(landBlk);
- for (typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::iterator iter =
- inpathBlks.begin(),
- iterEnd = inpathBlks.end(); iter != iterEnd; ++iter) {
- BlockT *curBlk = *iter;
- CFGTraits::replaceInstrUseOfBlockWith(curBlk, landBlk, newLandBlk);
- //srcBlk, oldBlk, newBlk
- curBlk->removeSuccessor(landBlk);
- curBlk->addSuccessor(newLandBlk);
- }
- for (size_t i = 0, tot = exitBlks.size(); i < tot; ++i) {
- if (exitBlks[i] == landBlk) {
- exitBlks[i] = newLandBlk;
- }
- }
- SHOWNEWBLK(newLandBlk, "NewLandingBlock: ");
- }
-
- setLoopLandBlock(loopRep, newLandBlk);
-
- return newLandBlk;
-} // recordLoopbreakLand
-
-template<class PassT>
-void CFGStructurizer<PassT>::setLoopLandBlock(LoopT *loopRep, BlockT *blk) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
- assert(theEntry->landBlk == NULL);
-
- if (blk == NULL) {
- blk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(blk); //insert to function
- SHOWNEWBLK(blk, "DummyLandingBlock for loop without break: ");
- }
-
- theEntry->landBlk = blk;
-
- if (DEBUGME) {
- errs() << "setLoopLandBlock loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " landing-block = BB" << blk->getNumber() << "\n";
- }
-} // setLoopLandBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::addLoopBreakOnReg(LoopT *loopRep, RegiT regNum) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
-
- theEntry->breakOnRegs.insert(regNum);
-
- if (DEBUGME) {
- errs() << "addLoopBreakOnReg loop-header = BB"
+void AMDGPUCFGStructurizer::setLoopLandBlock(MachineLoop *loopRep,
+ MachineBasicBlock *MBB) {
+ MachineBasicBlock *&TheEntry = LLInfoMap[loopRep];
+ if (!MBB) {
+ MBB = FuncRep->CreateMachineBasicBlock();
+ FuncRep->push_back(MBB); //insert to function
+ SHOWNEWBLK(MBB, "DummyLandingBlock for loop without break: ");
+ }
+ TheEntry = MBB;
+ DEBUG(
+ dbgs() << "setLoopLandBlock loop-header = BB"
<< loopRep->getHeader()->getNumber()
- << " regNum = " << regNum << "\n";
- }
-} // addLoopBreakOnReg
-
-template<class PassT>
-void CFGStructurizer<PassT>::addLoopContOnReg(LoopT *loopRep, RegiT regNum) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
- theEntry->contOnRegs.insert(regNum);
-
- if (DEBUGME) {
- errs() << "addLoopContOnReg loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " regNum = " << regNum << "\n";
- }
-} // addLoopContOnReg
-
-template<class PassT>
-void CFGStructurizer<PassT>::addLoopBreakInitReg(LoopT *loopRep, RegiT regNum) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
- theEntry->breakInitRegs.insert(regNum);
-
- if (DEBUGME) {
- errs() << "addLoopBreakInitReg loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " regNum = " << regNum << "\n";
- }
-} // addLoopBreakInitReg
-
-template<class PassT>
-void CFGStructurizer<PassT>::addLoopContInitReg(LoopT *loopRep, RegiT regNum) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
- theEntry->contInitRegs.insert(regNum);
-
- if (DEBUGME) {
- errs() << "addLoopContInitReg loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " regNum = " << regNum << "\n";
- }
-} // addLoopContInitReg
-
-template<class PassT>
-void CFGStructurizer<PassT>::addLoopEndbranchInitReg(LoopT *loopRep,
- RegiT regNum) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
- theEntry->endbranchInitRegs.insert(regNum);
-
- if (DEBUGME) {
- errs() << "addLoopEndbranchInitReg loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " regNum = " << regNum << "\n";
- }
-} // addLoopEndbranchInitReg
-
-template<class PassT>
-typename CFGStructurizer<PassT>::LoopLandInfo *
-CFGStructurizer<PassT>::getLoopLandInfo(LoopT *loopRep) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- return theEntry;
-} // getLoopLandInfo
-
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::getLoopLandBlock(LoopT *loopRep) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- return theEntry ? theEntry->landBlk : NULL;
-} // getLoopLandBlock
-
-
-template<class PassT>
-bool CFGStructurizer<PassT>::hasBackEdge(BlockT *curBlk) {
- LoopT *loopRep = loopInfo->getLoopFor(curBlk);
- if (loopRep == NULL)
- return false;
-
- BlockT *loopHeader = loopRep->getHeader();
-
- return curBlk->isSuccessor(loopHeader);
-
-} //hasBackEdge
-
-template<class PassT>
-unsigned CFGStructurizer<PassT>::getLoopDepth(LoopT *loopRep) {
- return loopRep ? loopRep->getLoopDepth() : 0;
-} //getLoopDepth
-
-template<class PassT>
-int CFGStructurizer<PassT>::countActiveBlock
-(typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::const_iterator iterStart,
- typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::const_iterator iterEnd) {
- int count = 0;
- while (iterStart != iterEnd) {
- if (!isRetiredBlock(*iterStart)) {
- ++count;
- }
- ++iterStart;
- }
-
- return count;
-} //countActiveBlock
+ << " landing-block = BB" << MBB->getNumber() << "\n";
+ );
+}
-// This is work around solution for findNearestCommonDominator not avaiable to
-// post dom a proper fix should go to Dominators.h.
+MachineBasicBlock *
+AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1,
+ MachineBasicBlock *MBB2) {
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT*
-CFGStructurizer<PassT>::findNearestCommonPostDom(BlockT *blk1, BlockT *blk2) {
+ if (PDT->dominates(MBB1, MBB2))
+ return MBB1;
+ if (PDT->dominates(MBB2, MBB1))
+ return MBB2;
- if (postDomTree->dominates(blk1, blk2)) {
- return blk1;
- }
- if (postDomTree->dominates(blk2, blk1)) {
- return blk2;
- }
-
- DomTreeNodeT *node1 = postDomTree->getNode(blk1);
- DomTreeNodeT *node2 = postDomTree->getNode(blk2);
+ MachineDomTreeNode *Node1 = PDT->getNode(MBB1);
+ MachineDomTreeNode *Node2 = PDT->getNode(MBB2);
// Handle newly cloned node.
- if (node1 == NULL && blk1->succ_size() == 1) {
- return findNearestCommonPostDom(*blk1->succ_begin(), blk2);
- }
- if (node2 == NULL && blk2->succ_size() == 1) {
- return findNearestCommonPostDom(blk1, *blk2->succ_begin());
- }
+ if (!Node1 && MBB1->succ_size() == 1)
+ return findNearestCommonPostDom(*MBB1->succ_begin(), MBB2);
+ if (!Node2 && MBB2->succ_size() == 1)
+ return findNearestCommonPostDom(MBB1, *MBB2->succ_begin());
- if (node1 == NULL || node2 == NULL) {
+ if (!Node1 || !Node2)
return NULL;
- }
- node1 = node1->getIDom();
- while (node1) {
- if (postDomTree->dominates(node1, node2)) {
- return node1->getBlock();
- }
- node1 = node1->getIDom();
+ Node1 = Node1->getIDom();
+ while (Node1) {
+ if (PDT->dominates(Node1, Node2))
+ return Node1->getBlock();
+ Node1 = Node1->getIDom();
}
return NULL;
}
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::findNearestCommonPostDom
-(typename std::set<BlockT *> &blks) {
- BlockT *commonDom;
- typename std::set<BlockT *>::const_iterator iter = blks.begin();
- typename std::set<BlockT *>::const_iterator iterEnd = blks.end();
- for (commonDom = *iter; iter != iterEnd && commonDom != NULL; ++iter) {
- BlockT *curBlk = *iter;
- if (curBlk != commonDom) {
- commonDom = findNearestCommonPostDom(curBlk, commonDom);
- }
- }
-
- if (DEBUGME) {
- errs() << "Common post dominator for exit blocks is ";
- if (commonDom) {
- errs() << "BB" << commonDom->getNumber() << "\n";
- } else {
- errs() << "NULL\n";
- }
- }
-
- return commonDom;
-} //findNearestCommonPostDom
-
-} //end namespace llvm
-
-//todo: move-end
-
-
-//===----------------------------------------------------------------------===//
-//
-// CFGStructurizer for AMDGPU
-//
-//===----------------------------------------------------------------------===//
-
-
-using namespace llvmCFGStruct;
-
-namespace llvm {
-class AMDGPUCFGStructurizer : public MachineFunctionPass {
-public:
- typedef MachineInstr InstructionType;
- typedef MachineFunction FunctionType;
- typedef MachineBasicBlock BlockType;
- typedef MachineLoopInfo LoopinfoType;
- typedef MachineDominatorTree DominatortreeType;
- typedef MachinePostDominatorTree PostDominatortreeType;
- typedef MachineDomTreeNode DomTreeNodeType;
- typedef MachineLoop LoopType;
-
-protected:
- TargetMachine &TM;
- const TargetInstrInfo *TII;
- const AMDGPURegisterInfo *TRI;
-
-public:
- AMDGPUCFGStructurizer(char &pid, TargetMachine &tm);
- const TargetInstrInfo *getTargetInstrInfo() const;
-
-private:
-
-};
-
-} //end of namespace llvm
-AMDGPUCFGStructurizer::AMDGPUCFGStructurizer(char &pid, TargetMachine &tm)
-: MachineFunctionPass(pid), TM(tm), TII(tm.getInstrInfo()),
- TRI(static_cast<const AMDGPURegisterInfo *>(tm.getRegisterInfo())) {
-}
-
-const TargetInstrInfo *AMDGPUCFGStructurizer::getTargetInstrInfo() const {
- return TII;
+MachineBasicBlock *
+AMDGPUCFGStructurizer::findNearestCommonPostDom(
+ std::set<MachineBasicBlock *> &MBBs) {
+ MachineBasicBlock *CommonDom;
+ std::set<MachineBasicBlock *>::const_iterator It = MBBs.begin();
+ std::set<MachineBasicBlock *>::const_iterator E = MBBs.end();
+ for (CommonDom = *It; It != E && CommonDom; ++It) {
+ MachineBasicBlock *MBB = *It;
+ if (MBB != CommonDom)
+ CommonDom = findNearestCommonPostDom(MBB, CommonDom);
+ }
+
+ DEBUG(
+ dbgs() << "Common post dominator for exit blocks is ";
+ if (CommonDom)
+ dbgs() << "BB" << CommonDom->getNumber() << "\n";
+ else
+ dbgs() << "NULL\n";
+ );
+
+ return CommonDom;
}
-//===----------------------------------------------------------------------===//
-//
-// CFGPrepare
-//
-//===----------------------------------------------------------------------===//
-
-
-using namespace llvmCFGStruct;
-
-namespace llvm {
-class AMDGPUCFGPrepare : public AMDGPUCFGStructurizer {
-public:
- static char ID;
-public:
- AMDGPUCFGPrepare(TargetMachine &tm);
-
- virtual const char *getPassName() const;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
-
- bool runOnMachineFunction(MachineFunction &F);
+char AMDGPUCFGStructurizer::ID = 0;
-private:
-
-};
+} // end anonymous namespace
-char AMDGPUCFGPrepare::ID = 0;
-} //end of namespace llvm
-
-AMDGPUCFGPrepare::AMDGPUCFGPrepare(TargetMachine &tm)
- : AMDGPUCFGStructurizer(ID, tm ) {
-}
-const char *AMDGPUCFGPrepare::getPassName() const {
- return "AMD IL Control Flow Graph Preparation Pass";
-}
-
-void AMDGPUCFGPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<MachineFunctionAnalysis>();
- AU.addRequired<MachineFunctionAnalysis>();
- AU.addRequired<MachineDominatorTree>();
- AU.addRequired<MachinePostDominatorTree>();
- AU.addRequired<MachineLoopInfo>();
-}
-
-//===----------------------------------------------------------------------===//
-//
-// CFGPerform
-//
-//===----------------------------------------------------------------------===//
-
-
-using namespace llvmCFGStruct;
-
-namespace llvm {
-class AMDGPUCFGPerform : public AMDGPUCFGStructurizer {
-public:
- static char ID;
-
-public:
- AMDGPUCFGPerform(TargetMachine &tm);
- virtual const char *getPassName() const;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- bool runOnMachineFunction(MachineFunction &F);
-
-private:
-
-};
-
-char AMDGPUCFGPerform::ID = 0;
-} //end of namespace llvm
-
- AMDGPUCFGPerform::AMDGPUCFGPerform(TargetMachine &tm)
-: AMDGPUCFGStructurizer(ID, tm) {
-}
-
-const char *AMDGPUCFGPerform::getPassName() const {
- return "AMD IL Control Flow Graph structurizer Pass";
-}
-
-void AMDGPUCFGPerform::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<MachineFunctionAnalysis>();
- AU.addRequired<MachineFunctionAnalysis>();
- AU.addRequired<MachineDominatorTree>();
- AU.addRequired<MachinePostDominatorTree>();
- AU.addRequired<MachineLoopInfo>();
-}
-
-//===----------------------------------------------------------------------===//
-//
-// CFGStructTraits<AMDGPUCFGStructurizer>
-//
-//===----------------------------------------------------------------------===//
-
-namespace llvmCFGStruct {
-// this class is tailor to the AMDGPU backend
-template<>
-struct CFGStructTraits<AMDGPUCFGStructurizer> {
- typedef int RegiT;
-
- static int getBranchNzeroOpcode(int oldOpcode) {
- switch(oldOpcode) {
- case AMDGPU::JUMP_COND:
- case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
- case AMDGPU::BRANCH_COND_i32:
- case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALNZ_f32;
- default:
- assert(0 && "internal error");
- }
- return -1;
- }
-
- static int getBranchZeroOpcode(int oldOpcode) {
- switch(oldOpcode) {
- case AMDGPU::JUMP_COND:
- case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
- case AMDGPU::BRANCH_COND_i32:
- case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALZ_f32;
- default:
- assert(0 && "internal error");
- }
- return -1;
- }
-
- static int getContinueNzeroOpcode(int oldOpcode) {
- switch(oldOpcode) {
- case AMDGPU::JUMP_COND:
- case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALNZ_i32;
- default:
- assert(0 && "internal error");
- };
- return -1;
- }
-
- static int getContinueZeroOpcode(int oldOpcode) {
- switch(oldOpcode) {
- case AMDGPU::JUMP_COND:
- case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALZ_i32;
- default:
- assert(0 && "internal error");
- }
- return -1;
- }
-
- static MachineBasicBlock *getTrueBranch(MachineInstr *instr) {
- return instr->getOperand(0).getMBB();
- }
-
- static void setTrueBranch(MachineInstr *instr, MachineBasicBlock *blk) {
- instr->getOperand(0).setMBB(blk);
- }
-
- static MachineBasicBlock *
- getFalseBranch(MachineBasicBlock *blk, MachineInstr *instr) {
- assert(blk->succ_size() == 2);
- MachineBasicBlock *trueBranch = getTrueBranch(instr);
- MachineBasicBlock::succ_iterator iter = blk->succ_begin();
- MachineBasicBlock::succ_iterator iterNext = iter;
- ++iterNext;
-
- return (*iter == trueBranch) ? *iterNext : *iter;
- }
-
- static bool isCondBranch(MachineInstr *instr) {
- switch (instr->getOpcode()) {
- case AMDGPU::JUMP_COND:
- case AMDGPU::BRANCH_COND_i32:
- case AMDGPU::BRANCH_COND_f32:
- break;
- default:
- return false;
- }
- return true;
- }
-
- static bool isUncondBranch(MachineInstr *instr) {
- switch (instr->getOpcode()) {
- case AMDGPU::JUMP:
- case AMDGPU::BRANCH:
- return true;
- default:
- return false;
- }
- return true;
- }
-
- static DebugLoc getLastDebugLocInBB(MachineBasicBlock *blk) {
- //get DebugLoc from the first MachineBasicBlock instruction with debug info
- DebugLoc DL;
- for (MachineBasicBlock::iterator iter = blk->begin(); iter != blk->end(); ++iter) {
- MachineInstr *instr = &(*iter);
- if (instr->getDebugLoc().isUnknown() == false) {
- DL = instr->getDebugLoc();
- }
- }
- return DL;
- }
-
- static MachineInstr *getNormalBlockBranchInstr(MachineBasicBlock *blk) {
- MachineBasicBlock::reverse_iterator iter = blk->rbegin();
- MachineInstr *instr = &*iter;
- if (instr && (isCondBranch(instr) || isUncondBranch(instr))) {
- return instr;
- }
- return NULL;
- }
-
- // The correct naming for this is getPossibleLoopendBlockBranchInstr.
- //
- // BB with backward-edge could have move instructions after the branch
- // instruction. Such move instruction "belong to" the loop backward-edge.
- //
- static MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *blk) {
- const AMDGPUInstrInfo * TII = static_cast<const AMDGPUInstrInfo *>(
- blk->getParent()->getTarget().getInstrInfo());
-
- for (MachineBasicBlock::reverse_iterator iter = blk->rbegin(),
- iterEnd = blk->rend(); iter != iterEnd; ++iter) {
- // FIXME: Simplify
- MachineInstr *instr = &*iter;
- if (instr) {
- if (isCondBranch(instr) || isUncondBranch(instr)) {
- return instr;
- } else if (!TII->isMov(instr->getOpcode())) {
- break;
- }
- }
- }
- return NULL;
- }
-
- static MachineInstr *getReturnInstr(MachineBasicBlock *blk) {
- MachineBasicBlock::reverse_iterator iter = blk->rbegin();
- if (iter != blk->rend()) {
- MachineInstr *instr = &(*iter);
- if (instr->getOpcode() == AMDGPU::RETURN) {
- return instr;
- }
- }
- return NULL;
- }
-
- static MachineInstr *getContinueInstr(MachineBasicBlock *blk) {
- MachineBasicBlock::reverse_iterator iter = blk->rbegin();
- if (iter != blk->rend()) {
- MachineInstr *instr = &(*iter);
- if (instr->getOpcode() == AMDGPU::CONTINUE) {
- return instr;
- }
- }
- return NULL;
- }
-
- static MachineInstr *getLoopBreakInstr(MachineBasicBlock *blk) {
- for (MachineBasicBlock::iterator iter = blk->begin(); (iter != blk->end()); ++iter) {
- MachineInstr *instr = &(*iter);
- if (instr->getOpcode() == AMDGPU::PREDICATED_BREAK) {
- return instr;
- }
- }
- return NULL;
- }
-
- static bool isReturnBlock(MachineBasicBlock *blk) {
- MachineInstr *instr = getReturnInstr(blk);
- bool isReturn = (blk->succ_size() == 0);
- if (instr) {
- assert(isReturn);
- } else if (isReturn) {
- if (DEBUGME) {
- errs() << "BB" << blk->getNumber()
- <<" is return block without RETURN instr\n";
- }
- }
-
- return isReturn;
- }
-
- static MachineBasicBlock::iterator
- getInstrPos(MachineBasicBlock *blk, MachineInstr *instr) {
- assert(instr->getParent() == blk && "instruction doesn't belong to block");
- MachineBasicBlock::iterator iter = blk->begin();
- MachineBasicBlock::iterator iterEnd = blk->end();
- while (&(*iter) != instr && iter != iterEnd) {
- ++iter;
- }
-
- assert(iter != iterEnd);
- return iter;
- }//getInstrPos
-
- static MachineInstr *insertInstrBefore(MachineBasicBlock *blk, int newOpcode,
- AMDGPUCFGStructurizer *passRep) {
- return insertInstrBefore(blk,newOpcode,passRep,DebugLoc());
- } //insertInstrBefore
-
- static MachineInstr *insertInstrBefore(MachineBasicBlock *blk, int newOpcode,
- AMDGPUCFGStructurizer *passRep, DebugLoc DL) {
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineInstr *newInstr =
- blk->getParent()->CreateMachineInstr(tii->get(newOpcode), DL);
-
- MachineBasicBlock::iterator res;
- if (blk->begin() != blk->end()) {
- blk->insert(blk->begin(), newInstr);
- } else {
- blk->push_back(newInstr);
- }
-
- SHOWNEWINSTR(newInstr);
-
- return newInstr;
- } //insertInstrBefore
-
- static void insertInstrEnd(MachineBasicBlock *blk, int newOpcode,
- AMDGPUCFGStructurizer *passRep) {
- insertInstrEnd(blk,newOpcode,passRep,DebugLoc());
- } //insertInstrEnd
-
- static void insertInstrEnd(MachineBasicBlock *blk, int newOpcode,
- AMDGPUCFGStructurizer *passRep, DebugLoc DL) {
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineInstr *newInstr = blk->getParent()
- ->CreateMachineInstr(tii->get(newOpcode), DL);
-
- blk->push_back(newInstr);
- //assume the instruction doesn't take any reg operand ...
-
- SHOWNEWINSTR(newInstr);
- } //insertInstrEnd
-
- static MachineInstr *insertInstrBefore(MachineBasicBlock::iterator instrPos,
- int newOpcode,
- AMDGPUCFGStructurizer *passRep) {
- MachineInstr *oldInstr = &(*instrPos);
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineBasicBlock *blk = oldInstr->getParent();
- MachineInstr *newInstr =
- blk->getParent()->CreateMachineInstr(tii->get(newOpcode),
- DebugLoc());
-
- blk->insert(instrPos, newInstr);
- //assume the instruction doesn't take any reg operand ...
-
- SHOWNEWINSTR(newInstr);
- return newInstr;
- } //insertInstrBefore
-
- static void insertCondBranchBefore(MachineBasicBlock::iterator instrPos,
- int newOpcode,
- AMDGPUCFGStructurizer *passRep,
- DebugLoc DL) {
- MachineInstr *oldInstr = &(*instrPos);
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineBasicBlock *blk = oldInstr->getParent();
- MachineFunction *MF = blk->getParent();
- MachineInstr *newInstr = MF->CreateMachineInstr(tii->get(newOpcode), DL);
-
- blk->insert(instrPos, newInstr);
- MachineInstrBuilder MIB(*MF, newInstr);
- MIB.addReg(oldInstr->getOperand(1).getReg(), false);
-
- SHOWNEWINSTR(newInstr);
- //erase later oldInstr->eraseFromParent();
- } //insertCondBranchBefore
-
- static void insertCondBranchBefore(MachineBasicBlock *blk,
- MachineBasicBlock::iterator insertPos,
- int newOpcode,
- AMDGPUCFGStructurizer *passRep,
- RegiT regNum,
- DebugLoc DL) {
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineFunction *MF = blk->getParent();
-
- MachineInstr *newInstr = MF->CreateMachineInstr(tii->get(newOpcode), DL);
-
- //insert before
- blk->insert(insertPos, newInstr);
- MachineInstrBuilder(*MF, newInstr).addReg(regNum, false);
-
- SHOWNEWINSTR(newInstr);
- } //insertCondBranchBefore
-
- static void insertCondBranchEnd(MachineBasicBlock *blk,
- int newOpcode,
- AMDGPUCFGStructurizer *passRep,
- RegiT regNum) {
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineFunction *MF = blk->getParent();
- MachineInstr *newInstr =
- MF->CreateMachineInstr(tii->get(newOpcode), DebugLoc());
-
- blk->push_back(newInstr);
- MachineInstrBuilder(*MF, newInstr).addReg(regNum, false);
-
- SHOWNEWINSTR(newInstr);
- } //insertCondBranchEnd
-
-
- static void insertAssignInstrBefore(MachineBasicBlock::iterator instrPos,
- AMDGPUCFGStructurizer *passRep,
- RegiT regNum, int regVal) {
- MachineInstr *oldInstr = &(*instrPos);
- const AMDGPUInstrInfo *tii =
- static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
- MachineBasicBlock *blk = oldInstr->getParent();
- MachineInstr *newInstr = tii->getMovImmInstr(blk->getParent(), regNum,
- regVal);
- blk->insert(instrPos, newInstr);
-
- SHOWNEWINSTR(newInstr);
- } //insertAssignInstrBefore
-
- static void insertAssignInstrBefore(MachineBasicBlock *blk,
- AMDGPUCFGStructurizer *passRep,
- RegiT regNum, int regVal) {
- const AMDGPUInstrInfo *tii =
- static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
-
- MachineInstr *newInstr = tii->getMovImmInstr(blk->getParent(), regNum,
- regVal);
- if (blk->begin() != blk->end()) {
- blk->insert(blk->begin(), newInstr);
- } else {
- blk->push_back(newInstr);
- }
-
- SHOWNEWINSTR(newInstr);
-
- } //insertInstrBefore
-
- static void insertCompareInstrBefore(MachineBasicBlock *blk,
- MachineBasicBlock::iterator instrPos,
- AMDGPUCFGStructurizer *passRep,
- RegiT dstReg, RegiT src1Reg,
- RegiT src2Reg) {
- const AMDGPUInstrInfo *tii =
- static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
- MachineFunction *MF = blk->getParent();
- MachineInstr *newInstr =
- MF->CreateMachineInstr(tii->get(tii->getIEQOpcode()), DebugLoc());
-
- MachineInstrBuilder MIB(*MF, newInstr);
- MIB.addReg(dstReg, RegState::Define); //set target
- MIB.addReg(src1Reg); //set src value
- MIB.addReg(src2Reg); //set src value
-
- blk->insert(instrPos, newInstr);
- SHOWNEWINSTR(newInstr);
-
- } //insertCompareInstrBefore
-
- static void cloneSuccessorList(MachineBasicBlock *dstBlk,
- MachineBasicBlock *srcBlk) {
- for (MachineBasicBlock::succ_iterator iter = srcBlk->succ_begin(),
- iterEnd = srcBlk->succ_end(); iter != iterEnd; ++iter) {
- dstBlk->addSuccessor(*iter); // *iter's predecessor is also taken care of
- }
- } //cloneSuccessorList
-
- static MachineBasicBlock *clone(MachineBasicBlock *srcBlk) {
- MachineFunction *func = srcBlk->getParent();
- MachineBasicBlock *newBlk = func->CreateMachineBasicBlock();
- func->push_back(newBlk); //insert to function
- for (MachineBasicBlock::iterator iter = srcBlk->begin(),
- iterEnd = srcBlk->end();
- iter != iterEnd; ++iter) {
- MachineInstr *instr = func->CloneMachineInstr(iter);
- newBlk->push_back(instr);
- }
- return newBlk;
- }
-
- //MachineBasicBlock::ReplaceUsesOfBlockWith doesn't serve the purpose because
- //the AMDGPU instruction is not recognized as terminator fix this and retire
- //this routine
- static void replaceInstrUseOfBlockWith(MachineBasicBlock *srcBlk,
- MachineBasicBlock *oldBlk,
- MachineBasicBlock *newBlk) {
- MachineInstr *branchInstr = getLoopendBlockBranchInstr(srcBlk);
- if (branchInstr && isCondBranch(branchInstr) &&
- getTrueBranch(branchInstr) == oldBlk) {
- setTrueBranch(branchInstr, newBlk);
- }
- }
-
- static void wrapup(MachineBasicBlock *entryBlk) {
- assert((!entryBlk->getParent()->getJumpTableInfo()
- || entryBlk->getParent()->getJumpTableInfo()->isEmpty())
- && "found a jump table");
-
- //collect continue right before endloop
- SmallVector<MachineInstr *, DEFAULT_VEC_SLOTS> contInstr;
- MachineBasicBlock::iterator pre = entryBlk->begin();
- MachineBasicBlock::iterator iterEnd = entryBlk->end();
- MachineBasicBlock::iterator iter = pre;
- while (iter != iterEnd) {
- if (pre->getOpcode() == AMDGPU::CONTINUE
- && iter->getOpcode() == AMDGPU::ENDLOOP) {
- contInstr.push_back(pre);
- }
- pre = iter;
- ++iter;
- } //end while
-
- //delete continue right before endloop
- for (unsigned i = 0; i < contInstr.size(); ++i) {
- contInstr[i]->eraseFromParent();
- }
-
- // TODO to fix up jump table so later phase won't be confused. if
- // (jumpTableInfo->isEmpty() == false) { need to clean the jump table, but
- // there isn't such an interface yet. alternatively, replace all the other
- // blocks in the jump table with the entryBlk //}
-
- } //wrapup
-
- static MachineDominatorTree *getDominatorTree(AMDGPUCFGStructurizer &pass) {
- return &pass.getAnalysis<MachineDominatorTree>();
- }
-
- static MachinePostDominatorTree*
- getPostDominatorTree(AMDGPUCFGStructurizer &pass) {
- return &pass.getAnalysis<MachinePostDominatorTree>();
- }
-
- static MachineLoopInfo *getLoopInfo(AMDGPUCFGStructurizer &pass) {
- return &pass.getAnalysis<MachineLoopInfo>();
- }
-}; // template class CFGStructTraits
-} //end of namespace llvm
-
-// createAMDGPUCFGPreparationPass- Returns a pass
-FunctionPass *llvm::createAMDGPUCFGPreparationPass(TargetMachine &tm
- ) {
- return new AMDGPUCFGPrepare(tm );
-}
-
-bool AMDGPUCFGPrepare::runOnMachineFunction(MachineFunction &func) {
- return llvmCFGStruct::CFGStructurizer<AMDGPUCFGStructurizer>().prepare(func,
- *this,
- TRI);
-}
-
-// createAMDGPUCFGStructurizerPass- Returns a pass
-FunctionPass *llvm::createAMDGPUCFGStructurizerPass(TargetMachine &tm
- ) {
- return new AMDGPUCFGPerform(tm );
-}
-bool AMDGPUCFGPerform::runOnMachineFunction(MachineFunction &func) {
- return llvmCFGStruct::CFGStructurizer<AMDGPUCFGStructurizer>().run(func,
- *this,
- TRI);
+FunctionPass *llvm::createAMDGPUCFGStructurizerPass(TargetMachine &tm) {
+ return new AMDGPUCFGStructurizer(tm);
}
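
The rewritten findNearestCommonPostDom above reduces to a textbook walk up the immediate-(post)dominator chain: if neither block post-dominates the other, climb from one block's tree node until a node that post-dominates the other block is found. The standalone sketch below illustrates the same idea over a generic tree; the Node struct, its IDom pointer, and the dominates() helper are hypothetical stand-ins for illustration only, not the MachineDominatorTree/MachinePostDominatorTree API used by the pass.

#include <cstdio>

// Minimal stand-in for a (post)dominator tree node: each node knows its
// immediate dominator; the root's IDom is null.
struct Node {
  const char *Name;
  Node *IDom;
};

// A dominates B if A appears somewhere on B's immediate-dominator chain
// (or A == B).
static bool dominates(const Node *A, const Node *B) {
  for (const Node *N = B; N; N = N->IDom)
    if (N == A)
      return true;
  return false;
}

// Walk N1's dominator chain until a node is found that also dominates N2.
// This mirrors the shape of the pairwise search in the structurizer above.
static const Node *nearestCommonDom(const Node *N1, const Node *N2) {
  if (dominates(N1, N2))
    return N1;
  if (dominates(N2, N1))
    return N2;
  for (const Node *N = N1->IDom; N; N = N->IDom)
    if (dominates(N, N2))
      return N;
  return nullptr; // nodes are in disjoint trees
}

int main() {
  // Tree: root dominates a and b; a dominates c and d.
  Node Root = {"root", nullptr};
  Node A = {"a", &Root}, B = {"b", &Root};
  Node C = {"c", &A}, D = {"d", &A};
  std::printf("ncd(c,d) = %s\n", nearestCommonDom(&C, &D)->Name); // a
  std::printf("ncd(c,b) = %s\n", nearestCommonDom(&C, &B)->Name); // root
  return 0;
}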
diff --git a/lib/Target/R600/AMDILDevice.cpp b/lib/Target/R600/AMDILDevice.cpp
deleted file mode 100644
index db8e01ea4043..000000000000
--- a/lib/Target/R600/AMDILDevice.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-//===-- AMDILDevice.cpp - Base class for AMDIL Devices --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#include "AMDILDevice.h"
-#include "AMDGPUSubtarget.h"
-
-using namespace llvm;
-// Default implementation for all of the classes.
-AMDGPUDevice::AMDGPUDevice(AMDGPUSubtarget *ST) : mSTM(ST) {
- mHWBits.resize(AMDGPUDeviceInfo::MaxNumberCapabilities);
- mSWBits.resize(AMDGPUDeviceInfo::MaxNumberCapabilities);
- setCaps();
- DeviceFlag = OCL_DEVICE_ALL;
-}
-
-AMDGPUDevice::~AMDGPUDevice() {
- mHWBits.clear();
- mSWBits.clear();
-}
-
-size_t AMDGPUDevice::getMaxGDSSize() const {
- return 0;
-}
-
-uint32_t
-AMDGPUDevice::getDeviceFlag() const {
- return DeviceFlag;
-}
-
-size_t AMDGPUDevice::getMaxNumCBs() const {
- if (usesHardware(AMDGPUDeviceInfo::ConstantMem)) {
- return HW_MAX_NUM_CB;
- }
-
- return 0;
-}
-
-size_t AMDGPUDevice::getMaxCBSize() const {
- if (usesHardware(AMDGPUDeviceInfo::ConstantMem)) {
- return MAX_CB_SIZE;
- }
-
- return 0;
-}
-
-size_t AMDGPUDevice::getMaxScratchSize() const {
- return 65536;
-}
-
-uint32_t AMDGPUDevice::getStackAlignment() const {
- return 16;
-}
-
-void AMDGPUDevice::setCaps() {
- mSWBits.set(AMDGPUDeviceInfo::HalfOps);
- mSWBits.set(AMDGPUDeviceInfo::ByteOps);
- mSWBits.set(AMDGPUDeviceInfo::ShortOps);
- mSWBits.set(AMDGPUDeviceInfo::HW64BitDivMod);
- if (mSTM->isOverride(AMDGPUDeviceInfo::NoInline)) {
- mSWBits.set(AMDGPUDeviceInfo::NoInline);
- }
- if (mSTM->isOverride(AMDGPUDeviceInfo::MacroDB)) {
- mSWBits.set(AMDGPUDeviceInfo::MacroDB);
- }
- if (mSTM->isOverride(AMDGPUDeviceInfo::Debug)) {
- mSWBits.set(AMDGPUDeviceInfo::ConstantMem);
- } else {
- mHWBits.set(AMDGPUDeviceInfo::ConstantMem);
- }
- if (mSTM->isOverride(AMDGPUDeviceInfo::Debug)) {
- mSWBits.set(AMDGPUDeviceInfo::PrivateMem);
- } else {
- mHWBits.set(AMDGPUDeviceInfo::PrivateMem);
- }
- if (mSTM->isOverride(AMDGPUDeviceInfo::BarrierDetect)) {
- mSWBits.set(AMDGPUDeviceInfo::BarrierDetect);
- }
- mSWBits.set(AMDGPUDeviceInfo::ByteLDSOps);
- mSWBits.set(AMDGPUDeviceInfo::LongOps);
-}
-
-AMDGPUDeviceInfo::ExecutionMode
-AMDGPUDevice::getExecutionMode(AMDGPUDeviceInfo::Caps Caps) const {
- if (mHWBits[Caps]) {
- assert(!mSWBits[Caps] && "Cannot set both SW and HW caps");
- return AMDGPUDeviceInfo::Hardware;
- }
-
- if (mSWBits[Caps]) {
- assert(!mHWBits[Caps] && "Cannot set both SW and HW caps");
- return AMDGPUDeviceInfo::Software;
- }
-
- return AMDGPUDeviceInfo::Unsupported;
-
-}
-
-bool AMDGPUDevice::isSupported(AMDGPUDeviceInfo::Caps Mode) const {
- return getExecutionMode(Mode) != AMDGPUDeviceInfo::Unsupported;
-}
-
-bool AMDGPUDevice::usesHardware(AMDGPUDeviceInfo::Caps Mode) const {
- return getExecutionMode(Mode) == AMDGPUDeviceInfo::Hardware;
-}
-
-bool AMDGPUDevice::usesSoftware(AMDGPUDeviceInfo::Caps Mode) const {
- return getExecutionMode(Mode) == AMDGPUDeviceInfo::Software;
-}
-
-std::string
-AMDGPUDevice::getDataLayout() const {
- std::string DataLayout = std::string(
- "e"
- "-p:32:32:32"
- "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32"
- "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128"
- "-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048"
- "-n32:64"
- );
-
- if (usesHardware(AMDGPUDeviceInfo::DoubleOps)) {
- DataLayout.append("-f64:64:64");
- }
-
- return DataLayout;
-}
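
For context on the AMDILDevice.cpp removal above: the deleted AMDGPUDevice class tracked each capability in two parallel bit vectors, one for native hardware support and one for software emulation, and getExecutionMode() asserted that a capability is never set in both. Below is a minimal sketch of that scheme, using std::bitset and made-up capability values rather than the real AMDGPUDeviceInfo enums; it is an illustration of the encoding, not the removed implementation.

#include <bitset>
#include <cassert>
#include <cstdio>

// Hypothetical capability and execution-mode values for the sketch.
enum Caps { HalfOps = 0, ConstantMem, LocalMem, MaxCaps };
enum ExecutionMode { Unsupported, Software, Hardware };

struct DeviceCaps {
  std::bitset<MaxCaps> HWBits, SWBits; // one bit per capability in each set

  // A capability is Hardware if its HW bit is set, Software if only its SW
  // bit is set, and Unsupported otherwise; both bits set is a logic error.
  ExecutionMode getExecutionMode(Caps C) const {
    if (HWBits[C]) {
      assert(!SWBits[C] && "Cannot set both SW and HW caps");
      return Hardware;
    }
    if (SWBits[C]) {
      assert(!HWBits[C] && "Cannot set both SW and HW caps");
      return Software;
    }
    return Unsupported;
  }
  bool usesHardware(Caps C) const { return getExecutionMode(C) == Hardware; }
  bool usesSoftware(Caps C) const { return getExecutionMode(C) == Software; }
};

int main() {
  DeviceCaps Dev;
  Dev.HWBits.set(ConstantMem); // constant buffers handled natively
  Dev.SWBits.set(HalfOps);     // half precision emulated in software
  std::printf("ConstantMem in HW: %d\n", (int)Dev.usesHardware(ConstantMem));
  std::printf("HalfOps emulated:  %d\n", (int)Dev.usesSoftware(HalfOps));
  return 0;
}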
diff --git a/lib/Target/R600/AMDILDevice.h b/lib/Target/R600/AMDILDevice.h
deleted file mode 100644
index 97df98cafb2a..000000000000
--- a/lib/Target/R600/AMDILDevice.h
+++ /dev/null
@@ -1,117 +0,0 @@
-//===---- AMDILDevice.h - Define Device Data for AMDGPU -----*- C++ -*------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface for the subtarget data classes.
-//
-/// This file will define the interface that each generation needs to
-/// implement in order to correctly answer queries on the capabilities of the
-/// specific hardware.
-//===----------------------------------------------------------------------===//
-#ifndef AMDILDEVICEIMPL_H
-#define AMDILDEVICEIMPL_H
-#include "AMDIL.h"
-#include "llvm/ADT/BitVector.h"
-
-namespace llvm {
- class AMDGPUSubtarget;
- class MCStreamer;
-//===----------------------------------------------------------------------===//
-// Interface for data that is specific to a single device
-//===----------------------------------------------------------------------===//
-class AMDGPUDevice {
-public:
- AMDGPUDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPUDevice();
-
- // Enum values for the various memory types.
- enum {
- RAW_UAV_ID = 0,
- ARENA_UAV_ID = 1,
- LDS_ID = 2,
- GDS_ID = 3,
- SCRATCH_ID = 4,
- CONSTANT_ID = 5,
- GLOBAL_ID = 6,
- MAX_IDS = 7
- } IO_TYPE_IDS;
-
- /// \returns The max LDS size that the hardware supports. Size is in
- /// bytes.
- virtual size_t getMaxLDSSize() const = 0;
-
- /// \returns The max GDS size that the hardware supports if the GDS is
- /// supported by the hardware. Size is in bytes.
- virtual size_t getMaxGDSSize() const;
-
- /// \returns The max number of hardware constant address spaces that
- /// are supported by this device.
- virtual size_t getMaxNumCBs() const;
-
- /// \returns The max number of bytes a single hardware constant buffer
- /// can support. Size is in bytes.
- virtual size_t getMaxCBSize() const;
-
- /// \returns The max number of bytes allowed by the hardware scratch
- /// buffer. Size is in bytes.
- virtual size_t getMaxScratchSize() const;
-
- /// \brief Get the flag that corresponds to the device.
- virtual uint32_t getDeviceFlag() const;
-
- /// \returns The number of work-items that exist in a single hardware
- /// wavefront.
- virtual size_t getWavefrontSize() const = 0;
-
- /// \brief Get the generational name of this specific device.
- virtual uint32_t getGeneration() const = 0;
-
- /// \brief Get the stack alignment of this specific device.
- virtual uint32_t getStackAlignment() const;
-
- /// \brief Get the resource ID for this specific device.
- virtual uint32_t getResourceID(uint32_t DeviceID) const = 0;
-
- /// \brief Get the max number of UAV's for this device.
- virtual uint32_t getMaxNumUAVs() const = 0;
-
-
- // API utilizing more detailed capabilities of each family of
- // cards. If a capability is supported, then either usesHardware or
- // usesSoftware returned true. If usesHardware returned true, then
- // usesSoftware must return false for the same capability. Hardware
- // execution means that the feature is done natively by the hardware
- // and is not emulated by the softare. Software execution means
- // that the feature could be done in the hardware, but there is
- // software that emulates it with possibly using the hardware for
- // support since the hardware does not fully comply with OpenCL
- // specs.
-
- bool isSupported(AMDGPUDeviceInfo::Caps Mode) const;
- bool usesHardware(AMDGPUDeviceInfo::Caps Mode) const;
- bool usesSoftware(AMDGPUDeviceInfo::Caps Mode) const;
- virtual std::string getDataLayout() const;
- static const unsigned int MAX_LDS_SIZE_700 = 16384;
- static const unsigned int MAX_LDS_SIZE_800 = 32768;
- static const unsigned int WavefrontSize = 64;
- static const unsigned int HalfWavefrontSize = 32;
- static const unsigned int QuarterWavefrontSize = 16;
-protected:
- virtual void setCaps();
- BitVector mHWBits;
- llvm::BitVector mSWBits;
- AMDGPUSubtarget *mSTM;
- uint32_t DeviceFlag;
-private:
- AMDGPUDeviceInfo::ExecutionMode
- getExecutionMode(AMDGPUDeviceInfo::Caps Caps) const;
-};
-
-} // namespace llvm
-#endif // AMDILDEVICEIMPL_H
diff --git a/lib/Target/R600/AMDILDeviceInfo.cpp b/lib/Target/R600/AMDILDeviceInfo.cpp
deleted file mode 100644
index 126514b97663..000000000000
--- a/lib/Target/R600/AMDILDeviceInfo.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-//===-- AMDILDeviceInfo.cpp - AMDILDeviceInfo class -----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Function that creates DeviceInfo from a device name and other information.
-//
-//==-----------------------------------------------------------------------===//
-#include "AMDILDevices.h"
-#include "AMDGPUSubtarget.h"
-
-using namespace llvm;
-namespace llvm {
-namespace AMDGPUDeviceInfo {
-
-AMDGPUDevice* getDeviceFromName(const std::string &deviceName,
- AMDGPUSubtarget *ptr,
- bool is64bit, bool is64on32bit) {
- if (deviceName.c_str()[2] == '7') {
- switch (deviceName.c_str()[3]) {
- case '1':
- return new AMDGPU710Device(ptr);
- case '7':
- return new AMDGPU770Device(ptr);
- default:
- return new AMDGPU7XXDevice(ptr);
- }
- } else if (deviceName == "cypress") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUCypressDevice(ptr);
- } else if (deviceName == "juniper") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUEvergreenDevice(ptr);
- } else if (deviceName == "redwood" || deviceName == "sumo") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPURedwoodDevice(ptr);
- } else if (deviceName == "cedar") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUCedarDevice(ptr);
- } else if (deviceName == "barts" || deviceName == "turks") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUNIDevice(ptr);
- } else if (deviceName == "cayman") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUCaymanDevice(ptr);
- } else if (deviceName == "caicos") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUNIDevice(ptr);
- } else if (deviceName == "SI" ||
- deviceName == "tahiti" || deviceName == "pitcairn" ||
- deviceName == "verde" || deviceName == "oland" ||
- deviceName == "hainan") {
- return new AMDGPUSIDevice(ptr);
- } else {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPU7XXDevice(ptr);
- }
-}
-} // End namespace AMDGPUDeviceInfo
-} // End namespace llvm
diff --git a/lib/Target/R600/AMDILDeviceInfo.h b/lib/Target/R600/AMDILDeviceInfo.h
deleted file mode 100644
index 4b2c3a53c79f..000000000000
--- a/lib/Target/R600/AMDILDeviceInfo.h
+++ /dev/null
@@ -1,88 +0,0 @@
-//===-- AMDILDeviceInfo.h - Constants for describing devices --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#ifndef AMDILDEVICEINFO_H
-#define AMDILDEVICEINFO_H
-
-
-#include <string>
-
-namespace llvm {
- class AMDGPUDevice;
- class AMDGPUSubtarget;
- namespace AMDGPUDeviceInfo {
- /// Each Capabilities can be executed using a hardware instruction,
- /// emulated with a sequence of software instructions, or not
- /// supported at all.
- enum ExecutionMode {
- Unsupported = 0, ///< Unsupported feature on the card(Default value)
- /// This is the execution mode that is set if the feature is emulated in
- /// software.
- Software,
- /// This execution mode is set if the feature exists natively in hardware
- Hardware
- };
-
- enum Caps {
- HalfOps = 0x1, ///< Half float is supported or not.
- DoubleOps = 0x2, ///< Double is supported or not.
- ByteOps = 0x3, ///< Byte(char) is support or not.
- ShortOps = 0x4, ///< Short is supported or not.
- LongOps = 0x5, ///< Long is supported or not.
- Images = 0x6, ///< Images are supported or not.
- ByteStores = 0x7, ///< ByteStores available(!HD4XXX).
- ConstantMem = 0x8, ///< Constant/CB memory.
- LocalMem = 0x9, ///< Local/LDS memory.
- PrivateMem = 0xA, ///< Scratch/Private/Stack memory.
- RegionMem = 0xB, ///< OCL GDS Memory Extension.
- FMA = 0xC, ///< Use HW FMA or SW FMA.
- ArenaSegment = 0xD, ///< Use for Arena UAV per pointer 12-1023.
- MultiUAV = 0xE, ///< Use for UAV per Pointer 0-7.
- Reserved0 = 0xF, ///< ReservedFlag
- NoAlias = 0x10, ///< Cached loads.
- Signed24BitOps = 0x11, ///< Peephole Optimization.
- /// Debug mode implies that no hardware features or optimizations
- /// are performned and that all memory access go through a single
- /// uav(Arena on HD5XXX/HD6XXX and Raw on HD4XXX).
- Debug = 0x12,
- CachedMem = 0x13, ///< Cached mem is available or not.
- BarrierDetect = 0x14, ///< Detect duplicate barriers.
- Reserved1 = 0x15, ///< Reserved flag
- ByteLDSOps = 0x16, ///< Flag to specify if byte LDS ops are available.
- ArenaVectors = 0x17, ///< Flag to specify if vector loads from arena work.
- TmrReg = 0x18, ///< Flag to specify if Tmr register is supported.
- NoInline = 0x19, ///< Flag to specify that no inlining should occur.
- MacroDB = 0x1A, ///< Flag to specify that backend handles macrodb.
- HW64BitDivMod = 0x1B, ///< Flag for backend to generate 64bit div/mod.
- ArenaUAV = 0x1C, ///< Flag to specify that arena uav is supported.
- PrivateUAV = 0x1D, ///< Flag to specify that private memory uses uav's.
- /// If more capabilities are required, then
- /// this number needs to be increased.
- /// All capabilities must come before this
- /// number.
- MaxNumberCapabilities = 0x20
- };
- /// These have to be in order with the older generations
- /// having the lower number enumerations.
- enum Generation {
- HD4XXX = 0, ///< 7XX based devices.
- HD5XXX, ///< Evergreen based devices.
- HD6XXX, ///< NI/Evergreen+ based devices.
- HD7XXX, ///< Southern Islands based devices.
- HDTEST, ///< Experimental feature testing device.
- HDNUMGEN
- };
-
-
- AMDGPUDevice*
- getDeviceFromName(const std::string &name, AMDGPUSubtarget *ptr,
- bool is64bit = false, bool is64on32bit = false);
- } // namespace AMDILDeviceInfo
-} // namespace llvm
-#endif // AMDILDEVICEINFO_H
diff --git a/lib/Target/R600/AMDILDevices.h b/lib/Target/R600/AMDILDevices.h
deleted file mode 100644
index 636fa6d35947..000000000000
--- a/lib/Target/R600/AMDILDevices.h
+++ /dev/null
@@ -1,19 +0,0 @@
-//===-- AMDILDevices.h - Consolidate AMDIL Device headers -----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#ifndef AMDIL_DEVICES_H
-#define AMDIL_DEVICES_H
-// Include all of the device specific header files
-#include "AMDIL7XXDevice.h"
-#include "AMDILDevice.h"
-#include "AMDILEvergreenDevice.h"
-#include "AMDILNIDevice.h"
-#include "AMDILSIDevice.h"
-
-#endif // AMDIL_DEVICES_H
diff --git a/lib/Target/R600/AMDILEvergreenDevice.cpp b/lib/Target/R600/AMDILEvergreenDevice.cpp
deleted file mode 100644
index c5213a041005..000000000000
--- a/lib/Target/R600/AMDILEvergreenDevice.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-//===-- AMDILEvergreenDevice.cpp - Device Info for Evergreen --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#include "AMDILEvergreenDevice.h"
-
-using namespace llvm;
-
-AMDGPUEvergreenDevice::AMDGPUEvergreenDevice(AMDGPUSubtarget *ST)
-: AMDGPUDevice(ST) {
- setCaps();
- std::string name = ST->getDeviceName();
- if (name == "cedar") {
- DeviceFlag = OCL_DEVICE_CEDAR;
- } else if (name == "redwood") {
- DeviceFlag = OCL_DEVICE_REDWOOD;
- } else if (name == "cypress") {
- DeviceFlag = OCL_DEVICE_CYPRESS;
- } else {
- DeviceFlag = OCL_DEVICE_JUNIPER;
- }
-}
-
-AMDGPUEvergreenDevice::~AMDGPUEvergreenDevice() {
-}
-
-size_t AMDGPUEvergreenDevice::getMaxLDSSize() const {
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return MAX_LDS_SIZE_800;
- } else {
- return 0;
- }
-}
-size_t AMDGPUEvergreenDevice::getMaxGDSSize() const {
- if (usesHardware(AMDGPUDeviceInfo::RegionMem)) {
- return MAX_LDS_SIZE_800;
- } else {
- return 0;
- }
-}
-uint32_t AMDGPUEvergreenDevice::getMaxNumUAVs() const {
- return 12;
-}
-
-uint32_t AMDGPUEvergreenDevice::getResourceID(uint32_t id) const {
- switch(id) {
- default:
- assert(0 && "ID type passed in is unknown!");
- break;
- case CONSTANT_ID:
- case RAW_UAV_ID:
- return GLOBAL_RETURN_RAW_UAV_ID;
- case GLOBAL_ID:
- case ARENA_UAV_ID:
- return DEFAULT_ARENA_UAV_ID;
- case LDS_ID:
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return DEFAULT_LDS_ID;
- } else {
- return DEFAULT_ARENA_UAV_ID;
- }
- case GDS_ID:
- if (usesHardware(AMDGPUDeviceInfo::RegionMem)) {
- return DEFAULT_GDS_ID;
- } else {
- return DEFAULT_ARENA_UAV_ID;
- }
- case SCRATCH_ID:
- if (usesHardware(AMDGPUDeviceInfo::PrivateMem)) {
- return DEFAULT_SCRATCH_ID;
- } else {
- return DEFAULT_ARENA_UAV_ID;
- }
- };
- return 0;
-}
-
-size_t AMDGPUEvergreenDevice::getWavefrontSize() const {
- return AMDGPUDevice::WavefrontSize;
-}
-
-uint32_t AMDGPUEvergreenDevice::getGeneration() const {
- return AMDGPUDeviceInfo::HD5XXX;
-}
-
-void AMDGPUEvergreenDevice::setCaps() {
- mSWBits.set(AMDGPUDeviceInfo::ArenaSegment);
- mHWBits.set(AMDGPUDeviceInfo::ArenaUAV);
- mHWBits.set(AMDGPUDeviceInfo::HW64BitDivMod);
- mSWBits.reset(AMDGPUDeviceInfo::HW64BitDivMod);
- mSWBits.set(AMDGPUDeviceInfo::Signed24BitOps);
- if (mSTM->isOverride(AMDGPUDeviceInfo::ByteStores)) {
- mHWBits.set(AMDGPUDeviceInfo::ByteStores);
- }
- if (mSTM->isOverride(AMDGPUDeviceInfo::Debug)) {
- mSWBits.set(AMDGPUDeviceInfo::LocalMem);
- mSWBits.set(AMDGPUDeviceInfo::RegionMem);
- } else {
- mHWBits.set(AMDGPUDeviceInfo::LocalMem);
- mHWBits.set(AMDGPUDeviceInfo::RegionMem);
- }
- mHWBits.set(AMDGPUDeviceInfo::Images);
- if (mSTM->isOverride(AMDGPUDeviceInfo::NoAlias)) {
- mHWBits.set(AMDGPUDeviceInfo::NoAlias);
- }
- mHWBits.set(AMDGPUDeviceInfo::CachedMem);
- if (mSTM->isOverride(AMDGPUDeviceInfo::MultiUAV)) {
- mHWBits.set(AMDGPUDeviceInfo::MultiUAV);
- }
- mHWBits.set(AMDGPUDeviceInfo::ByteLDSOps);
- mSWBits.reset(AMDGPUDeviceInfo::ByteLDSOps);
- mHWBits.set(AMDGPUDeviceInfo::ArenaVectors);
- mHWBits.set(AMDGPUDeviceInfo::LongOps);
- mSWBits.reset(AMDGPUDeviceInfo::LongOps);
- mHWBits.set(AMDGPUDeviceInfo::TmrReg);
-}
-
-AMDGPUCypressDevice::AMDGPUCypressDevice(AMDGPUSubtarget *ST)
- : AMDGPUEvergreenDevice(ST) {
- setCaps();
-}
-
-AMDGPUCypressDevice::~AMDGPUCypressDevice() {
-}
-
-void AMDGPUCypressDevice::setCaps() {
- if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) {
- mHWBits.set(AMDGPUDeviceInfo::DoubleOps);
- mHWBits.set(AMDGPUDeviceInfo::FMA);
- }
-}
-
-
-AMDGPUCedarDevice::AMDGPUCedarDevice(AMDGPUSubtarget *ST)
- : AMDGPUEvergreenDevice(ST) {
- setCaps();
-}
-
-AMDGPUCedarDevice::~AMDGPUCedarDevice() {
-}
-
-void AMDGPUCedarDevice::setCaps() {
- mSWBits.set(AMDGPUDeviceInfo::FMA);
-}
-
-size_t AMDGPUCedarDevice::getWavefrontSize() const {
- return AMDGPUDevice::QuarterWavefrontSize;
-}
-
-AMDGPURedwoodDevice::AMDGPURedwoodDevice(AMDGPUSubtarget *ST)
- : AMDGPUEvergreenDevice(ST) {
- setCaps();
-}
-
-AMDGPURedwoodDevice::~AMDGPURedwoodDevice() {
-}
-
-void AMDGPURedwoodDevice::setCaps() {
- mSWBits.set(AMDGPUDeviceInfo::FMA);
-}
-
-size_t AMDGPURedwoodDevice::getWavefrontSize() const {
- return AMDGPUDevice::HalfWavefrontSize;
-}
diff --git a/lib/Target/R600/AMDILEvergreenDevice.h b/lib/Target/R600/AMDILEvergreenDevice.h
deleted file mode 100644
index ea90f774a856..000000000000
--- a/lib/Target/R600/AMDILEvergreenDevice.h
+++ /dev/null
@@ -1,93 +0,0 @@
-//==- AMDILEvergreenDevice.h - Define Evergreen Device for AMDIL -*- C++ -*--=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface for the subtarget data classes.
-///
-/// This file will define the interface that each generation needs to
-/// implement in order to correctly answer queries on the capabilities of the
-/// specific hardware.
-//===----------------------------------------------------------------------===//
-#ifndef AMDILEVERGREENDEVICE_H
-#define AMDILEVERGREENDEVICE_H
-#include "AMDGPUSubtarget.h"
-#include "AMDILDevice.h"
-
-namespace llvm {
- class AMDGPUSubtarget;
-//===----------------------------------------------------------------------===//
-// Evergreen generation of devices and their respective sub classes
-//===----------------------------------------------------------------------===//
-
-
-/// \brief The AMDGPUEvergreenDevice is the base device class for all of the Evergreen
-/// series of cards.
-///
-/// This class contains information required to differentiate
-/// the Evergreen device from the generic AMDGPUDevice. This device represents
-/// the capabilities of the 'Juniper' cards, also known as the HD57XX.
-class AMDGPUEvergreenDevice : public AMDGPUDevice {
-public:
- AMDGPUEvergreenDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPUEvergreenDevice();
- virtual size_t getMaxLDSSize() const;
- virtual size_t getMaxGDSSize() const;
- virtual size_t getWavefrontSize() const;
- virtual uint32_t getGeneration() const;
- virtual uint32_t getMaxNumUAVs() const;
- virtual uint32_t getResourceID(uint32_t) const;
-protected:
- virtual void setCaps();
-};
-
-/// The AMDGPUCypressDevice is similar to the AMDGPUEvergreenDevice, except it has
-/// support for double precision operations. This device is used to represent
-/// both the Cypress and Hemlock cards, which are commercially known as HD58XX
-/// and HD59XX cards.
-class AMDGPUCypressDevice : public AMDGPUEvergreenDevice {
-public:
- AMDGPUCypressDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPUCypressDevice();
-private:
- virtual void setCaps();
-};
-
-
-/// \brief The AMDGPUCedarDevice is the class that represents all of the 'Cedar' based
-/// devices.
-///
-/// This class differs from the base AMDGPUEvergreenDevice in that the
-/// device is a ~quarter of the 'Juniper'. These are commercially known as the
-/// HD54XX and HD53XX series of cards.
-class AMDGPUCedarDevice : public AMDGPUEvergreenDevice {
-public:
- AMDGPUCedarDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPUCedarDevice();
- virtual size_t getWavefrontSize() const;
-private:
- virtual void setCaps();
-};
-
-/// \brief The AMDGPURedwoodDevice is the class that represents all of the 'Redwood' based
-/// devices.
-///
-/// This class differs from the base class, in that these devices are
-/// considered about half of a 'Juniper' device. These are commercially known as
-/// the HD55XX and HD56XX series of cards.
-class AMDGPURedwoodDevice : public AMDGPUEvergreenDevice {
-public:
- AMDGPURedwoodDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPURedwoodDevice();
- virtual size_t getWavefrontSize() const;
-private:
- virtual void setCaps();
-};
-
-} // namespace llvm
-#endif // AMDILEVERGREENDEVICE_H
diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
deleted file mode 100644
index ba75a44ea1f1..000000000000
--- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
+++ /dev/null
@@ -1,666 +0,0 @@
-//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Defines an instruction selector for the AMDGPU target.
-//
-//===----------------------------------------------------------------------===//
-#include "AMDGPUInstrInfo.h"
-#include "AMDGPUISelLowering.h" // For AMDGPUISD
-#include "AMDGPURegisterInfo.h"
-#include "AMDILDevices.h"
-#include "R600InstrInfo.h"
-#include "SIISelLowering.h"
-#include "llvm/ADT/ValueMap.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include <list>
-#include <queue>
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Instruction Selector Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-/// AMDGPU specific code to select AMDGPU machine instructions for
-/// SelectionDAG operations.
-class AMDGPUDAGToDAGISel : public SelectionDAGISel {
- // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
- // make the right decision when generating code for different targets.
- const AMDGPUSubtarget &Subtarget;
-public:
- AMDGPUDAGToDAGISel(TargetMachine &TM);
- virtual ~AMDGPUDAGToDAGISel();
-
- SDNode *Select(SDNode *N);
- virtual const char *getPassName() const;
- virtual void PostprocessISelDAG();
-
-private:
- inline SDValue getSmallIPtrImm(unsigned Imm);
- bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
-
- // Complex pattern selectors
- bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
- bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
- bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
-
- static bool checkType(const Value *ptr, unsigned int addrspace);
- static const Value *getBasePointerValue(const Value *V);
-
- static bool isGlobalStore(const StoreSDNode *N);
- static bool isPrivateStore(const StoreSDNode *N);
- static bool isLocalStore(const StoreSDNode *N);
- static bool isRegionStore(const StoreSDNode *N);
-
- static bool isCPLoad(const LoadSDNode *N);
- static bool isConstantLoad(const LoadSDNode *N, int cbID);
- static bool isGlobalLoad(const LoadSDNode *N);
- static bool isParamLoad(const LoadSDNode *N);
- static bool isPrivateLoad(const LoadSDNode *N);
- static bool isLocalLoad(const LoadSDNode *N);
- static bool isRegionLoad(const LoadSDNode *N);
-
- bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
- bool SelectGlobalValueVariableOffset(SDValue Addr,
- SDValue &BaseReg, SDValue& Offset);
- bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
- bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
-
- // Include the pieces autogenerated from the target description.
-#include "AMDGPUGenDAGISel.inc"
-};
-} // end anonymous namespace
-
-/// \brief This pass converts a legalized DAG into an AMDGPU-specific
-// DAG, ready for instruction scheduling.
-FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM
- ) {
- return new AMDGPUDAGToDAGISel(TM);
-}
-
-AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM
- )
- : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
-}
-
-AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
-}
-
-SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRParam(
- SDValue Addr, SDValue& R1, SDValue& R2) {
-
- if (Addr.getOpcode() == ISD::FrameIndex) {
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
- R2 = CurDAG->getTargetConstant(0, MVT::i32);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i32);
- }
- } else if (Addr.getOpcode() == ISD::ADD) {
- R1 = Addr.getOperand(0);
- R2 = Addr.getOperand(1);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i32);
- }
- return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
- if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
- Addr.getOpcode() == ISD::TargetGlobalAddress) {
- return false;
- }
- return SelectADDRParam(Addr, R1, R2);
-}
-
-
-bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
- if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
- Addr.getOpcode() == ISD::TargetGlobalAddress) {
- return false;
- }
-
- if (Addr.getOpcode() == ISD::FrameIndex) {
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
- R2 = CurDAG->getTargetConstant(0, MVT::i64);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i64);
- }
- } else if (Addr.getOpcode() == ISD::ADD) {
- R1 = Addr.getOperand(0);
- R2 = Addr.getOperand(1);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i64);
- }
- return true;
-}
-
-SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
- unsigned int Opc = N->getOpcode();
- if (N->isMachineOpcode()) {
- return NULL; // Already selected.
- }
- switch (Opc) {
- default: break;
- case ISD::BUILD_VECTOR: {
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
- break;
- }
- // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
- // that adds a 128 bits reg copy when going through TwoAddressInstructions
- // pass. We want to avoid 128 bits copies as much as possible because they
- // can't be bundled by our scheduler.
- SDValue RegSeqArgs[9] = {
- CurDAG->getTargetConstant(AMDGPU::R600_Reg128RegClassID, MVT::i32),
- SDValue(), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
- SDValue(), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
- SDValue(), CurDAG->getTargetConstant(AMDGPU::sub2, MVT::i32),
- SDValue(), CurDAG->getTargetConstant(AMDGPU::sub3, MVT::i32)
- };
- bool IsRegSeq = true;
- for (unsigned i = 0; i < N->getNumOperands(); i++) {
- if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
- IsRegSeq = false;
- break;
- }
- RegSeqArgs[2 * i + 1] = N->getOperand(i);
- }
- if (!IsRegSeq)
- break;
- return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
- RegSeqArgs, 2 * N->getNumOperands() + 1);
- }
- case ISD::BUILD_PAIR: {
- SDValue RC, SubReg0, SubReg1;
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- break;
- }
- if (N->getValueType(0) == MVT::i128) {
- RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
- SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
- SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
- } else if (N->getValueType(0) == MVT::i64) {
- RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
- SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
- SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
- } else {
- llvm_unreachable("Unhandled value type for BUILD_PAIR");
- }
- const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
- N->getOperand(1), SubReg1 };
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
- N->getDebugLoc(), N->getValueType(0), Ops);
- }
-
- case ISD::ConstantFP:
- case ISD::Constant: {
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
- // XXX: Custom immediate lowering not implemented yet. Instead we use
- // pseudo instructions defined in SIInstructions.td
- if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
- break;
- }
- const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo());
-
- uint64_t ImmValue = 0;
- unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
-
- if (N->getOpcode() == ISD::ConstantFP) {
- // XXX: 64-bit Immediates not supported yet
- assert(N->getValueType(0) != MVT::f64);
-
- ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N);
- APFloat Value = C->getValueAPF();
- float FloatValue = Value.convertToFloat();
- if (FloatValue == 0.0) {
- ImmReg = AMDGPU::ZERO;
- } else if (FloatValue == 0.5) {
- ImmReg = AMDGPU::HALF;
- } else if (FloatValue == 1.0) {
- ImmReg = AMDGPU::ONE;
- } else {
- ImmValue = Value.bitcastToAPInt().getZExtValue();
- }
- } else {
- // XXX: 64-bit Immediates not supported yet
- assert(N->getValueType(0) != MVT::i64);
-
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
- if (C->getZExtValue() == 0) {
- ImmReg = AMDGPU::ZERO;
- } else if (C->getZExtValue() == 1) {
- ImmReg = AMDGPU::ONE_INT;
- } else {
- ImmValue = C->getZExtValue();
- }
- }
-
- for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
- Use != SDNode::use_end(); Use = Next) {
- Next = llvm::next(Use);
- std::vector<SDValue> Ops;
- for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
- Ops.push_back(Use->getOperand(i));
- }
-
- if (!Use->isMachineOpcode()) {
- if (ImmReg == AMDGPU::ALU_LITERAL_X) {
- // We can only use literal constants (e.g. AMDGPU::ZERO,
- // AMDGPU::ONE, etc) in machine opcodes.
- continue;
- }
- } else {
- if (!TII->isALUInstr(Use->getMachineOpcode()) ||
- (TII->get(Use->getMachineOpcode()).TSFlags &
- R600_InstFlag::VECTOR)) {
- continue;
- }
-
- int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(), R600Operands::IMM);
- assert(ImmIdx != -1);
-
- // subtract one from ImmIdx, because the DST operand is usually index
- // 0 for MachineInstrs, but we have no DST in the Ops vector.
- ImmIdx--;
-
- // Check that we aren't already using an immediate.
- // XXX: It's possible for an instruction to have more than one
- // immediate operand, but this is not supported yet.
- if (ImmReg == AMDGPU::ALU_LITERAL_X) {
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
- assert(C);
-
- if (C->getZExtValue() != 0) {
- // This instruction is already using an immediate.
- continue;
- }
-
- // Set the immediate value
- Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
- }
- }
- // Set the immediate register
- Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);
-
- CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
- }
- break;
- }
- }
- SDNode *Result = SelectCode(N);
-
- // Fold operands of selected node
-
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- const R600InstrInfo *TII =
- static_cast<const R600InstrInfo*>(TM.getInstrInfo());
- if (Result && Result->isMachineOpcode() &&
- !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
- && TII->isALUInstr(Result->getMachineOpcode())) {
- // Fold FNEG/FABS/CONST_ADDRESS
- // TODO: Isel can generate multiple MachineInst, we need to recursively
- // parse Result
- bool IsModified = false;
- do {
- std::vector<SDValue> Ops;
- for(SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
- I != E; ++I)
- Ops.push_back(*I);
- IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
- if (IsModified) {
- Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
- }
- } while (IsModified);
-
- // If node has a single use which is CLAMP_R600, folds it
- if (Result->hasOneUse() && Result->isMachineOpcode()) {
- SDNode *PotentialClamp = *Result->use_begin();
- if (PotentialClamp->isMachineOpcode() &&
- PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
- unsigned ClampIdx =
- TII->getOperandIdx(Result->getMachineOpcode(), R600Operands::CLAMP);
- std::vector<SDValue> Ops;
- unsigned NumOp = Result->getNumOperands();
- for (unsigned i = 0; i < NumOp; ++i) {
- Ops.push_back(Result->getOperand(i));
- }
- Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
- Result = CurDAG->SelectNodeTo(PotentialClamp,
- Result->getMachineOpcode(), PotentialClamp->getVTList(),
- Ops.data(), NumOp);
- }
- }
- }
- }
-
- return Result;
-}
-
-bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
- const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
- int OperandIdx[] = {
- TII->getOperandIdx(Opcode, R600Operands::SRC0),
- TII->getOperandIdx(Opcode, R600Operands::SRC1),
- TII->getOperandIdx(Opcode, R600Operands::SRC2)
- };
- int SelIdx[] = {
- TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL),
- TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL),
- TII->getOperandIdx(Opcode, R600Operands::SRC2_SEL)
- };
- int NegIdx[] = {
- TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG),
- TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG),
- TII->getOperandIdx(Opcode, R600Operands::SRC2_NEG)
- };
- int AbsIdx[] = {
- TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS),
- TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS),
- -1
- };
-
- for (unsigned i = 0; i < 3; i++) {
- if (OperandIdx[i] < 0)
- return false;
- SDValue Operand = Ops[OperandIdx[i] - 1];
- switch (Operand.getOpcode()) {
- case AMDGPUISD::CONST_ADDRESS: {
- SDValue CstOffset;
- if (Operand.getValueType().isVector() ||
- !SelectGlobalValueConstantOffset(Operand.getOperand(0), CstOffset))
- break;
-
-      // Gather the other constant values
- std::vector<unsigned> Consts;
- for (unsigned j = 0; j < 3; j++) {
- int SrcIdx = OperandIdx[j];
- if (SrcIdx < 0)
- break;
- if (RegisterSDNode *Reg = dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) {
- if (Reg->getReg() == AMDGPU::ALU_CONST) {
- ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Ops[SelIdx[j] - 1]);
- Consts.push_back(Cst->getZExtValue());
- }
- }
- }
-
- ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
- Consts.push_back(Cst->getZExtValue());
- if (!TII->fitsConstReadLimitations(Consts))
- break;
-
- Ops[OperandIdx[i] - 1] = CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32);
- Ops[SelIdx[i] - 1] = CstOffset;
- return true;
- }
- case ISD::FNEG:
- if (NegIdx[i] < 0)
- break;
- Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
- Ops[NegIdx[i] - 1] = CurDAG->getTargetConstant(1, MVT::i32);
- return true;
- case ISD::FABS:
- if (AbsIdx[i] < 0)
- break;
- Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
- Ops[AbsIdx[i] - 1] = CurDAG->getTargetConstant(1, MVT::i32);
- return true;
- case ISD::BITCAST:
- Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
- return true;
- default:
- break;
- }
- }
- return false;
-}
-
-bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
- if (!ptr) {
- return false;
- }
- Type *ptrType = ptr->getType();
- return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
-}
-
-const Value * AMDGPUDAGToDAGISel::getBasePointerValue(const Value *V) {
- if (!V) {
- return NULL;
- }
- const Value *ret = NULL;
- ValueMap<const Value *, bool> ValueBitMap;
- std::queue<const Value *, std::list<const Value *> > ValueQueue;
- ValueQueue.push(V);
- while (!ValueQueue.empty()) {
- V = ValueQueue.front();
- if (ValueBitMap.find(V) == ValueBitMap.end()) {
- ValueBitMap[V] = true;
- if (dyn_cast<Argument>(V) && dyn_cast<PointerType>(V->getType())) {
- ret = V;
- break;
- } else if (dyn_cast<GlobalVariable>(V)) {
- ret = V;
- break;
- } else if (dyn_cast<Constant>(V)) {
- const ConstantExpr *CE = dyn_cast<ConstantExpr>(V);
- if (CE) {
- ValueQueue.push(CE->getOperand(0));
- }
- } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
- ret = AI;
- break;
- } else if (const Instruction *I = dyn_cast<Instruction>(V)) {
- uint32_t numOps = I->getNumOperands();
- for (uint32_t x = 0; x < numOps; ++x) {
- ValueQueue.push(I->getOperand(x));
- }
- } else {
- assert(!"Found a Value that we didn't know how to handle!");
- }
- }
- ValueQueue.pop();
- }
- return ret;
-}
-
-bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
- return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
-}
-
-bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int cbID) {
- if (checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)) {
- return true;
- }
- MachineMemOperand *MMO = N->getMemOperand();
- const Value *V = MMO->getValue();
- const Value *BV = getBasePointerValue(V);
- if (MMO
- && MMO->getValue()
- && ((V && dyn_cast<GlobalValue>(V))
- || (BV && dyn_cast<GlobalValue>(
- getBasePointerValue(MMO->getValue()))))) {
- return checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS);
- } else {
- return false;
- }
-}
-
-bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) {
- MachineMemOperand *MMO = N->getMemOperand();
- if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
- if (MMO) {
- const Value *V = MMO->getValue();
- const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
- if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
- return true;
- }
- }
- }
- return false;
-}
-
-bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) {
- if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
- // Check to make sure we are not a constant pool load or a constant load
- // that is marked as a private load
- if (isCPLoad(N) || isConstantLoad(N, -1)) {
- return false;
- }
- }
- if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
- return true;
- }
- return false;
-}
-
-const char *AMDGPUDAGToDAGISel::getPassName() const {
- return "AMDGPU DAG->DAG Pattern Instruction Selection";
-}
-
-#ifdef DEBUGTMP
-#undef INT64_C
-#endif
-#undef DEBUGTMP
-
-///==== AMDGPU Functions ====///
-
-bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
- SDValue& IntPtr) {
- if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
- IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
- return true;
- }
- return false;
-}
-
-bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
- SDValue& BaseReg, SDValue &Offset) {
- if (!dyn_cast<ConstantSDNode>(Addr)) {
- BaseReg = Addr;
- Offset = CurDAG->getIntPtrConstant(0, true);
- return true;
- }
- return false;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
- SDValue &Offset) {
- ConstantSDNode * IMMOffset;
-
- if (Addr.getOpcode() == ISD::ADD
- && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
- && isInt<16>(IMMOffset->getZExtValue())) {
-
- Base = Addr.getOperand(0);
- Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
- return true;
- // If the pointer address is constant, we can move it to the offset field.
- } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
- && isInt<16>(IMMOffset->getZExtValue())) {
- Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
- CurDAG->getEntryNode().getDebugLoc(),
- AMDGPU::ZERO, MVT::i32);
- Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
- return true;
- }
-
- // Default case, no offset
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
- SDValue &Offset) {
- ConstantSDNode *C;
-
- if ((C = dyn_cast<ConstantSDNode>(Addr))) {
- Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
- Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
- } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
- (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
- Base = Addr.getOperand(0);
- Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
- } else {
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
- }
-
- return true;
-}
-
-void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
-
- // Go over all selected nodes and try to fold them a bit more
- const AMDGPUTargetLowering& Lowering = ((const AMDGPUTargetLowering&)TLI);
- for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
- E = CurDAG->allnodes_end(); I != E; ++I) {
-
- MachineSDNode *Node = dyn_cast<MachineSDNode>(I);
- if (!Node)
- continue;
-
- SDNode *ResNode = Lowering.PostISelFolding(Node, *CurDAG);
- if (ResNode != Node)
- ReplaceUses(Node, ResNode);
- }
-}
-
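
The ConstantFP/Constant case in the Select() routine removed above replaces a handful of special float and integer immediates with dedicated constant registers and falls back to ALU_LITERAL_X for everything else. A minimal standalone sketch of that classification, using illustrative enum names rather than the real AMDGPU register definitions:

    #include <cstdint>
    #include <cstring>

    // Illustrative stand-ins for the inline-constant registers used above
    // (AMDGPU::ZERO, AMDGPU::HALF, AMDGPU::ONE, AMDGPU::ALU_LITERAL_X).
    enum ImmReg { IMM_ZERO, IMM_HALF, IMM_ONE, IMM_LITERAL };

    // Classify a 32-bit float immediate the way the removed ConstantFP case did:
    // exact 0.0, 0.5 and 1.0 map to dedicated constant registers; anything else
    // has to be emitted as a literal alongside the instruction.
    ImmReg classifyFPImm(float Value, uint32_t &LiteralBits) {
      if (Value == 0.0f) return IMM_ZERO;
      if (Value == 0.5f) return IMM_HALF;
      if (Value == 1.0f) return IMM_ONE;
      std::memcpy(&LiteralBits, &Value, sizeof(Value)); // reuse the raw bit pattern
      return IMM_LITERAL;
    }
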
diff --git a/lib/Target/R600/AMDILISelLowering.cpp b/lib/Target/R600/AMDILISelLowering.cpp
index 922cac12b98e..970787ef31e0 100644
--- a/lib/Target/R600/AMDILISelLowering.cpp
+++ b/lib/Target/R600/AMDILISelLowering.cpp
@@ -15,7 +15,6 @@
#include "AMDGPUISelLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
-#include "AMDILDevices.h"
#include "AMDILIntrinsicInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -40,7 +39,7 @@ using namespace llvm;
// TargetLowering Class Implementation Begins
//===----------------------------------------------------------------------===//
void AMDGPUTargetLowering::InitAMDILLowering() {
- int types[] = {
+ static const int types[] = {
(int)MVT::i8,
(int)MVT::i16,
(int)MVT::i32,
@@ -59,19 +58,19 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
(int)MVT::v2i64
};
- int IntTypes[] = {
+ static const int IntTypes[] = {
(int)MVT::i8,
(int)MVT::i16,
(int)MVT::i32,
(int)MVT::i64
};
- int FloatTypes[] = {
+ static const int FloatTypes[] = {
(int)MVT::f32,
(int)MVT::f64
};
- int VectorTypes[] = {
+ static const int VectorTypes[] = {
(int)MVT::v2i8,
(int)MVT::v4i8,
(int)MVT::v2i16,
@@ -83,10 +82,10 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
(int)MVT::v2f64,
(int)MVT::v2i64
};
- size_t NumTypes = sizeof(types) / sizeof(*types);
- size_t NumFloatTypes = sizeof(FloatTypes) / sizeof(*FloatTypes);
- size_t NumIntTypes = sizeof(IntTypes) / sizeof(*IntTypes);
- size_t NumVectorTypes = sizeof(VectorTypes) / sizeof(*VectorTypes);
+ const size_t NumTypes = array_lengthof(types);
+ const size_t NumFloatTypes = array_lengthof(FloatTypes);
+ const size_t NumIntTypes = array_lengthof(IntTypes);
+ const size_t NumVectorTypes = array_lengthof(VectorTypes);
const AMDGPUSubtarget &STM = getTargetMachine().getSubtarget<AMDGPUSubtarget>();
// These are the current register classes that are
@@ -138,8 +137,6 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
setOperationAction(ISD::SMUL_LOHI, VT, Expand);
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
- // GPU doesn't have a rotl, rotr, or byteswap instruction
- setOperationAction(ISD::ROTR, VT, Expand);
setOperationAction(ISD::BSWAP, VT, Expand);
// GPU doesn't have any counting operators
@@ -158,21 +155,19 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
setOperationAction(ISD::SELECT_CC, VT, Expand);
}
- if (STM.device()->isSupported(AMDGPUDeviceInfo::LongOps)) {
- setOperationAction(ISD::MULHU, MVT::i64, Expand);
- setOperationAction(ISD::MULHU, MVT::v2i64, Expand);
- setOperationAction(ISD::MULHS, MVT::i64, Expand);
- setOperationAction(ISD::MULHS, MVT::v2i64, Expand);
- setOperationAction(ISD::ADD, MVT::v2i64, Expand);
- setOperationAction(ISD::SREM, MVT::v2i64, Expand);
- setOperationAction(ISD::Constant , MVT::i64 , Legal);
- setOperationAction(ISD::SDIV, MVT::v2i64, Expand);
- setOperationAction(ISD::TRUNCATE, MVT::v2i64, Expand);
- setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Expand);
- setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Expand);
- setOperationAction(ISD::ANY_EXTEND, MVT::v2i64, Expand);
- }
- if (STM.device()->isSupported(AMDGPUDeviceInfo::DoubleOps)) {
+ setOperationAction(ISD::MULHU, MVT::i64, Expand);
+ setOperationAction(ISD::MULHU, MVT::v2i64, Expand);
+ setOperationAction(ISD::MULHS, MVT::i64, Expand);
+ setOperationAction(ISD::MULHS, MVT::v2i64, Expand);
+ setOperationAction(ISD::ADD, MVT::v2i64, Expand);
+ setOperationAction(ISD::SREM, MVT::v2i64, Expand);
+ setOperationAction(ISD::Constant , MVT::i64 , Legal);
+ setOperationAction(ISD::SDIV, MVT::v2i64, Expand);
+ setOperationAction(ISD::TRUNCATE, MVT::v2i64, Expand);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Expand);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Expand);
+ setOperationAction(ISD::ANY_EXTEND, MVT::v2i64, Expand);
+ if (STM.hasHWFP64()) {
// we support loading/storing v2f64 but not operations on the type
setOperationAction(ISD::FADD, MVT::v2f64, Expand);
setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
@@ -331,7 +326,7 @@ SDValue
AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const {
SDValue Data = Op.getOperand(0);
VTSDNode *BaseType = cast<VTSDNode>(Op.getOperand(1));
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT DVT = Data.getValueType();
EVT BVT = BaseType->getVT();
unsigned baseBits = BVT.getScalarType().getSizeInBits();
@@ -387,7 +382,7 @@ AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Result;
Result = DAG.getNode(
AMDGPUISD::BRANCH_COND,
- Op.getDebugLoc(),
+ SDLoc(Op),
Op.getValueType(),
Chain, Jump, Cond);
return Result;
@@ -395,7 +390,7 @@ AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue
AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT OVT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
@@ -476,7 +471,7 @@ AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
SDValue
AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT OVT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
@@ -547,7 +542,7 @@ AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const {
SDValue
AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT OVT = Op.getValueType();
MVT INTTY = MVT::i32;
if (OVT == MVT::v2i8) {
@@ -564,7 +559,7 @@ AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const {
SDValue
AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT OVT = Op.getValueType();
MVT INTTY = MVT::i32;
if (OVT == MVT::v2i16) {
@@ -581,7 +576,7 @@ AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const {
SDValue
AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT OVT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
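
The hunks above replace the sizeof(a)/sizeof(*a) idiom with array_lengthof, which deduces the element count from the array type and refuses to compile if the argument has decayed to a pointer. A self-contained stand-in for that helper (the real one comes from llvm/ADT/STLExtras.h):

    #include <cstddef>

    // Deduces N from the array reference; passing a pointer by mistake fails to
    // compile instead of silently yielding sizeof(ptr)/sizeof(elem).
    template <class T, std::size_t N>
    constexpr std::size_t array_lengthof(T (&)[N]) {
      return N;
    }

    static const int IntTypes[] = { 8, 16, 32, 64 };
    static_assert(array_lengthof(IntTypes) == 4, "length is known at compile time");
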
diff --git a/lib/Target/R600/AMDILInstrInfo.td b/lib/Target/R600/AMDILInstrInfo.td
index 110f1476513b..0f0c88db935a 100644
--- a/lib/Target/R600/AMDILInstrInfo.td
+++ b/lib/Target/R600/AMDILInstrInfo.td
@@ -10,63 +10,6 @@
// This file describes the AMDIL instructions in TableGen format.
//
//===----------------------------------------------------------------------===//
-// AMDIL Instruction Predicate Definitions
-// Predicate that is set to true if the hardware supports double precision
-// divide
-def HasHWDDiv : Predicate<"Subtarget.device()"
- "->getGeneration() > AMDGPUDeviceInfo::HD4XXX && "
- "Subtarget.device()->usesHardware(AMDGPUDeviceInfo::DoubleOps)">;
-
-// Predicate that is set to true if the hardware supports double, but not double
-// precision divide in hardware
-def HasSWDDiv : Predicate<"Subtarget.device()"
- "->getGeneration() == AMDGPUDeviceInfo::HD4XXX &&"
- "Subtarget.device()->usesHardware(AMDGPUDeviceInfo::DoubleOps)">;
-
-// Predicate that is set to true if the hardware support 24bit signed
-// math ops. Otherwise a software expansion to 32bit math ops is used instead.
-def HasHWSign24Bit : Predicate<"Subtarget.device()"
- "->getGeneration() > AMDGPUDeviceInfo::HD5XXX">;
-
-// Predicate that is set to true if 64bit operations are supported or not
-def HasHW64Bit : Predicate<"Subtarget.device()"
- "->usesHardware(AMDGPUDeviceInfo::LongOps)">;
-def HasSW64Bit : Predicate<"Subtarget.device()"
- "->usesSoftware(AMDGPUDeviceInfo::LongOps)">;
-
-// Predicate that is set to true if the timer register is supported
-def HasTmrRegister : Predicate<"Subtarget.device()"
- "->isSupported(AMDGPUDeviceInfo::TmrReg)">;
-// Predicate that is true if we are at least evergreen series
-def HasDeviceIDInst : Predicate<"Subtarget.device()"
- "->getGeneration() >= AMDGPUDeviceInfo::HD5XXX">;
-
-// Predicate that is true if we have region address space.
-def hasRegionAS : Predicate<"Subtarget.device()"
- "->usesHardware(AMDGPUDeviceInfo::RegionMem)">;
-
-// Predicate that is false if we don't have region address space.
-def noRegionAS : Predicate<"!Subtarget.device()"
- "->isSupported(AMDGPUDeviceInfo::RegionMem)">;
-
-
-// Predicate that is set to true if 64bit Mul is supported in the IL or not
-def HasHW64Mul : Predicate<"Subtarget.calVersion()"
- ">= CAL_VERSION_SC_139"
- "&& Subtarget.device()"
- "->getGeneration() >="
- "AMDGPUDeviceInfo::HD5XXX">;
-def HasSW64Mul : Predicate<"Subtarget.calVersion()"
- "< CAL_VERSION_SC_139">;
-// Predicate that is set to true if 64bit Div/Mod is supported in the IL or not
-def HasHW64DivMod : Predicate<"Subtarget.device()"
- "->usesHardware(AMDGPUDeviceInfo::HW64BitDivMod)">;
-def HasSW64DivMod : Predicate<"Subtarget.device()"
- "->usesSoftware(AMDGPUDeviceInfo::HW64BitDivMod)">;
-
-// Predicate that is set to true if 64bit pointer are used.
-def Has64BitPtr : Predicate<"Subtarget.is64bit()">;
-def Has32BitPtr : Predicate<"!Subtarget.is64bit()">;
//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
@@ -175,15 +118,15 @@ class ILFormat<dag outs, dag ins, string asmstr, list<dag> pattern>
// Multiclass Instruction formats
//===--------------------------------------------------------------------===//
// Multiclass that handles branch instructions
-multiclass BranchConditional<SDNode Op> {
+multiclass BranchConditional<SDNode Op, RegisterClass rci, RegisterClass rcf> {
def _i32 : ILFormat<(outs),
- (ins brtarget:$target, GPRI32:$src0),
+ (ins brtarget:$target, rci:$src0),
"; i32 Pseudo branch instruction",
- [(Op bb:$target, GPRI32:$src0)]>;
+ [(Op bb:$target, (i32 rci:$src0))]>;
def _f32 : ILFormat<(outs),
- (ins brtarget:$target, GPRF32:$src0),
+ (ins brtarget:$target, rcf:$src0),
"; f32 Pseudo branch instruction",
- [(Op bb:$target, GPRF32:$src0)]>;
+ [(Op bb:$target, (f32 rcf:$src0))]>;
}
// Only scalar types should generate flow control
diff --git a/lib/Target/R600/AMDILIntrinsicInfo.cpp b/lib/Target/R600/AMDILIntrinsicInfo.cpp
index 4ddb057d80a7..762ee39e4693 100644
--- a/lib/Target/R600/AMDILIntrinsicInfo.cpp
+++ b/lib/Target/R600/AMDILIntrinsicInfo.cpp
@@ -14,7 +14,6 @@
#include "AMDILIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
-#include "AMDIL.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
@@ -50,6 +49,9 @@ AMDGPUIntrinsicInfo::getName(unsigned int IntrID, Type **Tys,
unsigned int
AMDGPUIntrinsicInfo::lookupName(const char *Name, unsigned int Len) const {
+ if (!StringRef(Name, Len).startswith("llvm."))
+ return 0; // All intrinsics start with 'llvm.'
+
#define GET_FUNCTION_RECOGNIZER
#include "AMDGPUGenIntrinsics.inc"
#undef GET_FUNCTION_RECOGNIZER
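
The added guard in lookupName above gives the generated recognizer an early out: every LLVM intrinsic name starts with "llvm.", so anything else cannot match. A rough standalone equivalent of that check, with the fall-through to the generated matcher elided:

    #include <cstring>

    // Returns 0 (not an intrinsic) without touching the generated tables when the
    // name cannot possibly be an intrinsic.
    unsigned lookupIntrinsicId(const char *Name, unsigned Len) {
      static const char Prefix[] = "llvm.";
      const unsigned PrefixLen = sizeof(Prefix) - 1;
      if (Len < PrefixLen || std::strncmp(Name, Prefix, PrefixLen) != 0)
        return 0;
      // ... the generated GET_FUNCTION_RECOGNIZER code would run here ...
      return 0;
    }
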
diff --git a/lib/Target/R600/AMDILNIDevice.cpp b/lib/Target/R600/AMDILNIDevice.cpp
deleted file mode 100644
index 47c3f7f209d6..000000000000
--- a/lib/Target/R600/AMDILNIDevice.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-//===-- AMDILNIDevice.cpp - Device Info for Northern Islands devices ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#include "AMDILNIDevice.h"
-#include "AMDGPUSubtarget.h"
-#include "AMDILEvergreenDevice.h"
-
-using namespace llvm;
-
-AMDGPUNIDevice::AMDGPUNIDevice(AMDGPUSubtarget *ST)
- : AMDGPUEvergreenDevice(ST) {
- std::string name = ST->getDeviceName();
- if (name == "caicos") {
- DeviceFlag = OCL_DEVICE_CAICOS;
- } else if (name == "turks") {
- DeviceFlag = OCL_DEVICE_TURKS;
- } else if (name == "cayman") {
- DeviceFlag = OCL_DEVICE_CAYMAN;
- } else {
- DeviceFlag = OCL_DEVICE_BARTS;
- }
-}
-AMDGPUNIDevice::~AMDGPUNIDevice() {
-}
-
-size_t
-AMDGPUNIDevice::getMaxLDSSize() const {
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return MAX_LDS_SIZE_900;
- } else {
- return 0;
- }
-}
-
-uint32_t
-AMDGPUNIDevice::getGeneration() const {
- return AMDGPUDeviceInfo::HD6XXX;
-}
-
-
-AMDGPUCaymanDevice::AMDGPUCaymanDevice(AMDGPUSubtarget *ST)
- : AMDGPUNIDevice(ST) {
- setCaps();
-}
-
-AMDGPUCaymanDevice::~AMDGPUCaymanDevice() {
-}
-
-void
-AMDGPUCaymanDevice::setCaps() {
- if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) {
- mHWBits.set(AMDGPUDeviceInfo::DoubleOps);
- mHWBits.set(AMDGPUDeviceInfo::FMA);
- }
- mHWBits.set(AMDGPUDeviceInfo::Signed24BitOps);
- mSWBits.reset(AMDGPUDeviceInfo::Signed24BitOps);
- mSWBits.set(AMDGPUDeviceInfo::ArenaSegment);
-}
-
diff --git a/lib/Target/R600/AMDILNIDevice.h b/lib/Target/R600/AMDILNIDevice.h
deleted file mode 100644
index 24a640845eab..000000000000
--- a/lib/Target/R600/AMDILNIDevice.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//===------- AMDILNIDevice.h - Define NI Device for AMDIL -*- C++ -*------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-/// \file
-/// \brief Interface for the subtarget data classes.
-///
-/// This file will define the interface that each generation needs to
-/// implement in order to correctly answer queries on the capabilities of the
-/// specific hardware.
-//===---------------------------------------------------------------------===//
-#ifndef AMDILNIDEVICE_H
-#define AMDILNIDEVICE_H
-#include "AMDGPUSubtarget.h"
-#include "AMDILEvergreenDevice.h"
-
-namespace llvm {
-
-class AMDGPUSubtarget;
-//===---------------------------------------------------------------------===//
-// NI generation of devices and their respective sub classes
-//===---------------------------------------------------------------------===//
-
-/// \brief The AMDGPUNIDevice is the base class for all Northern Island series of
-/// cards.
-///
-/// It is very similar to the AMDGPUEvergreenDevice, with the major
-/// exception being differences in wavefront size and hardware capabilities. The
-/// NI devices all use 64-wide wavefronts and also add support for signed 24-bit
-/// integer operations.
-class AMDGPUNIDevice : public AMDGPUEvergreenDevice {
-public:
- AMDGPUNIDevice(AMDGPUSubtarget*);
- virtual ~AMDGPUNIDevice();
- virtual size_t getMaxLDSSize() const;
- virtual uint32_t getGeneration() const;
-};
-
-/// Just as the AMDGPUCypressDevice is the double-capable version of the
-/// AMDGPUEvergreenDevice, the AMDGPUCaymanDevice is the double-capable version
-/// of the AMDGPUNIDevice. The other major difference is that the Cayman device
-/// has 4-wide ALUs, whereas the rest of the NI family is 5 wide.
-class AMDGPUCaymanDevice: public AMDGPUNIDevice {
-public:
- AMDGPUCaymanDevice(AMDGPUSubtarget*);
- virtual ~AMDGPUCaymanDevice();
-private:
- virtual void setCaps();
-};
-
-static const unsigned int MAX_LDS_SIZE_900 = AMDGPUDevice::MAX_LDS_SIZE_800;
-} // namespace llvm
-#endif // AMDILNIDEVICE_H
diff --git a/lib/Target/R600/AMDILSIDevice.cpp b/lib/Target/R600/AMDILSIDevice.cpp
deleted file mode 100644
index 0d1de3d11eb4..000000000000
--- a/lib/Target/R600/AMDILSIDevice.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//===-- AMDILSIDevice.cpp - Device Info for Southern Islands GPUs ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#include "AMDILSIDevice.h"
-#include "AMDGPUSubtarget.h"
-#include "AMDILEvergreenDevice.h"
-#include "AMDILNIDevice.h"
-
-using namespace llvm;
-
-AMDGPUSIDevice::AMDGPUSIDevice(AMDGPUSubtarget *ST)
- : AMDGPUEvergreenDevice(ST) {
-}
-AMDGPUSIDevice::~AMDGPUSIDevice() {
-}
-
-size_t
-AMDGPUSIDevice::getMaxLDSSize() const {
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return MAX_LDS_SIZE_900;
- } else {
- return 0;
- }
-}
-
-uint32_t
-AMDGPUSIDevice::getGeneration() const {
- return AMDGPUDeviceInfo::HD7XXX;
-}
-
-std::string
-AMDGPUSIDevice::getDataLayout() const {
- return std::string(
- "e"
- "-p:64:64:64"
- "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64"
- "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128"
- "-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024"
- "-v2048:2048:2048"
- "-n32:64"
- );
-}
diff --git a/lib/Target/R600/AMDILSIDevice.h b/lib/Target/R600/AMDILSIDevice.h
deleted file mode 100644
index 5b2cb2502211..000000000000
--- a/lib/Target/R600/AMDILSIDevice.h
+++ /dev/null
@@ -1,39 +0,0 @@
-//===------- AMDILSIDevice.h - Define SI Device for AMDIL -*- C++ -*------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface for the subtarget data classes.
-///
-/// This file will define the interface that each generation needs to
-/// implement in order to correctly answer queries on the capabilities of the
-/// specific hardware.
-//===---------------------------------------------------------------------===//
-#ifndef AMDILSIDEVICE_H
-#define AMDILSIDEVICE_H
-#include "AMDILEvergreenDevice.h"
-
-namespace llvm {
-class AMDGPUSubtarget;
-//===---------------------------------------------------------------------===//
-// SI generation of devices and their respective sub classes
-//===---------------------------------------------------------------------===//
-
-/// \brief The AMDGPUSIDevice is the base class for all Southern Island series
-/// of cards.
-class AMDGPUSIDevice : public AMDGPUEvergreenDevice {
-public:
- AMDGPUSIDevice(AMDGPUSubtarget*);
- virtual ~AMDGPUSIDevice();
- virtual size_t getMaxLDSSize() const;
- virtual uint32_t getGeneration() const;
- virtual std::string getDataLayout() const;
-};
-
-} // namespace llvm
-#endif // AMDILSIDEVICE_H
diff --git a/lib/Target/R600/CMakeLists.txt b/lib/Target/R600/CMakeLists.txt
index 97f0a40c2981..9f8f6a83e459 100644
--- a/lib/Target/R600/CMakeLists.txt
+++ b/lib/Target/R600/CMakeLists.txt
@@ -12,28 +12,22 @@ tablegen(LLVM AMDGPUGenAsmWriter.inc -gen-asm-writer)
add_public_tablegen_target(AMDGPUCommonTableGen)
add_llvm_target(R600CodeGen
- AMDIL7XXDevice.cpp
AMDILCFGStructurizer.cpp
- AMDILDevice.cpp
- AMDILDeviceInfo.cpp
- AMDILEvergreenDevice.cpp
AMDILIntrinsicInfo.cpp
- AMDILISelDAGToDAG.cpp
AMDILISelLowering.cpp
- AMDILNIDevice.cpp
- AMDILSIDevice.cpp
AMDGPUAsmPrinter.cpp
AMDGPUFrameLowering.cpp
- AMDGPUIndirectAddressing.cpp
+ AMDGPUISelDAGToDAG.cpp
AMDGPUMCInstLower.cpp
AMDGPUMachineFunction.cpp
AMDGPUSubtarget.cpp
- AMDGPUStructurizeCFG.cpp
AMDGPUTargetMachine.cpp
+ AMDGPUTargetTransformInfo.cpp
AMDGPUISelLowering.cpp
AMDGPUConvertToISA.cpp
AMDGPUInstrInfo.cpp
AMDGPURegisterInfo.cpp
+ R600ClauseMergePass.cpp
R600ControlFlowFinalizer.cpp
R600EmitClauseMarkers.cpp
R600ExpandSpecialInstrs.cpp
@@ -41,18 +35,22 @@ add_llvm_target(R600CodeGen
R600ISelLowering.cpp
R600MachineFunctionInfo.cpp
R600MachineScheduler.cpp
+ R600OptimizeVectorRegisters.cpp
R600Packetizer.cpp
R600RegisterInfo.cpp
+ R600TextureIntrinsicsReplacer.cpp
SIAnnotateControlFlow.cpp
+ SIFixSGPRCopies.cpp
SIInsertWaits.cpp
SIInstrInfo.cpp
SIISelLowering.cpp
SILowerControlFlow.cpp
SIMachineFunctionInfo.cpp
SIRegisterInfo.cpp
+ SITypeRewriter.cpp
)
-add_dependencies(LLVMR600CodeGen intrinsics_gen)
+add_dependencies(LLVMR600CodeGen AMDGPUCommonTableGen intrinsics_gen)
add_subdirectory(InstPrinter)
add_subdirectory(TargetInfo)
diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
index 303cdf2d3583..99e1377a6585 100644
--- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
+++ b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
@@ -10,8 +10,8 @@
#include "AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
-#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
using namespace llvm;
@@ -23,6 +23,63 @@ void AMDGPUInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
printAnnotation(OS, Annot);
}
+void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O) {
+ switch (reg) {
+ case AMDGPU::VCC:
+ O << "vcc";
+ return;
+ case AMDGPU::SCC:
+ O << "scc";
+ return;
+ case AMDGPU::EXEC:
+ O << "exec";
+ return;
+ case AMDGPU::M0:
+ O << "m0";
+ return;
+ default:
+ break;
+ }
+
+  // It seems there's no way to use SIRegisterInfo here, and dealing with the
+  // giant enum of all the different shifted sets of registers is pretty
+  // unmanageable, so parse the name and reformat it to be prettier.
+ StringRef Name(getRegisterName(reg));
+
+ std::pair<StringRef, StringRef> Split = Name.split('_');
+ StringRef SubRegName = Split.first;
+ StringRef Rest = Split.second;
+
+ if (SubRegName.size() <= 4) { // Must at least be as long as "SGPR"/"VGPR".
+ O << Name;
+ return;
+ }
+
+ unsigned RegIndex;
+ StringRef RegIndexStr = SubRegName.drop_front(4);
+
+ if (RegIndexStr.getAsInteger(10, RegIndex)) {
+ O << Name;
+ return;
+ }
+
+ if (SubRegName.front() == 'V')
+ O << 'v';
+ else if (SubRegName.front() == 'S')
+ O << 's';
+ else {
+ O << Name;
+ return;
+ }
+
+ if (Rest.empty()) // Only 1 32-bit register
+ O << RegIndex;
+ else {
+ unsigned NumReg = Rest.count('_') + 2;
+ O << '[' << RegIndex << ':' << (RegIndex + NumReg - 1) << ']';
+ }
+}
+
void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
@@ -30,8 +87,12 @@ void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
if (Op.isReg()) {
switch (Op.getReg()) {
// This is the default predicate state, so we don't need to print it.
- case AMDGPU::PRED_SEL_OFF: break;
- default: O << getRegisterName(Op.getReg()); break;
+ case AMDGPU::PRED_SEL_OFF:
+ break;
+
+ default:
+ printRegOperand(Op.getReg(), O);
+ break;
}
} else if (Op.isImm()) {
O << Op.getImm();
@@ -102,7 +163,7 @@ void AMDGPUInstPrinter::printLiteral(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printLast(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- printIfSet(MI, OpNo, O.indent(20 - O.GetNumBytesInBuffer()), "*", " ");
+ printIfSet(MI, OpNo, O.indent(25 - O.GetNumBytesInBuffer()), "*", " ");
}
void AMDGPUInstPrinter::printNeg(const MCInst *MI, unsigned OpNo,
@@ -178,13 +239,13 @@ void AMDGPUInstPrinter::printBankSwizzle(const MCInst *MI, unsigned OpNo,
int BankSwizzle = MI->getOperand(OpNo).getImm();
switch (BankSwizzle) {
case 1:
- O << "BS:VEC_021";
+ O << "BS:VEC_021/SCL_122";
break;
case 2:
- O << "BS:VEC_120";
+ O << "BS:VEC_120/SCL_212";
break;
case 3:
- O << "BS:VEC_102";
+ O << "BS:VEC_102/SCL_221";
break;
case 4:
O << "BS:VEC_201";
@@ -198,6 +259,51 @@ void AMDGPUInstPrinter::printBankSwizzle(const MCInst *MI, unsigned OpNo,
return;
}
+void AMDGPUInstPrinter::printRSel(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned Sel = MI->getOperand(OpNo).getImm();
+ switch (Sel) {
+ case 0:
+ O << "X";
+ break;
+ case 1:
+ O << "Y";
+ break;
+ case 2:
+ O << "Z";
+ break;
+ case 3:
+ O << "W";
+ break;
+ case 4:
+ O << "0";
+ break;
+ case 5:
+ O << "1";
+ break;
+ case 7:
+ O << "_";
+ break;
+ default:
+ break;
+ }
+}
+
+void AMDGPUInstPrinter::printCT(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned CT = MI->getOperand(OpNo).getImm();
+ switch (CT) {
+ case 0:
+ O << "U";
+ break;
+ case 1:
+ O << "N";
+ break;
+ default:
+ break;
+ }
+}
+
void AMDGPUInstPrinter::printKCache(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
int KCacheMode = MI->getOperand(OpNo).getImm();
@@ -210,4 +316,21 @@ void AMDGPUInstPrinter::printKCache(const MCInst *MI, unsigned OpNo,
}
}
+void AMDGPUInstPrinter::printWaitFlag(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+  // Note: Mask values are taken from SIInsertWaits.cpp and not from the ISA docs.
+  // The bit usage in SIInsertWaits.cpp does not match the ISA description, but it
+  // works, so the docs may contain a misprint.
+ unsigned SImm16 = MI->getOperand(OpNo).getImm();
+ unsigned Vmcnt = SImm16 & 0xF;
+ unsigned Expcnt = (SImm16 >> 4) & 0xF;
+ unsigned Lgkmcnt = (SImm16 >> 8) & 0xF;
+ if (Vmcnt != 0xF)
+ O << "vmcnt(" << Vmcnt << ") ";
+ if (Expcnt != 0x7)
+ O << "expcnt(" << Expcnt << ") ";
+ if (Lgkmcnt != 0x7)
+ O << "lgkmcnt(" << Lgkmcnt << ")";
+}
+
#include "AMDGPUGenAsmWriter.inc"
diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
index c6fd053f6dcd..77af9425c5e5 100644
--- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
+++ b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
@@ -32,6 +32,7 @@ public:
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
private:
+ void printRegOperand(unsigned RegNo, raw_ostream &O);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printInterpSlot(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printMemOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
@@ -49,7 +50,10 @@ private:
void printWrite(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printBankSwizzle(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printRSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printCT(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printKCache(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printWaitFlag(const MCInst *MI, unsigned OpNo, raw_ostream &O);
};
} // End namespace llvm
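
printWaitFlag, declared above, breaks the s_waitcnt immediate into three counter fields and prints only those not at their "no wait" value. A self-contained sketch of that decoding, using the same field positions and defaults as the code in the .cpp hunk earlier:

    #include <cstdio>

    // Decode the SImm16 operand of s_waitcnt the same way printWaitFlag does:
    // vmcnt is the low 4 bits, expcnt the next 4, lgkmcnt the next 4.
    void printWaitCounts(unsigned SImm16) {
      unsigned Vmcnt   = SImm16 & 0xF;
      unsigned Expcnt  = (SImm16 >> 4) & 0xF;
      unsigned Lgkmcnt = (SImm16 >> 8) & 0xF;
      if (Vmcnt != 0xF)
        std::printf("vmcnt(%u) ", Vmcnt);
      if (Expcnt != 0x7)
        std::printf("expcnt(%u) ", Expcnt);
      if (Lgkmcnt != 0x7)
        std::printf("lgkmcnt(%u)", Lgkmcnt);
      std::printf("\n");
    }
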
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
index a3397f3a4204..29d0acf9ea48 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -82,6 +82,8 @@ void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//
+namespace {
+
class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
public:
ELFAMDGPUAsmBackend(const Target &T) : AMDGPUAsmBackend(T) { }
@@ -91,7 +93,11 @@ public:
}
};
-MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T, StringRef TT,
+} // end anonymous namespace
+
+MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
StringRef CPU) {
return new ELFAMDGPUAsmBackend(T);
}
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
index 2aae26aa12dc..4a8e1b0b2d86 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
@@ -11,7 +11,7 @@
#include "AMDGPUMCAsmInfo.h"
using namespace llvm;
-AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
+AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfo() {
HasSingleParameterDotFile = false;
WeakDefDirective = 0;
//===------------------------------------------------------------------===//
@@ -21,7 +21,6 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
HasStaticCtorDtorReferenceInStaticMode = false;
LinkerRequiresNonEmptyDwarfLines = true;
MaxInstLength = 16;
- PCSymbol = "$";
SeparatorString = "\n";
CommentColumn = 40;
CommentString = ";";
@@ -32,9 +31,6 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
InlineAsmStart = ";#ASMSTART";
InlineAsmEnd = ";#ASMEND";
AssemblerDialect = 0;
- AllowQuotesInName = false;
- AllowNameToStartWithDigit = false;
- AllowPeriodsInName = false;
//===--- Data Emission Directives -------------------------------------===//
ZeroDirective = ".zero";
@@ -56,13 +52,11 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
//===--- Global Variable Emission Directives --------------------------===//
GlobalDirective = ".global";
- ExternDirective = ".extern";
HasSetDirective = false;
HasAggressiveSymbolFolding = true;
COMMDirectiveAlignmentIsInBytes = false;
HasDotTypeDotSizeDirective = false;
HasNoDeadStrip = true;
- HasSymbolResolver = false;
WeakRefDirective = ".weakref\t";
LinkOnceDirective = 0;
//===--- Dwarf Emission Directives -----------------------------------===//
@@ -70,11 +64,6 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
SupportsDebugInformation = true;
}
-const char*
-AMDGPUMCAsmInfo::getDataASDirective(unsigned int Size, unsigned int AS) const {
- return 0;
-}
-
const MCSection*
AMDGPUMCAsmInfo::getNonexecutableStackSection(MCContext &CTX) const {
return 0;
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
index 3ad0fa6824ab..22afd636aba2 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
@@ -17,13 +17,11 @@
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
-class Target;
class StringRef;
class AMDGPUMCAsmInfo : public MCAsmInfo {
public:
- explicit AMDGPUMCAsmInfo(const Target &T, StringRef &TT);
- const char* getDataASDirective(unsigned int Size, unsigned int AS) const;
+ explicit AMDGPUMCAsmInfo(StringRef &TT);
const MCSection* getNonexecutableStackSection(MCContext &CTX) const;
};
} // namespace llvm
diff --git a/lib/Target/MBlaze/MBlazeSelectionDAGInfo.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
index 6a115b22bd4a..521b3b39bba2 100644
--- a/lib/Target/MBlaze/MBlazeSelectionDAGInfo.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
@@ -1,4 +1,4 @@
-//===-- MBlazeSelectionDAGInfo.cpp - MBlaze SelectionDAG Info -------------===//
+//===-- AMDGPUCodeEmitter.cpp - AMDGPU Code Emitter interface -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,17 +7,15 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the MBlazeSelectionDAGInfo class.
+/// \file
+/// \brief CodeEmitter interface for R600 and SI codegen.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mblaze-selectiondag-info"
-#include "MBlazeTargetMachine.h"
+#include "AMDGPUMCCodeEmitter.h"
+
using namespace llvm;
-MBlazeSelectionDAGInfo::MBlazeSelectionDAGInfo(const MBlazeTargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
+// pin vtable to this file
+void AMDGPUMCCodeEmitter::anchor() {}
-MBlazeSelectionDAGInfo::~MBlazeSelectionDAGInfo() {
-}
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
index cd3a7ce65aa5..d8cf64adb91b 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
@@ -24,6 +24,7 @@ class MCInst;
class MCOperand;
class AMDGPUMCCodeEmitter : public MCCodeEmitter {
+ virtual void anchor();
public:
uint64_t getBinaryCodeForInstr(const MCInst &MI,
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
index 61d70bb34292..a1bec2827646 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
@@ -88,7 +88,7 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
MCCodeEmitter *_Emitter,
bool RelaxAll,
bool NoExecStack) {
- return createELFStreamer(Ctx, MAB, _OS, _Emitter, false, false);
+ return createELFStreamer(Ctx, 0, MAB, _OS, _Emitter, false, false);
}
extern "C" void LLVMInitializeR600TargetMC() {
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
index abb032045bef..f6b3376da32c 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
@@ -40,8 +40,8 @@ MCCodeEmitter *createSIMCCodeEmitter(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
MCContext &Ctx);
-MCAsmBackend *createAMDGPUAsmBackend(const Target &T, StringRef TT,
- StringRef CPU);
+MCAsmBackend *createAMDGPUAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
MCObjectWriter *createAMDGPUELFObjectWriter(raw_ostream &OS);
} // End llvm namespace
diff --git a/lib/Target/R600/MCTargetDesc/CMakeLists.txt b/lib/Target/R600/MCTargetDesc/CMakeLists.txt
index 3ccdf420601d..98f6925d9fb2 100644
--- a/lib/Target/R600/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/R600/MCTargetDesc/CMakeLists.txt
@@ -2,6 +2,7 @@
add_llvm_library(LLVMR600Desc
AMDGPUAsmBackend.cpp
AMDGPUELFObjectWriter.cpp
+ AMDGPUMCCodeEmitter.cpp
AMDGPUMCTargetDesc.cpp
AMDGPUMCAsmInfo.cpp
R600MCCodeEmitter.cpp
diff --git a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
index cb4cf0ce3886..dd8df65d66a5 100644
--- a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
+++ b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
@@ -24,7 +24,6 @@
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/raw_ostream.h"
-#include <stdio.h>
using namespace llvm;
@@ -81,21 +80,6 @@ enum FCInstr {
FC_CONTINUE
};
-enum TextureTypes {
- TEXTURE_1D = 1,
- TEXTURE_2D,
- TEXTURE_3D,
- TEXTURE_CUBE,
- TEXTURE_RECT,
- TEXTURE_SHADOW1D,
- TEXTURE_SHADOW2D,
- TEXTURE_SHADOWRECT,
- TEXTURE_1D_ARRAY,
- TEXTURE_2D_ARRAY,
- TEXTURE_SHADOW1D_ARRAY,
- TEXTURE_SHADOW2D_ARRAY
-};
-
MCCodeEmitter *llvm::createR600MCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
@@ -114,69 +98,37 @@ void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
} else if (IS_VTX(Desc)) {
uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);
uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
- InstWord2 |= 1 << 19;
+ if (!(STI.getFeatureBits() & AMDGPU::FeatureCaymanISA)) {
+ InstWord2 |= 1 << 19; // Mega-Fetch bit
+ }
Emit(InstWord01, OS);
Emit(InstWord2, OS);
- Emit((u_int32_t) 0, OS);
+ Emit((uint32_t) 0, OS);
} else if (IS_TEX(Desc)) {
- unsigned Opcode = MI.getOpcode();
- bool HasOffsets = (Opcode == AMDGPU::TEX_LD);
- unsigned OpOffset = HasOffsets ? 3 : 0;
- int64_t Sampler = MI.getOperand(OpOffset + 3).getImm();
- int64_t TextureType = MI.getOperand(OpOffset + 4).getImm();
-
- uint32_t SrcSelect[4] = {0, 1, 2, 3};
- uint32_t Offsets[3] = {0, 0, 0};
- uint64_t CoordType[4] = {1, 1, 1, 1};
-
- if (HasOffsets)
- for (unsigned i = 0; i < 3; i++) {
- int SignedOffset = MI.getOperand(i + 2).getImm();
- Offsets[i] = (SignedOffset & 0x1F);
- }
-
- if (TextureType == TEXTURE_RECT ||
- TextureType == TEXTURE_SHADOWRECT) {
- CoordType[ELEMENT_X] = 0;
- CoordType[ELEMENT_Y] = 0;
- }
-
- if (TextureType == TEXTURE_1D_ARRAY ||
- TextureType == TEXTURE_SHADOW1D_ARRAY) {
- if (Opcode == AMDGPU::TEX_SAMPLE_C_L ||
- Opcode == AMDGPU::TEX_SAMPLE_C_LB) {
- CoordType[ELEMENT_Y] = 0;
- } else {
- CoordType[ELEMENT_Z] = 0;
- SrcSelect[ELEMENT_Z] = ELEMENT_Y;
- }
- } else if (TextureType == TEXTURE_2D_ARRAY ||
- TextureType == TEXTURE_SHADOW2D_ARRAY) {
- CoordType[ELEMENT_Z] = 0;
- }
-
-
- if ((TextureType == TEXTURE_SHADOW1D ||
- TextureType == TEXTURE_SHADOW2D ||
- TextureType == TEXTURE_SHADOWRECT ||
- TextureType == TEXTURE_SHADOW1D_ARRAY) &&
- Opcode != AMDGPU::TEX_SAMPLE_C_L &&
- Opcode != AMDGPU::TEX_SAMPLE_C_LB) {
- SrcSelect[ELEMENT_W] = ELEMENT_Z;
- }
-
- uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups) |
- CoordType[ELEMENT_X] << 60 | CoordType[ELEMENT_Y] << 61 |
- CoordType[ELEMENT_Z] << 62 | CoordType[ELEMENT_W] << 63;
- uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 |
- SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 |
- SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 |
- Offsets[2] << 10;
-
- Emit(Word01, OS);
- Emit(Word2, OS);
- Emit((u_int32_t) 0, OS);
+ int64_t Sampler = MI.getOperand(14).getImm();
+
+ int64_t SrcSelect[4] = {
+ MI.getOperand(2).getImm(),
+ MI.getOperand(3).getImm(),
+ MI.getOperand(4).getImm(),
+ MI.getOperand(5).getImm()
+ };
+ int64_t Offsets[3] = {
+ MI.getOperand(6).getImm() & 0x1F,
+ MI.getOperand(7).getImm() & 0x1F,
+ MI.getOperand(8).getImm() & 0x1F
+ };
+
+ uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups);
+ uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 |
+ SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 |
+ SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 |
+ Offsets[2] << 10;
+
+ Emit(Word01, OS);
+ Emit(Word2, OS);
+ Emit((uint32_t) 0, OS);
} else {
uint64_t Inst = getBinaryCodeForInstr(MI, Fixups);
if ((STI.getFeatureBits() & AMDGPU::FeatureR600ALUInst) &&
diff --git a/lib/Target/R600/Processors.td b/lib/Target/R600/Processors.td
index 0cbe919d8104..ee190e4aed7b 100644
--- a/lib/Target/R600/Processors.td
+++ b/lib/Target/R600/Processors.td
@@ -10,39 +10,45 @@
class Proc<string Name, ProcessorItineraries itin, list<SubtargetFeature> Features>
: Processor<Name, itin, Features>;
def : Proc<"", R600_VLIW5_Itin,
- [FeatureR600ALUInst, FeatureVertexCache]>;
+ [FeatureR600, FeatureVertexCache]>;
def : Proc<"r600", R600_VLIW5_Itin,
- [FeatureR600ALUInst , FeatureVertexCache]>;
+ [FeatureR600 , FeatureVertexCache]>;
def : Proc<"rs880", R600_VLIW5_Itin,
- [FeatureR600ALUInst]>;
+ [FeatureR600]>;
def : Proc<"rv670", R600_VLIW5_Itin,
- [FeatureR600ALUInst, FeatureFP64, FeatureVertexCache]>;
+ [FeatureR600, FeatureFP64, FeatureVertexCache]>;
def : Proc<"rv710", R600_VLIW5_Itin,
- [FeatureVertexCache]>;
+ [FeatureR700, FeatureVertexCache]>;
def : Proc<"rv730", R600_VLIW5_Itin,
- [FeatureVertexCache]>;
+ [FeatureR700, FeatureVertexCache]>;
def : Proc<"rv770", R600_VLIW5_Itin,
- [FeatureFP64, FeatureVertexCache]>;
+ [FeatureR700, FeatureFP64, FeatureVertexCache]>;
def : Proc<"cedar", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureVertexCache]>;
def : Proc<"redwood", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureVertexCache]>;
def : Proc<"sumo", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages]>;
+ [FeatureEvergreen]>;
def : Proc<"juniper", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureVertexCache]>;
def : Proc<"cypress", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureFP64, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureFP64, FeatureVertexCache]>;
def : Proc<"barts", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureVertexCache]>;
+ [FeatureNorthernIslands, FeatureVertexCache]>;
def : Proc<"turks", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureVertexCache]>;
+ [FeatureNorthernIslands, FeatureVertexCache]>;
def : Proc<"caicos", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages]>;
+ [FeatureNorthernIslands]>;
def : Proc<"cayman", R600_VLIW4_Itin,
- [FeatureByteAddress, FeatureImages, FeatureFP64]>;def : Proc<"SI", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
-def : Proc<"tahiti", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
-def : Proc<"pitcairn", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
-def : Proc<"verde", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
-def : Proc<"oland", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
-def : Proc<"hainan", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
+ [FeatureNorthernIslands, FeatureFP64, FeatureCaymanISA]>;
+
+def : Proc<"SI", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"tahiti", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"pitcairn", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"verde", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"oland", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"hainan", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"bonaire", SI_Itin, [FeatureSeaIslands]>;
+def : Proc<"kabini", SI_Itin, [FeatureSeaIslands]>;
+def : Proc<"kaveri", SI_Itin, [FeatureSeaIslands]>;
+def : Proc<"hawaii", SI_Itin, [FeatureSeaIslands]>;
diff --git a/lib/Target/R600/R600ClauseMergePass.cpp b/lib/Target/R600/R600ClauseMergePass.cpp
new file mode 100644
index 000000000000..33d2ca32577d
--- /dev/null
+++ b/lib/Target/R600/R600ClauseMergePass.cpp
@@ -0,0 +1,204 @@
+//===-- R600ClauseMergePass - Merge consecutive CF_ALU -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// R600EmitClauseMarkers emits CFAlu instructions in a conservative manner.
+/// This pass merges consecutive CFAlus where applicable.
+/// It needs to be called after IfCvt for best results.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "r600mergeclause"
+#include "AMDGPU.h"
+#include "R600Defines.h"
+#include "R600InstrInfo.h"
+#include "R600MachineFunctionInfo.h"
+#include "R600RegisterInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+
+static bool isCFAlu(const MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case AMDGPU::CF_ALU:
+ case AMDGPU::CF_ALU_PUSH_BEFORE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+class R600ClauseMergePass : public MachineFunctionPass {
+
+private:
+ static char ID;
+ const R600InstrInfo *TII;
+
+ unsigned getCFAluSize(const MachineInstr *MI) const;
+ bool isCFAluEnabled(const MachineInstr *MI) const;
+
+  /// The IfCvt pass can generate "disabled" ALU clause markers that need to
+  /// be removed, with their content folded into the previous ALU clause.
+  /// This function parses the instructions following CFAlu until it finds a
+  /// disabled CFAlu (merging its content) or an enabled CFAlu.
+ void cleanPotentialDisabledCFAlu(MachineInstr *CFAlu) const;
+
+  /// Check whether LatrCFAlu can be merged into RootCFAlu and, if so,
+  /// perform the merge.
+ bool mergeIfPossible(MachineInstr *RootCFAlu, const MachineInstr *LatrCFAlu)
+ const;
+
+public:
+ R600ClauseMergePass(TargetMachine &tm) : MachineFunctionPass(ID) { }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const;
+};
+
+char R600ClauseMergePass::ID = 0;
+
+unsigned R600ClauseMergePass::getCFAluSize(const MachineInstr *MI) const {
+ assert(isCFAlu(MI));
+ return MI->getOperand(
+ TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::COUNT)).getImm();
+}
+
+bool R600ClauseMergePass::isCFAluEnabled(const MachineInstr *MI) const {
+ assert(isCFAlu(MI));
+ return MI->getOperand(
+ TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::Enabled)).getImm();
+}
+
+void R600ClauseMergePass::cleanPotentialDisabledCFAlu(MachineInstr *CFAlu)
+ const {
+ int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
+ MachineBasicBlock::iterator I = CFAlu, E = CFAlu->getParent()->end();
+ I++;
+ do {
+    while (I != E && !isCFAlu(I))
+ I++;
+ if (I == E)
+ return;
+ MachineInstr *MI = I++;
+ if (isCFAluEnabled(MI))
+ break;
+ CFAlu->getOperand(CntIdx).setImm(getCFAluSize(CFAlu) + getCFAluSize(MI));
+ MI->eraseFromParent();
+ } while (I != E);
+}
+
+bool R600ClauseMergePass::mergeIfPossible(MachineInstr *RootCFAlu,
+ const MachineInstr *LatrCFAlu) const {
+ assert(isCFAlu(RootCFAlu) && isCFAlu(LatrCFAlu));
+ int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
+ unsigned RootInstCount = getCFAluSize(RootCFAlu),
+ LaterInstCount = getCFAluSize(LatrCFAlu);
+ unsigned CumuledInsts = RootInstCount + LaterInstCount;
+ if (CumuledInsts >= TII->getMaxAlusPerClause()) {
+ DEBUG(dbgs() << "Excess inst counts\n");
+ return false;
+ }
+ if (RootCFAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
+ return false;
+  // Is KCache Bank 0 compatible?
+ int Mode0Idx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_MODE0);
+ int KBank0Idx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK0);
+ int KBank0LineIdx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR0);
+ if (LatrCFAlu->getOperand(Mode0Idx).getImm() &&
+ RootCFAlu->getOperand(Mode0Idx).getImm() &&
+ (LatrCFAlu->getOperand(KBank0Idx).getImm() !=
+ RootCFAlu->getOperand(KBank0Idx).getImm() ||
+ LatrCFAlu->getOperand(KBank0LineIdx).getImm() !=
+ RootCFAlu->getOperand(KBank0LineIdx).getImm())) {
+ DEBUG(dbgs() << "Wrong KC0\n");
+ return false;
+ }
+  // Is KCache Bank 1 compatible?
+ int Mode1Idx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_MODE1);
+ int KBank1Idx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK1);
+ int KBank1LineIdx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR1);
+ if (LatrCFAlu->getOperand(Mode1Idx).getImm() &&
+ RootCFAlu->getOperand(Mode1Idx).getImm() &&
+ (LatrCFAlu->getOperand(KBank1Idx).getImm() !=
+ RootCFAlu->getOperand(KBank1Idx).getImm() ||
+ LatrCFAlu->getOperand(KBank1LineIdx).getImm() !=
+ RootCFAlu->getOperand(KBank1LineIdx).getImm())) {
+    DEBUG(dbgs() << "Wrong KC1\n");
+ return false;
+ }
+ if (LatrCFAlu->getOperand(Mode0Idx).getImm()) {
+ RootCFAlu->getOperand(Mode0Idx).setImm(
+ LatrCFAlu->getOperand(Mode0Idx).getImm());
+ RootCFAlu->getOperand(KBank0Idx).setImm(
+ LatrCFAlu->getOperand(KBank0Idx).getImm());
+ RootCFAlu->getOperand(KBank0LineIdx).setImm(
+ LatrCFAlu->getOperand(KBank0LineIdx).getImm());
+ }
+ if (LatrCFAlu->getOperand(Mode1Idx).getImm()) {
+ RootCFAlu->getOperand(Mode1Idx).setImm(
+ LatrCFAlu->getOperand(Mode1Idx).getImm());
+ RootCFAlu->getOperand(KBank1Idx).setImm(
+ LatrCFAlu->getOperand(KBank1Idx).getImm());
+ RootCFAlu->getOperand(KBank1LineIdx).setImm(
+ LatrCFAlu->getOperand(KBank1LineIdx).getImm());
+ }
+ RootCFAlu->getOperand(CntIdx).setImm(CumuledInsts);
+ RootCFAlu->setDesc(TII->get(LatrCFAlu->getOpcode()));
+ return true;
+}
+
+bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) {
+ TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
+ for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
+ BB != BB_E; ++BB) {
+ MachineBasicBlock &MBB = *BB;
+ MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ MachineBasicBlock::iterator LatestCFAlu = E;
+ while (I != E) {
+ MachineInstr *MI = I++;
+ if ((!TII->canBeConsideredALU(MI) && !isCFAlu(MI)) ||
+ TII->mustBeLastInClause(MI->getOpcode()))
+ LatestCFAlu = E;
+ if (!isCFAlu(MI))
+ continue;
+ cleanPotentialDisabledCFAlu(MI);
+
+ if (LatestCFAlu != E && mergeIfPossible(LatestCFAlu, MI)) {
+ MI->eraseFromParent();
+ } else {
+ assert(MI->getOperand(8).getImm() && "CF ALU instruction disabled");
+ LatestCFAlu = MI;
+ }
+ }
+ }
+ return false;
+}
+
+const char *R600ClauseMergePass::getPassName() const {
+ return "R600 Merge Clause Markers Pass";
+}
+
+} // end anonymous namespace
+
+
+llvm::FunctionPass *llvm::createR600ClauseMergePass(TargetMachine &TM) {
+ return new R600ClauseMergePass(TM);
+}
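As a rough summary of the legality check performed by mergeIfPossible() above: two consecutive CF_ALU markers are only combined when their COUNT operands still fit in one hardware clause and the later marker's KCache bank/line settings do not conflict with the root's. A minimal sketch of the size part of that check (helper name hypothetical):

  // The merged marker covers the instructions of both clauses, so the
  // combined count must stay below TII->getMaxAlusPerClause().
  static bool countsFitInOneClause(unsigned RootCount, unsigned LaterCount,
                                   unsigned MaxAlusPerClause) {
    return RootCount + LaterCount < MaxAlusPerClause;
  }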
diff --git a/lib/Target/R600/R600ControlFlowFinalizer.cpp b/lib/Target/R600/R600ControlFlowFinalizer.cpp
index ffe34144132a..ac3d8f63d57f 100644
--- a/lib/Target/R600/R600ControlFlowFinalizer.cpp
+++ b/lib/Target/R600/R600ControlFlowFinalizer.cpp
@@ -14,8 +14,6 @@
#define DEBUG_TYPE "r600cf"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
@@ -24,8 +22,11 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
-namespace llvm {
+namespace {
class R600ControlFlowFinalizer : public MachineFunctionPass {
@@ -48,7 +49,7 @@ private:
static char ID;
const R600InstrInfo *TII;
- const R600RegisterInfo &TRI;
+ const R600RegisterInfo *TRI;
unsigned MaxFetchInst;
const AMDGPUSubtarget &ST;
@@ -64,7 +65,7 @@ private:
const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
unsigned Opcode = 0;
- bool isEg = (ST.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX);
+ bool isEg = (ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
switch (CFI) {
case CF_TC:
Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
@@ -97,7 +98,7 @@ private:
Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
break;
case CF_END:
- if (ST.device()->getDeviceFlag() == OCL_DEVICE_CAYMAN) {
+ if (ST.hasCaymanISA()) {
Opcode = AMDGPU::CF_END_CM;
break;
}
@@ -109,28 +110,33 @@ private:
}
bool isCompatibleWithClause(const MachineInstr *MI,
- std::set<unsigned> &DstRegs, std::set<unsigned> &SrcRegs) const {
+ std::set<unsigned> &DstRegs) const {
unsigned DstMI, SrcMI;
for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
E = MI->operands_end(); I != E; ++I) {
const MachineOperand &MO = *I;
if (!MO.isReg())
continue;
- if (MO.isDef())
- DstMI = MO.getReg();
+ if (MO.isDef()) {
+ unsigned Reg = MO.getReg();
+ if (AMDGPU::R600_Reg128RegClass.contains(Reg))
+ DstMI = Reg;
+ else
+ DstMI = TRI->getMatchingSuperReg(Reg,
+ TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
+ &AMDGPU::R600_Reg128RegClass);
+ }
if (MO.isUse()) {
unsigned Reg = MO.getReg();
if (AMDGPU::R600_Reg128RegClass.contains(Reg))
SrcMI = Reg;
else
- SrcMI = TRI.getMatchingSuperReg(Reg,
- TRI.getSubRegFromChannel(TRI.getHWRegChan(Reg)),
+ SrcMI = TRI->getMatchingSuperReg(Reg,
+ TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
&AMDGPU::R600_Reg128RegClass);
}
}
- if ((DstRegs.find(SrcMI) == DstRegs.end()) &&
- (SrcRegs.find(DstMI) == SrcRegs.end())) {
- SrcRegs.insert(SrcMI);
+ if ((DstRegs.find(SrcMI) == DstRegs.end())) {
DstRegs.insert(DstMI);
return true;
} else
@@ -144,16 +150,16 @@ private:
std::vector<MachineInstr *> ClauseContent;
unsigned AluInstCount = 0;
bool IsTex = TII->usesTextureCache(ClauseHead);
- std::set<unsigned> DstRegs, SrcRegs;
+ std::set<unsigned> DstRegs;
for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
if (IsTrivialInst(I))
continue;
- if (AluInstCount > MaxFetchInst)
+ if (AluInstCount >= MaxFetchInst)
break;
if ((IsTex && !TII->usesTextureCache(I)) ||
(!IsTex && !TII->usesVertexCache(I)))
break;
- if (!isCompatibleWithClause(I, DstRegs, SrcRegs))
+ if (!isCompatibleWithClause(I, DstRegs))
break;
AluInstCount ++;
ClauseContent.push_back(I);
@@ -166,28 +172,26 @@ private:
}
void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const {
- unsigned LiteralRegs[] = {
+ static const unsigned LiteralRegs[] = {
AMDGPU::ALU_LITERAL_X,
AMDGPU::ALU_LITERAL_Y,
AMDGPU::ALU_LITERAL_Z,
AMDGPU::ALU_LITERAL_W
};
- for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg())
- continue;
- if (MO.getReg() != AMDGPU::ALU_LITERAL_X)
+ const SmallVector<std::pair<MachineOperand *, int64_t>, 3 > Srcs =
+ TII->getSrcs(MI);
+ for (unsigned i = 0, e = Srcs.size(); i < e; ++i) {
+ if (Srcs[i].first->getReg() != AMDGPU::ALU_LITERAL_X)
continue;
- unsigned ImmIdx = TII->getOperandIdx(MI->getOpcode(), R600Operands::IMM);
- int64_t Imm = MI->getOperand(ImmIdx).getImm();
+ int64_t Imm = Srcs[i].second;
std::vector<int64_t>::iterator It =
std::find(Lits.begin(), Lits.end(), Imm);
if (It != Lits.end()) {
unsigned Index = It - Lits.begin();
- MO.setReg(LiteralRegs[Index]);
+ Srcs[i].first->setReg(LiteralRegs[Index]);
} else {
assert(Lits.size() < 4 && "Too many literals in Instruction Group");
- MO.setReg(LiteralRegs[Lits.size()]);
+ Srcs[i].first->setReg(LiteralRegs[Lits.size()]);
Lits.push_back(Imm);
}
}
@@ -252,6 +256,7 @@ private:
ClauseContent.push_back(MILit);
}
}
+ assert(ClauseContent.size() < 128 && "ALU clause is too big");
ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1);
return ClauseFile(ClauseHead, ClauseContent);
}
@@ -272,6 +277,7 @@ private:
void
EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
unsigned &CfCount) {
+ Clause.first->getOperand(0).setImm(0);
CounterPropagateAddr(Clause.first, CfCount);
MachineBasicBlock *BB = Clause.first->getParent();
BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE))
@@ -295,34 +301,35 @@ private:
}
unsigned getHWStackSize(unsigned StackSubEntry, bool hasPush) const {
- switch (ST.device()->getGeneration()) {
- case AMDGPUDeviceInfo::HD4XXX:
+ switch (ST.getGeneration()) {
+ case AMDGPUSubtarget::R600:
+ case AMDGPUSubtarget::R700:
if (hasPush)
StackSubEntry += 2;
break;
- case AMDGPUDeviceInfo::HD5XXX:
+ case AMDGPUSubtarget::EVERGREEN:
if (hasPush)
StackSubEntry ++;
- case AMDGPUDeviceInfo::HD6XXX:
+ case AMDGPUSubtarget::NORTHERN_ISLANDS:
StackSubEntry += 2;
break;
+ default: llvm_unreachable("Not a VLIW4/VLIW5 GPU");
}
return (StackSubEntry + 3)/4; // Need ceil value of StackSubEntry/4
}
public:
R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
- TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
- TRI(TII->getRegisterInfo()),
+ TII (0), TRI(0),
ST(tm.getSubtarget<AMDGPUSubtarget>()) {
const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
- MaxFetchInst = 8;
- else
- MaxFetchInst = 16;
+ MaxFetchInst = ST.getTexVTXClauseSize();
}
virtual bool runOnMachineFunction(MachineFunction &MF) {
+ TII=static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
+ TRI=static_cast<const R600RegisterInfo *>(MF.getTarget().getRegisterInfo());
+
unsigned MaxStack = 0;
unsigned CurrentStack = 0;
bool HasPush = false;
@@ -340,6 +347,9 @@ public:
MaxStack = 1;
}
std::vector<ClauseFile> FetchClauses, AluClauses;
+ std::vector<MachineInstr *> LastAlu(1);
+ std::vector<MachineInstr *> ToPopAfter;
+
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E;) {
if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
@@ -350,6 +360,10 @@ public:
}
MachineBasicBlock::iterator MI = I;
+ if (MI->getOpcode() != AMDGPU::ENDIF)
+ LastAlu.back() = 0;
+ if (MI->getOpcode() == AMDGPU::CF_ALU)
+ LastAlu.back() = MI;
I++;
switch (MI->getOpcode()) {
case AMDGPU::CF_ALU_PUSH_BEFORE:
@@ -359,12 +373,6 @@ public:
case AMDGPU::CF_ALU:
I = MI;
AluClauses.push_back(MakeALUClause(MBB, I));
- case AMDGPU::EG_ExportBuf:
- case AMDGPU::EG_ExportSwz:
- case AMDGPU::R600_ExportBuf:
- case AMDGPU::R600_ExportSwz:
- case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
- case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
DEBUG(dbgs() << CfCount << ":"; MI->dump(););
CfCount++;
break;
@@ -395,6 +403,7 @@ public:
break;
}
case AMDGPU::IF_PREDICATE_SET: {
+ LastAlu.push_back(0);
MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
getHWInstrDesc(CF_JUMP))
.addImm(0)
@@ -412,7 +421,7 @@ public:
MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
getHWInstrDesc(CF_ELSE))
.addImm(0)
- .addImm(1);
+ .addImm(0);
DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
IfThenElseStack.push_back(MIb);
MI->eraseFromParent();
@@ -421,31 +430,31 @@ public:
}
case AMDGPU::ENDIF: {
CurrentStack--;
+ if (LastAlu.back()) {
+ ToPopAfter.push_back(LastAlu.back());
+ } else {
+ MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
+ getHWInstrDesc(CF_POP))
+ .addImm(CfCount + 1)
+ .addImm(1);
+ (void)MIb;
+ DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
+ CfCount++;
+ }
+
MachineInstr *IfOrElseInst = IfThenElseStack.back();
IfThenElseStack.pop_back();
- CounterPropagateAddr(IfOrElseInst, CfCount + 1);
- MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
- getHWInstrDesc(CF_POP))
- .addImm(CfCount + 1)
- .addImm(1);
- (void)MIb;
- DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
+ CounterPropagateAddr(IfOrElseInst, CfCount);
+ IfOrElseInst->getOperand(1).setImm(1);
+ LastAlu.pop_back();
MI->eraseFromParent();
- CfCount++;
break;
}
- case AMDGPU::PREDICATED_BREAK: {
- CurrentStack--;
- CfCount += 3;
- BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP))
- .addImm(CfCount)
- .addImm(1);
+ case AMDGPU::BREAK: {
+ CfCount ++;
MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
getHWInstrDesc(CF_LOOP_BREAK))
.addImm(0);
- BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_POP))
- .addImm(CfCount)
- .addImm(1);
LoopStack.back().second.insert(MIb);
MI->eraseFromParent();
break;
@@ -473,9 +482,28 @@ public:
EmitALUClause(I, AluClauses[i], CfCount);
}
default:
+ if (TII->isExport(MI->getOpcode())) {
+ DEBUG(dbgs() << CfCount << ":"; MI->dump(););
+ CfCount++;
+ }
break;
}
}
+ for (unsigned i = 0, e = ToPopAfter.size(); i < e; ++i) {
+ MachineInstr *Alu = ToPopAfter[i];
+ BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu),
+ TII->get(AMDGPU::CF_ALU_POP_AFTER))
+ .addImm(Alu->getOperand(0).getImm())
+ .addImm(Alu->getOperand(1).getImm())
+ .addImm(Alu->getOperand(2).getImm())
+ .addImm(Alu->getOperand(3).getImm())
+ .addImm(Alu->getOperand(4).getImm())
+ .addImm(Alu->getOperand(5).getImm())
+ .addImm(Alu->getOperand(6).getImm())
+ .addImm(Alu->getOperand(7).getImm())
+ .addImm(Alu->getOperand(8).getImm());
+ Alu->eraseFromParent();
+ }
MFI->StackSize = getHWStackSize(MaxStack, HasPush);
}
@@ -489,7 +517,7 @@ public:
char R600ControlFlowFinalizer::ID = 0;
-}
+} // end anonymous namespace
llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
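A side note on the getHWStackSize() hunk above: stack sub-entries are packed four per hardware stack entry, so the returned value is the ceiling of StackSubEntry / 4 (for example, 5 sub-entries round up to 2 entries). A minimal sketch of that rounding (helper name hypothetical):

  static unsigned subEntriesToStackEntries(unsigned StackSubEntry) {
    // Integer ceiling division by 4.
    return (StackSubEntry + 3) / 4;
  }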
diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h
index 36bfb18e6782..1781f2aee167 100644
--- a/lib/Target/R600/R600Defines.h
+++ b/lib/Target/R600/R600Defines.h
@@ -41,7 +41,12 @@ namespace R600_InstFlag {
OP1 = (1 << 10),
OP2 = (1 << 11),
VTX_INST = (1 << 12),
- TEX_INST = (1 << 13)
+ TEX_INST = (1 << 13),
+ ALU_INST = (1 << 14),
+ LDS_1A = (1 << 15),
+ LDS_1A1D = (1 << 16),
+ IS_EXPORT = (1 << 17),
+ LDS_1A2D = (1 << 18)
};
}
@@ -57,47 +62,82 @@ namespace R600_InstFlag {
#define IS_VTX(desc) ((desc).TSFlags & R600_InstFlag::VTX_INST)
#define IS_TEX(desc) ((desc).TSFlags & R600_InstFlag::TEX_INST)
-namespace R600Operands {
- enum Ops {
- DST,
- UPDATE_EXEC_MASK,
- UPDATE_PREDICATE,
- WRITE,
- OMOD,
- DST_REL,
- CLAMP,
- SRC0,
- SRC0_NEG,
- SRC0_REL,
- SRC0_ABS,
- SRC0_SEL,
- SRC1,
- SRC1_NEG,
- SRC1_REL,
- SRC1_ABS,
- SRC1_SEL,
- SRC2,
- SRC2_NEG,
- SRC2_REL,
- SRC2_SEL,
- LAST,
- PRED_SEL,
- IMM,
- BANK_SWIZZLE,
- COUNT
+namespace OpName {
+
+ enum VecOps {
+ UPDATE_EXEC_MASK_X,
+ UPDATE_PREDICATE_X,
+ WRITE_X,
+ OMOD_X,
+ DST_REL_X,
+ CLAMP_X,
+ SRC0_X,
+ SRC0_NEG_X,
+ SRC0_REL_X,
+ SRC0_ABS_X,
+ SRC0_SEL_X,
+ SRC1_X,
+ SRC1_NEG_X,
+ SRC1_REL_X,
+ SRC1_ABS_X,
+ SRC1_SEL_X,
+ PRED_SEL_X,
+ UPDATE_EXEC_MASK_Y,
+ UPDATE_PREDICATE_Y,
+ WRITE_Y,
+ OMOD_Y,
+ DST_REL_Y,
+ CLAMP_Y,
+ SRC0_Y,
+ SRC0_NEG_Y,
+ SRC0_REL_Y,
+ SRC0_ABS_Y,
+ SRC0_SEL_Y,
+ SRC1_Y,
+ SRC1_NEG_Y,
+ SRC1_REL_Y,
+ SRC1_ABS_Y,
+ SRC1_SEL_Y,
+ PRED_SEL_Y,
+ UPDATE_EXEC_MASK_Z,
+ UPDATE_PREDICATE_Z,
+ WRITE_Z,
+ OMOD_Z,
+ DST_REL_Z,
+ CLAMP_Z,
+ SRC0_Z,
+ SRC0_NEG_Z,
+ SRC0_REL_Z,
+ SRC0_ABS_Z,
+ SRC0_SEL_Z,
+ SRC1_Z,
+ SRC1_NEG_Z,
+ SRC1_REL_Z,
+ SRC1_ABS_Z,
+ SRC1_SEL_Z,
+ PRED_SEL_Z,
+ UPDATE_EXEC_MASK_W,
+ UPDATE_PREDICATE_W,
+ WRITE_W,
+ OMOD_W,
+ DST_REL_W,
+ CLAMP_W,
+ SRC0_W,
+ SRC0_NEG_W,
+ SRC0_REL_W,
+ SRC0_ABS_W,
+ SRC0_SEL_W,
+ SRC1_W,
+ SRC1_NEG_W,
+ SRC1_REL_W,
+ SRC1_ABS_W,
+ SRC1_SEL_W,
+ PRED_SEL_W,
+ IMM_0,
+ IMM_1,
+ VEC_COUNT
};
- const static int ALUOpTable[3][R600Operands::COUNT] = {
-// W C S S S S S S S S S S S
-// R O D L S R R R R S R R R R S R R R L P
-// D U I M R A R C C C C R C C C C R C C C A R I
-// S E U T O E M C 0 0 0 0 C 1 1 1 1 C 2 2 2 S E M B
-// T M P E D L P 0 N R A S 1 N R A S 2 N R S T D M S
- {0,-1,-1, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,-1,-1,-1,10,11,12,13},
- {0, 1, 2, 3, 4 ,5 ,6 ,7, 8, 9,10,11,12,13,14,15,16,-1,-1,-1,-1,17,18,19,20},
- {0,-1,-1,-1,-1, 1, 2, 3, 4, 5,-1, 6, 7, 8, 9,-1,10,11,12,13,14,15,16,17,18}
- };
-
}
//===----------------------------------------------------------------------===//
@@ -126,4 +166,6 @@ namespace R600Operands {
#define R_028878_SQ_PGM_RESOURCES_GS 0x028878
#define R_0288D4_SQ_PGM_RESOURCES_LS 0x0288d4
+#define R_0288E8_SQ_LDS_ALLOC 0x0288E8
+
#endif // R600DEFINES_H_
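The new R600_InstFlag bits added above are meant to be tested through TSFlags exactly like the existing VTX_INST and TEX_INST bits; hypothetical companion macros in the same style as IS_VTX/IS_TEX (names assumed for illustration only):

  #define IS_ALU(desc)    ((desc).TSFlags & R600_InstFlag::ALU_INST)
  #define IS_EXPORT(desc) ((desc).TSFlags & R600_InstFlag::IS_EXPORT)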
diff --git a/lib/Target/R600/R600EmitClauseMarkers.cpp b/lib/Target/R600/R600EmitClauseMarkers.cpp
index 3fdc678b9ef1..1bbfd2b68f3d 100644
--- a/lib/Target/R600/R600EmitClauseMarkers.cpp
+++ b/lib/Target/R600/R600EmitClauseMarkers.cpp
@@ -23,21 +23,23 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-namespace llvm {
+using namespace llvm;
+
+namespace {
class R600EmitClauseMarkersPass : public MachineFunctionPass {
private:
static char ID;
const R600InstrInfo *TII;
+ int Address;
unsigned OccupiedDwords(MachineInstr *MI) const {
switch (MI->getOpcode()) {
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
- case AMDGPU::DOT4_eg_pseudo:
- case AMDGPU::DOT4_r600_pseudo:
+ case AMDGPU::DOT_4:
return 4;
case AMDGPU::KILL:
return 0;
@@ -45,6 +47,11 @@ private:
break;
}
+ // These will be expanded to two ALU instructions in the
+ // ExpandSpecialInstructions pass.
+ if (TII->isLDSRetInstr(MI->getOpcode()))
+ return 2;
+
if(TII->isVector(*MI) ||
TII->isCubeOp(MI->getOpcode()) ||
TII->isReductionOp(MI->getOpcode()))
@@ -71,8 +78,7 @@ private:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
case AMDGPU::COPY:
- case AMDGPU::DOT4_eg_pseudo:
- case AMDGPU::DOT4_r600_pseudo:
+ case AMDGPU::DOT_4:
return true;
default:
return false;
@@ -83,37 +89,13 @@ private:
switch (MI->getOpcode()) {
case AMDGPU::KILL:
case AMDGPU::RETURN:
+ case AMDGPU::IMPLICIT_DEF:
return true;
default:
return false;
}
}
- // Register Idx, then Const value
- std::vector<std::pair<unsigned, unsigned> > ExtractConstRead(MachineInstr *MI)
- const {
- const R600Operands::Ops OpTable[3][2] = {
- {R600Operands::SRC0, R600Operands::SRC0_SEL},
- {R600Operands::SRC1, R600Operands::SRC1_SEL},
- {R600Operands::SRC2, R600Operands::SRC2_SEL},
- };
- std::vector<std::pair<unsigned, unsigned> > Result;
-
- if (!TII->isALUInstr(MI->getOpcode()))
- return Result;
- for (unsigned j = 0; j < 3; j++) {
- int SrcIdx = TII->getOperandIdx(MI->getOpcode(), OpTable[j][0]);
- if (SrcIdx < 0)
- break;
- if (MI->getOperand(SrcIdx).getReg() == AMDGPU::ALU_CONST) {
- unsigned Const = MI->getOperand(
- TII->getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
- Result.push_back(std::pair<unsigned, unsigned>(SrcIdx, Const));
- }
- }
- return Result;
- }
-
std::pair<unsigned, unsigned> getAccessedBankLine(unsigned Sel) const {
// Sel is (512 + (kc_bank << 12) + ConstIndex) << 2
// (See also R600ISelLowering.cpp)
@@ -129,11 +111,20 @@ private:
}
bool SubstituteKCacheBank(MachineInstr *MI,
- std::vector<std::pair<unsigned, unsigned> > &CachedConsts) const {
+ std::vector<std::pair<unsigned, unsigned> > &CachedConsts,
+ bool UpdateInstr = true) const {
std::vector<std::pair<unsigned, unsigned> > UsedKCache;
- std::vector<std::pair<unsigned, unsigned> > Consts = ExtractConstRead(MI);
- assert(TII->isALUInstr(MI->getOpcode()) && "Can't assign Const");
+
+ if (!TII->isALUInstr(MI->getOpcode()) && MI->getOpcode() != AMDGPU::DOT_4)
+ return true;
+
+ const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Consts =
+ TII->getSrcs(MI);
+ assert((TII->isALUInstr(MI->getOpcode()) ||
+ MI->getOpcode() == AMDGPU::DOT_4) && "Can't assign Const");
for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
+ if (Consts[i].first->getReg() != AMDGPU::ALU_CONST)
+ continue;
unsigned Sel = Consts[i].second;
unsigned Chan = Sel & 3, Index = ((Sel >> 2) - 512) & 31;
unsigned KCacheIndex = Index * 4 + Chan;
@@ -159,25 +150,77 @@ private:
return false;
}
- for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
- switch(UsedKCache[i].first) {
+ if (!UpdateInstr)
+ return true;
+
+ for (unsigned i = 0, j = 0, n = Consts.size(); i < n; ++i) {
+ if (Consts[i].first->getReg() != AMDGPU::ALU_CONST)
+ continue;
+ switch(UsedKCache[j].first) {
case 0:
- MI->getOperand(Consts[i].first).setReg(
- AMDGPU::R600_KC0RegClass.getRegister(UsedKCache[i].second));
+ Consts[i].first->setReg(
+ AMDGPU::R600_KC0RegClass.getRegister(UsedKCache[j].second));
break;
case 1:
- MI->getOperand(Consts[i].first).setReg(
- AMDGPU::R600_KC1RegClass.getRegister(UsedKCache[i].second));
+ Consts[i].first->setReg(
+ AMDGPU::R600_KC1RegClass.getRegister(UsedKCache[j].second));
break;
default:
llvm_unreachable("Wrong Cache Line");
}
+ j++;
+ }
+ return true;
+ }
+
+ bool canClauseLocalKillFitInClause(
+ unsigned AluInstCount,
+ std::vector<std::pair<unsigned, unsigned> > KCacheBanks,
+ MachineBasicBlock::iterator Def,
+ MachineBasicBlock::iterator BBEnd) {
+ const R600RegisterInfo &TRI = TII->getRegisterInfo();
+ for (MachineInstr::const_mop_iterator
+ MOI = Def->operands_begin(),
+ MOE = Def->operands_end(); MOI != MOE; ++MOI) {
+ if (!MOI->isReg() || !MOI->isDef() ||
+ TRI.isPhysRegLiveAcrossClauses(MOI->getReg()))
+ continue;
+
+ // Def defines a clause local register, so check that its use will fit
+ // in the clause.
+ unsigned LastUseCount = 0;
+ for (MachineBasicBlock::iterator UseI = Def; UseI != BBEnd; ++UseI) {
+ AluInstCount += OccupiedDwords(UseI);
+ // Make sure we won't need to end the clause due to KCache limitations.
+ if (!SubstituteKCacheBank(UseI, KCacheBanks, false))
+ return false;
+
+ // We have reached the maximum instruction limit before finding the
+ // use that kills this register, so we cannot use this def in the
+ // current clause.
+ if (AluInstCount >= TII->getMaxAlusPerClause())
+ return false;
+
+ // Register kill flags have been cleared by the time we get to this
+ // pass, but it is safe to assume that all uses of this register
+ // occur in the same basic block as its definition, because
+ // it is illegal for the scheduler to schedule them in
+ // different blocks.
+ if (UseI->findRegisterUseOperandIdx(MOI->getReg()))
+ LastUseCount = AluInstCount;
+
+ if (UseI != Def && UseI->findRegisterDefOperandIdx(MOI->getReg()) != -1)
+ break;
+ }
+ if (LastUseCount)
+ return LastUseCount <= TII->getMaxAlusPerClause();
+ llvm_unreachable("Clause local register live at end of clause.");
}
return true;
}
MachineBasicBlock::iterator
- MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
+ MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) {
MachineBasicBlock::iterator ClauseHead = I;
std::vector<std::pair<unsigned, unsigned> > KCacheBanks;
bool PushBeforeModifier = false;
@@ -190,39 +233,66 @@ private:
if (AluInstCount > TII->getMaxAlusPerClause())
break;
if (I->getOpcode() == AMDGPU::PRED_X) {
+      // We put PRED_X in its own clause to ensure that ifcvt won't create
+      // clauses with more than 128 insts.
+      // IfCvt checks that the "then" and "else" branches of an if statement
+      // have fewer than ~60 insts, so converted clauses can't be bigger than
+      // ~121 insts (the predicate setter needs to be in the same clause as
+      // the predicated ALUs).
+ if (AluInstCount > 0)
+ break;
if (TII->getFlagOp(I).getImm() & MO_FLAG_PUSH)
PushBeforeModifier = true;
AluInstCount ++;
continue;
}
- if (I->getOpcode() == AMDGPU::KILLGT) {
+ // XXX: GROUP_BARRIER instructions cannot be in the same ALU clause as:
+ //
+ // * KILL or INTERP instructions
+ // * Any instruction that sets UPDATE_EXEC_MASK or UPDATE_PRED bits
+      // * Any instruction that uses waterfalling (i.e. INDEX_MODE = AR.X)
+ //
+ // XXX: These checks have not been implemented yet.
+ if (TII->mustBeLastInClause(I->getOpcode())) {
I++;
break;
}
- if (TII->isALUInstr(I->getOpcode()) &&
- !SubstituteKCacheBank(I, KCacheBanks))
+
+ // If this instruction defines a clause local register, make sure
+ // its use can fit in this clause.
+ if (!canClauseLocalKillFitInClause(AluInstCount, KCacheBanks, I, E))
+ break;
+
+ if (!SubstituteKCacheBank(I, KCacheBanks))
break;
AluInstCount += OccupiedDwords(I);
}
unsigned Opcode = PushBeforeModifier ?
AMDGPU::CF_ALU_PUSH_BEFORE : AMDGPU::CF_ALU;
BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead), TII->get(Opcode))
- .addImm(0) // ADDR
+      // We don't use the ADDR field until the R600ControlFlowFinalizer pass,
+      // where it is safe to assume it is 0. However, if we always put 0 here,
+      // the ifcvt pass may assume that identical ALU clause starters at the
+      // beginning of a true and a false branch can be factored, which is not
+      // the case.
+ .addImm(Address++) // ADDR
.addImm(KCacheBanks.empty()?0:KCacheBanks[0].first) // KB0
.addImm((KCacheBanks.size() < 2)?0:KCacheBanks[1].first) // KB1
.addImm(KCacheBanks.empty()?0:2) // KM0
.addImm((KCacheBanks.size() < 2)?0:2) // KM1
.addImm(KCacheBanks.empty()?0:KCacheBanks[0].second) // KLINE0
.addImm((KCacheBanks.size() < 2)?0:KCacheBanks[1].second) // KLINE1
- .addImm(AluInstCount); // COUNT
+ .addImm(AluInstCount) // COUNT
+ .addImm(1); // Enabled
return I;
}
public:
R600EmitClauseMarkersPass(TargetMachine &tm) : MachineFunctionPass(ID),
- TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())) { }
+ TII(0), Address(0) { }
virtual bool runOnMachineFunction(MachineFunction &MF) {
+ TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
+
for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
BB != BB_E; ++BB) {
MachineBasicBlock &MBB = *BB;
@@ -246,7 +316,7 @@ public:
char R600EmitClauseMarkersPass::ID = 0;
-}
+} // end anonymous namespace
llvm::FunctionPass *llvm::createR600EmitClauseMarkers(TargetMachine &TM) {
diff --git a/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/lib/Target/R600/R600ExpandSpecialInstrs.cpp
index f8c900f72776..aeee4aa89562 100644
--- a/lib/Target/R600/R600ExpandSpecialInstrs.cpp
+++ b/lib/Target/R600/R600ExpandSpecialInstrs.cpp
@@ -38,7 +38,7 @@ private:
public:
R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID),
- TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())) { }
+ TII(0) { }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -56,6 +56,7 @@ FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) {
}
bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
+ TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
const R600RegisterInfo &TRI = TII->getRegisterInfo();
@@ -67,6 +68,23 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
MachineInstr &MI = *I;
I = llvm::next(I);
+ // Expand LDS_*_RET instructions
+ if (TII->isLDSRetInstr(MI.getOpcode())) {
+ int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
+ assert(DstIdx != -1);
+ MachineOperand &DstOp = MI.getOperand(DstIdx);
+ MachineInstr *Mov = TII->buildMovInstr(&MBB, I,
+ DstOp.getReg(), AMDGPU::OQAP);
+ DstOp.setReg(AMDGPU::OQAP);
+ int LDSPredSelIdx = TII->getOperandIdx(MI.getOpcode(),
+ AMDGPU::OpName::pred_sel);
+ int MovPredSelIdx = TII->getOperandIdx(Mov->getOpcode(),
+ AMDGPU::OpName::pred_sel);
+ // Copy the pred_sel bit
+ Mov->getOperand(MovPredSelIdx).setReg(
+ MI.getOperand(LDSPredSelIdx).getReg());
+ }
+
switch (MI.getOpcode()) {
default: break;
// Expand PRED_X to one of the PRED_SET instructions.
@@ -81,28 +99,13 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
AMDGPU::ZERO); // src1
TII->addFlag(PredSet, 0, MO_FLAG_MASK);
if (Flags & MO_FLAG_PUSH) {
- TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1);
+ TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
} else {
- TII->setImmOperand(PredSet, R600Operands::UPDATE_PREDICATE, 1);
+ TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1);
}
MI.eraseFromParent();
continue;
}
- case AMDGPU::BREAK: {
- MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I,
- AMDGPU::PRED_SETE_INT,
- AMDGPU::PREDICATE_BIT,
- AMDGPU::ZERO,
- AMDGPU::ZERO);
- TII->addFlag(PredSet, 0, MO_FLAG_MASK);
- TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1);
-
- BuildMI(MBB, I, MBB.findDebugLoc(I),
- TII->get(AMDGPU::PREDICATED_BREAK))
- .addReg(AMDGPU::PREDICATE_BIT);
- MI.eraseFromParent();
- continue;
- }
case AMDGPU::INTERP_PAIR_XY: {
MachineInstr *BMI;
@@ -182,6 +185,45 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
MI.eraseFromParent();
continue;
}
+ case AMDGPU::DOT_4: {
+
+ const R600RegisterInfo &TRI = TII->getRegisterInfo();
+
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
+
+ for (unsigned Chan = 0; Chan < 4; ++Chan) {
+ bool Mask = (Chan != TRI.getHWRegChan(DstReg));
+ unsigned SubDstReg =
+ AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
+ MachineInstr *BMI =
+ TII->buildSlotOfVectorInstruction(MBB, &MI, Chan, SubDstReg);
+ if (Chan > 0) {
+ BMI->bundleWithPred();
+ }
+ if (Mask) {
+ TII->addFlag(BMI, 0, MO_FLAG_MASK);
+ }
+ if (Chan != 3)
+ TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
+ unsigned Opcode = BMI->getOpcode();
+        // While not strictly necessary from a hardware point of view, we
+        // force all src operands of a dot4 inst to belong to the same slot.
+ unsigned Src0 = BMI->getOperand(
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
+ .getReg();
+ unsigned Src1 = BMI->getOperand(
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1))
+ .getReg();
+ (void) Src0;
+ (void) Src1;
+ if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
+ (TRI.getEncodingValue(Src1) & 0xff) < 127)
+ assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
+ }
+ MI.eraseFromParent();
+ continue;
+ }
}
bool IsReduction = TII->isReductionOp(MI.getOpcode());
@@ -218,14 +260,14 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
// T0_W = CUBE T1_Y, T1_Z
for (unsigned Chan = 0; Chan < 4; Chan++) {
unsigned DstReg = MI.getOperand(
- TII->getOperandIdx(MI, R600Operands::DST)).getReg();
+ TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg();
unsigned Src0 = MI.getOperand(
- TII->getOperandIdx(MI, R600Operands::SRC0)).getReg();
+ TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
unsigned Src1 = 0;
// Determine the correct source registers
if (!IsCube) {
- int Src1Idx = TII->getOperandIdx(MI, R600Operands::SRC1);
+ int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1);
if (Src1Idx != -1) {
Src1 = MI.getOperand(Src1Idx).getReg();
}
@@ -268,12 +310,6 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
case AMDGPU::CUBE_eg_pseudo:
Opcode = AMDGPU::CUBE_eg_real;
break;
- case AMDGPU::DOT4_r600_pseudo:
- Opcode = AMDGPU::DOT4_r600_real;
- break;
- case AMDGPU::DOT4_eg_pseudo:
- Opcode = AMDGPU::DOT4_eg_real;
- break;
default:
break;
}
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index 7252235d5ba0..0fcb488672f8 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -16,6 +16,7 @@
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
+#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -27,41 +28,40 @@ using namespace llvm;
R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
AMDGPUTargetLowering(TM),
- TII(static_cast<const R600InstrInfo*>(TM.getInstrInfo())) {
+ Gen(TM.getSubtarget<AMDGPUSubtarget>().getGeneration()) {
addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);
addRegisterClass(MVT::i32, &AMDGPU::R600_Reg32RegClass);
+ addRegisterClass(MVT::v2f32, &AMDGPU::R600_Reg64RegClass);
+ addRegisterClass(MVT::v2i32, &AMDGPU::R600_Reg64RegClass);
+
computeRegisterProperties();
- setOperationAction(ISD::FADD, MVT::v4f32, Expand);
- setOperationAction(ISD::FMUL, MVT::v4f32, Expand);
- setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
- setOperationAction(ISD::FSUB, MVT::v4f32, Expand);
-
- setOperationAction(ISD::ADD, MVT::v4i32, Expand);
- setOperationAction(ISD::AND, MVT::v4i32, Expand);
- setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Expand);
- setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Expand);
- setOperationAction(ISD::MUL, MVT::v2i32, Expand);
- setOperationAction(ISD::MUL, MVT::v4i32, Expand);
- setOperationAction(ISD::OR, MVT::v4i32, Expand);
- setOperationAction(ISD::OR, MVT::v2i32, Expand);
- setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Expand);
- setOperationAction(ISD::SHL, MVT::v4i32, Expand);
- setOperationAction(ISD::SHL, MVT::v2i32, Expand);
- setOperationAction(ISD::SRL, MVT::v4i32, Expand);
- setOperationAction(ISD::SRL, MVT::v2i32, Expand);
- setOperationAction(ISD::SRA, MVT::v4i32, Expand);
- setOperationAction(ISD::SRA, MVT::v2i32, Expand);
- setOperationAction(ISD::SUB, MVT::v4i32, Expand);
- setOperationAction(ISD::SUB, MVT::v2i32, Expand);
- setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Expand);
- setOperationAction(ISD::UDIV, MVT::v4i32, Expand);
- setOperationAction(ISD::UREM, MVT::v4i32, Expand);
+ // Set condition code actions
+ setCondCodeAction(ISD::SETO, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETLT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETLE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::f32, Expand);
+
+ setCondCodeAction(ISD::SETLE, MVT::i32, Expand);
+ setCondCodeAction(ISD::SETLT, MVT::i32, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::i32, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::i32, Expand);
+
+ setOperationAction(ISD::FCOS, MVT::f32, Custom);
+ setOperationAction(ISD::FSIN, MVT::f32, Custom);
+
setOperationAction(ISD::SETCC, MVT::v4i32, Expand);
- setOperationAction(ISD::XOR, MVT::v4i32, Expand);
- setOperationAction(ISD::XOR, MVT::v2i32, Expand);
+ setOperationAction(ISD::SETCC, MVT::v2i32, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Expand);
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
@@ -72,8 +72,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i1, Custom);
- setOperationAction(ISD::ROTL, MVT::i32, Custom);
-
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
@@ -81,24 +79,33 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setOperationAction(ISD::SETCC, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i1, Custom);
- setOperationAction(ISD::SELECT, MVT::i32, Custom);
- setOperationAction(ISD::SELECT, MVT::f32, Custom);
-
- setOperationAction(ISD::VSELECT, MVT::v4i32, Expand);
- setOperationAction(ISD::VSELECT, MVT::v2i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::v2f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
// Legalize loads and stores to the private address space.
setOperationAction(ISD::LOAD, MVT::i32, Custom);
setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
+
+ // EXTLOAD should be the same as ZEXTLOAD. It is legal for some address
+ // spaces, so it is custom lowered to handle those where it isn't.
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Custom);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i16, Custom);
+
setOperationAction(ISD::STORE, MVT::i8, Custom);
setOperationAction(ISD::STORE, MVT::i32, Custom);
setOperationAction(ISD::STORE, MVT::v2i32, Custom);
setOperationAction(ISD::STORE, MVT::v4i32, Custom);
+ setTruncStoreAction(MVT::i32, MVT::i8, Custom);
+ setTruncStoreAction(MVT::i32, MVT::i16, Custom);
setOperationAction(ISD::LOAD, MVT::i32, Custom);
setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
@@ -108,10 +115,13 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setTargetDAGCombine(ISD::FP_TO_SINT);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
setTargetDAGCombine(ISD::SELECT_CC);
+ setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setBooleanContents(ZeroOrNegativeOneBooleanContent);
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
- setSchedulingPreference(Sched::VLIW);
+ setSchedulingPreference(Sched::Source);
}
MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
@@ -119,9 +129,29 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
MachineFunction * MF = BB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
MachineBasicBlock::iterator I = *MI;
+ const R600InstrInfo *TII =
+ static_cast<const R600InstrInfo*>(MF->getTarget().getInstrInfo());
switch (MI->getOpcode()) {
- default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
+ default:
+ // Replace LDS_*_RET instruction that don't have any uses with the
+ // equivalent LDS_*_NORET instruction.
+ if (TII->isLDSRetInstr(MI->getOpcode())) {
+ int DstIdx = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ assert(DstIdx != -1);
+ MachineInstrBuilder NewMI;
+ if (!MRI.use_empty(MI->getOperand(DstIdx).getReg()))
+ return BB;
+
+ NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
+ TII->get(AMDGPU::getLDSNoRetOp(MI->getOpcode())));
+ for (unsigned i = 1, e = MI->getNumOperands(); i < e; ++i) {
+ NewMI.addOperand(MI->getOperand(i));
+ }
+ } else {
+ return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
+ }
+ break;
case AMDGPU::CLAMP_R600: {
MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
AMDGPU::MOV,
@@ -169,12 +199,13 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::CONST_COPY: {
MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
MI->getOperand(0).getReg(), AMDGPU::ALU_CONST);
- TII->setImmOperand(NewMI, R600Operands::SRC0_SEL,
+ TII->setImmOperand(NewMI, AMDGPU::OpName::src0_sel,
MI->getOperand(1).getImm());
break;
}
case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
+ case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
@@ -188,23 +219,99 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::TXD: {
unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-
+ MachineOperand &RID = MI->getOperand(4);
+ MachineOperand &SID = MI->getOperand(5);
+ unsigned TextureId = MI->getOperand(6).getImm();
+ unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
+ unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
+
+ switch (TextureId) {
+ case 5: // Rect
+ CTX = CTY = 0;
+ break;
+ case 6: // Shadow1D
+ SrcW = SrcZ;
+ break;
+ case 7: // Shadow2D
+ SrcW = SrcZ;
+ break;
+ case 8: // ShadowRect
+ CTX = CTY = 0;
+ SrcW = SrcZ;
+ break;
+ case 9: // 1DArray
+ SrcZ = SrcY;
+ CTZ = 0;
+ break;
+ case 10: // 2DArray
+ CTZ = 0;
+ break;
+ case 11: // Shadow1DArray
+ SrcZ = SrcY;
+ CTZ = 0;
+ break;
+ case 12: // Shadow2DArray
+ CTZ = 0;
+ break;
+ }
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
.addOperand(MI->getOperand(3))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6));
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
.addOperand(MI->getOperand(2))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6));
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW)
.addReg(T0, RegState::Implicit)
.addReg(T1, RegState::Implicit);
break;
@@ -213,23 +320,100 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::TXD_SHADOW: {
unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
+ MachineOperand &RID = MI->getOperand(4);
+ MachineOperand &SID = MI->getOperand(5);
+ unsigned TextureId = MI->getOperand(6).getImm();
+ unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
+ unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
+
+ switch (TextureId) {
+ case 5: // Rect
+ CTX = CTY = 0;
+ break;
+ case 6: // Shadow1D
+ SrcW = SrcZ;
+ break;
+ case 7: // Shadow2D
+ SrcW = SrcZ;
+ break;
+ case 8: // ShadowRect
+ CTX = CTY = 0;
+ SrcW = SrcZ;
+ break;
+ case 9: // 1DArray
+ SrcZ = SrcY;
+ CTZ = 0;
+ break;
+ case 10: // 2DArray
+ CTZ = 0;
+ break;
+ case 11: // Shadow1DArray
+ SrcZ = SrcY;
+ CTZ = 0;
+ break;
+ case 12: // Shadow2DArray
+ CTZ = 0;
+ break;
+ }
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
.addOperand(MI->getOperand(3))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6));
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
.addOperand(MI->getOperand(2))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6));
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW)
.addReg(T0, RegState::Implicit)
.addReg(T1, RegState::Implicit);
break;
@@ -321,30 +505,27 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//
-using namespace llvm::Intrinsic;
-using namespace llvm::AMDGPUIntrinsic;
-
SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
switch (Op.getOpcode()) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
- case ISD::ROTL: return LowerROTL(Op, DAG);
+ case ISD::FCOS:
+ case ISD::FSIN: return LowerTrig(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
- case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
case ISD::LOAD: return LowerLOAD(Op, DAG);
- case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
+ case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
case ISD::INTRINSIC_VOID: {
SDValue Chain = Op.getOperand(0);
unsigned IntrinsicID =
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
switch (IntrinsicID) {
case AMDGPUIntrinsic::AMDGPU_store_output: {
- MachineFunction &MF = DAG.getMachineFunction();
- R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
MFI->LiveOuts.push_back(Reg);
- return DAG.getCopyToReg(Chain, Op.getDebugLoc(), Reg, Op.getOperand(2));
+ return DAG.getCopyToReg(Chain, SDLoc(Op), Reg, Op.getOperand(2));
}
case AMDGPUIntrinsic::R600_store_swizzle: {
const SDValue Args[8] = {
@@ -357,7 +538,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
DAG.getConstant(2, MVT::i32), // SWZ_Z
DAG.getConstant(3, MVT::i32) // SWZ_W
};
- return DAG.getNode(AMDGPUISD::EXPORT, Op.getDebugLoc(), Op.getValueType(),
+ return DAG.getNode(AMDGPUISD::EXPORT, SDLoc(Op), Op.getValueType(),
Args, 8);
}
@@ -371,13 +552,17 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
unsigned IntrinsicID =
cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
switch(IntrinsicID) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
case AMDGPUIntrinsic::R600_load_input: {
int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
- return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, Reg, VT);
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ MRI.addLiveIn(Reg);
+ return DAG.getCopyFromReg(DAG.getEntryNode(),
+ SDLoc(DAG.getEntryNode()), Reg, VT);
}
case AMDGPUIntrinsic::R600_interp_input: {
@@ -385,66 +570,184 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
int ijb = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
MachineSDNode *interp;
if (ijb < 0) {
+ const MachineFunction &MF = DAG.getMachineFunction();
+ const R600InstrInfo *TII =
+ static_cast<const R600InstrInfo*>(MF.getTarget().getInstrInfo());
interp = DAG.getMachineNode(AMDGPU::INTERP_VEC_LOAD, DL,
MVT::v4f32, DAG.getTargetConstant(slot / 4 , MVT::i32));
return DAG.getTargetExtractSubreg(
TII->getRegisterInfo().getSubRegFromChannel(slot % 4),
DL, MVT::f32, SDValue(interp, 0));
}
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ unsigned RegisterI = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb);
+ unsigned RegisterJ = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1);
+ MRI.addLiveIn(RegisterI);
+ MRI.addLiveIn(RegisterJ);
+ SDValue RegisterINode = DAG.getCopyFromReg(DAG.getEntryNode(),
+ SDLoc(DAG.getEntryNode()), RegisterI, MVT::f32);
+ SDValue RegisterJNode = DAG.getCopyFromReg(DAG.getEntryNode(),
+ SDLoc(DAG.getEntryNode()), RegisterJ, MVT::f32);
if (slot % 4 < 2)
interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4 , MVT::i32),
- CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
- AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1), MVT::f32),
- CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
- AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb), MVT::f32));
+ RegisterJNode, RegisterINode);
else
interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4 , MVT::i32),
- CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
- AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1), MVT::f32),
- CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
- AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb), MVT::f32));
-
+ RegisterJNode, RegisterINode);
return SDValue(interp, slot % 2);
}
+ case AMDGPUIntrinsic::R600_interp_xy:
+ case AMDGPUIntrinsic::R600_interp_zw: {
+ int slot = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ MachineSDNode *interp;
+ SDValue RegisterINode = Op.getOperand(2);
+ SDValue RegisterJNode = Op.getOperand(3);
+
+ if (IntrinsicID == AMDGPUIntrinsic::R600_interp_xy)
+ interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
+ MVT::f32, MVT::f32, DAG.getTargetConstant(slot, MVT::i32),
+ RegisterJNode, RegisterINode);
+ else
+ interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
+ MVT::f32, MVT::f32, DAG.getTargetConstant(slot, MVT::i32),
+ RegisterJNode, RegisterINode);
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f32,
+ SDValue(interp, 0), SDValue(interp, 1));
+ }
+ case AMDGPUIntrinsic::R600_tex:
+ case AMDGPUIntrinsic::R600_texc:
+ case AMDGPUIntrinsic::R600_txl:
+ case AMDGPUIntrinsic::R600_txlc:
+ case AMDGPUIntrinsic::R600_txb:
+ case AMDGPUIntrinsic::R600_txbc:
+ case AMDGPUIntrinsic::R600_txf:
+ case AMDGPUIntrinsic::R600_txq:
+ case AMDGPUIntrinsic::R600_ddx:
+ case AMDGPUIntrinsic::R600_ddy:
+ case AMDGPUIntrinsic::R600_ldptr: {
+ unsigned TextureOp;
+ switch (IntrinsicID) {
+ case AMDGPUIntrinsic::R600_tex:
+ TextureOp = 0;
+ break;
+ case AMDGPUIntrinsic::R600_texc:
+ TextureOp = 1;
+ break;
+ case AMDGPUIntrinsic::R600_txl:
+ TextureOp = 2;
+ break;
+ case AMDGPUIntrinsic::R600_txlc:
+ TextureOp = 3;
+ break;
+ case AMDGPUIntrinsic::R600_txb:
+ TextureOp = 4;
+ break;
+ case AMDGPUIntrinsic::R600_txbc:
+ TextureOp = 5;
+ break;
+ case AMDGPUIntrinsic::R600_txf:
+ TextureOp = 6;
+ break;
+ case AMDGPUIntrinsic::R600_txq:
+ TextureOp = 7;
+ break;
+ case AMDGPUIntrinsic::R600_ddx:
+ TextureOp = 8;
+ break;
+ case AMDGPUIntrinsic::R600_ddy:
+ TextureOp = 9;
+ break;
+ case AMDGPUIntrinsic::R600_ldptr:
+ TextureOp = 10;
+ break;
+ default:
+ llvm_unreachable("Unknow Texture Operation");
+ }
+
+ SDValue TexArgs[19] = {
+ DAG.getConstant(TextureOp, MVT::i32),
+ Op.getOperand(1),
+ DAG.getConstant(0, MVT::i32),
+ DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(2, MVT::i32),
+ DAG.getConstant(3, MVT::i32),
+ Op.getOperand(2),
+ Op.getOperand(3),
+ Op.getOperand(4),
+ DAG.getConstant(0, MVT::i32),
+ DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(2, MVT::i32),
+ DAG.getConstant(3, MVT::i32),
+ Op.getOperand(5),
+ Op.getOperand(6),
+ Op.getOperand(7),
+ Op.getOperand(8),
+ Op.getOperand(9),
+ Op.getOperand(10)
+ };
+ return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs, 19);
+ }
+ case AMDGPUIntrinsic::AMDGPU_dp4: {
+ SDValue Args[8] = {
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
+ DAG.getConstant(0, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
+ DAG.getConstant(0, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
+ DAG.getConstant(1, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
+ DAG.getConstant(1, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
+ DAG.getConstant(2, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
+ DAG.getConstant(2, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
+ DAG.getConstant(3, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
+ DAG.getConstant(3, MVT::i32))
+ };
+ return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args, 8);
+ }
- case r600_read_ngroups_x:
+ case Intrinsic::r600_read_ngroups_x:
return LowerImplicitParameter(DAG, VT, DL, 0);
- case r600_read_ngroups_y:
+ case Intrinsic::r600_read_ngroups_y:
return LowerImplicitParameter(DAG, VT, DL, 1);
- case r600_read_ngroups_z:
+ case Intrinsic::r600_read_ngroups_z:
return LowerImplicitParameter(DAG, VT, DL, 2);
- case r600_read_global_size_x:
+ case Intrinsic::r600_read_global_size_x:
return LowerImplicitParameter(DAG, VT, DL, 3);
- case r600_read_global_size_y:
+ case Intrinsic::r600_read_global_size_y:
return LowerImplicitParameter(DAG, VT, DL, 4);
- case r600_read_global_size_z:
+ case Intrinsic::r600_read_global_size_z:
return LowerImplicitParameter(DAG, VT, DL, 5);
- case r600_read_local_size_x:
+ case Intrinsic::r600_read_local_size_x:
return LowerImplicitParameter(DAG, VT, DL, 6);
- case r600_read_local_size_y:
+ case Intrinsic::r600_read_local_size_y:
return LowerImplicitParameter(DAG, VT, DL, 7);
- case r600_read_local_size_z:
+ case Intrinsic::r600_read_local_size_z:
return LowerImplicitParameter(DAG, VT, DL, 8);
- case r600_read_tgid_x:
+ case Intrinsic::r600_read_tgid_x:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T1_X, VT);
- case r600_read_tgid_y:
+ case Intrinsic::r600_read_tgid_y:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T1_Y, VT);
- case r600_read_tgid_z:
+ case Intrinsic::r600_read_tgid_z:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T1_Z, VT);
- case r600_read_tidig_x:
+ case Intrinsic::r600_read_tidig_x:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T0_X, VT);
- case r600_read_tidig_y:
+ case Intrinsic::r600_read_tidig_y:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T0_Y, VT);
- case r600_read_tidig_z:
+ case Intrinsic::r600_read_tidig_z:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T0_Z, VT);
}
@@ -478,10 +781,41 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N,
}
}
+SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
+ // On hw >= R700, COS/SIN input must be between -1. and 1.
+ // Thus we lower them to TRIG ( FRACT ( x / 2Pi + 0.5) - 0.5)
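+ // 0.15915494309 below is 1/(2*Pi): the multiply converts the argument into
+ // turns, FRACT keeps only the fractional turn, and subtracting 0.5 recenters
+ // the result into [-0.5, 0.5) before it reaches the hardware unit.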
+ EVT VT = Op.getValueType();
+ SDValue Arg = Op.getOperand(0);
+ SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, SDLoc(Op), VT,
+ DAG.getNode(ISD::FADD, SDLoc(Op), VT,
+ DAG.getNode(ISD::FMUL, SDLoc(Op), VT, Arg,
+ DAG.getConstantFP(0.15915494309, MVT::f32)),
+ DAG.getConstantFP(0.5, MVT::f32)));
+ unsigned TrigNode;
+ switch (Op.getOpcode()) {
+ case ISD::FCOS:
+ TrigNode = AMDGPUISD::COS_HW;
+ break;
+ case ISD::FSIN:
+ TrigNode = AMDGPUISD::SIN_HW;
+ break;
+ default:
+ llvm_unreachable("Wrong trig opcode");
+ }
+ SDValue TrigVal = DAG.getNode(TrigNode, SDLoc(Op), VT,
+ DAG.getNode(ISD::FADD, SDLoc(Op), VT, FractPart,
+ DAG.getConstantFP(-0.5, MVT::f32)));
+ if (Gen >= AMDGPUSubtarget::R700)
+ return TrigVal;
+ // On R600 hw, COS/SIN input must be between -Pi and Pi.
+ return DAG.getNode(ISD::FMUL, SDLoc(Op), VT, TrigVal,
+ DAG.getConstantFP(3.14159265359, MVT::f32));
+}
+
SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(
ISD::SETCC,
- Op.getDebugLoc(),
+ SDLoc(Op),
MVT::i1,
Op, DAG.getConstantFP(0.0f, MVT::f32),
DAG.getCondCode(ISD::SETNE)
@@ -489,11 +823,11 @@ SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
}
SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
- DebugLoc DL,
+ SDLoc DL,
unsigned DwordOffset) const {
unsigned ByteOffset = DwordOffset * 4;
PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
- AMDGPUAS::PARAM_I_ADDRESS);
+ AMDGPUAS::CONSTANT_BUFFER_0);
// We shouldn't be using an offset wider than 16-bits for implicit parameters.
assert(isInt<16>(ByteOffset));
@@ -504,32 +838,6 @@ SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
false, false, false, 0);
}
-SDValue R600TargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const {
-
- MachineFunction &MF = DAG.getMachineFunction();
- const AMDGPUFrameLowering *TFL =
- static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());
-
- FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
- assert(FIN);
-
- unsigned FrameIndex = FIN->getIndex();
- unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
- return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), MVT::i32);
-}
-
-SDValue R600TargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
- EVT VT = Op.getValueType();
-
- return DAG.getNode(AMDGPUISD::BITALIGN, DL, VT,
- Op.getOperand(0),
- Op.getOperand(0),
- DAG.getNode(ISD::SUB, DL, VT,
- DAG.getConstant(32, MVT::i32),
- Op.getOperand(1)));
-}
-
bool R600TargetLowering::isZero(SDValue Op) const {
if(ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
return Cst->isNullValue();
@@ -541,7 +849,7 @@ bool R600TargetLowering::isZero(SDValue Op) const {
}
SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
@@ -560,16 +868,27 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
//
// SET* can match the following patterns:
//
- // select_cc f32, f32, -1, 0, cc_any
- // select_cc f32, f32, 1.0f, 0.0f, cc_any
- // select_cc i32, i32, -1, 0, cc_any
+ // select_cc f32, f32, -1, 0, cc_supported
+ // select_cc f32, f32, 1.0f, 0.0f, cc_supported
+ // select_cc i32, i32, -1, 0, cc_supported
//
// Move hardware True/False values to the correct operand.
+ ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
+ ISD::CondCode InverseCC =
+ ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32);
if (isHWTrueValue(False) && isHWFalseValue(True)) {
- ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
- std::swap(False, True);
- CC = DAG.getCondCode(ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32));
+ if (isCondCodeLegal(InverseCC, CompareVT.getSimpleVT())) {
+ std::swap(False, True);
+ CC = DAG.getCondCode(InverseCC);
+ } else {
+ ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InverseCC);
+ if (isCondCodeLegal(SwapInvCC, CompareVT.getSimpleVT())) {
+ std::swap(False, True);
+ std::swap(LHS, RHS);
+ CC = DAG.getCondCode(SwapInvCC);
+ }
+ }
}
if (isHWTrueValue(True) && isHWFalseValue(False) &&
@@ -582,14 +901,34 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
//
// CND* can match the following patterns:
//
- // select_cc f32, 0.0, f32, f32, cc_any
- // select_cc f32, 0.0, i32, i32, cc_any
- // select_cc i32, 0, f32, f32, cc_any
- // select_cc i32, 0, i32, i32, cc_any
+ // select_cc f32, 0.0, f32, f32, cc_supported
+ // select_cc f32, 0.0, i32, i32, cc_supported
+ // select_cc i32, 0, f32, f32, cc_supported
+ // select_cc i32, 0, i32, i32, cc_supported
//
- if (isZero(LHS) || isZero(RHS)) {
- SDValue Cond = (isZero(LHS) ? RHS : LHS);
- SDValue Zero = (isZero(LHS) ? LHS : RHS);
+
+ // Try to move the zero value to the RHS
+ if (isZero(LHS)) {
+ ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
+ // Try swapping the operands
+ ISD::CondCode CCSwapped = ISD::getSetCCSwappedOperands(CCOpcode);
+ if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
+ std::swap(LHS, RHS);
+ CC = DAG.getCondCode(CCSwapped);
+ } else {
+ // Try inverting the condition and then swapping the operands
+ ISD::CondCode CCInv = ISD::getSetCCInverse(CCOpcode, CompareVT.isInteger());
+ CCSwapped = ISD::getSetCCSwappedOperands(CCInv);
+ if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
+ std::swap(True, False);
+ std::swap(LHS, RHS);
+ CC = DAG.getCondCode(CCSwapped);
+ }
+ }
+ }
+ if (isZero(RHS)) {
+ SDValue Cond = LHS;
+ SDValue Zero = RHS;
ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
if (CompareVT != VT) {
// Bitcast True / False to the correct types. This will end up being
@@ -599,20 +938,11 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
True = DAG.getNode(ISD::BITCAST, DL, CompareVT, True);
False = DAG.getNode(ISD::BITCAST, DL, CompareVT, False);
}
- if (isZero(LHS)) {
- CCOpcode = ISD::getSetCCSwappedOperands(CCOpcode);
- }
switch (CCOpcode) {
case ISD::SETONE:
case ISD::SETUNE:
case ISD::SETNE:
- case ISD::SETULE:
- case ISD::SETULT:
- case ISD::SETOLE:
- case ISD::SETOLT:
- case ISD::SETLE:
- case ISD::SETLT:
CCOpcode = ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32);
Temp = True;
True = False;
@@ -660,17 +990,6 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
DAG.getCondCode(ISD::SETNE));
}
-SDValue R600TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
- return DAG.getNode(ISD::SELECT_CC,
- Op.getDebugLoc(),
- Op.getValueType(),
- Op.getOperand(0),
- DAG.getConstant(0, MVT::i32),
- Op.getOperand(1),
- Op.getOperand(2),
- DAG.getCondCode(ISD::SETNE));
-}
-
/// LLVM generates byte-addressed pointers. For indirect addressing, we need to
/// convert these pointers to a register index. Each register holds
/// 16 bytes, (4 x 32bit sub-register), but we need to take into account the
@@ -693,7 +1012,7 @@ SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
default: llvm_unreachable("Invalid stack width");
}
- return DAG.getNode(ISD::SRL, Ptr.getDebugLoc(), Ptr.getValueType(), Ptr,
+ return DAG.getNode(ISD::SRL, SDLoc(Ptr), Ptr.getValueType(), Ptr,
DAG.getConstant(SRLPad, MVT::i32));
}
@@ -727,25 +1046,65 @@ void R600TargetLowering::getStackAddress(unsigned StackWidth,
}
SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
SDValue Chain = Op.getOperand(0);
SDValue Value = Op.getOperand(1);
SDValue Ptr = Op.getOperand(2);
- if (StoreNode->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
- Ptr->getOpcode() != AMDGPUISD::DWORDADDR) {
- // Convert pointer from byte address to dword address.
- Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, Ptr.getValueType(),
- DAG.getNode(ISD::SRL, DL, Ptr.getValueType(),
- Ptr, DAG.getConstant(2, MVT::i32)));
+ SDValue Result = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
+ if (Result.getNode()) {
+ return Result;
+ }
- if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
- assert(!"Truncated and indexed stores not supported yet");
- } else {
- Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
+ if (StoreNode->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS) {
+ if (StoreNode->isTruncatingStore()) {
+ EVT VT = Value.getValueType();
+ assert(VT.bitsLE(MVT::i32));
+ EVT MemVT = StoreNode->getMemoryVT();
+ SDValue MaskConstant;
+ if (MemVT == MVT::i8) {
+ MaskConstant = DAG.getConstant(0xFF, MVT::i32);
+ } else {
+ assert(MemVT == MVT::i16);
+ MaskConstant = DAG.getConstant(0xFFFF, MVT::i32);
+ }
+ SDValue DWordAddr = DAG.getNode(ISD::SRL, DL, VT, Ptr,
+ DAG.getConstant(2, MVT::i32));
+ SDValue ByteIndex = DAG.getNode(ISD::AND, DL, Ptr.getValueType(), Ptr,
+ DAG.getConstant(0x00000003, VT));
+ SDValue TruncValue = DAG.getNode(ISD::AND, DL, VT, Value, MaskConstant);
+ SDValue Shift = DAG.getNode(ISD::SHL, DL, VT, ByteIndex,
+ DAG.getConstant(3, VT));
+ SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, VT, TruncValue, Shift);
+ SDValue Mask = DAG.getNode(ISD::SHL, DL, VT, MaskConstant, Shift);
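+ // Example: an i8 store to byte address 7 targets dword 1 (Ptr >> 2) with
+ // ByteIndex 3, so Shift is 24 and both the value and the 0xFF mask land in
+ // the top byte of the 32-bit MSKOR operand.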
+ // XXX: If we add a 64-bit ZW register class, then we could use a 2 x i32
+ // vector instead.
+ SDValue Src[4] = {
+ ShiftedValue,
+ DAG.getConstant(0, MVT::i32),
+ DAG.getConstant(0, MVT::i32),
+ Mask
+ };
+ SDValue Input = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Src, 4);
+ SDValue Args[3] = { Chain, Input, DWordAddr };
+ return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
+ Op->getVTList(), Args, 3, MemVT,
+ StoreNode->getMemOperand());
+ } else if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR &&
+ Value.getValueType().bitsGE(MVT::i32)) {
+ // Convert pointer from byte address to dword address.
+ Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, Ptr.getValueType(),
+ DAG.getNode(ISD::SRL, DL, Ptr.getValueType(),
+ Ptr, DAG.getConstant(2, MVT::i32)));
+
+ if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
+ assert(!"Truncated and indexed stores not supported yet");
+ } else {
+ Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
+ }
+ return Chain;
}
- return Chain;
}
EVT ValueVT = Value.getValueType();
@@ -789,7 +1148,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
Value = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Value);
}
Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other, Chain, Value, Ptr,
- DAG.getTargetConstant(0, MVT::i32)); // Channel
+ DAG.getTargetConstant(0, MVT::i32)); // Channel
}
return Chain;
@@ -839,18 +1198,28 @@ ConstantAddressBlock(unsigned AddressSpace) {
SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
{
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
SDValue Chain = Op.getOperand(0);
SDValue Ptr = Op.getOperand(1);
SDValue LoweredLoad;
+ if (LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && VT.isVector()) {
+ SDValue MergedValues[2] = {
+ SplitVectorLoad(Op, DAG),
+ Chain
+ };
+ return DAG.getMergeValues(MergedValues, 2, DL);
+ }
+
int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
- if (ConstantBlock > -1) {
+ if (ConstantBlock > -1 &&
+ ((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) ||
+ (LoadNode->getExtensionType() == ISD::ZEXTLOAD))) {
SDValue Result;
- if (dyn_cast<ConstantExpr>(LoadNode->getSrcValue()) ||
- dyn_cast<Constant>(LoadNode->getSrcValue()) ||
- dyn_cast<ConstantSDNode>(Ptr)) {
+ if (isa<ConstantExpr>(LoadNode->getSrcValue()) ||
+ isa<Constant>(LoadNode->getSrcValue()) ||
+ isa<ConstantSDNode>(Ptr)) {
SDValue Slots[4];
for (unsigned i = 0; i < 4; i++) {
// We want Const position encoded with the following formula :
@@ -862,13 +1231,19 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
DAG.getConstant(4 * i + ConstantBlock * 16, MVT::i32));
Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
}
- Result = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Slots, 4);
+ EVT NewVT = MVT::v4i32;
+ unsigned NumElements = 4;
+ if (VT.isVector()) {
+ NewVT = VT;
+ NumElements = VT.getVectorNumElements();
+ }
+ Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT, Slots, NumElements);
} else {
// A non-constant ptr can't be folded; keep it as a v4f32 load
Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr, DAG.getConstant(4, MVT::i32)),
DAG.getConstant(LoadNode->getAddressSpace() -
- AMDGPUAS::CONSTANT_BUFFER_0, MVT::i32)
+ AMDGPUAS::CONSTANT_BUFFER_0, MVT::i32)
);
}
@@ -884,6 +1259,30 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
return DAG.getMergeValues(MergedValues, 2, DL);
}
+ // For most operations returning SDValue() will result in the node being
+ // expanded by the DAG Legalizer. This is not the case for ISD::LOAD, so we
+ // need to manually expand loads that may be legal in some address spaces and
+ // illegal in others. SEXT loads from CONSTANT_BUFFER_0 are supported for
+ // compute shaders, since the data is sign extended when it is uploaded to the
+ // buffer. However SEXT loads from other address spaces are not supported, so
+ // we need to expand them here.
+ if (LoadNode->getExtensionType() == ISD::SEXTLOAD) {
+ EVT MemVT = LoadNode->getMemoryVT();
+ assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
+ SDValue ShiftAmount =
+ DAG.getConstant(VT.getSizeInBits() - MemVT.getSizeInBits(), MVT::i32);
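+ // Example: an i8 sextload returning i32 uses ShiftAmount 24; shifting the
+ // extended value left by 24 and arithmetic-shifting it back replicates the
+ // sign bit through the upper 24 bits.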
+ SDValue NewLoad = DAG.getExtLoad(ISD::EXTLOAD, DL, VT, Chain, Ptr,
+ LoadNode->getPointerInfo(), MemVT,
+ LoadNode->isVolatile(),
+ LoadNode->isNonTemporal(),
+ LoadNode->getAlignment());
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, NewLoad, ShiftAmount);
+ SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Shl, ShiftAmount);
+
+ SDValue MergedValues[2] = { Sra, Chain };
+ return DAG.getMergeValues(MergedValues, 2, DL);
+ }
+
if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
return SDValue();
}
@@ -941,42 +1340,158 @@ SDValue R600TargetLowering::LowerFormalArguments(
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
- unsigned ParamOffsetBytes = 36;
- Function::const_arg_iterator FuncArg =
- DAG.getMachineFunction().getFunction()->arg_begin();
- for (unsigned i = 0, e = Ins.size(); i < e; ++i, ++FuncArg) {
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), ArgLocs, *DAG.getContext());
+ MachineFunction &MF = DAG.getMachineFunction();
+ unsigned ShaderType = MF.getInfo<R600MachineFunctionInfo>()->ShaderType;
+
+ SmallVector<ISD::InputArg, 8> LocalIns;
+
+ getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
+ LocalIns);
+
+ AnalyzeFormalArguments(CCInfo, LocalIns);
+
+ for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
EVT VT = Ins[i].VT;
- Type *ArgType = FuncArg->getType();
- unsigned ArgSizeInBits = ArgType->isPointerTy() ?
- 32 : ArgType->getPrimitiveSizeInBits();
- unsigned ArgBytes = ArgSizeInBits >> 3;
- EVT ArgVT;
- if (ArgSizeInBits < VT.getSizeInBits()) {
- assert(!ArgType->isFloatTy() &&
- "Extending floating point arguments not supported yet");
- ArgVT = MVT::getIntegerVT(ArgSizeInBits);
- } else {
- ArgVT = VT;
+ EVT MemVT = LocalIns[i].VT;
+
+ if (ShaderType != ShaderType::COMPUTE) {
+ unsigned Reg = MF.addLiveIn(VA.getLocReg(), &AMDGPU::R600_Reg128RegClass);
+ SDValue Register = DAG.getCopyFromReg(Chain, DL, Reg, VT);
+ InVals.push_back(Register);
+ continue;
}
+
PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
- AMDGPUAS::PARAM_I_ADDRESS);
- SDValue Arg = DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, DAG.getRoot(),
- DAG.getConstant(ParamOffsetBytes, MVT::i32),
- MachinePointerInfo(UndefValue::get(PtrTy)),
- ArgVT, false, false, ArgBytes);
+ AMDGPUAS::CONSTANT_BUFFER_0);
+
+ // The first 36 bytes of the input buffer contain information about
+ // thread group and global sizes.
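+ // Kernel arguments therefore start at byte offset 36 of CONSTANT_BUFFER_0;
+ // each one is fetched with a sign-extending load at 36 plus its
+ // calling-convention offset.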
+ SDValue Arg = DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, Chain,
+ DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
+ MachinePointerInfo(UndefValue::get(PtrTy)),
+ MemVT, false, false, 4);
+ // 4 is the preferred alignment for
+ // the CONSTANT memory space.
InVals.push_back(Arg);
- ParamOffsetBytes += ArgBytes;
}
return Chain;
}
-EVT R600TargetLowering::getSetCCResultType(EVT VT) const {
+EVT R600TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
if (!VT.isVector()) return MVT::i32;
return VT.changeVectorElementTypeToInteger();
}
+static SDValue
+CompactSwizzlableVector(SelectionDAG &DAG, SDValue VectorEntry,
+ DenseMap<unsigned, unsigned> &RemapSwizzle) {
+ assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
+ assert(RemapSwizzle.empty());
+ SDValue NewBldVec[4] = {
+ VectorEntry.getOperand(0),
+ VectorEntry.getOperand(1),
+ VectorEntry.getOperand(2),
+ VectorEntry.getOperand(3)
+ };
+
+ for (unsigned i = 0; i < 4; i++) {
+ if (NewBldVec[i].getOpcode() == ISD::UNDEF)
+ // We mask the write here to teach later passes that the ith element of this
+ // vector is undef. This lets us reduce 128-bit register usage, break false
+ // dependencies and additionally make the assembly easier to read.
+ RemapSwizzle[i] = 7; // SEL_MASK_WRITE
+ if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(NewBldVec[i])) {
+ if (C->isZero()) {
+ RemapSwizzle[i] = 4; // SEL_0
+ NewBldVec[i] = DAG.getUNDEF(MVT::f32);
+ } else if (C->isExactlyValue(1.0)) {
+ RemapSwizzle[i] = 5; // SEL_1
+ NewBldVec[i] = DAG.getUNDEF(MVT::f32);
+ }
+ }
+
+ if (NewBldVec[i].getOpcode() == ISD::UNDEF)
+ continue;
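+ // If this element repeats an earlier lane, drop it and make the swizzle
+ // read from that earlier lane instead.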
+ for (unsigned j = 0; j < i; j++) {
+ if (NewBldVec[i] == NewBldVec[j]) {
+ NewBldVec[i] = DAG.getUNDEF(NewBldVec[i].getValueType());
+ RemapSwizzle[i] = j;
+ break;
+ }
+ }
+ }
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
+ VectorEntry.getValueType(), NewBldVec, 4);
+}
+
+static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
+ DenseMap<unsigned, unsigned> &RemapSwizzle) {
+ assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
+ assert(RemapSwizzle.empty());
+ SDValue NewBldVec[4] = {
+ VectorEntry.getOperand(0),
+ VectorEntry.getOperand(1),
+ VectorEntry.getOperand(2),
+ VectorEntry.getOperand(3)
+ };
+ bool isUnmovable[4] = { false, false, false, false };
+ for (unsigned i = 0; i < 4; i++)
+ RemapSwizzle[i] = i;
+
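+ // Try to move an element that was extracted from lane Idx of some vector
+ // back into output lane Idx, so the final swizzle can read it in place.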
+ for (unsigned i = 0; i < 4; i++) {
+ if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+ unsigned Idx = dyn_cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
+ ->getZExtValue();
+ if (i == Idx) {
+ isUnmovable[Idx] = true;
+ continue;
+ }
+ if (isUnmovable[Idx])
+ continue;
+ // Swap i and Idx
+ std::swap(NewBldVec[Idx], NewBldVec[i]);
+ std::swap(RemapSwizzle[i], RemapSwizzle[Idx]);
+ break;
+ }
+ }
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
+ VectorEntry.getValueType(), NewBldVec, 4);
+}
+
+
+SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector,
+SDValue Swz[4], SelectionDAG &DAG) const {
+ assert(BuildVector.getOpcode() == ISD::BUILD_VECTOR);
+ // Old -> New swizzle values
+ DenseMap<unsigned, unsigned> SwizzleRemap;
+
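+ // Two passes: CompactSwizzlableVector folds constants, undefs and duplicated
+ // elements into the swizzle selectors, then ReorganizeVector tries to keep
+ // each remaining element in its source lane. The swizzle operands are
+ // remapped after each pass.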
+ BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
+ for (unsigned i = 0; i < 4; i++) {
+ unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
+ Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
+ }
+
+ SwizzleRemap.clear();
+ BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
+ for (unsigned i = 0; i < 4; i++) {
+ unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
+ Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
+ }
+
+ return BuildVector;
+}
+
+
//===----------------------------------------------------------------------===//
// Custom DAG Optimizations
//===----------------------------------------------------------------------===//
@@ -990,7 +1505,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::FP_ROUND: {
SDValue Arg = N->getOperand(0);
if (Arg.getOpcode() == ISD::UINT_TO_FP && Arg.getValueType() == MVT::f64) {
- return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), N->getValueType(0),
+ return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), N->getValueType(0),
Arg.getOperand(0));
}
break;
@@ -1015,7 +1530,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
return SDValue();
}
- return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), N->getValueType(0),
+ return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0),
SelectCC.getOperand(0), // LHS
SelectCC.getOperand(1), // RHS
DAG.getConstant(-1, MVT::i32), // True
@@ -1024,6 +1539,61 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
break;
}
+
+ // insert_vector_elt (build_vector elt0, ... , eltN), NewEltIdx, idx
+ // => build_vector elt0, ... , NewEltIdx, ... , eltN
+ case ISD::INSERT_VECTOR_ELT: {
+ SDValue InVec = N->getOperand(0);
+ SDValue InVal = N->getOperand(1);
+ SDValue EltNo = N->getOperand(2);
+ SDLoc dl(N);
+
+ // If the inserted element is an UNDEF, just use the input vector.
+ if (InVal.getOpcode() == ISD::UNDEF)
+ return InVec;
+
+ EVT VT = InVec.getValueType();
+
+ // If we can't generate a legal BUILD_VECTOR, exit
+ if (!isOperationLegal(ISD::BUILD_VECTOR, VT))
+ return SDValue();
+
+ // Check that we know which element is being inserted
+ if (!isa<ConstantSDNode>(EltNo))
+ return SDValue();
+ unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+
+ // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
+ // be converted to a BUILD_VECTOR). Fill in the Ops vector with the
+ // vector elements.
+ SmallVector<SDValue, 8> Ops;
+ if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
+ Ops.append(InVec.getNode()->op_begin(),
+ InVec.getNode()->op_end());
+ } else if (InVec.getOpcode() == ISD::UNDEF) {
+ unsigned NElts = VT.getVectorNumElements();
+ Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
+ } else {
+ return SDValue();
+ }
+
+ // Insert the element
+ if (Elt < Ops.size()) {
+ // All the operands of BUILD_VECTOR must have the same type;
+ // we enforce that here.
+ EVT OpVT = Ops[0].getValueType();
+ if (InVal.getValueType() != OpVT)
+ InVal = OpVT.bitsGT(InVal.getValueType()) ?
+ DAG.getNode(ISD::ANY_EXTEND, dl, OpVT, InVal) :
+ DAG.getNode(ISD::TRUNCATE, dl, OpVT, InVal);
+ Ops[Elt] = InVal;
+ }
+
+ // Return the new vector
+ return DAG.getNode(ISD::BUILD_VECTOR, dl,
+ VT, &Ops[0], Ops.size());
+ }
+
// Extract_vec (Build_vector) generated by custom lowering
// also needs to be customly combined
case ISD::EXTRACT_VECTOR_ELT: {
@@ -1038,7 +1608,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
unsigned Element = Const->getZExtValue();
- return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getVTList(),
+ return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getVTList(),
Arg->getOperand(0).getOperand(Element));
}
}
@@ -1073,25 +1643,25 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
ISD::CondCode LHSCC = cast<CondCodeSDNode>(LHS.getOperand(4))->get();
LHSCC = ISD::getSetCCInverse(LHSCC,
LHS.getOperand(0).getValueType().isInteger());
- return DAG.getSelectCC(N->getDebugLoc(),
- LHS.getOperand(0),
- LHS.getOperand(1),
- LHS.getOperand(2),
- LHS.getOperand(3),
- LHSCC);
+ if (DCI.isBeforeLegalizeOps() ||
+ isCondCodeLegal(LHSCC, LHS.getOperand(0).getSimpleValueType()))
+ return DAG.getSelectCC(SDLoc(N),
+ LHS.getOperand(0),
+ LHS.getOperand(1),
+ LHS.getOperand(2),
+ LHS.getOperand(3),
+ LHSCC);
+ break;
}
}
+ return SDValue();
}
+
case AMDGPUISD::EXPORT: {
SDValue Arg = N->getOperand(1);
if (Arg.getOpcode() != ISD::BUILD_VECTOR)
break;
- SDValue NewBldVec[4] = {
- DAG.getUNDEF(MVT::f32),
- DAG.getUNDEF(MVT::f32),
- DAG.getUNDEF(MVT::f32),
- DAG.getUNDEF(MVT::f32)
- };
+
SDValue NewArgs[8] = {
N->getOperand(0), // Chain
SDValue(),
@@ -1102,23 +1672,290 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
N->getOperand(6), // SWZ_Z
N->getOperand(7) // SWZ_W
};
- for (unsigned i = 0; i < Arg.getNumOperands(); i++) {
- if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg.getOperand(i))) {
- if (C->isZero()) {
- NewArgs[4 + i] = DAG.getConstant(4, MVT::i32); // SEL_0
- } else if (C->isExactlyValue(1.0)) {
- NewArgs[4 + i] = DAG.getConstant(5, MVT::i32); // SEL_0
- } else {
- NewBldVec[i] = Arg.getOperand(i);
+ SDLoc DL(N);
+ NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG);
+ return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs, 8);
+ }
+ case AMDGPUISD::TEXTURE_FETCH: {
+ SDValue Arg = N->getOperand(1);
+ if (Arg.getOpcode() != ISD::BUILD_VECTOR)
+ break;
+
+ SDValue NewArgs[19] = {
+ N->getOperand(0),
+ N->getOperand(1),
+ N->getOperand(2),
+ N->getOperand(3),
+ N->getOperand(4),
+ N->getOperand(5),
+ N->getOperand(6),
+ N->getOperand(7),
+ N->getOperand(8),
+ N->getOperand(9),
+ N->getOperand(10),
+ N->getOperand(11),
+ N->getOperand(12),
+ N->getOperand(13),
+ N->getOperand(14),
+ N->getOperand(15),
+ N->getOperand(16),
+ N->getOperand(17),
+ N->getOperand(18),
+ };
+ NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG);
+ return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, SDLoc(N), N->getVTList(),
+ NewArgs, 19);
+ }
+ }
+ return SDValue();
+}
+
+static bool
+FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
+ SDValue &Abs, SDValue &Sel, SDValue &Imm, SelectionDAG &DAG) {
+ const R600InstrInfo *TII =
+ static_cast<const R600InstrInfo *>(DAG.getTarget().getInstrInfo());
+ if (!Src.isMachineOpcode())
+ return false;
+ switch (Src.getMachineOpcode()) {
+ case AMDGPU::FNEG_R600:
+ if (!Neg.getNode())
+ return false;
+ Src = Src.getOperand(0);
+ Neg = DAG.getTargetConstant(1, MVT::i32);
+ return true;
+ case AMDGPU::FABS_R600:
+ if (!Abs.getNode())
+ return false;
+ Src = Src.getOperand(0);
+ Abs = DAG.getTargetConstant(1, MVT::i32);
+ return true;
+ case AMDGPU::CONST_COPY: {
+ unsigned Opcode = ParentNode->getMachineOpcode();
+ bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
+
+ if (!Sel.getNode())
+ return false;
+
+ SDValue CstOffset = Src.getOperand(0);
+ if (ParentNode->getValueType(0).isVector())
+ return false;
+
+ // Gather constant values
+ int SrcIndices[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
+ };
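+ // Collect the constant-buffer selects already used by the other sources;
+ // folding this CONST_COPY is only legal if the whole instruction still fits
+ // the hardware's constant read limitations.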
+ std::vector<unsigned> Consts;
+ for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
+ int OtherSrcIdx = SrcIndices[i];
+ int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
+ if (OtherSrcIdx < 0 || OtherSelIdx < 0)
+ continue;
+ if (HasDst) {
+ OtherSrcIdx--;
+ OtherSelIdx--;
+ }
+ if (RegisterSDNode *Reg =
+ dyn_cast<RegisterSDNode>(ParentNode->getOperand(OtherSrcIdx))) {
+ if (Reg->getReg() == AMDGPU::ALU_CONST) {
+ ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(
+ ParentNode->getOperand(OtherSelIdx));
+ Consts.push_back(Cst->getZExtValue());
}
+ }
+ }
+
+ ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
+ Consts.push_back(Cst->getZExtValue());
+ if (!TII->fitsConstReadLimitations(Consts)) {
+ return false;
+ }
+
+ Sel = CstOffset;
+ Src = DAG.getRegister(AMDGPU::ALU_CONST, MVT::f32);
+ return true;
+ }
+ case AMDGPU::MOV_IMM_I32:
+ case AMDGPU::MOV_IMM_F32: {
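+ // Immediates 0, 0.5 and 1.0 (or integer 0 and 1) map to the dedicated
+ // ZERO/HALF/ONE/ONE_INT registers; any other value has to be carried in the
+ // single ALU_LITERAL_X slot.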
+ unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
+ uint64_t ImmValue = 0;
+
+
+ if (Src.getMachineOpcode() == AMDGPU::MOV_IMM_F32) {
+ ConstantFPSDNode *FPC = dyn_cast<ConstantFPSDNode>(Src.getOperand(0));
+ float FloatValue = FPC->getValueAPF().convertToFloat();
+ if (FloatValue == 0.0) {
+ ImmReg = AMDGPU::ZERO;
+ } else if (FloatValue == 0.5) {
+ ImmReg = AMDGPU::HALF;
+ } else if (FloatValue == 1.0) {
+ ImmReg = AMDGPU::ONE;
+ } else {
+ ImmValue = FPC->getValueAPF().bitcastToAPInt().getZExtValue();
+ }
+ } else {
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src.getOperand(0));
+ uint64_t Value = C->getZExtValue();
+ if (Value == 0) {
+ ImmReg = AMDGPU::ZERO;
+ } else if (Value == 1) {
+ ImmReg = AMDGPU::ONE_INT;
} else {
- NewBldVec[i] = Arg.getOperand(i);
+ ImmValue = Value;
}
}
- DebugLoc DL = N->getDebugLoc();
- NewArgs[1] = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4f32, NewBldVec, 4);
- return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs, 8);
+
+ // Check that we aren't already using an immediate.
+ // XXX: It's possible for an instruction to have more than one
+ // immediate operand, but this is not supported yet.
+ if (ImmReg == AMDGPU::ALU_LITERAL_X) {
+ if (!Imm.getNode())
+ return false;
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Imm);
+ assert(C);
+ if (C->getZExtValue())
+ return false;
+ Imm = DAG.getTargetConstant(ImmValue, MVT::i32);
+ }
+ Src = DAG.getRegister(ImmReg, MVT::i32);
+ return true;
}
+ default:
+ return false;
}
- return SDValue();
+}
+
+
+/// \brief Fold the instructions after selecting them
+SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
+ SelectionDAG &DAG) const {
+ const R600InstrInfo *TII =
+ static_cast<const R600InstrInfo *>(DAG.getTarget().getInstrInfo());
+ if (!Node->isMachineOpcode())
+ return Node;
+ unsigned Opcode = Node->getMachineOpcode();
+ SDValue FakeOp;
+
+ std::vector<SDValue> Ops;
+ for(SDNode::op_iterator I = Node->op_begin(), E = Node->op_end();
+ I != E; ++I)
+ Ops.push_back(*I);
+
+ if (Opcode == AMDGPU::DOT_4) {
+ int OperandIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
+ };
+ int NegIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
+ };
+ int AbsIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
+ };
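+ // For each of the eight per-channel sources, try to fold neg/abs modifiers
+ // and constant sources directly into the operand list, re-emitting DOT_4 as
+ // soon as one operand is folded.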
+ for (unsigned i = 0; i < 8; i++) {
+ if (OperandIdx[i] < 0)
+ return Node;
+ SDValue &Src = Ops[OperandIdx[i] - 1];
+ SDValue &Neg = Ops[NegIdx[i] - 1];
+ SDValue &Abs = Ops[AbsIdx[i] - 1];
+ bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
+ int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
+ if (HasDst)
+ SelIdx--;
+ SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
+ if (FoldOperand(Node, i, Src, Neg, Abs, Sel, FakeOp, DAG))
+ return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
+ }
+ } else if (Opcode == AMDGPU::REG_SEQUENCE) {
+ for (unsigned i = 1, e = Node->getNumOperands(); i < e; i += 2) {
+ SDValue &Src = Ops[i];
+ if (FoldOperand(Node, i, Src, FakeOp, FakeOp, FakeOp, FakeOp, DAG))
+ return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
+ }
+ } else if (Opcode == AMDGPU::CLAMP_R600) {
+ SDValue Src = Node->getOperand(0);
+ if (!Src.isMachineOpcode() ||
+ !TII->hasInstrModifiers(Src.getMachineOpcode()))
+ return Node;
+ int ClampIdx = TII->getOperandIdx(Src.getMachineOpcode(),
+ AMDGPU::OpName::clamp);
+ if (ClampIdx < 0)
+ return Node;
+ std::vector<SDValue> Ops;
+ unsigned NumOp = Src.getNumOperands();
+ for(unsigned i = 0; i < NumOp; ++i)
+ Ops.push_back(Src.getOperand(i));
+ Ops[ClampIdx - 1] = DAG.getTargetConstant(1, MVT::i32);
+ return DAG.getMachineNode(Src.getMachineOpcode(), SDLoc(Node),
+ Node->getVTList(), Ops);
+ } else {
+ if (!TII->hasInstrModifiers(Opcode))
+ return Node;
+ int OperandIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
+ };
+ int NegIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
+ };
+ int AbsIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
+ -1
+ };
+ for (unsigned i = 0; i < 3; i++) {
+ if (OperandIdx[i] < 0)
+ return Node;
+ SDValue &Src = Ops[OperandIdx[i] - 1];
+ SDValue &Neg = Ops[NegIdx[i] - 1];
+ SDValue FakeAbs;
+ SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
+ bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
+ int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
+ int ImmIdx = TII->getOperandIdx(Opcode, AMDGPU::OpName::literal);
+ if (HasDst) {
+ SelIdx--;
+ ImmIdx--;
+ }
+ SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
+ SDValue &Imm = Ops[ImmIdx];
+ if (FoldOperand(Node, i, Src, Neg, Abs, Sel, Imm, DAG))
+ return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
+ }
+ }
+
+ return Node;
}
diff --git a/lib/Target/R600/R600ISelLowering.h b/lib/Target/R600/R600ISelLowering.h
index 2c09acb9af30..c10257eeadaa 100644
--- a/lib/Target/R600/R600ISelLowering.h
+++ b/lib/Target/R600/R600ISelLowering.h
@@ -36,37 +36,37 @@ public:
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- virtual EVT getSetCCResultType(EVT VT) const;
+ virtual EVT getSetCCResultType(LLVMContext &, EVT VT) const;
private:
- const R600InstrInfo * TII;
-
+ unsigned Gen;
/// Each OpenCL kernel has nine implicit parameters that are stored in the
/// first nine dwords of a Vertex Buffer. These implicit parameters are
/// lowered to load instructions which retrieve the values from the Vertex
/// Buffer.
SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
- DebugLoc DL, unsigned DwordOffset) const;
+ SDLoc DL, unsigned DwordOffset) const;
void lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB,
MachineRegisterInfo & MRI, unsigned dword_offset) const;
+ SDValue OptimizeSwizzle(SDValue BuildVector, SDValue Swz[], SelectionDAG &DAG) const;
/// \brief Lower ROTL opcode to BITALIGN
SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
SDValue stackPtrToRegIndex(SDValue Ptr, unsigned StackWidth,
SelectionDAG &DAG) const;
void getStackAddress(unsigned StackWidth, unsigned ElemIdx,
unsigned &Channel, unsigned &PtrIncr) const;
bool isZero(SDValue Op) const;
+ virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const;
};
} // End namespace llvm;
diff --git a/lib/Target/R600/R600InstrFormats.td b/lib/Target/R600/R600InstrFormats.td
new file mode 100644
index 000000000000..9428babcbefd
--- /dev/null
+++ b/lib/Target/R600/R600InstrFormats.td
@@ -0,0 +1,492 @@
+//===-- R600InstrFormats.td - R600 Instruction Encodings ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// R600 Instruction format definitions.
+//
+//===----------------------------------------------------------------------===//
+
+class InstR600 <dag outs, dag ins, string asm, list<dag> pattern,
+ InstrItinClass itin>
+ : AMDGPUInst <outs, ins, asm, pattern> {
+
+ field bits<64> Inst;
+ bit Trig = 0;
+ bit Op3 = 0;
+ bit isVector = 0;
+ bits<2> FlagOperandIdx = 0;
+ bit Op1 = 0;
+ bit Op2 = 0;
+ bit LDS_1A = 0;
+ bit LDS_1A1D = 0;
+ bit HasNativeOperands = 0;
+ bit VTXInst = 0;
+ bit TEXInst = 0;
+ bit ALUInst = 0;
+ bit IsExport = 0;
+ bit LDS_1A2D = 0;
+
+ let Namespace = "AMDGPU";
+ let OutOperandList = outs;
+ let InOperandList = ins;
+ let AsmString = asm;
+ let Pattern = pattern;
+ let Itinerary = itin;
+
+ let TSFlags{4} = Trig;
+ let TSFlags{5} = Op3;
+
+ // Vector instructions are instructions that must fill all slots in an
+ // instruction group
+ let TSFlags{6} = isVector;
+ let TSFlags{8-7} = FlagOperandIdx;
+ let TSFlags{9} = HasNativeOperands;
+ let TSFlags{10} = Op1;
+ let TSFlags{11} = Op2;
+ let TSFlags{12} = VTXInst;
+ let TSFlags{13} = TEXInst;
+ let TSFlags{14} = ALUInst;
+ let TSFlags{15} = LDS_1A;
+ let TSFlags{16} = LDS_1A1D;
+ let TSFlags{17} = IsExport;
+ let TSFlags{18} = LDS_1A2D;
+}
+
+//===----------------------------------------------------------------------===//
+// ALU instructions
+//===----------------------------------------------------------------------===//
+
+class R600_ALU_LDS_Word0 {
+ field bits<32> Word0;
+
+ bits<11> src0;
+ bits<1> src0_rel;
+ bits<11> src1;
+ bits<1> src1_rel;
+ bits<3> index_mode = 0;
+ bits<2> pred_sel;
+ bits<1> last;
+
+ bits<9> src0_sel = src0{8-0};
+ bits<2> src0_chan = src0{10-9};
+ bits<9> src1_sel = src1{8-0};
+ bits<2> src1_chan = src1{10-9};
+
+ let Word0{8-0} = src0_sel;
+ let Word0{9} = src0_rel;
+ let Word0{11-10} = src0_chan;
+ let Word0{21-13} = src1_sel;
+ let Word0{22} = src1_rel;
+ let Word0{24-23} = src1_chan;
+ let Word0{28-26} = index_mode;
+ let Word0{30-29} = pred_sel;
+ let Word0{31} = last;
+}
+
+class R600ALU_Word0 : R600_ALU_LDS_Word0 {
+
+ bits<1> src0_neg;
+ bits<1> src1_neg;
+
+ let Word0{12} = src0_neg;
+ let Word0{25} = src1_neg;
+}
+
+class R600ALU_Word1 {
+ field bits<32> Word1;
+
+ bits<11> dst;
+ bits<3> bank_swizzle;
+ bits<1> dst_rel;
+ bits<1> clamp;
+
+ bits<7> dst_sel = dst{6-0};
+ bits<2> dst_chan = dst{10-9};
+
+ let Word1{20-18} = bank_swizzle;
+ let Word1{27-21} = dst_sel;
+ let Word1{28} = dst_rel;
+ let Word1{30-29} = dst_chan;
+ let Word1{31} = clamp;
+}
+
+class R600ALU_Word1_OP2 <bits<11> alu_inst> : R600ALU_Word1{
+
+ bits<1> src0_abs;
+ bits<1> src1_abs;
+ bits<1> update_exec_mask;
+ bits<1> update_pred;
+ bits<1> write;
+ bits<2> omod;
+
+ let Word1{0} = src0_abs;
+ let Word1{1} = src1_abs;
+ let Word1{2} = update_exec_mask;
+ let Word1{3} = update_pred;
+ let Word1{4} = write;
+ let Word1{6-5} = omod;
+ let Word1{17-7} = alu_inst;
+}
+
+class R600ALU_Word1_OP3 <bits<5> alu_inst> : R600ALU_Word1{
+
+ bits<11> src2;
+ bits<1> src2_rel;
+ bits<1> src2_neg;
+
+ bits<9> src2_sel = src2{8-0};
+ bits<2> src2_chan = src2{10-9};
+
+ let Word1{8-0} = src2_sel;
+ let Word1{9} = src2_rel;
+ let Word1{11-10} = src2_chan;
+ let Word1{12} = src2_neg;
+ let Word1{17-13} = alu_inst;
+}
+
+class R600LDS_Word1 {
+ field bits<32> Word1;
+
+ bits<11> src2;
+ bits<9> src2_sel = src2{8-0};
+ bits<2> src2_chan = src2{10-9};
+ bits<1> src2_rel;
+ // offset specifies the stride offset to the second set of data to be read
+ // from. This is a dword offset.
+ bits<5> alu_inst = 17; // OP3_INST_LDS_IDX_OP
+ bits<3> bank_swizzle;
+ bits<6> lds_op;
+ bits<2> dst_chan = 0;
+
+ let Word1{8-0} = src2_sel;
+ let Word1{9} = src2_rel;
+ let Word1{11-10} = src2_chan;
+ let Word1{17-13} = alu_inst;
+ let Word1{20-18} = bank_swizzle;
+ let Word1{26-21} = lds_op;
+ let Word1{30-29} = dst_chan;
+}
+
+
+/*
+XXX: R600 subtarget uses a slightly different encoding than the other
+subtargets. We currently handle this in R600MCCodeEmitter, but we may
+want to use these instruction classes in the future.
+
+class R600ALU_Word1_OP2_r600 : R600ALU_Word1_OP2 {
+
+ bits<1> fog_merge;
+ bits<10> alu_inst;
+
+ let Inst{37} = fog_merge;
+ let Inst{39-38} = omod;
+ let Inst{49-40} = alu_inst;
+}
+
+class R600ALU_Word1_OP2_r700 : R600ALU_Word1_OP2 {
+
+ bits<11> alu_inst;
+
+ let Inst{38-37} = omod;
+ let Inst{49-39} = alu_inst;
+}
+*/
+
+//===----------------------------------------------------------------------===//
+// Vertex Fetch instructions
+//===----------------------------------------------------------------------===//
+
+class VTX_WORD0 {
+ field bits<32> Word0;
+ bits<7> src_gpr;
+ bits<5> VC_INST;
+ bits<2> FETCH_TYPE;
+ bits<1> FETCH_WHOLE_QUAD;
+ bits<8> BUFFER_ID;
+ bits<1> SRC_REL;
+ bits<2> SRC_SEL_X;
+
+ let Word0{4-0} = VC_INST;
+ let Word0{6-5} = FETCH_TYPE;
+ let Word0{7} = FETCH_WHOLE_QUAD;
+ let Word0{15-8} = BUFFER_ID;
+ let Word0{22-16} = src_gpr;
+ let Word0{23} = SRC_REL;
+ let Word0{25-24} = SRC_SEL_X;
+}
+
+class VTX_WORD0_eg : VTX_WORD0 {
+
+ bits<6> MEGA_FETCH_COUNT;
+
+ let Word0{31-26} = MEGA_FETCH_COUNT;
+}
+
+class VTX_WORD0_cm : VTX_WORD0 {
+
+ bits<2> SRC_SEL_Y;
+ bits<2> STRUCTURED_READ;
+ bits<1> LDS_REQ;
+ bits<1> COALESCED_READ;
+
+ let Word0{27-26} = SRC_SEL_Y;
+ let Word0{29-28} = STRUCTURED_READ;
+ let Word0{30} = LDS_REQ;
+ let Word0{31} = COALESCED_READ;
+}
+
+class VTX_WORD1_GPR {
+ field bits<32> Word1;
+ bits<7> dst_gpr;
+ bits<1> DST_REL;
+ bits<3> DST_SEL_X;
+ bits<3> DST_SEL_Y;
+ bits<3> DST_SEL_Z;
+ bits<3> DST_SEL_W;
+ bits<1> USE_CONST_FIELDS;
+ bits<6> DATA_FORMAT;
+ bits<2> NUM_FORMAT_ALL;
+ bits<1> FORMAT_COMP_ALL;
+ bits<1> SRF_MODE_ALL;
+
+ let Word1{6-0} = dst_gpr;
+ let Word1{7} = DST_REL;
+ let Word1{8} = 0; // Reserved
+ let Word1{11-9} = DST_SEL_X;
+ let Word1{14-12} = DST_SEL_Y;
+ let Word1{17-15} = DST_SEL_Z;
+ let Word1{20-18} = DST_SEL_W;
+ let Word1{21} = USE_CONST_FIELDS;
+ let Word1{27-22} = DATA_FORMAT;
+ let Word1{29-28} = NUM_FORMAT_ALL;
+ let Word1{30} = FORMAT_COMP_ALL;
+ let Word1{31} = SRF_MODE_ALL;
+}
+
+//===----------------------------------------------------------------------===//
+// Texture fetch instructions
+//===----------------------------------------------------------------------===//
+
+class TEX_WORD0 {
+ field bits<32> Word0;
+
+ bits<5> TEX_INST;
+ bits<2> INST_MOD;
+ bits<1> FETCH_WHOLE_QUAD;
+ bits<8> RESOURCE_ID;
+ bits<7> SRC_GPR;
+ bits<1> SRC_REL;
+ bits<1> ALT_CONST;
+ bits<2> RESOURCE_INDEX_MODE;
+ bits<2> SAMPLER_INDEX_MODE;
+
+ let Word0{4-0} = TEX_INST;
+ let Word0{6-5} = INST_MOD;
+ let Word0{7} = FETCH_WHOLE_QUAD;
+ let Word0{15-8} = RESOURCE_ID;
+ let Word0{22-16} = SRC_GPR;
+ let Word0{23} = SRC_REL;
+ let Word0{24} = ALT_CONST;
+ let Word0{26-25} = RESOURCE_INDEX_MODE;
+ let Word0{28-27} = SAMPLER_INDEX_MODE;
+}
+
+class TEX_WORD1 {
+ field bits<32> Word1;
+
+ bits<7> DST_GPR;
+ bits<1> DST_REL;
+ bits<3> DST_SEL_X;
+ bits<3> DST_SEL_Y;
+ bits<3> DST_SEL_Z;
+ bits<3> DST_SEL_W;
+ bits<7> LOD_BIAS;
+ bits<1> COORD_TYPE_X;
+ bits<1> COORD_TYPE_Y;
+ bits<1> COORD_TYPE_Z;
+ bits<1> COORD_TYPE_W;
+
+ let Word1{6-0} = DST_GPR;
+ let Word1{7} = DST_REL;
+ let Word1{11-9} = DST_SEL_X;
+ let Word1{14-12} = DST_SEL_Y;
+ let Word1{17-15} = DST_SEL_Z;
+ let Word1{20-18} = DST_SEL_W;
+ let Word1{27-21} = LOD_BIAS;
+ let Word1{28} = COORD_TYPE_X;
+ let Word1{29} = COORD_TYPE_Y;
+ let Word1{30} = COORD_TYPE_Z;
+ let Word1{31} = COORD_TYPE_W;
+}
+
+class TEX_WORD2 {
+ field bits<32> Word2;
+
+ bits<5> OFFSET_X;
+ bits<5> OFFSET_Y;
+ bits<5> OFFSET_Z;
+ bits<5> SAMPLER_ID;
+ bits<3> SRC_SEL_X;
+ bits<3> SRC_SEL_Y;
+ bits<3> SRC_SEL_Z;
+ bits<3> SRC_SEL_W;
+
+ let Word2{4-0} = OFFSET_X;
+ let Word2{9-5} = OFFSET_Y;
+ let Word2{14-10} = OFFSET_Z;
+ let Word2{19-15} = SAMPLER_ID;
+ let Word2{22-20} = SRC_SEL_X;
+ let Word2{25-23} = SRC_SEL_Y;
+ let Word2{28-26} = SRC_SEL_Z;
+ let Word2{31-29} = SRC_SEL_W;
+}
+
+//===----------------------------------------------------------------------===//
+// Control Flow Instructions
+//===----------------------------------------------------------------------===//
+
+class CF_WORD1_R600 {
+ field bits<32> Word1;
+
+ bits<3> POP_COUNT;
+ bits<5> CF_CONST;
+ bits<2> COND;
+ bits<3> COUNT;
+ bits<6> CALL_COUNT;
+ bits<1> COUNT_3;
+ bits<1> END_OF_PROGRAM;
+ bits<1> VALID_PIXEL_MODE;
+ bits<7> CF_INST;
+ bits<1> WHOLE_QUAD_MODE;
+ bits<1> BARRIER;
+
+ let Word1{2-0} = POP_COUNT;
+ let Word1{7-3} = CF_CONST;
+ let Word1{9-8} = COND;
+ let Word1{12-10} = COUNT;
+ let Word1{18-13} = CALL_COUNT;
+ let Word1{19} = COUNT_3;
+ let Word1{21} = END_OF_PROGRAM;
+ let Word1{22} = VALID_PIXEL_MODE;
+ let Word1{29-23} = CF_INST;
+ let Word1{30} = WHOLE_QUAD_MODE;
+ let Word1{31} = BARRIER;
+}
+
+class CF_WORD0_EG {
+ field bits<32> Word0;
+
+ bits<24> ADDR;
+ bits<3> JUMPTABLE_SEL;
+
+ let Word0{23-0} = ADDR;
+ let Word0{26-24} = JUMPTABLE_SEL;
+}
+
+class CF_WORD1_EG {
+ field bits<32> Word1;
+
+ bits<3> POP_COUNT;
+ bits<5> CF_CONST;
+ bits<2> COND;
+ bits<6> COUNT;
+ bits<1> VALID_PIXEL_MODE;
+ bits<1> END_OF_PROGRAM;
+ bits<8> CF_INST;
+ bits<1> BARRIER;
+
+ let Word1{2-0} = POP_COUNT;
+ let Word1{7-3} = CF_CONST;
+ let Word1{9-8} = COND;
+ let Word1{15-10} = COUNT;
+ let Word1{20} = VALID_PIXEL_MODE;
+ let Word1{21} = END_OF_PROGRAM;
+ let Word1{29-22} = CF_INST;
+ let Word1{31} = BARRIER;
+}
+
+class CF_ALU_WORD0 {
+ field bits<32> Word0;
+
+ bits<22> ADDR;
+ bits<4> KCACHE_BANK0;
+ bits<4> KCACHE_BANK1;
+ bits<2> KCACHE_MODE0;
+
+ let Word0{21-0} = ADDR;
+ let Word0{25-22} = KCACHE_BANK0;
+ let Word0{29-26} = KCACHE_BANK1;
+ let Word0{31-30} = KCACHE_MODE0;
+}
+
+class CF_ALU_WORD1 {
+ field bits<32> Word1;
+
+ bits<2> KCACHE_MODE1;
+ bits<8> KCACHE_ADDR0;
+ bits<8> KCACHE_ADDR1;
+ bits<7> COUNT;
+ bits<1> ALT_CONST;
+ bits<4> CF_INST;
+ bits<1> WHOLE_QUAD_MODE;
+ bits<1> BARRIER;
+
+ let Word1{1-0} = KCACHE_MODE1;
+ let Word1{9-2} = KCACHE_ADDR0;
+ let Word1{17-10} = KCACHE_ADDR1;
+ let Word1{24-18} = COUNT;
+ let Word1{25} = ALT_CONST;
+ let Word1{29-26} = CF_INST;
+ let Word1{30} = WHOLE_QUAD_MODE;
+ let Word1{31} = BARRIER;
+}
+
+class CF_ALLOC_EXPORT_WORD0_RAT {
+ field bits<32> Word0;
+
+ bits<4> rat_id;
+ bits<6> rat_inst;
+ bits<2> rim;
+ bits<2> type;
+ bits<7> rw_gpr;
+ bits<1> rw_rel;
+ bits<7> index_gpr;
+ bits<2> elem_size;
+
+ let Word0{3-0} = rat_id;
+ let Word0{9-4} = rat_inst;
+ let Word0{10} = 0; // Reserved
+ let Word0{12-11} = rim;
+ let Word0{14-13} = type;
+ let Word0{21-15} = rw_gpr;
+ let Word0{22} = rw_rel;
+ let Word0{29-23} = index_gpr;
+ let Word0{31-30} = elem_size;
+}
+
+class CF_ALLOC_EXPORT_WORD1_BUF {
+ field bits<32> Word1;
+
+ bits<12> array_size;
+ bits<4> comp_mask;
+ bits<4> burst_count;
+ bits<1> vpm;
+ bits<1> eop;
+ bits<8> cf_inst;
+ bits<1> mark;
+ bits<1> barrier;
+
+ let Word1{11-0} = array_size;
+ let Word1{15-12} = comp_mask;
+ let Word1{19-16} = burst_count;
+ let Word1{20} = vpm;
+ let Word1{21} = eop;
+ let Word1{29-22} = cf_inst;
+ let Word1{30} = mark;
+ let Word1{31} = barrier;
+}
diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp
index 37150c430d2c..c0827fc1ca40 100644
--- a/lib/Target/R600/R600InstrInfo.cpp
+++ b/lib/Target/R600/R600InstrInfo.cpp
@@ -19,18 +19,18 @@
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"
using namespace llvm;
R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
: AMDGPUInstrInfo(tm),
- RI(tm, *this),
+ RI(tm),
ST(tm.getSubtarget<AMDGPUSubtarget>())
{ }
@@ -51,9 +51,17 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
- if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
- && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
- for (unsigned I = 0; I < 4; I++) {
+ unsigned VectorComponents = 0;
+ if (AMDGPU::R600_Reg128RegClass.contains(DestReg) &&
+ AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
+ VectorComponents = 4;
+ } else if(AMDGPU::R600_Reg64RegClass.contains(DestReg) &&
+ AMDGPU::R600_Reg64RegClass.contains(SrcReg)) {
+ VectorComponents = 2;
+ }
+
+ if (VectorComponents > 0) {
+ for (unsigned I = 0; I < VectorComponents; I++) {
unsigned SubRegIndex = RI.getSubRegFromChannel(I);
buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
RI.getSubReg(DestReg, SubRegIndex),
@@ -62,28 +70,23 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
RegState::Define | RegState::Implicit);
}
} else {
-
- // We can't copy vec4 registers
- assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
- && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));
-
MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
DestReg, SrcReg);
- NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
+ NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
.setIsKill(KillSrc);
}
}
-MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
- unsigned DstReg, int64_t Imm) const {
- MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
- MachineInstrBuilder MIB(*MF, MI);
- MIB.addReg(DstReg, RegState::Define);
- MIB.addReg(AMDGPU::ALU_LITERAL_X);
- MIB.addImm(Imm);
- MIB.addReg(0); // PREDICATE_BIT
-
- return MI;
+/// \returns true if \p MBBI can be moved into a new basic block.
+bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const {
+ for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
+ E = MBBI->operands_end(); I != E; ++I) {
+ if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
+ I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
+ return false;
+ }
+ return true;
}
unsigned R600InstrInfo::getIEQOpcode() const {
@@ -114,12 +117,7 @@ bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
}
bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
- switch(Opcode) {
- default: return false;
- case AMDGPU::DOT4_r600_pseudo:
- case AMDGPU::DOT4_eg_pseudo:
- return true;
- }
+ return false;
}
bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
@@ -136,19 +134,73 @@ bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
unsigned TargetFlags = get(Opcode).TSFlags;
+ return (TargetFlags & R600_InstFlag::ALU_INST);
+}
+
+bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
+ unsigned TargetFlags = get(Opcode).TSFlags;
+
return ((TargetFlags & R600_InstFlag::OP1) |
(TargetFlags & R600_InstFlag::OP2) |
(TargetFlags & R600_InstFlag::OP3));
}
+bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
+ unsigned TargetFlags = get(Opcode).TSFlags;
+
+ return ((TargetFlags & R600_InstFlag::LDS_1A) |
+ (TargetFlags & R600_InstFlag::LDS_1A1D) |
+ (TargetFlags & R600_InstFlag::LDS_1A2D));
+}
+
+bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
+ return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
+}
+
+bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
+ return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
+}
+
+bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
+ if (isALUInstr(MI->getOpcode()))
+ return true;
+ if (isVector(*MI) || isCubeOp(MI->getOpcode()))
+ return true;
+ switch (MI->getOpcode()) {
+ case AMDGPU::PRED_X:
+ case AMDGPU::INTERP_PAIR_XY:
+ case AMDGPU::INTERP_PAIR_ZW:
+ case AMDGPU::INTERP_VEC_LOAD:
+ case AMDGPU::COPY:
+ case AMDGPU::DOT_4:
+ return true;
+ default:
+ return false;
+ }
+}
+
bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
- return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY);
+ if (ST.hasCaymanISA())
+ return false;
+ return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}
bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
return isTransOnly(MI->getOpcode());
}
+bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
+ return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
+}
+
+bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
+ return isVectorOnly(MI->getOpcode());
+}
+
+bool R600InstrInfo::isExport(unsigned Opcode) const {
+ return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
+}
+
bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
return ST.hasVertexCache() && IS_VTX(get(Opcode));
}
@@ -168,6 +220,380 @@ bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
usesTextureCache(MI->getOpcode());
}
+bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
+ switch (Opcode) {
+ case AMDGPU::KILLGT:
+ case AMDGPU::GROUP_BARRIER:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
+ return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
+}
+
+bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
+ return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
+}
+
+bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
+ if (!isALUInstr(MI->getOpcode())) {
+ return false;
+ }
+ for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
+ E = MI->operands_end(); I != E; ++I) {
+ if (!I->isReg() || !I->isUse() ||
+ TargetRegisterInfo::isVirtualRegister(I->getReg()))
+ continue;
+
+ if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
+ return true;
+ }
+ return false;
+}
+
+int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
+ static const unsigned OpTable[] = {
+ AMDGPU::OpName::src0,
+ AMDGPU::OpName::src1,
+ AMDGPU::OpName::src2
+ };
+
+ assert (SrcNum < 3);
+ return getOperandIdx(Opcode, OpTable[SrcNum]);
+}
+
+#define SRC_SEL_ROWS 11
+int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
+ static const unsigned SrcSelTable[SRC_SEL_ROWS][2] = {
+ {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
+ {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
+ {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
+ {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
+ {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
+ {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
+ {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
+ {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
+ {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
+ {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
+ {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
+ };
+
+ for (unsigned i = 0; i < SRC_SEL_ROWS; ++i) {
+ if (getOperandIdx(Opcode, SrcSelTable[i][0]) == (int)SrcIdx) {
+ return getOperandIdx(Opcode, SrcSelTable[i][1]);
+ }
+ }
+ return -1;
+}
+#undef SRC_SEL_ROWS
+
+SmallVector<std::pair<MachineOperand *, int64_t>, 3>
+R600InstrInfo::getSrcs(MachineInstr *MI) const {
+ SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
+
+ if (MI->getOpcode() == AMDGPU::DOT_4) {
+ static const unsigned OpTable[8][2] = {
+ {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
+ {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
+ {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
+ {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
+ {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
+ {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
+ {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
+ {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
+ };
+
+ for (unsigned j = 0; j < 8; j++) {
+ MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
+ OpTable[j][0]));
+ unsigned Reg = MO.getReg();
+ if (Reg == AMDGPU::ALU_CONST) {
+ unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
+ OpTable[j][1])).getImm();
+ Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
+ continue;
+ }
+
+ }
+ return Result;
+ }
+
+ static const unsigned OpTable[3][2] = {
+ {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
+ {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
+ {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
+ };
+
+ for (unsigned j = 0; j < 3; j++) {
+ int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
+ if (SrcIdx < 0)
+ break;
+ MachineOperand &MO = MI->getOperand(SrcIdx);
+ unsigned Reg = MI->getOperand(SrcIdx).getReg();
+ if (Reg == AMDGPU::ALU_CONST) {
+ unsigned Sel = MI->getOperand(
+ getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
+ Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
+ continue;
+ }
+ if (Reg == AMDGPU::ALU_LITERAL_X) {
+ unsigned Imm = MI->getOperand(
+ getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
+ Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
+ continue;
+ }
+ Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
+ }
+ return Result;
+}
+
+std::vector<std::pair<int, unsigned> >
+R600InstrInfo::ExtractSrcs(MachineInstr *MI,
+ const DenseMap<unsigned, unsigned> &PV,
+ unsigned &ConstCount) const {
+ ConstCount = 0;
+ const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
+ const std::pair<int, unsigned> DummyPair(-1, 0);
+ std::vector<std::pair<int, unsigned> > Result;
+ unsigned i = 0;
+ for (unsigned n = Srcs.size(); i < n; ++i) {
+ unsigned Reg = Srcs[i].first->getReg();
+ unsigned Index = RI.getEncodingValue(Reg) & 0xff;
+ if (Reg == AMDGPU::OQAP) {
+ Result.push_back(std::pair<int, unsigned>(Index, 0));
+ }
+ if (PV.find(Reg) != PV.end()) {
+      // 255 is used to tell it's a PS/PV reg
+ Result.push_back(std::pair<int, unsigned>(255, 0));
+ continue;
+ }
+ if (Index > 127) {
+ ConstCount++;
+ Result.push_back(DummyPair);
+ continue;
+ }
+ unsigned Chan = RI.getHWRegChan(Reg);
+ Result.push_back(std::pair<int, unsigned>(Index, Chan));
+ }
+ for (; i < 3; ++i)
+ Result.push_back(DummyPair);
+ return Result;
+}
+
+static std::vector<std::pair<int, unsigned> >
+Swizzle(std::vector<std::pair<int, unsigned> > Src,
+ R600InstrInfo::BankSwizzle Swz) {
+ if (Src[0] == Src[1])
+ Src[1].first = -1;
+ switch (Swz) {
+ case R600InstrInfo::ALU_VEC_012_SCL_210:
+ break;
+ case R600InstrInfo::ALU_VEC_021_SCL_122:
+ std::swap(Src[1], Src[2]);
+ break;
+ case R600InstrInfo::ALU_VEC_102_SCL_221:
+ std::swap(Src[0], Src[1]);
+ break;
+ case R600InstrInfo::ALU_VEC_120_SCL_212:
+ std::swap(Src[0], Src[1]);
+ std::swap(Src[0], Src[2]);
+ break;
+ case R600InstrInfo::ALU_VEC_201:
+ std::swap(Src[0], Src[2]);
+ std::swap(Src[0], Src[1]);
+ break;
+ case R600InstrInfo::ALU_VEC_210:
+ std::swap(Src[0], Src[2]);
+ break;
+ }
+ return Src;
+}
+
+static unsigned
+getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
+ switch (Swz) {
+ case R600InstrInfo::ALU_VEC_012_SCL_210: {
+ unsigned Cycles[3] = { 2, 1, 0};
+ return Cycles[Op];
+ }
+ case R600InstrInfo::ALU_VEC_021_SCL_122: {
+ unsigned Cycles[3] = { 1, 2, 2};
+ return Cycles[Op];
+ }
+ case R600InstrInfo::ALU_VEC_120_SCL_212: {
+ unsigned Cycles[3] = { 2, 1, 2};
+ return Cycles[Op];
+ }
+ case R600InstrInfo::ALU_VEC_102_SCL_221: {
+ unsigned Cycles[3] = { 2, 2, 1};
+ return Cycles[Op];
+ }
+ default:
+ llvm_unreachable("Wrong Swizzle for Trans Slot");
+ return 0;
+ }
+}
+
+/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
+/// in the same Instruction Group while meeting read port limitations given a
+/// Swz swizzle sequence.
+unsigned R600InstrInfo::isLegalUpTo(
+ const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+ const std::vector<R600InstrInfo::BankSwizzle> &Swz,
+ const std::vector<std::pair<int, unsigned> > &TransSrcs,
+ R600InstrInfo::BankSwizzle TransSwz) const {
+ int Vector[4][3];
+ memset(Vector, -1, sizeof(Vector));
+ for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
+ const std::vector<std::pair<int, unsigned> > &Srcs =
+ Swizzle(IGSrcs[i], Swz[i]);
+ for (unsigned j = 0; j < 3; j++) {
+ const std::pair<int, unsigned> &Src = Srcs[j];
+ if (Src.first < 0 || Src.first == 255)
+ continue;
+ if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
+ if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
+ Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
+ // The value from output queue A (denoted by register OQAP) can
+ // only be fetched during the first cycle.
+ return false;
+ }
+ // OQAP does not count towards the normal read port restrictions
+ continue;
+ }
+ if (Vector[Src.second][j] < 0)
+ Vector[Src.second][j] = Src.first;
+ if (Vector[Src.second][j] != Src.first)
+ return i;
+ }
+ }
+ // Now check Trans Alu
+ for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
+ const std::pair<int, unsigned> &Src = TransSrcs[i];
+ unsigned Cycle = getTransSwizzle(TransSwz, i);
+ if (Src.first < 0)
+ continue;
+ if (Src.first == 255)
+ continue;
+ if (Vector[Src.second][Cycle] < 0)
+ Vector[Src.second][Cycle] = Src.first;
+ if (Vector[Src.second][Cycle] != Src.first)
+ return IGSrcs.size() - 1;
+ }
+ return IGSrcs.size();
+}
+
+/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
+/// (in lexicographic order) swizzle sequence, assuming that all swizzles after
+/// Idx can be skipped.
+static bool
+NextPossibleSolution(
+ std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
+ unsigned Idx) {
+ assert(Idx < SwzCandidate.size());
+ int ResetIdx = Idx;
+ while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
+ ResetIdx --;
+ for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
+ SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
+ }
+ if (ResetIdx == -1)
+ return false;
+ int NextSwizzle = SwzCandidate[ResetIdx] + 1;
+ SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
+ return true;
+}
+
+/// Enumerate all possible swizzle sequences to find one that can meet all
+/// read port requirements.
+bool R600InstrInfo::FindSwizzleForVectorSlot(
+ const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+ std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
+ const std::vector<std::pair<int, unsigned> > &TransSrcs,
+ R600InstrInfo::BankSwizzle TransSwz) const {
+ unsigned ValidUpTo = 0;
+ do {
+ ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
+ if (ValidUpTo == IGSrcs.size())
+ return true;
+ } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
+ return false;
+}
+
+/// Instructions in the Trans slot can't read a gpr at cycle 0 if they also
+/// read a const, and can't read a gpr at cycle 1 if they read 2 consts.
+static bool
+isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
+ const std::vector<std::pair<int, unsigned> > &TransOps,
+ unsigned ConstCount) {
+ // TransALU can't read 3 constants
+ if (ConstCount > 2)
+ return false;
+ for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
+ const std::pair<int, unsigned> &Src = TransOps[i];
+ unsigned Cycle = getTransSwizzle(TransSwz, i);
+ if (Src.first < 0)
+ continue;
+ if (ConstCount > 0 && Cycle == 0)
+ return false;
+ if (ConstCount > 1 && Cycle == 1)
+ return false;
+ }
+ return true;
+}
+
+bool
+R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
+ const DenseMap<unsigned, unsigned> &PV,
+ std::vector<BankSwizzle> &ValidSwizzle,
+ bool isLastAluTrans)
+ const {
+ //Todo : support shared src0 - src1 operand
+
+ std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
+ ValidSwizzle.clear();
+ unsigned ConstCount;
+ BankSwizzle TransBS = ALU_VEC_012_SCL_210;
+ for (unsigned i = 0, e = IG.size(); i < e; ++i) {
+ IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
+ unsigned Op = getOperandIdx(IG[i]->getOpcode(),
+ AMDGPU::OpName::bank_swizzle);
+ ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
+ IG[i]->getOperand(Op).getImm());
+ }
+ std::vector<std::pair<int, unsigned> > TransOps;
+ if (!isLastAluTrans)
+ return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);
+
+ TransOps = IGSrcs.back();
+ IGSrcs.pop_back();
+ ValidSwizzle.pop_back();
+
+ static const R600InstrInfo::BankSwizzle TransSwz[] = {
+ ALU_VEC_012_SCL_210,
+ ALU_VEC_021_SCL_122,
+ ALU_VEC_120_SCL_212,
+ ALU_VEC_102_SCL_221
+ };
+ for (unsigned i = 0; i < 4; i++) {
+ TransBS = TransSwz[i];
+ if (!isConstCompatible(TransBS, TransOps, ConstCount))
+ continue;
+ bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
+ TransBS);
+ if (Result) {
+ ValidSwizzle.push_back(TransBS);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
const {
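(Editor's note: FindSwizzleForVectorSlot above is a plain lexicographic search
over the per-slot bank swizzles: isLegalUpTo validates a prefix of the
candidate, and NextPossibleSolution advances the candidate like an odometer at
the first position that failed. A self-contained C++ sketch of that increment
step, assuming six swizzle values as in the BankSwizzle enum; the helper name
is hypothetical and not part of the patch:

  #include <cassert>
  #include <vector>

  static const int NumSwizzles = 6; // ALU_VEC_012_SCL_210 .. ALU_VEC_210

  // Advance Swz to the next candidate after a failure at position Idx.
  // Returns false once the whole search space has been exhausted.
  bool nextCandidate(std::vector<int> &Swz, unsigned Idx) {
    assert(Idx < Swz.size());
    int Reset = Idx;
    while (Reset >= 0 && Swz[Reset] == NumSwizzles - 1)
      --Reset;                      // skip digits already at their maximum
    for (unsigned i = Reset + 1, e = Swz.size(); i < e; ++i)
      Swz[i] = 0;                   // reset the tail to the smallest swizzle
    if (Reset < 0)
      return false;                 // search space exhausted
    ++Swz[Reset];
    return true;
  }

Resetting everything after the failing digit discards all candidates that share
the rejected prefix, which is what keeps the enumeration tractable.)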
@@ -194,37 +620,31 @@ R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
}
bool
-R600InstrInfo::canBundle(const std::vector<MachineInstr *> &MIs) const {
+R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
+ const {
std::vector<unsigned> Consts;
+ SmallSet<int64_t, 4> Literals;
for (unsigned i = 0, n = MIs.size(); i < n; i++) {
- const MachineInstr *MI = MIs[i];
-
- const R600Operands::Ops OpTable[3][2] = {
- {R600Operands::SRC0, R600Operands::SRC0_SEL},
- {R600Operands::SRC1, R600Operands::SRC1_SEL},
- {R600Operands::SRC2, R600Operands::SRC2_SEL},
- };
-
+ MachineInstr *MI = MIs[i];
if (!isALUInstr(MI->getOpcode()))
continue;
- for (unsigned j = 0; j < 3; j++) {
- int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
- if (SrcIdx < 0)
- break;
- unsigned Reg = MI->getOperand(SrcIdx).getReg();
- if (Reg == AMDGPU::ALU_CONST) {
- unsigned Const = MI->getOperand(
- getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
- Consts.push_back(Const);
- continue;
- }
- if (AMDGPU::R600_KC0RegClass.contains(Reg) ||
- AMDGPU::R600_KC1RegClass.contains(Reg)) {
- unsigned Index = RI.getEncodingValue(Reg) & 0xff;
- unsigned Chan = RI.getHWRegChan(Reg);
+ const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs =
+ getSrcs(MI);
+
+ for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
+ std::pair<MachineOperand *, unsigned> Src = Srcs[j];
+ if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
+ Literals.insert(Src.second);
+ if (Literals.size() > 4)
+ return false;
+ if (Src.first->getReg() == AMDGPU::ALU_CONST)
+ Consts.push_back(Src.second);
+ if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
+ AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
+ unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
+ unsigned Chan = RI.getHWRegChan(Src.first->getReg());
Consts.push_back((Index << 2) | Chan);
- continue;
}
}
}
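(Editor's note: the rewritten fitsConstReadLimitations above rejects a group as
soon as it would read more than four distinct literal values, and folds KC0/KC1
register reads into a constant index of the form (Index << 2) | Chan before
deferring to the index-based overload. A hedged usage sketch of how a clause
builder might use it as a gate; TII, Candidates and the surrounding pass are
hypothetical and not part of this patch:

  std::vector<MachineInstr *> Clause;
  for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
    Clause.push_back(Candidates[i]);
    if (!TII->fitsConstReadLimitations(Clause)) {
      Clause.pop_back();  // this instruction would overflow the clause's
      break;              // constant-read budget, so close the clause here
    }
  }

Only the fitsConstReadLimitations call itself is taken from the patch;
everything around it is illustrative.)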
@@ -265,6 +685,11 @@ bool isJump(unsigned Opcode) {
return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}
+static bool isBranch(unsigned Opcode) {
+ return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
+ Opcode == AMDGPU::BRANCH_COND_f32;
+}
+
bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
@@ -283,6 +708,10 @@ R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return false;
--I;
}
+ // AMDGPU::BRANCH* instructions are only available after isel and are not
+ // handled
+ if (isBranch(I->getOpcode()))
+ return true;
if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
return false;
}
@@ -343,6 +772,17 @@ int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
};
}
+static
+MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
+ for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
+ It != E; ++It) {
+ if (It->getOpcode() == AMDGPU::CF_ALU ||
+ It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
+ return llvm::prior(It.base());
+ }
+ return MBB.end();
+}
+
unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
@@ -364,6 +804,11 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
.addMBB(TBB)
.addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ return 1;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
return 1;
}
} else {
@@ -375,6 +820,11 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
.addMBB(TBB)
.addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ return 2;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
return 2;
}
}
@@ -398,6 +848,11 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
clearFlag(predSet, 0, MO_FLAG_PUSH);
I->eraseFromParent();
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ break;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU));
break;
}
case AMDGPU::JUMP:
@@ -418,6 +873,11 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
clearFlag(predSet, 0, MO_FLAG_PUSH);
I->eraseFromParent();
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ break;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU));
break;
}
case AMDGPU::JUMP:
@@ -452,6 +912,15 @@ R600InstrInfo::isPredicable(MachineInstr *MI) const {
if (MI->getOpcode() == AMDGPU::KILLGT) {
return false;
+ } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
+ // If the clause start in the middle of MBB then the MBB has more
+ // than a single clause, unable to predicate several clauses.
+ if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI))
+ return false;
+ // TODO: We don't support KC merging atm
+ if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0)
+ return false;
+ return true;
} else if (isVector(*MI)) {
return false;
} else {
@@ -547,6 +1016,25 @@ R600InstrInfo::PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const {
int PIdx = MI->findFirstPredOperandIdx();
+ if (MI->getOpcode() == AMDGPU::CF_ALU) {
+ MI->getOperand(8).setImm(0);
+ return true;
+ }
+
+ if (MI->getOpcode() == AMDGPU::DOT_4) {
+ MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_X))
+ .setReg(Pred[2].getReg());
+ MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Y))
+ .setReg(Pred[2].getReg());
+ MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Z))
+ .setReg(Pred[2].getReg());
+ MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_W))
+ .setReg(Pred[2].getReg());
+ MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
+ MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
+ return true;
+ }
+
if (PIdx != -1) {
MachineOperand &PMO = MI->getOperand(PIdx);
PMO.setReg(Pred[2].getReg());
@@ -558,6 +1046,10 @@ R600InstrInfo::PredicateInstruction(MachineInstr *MI,
return false;
}
+unsigned int R600InstrInfo::getPredicationCost(const MachineInstr *) const {
+ return 2;
+}
+
unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost) const {
@@ -566,67 +1058,25 @@ unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
return 2;
}
-int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- int Offset = 0;
-
- if (MFI->getNumObjects() == 0) {
- return -1;
- }
-
- if (MRI.livein_empty()) {
- return 0;
- }
-
- for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
- LE = MRI.livein_end();
- LI != LE; ++LI) {
- Offset = std::max(Offset,
- GET_REG_INDEX(RI.getEncodingValue(LI->first)));
- }
-
- return Offset + 1;
-}
-
-int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
- int Offset = 0;
- const MachineFrameInfo *MFI = MF.getFrameInfo();
-
- // Variable sized objects are not supported
- assert(!MFI->hasVarSizedObjects());
-
- if (MFI->getNumObjects() == 0) {
- return -1;
- }
-
- Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);
-
- return getIndirectIndexBegin(MF) + Offset;
-}
-
-std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
+void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
const MachineFunction &MF) const {
const AMDGPUFrameLowering *TFL =
static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
- std::vector<unsigned> Regs;
unsigned StackWidth = TFL->getStackWidth(MF);
int End = getIndirectIndexEnd(MF);
- if (End == -1) {
- return Regs;
- }
+ if (End == -1)
+ return;
for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
- Regs.push_back(SuperReg);
+ Reserved.set(SuperReg);
for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
- Regs.push_back(Reg);
+ Reserved.set(Reg);
}
}
- return Regs;
}
unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
@@ -636,13 +1086,8 @@ unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
return RegIndex;
}
-const TargetRegisterClass * R600InstrInfo::getIndirectAddrStoreRegClass(
- unsigned SourceReg) const {
- return &AMDGPU::R600_TReg32RegClass;
-}
-
-const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
- return &AMDGPU::TRegMemRegClass;
+const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
+ return &AMDGPU::R600_TReg32_XRegClass;
}
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
@@ -652,12 +1097,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
AMDGPU::AR_X, OffsetReg);
- setImmOperand(MOVA, R600Operands::WRITE, 0);
+ setImmOperand(MOVA, AMDGPU::OpName::write, 0);
MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
AddrReg, ValueReg)
- .addReg(AMDGPU::AR_X, RegState::Implicit);
- setImmOperand(Mov, R600Operands::DST_REL, 1);
+ .addReg(AMDGPU::AR_X,
+ RegState::Implicit | RegState::Kill);
+ setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
return Mov;
}
@@ -669,20 +1115,17 @@ MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
AMDGPU::AR_X,
OffsetReg);
- setImmOperand(MOVA, R600Operands::WRITE, 0);
+ setImmOperand(MOVA, AMDGPU::OpName::write, 0);
MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
ValueReg,
AddrReg)
- .addReg(AMDGPU::AR_X, RegState::Implicit);
- setImmOperand(Mov, R600Operands::SRC0_REL, 1);
+ .addReg(AMDGPU::AR_X,
+ RegState::Implicit | RegState::Kill);
+ setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);
return Mov;
}
-const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
- return &AMDGPU::IndirectRegRegClass;
-}
-
unsigned R600InstrInfo::getMaxAlusPerClause() const {
return 115;
}
@@ -728,52 +1171,118 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB
return MIB;
}
+#define OPERAND_CASE(Label) \
+ case Label: { \
+ static const unsigned Ops[] = \
+ { \
+ Label##_X, \
+ Label##_Y, \
+ Label##_Z, \
+ Label##_W \
+ }; \
+ return Ops[Slot]; \
+ }
+
+static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
+ switch (Op) {
+ OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
+ OPERAND_CASE(AMDGPU::OpName::update_pred)
+ OPERAND_CASE(AMDGPU::OpName::write)
+ OPERAND_CASE(AMDGPU::OpName::omod)
+ OPERAND_CASE(AMDGPU::OpName::dst_rel)
+ OPERAND_CASE(AMDGPU::OpName::clamp)
+ OPERAND_CASE(AMDGPU::OpName::src0)
+ OPERAND_CASE(AMDGPU::OpName::src0_neg)
+ OPERAND_CASE(AMDGPU::OpName::src0_rel)
+ OPERAND_CASE(AMDGPU::OpName::src0_abs)
+ OPERAND_CASE(AMDGPU::OpName::src0_sel)
+ OPERAND_CASE(AMDGPU::OpName::src1)
+ OPERAND_CASE(AMDGPU::OpName::src1_neg)
+ OPERAND_CASE(AMDGPU::OpName::src1_rel)
+ OPERAND_CASE(AMDGPU::OpName::src1_abs)
+ OPERAND_CASE(AMDGPU::OpName::src1_sel)
+ OPERAND_CASE(AMDGPU::OpName::pred_sel)
+ default:
+ llvm_unreachable("Wrong Operand");
+ }
+}
+
+#undef OPERAND_CASE
+
+MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
+ MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
+ const {
+ assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
+ unsigned Opcode;
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() <= AMDGPUSubtarget::R700)
+ Opcode = AMDGPU::DOT4_r600;
+ else
+ Opcode = AMDGPU::DOT4_eg;
+ MachineBasicBlock::iterator I = MI;
+ MachineOperand &Src0 = MI->getOperand(
+ getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
+ MachineOperand &Src1 = MI->getOperand(
+ getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
+ MachineInstr *MIB = buildDefaultInstruction(
+ MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
+ static const unsigned Operands[14] = {
+ AMDGPU::OpName::update_exec_mask,
+ AMDGPU::OpName::update_pred,
+ AMDGPU::OpName::write,
+ AMDGPU::OpName::omod,
+ AMDGPU::OpName::dst_rel,
+ AMDGPU::OpName::clamp,
+ AMDGPU::OpName::src0_neg,
+ AMDGPU::OpName::src0_rel,
+ AMDGPU::OpName::src0_abs,
+ AMDGPU::OpName::src0_sel,
+ AMDGPU::OpName::src1_neg,
+ AMDGPU::OpName::src1_rel,
+ AMDGPU::OpName::src1_abs,
+ AMDGPU::OpName::src1_sel,
+ };
+
+ MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
+ getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
+ MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
+ .setReg(MO.getReg());
+
+ for (unsigned i = 0; i < 14; i++) {
+ MachineOperand &MO = MI->getOperand(
+ getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
+ assert (MO.isImm());
+ setImmOperand(MIB, Operands[i], MO.getImm());
+ }
+ MIB->getOperand(20).setImm(0);
+ return MIB;
+}
+
MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
unsigned DstReg,
uint64_t Imm) const {
MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
AMDGPU::ALU_LITERAL_X);
- setImmOperand(MovImm, R600Operands::IMM, Imm);
+ setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
return MovImm;
}
-int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
- R600Operands::Ops Op) const {
- return getOperandIdx(MI.getOpcode(), Op);
+MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, unsigned SrcReg) const {
+ return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}
-int R600InstrInfo::getOperandIdx(unsigned Opcode,
- R600Operands::Ops Op) const {
- unsigned TargetFlags = get(Opcode).TSFlags;
- unsigned OpTableIdx;
-
- if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
- switch (Op) {
- case R600Operands::DST: return 0;
- case R600Operands::SRC0: return 1;
- case R600Operands::SRC1: return 2;
- case R600Operands::SRC2: return 3;
- default:
- assert(!"Unknown operand type for instruction");
- return -1;
- }
- }
-
- if (TargetFlags & R600_InstFlag::OP1) {
- OpTableIdx = 0;
- } else if (TargetFlags & R600_InstFlag::OP2) {
- OpTableIdx = 1;
- } else {
- assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
- "for this instruction");
- OpTableIdx = 2;
- }
+int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
+ return getOperandIdx(MI.getOpcode(), Op);
+}
- return R600Operands::ALUOpTable[OpTableIdx][Op];
+int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
+ return AMDGPU::getNamedOperandIdx(Opcode, Op);
}
-void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
+void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
int64_t Imm) const {
int Idx = getOperandIdx(*MI, Op);
assert(Idx != -1 && "Operand not supported for this instruction.");
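(Editor's note: with this change, operand positions are no longer looked up
through the hand-written R600Operands tables but through the TableGen-generated
name table via AMDGPU::getNamedOperandIdx, enabled by UseNamedOperandTable on
the instruction definitions. A short, hedged sketch of the lookup pattern; MI
and TII are assumed to exist in the caller and the immediate value written is
purely illustrative:

  int Idx = TII->getOperandIdx(*MI, AMDGPU::OpName::literal);
  if (Idx != -1)                    // -1 means this opcode has no
    MI->getOperand(Idx).setImm(0);  // 'literal' operand

The setImmOperand helper just above is a thin wrapper around exactly this
lookup-then-set pattern, with an assertion that the operand is present.)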
@@ -801,20 +1310,20 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
switch (Flag) {
case MO_FLAG_CLAMP:
- FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
+ FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
break;
case MO_FLAG_MASK:
- FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
+ FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
break;
case MO_FLAG_NOT_LAST:
case MO_FLAG_LAST:
- FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
+ FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
break;
case MO_FLAG_NEG:
switch (SrcIdx) {
- case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
- case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
- case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
+ case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
+ case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
+ case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
}
break;
@@ -823,8 +1332,8 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
"instructions.");
(void)IsOP3;
switch (SrcIdx) {
- case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
- case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
+ case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
+ case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
}
break;
diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h
index babe4b8fe518..13d981094ed5 100644
--- a/lib/Target/R600/R600InstrInfo.h
+++ b/lib/Target/R600/R600InstrInfo.h
@@ -16,7 +16,6 @@
#define R600INSTRUCTIONINFO_H_
#include "AMDGPUInstrInfo.h"
-#include "AMDIL.h"
#include "R600Defines.h"
#include "R600RegisterInfo.h"
#include <map>
@@ -36,8 +35,19 @@ namespace llvm {
const AMDGPUSubtarget &ST;
int getBranchInstr(const MachineOperand &op) const;
+ std::vector<std::pair<int, unsigned> >
+ ExtractSrcs(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PV, unsigned &ConstCount) const;
public:
+ enum BankSwizzle {
+ ALU_VEC_012_SCL_210 = 0,
+ ALU_VEC_021_SCL_122,
+ ALU_VEC_120_SCL_212,
+ ALU_VEC_102_SCL_221,
+ ALU_VEC_201,
+ ALU_VEC_210
+ };
+
explicit R600InstrInfo(AMDGPUTargetMachine &tm);
const R600RegisterInfo &getRegisterInfo() const;
@@ -45,6 +55,8 @@ namespace llvm {
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const;
+ bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const;
bool isTrig(const MachineInstr &MI) const;
bool isPlaceHolderOpcode(unsigned opcode) const;
@@ -53,25 +65,83 @@ namespace llvm {
/// \returns true if this \p Opcode represents an ALU instruction.
bool isALUInstr(unsigned Opcode) const;
+ bool hasInstrModifiers(unsigned Opcode) const;
+ bool isLDSInstr(unsigned Opcode) const;
+ bool isLDSNoRetInstr(unsigned Opcode) const;
+ bool isLDSRetInstr(unsigned Opcode) const;
+
+ /// \returns true if this \p Opcode represents an ALU instruction or an
+ /// instruction that will be lowered in ExpandSpecialInstrs Pass.
+ bool canBeConsideredALU(const MachineInstr *MI) const;
bool isTransOnly(unsigned Opcode) const;
bool isTransOnly(const MachineInstr *MI) const;
+ bool isVectorOnly(unsigned Opcode) const;
+ bool isVectorOnly(const MachineInstr *MI) const;
+ bool isExport(unsigned Opcode) const;
bool usesVertexCache(unsigned Opcode) const;
bool usesVertexCache(const MachineInstr *MI) const;
bool usesTextureCache(unsigned Opcode) const;
bool usesTextureCache(const MachineInstr *MI) const;
+ bool mustBeLastInClause(unsigned Opcode) const;
+ bool usesAddressRegister(MachineInstr *MI) const;
+ bool definesAddressRegister(MachineInstr *MI) const;
+ bool readsLDSSrcReg(const MachineInstr *MI) const;
+
+ /// \returns The operand index for the given source number. Legal values
+ /// for SrcNum are 0, 1, and 2.
+ int getSrcIdx(unsigned Opcode, unsigned SrcNum) const;
+ /// \returns The operand Index for the Sel operand given an index to one
+ /// of the instruction's src operands.
+ int getSelIdx(unsigned Opcode, unsigned SrcIdx) const;
+
+ /// \returns a pair for each src of an ALU instructions.
+ /// The first member of a pair is the register id.
+ /// If register is ALU_CONST, second member is SEL.
+ /// If register is ALU_LITERAL, second member is IMM.
+ /// Otherwise, second member value is undefined.
+ SmallVector<std::pair<MachineOperand *, int64_t>, 3>
+ getSrcs(MachineInstr *MI) const;
+
+ unsigned isLegalUpTo(
+ const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+ const std::vector<R600InstrInfo::BankSwizzle> &Swz,
+ const std::vector<std::pair<int, unsigned> > &TransSrcs,
+ R600InstrInfo::BankSwizzle TransSwz) const;
+
+ bool FindSwizzleForVectorSlot(
+ const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+ std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
+ const std::vector<std::pair<int, unsigned> > &TransSrcs,
+ R600InstrInfo::BankSwizzle TransSwz) const;
+
+ /// Given the order VEC_012 < VEC_021 < VEC_120 < VEC_102 < VEC_201 < VEC_210
+    /// returns true and the first (in lexicographic order) BankSwizzle assignment
+ /// starting from the one already provided in the Instruction Group MIs that
+ /// fits Read Port limitations in BS if available. Otherwise returns false
+ /// and undefined content in BS.
+ /// isLastAluTrans should be set if the last Alu of MIs will be executed on
+ /// Trans ALU. In this case, ValidTSwizzle returns the BankSwizzle value to
+ /// apply to the last instruction.
+ /// PV holds GPR to PV registers in the Instruction Group MIs.
+ bool fitsReadPortLimitations(const std::vector<MachineInstr *> &MIs,
+ const DenseMap<unsigned, unsigned> &PV,
+ std::vector<BankSwizzle> &BS,
+ bool isLastAluTrans) const;
+
+    /// An instruction group can only access 2 channel pairs (either [XY] or
+    /// [ZW]) from a KCache bank on R700+. This function checks whether the
+    /// given set of MIs meets this limitation.
+    bool fitsConstReadLimitations(const std::vector<MachineInstr *> &) const;
+    /// Same check, but using a set of constant indices instead of a set of MIs.
bool fitsConstReadLimitations(const std::vector<unsigned>&) const;
- bool canBundle(const std::vector<MachineInstr *> &) const;
     /// \brief Vector instructions are instructions that must fill all
/// instruction slots within an instruction group.
bool isVector(const MachineInstr &MI) const;
- virtual MachineInstr * getMovImmInstr(MachineFunction *MF, unsigned DstReg,
- int64_t Imm) const;
-
virtual unsigned getIEQOpcode() const;
virtual bool isMov(unsigned Opcode) const;
@@ -118,6 +188,8 @@ namespace llvm {
bool PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const;
+ unsigned int getPredicationCost(const MachineInstr *) const;
+
unsigned int getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost = 0) const;
@@ -125,22 +197,14 @@ namespace llvm {
virtual int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const { return 1;}
- /// \returns a list of all the registers that may be accesed using indirect
- /// addressing.
- std::vector<unsigned> getIndirectReservedRegs(const MachineFunction &MF) const;
-
- virtual int getIndirectIndexBegin(const MachineFunction &MF) const;
-
- virtual int getIndirectIndexEnd(const MachineFunction &MF) const;
-
+    /// \brief Reserve the registers that may be accessed using indirect addressing.
+ void reserveIndirectRegisters(BitVector &Reserved,
+ const MachineFunction &MF) const;
virtual unsigned calculateIndirectAddress(unsigned RegIndex,
unsigned Channel) const;
- virtual const TargetRegisterClass *getIndirectAddrStoreRegClass(
- unsigned SourceReg) const;
-
- virtual const TargetRegisterClass *getIndirectAddrLoadRegClass() const;
+ virtual const TargetRegisterClass *getIndirectAddrRegClass() const;
virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
@@ -152,8 +216,6 @@ namespace llvm {
unsigned ValueReg, unsigned Address,
unsigned OffsetReg) const;
- virtual const TargetRegisterClass *getSuperIndirectRegClass() const;
-
unsigned getMaxAlusPerClause() const;
///buildDefaultInstruction - This function returns a MachineInstr with
@@ -170,23 +232,32 @@ namespace llvm {
unsigned Src0Reg,
unsigned Src1Reg = 0) const;
+ MachineInstr *buildSlotOfVectorInstruction(MachineBasicBlock &MBB,
+ MachineInstr *MI,
+ unsigned Slot,
+ unsigned DstReg) const;
+
MachineInstr *buildMovImm(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
unsigned DstReg,
uint64_t Imm) const;
+ MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, unsigned SrcReg) const;
+
/// \brief Get the index of Op in the MachineInstr.
///
/// \returns -1 if the Instruction does not contain the specified \p Op.
- int getOperandIdx(const MachineInstr &MI, R600Operands::Ops Op) const;
+ int getOperandIdx(const MachineInstr &MI, unsigned Op) const;
/// \brief Get the index of \p Op for the given Opcode.
///
/// \returns -1 if the Instruction does not contain the specified \p Op.
- int getOperandIdx(unsigned Opcode, R600Operands::Ops Op) const;
+ int getOperandIdx(unsigned Opcode, unsigned Op) const;
/// \brief Helper function for setting instruction flag values.
- void setImmOperand(MachineInstr *MI, R600Operands::Ops Op, int64_t Imm) const;
+ void setImmOperand(MachineInstr *MI, unsigned Op, int64_t Imm) const;
/// \returns true if this instruction has an operand for storing target flags.
bool hasFlagOperand(const MachineInstr &MI) const;
@@ -208,6 +279,12 @@ namespace llvm {
void clearFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
};
+namespace AMDGPU {
+
+int getLDSNoRetOp(uint16_t Opcode);
+
+} //End namespace AMDGPU
+
} // End llvm namespace
#endif // R600INSTRINFO_H_
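(Editor's note: a hedged sketch of how the fitsReadPortLimitations query
declared above is meant to be consumed; IG, TII and the surrounding pass are
hypothetical, and only the R600InstrInfo calls and the bank_swizzle operand
name come from this patch. The idea is to ask whether an instruction group
fits the read-port rules and, if it does, to write the chosen per-slot swizzle
back into each instruction:

  std::vector<R600InstrInfo::BankSwizzle> BS;
  DenseMap<unsigned, unsigned> PV;  // GPR-to-PV/PS mapping, left empty here
  if (TII->fitsReadPortLimitations(IG, PV, BS, /*isLastAluTrans=*/false)) {
    for (unsigned i = 0, e = IG.size(); i != e; ++i) {
      int Op = TII->getOperandIdx(IG[i]->getOpcode(),
                                  AMDGPU::OpName::bank_swizzle);
      IG[i]->getOperand(Op).setImm(BS[i]);
    }
  }

On the non-trans path BS receives one entry per instruction in IG, so the
per-index write-back above is well defined.)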
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index 8f47523524e5..0346e24ab771 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -12,44 +12,7 @@
//===----------------------------------------------------------------------===//
include "R600Intrinsics.td"
-
-class InstR600 <dag outs, dag ins, string asm, list<dag> pattern,
- InstrItinClass itin>
- : AMDGPUInst <outs, ins, asm, pattern> {
-
- field bits<64> Inst;
- bit TransOnly = 0;
- bit Trig = 0;
- bit Op3 = 0;
- bit isVector = 0;
- bits<2> FlagOperandIdx = 0;
- bit Op1 = 0;
- bit Op2 = 0;
- bit HasNativeOperands = 0;
- bit VTXInst = 0;
- bit TEXInst = 0;
-
- let Namespace = "AMDGPU";
- let OutOperandList = outs;
- let InOperandList = ins;
- let AsmString = asm;
- let Pattern = pattern;
- let Itinerary = itin;
-
- let TSFlags{0} = TransOnly;
- let TSFlags{4} = Trig;
- let TSFlags{5} = Op3;
-
- // Vector instructions are instructions that must fill all slots in an
- // instruction group
- let TSFlags{6} = isVector;
- let TSFlags{8-7} = FlagOperandIdx;
- let TSFlags{9} = HasNativeOperands;
- let TSFlags{10} = Op1;
- let TSFlags{11} = Op2;
- let TSFlags{12} = VTXInst;
- let TSFlags{13} = TEXInst;
-}
+include "R600InstrFormats.td"
class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> :
InstR600 <outs, ins, asm, pattern, NullALU> {
@@ -96,6 +59,12 @@ def UP : InstFlag <"printUpdatePred">;
// Once we start using the packetizer in this backend we should have this
// default to 0.
def LAST : InstFlag<"printLast", 1>;
+def RSel : Operand<i32> {
+ let PrintMethod = "printRSel";
+}
+def CT: Operand<i32> {
+ let PrintMethod = "printCT";
+}
def FRAMEri : Operand<iPTR> {
let MIOperandInfo = (ops R600_Reg32:$ptr, i32imm:$index);
@@ -106,237 +75,7 @@ def ADDRDWord : ComplexPattern<i32, 1, "SelectADDRDWord", [], []>;
def ADDRVTX_READ : ComplexPattern<i32, 2, "SelectADDRVTX_READ", [], []>;
def ADDRGA_CONST_OFFSET : ComplexPattern<i32, 1, "SelectGlobalValueConstantOffset", [], []>;
def ADDRGA_VAR_OFFSET : ComplexPattern<i32, 2, "SelectGlobalValueVariableOffset", [], []>;
-def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;
-
-class R600ALU_Word0 {
- field bits<32> Word0;
-
- bits<11> src0;
- bits<1> src0_neg;
- bits<1> src0_rel;
- bits<11> src1;
- bits<1> src1_rel;
- bits<1> src1_neg;
- bits<3> index_mode = 0;
- bits<2> pred_sel;
- bits<1> last;
-
- bits<9> src0_sel = src0{8-0};
- bits<2> src0_chan = src0{10-9};
- bits<9> src1_sel = src1{8-0};
- bits<2> src1_chan = src1{10-9};
-
- let Word0{8-0} = src0_sel;
- let Word0{9} = src0_rel;
- let Word0{11-10} = src0_chan;
- let Word0{12} = src0_neg;
- let Word0{21-13} = src1_sel;
- let Word0{22} = src1_rel;
- let Word0{24-23} = src1_chan;
- let Word0{25} = src1_neg;
- let Word0{28-26} = index_mode;
- let Word0{30-29} = pred_sel;
- let Word0{31} = last;
-}
-
-class R600ALU_Word1 {
- field bits<32> Word1;
-
- bits<11> dst;
- bits<3> bank_swizzle;
- bits<1> dst_rel;
- bits<1> clamp;
-
- bits<7> dst_sel = dst{6-0};
- bits<2> dst_chan = dst{10-9};
-
- let Word1{20-18} = bank_swizzle;
- let Word1{27-21} = dst_sel;
- let Word1{28} = dst_rel;
- let Word1{30-29} = dst_chan;
- let Word1{31} = clamp;
-}
-
-class R600ALU_Word1_OP2 <bits<11> alu_inst> : R600ALU_Word1{
-
- bits<1> src0_abs;
- bits<1> src1_abs;
- bits<1> update_exec_mask;
- bits<1> update_pred;
- bits<1> write;
- bits<2> omod;
-
- let Word1{0} = src0_abs;
- let Word1{1} = src1_abs;
- let Word1{2} = update_exec_mask;
- let Word1{3} = update_pred;
- let Word1{4} = write;
- let Word1{6-5} = omod;
- let Word1{17-7} = alu_inst;
-}
-class R600ALU_Word1_OP3 <bits<5> alu_inst> : R600ALU_Word1{
-
- bits<11> src2;
- bits<1> src2_rel;
- bits<1> src2_neg;
-
- bits<9> src2_sel = src2{8-0};
- bits<2> src2_chan = src2{10-9};
-
- let Word1{8-0} = src2_sel;
- let Word1{9} = src2_rel;
- let Word1{11-10} = src2_chan;
- let Word1{12} = src2_neg;
- let Word1{17-13} = alu_inst;
-}
-
-class VTX_WORD0 {
- field bits<32> Word0;
- bits<7> SRC_GPR;
- bits<5> VC_INST;
- bits<2> FETCH_TYPE;
- bits<1> FETCH_WHOLE_QUAD;
- bits<8> BUFFER_ID;
- bits<1> SRC_REL;
- bits<2> SRC_SEL_X;
- bits<6> MEGA_FETCH_COUNT;
-
- let Word0{4-0} = VC_INST;
- let Word0{6-5} = FETCH_TYPE;
- let Word0{7} = FETCH_WHOLE_QUAD;
- let Word0{15-8} = BUFFER_ID;
- let Word0{22-16} = SRC_GPR;
- let Word0{23} = SRC_REL;
- let Word0{25-24} = SRC_SEL_X;
- let Word0{31-26} = MEGA_FETCH_COUNT;
-}
-
-class VTX_WORD1_GPR {
- field bits<32> Word1;
- bits<7> DST_GPR;
- bits<1> DST_REL;
- bits<3> DST_SEL_X;
- bits<3> DST_SEL_Y;
- bits<3> DST_SEL_Z;
- bits<3> DST_SEL_W;
- bits<1> USE_CONST_FIELDS;
- bits<6> DATA_FORMAT;
- bits<2> NUM_FORMAT_ALL;
- bits<1> FORMAT_COMP_ALL;
- bits<1> SRF_MODE_ALL;
-
- let Word1{6-0} = DST_GPR;
- let Word1{7} = DST_REL;
- let Word1{8} = 0; // Reserved
- let Word1{11-9} = DST_SEL_X;
- let Word1{14-12} = DST_SEL_Y;
- let Word1{17-15} = DST_SEL_Z;
- let Word1{20-18} = DST_SEL_W;
- let Word1{21} = USE_CONST_FIELDS;
- let Word1{27-22} = DATA_FORMAT;
- let Word1{29-28} = NUM_FORMAT_ALL;
- let Word1{30} = FORMAT_COMP_ALL;
- let Word1{31} = SRF_MODE_ALL;
-}
-
-class TEX_WORD0 {
- field bits<32> Word0;
-
- bits<5> TEX_INST;
- bits<2> INST_MOD;
- bits<1> FETCH_WHOLE_QUAD;
- bits<8> RESOURCE_ID;
- bits<7> SRC_GPR;
- bits<1> SRC_REL;
- bits<1> ALT_CONST;
- bits<2> RESOURCE_INDEX_MODE;
- bits<2> SAMPLER_INDEX_MODE;
-
- let Word0{4-0} = TEX_INST;
- let Word0{6-5} = INST_MOD;
- let Word0{7} = FETCH_WHOLE_QUAD;
- let Word0{15-8} = RESOURCE_ID;
- let Word0{22-16} = SRC_GPR;
- let Word0{23} = SRC_REL;
- let Word0{24} = ALT_CONST;
- let Word0{26-25} = RESOURCE_INDEX_MODE;
- let Word0{28-27} = SAMPLER_INDEX_MODE;
-}
-
-class TEX_WORD1 {
- field bits<32> Word1;
-
- bits<7> DST_GPR;
- bits<1> DST_REL;
- bits<3> DST_SEL_X;
- bits<3> DST_SEL_Y;
- bits<3> DST_SEL_Z;
- bits<3> DST_SEL_W;
- bits<7> LOD_BIAS;
- bits<1> COORD_TYPE_X;
- bits<1> COORD_TYPE_Y;
- bits<1> COORD_TYPE_Z;
- bits<1> COORD_TYPE_W;
-
- let Word1{6-0} = DST_GPR;
- let Word1{7} = DST_REL;
- let Word1{11-9} = DST_SEL_X;
- let Word1{14-12} = DST_SEL_Y;
- let Word1{17-15} = DST_SEL_Z;
- let Word1{20-18} = DST_SEL_W;
- let Word1{27-21} = LOD_BIAS;
- let Word1{28} = COORD_TYPE_X;
- let Word1{29} = COORD_TYPE_Y;
- let Word1{30} = COORD_TYPE_Z;
- let Word1{31} = COORD_TYPE_W;
-}
-
-class TEX_WORD2 {
- field bits<32> Word2;
-
- bits<5> OFFSET_X;
- bits<5> OFFSET_Y;
- bits<5> OFFSET_Z;
- bits<5> SAMPLER_ID;
- bits<3> SRC_SEL_X;
- bits<3> SRC_SEL_Y;
- bits<3> SRC_SEL_Z;
- bits<3> SRC_SEL_W;
-
- let Word2{4-0} = OFFSET_X;
- let Word2{9-5} = OFFSET_Y;
- let Word2{14-10} = OFFSET_Z;
- let Word2{19-15} = SAMPLER_ID;
- let Word2{22-20} = SRC_SEL_X;
- let Word2{25-23} = SRC_SEL_Y;
- let Word2{28-26} = SRC_SEL_Z;
- let Word2{31-29} = SRC_SEL_W;
-}
-
-/*
-XXX: R600 subtarget uses a slightly different encoding than the other
-subtargets. We currently handle this in R600MCCodeEmitter, but we may
-want to use these instruction classes in the future.
-
-class R600ALU_Word1_OP2_r600 : R600ALU_Word1_OP2 {
-
- bits<1> fog_merge;
- bits<10> alu_inst;
-
- let Inst{37} = fog_merge;
- let Inst{39-38} = omod;
- let Inst{49-40} = alu_inst;
-}
-
-class R600ALU_Word1_OP2_r700 : R600ALU_Word1_OP2 {
-
- bits<11> alu_inst;
-
- let Inst{38-37} = omod;
- let Inst{49-39} = alu_inst;
-}
-*/
def R600_Pred : PredicateOperand<i32, (ops R600_Predicate),
(ops PRED_SEL_OFF)>;
@@ -358,7 +97,7 @@ class R600_1OP <bits<11> inst, string opName, list<dag> pattern,
LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal,
BANK_SWIZZLE:$bank_swizzle),
!strconcat(" ", opName,
- "$last$clamp $dst$write$dst_rel$omod, "
+ "$clamp $last $dst$write$dst_rel$omod, "
"$src0_neg$src0_abs$src0$src0_abs$src0_rel, "
"$pred_sel $bank_swizzle"),
pattern,
@@ -374,7 +113,9 @@ class R600_1OP <bits<11> inst, string opName, list<dag> pattern,
let update_pred = 0;
let HasNativeOperands = 1;
let Op1 = 1;
+ let ALUInst = 1;
let DisableEncoding = "$literal";
+ let UseNamedOperandTable = 1;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
@@ -386,7 +127,7 @@ class R600_1OP_Helper <bits<11> inst, string opName, SDPatternOperator node,
[(set R600_Reg32:$dst, (node R600_Reg32:$src0))]
>;
-// If you add our change the operands for R600_2OP instructions, you must
+// If you add or change the operands for R600_2OP instructions, you must
// also update the R600Op2OperandIndex::ROI enum in R600Defines.h,
// R600InstrInfo::buildDefaultInstruction(), and R600InstrInfo::getOperandIdx().
class R600_2OP <bits<11> inst, string opName, list<dag> pattern,
@@ -399,7 +140,7 @@ class R600_2OP <bits<11> inst, string opName, list<dag> pattern,
LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal,
BANK_SWIZZLE:$bank_swizzle),
!strconcat(" ", opName,
- "$last$clamp $update_exec_mask$update_pred$dst$write$dst_rel$omod, "
+ "$clamp $last $update_exec_mask$update_pred$dst$write$dst_rel$omod, "
"$src0_neg$src0_abs$src0$src0_abs$src0_rel, "
"$src1_neg$src1_abs$src1$src1_abs$src1_rel, "
"$pred_sel $bank_swizzle"),
@@ -410,7 +151,9 @@ class R600_2OP <bits<11> inst, string opName, list<dag> pattern,
let HasNativeOperands = 1;
let Op2 = 1;
+ let ALUInst = 1;
let DisableEncoding = "$literal";
+ let UseNamedOperandTable = 1;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
@@ -436,7 +179,7 @@ class R600_3OP <bits<5> inst, string opName, list<dag> pattern,
R600_Reg32:$src2, NEG:$src2_neg, REL:$src2_rel, SEL:$src2_sel,
LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal,
BANK_SWIZZLE:$bank_swizzle),
- !strconcat(" ", opName, "$last$clamp $dst$dst_rel, "
+ !strconcat(" ", opName, "$clamp $last $dst$dst_rel, "
"$src0_neg$src0$src0_rel, "
"$src1_neg$src1$src1_rel, "
"$src2_neg$src2$src2_rel, "
@@ -450,6 +193,8 @@ class R600_3OP <bits<5> inst, string opName, list<dag> pattern,
let HasNativeOperands = 1;
let DisableEncoding = "$literal";
let Op3 = 1;
+ let UseNamedOperandTable = 1;
+ let ALUInst = 1;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
@@ -463,38 +208,7 @@ class R600_REDUCTION <bits<11> inst, dag ins, string asm, list<dag> pattern,
pattern,
itin>;
-class R600_TEX <bits<11> inst, string opName, list<dag> pattern,
- InstrItinClass itin = AnyALU> :
- InstR600 <(outs R600_Reg128:$DST_GPR),
- (ins R600_Reg128:$SRC_GPR, i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID, i32imm:$textureTarget),
- !strconcat(opName, "$DST_GPR, $SRC_GPR, $RESOURCE_ID, $SAMPLER_ID, $textureTarget"),
- pattern,
- itin>, TEX_WORD0, TEX_WORD1, TEX_WORD2 {
- let Inst{31-0} = Word0;
- let Inst{63-32} = Word1;
-
- let TEX_INST = inst{4-0};
- let SRC_REL = 0;
- let DST_REL = 0;
- let DST_SEL_X = 0;
- let DST_SEL_Y = 1;
- let DST_SEL_Z = 2;
- let DST_SEL_W = 3;
- let LOD_BIAS = 0;
-
- let INST_MOD = 0;
- let FETCH_WHOLE_QUAD = 0;
- let ALT_CONST = 0;
- let SAMPLER_INDEX_MODE = 0;
- let RESOURCE_INDEX_MODE = 0;
-
- let COORD_TYPE_X = 0;
- let COORD_TYPE_Y = 0;
- let COORD_TYPE_Z = 0;
- let COORD_TYPE_W = 0;
-
- let TEXInst = 1;
- }
+
} // End mayLoad = 1, mayStore = 0, hasSideEffects = 0
@@ -515,7 +229,7 @@ def TEX_RECT : PatLeaf<
def TEX_ARRAY : PatLeaf<
(imm),
[{uint32_t TType = (uint32_t)N->getZExtValue();
- return TType == 9 || TType == 10 || TType == 15 || TType == 16;
+ return TType == 9 || TType == 10 || TType == 16;
}]
>;
@@ -526,76 +240,115 @@ def TEX_SHADOW_ARRAY : PatLeaf<
}]
>;
-class EG_CF_RAT <bits <8> cf_inst, bits <6> rat_inst, bits<4> rat_id, dag outs,
- dag ins, string asm, list<dag> pattern> :
- InstR600ISA <outs, ins, asm, pattern> {
- bits<7> RW_GPR;
- bits<7> INDEX_GPR;
-
- bits<2> RIM;
- bits<2> TYPE;
- bits<1> RW_REL;
- bits<2> ELEM_SIZE;
-
- bits<12> ARRAY_SIZE;
- bits<4> COMP_MASK;
- bits<4> BURST_COUNT;
- bits<1> VPM;
- bits<1> eop;
- bits<1> MARK;
- bits<1> BARRIER;
-
- // CF_ALLOC_EXPORT_WORD0_RAT
- let Inst{3-0} = rat_id;
- let Inst{9-4} = rat_inst;
- let Inst{10} = 0; // Reserved
- let Inst{12-11} = RIM;
- let Inst{14-13} = TYPE;
- let Inst{21-15} = RW_GPR;
- let Inst{22} = RW_REL;
- let Inst{29-23} = INDEX_GPR;
- let Inst{31-30} = ELEM_SIZE;
-
- // CF_ALLOC_EXPORT_WORD1_BUF
- let Inst{43-32} = ARRAY_SIZE;
- let Inst{47-44} = COMP_MASK;
- let Inst{51-48} = BURST_COUNT;
- let Inst{52} = VPM;
- let Inst{53} = eop;
- let Inst{61-54} = cf_inst;
- let Inst{62} = MARK;
- let Inst{63} = BARRIER;
+def TEX_MSAA : PatLeaf<
+ (imm),
+ [{uint32_t TType = (uint32_t)N->getZExtValue();
+ return TType == 14;
+ }]
+>;
+
+def TEX_ARRAY_MSAA : PatLeaf<
+ (imm),
+ [{uint32_t TType = (uint32_t)N->getZExtValue();
+ return TType == 15;
+ }]
+>;
+
+class EG_CF_RAT <bits <8> cfinst, bits <6> ratinst, bits<4> ratid, bits<4> mask,
+ dag outs, dag ins, string asm, list<dag> pattern> :
+ InstR600ISA <outs, ins, asm, pattern>,
+ CF_ALLOC_EXPORT_WORD0_RAT, CF_ALLOC_EXPORT_WORD1_BUF {
+
+ let rat_id = ratid;
+ let rat_inst = ratinst;
+ let rim = 0;
+ // XXX: Have a separate instruction for non-indexed writes.
+ let type = 1;
+ let rw_rel = 0;
+ let elem_size = 0;
+
+ let array_size = 0;
+ let comp_mask = mask;
+ let burst_count = 0;
+ let vpm = 0;
+ let cf_inst = cfinst;
+ let mark = 0;
+ let barrier = 1;
+
+ let Inst{31-0} = Word0;
+ let Inst{63-32} = Word1;
+ let IsExport = 1;
+
+}
+
+class VTX_READ <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
+ : InstR600ISA <outs, (ins MEMxi:$src_gpr), name, pattern>,
+ VTX_WORD1_GPR {
+
+ // Static fields
+ let DST_REL = 0;
+ // The docs say that if this bit is set, then DATA_FORMAT, NUM_FORMAT_ALL,
+  // FORMAT_COMP_ALL, SRF_MODE_ALL, and ENDIAN_SWAP fields will be ignored;
+  // however, based on my testing, if USE_CONST_FIELDS is set, then all
+ // these fields need to be set to 0.
+ let USE_CONST_FIELDS = 0;
+ let NUM_FORMAT_ALL = 1;
+ let FORMAT_COMP_ALL = 0;
+ let SRF_MODE_ALL = 0;
+
+ let Inst{63-32} = Word1;
+ // LLVM can only encode 64-bit instructions, so these fields are manually
+ // encoded in R600CodeEmitter
+ //
+ // bits<16> OFFSET;
+ // bits<2> ENDIAN_SWAP = 0;
+ // bits<1> CONST_BUF_NO_STRIDE = 0;
+ // bits<1> MEGA_FETCH = 0;
+ // bits<1> ALT_CONST = 0;
+ // bits<2> BUFFER_INDEX_MODE = 0;
+
+ // VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding
+  // is done in R600CodeEmitter)
+ //
+ // Inst{79-64} = OFFSET;
+ // Inst{81-80} = ENDIAN_SWAP;
+ // Inst{82} = CONST_BUF_NO_STRIDE;
+ // Inst{83} = MEGA_FETCH;
+ // Inst{84} = ALT_CONST;
+ // Inst{86-85} = BUFFER_INDEX_MODE;
+ // Inst{95-87} = 0; Reserved
+
+ // VTX_WORD3 (Padding)
+ //
+ // Inst{127-96} = 0;
+
+ let VTXInst = 1;
}
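
The commented-out WORD2/WORD3 fields above cannot be expressed in the 64-bit Inst encoding, so the code emitter has to append them separately. Below is a minimal packing sketch using the bit positions from the comment; the helper is hypothetical and only stands in for whatever R600CodeEmitter actually emits (the fourth 32-bit word is plain zero padding).

#include <cstdint>

// Hypothetical packing of VTX_WORD2 (instruction bits 95-64). Bit positions
// follow the comment in the VTX_READ class above; bits 95-87 stay reserved/zero.
uint32_t packVtxWord2(uint16_t Offset, uint8_t EndianSwap, bool ConstBufNoStride,
                      bool MegaFetch, bool AltConst, uint8_t BufferIndexMode) {
  uint32_t Word2 = 0;
  Word2 |= uint32_t(Offset);                      // Inst{79-64}
  Word2 |= uint32_t(EndianSwap & 0x3) << 16;      // Inst{81-80}
  Word2 |= uint32_t(ConstBufNoStride) << 18;      // Inst{82}
  Word2 |= uint32_t(MegaFetch) << 19;             // Inst{83}
  Word2 |= uint32_t(AltConst) << 20;              // Inst{84}
  Word2 |= uint32_t(BufferIndexMode & 0x3) << 21; // Inst{86-85}
  return Word2;                                   // WORD3 (Inst{127-96}) is 0
}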
class LoadParamFrag <PatFrag load_type> : PatFrag <
(ops node:$ptr), (load_type node:$ptr),
- [{ return isParamLoad(dyn_cast<LoadSDNode>(N)); }]
+ [{ return isConstantLoad(dyn_cast<LoadSDNode>(N), 0); }]
>;
def load_param : LoadParamFrag<load>;
-def load_param_zexti8 : LoadParamFrag<zextloadi8>;
-def load_param_zexti16 : LoadParamFrag<zextloadi16>;
-
-def isR600 : Predicate<"Subtarget.device()"
- "->getGeneration() == AMDGPUDeviceInfo::HD4XXX">;
-def isR700 : Predicate<"Subtarget.device()"
- "->getGeneration() == AMDGPUDeviceInfo::HD4XXX &&"
- "Subtarget.device()->getDeviceFlag()"
- ">= OCL_DEVICE_RV710">;
+def load_param_exti8 : LoadParamFrag<az_extloadi8>;
+def load_param_exti16 : LoadParamFrag<az_extloadi16>;
+
+def isR600 : Predicate<"Subtarget.getGeneration() <= AMDGPUSubtarget::R700">;
+def isR700 : Predicate<"Subtarget.getGeneration() == AMDGPUSubtarget::R700">;
def isEG : Predicate<
- "Subtarget.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX && "
- "Subtarget.device()->getGeneration() < AMDGPUDeviceInfo::HD7XXX && "
- "Subtarget.device()->getDeviceFlag() != OCL_DEVICE_CAYMAN">;
+ "Subtarget.getGeneration() >= AMDGPUSubtarget::EVERGREEN && "
+ "Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+ "!Subtarget.hasCaymanISA()">;
-def isCayman : Predicate<"Subtarget.device()"
- "->getDeviceFlag() == OCL_DEVICE_CAYMAN">;
-def isEGorCayman : Predicate<"Subtarget.device()"
- "->getGeneration() == AMDGPUDeviceInfo::HD5XXX"
- "|| Subtarget.device()->getGeneration() =="
- "AMDGPUDeviceInfo::HD6XXX">;
+def isCayman : Predicate<"Subtarget.hasCaymanISA()">;
+def isEGorCayman : Predicate<"Subtarget.getGeneration() == "
+ "AMDGPUSubtarget::EVERGREEN"
+ "|| Subtarget.getGeneration() =="
+ "AMDGPUSubtarget::NORTHERN_ISLANDS">;
def isR600toCayman : Predicate<
- "Subtarget.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX">;
+ "Subtarget.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS">;
//===----------------------------------------------------------------------===//
// R600 SDNodes
@@ -603,13 +356,13 @@ def isR600toCayman : Predicate<
def INTERP_PAIR_XY : AMDGPUShaderInst <
(outs R600_TReg32_X:$dst0, R600_TReg32_Y:$dst1),
- (ins i32imm:$src0, R600_Reg32:$src1, R600_Reg32:$src2),
+ (ins i32imm:$src0, R600_TReg32_Y:$src1, R600_TReg32_X:$src2),
"INTERP_PAIR_XY $src0 $src1 $src2 : $dst0 dst1",
[]>;
def INTERP_PAIR_ZW : AMDGPUShaderInst <
(outs R600_TReg32_Z:$dst0, R600_TReg32_W:$dst1),
- (ins i32imm:$src0, R600_Reg32:$src1, R600_Reg32:$src2),
+ (ins i32imm:$src0, R600_TReg32_Y:$src1, R600_TReg32_X:$src2),
"INTERP_PAIR_ZW $src0 $src1 $src2 : $dst0 dst1",
[]>;
@@ -618,6 +371,44 @@ def CONST_ADDRESS: SDNode<"AMDGPUISD::CONST_ADDRESS",
[SDNPVariadic]
>;
+def DOT4 : SDNode<"AMDGPUISD::DOT4",
+ SDTypeProfile<1, 8, [SDTCisFP<0>, SDTCisVT<1, f32>, SDTCisVT<2, f32>,
+ SDTCisVT<3, f32>, SDTCisVT<4, f32>, SDTCisVT<5, f32>,
+ SDTCisVT<6, f32>, SDTCisVT<7, f32>, SDTCisVT<8, f32>]>,
+ []
+>;
+
+def COS_HW : SDNode<"AMDGPUISD::COS_HW",
+ SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>
+>;
+
+def SIN_HW : SDNode<"AMDGPUISD::SIN_HW",
+ SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>
+>;
+
+def TEXTURE_FETCH_Type : SDTypeProfile<1, 19, [SDTCisFP<0>]>;
+
+def TEXTURE_FETCH: SDNode<"AMDGPUISD::TEXTURE_FETCH", TEXTURE_FETCH_Type, []>;
+
+multiclass TexPattern<bits<32> TextureOp, Instruction inst, ValueType vt = v4f32> {
+def : Pat<(TEXTURE_FETCH (i32 TextureOp), vt:$SRC_GPR,
+ (i32 imm:$srcx), (i32 imm:$srcy), (i32 imm:$srcz), (i32 imm:$srcw),
+ (i32 imm:$offsetx), (i32 imm:$offsety), (i32 imm:$offsetz),
+ (i32 imm:$DST_SEL_X), (i32 imm:$DST_SEL_Y), (i32 imm:$DST_SEL_Z),
+ (i32 imm:$DST_SEL_W),
+ (i32 imm:$RESOURCE_ID), (i32 imm:$SAMPLER_ID),
+ (i32 imm:$COORD_TYPE_X), (i32 imm:$COORD_TYPE_Y), (i32 imm:$COORD_TYPE_Z),
+ (i32 imm:$COORD_TYPE_W)),
+ (inst R600_Reg128:$SRC_GPR,
+ imm:$srcx, imm:$srcy, imm:$srcz, imm:$srcw,
+ imm:$offsetx, imm:$offsety, imm:$offsetz,
+ imm:$DST_SEL_X, imm:$DST_SEL_Y, imm:$DST_SEL_Z,
+ imm:$DST_SEL_W,
+ imm:$RESOURCE_ID, imm:$SAMPLER_ID,
+ imm:$COORD_TYPE_X, imm:$COORD_TYPE_Y, imm:$COORD_TYPE_Z,
+ imm:$COORD_TYPE_W)>;
+}
+
//===----------------------------------------------------------------------===//
// Interpolation Instructions
//===----------------------------------------------------------------------===//
@@ -626,7 +417,7 @@ def INTERP_VEC_LOAD : AMDGPUShaderInst <
(outs R600_Reg128:$dst),
(ins i32imm:$src0),
"INTERP_LOAD $src0 : $dst",
- []>;
+ [(set R600_Reg128:$dst, (int_R600_interp_const imm:$src0))]>;
def INTERP_XY : R600_2OP <0xD6, "INTERP_XY", []> {
let bank_swizzle = 5;
@@ -753,13 +544,14 @@ let usesCustomInserter = 1, isNotDuplicable = 1 in {
class ExportSwzInst : InstR600ISA<(
outs),
(ins R600_Reg128:$gpr, i32imm:$type, i32imm:$arraybase,
- i32imm:$sw_x, i32imm:$sw_y, i32imm:$sw_z, i32imm:$sw_w, i32imm:$inst,
+ RSel:$sw_x, RSel:$sw_y, RSel:$sw_z, RSel:$sw_w, i32imm:$inst,
i32imm:$eop),
- !strconcat("EXPORT", " $gpr"),
+ !strconcat("EXPORT", " $gpr.$sw_x$sw_y$sw_z$sw_w"),
[]>, ExportWord0, ExportSwzWord1 {
let elem_size = 3;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
+ let IsExport = 1;
}
} // End usesCustomInserter = 1
@@ -773,47 +565,13 @@ class ExportBufInst : InstR600ISA<(
let elem_size = 0;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
+ let IsExport = 1;
}
//===----------------------------------------------------------------------===//
// Control Flow Instructions
//===----------------------------------------------------------------------===//
-class CF_ALU_WORD0 {
- field bits<32> Word0;
-
- bits<22> ADDR;
- bits<4> KCACHE_BANK0;
- bits<4> KCACHE_BANK1;
- bits<2> KCACHE_MODE0;
-
- let Word0{21-0} = ADDR;
- let Word0{25-22} = KCACHE_BANK0;
- let Word0{29-26} = KCACHE_BANK1;
- let Word0{31-30} = KCACHE_MODE0;
-}
-
-class CF_ALU_WORD1 {
- field bits<32> Word1;
-
- bits<2> KCACHE_MODE1;
- bits<8> KCACHE_ADDR0;
- bits<8> KCACHE_ADDR1;
- bits<7> COUNT;
- bits<1> ALT_CONST;
- bits<4> CF_INST;
- bits<1> WHOLE_QUAD_MODE;
- bits<1> BARRIER;
-
- let Word1{1-0} = KCACHE_MODE1;
- let Word1{9-2} = KCACHE_ADDR0;
- let Word1{17-10} = KCACHE_ADDR1;
- let Word1{24-18} = COUNT;
- let Word1{25} = ALT_CONST;
- let Word1{29-26} = CF_INST;
- let Word1{30} = WHOLE_QUAD_MODE;
- let Word1{31} = BARRIER;
-}
def KCACHE : InstFlag<"printKCache">;
@@ -821,7 +579,7 @@ class ALU_CLAUSE<bits<4> inst, string OpName> : AMDGPUInst <(outs),
(ins i32imm:$ADDR, i32imm:$KCACHE_BANK0, i32imm:$KCACHE_BANK1,
KCACHE:$KCACHE_MODE0, KCACHE:$KCACHE_MODE1,
i32imm:$KCACHE_ADDR0, i32imm:$KCACHE_ADDR1,
-i32imm:$COUNT),
+i32imm:$COUNT, i32imm:$Enabled),
!strconcat(OpName, " $COUNT, @$ADDR, "
"KC0[$KCACHE_MODE0], KC1[$KCACHE_MODE1]"),
[] >, CF_ALU_WORD0, CF_ALU_WORD1 {
@@ -831,6 +589,7 @@ i32imm:$COUNT),
let ALT_CONST = 0;
let WHOLE_QUAD_MODE = 0;
let BARRIER = 1;
+ let UseNamedOperandTable = 1;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
@@ -844,45 +603,19 @@ class CF_WORD0_R600 {
let Word0 = ADDR;
}
-class CF_WORD1_R600 {
- field bits<32> Word1;
-
- bits<3> POP_COUNT;
- bits<5> CF_CONST;
- bits<2> COND;
- bits<3> COUNT;
- bits<6> CALL_COUNT;
- bits<1> COUNT_3;
- bits<1> END_OF_PROGRAM;
- bits<1> VALID_PIXEL_MODE;
- bits<7> CF_INST;
- bits<1> WHOLE_QUAD_MODE;
- bits<1> BARRIER;
-
- let Word1{2-0} = POP_COUNT;
- let Word1{7-3} = CF_CONST;
- let Word1{9-8} = COND;
- let Word1{12-10} = COUNT;
- let Word1{18-13} = CALL_COUNT;
- let Word1{19} = COUNT_3;
- let Word1{21} = END_OF_PROGRAM;
- let Word1{22} = VALID_PIXEL_MODE;
- let Word1{29-23} = CF_INST;
- let Word1{30} = WHOLE_QUAD_MODE;
- let Word1{31} = BARRIER;
-}
-
class CF_CLAUSE_R600 <bits<7> inst, dag ins, string AsmPrint> : AMDGPUInst <(outs),
ins, AsmPrint, [] >, CF_WORD0_R600, CF_WORD1_R600 {
field bits<64> Inst;
+ bits<4> CNT;
let CF_INST = inst;
let BARRIER = 1;
let CF_CONST = 0;
let VALID_PIXEL_MODE = 0;
let COND = 0;
+ let COUNT = CNT{2-0};
let CALL_COUNT = 0;
- let COUNT_3 = 0;
+ let COUNT_3 = CNT{3};
let END_OF_PROGRAM = 0;
let WHOLE_QUAD_MODE = 0;
@@ -890,38 +623,6 @@ ins, AsmPrint, [] >, CF_WORD0_R600, CF_WORD1_R600 {
let Inst{63-32} = Word1;
}
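
The R600 CF word only carries a 3-bit COUNT field plus a separate COUNT_3 extension bit, so the class above splits its 4-bit CNT operand across the two. The same split written out as a hypothetical helper, not anything taken from the encoder:

#include <cstdint>

// Split a 4-bit clause count the way CF_CLAUSE_R600 does: CNT{2-0} lands in
// COUNT (Word1{12-10}) and CNT{3} in COUNT_3 (Word1{19}).
void splitCfCount(uint8_t Cnt, uint8_t &Count, uint8_t &Count3) {
  Count  = Cnt & 0x7;
  Count3 = (Cnt >> 3) & 0x1;
}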
-class CF_WORD0_EG {
- field bits<32> Word0;
-
- bits<24> ADDR;
- bits<3> JUMPTABLE_SEL;
-
- let Word0{23-0} = ADDR;
- let Word0{26-24} = JUMPTABLE_SEL;
-}
-
-class CF_WORD1_EG {
- field bits<32> Word1;
-
- bits<3> POP_COUNT;
- bits<5> CF_CONST;
- bits<2> COND;
- bits<6> COUNT;
- bits<1> VALID_PIXEL_MODE;
- bits<1> END_OF_PROGRAM;
- bits<8> CF_INST;
- bits<1> BARRIER;
-
- let Word1{2-0} = POP_COUNT;
- let Word1{7-3} = CF_CONST;
- let Word1{9-8} = COND;
- let Word1{15-10} = COUNT;
- let Word1{20} = VALID_PIXEL_MODE;
- let Word1{21} = END_OF_PROGRAM;
- let Word1{29-22} = CF_INST;
- let Word1{31} = BARRIER;
-}
-
class CF_CLAUSE_EG <bits<8> inst, dag ins, string AsmPrint> : AMDGPUInst <(outs),
ins, AsmPrint, [] >, CF_WORD0_EG, CF_WORD1_EG {
field bits<64> Inst;
@@ -940,6 +641,7 @@ ins, AsmPrint, [] >, CF_WORD0_EG, CF_WORD1_EG {
def CF_ALU : ALU_CLAUSE<8, "ALU">;
def CF_ALU_PUSH_BEFORE : ALU_CLAUSE<9, "ALU_PUSH_BEFORE">;
+def CF_ALU_POP_AFTER : ALU_CLAUSE<10, "ALU_POP_AFTER">;
def FETCH_CLAUSE : AMDGPUInst <(outs),
(ins i32imm:$addr), "Fetch clause starting at $addr:", [] > {
@@ -987,42 +689,42 @@ def MIN : R600_2OP_Helper <0x4, "MIN", AMDGPUfmin>;
// XXX: Use the defs in TargetSelectionDAG.td instead of intrinsics.
def SETE : R600_2OP <
0x08, "SETE",
- [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_EQ))]
+ [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_OEQ))]
>;
def SGT : R600_2OP <
0x09, "SETGT",
- [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_GT))]
+ [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_OGT))]
>;
def SGE : R600_2OP <
0xA, "SETGE",
- [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_GE))]
+ [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_OGE))]
>;
def SNE : R600_2OP <
0xB, "SETNE",
- [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_NE))]
+ [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_UNE))]
>;
def SETE_DX10 : R600_2OP <
0xC, "SETE_DX10",
- [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_EQ))]
+ [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_OEQ))]
>;
def SETGT_DX10 : R600_2OP <
0xD, "SETGT_DX10",
- [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_GT))]
+ [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_OGT))]
>;
def SETGE_DX10 : R600_2OP <
0xE, "SETGE_DX10",
- [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_GE))]
+ [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_OGE))]
>;
def SETNE_DX10 : R600_2OP <
0xF, "SETNE_DX10",
- [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_NE))]
+ [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_UNE))]
>;
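
The compare patterns now spell out ordered/unordered condition codes: COND_OEQ/COND_OGT/COND_OGE are false whenever an operand is NaN, while COND_UNE is true in that case. A scalar C++ model of the SETE/SETNE pair (plus a DX10-style variant), relying only on IEEE comparison semantics; this is an illustration of the selectcc patterns, not backend code.

// With a NaN operand, setE/setE_DX10 return 0 and setNE returns 1.0f.
float setE(float A, float B)      { return (A == B) ? 1.0f : 0.0f; } // COND_OEQ
float setNE(float A, float B)     { return (A != B) ? 1.0f : 0.0f; } // COND_UNE
int   setE_DX10(float A, float B) { return (A == B) ? -1   : 0;    } // -1/0 result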
def FRACT : R600_1OP_Helper <0x10, "FRACT", AMDGPUfract>;
@@ -1120,104 +822,86 @@ def CNDE_INT : R600_3OP <
def CNDGE_INT : R600_3OP <
0x1E, "CNDGE_INT",
- [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_GE))]
+ [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_SGE))]
>;
def CNDGT_INT : R600_3OP <
0x1D, "CNDGT_INT",
- [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_GT))]
+ [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_SGT))]
>;
//===----------------------------------------------------------------------===//
// Texture instructions
//===----------------------------------------------------------------------===//
-def TEX_LD : R600_TEX <
- 0x03, "TEX_LD",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txf v4f32:$SRC_GPR,
- imm:$OFFSET_X, imm:$OFFSET_Y, imm:$OFFSET_Z, imm:$RESOURCE_ID,
- imm:$SAMPLER_ID, imm:$textureTarget))]
-> {
-let AsmString = "TEX_LD $DST_GPR, $SRC_GPR, $OFFSET_X, $OFFSET_Y, $OFFSET_Z,"
- "$RESOURCE_ID, $SAMPLER_ID, $textureTarget";
-let InOperandList = (ins R600_Reg128:$SRC_GPR, i32imm:$OFFSET_X,
- i32imm:$OFFSET_Y, i32imm:$OFFSET_Z, i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID,
- i32imm:$textureTarget);
-}
-
-def TEX_GET_TEXTURE_RESINFO : R600_TEX <
- 0x04, "TEX_GET_TEXTURE_RESINFO",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txq v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
-
-def TEX_GET_GRADIENTS_H : R600_TEX <
- 0x07, "TEX_GET_GRADIENTS_H",
- [(set v4f32:$DST_GPR, (int_AMDGPU_ddx v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
-
-def TEX_GET_GRADIENTS_V : R600_TEX <
- 0x08, "TEX_GET_GRADIENTS_V",
- [(set v4f32:$DST_GPR, (int_AMDGPU_ddy v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
-
-def TEX_SET_GRADIENTS_H : R600_TEX <
- 0x0B, "TEX_SET_GRADIENTS_H",
- []
->;
-
-def TEX_SET_GRADIENTS_V : R600_TEX <
- 0x0C, "TEX_SET_GRADIENTS_V",
- []
->;
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
-def TEX_SAMPLE : R600_TEX <
- 0x10, "TEX_SAMPLE",
- [(set v4f32:$DST_GPR, (int_AMDGPU_tex v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
+class R600_TEX <bits<11> inst, string opName> :
+ InstR600 <(outs R600_Reg128:$DST_GPR),
+ (ins R600_Reg128:$SRC_GPR,
+ RSel:$srcx, RSel:$srcy, RSel:$srcz, RSel:$srcw,
+ i32imm:$offsetx, i32imm:$offsety, i32imm:$offsetz,
+ RSel:$DST_SEL_X, RSel:$DST_SEL_Y, RSel:$DST_SEL_Z, RSel:$DST_SEL_W,
+ i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID,
+ CT:$COORD_TYPE_X, CT:$COORD_TYPE_Y, CT:$COORD_TYPE_Z,
+ CT:$COORD_TYPE_W),
+ !strconcat(opName,
+ " $DST_GPR.$DST_SEL_X$DST_SEL_Y$DST_SEL_Z$DST_SEL_W, "
+ "$SRC_GPR.$srcx$srcy$srcz$srcw "
+ "RID:$RESOURCE_ID SID:$SAMPLER_ID "
+ "CT:$COORD_TYPE_X$COORD_TYPE_Y$COORD_TYPE_Z$COORD_TYPE_W"),
+ [],
+ NullALU>, TEX_WORD0, TEX_WORD1, TEX_WORD2 {
+ let Inst{31-0} = Word0;
+ let Inst{63-32} = Word1;
-def TEX_SAMPLE_C : R600_TEX <
- 0x18, "TEX_SAMPLE_C",
- [(set v4f32:$DST_GPR, (int_AMDGPU_tex v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))]
->;
+ let TEX_INST = inst{4-0};
+ let SRC_REL = 0;
+ let DST_REL = 0;
+ let LOD_BIAS = 0;
-def TEX_SAMPLE_L : R600_TEX <
- 0x11, "TEX_SAMPLE_L",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txl v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
+ let INST_MOD = 0;
+ let FETCH_WHOLE_QUAD = 0;
+ let ALT_CONST = 0;
+ let SAMPLER_INDEX_MODE = 0;
+ let RESOURCE_INDEX_MODE = 0;
-def TEX_SAMPLE_C_L : R600_TEX <
- 0x19, "TEX_SAMPLE_C_L",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txl v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))]
->;
+ let TEXInst = 1;
+}
-def TEX_SAMPLE_LB : R600_TEX <
- 0x12, "TEX_SAMPLE_LB",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txb v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
+} // End mayLoad = 0, mayStore = 0, hasSideEffects = 0
-def TEX_SAMPLE_C_LB : R600_TEX <
- 0x1A, "TEX_SAMPLE_C_LB",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txb v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))]
->;
-def TEX_SAMPLE_G : R600_TEX <
- 0x14, "TEX_SAMPLE_G",
- []
->;
-def TEX_SAMPLE_C_G : R600_TEX <
- 0x1C, "TEX_SAMPLE_C_G",
- []
->;
+def TEX_SAMPLE : R600_TEX <0x10, "TEX_SAMPLE">;
+def TEX_SAMPLE_C : R600_TEX <0x18, "TEX_SAMPLE_C">;
+def TEX_SAMPLE_L : R600_TEX <0x11, "TEX_SAMPLE_L">;
+def TEX_SAMPLE_C_L : R600_TEX <0x19, "TEX_SAMPLE_C_L">;
+def TEX_SAMPLE_LB : R600_TEX <0x12, "TEX_SAMPLE_LB">;
+def TEX_SAMPLE_C_LB : R600_TEX <0x1A, "TEX_SAMPLE_C_LB">;
+def TEX_LD : R600_TEX <0x03, "TEX_LD">;
+def TEX_LDPTR : R600_TEX <0x03, "TEX_LDPTR"> {
+ let INST_MOD = 1;
+}
+def TEX_GET_TEXTURE_RESINFO : R600_TEX <0x04, "TEX_GET_TEXTURE_RESINFO">;
+def TEX_GET_GRADIENTS_H : R600_TEX <0x07, "TEX_GET_GRADIENTS_H">;
+def TEX_GET_GRADIENTS_V : R600_TEX <0x08, "TEX_GET_GRADIENTS_V">;
+def TEX_SET_GRADIENTS_H : R600_TEX <0x0B, "TEX_SET_GRADIENTS_H">;
+def TEX_SET_GRADIENTS_V : R600_TEX <0x0C, "TEX_SET_GRADIENTS_V">;
+def TEX_SAMPLE_G : R600_TEX <0x14, "TEX_SAMPLE_G">;
+def TEX_SAMPLE_C_G : R600_TEX <0x1C, "TEX_SAMPLE_C_G">;
+
+defm : TexPattern<0, TEX_SAMPLE>;
+defm : TexPattern<1, TEX_SAMPLE_C>;
+defm : TexPattern<2, TEX_SAMPLE_L>;
+defm : TexPattern<3, TEX_SAMPLE_C_L>;
+defm : TexPattern<4, TEX_SAMPLE_LB>;
+defm : TexPattern<5, TEX_SAMPLE_C_LB>;
+defm : TexPattern<6, TEX_LD, v4i32>;
+defm : TexPattern<7, TEX_GET_TEXTURE_RESINFO, v4i32>;
+defm : TexPattern<8, TEX_GET_GRADIENTS_H>;
+defm : TexPattern<9, TEX_GET_GRADIENTS_V>;
+defm : TexPattern<10, TEX_LDPTR, v4i32>;
//===----------------------------------------------------------------------===//
// Helper classes for common instructions
@@ -1240,41 +924,82 @@ class MULADD_IEEE_Common <bits<5> inst> : R600_3OP <
class CNDE_Common <bits<5> inst> : R600_3OP <
inst, "CNDE",
- [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_EQ))]
+ [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_OEQ))]
>;
class CNDGT_Common <bits<5> inst> : R600_3OP <
inst, "CNDGT",
- [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_GT))]
->;
+ [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_OGT))]
+> {
+ let Itinerary = VecALU;
+}
class CNDGE_Common <bits<5> inst> : R600_3OP <
inst, "CNDGE",
- [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_GE))]
->;
-
-multiclass DOT4_Common <bits<11> inst> {
+ [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_OGE))]
+> {
+ let Itinerary = VecALU;
+}
+
+
+let isCodeGenOnly = 1, isPseudo = 1, Namespace = "AMDGPU" in {
+class R600_VEC2OP<list<dag> pattern> : InstR600 <(outs R600_Reg32:$dst), (ins
+// Slot X
+ UEM:$update_exec_mask_X, UP:$update_pred_X, WRITE:$write_X,
+ OMOD:$omod_X, REL:$dst_rel_X, CLAMP:$clamp_X,
+ R600_TReg32_X:$src0_X, NEG:$src0_neg_X, REL:$src0_rel_X, ABS:$src0_abs_X, SEL:$src0_sel_X,
+ R600_TReg32_X:$src1_X, NEG:$src1_neg_X, REL:$src1_rel_X, ABS:$src1_abs_X, SEL:$src1_sel_X,
+ R600_Pred:$pred_sel_X,
+// Slot Y
+ UEM:$update_exec_mask_Y, UP:$update_pred_Y, WRITE:$write_Y,
+ OMOD:$omod_Y, REL:$dst_rel_Y, CLAMP:$clamp_Y,
+ R600_TReg32_Y:$src0_Y, NEG:$src0_neg_Y, REL:$src0_rel_Y, ABS:$src0_abs_Y, SEL:$src0_sel_Y,
+ R600_TReg32_Y:$src1_Y, NEG:$src1_neg_Y, REL:$src1_rel_Y, ABS:$src1_abs_Y, SEL:$src1_sel_Y,
+ R600_Pred:$pred_sel_Y,
+// Slot Z
+ UEM:$update_exec_mask_Z, UP:$update_pred_Z, WRITE:$write_Z,
+ OMOD:$omod_Z, REL:$dst_rel_Z, CLAMP:$clamp_Z,
+ R600_TReg32_Z:$src0_Z, NEG:$src0_neg_Z, REL:$src0_rel_Z, ABS:$src0_abs_Z, SEL:$src0_sel_Z,
+ R600_TReg32_Z:$src1_Z, NEG:$src1_neg_Z, REL:$src1_rel_Z, ABS:$src1_abs_Z, SEL:$src1_sel_Z,
+ R600_Pred:$pred_sel_Z,
+// Slot W
+ UEM:$update_exec_mask_W, UP:$update_pred_W, WRITE:$write_W,
+ OMOD:$omod_W, REL:$dst_rel_W, CLAMP:$clamp_W,
+ R600_TReg32_W:$src0_W, NEG:$src0_neg_W, REL:$src0_rel_W, ABS:$src0_abs_W, SEL:$src0_sel_W,
+ R600_TReg32_W:$src1_W, NEG:$src1_neg_W, REL:$src1_rel_W, ABS:$src1_abs_W, SEL:$src1_sel_W,
+ R600_Pred:$pred_sel_W,
+ LITERAL:$literal0, LITERAL:$literal1),
+ "",
+ pattern,
+ AnyALU> {
- def _pseudo : R600_REDUCTION <inst,
- (ins R600_Reg128:$src0, R600_Reg128:$src1),
- "DOT4 $dst $src0, $src1",
- [(set f32:$dst, (int_AMDGPU_dp4 v4f32:$src0, v4f32:$src1))]
- >;
+ let UseNamedOperandTable = 1;
- def _real : R600_2OP <inst, "DOT4", []>;
}
+}
+
+def DOT_4 : R600_VEC2OP<[(set R600_Reg32:$dst, (DOT4
+ R600_TReg32_X:$src0_X, R600_TReg32_X:$src1_X,
+ R600_TReg32_Y:$src0_Y, R600_TReg32_Y:$src1_Y,
+ R600_TReg32_Z:$src0_Z, R600_TReg32_Z:$src1_Z,
+ R600_TReg32_W:$src0_W, R600_TReg32_W:$src1_W))]>;
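
DOT_4 bundles the four ALU slots into one instruction; the DOT4 node it matches takes its eight f32 inputs as per-channel (src0, src1) pairs and produces a single scalar. The reduction is simply the four-component dot product, sketched here for illustration only:

// Scalar view of the DOT4 node: operands arrive as X, Y, Z, W pairs.
float dot4(float Ax, float Bx, float Ay, float By,
           float Az, float Bz, float Aw, float Bw) {
  return Ax * Bx + Ay * By + Az * Bz + Aw * Bw;
}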
+
+
+class DOT4_Common <bits<11> inst> : R600_2OP <inst, "DOT4", []>;
+
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
multiclass CUBE_Common <bits<11> inst> {
def _pseudo : InstR600 <
(outs R600_Reg128:$dst),
- (ins R600_Reg128:$src),
- "CUBE $dst $src",
- [(set v4f32:$dst, (int_AMDGPU_cube v4f32:$src))],
+ (ins R600_Reg128:$src0),
+ "CUBE $dst $src0",
+ [(set v4f32:$dst, (int_AMDGPU_cube v4f32:$src0))],
VecALU
> {
let isPseudo = 1;
+ let UseNamedOperandTable = 1;
}
def _real : R600_2OP <inst, "CUBE", []>;
@@ -1284,35 +1009,30 @@ multiclass CUBE_Common <bits<11> inst> {
class EXP_IEEE_Common <bits<11> inst> : R600_1OP_Helper <
inst, "EXP_IEEE", fexp2
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class FLT_TO_INT_Common <bits<11> inst> : R600_1OP_Helper <
inst, "FLT_TO_INT", fp_to_sint
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class INT_TO_FLT_Common <bits<11> inst> : R600_1OP_Helper <
inst, "INT_TO_FLT", sint_to_fp
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class FLT_TO_UINT_Common <bits<11> inst> : R600_1OP_Helper <
inst, "FLT_TO_UINT", fp_to_uint
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class UINT_TO_FLT_Common <bits<11> inst> : R600_1OP_Helper <
inst, "UINT_TO_FLT", uint_to_fp
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
@@ -1323,7 +1043,6 @@ class LOG_CLAMPED_Common <bits<11> inst> : R600_1OP <
class LOG_IEEE_Common <bits<11> inst> : R600_1OP_Helper <
inst, "LOG_IEEE", flog2
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
@@ -1333,75 +1052,68 @@ class ASHR_Common <bits<11> inst> : R600_2OP_Helper <inst, "ASHR", sra>;
class MULHI_INT_Common <bits<11> inst> : R600_2OP_Helper <
inst, "MULHI_INT", mulhs
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class MULHI_UINT_Common <bits<11> inst> : R600_2OP_Helper <
inst, "MULHI", mulhu
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class MULLO_INT_Common <bits<11> inst> : R600_2OP_Helper <
inst, "MULLO_INT", mul
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class MULLO_UINT_Common <bits<11> inst> : R600_2OP <inst, "MULLO_UINT", []> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class RECIP_CLAMPED_Common <bits<11> inst> : R600_1OP <
inst, "RECIP_CLAMPED", []
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class RECIP_IEEE_Common <bits<11> inst> : R600_1OP <
inst, "RECIP_IEEE", [(set f32:$dst, (fdiv FP_ONE, f32:$src0))]
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class RECIP_UINT_Common <bits<11> inst> : R600_1OP_Helper <
inst, "RECIP_UINT", AMDGPUurecip
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class RECIPSQRT_CLAMPED_Common <bits<11> inst> : R600_1OP_Helper <
inst, "RECIPSQRT_CLAMPED", int_AMDGPU_rsq
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class RECIPSQRT_IEEE_Common <bits<11> inst> : R600_1OP <
inst, "RECIPSQRT_IEEE", []
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class SIN_Common <bits<11> inst> : R600_1OP <
- inst, "SIN", []>{
+ inst, "SIN", [(set f32:$dst, (SIN_HW f32:$src0))]>{
let Trig = 1;
- let TransOnly = 1;
let Itinerary = TransALU;
}
class COS_Common <bits<11> inst> : R600_1OP <
- inst, "COS", []> {
+ inst, "COS", [(set f32:$dst, (COS_HW f32:$src0))]> {
let Trig = 1;
- let TransOnly = 1;
let Itinerary = TransALU;
}
+def CLAMP_R600 : CLAMP <R600_Reg32>;
+def FABS_R600 : FABS<R600_Reg32>;
+def FNEG_R600 : FNEG<R600_Reg32>;
+
//===----------------------------------------------------------------------===//
// Helper patterns for complex intrinsics
//===----------------------------------------------------------------------===//
@@ -1424,6 +1136,13 @@ class TGSI_LIT_Z_Common <InstR600 mul_lit, InstR600 log_clamped, InstR600 exp_ie
(exp_ieee (mul_lit (log_clamped (MAX $src_y, (f32 ZERO))), $src_w, $src_x))
>;
+// FROUND pattern
+class FROUNDPat<Instruction CNDGE> : Pat <
+ (AMDGPUround f32:$x),
+ (CNDGE (ADD (FNEG_R600 (f32 HALF)), (FRACT $x)), (CEIL $x), (FLOOR $x))
+>;
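
FROUNDPat expands AMDGPUround in terms of existing ALU ops: CNDGE picks its second operand when the first is non-negative, so the pattern selects ceil(x) when the fractional part is at least one half and floor(x) otherwise. A scalar sketch of the same expansion, assuming FRACT behaves as x - floor(x):

#include <cmath>

// Scalar equivalent of (CNDGE (ADD (FNEG HALF), (FRACT x)), (CEIL x), (FLOOR x)).
float froundViaCndge(float X) {
  float Fract = X - std::floor(X);       // FRACT
  float Cond  = -0.5f + Fract;           // ADD(FNEG_R600(HALF), FRACT)
  return (Cond >= 0.0f) ? std::ceil(X)   // CNDGE: src0 >= 0 ? src1
                        : std::floor(X); //                  : src2
}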
+
+
//===----------------------------------------------------------------------===//
// R600 / R700 Instructions
//===----------------------------------------------------------------------===//
@@ -1436,7 +1155,7 @@ let Predicates = [isR600] in {
def CNDE_r600 : CNDE_Common<0x18>;
def CNDGT_r600 : CNDGT_Common<0x19>;
def CNDGE_r600 : CNDGE_Common<0x1A>;
- defm DOT4_r600 : DOT4_Common<0x50>;
+ def DOT4_r600 : DOT4_Common<0x50>;
defm CUBE_r600 : CUBE_Common<0x52>;
def EXP_IEEE_r600 : EXP_IEEE_Common<0x61>;
def LOG_CLAMPED_r600 : LOG_CLAMPED_Common<0x62>;
@@ -1465,11 +1184,12 @@ let Predicates = [isR600] in {
def TGSI_LIT_Z_r600 : TGSI_LIT_Z_Common<MUL_LIT_r600, LOG_CLAMPED_r600, EXP_IEEE_r600>;
def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_r600 $src))>;
+ def : FROUNDPat <CNDGE_r600>;
def R600_ExportSwz : ExportSwzInst {
let Word1{20-17} = 0; // BURST_COUNT
let Word1{21} = eop;
- let Word1{22} = 1; // VALID_PIXEL_MODE
+ let Word1{22} = 0; // VALID_PIXEL_MODE
let Word1{30-23} = inst;
let Word1{31} = 1; // BARRIER
}
@@ -1478,58 +1198,58 @@ let Predicates = [isR600] in {
def R600_ExportBuf : ExportBufInst {
let Word1{20-17} = 0; // BURST_COUNT
let Word1{21} = eop;
- let Word1{22} = 1; // VALID_PIXEL_MODE
+ let Word1{22} = 0; // VALID_PIXEL_MODE
let Word1{30-23} = inst;
let Word1{31} = 1; // BARRIER
}
defm : SteamOutputExportPattern<R600_ExportBuf, 0x20, 0x21, 0x22, 0x23>;
- def CF_TC_R600 : CF_CLAUSE_R600<1, (ins i32imm:$ADDR, i32imm:$COUNT),
- "TEX $COUNT @$ADDR"> {
+ def CF_TC_R600 : CF_CLAUSE_R600<1, (ins i32imm:$ADDR, i32imm:$CNT),
+ "TEX $CNT @$ADDR"> {
let POP_COUNT = 0;
}
- def CF_VC_R600 : CF_CLAUSE_R600<2, (ins i32imm:$ADDR, i32imm:$COUNT),
- "VTX $COUNT @$ADDR"> {
+ def CF_VC_R600 : CF_CLAUSE_R600<2, (ins i32imm:$ADDR, i32imm:$CNT),
+ "VTX $CNT @$ADDR"> {
let POP_COUNT = 0;
}
def WHILE_LOOP_R600 : CF_CLAUSE_R600<6, (ins i32imm:$ADDR),
"LOOP_START_DX10 @$ADDR"> {
let POP_COUNT = 0;
- let COUNT = 0;
+ let CNT = 0;
}
def END_LOOP_R600 : CF_CLAUSE_R600<5, (ins i32imm:$ADDR), "END_LOOP @$ADDR"> {
let POP_COUNT = 0;
- let COUNT = 0;
+ let CNT = 0;
}
def LOOP_BREAK_R600 : CF_CLAUSE_R600<9, (ins i32imm:$ADDR),
"LOOP_BREAK @$ADDR"> {
let POP_COUNT = 0;
- let COUNT = 0;
+ let CNT = 0;
}
def CF_CONTINUE_R600 : CF_CLAUSE_R600<8, (ins i32imm:$ADDR),
"CONTINUE @$ADDR"> {
let POP_COUNT = 0;
- let COUNT = 0;
+ let CNT = 0;
}
def CF_JUMP_R600 : CF_CLAUSE_R600<10, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
"JUMP @$ADDR POP:$POP_COUNT"> {
- let COUNT = 0;
+ let CNT = 0;
}
def CF_ELSE_R600 : CF_CLAUSE_R600<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
"ELSE @$ADDR POP:$POP_COUNT"> {
- let COUNT = 0;
+ let CNT = 0;
}
def CF_CALL_FS_R600 : CF_CLAUSE_R600<19, (ins), "CALL_FS"> {
let ADDR = 0;
- let COUNT = 0;
+ let CNT = 0;
let POP_COUNT = 0;
}
def POP_R600 : CF_CLAUSE_R600<14, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
"POP @$ADDR POP:$POP_COUNT"> {
- let COUNT = 0;
+ let CNT = 0;
}
def CF_END_R600 : CF_CLAUSE_R600<0, (ins), "CF_END"> {
- let COUNT = 0;
+ let CNT = 0;
let POP_COUNT = 0;
let ADDR = 0;
let END_OF_PROGRAM = 1;
@@ -1537,18 +1257,6 @@ let Predicates = [isR600] in {
}
-// Helper pattern for normalizing inputs to triginomic instructions for R700+
-// cards.
-class COS_PAT <InstR600 trig> : Pat<
- (fcos f32:$src),
- (trig (MUL_IEEE (MOV_IMM_I32 CONST.TWO_PI_INV), $src))
->;
-
-class SIN_PAT <InstR600 trig> : Pat<
- (fsin f32:$src),
- (trig (MUL_IEEE (MOV_IMM_I32 CONST.TWO_PI_INV), $src))
->;
-
//===----------------------------------------------------------------------===//
// R700 Only instructions
//===----------------------------------------------------------------------===//
@@ -1556,12 +1264,35 @@ class SIN_PAT <InstR600 trig> : Pat<
let Predicates = [isR700] in {
def SIN_r700 : SIN_Common<0x6E>;
def COS_r700 : COS_Common<0x6F>;
+}
+
+//===----------------------------------------------------------------------===//
+// Evergreen / Cayman store instructions
+//===----------------------------------------------------------------------===//
- // R700 normalizes inputs to SIN/COS the same as EG
- def : SIN_PAT <SIN_r700>;
- def : COS_PAT <COS_r700>;
+let Predicates = [isEGorCayman] in {
+
+class CF_MEM_RAT_CACHELESS <bits<6> rat_inst, bits<4> rat_id, bits<4> mask, dag ins,
+ string name, list<dag> pattern>
+ : EG_CF_RAT <0x57, rat_inst, rat_id, mask, (outs), ins,
+ "MEM_RAT_CACHELESS "#name, pattern>;
+
+class CF_MEM_RAT <bits<6> rat_inst, bits<4> rat_id, dag ins, string name,
+ list<dag> pattern>
+ : EG_CF_RAT <0x56, rat_inst, rat_id, 0xf /* mask */, (outs), ins,
+ "MEM_RAT "#name, pattern>;
+
+def RAT_MSKOR : CF_MEM_RAT <0x11, 0,
+ (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr),
+ "MSKOR $rw_gpr.XW, $index_gpr",
+ [(mskor_global v4i32:$rw_gpr, i32:$index_gpr)]
+> {
+ let eop = 0;
}
+} // End Predicates = [isEGorCayman]
+
+
//===----------------------------------------------------------------------===//
// Evergreen Only instructions
//===----------------------------------------------------------------------===//
@@ -1585,9 +1316,179 @@ def SIN_eg : SIN_Common<0x8D>;
def COS_eg : COS_Common<0x8E>;
def : POW_Common <LOG_IEEE_eg, EXP_IEEE_eg, MUL>;
-def : SIN_PAT <SIN_eg>;
-def : COS_PAT <COS_eg>;
def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_eg $src))>;
+
+//===----------------------------------------------------------------------===//
+// Memory read/write instructions
+//===----------------------------------------------------------------------===//
+
+let usesCustomInserter = 1 in {
+
+// 32-bit store
+def RAT_WRITE_CACHELESS_32_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0x1,
+ (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
+ "STORE_RAW $rw_gpr, $index_gpr, $eop",
+ [(global_store i32:$rw_gpr, i32:$index_gpr)]
+>;
+
+// 64-bit store
+def RAT_WRITE_CACHELESS_64_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0x3,
+ (ins R600_Reg64:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
+ "STORE_RAW $rw_gpr.XY, $index_gpr, $eop",
+ [(global_store v2i32:$rw_gpr, i32:$index_gpr)]
+>;
+
+//128-bit store
+def RAT_WRITE_CACHELESS_128_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0xf,
+ (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
+ "STORE_RAW $rw_gpr.XYZW, $index_gpr, $eop",
+ [(global_store v4i32:$rw_gpr, i32:$index_gpr)]
+>;
+
+} // End usesCustomInserter = 1
+
+class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
+ : VTX_WORD0_eg, VTX_READ<name, buffer_id, outs, pattern> {
+
+ // Static fields
+ let VC_INST = 0;
+ let FETCH_TYPE = 2;
+ let FETCH_WHOLE_QUAD = 0;
+ let BUFFER_ID = buffer_id;
+ let SRC_REL = 0;
+ // XXX: We can infer this field based on the SRC_GPR. This would allow us
+ // to store vertex addresses in any channel, not just X.
+ let SRC_SEL_X = 0;
+
+ let Inst{31-0} = Word0;
+}
+
+class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 1;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 1; // FMT_8
+}
+
+class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+ let MEGA_FETCH_COUNT = 2;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 5; // FMT_16
+
+}
+
+class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 4;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 0xD; // COLOR_32
+
+ // This is not really necessary, but there were some GPU hangs that appeared
+ // to be caused by ALU instructions in the next instruction group that wrote
+ // to the $src_gpr registers of the VTX_READ.
+ // e.g.
+ // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
+ // %T2_X<def> = MOV %ZERO
+ // Adding this constraint prevents this from happening.
+ let Constraints = "$src_gpr.ptr = $dst_gpr";
+}
+
+class VTX_READ_64_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_64 $dst_gpr.XY, $src_gpr", buffer_id,
+ (outs R600_Reg64:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 8;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 1;
+ let DST_SEL_Z = 7;
+ let DST_SEL_W = 7;
+ let DATA_FORMAT = 0x1D; // COLOR_32_32
+}
+
+class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id,
+ (outs R600_Reg128:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 16;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 1;
+ let DST_SEL_Z = 2;
+ let DST_SEL_W = 3;
+ let DATA_FORMAT = 0x22; // COLOR_32_32_32_32
+
+ // XXX: Need to force VTX_READ_128 instructions to write to the same register
+ // that holds their buffer address to avoid potential hangs. We can't use
+ // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst_gpr
+ // registers are different sizes.
+}
+
+//===----------------------------------------------------------------------===//
+// VTX Read from parameter memory space
+//===----------------------------------------------------------------------===//
+
+def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0,
+ [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0,
+ [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0,
+ [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_64_eg : VTX_READ_64_eg <0,
+ [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
+ [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+//===----------------------------------------------------------------------===//
+// VTX Read from global memory space
+//===----------------------------------------------------------------------===//
+
+// 8-bit reads
+def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1,
+ [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_GLOBAL_16_eg : VTX_READ_16_eg <1,
+ [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
+>;
+
+// 32-bit reads
+def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1,
+ [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 64-bit reads
+def VTX_READ_GLOBAL_64_eg : VTX_READ_64_eg <1,
+ [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 128-bit reads
+def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1,
+ [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
} // End Predicates = [isEG]
//===----------------------------------------------------------------------===//
@@ -1620,10 +1521,11 @@ let Predicates = [isEGorCayman] in {
def BFI_INT_eg : R600_3OP <0x06, "BFI_INT", [], VecALU>;
defm : BFIPatterns <BFI_INT_eg>;
- def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT",
- [(set i32:$dst, (AMDGPUbitalign i32:$src0, i32:$src1, i32:$src2))],
- VecALU
+ def MULADD_UINT24_eg : R600_3OP <0x10, "MULADD_UINT24",
+ [(set i32:$dst, (add (mul U24:$src0, U24:$src1), i32:$src2))], VecALU
>;
+ def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>;
+ def : ROTRPattern <BIT_ALIGN_INT_eg>;
def MULADD_eg : MULADD_Common<0x14>;
def MULADD_IEEE_eg : MULADD_IEEE_Common<0x18>;
@@ -1635,7 +1537,10 @@ let Predicates = [isEGorCayman] in {
def CNDGE_eg : CNDGE_Common<0x1B>;
def MUL_LIT_eg : MUL_LIT_Common<0x1F>;
def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>;
- defm DOT4_eg : DOT4_Common<0xBE>;
+ def MUL_UINT24_eg : R600_2OP <0xB5, "MUL_UINT24",
+ [(set i32:$dst, (mul U24:$src0, U24:$src1))], VecALU
+ >;
+ def DOT4_eg : DOT4_Common<0xBE>;
defm CUBE_eg : CUBE_Common<0xC0>;
let hasSideEffects = 1 in {
@@ -1646,6 +1551,7 @@ let hasSideEffects = 1 in {
def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50> {
let Pattern = [];
+ let Itinerary = AnyALU;
}
def INT_TO_FLT_eg : INT_TO_FLT_Common<0x9B>;
@@ -1656,6 +1562,165 @@ let hasSideEffects = 1 in {
def UINT_TO_FLT_eg : UINT_TO_FLT_Common<0x9C>;
+def GROUP_BARRIER : InstR600 <
+ (outs), (ins), " GROUP_BARRIER", [(int_AMDGPU_barrier_local)], AnyALU>,
+ R600ALU_Word0,
+ R600ALU_Word1_OP2 <0x54> {
+
+ let dst = 0;
+ let dst_rel = 0;
+ let src0 = 0;
+ let src0_rel = 0;
+ let src0_neg = 0;
+ let src0_abs = 0;
+ let src1 = 0;
+ let src1_rel = 0;
+ let src1_neg = 0;
+ let src1_abs = 0;
+ let write = 0;
+ let omod = 0;
+ let clamp = 0;
+ let last = 1;
+ let bank_swizzle = 0;
+ let pred_sel = 0;
+ let update_exec_mask = 0;
+ let update_pred = 0;
+
+ let Inst{31-0} = Word0;
+ let Inst{63-32} = Word1;
+
+ let ALUInst = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// LDS Instructions
+//===----------------------------------------------------------------------===//
+class R600_LDS <bits<6> op, dag outs, dag ins, string asm,
+ list<dag> pattern = []> :
+
+ InstR600 <outs, ins, asm, pattern, XALU>,
+ R600_ALU_LDS_Word0,
+ R600LDS_Word1 {
+
+ bits<6> offset = 0;
+ let lds_op = op;
+
+ let Word1{27} = offset{0};
+ let Word1{12} = offset{1};
+ let Word1{28} = offset{2};
+ let Word1{31} = offset{3};
+ let Word0{12} = offset{4};
+ let Word0{25} = offset{5};
+
+
+ let Inst{31-0} = Word0;
+ let Inst{63-32} = Word1;
+
+ let ALUInst = 1;
+ let HasNativeOperands = 1;
+ let UseNamedOperandTable = 1;
+}
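
The 6-bit LDS offset is not a contiguous field; the class above scatters its bits over both encoding words. The same scattering written out as a hypothetical C++ helper:

#include <cstdint>

// Place each bit of the 6-bit LDS offset per the assignments in R600_LDS.
void scatterLdsOffset(uint8_t Offset, uint32_t &Word0, uint32_t &Word1) {
  Word1 |= uint32_t((Offset >> 0) & 1) << 27;
  Word1 |= uint32_t((Offset >> 1) & 1) << 12;
  Word1 |= uint32_t((Offset >> 2) & 1) << 28;
  Word1 |= uint32_t((Offset >> 3) & 1) << 31;
  Word0 |= uint32_t((Offset >> 4) & 1) << 12;
  Word0 |= uint32_t((Offset >> 5) & 1) << 25;
}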
+
+class R600_LDS_1A <bits<6> lds_op, string name, list<dag> pattern> : R600_LDS <
+ lds_op,
+ (outs R600_Reg32:$dst),
+ (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+ LAST:$last, R600_Pred:$pred_sel,
+ BANK_SWIZZLE:$bank_swizzle),
+ " "#name#" $last OQAP, $src0$src0_rel $pred_sel",
+ pattern
+ > {
+
+ let src1 = 0;
+ let src1_rel = 0;
+ let src2 = 0;
+ let src2_rel = 0;
+
+ let Defs = [OQAP];
+ let usesCustomInserter = 1;
+ let LDS_1A = 1;
+ let DisableEncoding = "$dst";
+}
+
+class R600_LDS_1A1D <bits<6> lds_op, dag outs, string name, list<dag> pattern,
+ string dst =""> :
+ R600_LDS <
+ lds_op, outs,
+ (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+ R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
+ LAST:$last, R600_Pred:$pred_sel,
+ BANK_SWIZZLE:$bank_swizzle),
+ " "#name#" $last "#dst#"$src0$src0_rel, $src1$src1_rel, $pred_sel",
+ pattern
+ > {
+
+ field string BaseOp;
+
+ let src2 = 0;
+ let src2_rel = 0;
+ let LDS_1A1D = 1;
+}
+
+class R600_LDS_1A1D_NORET <bits<6> lds_op, string name, list<dag> pattern> :
+ R600_LDS_1A1D <lds_op, (outs), name, pattern> {
+ let BaseOp = name;
+}
+
+class R600_LDS_1A1D_RET <bits<6> lds_op, string name, list<dag> pattern> :
+ R600_LDS_1A1D <lds_op, (outs R600_Reg32:$dst), name##"_RET", pattern, "OQAP, "> {
+
+ let BaseOp = name;
+ let usesCustomInserter = 1;
+ let DisableEncoding = "$dst";
+ let Defs = [OQAP];
+}
+
+class R600_LDS_1A2D <bits<6> lds_op, string name, list<dag> pattern> :
+ R600_LDS <
+ lds_op,
+ (outs),
+ (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+ R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
+ R600_Reg32:$src2, REL:$src2_rel, SEL:$src2_sel,
+ LAST:$last, R600_Pred:$pred_sel, BANK_SWIZZLE:$bank_swizzle),
+ " "#name# "$last $src0$src0_rel, $src1$src1_rel, $src2$src2_rel, $pred_sel",
+ pattern> {
+ let LDS_1A2D = 1;
+}
+
+def LDS_ADD : R600_LDS_1A1D_NORET <0x0, "LDS_ADD", [] >;
+def LDS_SUB : R600_LDS_1A1D_NORET <0x1, "LDS_SUB", [] >;
+def LDS_WRITE : R600_LDS_1A1D_NORET <0xD, "LDS_WRITE",
+ [(local_store (i32 R600_Reg32:$src1), R600_Reg32:$src0)]
+>;
+def LDS_BYTE_WRITE : R600_LDS_1A1D_NORET<0x12, "LDS_BYTE_WRITE",
+ [(truncstorei8_local i32:$src1, i32:$src0)]
+>;
+def LDS_SHORT_WRITE : R600_LDS_1A1D_NORET<0x13, "LDS_SHORT_WRITE",
+ [(truncstorei16_local i32:$src1, i32:$src0)]
+>;
+def LDS_ADD_RET : R600_LDS_1A1D_RET <0x20, "LDS_ADD",
+ [(set i32:$dst, (atomic_load_add_local i32:$src0, i32:$src1))]
+>;
+def LDS_SUB_RET : R600_LDS_1A1D_RET <0x21, "LDS_SUB",
+ [(set i32:$dst, (atomic_load_sub_local i32:$src0, i32:$src1))]
+>;
+def LDS_READ_RET : R600_LDS_1A <0x32, "LDS_READ_RET",
+ [(set (i32 R600_Reg32:$dst), (local_load R600_Reg32:$src0))]
+>;
+def LDS_BYTE_READ_RET : R600_LDS_1A <0x36, "LDS_BYTE_READ_RET",
+ [(set i32:$dst, (sextloadi8_local i32:$src0))]
+>;
+def LDS_UBYTE_READ_RET : R600_LDS_1A <0x37, "LDS_UBYTE_READ_RET",
+ [(set i32:$dst, (az_extloadi8_local i32:$src0))]
+>;
+def LDS_SHORT_READ_RET : R600_LDS_1A <0x38, "LDS_SHORT_READ_RET",
+ [(set i32:$dst, (sextloadi16_local i32:$src0))]
+>;
+def LDS_USHORT_READ_RET : R600_LDS_1A <0x39, "LDS_USHORT_READ_RET",
+ [(set i32:$dst, (az_extloadi16_local i32:$src0))]
+>;
+
// TRUNC is used for the FLT_TO_INT instructions to work around a
// perceived problem where the rounding modes are applied differently
// depending on the instruction and the slot they are in.
@@ -1673,9 +1738,11 @@ let hasSideEffects = 1 in {
// SHA-256 Patterns
def : SHA256MaPattern <BFI_INT_eg, XOR_INT>;
+ def : FROUNDPat <CNDGE_eg>;
+
def EG_ExportSwz : ExportSwzInst {
let Word1{19-16} = 0; // BURST_COUNT
- let Word1{20} = 1; // VALID_PIXEL_MODE
+ let Word1{20} = 0; // VALID_PIXEL_MODE
let Word1{21} = eop;
let Word1{29-22} = inst;
let Word1{30} = 0; // MARK
@@ -1685,7 +1752,7 @@ let hasSideEffects = 1 in {
def EG_ExportBuf : ExportBufInst {
let Word1{19-16} = 0; // BURST_COUNT
- let Word1{20} = 1; // VALID_PIXEL_MODE
+ let Word1{20} = 0; // VALID_PIXEL_MODE
let Word1{21} = eop;
let Word1{29-22} = inst;
let Word1{30} = 0; // MARK
@@ -1744,48 +1811,78 @@ let hasSideEffects = 1 in {
let END_OF_PROGRAM = 1;
}
+} // End Predicates = [isEGorCayman]
+
//===----------------------------------------------------------------------===//
-// Memory read/write instructions
+// Register loads and stores - for indirect addressing
//===----------------------------------------------------------------------===//
-let usesCustomInserter = 1 in {
-class RAT_WRITE_CACHELESS_eg <dag ins, bits<4> comp_mask, string name,
- list<dag> pattern>
- : EG_CF_RAT <0x57, 0x2, 0, (outs), ins,
- !strconcat(name, " $rw_gpr, $index_gpr, $eop"), pattern> {
- let RIM = 0;
- // XXX: Have a separate instruction for non-indexed writes.
- let TYPE = 1;
- let RW_REL = 0;
- let ELEM_SIZE = 0;
+defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>;
- let ARRAY_SIZE = 0;
- let COMP_MASK = comp_mask;
- let BURST_COUNT = 0;
- let VPM = 0;
- let MARK = 0;
- let BARRIER = 1;
-}
+//===----------------------------------------------------------------------===//
+// Cayman Instructions
+//===----------------------------------------------------------------------===//
-} // End usesCustomInserter = 1
+let Predicates = [isCayman] in {
-// 32-bit store
-def RAT_WRITE_CACHELESS_32_eg : RAT_WRITE_CACHELESS_eg <
- (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
- 0x1, "RAT_WRITE_CACHELESS_32_eg",
- [(global_store i32:$rw_gpr, i32:$index_gpr)]
+def MULADD_INT24_cm : R600_3OP <0x08, "MULADD_INT24",
+ [(set i32:$dst, (add (mul I24:$src0, I24:$src1), i32:$src2))], VecALU
+>;
+def MUL_INT24_cm : R600_2OP <0x5B, "MUL_INT24",
+ [(set i32:$dst, (mul I24:$src0, I24:$src1))], VecALU
>;
-//128-bit store
-def RAT_WRITE_CACHELESS_128_eg : RAT_WRITE_CACHELESS_eg <
- (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
- 0xf, "RAT_WRITE_CACHELESS_128",
- [(global_store v4i32:$rw_gpr, i32:$index_gpr)]
+let isVector = 1 in {
+
+def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
+
+def MULLO_INT_cm : MULLO_INT_Common<0x8F>;
+def MULHI_INT_cm : MULHI_INT_Common<0x90>;
+def MULLO_UINT_cm : MULLO_UINT_Common<0x91>;
+def MULHI_UINT_cm : MULHI_UINT_Common<0x92>;
+def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>;
+def EXP_IEEE_cm : EXP_IEEE_Common<0x81>;
+def LOG_IEEE_cm : LOG_IEEE_Common<0x83>;
+def RECIP_CLAMPED_cm : RECIP_CLAMPED_Common<0x84>;
+def RECIPSQRT_IEEE_cm : RECIPSQRT_IEEE_Common<0x89>;
+def SIN_cm : SIN_Common<0x8D>;
+def COS_cm : COS_Common<0x8E>;
+} // End isVector = 1
+
+def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>;
+
+defm DIV_cm : DIV_Common<RECIP_IEEE_cm>;
+
+// RECIP_UINT emulation for Cayman
+// The multiplication scales the reciprocal from [0,1] to the unsigned integer range.
+def : Pat <
+ (AMDGPUurecip i32:$src0),
+ (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg $src0)),
+ (MOV_IMM_I32 CONST.FP_UINT_MAX_PLUS_1)))
>;
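
Cayman has no native RECIP_UINT, so the pattern above rebuilds it from the float reciprocal: convert the source to float, take RECIP_IEEE, rescale by CONST.FP_UINT_MAX_PLUS_1 (assumed here to be 2^32), and convert back. A scalar sketch, additionally assuming the final conversion saturates the way FLT_TO_UINT does:

#include <cstdint>

// Scalar model of the Cayman AMDGPUurecip expansion (illustration only).
uint32_t urecipEmulated(uint32_t Src) {
  double Scaled = (1.0 / double(Src)) * 4294967296.0; // RECIP_IEEE * 2^32
  if (Scaled >= 4294967295.0)
    return 0xFFFFFFFFu;                               // assumed saturation
  return uint32_t(Scaled);
}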
-class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
- : InstR600ISA <outs, (ins MEMxi:$ptr), name#" $dst, $ptr", pattern>,
- VTX_WORD1_GPR, VTX_WORD0 {
+ def CF_END_CM : CF_CLAUSE_EG<32, (ins), "CF_END"> {
+ let ADDR = 0;
+ let POP_COUNT = 0;
+ let COUNT = 0;
+ }
+
+def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>;
+
+class RAT_STORE_DWORD <RegisterClass rc, ValueType vt, bits<4> mask> :
+ CF_MEM_RAT_CACHELESS <0x14, 0, mask,
+ (ins rc:$rw_gpr, R600_TReg32_X:$index_gpr),
+ "STORE_DWORD $rw_gpr, $index_gpr",
+ [(global_store vt:$rw_gpr, i32:$index_gpr)]> {
+ let eop = 0; // This bit is not used on Cayman.
+}
+
+def RAT_STORE_DWORD32 : RAT_STORE_DWORD <R600_TReg32_X, i32, 0x1>;
+def RAT_STORE_DWORD64 : RAT_STORE_DWORD <R600_Reg64, v2i32, 0x3>;
+def RAT_STORE_DWORD128 : RAT_STORE_DWORD <R600_Reg128, v4i32, 0xf>;
+
+class VTX_READ_cm <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
+ : VTX_WORD0_cm, VTX_READ<name, buffer_id, outs, pattern> {
// Static fields
let VC_INST = 0;
@@ -1796,53 +1893,18 @@ class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
// XXX: We can infer this field based on the SRC_GPR. This would allow us
// to store vertex addresses in any channel, not just X.
let SRC_SEL_X = 0;
- let DST_REL = 0;
- // The docs say that if this bit is set, then DATA_FORMAT, NUM_FORMAT_ALL,
- // FORMAT_COMP_ALL, SRF_MODE_ALL, and ENDIAN_SWAP fields will be ignored,
- // however, based on my testing if USE_CONST_FIELDS is set, then all
- // these fields need to be set to 0.
- let USE_CONST_FIELDS = 0;
- let NUM_FORMAT_ALL = 1;
- let FORMAT_COMP_ALL = 0;
- let SRF_MODE_ALL = 0;
+ let SRC_SEL_Y = 0;
+ let STRUCTURED_READ = 0;
+ let LDS_REQ = 0;
+ let COALESCED_READ = 0;
let Inst{31-0} = Word0;
- let Inst{63-32} = Word1;
- // LLVM can only encode 64-bit instructions, so these fields are manually
- // encoded in R600CodeEmitter
- //
- // bits<16> OFFSET;
- // bits<2> ENDIAN_SWAP = 0;
- // bits<1> CONST_BUF_NO_STRIDE = 0;
- // bits<1> MEGA_FETCH = 0;
- // bits<1> ALT_CONST = 0;
- // bits<2> BUFFER_INDEX_MODE = 0;
-
-
-
- // VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding
- // is done in R600CodeEmitter
- //
- // Inst{79-64} = OFFSET;
- // Inst{81-80} = ENDIAN_SWAP;
- // Inst{82} = CONST_BUF_NO_STRIDE;
- // Inst{83} = MEGA_FETCH;
- // Inst{84} = ALT_CONST;
- // Inst{86-85} = BUFFER_INDEX_MODE;
- // Inst{95-86} = 0; Reserved
-
- // VTX_WORD3 (Padding)
- //
- // Inst{127-96} = 0;
-
- let VTXInst = 1;
}
-class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_8", buffer_id, (outs R600_TReg32_X:$dst),
- pattern> {
+class VTX_READ_8_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
- let MEGA_FETCH_COUNT = 1;
let DST_SEL_X = 0;
let DST_SEL_Y = 7; // Masked
let DST_SEL_Z = 7; // Masked
@@ -1850,10 +1912,9 @@ class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern>
let DATA_FORMAT = 1; // FMT_8
}
-class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_16", buffer_id, (outs R600_TReg32_X:$dst),
- pattern> {
- let MEGA_FETCH_COUNT = 2;
+class VTX_READ_16_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
let DST_SEL_X = 0;
let DST_SEL_Y = 7; // Masked
let DST_SEL_Z = 7; // Masked
@@ -1862,11 +1923,10 @@ class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern>
}
-class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_32", buffer_id, (outs R600_TReg32_X:$dst),
- pattern> {
+class VTX_READ_32_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
- let MEGA_FETCH_COUNT = 4;
let DST_SEL_X = 0;
let DST_SEL_Y = 7; // Masked
let DST_SEL_Z = 7; // Masked
@@ -1875,19 +1935,29 @@ class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern>
// This is not really necessary, but there were some GPU hangs that appeared
// to be caused by ALU instructions in the next instruction group that wrote
- // to the $ptr registers of the VTX_READ.
+ // to the $src_gpr registers of the VTX_READ.
// e.g.
// %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
// %T2_X<def> = MOV %ZERO
//Adding this constraint prevents this from happening.
- let Constraints = "$ptr.ptr = $dst";
+ let Constraints = "$src_gpr.ptr = $dst_gpr";
}
-class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_128", buffer_id, (outs R600_Reg128:$dst),
- pattern> {
+class VTX_READ_64_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_64 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_Reg64:$dst_gpr), pattern> {
+
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 1;
+ let DST_SEL_Z = 7;
+ let DST_SEL_W = 7;
+ let DATA_FORMAT = 0x1D; // COLOR_32_32
+}
+
+class VTX_READ_128_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id,
+ (outs R600_Reg128:$dst_gpr), pattern> {
- let MEGA_FETCH_COUNT = 16;
let DST_SEL_X = 0;
let DST_SEL_Y = 1;
let DST_SEL_Z = 2;
@@ -1896,28 +1966,31 @@ class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
// XXX: Need to force VTX_READ_128 instructions to write to the same register
// that holds its buffer address to avoid potential hangs. We can't use
- // the same constraint as VTX_READ_32_eg, because the $ptr.ptr and $dst
+ // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst_gpr
// registers are different sizes.
}
//===----------------------------------------------------------------------===//
// VTX Read from parameter memory space
//===----------------------------------------------------------------------===//
+def VTX_READ_PARAM_8_cm : VTX_READ_8_cm <0,
+ [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
+>;
-def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0,
- [(set i32:$dst, (load_param_zexti8 ADDRVTX_READ:$ptr))]
+def VTX_READ_PARAM_16_cm : VTX_READ_16_cm <0,
+ [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0,
- [(set i32:$dst, (load_param_zexti16 ADDRVTX_READ:$ptr))]
+def VTX_READ_PARAM_32_cm : VTX_READ_32_cm <0,
+ [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0,
- [(set i32:$dst, (load_param ADDRVTX_READ:$ptr))]
+def VTX_READ_PARAM_64_cm : VTX_READ_64_cm <0,
+ [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
- [(set v4i32:$dst, (load_param ADDRVTX_READ:$ptr))]
+def VTX_READ_PARAM_128_cm : VTX_READ_128_cm <0,
+ [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
>;
//===----------------------------------------------------------------------===//
@@ -1925,78 +1998,29 @@ def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
//===----------------------------------------------------------------------===//
// 8-bit reads
-def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1,
- [(set i32:$dst, (zextloadi8_global ADDRVTX_READ:$ptr))]
+def VTX_READ_GLOBAL_8_cm : VTX_READ_8_cm <1,
+ [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
>;
-// 32-bit reads
-def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1,
- [(set i32:$dst, (global_load ADDRVTX_READ:$ptr))]
+def VTX_READ_GLOBAL_16_cm : VTX_READ_16_cm <1,
+ [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
>;
-// 128-bit reads
-def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1,
- [(set v4i32:$dst, (global_load ADDRVTX_READ:$ptr))]
+// 32-bit reads
+def VTX_READ_GLOBAL_32_cm : VTX_READ_32_cm <1,
+ [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
>;
-//===----------------------------------------------------------------------===//
-// Constant Loads
-// XXX: We are currently storing all constants in the global address space.
-//===----------------------------------------------------------------------===//
-
-def CONSTANT_LOAD_eg : VTX_READ_32_eg <1,
- [(set i32:$dst, (constant_load ADDRVTX_READ:$ptr))]
+// 64-bit reads
+def VTX_READ_GLOBAL_64_cm : VTX_READ_64_cm <1,
+ [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
>;
-}
-
-//===----------------------------------------------------------------------===//
-// Regist loads and stores - for indirect addressing
-//===----------------------------------------------------------------------===//
-
-defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>;
-
-let Predicates = [isCayman] in {
-
-let isVector = 1 in {
-
-def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
-
-def MULLO_INT_cm : MULLO_INT_Common<0x8F>;
-def MULHI_INT_cm : MULHI_INT_Common<0x90>;
-def MULLO_UINT_cm : MULLO_UINT_Common<0x91>;
-def MULHI_UINT_cm : MULHI_UINT_Common<0x92>;
-def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>;
-def EXP_IEEE_cm : EXP_IEEE_Common<0x81>;
-def LOG_IEEE_cm : LOG_IEEE_Common<0x83>;
-def RECIP_CLAMPED_cm : RECIP_CLAMPED_Common<0x84>;
-def RECIPSQRT_IEEE_cm : RECIPSQRT_IEEE_Common<0x89>;
-def SIN_cm : SIN_Common<0x8D>;
-def COS_cm : COS_Common<0x8E>;
-} // End isVector = 1
-
-def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>;
-def : SIN_PAT <SIN_cm>;
-def : COS_PAT <COS_cm>;
-
-defm DIV_cm : DIV_Common<RECIP_IEEE_cm>;
-
-// RECIP_UINT emulation for Cayman
-// The multiplication scales from [0,1] to the unsigned integer range
-def : Pat <
- (AMDGPUurecip i32:$src0),
- (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg $src0)),
- (MOV_IMM_I32 CONST.FP_UINT_MAX_PLUS_1)))
+// 128-bit reads
+def VTX_READ_GLOBAL_128_cm : VTX_READ_128_cm <1,
+ [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
>;
- def CF_END_CM : CF_CLAUSE_EG<32, (ins), "CF_END"> {
- let ADDR = 0;
- let POP_COUNT = 0;
- let COUNT = 0;
- }
-
-def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>;
-
} // End isCayman
//===----------------------------------------------------------------------===//
@@ -2007,9 +2031,6 @@ def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>;
def IF_PREDICATE_SET : ILFormat<(outs), (ins GPRI32:$src),
"IF_PREDICATE_SET $src", []>;
-def PREDICATED_BREAK : ILFormat<(outs), (ins GPRI32:$src),
- "PREDICATED_BREAK $src", []>;
-
//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//
@@ -2083,10 +2104,6 @@ def TXD_SHADOW: InstR600 <
} // End isPseudo = 1
} // End usesCustomInserter = 1
-def CLAMP_R600 : CLAMP <R600_Reg32>;
-def FABS_R600 : FABS<R600_Reg32>;
-def FNEG_R600 : FNEG<R600_Reg32>;
-
//===---------------------------------------------------------------------===//
// Return instruction
//===---------------------------------------------------------------------===//
@@ -2117,7 +2134,7 @@ def CONST_COPY : Instruction {
def TEX_VTX_CONSTBUF :
InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID), "VTX_READ_eg $dst, $ptr",
[(set v4i32:$dst, (CONST_ADDRESS ADDRGA_VAR_OFFSET:$ptr, (i32 imm:$BUFFER_ID)))]>,
- VTX_WORD1_GPR, VTX_WORD0 {
+ VTX_WORD1_GPR, VTX_WORD0_eg {
let VC_INST = 0;
let FETCH_TYPE = 2;
@@ -2171,7 +2188,7 @@ def TEX_VTX_CONSTBUF :
def TEX_VTX_TEXBUF:
InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID), "TEX_VTX_EXPLICIT_READ $dst, $ptr",
[(set v4f32:$dst, (int_R600_load_texbuf ADDRGA_VAR_OFFSET:$ptr, imm:$BUFFER_ID))]>,
-VTX_WORD1_GPR, VTX_WORD0 {
+VTX_WORD1_GPR, VTX_WORD0_eg {
let VC_INST = 0;
let FETCH_TYPE = 2;
@@ -2235,7 +2252,7 @@ let isTerminator = 1, usesCustomInserter = 1, isBranch = 1, isBarrier = 1 in {
def BRANCH : ILFormat<(outs), (ins brtarget:$target),
"; Pseudo unconditional branch instruction",
[(br bb:$target)]>;
- defm BRANCH_COND : BranchConditional<IL_brcond>;
+ defm BRANCH_COND : BranchConditional<IL_brcond, R600_Reg32, R600_Reg32>;
}
//===---------------------------------------------------------------------===//
@@ -2306,7 +2323,7 @@ def : CND_INT_f32 <CNDGE_INT, SETGE>;
//CNDGE_INT extra pattern
def : Pat <
- (selectcc i32:$src0, -1, i32:$src1, i32:$src2, COND_GT),
+ (selectcc i32:$src0, -1, i32:$src1, i32:$src2, COND_SGT),
(CNDGE_INT $src0, $src1, $src2)
>;
@@ -2321,86 +2338,6 @@ def KIL : Pat <
(MASK_WRITE (KILLGT (f32 ZERO), $src0))
>;
-// SGT Reverse args
-def : Pat <
- (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_LT),
- (SGT $src1, $src0)
->;
-
-// SGE Reverse args
-def : Pat <
- (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_LE),
- (SGE $src1, $src0)
->;
-
-// SETGT_DX10 reverse args
-def : Pat <
- (selectcc f32:$src0, f32:$src1, -1, 0, COND_LT),
- (SETGT_DX10 $src1, $src0)
->;
-
-// SETGE_DX10 reverse args
-def : Pat <
- (selectcc f32:$src0, f32:$src1, -1, 0, COND_LE),
- (SETGE_DX10 $src1, $src0)
->;
-
-// SETGT_INT reverse args
-def : Pat <
- (selectcc i32:$src0, i32:$src1, -1, 0, SETLT),
- (SETGT_INT $src1, $src0)
->;
-
-// SETGE_INT reverse args
-def : Pat <
- (selectcc i32:$src0, i32:$src1, -1, 0, SETLE),
- (SETGE_INT $src1, $src0)
->;
-
-// SETGT_UINT reverse args
-def : Pat <
- (selectcc i32:$src0, i32:$src1, -1, 0, SETULT),
- (SETGT_UINT $src1, $src0)
->;
-
-// SETGE_UINT reverse args
-def : Pat <
- (selectcc i32:$src0, i32:$src1, -1, 0, SETULE),
- (SETGE_UINT $src1, $src0)
->;
-
-// The next two patterns are special cases for handling 'true if ordered' and
-// 'true if unordered' conditionals. The assumption here is that the behavior of
-// SETE and SNE conforms to the Direct3D 10 rules for floating point values
-// described here:
-// http://msdn.microsoft.com/en-us/library/windows/desktop/cc308050.aspx#alpha_32_bit
-// We assume that SETE returns false when one of the operands is NAN and
-// SNE returns true when on of the operands is NAN
-
-//SETE - 'true if ordered'
-def : Pat <
- (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, SETO),
- (SETE $src0, $src1)
->;
-
-//SETE_DX10 - 'true if ordered'
-def : Pat <
- (selectcc f32:$src0, f32:$src1, -1, 0, SETO),
- (SETE_DX10 $src0, $src1)
->;
-
-//SNE - 'true if unordered'
-def : Pat <
- (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, SETUO),
- (SNE $src0, $src1)
->;
-
-//SETNE_DX10 - 'true if ordered'
-def : Pat <
- (selectcc f32:$src0, f32:$src1, -1, 0, SETUO),
- (SETNE_DX10 $src0, $src1)
->;
-
def : Extract_Element <f32, v4f32, 0, sub0>;
def : Extract_Element <f32, v4f32, 1, sub1>;
def : Extract_Element <f32, v4f32, 2, sub2>;
@@ -2424,10 +2361,24 @@ def : Insert_Element <i32, v4i32, 3, sub3>;
def : Vector4_Build <v4f32, f32>;
def : Vector4_Build <v4i32, i32>;
+def : Extract_Element <f32, v2f32, 0, sub0>;
+def : Extract_Element <f32, v2f32, 1, sub1>;
+
+def : Insert_Element <f32, v2f32, 0, sub0>;
+def : Insert_Element <f32, v2f32, 1, sub1>;
+
+def : Extract_Element <i32, v2i32, 0, sub0>;
+def : Extract_Element <i32, v2i32, 1, sub1>;
+
+def : Insert_Element <i32, v2i32, 0, sub0>;
+def : Insert_Element <i32, v2i32, 1, sub1>;
+
// bitconvert patterns
def : BitConvert <i32, f32, R600_Reg32>;
def : BitConvert <f32, i32, R600_Reg32>;
+def : BitConvert <v2f32, v2i32, R600_Reg64>;
+def : BitConvert <v2i32, v2f32, R600_Reg64>;
def : BitConvert <v4f32, v4i32, R600_Reg128>;
def : BitConvert <v4i32, v4f32, R600_Reg128>;
@@ -2435,3 +2386,11 @@ def : BitConvert <v4i32, v4f32, R600_Reg128>;
def : DwordAddrPat <i32, R600_Reg32>;
} // End isR600toCayman Predicate
+
+def getLDSNoRetOp : InstrMapping {
+ let FilterClass = "R600_LDS_1A1D";
+ let RowFields = ["BaseOp"];
+ let ColFields = ["DisableEncoding"];
+ let KeyCol = ["$dst"];
+ let ValueCols = [[""""]];
+}
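
The getLDSNoRetOp mapping above lets the backend switch an LDS instruction that writes $dst over to the variant whose result is left unread. TableGen's InstrMapping support normally emits a lookup function named after the def; the sketch below shows how such a helper would typically be queried. The generated signature and namespace are assumptions inferred from that convention, not something this patch spells out.

    // Minimal sketch, assuming InstrMapping emits AMDGPU::getLDSNoRetOp()
    // (normally declared in the TableGen-generated AMDGPUGenInstrInfo.inc),
    // returning the no-return opcode for a given LDS opcode, or -1 if none.
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetInstrInfo.h"
    #include <cstdint>

    namespace llvm {
    namespace AMDGPU {
    int getLDSNoRetOp(uint16_t Opcode); // assumed generated declaration
    }
    }

    static bool switchToNoRetLDS(llvm::MachineInstr &MI,
                                 const llvm::TargetInstrInfo &TII) {
      int NoRetOp = llvm::AMDGPU::getLDSNoRetOp(MI.getOpcode());
      if (NoRetOp == -1)
        return false;               // this opcode has no no-return variant
      MI.setDesc(TII.get(NoRetOp)); // switch to the form without $dst
      return true;
    }
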
diff --git a/lib/Target/R600/R600Intrinsics.td b/lib/Target/R600/R600Intrinsics.td
index dc8980aef146..9681747006d9 100644
--- a/lib/Target/R600/R600Intrinsics.td
+++ b/lib/Target/R600/R600Intrinsics.td
@@ -12,12 +12,56 @@
//===----------------------------------------------------------------------===//
let TargetPrefix = "R600", isTarget = 1 in {
+ class TextureIntrinsicFloatInput :
+ Intrinsic<[llvm_v4f32_ty], [
+ llvm_v4f32_ty, // Coord
+ llvm_i32_ty, // offset_x
+ llvm_i32_ty, // offset_y,
+ llvm_i32_ty, // offset_z,
+ llvm_i32_ty, // resource_id
+ llvm_i32_ty, // samplerid
+ llvm_i32_ty, // coord_type_x
+ llvm_i32_ty, // coord_type_y
+ llvm_i32_ty, // coord_type_z
+ llvm_i32_ty // coord_type_w
+ ], [IntrNoMem]>;
+ class TextureIntrinsicInt32Input :
+ Intrinsic<[llvm_v4i32_ty], [
+ llvm_v4i32_ty, // Coord
+ llvm_i32_ty, // offset_x
+ llvm_i32_ty, // offset_y,
+ llvm_i32_ty, // offset_z,
+ llvm_i32_ty, // resource_id
+ llvm_i32_ty, // samplerid
+ llvm_i32_ty, // coord_type_x
+ llvm_i32_ty, // coord_type_y
+ llvm_i32_ty, // coord_type_z
+ llvm_i32_ty // coord_type_w
+ ], [IntrNoMem]>;
+
def int_R600_load_input :
Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_R600_interp_input :
Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_R600_interp_const :
+ Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_R600_interp_xy :
+ Intrinsic<[llvm_v2f32_ty], [llvm_i32_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+def int_R600_interp_zw :
+ Intrinsic<[llvm_v2f32_ty], [llvm_i32_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
def int_R600_load_texbuf :
Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_R600_tex : TextureIntrinsicFloatInput;
+ def int_R600_texc : TextureIntrinsicFloatInput;
+ def int_R600_txl : TextureIntrinsicFloatInput;
+ def int_R600_txlc : TextureIntrinsicFloatInput;
+ def int_R600_txb : TextureIntrinsicFloatInput;
+ def int_R600_txbc : TextureIntrinsicFloatInput;
+ def int_R600_txf : TextureIntrinsicInt32Input;
+ def int_R600_ldptr : TextureIntrinsicInt32Input;
+ def int_R600_txq : TextureIntrinsicInt32Input;
+ def int_R600_ddx : TextureIntrinsicFloatInput;
+ def int_R600_ddy : TextureIntrinsicFloatInput;
def int_R600_store_swizzle :
Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], []>;
def int_R600_store_stream_output :
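
All of the texture intrinsics declared above share the same ten-operand shape: a coordinate vector, three integer offsets, the resource and sampler ids, and four per-component coordinate-type flags. The fragment below is a hedged illustration of building such a call from C++; the Intrinsic::R600_tex enumerator and the llvm.R600.tex name are inferred from the usual int_R600_* naming convention rather than taken from this patch.

    // Hedged sketch: emit a llvm.R600.tex call with the operand layout
    // documented above (offsets and coord_type flags all zero for brevity).
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    static llvm::Value *emitTexSample(llvm::IRBuilder<> &B, llvm::Module &M,
                                      llvm::Value *Coord,      // <4 x float>
                                      llvm::Value *ResourceId, // i32
                                      llvm::Value *SamplerId)  // i32
    {
      llvm::Value *Zero = B.getInt32(0);
      llvm::Function *Tex =
          llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::R600_tex); // assumed enum name
      llvm::Value *Args[] = { Coord, Zero, Zero, Zero,   // offset_x/y/z
                              ResourceId, SamplerId,
                              Zero, Zero, Zero, Zero };  // coord_type_x/y/z/w
      return B.CreateCall(Tex, Args);                    // yields <4 x float>
    }
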
diff --git a/lib/Target/R600/R600MachineFunctionInfo.cpp b/lib/Target/R600/R600MachineFunctionInfo.cpp
index 018b40363363..01105c614c55 100644
--- a/lib/Target/R600/R600MachineFunctionInfo.cpp
+++ b/lib/Target/R600/R600MachineFunctionInfo.cpp
@@ -12,7 +12,9 @@
using namespace llvm;
-R600MachineFunctionInfo::R600MachineFunctionInfo(const MachineFunction &MF)
- : AMDGPUMachineFunction(MF) { }
+// Pin the vtable to this file.
+void R600MachineFunctionInfo::anchor() {}
+R600MachineFunctionInfo::R600MachineFunctionInfo(const MachineFunction &MF)
+ : AMDGPUMachineFunction(MF) { }
diff --git a/lib/Target/R600/R600MachineFunctionInfo.h b/lib/Target/R600/R600MachineFunctionInfo.h
index 70fddbb6d505..c1bec0aae7e4 100644
--- a/lib/Target/R600/R600MachineFunctionInfo.h
+++ b/lib/Target/R600/R600MachineFunctionInfo.h
@@ -13,14 +13,15 @@
#ifndef R600MACHINEFUNCTIONINFO_H
#define R600MACHINEFUNCTIONINFO_H
+#include "AMDGPUMachineFunction.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "AMDGPUMachineFunction.h"
#include <vector>
namespace llvm {
class R600MachineFunctionInfo : public AMDGPUMachineFunction {
+ virtual void anchor();
public:
R600MachineFunctionInfo(const MachineFunction &MF);
SmallVector<unsigned, 4> LiveOuts;
diff --git a/lib/Target/R600/R600MachineScheduler.cpp b/lib/Target/R600/R600MachineScheduler.cpp
index a777142a9e70..da2a4d862e7d 100644
--- a/lib/Target/R600/R600MachineScheduler.cpp
+++ b/lib/Target/R600/R600MachineScheduler.cpp
@@ -9,19 +9,17 @@
//
/// \file
/// \brief R600 Machine Scheduler interface
-// TODO: Scheduling is optimised for VLIW4 arch, modify it to support TRANS slot
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "misched"
#include "R600MachineScheduler.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"
-#include <set>
using namespace llvm;
@@ -30,54 +28,80 @@ void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
DAG = dag;
TII = static_cast<const R600InstrInfo*>(DAG->TII);
TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
+ VLIW5 = !DAG->MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
MRI = &DAG->MRI;
- Available[IDAlu]->clear();
- Available[IDFetch]->clear();
- Available[IDOther]->clear();
CurInstKind = IDOther;
CurEmitted = 0;
- OccupedSlotsMask = 15;
+ OccupedSlotsMask = 31;
InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
-
+ InstKindLimit[IDOther] = 32;
const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD5XXX) {
- InstKindLimit[IDFetch] = 7; // 8 minus 1 for security
- } else {
- InstKindLimit[IDFetch] = 15; // 16 minus 1 for security
- }
+ InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
+ AluInstCount = 0;
+ FetchInstCount = 0;
}
-void R600SchedStrategy::MoveUnits(ReadyQueue *QSrc, ReadyQueue *QDst)
+void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
+ std::vector<SUnit *> &QDst)
{
- if (QSrc->empty())
- return;
- for (ReadyQueue::iterator I = QSrc->begin(),
- E = QSrc->end(); I != E; ++I) {
- (*I)->NodeQueueId &= ~QSrc->getID();
- QDst->push(*I);
- }
- QSrc->clear();
+ QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
+ QSrc.clear();
+}
+
+static
+unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
+ assert (GPRCount && "GPRCount cannot be 0");
+ return 248 / GPRCount;
}
SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
SUnit *SU = 0;
- IsTopNode = true;
NextInstKind = IDOther;
+ IsTopNode = false;
+
// check if we might want to switch current clause type
- bool AllowSwitchToAlu = (CurInstKind == IDOther) ||
- (CurEmitted > InstKindLimit[CurInstKind]) ||
- (Available[CurInstKind]->empty());
- bool AllowSwitchFromAlu = (CurEmitted > InstKindLimit[CurInstKind]) &&
- (!Available[IDFetch]->empty() || !Available[IDOther]->empty());
-
- if ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
- (!AllowSwitchFromAlu && CurInstKind == IDAlu)) {
+ bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
+ (Available[CurInstKind].empty());
+ bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
+ (!Available[IDFetch].empty() || !Available[IDOther].empty());
+
+ if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
+ // We use the heuristic provided by AMD Accelerated Parallel Processing
+ // OpenCL Programming Guide :
+ // The approx. number of WF that allows TEX inst to hide ALU inst is :
+ // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
+ float ALUFetchRationEstimate =
+ (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
+ (FetchInstCount + Available[IDFetch].size());
+ unsigned NeededWF = 62.5f / ALUFetchRationEstimate;
+ DEBUG( dbgs() << NeededWF << " approx. Wavefronts Required\n" );
+ // We assume the local GPR requirements to be "dominated" by the requirement
+ // of the TEX clause (which consumes 128 bits regs) ; ALU inst before and
+ // after TEX are indeed likely to consume or generate values from/for the
+ // TEX clause.
+ // Available[IDFetch].size() * 2 : GPRs required in the Fetch clause
+ // We assume that fetch instructions are either TnXYZW = TEX TnXYZW (need
+ // one GPR) or TmXYZW = TnXYZW (need 2 GPR).
+ // (TODO : use RegisterPressure)
+    // If we are going to use too many GPRs, we flush Fetch instructions to lower
+    // register pressure on 128-bit regs.
+ unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
+ if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
+ AllowSwitchFromAlu = true;
+ }
+
+ if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
+ (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
// try to pick ALU
SU = pickAlu();
+ if (!SU && !PhysicalRegCopy.empty()) {
+ SU = PhysicalRegCopy.front();
+ PhysicalRegCopy.erase(PhysicalRegCopy.begin());
+ }
if (SU) {
- if (CurEmitted > InstKindLimit[IDAlu])
+ if (CurEmitted >= InstKindLimit[IDAlu])
CurEmitted = 0;
NextInstKind = IDAlu;
}
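
The new clause-switch heuristic above combines two estimates: roughly 62.5 / (ALU-to-fetch ratio) wavefronts are needed to hide texture latency (500 TEX cycles over 8 ALU cycles), while 248 / GPRCount bounds how many wavefronts the register budget allows, with each pending fetch assumed to cost about two 128-bit GPRs. A small standalone arithmetic check of those numbers:

    // Standalone numeric check of the heuristic (not scheduler code): with 20
    // ALU ops outstanding against 4 pending fetches the ratio is 5, so about
    // 62.5 / 5 = 12 wavefronts would hide TEX latency, while the ~8 GPRs the
    // fetch clause needs still allow 248 / 8 = 31 wavefronts.
    #include <cassert>

    static unsigned wfLimitedByGPR(unsigned GPRCount) {
      assert(GPRCount && "GPRCount cannot be 0");
      return 248 / GPRCount;
    }

    int main() {
      float AluFetchRatio = 20.0f / 4.0f;        // outstanding ALU vs. fetch work
      unsigned NeededWF = 62.5f / AluFetchRatio; // ~12 wavefronts to hide TEX
      unsigned FetchGPRs = 2 * 4;                // two GPRs per pending fetch
      // 12 > 31 is false, so the scheduler would keep filling the ALU clause.
      return NeededWF > wfLimitedByGPR(FetchGPRs) ? 1 : 0;
    }
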
@@ -99,14 +123,10 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
DEBUG(
if (SU) {
- dbgs() << "picked node: ";
+ dbgs() << " ** Pick node **\n";
SU->dump(DAG);
} else {
- dbgs() << "NO NODE ";
- for (int i = 0; i < IDLast; ++i) {
- Available[i]->dump();
- Pending[i]->dump();
- }
+ dbgs() << "NO NODE \n";
for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
const SUnit &S = DAG->SUnits[i];
if (!S.isScheduled)
@@ -119,19 +139,16 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
}
void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
-
- DEBUG(dbgs() << "scheduled: ");
- DEBUG(SU->dump(DAG));
-
if (NextInstKind != CurInstKind) {
DEBUG(dbgs() << "Instruction Type Switch\n");
if (NextInstKind != IDAlu)
- OccupedSlotsMask = 15;
+ OccupedSlotsMask |= 31;
CurEmitted = 0;
CurInstKind = NextInstKind;
}
if (CurInstKind == IDAlu) {
+ AluInstCount ++;
switch (getAluKind(SU)) {
case AluT_XYZW:
CurEmitted += 4;
@@ -157,20 +174,37 @@ void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
if (CurInstKind != IDFetch) {
MoveUnits(Pending[IDFetch], Available[IDFetch]);
- }
- MoveUnits(Pending[IDOther], Available[IDOther]);
+ } else
+ FetchInstCount++;
}
-void R600SchedStrategy::releaseTopNode(SUnit *SU) {
- int IK = getInstKind(SU);
+static bool
+isPhysicalRegCopy(MachineInstr *MI) {
+ if (MI->getOpcode() != AMDGPU::COPY)
+ return false;
- DEBUG(dbgs() << IK << " <= ");
- DEBUG(SU->dump(DAG));
+ return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
+}
- Pending[IK]->push(SU);
+void R600SchedStrategy::releaseTopNode(SUnit *SU) {
+ DEBUG(dbgs() << "Top Releasing ";SU->dump(DAG););
}
void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
+ DEBUG(dbgs() << "Bottom Releasing ";SU->dump(DAG););
+ if (isPhysicalRegCopy(SU->getInstr())) {
+ PhysicalRegCopy.push_back(SU);
+ return;
+ }
+
+ int IK = getInstKind(SU);
+
+  // There is no export clause, so we can schedule one as soon as it's ready
+ if (IK == IDOther)
+ Available[IDOther].push_back(SU);
+ else
+ Pending[IK].push_back(SU);
+
}
bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
@@ -185,18 +219,19 @@ bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
MachineInstr *MI = SU->getInstr();
+ if (TII->isTransOnly(MI))
+ return AluTrans;
+
switch (MI->getOpcode()) {
+ case AMDGPU::PRED_X:
+ return AluPredX;
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
+ case AMDGPU::DOT_4:
return AluT_XYZW;
case AMDGPU::COPY:
- if (TargetRegisterInfo::isPhysicalRegister(MI->getOperand(1).getReg())) {
- // %vregX = COPY Tn_X is likely to be discarded in favor of an
- // assignement of Tn_X to %vregX, don't considers it in scheduling
- return AluDiscarded;
- }
- else if (MI->getOperand(1).isUndef()) {
+ if (MI->getOperand(1).isUndef()) {
// MI will become a KILL, don't considers it in scheduling
return AluDiscarded;
}
@@ -205,10 +240,18 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
}
// Does the instruction take a whole IG ?
+ // XXX: Is it possible to add a helper function in R600InstrInfo that can
+ // be used here and in R600PacketizerList::isSoloInstruction() ?
if(TII->isVector(*MI) ||
TII->isCubeOp(MI->getOpcode()) ||
- TII->isReductionOp(MI->getOpcode()))
+ TII->isReductionOp(MI->getOpcode()) ||
+ MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
return AluT_XYZW;
+ }
+
+ if (TII->isLDSInstr(MI->getOpcode())) {
+ return AluT_X;
+ }
// Is the result already assigned to a channel ?
unsigned DestSubReg = MI->getOperand(0).getSubReg();
@@ -239,6 +282,10 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
return AluT_XYZW;
+ // LDS src registers cannot be used in the Trans slot.
+ if (TII->readsLDSSrcReg(MI))
+ return AluT_XYZW;
+
return AluAny;
}
@@ -246,57 +293,39 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
int R600SchedStrategy::getInstKind(SUnit* SU) {
int Opcode = SU->getInstr()->getOpcode();
+ if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
+ return IDFetch;
+
if (TII->isALUInstr(Opcode)) {
return IDAlu;
}
switch (Opcode) {
+ case AMDGPU::PRED_X:
case AMDGPU::COPY:
case AMDGPU::CONST_COPY:
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
- case AMDGPU::DOT4_eg_pseudo:
- case AMDGPU::DOT4_r600_pseudo:
+ case AMDGPU::DOT_4:
return IDAlu;
- case AMDGPU::TEX_VTX_CONSTBUF:
- case AMDGPU::TEX_VTX_TEXBUF:
- case AMDGPU::TEX_LD:
- case AMDGPU::TEX_GET_TEXTURE_RESINFO:
- case AMDGPU::TEX_GET_GRADIENTS_H:
- case AMDGPU::TEX_GET_GRADIENTS_V:
- case AMDGPU::TEX_SET_GRADIENTS_H:
- case AMDGPU::TEX_SET_GRADIENTS_V:
- case AMDGPU::TEX_SAMPLE:
- case AMDGPU::TEX_SAMPLE_C:
- case AMDGPU::TEX_SAMPLE_L:
- case AMDGPU::TEX_SAMPLE_C_L:
- case AMDGPU::TEX_SAMPLE_LB:
- case AMDGPU::TEX_SAMPLE_C_LB:
- case AMDGPU::TEX_SAMPLE_G:
- case AMDGPU::TEX_SAMPLE_C_G:
- case AMDGPU::TXD:
- case AMDGPU::TXD_SHADOW:
- return IDFetch;
default:
- DEBUG(
- dbgs() << "other inst: ";
- SU->dump(DAG);
- );
return IDOther;
}
}
-SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) {
+SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
if (Q.empty())
return NULL;
- for (std::set<SUnit *, CompareSUnit>::iterator It = Q.begin(), E = Q.end();
+ for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
It != E; ++It) {
SUnit *SU = *It;
InstructionsGroupCandidate.push_back(SU->getInstr());
- if (TII->canBundle(InstructionsGroupCandidate)) {
+ if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)
+ && (!AnyALU || !TII->isVectorOnly(SU->getInstr()))
+ ) {
InstructionsGroupCandidate.pop_back();
- Q.erase(It);
+ Q.erase((It + 1).base());
return SU;
} else {
InstructionsGroupCandidate.pop_back();
@@ -306,33 +335,37 @@ SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) {
}
void R600SchedStrategy::LoadAlu() {
- ReadyQueue *QSrc = Pending[IDAlu];
- for (ReadyQueue::iterator I = QSrc->begin(),
- E = QSrc->end(); I != E; ++I) {
- (*I)->NodeQueueId &= ~QSrc->getID();
- AluKind AK = getAluKind(*I);
- AvailableAlus[AK].insert(*I);
- }
- QSrc->clear();
+ std::vector<SUnit *> &QSrc = Pending[IDAlu];
+ for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
+ AluKind AK = getAluKind(QSrc[i]);
+ AvailableAlus[AK].push_back(QSrc[i]);
+ }
+ QSrc.clear();
}
void R600SchedStrategy::PrepareNextSlot() {
DEBUG(dbgs() << "New Slot\n");
assert (OccupedSlotsMask && "Slot wasn't filled");
OccupedSlotsMask = 0;
+// if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
+// OccupedSlotsMask |= 16;
InstructionsGroupCandidate.clear();
LoadAlu();
}
void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
- unsigned DestReg = MI->getOperand(0).getReg();
+ int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ if (DstIndex == -1) {
+ return;
+ }
+ unsigned DestReg = MI->getOperand(DstIndex).getReg();
// PressureRegister crashes if an operand is def and used in the same inst
// and we try to constraint its regclass
for (MachineInstr::mop_iterator It = MI->operands_begin(),
E = MI->operands_end(); It != E; ++It) {
MachineOperand &MO = *It;
if (MO.isReg() && !MO.isDef() &&
- MO.getReg() == MI->getOperand(0).getReg())
+ MO.getReg() == DestReg)
return;
}
// Constrains the regclass of DestReg to assign it to Slot
@@ -352,53 +385,60 @@ void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
}
}
-SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) {
+SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot, bool AnyAlu) {
static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
- SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]);
- SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
- if (!UnslotedSU) {
+ SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]], AnyAlu);
+ if (SlotedSU)
return SlotedSU;
- } else if (!SlotedSU) {
+ SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny], AnyAlu);
+ if (UnslotedSU)
AssignSlot(UnslotedSU->getInstr(), Slot);
- return UnslotedSU;
- } else {
- //Determine which one to pick (the lesser one)
- if (CompareSUnit()(SlotedSU, UnslotedSU)) {
- AvailableAlus[AluAny].insert(UnslotedSU);
- return SlotedSU;
- } else {
- AvailableAlus[IndexToID[Slot]].insert(SlotedSU);
- AssignSlot(UnslotedSU->getInstr(), Slot);
- return UnslotedSU;
- }
- }
+ return UnslotedSU;
}
-bool R600SchedStrategy::isAvailablesAluEmpty() const {
- return Pending[IDAlu]->empty() && AvailableAlus[AluAny].empty() &&
- AvailableAlus[AluT_XYZW].empty() && AvailableAlus[AluT_X].empty() &&
- AvailableAlus[AluT_Y].empty() && AvailableAlus[AluT_Z].empty() &&
- AvailableAlus[AluT_W].empty() && AvailableAlus[AluDiscarded].empty();
+unsigned R600SchedStrategy::AvailablesAluCount() const {
+ return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
+ AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
+ AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
+ AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
+ AvailableAlus[AluPredX].size();
}
SUnit* R600SchedStrategy::pickAlu() {
- while (!isAvailablesAluEmpty()) {
+ while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
if (!OccupedSlotsMask) {
+      // Bottom up scheduling : predX must come first
+ if (!AvailableAlus[AluPredX].empty()) {
+ OccupedSlotsMask |= 31;
+ return PopInst(AvailableAlus[AluPredX], false);
+ }
// Flush physical reg copies (RA will discard them)
if (!AvailableAlus[AluDiscarded].empty()) {
- OccupedSlotsMask = 15;
- return PopInst(AvailableAlus[AluDiscarded]);
+ OccupedSlotsMask |= 31;
+ return PopInst(AvailableAlus[AluDiscarded], false);
}
// If there is a T_XYZW alu available, use it
if (!AvailableAlus[AluT_XYZW].empty()) {
- OccupedSlotsMask = 15;
- return PopInst(AvailableAlus[AluT_XYZW]);
+ OccupedSlotsMask |= 15;
+ return PopInst(AvailableAlus[AluT_XYZW], false);
+ }
+ }
+ bool TransSlotOccuped = OccupedSlotsMask & 16;
+ if (!TransSlotOccuped && VLIW5) {
+ if (!AvailableAlus[AluTrans].empty()) {
+ OccupedSlotsMask |= 16;
+ return PopInst(AvailableAlus[AluTrans], false);
+ }
+ SUnit *SU = AttemptFillSlot(3, true);
+ if (SU) {
+ OccupedSlotsMask |= 16;
+ return SU;
}
}
- for (unsigned Chan = 0; Chan < 4; ++Chan) {
+ for (int Chan = 3; Chan > -1; --Chan) {
bool isOccupied = OccupedSlotsMask & (1 << Chan);
if (!isOccupied) {
- SUnit *SU = AttemptFillSlot(Chan);
+ SUnit *SU = AttemptFillSlot(Chan, false);
if (SU) {
OccupedSlotsMask |= (1 << Chan);
InstructionsGroupCandidate.push_back(SU->getInstr());
@@ -413,14 +453,14 @@ SUnit* R600SchedStrategy::pickAlu() {
SUnit* R600SchedStrategy::pickOther(int QID) {
SUnit *SU = 0;
- ReadyQueue *AQ = Available[QID];
+ std::vector<SUnit *> &AQ = Available[QID];
- if (AQ->empty()) {
+ if (AQ.empty()) {
MoveUnits(Pending[QID], AQ);
}
- if (!AQ->empty()) {
- SU = *AQ->begin();
- AQ->remove(AQ->begin());
+ if (!AQ.empty()) {
+ SU = AQ.back();
+ AQ.resize(AQ.size() - 1);
}
return SU;
}
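
The occupancy mask manipulated in pickAlu() above now tracks five issue slots instead of four: bits 0-3 stand for the X, Y, Z and W vector slots and bit 4 for the Trans slot that only exists on VLIW5 parts, which is why full-group instructions set the mask to 31 while purely vector ones use 15. A small sketch of that convention (the bit assignments are inferred from the constants in the patch):

    // Sketch of the five-slot occupancy encoding used by OccupedSlotsMask.
    enum {
      SlotX = 1 << 0, SlotY = 1 << 1, SlotZ = 1 << 2, SlotW = 1 << 3,
      SlotTrans = 1 << 4,                          // VLIW5 only
      VectorSlots = SlotX | SlotY | SlotZ | SlotW, // == 15
      AllSlots    = VectorSlots | SlotTrans        // == 31
    };

    static bool slotIsFree(unsigned OccupedSlotsMask, unsigned SlotBit) {
      return (OccupedSlotsMask & SlotBit) == 0;
    }
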
diff --git a/lib/Target/R600/R600MachineScheduler.h b/lib/Target/R600/R600MachineScheduler.h
index 3d0367fd8ebf..97c8cdec0aae 100644
--- a/lib/Target/R600/R600MachineScheduler.h
+++ b/lib/Target/R600/R600MachineScheduler.h
@@ -16,21 +16,14 @@
#define R600MACHINESCHEDULER_H_
#include "R600InstrInfo.h"
+#include "llvm/ADT/PriorityQueue.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/Support/Debug.h"
-#include "llvm/ADT/PriorityQueue.h"
using namespace llvm;
namespace llvm {
-class CompareSUnit {
-public:
- bool operator()(const SUnit *S1, const SUnit *S2) {
- return S1->getDepth() > S2->getDepth();
- }
-};
-
class R600SchedStrategy : public MachineSchedStrategy {
const ScheduleDAGMI *DAG;
@@ -38,12 +31,6 @@ class R600SchedStrategy : public MachineSchedStrategy {
const R600RegisterInfo *TRI;
MachineRegisterInfo *MRI;
- enum InstQueue {
- QAlu = 1,
- QFetch = 2,
- QOther = 4
- };
-
enum InstKind {
IDAlu,
IDFetch,
@@ -58,17 +45,23 @@ class R600SchedStrategy : public MachineSchedStrategy {
AluT_Z,
AluT_W,
AluT_XYZW,
+ AluPredX,
+ AluTrans,
AluDiscarded, // LLVM Instructions that are going to be eliminated
AluLast
};
- ReadyQueue *Available[IDLast], *Pending[IDLast];
- std::multiset<SUnit *, CompareSUnit> AvailableAlus[AluLast];
+ std::vector<SUnit *> Available[IDLast], Pending[IDLast];
+ std::vector<SUnit *> AvailableAlus[AluLast];
+ std::vector<SUnit *> PhysicalRegCopy;
InstKind CurInstKind;
int CurEmitted;
InstKind NextInstKind;
+ unsigned AluInstCount;
+ unsigned FetchInstCount;
+
int InstKindLimit[IDLast];
int OccupedSlotsMask;
@@ -76,19 +69,9 @@ class R600SchedStrategy : public MachineSchedStrategy {
public:
R600SchedStrategy() :
DAG(0), TII(0), TRI(0), MRI(0) {
- Available[IDAlu] = new ReadyQueue(QAlu, "AAlu");
- Available[IDFetch] = new ReadyQueue(QFetch, "AFetch");
- Available[IDOther] = new ReadyQueue(QOther, "AOther");
- Pending[IDAlu] = new ReadyQueue(QAlu<<4, "PAlu");
- Pending[IDFetch] = new ReadyQueue(QFetch<<4, "PFetch");
- Pending[IDOther] = new ReadyQueue(QOther<<4, "POther");
}
virtual ~R600SchedStrategy() {
- for (unsigned I = 0; I < IDLast; ++I) {
- delete Available[I];
- delete Pending[I];
- }
}
virtual void initialize(ScheduleDAGMI *dag);
@@ -99,20 +82,21 @@ public:
private:
std::vector<MachineInstr *> InstructionsGroupCandidate;
+ bool VLIW5;
int getInstKind(SUnit *SU);
bool regBelongsToClass(unsigned Reg, const TargetRegisterClass *RC) const;
AluKind getAluKind(SUnit *SU) const;
void LoadAlu();
- bool isAvailablesAluEmpty() const;
- SUnit *AttemptFillSlot (unsigned Slot);
+ unsigned AvailablesAluCount() const;
+ SUnit *AttemptFillSlot (unsigned Slot, bool AnyAlu);
void PrepareNextSlot();
- SUnit *PopInst(std::multiset<SUnit *, CompareSUnit> &Q);
+ SUnit *PopInst(std::vector<SUnit*> &Q, bool AnyALU);
void AssignSlot(MachineInstr *MI, unsigned Slot);
SUnit* pickAlu();
SUnit* pickOther(int QID);
- void MoveUnits(ReadyQueue *QSrc, ReadyQueue *QDst);
+ void MoveUnits(std::vector<SUnit *> &QSrc, std::vector<SUnit *> &QDst);
};
} // namespace llvm
diff --git a/lib/Target/R600/R600OptimizeVectorRegisters.cpp b/lib/Target/R600/R600OptimizeVectorRegisters.cpp
new file mode 100644
index 000000000000..cf719c0b9fe1
--- /dev/null
+++ b/lib/Target/R600/R600OptimizeVectorRegisters.cpp
@@ -0,0 +1,380 @@
+//===--------------------- R600MergeVectorRegisters.cpp -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass merges inputs of swizzeable instructions into vectors that share
+/// common data and/or have enough undef subregs, using swizzle abilities.
+///
+/// For instance let's consider the following pseudo code :
+/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
+/// ...
+/// vreg7<def> = REG_SEQ vreg1, sub0, vreg3, sub1, undef, sub2, vreg4, sub3
+/// (swizzable Inst) vreg7, SwizzleMask : sub0, sub1, sub2, sub3
+///
+/// is turned into :
+/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
+/// ...
+/// vreg7<def> = INSERT_SUBREG vreg4, sub3
+/// (swizzable Inst) vreg7, SwizzleMask : sub0, sub2, sub1, sub3
+///
+/// This allows regalloc to reduce register pressure for vector registers and
+/// to reduce MOV count.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "vec-merger"
+#include "llvm/Support/Debug.h"
+#include "AMDGPU.h"
+#include "R600InstrInfo.h"
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+namespace {
+
+static bool
+isImplicitlyDef(MachineRegisterInfo &MRI, unsigned Reg) {
+ for (MachineRegisterInfo::def_iterator It = MRI.def_begin(Reg),
+ E = MRI.def_end(); It != E; ++It) {
+ return (*It).isImplicitDef();
+ }
+ if (MRI.isReserved(Reg)) {
+ return false;
+ }
+ llvm_unreachable("Reg without a def");
+ return false;
+}
+
+class RegSeqInfo {
+public:
+ MachineInstr *Instr;
+ DenseMap<unsigned, unsigned> RegToChan;
+ std::vector<unsigned> UndefReg;
+ RegSeqInfo(MachineRegisterInfo &MRI, MachineInstr *MI) : Instr(MI) {
+ assert (MI->getOpcode() == AMDGPU::REG_SEQUENCE);
+ for (unsigned i = 1, e = Instr->getNumOperands(); i < e; i+=2) {
+ MachineOperand &MO = Instr->getOperand(i);
+ unsigned Chan = Instr->getOperand(i + 1).getImm();
+ if (isImplicitlyDef(MRI, MO.getReg()))
+ UndefReg.push_back(Chan);
+ else
+ RegToChan[MO.getReg()] = Chan;
+ }
+ }
+ RegSeqInfo() {}
+
+ bool operator==(const RegSeqInfo &RSI) const {
+ return RSI.Instr == Instr;
+ }
+};
+
+class R600VectorRegMerger : public MachineFunctionPass {
+private:
+ MachineRegisterInfo *MRI;
+ const R600InstrInfo *TII;
+ bool canSwizzle(const MachineInstr &) const;
+ bool areAllUsesSwizzeable(unsigned Reg) const;
+ void SwizzleInput(MachineInstr &,
+ const std::vector<std::pair<unsigned, unsigned> > &) const;
+ bool tryMergeVector(const RegSeqInfo *, RegSeqInfo *,
+ std::vector<std::pair<unsigned, unsigned> > &Remap) const;
+ bool tryMergeUsingCommonSlot(RegSeqInfo &RSI, RegSeqInfo &CompatibleRSI,
+ std::vector<std::pair<unsigned, unsigned> > &RemapChan);
+ bool tryMergeUsingFreeSlot(RegSeqInfo &RSI, RegSeqInfo &CompatibleRSI,
+ std::vector<std::pair<unsigned, unsigned> > &RemapChan);
+ MachineInstr *RebuildVector(RegSeqInfo *MI,
+ const RegSeqInfo *BaseVec,
+ const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const;
+ void RemoveMI(MachineInstr *);
+ void trackRSI(const RegSeqInfo &RSI);
+
+ typedef DenseMap<unsigned, std::vector<MachineInstr *> > InstructionSetMap;
+ DenseMap<MachineInstr *, RegSeqInfo> PreviousRegSeq;
+ InstructionSetMap PreviousRegSeqByReg;
+ InstructionSetMap PreviousRegSeqByUndefCount;
+public:
+ static char ID;
+ R600VectorRegMerger(TargetMachine &tm) : MachineFunctionPass(ID),
+ TII(0) { }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ const char *getPassName() const {
+ return "R600 Vector Registers Merge Pass";
+ }
+
+ bool runOnMachineFunction(MachineFunction &Fn);
+};
+
+char R600VectorRegMerger::ID = 0;
+
+bool R600VectorRegMerger::canSwizzle(const MachineInstr &MI)
+ const {
+ if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST)
+ return true;
+ switch (MI.getOpcode()) {
+ case AMDGPU::R600_ExportSwz:
+ case AMDGPU::EG_ExportSwz:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool R600VectorRegMerger::tryMergeVector(const RegSeqInfo *Untouched,
+ RegSeqInfo *ToMerge, std::vector< std::pair<unsigned, unsigned> > &Remap)
+ const {
+ unsigned CurrentUndexIdx = 0;
+ for (DenseMap<unsigned, unsigned>::iterator It = ToMerge->RegToChan.begin(),
+ E = ToMerge->RegToChan.end(); It != E; ++It) {
+ DenseMap<unsigned, unsigned>::const_iterator PosInUntouched =
+ Untouched->RegToChan.find((*It).first);
+ if (PosInUntouched != Untouched->RegToChan.end()) {
+ Remap.push_back(std::pair<unsigned, unsigned>
+ ((*It).second, (*PosInUntouched).second));
+ continue;
+ }
+ if (CurrentUndexIdx >= Untouched->UndefReg.size())
+ return false;
+ Remap.push_back(std::pair<unsigned, unsigned>
+ ((*It).second, Untouched->UndefReg[CurrentUndexIdx++]));
+ }
+
+ return true;
+}
+
+static
+unsigned getReassignedChan(
+ const std::vector<std::pair<unsigned, unsigned> > &RemapChan,
+ unsigned Chan) {
+ for (unsigned j = 0, je = RemapChan.size(); j < je; j++) {
+ if (RemapChan[j].first == Chan)
+ return RemapChan[j].second;
+ }
+ llvm_unreachable("Chan wasn't reassigned");
+}
+
+MachineInstr *R600VectorRegMerger::RebuildVector(
+ RegSeqInfo *RSI, const RegSeqInfo *BaseRSI,
+ const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const {
+ unsigned Reg = RSI->Instr->getOperand(0).getReg();
+ MachineBasicBlock::iterator Pos = RSI->Instr;
+ MachineBasicBlock &MBB = *Pos->getParent();
+ DebugLoc DL = Pos->getDebugLoc();
+
+ unsigned SrcVec = BaseRSI->Instr->getOperand(0).getReg();
+ DenseMap<unsigned, unsigned> UpdatedRegToChan = BaseRSI->RegToChan;
+ std::vector<unsigned> UpdatedUndef = BaseRSI->UndefReg;
+ for (DenseMap<unsigned, unsigned>::iterator It = RSI->RegToChan.begin(),
+ E = RSI->RegToChan.end(); It != E; ++It) {
+ unsigned DstReg = MRI->createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
+ unsigned SubReg = (*It).first;
+ unsigned Swizzle = (*It).second;
+ unsigned Chan = getReassignedChan(RemapChan, Swizzle);
+
+ MachineInstr *Tmp = BuildMI(MBB, Pos, DL, TII->get(AMDGPU::INSERT_SUBREG),
+ DstReg)
+ .addReg(SrcVec)
+ .addReg(SubReg)
+ .addImm(Chan);
+ UpdatedRegToChan[SubReg] = Chan;
+ std::vector<unsigned>::iterator ChanPos =
+ std::find(UpdatedUndef.begin(), UpdatedUndef.end(), Chan);
+ if (ChanPos != UpdatedUndef.end())
+ UpdatedUndef.erase(ChanPos);
+ assert(std::find(UpdatedUndef.begin(), UpdatedUndef.end(), Chan) ==
+ UpdatedUndef.end() &&
+ "UpdatedUndef shouldn't contain Chan more than once!");
+ DEBUG(dbgs() << " ->"; Tmp->dump(););
+ (void)Tmp;
+ SrcVec = DstReg;
+ }
+ Pos = BuildMI(MBB, Pos, DL, TII->get(AMDGPU::COPY), Reg)
+ .addReg(SrcVec);
+ DEBUG(dbgs() << " ->"; Pos->dump(););
+
+ DEBUG(dbgs() << " Updating Swizzle:\n");
+ for (MachineRegisterInfo::use_iterator It = MRI->use_begin(Reg),
+ E = MRI->use_end(); It != E; ++It) {
+ DEBUG(dbgs() << " ";(*It).dump(); dbgs() << " ->");
+ SwizzleInput(*It, RemapChan);
+ DEBUG((*It).dump());
+ }
+ RSI->Instr->eraseFromParent();
+
+ // Update RSI
+ RSI->Instr = Pos;
+ RSI->RegToChan = UpdatedRegToChan;
+ RSI->UndefReg = UpdatedUndef;
+
+ return Pos;
+}
+
+void R600VectorRegMerger::RemoveMI(MachineInstr *MI) {
+ for (InstructionSetMap::iterator It = PreviousRegSeqByReg.begin(),
+ E = PreviousRegSeqByReg.end(); It != E; ++It) {
+ std::vector<MachineInstr *> &MIs = (*It).second;
+ MIs.erase(std::find(MIs.begin(), MIs.end(), MI), MIs.end());
+ }
+ for (InstructionSetMap::iterator It = PreviousRegSeqByUndefCount.begin(),
+ E = PreviousRegSeqByUndefCount.end(); It != E; ++It) {
+ std::vector<MachineInstr *> &MIs = (*It).second;
+ MIs.erase(std::find(MIs.begin(), MIs.end(), MI), MIs.end());
+ }
+}
+
+void R600VectorRegMerger::SwizzleInput(MachineInstr &MI,
+ const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const {
+ unsigned Offset;
+ if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST)
+ Offset = 2;
+ else
+ Offset = 3;
+ for (unsigned i = 0; i < 4; i++) {
+ unsigned Swizzle = MI.getOperand(i + Offset).getImm() + 1;
+ for (unsigned j = 0, e = RemapChan.size(); j < e; j++) {
+ if (RemapChan[j].first == Swizzle) {
+ MI.getOperand(i + Offset).setImm(RemapChan[j].second - 1);
+ break;
+ }
+ }
+ }
+}
+
+bool R600VectorRegMerger::areAllUsesSwizzeable(unsigned Reg) const {
+ for (MachineRegisterInfo::use_iterator It = MRI->use_begin(Reg),
+ E = MRI->use_end(); It != E; ++It) {
+ if (!canSwizzle(*It))
+ return false;
+ }
+ return true;
+}
+
+bool R600VectorRegMerger::tryMergeUsingCommonSlot(RegSeqInfo &RSI,
+ RegSeqInfo &CompatibleRSI,
+ std::vector<std::pair<unsigned, unsigned> > &RemapChan) {
+ for (MachineInstr::mop_iterator MOp = RSI.Instr->operands_begin(),
+ MOE = RSI.Instr->operands_end(); MOp != MOE; ++MOp) {
+ if (!MOp->isReg())
+ continue;
+ if (PreviousRegSeqByReg[MOp->getReg()].empty())
+ continue;
+ std::vector<MachineInstr *> MIs = PreviousRegSeqByReg[MOp->getReg()];
+ for (unsigned i = 0, e = MIs.size(); i < e; i++) {
+ CompatibleRSI = PreviousRegSeq[MIs[i]];
+ if (RSI == CompatibleRSI)
+ continue;
+ if (tryMergeVector(&CompatibleRSI, &RSI, RemapChan))
+ return true;
+ }
+ }
+ return false;
+}
+
+bool R600VectorRegMerger::tryMergeUsingFreeSlot(RegSeqInfo &RSI,
+ RegSeqInfo &CompatibleRSI,
+ std::vector<std::pair<unsigned, unsigned> > &RemapChan) {
+ unsigned NeededUndefs = 4 - RSI.UndefReg.size();
+ if (PreviousRegSeqByUndefCount[NeededUndefs].empty())
+ return false;
+ std::vector<MachineInstr *> &MIs =
+ PreviousRegSeqByUndefCount[NeededUndefs];
+ CompatibleRSI = PreviousRegSeq[MIs.back()];
+ tryMergeVector(&CompatibleRSI, &RSI, RemapChan);
+ return true;
+}
+
+void R600VectorRegMerger::trackRSI(const RegSeqInfo &RSI) {
+ for (DenseMap<unsigned, unsigned>::const_iterator
+ It = RSI.RegToChan.begin(), E = RSI.RegToChan.end(); It != E; ++It) {
+ PreviousRegSeqByReg[(*It).first].push_back(RSI.Instr);
+ }
+ PreviousRegSeqByUndefCount[RSI.UndefReg.size()].push_back(RSI.Instr);
+ PreviousRegSeq[RSI.Instr] = RSI;
+}
+
+bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
+ TII = static_cast<const R600InstrInfo *>(Fn.getTarget().getInstrInfo());
+ MRI = &(Fn.getRegInfo());
+ for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
+ MBB != MBBe; ++MBB) {
+ MachineBasicBlock *MB = MBB;
+ PreviousRegSeq.clear();
+ PreviousRegSeqByReg.clear();
+ PreviousRegSeqByUndefCount.clear();
+
+ for (MachineBasicBlock::iterator MII = MB->begin(), MIIE = MB->end();
+ MII != MIIE; ++MII) {
+ MachineInstr *MI = MII;
+ if (MI->getOpcode() != AMDGPU::REG_SEQUENCE) {
+ if (TII->get(MI->getOpcode()).TSFlags & R600_InstFlag::TEX_INST) {
+ unsigned Reg = MI->getOperand(1).getReg();
+ for (MachineRegisterInfo::def_iterator It = MRI->def_begin(Reg),
+ E = MRI->def_end(); It != E; ++It) {
+ RemoveMI(&(*It));
+ }
+ }
+ continue;
+ }
+
+
+ RegSeqInfo RSI(*MRI, MI);
+
+ // All uses of MI are swizzeable ?
+ unsigned Reg = MI->getOperand(0).getReg();
+ if (!areAllUsesSwizzeable(Reg))
+ continue;
+
+ DEBUG (dbgs() << "Trying to optimize ";
+ MI->dump();
+ );
+
+ RegSeqInfo CandidateRSI;
+ std::vector<std::pair<unsigned, unsigned> > RemapChan;
+ DEBUG(dbgs() << "Using common slots...\n";);
+ if (tryMergeUsingCommonSlot(RSI, CandidateRSI, RemapChan)) {
+ // Remove CandidateRSI mapping
+ RemoveMI(CandidateRSI.Instr);
+ MII = RebuildVector(&RSI, &CandidateRSI, RemapChan);
+ trackRSI(RSI);
+ continue;
+ }
+ DEBUG(dbgs() << "Using free slots...\n";);
+ RemapChan.clear();
+ if (tryMergeUsingFreeSlot(RSI, CandidateRSI, RemapChan)) {
+ RemoveMI(CandidateRSI.Instr);
+ MII = RebuildVector(&RSI, &CandidateRSI, RemapChan);
+ trackRSI(RSI);
+ continue;
+ }
+ //Failed to merge
+ trackRSI(RSI);
+ }
+ }
+ return false;
+}
+
+}
+
+llvm::FunctionPass *llvm::createR600VectorRegMerger(TargetMachine &tm) {
+ return new R600VectorRegMerger(tm);
+}
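
The merge driver above first looks for a previous REG_SEQUENCE that already carries one of the candidate's inputs (tryMergeUsingCommonSlot) and only then for one with enough undef lanes (tryMergeUsingFreeSlot); either way the result is a RemapChan list of (old channel, new channel) pairs that RebuildVector and SwizzleInput apply. A standalone illustration of that remapping, using the vreg5/vreg7 example from the file header:

    // Standalone illustration (not pass code) of the RemapChan bookkeeping.
    #include <cassert>
    #include <utility>
    #include <vector>

    static unsigned reassignedChan(
        const std::vector<std::pair<unsigned, unsigned> > &RemapChan,
        unsigned Chan) {
      for (unsigned j = 0, je = RemapChan.size(); j < je; j++)
        if (RemapChan[j].first == Chan)
          return RemapChan[j].second;
      assert(false && "Chan wasn't reassigned");
      return 0;
    }

    int main() {
      // vreg7 = REG_SEQ vreg1:sub0, vreg3:sub1, undef:sub2, vreg4:sub3.
      // In vreg5, vreg1 already sits in sub0, vreg3 in sub2 and sub3 is undef,
      // so only vreg4 needs an INSERT_SUBREG and the consuming swizzle becomes
      // sub0, sub2, sub1, sub3, matching the header example.
      std::vector<std::pair<unsigned, unsigned> > RemapChan;
      RemapChan.push_back(std::make_pair(0u, 0u)); // vreg1: sub0 -> sub0
      RemapChan.push_back(std::make_pair(1u, 2u)); // vreg3: sub1 -> sub2
      RemapChan.push_back(std::make_pair(3u, 3u)); // vreg4: sub3 -> undef sub3
      return reassignedChan(RemapChan, 1) == 2 ? 0 : 1;
    }
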
diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp
index cd7b7d0b219b..cd9b6eae6ed6 100644
--- a/lib/Target/R600/R600Packetizer.cpp
+++ b/lib/Target/R600/R600Packetizer.cpp
@@ -14,22 +14,21 @@
//
//===----------------------------------------------------------------------===//
-#ifndef R600PACKETIZER_CPP
-#define R600PACKETIZER_CPP
-
#define DEBUG_TYPE "packets"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
+#include "AMDGPU.h"
+#include "R600InstrInfo.h"
#include "llvm/CodeGen/DFAPacketizer.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ScheduleDAG.h"
-#include "AMDGPU.h"
-#include "R600InstrInfo.h"
+#include "llvm/Support/raw_ostream.h"
-namespace llvm {
+using namespace llvm;
+
+namespace {
class R600Packetizer : public MachineFunctionPass {
@@ -59,15 +58,8 @@ class R600PacketizerList : public VLIWPacketizerList {
private:
const R600InstrInfo *TII;
const R600RegisterInfo &TRI;
-
- enum BankSwizzle {
- ALU_VEC_012 = 0,
- ALU_VEC_021,
- ALU_VEC_120,
- ALU_VEC_102,
- ALU_VEC_201,
- ALU_VEC_210
- };
+ bool VLIW5;
+ bool ConsideredInstUsesAlreadyWrittenVectorElement;
unsigned getSlot(const MachineInstr *MI) const {
return TRI.getHWRegChan(MI->getOperand(0).getReg());
@@ -84,21 +76,35 @@ private:
MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
if (I->isBundle())
BI++;
+ int LastDstChan = -1;
do {
+ bool isTrans = false;
+ int BISlot = getSlot(BI);
+ if (LastDstChan >= BISlot)
+ isTrans = true;
+ LastDstChan = BISlot;
if (TII->isPredicated(BI))
continue;
- if (TII->isTransOnly(BI))
+ int OperandIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::write);
+ if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm() == 0)
continue;
- int OperandIdx = TII->getOperandIdx(BI->getOpcode(), R600Operands::WRITE);
- if (OperandIdx < 0)
+ int DstIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::dst);
+ if (DstIdx == -1) {
continue;
- if (BI->getOperand(OperandIdx).getImm() == 0)
+ }
+ unsigned Dst = BI->getOperand(DstIdx).getReg();
+ if (isTrans || TII->isTransOnly(BI)) {
+ Result[Dst] = AMDGPU::PS;
continue;
- unsigned Dst = BI->getOperand(0).getReg();
- if (BI->getOpcode() == AMDGPU::DOT4_r600_real) {
+ }
+ if (BI->getOpcode() == AMDGPU::DOT4_r600 ||
+ BI->getOpcode() == AMDGPU::DOT4_eg) {
Result[Dst] = AMDGPU::PV_X;
continue;
}
+ if (Dst == AMDGPU::OQAP) {
+ continue;
+ }
unsigned PVReg = 0;
switch (TRI.getHWRegChan(Dst)) {
case 0:
@@ -123,10 +129,10 @@ private:
void substitutePV(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PVs)
const {
- R600Operands::Ops Ops[] = {
- R600Operands::SRC0,
- R600Operands::SRC1,
- R600Operands::SRC2
+ unsigned Ops[] = {
+ AMDGPU::OpName::src0,
+ AMDGPU::OpName::src1,
+ AMDGPU::OpName::src2
};
for (unsigned i = 0; i < 3; i++) {
int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]);
@@ -144,10 +150,14 @@ public:
MachineDominatorTree &MDT)
: VLIWPacketizerList(MF, MLI, MDT, true),
TII (static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo())),
- TRI(TII->getRegisterInfo()) { }
+ TRI(TII->getRegisterInfo()) {
+ VLIW5 = !MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
+ }
// initPacketizerState - initialize some internal flags.
- void initPacketizerState() { }
+ void initPacketizerState() {
+ ConsideredInstUsesAlreadyWrittenVectorElement = false;
+ }
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
bool ignorePseudoInstruction(MachineInstr *MI, MachineBasicBlock *MBB) {
@@ -161,9 +171,11 @@ public:
return true;
if (!TII->isALUInstr(MI->getOpcode()))
return true;
- if (TII->get(MI->getOpcode()).TSFlags & R600_InstFlag::TRANS_ONLY)
+ if (MI->getOpcode() == AMDGPU::GROUP_BARRIER)
return true;
- if (TII->isTransOnly(MI))
+ // XXX: This can be removed once the packetizer properly handles all the
+ // LDS instruction group restrictions.
+ if (TII->isLDSInstr(MI->getOpcode()))
return true;
return false;
}
@@ -172,11 +184,11 @@ public:
// together.
bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
MachineInstr *MII = SUI->getInstr(), *MIJ = SUJ->getInstr();
- if (getSlot(MII) <= getSlot(MIJ))
- return false;
+ if (getSlot(MII) == getSlot(MIJ))
+ ConsideredInstUsesAlreadyWrittenVectorElement = true;
// Does MII and MIJ share the same pred_sel ?
- int OpI = TII->getOperandIdx(MII->getOpcode(), R600Operands::PRED_SEL),
- OpJ = TII->getOperandIdx(MIJ->getOpcode(), R600Operands::PRED_SEL);
+ int OpI = TII->getOperandIdx(MII->getOpcode(), AMDGPU::OpName::pred_sel),
+ OpJ = TII->getOperandIdx(MIJ->getOpcode(), AMDGPU::OpName::pred_sel);
unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0,
PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0;
if (PredI != PredJ)
@@ -194,6 +206,14 @@ public:
return false;
}
}
+
+ bool ARDef = TII->definesAddressRegister(MII) ||
+ TII->definesAddressRegister(MIJ);
+ bool ARUse = TII->usesAddressRegister(MII) ||
+ TII->usesAddressRegister(MIJ);
+ if (ARDef && ARUse)
+ return false;
+
return true;
}
@@ -202,15 +222,34 @@ public:
bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {return false;}
void setIsLastBit(MachineInstr *MI, unsigned Bit) const {
- unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), R600Operands::LAST);
+ unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::last);
MI->getOperand(LastOp).setImm(Bit);
}
- MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
+ bool isBundlableWithCurrentPMI(MachineInstr *MI,
+ const DenseMap<unsigned, unsigned> &PV,
+ std::vector<R600InstrInfo::BankSwizzle> &BS,
+ bool &isTransSlot) {
+ isTransSlot = TII->isTransOnly(MI);
+ assert (!isTransSlot || VLIW5);
+
+ // Is the dst reg sequence legal ?
+ if (!isTransSlot && !CurrentPacketMIs.empty()) {
+ if (getSlot(MI) <= getSlot(CurrentPacketMIs.back())) {
+ if (ConsideredInstUsesAlreadyWrittenVectorElement &&
+ !TII->isVectorOnly(MI) && VLIW5) {
+ isTransSlot = true;
+ DEBUG(dbgs() << "Considering as Trans Inst :"; MI->dump(););
+ }
+ else
+ return false;
+ }
+ }
+
+    // Are the constant read limitations met ?
CurrentPacketMIs.push_back(MI);
- bool FitsConstLimits = TII->canBundle(CurrentPacketMIs);
- DEBUG(
- if (!FitsConstLimits) {
+ if (!TII->fitsConstReadLimitations(CurrentPacketMIs)) {
+ DEBUG(
dbgs() << "Couldn't pack :\n";
MI->dump();
dbgs() << "with the following packets :\n";
@@ -219,12 +258,15 @@ public:
dbgs() << "\n";
}
dbgs() << "because of Consts read limitations\n";
- });
- const DenseMap<unsigned, unsigned> &PV =
- getPreviousVector(CurrentPacketMIs.front());
- bool FitsReadPortLimits = fitsReadPortLimitation(CurrentPacketMIs, PV);
- DEBUG(
- if (!FitsReadPortLimits) {
+ );
+ CurrentPacketMIs.pop_back();
+ return false;
+ }
+
+    // Is there a BankSwizzle set that meets Read Port limitations ?
+ if (!TII->fitsReadPortLimitations(CurrentPacketMIs,
+ PV, BS, isTransSlot)) {
+ DEBUG(
dbgs() << "Couldn't pack :\n";
MI->dump();
dbgs() << "with the following packets :\n";
@@ -233,146 +275,50 @@ public:
dbgs() << "\n";
}
dbgs() << "because of Read port limitations\n";
- });
- bool isBundlable = FitsConstLimits && FitsReadPortLimits;
- CurrentPacketMIs.pop_back();
- if (!isBundlable) {
- endPacket(MI->getParent(), MI);
- substitutePV(MI, getPreviousVector(MI));
- return VLIWPacketizerList::addToPacket(MI);
- }
- if (!CurrentPacketMIs.empty())
- setIsLastBit(CurrentPacketMIs.back(), 0);
- substitutePV(MI, PV);
- return VLIWPacketizerList::addToPacket(MI);
- }
-private:
- std::vector<std::pair<int, unsigned> >
- ExtractSrcs(const MachineInstr *MI, const DenseMap<unsigned, unsigned> &PV)
- const {
- R600Operands::Ops Ops[] = {
- R600Operands::SRC0,
- R600Operands::SRC1,
- R600Operands::SRC2
- };
- std::vector<std::pair<int, unsigned> > Result;
- for (unsigned i = 0; i < 3; i++) {
- int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]);
- if (OperandIdx < 0){
- Result.push_back(std::pair<int, unsigned>(-1,0));
- continue;
- }
- unsigned Src = MI->getOperand(OperandIdx).getReg();
- if (PV.find(Src) != PV.end()) {
- Result.push_back(std::pair<int, unsigned>(-1,0));
- continue;
- }
- unsigned Reg = TRI.getEncodingValue(Src) & 0xff;
- if (Reg > 127) {
- Result.push_back(std::pair<int, unsigned>(-1,0));
- continue;
- }
- unsigned Chan = TRI.getHWRegChan(Src);
- Result.push_back(std::pair<int, unsigned>(Reg, Chan));
+ );
+ CurrentPacketMIs.pop_back();
+ return false;
}
- return Result;
- }
- std::vector<std::pair<int, unsigned> >
- Swizzle(std::vector<std::pair<int, unsigned> > Src,
- BankSwizzle Swz) const {
- switch (Swz) {
- case ALU_VEC_012:
- break;
- case ALU_VEC_021:
- std::swap(Src[1], Src[2]);
- break;
- case ALU_VEC_102:
- std::swap(Src[0], Src[1]);
- break;
- case ALU_VEC_120:
- std::swap(Src[0], Src[1]);
- std::swap(Src[0], Src[2]);
- break;
- case ALU_VEC_201:
- std::swap(Src[0], Src[2]);
- std::swap(Src[0], Src[1]);
- break;
- case ALU_VEC_210:
- std::swap(Src[0], Src[2]);
- break;
- }
- return Src;
- }
+    // We cannot read LDS source registers from the Trans slot.
+ if (isTransSlot && TII->readsLDSSrcReg(MI))
+ return false;
- bool isLegal(const std::vector<MachineInstr *> &IG,
- const std::vector<BankSwizzle> &Swz,
- const DenseMap<unsigned, unsigned> &PV) const {
- assert (Swz.size() == IG.size());
- int Vector[4][3];
- memset(Vector, -1, sizeof(Vector));
- for (unsigned i = 0, e = IG.size(); i < e; i++) {
- const std::vector<std::pair<int, unsigned> > &Srcs =
- Swizzle(ExtractSrcs(IG[i], PV), Swz[i]);
- for (unsigned j = 0; j < 3; j++) {
- const std::pair<int, unsigned> &Src = Srcs[j];
- if (Src.first < 0)
- continue;
- if (Vector[Src.second][j] < 0)
- Vector[Src.second][j] = Src.first;
- if (Vector[Src.second][j] != Src.first)
- return false;
- }
- }
+ CurrentPacketMIs.pop_back();
return true;
}
- bool recursiveFitsFPLimitation(
- std::vector<MachineInstr *> IG,
- const DenseMap<unsigned, unsigned> &PV,
- std::vector<BankSwizzle> &SwzCandidate,
- std::vector<MachineInstr *> CurrentlyChecked)
- const {
- if (!isLegal(CurrentlyChecked, SwzCandidate, PV))
- return false;
- if (IG.size() == CurrentlyChecked.size()) {
- return true;
- }
- BankSwizzle AvailableSwizzle[] = {
- ALU_VEC_012,
- ALU_VEC_021,
- ALU_VEC_120,
- ALU_VEC_102,
- ALU_VEC_201,
- ALU_VEC_210
- };
- CurrentlyChecked.push_back(IG[CurrentlyChecked.size()]);
- for (unsigned i = 0; i < 6; i++) {
- SwzCandidate.push_back(AvailableSwizzle[i]);
- if (recursiveFitsFPLimitation(IG, PV, SwzCandidate, CurrentlyChecked))
- return true;
- SwzCandidate.pop_back();
- }
- return false;
- }
-
- bool fitsReadPortLimitation(
- std::vector<MachineInstr *> IG,
- const DenseMap<unsigned, unsigned> &PV)
- const {
- //Todo : support shared src0 - src1 operand
- std::vector<BankSwizzle> SwzCandidate;
- bool Result = recursiveFitsFPLimitation(IG, PV, SwzCandidate,
- std::vector<MachineInstr *>());
- if (!Result)
- return false;
- for (unsigned i = 0, e = IG.size(); i < e; i++) {
- MachineInstr *MI = IG[i];
+ MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
+ MachineBasicBlock::iterator FirstInBundle =
+ CurrentPacketMIs.empty() ? MI : CurrentPacketMIs.front();
+ const DenseMap<unsigned, unsigned> &PV =
+ getPreviousVector(FirstInBundle);
+ std::vector<R600InstrInfo::BankSwizzle> BS;
+ bool isTransSlot;
+
+ if (isBundlableWithCurrentPMI(MI, PV, BS, isTransSlot)) {
+ for (unsigned i = 0, e = CurrentPacketMIs.size(); i < e; i++) {
+ MachineInstr *MI = CurrentPacketMIs[i];
+ unsigned Op = TII->getOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::bank_swizzle);
+ MI->getOperand(Op).setImm(BS[i]);
+ }
unsigned Op = TII->getOperandIdx(MI->getOpcode(),
- R600Operands::BANK_SWIZZLE);
- MI->getOperand(Op).setImm(SwzCandidate[i]);
+ AMDGPU::OpName::bank_swizzle);
+ MI->getOperand(Op).setImm(BS.back());
+ if (!CurrentPacketMIs.empty())
+ setIsLastBit(CurrentPacketMIs.back(), 0);
+ substitutePV(MI, PV);
+ MachineBasicBlock::iterator It = VLIWPacketizerList::addToPacket(MI);
+ if (isTransSlot) {
+ endPacket(llvm::next(It)->getParent(), llvm::next(It));
+ }
+ return It;
}
- return true;
+ endPacket(MI->getParent(), MI);
+ if (TII->isTransOnly(MI))
+ return MI;
+ return VLIWPacketizerList::addToPacket(MI);
}
};
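
The local ALU_VEC_* enum and the recursive read-port search deleted from this file now sit behind R600InstrInfo::BankSwizzle and R600InstrInfo::fitsReadPortLimitations; addToPacket simply asks for a set of bank swizzles and writes the chosen value into each packet member's bank_swizzle operand. For reference, the six orderings permute the three source operands exactly as the removed Swizzle() helper did:

    // Restatement of the removed Swizzle() helper: each BankSwizzle value is a
    // permutation of src0/src1/src2 tried while searching for a register-file
    // read-port assignment without bank conflicts.
    #include <algorithm>

    enum BankSwizzle { ALU_VEC_012, ALU_VEC_021, ALU_VEC_120,
                       ALU_VEC_102, ALU_VEC_201, ALU_VEC_210 };

    static void applySwizzle(int Src[3], BankSwizzle Swz) {
      switch (Swz) {
      case ALU_VEC_012: break;
      case ALU_VEC_021: std::swap(Src[1], Src[2]); break;
      case ALU_VEC_102: std::swap(Src[0], Src[1]); break;
      case ALU_VEC_120: std::swap(Src[0], Src[1]); std::swap(Src[0], Src[2]); break;
      case ALU_VEC_201: std::swap(Src[0], Src[2]); std::swap(Src[0], Src[1]); break;
      case ALU_VEC_210: std::swap(Src[0], Src[2]); break;
      }
    }
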
@@ -402,7 +348,8 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) {
MachineBasicBlock::iterator End = MBB->end();
MachineBasicBlock::iterator MI = MBB->begin();
while (MI != End) {
- if (MI->isKill()) {
+ if (MI->isKill() || MI->getOpcode() == AMDGPU::IMPLICIT_DEF ||
+ (MI->getOpcode() == AMDGPU::CF_ALU && !MI->getOperand(8).getImm())) {
MachineBasicBlock::iterator DeleteMI = MI;
++MI;
MBB->erase(DeleteMI);
@@ -450,10 +397,8 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) {
}
-}
+} // end anonymous namespace
llvm::FunctionPass *llvm::createR600Packetizer(TargetMachine &tm) {
return new R600Packetizer(tm);
}
-
-#endif // R600PACKETIZER_CPP
diff --git a/lib/Target/R600/R600RegisterInfo.cpp b/lib/Target/R600/R600RegisterInfo.cpp
index bbd7995d7d51..f3bb88b3eefc 100644
--- a/lib/Target/R600/R600RegisterInfo.cpp
+++ b/lib/Target/R600/R600RegisterInfo.cpp
@@ -20,16 +20,16 @@
using namespace llvm;
-R600RegisterInfo::R600RegisterInfo(AMDGPUTargetMachine &tm,
- const TargetInstrInfo &tii)
-: AMDGPURegisterInfo(tm, tii),
- TM(tm),
- TII(tii)
- { }
+R600RegisterInfo::R600RegisterInfo(AMDGPUTargetMachine &tm)
+: AMDGPURegisterInfo(tm),
+ TM(tm)
+ { RCW.RegWeight = 0; RCW.WeightLimit = 0;}
BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
+ const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo());
+
Reserved.set(AMDGPU::ZERO);
Reserved.set(AMDGPU::HALF);
Reserved.set(AMDGPU::ONE);
@@ -43,25 +43,15 @@ BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(AMDGPU::PRED_SEL_OFF);
Reserved.set(AMDGPU::PRED_SEL_ZERO);
Reserved.set(AMDGPU::PRED_SEL_ONE);
+ Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
for (TargetRegisterClass::iterator I = AMDGPU::R600_AddrRegClass.begin(),
E = AMDGPU::R600_AddrRegClass.end(); I != E; ++I) {
Reserved.set(*I);
}
- for (TargetRegisterClass::iterator I = AMDGPU::TRegMemRegClass.begin(),
- E = AMDGPU::TRegMemRegClass.end();
- I != E; ++I) {
- Reserved.set(*I);
- }
+ TII->reserveIndirectRegisters(Reserved, MF);
- const R600InstrInfo *RII = static_cast<const R600InstrInfo*>(&TII);
- std::vector<unsigned> IndirectRegs = RII->getIndirectReservedRegs(MF);
- for (std::vector<unsigned>::iterator I = IndirectRegs.begin(),
- E = IndirectRegs.end();
- I != E; ++I) {
- Reserved.set(*I);
- }
return Reserved;
}
@@ -79,6 +69,10 @@ unsigned R600RegisterInfo::getHWRegChan(unsigned reg) const {
return this->getEncodingValue(reg) >> HW_CHAN_SHIFT;
}
+unsigned R600RegisterInfo::getHWRegIndex(unsigned Reg) const {
+ return GET_REG_INDEX(getEncodingValue(Reg));
+}
+
const TargetRegisterClass * R600RegisterInfo::getCFGStructurizerRegClass(
MVT VT) const {
switch(VT.SimpleTy) {
@@ -87,13 +81,20 @@ const TargetRegisterClass * R600RegisterInfo::getCFGStructurizerRegClass(
}
}
-unsigned R600RegisterInfo::getSubRegFromChannel(unsigned Channel) const {
- switch (Channel) {
- default: assert(!"Invalid channel index"); return 0;
- case 0: return AMDGPU::sub0;
- case 1: return AMDGPU::sub1;
- case 2: return AMDGPU::sub2;
- case 3: return AMDGPU::sub3;
- }
+const RegClassWeight &R600RegisterInfo::getRegClassWeight(
+ const TargetRegisterClass *RC) const {
+ return RCW;
}
+bool R600RegisterInfo::isPhysRegLiveAcrossClauses(unsigned Reg) const {
+ assert(!TargetRegisterInfo::isVirtualRegister(Reg));
+
+ switch (Reg) {
+ case AMDGPU::OQAP:
+ case AMDGPU::OQBP:
+ case AMDGPU::AR_X:
+ return false;
+ default:
+ return true;
+ }
+}
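
isPhysRegLiveAcrossClauses encodes the fact that the LDS output queue (OQAP/OQBP) and the address register AR.X are clause-local: whatever they hold is gone once an ALU clause ends, so a value produced there must be consumed, or copied into an ordinary GPR, within the same clause. A hedged sketch of how a clause-forming pass might consult it; the caller name is an assumption, not part of this hunk.

    // Hedged sketch: force a copy out of a clause-local register before a
    // clause boundary is emitted.
    #include "R600RegisterInfo.h"

    static bool needsCopyBeforeClauseEnd(const llvm::R600RegisterInfo &TRI,
                                         unsigned PhysReg) {
      return !TRI.isPhysRegLiveAcrossClauses(PhysReg);
    }
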
diff --git a/lib/Target/R600/R600RegisterInfo.h b/lib/Target/R600/R600RegisterInfo.h
index f9ca918f246b..c74c49ecdcde 100644
--- a/lib/Target/R600/R600RegisterInfo.h
+++ b/lib/Target/R600/R600RegisterInfo.h
@@ -21,13 +21,12 @@
namespace llvm {
class R600TargetMachine;
-class TargetInstrInfo;
struct R600RegisterInfo : public AMDGPURegisterInfo {
AMDGPUTargetMachine &TM;
- const TargetInstrInfo &TII;
+ RegClassWeight RCW;
- R600RegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii);
+ R600RegisterInfo(AMDGPUTargetMachine &tm);
virtual BitVector getReservedRegs(const MachineFunction &MF) const;
@@ -40,14 +39,16 @@ struct R600RegisterInfo : public AMDGPURegisterInfo {
/// \brief get the HW encoding for a register's channel.
unsigned getHWRegChan(unsigned reg) const;
+ virtual unsigned getHWRegIndex(unsigned Reg) const;
+
/// \brief get the register class of the specified type to use in the
/// CFGStructurizer
virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const;
- /// \returns the sub reg enum value for the given \p Channel
- /// (e.g. getSubRegFromChannel(0) -> AMDGPU::sel_x)
- unsigned getSubRegFromChannel(unsigned Channel) const;
+ virtual const RegClassWeight &getRegClassWeight(const TargetRegisterClass *RC) const;
+  // \returns true if \p Reg can be defined in one ALU clause and used in another.
+ virtual bool isPhysRegLiveAcrossClauses(unsigned Reg) const;
};
} // End namespace llvm
diff --git a/lib/Target/R600/R600RegisterInfo.td b/lib/Target/R600/R600RegisterInfo.td
index bfc546bb9922..68bcd207b42c 100644
--- a/lib/Target/R600/R600RegisterInfo.td
+++ b/lib/Target/R600/R600RegisterInfo.td
@@ -23,6 +23,14 @@ class R600Reg_128<string n, list<Register> subregs, bits<16> encoding> :
let HWEncoding = encoding;
}
+class R600Reg_64<string n, list<Register> subregs, bits<16> encoding> :
+ RegisterWithSubRegs<n, subregs> {
+ let Namespace = "AMDGPU";
+ let SubRegIndices = [sub0, sub1];
+ let HWEncoding = encoding;
+}
+
+
foreach Index = 0-127 in {
foreach Chan = [ "X", "Y", "Z", "W" ] in {
// 32-bit Temporary Registers
@@ -31,26 +39,29 @@ foreach Index = 0-127 in {
// Indirect addressing offset registers
def Addr#Index#_#Chan : R600RegWithChan <"T("#Index#" + AR.x)."#Chan,
Index, Chan>;
- def TRegMem#Index#_#Chan : R600RegWithChan <"T"#Index#"."#Chan, Index,
- Chan>;
}
// 128-bit Temporary Registers
- def T#Index#_XYZW : R600Reg_128 <"T"#Index#".XYZW",
+ def T#Index#_XYZW : R600Reg_128 <"T"#Index#"",
[!cast<Register>("T"#Index#"_X"),
!cast<Register>("T"#Index#"_Y"),
!cast<Register>("T"#Index#"_Z"),
!cast<Register>("T"#Index#"_W")],
Index>;
+
+ def T#Index#_XY : R600Reg_64 <"T"#Index#"",
+ [!cast<Register>("T"#Index#"_X"),
+ !cast<Register>("T"#Index#"_Y")],
+ Index>;
}
// KCACHE_BANK0
foreach Index = 159-128 in {
foreach Chan = [ "X", "Y", "Z", "W" ] in {
// 32-bit Temporary Registers
- def KC0_#Index#_#Chan : R600RegWithChan <"KC0["#Index#"-128]."#Chan, Index, Chan>;
+ def KC0_#Index#_#Chan : R600RegWithChan <"KC0["#!add(Index,-128)#"]."#Chan, Index, Chan>;
}
// 128-bit Temporary Registers
- def KC0_#Index#_XYZW : R600Reg_128 <"KC0["#Index#"-128].XYZW",
+ def KC0_#Index#_XYZW : R600Reg_128 <"KC0["#!add(Index, -128)#"].XYZW",
[!cast<Register>("KC0_"#Index#"_X"),
!cast<Register>("KC0_"#Index#"_Y"),
!cast<Register>("KC0_"#Index#"_Z"),
@@ -62,10 +73,10 @@ foreach Index = 159-128 in {
foreach Index = 191-160 in {
foreach Chan = [ "X", "Y", "Z", "W" ] in {
// 32-bit Temporary Registers
- def KC1_#Index#_#Chan : R600RegWithChan <"KC1["#Index#"-160]."#Chan, Index, Chan>;
+ def KC1_#Index#_#Chan : R600RegWithChan <"KC1["#!add(Index,-160)#"]."#Chan, Index, Chan>;
}
// 128-bit Temporary Registers
- def KC1_#Index#_XYZW : R600Reg_128 <"KC1["#Index#"-160].XYZW",
+ def KC1_#Index#_XYZW : R600Reg_128 <"KC1["#!add(Index, -160)#"].XYZW",
[!cast<Register>("KC1_"#Index#"_X"),
!cast<Register>("KC1_"#Index#"_Y"),
!cast<Register>("KC1_"#Index#"_Z"),
@@ -82,6 +93,12 @@ foreach Index = 448-480 in {
// Special Registers
+def OQA : R600Reg<"OQA", 219>;
+def OQB : R600Reg<"OQB", 220>;
+def OQAP : R600Reg<"OQAP", 221>;
+def OQBP : R600Reg<"OQAP", 222>;
+def LDS_DIRECT_A : R600Reg<"LDS_DIRECT_A", 223>;
+def LDS_DIRECT_B : R600Reg<"LDS_DIRECT_B", 224>;
def ZERO : R600Reg<"0.0", 248>;
def ONE : R600Reg<"1.0", 249>;
def NEG_ONE : R600Reg<"-1.0", 249>;
@@ -92,10 +109,11 @@ def ALU_LITERAL_X : R600RegWithChan<"literal.x", 253, "X">;
def ALU_LITERAL_Y : R600RegWithChan<"literal.y", 253, "Y">;
def ALU_LITERAL_Z : R600RegWithChan<"literal.z", 253, "Z">;
def ALU_LITERAL_W : R600RegWithChan<"literal.w", 253, "W">;
-def PV_X : R600RegWithChan<"PV.x", 254, "X">;
-def PV_Y : R600RegWithChan<"PV.y", 254, "Y">;
-def PV_Z : R600RegWithChan<"PV.z", 254, "Z">;
-def PV_W : R600RegWithChan<"PV.w", 254, "W">;
+def PV_X : R600RegWithChan<"PV.X", 254, "X">;
+def PV_Y : R600RegWithChan<"PV.Y", 254, "Y">;
+def PV_Z : R600RegWithChan<"PV.Z", 254, "Z">;
+def PV_W : R600RegWithChan<"PV.W", 254, "W">;
+def PS: R600Reg<"PS", 255>;
def PREDICATE_BIT : R600Reg<"PredicateBit", 0>;
def PRED_SEL_OFF: R600Reg<"Pred_sel_off", 0>;
def PRED_SEL_ZERO : R600Reg<"Pred_sel_zero", 2>;
@@ -115,7 +133,8 @@ let isAllocatable = 0 in {
// XXX: Only use the X channel, until we support wider stack widths
def R600_Addr : RegisterClass <"AMDGPU", [i32], 127, (add (sequence "Addr%u_X", 0, 127))>;
-} // End isAllocatable = 0
+def R600_LDS_SRC_REG : RegisterClass<"AMDGPU", [i32], 32,
+ (add OQA, OQB, OQAP, OQBP, LDS_DIRECT_A, LDS_DIRECT_B)>;
def R600_KC0_X : RegisterClass <"AMDGPU", [f32, i32], 32,
(add (sequence "KC0_%u_X", 128, 159))>;
@@ -149,6 +168,8 @@ def R600_KC1 : RegisterClass <"AMDGPU", [f32, i32], 32,
(interleave R600_KC1_X, R600_KC1_Y,
R600_KC1_Z, R600_KC1_W)>;
+} // End isAllocatable = 0
+
def R600_TReg32_X : RegisterClass <"AMDGPU", [f32, i32], 32,
(add (sequence "T%u_X", 0, 127), AR_X)>;
@@ -169,8 +190,9 @@ def R600_Reg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add
R600_TReg32,
R600_ArrayBase,
R600_Addr,
+ R600_KC0, R600_KC1,
ZERO, HALF, ONE, ONE_INT, PV_X, ALU_LITERAL_X, NEG_ONE, NEG_HALF,
- ALU_CONST, ALU_PARAM
+ ALU_CONST, ALU_PARAM, OQAP
)>;
def R600_Predicate : RegisterClass <"AMDGPU", [i32], 32, (add
@@ -184,32 +206,5 @@ def R600_Reg128 : RegisterClass<"AMDGPU", [v4f32, v4i32], 128,
let CopyCost = -1;
}
-//===----------------------------------------------------------------------===//
-// Register classes for indirect addressing
-//===----------------------------------------------------------------------===//
-
-// Super register for all the Indirect Registers. This register class is used
-// by the REG_SEQUENCE instruction to specify the registers to use for direct
-// reads / writes which may be written / read by an indirect address.
-class IndirectSuper<string n, list<Register> subregs> :
- RegisterWithSubRegs<n, subregs> {
- let Namespace = "AMDGPU";
- let SubRegIndices =
- [sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7,
- sub8, sub9, sub10, sub11, sub12, sub13, sub14, sub15];
-}
-
-def IndirectSuperReg : IndirectSuper<"Indirect",
- [TRegMem0_X, TRegMem1_X, TRegMem2_X, TRegMem3_X, TRegMem4_X, TRegMem5_X,
- TRegMem6_X, TRegMem7_X, TRegMem8_X, TRegMem9_X, TRegMem10_X, TRegMem11_X,
- TRegMem12_X, TRegMem13_X, TRegMem14_X, TRegMem15_X]
->;
-
-def IndirectReg : RegisterClass<"AMDGPU", [f32, i32], 32, (add IndirectSuperReg)>;
-
-// This register class defines the registers that are the storage units for
-// the "Indirect Addressing" pseudo memory space.
-// XXX: Only use the X channel, until we support wider stack widths
-def TRegMem : RegisterClass<"AMDGPU", [f32, i32], 32,
- (add (sequence "TRegMem%u_X", 0, 16))
->;
+def R600_Reg64 : RegisterClass<"AMDGPU", [v2f32, v2i32], 64,
+ (add (sequence "T%u_XY", 0, 63))>;
diff --git a/lib/Target/R600/R600Schedule.td b/lib/Target/R600/R600Schedule.td
index 78a460ae9d7b..df62bf85c0ad 100644
--- a/lib/Target/R600/R600Schedule.td
+++ b/lib/Target/R600/R600Schedule.td
@@ -23,14 +23,16 @@ def TRANS : FuncUnit;
def AnyALU : InstrItinClass;
def VecALU : InstrItinClass;
def TransALU : InstrItinClass;
+def XALU : InstrItinClass;
def R600_VLIW5_Itin : ProcessorItineraries <
[ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS, ALU_NULL],
[],
[
InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS]>]>,
- InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_X, ALU_W]>]>,
+ InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>,
InstrItinData<TransALU, [InstrStage<1, [TRANS]>]>,
+ InstrItinData<XALU, [InstrStage<1, [ALU_X]>]>,
InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]>
]
>;
@@ -40,7 +42,7 @@ def R600_VLIW4_Itin : ProcessorItineraries <
[],
[
InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>,
- InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_X, ALU_W]>]>,
+ InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>,
InstrItinData<TransALU, [InstrStage<1, [ALU_NULL]>]>,
InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]>
]
diff --git a/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp b/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
new file mode 100644
index 000000000000..32588948d5d3
--- /dev/null
+++ b/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
@@ -0,0 +1,303 @@
+//===-- R600TextureIntrinsicsReplacer.cpp ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass translates TGSI-like texture intrinsics into R600 texture
+/// intrinsics that are closer to the hardware.
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/InstVisitor.h"
+
+using namespace llvm;
+
+namespace {
+class R600TextureIntrinsicsReplacer :
+ public FunctionPass, public InstVisitor<R600TextureIntrinsicsReplacer> {
+ static char ID;
+
+ Module *Mod;
+ Type *FloatType;
+ Type *Int32Type;
+ Type *V4f32Type;
+ Type *V4i32Type;
+ FunctionType *TexSign;
+ FunctionType *TexQSign;
+
+ void getAdjustmentFromTextureTarget(unsigned TextureType, bool hasLOD,
+ unsigned SrcSelect[4], unsigned CT[4],
+ bool &useShadowVariant) {
+ enum TextureTypes {
+ TEXTURE_1D = 1,
+ TEXTURE_2D,
+ TEXTURE_3D,
+ TEXTURE_CUBE,
+ TEXTURE_RECT,
+ TEXTURE_SHADOW1D,
+ TEXTURE_SHADOW2D,
+ TEXTURE_SHADOWRECT,
+ TEXTURE_1D_ARRAY,
+ TEXTURE_2D_ARRAY,
+ TEXTURE_SHADOW1D_ARRAY,
+ TEXTURE_SHADOW2D_ARRAY,
+ TEXTURE_SHADOWCUBE,
+ TEXTURE_2D_MSAA,
+ TEXTURE_2D_ARRAY_MSAA,
+ TEXTURE_CUBE_ARRAY,
+ TEXTURE_SHADOWCUBE_ARRAY
+ };
+
+ switch (TextureType) {
+ case 0:
+ useShadowVariant = false;
+ return;
+ case TEXTURE_RECT:
+ case TEXTURE_1D:
+ case TEXTURE_2D:
+ case TEXTURE_3D:
+ case TEXTURE_CUBE:
+ case TEXTURE_1D_ARRAY:
+ case TEXTURE_2D_ARRAY:
+ case TEXTURE_CUBE_ARRAY:
+ case TEXTURE_2D_MSAA:
+ case TEXTURE_2D_ARRAY_MSAA:
+ useShadowVariant = false;
+ break;
+ case TEXTURE_SHADOW1D:
+ case TEXTURE_SHADOW2D:
+ case TEXTURE_SHADOWRECT:
+ case TEXTURE_SHADOW1D_ARRAY:
+ case TEXTURE_SHADOW2D_ARRAY:
+ case TEXTURE_SHADOWCUBE:
+ case TEXTURE_SHADOWCUBE_ARRAY:
+ useShadowVariant = true;
+ break;
+ default:
+ llvm_unreachable("Unknow Texture Type");
+ }
+
+ if (TextureType == TEXTURE_RECT ||
+ TextureType == TEXTURE_SHADOWRECT) {
+ CT[0] = 0;
+ CT[1] = 0;
+ }
+
+ if (TextureType == TEXTURE_CUBE_ARRAY ||
+ TextureType == TEXTURE_SHADOWCUBE_ARRAY)
+ CT[2] = 0;
+
+ if (TextureType == TEXTURE_1D_ARRAY ||
+ TextureType == TEXTURE_SHADOW1D_ARRAY) {
+ if (hasLOD && useShadowVariant) {
+ CT[1] = 0;
+ } else {
+ CT[2] = 0;
+ SrcSelect[2] = 1;
+ }
+ } else if (TextureType == TEXTURE_2D_ARRAY ||
+ TextureType == TEXTURE_SHADOW2D_ARRAY) {
+ CT[2] = 0;
+ }
+
+ if ((TextureType == TEXTURE_SHADOW1D ||
+ TextureType == TEXTURE_SHADOW2D ||
+ TextureType == TEXTURE_SHADOWRECT ||
+ TextureType == TEXTURE_SHADOW1D_ARRAY) &&
+ !(hasLOD && useShadowVariant))
+ SrcSelect[3] = 2;
+ }
+
+ void ReplaceCallInst(CallInst &I, FunctionType *FT, const char *Name,
+ unsigned SrcSelect[4], Value *Offset[3], Value *Resource,
+ Value *Sampler, unsigned CT[4], Value *Coord) {
+ IRBuilder<> Builder(&I);
+ Constant *Mask[] = {
+ ConstantInt::get(Int32Type, SrcSelect[0]),
+ ConstantInt::get(Int32Type, SrcSelect[1]),
+ ConstantInt::get(Int32Type, SrcSelect[2]),
+ ConstantInt::get(Int32Type, SrcSelect[3])
+ };
+ Value *SwizzleMask = ConstantVector::get(Mask);
+ Value *SwizzledCoord =
+ Builder.CreateShuffleVector(Coord, Coord, SwizzleMask);
+
+ Value *Args[] = {
+ SwizzledCoord,
+ Offset[0],
+ Offset[1],
+ Offset[2],
+ Resource,
+ Sampler,
+ ConstantInt::get(Int32Type, CT[0]),
+ ConstantInt::get(Int32Type, CT[1]),
+ ConstantInt::get(Int32Type, CT[2]),
+ ConstantInt::get(Int32Type, CT[3])
+ };
+
+ Function *F = Mod->getFunction(Name);
+ if (!F) {
+ F = Function::Create(FT, GlobalValue::ExternalLinkage, Name, Mod);
+ F->addFnAttr(Attribute::ReadNone);
+ }
+ I.replaceAllUsesWith(Builder.CreateCall(F, Args));
+ I.eraseFromParent();
+ }
+
+ void ReplaceTexIntrinsic(CallInst &I, bool hasLOD, FunctionType *FT,
+ const char *VanillaInt,
+ const char *ShadowInt) {
+ Value *Coord = I.getArgOperand(0);
+ Value *ResourceId = I.getArgOperand(1);
+ Value *SamplerId = I.getArgOperand(2);
+
+ unsigned TextureType =
+ dyn_cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+
+ unsigned SrcSelect[4] = { 0, 1, 2, 3 };
+ unsigned CT[4] = {1, 1, 1, 1};
+ Value *Offset[3] = {
+ ConstantInt::get(Int32Type, 0),
+ ConstantInt::get(Int32Type, 0),
+ ConstantInt::get(Int32Type, 0)
+ };
+ bool useShadowVariant;
+
+ getAdjustmentFromTextureTarget(TextureType, hasLOD, SrcSelect, CT,
+ useShadowVariant);
+
+ ReplaceCallInst(I, FT, useShadowVariant?ShadowInt:VanillaInt, SrcSelect,
+ Offset, ResourceId, SamplerId, CT, Coord);
+ }
+
+ void ReplaceTXF(CallInst &I) {
+ Value *Coord = I.getArgOperand(0);
+ Value *ResourceId = I.getArgOperand(4);
+ Value *SamplerId = I.getArgOperand(5);
+
+ unsigned TextureType =
+ dyn_cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
+
+ unsigned SrcSelect[4] = { 0, 1, 2, 3 };
+ unsigned CT[4] = {1, 1, 1, 1};
+ Value *Offset[3] = {
+ I.getArgOperand(1),
+ I.getArgOperand(2),
+ I.getArgOperand(3),
+ };
+ bool useShadowVariant;
+
+ getAdjustmentFromTextureTarget(TextureType, false, SrcSelect, CT,
+ useShadowVariant);
+
+ ReplaceCallInst(I, TexQSign, "llvm.R600.txf", SrcSelect,
+ Offset, ResourceId, SamplerId, CT, Coord);
+ }
+
+public:
+ R600TextureIntrinsicsReplacer():
+ FunctionPass(ID) {
+ }
+
+ virtual bool doInitialization(Module &M) {
+ LLVMContext &Ctx = M.getContext();
+ Mod = &M;
+ FloatType = Type::getFloatTy(Ctx);
+ Int32Type = Type::getInt32Ty(Ctx);
+ V4f32Type = VectorType::get(FloatType, 4);
+ V4i32Type = VectorType::get(Int32Type, 4);
+ Type *ArgsType[] = {
+ V4f32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ };
+ TexSign = FunctionType::get(V4f32Type, ArgsType, /*isVarArg=*/false);
+ Type *ArgsQType[] = {
+ V4i32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ };
+ TexQSign = FunctionType::get(V4f32Type, ArgsQType, /*isVarArg=*/false);
+ return false;
+ }
+
+ virtual bool runOnFunction(Function &F) {
+ visit(F);
+ return false;
+ }
+
+ virtual const char *getPassName() const {
+ return "R600 Texture Intrinsics Replacer";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ }
+
+ void visitCallInst(CallInst &I) {
+ if (!I.getCalledFunction())
+ return;
+
+ StringRef Name = I.getCalledFunction()->getName();
+ if (Name == "llvm.AMDGPU.tex") {
+ ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.tex", "llvm.R600.texc");
+ return;
+ }
+ if (Name == "llvm.AMDGPU.txl") {
+ ReplaceTexIntrinsic(I, true, TexSign, "llvm.R600.txl", "llvm.R600.txlc");
+ return;
+ }
+ if (Name == "llvm.AMDGPU.txb") {
+ ReplaceTexIntrinsic(I, true, TexSign, "llvm.R600.txb", "llvm.R600.txbc");
+ return;
+ }
+ if (Name == "llvm.AMDGPU.txf") {
+ ReplaceTXF(I);
+ return;
+ }
+ if (Name == "llvm.AMDGPU.txq") {
+ ReplaceTexIntrinsic(I, false, TexQSign, "llvm.R600.txq", "llvm.R600.txq");
+ return;
+ }
+ if (Name == "llvm.AMDGPU.ddx") {
+ ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.ddx", "llvm.R600.ddx");
+ return;
+ }
+ if (Name == "llvm.AMDGPU.ddy") {
+ ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.ddy", "llvm.R600.ddy");
+ return;
+ }
+ }
+
+};
+
+char R600TextureIntrinsicsReplacer::ID = 0;
+
+}
+
+FunctionPass *llvm::createR600TextureIntrinsicsReplacer() {
+ return new R600TextureIntrinsicsReplacer();
+}
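ReplaceCallInst above declares the replacement intrinsic on demand: it reuses an existing declaration if the module already has one, and otherwise creates a ReadNone declaration with external linkage. A compile-only sketch of that lookup-or-declare step against the LLVM headers this import ships, using only the API calls visible above (getOrDeclare is a hypothetical helper name, not part of the pass):

#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Reuse an existing declaration of the replacement intrinsic if the module
// already has one; otherwise declare it with external linkage and mark it
// ReadNone, mirroring what ReplaceCallInst does for the "llvm.R600.*" names.
static Function *getOrDeclare(Module &M, FunctionType *FT, const char *Name) {
  Function *F = M.getFunction(Name);
  if (!F) {
    F = Function::Create(FT, GlobalValue::ExternalLinkage, Name, &M);
    F->addFnAttr(Attribute::ReadNone);
  }
  return F;
}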
diff --git a/lib/Target/R600/SIAnnotateControlFlow.cpp b/lib/Target/R600/SIAnnotateControlFlow.cpp
index 2477e2a9dcc3..6bbdf59d559b 100644
--- a/lib/Target/R600/SIAnnotateControlFlow.cpp
+++ b/lib/Target/R600/SIAnnotateControlFlow.cpp
@@ -15,6 +15,8 @@
#include "AMDGPU.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -29,13 +31,13 @@ typedef std::pair<BasicBlock *, Value *> StackEntry;
typedef SmallVector<StackEntry, 16> StackVector;
// Intrinsic names the control flow is annotated with
-static const char *IfIntrinsic = "llvm.SI.if";
-static const char *ElseIntrinsic = "llvm.SI.else";
-static const char *BreakIntrinsic = "llvm.SI.break";
-static const char *IfBreakIntrinsic = "llvm.SI.if.break";
-static const char *ElseBreakIntrinsic = "llvm.SI.else.break";
-static const char *LoopIntrinsic = "llvm.SI.loop";
-static const char *EndCfIntrinsic = "llvm.SI.end.cf";
+static const char *const IfIntrinsic = "llvm.SI.if";
+static const char *const ElseIntrinsic = "llvm.SI.else";
+static const char *const BreakIntrinsic = "llvm.SI.break";
+static const char *const IfBreakIntrinsic = "llvm.SI.if.break";
+static const char *const ElseBreakIntrinsic = "llvm.SI.else.break";
+static const char *const LoopIntrinsic = "llvm.SI.loop";
+static const char *const EndCfIntrinsic = "llvm.SI.end.cf";
class SIAnnotateControlFlow : public FunctionPass {
diff --git a/lib/Target/R600/SIDefines.h b/lib/Target/R600/SIDefines.h
index 716b093fc69d..2cbce282cbe7 100644
--- a/lib/Target/R600/SIDefines.h
+++ b/lib/Target/R600/SIDefines.h
@@ -11,12 +11,28 @@
#ifndef SIDEFINES_H_
#define SIDEFINES_H_
+namespace SIInstrFlags {
+enum {
+ MIMG = 1 << 3,
+ SMRD = 1 << 4,
+ VOP1 = 1 << 5,
+ VOP2 = 1 << 6,
+ VOP3 = 1 << 7,
+ VOPC = 1 << 8,
+ SALU = 1 << 9
+};
+}
+
#define R_00B028_SPI_SHADER_PGM_RSRC1_PS 0x00B028
+#define R_00B02C_SPI_SHADER_PGM_RSRC2_PS 0x00B02C
+#define S_00B02C_EXTRA_LDS_SIZE(x) (((x) & 0xFF) << 8)
#define R_00B128_SPI_SHADER_PGM_RSRC1_VS 0x00B128
#define R_00B228_SPI_SHADER_PGM_RSRC1_GS 0x00B228
#define R_00B848_COMPUTE_PGM_RSRC1 0x00B848
#define S_00B028_VGPRS(x) (((x) & 0x3F) << 0)
#define S_00B028_SGPRS(x) (((x) & 0x0F) << 6)
+#define R_00B84C_COMPUTE_PGM_RSRC2 0x00B84C
+#define S_00B84C_LDS_SIZE(x) (((x) & 0x1FF) << 15)
#define R_0286CC_SPI_PS_INPUT_ENA 0x0286CC
#endif // SIDEFINES_H_
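The S_* helpers above pack a field into its position within a 32-bit hardware register word (mask to the field width, then shift to the field offset), while the R_* names give the register's config offset. A minimal standalone sketch of the same packing for the COMPUTE_PGM_RSRC2 LDS_SIZE field; the value 4 is only an illustrative LDS allocation:

#include <cstdint>
#include <cstdio>

// Same packing scheme as S_00B84C_LDS_SIZE(x): mask to the 9-bit field width,
// then shift to the field's bit position within the 32-bit register word.
static constexpr uint32_t S_00B84C_LDS_SIZE(uint32_t x) {
  return (x & 0x1FF) << 15;
}

int main() {
  uint32_t Rsrc2 = 0;
  Rsrc2 |= S_00B84C_LDS_SIZE(4);   // illustrative LDS allocation, in HW granules
  std::printf("COMPUTE_PGM_RSRC2 = 0x%08x\n", unsigned(Rsrc2));
  return 0;
}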
diff --git a/lib/Target/R600/SIFixSGPRCopies.cpp b/lib/Target/R600/SIFixSGPRCopies.cpp
new file mode 100644
index 000000000000..3370c7955bc7
--- /dev/null
+++ b/lib/Target/R600/SIFixSGPRCopies.cpp
@@ -0,0 +1,263 @@
+//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Copies from VGPR to SGPR registers are illegal and the register coalescer
+/// will sometimes generate these illegal copies in situations like this:
+///
+/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
+///
+/// BB0:
+/// %vreg0 <sgpr> = SCALAR_INST
+/// %vreg1 <vsrc> = COPY %vreg0 <sgpr>
+/// ...
+/// BRANCH %cond BB1, BB2
+/// BB1:
+/// %vreg2 <vgpr> = VECTOR_INST
+/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// BB2:
+/// %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
+/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
+///
+///
+/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
+/// code will look like this:
+///
+/// BB0:
+/// %vreg0 <sgpr> = SCALAR_INST
+/// ...
+/// BRANCH %cond BB1, BB2
+/// BB1:
+/// %vreg2 <vgpr> = VECTOR_INST
+/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// BB2:
+/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
+/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+///
+/// Now that the result of the PHI instruction is an SGPR, the register
+/// allocator is forced to constrain the register class of %vreg3 to
+/// <sgpr>, so we end up with final code like this:
+///
+/// BB0:
+/// %vreg0 <sgpr> = SCALAR_INST
+/// ...
+/// BRANCH %cond BB1, BB2
+/// BB1:
+/// %vreg2 <vgpr> = VECTOR_INST
+/// %vreg3 <sgpr> = COPY %vreg2 <vgpr>
+/// BB2:
+/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
+/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+///
+/// Now this code contains an illegal copy from a VGPR to an SGPR.
+///
+/// In order to avoid this problem, this pass searches for PHI instructions
+/// which define a <vsrc> register and constrains its definition class to
+/// <vgpr> if the user of the PHI's definition register is a vector instruction.
+/// If the PHI's definition class is constrained to <vgpr> then the coalescer
+/// will be unable to perform the COPY removal from the above example which
+/// ultimately led to the creation of an illegal COPY.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "sgpr-copies"
+#include "AMDGPU.h"
+#include "SIInstrInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+namespace {
+
+class SIFixSGPRCopies : public MachineFunctionPass {
+
+private:
+ static char ID;
+ const TargetRegisterClass *inferRegClassFromUses(const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg,
+ unsigned SubReg) const;
+ const TargetRegisterClass *inferRegClassFromDef(const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg,
+ unsigned SubReg) const;
+ bool isVGPRToSGPRCopy(const MachineInstr &Copy, const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI) const;
+
+public:
+ SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const {
+ return "SI Fix SGPR copies";
+ }
+
+};
+
+} // End anonymous namespace
+
+char SIFixSGPRCopies::ID = 0;
+
+FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) {
+ return new SIFixSGPRCopies(tm);
+}
+
+static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
+ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ if (!MI.getOperand(i).isReg() ||
+ !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
+ continue;
+
+ if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
+ return true;
+ }
+ return false;
+}
+
+/// This function walks the use list of \p Reg until it finds an Instruction
+/// that isn't a COPY and returns the register class of that instruction.
+/// \return The register class inferred from the first non-COPY instruction.
+const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromUses(
+ const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg,
+ unsigned SubReg) const {
+ // The Reg parameter to the function must always be defined by either a PHI
+ // or a COPY, therefore it cannot be a physical register.
+ assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
+ "Reg cannot be a physical register");
+
+ const TargetRegisterClass *RC = MRI.getRegClass(Reg);
+ RC = TRI->getSubRegClass(RC, SubReg);
+ for (MachineRegisterInfo::use_iterator I = MRI.use_begin(Reg),
+ E = MRI.use_end(); I != E; ++I) {
+ switch (I->getOpcode()) {
+ case AMDGPU::COPY:
+ RC = TRI->getCommonSubClass(RC, inferRegClassFromUses(TRI, MRI,
+ I->getOperand(0).getReg(),
+ I->getOperand(0).getSubReg()));
+ break;
+ }
+ }
+
+ return RC;
+}
+
+const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromDef(
+ const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg,
+ unsigned SubReg) const {
+ if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
+ const TargetRegisterClass *RC = TRI->getPhysRegClass(Reg);
+ return TRI->getSubRegClass(RC, SubReg);
+ }
+ MachineInstr *Def = MRI.getVRegDef(Reg);
+ if (Def->getOpcode() != AMDGPU::COPY) {
+ return TRI->getSubRegClass(MRI.getRegClass(Reg), SubReg);
+ }
+
+ return inferRegClassFromDef(TRI, MRI, Def->getOperand(1).getReg(),
+ Def->getOperand(1).getSubReg());
+}
+
+bool SIFixSGPRCopies::isVGPRToSGPRCopy(const MachineInstr &Copy,
+ const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI) const {
+
+ unsigned DstReg = Copy.getOperand(0).getReg();
+ unsigned SrcReg = Copy.getOperand(1).getReg();
+ unsigned SrcSubReg = Copy.getOperand(1).getSubReg();
+ const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
+ const TargetRegisterClass *SrcRC;
+
+ if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
+ DstRC == &AMDGPU::M0RegRegClass)
+ return false;
+
+ SrcRC = inferRegClassFromDef(TRI, MRI, SrcReg, SrcSubReg);
+ return TRI->isSGPRClass(DstRC) && TRI->hasVGPRs(SrcRC);
+}
+
+bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
+ MF.getTarget().getRegisterInfo());
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
+ MF.getTarget().getInstrInfo());
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
+ BI != BE; ++BI) {
+
+ MachineBasicBlock &MBB = *BI;
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ I != E; ++I) {
+ MachineInstr &MI = *I;
+ if (MI.getOpcode() == AMDGPU::COPY && isVGPRToSGPRCopy(MI, TRI, MRI)) {
+ DEBUG(dbgs() << "Fixing VGPR -> SGPR copy:\n");
+ DEBUG(MI.print(dbgs()));
+ TII->moveToVALU(MI);
+
+ }
+
+ switch (MI.getOpcode()) {
+ default: continue;
+ case AMDGPU::PHI: {
+ DEBUG(dbgs() << " Fixing PHI:\n");
+ DEBUG(MI.print(dbgs()));
+
+ for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
+ unsigned Reg = MI.getOperand(i).getReg();
+ const TargetRegisterClass *RC = inferRegClassFromDef(TRI, MRI, Reg,
+ MI.getOperand(0).getSubReg());
+ MRI.constrainRegClass(Reg, RC);
+ }
+ unsigned Reg = MI.getOperand(0).getReg();
+ const TargetRegisterClass *RC = inferRegClassFromUses(TRI, MRI, Reg,
+ MI.getOperand(0).getSubReg());
+ if (TRI->getCommonSubClass(RC, &AMDGPU::VReg_32RegClass)) {
+ MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);
+ }
+
+ if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
+ break;
+
+ // If a PHI node defines an SGPR and any of its operands are VGPRs,
+ // then we need to move it to the VALU.
+ for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
+ unsigned Reg = MI.getOperand(i).getReg();
+ if (TRI->hasVGPRs(MRI.getRegClass(Reg))) {
+ TII->moveToVALU(MI);
+ break;
+ }
+ }
+
+ break;
+ }
+ case AMDGPU::REG_SEQUENCE: {
+ if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
+ !hasVGPROperands(MI, TRI))
+ continue;
+
+ DEBUG(dbgs() << "Fixing REG_SEQUENCE:\n");
+ DEBUG(MI.print(dbgs()));
+
+ TII->moveToVALU(MI);
+ break;
+ }
+ }
+ }
+ }
+ return false;
+}
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index 6bd82a5d774a..d5d2b68caf0a 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -13,39 +13,36 @@
//===----------------------------------------------------------------------===//
#include "SIISelLowering.h"
-#include "AMDIL.h"
#include "AMDGPU.h"
#include "AMDILIntrinsicInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
-#include "llvm/IR/Function.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/IR/Function.h"
+
+const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL;
using namespace llvm;
SITargetLowering::SITargetLowering(TargetMachine &TM) :
- AMDGPUTargetLowering(TM),
- TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())),
- TRI(TM.getRegisterInfo()) {
+ AMDGPUTargetLowering(TM) {
addRegisterClass(MVT::i1, &AMDGPU::SReg_64RegClass);
- addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
+ addRegisterClass(MVT::i64, &AMDGPU::VSrc_64RegClass);
- addRegisterClass(MVT::v16i8, &AMDGPU::SReg_128RegClass);
addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass);
addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);
- addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
- addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
+ addRegisterClass(MVT::i32, &AMDGPU::VSrc_32RegClass);
+ addRegisterClass(MVT::f32, &AMDGPU::VSrc_32RegClass);
- addRegisterClass(MVT::v1i32, &AMDGPU::VReg_32RegClass);
-
- addRegisterClass(MVT::v2i32, &AMDGPU::VReg_64RegClass);
- addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
+ addRegisterClass(MVT::f64, &AMDGPU::VSrc_64RegClass);
+ addRegisterClass(MVT::v2i32, &AMDGPU::VSrc_64RegClass);
+ addRegisterClass(MVT::v2f32, &AMDGPU::VSrc_64RegClass);
addRegisterClass(MVT::v4i32, &AMDGPU::VReg_128RegClass);
addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
@@ -59,6 +56,21 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
computeRegisterProperties();
+ // Condition Codes
+ setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
+
+ setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
+
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
@@ -66,14 +78,66 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setOperationAction(ISD::ADD, MVT::i64, Legal);
setOperationAction(ISD::ADD, MVT::i32, Legal);
+ setOperationAction(ISD::ADDC, MVT::i32, Legal);
+ setOperationAction(ISD::ADDE, MVT::i32, Legal);
+
+ setOperationAction(ISD::BITCAST, MVT::i128, Legal);
+
+ // We need to custom lower vector stores from local memory
+ setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
+
+ setOperationAction(ISD::STORE, MVT::v8i32, Custom);
+ setOperationAction(ISD::STORE, MVT::v16i32, Custom);
+
+ // We need to custom lower loads/stores from private memory
+ setOperationAction(ISD::LOAD, MVT::i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::i64, Custom);
+ setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
+
+ setOperationAction(ISD::STORE, MVT::i32, Custom);
+ setOperationAction(ISD::STORE, MVT::i64, Custom);
+ setOperationAction(ISD::STORE, MVT::i128, Custom);
+ setOperationAction(ISD::STORE, MVT::v2i32, Custom);
+ setOperationAction(ISD::STORE, MVT::v4i32, Custom);
+
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
- setOperationAction(ISD::STORE, MVT::i32, Custom);
- setOperationAction(ISD::STORE, MVT::i64, Custom);
+ setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
+ setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
+
+ setOperationAction(ISD::ANY_EXTEND, MVT::i64, Custom);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::i64, Custom);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::i64, Custom);
+
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v16i8, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
+
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
+
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i32, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, Expand);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i32, Expand);
+ setTruncStoreAction(MVT::i128, MVT::i64, Expand);
+ setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
+ setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
setTargetDAGCombine(ISD::SELECT_CC);
@@ -82,12 +146,45 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setSchedulingPreference(Sched::RegPressure);
}
+//===----------------------------------------------------------------------===//
+// TargetLowering queries
+//===----------------------------------------------------------------------===//
+
+bool SITargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ bool *IsFast) const {
+  // XXX: This depends on the address space, and we may also want to revisit
+  // the alignment values we specify in the DataLayout.
+ if (!VT.isSimple() || VT == MVT::Other)
+ return false;
+ return VT.bitsGT(MVT::i32);
+}
+
+bool SITargetLowering::shouldSplitVectorElementType(EVT VT) const {
+ return VT.bitsLE(MVT::i16);
+}
+
+SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
+ SDLoc DL, SDValue Chain,
+ unsigned Offset) const {
+ MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
+ PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
+ AMDGPUAS::CONSTANT_ADDRESS);
+ SDValue BasePtr = DAG.getCopyFromReg(Chain, DL,
+ MRI.getLiveInVirtReg(AMDGPU::SGPR0_SGPR1), MVT::i64);
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
+ DAG.getConstant(Offset, MVT::i64));
+ return DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, Chain, Ptr,
+ MachinePointerInfo(UndefValue::get(PtrTy)), MemVT,
+ false, false, MemVT.getSizeInBits() >> 3);
+
+}
+
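LowerParameter reads a kernel parameter with a sign-extending load from the constant address space, offset from the argument pointer held in SGPR0/SGPR1. The intrinsic lowering further down uses fixed offsets 0 through 32 for the ngroups, global-size and local-size words, and explicit kernel arguments start at byte 36. A standalone sketch of that implicit-argument layout; readField and the buffer contents are purely illustrative:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Byte offsets used by the intrinsic lowering below (ngroups at 0/4/8,
// global size at 12/16/20, local size at 24/28/32); explicit kernel
// arguments start at byte 36.
enum ImplicitArgOffset : unsigned {
  NGROUPS_X = 0,  NGROUPS_Y = 4,  NGROUPS_Z = 8,
  GLOBAL_X  = 12, GLOBAL_Y  = 16, GLOBAL_Z  = 20,
  LOCAL_X   = 24, LOCAL_Y   = 28, LOCAL_Z   = 32,
  FIRST_EXPLICIT_ARG = 36
};

// Hypothetical helper: read one 32-bit field from a host-side copy of the
// argument buffer, the same way LowerParameter emits a load at Ptr + Offset.
static uint32_t readField(const uint8_t *Buffer, unsigned Offset) {
  uint32_t V;
  std::memcpy(&V, Buffer + Offset, sizeof(V));
  return V;
}

int main() {
  uint8_t Buffer[64] = {};
  uint32_t LocalX = 64;                            // pretend local size x == 64
  std::memcpy(Buffer + LOCAL_X, &LocalX, sizeof(LocalX));
  std::printf("local_size_x = %u\n", readField(Buffer, LOCAL_X));
  return 0;
}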
SDValue SITargetLowering::LowerFormalArguments(
SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
@@ -103,9 +200,10 @@ SDValue SITargetLowering::LowerFormalArguments(
for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
const ISD::InputArg &Arg = Ins[i];
-
- // First check if it's a PS input addr
- if (Info->ShaderType == ShaderType::PIXEL && !Arg.Flags.isInReg()) {
+
+ // First check if it's a PS input addr
+ if (Info->ShaderType == ShaderType::PIXEL && !Arg.Flags.isInReg() &&
+ !Arg.Flags.isByVal()) {
assert((PSInputNum <= 15) && "Too many PS inputs!");
@@ -120,7 +218,7 @@ SDValue SITargetLowering::LowerFormalArguments(
}
// Second split vertices into their elements
- if (Arg.VT.isVector()) {
+ if (Info->ShaderType != ShaderType::COMPUTE && Arg.VT.isVector()) {
ISD::InputArg NewArg = Arg;
NewArg.Flags.setSplit();
NewArg.VT = Arg.VT.getVectorElementType();
@@ -136,7 +234,7 @@ SDValue SITargetLowering::LowerFormalArguments(
NewArg.PartOffset += NewArg.VT.getStoreSize();
}
- } else {
+ } else if (Info->ShaderType != ShaderType::COMPUTE) {
Splits.push_back(Arg);
}
}
@@ -152,20 +250,44 @@ SDValue SITargetLowering::LowerFormalArguments(
CCInfo.AllocateReg(AMDGPU::VGPR1);
}
+ // The pointer to the list of arguments is stored in SGPR0, SGPR1
+ if (Info->ShaderType == ShaderType::COMPUTE) {
+ CCInfo.AllocateReg(AMDGPU::SGPR0);
+ CCInfo.AllocateReg(AMDGPU::SGPR1);
+ MF.addLiveIn(AMDGPU::SGPR0_SGPR1, &AMDGPU::SReg_64RegClass);
+ }
+
+ if (Info->ShaderType == ShaderType::COMPUTE) {
+ getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
+ Splits);
+ }
+
AnalyzeFormalArguments(CCInfo, Splits);
for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
+ const ISD::InputArg &Arg = Ins[i];
if (Skipped & (1 << i)) {
- InVals.push_back(SDValue());
+ InVals.push_back(DAG.getUNDEF(Arg.VT));
continue;
}
CCValAssign &VA = ArgLocs[ArgIdx++];
+ EVT VT = VA.getLocVT();
+
+ if (VA.isMemLoc()) {
+ VT = Ins[i].VT;
+ EVT MemVT = Splits[i].VT;
+      // The first 36 bytes of the input buffer contain information about
+      // the thread group and global sizes.
+ SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, DAG.getRoot(),
+ 36 + VA.getLocMemOffset());
+ InVals.push_back(Arg);
+ continue;
+ }
assert(VA.isRegLoc() && "Parameter must be in a register!");
unsigned Reg = VA.getLocReg();
- MVT VT = VA.getLocVT();
if (VT == MVT::i64) {
// For now assume it is a pointer
@@ -181,7 +303,6 @@ SDValue SITargetLowering::LowerFormalArguments(
Reg = MF.addLiveIn(Reg, RC);
SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
- const ISD::InputArg &Arg = Ins[i];
if (Arg.VT.isVector()) {
// Build a vector from the registers
@@ -200,7 +321,7 @@ SDValue SITargetLowering::LowerFormalArguments(
NumElements = Arg.VT.getVectorNumElements() - NumElements;
for (unsigned j = 0; j != NumElements; ++j)
Regs.push_back(DAG.getUNDEF(VT));
-
+
InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT,
Regs.data(), Regs.size()));
continue;
@@ -214,36 +335,274 @@ SDValue SITargetLowering::LowerFormalArguments(
MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
MachineInstr * MI, MachineBasicBlock * BB) const {
+ MachineBasicBlock::iterator I = *MI;
+
switch (MI->getOpcode()) {
default:
return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
case AMDGPU::BRANCH: return BB;
+ case AMDGPU::SI_ADDR64_RSRC: {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+ unsigned SuperReg = MI->getOperand(0).getReg();
+ unsigned SubRegLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned SubRegHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned SubRegHiHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ unsigned SubRegHiLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), SubRegLo)
+ .addOperand(MI->getOperand(1));
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), SubRegHiLo)
+ .addImm(0);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), SubRegHiHi)
+ .addImm(RSRC_DATA_FORMAT >> 32);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE), SubRegHi)
+ .addReg(SubRegHiLo)
+ .addImm(AMDGPU::sub0)
+ .addReg(SubRegHiHi)
+ .addImm(AMDGPU::sub1);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE), SuperReg)
+ .addReg(SubRegLo)
+ .addImm(AMDGPU::sub0_sub1)
+ .addReg(SubRegHi)
+ .addImm(AMDGPU::sub2_sub3);
+ MI->eraseFromParent();
+ break;
+ }
+ case AMDGPU::V_SUB_F64: {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_ADD_F64),
+ MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addReg(MI->getOperand(2).getReg())
+ .addImm(0) /* src2 */
+ .addImm(0) /* ABS */
+ .addImm(0) /* CLAMP */
+ .addImm(0) /* OMOD */
+ .addImm(2); /* NEG */
+ MI->eraseFromParent();
+ break;
+ }
+ case AMDGPU::SI_RegisterStorePseudo: {
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::SI_RegisterStore),
+ Reg);
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ MI->eraseFromParent();
+ }
}
return BB;
}
-EVT SITargetLowering::getSetCCResultType(EVT VT) const {
- return MVT::i1;
+EVT SITargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
+ if (!VT.isVector()) {
+ return MVT::i1;
+ }
+ return MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
}
MVT SITargetLowering::getScalarShiftAmountTy(EVT VT) const {
return MVT::i32;
}
+bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
+ VT = VT.getScalarType();
+
+ if (!VT.isSimple())
+ return false;
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::f32:
+ return false; /* There is V_MAD_F32 for f32 */
+ case MVT::f64:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//
SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
switch (Op.getOpcode()) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
+ case ISD::ADD: return LowerADD(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
+ case ISD::LOAD: {
+ LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
+ if ((Load->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
+ Load->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
+ Op.getValueType().isVector()) {
+ SDValue MergedValues[2] = {
+ SplitVectorLoad(Op, DAG),
+ Load->getChain()
+ };
+ return DAG.getMergeValues(MergedValues, 2, SDLoc(Op));
+ } else {
+ return LowerLOAD(Op, DAG);
+ }
+ }
+
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
+ case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
+ case ISD::ANY_EXTEND: // Fall-through
+ case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, DAG);
+ case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntrinsicID =
+ cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+    // XXX: Hardcoded to two user SGPRs, which hold the pointer to the parameters.
+ unsigned NumUserSGPRs = 2;
+ switch (IntrinsicID) {
+ default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
+ case Intrinsic::r600_read_ngroups_x:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 0);
+ case Intrinsic::r600_read_ngroups_y:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4);
+ case Intrinsic::r600_read_ngroups_z:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 8);
+ case Intrinsic::r600_read_global_size_x:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 12);
+ case Intrinsic::r600_read_global_size_y:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 16);
+ case Intrinsic::r600_read_global_size_z:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 20);
+ case Intrinsic::r600_read_local_size_x:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 24);
+ case Intrinsic::r600_read_local_size_y:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 28);
+ case Intrinsic::r600_read_local_size_z:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 32);
+ case Intrinsic::r600_read_tgid_x:
+ return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
+ AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 0), VT);
+ case Intrinsic::r600_read_tgid_y:
+ return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
+ AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 1), VT);
+ case Intrinsic::r600_read_tgid_z:
+ return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
+ AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 2), VT);
+ case Intrinsic::r600_read_tidig_x:
+ return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ AMDGPU::VGPR0, VT);
+ case Intrinsic::r600_read_tidig_y:
+ return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ AMDGPU::VGPR1, VT);
+ case Intrinsic::r600_read_tidig_z:
+ return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ AMDGPU::VGPR2, VT);
+ case AMDGPUIntrinsic::SI_load_const: {
+ SDValue Ops [] = {
+ ResourceDescriptorToi128(Op.getOperand(1), DAG),
+ Op.getOperand(2)
+ };
+
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo(),
+ MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant,
+ VT.getSizeInBits() / 8, 4);
+ return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
+ Op->getVTList(), Ops, 2, VT, MMO);
+ }
+ case AMDGPUIntrinsic::SI_sample:
+ return LowerSampleIntrinsic(AMDGPUISD::SAMPLE, Op, DAG);
+ case AMDGPUIntrinsic::SI_sampleb:
+ return LowerSampleIntrinsic(AMDGPUISD::SAMPLEB, Op, DAG);
+ case AMDGPUIntrinsic::SI_sampled:
+ return LowerSampleIntrinsic(AMDGPUISD::SAMPLED, Op, DAG);
+ case AMDGPUIntrinsic::SI_samplel:
+ return LowerSampleIntrinsic(AMDGPUISD::SAMPLEL, Op, DAG);
+ case AMDGPUIntrinsic::SI_vs_load_input:
+ return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT,
+ ResourceDescriptorToi128(Op.getOperand(1), DAG),
+ Op.getOperand(2),
+ Op.getOperand(3));
+ }
+ }
+
+ case ISD::INTRINSIC_VOID:
+ SDValue Chain = Op.getOperand(0);
+ unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+
+ switch (IntrinsicID) {
+ case AMDGPUIntrinsic::SI_tbuffer_store: {
+ SDLoc DL(Op);
+ SDValue Ops [] = {
+ Chain,
+ ResourceDescriptorToi128(Op.getOperand(2), DAG),
+ Op.getOperand(3),
+ Op.getOperand(4),
+ Op.getOperand(5),
+ Op.getOperand(6),
+ Op.getOperand(7),
+ Op.getOperand(8),
+ Op.getOperand(9),
+ Op.getOperand(10),
+ Op.getOperand(11),
+ Op.getOperand(12),
+ Op.getOperand(13),
+ Op.getOperand(14)
+ };
+ EVT VT = Op.getOperand(3).getValueType();
+
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo(),
+ MachineMemOperand::MOStore,
+ VT.getSizeInBits() / 8, 4);
+ return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
+ Op->getVTList(), Ops,
+ sizeof(Ops)/sizeof(Ops[0]), VT, MMO);
+ }
+ default:
+ break;
+ }
}
return SDValue();
}
+SDValue SITargetLowering::LowerADD(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (Op.getValueType() != MVT::i64)
+ return SDValue();
+
+ SDLoc DL(Op);
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+
+ SDValue Zero = DAG.getConstant(0, MVT::i32);
+ SDValue One = DAG.getConstant(1, MVT::i32);
+
+ SDValue Lo0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS, Zero);
+ SDValue Hi0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS, One);
+
+ SDValue Lo1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS, Zero);
+ SDValue Hi1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS, One);
+
+ SDVTList VTList = DAG.getVTList(MVT::i32, MVT::Glue);
+
+ SDValue AddLo = DAG.getNode(ISD::ADDC, DL, VTList, Lo0, Lo1);
+ SDValue Carry = AddLo.getValue(1);
+ SDValue AddHi = DAG.getNode(ISD::ADDE, DL, VTList, Hi0, Hi1, Carry);
+
+ return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, AddLo, AddHi.getValue(0));
+}
+
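LowerADD splits an i64 addition into two 32-bit halves: an ADDC on the low words produces a carry that the ADDE on the high words consumes, and BUILD_PAIR reassembles the result. A scalar sketch of the same decomposition that can be checked by hand:

#include <cstdint>
#include <cassert>

// Add two 64-bit values using only 32-bit adds plus an explicit carry,
// mirroring the ADDC/ADDE pair emitted by LowerADD.
static uint64_t add64ViaHalves(uint64_t LHS, uint64_t RHS) {
  uint32_t Lo0 = uint32_t(LHS), Hi0 = uint32_t(LHS >> 32);
  uint32_t Lo1 = uint32_t(RHS), Hi1 = uint32_t(RHS >> 32);

  uint32_t AddLo = Lo0 + Lo1;                 // ISD::ADDC: low halves
  uint32_t Carry = AddLo < Lo0 ? 1 : 0;       // carry-out of the low add
  uint32_t AddHi = Hi0 + Hi1 + Carry;         // ISD::ADDE: high halves + carry

  return (uint64_t(AddHi) << 32) | AddLo;     // ISD::BUILD_PAIR
}

int main() {
  assert(add64ViaHalves(0xFFFFFFFFu, 1) == 0x100000000ull);
  assert(add64ViaHalves(0x123456789ABCDEFull, 0xFEDCBA987654321ull) ==
         0x123456789ABCDEFull + 0xFEDCBA987654321ull);
  return 0;
}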
/// \brief Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {
@@ -265,7 +624,7 @@ static SDNode *findUser(SDValue Value, unsigned Opcode) {
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
SelectionDAG &DAG) const {
- DebugLoc DL = BRCOND.getDebugLoc();
+ SDLoc DL(BRCOND);
SDNode *Intr = BRCOND.getOperand(1).getNode();
SDValue Target = BRCOND.getOperand(2);
@@ -338,30 +697,51 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
return Chain;
}
-#define RSRC_DATA_FORMAT 0xf00000000000
-
-SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
- SDValue Chain = Op.getOperand(0);
- SDValue Value = Op.getOperand(1);
- SDValue VirtualAddress = Op.getOperand(2);
- DebugLoc DL = Op.getDebugLoc();
+SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ LoadSDNode *Load = cast<LoadSDNode>(Op);
- if (StoreNode->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS) {
+ if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
return SDValue();
- }
- SDValue SrcSrc = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128,
- DAG.getConstant(0, MVT::i64),
- DAG.getConstant(RSRC_DATA_FORMAT, MVT::i64));
+ SDValue TruncPtr = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
+ Load->getBasePtr(), DAG.getConstant(0, MVT::i32));
+ SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
+ DAG.getConstant(2, MVT::i32));
+
+ SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
+ Load->getChain(), Ptr,
+ DAG.getTargetConstant(0, MVT::i32),
+ Op.getOperand(2));
+ SDValue MergedValues[2] = {
+ Ret,
+ Load->getChain()
+ };
+ return DAG.getMergeValues(MergedValues, 2, DL);
+
+}
+
+SDValue SITargetLowering::ResourceDescriptorToi128(SDValue Op,
+ SelectionDAG &DAG) const {
- SDValue Ops[2];
- Ops[0] = DAG.getNode(AMDGPUISD::BUFFER_STORE, DL, MVT::Other, Chain,
- Value, SrcSrc, VirtualAddress);
- Ops[1] = Chain;
+ if (Op.getValueType() == MVT::i128) {
+ return Op;
+ }
- return DAG.getMergeValues(Ops, 2, DL);
+ assert(Op.getOpcode() == ISD::UNDEF);
+ return DAG.getNode(ISD::BUILD_PAIR, SDLoc(Op), MVT::i128,
+ DAG.getConstant(0, MVT::i64),
+ DAG.getConstant(0, MVT::i64));
+}
+
+SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode,
+ const SDValue &Op,
+ SelectionDAG &DAG) const {
+ return DAG.getNode(Opcode, SDLoc(Op), Op.getValueType(), Op.getOperand(1),
+ Op.getOperand(2),
+ ResourceDescriptorToi128(Op.getOperand(3), DAG),
+ Op.getOperand(4));
}
SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
@@ -371,7 +751,7 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue False = Op.getOperand(3);
SDValue CC = Op.getOperand(4);
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// Possible Min/Max pattern
SDValue MinMax = LowerMinMax(Op, DAG);
@@ -383,6 +763,84 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False);
}
+SDValue SITargetLowering::LowerSIGN_EXTEND(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+
+ if (VT != MVT::i64) {
+ return SDValue();
+ }
+
+ SDValue Hi = DAG.getNode(ISD::SRA, DL, MVT::i32, Op.getOperand(0),
+ DAG.getConstant(31, MVT::i32));
+
+ return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0), Hi);
+}
+
+SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
+ EVT VT = Store->getMemoryVT();
+
+ SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
+ if (Ret.getNode())
+ return Ret;
+
+ if (VT.isVector() && VT.getVectorNumElements() >= 8)
+ return SplitVectorStore(Op, DAG);
+
+ if (Store->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
+ return SDValue();
+
+ SDValue TruncPtr = DAG.getZExtOrTrunc(Store->getBasePtr(), DL, MVT::i32);
+ SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
+ DAG.getConstant(2, MVT::i32));
+ SDValue Chain = Store->getChain();
+ SmallVector<SDValue, 8> Values;
+
+ if (VT == MVT::i64) {
+ for (unsigned i = 0; i < 2; ++i) {
+ Values.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
+ Store->getValue(), DAG.getConstant(i, MVT::i32)));
+ }
+ } else if (VT == MVT::i128) {
+ for (unsigned i = 0; i < 2; ++i) {
+ for (unsigned j = 0; j < 2; ++j) {
+ Values.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
+ DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64,
+ Store->getValue(), DAG.getConstant(i, MVT::i32)),
+ DAG.getConstant(j, MVT::i32)));
+ }
+ }
+ } else {
+ Values.push_back(Store->getValue());
+ }
+
+ for (unsigned i = 0; i < Values.size(); ++i) {
+ SDValue PartPtr = DAG.getNode(ISD::ADD, DL, MVT::i32,
+ Ptr, DAG.getConstant(i, MVT::i32));
+ Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
+ Chain, Values[i], PartPtr,
+ DAG.getTargetConstant(0, MVT::i32));
+ }
+ return Chain;
+}
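For the private address space, LowerSTORE breaks the value into 32-bit words and emits one REGISTER_STORE per word: the byte address becomes a dword index (shift right by 2) and word i goes to Ptr + i. A standalone sketch of that decomposition, with a plain vector standing in for the register-backed private memory:

#include <cstdint>
#include <cstdio>
#include <vector>

// Store a 64-bit value into a dword-indexed "register file", the way
// LowerSTORE lowers a private-address i64 store: the byte address is
// converted to a dword index (>> 2) and each 32-bit word goes to Ptr + i.
static void storeI64(std::vector<uint32_t> &Regs, uint32_t ByteAddr,
                     uint64_t Value) {
  uint32_t Ptr = ByteAddr >> 2;               // byte address -> dword index
  Regs[Ptr + 0] = uint32_t(Value);            // EXTRACT_ELEMENT 0 (low word)
  Regs[Ptr + 1] = uint32_t(Value >> 32);      // EXTRACT_ELEMENT 1 (high word)
}

int main() {
  std::vector<uint32_t> Regs(16, 0);
  storeI64(Regs, /*ByteAddr=*/8, 0x1122334455667788ull);
  std::printf("Regs[2]=0x%08x Regs[3]=0x%08x\n", Regs[2], Regs[3]);
  return 0;
}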
+
+
+SDValue SITargetLowering::LowerZERO_EXTEND(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+
+ if (VT != MVT::i64) {
+ return SDValue();
+ }
+
+ return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0),
+ DAG.getConstant(0, MVT::i32));
+}
+
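Both extension lowerings build the i64 result as a (lo, hi) pair: LowerZERO_EXTEND pairs the operand with a constant 0, while LowerSIGN_EXTEND pairs it with the operand shifted right arithmetically by 31 so the high word replicates the sign bit. A scalar sketch of both (the signed shift assumes the usual arithmetic-shift behaviour, as the SRA node does):

#include <cstdint>
#include <cassert>

// hi = 0 for zero-extension (LowerZERO_EXTEND).
static uint64_t zext32to64(uint32_t Lo) {
  return (uint64_t(0) << 32) | Lo;
}

// hi = Lo >> 31 (arithmetic) for sign-extension (LowerSIGN_EXTEND):
// every bit of the high word is a copy of the sign bit.
static uint64_t sext32to64(uint32_t Lo) {
  uint32_t Hi = uint32_t(int32_t(Lo) >> 31);
  return (uint64_t(Hi) << 32) | Lo;
}

int main() {
  assert(zext32to64(0xFFFFFFFFu) == 0x00000000FFFFFFFFull);
  assert(sext32to64(0xFFFFFFFFu) == 0xFFFFFFFFFFFFFFFFull);
  assert(sext32to64(7u) == 7ull);
  return 0;
}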
//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
@@ -390,13 +848,12 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
switch (N->getOpcode()) {
default: break;
case ISD::SELECT_CC: {
- N->dump();
ConstantSDNode *True, *False;
// i1 selectcc(l, r, -1, 0, cc) -> i1 setcc(l, r, cc)
if ((True = dyn_cast<ConstantSDNode>(N->getOperand(2)))
@@ -433,13 +890,13 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
return SDValue();
}
-/// \brief Test if RegClass is one of the VSrc classes
+/// \brief Test if RegClass is one of the VSrc classes
static bool isVSrc(unsigned RegClass) {
return AMDGPU::VSrc_32RegClassID == RegClass ||
AMDGPU::VSrc_64RegClassID == RegClass;
}
-/// \brief Test if RegClass is one of the SSrc classes
+/// \brief Test if RegClass is one of the SSrc classes
static bool isSSrc(unsigned RegClass) {
return AMDGPU::SSrc_32RegClassID == RegClass ||
AMDGPU::SSrc_64RegClassID == RegClass;
@@ -481,6 +938,8 @@ bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate,
bool &ScalarSlotUsed) const {
MachineSDNode *Mov = dyn_cast<MachineSDNode>(Operand);
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
if (Mov == 0 || !TII->isMov(Mov->getMachineOpcode()))
return false;
@@ -512,30 +971,67 @@ bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate,
return false;
}
+const TargetRegisterClass *SITargetLowering::getRegClassForNode(
+ SelectionDAG &DAG, const SDValue &Op) const {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ const SIRegisterInfo &TRI = TII->getRegisterInfo();
+
+ if (!Op->isMachineOpcode()) {
+ switch(Op->getOpcode()) {
+ case ISD::CopyFromReg: {
+ MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
+ unsigned Reg = cast<RegisterSDNode>(Op->getOperand(1))->getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ return MRI.getRegClass(Reg);
+ }
+ return TRI.getPhysRegClass(Reg);
+ }
+ default: return NULL;
+ }
+ }
+ const MCInstrDesc &Desc = TII->get(Op->getMachineOpcode());
+ int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass;
+ if (OpClassID != -1) {
+ return TRI.getRegClass(OpClassID);
+ }
+ switch(Op.getMachineOpcode()) {
+ case AMDGPU::COPY_TO_REGCLASS:
+ // Operand 1 is the register class id for COPY_TO_REGCLASS instructions.
+ OpClassID = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
+
+    // If the COPY_TO_REGCLASS instruction is copying to a VSrc register
+    // class, then the register class for the value could be either a
+    // VReg or an SReg. In order to get a more accurate register class, we
+    // recurse on the source operand of the COPY.
+ if (OpClassID == AMDGPU::VSrc_32RegClassID ||
+ OpClassID == AMDGPU::VSrc_64RegClassID) {
+ return getRegClassForNode(DAG, Op.getOperand(0));
+ }
+ return TRI.getRegClass(OpClassID);
+ case AMDGPU::EXTRACT_SUBREG: {
+ int SubIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ const TargetRegisterClass *SuperClass =
+ getRegClassForNode(DAG, Op.getOperand(0));
+ return TRI.getSubClassWithSubReg(SuperClass, SubIdx);
+ }
+ case AMDGPU::REG_SEQUENCE:
+ // Operand 0 is the register class id for REG_SEQUENCE instructions.
+ return TRI.getRegClass(
+ cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue());
+ default:
+ return getRegClassFor(Op.getSimpleValueType());
+ }
+}
+
/// \brief Does "Op" fit into register class "RegClass" ?
-bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, SDValue &Op,
+bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, const SDValue &Op,
unsigned RegClass) const {
-
- MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
- SDNode *Node = Op.getNode();
-
- const TargetRegisterClass *OpClass;
- if (MachineSDNode *MN = dyn_cast<MachineSDNode>(Node)) {
- const MCInstrDesc &Desc = TII->get(MN->getMachineOpcode());
- int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass;
- if (OpClassID == -1)
- OpClass = getRegClassFor(Op.getSimpleValueType());
- else
- OpClass = TRI->getRegClass(OpClassID);
-
- } else if (Node->getOpcode() == ISD::CopyFromReg) {
- RegisterSDNode *Reg = cast<RegisterSDNode>(Node->getOperand(1).getNode());
- OpClass = MRI.getRegClass(Reg->getReg());
-
- } else
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterClass *RC = getRegClassForNode(DAG, Op);
+ if (!RC) {
return false;
-
- return TRI->getRegClass(RegClass)->hasSubClassEq(OpClass);
+ }
+ return TRI->getRegClass(RegClass)->hasSubClassEq(RC);
}
/// \brief Make sure that we don't exceed the number of allowed scalars
@@ -561,20 +1057,33 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
return;
}
- // This is a conservative aproach, it is possible that we can't determine
- // the correct register class and copy too often, but better save than sorry.
+  // This is a conservative approach. It is possible that we can't determine the
+ // correct register class and copy too often, but better safe than sorry.
SDValue RC = DAG.getTargetConstant(RegClass, MVT::i32);
- SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DebugLoc(),
+ SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, SDLoc(),
Operand.getValueType(), Operand, RC);
Operand = SDValue(Node, 0);
}
+/// \returns true if \p Node's operands are different from the SDValue list
+/// \p Ops
+static bool isNodeChanged(const SDNode *Node, const std::vector<SDValue> &Ops) {
+ for (unsigned i = 0, e = Node->getNumOperands(); i < e; ++i) {
+ if (Ops[i].getNode() != Node->getOperand(i).getNode()) {
+ return true;
+ }
+ }
+ return false;
+}
+
/// \brief Try to fold the Nodes operands into the Node
SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
SelectionDAG &DAG) const {
// Original encoding (either e32 or e64)
int Opcode = Node->getMachineOpcode();
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
const MCInstrDesc *Desc = &TII->get(Opcode);
unsigned NumDefs = Desc->getNumDefs();
@@ -700,13 +1209,19 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i)
Ops.push_back(Node->getOperand(i));
+ // Nodes that have a glue result are not CSE'd by getMachineNode(), so in
+  // this case a brand new node is always created, even if the operands
+ // are the same as before. So, manually check if anything has been changed.
+ if (Desc->Opcode == Opcode && !isNodeChanged(Node, Ops)) {
+ return Node;
+ }
+
// Create a complete new instruction
- return DAG.getMachineNode(Desc->Opcode, Node->getDebugLoc(),
- Node->getVTList(), Ops);
+ return DAG.getMachineNode(Desc->Opcode, SDLoc(Node), Node->getVTList(), Ops);
}
/// \brief Helper function for adjustWritemask
-unsigned SubIdx2Lane(unsigned Idx) {
+static unsigned SubIdx2Lane(unsigned Idx) {
switch (Idx) {
default: return 0;
case AMDGPU::sub0: return 0;
@@ -720,7 +1235,9 @@ unsigned SubIdx2Lane(unsigned Idx) {
void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
SelectionDAG &DAG) const {
SDNode *Users[4] = { };
- unsigned Writemask = 0, Lane = 0;
+ unsigned Lane = 0;
+ unsigned OldDmask = Node->getConstantOperandVal(0);
+ unsigned NewDmask = 0;
// Try to figure out the used register components
for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
@@ -731,32 +1248,45 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
return;
+ // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
+ // Note that subregs are packed, i.e. Lane==0 is the first bit set
+ // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
+ // set, etc.
Lane = SubIdx2Lane(I->getConstantOperandVal(1));
+ // Set which texture component corresponds to the lane.
+ unsigned Comp;
+ for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
+ assert(Dmask);
+ Comp = countTrailingZeros(Dmask);
+ Dmask &= ~(1 << Comp);
+ }
+
// Abort if we have more than one user per component
if (Users[Lane])
return;
Users[Lane] = *I;
- Writemask |= 1 << Lane;
+ NewDmask |= 1 << Comp;
}
- // Abort if all components are used
- if (Writemask == 0xf)
+ // Abort if there's no change
+ if (NewDmask == OldDmask)
return;
// Adjust the writemask in the node
std::vector<SDValue> Ops;
- Ops.push_back(DAG.getTargetConstant(Writemask, MVT::i32));
+ Ops.push_back(DAG.getTargetConstant(NewDmask, MVT::i32));
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
Ops.push_back(Node->getOperand(i));
Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());
// If we only got one lane, replace it with a copy
- if (Writemask == (1U << Lane)) {
+ // (if NewDmask has only one bit set...)
+ if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
SDValue RC = DAG.getTargetConstant(AMDGPU::VReg_32RegClassID, MVT::i32);
SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
- DebugLoc(), Users[Lane]->getValueType(0),
+ SDLoc(), Users[Lane]->getValueType(0),
SDValue(Node, 0), RC);
DAG.ReplaceAllUsesWith(Users[Lane], Copy);
return;
@@ -784,8 +1314,11 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
 /// \brief Fold the instructions after selecting them
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
SelectionDAG &DAG) const {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ Node = AdjustRegClass(Node, DAG);
- if (AMDGPU::isMIMG(Node->getMachineOpcode()) != -1)
+ if (TII->isMIMG(Node->getMachineOpcode()))
adjustWritemask(Node, DAG);
return foldOperands(Node, DAG);
@@ -795,7 +1328,9 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
SDNode *Node) const {
- if (AMDGPU::isMIMG(MI->getOpcode()) == -1)
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ if (!TII->isMIMG(MI->getOpcode()))
return;
unsigned VReg = MI->getOperand(0).getReg();
@@ -812,6 +1347,53 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
case 3: RC = &AMDGPU::VReg_96RegClass; break;
}
+ unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet);
+ MI->setDesc(TII->get(NewOpcode));
MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
MRI.setRegClass(VReg, RC);
}
+
+MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N,
+ SelectionDAG &DAG) const {
+
+ SDLoc DL(N);
+ unsigned NewOpcode = N->getMachineOpcode();
+
+ switch (N->getMachineOpcode()) {
+ default: return N;
+ case AMDGPU::S_LOAD_DWORD_IMM:
+ NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
+ // Fall-through
+ case AMDGPU::S_LOAD_DWORDX2_SGPR:
+ if (NewOpcode == N->getMachineOpcode()) {
+ NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
+ }
+ // Fall-through
+ case AMDGPU::S_LOAD_DWORDX4_IMM:
+ case AMDGPU::S_LOAD_DWORDX4_SGPR: {
+ if (NewOpcode == N->getMachineOpcode()) {
+ NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
+ }
+ if (fitsRegClass(DAG, N->getOperand(0), AMDGPU::SReg_64RegClassID)) {
+ return N;
+ }
+ ConstantSDNode *Offset = cast<ConstantSDNode>(N->getOperand(1));
+ SDValue Ops[] = {
+ SDValue(DAG.getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::i128,
+ DAG.getConstant(0, MVT::i64)), 0),
+ N->getOperand(0),
+ DAG.getConstant(Offset->getSExtValue() << 2, MVT::i32)
+ };
+ return DAG.getMachineNode(NewOpcode, DL, N->getVTList(), Ops);
+ }
+ }
+}
+
+SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
+ const TargetRegisterClass *RC,
+ unsigned Reg, EVT VT) const {
+ SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT);
+
+ return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()),
+ cast<RegisterSDNode>(VReg)->getReg(), VT);
+}
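
The adjustWritemask hunk above recomputes the MIMG dmask from the lanes that are actually extracted: because the result subregisters are packed, lane N corresponds to the (N+1)-th set bit of the old dmask rather than to component N directly. Below is a minimal standalone sketch of that mapping, with illustrative names and no LLVM dependencies.

// Sketch only: mirrors the lane-to-component mapping used by adjustWritemask.
// OldDmask is the original 4-bit writemask; UsedLanes marks which packed
// result lanes (sub0..sub3) are actually read by EXTRACT_SUBREG users.
#include <cassert>
#include <cstdint>

static unsigned countTrailingZeros32(uint32_t V) {
  unsigned N = 0;
  while ((V & 1) == 0) { V >>= 1; ++N; }
  return N;
}

static unsigned recomputeDmask(unsigned OldDmask, unsigned UsedLanes) {
  unsigned NewDmask = 0;
  for (unsigned Lane = 0; Lane < 4; ++Lane) {
    if (!(UsedLanes & (1u << Lane)))
      continue;
    // Lane N corresponds to the (N+1)-th set bit of OldDmask.
    unsigned Dmask = OldDmask, Comp = 0;
    for (unsigned i = 0; i <= Lane; ++i) {
      assert(Dmask && "more lanes used than components written");
      Comp = countTrailingZeros32(Dmask);
      Dmask &= ~(1u << Comp);
    }
    NewDmask |= 1u << Comp;
  }
  return NewDmask;
}
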
diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h
index de637bea37b9..9933eced901f 100644
--- a/lib/Target/R600/SIISelLowering.h
+++ b/lib/Target/R600/SIISelLowering.h
@@ -21,35 +21,48 @@
namespace llvm {
class SITargetLowering : public AMDGPUTargetLowering {
- const SIInstrInfo * TII;
- const TargetRegisterInfo * TRI;
-
- SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, SDLoc DL,
+ SDValue Chain, unsigned Offset) const;
+ SDValue LowerSampleIntrinsic(unsigned Opcode, const SDValue &Op,
+ SelectionDAG &DAG) const;
+ SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue ResourceDescriptorToi128(SDValue Op, SelectionDAG &DAG) const;
bool foldImm(SDValue &Operand, int32_t &Immediate,
bool &ScalarSlotUsed) const;
- bool fitsRegClass(SelectionDAG &DAG, SDValue &Op, unsigned RegClass) const;
- void ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
+ const TargetRegisterClass *getRegClassForNode(SelectionDAG &DAG,
+ const SDValue &Op) const;
+ bool fitsRegClass(SelectionDAG &DAG, const SDValue &Op,
+ unsigned RegClass) const;
+ void ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
unsigned RegClass, bool &ScalarSlotUsed) const;
SDNode *foldOperands(MachineSDNode *N, SelectionDAG &DAG) const;
void adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;
+ MachineSDNode *AdjustRegClass(MachineSDNode *N, SelectionDAG &DAG) const;
public:
SITargetLowering(TargetMachine &tm);
+ bool allowsUnalignedMemoryAccesses(EVT VT, bool *IsFast) const;
+ virtual bool shouldSplitVectorElementType(EVT VT) const;
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI,
MachineBasicBlock * BB) const;
- virtual EVT getSetCCResultType(EVT VT) const;
+ virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
virtual MVT getScalarShiftAmountTy(EVT VT) const;
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const;
@@ -57,6 +70,8 @@ public:
SDNode *Node) const;
int32_t analyzeImmediate(const SDNode *N) const;
+ SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC,
+ unsigned Reg, EVT VT) const;
};
} // End namespace llvm
diff --git a/lib/Target/R600/SIInsertWaits.cpp b/lib/Target/R600/SIInsertWaits.cpp
index 98bd3dbb6646..7ef662eb65b1 100644
--- a/lib/Target/R600/SIInsertWaits.cpp
+++ b/lib/Target/R600/SIInsertWaits.cpp
@@ -47,7 +47,7 @@ class SIInsertWaits : public MachineFunctionPass {
private:
static char ID;
const SIInstrInfo *TII;
- const SIRegisterInfo &TRI;
+ const SIRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
/// \brief Constant hardware limits
@@ -97,8 +97,9 @@ private:
public:
SIInsertWaits(TargetMachine &tm) :
MachineFunctionPass(ID),
- TII(static_cast<const SIInstrInfo*>(tm.getInstrInfo())),
- TRI(TII->getRegisterInfo()) { }
+ TII(0),
+ TRI(0),
+ ExpInstrTypesSeen(0) { }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -133,12 +134,19 @@ Counters SIInsertWaits::getHwCounts(MachineInstr &MI) {
 // LGKM may use larger values
if (TSFlags & SIInstrFlags::LGKM_CNT) {
- MachineOperand &Op = MI.getOperand(0);
- assert(Op.isReg() && "First LGKM operand must be a register!");
+ if (TII->isSMRD(MI.getOpcode())) {
- unsigned Reg = Op.getReg();
- unsigned Size = TRI.getMinimalPhysRegClass(Reg)->getSize();
- Result.Named.LGKM = Size > 4 ? 2 : 1;
+ MachineOperand &Op = MI.getOperand(0);
+ assert(Op.isReg() && "First LGKM operand must be a register!");
+
+ unsigned Reg = Op.getReg();
+ unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
+ Result.Named.LGKM = Size > 4 ? 2 : 1;
+
+ } else {
+ // DS
+ Result.Named.LGKM = 1;
+ }
} else {
Result.Named.LGKM = 0;
@@ -178,16 +186,16 @@ bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
RegInterval SIInsertWaits::getRegInterval(MachineOperand &Op) {
- if (!Op.isReg())
+ if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()))
return std::make_pair(0, 0);
unsigned Reg = Op.getReg();
- unsigned Size = TRI.getMinimalPhysRegClass(Reg)->getSize();
+ unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
assert(Size >= 4);
RegInterval Result;
- Result.first = TRI.getEncodingValue(Reg);
+ Result.first = TRI->getEncodingValue(Reg);
Result.second = Result.first + Size / 4;
return Result;
@@ -328,9 +336,11 @@ Counters SIInsertWaits::handleOperands(MachineInstr &MI) {
}
bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
-
bool Changes = false;
+ TII = static_cast<const SIInstrInfo*>(MF.getTarget().getInstrInfo());
+ TRI = static_cast<const SIRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
MRI = &MF.getRegInfo();
WaitedOn = ZeroCounts;
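
The SIInsertWaits changes above defer the TII/TRI lookups to runOnMachineFunction and split the LGKM_CNT accounting: an SMRD load counts one or two outstanding events depending on the destination size, while a DS (LDS) instruction counts one. A hedged standalone sketch of that accounting, with an illustrative function name:

// Sketch only: the LGKM_CNT bookkeeping implied by the getHwCounts hunk above.
// An SMRD load wider than 4 bytes is counted as two outstanding LGKM events,
// any other SMRD as one, and a DS operation as one.
static unsigned lgkmCountFor(bool IsSMRD, unsigned DstSizeInBytes) {
  if (IsSMRD)
    return DstSizeInBytes > 4 ? 2 : 1;
  return 1; // DS
}
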
diff --git a/lib/Target/R600/SIInstrFormats.td b/lib/Target/R600/SIInstrFormats.td
index f737ddd28077..53ebaaf15a7c 100644
--- a/lib/Target/R600/SIInstrFormats.td
+++ b/lib/Target/R600/SIInstrFormats.td
@@ -17,10 +17,24 @@ class InstSI <dag outs, dag ins, string asm, list<dag> pattern> :
field bits<1> VM_CNT = 0;
field bits<1> EXP_CNT = 0;
field bits<1> LGKM_CNT = 0;
+ field bits<1> MIMG = 0;
+ field bits<1> SMRD = 0;
+ field bits<1> VOP1 = 0;
+ field bits<1> VOP2 = 0;
+ field bits<1> VOP3 = 0;
+ field bits<1> VOPC = 0;
+ field bits<1> SALU = 0;
let TSFlags{0} = VM_CNT;
let TSFlags{1} = EXP_CNT;
let TSFlags{2} = LGKM_CNT;
+ let TSFlags{3} = MIMG;
+ let TSFlags{4} = SMRD;
+ let TSFlags{5} = VOP1;
+ let TSFlags{6} = VOP2;
+ let TSFlags{7} = VOP3;
+ let TSFlags{8} = VOPC;
+ let TSFlags{9} = SALU;
}
class Enc32 <dag outs, dag ins, string asm, list<dag> pattern> :
@@ -55,6 +69,7 @@ class SOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let SALU = 1;
}
class SOP2 <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -73,6 +88,7 @@ class SOP2 <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let SALU = 1;
}
class SOPC <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -90,6 +106,7 @@ class SOPC <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let SALU = 1;
}
class SOPK <bits<5> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -106,6 +123,7 @@ class SOPK <bits<5> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let SALU = 1;
}
class SOPP <bits<7> op, dag ins, string asm, list<dag> pattern> : Enc32 <
@@ -123,6 +141,7 @@ class SOPP <bits<7> op, dag ins, string asm, list<dag> pattern> : Enc32 <
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let SALU = 1;
}
class SMRD <bits<5> op, bits<1> imm, dag outs, dag ins, string asm,
@@ -140,6 +159,7 @@ class SMRD <bits<5> op, bits<1> imm, dag outs, dag ins, string asm,
let Inst{31-27} = 0x18; //encoding
let LGKM_CNT = 1;
+ let SMRD = 1;
}
//===----------------------------------------------------------------------===//
@@ -162,6 +182,8 @@ class VOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP1 = 1;
}
class VOP2 <bits<6> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -180,60 +202,66 @@ class VOP2 <bits<6> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP2 = 1;
}
class VOP3 <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
Enc64 <outs, ins, asm, pattern> {
- bits<8> VDST;
- bits<9> SRC0;
- bits<9> SRC1;
- bits<9> SRC2;
- bits<3> ABS;
- bits<1> CLAMP;
- bits<2> OMOD;
- bits<3> NEG;
-
- let Inst{7-0} = VDST;
- let Inst{10-8} = ABS;
- let Inst{11} = CLAMP;
+ bits<8> dst;
+ bits<9> src0;
+ bits<9> src1;
+ bits<9> src2;
+ bits<3> abs;
+ bits<1> clamp;
+ bits<2> omod;
+ bits<3> neg;
+
+ let Inst{7-0} = dst;
+ let Inst{10-8} = abs;
+ let Inst{11} = clamp;
let Inst{25-17} = op;
let Inst{31-26} = 0x34; //encoding
- let Inst{40-32} = SRC0;
- let Inst{49-41} = SRC1;
- let Inst{58-50} = SRC2;
- let Inst{60-59} = OMOD;
- let Inst{63-61} = NEG;
+ let Inst{40-32} = src0;
+ let Inst{49-41} = src1;
+ let Inst{58-50} = src2;
+ let Inst{60-59} = omod;
+ let Inst{63-61} = neg;
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP3 = 1;
}
class VOP3b <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
Enc64 <outs, ins, asm, pattern> {
- bits<8> VDST;
- bits<9> SRC0;
- bits<9> SRC1;
- bits<9> SRC2;
- bits<7> SDST;
- bits<2> OMOD;
- bits<3> NEG;
+ bits<8> dst;
+ bits<9> src0;
+ bits<9> src1;
+ bits<9> src2;
+ bits<7> sdst;
+ bits<2> omod;
+ bits<3> neg;
- let Inst{7-0} = VDST;
- let Inst{14-8} = SDST;
+ let Inst{7-0} = dst;
+ let Inst{14-8} = sdst;
let Inst{25-17} = op;
let Inst{31-26} = 0x34; //encoding
- let Inst{40-32} = SRC0;
- let Inst{49-41} = SRC1;
- let Inst{58-50} = SRC2;
- let Inst{60-59} = OMOD;
- let Inst{63-61} = NEG;
+ let Inst{40-32} = src0;
+ let Inst{49-41} = src1;
+ let Inst{58-50} = src2;
+ let Inst{60-59} = omod;
+ let Inst{63-61} = neg;
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP3 = 1;
}
class VOPC <bits<8> op, dag ins, string asm, list<dag> pattern> :
@@ -251,6 +279,7 @@ class VOPC <bits<8> op, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let VOPC = 1;
}
class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -281,6 +310,30 @@ class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Uses = [EXEC] in {
+class DS <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ Enc64 <outs, ins, asm, pattern> {
+
+ bits<8> vdst;
+ bits<1> gds;
+ bits<8> addr;
+ bits<8> data0;
+ bits<8> data1;
+ bits<8> offset0;
+ bits<8> offset1;
+
+ let Inst{7-0} = offset0;
+ let Inst{15-8} = offset1;
+ let Inst{17} = gds;
+ let Inst{25-18} = op;
+ let Inst{31-26} = 0x36; //encoding
+ let Inst{39-32} = addr;
+ let Inst{47-40} = data0;
+ let Inst{55-48} = data1;
+ let Inst{63-56} = vdst;
+
+ let LGKM_CNT = 1;
+}
+
class MUBUF <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
Enc64<outs, ins, asm, pattern> {
@@ -390,6 +443,7 @@ class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let VM_CNT = 1;
let EXP_CNT = 1;
+ let MIMG = 1;
}
def EXP : Enc64<
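
The new TSFlags bits added to InstSI above are what the isMIMG/isSMRD/isVOP*/isSALUInstr queries in SIInstrInfo.cpp (next file) test. A hedged sketch of the matching flag constants, assuming SIDefines.h assigns the same bit positions as the TSFlags{N} lets above:

// Sketch only: bit positions mirror the TSFlags assignments in InstSI above.
namespace SIInstrFlags {
enum {
  VM_CNT   = 1 << 0,
  EXP_CNT  = 1 << 1,
  LGKM_CNT = 1 << 2,
  MIMG     = 1 << 3,
  SMRD     = 1 << 4,
  VOP1     = 1 << 5,
  VOP2     = 1 << 6,
  VOP3     = 1 << 7,
  VOPC     = 1 << 8,
  SALU     = 1 << 9
};
}
// A query then reduces to a single test, e.g.
//   bool IsSMRD = get(Opcode).TSFlags & SIInstrFlags::SMRD;
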
diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
index 9a04c609b6c9..ab55c1b173ce 100644
--- a/lib/Target/R600/SIInstrInfo.cpp
+++ b/lib/Target/R600/SIInstrInfo.cpp
@@ -15,22 +15,26 @@
#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
+#include "SIDefines.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"
-#include <stdio.h>
using namespace llvm;
SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
: AMDGPUInstrInfo(tm),
- RI(tm, *this)
+ RI(tm)
{ }
const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
return RI;
}
+//===----------------------------------------------------------------------===//
+// TargetInstrInfo callbacks
+//===----------------------------------------------------------------------===//
+
void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
@@ -42,27 +46,27 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// never be necessary.
assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
- const int16_t Sub0_15[] = {
+ static const int16_t Sub0_15[] = {
AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
};
- const int16_t Sub0_7[] = {
+ static const int16_t Sub0_7[] = {
AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
};
- const int16_t Sub0_3[] = {
+ static const int16_t Sub0_3[] = {
AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
};
- const int16_t Sub0_2[] = {
+ static const int16_t Sub0_2[] = {
AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
};
- const int16_t Sub0_1[] = {
+ static const int16_t Sub0_1[] = {
AMDGPU::sub0, AMDGPU::sub1, 0
};
@@ -118,14 +122,14 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
} else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
- AMDGPU::SReg_32RegClass.contains(SrcReg));
+ AMDGPU::SReg_32RegClass.contains(SrcReg));
BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
return;
} else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
- AMDGPU::SReg_64RegClass.contains(SrcReg));
+ AMDGPU::SReg_64RegClass.contains(SrcReg));
Opcode = AMDGPU::V_MOV_B32_e32;
SubIndices = Sub0_1;
@@ -136,19 +140,19 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
} else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
- AMDGPU::SReg_128RegClass.contains(SrcReg));
+ AMDGPU::SReg_128RegClass.contains(SrcReg));
Opcode = AMDGPU::V_MOV_B32_e32;
SubIndices = Sub0_3;
} else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
- AMDGPU::SReg_256RegClass.contains(SrcReg));
+ AMDGPU::SReg_256RegClass.contains(SrcReg));
Opcode = AMDGPU::V_MOV_B32_e32;
SubIndices = Sub0_7;
} else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
- AMDGPU::SReg_512RegClass.contains(SrcReg));
+ AMDGPU::SReg_512RegClass.contains(SrcReg));
Opcode = AMDGPU::V_MOV_B32_e32;
SubIndices = Sub0_15;
@@ -168,7 +172,6 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
}
unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
-
int NewOpc;
// Try to map original to commuted opcode
@@ -185,11 +188,36 @@ unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
bool NewMI) const {
- if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
- !MI->getOperand(2).isReg())
+ MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
return 0;
- MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
+ // Cannot commute VOP2 if src0 is SGPR.
+ if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() &&
+ RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg())))
+ return 0;
+
+ if (!MI->getOperand(2).isReg()) {
+ // XXX: Commute instructions with FPImm operands
+ if (NewMI || MI->getOperand(2).isFPImm() ||
+ (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
+ return 0;
+ }
+
+ // XXX: Commute VOP3 instructions with abs and neg set.
+ if (isVOP3(MI->getOpcode()) &&
+ (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::abs)).getImm() ||
+ MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::neg)).getImm()))
+ return 0;
+
+ unsigned Reg = MI->getOperand(1).getReg();
+ MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
+ MI->getOperand(2).ChangeToRegister(Reg, false);
+ } else {
+ MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
+ }
if (MI)
MI->setDesc(get(commuteOpcode(MI->getOpcode())));
@@ -197,15 +225,12 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
return MI;
}
-MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
- int64_t Imm) const {
- MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
- MachineInstrBuilder MIB(*MF, MI);
- MIB.addReg(DstReg, RegState::Define);
- MIB.addImm(Imm);
-
- return MI;
-
+MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg,
+ unsigned SrcReg) const {
+ return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
+ DstReg).addReg(SrcReg);
}
bool SIInstrInfo::isMov(unsigned Opcode) const {
@@ -224,32 +249,397 @@ SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
return RC != &AMDGPU::EXECRegRegClass;
}
-//===----------------------------------------------------------------------===//
-// Indirect addressing callbacks
-//===----------------------------------------------------------------------===//
+int SIInstrInfo::isMIMG(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::MIMG;
+}
-unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
- unsigned Channel) const {
- assert(Channel == 0);
- return RegIndex;
+int SIInstrInfo::isSMRD(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::SMRD;
+}
+
+bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOP1;
+}
+
+bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOP2;
+}
+
+bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOP3;
+}
+
+bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOPC;
+}
+
+bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
+ return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
+}
+
+bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
+ if(MO.isImm()) {
+ return MO.getImm() >= -16 && MO.getImm() <= 64;
+ }
+ if (MO.isFPImm()) {
+ return MO.getFPImm()->isExactlyValue(0.0) ||
+ MO.getFPImm()->isExactlyValue(0.5) ||
+ MO.getFPImm()->isExactlyValue(-0.5) ||
+ MO.getFPImm()->isExactlyValue(1.0) ||
+ MO.getFPImm()->isExactlyValue(-1.0) ||
+ MO.getFPImm()->isExactlyValue(2.0) ||
+ MO.getFPImm()->isExactlyValue(-2.0) ||
+ MO.getFPImm()->isExactlyValue(4.0) ||
+ MO.getFPImm()->isExactlyValue(-4.0);
+ }
+ return false;
+}
+
+bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
+ return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
+}
+
+bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
+ StringRef &ErrInfo) const {
+ uint16_t Opcode = MI->getOpcode();
+ int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
+ int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
+ int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
+
+ // Verify VOP*
+ if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
+ unsigned ConstantBusCount = 0;
+ unsigned SGPRUsed = AMDGPU::NoRegister;
+ for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isUse() &&
+ !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
+
+ // EXEC register uses the constant bus.
+ if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
+ ++ConstantBusCount;
+
+ // SGPRs use the constant bus
+ if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
+ (!MO.isImplicit() &&
+ (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
+ AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
+ if (SGPRUsed != MO.getReg()) {
+ ++ConstantBusCount;
+ SGPRUsed = MO.getReg();
+ }
+ }
+ }
+ // Literal constants use the constant bus.
+ if (isLiteralConstant(MO))
+ ++ConstantBusCount;
+ }
+ if (ConstantBusCount > 1) {
+ ErrInfo = "VOP* instruction uses the constant bus more than once";
+ return false;
+ }
+ }
+
+ // Verify SRC1 for VOP2 and VOPC
+ if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
+ const MachineOperand &Src1 = MI->getOperand(Src1Idx);
+ if (Src1.isImm() || Src1.isFPImm()) {
+ ErrInfo = "VOP[2C] src1 cannot be an immediate.";
+ return false;
+ }
+ }
+
+ // Verify VOP3
+ if (isVOP3(Opcode)) {
+ if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
+ ErrInfo = "VOP3 src0 cannot be a literal constant.";
+ return false;
+ }
+ if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
+ ErrInfo = "VOP3 src1 cannot be a literal constant.";
+ return false;
+ }
+ if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
+ ErrInfo = "VOP3 src2 cannot be a literal constant.";
+ return false;
+ }
+ }
+ return true;
+}
+
+unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ default: return AMDGPU::INSTRUCTION_LIST_END;
+ case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
+ case AMDGPU::COPY: return AMDGPU::COPY;
+ case AMDGPU::PHI: return AMDGPU::PHI;
+ case AMDGPU::S_ADD_I32: return AMDGPU::V_ADD_I32_e32;
+ case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
+ case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
+ case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
+ case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
+ case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
+ case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
+ case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
+ case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
+ case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
+ }
+}
+
+bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
+ return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
+}
+
+const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
+ unsigned OpNo) const {
+ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ const MCInstrDesc &Desc = get(MI.getOpcode());
+ if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
+ Desc.OpInfo[OpNo].RegClass == -1)
+ return MRI.getRegClass(MI.getOperand(OpNo).getReg());
+
+ unsigned RCID = Desc.OpInfo[OpNo].RegClass;
+ return RI.getRegClass(RCID);
+}
+
+bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
+ switch (MI.getOpcode()) {
+ case AMDGPU::COPY:
+ case AMDGPU::REG_SEQUENCE:
+ return RI.hasVGPRs(getOpRegClass(MI, 0));
+ default:
+ return RI.hasVGPRs(getOpRegClass(MI, OpNo));
+ }
}
+void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
+ MachineBasicBlock::iterator I = MI;
+ MachineOperand &MO = MI->getOperand(OpIdx);
+ MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
+ const TargetRegisterClass *RC = RI.getRegClass(RCID);
+ unsigned Opcode = AMDGPU::V_MOV_B32_e32;
+ if (MO.isReg()) {
+ Opcode = AMDGPU::COPY;
+ } else if (RI.isSGPRClass(RC)) {
+ Opcode = AMDGPU::S_MOV_B32;
+ }
+
+ const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
+ unsigned Reg = MRI.createVirtualRegister(VRC);
+ BuildMI(*MI->getParent(), I, MI->getParent()->findDebugLoc(I), get(Opcode),
+ Reg).addOperand(MO);
+ MO.ChangeToRegister(Reg, false);
+}
+
+void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
+ MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::src0);
+ int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::src1);
+ int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::src2);
+
+ // Legalize VOP2
+ if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
+ MachineOperand &Src0 = MI->getOperand(Src0Idx);
+ MachineOperand &Src1 = MI->getOperand(Src1Idx);
+
+ // If the instruction implicitly reads VCC, we can't have any SGPR operands,
+ // so move any SGPR operands to VGPRs.
+ bool ReadsVCC = MI->readsRegister(AMDGPU::VCC, &RI);
+ if (ReadsVCC && Src0.isReg() &&
+ RI.isSGPRClass(MRI.getRegClass(Src0.getReg()))) {
+ legalizeOpWithMove(MI, Src0Idx);
+ return;
+ }
+
+ if (ReadsVCC && Src1.isReg() &&
+ RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
+ legalizeOpWithMove(MI, Src1Idx);
+ return;
+ }
+
+ // Legalize VOP2 instructions where src1 is not a VGPR. An SGPR input must
+ // be the first operand, and there can only be one.
+ if (Src1.isImm() || Src1.isFPImm() ||
+ (Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())))) {
+ if (MI->isCommutable()) {
+ if (commuteInstruction(MI))
+ return;
+ }
+ legalizeOpWithMove(MI, Src1Idx);
+ }
+ }
+
+ // XXX - Do any VOP3 instructions read VCC?
+ // Legalize VOP3
+ if (isVOP3(MI->getOpcode())) {
+ int VOP3Idx[3] = {Src0Idx, Src1Idx, Src2Idx};
+ unsigned SGPRReg = AMDGPU::NoRegister;
+ for (unsigned i = 0; i < 3; ++i) {
+ int Idx = VOP3Idx[i];
+ if (Idx == -1)
+ continue;
+ MachineOperand &MO = MI->getOperand(Idx);
+
+ if (MO.isReg()) {
+ if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
+ continue; // VGPRs are legal
+
+ assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");
+
+ if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
+ SGPRReg = MO.getReg();
+ // We can use one SGPR in each VOP3 instruction.
+ continue;
+ }
+ } else if (!isLiteralConstant(MO)) {
+ // If it is not a register and not a literal constant, then it must be
+ // an inline constant which is always legal.
+ continue;
+ }
+ // If we make it this far, then the operand is not legal and we must
+ // legalize it.
+ legalizeOpWithMove(MI, Idx);
+ }
+ }
+
+ // Legalize REG_SEQUENCE
+ // The register class of the operands must be the same type as the register
+ // class of the output.
+ if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
+ const TargetRegisterClass *RC = NULL, *SRC = NULL, *VRC = NULL;
+ for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
+ if (!MI->getOperand(i).isReg() ||
+ !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
+ continue;
+ const TargetRegisterClass *OpRC =
+ MRI.getRegClass(MI->getOperand(i).getReg());
+ if (RI.hasVGPRs(OpRC)) {
+ VRC = OpRC;
+ } else {
+ SRC = OpRC;
+ }
+ }
+
+ // If any of the operands are VGPR registers, then they all must be VGPRs,
+ // otherwise we will create illegal VGPR->SGPR copies when legalizing
+ // them.
+ if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
+ if (!VRC) {
+ assert(SRC);
+ VRC = RI.getEquivalentVGPRClass(SRC);
+ }
+ RC = VRC;
+ } else {
+ RC = SRC;
+ }
-int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
- llvm_unreachable("Unimplemented");
+ // Update all the operands so they have the same type.
+ for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
+ if (!MI->getOperand(i).isReg() ||
+ !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
+ continue;
+ unsigned DstReg = MRI.createVirtualRegister(RC);
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ get(AMDGPU::COPY), DstReg)
+ .addOperand(MI->getOperand(i));
+ MI->getOperand(i).setReg(DstReg);
+ }
+ }
}
-int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
- llvm_unreachable("Unimplemented");
+void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
+ SmallVector<MachineInstr *, 128> Worklist;
+ Worklist.push_back(&TopInst);
+
+ while (!Worklist.empty()) {
+ MachineInstr *Inst = Worklist.pop_back_val();
+ unsigned NewOpcode = getVALUOp(*Inst);
+ if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
+ continue;
+
+ MachineRegisterInfo &MRI = Inst->getParent()->getParent()->getRegInfo();
+
+ // Use the new VALU Opcode.
+ const MCInstrDesc &NewDesc = get(NewOpcode);
+ Inst->setDesc(NewDesc);
+
+ // Remove any references to SCC. Vector instructions can't read from it, and
+ // we're just about to add the implicit uses/defs of VCC, and we don't want
+ // both.
+ for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
+ MachineOperand &Op = Inst->getOperand(i);
+ if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
+ Inst->RemoveOperand(i);
+ }
+
+ // Add the implicit and explicit register definitions.
+ if (NewDesc.ImplicitUses) {
+ for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
+ unsigned Reg = NewDesc.ImplicitUses[i];
+ Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
+ }
+ }
+
+ if (NewDesc.ImplicitDefs) {
+ for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
+ unsigned Reg = NewDesc.ImplicitDefs[i];
+ Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
+ }
+ }
+
+ legalizeOperands(Inst);
+
+ // Update the destination register class.
+ const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);
+
+ switch (Inst->getOpcode()) {
+ // For target instructions, getOpRegClass just returns the virtual
+ // register class associated with the operand, so we need to find an
+ // equivalent VGPR register class in order to move the instruction to the
+ // VALU.
+ case AMDGPU::COPY:
+ case AMDGPU::PHI:
+ case AMDGPU::REG_SEQUENCE:
+ if (RI.hasVGPRs(NewDstRC))
+ continue;
+ NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
+ if (!NewDstRC)
+ continue;
+ break;
+ default:
+ break;
+ }
+
+ unsigned DstReg = Inst->getOperand(0).getReg();
+ unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
+ MRI.replaceRegWith(DstReg, NewDstReg);
+
+ for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
+ E = MRI.use_end(); I != E; ++I) {
+ MachineInstr &UseMI = *I;
+ if (!canReadVGPR(UseMI, I.getOperandNo())) {
+ Worklist.push_back(&UseMI);
+ }
+ }
+ }
}
-const TargetRegisterClass *SIInstrInfo::getIndirectAddrStoreRegClass(
- unsigned SourceReg) const {
- llvm_unreachable("Unimplemented");
+//===----------------------------------------------------------------------===//
+// Indirect addressing callbacks
+//===----------------------------------------------------------------------===//
+
+unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
+ unsigned Channel) const {
+ assert(Channel == 0);
+ return RegIndex;
}
-const TargetRegisterClass *SIInstrInfo::getIndirectAddrLoadRegClass() const {
- llvm_unreachable("Unimplemented");
+const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
+ return &AMDGPU::VReg_32RegClass;
}
MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
@@ -257,7 +647,17 @@ MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
MachineBasicBlock::iterator I,
unsigned ValueReg,
unsigned Address, unsigned OffsetReg) const {
- llvm_unreachable("Unimplemented");
+ const DebugLoc &DL = MBB->findDebugLoc(I);
+ unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
+ getIndirectIndexBegin(*MBB->getParent()));
+
+ return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
+ .addReg(IndirectBaseReg, RegState::Define)
+ .addOperand(I->getOperand(0))
+ .addReg(IndirectBaseReg)
+ .addReg(OffsetReg)
+ .addImm(0)
+ .addReg(ValueReg);
}
MachineInstrBuilder SIInstrInfo::buildIndirectRead(
@@ -265,9 +665,43 @@ MachineInstrBuilder SIInstrInfo::buildIndirectRead(
MachineBasicBlock::iterator I,
unsigned ValueReg,
unsigned Address, unsigned OffsetReg) const {
- llvm_unreachable("Unimplemented");
+ const DebugLoc &DL = MBB->findDebugLoc(I);
+ unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
+ getIndirectIndexBegin(*MBB->getParent()));
+
+ return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
+ .addOperand(I->getOperand(0))
+ .addOperand(I->getOperand(1))
+ .addReg(IndirectBaseReg)
+ .addReg(OffsetReg)
+ .addImm(0);
+
}
-const TargetRegisterClass *SIInstrInfo::getSuperIndirectRegClass() const {
- llvm_unreachable("Unimplemented");
+void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
+ const MachineFunction &MF) const {
+ int End = getIndirectIndexEnd(MF);
+ int Begin = getIndirectIndexBegin(MF);
+
+ if (End == -1)
+ return;
+
+
+ for (int Index = Begin; Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));
+
+ for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));
+
+ for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));
+
+ for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));
+
+ for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));
+
+ for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}
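
isInlineConstant above encodes the SI inline-constant set: integers from -16 to 64 and a short list of floating-point values; any other immediate is a literal constant that occupies the constant bus. A standalone sketch of the same ranges, expressed outside the MachineOperand interface:

// Sketch only: the value ranges isInlineConstant accepts.  Anything immediate
// outside these sets would be a literal constant.
#include <cstdint>

static bool isInlineIntImm(int64_t Imm) {
  return Imm >= -16 && Imm <= 64;
}

static bool isInlineFPImm(double V) {
  return V == 0.0 || V == 0.5 || V == -0.5 || V == 1.0 || V == -1.0 ||
         V == 2.0 || V == -2.0 || V == 4.0 || V == -4.0;
}
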
diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/R600/SIInstrInfo.h
index 87eff4d6c95c..4af63481e3af 100644
--- a/lib/Target/R600/SIInstrInfo.h
+++ b/lib/Target/R600/SIInstrInfo.h
@@ -1,4 +1,4 @@
-//===-- SIInstrInfo.h - SI Instruction Info Interface ---------------------===//
+//===-- SIInstrInfo.h - SI Instruction Info Interface -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,6 +25,14 @@ class SIInstrInfo : public AMDGPUInstrInfo {
private:
const SIRegisterInfo RI;
+ MachineInstrBuilder buildIndirectIndexLoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned OffsetVGPR,
+ unsigned MovRelOp,
+ unsigned Dst,
+ unsigned Src0) const;
+ // If you add or remove instructions from this function, you will
+
public:
explicit SIInstrInfo(AMDGPUTargetMachine &tm);
@@ -40,25 +48,65 @@ public:
virtual MachineInstr *commuteInstruction(MachineInstr *MI,
bool NewMI=false) const;
- virtual MachineInstr * getMovImmInstr(MachineFunction *MF, unsigned DstReg,
- int64_t Imm) const;
-
virtual unsigned getIEQOpcode() const { assert(!"Implement"); return 0;}
+ MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, unsigned SrcReg) const;
virtual bool isMov(unsigned Opcode) const;
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
-
- virtual int getIndirectIndexBegin(const MachineFunction &MF) const;
-
- virtual int getIndirectIndexEnd(const MachineFunction &MF) const;
+ int isMIMG(uint16_t Opcode) const;
+ int isSMRD(uint16_t Opcode) const;
+ bool isVOP1(uint16_t Opcode) const;
+ bool isVOP2(uint16_t Opcode) const;
+ bool isVOP3(uint16_t Opcode) const;
+ bool isVOPC(uint16_t Opcode) const;
+ bool isInlineConstant(const MachineOperand &MO) const;
+ bool isLiteralConstant(const MachineOperand &MO) const;
+
+ virtual bool verifyInstruction(const MachineInstr *MI,
+ StringRef &ErrInfo) const;
+
+ bool isSALUInstr(const MachineInstr &MI) const;
+ static unsigned getVALUOp(const MachineInstr &MI);
+ bool isSALUOpSupportedOnVALU(const MachineInstr &MI) const;
+
+ /// \brief Return the correct register class for \p OpNo. For target-specific
+ /// instructions, this will return the register class that has been defined
+ /// in tablegen. For generic instructions, like REG_SEQUENCE, it will return
+ /// the register class of its machine operand, which can be used to infer the
+ /// correct register class based on the other operands.
+ const TargetRegisterClass *getOpRegClass(const MachineInstr &MI,
+ unsigned OpNo) const;
+
+ /// \returns true if it is legal for the operand at index \p OpNo
+ /// to read a VGPR.
+ bool canReadVGPR(const MachineInstr &MI, unsigned OpNo) const;
+
+ /// \brief Legalize the \p OpIndex operand of this instruction by inserting
+ /// a MOV. For example:
+ /// ADD_I32_e32 VGPR0, 15
+ /// to
+ /// MOV VGPR1, 15
+ /// ADD_I32_e32 VGPR0, VGPR1
+ ///
+ /// If the operand being legalized is a register, then a COPY will be used
+ /// instead of MOV.
+ void legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const;
+
+ /// \brief Legalize all operands in this instruction. This function may
+ /// create new instructions and insert them before \p MI.
+ void legalizeOperands(MachineInstr *MI) const;
+
+ /// \brief Replace this instruction's opcode with the equivalent VALU
+ /// opcode. This function will also move the users of \p MI to the
+ /// VALU if necessary.
+ void moveToVALU(MachineInstr &MI) const;
virtual unsigned calculateIndirectAddress(unsigned RegIndex,
unsigned Channel) const;
- virtual const TargetRegisterClass *getIndirectAddrStoreRegClass(
- unsigned SourceReg) const;
-
- virtual const TargetRegisterClass *getIndirectAddrLoadRegClass() const;
+ virtual const TargetRegisterClass *getIndirectAddrRegClass() const;
virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
@@ -71,16 +119,18 @@ public:
unsigned ValueReg,
unsigned Address,
unsigned OffsetReg) const;
+ void reserveIndirectRegisters(BitVector &Reserved,
+ const MachineFunction &MF) const;
- virtual const TargetRegisterClass *getSuperIndirectRegClass() const;
- };
+ void LoadM0(MachineInstr *MoveRel, MachineBasicBlock::iterator I,
+ unsigned SavReg, unsigned IndexReg) const;
+};
namespace AMDGPU {
int getVOPe64(uint16_t Opcode);
int getCommuteRev(uint16_t Opcode);
int getCommuteOrig(uint16_t Opcode);
- int isMIMG(uint16_t Opcode);
} // End namespace AMDGPU
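
moveToVALU, declared above and implemented in the SIInstrInfo.cpp hunk earlier in this patch, propagates the SALU-to-VALU move through a worklist: once an instruction is rewritten to its VALU form, any user that cannot read a VGPR must be moved as well. A hedged, LLVM-free sketch of that worklist shape (names are illustrative):

// Sketch only: the worklist propagation used by moveToVALU.
#include <vector>

struct Inst {};  // stand-in for MachineInstr

template <typename RewriteFn, typename UsersFn>
static void moveToVALUSketch(Inst *Top, RewriteFn RewriteToVALU,
                             UsersFn UsersThatCannotReadVGPR) {
  std::vector<Inst *> Worklist = {Top};
  while (!Worklist.empty()) {
    Inst *I = Worklist.back();
    Worklist.pop_back();
    if (!RewriteToVALU(I))          // no VALU equivalent: leave it on the SALU
      continue;
    for (Inst *U : UsersThatCannotReadVGPR(I))
      Worklist.push_back(U);        // users that cannot read a VGPR move too
  }
}
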
diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
index c8aecb707eec..4cd0daa55c5f 100644
--- a/lib/Target/R600/SIInstrInfo.td
+++ b/lib/Target/R600/SIInstrInfo.td
@@ -16,19 +16,64 @@ def SIadd64bit32bit : SDNode<"ISD::ADD",
SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisVT<0, i64>, SDTCisVT<2, i32>]>
>;
+def SIload_constant : SDNode<"AMDGPUISD::LOAD_CONSTANT",
+ SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisVT<1, i128>, SDTCisVT<2, i32>]>,
+ [SDNPMayLoad, SDNPMemOperand]
+>;
+
+def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT",
+ SDTypeProfile<0, 13,
+ [SDTCisVT<0, i128>, // rsrc(SGPR)
+ SDTCisVT<1, iAny>, // vdata(VGPR)
+ SDTCisVT<2, i32>, // num_channels(imm)
+ SDTCisVT<3, i32>, // vaddr(VGPR)
+ SDTCisVT<4, i32>, // soffset(SGPR)
+ SDTCisVT<5, i32>, // inst_offset(imm)
+ SDTCisVT<6, i32>, // dfmt(imm)
+ SDTCisVT<7, i32>, // nfmt(imm)
+ SDTCisVT<8, i32>, // offen(imm)
+ SDTCisVT<9, i32>, // idxen(imm)
+ SDTCisVT<10, i32>, // glc(imm)
+ SDTCisVT<11, i32>, // slc(imm)
+ SDTCisVT<12, i32> // tfe(imm)
+ ]>,
+ [SDNPMayStore, SDNPMemOperand, SDNPHasChain]
+>;
+
+def SIload_input : SDNode<"AMDGPUISD::LOAD_INPUT",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisVT<1, i128>, SDTCisVT<2, i16>,
+ SDTCisVT<3, i32>]>
+>;
+
+class SDSample<string opcode> : SDNode <opcode,
+ SDTypeProfile<1, 4, [SDTCisVT<0, v4f32>, SDTCisVT<2, v32i8>,
+ SDTCisVT<3, i128>, SDTCisVT<4, i32>]>
+>;
+
+def SIsample : SDSample<"AMDGPUISD::SAMPLE">;
+def SIsampleb : SDSample<"AMDGPUISD::SAMPLEB">;
+def SIsampled : SDSample<"AMDGPUISD::SAMPLED">;
+def SIsamplel : SDSample<"AMDGPUISD::SAMPLEL">;
+
// Transformation function, extract the lower 32bit of a 64bit immediate
def LO32 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue() & 0xffffffff, MVT::i32);
}]>;
+def LO32f : SDNodeXForm<fpimm, [{
+ APInt V = N->getValueAPF().bitcastToAPInt().trunc(32);
+ return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32);
+}]>;
+
// Transformation function, extract the upper 32bit of a 64bit immediate
def HI32 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue() >> 32, MVT::i32);
}]>;
-def SIbuffer_store : SDNode<"AMDGPUISD::BUFFER_STORE",
- SDTypeProfile<0, 3, [SDTCisPtrTy<1>, SDTCisInt<2>]>,
- [SDNPHasChain, SDNPMayStore]>;
+def HI32f : SDNodeXForm<fpimm, [{
+ APInt V = N->getValueAPF().bitcastToAPInt().lshr(32).trunc(32);
+ return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32);
+}]>;
def IMM8bitDWORD : ImmLeaf <
i32, [{
@@ -39,15 +84,47 @@ def IMM8bitDWORD : ImmLeaf <
}]>
>;
-def IMM12bit : ImmLeaf <
- i16,
- [{return isUInt<12>(Imm);}]
+def as_i1imm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i1);
+}]>;
+
+def as_i8imm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i8);
+}]>;
+
+def as_i16imm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i16);
+}]>;
+
+def IMM12bit : PatLeaf <(imm),
+ [{return isUInt<12>(N->getZExtValue());}]
>;
class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
- return ((const SITargetLowering &)TLI).analyzeImmediate(N) == 0;
+ return
+ (*(const SITargetLowering *)getTargetLowering()).analyzeImmediate(N) == 0;
+}]>;
+
+class SGPRImm <dag frag> : PatLeaf<frag, [{
+ if (TM.getSubtarget<AMDGPUSubtarget>().getGeneration() <
+ AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ return false;
+ }
+ const SIRegisterInfo *SIRI =
+ static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
+ for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
+ U != E; ++U) {
+ if (SIRI->isSGPRClass(getOperandRegClass(*U, U.getOperandNo()))) {
+ return true;
+ }
+ }
+ return false;
}]>;
+def FRAMEri64 : Operand<iPTR> {
+ let MIOperandInfo = (ops SReg_32:$ptr, i32imm:$index);
+}
+
//===----------------------------------------------------------------------===//
// SI assembler operands
//===----------------------------------------------------------------------===//
@@ -99,6 +176,11 @@ class SOP2_64 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
opName#" $dst, $src0, $src1", pattern
>;
+class SOP2_SHIFT_64 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
+ op, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_32:$src1),
+ opName#" $dst, $src0, $src1", pattern
+>;
+
class SOPC_32 <bits<7> op, string opName, list<dag> pattern> : SOPC <
op, (outs SCCReg:$dst), (ins SSrc_32:$src0, SSrc_32:$src1),
opName#" $dst, $src0, $src1", pattern
@@ -163,8 +245,8 @@ multiclass VOP1_Helper <bits<8> op, RegisterClass drc, RegisterClass src,
i32imm:$omod, i32imm:$neg),
opName#"_e64 $dst, $src0, $abs, $clamp, $omod, $neg", []
>, VOP <opName> {
- let SRC1 = SIOperand.ZERO;
- let SRC2 = SIOperand.ZERO;
+ let src1 = SIOperand.ZERO;
+ let src2 = SIOperand.ZERO;
}
}
@@ -174,6 +256,12 @@ multiclass VOP1_32 <bits<8> op, string opName, list<dag> pattern>
multiclass VOP1_64 <bits<8> op, string opName, list<dag> pattern>
: VOP1_Helper <op, VReg_64, VSrc_64, opName, pattern>;
+multiclass VOP1_32_64 <bits<8> op, string opName, list<dag> pattern>
+ : VOP1_Helper <op, VReg_32, VSrc_64, opName, pattern>;
+
+multiclass VOP1_64_32 <bits<8> op, string opName, list<dag> pattern>
+ : VOP1_Helper <op, VReg_64, VSrc_32, opName, pattern>;
+
multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc,
string opName, list<dag> pattern, string revOp> {
def _e32 : VOP2 <
@@ -189,7 +277,7 @@ multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc,
i32imm:$omod, i32imm:$neg),
opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
>, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
- let SRC2 = SIOperand.ZERO;
+ let src2 = SIOperand.ZERO;
}
}
@@ -217,11 +305,11 @@ multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern,
i32imm:$omod, i32imm:$neg),
opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
>, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
- let SRC2 = SIOperand.ZERO;
+ let src2 = SIOperand.ZERO;
/* the VOP2 variant puts the carry out into VCC, the VOP3 variant
can write it into any SGPR. We currently don't use the carry out,
so for now hardcode it to VCC as well */
- let SDST = SIOperand.VCC;
+ let sdst = SIOperand.VCC;
}
}
@@ -244,7 +332,7 @@ multiclass VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc,
[(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), arc:$src1, cond)))]
)
>, VOP <opName> {
- let SRC2 = SIOperand.ZERO;
+ let src2 = SIOperand.ZERO;
}
}
@@ -263,6 +351,19 @@ class VOP3_32 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
>, VOP <opName>;
+class VOP3_64_Shift <bits <9> op, string opName, list<dag> pattern> : VOP3 <
+ op, (outs VReg_64:$dst),
+ (ins VSrc_64:$src0, VSrc_32:$src1),
+ opName#" $dst, $src0, $src1", pattern
+>, VOP <opName> {
+
+ let src2 = SIOperand.ZERO;
+ let abs = 0;
+ let clamp = 0;
+ let omod = 0;
+ let neg = 0;
+}
+
class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
op, (outs VReg_64:$dst),
(ins VSrc_64:$src0, VSrc_64:$src1, VSrc_64:$src2,
@@ -274,6 +375,41 @@ class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
// Vector I/O classes
//===----------------------------------------------------------------------===//
+class DS_Load_Helper <bits<8> op, string asm, RegisterClass regClass> : DS <
+ op,
+ (outs regClass:$vdst),
+ (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, VReg_32:$data1,
+ i8imm:$offset0, i8imm:$offset1),
+ asm#" $vdst, $gds, $addr, $data0, $data1, $offset0, $offset1, [M0]",
+ []> {
+ let mayLoad = 1;
+ let mayStore = 0;
+}
+
+class DS_Store_Helper <bits<8> op, string asm, RegisterClass regClass> : DS <
+ op,
+ (outs),
+ (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, VReg_32:$data1,
+ i8imm:$offset0, i8imm:$offset1),
+ asm#" $gds, $addr, $data0, $data1, $offset0, $offset1, [M0]",
+ []> {
+ let mayStore = 1;
+ let mayLoad = 0;
+ let vdst = 0;
+}
+
+class DS_1A1D_RET <bits<8> op, string asm, RegisterClass rc> : DS <
+ op,
+ (outs rc:$vdst),
+ (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, i8imm:$offset0,
+ i8imm:$offset1),
+ asm#" $gds, $vdst, $addr, $data0, $offset0, $offset1, [M0]",
+ []> {
+ let mayStore = 1;
+ let mayLoad = 1;
+ let data1 = 0;
+}
+
class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF <
op,
(outs),
@@ -287,31 +423,41 @@ class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBU
let mayLoad = 0;
}
-class MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass> : MUBUF <
- op,
- (outs regClass:$vdata),
- (ins i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64,
- i1imm:$lds, VReg_32:$vaddr, SReg_128:$srsrc, i1imm:$slc,
- i1imm:$tfe, SSrc_32:$soffset),
- asm#" $vdata, $offset, $offen, $idxen, $glc, $addr64, "
- #"$lds, $vaddr, $srsrc, $slc, $tfe, $soffset",
- []> {
- let mayLoad = 1;
- let mayStore = 0;
+multiclass MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass> {
+
+ let glc = 0, lds = 0, slc = 0, tfe = 0, soffset = 128 /* ZERO */,
+ mayLoad = 1 in {
+
+ let offen = 1, idxen = 0, addr64 = 0, offset = 0 in {
+ def _OFFEN : MUBUF <op, (outs regClass:$vdata),
+ (ins SReg_128:$srsrc, VReg_32:$vaddr),
+ asm#" $vdata, $srsrc + $vaddr", []>;
+ }
+
+ let offen = 0, idxen = 1, addr64 = 0 in {
+ def _IDXEN : MUBUF <op, (outs regClass:$vdata),
+ (ins SReg_128:$srsrc, VReg_32:$vaddr, i16imm:$offset),
+ asm#" $vdata, $srsrc[$vaddr] + $offset", []>;
+ }
+
+ let offen = 0, idxen = 0, addr64 = 1 in {
+ def _ADDR64 : MUBUF <op, (outs regClass:$vdata),
+ (ins SReg_128:$srsrc, VReg_64:$vaddr, i16imm:$offset),
+ asm#" $vdata, $srsrc + $vaddr + $offset", []>;
+ }
+ }
}
-class MUBUF_Store_Helper <bits<7> op, string name, RegisterClass vdataClass,
- ValueType VT> :
- MUBUF <op, (outs), (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr),
- name#" $vdata, $srsrc + $vaddr",
- [(SIbuffer_store (VT vdataClass:$vdata), (i128 SReg_128:$srsrc),
- (i64 VReg_64:$vaddr))]> {
+class MUBUF_Store_Helper <bits<7> op, string name, RegisterClass vdataClass> :
+ MUBUF <op, (outs), (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr,
+ i16imm:$offset),
+ name#" $vdata, $srsrc + $vaddr + $offset",
+ []> {
let mayLoad = 0;
let mayStore = 1;
// Encoding
- let offset = 0;
let offen = 0;
let idxen = 0;
let glc = 0;
@@ -335,11 +481,18 @@ class MTBUF_Load_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF
let mayStore = 0;
}
-class MIMG_NoSampler_Helper <bits<7> op, string asm> : MIMG <
+class MIMG_Mask <string op, int channels> {
+ string Op = op;
+ int Channels = channels;
+}
+
+class MIMG_NoSampler_Helper <bits<7> op, string asm,
+ RegisterClass dst_rc,
+ RegisterClass src_rc> : MIMG <
op,
- (outs VReg_128:$vdata),
+ (outs dst_rc:$vdata),
(ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
- i1imm:$tfe, i1imm:$lwe, i1imm:$slc, unknown:$vaddr,
+ i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
SReg_256:$srsrc),
asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
#" $tfe, $lwe, $slc, $vaddr, $srsrc",
@@ -350,11 +503,31 @@ class MIMG_NoSampler_Helper <bits<7> op, string asm> : MIMG <
let hasPostISelHook = 1;
}
-class MIMG_Sampler_Helper <bits<7> op, string asm> : MIMG <
+multiclass MIMG_NoSampler_Src_Helper <bits<7> op, string asm,
+ RegisterClass dst_rc,
+ int channels> {
+ def _V1 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_32>,
+ MIMG_Mask<asm#"_V1", channels>;
+ def _V2 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_64>,
+ MIMG_Mask<asm#"_V2", channels>;
+ def _V4 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_128>,
+ MIMG_Mask<asm#"_V4", channels>;
+}
+
+multiclass MIMG_NoSampler <bits<7> op, string asm> {
+ defm _V1 : MIMG_NoSampler_Src_Helper <op, asm, VReg_32, 1>;
+ defm _V2 : MIMG_NoSampler_Src_Helper <op, asm, VReg_64, 2>;
+ defm _V3 : MIMG_NoSampler_Src_Helper <op, asm, VReg_96, 3>;
+ defm _V4 : MIMG_NoSampler_Src_Helper <op, asm, VReg_128, 4>;
+}
+
+class MIMG_Sampler_Helper <bits<7> op, string asm,
+ RegisterClass dst_rc,
+ RegisterClass src_rc> : MIMG <
op,
- (outs VReg_128:$vdata),
+ (outs dst_rc:$vdata),
(ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
- i1imm:$tfe, i1imm:$lwe, i1imm:$slc, unknown:$vaddr,
+ i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
SReg_256:$srsrc, SReg_128:$ssamp),
asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
#" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp",
@@ -364,6 +537,28 @@ class MIMG_Sampler_Helper <bits<7> op, string asm> : MIMG <
let hasPostISelHook = 1;
}
+multiclass MIMG_Sampler_Src_Helper <bits<7> op, string asm,
+ RegisterClass dst_rc,
+ int channels> {
+ def _V1 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_32>,
+ MIMG_Mask<asm#"_V1", channels>;
+ def _V2 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_64>,
+ MIMG_Mask<asm#"_V2", channels>;
+ def _V4 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_128>,
+ MIMG_Mask<asm#"_V4", channels>;
+ def _V8 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_256>,
+ MIMG_Mask<asm#"_V8", channels>;
+ def _V16 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_512>,
+ MIMG_Mask<asm#"_V16", channels>;
+}
+
+multiclass MIMG_Sampler <bits<7> op, string asm> {
+ defm _V1 : MIMG_Sampler_Src_Helper<op, asm, VReg_32, 1>;
+ defm _V2 : MIMG_Sampler_Src_Helper<op, asm, VReg_64, 2>;
+ defm _V3 : MIMG_Sampler_Src_Helper<op, asm, VReg_96, 3>;
+ defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4>;
+}
+
//===----------------------------------------------------------------------===//
// Vector instruction mappings
//===----------------------------------------------------------------------===//
@@ -386,6 +581,14 @@ def getCommuteRev : InstrMapping {
let ValueCols = [["0"]];
}
+def getMaskedMIMGOp : InstrMapping {
+ let FilterClass = "MIMG_Mask";
+ let RowFields = ["Op"];
+ let ColFields = ["Channels"];
+ let KeyCol = ["4"];
+ let ValueCols = [["1"], ["2"], ["3"] ];
+}
+
// Maps an commuted opcode to its original version
def getCommuteOrig : InstrMapping {
let FilterClass = "VOP2_REV";
@@ -395,13 +598,4 @@ def getCommuteOrig : InstrMapping {
let ValueCols = [["1"]];
}
-// Test if the supplied opcode is an MIMG instruction
-def isMIMG : InstrMapping {
- let FilterClass = "MIMG";
- let RowFields = ["Inst"];
- let ColFields = ["Size"];
- let KeyCol = ["8"];
- let ValueCols = [["8"]];
-}
-
include "SIInstructions.td"
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 0d50c5d84999..76f05eb49655 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -22,8 +22,10 @@ def InterpSlot : Operand<i32> {
let PrintMethod = "printInterpSlot";
}
-def isSI : Predicate<"Subtarget.device()"
- "->getGeneration() == AMDGPUDeviceInfo::HD7XXX">;
+def isSI : Predicate<"Subtarget.getGeneration() "
+ ">= AMDGPUSubtarget::SOUTHERN_ISLANDS">;
+
+def WAIT_FLAG : InstFlag<"printWaitFlag">;
let Predicates = [isSI] in {
@@ -126,8 +128,11 @@ def S_CMPK_LT_U32 : SOPK_32 <0x0000000d, "S_CMPK_LT_U32", []>;
def S_CMPK_LE_U32 : SOPK_32 <0x0000000e, "S_CMPK_LE_U32", []>;
} // End isCompare = 1
-def S_ADDK_I32 : SOPK_32 <0x0000000f, "S_ADDK_I32", []>;
-def S_MULK_I32 : SOPK_32 <0x00000010, "S_MULK_I32", []>;
+let Defs = [SCC], isCommutable = 1 in {
+ def S_ADDK_I32 : SOPK_32 <0x0000000f, "S_ADDK_I32", []>;
+ def S_MULK_I32 : SOPK_32 <0x00000010, "S_MULK_I32", []>;
+}
+
//def S_CBRANCH_I_FORK : SOPK_ <0x00000011, "S_CBRANCH_I_FORK", []>;
def S_GETREG_B32 : SOPK_32 <0x00000012, "S_GETREG_B32", []>;
def S_SETREG_B32 : SOPK_32 <0x00000013, "S_SETREG_B32", []>;
@@ -138,19 +143,19 @@ def S_GETREG_REGRD_B32 : SOPK_32 <0x00000014, "S_GETREG_REGRD_B32", []>;
let isCompare = 1 in {
defm V_CMP_F_F32 : VOPC_32 <0x00000000, "V_CMP_F_F32">;
-defm V_CMP_LT_F32 : VOPC_32 <0x00000001, "V_CMP_LT_F32", f32, COND_LT>;
-defm V_CMP_EQ_F32 : VOPC_32 <0x00000002, "V_CMP_EQ_F32", f32, COND_EQ>;
-defm V_CMP_LE_F32 : VOPC_32 <0x00000003, "V_CMP_LE_F32", f32, COND_LE>;
-defm V_CMP_GT_F32 : VOPC_32 <0x00000004, "V_CMP_GT_F32", f32, COND_GT>;
-defm V_CMP_LG_F32 : VOPC_32 <0x00000005, "V_CMP_LG_F32", f32, COND_NE>;
-defm V_CMP_GE_F32 : VOPC_32 <0x00000006, "V_CMP_GE_F32", f32, COND_GE>;
-defm V_CMP_O_F32 : VOPC_32 <0x00000007, "V_CMP_O_F32">;
-defm V_CMP_U_F32 : VOPC_32 <0x00000008, "V_CMP_U_F32">;
+defm V_CMP_LT_F32 : VOPC_32 <0x00000001, "V_CMP_LT_F32", f32, COND_OLT>;
+defm V_CMP_EQ_F32 : VOPC_32 <0x00000002, "V_CMP_EQ_F32", f32, COND_OEQ>;
+defm V_CMP_LE_F32 : VOPC_32 <0x00000003, "V_CMP_LE_F32", f32, COND_OLE>;
+defm V_CMP_GT_F32 : VOPC_32 <0x00000004, "V_CMP_GT_F32", f32, COND_OGT>;
+defm V_CMP_LG_F32 : VOPC_32 <0x00000005, "V_CMP_LG_F32">;
+defm V_CMP_GE_F32 : VOPC_32 <0x00000006, "V_CMP_GE_F32", f32, COND_OGE>;
+defm V_CMP_O_F32 : VOPC_32 <0x00000007, "V_CMP_O_F32", f32, COND_O>;
+defm V_CMP_U_F32 : VOPC_32 <0x00000008, "V_CMP_U_F32", f32, COND_UO>;
defm V_CMP_NGE_F32 : VOPC_32 <0x00000009, "V_CMP_NGE_F32">;
defm V_CMP_NLG_F32 : VOPC_32 <0x0000000a, "V_CMP_NLG_F32">;
defm V_CMP_NGT_F32 : VOPC_32 <0x0000000b, "V_CMP_NGT_F32">;
defm V_CMP_NLE_F32 : VOPC_32 <0x0000000c, "V_CMP_NLE_F32">;
-defm V_CMP_NEQ_F32 : VOPC_32 <0x0000000d, "V_CMP_NEQ_F32", f32, COND_NE>;
+defm V_CMP_NEQ_F32 : VOPC_32 <0x0000000d, "V_CMP_NEQ_F32", f32, COND_UNE>;
defm V_CMP_NLT_F32 : VOPC_32 <0x0000000e, "V_CMP_NLT_F32">;
defm V_CMP_TRU_F32 : VOPC_32 <0x0000000f, "V_CMP_TRU_F32">;
@@ -176,19 +181,19 @@ defm V_CMPX_TRU_F32 : VOPC_32 <0x0000001f, "V_CMPX_TRU_F32">;
} // End hasSideEffects = 1, Defs = [EXEC]
defm V_CMP_F_F64 : VOPC_64 <0x00000020, "V_CMP_F_F64">;
-defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64">;
-defm V_CMP_EQ_F64 : VOPC_64 <0x00000022, "V_CMP_EQ_F64">;
-defm V_CMP_LE_F64 : VOPC_64 <0x00000023, "V_CMP_LE_F64">;
-defm V_CMP_GT_F64 : VOPC_64 <0x00000024, "V_CMP_GT_F64">;
+defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64", f64, COND_OLT>;
+defm V_CMP_EQ_F64 : VOPC_64 <0x00000022, "V_CMP_EQ_F64", f64, COND_OEQ>;
+defm V_CMP_LE_F64 : VOPC_64 <0x00000023, "V_CMP_LE_F64", f64, COND_OLE>;
+defm V_CMP_GT_F64 : VOPC_64 <0x00000024, "V_CMP_GT_F64", f64, COND_OGT>;
defm V_CMP_LG_F64 : VOPC_64 <0x00000025, "V_CMP_LG_F64">;
-defm V_CMP_GE_F64 : VOPC_64 <0x00000026, "V_CMP_GE_F64">;
-defm V_CMP_O_F64 : VOPC_64 <0x00000027, "V_CMP_O_F64">;
-defm V_CMP_U_F64 : VOPC_64 <0x00000028, "V_CMP_U_F64">;
+defm V_CMP_GE_F64 : VOPC_64 <0x00000026, "V_CMP_GE_F64", f64, COND_OGE>;
+defm V_CMP_O_F64 : VOPC_64 <0x00000027, "V_CMP_O_F64", f64, COND_O>;
+defm V_CMP_U_F64 : VOPC_64 <0x00000028, "V_CMP_U_F64", f64, COND_UO>;
defm V_CMP_NGE_F64 : VOPC_64 <0x00000029, "V_CMP_NGE_F64">;
defm V_CMP_NLG_F64 : VOPC_64 <0x0000002a, "V_CMP_NLG_F64">;
defm V_CMP_NGT_F64 : VOPC_64 <0x0000002b, "V_CMP_NGT_F64">;
defm V_CMP_NLE_F64 : VOPC_64 <0x0000002c, "V_CMP_NLE_F64">;
-defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64">;
+defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64", f64, COND_UNE>;
defm V_CMP_NLT_F64 : VOPC_64 <0x0000002e, "V_CMP_NLT_F64">;
defm V_CMP_TRU_F64 : VOPC_64 <0x0000002f, "V_CMP_TRU_F64">;
@@ -290,12 +295,12 @@ defm V_CMPSX_TRU_F64 : VOPC_64 <0x0000007f, "V_CMPSX_TRU_F64">;
} // End hasSideEffects = 1, Defs = [EXEC]
defm V_CMP_F_I32 : VOPC_32 <0x00000080, "V_CMP_F_I32">;
-defm V_CMP_LT_I32 : VOPC_32 <0x00000081, "V_CMP_LT_I32", i32, COND_LT>;
+defm V_CMP_LT_I32 : VOPC_32 <0x00000081, "V_CMP_LT_I32", i32, COND_SLT>;
defm V_CMP_EQ_I32 : VOPC_32 <0x00000082, "V_CMP_EQ_I32", i32, COND_EQ>;
-defm V_CMP_LE_I32 : VOPC_32 <0x00000083, "V_CMP_LE_I32", i32, COND_LE>;
-defm V_CMP_GT_I32 : VOPC_32 <0x00000084, "V_CMP_GT_I32", i32, COND_GT>;
+defm V_CMP_LE_I32 : VOPC_32 <0x00000083, "V_CMP_LE_I32", i32, COND_SLE>;
+defm V_CMP_GT_I32 : VOPC_32 <0x00000084, "V_CMP_GT_I32", i32, COND_SGT>;
defm V_CMP_NE_I32 : VOPC_32 <0x00000085, "V_CMP_NE_I32", i32, COND_NE>;
-defm V_CMP_GE_I32 : VOPC_32 <0x00000086, "V_CMP_GE_I32", i32, COND_GE>;
+defm V_CMP_GE_I32 : VOPC_32 <0x00000086, "V_CMP_GE_I32", i32, COND_SGE>;
defm V_CMP_T_I32 : VOPC_32 <0x00000087, "V_CMP_T_I32">;
let hasSideEffects = 1, Defs = [EXEC] in {
@@ -312,12 +317,12 @@ defm V_CMPX_T_I32 : VOPC_32 <0x00000097, "V_CMPX_T_I32">;
} // End hasSideEffects = 1, Defs = [EXEC]
defm V_CMP_F_I64 : VOPC_64 <0x000000a0, "V_CMP_F_I64">;
-defm V_CMP_LT_I64 : VOPC_64 <0x000000a1, "V_CMP_LT_I64">;
-defm V_CMP_EQ_I64 : VOPC_64 <0x000000a2, "V_CMP_EQ_I64">;
-defm V_CMP_LE_I64 : VOPC_64 <0x000000a3, "V_CMP_LE_I64">;
-defm V_CMP_GT_I64 : VOPC_64 <0x000000a4, "V_CMP_GT_I64">;
-defm V_CMP_NE_I64 : VOPC_64 <0x000000a5, "V_CMP_NE_I64">;
-defm V_CMP_GE_I64 : VOPC_64 <0x000000a6, "V_CMP_GE_I64">;
+defm V_CMP_LT_I64 : VOPC_64 <0x000000a1, "V_CMP_LT_I64", i64, COND_SLT>;
+defm V_CMP_EQ_I64 : VOPC_64 <0x000000a2, "V_CMP_EQ_I64", i64, COND_EQ>;
+defm V_CMP_LE_I64 : VOPC_64 <0x000000a3, "V_CMP_LE_I64", i64, COND_SLE>;
+defm V_CMP_GT_I64 : VOPC_64 <0x000000a4, "V_CMP_GT_I64", i64, COND_SGT>;
+defm V_CMP_NE_I64 : VOPC_64 <0x000000a5, "V_CMP_NE_I64", i64, COND_NE>;
+defm V_CMP_GE_I64 : VOPC_64 <0x000000a6, "V_CMP_GE_I64", i64, COND_SGE>;
defm V_CMP_T_I64 : VOPC_64 <0x000000a7, "V_CMP_T_I64">;
let hasSideEffects = 1, Defs = [EXEC] in {
@@ -334,12 +339,12 @@ defm V_CMPX_T_I64 : VOPC_64 <0x000000b7, "V_CMPX_T_I64">;
} // End hasSideEffects = 1, Defs = [EXEC]
defm V_CMP_F_U32 : VOPC_32 <0x000000c0, "V_CMP_F_U32">;
-defm V_CMP_LT_U32 : VOPC_32 <0x000000c1, "V_CMP_LT_U32">;
-defm V_CMP_EQ_U32 : VOPC_32 <0x000000c2, "V_CMP_EQ_U32">;
-defm V_CMP_LE_U32 : VOPC_32 <0x000000c3, "V_CMP_LE_U32">;
-defm V_CMP_GT_U32 : VOPC_32 <0x000000c4, "V_CMP_GT_U32">;
-defm V_CMP_NE_U32 : VOPC_32 <0x000000c5, "V_CMP_NE_U32">;
-defm V_CMP_GE_U32 : VOPC_32 <0x000000c6, "V_CMP_GE_U32">;
+defm V_CMP_LT_U32 : VOPC_32 <0x000000c1, "V_CMP_LT_U32", i32, COND_ULT>;
+defm V_CMP_EQ_U32 : VOPC_32 <0x000000c2, "V_CMP_EQ_U32", i32, COND_EQ>;
+defm V_CMP_LE_U32 : VOPC_32 <0x000000c3, "V_CMP_LE_U32", i32, COND_ULE>;
+defm V_CMP_GT_U32 : VOPC_32 <0x000000c4, "V_CMP_GT_U32", i32, COND_UGT>;
+defm V_CMP_NE_U32 : VOPC_32 <0x000000c5, "V_CMP_NE_U32", i32, COND_NE>;
+defm V_CMP_GE_U32 : VOPC_32 <0x000000c6, "V_CMP_GE_U32", i32, COND_UGE>;
defm V_CMP_T_U32 : VOPC_32 <0x000000c7, "V_CMP_T_U32">;
let hasSideEffects = 1, Defs = [EXEC] in {
@@ -356,12 +361,12 @@ defm V_CMPX_T_U32 : VOPC_32 <0x000000d7, "V_CMPX_T_U32">;
} // End hasSideEffects = 1, Defs = [EXEC]
defm V_CMP_F_U64 : VOPC_64 <0x000000e0, "V_CMP_F_U64">;
-defm V_CMP_LT_U64 : VOPC_64 <0x000000e1, "V_CMP_LT_U64">;
-defm V_CMP_EQ_U64 : VOPC_64 <0x000000e2, "V_CMP_EQ_U64">;
-defm V_CMP_LE_U64 : VOPC_64 <0x000000e3, "V_CMP_LE_U64">;
-defm V_CMP_GT_U64 : VOPC_64 <0x000000e4, "V_CMP_GT_U64">;
-defm V_CMP_NE_U64 : VOPC_64 <0x000000e5, "V_CMP_NE_U64">;
-defm V_CMP_GE_U64 : VOPC_64 <0x000000e6, "V_CMP_GE_U64">;
+defm V_CMP_LT_U64 : VOPC_64 <0x000000e1, "V_CMP_LT_U64", i64, COND_ULT>;
+defm V_CMP_EQ_U64 : VOPC_64 <0x000000e2, "V_CMP_EQ_U64", i64, COND_EQ>;
+defm V_CMP_LE_U64 : VOPC_64 <0x000000e3, "V_CMP_LE_U64", i64, COND_ULE>;
+defm V_CMP_GT_U64 : VOPC_64 <0x000000e4, "V_CMP_GT_U64", i64, COND_UGT>;
+defm V_CMP_NE_U64 : VOPC_64 <0x000000e5, "V_CMP_NE_U64", i64, COND_NE>;
+defm V_CMP_GE_U64 : VOPC_64 <0x000000e6, "V_CMP_GE_U64", i64, COND_UGE>;
defm V_CMP_T_U64 : VOPC_64 <0x000000e7, "V_CMP_T_U64">;
let hasSideEffects = 1, Defs = [EXEC] in {
@@ -391,32 +396,52 @@ defm V_CMPX_CLASS_F64 : VOPC_64 <0x000000b8, "V_CMPX_CLASS_F64">;
} // End isCompare = 1
+def DS_ADD_U32_RTN : DS_1A1D_RET <0x20, "DS_ADD_U32_RTN", VReg_32>;
+def DS_SUB_U32_RTN : DS_1A1D_RET <0x21, "DS_SUB_U32_RTN", VReg_32>;
+def DS_WRITE_B32 : DS_Store_Helper <0x0000000d, "DS_WRITE_B32", VReg_32>;
+def DS_WRITE_B8 : DS_Store_Helper <0x00000001e, "DS_WRITE_B8", VReg_32>;
+def DS_WRITE_B16 : DS_Store_Helper <0x00000001f, "DS_WRITE_B16", VReg_32>;
+def DS_READ_B32 : DS_Load_Helper <0x00000036, "DS_READ_B32", VReg_32>;
+def DS_READ_I8 : DS_Load_Helper <0x00000039, "DS_READ_I8", VReg_32>;
+def DS_READ_U8 : DS_Load_Helper <0x0000003a, "DS_READ_U8", VReg_32>;
+def DS_READ_I16 : DS_Load_Helper <0x0000003b, "DS_READ_I16", VReg_32>;
+def DS_READ_U16 : DS_Load_Helper <0x0000003c, "DS_READ_U16", VReg_32>;
+
//def BUFFER_LOAD_FORMAT_X : MUBUF_ <0x00000000, "BUFFER_LOAD_FORMAT_X", []>;
//def BUFFER_LOAD_FORMAT_XY : MUBUF_ <0x00000001, "BUFFER_LOAD_FORMAT_XY", []>;
//def BUFFER_LOAD_FORMAT_XYZ : MUBUF_ <0x00000002, "BUFFER_LOAD_FORMAT_XYZ", []>;
-def BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMAT_XYZW", VReg_128>;
+defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMAT_XYZW", VReg_128>;
//def BUFFER_STORE_FORMAT_X : MUBUF_ <0x00000004, "BUFFER_STORE_FORMAT_X", []>;
//def BUFFER_STORE_FORMAT_XY : MUBUF_ <0x00000005, "BUFFER_STORE_FORMAT_XY", []>;
//def BUFFER_STORE_FORMAT_XYZ : MUBUF_ <0x00000006, "BUFFER_STORE_FORMAT_XYZ", []>;
//def BUFFER_STORE_FORMAT_XYZW : MUBUF_ <0x00000007, "BUFFER_STORE_FORMAT_XYZW", []>;
-//def BUFFER_LOAD_UBYTE : MUBUF_ <0x00000008, "BUFFER_LOAD_UBYTE", []>;
-//def BUFFER_LOAD_SBYTE : MUBUF_ <0x00000009, "BUFFER_LOAD_SBYTE", []>;
-//def BUFFER_LOAD_USHORT : MUBUF_ <0x0000000a, "BUFFER_LOAD_USHORT", []>;
-//def BUFFER_LOAD_SSHORT : MUBUF_ <0x0000000b, "BUFFER_LOAD_SSHORT", []>;
-def BUFFER_LOAD_DWORD : MUBUF_Load_Helper <0x0000000c, "BUFFER_LOAD_DWORD", VReg_32>;
-def BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64>;
-def BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128>;
-//def BUFFER_STORE_BYTE : MUBUF_ <0x00000018, "BUFFER_STORE_BYTE", []>;
-//def BUFFER_STORE_SHORT : MUBUF_ <0x0000001a, "BUFFER_STORE_SHORT", []>;
+defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <0x00000008, "BUFFER_LOAD_UBYTE", VReg_32>;
+defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper <0x00000009, "BUFFER_LOAD_SBYTE", VReg_32>;
+defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper <0x0000000a, "BUFFER_LOAD_USHORT", VReg_32>;
+defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper <0x0000000b, "BUFFER_LOAD_SSHORT", VReg_32>;
+defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <0x0000000c, "BUFFER_LOAD_DWORD", VReg_32>;
+defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64>;
+defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128>;
+
+def BUFFER_STORE_BYTE : MUBUF_Store_Helper <
+ 0x00000018, "BUFFER_STORE_BYTE", VReg_32
+>;
+
+def BUFFER_STORE_SHORT : MUBUF_Store_Helper <
+ 0x0000001a, "BUFFER_STORE_SHORT", VReg_32
+>;
def BUFFER_STORE_DWORD : MUBUF_Store_Helper <
- 0x0000001c, "BUFFER_STORE_DWORD", VReg_32, i32
+ 0x0000001c, "BUFFER_STORE_DWORD", VReg_32
>;
def BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper <
- 0x0000001d, "BUFFER_STORE_DWORDX2", VReg_64, i64
+ 0x0000001d, "BUFFER_STORE_DWORDX2", VReg_64
+>;
+
+def BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper <
+ 0x0000001e, "BUFFER_STORE_DWORDX4", VReg_128
>;
-//def BUFFER_STORE_DWORDX4 : MUBUF_DWORDX4 <0x0000001e, "BUFFER_STORE_DWORDX4", []>;
//def BUFFER_ATOMIC_SWAP : MUBUF_ <0x00000030, "BUFFER_ATOMIC_SWAP", []>;
//def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <0x00000031, "BUFFER_ATOMIC_CMPSWAP", []>;
//def BUFFER_ATOMIC_ADD : MUBUF_ <0x00000032, "BUFFER_ATOMIC_ADD", []>;
@@ -457,21 +482,24 @@ def BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper <
//def TBUFFER_LOAD_FORMAT_XY : MTBUF_ <0x00000001, "TBUFFER_LOAD_FORMAT_XY", []>;
//def TBUFFER_LOAD_FORMAT_XYZ : MTBUF_ <0x00000002, "TBUFFER_LOAD_FORMAT_XYZ", []>;
def TBUFFER_LOAD_FORMAT_XYZW : MTBUF_Load_Helper <0x00000003, "TBUFFER_LOAD_FORMAT_XYZW", VReg_128>;
-//def TBUFFER_STORE_FORMAT_X : MTBUF_ <0x00000004, "TBUFFER_STORE_FORMAT_X", []>;
-//def TBUFFER_STORE_FORMAT_XY : MTBUF_ <0x00000005, "TBUFFER_STORE_FORMAT_XY", []>;
-//def TBUFFER_STORE_FORMAT_XYZ : MTBUF_ <0x00000006, "TBUFFER_STORE_FORMAT_XYZ", []>;
-//def TBUFFER_STORE_FORMAT_XYZW : MTBUF_ <0x00000007, "TBUFFER_STORE_FORMAT_XYZW", []>;
+def TBUFFER_STORE_FORMAT_X : MTBUF_Store_Helper <0x00000004, "TBUFFER_STORE_FORMAT_X", VReg_32>;
+def TBUFFER_STORE_FORMAT_XY : MTBUF_Store_Helper <0x00000005, "TBUFFER_STORE_FORMAT_XY", VReg_64>;
+def TBUFFER_STORE_FORMAT_XYZ : MTBUF_Store_Helper <0x00000006, "TBUFFER_STORE_FORMAT_XYZ", VReg_128>;
+def TBUFFER_STORE_FORMAT_XYZW : MTBUF_Store_Helper <0x00000007, "TBUFFER_STORE_FORMAT_XYZW", VReg_128>;
let mayLoad = 1 in {
-defm S_LOAD_DWORD : SMRD_Helper <0x00, "S_LOAD_DWORD", SReg_64, SReg_32>;
+// We are using the SGPR_32 and not the SReg_32 register class for 32-bit
+// SMRD instructions, because the SGPR_32 register class does not include M0
+// and writing to M0 from an SMRD instruction will hang the GPU.
+defm S_LOAD_DWORD : SMRD_Helper <0x00, "S_LOAD_DWORD", SReg_64, SGPR_32>;
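As an aside, the register-class distinction the comment draws can be pictured roughly as below; the lists are illustrative only, not the actual SIRegisterInfo.td definitions. The point is that SReg_32 is a superset that also admits special registers such as M0, which an SMRD destination must never name:

  // Illustrative sketch, not part of the patch:
  def SGPR_32_sketch : RegisterClass<"AMDGPU", [i32], 32,
    (add (sequence "SGPR%u", 0, 103))>;
  def SReg_32_sketch : RegisterClass<"AMDGPU", [i32], 32,
    (add SGPR_32_sketch, M0)>;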
defm S_LOAD_DWORDX2 : SMRD_Helper <0x01, "S_LOAD_DWORDX2", SReg_64, SReg_64>;
defm S_LOAD_DWORDX4 : SMRD_Helper <0x02, "S_LOAD_DWORDX4", SReg_64, SReg_128>;
defm S_LOAD_DWORDX8 : SMRD_Helper <0x03, "S_LOAD_DWORDX8", SReg_64, SReg_256>;
defm S_LOAD_DWORDX16 : SMRD_Helper <0x04, "S_LOAD_DWORDX16", SReg_64, SReg_512>;
defm S_BUFFER_LOAD_DWORD : SMRD_Helper <
- 0x08, "S_BUFFER_LOAD_DWORD", SReg_128, SReg_32
+ 0x08, "S_BUFFER_LOAD_DWORD", SReg_128, SGPR_32
>;
defm S_BUFFER_LOAD_DWORDX2 : SMRD_Helper <
@@ -494,8 +522,8 @@ defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper <
//def S_MEMTIME : SMRD_ <0x0000001e, "S_MEMTIME", []>;
//def S_DCACHE_INV : SMRD_ <0x0000001f, "S_DCACHE_INV", []>;
-//def IMAGE_LOAD : MIMG_NoPattern_ <"IMAGE_LOAD", 0x00000000>;
-def IMAGE_LOAD_MIP : MIMG_NoSampler_Helper <0x00000001, "IMAGE_LOAD_MIP">;
+defm IMAGE_LOAD : MIMG_NoSampler <0x00000000, "IMAGE_LOAD">;
+defm IMAGE_LOAD_MIP : MIMG_NoSampler <0x00000001, "IMAGE_LOAD_MIP">;
//def IMAGE_LOAD_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_PCK", 0x00000002>;
//def IMAGE_LOAD_PCK_SGN : MIMG_NoPattern_ <"IMAGE_LOAD_PCK_SGN", 0x00000003>;
//def IMAGE_LOAD_MIP_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_MIP_PCK", 0x00000004>;
@@ -504,7 +532,7 @@ def IMAGE_LOAD_MIP : MIMG_NoSampler_Helper <0x00000001, "IMAGE_LOAD_MIP">;
//def IMAGE_STORE_MIP : MIMG_NoPattern_ <"IMAGE_STORE_MIP", 0x00000009>;
//def IMAGE_STORE_PCK : MIMG_NoPattern_ <"IMAGE_STORE_PCK", 0x0000000a>;
//def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"IMAGE_STORE_MIP_PCK", 0x0000000b>;
-def IMAGE_GET_RESINFO : MIMG_NoSampler_Helper <0x0000000e, "IMAGE_GET_RESINFO">;
+defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "IMAGE_GET_RESINFO">;
//def IMAGE_ATOMIC_SWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_SWAP", 0x0000000f>;
//def IMAGE_ATOMIC_CMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_CMPSWAP", 0x00000010>;
//def IMAGE_ATOMIC_ADD : MIMG_NoPattern_ <"IMAGE_ATOMIC_ADD", 0x00000011>;
@@ -522,20 +550,20 @@ def IMAGE_GET_RESINFO : MIMG_NoSampler_Helper <0x0000000e, "IMAGE_GET_RESINFO">;
//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_FCMPSWAP", 0x0000001d>;
//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMIN", 0x0000001e>;
//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMAX", 0x0000001f>;
-def IMAGE_SAMPLE : MIMG_Sampler_Helper <0x00000020, "IMAGE_SAMPLE">;
+defm IMAGE_SAMPLE : MIMG_Sampler <0x00000020, "IMAGE_SAMPLE">;
//def IMAGE_SAMPLE_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_CL", 0x00000021>;
-def IMAGE_SAMPLE_D : MIMG_Sampler_Helper <0x00000022, "IMAGE_SAMPLE_D">;
+defm IMAGE_SAMPLE_D : MIMG_Sampler <0x00000022, "IMAGE_SAMPLE_D">;
//def IMAGE_SAMPLE_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_CL", 0x00000023>;
-def IMAGE_SAMPLE_L : MIMG_Sampler_Helper <0x00000024, "IMAGE_SAMPLE_L">;
-def IMAGE_SAMPLE_B : MIMG_Sampler_Helper <0x00000025, "IMAGE_SAMPLE_B">;
+defm IMAGE_SAMPLE_L : MIMG_Sampler <0x00000024, "IMAGE_SAMPLE_L">;
+defm IMAGE_SAMPLE_B : MIMG_Sampler <0x00000025, "IMAGE_SAMPLE_B">;
//def IMAGE_SAMPLE_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_CL", 0x00000026>;
//def IMAGE_SAMPLE_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ", 0x00000027>;
-def IMAGE_SAMPLE_C : MIMG_Sampler_Helper <0x00000028, "IMAGE_SAMPLE_C">;
+defm IMAGE_SAMPLE_C : MIMG_Sampler <0x00000028, "IMAGE_SAMPLE_C">;
//def IMAGE_SAMPLE_C_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL", 0x00000029>;
-//def IMAGE_SAMPLE_C_D : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D", 0x0000002a>;
+defm IMAGE_SAMPLE_C_D : MIMG_Sampler <0x0000002a, "IMAGE_SAMPLE_C_D">;
//def IMAGE_SAMPLE_C_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL", 0x0000002b>;
-def IMAGE_SAMPLE_C_L : MIMG_Sampler_Helper <0x0000002c, "IMAGE_SAMPLE_C_L">;
-def IMAGE_SAMPLE_C_B : MIMG_Sampler_Helper <0x0000002d, "IMAGE_SAMPLE_C_B">;
+defm IMAGE_SAMPLE_C_L : MIMG_Sampler <0x0000002c, "IMAGE_SAMPLE_C_L">;
+defm IMAGE_SAMPLE_C_B : MIMG_Sampler <0x0000002d, "IMAGE_SAMPLE_C_B">;
//def IMAGE_SAMPLE_C_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_CL", 0x0000002e>;
//def IMAGE_SAMPLE_C_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_LZ", 0x0000002f>;
//def IMAGE_SAMPLE_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_O", 0x00000030>;
@@ -597,15 +625,21 @@ defm V_MOV_B32 : VOP1_32 <0x00000001, "V_MOV_B32", []>;
} // End neverHasSideEffects = 1, isMoveImm = 1
defm V_READFIRSTLANE_B32 : VOP1_32 <0x00000002, "V_READFIRSTLANE_B32", []>;
-//defm V_CVT_I32_F64 : VOP1_32 <0x00000003, "V_CVT_I32_F64", []>;
-//defm V_CVT_F64_I32 : VOP1_64 <0x00000004, "V_CVT_F64_I32", []>;
+defm V_CVT_I32_F64 : VOP1_32_64 <0x00000003, "V_CVT_I32_F64",
+ [(set i32:$dst, (fp_to_sint f64:$src0))]
+>;
+defm V_CVT_F64_I32 : VOP1_64_32 <0x00000004, "V_CVT_F64_I32",
+ [(set f64:$dst, (sint_to_fp i32:$src0))]
+>;
defm V_CVT_F32_I32 : VOP1_32 <0x00000005, "V_CVT_F32_I32",
[(set f32:$dst, (sint_to_fp i32:$src0))]
>;
defm V_CVT_F32_U32 : VOP1_32 <0x00000006, "V_CVT_F32_U32",
[(set f32:$dst, (uint_to_fp i32:$src0))]
>;
-defm V_CVT_U32_F32 : VOP1_32 <0x00000007, "V_CVT_U32_F32", []>;
+defm V_CVT_U32_F32 : VOP1_32 <0x00000007, "V_CVT_U32_F32",
+ [(set i32:$dst, (fp_to_uint f32:$src0))]
+>;
defm V_CVT_I32_F32 : VOP1_32 <0x00000008, "V_CVT_I32_F32",
[(set i32:$dst, (fp_to_sint f32:$src0))]
>;
@@ -615,8 +649,12 @@ defm V_MOV_FED_B32 : VOP1_32 <0x00000009, "V_MOV_FED_B32", []>;
//defm V_CVT_RPI_I32_F32 : VOP1_32 <0x0000000c, "V_CVT_RPI_I32_F32", []>;
//defm V_CVT_FLR_I32_F32 : VOP1_32 <0x0000000d, "V_CVT_FLR_I32_F32", []>;
//defm V_CVT_OFF_F32_I4 : VOP1_32 <0x0000000e, "V_CVT_OFF_F32_I4", []>;
-//defm V_CVT_F32_F64 : VOP1_32 <0x0000000f, "V_CVT_F32_F64", []>;
-//defm V_CVT_F64_F32 : VOP1_64 <0x00000010, "V_CVT_F64_F32", []>;
+defm V_CVT_F32_F64 : VOP1_32_64 <0x0000000f, "V_CVT_F32_F64",
+ [(set f32:$dst, (fround f64:$src0))]
+>;
+defm V_CVT_F64_F32 : VOP1_64_32 <0x00000010, "V_CVT_F64_F32",
+ [(set f64:$dst, (fextend f32:$src0))]
+>;
//defm V_CVT_F32_UBYTE0 : VOP1_32 <0x00000011, "V_CVT_F32_UBYTE0", []>;
//defm V_CVT_F32_UBYTE1 : VOP1_32 <0x00000012, "V_CVT_F32_UBYTE1", []>;
//defm V_CVT_F32_UBYTE2 : VOP1_32 <0x00000013, "V_CVT_F32_UBYTE2", []>;
@@ -657,12 +695,18 @@ defm V_RSQ_LEGACY_F32 : VOP1_32 <
[(set f32:$dst, (int_AMDGPU_rsq f32:$src0))]
>;
defm V_RSQ_F32 : VOP1_32 <0x0000002e, "V_RSQ_F32", []>;
-defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64", []>;
+defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64",
+ [(set f64:$dst, (fdiv FP_ONE, f64:$src0))]
+>;
defm V_RCP_CLAMP_F64 : VOP1_64 <0x00000030, "V_RCP_CLAMP_F64", []>;
defm V_RSQ_F64 : VOP1_64 <0x00000031, "V_RSQ_F64", []>;
defm V_RSQ_CLAMP_F64 : VOP1_64 <0x00000032, "V_RSQ_CLAMP_F64", []>;
-defm V_SQRT_F32 : VOP1_32 <0x00000033, "V_SQRT_F32", []>;
-defm V_SQRT_F64 : VOP1_64 <0x00000034, "V_SQRT_F64", []>;
+defm V_SQRT_F32 : VOP1_32 <0x00000033, "V_SQRT_F32",
+ [(set f32:$dst, (fsqrt f32:$src0))]
+>;
+defm V_SQRT_F64 : VOP1_64 <0x00000034, "V_SQRT_F64",
+ [(set f64:$dst, (fsqrt f64:$src0))]
+>;
defm V_SIN_F32 : VOP1_32 <0x00000035, "V_SIN_F32", []>;
defm V_COS_F32 : VOP1_32 <0x00000036, "V_COS_F32", []>;
defm V_NOT_B32 : VOP1_32 <0x00000037, "V_NOT_B32", []>;
@@ -768,9 +812,18 @@ def S_CBRANCH_EXECNZ : SOPP <
} // End isBranch = 1
} // End isTerminator = 1
-//def S_BARRIER : SOPP_ <0x0000000a, "S_BARRIER", []>;
let hasSideEffects = 1 in {
-def S_WAITCNT : SOPP <0x0000000c, (ins i32imm:$simm16), "S_WAITCNT $simm16",
+def S_BARRIER : SOPP <0x0000000a, (ins), "S_BARRIER",
+ [(int_AMDGPU_barrier_local)]
+> {
+ let SIMM16 = 0;
+ let isBarrier = 1;
+ let hasCtrlDep = 1;
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
+def S_WAITCNT : SOPP <0x0000000c, (ins WAIT_FLAG:$simm16), "S_WAITCNT $simm16",
[]
>;
} // End hasSideEffects
@@ -806,6 +859,23 @@ def : Pat <
(V_CNDMASK_B32_e64 $src0, $src1, $src2)
>;
+def : Pat <
+ (i32 (trunc i64:$val)),
+ (EXTRACT_SUBREG $val, sub0)
+>;
+
+// Use two V_CNDMASK_B32_e64 instructions for f64
+def : Pat <
+ (f64 (select i1:$src2, f64:$src1, f64:$src0)),
+ (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (V_CNDMASK_B32_e64 (EXTRACT_SUBREG $src0, sub0),
+ (EXTRACT_SUBREG $src1, sub0),
+ $src2), sub0),
+ (V_CNDMASK_B32_e64 (EXTRACT_SUBREG $src0, sub1),
+ (EXTRACT_SUBREG $src1, sub1),
+ $src2), sub1)
+>;
+
defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>;
defm V_WRITELANE_B32 : VOP2_32 <0x00000002, "V_WRITELANE_B32", []>;
@@ -833,14 +903,16 @@ defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32",
[(set f32:$dst, (fmul f32:$src0, f32:$src1))]
>;
-} // End isCommutable = 1
-//defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24", []>;
+defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24",
+ [(set i32:$dst, (mul I24:$src0, I24:$src1))]
+>;
//defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>;
-//defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24", []>;
+defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24",
+ [(set i32:$dst, (mul U24:$src0, U24:$src1))]
+>;
//defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>;
-let isCommutable = 1 in {
defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32",
[(set f32:$dst, (AMDGPUfmin f32:$src0, f32:$src1))]
@@ -875,9 +947,13 @@ defm V_ASHR_I32 : VOP2_32 <0x00000017, "V_ASHR_I32",
>;
defm V_ASHRREV_I32 : VOP2_32 <0x00000018, "V_ASHRREV_I32", [], "V_ASHR_I32">;
+let hasPostISelHook = 1 in {
+
defm V_LSHL_B32 : VOP2_32 <0x00000019, "V_LSHL_B32",
[(set i32:$dst, (shl i32:$src0, i32:$src1))]
>;
+
+}
defm V_LSHLREV_B32 : VOP2_32 <0x0000001a, "V_LSHLREV_B32", [], "V_LSHL_B32">;
defm V_AND_B32 : VOP2_32 <0x0000001b, "V_AND_B32",
@@ -897,20 +973,17 @@ defm V_MAC_F32 : VOP2_32 <0x0000001f, "V_MAC_F32", []>;
defm V_MADMK_F32 : VOP2_32 <0x00000020, "V_MADMK_F32", []>;
defm V_MADAK_F32 : VOP2_32 <0x00000021, "V_MADAK_F32", []>;
//defm V_BCNT_U32_B32 : VOP2_32 <0x00000022, "V_BCNT_U32_B32", []>;
-//defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>;
-//defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>;
+defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>;
+defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>;
let isCommutable = 1, Defs = [VCC] in { // Carry-out goes to VCC
-defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32",
- [(set i32:$dst, (add (i32 VSrc_32:$src0), (i32 VReg_32:$src1)))]
->;
-
-defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32",
- [(set i32:$dst, (sub i32:$src0, i32:$src1))]
->;
+// No patterns so that the scalar instructions are always selected.
+// The scalar versions will be replaced with vector when needed later.
+defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32", []>;
+defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32", []>;
defm V_SUBREV_I32 : VOP2b_32 <0x00000027, "V_SUBREV_I32", [], "V_SUB_I32">;
-let Uses = [VCC] in { // Carry-out comes from VCC
+let Uses = [VCC] in { // Carry-in comes from VCC
defm V_ADDC_U32 : VOP2b_32 <0x00000028, "V_ADDC_U32", []>;
defm V_SUBB_U32 : VOP2b_32 <0x00000029, "V_SUBB_U32", []>;
defm V_SUBBREV_U32 : VOP2b_32 <0x0000002a, "V_SUBBREV_U32", [], "V_SUBB_U32">;
@@ -948,8 +1021,12 @@ let neverHasSideEffects = 1 in {
def V_MAD_LEGACY_F32 : VOP3_32 <0x00000140, "V_MAD_LEGACY_F32", []>;
def V_MAD_F32 : VOP3_32 <0x00000141, "V_MAD_F32", []>;
-//def V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24", []>;
-//def V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24", []>;
+def V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24",
+ [(set i32:$dst, (add (mul I24:$src0, I24:$src1), i32:$src2))]
+>;
+def V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24",
+ [(set i32:$dst, (add (mul U24:$src0, U24:$src1), i32:$src2))]
+>;
} // End neverHasSideEffects
def V_CUBEID_F32 : VOP3_32 <0x00000144, "V_CUBEID_F32", []>;
@@ -960,10 +1037,16 @@ def V_BFE_U32 : VOP3_32 <0x00000148, "V_BFE_U32", []>;
def V_BFE_I32 : VOP3_32 <0x00000149, "V_BFE_I32", []>;
def V_BFI_B32 : VOP3_32 <0x0000014a, "V_BFI_B32", []>;
defm : BFIPatterns <V_BFI_B32>;
-def V_FMA_F32 : VOP3_32 <0x0000014b, "V_FMA_F32", []>;
-def V_FMA_F64 : VOP3_64 <0x0000014c, "V_FMA_F64", []>;
+def V_FMA_F32 : VOP3_32 <0x0000014b, "V_FMA_F32",
+ [(set f32:$dst, (fma f32:$src0, f32:$src1, f32:$src2))]
+>;
+def V_FMA_F64 : VOP3_64 <0x0000014c, "V_FMA_F64",
+ [(set f64:$dst, (fma f64:$src0, f64:$src1, f64:$src2))]
+>;
//def V_LERP_U8 : VOP3_U8 <0x0000014d, "V_LERP_U8", []>;
def V_ALIGNBIT_B32 : VOP3_32 <0x0000014e, "V_ALIGNBIT_B32", []>;
+def : ROTRPattern <V_ALIGNBIT_B32>;
+
def V_ALIGNBYTE_B32 : VOP3_32 <0x0000014f, "V_ALIGNBYTE_B32", []>;
def V_MULLIT_F32 : VOP3_32 <0x00000150, "V_MULLIT_F32", []>;
////def V_MIN3_F32 : VOP3_MIN3 <0x00000151, "V_MIN3_F32", []>;
@@ -982,13 +1065,36 @@ def V_SAD_U32 : VOP3_32 <0x0000015d, "V_SAD_U32", []>;
////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "V_CVT_PK_U8_F32", []>;
def V_DIV_FIXUP_F32 : VOP3_32 <0x0000015f, "V_DIV_FIXUP_F32", []>;
def V_DIV_FIXUP_F64 : VOP3_64 <0x00000160, "V_DIV_FIXUP_F64", []>;
-def V_LSHL_B64 : VOP3_64 <0x00000161, "V_LSHL_B64", []>;
-def V_LSHR_B64 : VOP3_64 <0x00000162, "V_LSHR_B64", []>;
-def V_ASHR_I64 : VOP3_64 <0x00000163, "V_ASHR_I64", []>;
+
+def V_LSHL_B64 : VOP3_64_Shift <0x00000161, "V_LSHL_B64",
+ [(set i64:$dst, (shl i64:$src0, i32:$src1))]
+>;
+def V_LSHR_B64 : VOP3_64_Shift <0x00000162, "V_LSHR_B64",
+ [(set i64:$dst, (srl i64:$src0, i32:$src1))]
+>;
+def V_ASHR_I64 : VOP3_64_Shift <0x00000163, "V_ASHR_I64",
+ [(set i64:$dst, (sra i64:$src0, i32:$src1))]
+>;
+
+let isCommutable = 1 in {
+
def V_ADD_F64 : VOP3_64 <0x00000164, "V_ADD_F64", []>;
def V_MUL_F64 : VOP3_64 <0x00000165, "V_MUL_F64", []>;
def V_MIN_F64 : VOP3_64 <0x00000166, "V_MIN_F64", []>;
def V_MAX_F64 : VOP3_64 <0x00000167, "V_MAX_F64", []>;
+
+} // isCommutable = 1
+
+def : Pat <
+ (fadd f64:$src0, f64:$src1),
+ (V_ADD_F64 $src0, $src1, (i64 0))
+>;
+
+def : Pat <
+ (fmul f64:$src0, f64:$src1),
+ (V_MUL_F64 $src0, $src1, (i64 0))
+>;
+
def V_LDEXP_F64 : VOP3_64 <0x00000168, "V_LDEXP_F64", []>;
let isCommutable = 1 in {
@@ -1023,12 +1129,31 @@ def V_DIV_FMAS_F64 : VOP3_64 <0x00000170, "V_DIV_FMAS_F64", []>;
//def V_QSAD_U8 : VOP3_U8 <0x00000172, "V_QSAD_U8", []>;
//def V_MQSAD_U8 : VOP3_U8 <0x00000173, "V_MQSAD_U8", []>;
def V_TRIG_PREOP_F64 : VOP3_64 <0x00000174, "V_TRIG_PREOP_F64", []>;
+
+let Defs = [SCC] in { // Carry out goes to SCC
+let isCommutable = 1 in {
def S_ADD_U32 : SOP2_32 <0x00000000, "S_ADD_U32", []>;
+def S_ADD_I32 : SOP2_32 <0x00000002, "S_ADD_I32",
+ [(set i32:$dst, (add SSrc_32:$src0, SSrc_32:$src1))]
+>;
+} // End isCommutable = 1
+
def S_SUB_U32 : SOP2_32 <0x00000001, "S_SUB_U32", []>;
-def S_ADD_I32 : SOP2_32 <0x00000002, "S_ADD_I32", []>;
-def S_SUB_I32 : SOP2_32 <0x00000003, "S_SUB_I32", []>;
-def S_ADDC_U32 : SOP2_32 <0x00000004, "S_ADDC_U32", []>;
-def S_SUBB_U32 : SOP2_32 <0x00000005, "S_SUBB_U32", []>;
+def S_SUB_I32 : SOP2_32 <0x00000003, "S_SUB_I32",
+ [(set i32:$dst, (sub SSrc_32:$src0, SSrc_32:$src1))]
+>;
+
+let Uses = [SCC] in { // Carry in comes from SCC
+let isCommutable = 1 in {
+def S_ADDC_U32 : SOP2_32 <0x00000004, "S_ADDC_U32",
+ [(set i32:$dst, (adde (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
+} // End isCommutable = 1
+
+def S_SUBB_U32 : SOP2_32 <0x00000005, "S_SUBB_U32",
+ [(set i32:$dst, (sube (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
+} // End Uses = [SCC]
+} // End Defs = [SCC]
+
def S_MIN_I32 : SOP2_32 <0x00000006, "S_MIN_I32", []>;
def S_MIN_U32 : SOP2_32 <0x00000007, "S_MIN_U32", []>;
def S_MAX_I32 : SOP2_32 <0x00000008, "S_MAX_I32", []>;
@@ -1060,7 +1185,9 @@ def : Pat <
(S_OR_B64 $src0, $src1)
>;
def S_XOR_B32 : SOP2_32 <0x00000012, "S_XOR_B32", []>;
-def S_XOR_B64 : SOP2_64 <0x00000013, "S_XOR_B64", []>;
+def S_XOR_B64 : SOP2_64 <0x00000013, "S_XOR_B64",
+ [(set i1:$dst, (xor i1:$src0, i1:$src1))]
+>;
def S_ANDN2_B32 : SOP2_32 <0x00000014, "S_ANDN2_B32", []>;
def S_ANDN2_B64 : SOP2_64 <0x00000015, "S_ANDN2_B64", []>;
def S_ORN2_B32 : SOP2_32 <0x00000016, "S_ORN2_B32", []>;
@@ -1071,12 +1198,31 @@ def S_NOR_B32 : SOP2_32 <0x0000001a, "S_NOR_B32", []>;
def S_NOR_B64 : SOP2_64 <0x0000001b, "S_NOR_B64", []>;
def S_XNOR_B32 : SOP2_32 <0x0000001c, "S_XNOR_B32", []>;
def S_XNOR_B64 : SOP2_64 <0x0000001d, "S_XNOR_B64", []>;
-def S_LSHL_B32 : SOP2_32 <0x0000001e, "S_LSHL_B32", []>;
-def S_LSHL_B64 : SOP2_64 <0x0000001f, "S_LSHL_B64", []>;
-def S_LSHR_B32 : SOP2_32 <0x00000020, "S_LSHR_B32", []>;
-def S_LSHR_B64 : SOP2_64 <0x00000021, "S_LSHR_B64", []>;
-def S_ASHR_I32 : SOP2_32 <0x00000022, "S_ASHR_I32", []>;
-def S_ASHR_I64 : SOP2_64 <0x00000023, "S_ASHR_I64", []>;
+
+// Use added complexity so these patterns are preferred to the VALU patterns.
+let AddedComplexity = 1 in {
+
+def S_LSHL_B32 : SOP2_32 <0x0000001e, "S_LSHL_B32",
+ [(set i32:$dst, (shl i32:$src0, i32:$src1))]
+>;
+def S_LSHL_B64 : SOP2_SHIFT_64 <0x0000001f, "S_LSHL_B64",
+ [(set i64:$dst, (shl i64:$src0, i32:$src1))]
+>;
+def S_LSHR_B32 : SOP2_32 <0x00000020, "S_LSHR_B32",
+ [(set i32:$dst, (srl i32:$src0, i32:$src1))]
+>;
+def S_LSHR_B64 : SOP2_SHIFT_64 <0x00000021, "S_LSHR_B64",
+ [(set i64:$dst, (srl i64:$src0, i32:$src1))]
+>;
+def S_ASHR_I32 : SOP2_32 <0x00000022, "S_ASHR_I32",
+ [(set i32:$dst, (sra i32:$src0, i32:$src1))]
+>;
+def S_ASHR_I64 : SOP2_SHIFT_64 <0x00000023, "S_ASHR_I64",
+ [(set i64:$dst, (sra i64:$src0, i32:$src1))]
+>;
+
+} // End AddedComplexity = 1
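For readers unfamiliar with the knob: when several patterns match the same DAG node, instruction selection prefers the one with the larger AddedComplexity, so raising the scalar-shift patterns to 1 makes them beat the otherwise-equal VALU patterns. A stripped-down illustration of the mechanism (operand constraints elided, names reused purely for the example):

  // Both patterns match (shl i32, i32); the higher AddedComplexity wins.
  let AddedComplexity = 1 in
  def : Pat <(shl i32:$x, i32:$y), (S_LSHL_B32 $x, $y)>;     // preferred
  def : Pat <(shl i32:$x, i32:$y), (V_LSHL_B32_e32 $x, $y)>; // complexity-0 fallback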
+
def S_BFM_B32 : SOP2_32 <0x00000024, "S_BFM_B32", []>;
def S_BFM_B64 : SOP2_64 <0x00000025, "S_BFM_B64", []>;
def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32", []>;
@@ -1096,7 +1242,7 @@ def LOAD_CONST : AMDGPUShaderInst <
[(set GPRF32:$dst, (int_AMDGPU_load_const imm:$src))]
>;
-// SI Psuedo instructions. These are used by the CFG structurizer pass
+// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.
let mayLoad = 1, mayStore = 1, hasSideEffects = 1,
@@ -1169,6 +1315,36 @@ def SI_KILL : InstSI <
let Uses = [EXEC], Defs = [EXEC,VCC,M0] in {
+//defm SI_ : RegisterLoadStore <VReg_32, FRAMEri64, ADDRIndirect>;
+
+let UseNamedOperandTable = 1 in {
+
+def SI_RegisterLoad : AMDGPUShaderInst <
+ (outs VReg_32:$dst, SReg_64:$temp),
+ (ins FRAMEri64:$addr, i32imm:$chan),
+ "", []
+> {
+ let isRegisterLoad = 1;
+ let mayLoad = 1;
+}
+
+class SIRegStore<dag outs> : AMDGPUShaderInst <
+ outs,
+ (ins VReg_32:$val, FRAMEri64:$addr, i32imm:$chan),
+ "", []
+> {
+ let isRegisterStore = 1;
+ let mayStore = 1;
+}
+
+let usesCustomInserter = 1 in {
+def SI_RegisterStorePseudo : SIRegStore<(outs)>;
+} // End usesCustomInserter = 1
+def SI_RegisterStore : SIRegStore<(outs SReg_64:$temp)>;
+
+
+} // End UseNamedOperandTable = 1
+
def SI_INDIRECT_SRC : InstSI <
(outs VReg_32:$dst, SReg_64:$temp),
(ins unknown:$src, VSrc_32:$idx, i32imm:$off),
@@ -1185,6 +1361,7 @@ class SI_INDIRECT_DST<RegisterClass rc> : InstSI <
let Constraints = "$src = $dst";
}
+def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VReg_32>;
def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
@@ -1192,6 +1369,25 @@ def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;
} // Uses = [EXEC,VCC,M0], Defs = [EXEC,VCC,M0]
+let usesCustomInserter = 1 in {
+
+// This pseudo instruction takes a pointer as input and outputs a resource
+// constant that can be used with the ADDR64 MUBUF instructions.
+def SI_ADDR64_RSRC : InstSI <
+ (outs SReg_128:$srsrc),
+ (ins SReg_64:$ptr),
+ "", []
+>;
+
+def V_SUB_F64 : InstSI <
+ (outs VReg_64:$dst),
+ (ins VReg_64:$src0, VReg_64:$src1),
+ "V_SUB_F64 $dst, $src0, $src1",
+ []
+>;
+
+} // end usesCustomInserter
+
} // end IsCodeGenOnly, isPseudo
def : Pat<
@@ -1206,10 +1402,8 @@ def : Pat <
/* int_SI_vs_load_input */
def : Pat<
- (int_SI_vs_load_input v16i8:$tlst, IMM12bit:$attr_offset,
- i32:$buf_idx_vgpr),
- (BUFFER_LOAD_FORMAT_XYZW imm:$attr_offset, 0, 1, 0, 0, 0,
- $buf_idx_vgpr, $tlst, 0, 0, 0)
+ (SIload_input i128:$tlst, IMM12bit:$attr_offset, i32:$buf_idx_vgpr),
+ (BUFFER_LOAD_FORMAT_XYZW_IDXEN $tlst, $buf_idx_vgpr, imm:$attr_offset)
>;
/* int_SI_export */
@@ -1220,66 +1414,94 @@ def : Pat <
$src0, $src1, $src2, $src3)
>;
+def : Pat <
+ (f64 (fsub f64:$src0, f64:$src1)),
+ (V_SUB_F64 $src0, $src1)
+>;
+
/********** ======================= **********/
/********** Image sampling patterns **********/
/********** ======================= **********/
-/* int_SI_sample for simple 1D texture lookup */
+/* SIsample for simple 1D texture lookup */
def : Pat <
- (int_SI_sample v1i32:$addr, v32i8:$rsrc, v16i8:$sampler, imm),
- (IMAGE_SAMPLE 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
+ (SIsample i32:$addr, v32i8:$rsrc, i128:$sampler, imm),
+ (IMAGE_SAMPLE_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-class SamplePattern<Intrinsic name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, imm),
+class SamplePattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
+ (name vt:$addr, v32i8:$rsrc, i128:$sampler, imm),
(opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-class SampleRectPattern<Intrinsic name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_RECT),
+class SampleRectPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
+ (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_RECT),
(opcode 0xf, 1, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-class SampleArrayPattern<Intrinsic name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_ARRAY),
+class SampleArrayPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
+ (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_ARRAY),
(opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-class SampleShadowPattern<Intrinsic name, MIMG opcode,
+class SampleShadowPattern<SDNode name, MIMG opcode,
ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_SHADOW),
+ (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_SHADOW),
(opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-class SampleShadowArrayPattern<Intrinsic name, MIMG opcode,
+class SampleShadowArrayPattern<SDNode name, MIMG opcode,
ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_SHADOW_ARRAY),
+ (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_SHADOW_ARRAY),
(opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-/* int_SI_sample* for texture lookups consuming more address parameters */
-multiclass SamplePatterns<ValueType addr_type> {
- def : SamplePattern <int_SI_sample, IMAGE_SAMPLE, addr_type>;
- def : SampleRectPattern <int_SI_sample, IMAGE_SAMPLE, addr_type>;
- def : SampleArrayPattern <int_SI_sample, IMAGE_SAMPLE, addr_type>;
- def : SampleShadowPattern <int_SI_sample, IMAGE_SAMPLE_C, addr_type>;
- def : SampleShadowArrayPattern <int_SI_sample, IMAGE_SAMPLE_C, addr_type>;
-
- def : SamplePattern <int_SI_samplel, IMAGE_SAMPLE_L, addr_type>;
- def : SampleArrayPattern <int_SI_samplel, IMAGE_SAMPLE_L, addr_type>;
- def : SampleShadowPattern <int_SI_samplel, IMAGE_SAMPLE_C_L, addr_type>;
- def : SampleShadowArrayPattern <int_SI_samplel, IMAGE_SAMPLE_C_L, addr_type>;
-
- def : SamplePattern <int_SI_sampleb, IMAGE_SAMPLE_B, addr_type>;
- def : SampleArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_B, addr_type>;
- def : SampleShadowPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_type>;
- def : SampleShadowArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_type>;
+/* SIsample* for texture lookups consuming more address parameters */
+multiclass SamplePatterns<MIMG sample, MIMG sample_c, MIMG sample_l,
+ MIMG sample_c_l, MIMG sample_b, MIMG sample_c_b,
+MIMG sample_d, MIMG sample_c_d, ValueType addr_type> {
+ def : SamplePattern <SIsample, sample, addr_type>;
+ def : SampleRectPattern <SIsample, sample, addr_type>;
+ def : SampleArrayPattern <SIsample, sample, addr_type>;
+ def : SampleShadowPattern <SIsample, sample_c, addr_type>;
+ def : SampleShadowArrayPattern <SIsample, sample_c, addr_type>;
+
+ def : SamplePattern <SIsamplel, sample_l, addr_type>;
+ def : SampleArrayPattern <SIsamplel, sample_l, addr_type>;
+ def : SampleShadowPattern <SIsamplel, sample_c_l, addr_type>;
+ def : SampleShadowArrayPattern <SIsamplel, sample_c_l, addr_type>;
+
+ def : SamplePattern <SIsampleb, sample_b, addr_type>;
+ def : SampleArrayPattern <SIsampleb, sample_b, addr_type>;
+ def : SampleShadowPattern <SIsampleb, sample_c_b, addr_type>;
+ def : SampleShadowArrayPattern <SIsampleb, sample_c_b, addr_type>;
+
+ def : SamplePattern <SIsampled, sample_d, addr_type>;
+ def : SampleArrayPattern <SIsampled, sample_d, addr_type>;
+ def : SampleShadowPattern <SIsampled, sample_c_d, addr_type>;
+ def : SampleShadowArrayPattern <SIsampled, sample_c_d, addr_type>;
}
-defm : SamplePatterns<v2i32>;
-defm : SamplePatterns<v4i32>;
-defm : SamplePatterns<v8i32>;
-defm : SamplePatterns<v16i32>;
+defm : SamplePatterns<IMAGE_SAMPLE_V4_V2, IMAGE_SAMPLE_C_V4_V2,
+ IMAGE_SAMPLE_L_V4_V2, IMAGE_SAMPLE_C_L_V4_V2,
+ IMAGE_SAMPLE_B_V4_V2, IMAGE_SAMPLE_C_B_V4_V2,
+ IMAGE_SAMPLE_D_V4_V2, IMAGE_SAMPLE_C_D_V4_V2,
+ v2i32>;
+defm : SamplePatterns<IMAGE_SAMPLE_V4_V4, IMAGE_SAMPLE_C_V4_V4,
+ IMAGE_SAMPLE_L_V4_V4, IMAGE_SAMPLE_C_L_V4_V4,
+ IMAGE_SAMPLE_B_V4_V4, IMAGE_SAMPLE_C_B_V4_V4,
+ IMAGE_SAMPLE_D_V4_V4, IMAGE_SAMPLE_C_D_V4_V4,
+ v4i32>;
+defm : SamplePatterns<IMAGE_SAMPLE_V4_V8, IMAGE_SAMPLE_C_V4_V8,
+ IMAGE_SAMPLE_L_V4_V8, IMAGE_SAMPLE_C_L_V4_V8,
+ IMAGE_SAMPLE_B_V4_V8, IMAGE_SAMPLE_C_B_V4_V8,
+ IMAGE_SAMPLE_D_V4_V8, IMAGE_SAMPLE_C_D_V4_V8,
+ v8i32>;
+defm : SamplePatterns<IMAGE_SAMPLE_V4_V16, IMAGE_SAMPLE_C_V4_V16,
+ IMAGE_SAMPLE_L_V4_V16, IMAGE_SAMPLE_C_L_V4_V16,
+ IMAGE_SAMPLE_B_V4_V16, IMAGE_SAMPLE_C_B_V4_V16,
+ IMAGE_SAMPLE_D_V4_V16, IMAGE_SAMPLE_C_D_V4_V16,
+ v16i32>;
/* int_SI_imageload for texture fetches consuming varying address parameters */
class ImageLoadPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
@@ -1292,23 +1514,46 @@ class ImageLoadArrayPattern<Intrinsic name, MIMG opcode, ValueType addr_type> :
(opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc)
>;
-multiclass ImageLoadPatterns<ValueType addr_type> {
- def : ImageLoadPattern <int_SI_imageload, IMAGE_LOAD_MIP, addr_type>;
- def : ImageLoadArrayPattern <int_SI_imageload, IMAGE_LOAD_MIP, addr_type>;
+class ImageLoadMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
+ (name addr_type:$addr, v32i8:$rsrc, TEX_MSAA),
+ (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc)
+>;
+
+class ImageLoadArrayMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
+ (name addr_type:$addr, v32i8:$rsrc, TEX_ARRAY_MSAA),
+ (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc)
+>;
+
+multiclass ImageLoadPatterns<MIMG opcode, ValueType addr_type> {
+ def : ImageLoadPattern <int_SI_imageload, opcode, addr_type>;
+ def : ImageLoadArrayPattern <int_SI_imageload, opcode, addr_type>;
+}
+
+multiclass ImageLoadMSAAPatterns<MIMG opcode, ValueType addr_type> {
+ def : ImageLoadMSAAPattern <int_SI_imageload, opcode, addr_type>;
+ def : ImageLoadArrayMSAAPattern <int_SI_imageload, opcode, addr_type>;
}
-defm : ImageLoadPatterns<v2i32>;
-defm : ImageLoadPatterns<v4i32>;
+defm : ImageLoadPatterns<IMAGE_LOAD_MIP_V4_V2, v2i32>;
+defm : ImageLoadPatterns<IMAGE_LOAD_MIP_V4_V4, v4i32>;
+
+defm : ImageLoadMSAAPatterns<IMAGE_LOAD_V4_V2, v2i32>;
+defm : ImageLoadMSAAPatterns<IMAGE_LOAD_V4_V4, v4i32>;
/* Image resource information */
def : Pat <
(int_SI_resinfo i32:$mipid, v32i8:$rsrc, imm),
- (IMAGE_GET_RESINFO 0xf, 0, 0, 0, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
+ (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
>;
def : Pat <
(int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY),
- (IMAGE_GET_RESINFO 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
+ (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
+>;
+
+def : Pat <
+ (int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY_MSAA),
+ (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
>;
/********** ============================================ **********/
@@ -1379,22 +1624,30 @@ foreach Index = 0-15 in {
>;
}
-def : Vector1_Build <v1i32, i32, VReg_32>;
-def : Vector2_Build <v2i32, i32>;
-def : Vector2_Build <v2f32, f32>;
-def : Vector4_Build <v4i32, i32>;
-def : Vector4_Build <v4f32, f32>;
-def : Vector8_Build <v8i32, i32>;
-def : Vector8_Build <v8f32, f32>;
-def : Vector16_Build <v16i32, i32>;
-def : Vector16_Build <v16f32, f32>;
-
def : BitConvert <i32, f32, SReg_32>;
def : BitConvert <i32, f32, VReg_32>;
def : BitConvert <f32, i32, SReg_32>;
def : BitConvert <f32, i32, VReg_32>;
+def : BitConvert <i64, f64, VReg_64>;
+
+def : BitConvert <f64, i64, VReg_64>;
+
+def : BitConvert <v2f32, v2i32, VReg_64>;
+def : BitConvert <v2i32, v2f32, VReg_64>;
+def : BitConvert <v2i32, i64, VReg_64>;
+
+def : BitConvert <v4f32, v4i32, VReg_128>;
+def : BitConvert <v4i32, v4f32, VReg_128>;
+def : BitConvert <v4i32, i128, VReg_128>;
+def : BitConvert <i128, v4i32, VReg_128>;
+
+def : BitConvert <v8i32, v32i8, SReg_256>;
+def : BitConvert <v32i8, v8i32, SReg_256>;
+def : BitConvert <v8i32, v32i8, VReg_256>;
+def : BitConvert <v32i8, v8i32, VReg_256>;
+
/********** =================== **********/
/********** Src & Dst modifiers **********/
/********** =================== **********/
@@ -1422,6 +1675,16 @@ def : Pat <
/********** ================== **********/
def : Pat <
+ (SGPRImm<(i32 imm)>:$imm),
+ (S_MOV_B32 imm:$imm)
+>;
+
+def : Pat <
+ (SGPRImm<(f32 fpimm)>:$imm),
+ (S_MOV_B32 fpimm:$imm)
+>;
+
+def : Pat <
(i32 imm:$imm),
(V_MOV_B32_e32 imm:$imm)
>;
@@ -1449,6 +1712,13 @@ def : Pat <
(S_MOV_B32 (i32 (HI32 imm:$imm))), sub1)
>;
+def : Pat <
+ (f64 fpimm:$imm),
+ (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (V_MOV_B32_e32 (f32 (LO32f fpimm:$imm))), sub0),
+ (V_MOV_B32_e32 (f32 (HI32f fpimm:$imm))), sub1)
+>;
+
/********** ===================== **********/
/********** Interpolation Patterns **********/
/********** ===================== **********/
@@ -1483,6 +1753,11 @@ def : Pat<
(V_MUL_F32_e32 $src0, (V_RCP_F32_e32 $src1))
>;
+def : Pat<
+ (fdiv f64:$src0, f64:$src1),
+ (V_MUL_F64 $src0, (V_RCP_F64_e32 $src1), (i64 0))
+>;
+
def : Pat <
(fcos f32:$src0),
(V_COS_F32_e32 (V_MUL_F32_e32 $src0, (V_MOV_B32_e32 CONST.TWO_PI_INV)))
@@ -1521,20 +1796,20 @@ def : Pat <
// 1. Offset as an 8-bit DWORD immediate
def : Pat <
- (int_SI_load_const v16i8:$sbase, IMM8bitDWORD:$offset),
+ (SIload_constant i128:$sbase, IMM8bitDWORD:$offset),
(S_BUFFER_LOAD_DWORD_IMM $sbase, IMM8bitDWORD:$offset)
>;
// 2. Offset loaded in a 32-bit SGPR
def : Pat <
- (int_SI_load_const v16i8:$sbase, imm:$offset),
+ (SIload_constant i128:$sbase, imm:$offset),
(S_BUFFER_LOAD_DWORD_SGPR $sbase, (S_MOV_B32 imm:$offset))
>;
// 3. Offset in a 32-bit VGPR
def : Pat <
- (int_SI_load_const v16i8:$sbase, i32:$voff),
- (BUFFER_LOAD_DWORD 0, 1, 0, 0, 0, 0, $voff, $sbase, 0, 0, 0)
+ (SIload_constant i128:$sbase, i32:$voff),
+ (BUFFER_LOAD_DWORD_OFFEN $sbase, $voff)
>;
// The multiplication scales from [0,1] to the unsigned integer range
@@ -1545,6 +1820,12 @@ def : Pat <
(V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;
+def : Pat <
+ (int_SI_tid),
+ (V_MBCNT_HI_U32_B32_e32 0xffffffff,
+ (V_MBCNT_LO_U32_B32_e64 0xffffffff, 0, 0, 0, 0, 0))
+>;
+
/********** ================== **********/
/********** VOP3 Patterns **********/
/********** ================== **********/
@@ -1554,6 +1835,40 @@ def : Pat <
(V_MAD_F32 $src0, $src1, $src2)
>;
+/********** ======================= **********/
+/********** Load/Store Patterns **********/
+/********** ======================= **********/
+
+class DSReadPat <DS inst, ValueType vt, PatFrag frag> : Pat <
+ (frag i32:$src0),
+ (vt (inst 0, $src0, $src0, $src0, 0, 0))
+>;
+
+def : DSReadPat <DS_READ_I8, i32, sextloadi8_local>;
+def : DSReadPat <DS_READ_U8, i32, az_extloadi8_local>;
+def : DSReadPat <DS_READ_I16, i32, sextloadi16_local>;
+def : DSReadPat <DS_READ_U16, i32, az_extloadi16_local>;
+def : DSReadPat <DS_READ_B32, i32, local_load>;
+def : Pat <
+ (local_load i32:$src0),
+ (i32 (DS_READ_B32 0, $src0, $src0, $src0, 0, 0))
+>;
+
+class DSWritePat <DS inst, ValueType vt, PatFrag frag> : Pat <
+ (frag i32:$src1, i32:$src0),
+ (inst 0, $src0, $src1, $src1, 0, 0)
+>;
+
+def : DSWritePat <DS_WRITE_B8, i32, truncstorei8_local>;
+def : DSWritePat <DS_WRITE_B16, i32, truncstorei16_local>;
+def : DSWritePat <DS_WRITE_B32, i32, local_store>;
+
+def : Pat <(atomic_load_add_local i32:$ptr, i32:$val),
+ (DS_ADD_U32_RTN 0, $ptr, $val, 0, 0)>;
+
+def : Pat <(atomic_load_sub_local i32:$ptr, i32:$val),
+ (DS_SUB_U32_RTN 0, $ptr, $val, 0, 0)>;
+
/********** ================== **********/
/********** SMRD Patterns **********/
/********** ================== **********/
@@ -1581,8 +1896,100 @@ multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> {
defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>;
defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>;
-defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v16i8>;
+defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, i64>;
+defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>;
+defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, i128>;
+defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v4i32>;
defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>;
+defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v8i32>;
+defm : SMRD_Pattern <S_LOAD_DWORDX16_IMM, S_LOAD_DWORDX16_SGPR, v16i32>;
+
+//===----------------------------------------------------------------------===//
+// MUBUF Patterns
+//===----------------------------------------------------------------------===//
+
+multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
+ PatFrag global_ld, PatFrag constant_ld> {
+ def : Pat <
+ (vt (global_ld (add i64:$ptr, (i64 IMM12bit:$offset)))),
+ (Instr_ADDR64 (SI_ADDR64_RSRC (i64 0)), $ptr, (as_i16imm $offset))
+ >;
+
+ def : Pat <
+ (vt (global_ld i64:$ptr)),
+ (Instr_ADDR64 (SI_ADDR64_RSRC (i64 0)), $ptr, 0)
+ >;
+
+ def : Pat <
+ (vt (global_ld (add i64:$ptr, i64:$offset))),
+ (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0)
+ >;
+
+ def : Pat <
+ (vt (constant_ld (add i64:$ptr, i64:$offset))),
+ (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0)
+ >;
+}
+
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32,
+ sextloadi8_global, sextloadi8_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32,
+ az_extloadi8_global, az_extloadi8_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32,
+ sextloadi16_global, sextloadi16_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32,
+ az_extloadi16_global, az_extloadi16_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32,
+ global_load, constant_load>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, i64,
+ global_load, constant_load>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, i64,
+ az_extloadi32_global, az_extloadi32_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, v2i32,
+ global_load, constant_load>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX4_ADDR64, v4i32,
+ global_load, constant_load>;
+
+multiclass MUBUFStore_Pattern <MUBUF Instr, ValueType vt, PatFrag st> {
+
+ def : Pat <
+ (st vt:$value, i64:$ptr),
+ (Instr $value, (SI_ADDR64_RSRC (i64 0)), $ptr, 0)
+ >;
+
+ def : Pat <
+ (st vt:$value, (add i64:$ptr, i64:$offset)),
+ (Instr $value, (SI_ADDR64_RSRC $ptr), $offset, 0)
+ >;
+}
+
+defm : MUBUFStore_Pattern <BUFFER_STORE_BYTE, i32, truncstorei8_global>;
+defm : MUBUFStore_Pattern <BUFFER_STORE_SHORT, i32, truncstorei16_global>;
+defm : MUBUFStore_Pattern <BUFFER_STORE_DWORD, i32, global_store>;
+defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2, i64, global_store>;
+defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2, v2i32, global_store>;
+defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX4, v4i32, global_store>;
+
+//===----------------------------------------------------------------------===//
+// MTBUF Patterns
+//===----------------------------------------------------------------------===//
+
+// TBUFFER_STORE_FORMAT_*, addr64=0
+class MTBUF_StoreResource <ValueType vt, int num_channels, MTBUF opcode> : Pat<
+ (SItbuffer_store i128:$rsrc, vt:$vdata, num_channels, i32:$vaddr,
+ i32:$soffset, imm:$inst_offset, imm:$dfmt,
+ imm:$nfmt, imm:$offen, imm:$idxen,
+ imm:$glc, imm:$slc, imm:$tfe),
+ (opcode
+ $vdata, (as_i16imm $inst_offset), (as_i1imm $offen), (as_i1imm $idxen),
+ (as_i1imm $glc), 0, (as_i8imm $dfmt), (as_i8imm $nfmt), $vaddr, $rsrc,
+ (as_i1imm $slc), (as_i1imm $tfe), $soffset)
+>;
+
+def : MTBUF_StoreResource <i32, 1, TBUFFER_STORE_FORMAT_X>;
+def : MTBUF_StoreResource <v2i32, 2, TBUFFER_STORE_FORMAT_XY>;
+def : MTBUF_StoreResource <v4i32, 3, TBUFFER_STORE_FORMAT_XYZ>;
+def : MTBUF_StoreResource <v4i32, 4, TBUFFER_STORE_FORMAT_XYZW>;
/********** ====================== **********/
/********** Indirect addressing **********/
@@ -1592,25 +1999,25 @@ multiclass SI_INDIRECT_Pattern <ValueType vt, SI_INDIRECT_DST IndDst> {
// 1. Extract with offset
def : Pat<
- (vector_extract vt:$vec, (i64 (zext (add i32:$idx, imm:$off)))),
+ (vector_extract vt:$vec, (add i32:$idx, imm:$off)),
(f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, imm:$off))
>;
// 2. Extract without offset
def : Pat<
- (vector_extract vt:$vec, (i64 (zext i32:$idx))),
+ (vector_extract vt:$vec, i32:$idx),
(f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, 0))
>;
// 3. Insert with offset
def : Pat<
- (vector_insert vt:$vec, f32:$val, (i64 (zext (add i32:$idx, imm:$off)))),
+ (vector_insert vt:$vec, f32:$val, (add i32:$idx, imm:$off)),
(IndDst (IMPLICIT_DEF), $vec, $idx, imm:$off, $val)
>;
// 4. Insert without offset
def : Pat<
- (vector_insert vt:$vec, f32:$val, (i64 (zext i32:$idx))),
+ (vector_insert vt:$vec, f32:$val, i32:$idx),
(IndDst (IMPLICIT_DEF), $vec, $idx, 0, $val)
>;
}
@@ -1634,6 +2041,37 @@ def : Pat<
(V_CMP_U_F32_e64 $src0, $src1)
>;
+//===----------------------------------------------------------------------===//
+// Miscellaneous Patterns
+//===----------------------------------------------------------------------===//
+
+def : Pat <
+ (i64 (trunc i128:$x)),
+ (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ (i32 (EXTRACT_SUBREG $x, sub0)), sub0),
+ (i32 (EXTRACT_SUBREG $x, sub1)), sub1)
+>;
+
+def : Pat <
+ (i32 (trunc i64:$a)),
+ (EXTRACT_SUBREG $a, sub0)
+>;
+
+// V_ADD_I32_e32/S_ADD_I32 produces carry in VCC/SCC. For the vector
+// case, the sgpr-copies pass will fix this to use the vector version.
+def : Pat <
+ (i32 (addc i32:$src0, i32:$src1)),
+ (S_ADD_I32 $src0, $src1)
+>;
+
+def : Pat <
+ (or i64:$a, i64:$b),
+ (INSERT_SUBREG
+ (INSERT_SUBREG (IMPLICIT_DEF),
+ (V_OR_B32_e32 (EXTRACT_SUBREG $a, sub0), (EXTRACT_SUBREG $b, sub0)), sub0),
+ (V_OR_B32_e32 (EXTRACT_SUBREG $a, sub1), (EXTRACT_SUBREG $b, sub1)), sub1)
+>;
+
//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//
diff --git a/lib/Target/R600/SIIntrinsics.td b/lib/Target/R600/SIIntrinsics.td
index 224cd2f608c1..7fcc96452114 100644
--- a/lib/Target/R600/SIIntrinsics.td
+++ b/lib/Target/R600/SIIntrinsics.td
@@ -14,15 +14,35 @@
let TargetPrefix = "SI", isTarget = 1 in {
+ def int_SI_tid : Intrinsic <[llvm_i32_ty], [], [IntrNoMem]>;
def int_SI_packf16 : Intrinsic <[llvm_i32_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
def int_SI_export : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], []>;
- def int_SI_load_const : Intrinsic <[llvm_float_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_SI_vs_load_input : Intrinsic <[llvm_v4f32_ty], [llvm_v16i8_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]> ;
+ def int_SI_load_const : Intrinsic <[llvm_float_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_SI_vs_load_input : Intrinsic <[llvm_v4f32_ty], [llvm_anyint_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]> ;
- class Sample : Intrinsic <[llvm_v4f32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ // Fully-flexible TBUFFER_STORE_FORMAT_* except for the ADDR64 bit, which is not exposed
+ def int_SI_tbuffer_store : Intrinsic <
+ [],
+ [llvm_anyint_ty, // rsrc(SGPR)
+ llvm_anyint_ty, // vdata(VGPR), overloaded for types i32, v2i32, v4i32
+ llvm_i32_ty, // num_channels(imm), selects opcode suffix: 1=X, 2=XY, 3=XYZ, 4=XYZW
+ llvm_i32_ty, // vaddr(VGPR)
+ llvm_i32_ty, // soffset(SGPR)
+ llvm_i32_ty, // inst_offset(imm)
+ llvm_i32_ty, // dfmt(imm)
+ llvm_i32_ty, // nfmt(imm)
+ llvm_i32_ty, // offen(imm)
+ llvm_i32_ty, // idxen(imm)
+ llvm_i32_ty, // glc(imm)
+ llvm_i32_ty, // slc(imm)
+ llvm_i32_ty], // tfe(imm)
+ []>;
+
+ class Sample : Intrinsic <[llvm_v4f32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
def int_SI_sample : Sample;
def int_SI_sampleb : Sample;
+ def int_SI_sampled : Sample;
def int_SI_samplel : Sample;
def int_SI_imageload : Intrinsic <[llvm_v4i32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
diff --git a/lib/Target/R600/SILowerControlFlow.cpp b/lib/Target/R600/SILowerControlFlow.cpp
index 2b60eb9fb375..958763dffc22 100644
--- a/lib/Target/R600/SILowerControlFlow.cpp
+++ b/lib/Target/R600/SILowerControlFlow.cpp
@@ -91,8 +91,7 @@ private:
public:
SILowerControlFlowPass(TargetMachine &tm) :
- MachineFunctionPass(ID), TRI(tm.getRegisterInfo()),
- TII(tm.getInstrInfo()) { }
+ MachineFunctionPass(ID), TRI(0), TII(0) { }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -378,10 +377,13 @@ void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {
unsigned Dst = MI.getOperand(0).getReg();
unsigned Vec = MI.getOperand(2).getReg();
unsigned Off = MI.getOperand(4).getImm();
+ unsigned SubReg = TRI->getSubReg(Vec, AMDGPU::sub0);
+ if (!SubReg)
+ SubReg = Vec;
- MachineInstr *MovRel =
+ MachineInstr *MovRel =
BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
- .addReg(TRI->getSubReg(Vec, AMDGPU::sub0) + Off)
+ .addReg(SubReg + Off)
.addReg(AMDGPU::M0, RegState::Implicit)
.addReg(Vec, RegState::Implicit);
@@ -396,10 +398,13 @@ void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {
unsigned Dst = MI.getOperand(0).getReg();
unsigned Off = MI.getOperand(4).getImm();
unsigned Val = MI.getOperand(5).getReg();
+ unsigned SubReg = TRI->getSubReg(Dst, AMDGPU::sub0);
+ if (!SubReg)
+ SubReg = Dst;
MachineInstr *MovRel =
BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
- .addReg(TRI->getSubReg(Dst, AMDGPU::sub0) + Off, RegState::Define)
+ .addReg(SubReg + Off, RegState::Define)
.addReg(Val)
.addReg(AMDGPU::M0, RegState::Implicit)
.addReg(Dst, RegState::Implicit);
@@ -408,8 +413,12 @@ void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {
}
bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
+ TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
+ SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
bool HaveKill = false;
+ bool NeedM0 = false;
bool NeedWQM = false;
unsigned Depth = 0;
@@ -474,6 +483,7 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
IndirectSrc(MI);
break;
+ case AMDGPU::SI_INDIRECT_DST_V1:
case AMDGPU::SI_INDIRECT_DST_V2:
case AMDGPU::SI_INDIRECT_DST_V4:
case AMDGPU::SI_INDIRECT_DST_V8:
@@ -481,6 +491,14 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
IndirectDst(MI);
break;
+ case AMDGPU::DS_READ_B32:
+ NeedWQM = true;
+ // Fall through
+ case AMDGPU::DS_WRITE_B32:
+ case AMDGPU::DS_ADD_U32_RTN:
+ NeedM0 = true;
+ break;
+
case AMDGPU::V_INTERP_P1_F32:
case AMDGPU::V_INTERP_P2_F32:
case AMDGPU::V_INTERP_MOV_F32:
@@ -491,7 +509,15 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
}
}
- if (NeedWQM) {
+ if (NeedM0) {
+ MachineBasicBlock &MBB = MF.front();
+ // Initialize M0 to a value that won't cause LDS access to be discarded
+ // due to offset clamping
+ BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_MOV_B32),
+ AMDGPU::M0).addImm(0xffffffff);
+ }
+
+ if (NeedWQM && MFI->ShaderType != ShaderType::COMPUTE) {
MachineBasicBlock &MBB = MF.front();
BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
AMDGPU::EXEC).addReg(AMDGPU::EXEC);
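The M0 initialization inserted above writes 0xffffffff so that LDS offset clamping never discards an access. A loose mental model of that guard, with the clamping rule assumed rather than taken from the patch:

#include <cstdint>

// Assumed model: an LDS access is discarded when its byte offset exceeds the
// limit held in M0, so an all-ones M0 never rejects a 32-bit offset.
static bool ldsAccessDiscarded(uint32_t Offset, uint32_t M0) {
  return Offset > M0;
}

int main() {
  const uint32_t M0 = 0xffffffffu;  // value written by the inserted S_MOV_B32
  return ldsAccessDiscarded(0x1234u, M0) ? 1 : 0;  // always 0 with M0 all-ones
}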
diff --git a/lib/Target/R600/SIMachineFunctionInfo.cpp b/lib/Target/R600/SIMachineFunctionInfo.cpp
index ee0e30755f01..071f9fa43a16 100644
--- a/lib/Target/R600/SIMachineFunctionInfo.cpp
+++ b/lib/Target/R600/SIMachineFunctionInfo.cpp
@@ -13,6 +13,10 @@
using namespace llvm;
+
+// Pin the vtable to this file.
+void SIMachineFunctionInfo::anchor() {}
+
SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
: AMDGPUMachineFunction(MF),
PSInputAddr(0) { }
diff --git a/lib/Target/R600/SIMachineFunctionInfo.h b/lib/Target/R600/SIMachineFunctionInfo.h
index 6da9f7f9a14d..2f1961cafdb4 100644
--- a/lib/Target/R600/SIMachineFunctionInfo.h
+++ b/lib/Target/R600/SIMachineFunctionInfo.h
@@ -22,6 +22,7 @@ namespace llvm {
/// This class keeps track of the SPI_SP_INPUT_ADDR config register, which
/// tells the hardware which interpolation parameters to load.
class SIMachineFunctionInfo : public AMDGPUMachineFunction {
+ virtual void anchor();
public:
SIMachineFunctionInfo(const MachineFunction &MF);
unsigned PSInputAddr;
diff --git a/lib/Target/R600/SIRegisterInfo.cpp b/lib/Target/R600/SIRegisterInfo.cpp
index 99278ae8dceb..ed0bbaffae60 100644
--- a/lib/Target/R600/SIRegisterInfo.cpp
+++ b/lib/Target/R600/SIRegisterInfo.cpp
@@ -15,18 +15,21 @@
#include "SIRegisterInfo.h"
#include "AMDGPUTargetMachine.h"
+#include "SIInstrInfo.h"
using namespace llvm;
-SIRegisterInfo::SIRegisterInfo(AMDGPUTargetMachine &tm,
- const TargetInstrInfo &tii)
-: AMDGPURegisterInfo(tm, tii),
- TM(tm),
- TII(tii)
+SIRegisterInfo::SIRegisterInfo(AMDGPUTargetMachine &tm)
+: AMDGPURegisterInfo(tm),
+ TM(tm)
{ }
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
+ Reserved.set(AMDGPU::EXEC);
+ Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(TM.getInstrInfo());
+ TII->reserveIndirectRegisters(Reserved, MF);
return Reserved;
}
@@ -51,3 +54,78 @@ const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass(
case MVT::i32: return &AMDGPU::VReg_32RegClass;
}
}
+
+unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
+ return getEncodingValue(Reg);
+}
+
+const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
+ assert(!TargetRegisterInfo::isVirtualRegister(Reg));
+
+ const TargetRegisterClass *BaseClasses[] = {
+ &AMDGPU::VReg_32RegClass,
+ &AMDGPU::SReg_32RegClass,
+ &AMDGPU::VReg_64RegClass,
+ &AMDGPU::SReg_64RegClass,
+ &AMDGPU::SReg_128RegClass,
+ &AMDGPU::SReg_256RegClass
+ };
+
+ for (unsigned i = 0, e = sizeof(BaseClasses) /
+ sizeof(const TargetRegisterClass*); i != e; ++i) {
+ if (BaseClasses[i]->contains(Reg)) {
+ return BaseClasses[i];
+ }
+ }
+ return NULL;
+}
+
+bool SIRegisterInfo::isSGPRClass(const TargetRegisterClass *RC) const {
+ if (!RC) {
+ return false;
+ }
+ return !hasVGPRs(RC);
+}
+
+bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
+ return getCommonSubClass(&AMDGPU::VReg_32RegClass, RC) ||
+ getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
+ getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
+ getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
+ getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) ||
+ getCommonSubClass(&AMDGPU::VReg_512RegClass, RC);
+}
+
+const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
+ const TargetRegisterClass *SRC) const {
+ if (hasVGPRs(SRC)) {
+ return SRC;
+ } else if (SRC == &AMDGPU::SCCRegRegClass) {
+ return &AMDGPU::VCCRegRegClass;
+ } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
+ return &AMDGPU::VReg_32RegClass;
+ } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
+ return &AMDGPU::VReg_64RegClass;
+ } else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
+ return &AMDGPU::VReg_128RegClass;
+ } else if (getCommonSubClass(SRC, &AMDGPU::SReg_256RegClass)) {
+ return &AMDGPU::VReg_256RegClass;
+ } else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) {
+ return &AMDGPU::VReg_512RegClass;
+ }
+ return NULL;
+}
+
+const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
+ const TargetRegisterClass *RC, unsigned SubIdx) const {
+ if (SubIdx == AMDGPU::NoSubRegister)
+ return RC;
+
+ // If this register has a sub-register, we can safely assume it is a 32-bit
+  // register, because all of SI's sub-registers are 32-bit.
+ if (isSGPRClass(RC)) {
+ return &AMDGPU::SGPR_32RegClass;
+ } else {
+ return &AMDGPU::VGPR_32RegClass;
+ }
+}
diff --git a/lib/Target/R600/SIRegisterInfo.h b/lib/Target/R600/SIRegisterInfo.h
index caec22841345..8148f7fa476a 100644
--- a/lib/Target/R600/SIRegisterInfo.h
+++ b/lib/Target/R600/SIRegisterInfo.h
@@ -21,13 +21,11 @@
namespace llvm {
class AMDGPUTargetMachine;
-class TargetInstrInfo;
struct SIRegisterInfo : public AMDGPURegisterInfo {
AMDGPUTargetMachine &TM;
- const TargetInstrInfo &TII;
- SIRegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii);
+ SIRegisterInfo(AMDGPUTargetMachine &tm);
virtual BitVector getReservedRegs(const MachineFunction &MF) const;
@@ -43,6 +41,28 @@ struct SIRegisterInfo : public AMDGPURegisterInfo {
/// \brief get the register class of the specified type to use in the
/// CFGStructurizer
virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const;
+
+ virtual unsigned getHWRegIndex(unsigned Reg) const;
+
+ /// \brief Return the 'base' register class for this register.
+  /// e.g. SGPR0 => SReg_32, VGPR0 => VReg_32, SGPR0_SGPR1 => SReg_64, etc.
+ const TargetRegisterClass *getPhysRegClass(unsigned Reg) const;
+
+ /// \returns true if this class contains only SGPR registers
+ bool isSGPRClass(const TargetRegisterClass *RC) const;
+
+ /// \returns true if this class contains VGPR registers.
+ bool hasVGPRs(const TargetRegisterClass *RC) const;
+
+ /// \returns A VGPR reg class with the same width as \p SRC
+ const TargetRegisterClass *getEquivalentVGPRClass(
+ const TargetRegisterClass *SRC) const;
+
+ /// \returns The register class that is used for a sub-register of \p RC for
+ /// the given \p SubIdx. If \p SubIdx equals NoSubRegister, \p RC will
+ /// be returned.
+ const TargetRegisterClass *getSubRegClass(const TargetRegisterClass *RC,
+ unsigned SubIdx) const;
};
} // End namespace llvm
diff --git a/lib/Target/R600/SIRegisterInfo.td b/lib/Target/R600/SIRegisterInfo.td
index 244d4c00348d..49bdbc9fdbfa 100644
--- a/lib/Target/R600/SIRegisterInfo.td
+++ b/lib/Target/R600/SIRegisterInfo.td
@@ -43,7 +43,7 @@ def SGPR_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
(add (sequence "SGPR%u", 0, 101))>;
// SGPR 64-bit registers
-def SGPR_64 : RegisterTuples<[sub0, sub1],
+def SGPR_64Regs : RegisterTuples<[sub0, sub1],
[(add (decimate (trunc SGPR_32, 101), 2)),
(add (decimate (shl SGPR_32, 1), 2))]>;
@@ -153,15 +153,17 @@ def SReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
(add SGPR_32, M0Reg)
>;
-def SReg_64 : RegisterClass<"AMDGPU", [i64, i1], 64,
- (add SGPR_64, VCCReg, EXECReg)
+def SGPR_64 : RegisterClass<"AMDGPU", [v2i32, i64], 64, (add SGPR_64Regs)>;
+
+def SReg_64 : RegisterClass<"AMDGPU", [v2i32, i64, i1], 64,
+ (add SGPR_64Regs, VCCReg, EXECReg)
>;
-def SReg_128 : RegisterClass<"AMDGPU", [v16i8, i128], 128, (add SGPR_128)>;
+def SReg_128 : RegisterClass<"AMDGPU", [i128, v4i32], 128, (add SGPR_128)>;
-def SReg_256 : RegisterClass<"AMDGPU", [v32i8], 256, (add SGPR_256)>;
+def SReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add SGPR_256)>;
-def SReg_512 : RegisterClass<"AMDGPU", [v64i8], 512, (add SGPR_512)>;
+def SReg_512 : RegisterClass<"AMDGPU", [v64i8, v16i32], 512, (add SGPR_512)>;
 // Register class for all vector registers (VGPRs + Interpolation Registers)
def VReg_32 : RegisterClass<"AMDGPU", [i32, f32, v1i32], 32, (add VGPR_32)>;
@@ -172,9 +174,9 @@ def VReg_96 : RegisterClass<"AMDGPU", [untyped], 96, (add VGPR_96)> {
let Size = 96;
}
-def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32], 128, (add VGPR_128)>;
+def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, i128], 128, (add VGPR_128)>;
-def VReg_256 : RegisterClass<"AMDGPU", [v8i32, v8f32], 256, (add VGPR_256)>;
+def VReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add VGPR_256)>;
def VReg_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 512, (add VGPR_512)>;
diff --git a/lib/Target/R600/SITypeRewriter.cpp b/lib/Target/R600/SITypeRewriter.cpp
new file mode 100644
index 000000000000..f194d8b56dc6
--- /dev/null
+++ b/lib/Target/R600/SITypeRewriter.cpp
@@ -0,0 +1,162 @@
+//===-- SITypeRewriter.cpp - Remove unwanted types ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass performs the following type substitution on all
+/// non-compute shaders:
+///
+/// v16i8 => i128
+/// - v16i8 is used for constant memory resource descriptors. This type is
+/// legal for some compute APIs, and we don't want to declare it as legal
+/// in the backend, because we want the legalizer to expand all v16i8
+/// operations.
+/// v1* => *
+/// - Having v1* types complicates the legalizer and we can easily replace
+///     them with the element type.
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/InstVisitor.h"
+
+using namespace llvm;
+
+namespace {
+
+class SITypeRewriter : public FunctionPass,
+ public InstVisitor<SITypeRewriter> {
+
+ static char ID;
+ Module *Mod;
+ Type *v16i8;
+ Type *i128;
+
+public:
+ SITypeRewriter() : FunctionPass(ID) { }
+ virtual bool doInitialization(Module &M);
+ virtual bool runOnFunction(Function &F);
+ virtual const char *getPassName() const {
+ return "SI Type Rewriter";
+ }
+ void visitLoadInst(LoadInst &I);
+ void visitCallInst(CallInst &I);
+ void visitBitCast(BitCastInst &I);
+};
+
+} // End anonymous namespace
+
+char SITypeRewriter::ID = 0;
+
+bool SITypeRewriter::doInitialization(Module &M) {
+ Mod = &M;
+ v16i8 = VectorType::get(Type::getInt8Ty(M.getContext()), 16);
+ i128 = Type::getIntNTy(M.getContext(), 128);
+ return false;
+}
+
+bool SITypeRewriter::runOnFunction(Function &F) {
+ AttributeSet Set = F.getAttributes();
+ Attribute A = Set.getAttribute(AttributeSet::FunctionIndex, "ShaderType");
+
+ unsigned ShaderType = ShaderType::COMPUTE;
+ if (A.isStringAttribute()) {
+ StringRef Str = A.getValueAsString();
+ Str.getAsInteger(0, ShaderType);
+ }
+ if (ShaderType != ShaderType::COMPUTE) {
+ visit(F);
+ }
+
+ visit(F);
+
+ return false;
+}
+
+void SITypeRewriter::visitLoadInst(LoadInst &I) {
+ Value *Ptr = I.getPointerOperand();
+ Type *PtrTy = Ptr->getType();
+ Type *ElemTy = PtrTy->getPointerElementType();
+ IRBuilder<> Builder(&I);
+ if (ElemTy == v16i8) {
+ Value *BitCast = Builder.CreateBitCast(Ptr, Type::getIntNPtrTy(I.getContext(), 128, 2));
+ LoadInst *Load = Builder.CreateLoad(BitCast);
+ SmallVector <std::pair<unsigned, MDNode*>, 8> MD;
+ I.getAllMetadataOtherThanDebugLoc(MD);
+ for (unsigned i = 0, e = MD.size(); i != e; ++i) {
+ Load->setMetadata(MD[i].first, MD[i].second);
+ }
+ Value *BitCastLoad = Builder.CreateBitCast(Load, I.getType());
+ I.replaceAllUsesWith(BitCastLoad);
+ I.eraseFromParent();
+ }
+}
+
+void SITypeRewriter::visitCallInst(CallInst &I) {
+ IRBuilder<> Builder(&I);
+ SmallVector <Value*, 8> Args;
+ SmallVector <Type*, 8> Types;
+ bool NeedToReplace = false;
+ Function *F = I.getCalledFunction();
+ std::string Name = F->getName().str();
+ for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
+ Value *Arg = I.getArgOperand(i);
+ if (Arg->getType() == v16i8) {
+ Args.push_back(Builder.CreateBitCast(Arg, i128));
+ Types.push_back(i128);
+ NeedToReplace = true;
+ Name = Name + ".i128";
+ } else if (Arg->getType()->isVectorTy() &&
+ Arg->getType()->getVectorNumElements() == 1 &&
+ Arg->getType()->getVectorElementType() ==
+ Type::getInt32Ty(I.getContext())){
+ Type *ElementTy = Arg->getType()->getVectorElementType();
+ std::string TypeName = "i32";
+ InsertElementInst *Def = dyn_cast<InsertElementInst>(Arg);
+ assert(Def);
+ Args.push_back(Def->getOperand(1));
+ Types.push_back(ElementTy);
+ std::string VecTypeName = "v1" + TypeName;
+ Name = Name.replace(Name.find(VecTypeName), VecTypeName.length(), TypeName);
+ NeedToReplace = true;
+ } else {
+ Args.push_back(Arg);
+ Types.push_back(Arg->getType());
+ }
+ }
+
+ if (!NeedToReplace) {
+ return;
+ }
+ Function *NewF = Mod->getFunction(Name);
+ if (!NewF) {
+ NewF = Function::Create(FunctionType::get(F->getReturnType(), Types, false), GlobalValue::ExternalLinkage, Name, Mod);
+ NewF->setAttributes(F->getAttributes());
+ }
+ I.replaceAllUsesWith(Builder.CreateCall(NewF, Args));
+ I.eraseFromParent();
+}
+
+void SITypeRewriter::visitBitCast(BitCastInst &I) {
+ IRBuilder<> Builder(&I);
+ if (I.getDestTy() != i128) {
+ return;
+ }
+
+ if (BitCastInst *Op = dyn_cast<BitCastInst>(I.getOperand(0))) {
+ if (Op->getSrcTy() == i128) {
+ I.replaceAllUsesWith(Op->getOperand(0));
+ I.eraseFromParent();
+ }
+ }
+}
+
+FunctionPass *llvm::createSITypeRewriter() {
+ return new SITypeRewriter();
+}
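visitCallInst above rewrites intrinsic call sites when it retypes their arguments: a v16i8 argument becomes i128 and appends ".i128" to the callee name, while a v1i32 argument is scalarized and the "v1i32" fragment of the name is replaced by "i32". A small sketch of just that string rewriting, using plain std::string; the intrinsic names below are only examples, not taken from the patch:

#include <cassert>
#include <string>

// Mimic the two renamings performed by visitCallInst.
static std::string mangleForI128(std::string Name) {
  return Name + ".i128";                                       // v16i8 -> i128
}

static std::string scalarizeV1(std::string Name) {
  const std::string Vec = "v1i32", Scalar = "i32";
  return Name.replace(Name.find(Vec), Vec.length(), Scalar);   // v1i32 -> i32
}

int main() {
  assert(mangleForI128("llvm.SI.vs.load.input") == "llvm.SI.vs.load.input.i128");
  assert(scalarizeV1("llvm.SI.foo.v1i32") == "llvm.SI.foo.i32");
  return 0;
}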
diff --git a/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp b/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
index 46b1f18c6263..f437564f4b84 100644
--- a/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
+++ b/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "AMDGPU.h"
+#include "AMDGPUTargetMachine.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
diff --git a/lib/Target/Sparc/CMakeLists.txt b/lib/Target/Sparc/CMakeLists.txt
index efb10db4c0a7..6339394eab65 100644
--- a/lib/Target/Sparc/CMakeLists.txt
+++ b/lib/Target/Sparc/CMakeLists.txt
@@ -2,6 +2,7 @@ set(LLVM_TARGET_DEFINITIONS Sparc.td)
tablegen(LLVM SparcGenRegisterInfo.inc -gen-register-info)
tablegen(LLVM SparcGenInstrInfo.inc -gen-instr-info)
+tablegen(LLVM SparcGenCodeEmitter.inc -gen-emitter)
tablegen(LLVM SparcGenAsmWriter.inc -gen-asm-writer)
tablegen(LLVM SparcGenDAGISel.inc -gen-dag-isel)
tablegen(LLVM SparcGenSubtargetInfo.inc -gen-subtarget)
@@ -10,7 +11,6 @@ add_public_tablegen_target(SparcCommonTableGen)
add_llvm_target(SparcCodeGen
DelaySlotFiller.cpp
- FPMover.cpp
SparcAsmPrinter.cpp
SparcInstrInfo.cpp
SparcISelDAGToDAG.cpp
@@ -21,9 +21,11 @@ add_llvm_target(SparcCodeGen
SparcSubtarget.cpp
SparcTargetMachine.cpp
SparcSelectionDAGInfo.cpp
+ SparcJITInfo.cpp
+ SparcCodeEmitter.cpp
)
-add_dependencies(LLVMSparcCodeGen intrinsics_gen)
+add_dependencies(LLVMSparcCodeGen SparcCommonTableGen intrinsics_gen)
add_subdirectory(TargetInfo)
add_subdirectory(MCTargetDesc)
diff --git a/lib/Target/Sparc/DelaySlotFiller.cpp b/lib/Target/Sparc/DelaySlotFiller.cpp
index 6123773d5f4b..9a0466aa692d 100644
--- a/lib/Target/Sparc/DelaySlotFiller.cpp
+++ b/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -14,6 +14,7 @@
#define DEBUG_TYPE "delay-slot-filler"
#include "Sparc.h"
+#include "SparcSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -39,11 +40,13 @@ namespace {
/// layout, etc.
///
TargetMachine &TM;
- const TargetInstrInfo *TII;
+ const SparcSubtarget *Subtarget;
static char ID;
- Filler(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { }
+ Filler(TargetMachine &tm)
+ : MachineFunctionPass(ID), TM(tm),
+ Subtarget(&TM.getSubtarget<SparcSubtarget>()) {
+ }
virtual const char *getPassName() const {
return "SPARC Delay Slot Filler";
@@ -61,8 +64,9 @@ namespace {
bool isDelayFiller(MachineBasicBlock &MBB,
MachineBasicBlock::iterator candidate);
- void insertCallUses(MachineBasicBlock::iterator MI,
- SmallSet<unsigned, 32>& RegUses);
+ void insertCallDefsUses(MachineBasicBlock::iterator MI,
+ SmallSet<unsigned, 32>& RegDefs,
+ SmallSet<unsigned, 32>& RegUses);
void insertDefsUses(MachineBasicBlock::iterator MI,
SmallSet<unsigned, 32>& RegDefs,
@@ -81,6 +85,9 @@ namespace {
bool needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize);
+ bool tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI);
+
};
char Filler::ID = 0;
} // end of anonymous namespace
@@ -99,29 +106,54 @@ FunctionPass *llvm::createSparcDelaySlotFillerPass(TargetMachine &tm) {
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
- for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
- if (I->hasDelaySlot()) {
- MachineBasicBlock::iterator D = MBB.end();
- MachineBasicBlock::iterator J = I;
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+
+ for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
+ MachineBasicBlock::iterator MI = I;
+ ++I;
- if (!DisableDelaySlotFiller)
- D = findDelayInstr(MBB, I);
+ // If MI is restore, try combining it with previous inst.
+ if (!DisableDelaySlotFiller &&
+ (MI->getOpcode() == SP::RESTORErr
+ || MI->getOpcode() == SP::RESTOREri)) {
+ Changed |= tryCombineRestoreWithPrevInst(MBB, MI);
+ continue;
+ }
- ++FilledSlots;
+ if (!Subtarget->isV9() &&
+ (MI->getOpcode() == SP::FCMPS || MI->getOpcode() == SP::FCMPD
+ || MI->getOpcode() == SP::FCMPQ)) {
+ BuildMI(MBB, I, MI->getDebugLoc(), TII->get(SP::NOP));
Changed = true;
+ continue;
+ }
+
+ // If MI has no delay slot, skip.
+ if (!MI->hasDelaySlot())
+ continue;
+
+ MachineBasicBlock::iterator D = MBB.end();
+
+ if (!DisableDelaySlotFiller)
+ D = findDelayInstr(MBB, MI);
+
+ ++FilledSlots;
+ Changed = true;
+
+ if (D == MBB.end())
+ BuildMI(MBB, I, MI->getDebugLoc(), TII->get(SP::NOP));
+ else
+ MBB.splice(I, &MBB, D);
- if (D == MBB.end())
- BuildMI(MBB, ++J, I->getDebugLoc(), TII->get(SP::NOP));
- else
- MBB.splice(++J, &MBB, D);
- unsigned structSize = 0;
- if (needsUnimp(I, structSize)) {
- MachineBasicBlock::iterator J = I;
- ++J; //skip the delay filler.
- BuildMI(MBB, ++J, I->getDebugLoc(),
- TII->get(SP::UNIMP)).addImm(structSize);
- }
+ unsigned structSize = 0;
+ if (needsUnimp(MI, structSize)) {
+ MachineBasicBlock::iterator J = MI;
+ ++J; // skip the delay filler.
+ assert (J != MBB.end() && "MI needs a delay instruction.");
+ BuildMI(MBB, ++J, MI->getDebugLoc(),
+ TII->get(SP::UNIMP)).addImm(structSize);
}
+ }
return Changed;
}
@@ -134,28 +166,34 @@ Filler::findDelayInstr(MachineBasicBlock &MBB,
bool sawLoad = false;
bool sawStore = false;
- MachineBasicBlock::iterator I = slot;
+ if (slot == MBB.begin())
+ return MBB.end();
- if (slot->getOpcode() == SP::RET)
+ if (slot->getOpcode() == SP::RET || slot->getOpcode() == SP::TLS_CALL)
return MBB.end();
if (slot->getOpcode() == SP::RETL) {
- --I;
- if (I->getOpcode() != SP::RESTORErr)
- return MBB.end();
- //change retl to ret
- slot->setDesc(TII->get(SP::RET));
- return I;
+ MachineBasicBlock::iterator J = slot;
+ --J;
+
+ if (J->getOpcode() == SP::RESTORErr
+ || J->getOpcode() == SP::RESTOREri) {
+ // change retl to ret.
+ slot->setDesc(TM.getInstrInfo()->get(SP::RET));
+ return J;
+ }
}
- //Call's delay filler can def some of call's uses.
+ // Call's delay filler can def some of call's uses.
if (slot->isCall())
- insertCallUses(slot, RegUses);
+ insertCallDefsUses(slot, RegDefs, RegUses);
else
insertDefsUses(slot, RegDefs, RegUses);
bool done = false;
+ MachineBasicBlock::iterator I = slot;
+
while (!done) {
done = (I == MBB.begin());
@@ -216,12 +254,12 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
unsigned Reg = MO.getReg();
if (MO.isDef()) {
- //check whether Reg is defined or used before delay slot.
+ // check whether Reg is defined or used before delay slot.
if (IsRegInSet(RegDefs, Reg) || IsRegInSet(RegUses, Reg))
return true;
}
if (MO.isUse()) {
- //check whether Reg is defined before delay slot.
+ // check whether Reg is defined before delay slot.
if (IsRegInSet(RegDefs, Reg))
return true;
}
@@ -230,9 +268,12 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
}
-void Filler::insertCallUses(MachineBasicBlock::iterator MI,
- SmallSet<unsigned, 32>& RegUses)
+void Filler::insertCallDefsUses(MachineBasicBlock::iterator MI,
+ SmallSet<unsigned, 32>& RegDefs,
+ SmallSet<unsigned, 32>& RegUses)
{
+  // Call defines o7, which is visible to the instruction in the delay slot.
+ RegDefs.insert(SP::O7);
switch(MI->getOpcode()) {
default: llvm_unreachable("Unknown opcode.");
@@ -255,7 +296,7 @@ void Filler::insertCallUses(MachineBasicBlock::iterator MI,
}
}
-//Insert Defs and Uses of MI into the sets RegDefs and RegUses.
+// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
SmallSet<unsigned, 32>& RegDefs,
SmallSet<unsigned, 32>& RegUses)
@@ -270,13 +311,17 @@ void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
continue;
if (MO.isDef())
RegDefs.insert(Reg);
- if (MO.isUse())
+ if (MO.isUse()) {
+ // Implicit register uses of retl are return values and
+ // retl does not use them.
+ if (MO.isImplicit() && MI->getOpcode() == SP::RETL)
+ continue;
RegUses.insert(Reg);
-
+ }
}
}
-//returns true if the Reg or its alias is in the RegSet.
+// returns true if the Reg or its alias is in the RegSet.
bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg)
{
// Check Reg and all aliased Registers.
@@ -310,6 +355,7 @@ bool Filler::needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize)
case SP::CALL: structSizeOpNum = 1; break;
case SP::JMPLrr:
case SP::JMPLri: structSizeOpNum = 2; break;
+ case SP::TLS_CALL: return false;
}
const MachineOperand &MO = I->getOperand(structSizeOpNum);
@@ -318,3 +364,142 @@ bool Filler::needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize)
StructSize = MO.getImm();
return true;
}
+
+static bool combineRestoreADD(MachineBasicBlock::iterator RestoreMI,
+ MachineBasicBlock::iterator AddMI,
+ const TargetInstrInfo *TII)
+{
+ // Before: add <op0>, <op1>, %i[0-7]
+ // restore %g0, %g0, %i[0-7]
+ //
+ // After : restore <op0>, <op1>, %o[0-7]
+
+ unsigned reg = AddMI->getOperand(0).getReg();
+ if (reg < SP::I0 || reg > SP::I7)
+ return false;
+
+ // Erase RESTORE.
+ RestoreMI->eraseFromParent();
+
+ // Change ADD to RESTORE.
+ AddMI->setDesc(TII->get((AddMI->getOpcode() == SP::ADDrr)
+ ? SP::RESTORErr
+ : SP::RESTOREri));
+
+ // Map the destination register.
+ AddMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
+
+ return true;
+}
+
+static bool combineRestoreOR(MachineBasicBlock::iterator RestoreMI,
+ MachineBasicBlock::iterator OrMI,
+ const TargetInstrInfo *TII)
+{
+ // Before: or <op0>, <op1>, %i[0-7]
+ // restore %g0, %g0, %i[0-7]
+  //         where <op0> or <op1> is zero.
+ //
+ // After : restore <op0>, <op1>, %o[0-7]
+
+ unsigned reg = OrMI->getOperand(0).getReg();
+ if (reg < SP::I0 || reg > SP::I7)
+ return false;
+
+ // check whether it is a copy.
+ if (OrMI->getOpcode() == SP::ORrr
+ && OrMI->getOperand(1).getReg() != SP::G0
+ && OrMI->getOperand(2).getReg() != SP::G0)
+ return false;
+
+ if (OrMI->getOpcode() == SP::ORri
+ && OrMI->getOperand(1).getReg() != SP::G0
+ && (!OrMI->getOperand(2).isImm() || OrMI->getOperand(2).getImm() != 0))
+ return false;
+
+ // Erase RESTORE.
+ RestoreMI->eraseFromParent();
+
+ // Change OR to RESTORE.
+ OrMI->setDesc(TII->get((OrMI->getOpcode() == SP::ORrr)
+ ? SP::RESTORErr
+ : SP::RESTOREri));
+
+ // Map the destination register.
+ OrMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
+
+ return true;
+}
+
+static bool combineRestoreSETHIi(MachineBasicBlock::iterator RestoreMI,
+ MachineBasicBlock::iterator SetHiMI,
+ const TargetInstrInfo *TII)
+{
+ // Before: sethi imm3, %i[0-7]
+ // restore %g0, %g0, %g0
+ //
+ // After : restore %g0, (imm3<<10), %o[0-7]
+
+ unsigned reg = SetHiMI->getOperand(0).getReg();
+ if (reg < SP::I0 || reg > SP::I7)
+ return false;
+
+ if (!SetHiMI->getOperand(1).isImm())
+ return false;
+
+ int64_t imm = SetHiMI->getOperand(1).getImm();
+
+ // Is it a 3 bit immediate?
+ if (!isInt<3>(imm))
+ return false;
+
+ // Make it a 13 bit immediate.
+ imm = (imm << 10) & 0x1FFF;
+
+ assert(RestoreMI->getOpcode() == SP::RESTORErr);
+
+ RestoreMI->setDesc(TII->get(SP::RESTOREri));
+
+ RestoreMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
+ RestoreMI->getOperand(1).setReg(SP::G0);
+ RestoreMI->getOperand(2).ChangeToImmediate(imm);
+
+
+ // Erase the original SETHI.
+ SetHiMI->eraseFromParent();
+
+ return true;
+}
+
+bool Filler::tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI)
+{
+ // No previous instruction.
+ if (MBBI == MBB.begin())
+ return false;
+
+ // assert that MBBI is a "restore %g0, %g0, %g0".
+ assert(MBBI->getOpcode() == SP::RESTORErr
+ && MBBI->getOperand(0).getReg() == SP::G0
+ && MBBI->getOperand(1).getReg() == SP::G0
+ && MBBI->getOperand(2).getReg() == SP::G0);
+
+ MachineBasicBlock::iterator PrevInst = MBBI; --PrevInst;
+
+ // It cannot combine with a delay filler.
+ if (isDelayFiller(MBB, PrevInst))
+ return false;
+
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+
+ switch (PrevInst->getOpcode()) {
+ default: break;
+ case SP::ADDrr:
+ case SP::ADDri: return combineRestoreADD(MBBI, PrevInst, TII); break;
+ case SP::ORrr:
+ case SP::ORri: return combineRestoreOR(MBBI, PrevInst, TII); break;
+ case SP::SETHIi: return combineRestoreSETHIi(MBBI, PrevInst, TII); break;
+ }
+ // It cannot combine with the previous instruction.
+ return false;
+}
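The three combine* helpers above fold a leaf-procedure "restore %g0, %g0, %g0" into the preceding add/or/sethi by retargeting the result from an %i register to the matching %o register and, for sethi, by widening the 3-bit immediate into the 13-bit simm field (imm << 10). A small arithmetic sketch of those two rewrites; the enum values are stand-ins for the SP::I0/SP::O0 constants, not the real encodings:

#include <cassert>
#include <cstdint>

// Stand-ins for SP::I0 / SP::I7 / SP::O0; only the relative offsets matter.
enum { I0 = 100, I7 = 107, O0 = 50 };

// The %i[n] destination of the combined instruction becomes %o[n].
static unsigned remapInToOut(unsigned Reg) {
  assert(Reg >= I0 && Reg <= I7);
  return Reg - I0 + O0;
}

// A 3-bit sethi immediate becomes a 13-bit immediate for the RESTOREri form.
static int64_t widenSethiImm(int64_t Imm3) {
  return (Imm3 << 10) & 0x1FFF;
}

int main() {
  assert(remapInToOut(I0 + 3) == O0 + 3);   // %i3 -> %o3
  assert(widenSethiImm(3) == (3 << 10));    // 0x0C00 fits in simm13
  return 0;
}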
diff --git a/lib/Target/Sparc/FPMover.cpp b/lib/Target/Sparc/FPMover.cpp
deleted file mode 100644
index 1325b98cf0ee..000000000000
--- a/lib/Target/Sparc/FPMover.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-//===-- FPMover.cpp - Sparc double-precision floating point move fixer ----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Expand FpMOVD/FpABSD/FpNEGD instructions into their single-precision pieces.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "fpmover"
-#include "Sparc.h"
-#include "SparcSubtarget.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-using namespace llvm;
-
-STATISTIC(NumFpDs , "Number of instructions translated");
-STATISTIC(NoopFpDs, "Number of noop instructions removed");
-
-namespace {
- struct FPMover : public MachineFunctionPass {
- /// Target machine description which we query for reg. names, data
- /// layout, etc.
- ///
- TargetMachine &TM;
-
- static char ID;
- explicit FPMover(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm) { }
-
- virtual const char *getPassName() const {
- return "Sparc Double-FP Move Fixer";
- }
-
- bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
- bool runOnMachineFunction(MachineFunction &F);
- };
- char FPMover::ID = 0;
-} // end of anonymous namespace
-
-/// createSparcFPMoverPass - Returns a pass that turns FpMOVD
-/// instructions into FMOVS instructions
-///
-FunctionPass *llvm::createSparcFPMoverPass(TargetMachine &tm) {
- return new FPMover(tm);
-}
-
-/// getDoubleRegPair - Given a DFP register, return the even and odd FP
-/// registers that correspond to it.
-static void getDoubleRegPair(unsigned DoubleReg, unsigned &EvenReg,
- unsigned &OddReg) {
- static const uint16_t EvenHalvesOfPairs[] = {
- SP::F0, SP::F2, SP::F4, SP::F6, SP::F8, SP::F10, SP::F12, SP::F14,
- SP::F16, SP::F18, SP::F20, SP::F22, SP::F24, SP::F26, SP::F28, SP::F30
- };
- static const uint16_t OddHalvesOfPairs[] = {
- SP::F1, SP::F3, SP::F5, SP::F7, SP::F9, SP::F11, SP::F13, SP::F15,
- SP::F17, SP::F19, SP::F21, SP::F23, SP::F25, SP::F27, SP::F29, SP::F31
- };
- static const uint16_t DoubleRegsInOrder[] = {
- SP::D0, SP::D1, SP::D2, SP::D3, SP::D4, SP::D5, SP::D6, SP::D7, SP::D8,
- SP::D9, SP::D10, SP::D11, SP::D12, SP::D13, SP::D14, SP::D15
- };
- for (unsigned i = 0; i < array_lengthof(DoubleRegsInOrder); ++i)
- if (DoubleRegsInOrder[i] == DoubleReg) {
- EvenReg = EvenHalvesOfPairs[i];
- OddReg = OddHalvesOfPairs[i];
- return;
- }
- llvm_unreachable("Can't find reg");
-}
-
-/// runOnMachineBasicBlock - Fixup FpMOVD instructions in this MBB.
-///
-bool FPMover::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
- bool Changed = false;
- for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
- MachineInstr *MI = I++;
- DebugLoc dl = MI->getDebugLoc();
- if (MI->getOpcode() == SP::FpMOVD || MI->getOpcode() == SP::FpABSD ||
- MI->getOpcode() == SP::FpNEGD) {
- Changed = true;
- unsigned DestDReg = MI->getOperand(0).getReg();
- unsigned SrcDReg = MI->getOperand(1).getReg();
- if (DestDReg == SrcDReg && MI->getOpcode() == SP::FpMOVD) {
- MBB.erase(MI); // Eliminate the noop copy.
- ++NoopFpDs;
- continue;
- }
-
- unsigned EvenSrcReg = 0, OddSrcReg = 0, EvenDestReg = 0, OddDestReg = 0;
- getDoubleRegPair(DestDReg, EvenDestReg, OddDestReg);
- getDoubleRegPair(SrcDReg, EvenSrcReg, OddSrcReg);
-
- const TargetInstrInfo *TII = TM.getInstrInfo();
- if (MI->getOpcode() == SP::FpMOVD)
- MI->setDesc(TII->get(SP::FMOVS));
- else if (MI->getOpcode() == SP::FpNEGD)
- MI->setDesc(TII->get(SP::FNEGS));
- else if (MI->getOpcode() == SP::FpABSD)
- MI->setDesc(TII->get(SP::FABSS));
- else
- llvm_unreachable("Unknown opcode!");
-
- MI->getOperand(0).setReg(EvenDestReg);
- MI->getOperand(1).setReg(EvenSrcReg);
- DEBUG(errs() << "FPMover: the modified instr is: " << *MI);
- // Insert copy for the other half of the double.
- if (DestDReg != SrcDReg) {
- MI = BuildMI(MBB, I, dl, TM.getInstrInfo()->get(SP::FMOVS), OddDestReg)
- .addReg(OddSrcReg);
- DEBUG(errs() << "FPMover: the inserted instr is: " << *MI);
- }
- ++NumFpDs;
- }
- }
- return Changed;
-}
-
-bool FPMover::runOnMachineFunction(MachineFunction &F) {
- // If the target has V9 instructions, the fp-mover pseudos will never be
- // emitted. Avoid a scan of the instructions to improve compile time.
- if (TM.getSubtarget<SparcSubtarget>().isV9())
- return false;
-
- bool Changed = false;
- for (MachineFunction::iterator FI = F.begin(), FE = F.end();
- FI != FE; ++FI)
- Changed |= runOnMachineBasicBlock(*FI);
- return Changed;
-}
diff --git a/lib/Target/Sparc/LLVMBuild.txt b/lib/Target/Sparc/LLVMBuild.txt
index fe20d2f4bd15..fd8e5d9fd8fe 100644
--- a/lib/Target/Sparc/LLVMBuild.txt
+++ b/lib/Target/Sparc/LLVMBuild.txt
@@ -23,10 +23,12 @@ type = TargetGroup
name = Sparc
parent = Target
has_asmprinter = 1
+has_jit = 1
[component_1]
type = Library
name = SparcCodeGen
parent = Sparc
-required_libraries = AsmPrinter CodeGen Core MC SelectionDAG SparcDesc SparcInfo Support Target
+required_libraries = AsmPrinter CodeGen Core MC SelectionDAG SparcDesc
+ SparcInfo Support Target
add_to_library_groups = Sparc
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h b/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
index aac0e8d74a85..f3caeaa0c23a 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
+++ b/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
@@ -53,7 +53,27 @@ enum TOF {
// Extract bits 41-32 of an address.
// Assembler: %hm(addr)
- MO_HM
+ MO_HM,
+
+ // TargetFlags for Thread Local Storage.
+ MO_TLS_GD_HI22,
+ MO_TLS_GD_LO10,
+ MO_TLS_GD_ADD,
+ MO_TLS_GD_CALL,
+ MO_TLS_LDM_HI22,
+ MO_TLS_LDM_LO10,
+ MO_TLS_LDM_ADD,
+ MO_TLS_LDM_CALL,
+ MO_TLS_LDO_HIX22,
+ MO_TLS_LDO_LOX10,
+ MO_TLS_LDO_ADD,
+ MO_TLS_IE_HI22,
+ MO_TLS_IE_LO10,
+ MO_TLS_IE_LD,
+ MO_TLS_IE_LDX,
+ MO_TLS_IE_ADD,
+ MO_TLS_LE_HIX22,
+ MO_TLS_LE_LOX10
};
} // end namespace SPII
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
index 3d4bfdcd5e6d..baac36b1db64 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
@@ -18,26 +18,29 @@ using namespace llvm;
void SparcELFMCAsmInfo::anchor() { }
-SparcELFMCAsmInfo::SparcELFMCAsmInfo(const Target &T, StringRef TT) {
+SparcELFMCAsmInfo::SparcELFMCAsmInfo(StringRef TT) {
IsLittleEndian = false;
Triple TheTriple(TT);
- if (TheTriple.getArch() == Triple::sparcv9) {
+ bool isV9 = (TheTriple.getArch() == Triple::sparcv9);
+
+ if (isV9) {
PointerSize = CalleeSaveStackSlotSize = 8;
}
Data16bitsDirective = "\t.half\t";
Data32bitsDirective = "\t.word\t";
- Data64bitsDirective = 0; // .xword is only supported by V9.
+ // .xword is only supported by V9.
+ Data64bitsDirective = (isV9) ? "\t.xword\t" : 0;
ZeroDirective = "\t.skip\t";
CommentString = "!";
HasLEB128 = true;
SupportsDebugInformation = true;
-
+
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+
SunStyleELFSectionSwitchSyntax = true;
UsesELFSectionDirectiveForBSS = true;
- WeakRefDirective = "\t.weak\t";
-
PrivateGlobalPrefix = ".L";
}
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
index f0e1354c212b..1e58e37ce14b 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
@@ -14,16 +14,15 @@
#ifndef SPARCTARGETASMINFO_H
#define SPARCTARGETASMINFO_H
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
class StringRef;
- class Target;
- class SparcELFMCAsmInfo : public MCAsmInfo {
+ class SparcELFMCAsmInfo : public MCAsmInfoELF {
virtual void anchor();
public:
- explicit SparcELFMCAsmInfo(const Target &T, StringRef TT);
+ explicit SparcELFMCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/Sparc/Makefile b/lib/Target/Sparc/Makefile
index 4b81ada956f2..c171db77cf38 100644
--- a/lib/Target/Sparc/Makefile
+++ b/lib/Target/Sparc/Makefile
@@ -14,7 +14,8 @@ TARGET = Sparc
# Make sure that tblgen is run, first thing.
BUILT_SOURCES = SparcGenRegisterInfo.inc SparcGenInstrInfo.inc \
SparcGenAsmWriter.inc SparcGenDAGISel.inc \
- SparcGenSubtargetInfo.inc SparcGenCallingConv.inc
+ SparcGenSubtargetInfo.inc SparcGenCallingConv.inc \
+ SparcGenCodeEmitter.inc
DIRS = TargetInfo MCTargetDesc
diff --git a/lib/Target/Sparc/README.txt b/lib/Target/Sparc/README.txt
index b4991fe5790b..34e68cfa78f7 100644
--- a/lib/Target/Sparc/README.txt
+++ b/lib/Target/Sparc/README.txt
@@ -38,7 +38,7 @@ t1:
1) should be replaced with a brz in V9 mode.
-* Same as above, but emit conditional move on register zero (p192) in V9
+* Same as above, but emit conditional move on register zero (p192) in V9
mode. Testcase:
int %t1(int %a, int %b) {
@@ -47,13 +47,15 @@ int %t1(int %a, int %b) {
ret int %D
}
-* Emit MULX/[SU]DIVX instructions in V9 mode instead of fiddling
+* Emit MULX/[SU]DIVX instructions in V9 mode instead of fiddling
with the Y register, if they are faster.
* Codegen bswap(load)/store(bswap) -> load/store ASI
-* Implement frame pointer elimination, e.g. eliminate save/restore for
+* Implement frame pointer elimination, e.g. eliminate save/restore for
leaf fns.
* Fill delay slots
* Implement JIT support
+
+* Use %g0 directly to materialize 0. No instruction is required.
diff --git a/lib/Target/Sparc/Sparc.h b/lib/Target/Sparc/Sparc.h
index ce6ae17b6ca2..f44b60420d06 100644
--- a/lib/Target/Sparc/Sparc.h
+++ b/lib/Target/Sparc/Sparc.h
@@ -26,7 +26,8 @@ namespace llvm {
FunctionPass *createSparcISelDag(SparcTargetMachine &TM);
FunctionPass *createSparcDelaySlotFillerPass(TargetMachine &TM);
- FunctionPass *createSparcFPMoverPass(TargetMachine &TM);
+ FunctionPass *createSparcJITCodeEmitterPass(SparcTargetMachine &TM,
+ JITCodeEmitter &JCE);
} // end namespace llvm;
@@ -51,7 +52,7 @@ namespace llvm {
ICC_NEG = 6 , // Negative
ICC_VC = 15 , // Overflow Clear
ICC_VS = 7 , // Overflow Set
-
+
//FCC_A = 8+16, // Always
//FCC_N = 0+16, // Never
FCC_U = 7+16, // Unordered
@@ -70,7 +71,7 @@ namespace llvm {
FCC_O = 15+16 // Ordered
};
}
-
+
inline static const char *SPARCCondCodeToString(SPCC::CondCodes CC) {
switch (CC) {
case SPCC::ICC_NE: return "ne";
@@ -104,5 +105,22 @@ namespace llvm {
}
llvm_unreachable("Invalid cond code");
}
+
+ inline static unsigned HI22(int64_t imm) {
+ return (unsigned)((imm >> 10) & ((1 << 22)-1));
+ }
+
+ inline static unsigned LO10(int64_t imm) {
+ return (unsigned)(imm & 0x3FF);
+ }
+
+ inline static unsigned HIX22(int64_t imm) {
+ return HI22(~imm);
+ }
+
+ inline static unsigned LOX10(int64_t imm) {
+ return ~LO10(~imm);
+ }
+
} // end namespace llvm
#endif
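The HI22/LO10 pair added above reassembles a non-negative 32-bit constant through sethi+or, and HIX22/LOX10 reassembles a negative one through sethi+xor, where the xor immediate is sign-extended from 13 bits. A self-contained check of that arithmetic; simm13() models the SPARC immediate sign extension and is an assumption of this sketch, not code from the patch:

#include <cassert>
#include <cstdint>

// Local copies of the helpers from Sparc.h, used only to check the math here.
static unsigned HI22(int64_t imm)  { return (unsigned)((imm >> 10) & ((1 << 22)-1)); }
static unsigned LO10(int64_t imm)  { return (unsigned)(imm & 0x3FF); }
static unsigned HIX22(int64_t imm) { return HI22(~imm); }
static unsigned LOX10(int64_t imm) { return ~LO10(~imm); }

// Assumed model of a SPARC 13-bit signed immediate as used by or/xor.
static int32_t simm13(unsigned v) {
  int32_t x = (int32_t)(v & 0x1FFF);
  return (x & 0x1000) ? (x | ~0x1FFF) : x;   // sign-extend bit 12
}

int main() {
  // sethi %hi(x), %g1 ; or %g1, %lo(x), %g1   rebuilds a non-negative value.
  int32_t pos = 0x12345;
  assert((int32_t)((HI22(pos) << 10) | LO10(pos)) == pos);

  // sethi %hix(x), %g1 ; xor %g1, %lox(x), %g1   rebuilds a negative value.
  int32_t neg = -0x12345;
  assert((int32_t)((HIX22(neg) << 10) ^ simm13(LOX10(neg))) == neg);
  return 0;
}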
diff --git a/lib/Target/Sparc/Sparc.td b/lib/Target/Sparc/Sparc.td
index 611f8e8129f4..0df48f60e8fb 100644
--- a/lib/Target/Sparc/Sparc.td
+++ b/lib/Target/Sparc/Sparc.td
@@ -19,7 +19,7 @@ include "llvm/Target/Target.td"
//===----------------------------------------------------------------------===//
// SPARC Subtarget features.
//
-
+
def FeatureV9
: SubtargetFeature<"v9", "IsV9", "true",
"Enable SPARC-V9 instructions">;
@@ -30,6 +30,10 @@ def FeatureVIS
: SubtargetFeature<"vis", "IsVIS", "true",
"Enable UltraSPARC Visual Instruction Set extensions">;
+def FeatureHardQuad
+ : SubtargetFeature<"hard-quad-float", "HasHardQuad", "true",
+ "Enable quad-word floating point instructions">;
+
//===----------------------------------------------------------------------===//
// Register File, Calling Conv, Instruction Descriptions
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Sparc/SparcAsmPrinter.cpp b/lib/Target/Sparc/SparcAsmPrinter.cpp
index 108eb9047903..d06c894c7e05 100644
--- a/lib/Target/Sparc/SparcAsmPrinter.cpp
+++ b/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
@@ -43,6 +44,7 @@ namespace {
const char *Modifier = 0);
void printCCOperand(const MachineInstr *MI, int opNum, raw_ostream &OS);
+ virtual void EmitFunctionBodyStart();
virtual void EmitInstruction(const MachineInstr *MI) {
SmallString<128> Str;
raw_svector_ostream OS(Str);
@@ -60,16 +62,38 @@ namespace {
raw_ostream &O);
bool printGetPCX(const MachineInstr *MI, unsigned OpNo, raw_ostream &OS);
-
+
virtual bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB)
const;
+ void EmitGlobalRegisterDecl(unsigned reg) {
+ SmallString<128> Str;
+ raw_svector_ostream OS(Str);
+ OS << "\t.register "
+ << "%" << StringRef(getRegisterName(reg)).lower()
+ << ", "
+ << ((reg == SP::G6 || reg == SP::G7)? "#ignore" : "#scratch");
+ OutStreamer.EmitRawText(OS.str());
+ }
- virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
};
} // end of anonymous namespace
#include "SparcGenAsmWriter.inc"
+void SparcAsmPrinter::EmitFunctionBodyStart() {
+ if (!TM.getSubtarget<SparcSubtarget>().is64Bit())
+ return;
+
+ const MachineRegisterInfo &MRI = MF->getRegInfo();
+ const unsigned globalRegs[] = { SP::G2, SP::G3, SP::G6, SP::G7, 0 };
+ for (unsigned i = 0; globalRegs[i] != 0; ++i) {
+ unsigned reg = globalRegs[i];
+ if (MRI.use_empty(reg))
+ continue;
+ EmitGlobalRegisterDecl(reg);
+ }
+}
+
void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand (opNum);
@@ -81,11 +105,37 @@ void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
assert(TF == SPII::MO_NO_FLAG &&
"Cannot handle target flags on call address");
else if (MI->getOpcode() == SP::SETHIi)
- assert((TF == SPII::MO_HI || TF == SPII::MO_H44 || TF == SPII::MO_HH) &&
+ assert((TF == SPII::MO_HI || TF == SPII::MO_H44 || TF == SPII::MO_HH
+ || TF == SPII::MO_TLS_GD_HI22
+ || TF == SPII::MO_TLS_LDM_HI22
+ || TF == SPII::MO_TLS_LDO_HIX22
+ || TF == SPII::MO_TLS_IE_HI22
+ || TF == SPII::MO_TLS_LE_HIX22) &&
"Invalid target flags for address operand on sethi");
+ else if (MI->getOpcode() == SP::TLS_CALL)
+ assert((TF == SPII::MO_NO_FLAG
+ || TF == SPII::MO_TLS_GD_CALL
+ || TF == SPII::MO_TLS_LDM_CALL) &&
+ "Cannot handle target flags on tls call address");
+ else if (MI->getOpcode() == SP::TLS_ADDrr)
+ assert((TF == SPII::MO_TLS_GD_ADD || TF == SPII::MO_TLS_LDM_ADD
+ || TF == SPII::MO_TLS_LDO_ADD || TF == SPII::MO_TLS_IE_ADD) &&
+ "Cannot handle target flags on add for TLS");
+ else if (MI->getOpcode() == SP::TLS_LDrr)
+ assert(TF == SPII::MO_TLS_IE_LD &&
+ "Cannot handle target flags on ld for TLS");
+ else if (MI->getOpcode() == SP::TLS_LDXrr)
+ assert(TF == SPII::MO_TLS_IE_LDX &&
+ "Cannot handle target flags on ldx for TLS");
+ else if (MI->getOpcode() == SP::XORri)
+ assert((TF == SPII::MO_TLS_LDO_LOX10 || TF == SPII::MO_TLS_LE_LOX10) &&
+ "Cannot handle target flags on xor for TLS");
else
- assert((TF == SPII::MO_LO || TF == SPII::MO_M44 || TF == SPII::MO_L44 ||
- TF == SPII::MO_HM) &&
+ assert((TF == SPII::MO_LO || TF == SPII::MO_M44 || TF == SPII::MO_L44
+ || TF == SPII::MO_HM
+ || TF == SPII::MO_TLS_GD_LO10
+ || TF == SPII::MO_TLS_LDM_LO10
+ || TF == SPII::MO_TLS_IE_LO10 ) &&
"Invalid target flags for small address operand");
}
#endif
@@ -104,6 +154,24 @@ void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
case SPII::MO_L44: O << "%l44("; break;
case SPII::MO_HH: O << "%hh("; break;
case SPII::MO_HM: O << "%hm("; break;
+ case SPII::MO_TLS_GD_HI22: O << "%tgd_hi22("; break;
+ case SPII::MO_TLS_GD_LO10: O << "%tgd_lo10("; break;
+ case SPII::MO_TLS_GD_ADD: O << "%tgd_add("; break;
+ case SPII::MO_TLS_GD_CALL: O << "%tgd_call("; break;
+ case SPII::MO_TLS_LDM_HI22: O << "%tldm_hi22("; break;
+ case SPII::MO_TLS_LDM_LO10: O << "%tldm_lo10("; break;
+ case SPII::MO_TLS_LDM_ADD: O << "%tldm_add("; break;
+ case SPII::MO_TLS_LDM_CALL: O << "%tldm_call("; break;
+ case SPII::MO_TLS_LDO_HIX22: O << "%tldo_hix22("; break;
+ case SPII::MO_TLS_LDO_LOX10: O << "%tldo_lox10("; break;
+ case SPII::MO_TLS_LDO_ADD: O << "%tldo_add("; break;
+ case SPII::MO_TLS_IE_HI22: O << "%tie_hi22("; break;
+ case SPII::MO_TLS_IE_LO10: O << "%tie_lo10("; break;
+ case SPII::MO_TLS_IE_LD: O << "%tie_ld("; break;
+ case SPII::MO_TLS_IE_LDX: O << "%tie_ldx("; break;
+ case SPII::MO_TLS_IE_ADD: O << "%tie_add("; break;
+ case SPII::MO_TLS_LE_HIX22: O << "%tle_hix22("; break;
+ case SPII::MO_TLS_LE_LOX10: O << "%tle_lox10("; break;
}
switch (MO.getType()) {
@@ -118,7 +186,10 @@ void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
O << *MO.getMBB()->getSymbol();
return;
case MachineOperand::MO_GlobalAddress:
- O << *Mang->getSymbol(MO.getGlobal());
+ O << *getSymbol(MO.getGlobal());
+ break;
+ case MachineOperand::MO_BlockAddress:
+ O << GetBlockAddressSymbol(MO.getBlockAddress())->getName();
break;
case MachineOperand::MO_ExternalSymbol:
O << MO.getSymbolName();
@@ -164,7 +235,7 @@ bool SparcAsmPrinter::printGetPCX(const MachineInstr *MI, unsigned opNum,
case MachineOperand::MO_Register:
assert(TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
"Operand is not a physical register ");
- assert(MO.getReg() != SP::O7 &&
+ assert(MO.getReg() != SP::O7 &&
"%o7 is assigned as destination for getpcx!");
operand = "%" + StringRef(getRegisterName(MO.getReg())).lower();
break;
@@ -177,15 +248,15 @@ bool SparcAsmPrinter::printGetPCX(const MachineInstr *MI, unsigned opNum,
O << "\tcall\t.LLGETPC" << mfNum << '_' << bbNum << '\n' ;
O << "\t sethi\t"
- << "%hi(_GLOBAL_OFFSET_TABLE_+(.-.LLGETPCH" << mfNum << '_' << bbNum
+ << "%hi(_GLOBAL_OFFSET_TABLE_+(.-.LLGETPCH" << mfNum << '_' << bbNum
<< ")), " << operand << '\n' ;
O << ".LLGETPC" << mfNum << '_' << bbNum << ":\n" ;
- O << "\tor\t" << operand
+ O << "\tor\t" << operand
<< ", %lo(_GLOBAL_OFFSET_TABLE_+(.-.LLGETPCH" << mfNum << '_' << bbNum
<< ")), " << operand << '\n';
- O << "\tadd\t" << operand << ", %o7, " << operand << '\n';
-
+ O << "\tadd\t" << operand << ", %o7, " << operand << '\n';
+
return true;
}
@@ -243,19 +314,19 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
// then nothing falls through to it.
if (MBB->isLandingPad() || MBB->pred_empty())
return false;
-
+
// If there isn't exactly one predecessor, it can't be a fall through.
MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), PI2 = PI;
++PI2;
if (PI2 != MBB->pred_end())
return false;
-
+
// The predecessor has to be immediately before this block.
const MachineBasicBlock *Pred = *PI;
-
+
if (!Pred->isLayoutSuccessor(MBB))
return false;
-
+
// Check if the last terminator is an unconditional branch.
MachineBasicBlock::const_iterator I = Pred->end();
while (I != Pred->begin() && !(--I)->isTerminator())
@@ -263,17 +334,8 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
return I == Pred->end() || !I->isBarrier();
}
-MachineLocation SparcAsmPrinter::
-getDebugValueLocation(const MachineInstr *MI) const {
- assert(MI->getNumOperands() == 4 && "Invalid number of operands!");
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm() &&
- "Unexpected MachineOperand types");
- return MachineLocation(MI->getOperand(0).getReg(),
- MI->getOperand(1).getImm());
-}
-
// Force static initialization.
-extern "C" void LLVMInitializeSparcAsmPrinter() {
+extern "C" void LLVMInitializeSparcAsmPrinter() {
RegisterAsmPrinter<SparcAsmPrinter> X(TheSparcTarget);
RegisterAsmPrinter<SparcAsmPrinter> Y(TheSparcV9Target);
}
diff --git a/lib/Target/Sparc/SparcCallingConv.td b/lib/Target/Sparc/SparcCallingConv.td
index 54784e018834..acd4ec21de4c 100644
--- a/lib/Target/Sparc/SparcCallingConv.td
+++ b/lib/Target/Sparc/SparcCallingConv.td
@@ -16,7 +16,7 @@
//===----------------------------------------------------------------------===//
def CC_Sparc32 : CallingConv<[
- //Custom assign SRet to [sp+64].
+ // Custom assign SRet to [sp+64].
CCIfSRet<CCCustom<"CC_Sparc_Assign_SRet">>,
// i32 f32 arguments get passed in integer registers if there is space.
CCIfType<[i32, f32], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>,
@@ -117,3 +117,14 @@ def CC_Sparc64 : CallingConv<[
// arguments whether they are passed in registers or not.
CCCustom<"CC_Sparc64_Full">
]>;
+
+// Callee-saved registers are handled by the register window mechanism.
+def CSR : CalleeSavedRegs<(add)> {
+ let OtherPreserved = (add (sequence "I%u", 0, 7),
+ (sequence "L%u", 0, 7));
+}
+
+// Callee-saved registers for calls with ReturnsTwice attribute.
+def RTCSR : CalleeSavedRegs<(add)> {
+ let OtherPreserved = (add I6, I7);
+}
diff --git a/lib/Target/Sparc/SparcCodeEmitter.cpp b/lib/Target/Sparc/SparcCodeEmitter.cpp
new file mode 100644
index 000000000000..9bfe31fe4969
--- /dev/null
+++ b/lib/Target/Sparc/SparcCodeEmitter.cpp
@@ -0,0 +1,245 @@
+//===-- Sparc/SparcCodeEmitter.cpp - Convert Sparc Code to Machine Code ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file contains the pass that transforms the Sparc machine instructions
+// into relocatable machine code.
+//
+//===---------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "jit"
+#include "Sparc.h"
+#include "MCTargetDesc/SparcBaseInfo.h"
+#include "SparcRelocations.h"
+#include "SparcTargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/JITCodeEmitter.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+STATISTIC(NumEmitted, "Number of machine instructions emitted");
+
+namespace {
+
+class SparcCodeEmitter : public MachineFunctionPass {
+ SparcJITInfo *JTI;
+ const SparcInstrInfo *II;
+ const DataLayout *TD;
+ const SparcSubtarget *Subtarget;
+ TargetMachine &TM;
+ JITCodeEmitter &MCE;
+ const std::vector<MachineConstantPoolEntry> *MCPEs;
+ bool IsPIC;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineModuleInfo> ();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ static char ID;
+
+public:
+ SparcCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
+ : MachineFunctionPass(ID), JTI(0), II(0), TD(0),
+ TM(tm), MCE(mce), MCPEs(0),
+ IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
+
+ bool runOnMachineFunction(MachineFunction &MF);
+
+ virtual const char *getPassName() const {
+ return "Sparc Machine Code Emitter";
+ }
+
+ /// getBinaryCodeForInstr - This function, generated by the
+ /// CodeEmitterGenerator using TableGen, produces the binary encoding for
+ /// machine instructions.
+ uint64_t getBinaryCodeForInstr(const MachineInstr &MI) const;
+
+ void emitInstruction(MachineBasicBlock::instr_iterator MI,
+ MachineBasicBlock &MBB);
+
+private:
+ /// getMachineOpValue - Return binary encoding of operand. If the machine
+ /// operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MachineInstr &MI,
+ const MachineOperand &MO) const;
+
+ void emitWord(unsigned Word);
+
+ unsigned getRelocation(const MachineInstr &MI,
+ const MachineOperand &MO) const;
+
+ void emitGlobalAddress(const GlobalValue *GV, unsigned Reloc) const;
+ void emitExternalSymbolAddress(const char *ES, unsigned Reloc) const;
+ void emitConstPoolAddress(unsigned CPI, unsigned Reloc) const;
+ void emitMachineBasicBlock(MachineBasicBlock *BB, unsigned Reloc) const;
+};
+} // end anonymous namespace.
+
+char SparcCodeEmitter::ID = 0;
+
+bool SparcCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
+ SparcTargetMachine &Target = static_cast<SparcTargetMachine &>(
+ const_cast<TargetMachine &>(MF.getTarget()));
+
+ JTI = Target.getJITInfo();
+ II = Target.getInstrInfo();
+ TD = Target.getDataLayout();
+ Subtarget = &TM.getSubtarget<SparcSubtarget> ();
+ MCPEs = &MF.getConstantPool()->getConstants();
+ JTI->Initialize(MF, IsPIC);
+ MCE.setModuleInfo(&getAnalysis<MachineModuleInfo> ());
+
+ do {
+ DEBUG(errs() << "JITTing function '"
+ << MF.getName() << "'\n");
+ MCE.startFunction(MF);
+
+ for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
+ MBB != E; ++MBB){
+ MCE.StartMachineBasicBlock(MBB);
+ for (MachineBasicBlock::instr_iterator I = MBB->instr_begin(),
+ E = MBB->instr_end(); I != E;)
+ emitInstruction(*I++, *MBB);
+ }
+ } while (MCE.finishFunction(MF));
+
+ return false;
+}
+
+void SparcCodeEmitter::emitInstruction(MachineBasicBlock::instr_iterator MI,
+ MachineBasicBlock &MBB) {
+ DEBUG(errs() << "JIT: " << (void*)MCE.getCurrentPCValue() << ":\t" << *MI);
+
+ MCE.processDebugLoc(MI->getDebugLoc(), true);
+
+ ++NumEmitted;
+
+ switch (MI->getOpcode()) {
+ default: {
+ emitWord(getBinaryCodeForInstr(*MI));
+ break;
+ }
+ case TargetOpcode::INLINEASM: {
+ // We allow inline assembler nodes with empty bodies - they can
+ // implicitly define registers, which is ok for JIT.
+ if (MI->getOperand(0).getSymbolName()[0]) {
+ report_fatal_error("JIT does not support inline asm!");
+ }
+ break;
+ }
+ case TargetOpcode::PROLOG_LABEL:
+ case TargetOpcode::EH_LABEL: {
+ MCE.emitLabel(MI->getOperand(0).getMCSymbol());
+ break;
+ }
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL: {
+ // Do nothing.
+ break;
+ }
+ case SP::GETPCX: {
+ report_fatal_error("JIT does not support pseudo instruction GETPCX yet!");
+ break;
+ }
+ }
+
+ MCE.processDebugLoc(MI->getDebugLoc(), false);
+}
+
+void SparcCodeEmitter::emitWord(unsigned Word) {
+ DEBUG(errs() << " 0x";
+ errs().write_hex(Word) << "\n");
+ MCE.emitWordBE(Word);
+}
+
+/// getMachineOpValue - Return binary encoding of operand. If the machine
+/// operand requires relocation, record the relocation and return zero.
+unsigned SparcCodeEmitter::getMachineOpValue(const MachineInstr &MI,
+ const MachineOperand &MO) const {
+ if (MO.isReg())
+ return TM.getRegisterInfo()->getEncodingValue(MO.getReg());
+ else if (MO.isImm())
+ return static_cast<unsigned>(MO.getImm());
+ else if (MO.isGlobal())
+ emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO));
+ else if (MO.isSymbol())
+ emitExternalSymbolAddress(MO.getSymbolName(), getRelocation(MI, MO));
+ else if (MO.isCPI())
+ emitConstPoolAddress(MO.getIndex(), getRelocation(MI, MO));
+ else if (MO.isMBB())
+ emitMachineBasicBlock(MO.getMBB(), getRelocation(MI, MO));
+ else
+ llvm_unreachable("Unable to encode MachineOperand!");
+ return 0;
+}
+unsigned SparcCodeEmitter::getRelocation(const MachineInstr &MI,
+ const MachineOperand &MO) const {
+
+ unsigned TF = MO.getTargetFlags();
+ switch (TF) {
+ default:
+ case SPII::MO_NO_FLAG: break;
+ case SPII::MO_LO: return SP::reloc_sparc_lo;
+ case SPII::MO_HI: return SP::reloc_sparc_hi;
+ case SPII::MO_H44:
+ case SPII::MO_M44:
+ case SPII::MO_L44:
+ case SPII::MO_HH:
+ case SPII::MO_HM: assert(0 && "FIXME: Implement Medium/Large code model.");
+ }
+
+ unsigned Opc = MI.getOpcode();
+ switch (Opc) {
+ default: break;
+ case SP::CALL: return SP::reloc_sparc_pc30;
+ case SP::BA:
+ case SP::BCOND:
+ case SP::FBCOND: return SP::reloc_sparc_pc22;
+ case SP::BPXCC: return SP::reloc_sparc_pc19;
+ }
+ llvm_unreachable("unknown reloc!");
+}
+
+void SparcCodeEmitter::emitGlobalAddress(const GlobalValue *GV,
+ unsigned Reloc) const {
+ MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
+ const_cast<GlobalValue *>(GV), 0,
+ true));
+}
+
+void SparcCodeEmitter::
+emitExternalSymbolAddress(const char *ES, unsigned Reloc) const {
+ MCE.addRelocation(MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
+ Reloc, ES, 0, 0));
+}
+
+void SparcCodeEmitter::
+emitConstPoolAddress(unsigned CPI, unsigned Reloc) const {
+ MCE.addRelocation(MachineRelocation::getConstPool(MCE.getCurrentPCOffset(),
+ Reloc, CPI, 0, false));
+}
+
+void SparcCodeEmitter::emitMachineBasicBlock(MachineBasicBlock *BB,
+ unsigned Reloc) const {
+ MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
+ Reloc, BB));
+}
+
+
+/// createSparcJITCodeEmitterPass - Return a pass that emits the collected Sparc
+/// code to the specified MCE object.
+FunctionPass *llvm::createSparcJITCodeEmitterPass(SparcTargetMachine &TM,
+ JITCodeEmitter &JCE) {
+ return new SparcCodeEmitter(TM, JCE);
+}
+
+#include "SparcGenCodeEmitter.inc"
diff --git a/lib/Target/Sparc/SparcFrameLowering.cpp b/lib/Target/Sparc/SparcFrameLowering.cpp
index 7874240f5983..c75998a36e0d 100644
--- a/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -26,7 +26,61 @@
using namespace llvm;
+static cl::opt<bool>
+DisableLeafProc("disable-sparc-leaf-proc",
+ cl::init(false),
+ cl::desc("Disable Sparc leaf procedure optimization."),
+ cl::Hidden);
+
+
+void SparcFrameLowering::emitSPAdjustment(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ int NumBytes,
+ unsigned ADDrr,
+ unsigned ADDri) const {
+
+ DebugLoc dl = (MBBI != MBB.end()) ? MBBI->getDebugLoc() : DebugLoc();
+ const SparcInstrInfo &TII =
+ *static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo());
+
+ if (NumBytes >= -4096 && NumBytes < 4096) {
+ BuildMI(MBB, MBBI, dl, TII.get(ADDri), SP::O6)
+ .addReg(SP::O6).addImm(NumBytes);
+ return;
+ }
+
+ // Emit this the hard way. This clobbers G1 which we always know is
+ // available here.
+ if (NumBytes >= 0) {
+ // Emit nonnegative numbers with sethi + or.
+ // sethi %hi(NumBytes), %g1
+ // or %g1, %lo(NumBytes), %g1
+ // add %sp, %g1, %sp
+ BuildMI(MBB, MBBI, dl, TII.get(SP::SETHIi), SP::G1)
+ .addImm(HI22(NumBytes));
+ BuildMI(MBB, MBBI, dl, TII.get(SP::ORri), SP::G1)
+ .addReg(SP::G1).addImm(LO10(NumBytes));
+ BuildMI(MBB, MBBI, dl, TII.get(ADDrr), SP::O6)
+ .addReg(SP::O6).addReg(SP::G1);
+ return;
+ }
+
+ // Emit negative numbers with sethi + xor.
+ // sethi %hix(NumBytes), %g1
+ // xor %g1, %lox(NumBytes), %g1
+ // add %sp, %g1, %sp
+ BuildMI(MBB, MBBI, dl, TII.get(SP::SETHIi), SP::G1)
+ .addImm(HIX22(NumBytes));
+ BuildMI(MBB, MBBI, dl, TII.get(SP::XORri), SP::G1)
+ .addReg(SP::G1).addImm(LOX10(NumBytes));
+ BuildMI(MBB, MBBI, dl, TII.get(ADDrr), SP::O6)
+ .addReg(SP::O6).addReg(SP::G1);
+}
+
void SparcFrameLowering::emitPrologue(MachineFunction &MF) const {
+ SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
+
MachineBasicBlock &MBB = MF.front();
MachineFrameInfo *MFI = MF.getFrameInfo();
const SparcInstrInfo &TII =
@@ -37,43 +91,36 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF) const {
// Get the number of bytes to allocate from the FrameInfo
int NumBytes = (int) MFI->getStackSize();
- if (SubTarget.is64Bit()) {
- // All 64-bit stack frames must be 16-byte aligned, and must reserve space
- // for spilling the 16 window registers at %sp+BIAS..%sp+BIAS+128.
- NumBytes += 128;
- // Frames with calls must also reserve space for 6 outgoing arguments
- // whether they are used or not. LowerCall_64 takes care of that.
- assert(NumBytes % 16 == 0 && "Stack size not 16-byte aligned");
- } else {
- // Emit the correct save instruction based on the number of bytes in
- // the frame. Minimum stack frame size according to V8 ABI is:
- // 16 words for register window spill
- // 1 word for address of returned aggregate-value
- // + 6 words for passing parameters on the stack
- // ----------
- // 23 words * 4 bytes per word = 92 bytes
- NumBytes += 92;
-
- // Round up to next doubleword boundary -- a double-word boundary
- // is required by the ABI.
- NumBytes = RoundUpToAlignment(NumBytes, 8);
+ unsigned SAVEri = SP::SAVEri;
+ unsigned SAVErr = SP::SAVErr;
+ if (FuncInfo->isLeafProc()) {
+ if (NumBytes == 0)
+ return;
+ SAVEri = SP::ADDri;
+ SAVErr = SP::ADDrr;
}
- NumBytes = -NumBytes;
+ NumBytes = - SubTarget.getAdjustedFrameSize(NumBytes);
+ emitSPAdjustment(MF, MBB, MBBI, NumBytes, SAVErr, SAVEri);
- if (NumBytes >= -4096) {
- BuildMI(MBB, MBBI, dl, TII.get(SP::SAVEri), SP::O6)
- .addReg(SP::O6).addImm(NumBytes);
- } else {
- // Emit this the hard way. This clobbers G1 which we always know is
- // available here.
- unsigned OffHi = (unsigned)NumBytes >> 10U;
- BuildMI(MBB, MBBI, dl, TII.get(SP::SETHIi), SP::G1).addImm(OffHi);
- // Emit G1 = G1 + I6
- BuildMI(MBB, MBBI, dl, TII.get(SP::ORri), SP::G1)
- .addReg(SP::G1).addImm(NumBytes & ((1 << 10)-1));
- BuildMI(MBB, MBBI, dl, TII.get(SP::SAVErr), SP::O6)
- .addReg(SP::O6).addReg(SP::G1);
- }
+ MachineModuleInfo &MMI = MF.getMMI();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
+ MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl, TII.get(SP::PROLOG_LABEL)).addSym(FrameLabel);
+
+ unsigned regFP = MRI->getDwarfRegNum(SP::I6, true);
+
+ // Emit ".cfi_def_cfa_register 30".
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(FrameLabel,
+ regFP));
+ // Emit ".cfi_window_save".
+ MMI.addFrameInst(MCCFIInstruction::createWindowSave(FrameLabel));
+
+ unsigned regInRA = MRI->getDwarfRegNum(SP::I7, true);
+ unsigned regOutRA = MRI->getDwarfRegNum(SP::O7, true);
+ // Emit ".cfi_register 15, 31".
+ MMI.addFrameInst(MCCFIInstruction::createRegister(FrameLabel,
+ regOutRA,
+ regInRA));
}
void SparcFrameLowering::
@@ -81,15 +128,12 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
if (!hasReservedCallFrame(MF)) {
MachineInstr &MI = *I;
- DebugLoc DL = MI.getDebugLoc();
int Size = MI.getOperand(0).getImm();
if (MI.getOpcode() == SP::ADJCALLSTACKDOWN)
Size = -Size;
- const SparcInstrInfo &TII =
- *static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo());
+
if (Size)
- BuildMI(MBB, I, DL, TII.get(SP::ADDri), SP::O6).addReg(SP::O6)
- .addImm(Size);
+ emitSPAdjustment(MF, MBB, I, Size, SP::ADDrr, SP::ADDri);
}
MBB.erase(I);
}
@@ -97,12 +141,112 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
void SparcFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
+ SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
const SparcInstrInfo &TII =
*static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo());
DebugLoc dl = MBBI->getDebugLoc();
assert(MBBI->getOpcode() == SP::RETL &&
"Can only put epilog before 'retl' instruction!");
- BuildMI(MBB, MBBI, dl, TII.get(SP::RESTORErr), SP::G0).addReg(SP::G0)
- .addReg(SP::G0);
+ if (!FuncInfo->isLeafProc()) {
+ BuildMI(MBB, MBBI, dl, TII.get(SP::RESTORErr), SP::G0).addReg(SP::G0)
+ .addReg(SP::G0);
+ return;
+ }
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ int NumBytes = (int) MFI->getStackSize();
+ if (NumBytes == 0)
+ return;
+
+ NumBytes = SubTarget.getAdjustedFrameSize(NumBytes);
+ emitSPAdjustment(MF, MBB, MBBI, NumBytes, SP::ADDrr, SP::ADDri);
+}
+
+bool SparcFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ // Reserve call frame if there are no variable sized objects on the stack.
+ return !MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+// hasFP - Return true if the specified function should have a dedicated frame
+// pointer register. This is true if the function has variable sized allocas or
+// if frame pointer elimination is disabled.
+bool SparcFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ return MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken();
+}
+
+
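+// Debug helper: returns true only when no %i or %l register is still in use
+// (checked after leaf-procedure remapping).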
+static bool LLVM_ATTRIBUTE_UNUSED verifyLeafProcRegUse(MachineRegisterInfo *MRI)
+{
+
+ for (unsigned reg = SP::I0; reg <= SP::I7; ++reg)
+ if (MRI->isPhysRegUsed(reg))
+ return false;
+
+ for (unsigned reg = SP::L0; reg <= SP::L7; ++reg)
+ if (MRI->isPhysRegUsed(reg))
+ return false;
+
+ return true;
+}
+
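+// A function is treated as a leaf procedure when it makes no calls, does not
+// need %l0 or %sp, and does not require a frame pointer.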
+bool SparcFrameLowering::isLeafProc(MachineFunction &MF) const
+{
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ return !(MFI->hasCalls() // has calls
+ || MRI.isPhysRegUsed(SP::L0) // Too many registers needed
+ || MRI.isPhysRegUsed(SP::O6) // %SP is used
+ || hasFP(MF)); // need %FP
+}
+
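+// Rewrite every used %i register to the matching %o register and update each
+// block's live-in list, so the leaf procedure can run in the caller's window.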
+void SparcFrameLowering::remapRegsForLeafProc(MachineFunction &MF) const {
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ // Remap %i[0-7] to %o[0-7].
+ for (unsigned reg = SP::I0; reg <= SP::I7; ++reg) {
+ if (!MRI.isPhysRegUsed(reg))
+ continue;
+ unsigned mapped_reg = (reg - SP::I0 + SP::O0);
+ assert(!MRI.isPhysRegUsed(mapped_reg));
+
+ // Replace I register with O register.
+ MRI.replaceRegWith(reg, mapped_reg);
+
+ // Mark the reg unused.
+ MRI.setPhysRegUnused(reg);
+ }
+
+ // Rewrite MBB's Live-ins.
+ for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
+ MBB != E; ++MBB) {
+ for (unsigned reg = SP::I0; reg <= SP::I7; ++reg) {
+ if (!MBB->isLiveIn(reg))
+ continue;
+ MBB->removeLiveIn(reg);
+ MBB->addLiveIn(reg - SP::I0 + SP::O0);
+ }
+ }
+
+ assert(verifyLeafProcRegUse(&MRI));
+#ifdef XDEBUG
+ MF.verify(0, "After LeafProc Remapping");
+#endif
+}
+
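+// Detect leaf procedures before the callee-saved register scan: mark the
+// function and remap its registers unless the optimization is disabled.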
+void SparcFrameLowering::processFunctionBeforeCalleeSavedScan
+ (MachineFunction &MF, RegScavenger *RS) const {
+
+ if (!DisableLeafProc && isLeafProc(MF)) {
+ SparcMachineFunctionInfo *MFI = MF.getInfo<SparcMachineFunctionInfo>();
+ MFI->setLeafProc(true);
+
+ remapRegsForLeafProc(MF);
+ }
+
}
diff --git a/lib/Target/Sparc/SparcFrameLowering.h b/lib/Target/Sparc/SparcFrameLowering.h
index c37566201613..072fde393833 100644
--- a/lib/Target/Sparc/SparcFrameLowering.h
+++ b/lib/Target/Sparc/SparcFrameLowering.h
@@ -38,7 +38,25 @@ public:
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
- bool hasFP(const MachineFunction &MF) const { return false; }
+ bool hasReservedCallFrame(const MachineFunction &MF) const;
+ bool hasFP(const MachineFunction &MF) const;
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS = NULL) const;
+
+private:
+ // Remap input registers to output registers for leaf procedure.
+ void remapRegsForLeafProc(MachineFunction &MF) const;
+
+ // Returns true if MF is a leaf procedure.
+ bool isLeafProc(MachineFunction &MF) const;
+
+
+ // Emits code for adjusting SP in function prologue/epilogue.
+ void emitSPAdjustment(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ int NumBytes, unsigned ADDrr, unsigned ADDri) const;
+
};
} // End llvm namespace
diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index a709685cd08c..b012bfdb0102 100644
--- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -33,7 +33,7 @@ class SparcDAGToDAGISel : public SelectionDAGISel {
/// Subtarget - Keep a pointer to the Sparc Subtarget around so that we can
/// make the right decision when generating code for different targets.
const SparcSubtarget &Subtarget;
- SparcTargetMachine& TM;
+ SparcTargetMachine &TM;
public:
explicit SparcDAGToDAGISel(SparcTargetMachine &tm)
: SelectionDAGISel(tm),
@@ -67,18 +67,21 @@ private:
SDNode* SparcDAGToDAGISel::getGlobalBaseReg() {
unsigned GlobalBaseReg = TM.getInstrInfo()->getGlobalBaseReg(MF);
- return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
+ return CurDAG->getRegister(GlobalBaseReg,
+ getTargetLowering()->getPointerTy()).getNode();
}
bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr,
SDValue &Base, SDValue &Offset) {
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), TLI.getPointerTy());
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
+ getTargetLowering()->getPointerTy());
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
- Addr.getOpcode() == ISD::TargetGlobalAddress)
+ Addr.getOpcode() == ISD::TargetGlobalAddress ||
+ Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
return false; // direct calls.
if (Addr.getOpcode() == ISD::ADD) {
@@ -88,7 +91,7 @@ bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr,
dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
// Constant offset from frame ref.
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
- TLI.getPointerTy());
+ getTargetLowering()->getPointerTy());
} else {
Base = Addr.getOperand(0);
}
@@ -115,7 +118,8 @@ bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr,
bool SparcDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) {
if (Addr.getOpcode() == ISD::FrameIndex) return false;
if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
- Addr.getOpcode() == ISD::TargetGlobalAddress)
+ Addr.getOpcode() == ISD::TargetGlobalAddress ||
+ Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
return false; // direct calls.
if (Addr.getOpcode() == ISD::ADD) {
@@ -131,14 +135,16 @@ bool SparcDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) {
}
R1 = Addr;
- R2 = CurDAG->getRegister(SP::G0, TLI.getPointerTy());
+ R2 = CurDAG->getRegister(SP::G0, getTargetLowering()->getPointerTy());
return true;
}
SDNode *SparcDAGToDAGISel::Select(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
- if (N->isMachineOpcode())
+ SDLoc dl(N);
+ if (N->isMachineOpcode()) {
+ N->setNodeId(-1);
return NULL; // Already selected.
+ }
switch (N->getOpcode()) {
default: break;
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 3863e2cc1caf..64625f701858 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -14,6 +14,7 @@
#include "SparcISelLowering.h"
#include "SparcMachineFunctionInfo.h"
+#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "MCTargetDesc/SparcBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
@@ -40,7 +41,7 @@ static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
{
assert (ArgFlags.isSRet());
- //Assign SRet argument
+ // Assign SRet argument.
State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
0,
LocVT, LocInfo));
@@ -54,18 +55,18 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
static const uint16_t RegList[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
};
- //Try to get first reg
+ // Try to get first reg.
if (unsigned Reg = State.AllocateReg(RegList, 6)) {
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
} else {
- //Assign whole thing in stack
+ // Assign whole thing in stack.
State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
State.AllocateStack(8,4),
LocVT, LocInfo));
return true;
}
- //Try to get second reg
+ // Try to get second reg.
if (unsigned Reg = State.AllocateReg(RegList, 6))
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
else
@@ -164,7 +165,7 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const {
+ SDLoc DL, SelectionDAG &DAG) const {
if (Subtarget->is64Bit())
return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
@@ -175,7 +176,7 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const {
+ SDLoc DL, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
// CCValAssign - represent the assignment of the return value to locations.
@@ -206,7 +207,7 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain,
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
- unsigned RetAddrOffset = 8; //Call Inst + Delay Slot
+ unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
// If the function returns a struct, copy the SRetReturnReg to I0
if (MF.getFunction()->hasStructRetAttr()) {
SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
@@ -238,7 +239,7 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const {
+ SDLoc DL, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to locations.
SmallVector<CCValAssign, 16> RVLocs;
@@ -314,7 +315,7 @@ LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL,
+ SDLoc DL,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
if (Subtarget->is64Bit())
@@ -332,7 +333,7 @@ LowerFormalArguments_32(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl,
+ SDLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
@@ -351,7 +352,7 @@ LowerFormalArguments_32(SDValue Chain,
CCValAssign &VA = ArgLocs[i];
if (i == 0 && Ins[i].Flags.isSRet()) {
- //Get SRet from [%fp+64]
+ // Get SRet from [%fp+64].
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
@@ -410,7 +411,7 @@ LowerFormalArguments_32(SDValue Chain,
if (VA.needsCustom()) {
assert(VA.getValVT() == MVT::f64);
- //If it is double-word aligned, just load.
+ // If it is double-word aligned, just load.
if (Offset % 8 == 0) {
int FI = MF.getFrameInfo()->CreateFixedObject(8,
Offset,
@@ -470,7 +471,7 @@ LowerFormalArguments_32(SDValue Chain,
}
if (MF.getFunction()->hasStructRetAttr()) {
- //Copy the SRet Argument to SRetReturnReg
+ // Copy the SRet Argument to SRetReturnReg.
SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
unsigned Reg = SFI->getSRetReturnReg();
if (!Reg) {
@@ -532,7 +533,7 @@ LowerFormalArguments_64(SDValue Chain,
CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL,
+ SDLoc DL,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
@@ -648,15 +649,36 @@ SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
return LowerCall_32(CLI, InVals);
}
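+
+// Returns true if the callee has the returns_twice attribute (setjmp-like),
+// either from the call site or by looking the callee function up in the
+// module.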
+static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
+ ImmutableCallSite *CS) {
+ if (CS)
+ return CS->hasFnAttr(Attribute::ReturnsTwice);
+
+ const Function *CalleeFn = 0;
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ CalleeFn = dyn_cast<Function>(G->getGlobal());
+ } else if (ExternalSymbolSDNode *E =
+ dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ const Function *Fn = DAG.getMachineFunction().getFunction();
+ const Module *M = Fn->getParent();
+ const char *CalleeName = E->getSymbol();
+ CalleeFn = M->getFunction(CalleeName);
+ }
+
+ if (!CalleeFn)
+ return false;
+ return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
+}
+
// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
@@ -680,7 +702,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
- //Create local copies for byval args.
+ // Create local copies for byval args.
SmallVector<SDValue, 8> ByValArgs;
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
ISD::ArgFlagsTy Flags = Outs[i].Flags;
@@ -696,13 +718,14 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
SDValue SizeNode = DAG.getConstant(Size, MVT::i32);
Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
- false, //isVolatile,
- (Size <= 32), //AlwaysInline if size <= 32
+ false, // isVolatile,
+ (Size <= 32), // AlwaysInline if size <= 32
MachinePointerInfo(), MachinePointerInfo());
ByValArgs.push_back(FIPtr);
}
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
+ dl);
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
@@ -718,7 +741,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
- //Use local copy if it is a byval arg.
+ // Use local copy if it is a byval arg.
if (Flags.isByVal())
Arg = ByValArgs[byvalArgIdx++];
@@ -758,7 +781,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
if (VA.isMemLoc()) {
unsigned Offset = VA.getLocMemOffset() + StackOffset;
- //if it is double-word aligned, just store.
+ // If it is double-word aligned, just store.
if (Offset % 8 == 0) {
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
SDValue PtrOff = DAG.getIntPtrConstant(Offset);
@@ -791,7 +814,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
if (NextVA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Lo));
} else {
- //Store the low part in stack.
+ // Store the low part in stack.
unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
SDValue PtrOff = DAG.getIntPtrConstant(Offset);
@@ -860,6 +883,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
}
unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0;
+ bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
@@ -879,6 +903,16 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
RegsToPass[i].second.getValueType()));
+
+ // Add a register mask operand representing the call-preserved registers.
+ const SparcRegisterInfo *TRI =
+ ((const SparcTargetMachine&)getTargetMachine()).getRegisterInfo();
+ const uint32_t *Mask = ((hasReturnsTwice)
+ ? TRI->getRTCallPreservedMask(CallConv)
+ : TRI->getCallPreservedMask(CallConv));
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
if (InFlag.getNode())
Ops.push_back(InFlag);
@@ -886,7 +920,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
- DAG.getIntPtrConstant(0, true), InFlag);
+ DAG.getIntPtrConstant(0, true), InFlag, dl);
InFlag = Chain.getValue(1);
// Assign locations to each value returned by this call.
@@ -907,6 +941,23 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
return Chain;
}
+// This function returns true if CalleeName is an ABI function that returns
+// a long double (fp128).
+static bool isFP128ABICall(const char *CalleeName)
+{
+ static const char *const ABICalls[] =
+ { "_Q_add", "_Q_sub", "_Q_mul", "_Q_div",
+ "_Q_sqrt", "_Q_neg",
+ "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
+ "_Q_lltoq", "_Q_ulltoq",
+ 0
+ };
+ for (const char * const *I = ABICalls; *I != 0; ++I)
+ if (strcmp(CalleeName, *I) == 0)
+ return true;
+ return false;
+}
+
unsigned
SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
{
@@ -917,7 +968,10 @@ SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
dyn_cast<ExternalSymbolSDNode>(Callee)) {
const Function *Fn = DAG.getMachineFunction().getFunction();
const Module *M = Fn->getParent();
- CalleeFn = M->getFunction(E->getSymbol());
+ const char *CalleeName = E->getSymbol();
+ CalleeFn = M->getFunction(CalleeName);
+ if (!CalleeFn && isFP128ABICall(CalleeName))
+ return 16; // Return sizeof(fp128)
}
if (!CalleeFn)
@@ -979,9 +1033,12 @@ SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc DL = CLI.DL;
+ SDLoc DL = CLI.DL;
SDValue Chain = CLI.Chain;
+ // Sparc target does not yet support tail call optimization.
+ CLI.IsTailCall = false;
+
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
@@ -1004,7 +1061,8 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Adjust the stack pointer to make room for the arguments.
// FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
// with more than 6 arguments.
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
+ DL);
// Collect the set of registers to pass to the function and their values.
// This will be emitted as a sequence of CopyToReg nodes glued to the call
@@ -1097,6 +1155,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
SDValue Callee = CLI.Callee;
+ bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy());
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
@@ -1110,6 +1169,15 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
+ // Add a register mask operand representing the call-preserved registers.
+ const SparcRegisterInfo *TRI =
+ ((const SparcTargetMachine&)getTargetMachine()).getRegisterInfo();
+ const uint32_t *Mask = ((hasReturnsTwice)
+ ? TRI->getRTCallPreservedMask(CLI.CallConv)
+ : TRI->getCallPreservedMask(CLI.CallConv));
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
// Make sure the CopyToReg nodes are glued to the call instruction which
// consumes the registers.
if (InGlue.getNode())
@@ -1122,7 +1190,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Revert the stack pointer immediately after the call.
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
- DAG.getIntPtrConstant(0, true), InGlue);
+ DAG.getIntPtrConstant(0, true), InGlue, DL);
InGlue = Chain.getValue(1);
// Now extract the return values. This is more or less the same as
@@ -1242,20 +1310,27 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
+ addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
if (Subtarget->is64Bit())
addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
// Turn FP extload into load/fextend
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
+
// Sparc doesn't have i1 sign extending load
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+
// Turn FP truncstore into trunc + store.
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f64, Expand);
// Custom legalize GlobalAddress nodes into LO/HI parts.
setOperationAction(ISD::GlobalAddress, getPointerTy(), Custom);
setOperationAction(ISD::GlobalTLSAddress, getPointerTy(), Custom);
setOperationAction(ISD::ConstantPool, getPointerTy(), Custom);
+ setOperationAction(ISD::BlockAddress, getPointerTy(), Custom);
// Sparc doesn't have sext_inreg, replace them with shl/sra
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
@@ -1268,13 +1343,25 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ // ... nor does SparcV9.
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::UREM, MVT::i64, Expand);
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
+ }
+
// Custom expand fp<->sint
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
- // Expand fp<->uint
- setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
+ // Custom expand fp<->uint
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::BITCAST, MVT::f32, Expand);
setOperationAction(ISD::BITCAST, MVT::i32, Expand);
@@ -1283,9 +1370,12 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::SELECT, MVT::i32, Expand);
setOperationAction(ISD::SELECT, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT, MVT::f128, Expand);
+
setOperationAction(ISD::SETCC, MVT::i32, Expand);
setOperationAction(ISD::SETCC, MVT::f32, Expand);
setOperationAction(ISD::SETCC, MVT::f64, Expand);
+ setOperationAction(ISD::SETCC, MVT::f128, Expand);
// Sparc doesn't have BRCOND either, it has BR_CC.
setOperationAction(ISD::BRCOND, MVT::Other, Expand);
@@ -1294,20 +1384,51 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::BR_CC, MVT::i32, Custom);
setOperationAction(ISD::BR_CC, MVT::f32, Custom);
setOperationAction(ISD::BR_CC, MVT::f64, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f128, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::ADDC, MVT::i64, Custom);
+ setOperationAction(ISD::ADDE, MVT::i64, Custom);
+ setOperationAction(ISD::SUBC, MVT::i64, Custom);
+ setOperationAction(ISD::SUBE, MVT::i64, Custom);
+ setOperationAction(ISD::BITCAST, MVT::f64, Expand);
+ setOperationAction(ISD::BITCAST, MVT::i64, Expand);
+ setOperationAction(ISD::SELECT, MVT::i64, Expand);
+ setOperationAction(ISD::SETCC, MVT::i64, Expand);
setOperationAction(ISD::BR_CC, MVT::i64, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
+
+ setOperationAction(ISD::CTPOP, MVT::i64, Legal);
+ setOperationAction(ISD::CTTZ , MVT::i64, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationAction(ISD::CTLZ , MVT::i64, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+ setOperationAction(ISD::ROTL , MVT::i64, Expand);
+ setOperationAction(ISD::ROTR , MVT::i64, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
}
// FIXME: There are instructions available for ATOMIC_FENCE
// on SparcV8 and later.
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
+ if (!Subtarget->isV9()) {
+ // SparcV8 does not have FNEGD and FABSD.
+ setOperationAction(ISD::FNEG, MVT::f64, Custom);
+ setOperationAction(ISD::FABS, MVT::f64, Custom);
+ }
+
+ setOperationAction(ISD::FSIN , MVT::f128, Expand);
+ setOperationAction(ISD::FCOS , MVT::f128, Expand);
+ setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
+ setOperationAction(ISD::FREM , MVT::f128, Expand);
+ setOperationAction(ISD::FMA , MVT::f128, Expand);
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
@@ -1326,8 +1447,10 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::ROTL , MVT::i32, Expand);
setOperationAction(ISD::ROTR , MVT::i32, Expand);
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FPOW , MVT::f128, Expand);
setOperationAction(ISD::FPOW , MVT::f64, Expand);
setOperationAction(ISD::FPOW , MVT::f32, Expand);
@@ -1339,7 +1462,12 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
- setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::MULHU, MVT::i64, Expand);
+ setOperationAction(ISD::MULHS, MVT::i64, Expand);
+ }
// VASTART needs to be custom lowered to use the VarArgsFrameIndex.
setOperationAction(ISD::VASTART , MVT::Other, Custom);
@@ -1353,14 +1481,100 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
- // No debug info support yet.
- setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
+ setExceptionPointerRegister(SP::I0);
+ setExceptionSelectorRegister(SP::I1);
setStackPointerRegisterToSaveRestore(SP::O6);
- if (TM.getSubtarget<SparcSubtarget>().isV9())
+ if (Subtarget->isV9())
setOperationAction(ISD::CTPOP, MVT::i32, Legal);
+ if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
+ setOperationAction(ISD::LOAD, MVT::f128, Legal);
+ setOperationAction(ISD::STORE, MVT::f128, Legal);
+ } else {
+ setOperationAction(ISD::LOAD, MVT::f128, Custom);
+ setOperationAction(ISD::STORE, MVT::f128, Custom);
+ }
+
+ if (Subtarget->hasHardQuad()) {
+ setOperationAction(ISD::FADD, MVT::f128, Legal);
+ setOperationAction(ISD::FSUB, MVT::f128, Legal);
+ setOperationAction(ISD::FMUL, MVT::f128, Legal);
+ setOperationAction(ISD::FDIV, MVT::f128, Legal);
+ setOperationAction(ISD::FSQRT, MVT::f128, Legal);
+ setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
+ setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
+ if (Subtarget->isV9()) {
+ setOperationAction(ISD::FNEG, MVT::f128, Legal);
+ setOperationAction(ISD::FABS, MVT::f128, Legal);
+ } else {
+ setOperationAction(ISD::FNEG, MVT::f128, Custom);
+ setOperationAction(ISD::FABS, MVT::f128, Custom);
+ }
+
+ if (!Subtarget->is64Bit()) {
+ setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
+ setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
+ setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
+ setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
+ }
+
+ } else {
+ // Custom legalize f128 operations.
+
+ setOperationAction(ISD::FADD, MVT::f128, Custom);
+ setOperationAction(ISD::FSUB, MVT::f128, Custom);
+ setOperationAction(ISD::FMUL, MVT::f128, Custom);
+ setOperationAction(ISD::FDIV, MVT::f128, Custom);
+ setOperationAction(ISD::FSQRT, MVT::f128, Custom);
+ setOperationAction(ISD::FNEG, MVT::f128, Custom);
+ setOperationAction(ISD::FABS, MVT::f128, Custom);
+
+ setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
+ setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
+ setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
+
+ // Set up runtime library names.
+ if (Subtarget->is64Bit()) {
+ setLibcallName(RTLIB::ADD_F128, "_Qp_add");
+ setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
+ setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
+ setLibcallName(RTLIB::DIV_F128, "_Qp_div");
+ setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
+ setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
+ setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
+ setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
+ setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
+ setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
+ setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
+ setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
+ setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
+ setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
+ setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
+ setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
+ setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
+ } else {
+ setLibcallName(RTLIB::ADD_F128, "_Q_add");
+ setLibcallName(RTLIB::SUB_F128, "_Q_sub");
+ setLibcallName(RTLIB::MUL_F128, "_Q_mul");
+ setLibcallName(RTLIB::DIV_F128, "_Q_div");
+ setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
+ setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
+ setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
+ setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
+ setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
+ setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
+ setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
+ setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
+ setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
+ setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
+ setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
+ setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
+ setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
+ }
+ }
+
setMinFunctionAlignment(2);
computeRegisterProperties();
@@ -1381,21 +1595,33 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
case SPISD::Lo: return "SPISD::Lo";
case SPISD::FTOI: return "SPISD::FTOI";
case SPISD::ITOF: return "SPISD::ITOF";
+ case SPISD::FTOX: return "SPISD::FTOX";
+ case SPISD::XTOF: return "SPISD::XTOF";
case SPISD::CALL: return "SPISD::CALL";
case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
case SPISD::FLUSHW: return "SPISD::FLUSHW";
+ case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
+ case SPISD::TLS_LD: return "SPISD::TLS_LD";
+ case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
}
}
+EVT SparcTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
+ if (!VT.isVector())
+ return MVT::i32;
+ return VT.changeVectorElementTypeToInteger();
+}
+
/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
/// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
-void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
+void SparcTargetLowering::computeMaskedBitsForTargetNode
+ (const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
APInt KnownZero2, KnownOne2;
KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
@@ -1444,7 +1670,7 @@ SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
SelectionDAG &DAG) const {
if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
return DAG.getTargetGlobalAddress(GA->getGlobal(),
- GA->getDebugLoc(),
+ SDLoc(GA),
GA->getValueType(0),
GA->getOffset(), TF);
@@ -1454,6 +1680,12 @@ SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
CP->getAlignment(),
CP->getOffset(), TF);
+ if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
+ return DAG.getTargetBlockAddress(BA->getBlockAddress(),
+ Op.getValueType(),
+ 0,
+ TF);
+
if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
return DAG.getTargetExternalSymbol(ES->getSymbol(),
ES->getValueType(0), TF);
@@ -1466,7 +1698,7 @@ SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
unsigned HiTF, unsigned LoTF,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
@@ -1476,7 +1708,7 @@ SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = getPointerTy();
// Handle PIC mode first.
@@ -1485,6 +1717,10 @@ SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
SDValue HiLo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo);
+ // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
+ // function has calls.
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setHasCalls(true);
return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
MachinePointerInfo::getGOT(), false, false, false, 0);
}
@@ -1493,6 +1729,7 @@ SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
switch(getTargetMachine().getCodeModel()) {
default:
llvm_unreachable("Unsupported absolute code model");
+ case CodeModel::JITDefault:
case CodeModel::Small:
// abs32.
return makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
@@ -1524,29 +1761,441 @@ SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
return makeAddress(Op, DAG);
}
-static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
- // Convert the fp value to integer in an FP register.
- assert(Op.getValueType() == MVT::i32);
- Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
- return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
+SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ return makeAddress(Op, DAG);
+}
+
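+// Lower TLS addresses. General/Local Dynamic models emit a __tls_get_addr
+// call, Initial Exec loads the thread offset through the GOT and adds it to
+// %g7, and Local Exec adds a %hix/%lox offset to %g7 directly.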
+SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+
+ GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
+ SDLoc DL(GA);
+ const GlobalValue *GV = GA->getGlobal();
+ EVT PtrVT = getPointerTy();
+
+ TLSModel::Model model = getTargetMachine().getTLSModel(GV);
+
+ if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
+ unsigned HiTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_HI22
+ : SPII::MO_TLS_LDM_HI22);
+ unsigned LoTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_LO10
+ : SPII::MO_TLS_LDM_LO10);
+ unsigned addTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_ADD
+ : SPII::MO_TLS_LDM_ADD);
+ unsigned callTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_CALL
+ : SPII::MO_TLS_LDM_CALL);
+
+ SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
+ SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
+ SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
+ withTargetFlags(Op, addTF, DAG));
+
+ SDValue Chain = DAG.getEntryNode();
+ SDValue InFlag;
+
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(1, true), DL);
+ Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
+ InFlag = Chain.getValue(1);
+ SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
+ SDValue Symbol = withTargetFlags(Op, callTF, DAG);
+
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SmallVector<SDValue, 4> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+ Ops.push_back(Symbol);
+ Ops.push_back(DAG.getRegister(SP::O0, PtrVT));
+ const uint32_t *Mask = getTargetMachine()
+ .getRegisterInfo()->getCallPreservedMask(CallingConv::C);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+ Ops.push_back(InFlag);
+ Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, &Ops[0], Ops.size());
+ InFlag = Chain.getValue(1);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, true),
+ DAG.getIntPtrConstant(0, true), InFlag, DL);
+ InFlag = Chain.getValue(1);
+ SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
+
+ if (model != TLSModel::LocalDynamic)
+ return Ret;
+
+ SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
+ withTargetFlags(Op, SPII::MO_TLS_LDO_HIX22, DAG));
+ SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
+ withTargetFlags(Op, SPII::MO_TLS_LDO_LOX10, DAG));
+ HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
+ return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
+ withTargetFlags(Op, SPII::MO_TLS_LDO_ADD, DAG));
+ }
+
+ if (model == TLSModel::InitialExec) {
+ unsigned ldTF = ((PtrVT == MVT::i64)? SPII::MO_TLS_IE_LDX
+ : SPII::MO_TLS_IE_LD);
+
+ SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
+
+ // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
+ // function has calls.
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setHasCalls(true);
+
+ SDValue TGA = makeHiLoPair(Op,
+ SPII::MO_TLS_IE_HI22, SPII::MO_TLS_IE_LO10, DAG);
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
+ SDValue Offset = DAG.getNode(SPISD::TLS_LD,
+ DL, PtrVT, Ptr,
+ withTargetFlags(Op, ldTF, DAG));
+ return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
+ DAG.getRegister(SP::G7, PtrVT), Offset,
+ withTargetFlags(Op, SPII::MO_TLS_IE_ADD, DAG));
+ }
+
+ assert(model == TLSModel::LocalExec);
+ SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
+ withTargetFlags(Op, SPII::MO_TLS_LE_HIX22, DAG));
+ SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
+ withTargetFlags(Op, SPII::MO_TLS_LE_LOX10, DAG));
+ SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
+
+ return DAG.getNode(ISD::ADD, DL, PtrVT,
+ DAG.getRegister(SP::G7, PtrVT), Offset);
+}
+
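+// Add one argument for an f128 libcall. f128 values are passed indirectly:
+// the value is stored to a 16-byte stack slot and a pointer to that slot is
+// passed instead.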
+SDValue
+SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args,
+ SDValue Arg, SDLoc DL,
+ SelectionDAG &DAG) const {
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ EVT ArgVT = Arg.getValueType();
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+
+ ArgListEntry Entry;
+ Entry.Node = Arg;
+ Entry.Ty = ArgTy;
+
+ if (ArgTy->isFP128Ty()) {
+ // Create a stack object and pass the pointer to the library function.
+ int FI = MFI->CreateStackObject(16, 8, false);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
+ Chain = DAG.getStore(Chain,
+ DL,
+ Entry.Node,
+ FIPtr,
+ MachinePointerInfo(),
+ false,
+ false,
+ 8);
+
+ Entry.Node = FIPtr;
+ Entry.Ty = PointerType::getUnqual(ArgTy);
+ }
+ Args.push_back(Entry);
+ return Chain;
+}
+
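+// Lower an f128 operation to a call to the named runtime routine. An f128
+// result is returned indirectly through a stack slot (passed as an sret
+// argument on 32-bit) and loaded back after the call.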
+SDValue
+SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
+ const char *LibFuncName,
+ unsigned numArgs) const {
+
+ ArgListTy Args;
+
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+
+ SDValue Callee = DAG.getExternalSymbol(LibFuncName, getPointerTy());
+ Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
+ Type *RetTyABI = RetTy;
+ SDValue Chain = DAG.getEntryNode();
+ SDValue RetPtr;
+
+ if (RetTy->isFP128Ty()) {
+ // Create a Stack Object to receive the return value of type f128.
+ ArgListEntry Entry;
+ int RetFI = MFI->CreateStackObject(16, 8, false);
+ RetPtr = DAG.getFrameIndex(RetFI, getPointerTy());
+ Entry.Node = RetPtr;
+ Entry.Ty = PointerType::getUnqual(RetTy);
+ if (!Subtarget->is64Bit())
+ Entry.isSRet = true;
+ Entry.isReturned = false;
+ Args.push_back(Entry);
+ RetTyABI = Type::getVoidTy(*DAG.getContext());
+ }
+
+ assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
+ for (unsigned i = 0, e = numArgs; i != e; ++i) {
+ Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
+ }
+ TargetLowering::
+ CallLoweringInfo CLI(Chain,
+ RetTyABI,
+ false, false, false, false,
+ 0, CallingConv::C,
+ false, false, true,
+ Callee, Args, DAG, SDLoc(Op));
+ std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
+
+ // The chain is in the second result.
+ if (RetTyABI == RetTy)
+ return CallInfo.first;
+
+ assert (RetTy->isFP128Ty() && "Unexpected return type!");
+
+ Chain = CallInfo.second;
+
+ // Load RetPtr to get the return value.
+ return DAG.getLoad(Op.getValueType(),
+ SDLoc(Op),
+ Chain,
+ RetPtr,
+ MachinePointerInfo(),
+ false, false, false, 8);
+}
+
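+// Lower an f128 comparison to an _Q_*/_Qp_* libcall and compare its integer
+// result with CMPICC; SPCC is rewritten to the integer condition code the
+// caller should branch or select on.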
+SDValue
+SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
+ unsigned &SPCC,
+ SDLoc DL,
+ SelectionDAG &DAG) const {
+
+ const char *LibCall = 0;
+ bool is64Bit = Subtarget->is64Bit();
+ switch(SPCC) {
+ default: llvm_unreachable("Unhandled conditional code!");
+ case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
+ case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
+ case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
+ case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
+ case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
+ case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
+ case SPCC::FCC_UL :
+ case SPCC::FCC_ULE:
+ case SPCC::FCC_UG :
+ case SPCC::FCC_UGE:
+ case SPCC::FCC_U :
+ case SPCC::FCC_O :
+ case SPCC::FCC_LG :
+ case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
+ }
+
+ SDValue Callee = DAG.getExternalSymbol(LibCall, getPointerTy());
+ Type *RetTy = Type::getInt32Ty(*DAG.getContext());
+ ArgListTy Args;
+ SDValue Chain = DAG.getEntryNode();
+ Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
+ Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
+
+ TargetLowering::
+ CallLoweringInfo CLI(Chain,
+ RetTy,
+ false, false, false, false,
+ 0, CallingConv::C,
+ false, false, true,
+ Callee, Args, DAG, DL);
+
+ std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
+
+ // The result is in the first value and the chain is in the second.
+ SDValue Result = CallInfo.first;
+
+ switch(SPCC) {
+ default: {
+ SDValue RHS = DAG.getTargetConstant(0, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_UL : {
+ SDValue Mask = DAG.getTargetConstant(1, Result.getValueType());
+ Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
+ SDValue RHS = DAG.getTargetConstant(0, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_ULE: {
+ SDValue RHS = DAG.getTargetConstant(2, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_UG : {
+ SDValue RHS = DAG.getTargetConstant(1, Result.getValueType());
+ SPCC = SPCC::ICC_G;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_UGE: {
+ SDValue RHS = DAG.getTargetConstant(1, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+
+ case SPCC::FCC_U : {
+ SDValue RHS = DAG.getTargetConstant(3, Result.getValueType());
+ SPCC = SPCC::ICC_E;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_O : {
+ SDValue RHS = DAG.getTargetConstant(3, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_LG : {
+ SDValue Mask = DAG.getTargetConstant(3, Result.getValueType());
+ Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
+ SDValue RHS = DAG.getTargetConstant(0, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_UE : {
+ SDValue Mask = DAG.getTargetConstant(3, Result.getValueType());
+ Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
+ SDValue RHS = DAG.getTargetConstant(0, Result.getValueType());
+ SPCC = SPCC::ICC_E;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ }
+}
+
+static SDValue
+LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) {
+
+ if (Op.getOperand(0).getValueType() == MVT::f64)
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
+
+ if (Op.getOperand(0).getValueType() == MVT::f32)
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
+
+ llvm_unreachable("fpextend with non-float operand!");
+ return SDValue(0, 0);
}
-static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
- assert(Op.getOperand(0).getValueType() == MVT::i32);
- SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
- // Convert the int value to FP in an FP register.
- return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp);
+static SDValue
+LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) {
+ // FP_ROUND on f64 and f32 are legal.
+ if (Op.getOperand(0).getValueType() != MVT::f128)
+ return Op;
+
+ if (Op.getValueType() == MVT::f64)
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
+ if (Op.getValueType() == MVT::f32)
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
+
+ llvm_unreachable("fpround to non-float!");
+ return SDValue(0, 0);
}
-static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+ assert(VT == MVT::i32 || VT == MVT::i64);
+
+ // Expand f128 operations to fp128 ABI calls.
+ if (Op.getOperand(0).getValueType() == MVT::f128
+ && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
+ const char *libName = TLI.getLibcallName(VT == MVT::i32
+ ? RTLIB::FPTOSINT_F128_I32
+ : RTLIB::FPTOSINT_F128_I64);
+ return TLI.LowerF128Op(Op, DAG, libName, 1);
+ }
+
+ // Expand if the resulting type is illegal.
+ if (!TLI.isTypeLegal(VT))
+ return SDValue(0, 0);
+
+ // Otherwise, convert the fp value to integer in an FP register.
+ if (VT == MVT::i32)
+ Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
+ else
+ Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
+
+ return DAG.getNode(ISD::BITCAST, dl, VT, Op);
+}
+
+static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
+ SDLoc dl(Op);
+ EVT OpVT = Op.getOperand(0).getValueType();
+ assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
+
+ EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
+
+ // Expand f128 operations to fp128 ABI calls.
+ if (Op.getValueType() == MVT::f128
+ && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
+ const char *libName = TLI.getLibcallName(OpVT == MVT::i32
+ ? RTLIB::SINTTOFP_I32_F128
+ : RTLIB::SINTTOFP_I64_F128);
+ return TLI.LowerF128Op(Op, DAG, libName, 1);
+ }
+
+ // Expand if the operand type is illegal.
+ if (!TLI.isTypeLegal(OpVT))
+ return SDValue(0, 0);
+
+ // Otherwise, convert the int value to FP in an FP register.
+ SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
+ unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
+ return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
+}
+
+static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+
+ // Expand if it does not involve f128 or the target has support for
+ // quad floating point instructions and the resulting type is legal.
+ if (Op.getOperand(0).getValueType() != MVT::f128 ||
+ (hasHardQuad && TLI.isTypeLegal(VT)))
+ return SDValue(0, 0);
+
+ assert(VT == MVT::i32 || VT == MVT::i64);
+
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(VT == MVT::i32
+ ? RTLIB::FPTOUINT_F128_I32
+ : RTLIB::FPTOUINT_F128_I64),
+ 1);
+}
+
+static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
+ SDLoc dl(Op);
+ EVT OpVT = Op.getOperand(0).getValueType();
+ assert(OpVT == MVT::i32 || OpVT == MVT::i64);
+
+ // Expand if it does not involve f128 or the target has support for
+ // quad floating point instructions and the operand type is legal.
+ if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
+ return SDValue(0, 0);
+
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(OpVT == MVT::i32
+ ? RTLIB::UINTTOFP_I32_F128
+ : RTLIB::UINTTOFP_I64_F128),
+ 1);
+}
+
+static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
SDValue Chain = Op.getOperand(0);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
SDValue LHS = Op.getOperand(2);
SDValue RHS = Op.getOperand(3);
SDValue Dest = Op.getOperand(4);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned Opc, SPCC = ~0U;
// If this is a br_cc of a "setcc", and if the setcc got lowered into
@@ -1556,28 +2205,34 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
// Get the condition flag.
SDValue CompareFlag;
if (LHS.getValueType().isInteger()) {
- EVT VTs[] = { LHS.getValueType(), MVT::Glue };
- SDValue Ops[2] = { LHS, RHS };
- CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
+ CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
// 32-bit compares use the icc flags, 64-bit uses the xcc flags.
Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
} else {
- CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
- if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
- Opc = SPISD::BRFCC;
+ if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
+ if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
+ CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
+ Opc = SPISD::BRICC;
+ } else {
+ CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
+ if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
+ Opc = SPISD::BRFCC;
+ }
}
return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
DAG.getConstant(SPCC, MVT::i32), CompareFlag);
}
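When the operands are f128 and there is no hardware quad support, the compare itself becomes a libcall via LowerF128Compare, and the branch then tests that libcall's integer result, which is why SPISD::BRICC is used above. A rough sketch; the _Q_flt entry point is an assumption here, the actual helper is picked inside LowerF128Compare:

    extern "C" int _Q_flt(long double a, long double b); // assumed soft-quad helper

    void branch_if_less(long double a, long double b) {
      int lt = _Q_flt(a, b);  // f128 compare lowered to a library call
      if (lt) {               // branch on the integer condition codes (BRICC)
        /* taken path */
      }
    }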
-static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
SDValue TrueVal = Op.getOperand(2);
SDValue FalseVal = Op.getOperand(3);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned Opc, SPCC = ~0U;
// If this is a select_cc of a "setcc", and if the setcc got lowered into
@@ -1586,17 +2241,20 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
SDValue CompareFlag;
if (LHS.getValueType().isInteger()) {
- // subcc returns a value
- EVT VTs[] = { LHS.getValueType(), MVT::Glue };
- SDValue Ops[2] = { LHS, RHS };
- CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
+ CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
Opc = LHS.getValueType() == MVT::i32 ?
SPISD::SELECT_ICC : SPISD::SELECT_XCC;
if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
} else {
- CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
- Opc = SPISD::SELECT_FCC;
- if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
+ if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
+ if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
+ CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
+ Opc = SPISD::SELECT_ICC;
+ } else {
+ CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
+ Opc = SPISD::SELECT_FCC;
+ if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
+ }
}
return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
DAG.getConstant(SPCC, MVT::i32), CompareFlag);
@@ -1607,9 +2265,12 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
MachineFunction &MF = DAG.getMachineFunction();
SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
+ // Need frame address to find the address of VarArgsFrameIndex.
+ MF.getFrameInfo()->setFrameAddressIsTaken(true);
+
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Offset =
DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(),
DAG.getRegister(SP::I6, TLI.getPointerTy()),
@@ -1626,7 +2287,7 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
SDValue VAListPtr = Node->getOperand(1);
EVT PtrVT = VAListPtr.getValueType();
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr,
MachinePointerInfo(SV), false, false, false, 0);
// Increment the pointer, VAList, to the next vaarg.
@@ -1642,27 +2303,32 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8);
}
-static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
+ const SparcSubtarget *Subtarget) {
SDValue Chain = Op.getOperand(0); // Legalize the chain.
SDValue Size = Op.getOperand(1); // Legalize the size.
- DebugLoc dl = Op.getDebugLoc();
+ EVT VT = Size->getValueType(0);
+ SDLoc dl(Op);
unsigned SPReg = SP::O6;
- SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
- SDValue NewSP = DAG.getNode(ISD::SUB, dl, MVT::i32, SP, Size); // Value
+ SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
+ SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
// The resultant pointer is actually 16 words from the bottom of the stack,
// to provide a register spill area.
- SDValue NewVal = DAG.getNode(ISD::ADD, dl, MVT::i32, NewSP,
- DAG.getConstant(96, MVT::i32));
+ unsigned regSpillArea = Subtarget->is64Bit() ? 128 : 96;
+ regSpillArea += Subtarget->getStackPointerBias();
+
+ SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
+ DAG.getConstant(regSpillArea, VT));
SDValue Ops[2] = { NewVal, Chain };
return DAG.getMergeValues(Ops, 2, dl);
}
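As a worked example of the offset added back above (assuming getStackPointerBias() is 2047 on 64-bit V9 and 0 on 32-bit): a 32-bit frame skips 96 + 0 = 96 bytes and a 64-bit frame skips 128 + 2047 = 2175 bytes, so the returned pointer lands above the fixed register-save area and the stack bias:

    // Sketch of the adjustment; the bias values are assumptions about the
    // subtarget and are not shown in this hunk.
    unsigned allocaResultOffset(bool is64Bit) {
      unsigned spillArea = is64Bit ? 128 : 96;  // 16 register-save slots
      unsigned bias      = is64Bit ? 2047 : 0;  // V9 stack pointer bias
      return spillArea + bias;                  // 2175 or 96
    }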
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue Chain = DAG.getNode(SPISD::FLUSHW,
dl, MVT::Other, DAG.getEntryNode());
return Chain;
@@ -1673,7 +2339,7 @@ static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned FrameReg = SP::I6;
uint64_t depth = Op.getConstantOperandVal(0);
@@ -1699,20 +2365,25 @@ static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
return FrameAddr;
}
-static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setReturnAddressIsTaken(true);
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
- unsigned RetReg = SP::I7;
-
+ SDLoc dl(Op);
uint64_t depth = Op.getConstantOperandVal(0);
SDValue RetAddr;
- if (depth == 0)
+ if (depth == 0) {
+ unsigned RetReg = MF.addLiveIn(SP::I7,
+ TLI.getRegClassFor(TLI.getPointerTy()));
RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
- else {
+ } else {
+ // Need frame address to find return address of the caller.
+ MFI->setFrameAddressIsTaken(true);
+
// flush first to make sure the windowed registers' values are in stack
SDValue Chain = getFLUSHW(Op, DAG);
RetAddr = DAG.getCopyFromReg(Chain, dl, SP::I6, VT);
@@ -1731,23 +2402,272 @@ static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
return RetAddr;
}
+static SDValue LowerF64Op(SDValue Op, SelectionDAG &DAG, unsigned opcode)
+{
+ SDLoc dl(Op);
+
+ assert(Op.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
+ assert(opcode == ISD::FNEG || opcode == ISD::FABS);
+
+ // Lower fneg/fabs on f64 to fneg/fabs on f32.
+ // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
+ // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
+
+ SDValue SrcReg64 = Op.getOperand(0);
+ SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
+ SrcReg64);
+ SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
+ SrcReg64);
+
+ Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
+
+ SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, MVT::f64), 0);
+ DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
+ DstReg64, Hi32);
+ DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
+ DstReg64, Lo32);
+ return DstReg64;
+}
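The even/odd split works because the sign bit of an f64 lives entirely in the even (most significant) single of the register pair, so a pre-V9 target can negate or take the absolute value of a double with one fnegs/fabss plus an fmovs. A self-contained sketch of the same bit manipulation for fneg:

    #include <cstdint>
    #include <cstring>

    // Negating an f64 only flips the sign bit, which sits in the high 32 bits --
    // hence "fnegs on sub_even, fmovs on sub_odd" above.
    double fneg_via_halves(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      uint32_t hi = uint32_t(bits >> 32) ^ 0x80000000u; // FNEGS on the even single
      uint32_t lo = uint32_t(bits);                     // FMOVS: odd single unchanged
      bits = (uint64_t(hi) << 32) | lo;
      std::memcpy(&x, &bits, sizeof(x));
      return x;
    }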
+
+// Lower an f128 load into two f64 loads.
+static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
+{
+ SDLoc dl(Op);
+ LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
+ assert(LdNode && LdNode->getOffset().getOpcode() == ISD::UNDEF
+ && "Unexpected node type");
+
+ unsigned alignment = LdNode->getAlignment();
+ if (alignment > 8)
+ alignment = 8;
+
+ SDValue Hi64 = DAG.getLoad(MVT::f64,
+ dl,
+ LdNode->getChain(),
+ LdNode->getBasePtr(),
+ LdNode->getPointerInfo(),
+ false, false, false, alignment);
+ EVT addrVT = LdNode->getBasePtr().getValueType();
+ SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
+ LdNode->getBasePtr(),
+ DAG.getConstant(8, addrVT));
+ SDValue Lo64 = DAG.getLoad(MVT::f64,
+ dl,
+ LdNode->getChain(),
+ LoPtr,
+ LdNode->getPointerInfo(),
+ false, false, false, alignment);
+
+ SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, MVT::i32);
+ SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, MVT::i32);
+
+ SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, MVT::f128);
+ InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
+ MVT::f128,
+ SDValue(InFP128, 0),
+ Hi64,
+ SubRegEven);
+ InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
+ MVT::f128,
+ SDValue(InFP128, 0),
+ Lo64,
+ SubRegOdd);
+ SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
+ SDValue(Lo64.getNode(), 1) };
+ SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ &OutChains[0], 2);
+ SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
+ return DAG.getMergeValues(Ops, 2, dl);
+}
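In other words, the 16-byte f128 load is rebuilt from two 8-byte loads at offsets 0 and +8, the first filling sub_even64 and the second sub_odd64 of the quad register. A minimal sketch of the access pattern (illustration only):

    #include <cstring>

    // The two halves of a quad value as seen by this lowering.
    struct F128Halves { double even, odd; };

    F128Halves loadF128AsTwoF64(const void *addr) {
      F128Halves v;
      std::memcpy(&v.even, addr, 8);                                // ldd [addr]
      std::memcpy(&v.odd, static_cast<const char *>(addr) + 8, 8);  // ldd [addr+8]
      return v;
    }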
+
+// Lower an f128 store into two f64 stores.
+static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
+ SDLoc dl(Op);
+ StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
+ assert(StNode && StNode->getOffset().getOpcode() == ISD::UNDEF
+ && "Unexpected node type");
+ SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, MVT::i32);
+ SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, MVT::i32);
+
+ SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+ dl,
+ MVT::f64,
+ StNode->getValue(),
+ SubRegEven);
+ SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+ dl,
+ MVT::f64,
+ StNode->getValue(),
+ SubRegOdd);
+
+ unsigned alignment = StNode->getAlignment();
+ if (alignment > 8)
+ alignment = 8;
+
+ SDValue OutChains[2];
+ OutChains[0] = DAG.getStore(StNode->getChain(),
+ dl,
+ SDValue(Hi64, 0),
+ StNode->getBasePtr(),
+ MachinePointerInfo(),
+ false, false, alignment);
+ EVT addrVT = StNode->getBasePtr().getValueType();
+ SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
+ StNode->getBasePtr(),
+ DAG.getConstant(8, addrVT));
+ OutChains[1] = DAG.getStore(StNode->getChain(),
+ dl,
+ SDValue(Lo64, 0),
+ LoPtr,
+ MachinePointerInfo(),
+ false, false, alignment);
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ &OutChains[0], 2);
+}
+
+static SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool is64Bit) {
+ if (Op.getValueType() == MVT::f64)
+ return LowerF64Op(Op, DAG, ISD::FNEG);
+ if (Op.getValueType() == MVT::f128)
+ return TLI.LowerF128Op(Op, DAG, ((is64Bit) ? "_Qp_neg" : "_Q_neg"), 1);
+ return Op;
+}
+
+static SDValue LowerFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
+ if (Op.getValueType() == MVT::f64)
+ return LowerF64Op(Op, DAG, ISD::FABS);
+ if (Op.getValueType() != MVT::f128)
+ return Op;
+
+ // Lower fabs on f128 to fabs on f64
+ // fabs f128 => fabs f64:sub_even64, fmov f64:sub_odd64
+
+ SDLoc dl(Op);
+ SDValue SrcReg128 = Op.getOperand(0);
+ SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
+ SrcReg128);
+ SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
+ SrcReg128);
+ if (isV9)
+ Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
+ else
+ Hi64 = LowerF64Op(Hi64, DAG, ISD::FABS);
+
+ SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, MVT::f128), 0);
+ DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
+ DstReg128, Hi64);
+ DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
+ DstReg128, Lo64);
+ return DstReg128;
+}
+
+static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
+
+ if (Op.getValueType() != MVT::i64)
+ return Op;
+
+ SDLoc dl(Op);
+ SDValue Src1 = Op.getOperand(0);
+ SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
+ SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
+ DAG.getConstant(32, MVT::i64));
+ Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
+
+ SDValue Src2 = Op.getOperand(1);
+ SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
+ SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
+ DAG.getConstant(32, MVT::i64));
+ Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
+
+
+ bool hasChain = false;
+ unsigned hiOpc = Op.getOpcode();
+ switch (Op.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case ISD::ADDC: hiOpc = ISD::ADDE; break;
+ case ISD::ADDE: hasChain = true; break;
+ case ISD::SUBC: hiOpc = ISD::SUBE; break;
+ case ISD::SUBE: hasChain = true; break;
+ }
+ SDValue Lo;
+ SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
+ if (hasChain) {
+ Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
+ Op.getOperand(2));
+ } else {
+ Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
+ }
+ SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
+ SDValue Carry = Hi.getValue(1);
+
+ Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
+ Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
+ Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
+ DAG.getConstant(32, MVT::i64));
+
+ SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
+ SDValue Ops[2] = { Dst, Carry };
+ return DAG.getMergeValues(Ops, 2, dl);
+}
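The split mirrors ordinary multi-word arithmetic: add (or subtract) the low 32-bit halves first, propagate the carry into the high halves, then glue the two halves back into an i64. A sketch of the ADDC case:

    #include <cstdint>

    uint64_t addc_i64_via_i32(uint64_t a, uint64_t b, bool &carry_out) {
      uint32_t alo = uint32_t(a), ahi = uint32_t(a >> 32);
      uint32_t blo = uint32_t(b), bhi = uint32_t(b >> 32);

      uint64_t lo = uint64_t(alo) + blo;              // ADDCC: low halves, sets carry
      uint64_t hi = uint64_t(ahi) + bhi + (lo >> 32); // ADDE: high halves plus carry
      carry_out = (hi >> 32) != 0;                    // carry out of the full i64 add

      return (uint64_t(uint32_t(hi)) << 32) | uint32_t(lo);
    }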
+
SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+
+ bool hasHardQuad = Subtarget->hasHardQuad();
+ bool is64Bit = Subtarget->is64Bit();
+ bool isV9 = Subtarget->isV9();
+
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
- case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
+
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
- case ISD::GlobalTLSAddress:
- llvm_unreachable("TLS not implemented for Sparc.");
+ case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
+ case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
- case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
- case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
- case ISD::BR_CC: return LowerBR_CC(Op, DAG);
- case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
+ case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
+ hasHardQuad);
case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
case ISD::VAARG: return LowerVAARG(Op, DAG);
- case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
+ case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
+ Subtarget);
+
+ case ISD::LOAD: return LowerF128Load(Op, DAG);
+ case ISD::STORE: return LowerF128Store(Op, DAG);
+ case ISD::FADD: return LowerF128Op(Op, DAG,
+ getLibcallName(RTLIB::ADD_F128), 2);
+ case ISD::FSUB: return LowerF128Op(Op, DAG,
+ getLibcallName(RTLIB::SUB_F128), 2);
+ case ISD::FMUL: return LowerF128Op(Op, DAG,
+ getLibcallName(RTLIB::MUL_F128), 2);
+ case ISD::FDIV: return LowerF128Op(Op, DAG,
+ getLibcallName(RTLIB::DIV_F128), 2);
+ case ISD::FSQRT: return LowerF128Op(Op, DAG,
+ getLibcallName(RTLIB::SQRT_F128),1);
+ case ISD::FNEG: return LowerFNEG(Op, DAG, *this, is64Bit);
+ case ISD::FABS: return LowerFABS(Op, DAG, isV9);
+ case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
+ case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
+ case ISD::ADDC:
+ case ISD::ADDE:
+ case ISD::SUBC:
+ case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
}
}
@@ -1764,11 +2684,13 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case SP::SELECT_CC_Int_ICC:
case SP::SELECT_CC_FP_ICC:
case SP::SELECT_CC_DFP_ICC:
+ case SP::SELECT_CC_QFP_ICC:
BROpcode = SP::BCOND;
break;
case SP::SELECT_CC_Int_FCC:
case SP::SELECT_CC_FP_FCC:
case SP::SELECT_CC_DFP_FCC:
+ case SP::SELECT_CC_QFP_FCC:
BROpcode = SP::FBCOND;
break;
}
@@ -1847,7 +2769,7 @@ SparcTargetLowering::getConstraintType(const std::string &Constraint) const {
std::pair<unsigned, const TargetRegisterClass*>
SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':
@@ -1863,3 +2785,50 @@ SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// The Sparc target isn't yet aware of offsets.
return false;
}
+
+void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue>& Results,
+ SelectionDAG &DAG) const {
+
+ SDLoc dl(N);
+
+ RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
+
+ switch (N->getOpcode()) {
+ default:
+ llvm_unreachable("Do not know how to custom type legalize this operation!");
+
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ // Custom lower only if both f128 and i64 are involved.
+ if (N->getOperand(0).getValueType() != MVT::f128
+ || N->getValueType(0) != MVT::i64)
+ return;
+ libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
+ ? RTLIB::FPTOSINT_F128_I64
+ : RTLIB::FPTOUINT_F128_I64);
+
+ Results.push_back(LowerF128Op(SDValue(N, 0),
+ DAG,
+ getLibcallName(libCall),
+ 1));
+ return;
+
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ // Custom lower only if both f128 and i64 are involved.
+ if (N->getValueType(0) != MVT::f128
+ || N->getOperand(0).getValueType() != MVT::i64)
+ return;
+
+ libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
+ ? RTLIB::SINTTOFP_I64_F128
+ : RTLIB::UINTTOFP_I64_F128);
+
+ Results.push_back(LowerF128Op(SDValue(N, 0),
+ DAG,
+ getLibcallName(libCall),
+ 1));
+ return;
+ }
+}
diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h
index fd706bebd415..2659fc89501d 100644
--- a/lib/Target/Sparc/SparcISelLowering.h
+++ b/lib/Target/Sparc/SparcISelLowering.h
@@ -37,11 +37,17 @@ namespace llvm {
FTOI, // FP to Int within a FP register.
ITOF, // Int to FP within a FP register.
+ FTOX, // FP to Int64 within a FP register.
+ XTOF, // Int64 to FP within a FP register.
CALL, // A call instruction.
RET_FLAG, // Return with a flag operand.
- GLOBAL_BASE_REG, // Global base reg for PIC
- FLUSHW // FLUSH register windows to stack
+ GLOBAL_BASE_REG, // Global base reg for PIC.
+ FLUSHW, // FLUSH register windows to stack.
+
+ TLS_ADD, // For Thread Local Storage (TLS).
+ TLS_LD,
+ TLS_CALL
};
}
@@ -68,29 +74,32 @@ namespace llvm {
ConstraintType getConstraintType(const std::string &Constraint) const;
std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
+ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
+ /// getSetCCResultType - Return the ISD::SETCC ValueType
+ virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
+
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerFormalArguments_32(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerFormalArguments_64(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
@@ -106,26 +115,49 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
SDValue LowerReturn_32(SDValue Chain,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const;
+ SDLoc DL, SelectionDAG &DAG) const;
SDValue LowerReturn_64(SDValue Chain,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const;
+ SDLoc DL, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
unsigned getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const;
SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const;
SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF,
SelectionDAG &DAG) const;
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args,
+ SDValue Arg, SDLoc DL,
+ SelectionDAG &DAG) const;
+ SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG,
+ const char *LibFuncName,
+ unsigned numArgs) const;
+ SDValue LowerF128Compare(SDValue LHS, SDValue RHS,
+ unsigned &SPCC,
+ SDLoc DL,
+ SelectionDAG &DAG) const;
+
+ bool ShouldShrinkFPConstant(EVT VT) const {
+ // Do not shrink FP constpool if VT == MVT::f128.
+ // (ldd, call _Q_fdtoq) is more expensive than two ldds.
+ return VT != MVT::f128;
+ }
+
+ virtual void ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue>& Results,
+ SelectionDAG &DAG) const;
};
} // end namespace llvm
diff --git a/lib/Target/Sparc/SparcInstr64Bit.td b/lib/Target/Sparc/SparcInstr64Bit.td
index 91805f9f11b5..8656de5c8ba9 100644
--- a/lib/Target/Sparc/SparcInstr64Bit.td
+++ b/lib/Target/Sparc/SparcInstr64Bit.td
@@ -59,10 +59,6 @@ defm SRAX : F3_S<"srax", 0b100111, 1, sra, i64, I64Regs>;
// preferable to use a constant pool load instead, depending on the
// microarchitecture.
-// The %g0 register is constant 0.
-// This is useful for stx %g0, [...], for example.
-def : Pat<(i64 0), (i64 G0)>, Requires<[Is64Bit]>;
-
// Single-instruction patterns.
// The ALU instructions want their simm13 operands as i32 immediates.
@@ -157,14 +153,10 @@ def : Pat<(xor i64:$a, (not i64:$b)), (XNORrr $a, $b)>;
def : Pat<(add i64:$a, i64:$b), (ADDrr $a, $b)>;
def : Pat<(sub i64:$a, i64:$b), (SUBrr $a, $b)>;
-// Add/sub with carry were renamed to addc/subc in SPARC v9.
-def : Pat<(adde i64:$a, i64:$b), (ADDXrr $a, $b)>;
-def : Pat<(sube i64:$a, i64:$b), (SUBXrr $a, $b)>;
-
-def : Pat<(addc i64:$a, i64:$b), (ADDCCrr $a, $b)>;
-def : Pat<(subc i64:$a, i64:$b), (SUBCCrr $a, $b)>;
+def : Pat<(SPcmpicc i64:$a, i64:$b), (CMPrr $a, $b)>;
-def : Pat<(SPcmpicc i64:$a, i64:$b), (SUBCCrr $a, $b)>;
+def : Pat<(tlsadd i64:$a, i64:$b, tglobaltlsaddr:$sym),
+ (TLS_ADDrr $a, $b, $sym)>;
// Register-immediate instructions.
@@ -175,7 +167,15 @@ def : Pat<(xor i64:$a, (i64 simm13:$b)), (XORri $a, (as_i32imm $b))>;
def : Pat<(add i64:$a, (i64 simm13:$b)), (ADDri $a, (as_i32imm $b))>;
def : Pat<(sub i64:$a, (i64 simm13:$b)), (SUBri $a, (as_i32imm $b))>;
-def : Pat<(SPcmpicc i64:$a, (i64 simm13:$b)), (SUBCCri $a, (as_i32imm $b))>;
+def : Pat<(SPcmpicc i64:$a, (i64 simm13:$b)), (CMPri $a, (as_i32imm $b))>;
+
+def : Pat<(ctpop i64:$src), (POPCrr $src)>;
+
+// "LEA" form of add
+def LEAX_ADDri : F3_2<2, 0b000000,
+ (outs I64Regs:$dst), (ins MEMri:$addr),
+ "add ${addr:arith}, $dst",
+ [(set iPTR:$dst, ADDRri:$addr)]>;
} // Predicates = [Is64Bit]
@@ -241,8 +241,19 @@ def LDXri : F3_2<3, 0b001011,
(outs I64Regs:$dst), (ins MEMri:$addr),
"ldx [$addr], $dst",
[(set i64:$dst, (load ADDRri:$addr))]>;
+let mayLoad = 1 in
+ def TLS_LDXrr : F3_1<3, 0b001011,
+ (outs IntRegs:$dst), (ins MEMrr:$addr, TLSSym:$sym),
+ "ldx [$addr], $dst, $sym",
+ [(set i64:$dst,
+ (tlsld ADDRrr:$addr, tglobaltlsaddr:$sym))]>;
// Extending loads to i64.
+def : Pat<(i64 (zextloadi1 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
+def : Pat<(i64 (zextloadi1 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi1 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi1 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
+
def : Pat<(i64 (zextloadi8 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
def : Pat<(i64 (zextloadi8 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
def : Pat<(i64 (extloadi8 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
@@ -290,6 +301,10 @@ def : Pat<(truncstorei16 i64:$src, ADDRri:$addr), (STHri ADDRri:$addr, $src)>;
def : Pat<(truncstorei32 i64:$src, ADDRrr:$addr), (STrr ADDRrr:$addr, $src)>;
def : Pat<(truncstorei32 i64:$src, ADDRri:$addr), (STri ADDRri:$addr, $src)>;
+// store 0, addr -> store %g0, addr
+def : Pat<(store (i64 0), ADDRrr:$dst), (STXrr ADDRrr:$dst, (i64 G0))>;
+def : Pat<(store (i64 0), ADDRri:$dst), (STXri ADDRri:$dst, (i64 G0))>;
+
} // Predicates = [Is64Bit]
@@ -307,9 +322,9 @@ def : Pat<(truncstorei32 i64:$src, ADDRri:$addr), (STri ADDRri:$addr, $src)>;
let Predicates = [Is64Bit] in {
let Uses = [ICC] in
-def BPXCC : BranchSP<0, (ins brtarget:$dst, CCOp:$cc),
- "bp$cc %xcc, $dst",
- [(SPbrxcc bb:$dst, imm:$cc)]>;
+def BPXCC : BranchSP<(ins brtarget:$imm22, CCOp:$cond),
+ "b$cond %xcc, $imm22",
+ [(SPbrxcc bb:$imm22, imm:$cond)]>;
// Conditional moves on %xcc.
let Uses = [ICC], Constraints = "$f = $rd" in {
@@ -322,12 +337,68 @@ def MOVXCCri : Pseudo<(outs IntRegs:$rd),
(ins i32imm:$i, IntRegs:$f, CCOp:$cond),
"mov$cond %xcc, $i, $rd",
[(set i32:$rd,
- (SPselecticc simm11:$i, i32:$f, imm:$cond))]>;
+ (SPselectxcc simm11:$i, i32:$f, imm:$cond))]>;
+def FMOVS_XCC : Pseudo<(outs FPRegs:$rd),
+ (ins FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
+ "fmovs$cond %xcc, $rs2, $rd",
+ [(set f32:$rd,
+ (SPselectxcc f32:$rs2, f32:$f, imm:$cond))]>;
+def FMOVD_XCC : Pseudo<(outs DFPRegs:$rd),
+ (ins DFPRegs:$rs2, DFPRegs:$f, CCOp:$cond),
+ "fmovd$cond %xcc, $rs2, $rd",
+ [(set f64:$rd,
+ (SPselectxcc f64:$rs2, f64:$f, imm:$cond))]>;
} // Uses, Constraints
+//===----------------------------------------------------------------------===//
+// 64-bit Floating Point Conversions.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [Is64Bit] in {
+
+def FXTOS : F3_3u<2, 0b110100, 0b010000100,
+ (outs FPRegs:$dst), (ins DFPRegs:$src),
+ "fxtos $src, $dst",
+ [(set FPRegs:$dst, (SPxtof DFPRegs:$src))]>;
+def FXTOD : F3_3u<2, 0b110100, 0b010001000,
+ (outs DFPRegs:$dst), (ins DFPRegs:$src),
+ "fxtod $src, $dst",
+ [(set DFPRegs:$dst, (SPxtof DFPRegs:$src))]>;
+def FXTOQ : F3_3u<2, 0b110100, 0b010001100,
+ (outs QFPRegs:$dst), (ins DFPRegs:$src),
+ "fxtoq $src, $dst",
+ [(set QFPRegs:$dst, (SPxtof DFPRegs:$src))]>,
+ Requires<[HasHardQuad]>;
+
+def FSTOX : F3_3u<2, 0b110100, 0b010000001,
+ (outs DFPRegs:$dst), (ins FPRegs:$src),
+ "fstox $src, $dst",
+ [(set DFPRegs:$dst, (SPftox FPRegs:$src))]>;
+def FDTOX : F3_3u<2, 0b110100, 0b010000010,
+ (outs DFPRegs:$dst), (ins DFPRegs:$src),
+ "fdtox $src, $dst",
+ [(set DFPRegs:$dst, (SPftox DFPRegs:$src))]>;
+def FQTOX : F3_3u<2, 0b110100, 0b010000011,
+ (outs DFPRegs:$dst), (ins QFPRegs:$src),
+ "fqtox $src, $dst",
+ [(set DFPRegs:$dst, (SPftox QFPRegs:$src))]>,
+ Requires<[HasHardQuad]>;
+
+} // Predicates = [Is64Bit]
+
def : Pat<(SPselectxcc i64:$t, i64:$f, imm:$cond),
(MOVXCCrr $t, $f, imm:$cond)>;
def : Pat<(SPselectxcc (i64 simm11:$t), i64:$f, imm:$cond),
(MOVXCCri (as_i32imm $t), $f, imm:$cond)>;
+def : Pat<(SPselecticc i64:$t, i64:$f, imm:$cond),
+ (MOVICCrr $t, $f, imm:$cond)>;
+def : Pat<(SPselecticc (i64 simm11:$t), i64:$f, imm:$cond),
+ (MOVICCri (as_i32imm $t), $f, imm:$cond)>;
+
+def : Pat<(SPselectfcc i64:$t, i64:$f, imm:$cond),
+ (MOVFCCrr $t, $f, imm:$cond)>;
+def : Pat<(SPselectfcc (i64 simm11:$t), i64:$f, imm:$cond),
+ (MOVFCCri (as_i32imm $t), $f, imm:$cond)>;
+
} // Predicates = [Is64Bit]
diff --git a/lib/Target/Sparc/SparcInstrFormats.td b/lib/Target/Sparc/SparcInstrFormats.td
index e7fde085beea..afa287415d31 100644
--- a/lib/Target/Sparc/SparcInstrFormats.td
+++ b/lib/Target/Sparc/SparcInstrFormats.td
@@ -7,14 +7,15 @@
//
//===----------------------------------------------------------------------===//
-class InstSP<dag outs, dag ins, string asmstr, list<dag> pattern> : Instruction {
+class InstSP<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : Instruction {
field bits<32> Inst;
let Namespace = "SP";
bits<2> op;
let Inst{31-30} = op; // Top two bits are the 'op' field
-
+
dag OutOperandList = outs;
dag InOperandList = ins;
let AsmString = asmstr;
@@ -46,12 +47,11 @@ class F2_1<bits<3> op2Val, dag outs, dag ins, string asmstr, list<dag> pattern>
let Inst{29-25} = rd;
}
-class F2_2<bits<4> condVal, bits<3> op2Val, dag outs, dag ins, string asmstr,
+class F2_2<bits<3> op2Val, dag outs, dag ins, string asmstr,
list<dag> pattern> : F2<outs, ins, asmstr, pattern> {
bits<4> cond;
bit annul = 0; // currently unused
- let cond = condVal;
let op2 = op2Val;
let Inst{29} = annul;
@@ -88,7 +88,7 @@ class F3_1<bits<2> opVal, bits<6> op3val, dag outs, dag ins,
let Inst{4-0} = rs2;
}
-class F3_2<bits<2> opVal, bits<6> op3val, dag outs, dag ins,
+class F3_2<bits<2> opVal, bits<6> op3val, dag outs, dag ins,
string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
bits<13> simm13;
@@ -111,6 +111,32 @@ class F3_3<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
let Inst{4-0} = rs2;
}
+// floating-point unary operations.
+class F3_3u<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
+ string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ bits<5> rs2;
+
+ let op = opVal;
+ let op3 = op3val;
+ let rs1 = 0;
+
+ let Inst{13-5} = opfval; // fp opcode
+ let Inst{4-0} = rs2;
+}
+
+// floating-point compares.
+class F3_3c<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
+ string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ bits<5> rs2;
+
+ let op = opVal;
+ let op3 = op3val;
+ let rd = 0;
+
+ let Inst{13-5} = opfval; // fp opcode
+ let Inst{4-0} = rs2;
+}
+
// Shift by register rs2.
class F3_Sr<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins,
string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
@@ -149,3 +175,59 @@ multiclass F3_S<string OpcStr, bits<6> Op3Val, bit XVal, SDNode OpNode,
!strconcat(OpcStr, " $rs, $shcnt, $rd"),
[(set VT:$rd, (OpNode VT:$rs, (i32 imm:$shcnt)))]>;
}
+
+class F4<bits<6> op3, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSP<outs, ins, asmstr, pattern> {
+ bits<5> rd;
+
+ let op = 2;
+ let Inst{29-25} = rd;
+ let Inst{24-19} = op3;
+}
+
+
+class F4_1<bits<6> op3, dag outs, dag ins,
+ string asmstr, list<dag> pattern>
+ : F4<op3, outs, ins, asmstr, pattern> {
+
+ bits<3> cc;
+ bits<4> cond;
+ bits<5> rs2;
+
+ let Inst{4-0} = rs2;
+ let Inst{11} = cc{0};
+ let Inst{12} = cc{1};
+ let Inst{13} = 0;
+ let Inst{17-14} = cond;
+ let Inst{18} = cc{2};
+
+}
+
+class F4_2<bits<6> op3, dag outs, dag ins,
+ string asmstr, list<dag> pattern>
+ : F4<op3, outs, ins, asmstr, pattern> {
+ bits<3> cc;
+ bits<4> cond;
+ bits<11> simm11;
+
+ let Inst{10-0} = simm11;
+ let Inst{11} = cc{0};
+ let Inst{12} = cc{1};
+ let Inst{13} = 1;
+ let Inst{17-14} = cond;
+ let Inst{18} = cc{2};
+}
+
+class F4_3<bits<6> op3, bits<6> opf_low, dag outs, dag ins,
+ string asmstr, list<dag> pattern>
+ : F4<op3, outs, ins, asmstr, pattern> {
+ bits<4> cond;
+ bits<3> opf_cc;
+ bits<5> rs2;
+
+ let Inst{18} = 0;
+ let Inst{17-14} = cond;
+ let Inst{13-11} = opf_cc;
+ let Inst{10-5} = opf_low;
+ let Inst{4-0} = rs2;
+}
diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp
index 39d7329f2663..c10b5b300512 100644
--- a/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -17,19 +17,25 @@
#include "SparcSubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "SparcGenInstrInfo.inc"
using namespace llvm;
+
+// Pin the vtable to this file.
+void SparcInstrInfo::anchor() {}
+
SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
: SparcGenInstrInfo(SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP),
- RI(ST, *this), Subtarget(ST) {
+ RI(ST), Subtarget(ST) {
}
/// isLoadFromStackSlot - If the specified machine instruction is a direct
@@ -40,8 +46,10 @@ SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
if (MI->getOpcode() == SP::LDri ||
+ MI->getOpcode() == SP::LDXri ||
MI->getOpcode() == SP::LDFri ||
- MI->getOpcode() == SP::LDDFri) {
+ MI->getOpcode() == SP::LDDFri ||
+ MI->getOpcode() == SP::LDQFri) {
if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
MI->getOperand(2).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
@@ -59,8 +67,10 @@ unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
if (MI->getOpcode() == SP::STri ||
+ MI->getOpcode() == SP::STXri ||
MI->getOpcode() == SP::STFri ||
- MI->getOpcode() == SP::STDFri) {
+ MI->getOpcode() == SP::STDFri ||
+ MI->getOpcode() == SP::STQFri) {
if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
MI->getOperand(1).getImm() == 0) {
FrameIndex = MI->getOperand(0).getIndex();
@@ -96,14 +106,14 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
case SPCC::FCC_U: return SPCC::FCC_O;
case SPCC::FCC_O: return SPCC::FCC_U;
- case SPCC::FCC_G: return SPCC::FCC_LE;
- case SPCC::FCC_LE: return SPCC::FCC_G;
- case SPCC::FCC_UG: return SPCC::FCC_ULE;
- case SPCC::FCC_ULE: return SPCC::FCC_UG;
- case SPCC::FCC_L: return SPCC::FCC_GE;
- case SPCC::FCC_GE: return SPCC::FCC_L;
- case SPCC::FCC_UL: return SPCC::FCC_UGE;
- case SPCC::FCC_UGE: return SPCC::FCC_UL;
+ case SPCC::FCC_G: return SPCC::FCC_ULE;
+ case SPCC::FCC_LE: return SPCC::FCC_UG;
+ case SPCC::FCC_UG: return SPCC::FCC_LE;
+ case SPCC::FCC_ULE: return SPCC::FCC_G;
+ case SPCC::FCC_L: return SPCC::FCC_UGE;
+ case SPCC::FCC_GE: return SPCC::FCC_UL;
+ case SPCC::FCC_UL: return SPCC::FCC_GE;
+ case SPCC::FCC_UGE: return SPCC::FCC_L;
case SPCC::FCC_LG: return SPCC::FCC_UE;
case SPCC::FCC_UE: return SPCC::FCC_LG;
case SPCC::FCC_NE: return SPCC::FCC_E;
@@ -112,18 +122,6 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
llvm_unreachable("Invalid cond code");
}
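The remapped FCC inversions above fold unorderedness into the opposite condition: for IEEE floats the negation of "greater" must also hold when either operand is NaN, and plain "less or equal" does not. A tiny check showing why FCC_ULE, not FCC_LE, is the inverse of FCC_G:

    #include <cassert>
    #include <cmath>

    int main() {
      double nan = std::nan("");
      assert(!(nan > 1.0));   // FCC_G  is false for an unordered compare
      assert(!(nan <= 1.0));  // FCC_LE is also false, so it cannot be the inverse
      // "unordered or <=" (FCC_ULE) is true here, making it the real negation.
      return 0;
    }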
-MachineInstr *
-SparcInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx,
- uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc dl) const {
- MachineInstrBuilder MIB = BuildMI(MF, dl, get(SP::DBG_VALUE))
- .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
- return &*MIB;
-}
-
-
bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
@@ -139,15 +137,15 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
if (I->isDebugValue())
continue;
- //When we see a non-terminator, we are done
+ // When we see a non-terminator, we are done.
if (!isUnpredicatedTerminator(I))
break;
- //Terminator is not a branch
+ // Terminator is not a branch.
if (!I->isBranch())
return true;
- //Handle Unconditional branches
+ // Handle Unconditional branches.
if (I->getOpcode() == SP::BA) {
UnCondBrIter = I;
@@ -176,7 +174,7 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
unsigned Opcode = I->getOpcode();
if (Opcode != SP::BCOND && Opcode != SP::FBCOND)
- return true; //Unknown Opcode
+ return true; // Unknown Opcode.
SPCC::CondCodes BranchCode = (SPCC::CondCodes)I->getOperand(1).getImm();
@@ -185,7 +183,7 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
if (AllowModify && UnCondBrIter != MBB.end() &&
MBB.isLayoutSuccessor(TargetBB)) {
- //Transform the code
+ // Transform the code
//
// brCC L1
// ba L2
@@ -219,8 +217,8 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
Cond.push_back(MachineOperand::CreateImm(BranchCode));
continue;
}
- //FIXME: Handle subsequent conditional branches
- //For now, we can't handle multiple conditional branches
+ // FIXME: Handle subsequent conditional branches.
+ // For now, we can't handle multiple conditional branches.
return true;
}
return false;
@@ -241,7 +239,7 @@ SparcInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
return 1;
}
- //Conditional branch
+ // Conditional branch
unsigned CC = Cond[0].getImm();
if (IsIntegerCC(CC))
@@ -281,17 +279,69 @@ void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
+ unsigned numSubRegs = 0;
+ unsigned movOpc = 0;
+ const unsigned *subRegIdx = 0;
+
+ const unsigned DFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd };
+ const unsigned QFP_DFP_SubRegsIdx[] = { SP::sub_even64, SP::sub_odd64 };
+ const unsigned QFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd,
+ SP::sub_odd64_then_sub_even,
+ SP::sub_odd64_then_sub_odd };
+
if (SP::IntRegsRegClass.contains(DestReg, SrcReg))
BuildMI(MBB, I, DL, get(SP::ORrr), DestReg).addReg(SP::G0)
.addReg(SrcReg, getKillRegState(KillSrc));
else if (SP::FPRegsRegClass.contains(DestReg, SrcReg))
BuildMI(MBB, I, DL, get(SP::FMOVS), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
- else if (SP::DFPRegsRegClass.contains(DestReg, SrcReg))
- BuildMI(MBB, I, DL, get(Subtarget.isV9() ? SP::FMOVD : SP::FpMOVD), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- else
+ else if (SP::DFPRegsRegClass.contains(DestReg, SrcReg)) {
+ if (Subtarget.isV9()) {
+ BuildMI(MBB, I, DL, get(SP::FMOVD), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else {
+ // Use two FMOVS instructions.
+ subRegIdx = DFP_FP_SubRegsIdx;
+ numSubRegs = 2;
+ movOpc = SP::FMOVS;
+ }
+ } else if (SP::QFPRegsRegClass.contains(DestReg, SrcReg)) {
+ if (Subtarget.isV9()) {
+ if (Subtarget.hasHardQuad()) {
+ BuildMI(MBB, I, DL, get(SP::FMOVQ), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else {
+ // Use two FMOVD instructions.
+ subRegIdx = QFP_DFP_SubRegsIdx;
+ numSubRegs = 2;
+ movOpc = SP::FMOVD;
+ }
+ } else {
+ // Use four FMOVS instructions.
+ subRegIdx = QFP_FP_SubRegsIdx;
+ numSubRegs = 4;
+ movOpc = SP::FMOVS;
+ }
+ } else
llvm_unreachable("Impossible reg-to-reg copy");
+
+ if (numSubRegs == 0 || subRegIdx == 0 || movOpc == 0)
+ return;
+
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ MachineInstr *MovMI = 0;
+
+ for (unsigned i = 0; i != numSubRegs; ++i) {
+ unsigned Dst = TRI->getSubReg(DestReg, subRegIdx[i]);
+ unsigned Src = TRI->getSubReg(SrcReg, subRegIdx[i]);
+ assert(Dst && Src && "Bad sub-register");
+
+ MovMI = BuildMI(MBB, I, DL, get(movOpc), Dst).addReg(Src);
+ }
+ // Add implicit super-register defs and kills to the last MovMI.
+ MovMI->addRegisterDefined(DestReg, TRI);
+ if (KillSrc)
+ MovMI->addRegisterKilled(SrcReg, TRI);
}
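For a quad FP copy on a pre-V9 subtarget this means four single-precision moves over the sub-register chain; assuming the quad value in %f0..%f3 is copied into %f4..%f7, the expansion would look roughly like:

    // Assumed expansion of a QFP -> QFP copy without V9 (four FMOVS over the
    // sub-registers; the last move carries the implicit super-register def/kill
    // added at the end of copyPhysReg):
    //   fmovs %f0, %f4
    //   fmovs %f1, %f5
    //   fmovs %f2, %f6
    //   fmovs %f3, %f7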
void SparcInstrInfo::
@@ -302,16 +352,32 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
+ MachineFunction *MF = MBB.getParent();
+ const MachineFrameInfo &MFI = *MF->getFrameInfo();
+ MachineMemOperand *MMO =
+ MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
+ MachineMemOperand::MOStore,
+ MFI.getObjectSize(FI),
+ MFI.getObjectAlignment(FI));
+
// On the order of operands here: think "[FrameIdx + 0] = SrcReg".
- if (RC == &SP::IntRegsRegClass)
+ if (RC == &SP::I64RegsRegClass)
+ BuildMI(MBB, I, DL, get(SP::STXri)).addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ else if (RC == &SP::IntRegsRegClass)
BuildMI(MBB, I, DL, get(SP::STri)).addFrameIndex(FI).addImm(0)
- .addReg(SrcReg, getKillRegState(isKill));
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
else if (RC == &SP::FPRegsRegClass)
BuildMI(MBB, I, DL, get(SP::STFri)).addFrameIndex(FI).addImm(0)
- .addReg(SrcReg, getKillRegState(isKill));
- else if (RC == &SP::DFPRegsRegClass)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
BuildMI(MBB, I, DL, get(SP::STDFri)).addFrameIndex(FI).addImm(0)
- .addReg(SrcReg, getKillRegState(isKill));
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
+ // Use STQFri irrespective of its legality. If STQ is not legal, it will be
+ // lowered into two STDs in eliminateFrameIndex.
+ BuildMI(MBB, I, DL, get(SP::STQFri)).addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
else
llvm_unreachable("Can't store this register to stack slot");
}
@@ -324,12 +390,31 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
- if (RC == &SP::IntRegsRegClass)
- BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0);
+ MachineFunction *MF = MBB.getParent();
+ const MachineFrameInfo &MFI = *MF->getFrameInfo();
+ MachineMemOperand *MMO =
+ MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
+ MachineMemOperand::MOLoad,
+ MFI.getObjectSize(FI),
+ MFI.getObjectAlignment(FI));
+
+ if (RC == &SP::I64RegsRegClass)
+ BuildMI(MBB, I, DL, get(SP::LDXri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
+ else if (RC == &SP::IntRegsRegClass)
+ BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
else if (RC == &SP::FPRegsRegClass)
- BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0);
- else if (RC == &SP::DFPRegsRegClass)
- BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0);
+ BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
+ else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
+ BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
+ else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
+ // Use LDQFri irrespective of its legality. If LDQ is not legal, it will be
+ // lowered into two LDDs in eliminateFrameIndex.
+ BuildMI(MBB, I, DL, get(SP::LDQFri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
else
llvm_unreachable("Can't load this register from stack slot");
}
diff --git a/lib/Target/Sparc/SparcInstrInfo.h b/lib/Target/Sparc/SparcInstrInfo.h
index 204f69855c23..a86cbcb1c4ba 100644
--- a/lib/Target/Sparc/SparcInstrInfo.h
+++ b/lib/Target/Sparc/SparcInstrInfo.h
@@ -37,6 +37,7 @@ namespace SPII {
class SparcInstrInfo : public SparcGenInstrInfo {
const SparcRegisterInfo RI;
const SparcSubtarget& Subtarget;
+ virtual void anchor();
public:
explicit SparcInstrInfo(SparcSubtarget &ST);
@@ -53,7 +54,7 @@ public:
/// any side effects other than loading from the stack slot.
virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
-
+
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the loaded stack slot. If
@@ -62,14 +63,6 @@ public:
virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
- /// emitFrameIndexDebugValue - Emit a target-dependent form of
- /// DBG_VALUE encoding the address of a frame index.
- virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx,
- uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc dl) const;
-
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -86,7 +79,7 @@ public:
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const;
-
+
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
@@ -98,7 +91,7 @@ public:
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
-
+
unsigned getGlobalBaseReg(MachineFunction *MF) const;
};
diff --git a/lib/Target/Sparc/SparcInstrInfo.td b/lib/Target/Sparc/SparcInstrInfo.td
index baefb0642d47..ef7a11457071 100644
--- a/lib/Target/Sparc/SparcInstrInfo.td
+++ b/lib/Target/Sparc/SparcInstrInfo.td
@@ -39,6 +39,10 @@ def HasNoV9 : Predicate<"!Subtarget.isV9()">;
// HasVIS - This is true when the target processor has VIS extensions.
def HasVIS : Predicate<"Subtarget.isVIS()">;
+// HasHardQuad - This is true when the target processor supports quad floating
+// point instructions.
+def HasHardQuad : Predicate<"Subtarget.hasHardQuad()">;
+
// UseDeprecatedInsts - This predicate is true when the target processor is a
// V8, or when it is V9 but the V8 deprecated instructions are efficient enough
// to use when appropriate. In either of these cases, the instruction selector
@@ -81,6 +85,8 @@ def MEMri : Operand<iPTR> {
let MIOperandInfo = (ops ptr_rc, i32imm);
}
+def TLSSym : Operand<iPTR>;
+
// Branch targets have OtherVT type.
def brtarget : Operand<OtherVT>;
def calltarget : Operand<i32>;
@@ -89,9 +95,11 @@ def calltarget : Operand<i32>;
let PrintMethod = "printCCOperand" in
def CCOp : Operand<i32>;
-def SDTSPcmpfcc :
+def SDTSPcmpicc :
+SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>]>;
+def SDTSPcmpfcc :
SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisSameAs<0, 1>]>;
-def SDTSPbrcc :
+def SDTSPbrcc :
SDTypeProfile<0, 2, [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>]>;
def SDTSPselectcc :
SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisVT<3, i32>]>;
@@ -99,8 +107,17 @@ def SDTSPFTOI :
SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
def SDTSPITOF :
SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
+def SDTSPFTOX :
+SDTypeProfile<1, 1, [SDTCisVT<0, f64>, SDTCisFP<1>]>;
+def SDTSPXTOF :
+SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f64>]>;
+
+def SDTSPtlsadd :
+SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
+def SDTSPtlsld :
+SDTypeProfile<1, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
-def SPcmpicc : SDNode<"SPISD::CMPICC", SDTIntBinOp, [SDNPOutGlue]>;
+def SPcmpicc : SDNode<"SPISD::CMPICC", SDTSPcmpicc, [SDNPOutGlue]>;
def SPcmpfcc : SDNode<"SPISD::CMPFCC", SDTSPcmpfcc, [SDNPOutGlue]>;
def SPbricc : SDNode<"SPISD::BRICC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
def SPbrxcc : SDNode<"SPISD::BRXCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
@@ -111,6 +128,8 @@ def SPlo : SDNode<"SPISD::Lo", SDTIntUnaryOp>;
def SPftoi : SDNode<"SPISD::FTOI", SDTSPFTOI>;
def SPitof : SDNode<"SPISD::ITOF", SDTSPITOF>;
+def SPftox : SDNode<"SPISD::FTOX", SDTSPFTOX>;
+def SPxtof : SDNode<"SPISD::XTOF", SDTSPXTOF>;
def SPselecticc : SDNode<"SPISD::SELECT_ICC", SDTSPselectcc, [SDNPInGlue]>;
def SPselectxcc : SDNode<"SPISD::SELECT_XCC", SDTSPselectcc, [SDNPInGlue]>;
@@ -138,6 +157,12 @@ def retflag : SDNode<"SPISD::RET_FLAG", SDT_SPRet,
def flushw : SDNode<"SPISD::FLUSHW", SDTNone,
[SDNPHasChain, SDNPSideEffect, SDNPMayStore]>;
+def tlsadd : SDNode<"SPISD::TLS_ADD", SDTSPtlsadd>;
+def tlsld : SDNode<"SPISD::TLS_LD", SDTSPtlsld>;
+def tlscall : SDNode<"SPISD::TLS_CALL", SDT_SPCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
+
def getPCX : Operand<i32> {
let PrintMethod = "printGetPCX";
}
@@ -186,7 +211,7 @@ def FCC_O : FCC_VAL<29>; // Ordered
/// F3_12 multiclass - Define a normal F3_1/F3_2 pattern in one shot.
multiclass F3_12<string OpcStr, bits<6> Op3Val, SDNode OpNode> {
- def rr : F3_1<2, Op3Val,
+ def rr : F3_1<2, Op3Val,
(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
!strconcat(OpcStr, " $b, $c, $dst"),
[(set i32:$dst, (OpNode i32:$b, i32:$c))]>;
@@ -199,7 +224,7 @@ multiclass F3_12<string OpcStr, bits<6> Op3Val, SDNode OpNode> {
/// F3_12np multiclass - Define a normal F3_1/F3_2 pattern in one shot, with no
/// pattern.
multiclass F3_12np<string OpcStr, bits<6> Op3Val> {
- def rr : F3_1<2, Op3Val,
+ def rr : F3_1<2, Op3Val,
(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
!strconcat(OpcStr, " $b, $c, $dst"), []>;
def ri : F3_2<2, Op3Val,
@@ -240,27 +265,15 @@ let hasSideEffects = 1, mayStore = 1 in {
[(flushw)]>;
}
-def UNIMP : F2_1<0b000, (outs), (ins i32imm:$val),
- "unimp $val", []>;
-
-// FpMOVD/FpNEGD/FpABSD - These are lowered to single-precision ops by the
-// fpmover pass.
-let Predicates = [HasNoV9] in { // Only emit these in V8 mode.
- def FpMOVD : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$src),
- "!FpMOVD $src, $dst", []>;
- def FpNEGD : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$src),
- "!FpNEGD $src, $dst",
- [(set f64:$dst, (fneg f64:$src))]>;
- def FpABSD : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$src),
- "!FpABSD $src, $dst",
- [(set f64:$dst, (fabs f64:$src))]>;
-}
+let rd = 0 in
+ def UNIMP : F2_1<0b000, (outs), (ins i32imm:$val),
+ "unimp $val", []>;
// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
// instruction selection into a branch sequence. This has to handle all
// permutations of selection between i32/f32/f64 on ICC and FCC.
- // Expanded after instruction selection.
-let Uses = [ICC], usesCustomInserter = 1 in {
+// Expanded after instruction selection.
+let Uses = [ICC], usesCustomInserter = 1 in {
def SELECT_CC_Int_ICC
: Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond),
"; SELECT_CC_Int_ICC PSEUDO!",
@@ -274,6 +287,11 @@ let Uses = [ICC], usesCustomInserter = 1 in {
: Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, i32imm:$Cond),
"; SELECT_CC_DFP_ICC PSEUDO!",
[(set f64:$dst, (SPselecticc f64:$T, f64:$F, imm:$Cond))]>;
+
+ def SELECT_CC_QFP_ICC
+ : Pseudo<(outs QFPRegs:$dst), (ins QFPRegs:$T, QFPRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_QFP_ICC PSEUDO!",
+ [(set f128:$dst, (SPselecticc f128:$T, f128:$F, imm:$Cond))]>;
}
let usesCustomInserter = 1, Uses = [FCC] in {
@@ -291,17 +309,21 @@ let usesCustomInserter = 1, Uses = [FCC] in {
: Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, i32imm:$Cond),
"; SELECT_CC_DFP_FCC PSEUDO!",
[(set f64:$dst, (SPselectfcc f64:$T, f64:$F, imm:$Cond))]>;
+ def SELECT_CC_QFP_FCC
+ : Pseudo<(outs QFPRegs:$dst), (ins QFPRegs:$T, QFPRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_QFP_FCC PSEUDO!",
+ [(set f128:$dst, (SPselectfcc f128:$T, f128:$F, imm:$Cond))]>;
}
// Section A.3 - Synthetic Instructions, p. 85
// special cases of JMPL:
let isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1 in {
- let rd = O7.Num, rs1 = G0.Num in
+ let rd = 0, rs1 = 15 in
def RETL: F3_2<2, 0b111000, (outs), (ins i32imm:$val),
"jmp %o7+$val", [(retflag simm13:$val)]>;
- let rd = I7.Num, rs1 = G0.Num in
+ let rd = 0, rs1 = 31 in
def RET: F3_2<2, 0b111000, (outs), (ins i32imm:$val),
"jmp %i7+$val", []>;
}
@@ -365,56 +387,76 @@ def LDDFri : F3_2<3, 0b100011,
(outs DFPRegs:$dst), (ins MEMri:$addr),
"ldd [$addr], $dst",
[(set f64:$dst, (load ADDRri:$addr))]>;
+def LDQFrr : F3_1<3, 0b100010,
+ (outs QFPRegs:$dst), (ins MEMrr:$addr),
+ "ldq [$addr], $dst",
+ [(set f128:$dst, (load ADDRrr:$addr))]>,
+ Requires<[HasV9, HasHardQuad]>;
+def LDQFri : F3_2<3, 0b100010,
+ (outs QFPRegs:$dst), (ins MEMri:$addr),
+ "ldq [$addr], $dst",
+ [(set f128:$dst, (load ADDRri:$addr))]>,
+ Requires<[HasV9, HasHardQuad]>;
// Section B.4 - Store Integer Instructions, p. 95
def STBrr : F3_1<3, 0b000101,
- (outs), (ins MEMrr:$addr, IntRegs:$src),
- "stb $src, [$addr]",
- [(truncstorei8 i32:$src, ADDRrr:$addr)]>;
+ (outs), (ins MEMrr:$addr, IntRegs:$rd),
+ "stb $rd, [$addr]",
+ [(truncstorei8 i32:$rd, ADDRrr:$addr)]>;
def STBri : F3_2<3, 0b000101,
- (outs), (ins MEMri:$addr, IntRegs:$src),
- "stb $src, [$addr]",
- [(truncstorei8 i32:$src, ADDRri:$addr)]>;
+ (outs), (ins MEMri:$addr, IntRegs:$rd),
+ "stb $rd, [$addr]",
+ [(truncstorei8 i32:$rd, ADDRri:$addr)]>;
def STHrr : F3_1<3, 0b000110,
- (outs), (ins MEMrr:$addr, IntRegs:$src),
- "sth $src, [$addr]",
- [(truncstorei16 i32:$src, ADDRrr:$addr)]>;
+ (outs), (ins MEMrr:$addr, IntRegs:$rd),
+ "sth $rd, [$addr]",
+ [(truncstorei16 i32:$rd, ADDRrr:$addr)]>;
def STHri : F3_2<3, 0b000110,
- (outs), (ins MEMri:$addr, IntRegs:$src),
- "sth $src, [$addr]",
- [(truncstorei16 i32:$src, ADDRri:$addr)]>;
+ (outs), (ins MEMri:$addr, IntRegs:$rd),
+ "sth $rd, [$addr]",
+ [(truncstorei16 i32:$rd, ADDRri:$addr)]>;
def STrr : F3_1<3, 0b000100,
- (outs), (ins MEMrr:$addr, IntRegs:$src),
- "st $src, [$addr]",
- [(store i32:$src, ADDRrr:$addr)]>;
+ (outs), (ins MEMrr:$addr, IntRegs:$rd),
+ "st $rd, [$addr]",
+ [(store i32:$rd, ADDRrr:$addr)]>;
def STri : F3_2<3, 0b000100,
- (outs), (ins MEMri:$addr, IntRegs:$src),
- "st $src, [$addr]",
- [(store i32:$src, ADDRri:$addr)]>;
+ (outs), (ins MEMri:$addr, IntRegs:$rd),
+ "st $rd, [$addr]",
+ [(store i32:$rd, ADDRri:$addr)]>;
// Section B.5 - Store Floating-point Instructions, p. 97
def STFrr : F3_1<3, 0b100100,
- (outs), (ins MEMrr:$addr, FPRegs:$src),
- "st $src, [$addr]",
- [(store f32:$src, ADDRrr:$addr)]>;
+ (outs), (ins MEMrr:$addr, FPRegs:$rd),
+ "st $rd, [$addr]",
+ [(store f32:$rd, ADDRrr:$addr)]>;
def STFri : F3_2<3, 0b100100,
- (outs), (ins MEMri:$addr, FPRegs:$src),
- "st $src, [$addr]",
- [(store f32:$src, ADDRri:$addr)]>;
+ (outs), (ins MEMri:$addr, FPRegs:$rd),
+ "st $rd, [$addr]",
+ [(store f32:$rd, ADDRri:$addr)]>;
def STDFrr : F3_1<3, 0b100111,
- (outs), (ins MEMrr:$addr, DFPRegs:$src),
- "std $src, [$addr]",
- [(store f64:$src, ADDRrr:$addr)]>;
+ (outs), (ins MEMrr:$addr, DFPRegs:$rd),
+ "std $rd, [$addr]",
+ [(store f64:$rd, ADDRrr:$addr)]>;
def STDFri : F3_2<3, 0b100111,
- (outs), (ins MEMri:$addr, DFPRegs:$src),
- "std $src, [$addr]",
- [(store f64:$src, ADDRri:$addr)]>;
+ (outs), (ins MEMri:$addr, DFPRegs:$rd),
+ "std $rd, [$addr]",
+ [(store f64:$rd, ADDRri:$addr)]>;
+def STQFrr : F3_1<3, 0b100110,
+ (outs), (ins MEMrr:$addr, QFPRegs:$rd),
+ "stq $rd, [$addr]",
+ [(store f128:$rd, ADDRrr:$addr)]>,
+ Requires<[HasV9, HasHardQuad]>;
+def STQFri : F3_2<3, 0b100110,
+ (outs), (ins MEMri:$addr, QFPRegs:$rd),
+ "stq $rd, [$addr]",
+ [(store f128:$rd, ADDRri:$addr)]>,
+ Requires<[HasV9, HasHardQuad]>;
// Section B.9 - SETHI Instruction, p. 104
def SETHIi: F2_1<0b100,
- (outs IntRegs:$dst), (ins i32imm:$src),
- "sethi $src, $dst",
- [(set i32:$dst, SETHIimm:$src)]>;
+ (outs IntRegs:$rd), (ins i32imm:$imm22),
+ "sethi $imm22, $rd",
+ [(set i32:$rd, SETHIimm:$imm22)]>;
// Section B.10 - NOP Instruction, p. 105
// (It's a special case of SETHI)
@@ -460,27 +502,39 @@ defm SRA : F3_12<"sra", 0b100111, sra>;
defm ADD : F3_12<"add", 0b000000, add>;
// "LEA" forms of add (patterns to make tblgen happy)
-def LEA_ADDri : F3_2<2, 0b000000,
- (outs IntRegs:$dst), (ins MEMri:$addr),
- "add ${addr:arith}, $dst",
- [(set i32:$dst, ADDRri:$addr)]>;
+let Predicates = [Is32Bit] in
+ def LEA_ADDri : F3_2<2, 0b000000,
+ (outs IntRegs:$dst), (ins MEMri:$addr),
+ "add ${addr:arith}, $dst",
+ [(set iPTR:$dst, ADDRri:$addr)]>;
-let Defs = [ICC] in
+let Defs = [ICC] in
defm ADDCC : F3_12<"addcc", 0b010000, addc>;
-let Uses = [ICC] in
- defm ADDX : F3_12<"addx", 0b001000, adde>;
+let Uses = [ICC], Defs = [ICC] in
+ defm ADDX : F3_12<"addxcc", 0b011000, adde>;
// Section B.15 - Subtract Instructions, p. 110
defm SUB : F3_12 <"sub" , 0b000100, sub>;
-let Uses = [ICC] in
- defm SUBX : F3_12 <"subx" , 0b001100, sube>;
-
-let Defs = [ICC] in
- defm SUBCC : F3_12 <"subcc", 0b010100, SPcmpicc>;
+let Uses = [ICC], Defs = [ICC] in
+ defm SUBX : F3_12 <"subxcc" , 0b011100, sube>;
+
+let Defs = [ICC] in
+ defm SUBCC : F3_12 <"subcc", 0b010100, subc>;
+
+let Defs = [ICC], rd = 0 in {
+ def CMPrr : F3_1<2, 0b010100,
+ (outs), (ins IntRegs:$b, IntRegs:$c),
+ "cmp $b, $c",
+ [(SPcmpicc i32:$b, i32:$c)]>;
+ def CMPri : F3_2<2, 0b010100,
+ (outs), (ins IntRegs:$b, i32imm:$c),
+ "cmp $b, $c",
+ [(SPcmpicc i32:$b, (i32 simm13:$c))]>;
+}
let Uses = [ICC], Defs = [ICC] in
- def SUBXCCrr: F3_1<2, 0b011100,
+ def SUBXCCrr: F3_1<2, 0b011100,
(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
"subxcc $b, $c, $dst", []>;
@@ -503,76 +557,91 @@ defm RESTORE : F3_12np<"restore", 0b111101>;
// Section B.21 - Branch on Integer Condition Codes Instructions, p. 119
+// unconditional branch class.
+class BranchAlways<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b010, (outs), ins, asmstr, pattern> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let hasDelaySlot = 1;
+ let isBarrier = 1;
+}
+
+let cond = 8 in
+ def BA : BranchAlways<(ins brtarget:$imm22), "ba $imm22", [(br bb:$imm22)]>;
+
// conditional branch class:
-class BranchSP<bits<4> cc, dag ins, string asmstr, list<dag> pattern>
- : F2_2<cc, 0b010, (outs), ins, asmstr, pattern> {
+class BranchSP<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b010, (outs), ins, asmstr, pattern> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
}
-let isBarrier = 1 in
- def BA : BranchSP<0b1000, (ins brtarget:$dst),
- "ba $dst",
- [(br bb:$dst)]>;
+// Indirect branch instructions.
+let isTerminator = 1, isBarrier = 1,
+ hasDelaySlot = 1, isBranch =1,
+ isIndirectBranch = 1, rd = 0 in {
+ def BINDrr : F3_1<2, 0b111000,
+ (outs), (ins MEMrr:$ptr),
+ "jmp $ptr",
+ [(brind ADDRrr:$ptr)]>;
+ def BINDri : F3_2<2, 0b111000,
+ (outs), (ins MEMri:$ptr),
+ "jmp $ptr",
+ [(brind ADDRri:$ptr)]>;
+}
-// FIXME: the encoding for the JIT should look at the condition field.
let Uses = [ICC] in
- def BCOND : BranchSP<0, (ins brtarget:$dst, CCOp:$cc),
- "b$cc $dst",
- [(SPbricc bb:$dst, imm:$cc)]>;
-
+ def BCOND : BranchSP<(ins brtarget:$imm22, CCOp:$cond),
+ "b$cond $imm22",
+ [(SPbricc bb:$imm22, imm:$cond)]>;
// Section B.22 - Branch on Floating-point Condition Codes Instructions, p. 121
// floating-point conditional branch class:
-class FPBranchSP<bits<4> cc, dag ins, string asmstr, list<dag> pattern>
- : F2_2<cc, 0b110, (outs), ins, asmstr, pattern> {
+class FPBranchSP<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b110, (outs), ins, asmstr, pattern> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
}
-// FIXME: the encoding for the JIT should look at the condition field.
let Uses = [FCC] in
- def FBCOND : FPBranchSP<0, (ins brtarget:$dst, CCOp:$cc),
- "fb$cc $dst",
- [(SPbrfcc bb:$dst, imm:$cc)]>;
+ def FBCOND : FPBranchSP<(ins brtarget:$imm22, CCOp:$cond),
+ "fb$cond $imm22",
+ [(SPbrfcc bb:$imm22, imm:$cond)]>;
// Section B.24 - Call and Link Instruction, p. 125
// This is the only Format 1 instruction
let Uses = [O6],
- hasDelaySlot = 1, isCall = 1,
- Defs = [O0, O1, O2, O3, O4, O5, O7, G1, G2, G3, G4, G5, G6, G7,
- D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
- ICC, FCC, Y] in {
+ hasDelaySlot = 1, isCall = 1 in {
def CALL : InstSP<(outs), (ins calltarget:$dst, variable_ops),
"call $dst", []> {
bits<30> disp;
let op = 1;
let Inst{29-0} = disp;
}
-
+
// indirect calls
def JMPLrr : F3_1<2, 0b111000,
(outs), (ins MEMrr:$ptr, variable_ops),
"call $ptr",
- [(call ADDRrr:$ptr)]>;
+ [(call ADDRrr:$ptr)]> { let rd = 15; }
def JMPLri : F3_2<2, 0b111000,
(outs), (ins MEMri:$ptr, variable_ops),
"call $ptr",
- [(call ADDRri:$ptr)]>;
+ [(call ADDRri:$ptr)]> { let rd = 15; }
}
// Section B.28 - Read State Register Instructions
-let Uses = [Y] in
+let Uses = [Y], rs1 = 0, rs2 = 0 in
def RDY : F3_1<2, 0b101000,
(outs IntRegs:$dst), (ins),
"rd %y, $dst", []>;
// Section B.29 - Write State Register Instructions
-let Defs = [Y] in {
+let Defs = [Y], rd = 0 in {
def WRYrr : F3_1<2, 0b110000,
(outs), (ins IntRegs:$b, IntRegs:$c),
"wr $b, $c, %y", []>;
@@ -581,58 +650,93 @@ let Defs = [Y] in {
"wr $b, $c, %y", []>;
}
// Convert Integer to Floating-point Instructions, p. 141
-def FITOS : F3_3<2, 0b110100, 0b011000100,
+def FITOS : F3_3u<2, 0b110100, 0b011000100,
(outs FPRegs:$dst), (ins FPRegs:$src),
"fitos $src, $dst",
[(set FPRegs:$dst, (SPitof FPRegs:$src))]>;
-def FITOD : F3_3<2, 0b110100, 0b011001000,
+def FITOD : F3_3u<2, 0b110100, 0b011001000,
(outs DFPRegs:$dst), (ins FPRegs:$src),
"fitod $src, $dst",
[(set DFPRegs:$dst, (SPitof FPRegs:$src))]>;
+def FITOQ : F3_3u<2, 0b110100, 0b011001100,
+ (outs QFPRegs:$dst), (ins FPRegs:$src),
+ "fitoq $src, $dst",
+ [(set QFPRegs:$dst, (SPitof FPRegs:$src))]>,
+ Requires<[HasHardQuad]>;
// Convert Floating-point to Integer Instructions, p. 142
-def FSTOI : F3_3<2, 0b110100, 0b011010001,
+def FSTOI : F3_3u<2, 0b110100, 0b011010001,
(outs FPRegs:$dst), (ins FPRegs:$src),
"fstoi $src, $dst",
[(set FPRegs:$dst, (SPftoi FPRegs:$src))]>;
-def FDTOI : F3_3<2, 0b110100, 0b011010010,
+def FDTOI : F3_3u<2, 0b110100, 0b011010010,
(outs FPRegs:$dst), (ins DFPRegs:$src),
"fdtoi $src, $dst",
[(set FPRegs:$dst, (SPftoi DFPRegs:$src))]>;
+def FQTOI : F3_3u<2, 0b110100, 0b011010011,
+ (outs FPRegs:$dst), (ins QFPRegs:$src),
+ "fqtoi $src, $dst",
+ [(set FPRegs:$dst, (SPftoi QFPRegs:$src))]>,
+ Requires<[HasHardQuad]>;
// Convert between Floating-point Formats Instructions, p. 143
-def FSTOD : F3_3<2, 0b110100, 0b011001001,
+def FSTOD : F3_3u<2, 0b110100, 0b011001001,
(outs DFPRegs:$dst), (ins FPRegs:$src),
"fstod $src, $dst",
[(set f64:$dst, (fextend f32:$src))]>;
-def FDTOS : F3_3<2, 0b110100, 0b011000110,
+def FSTOQ : F3_3u<2, 0b110100, 0b011001101,
+ (outs QFPRegs:$dst), (ins FPRegs:$src),
+ "fstoq $src, $dst",
+ [(set f128:$dst, (fextend f32:$src))]>,
+ Requires<[HasHardQuad]>;
+def FDTOS : F3_3u<2, 0b110100, 0b011000110,
(outs FPRegs:$dst), (ins DFPRegs:$src),
"fdtos $src, $dst",
[(set f32:$dst, (fround f64:$src))]>;
+def FDTOQ : F3_3u<2, 0b110100, 0b011001110,
+ (outs QFPRegs:$dst), (ins DFPRegs:$src),
+ "fdtoq $src, $dst",
+ [(set f128:$dst, (fextend f64:$src))]>,
+ Requires<[HasHardQuad]>;
+def FQTOS : F3_3u<2, 0b110100, 0b011000111,
+ (outs FPRegs:$dst), (ins QFPRegs:$src),
+ "fqtos $src, $dst",
+ [(set f32:$dst, (fround f128:$src))]>,
+ Requires<[HasHardQuad]>;
+def FQTOD : F3_3u<2, 0b110100, 0b011001011,
+ (outs DFPRegs:$dst), (ins QFPRegs:$src),
+ "fqtod $src, $dst",
+ [(set f64:$dst, (fround f128:$src))]>,
+ Requires<[HasHardQuad]>;
// Floating-point Move Instructions, p. 144
-def FMOVS : F3_3<2, 0b110100, 0b000000001,
+def FMOVS : F3_3u<2, 0b110100, 0b000000001,
(outs FPRegs:$dst), (ins FPRegs:$src),
"fmovs $src, $dst", []>;
-def FNEGS : F3_3<2, 0b110100, 0b000000101,
+def FNEGS : F3_3u<2, 0b110100, 0b000000101,
(outs FPRegs:$dst), (ins FPRegs:$src),
"fnegs $src, $dst",
[(set f32:$dst, (fneg f32:$src))]>;
-def FABSS : F3_3<2, 0b110100, 0b000001001,
+def FABSS : F3_3u<2, 0b110100, 0b000001001,
(outs FPRegs:$dst), (ins FPRegs:$src),
"fabss $src, $dst",
[(set f32:$dst, (fabs f32:$src))]>;
// Floating-point Square Root Instructions, p.145
-def FSQRTS : F3_3<2, 0b110100, 0b000101001,
+def FSQRTS : F3_3u<2, 0b110100, 0b000101001,
(outs FPRegs:$dst), (ins FPRegs:$src),
"fsqrts $src, $dst",
[(set f32:$dst, (fsqrt f32:$src))]>;
-def FSQRTD : F3_3<2, 0b110100, 0b000101010,
+def FSQRTD : F3_3u<2, 0b110100, 0b000101010,
(outs DFPRegs:$dst), (ins DFPRegs:$src),
"fsqrtd $src, $dst",
[(set f64:$dst, (fsqrt f64:$src))]>;
+def FSQRTQ : F3_3u<2, 0b110100, 0b000101011,
+ (outs QFPRegs:$dst), (ins QFPRegs:$src),
+ "fsqrtq $src, $dst",
+ [(set f128:$dst, (fsqrt f128:$src))]>,
+ Requires<[HasHardQuad]>;
@@ -645,6 +749,12 @@ def FADDD : F3_3<2, 0b110100, 0b001000010,
(outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2),
"faddd $src1, $src2, $dst",
[(set f64:$dst, (fadd f64:$src1, f64:$src2))]>;
+def FADDQ : F3_3<2, 0b110100, 0b001000011,
+ (outs QFPRegs:$dst), (ins QFPRegs:$src1, QFPRegs:$src2),
+ "faddq $src1, $src2, $dst",
+ [(set f128:$dst, (fadd f128:$src1, f128:$src2))]>,
+ Requires<[HasHardQuad]>;
+
def FSUBS : F3_3<2, 0b110100, 0b001000101,
(outs FPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2),
"fsubs $src1, $src2, $dst",
@@ -653,6 +763,12 @@ def FSUBD : F3_3<2, 0b110100, 0b001000110,
(outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2),
"fsubd $src1, $src2, $dst",
[(set f64:$dst, (fsub f64:$src1, f64:$src2))]>;
+def FSUBQ : F3_3<2, 0b110100, 0b001000111,
+ (outs QFPRegs:$dst), (ins QFPRegs:$src1, QFPRegs:$src2),
+ "fsubq $src1, $src2, $dst",
+ [(set f128:$dst, (fsub f128:$src1, f128:$src2))]>,
+ Requires<[HasHardQuad]>;
+
// Floating-point Multiply and Divide Instructions, p. 147
def FMULS : F3_3<2, 0b110100, 0b001001001,
@@ -663,11 +779,24 @@ def FMULD : F3_3<2, 0b110100, 0b001001010,
(outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2),
"fmuld $src1, $src2, $dst",
[(set f64:$dst, (fmul f64:$src1, f64:$src2))]>;
+def FMULQ : F3_3<2, 0b110100, 0b001001011,
+ (outs QFPRegs:$dst), (ins QFPRegs:$src1, QFPRegs:$src2),
+ "fmulq $src1, $src2, $dst",
+ [(set f128:$dst, (fmul f128:$src1, f128:$src2))]>,
+ Requires<[HasHardQuad]>;
+
def FSMULD : F3_3<2, 0b110100, 0b001101001,
(outs DFPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2),
"fsmuld $src1, $src2, $dst",
[(set f64:$dst, (fmul (fextend f32:$src1),
(fextend f32:$src2)))]>;
+def FDMULQ : F3_3<2, 0b110100, 0b001101110,
+ (outs QFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2),
+ "fdmulq $src1, $src2, $dst",
+ [(set f128:$dst, (fmul (fextend f64:$src1),
+ (fextend f64:$src2)))]>,
+ Requires<[HasHardQuad]>;
+
def FDIVS : F3_3<2, 0b110100, 0b001001101,
(outs FPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2),
"fdivs $src1, $src2, $dst",
@@ -676,21 +805,61 @@ def FDIVD : F3_3<2, 0b110100, 0b001001110,
(outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2),
"fdivd $src1, $src2, $dst",
[(set f64:$dst, (fdiv f64:$src1, f64:$src2))]>;
+def FDIVQ : F3_3<2, 0b110100, 0b001001111,
+ (outs QFPRegs:$dst), (ins QFPRegs:$src1, QFPRegs:$src2),
+ "fdivq $src1, $src2, $dst",
+ [(set f128:$dst, (fdiv f128:$src1, f128:$src2))]>,
+ Requires<[HasHardQuad]>;
// Floating-point Compare Instructions, p. 148
// Note: the 2nd template arg is different for these guys.
// Note 2: the result of a FCMP is not available until the 2nd cycle
-// after the instr is retired, but there is no interlock. This behavior
-// is modelled with a forced noop after the instruction.
+// after the instr is retired, but there is no interlock in Sparc V8.
+// This behavior is modeled with a forced noop after the instruction in
+// DelaySlotFiller.
+
let Defs = [FCC] in {
- def FCMPS : F3_3<2, 0b110101, 0b001010001,
+ def FCMPS : F3_3c<2, 0b110101, 0b001010001,
(outs), (ins FPRegs:$src1, FPRegs:$src2),
- "fcmps $src1, $src2\n\tnop",
+ "fcmps $src1, $src2",
[(SPcmpfcc f32:$src1, f32:$src2)]>;
- def FCMPD : F3_3<2, 0b110101, 0b001010010,
+ def FCMPD : F3_3c<2, 0b110101, 0b001010010,
(outs), (ins DFPRegs:$src1, DFPRegs:$src2),
- "fcmpd $src1, $src2\n\tnop",
+ "fcmpd $src1, $src2",
[(SPcmpfcc f64:$src1, f64:$src2)]>;
+ def FCMPQ : F3_3c<2, 0b110101, 0b001010011,
+ (outs), (ins QFPRegs:$src1, QFPRegs:$src2),
+ "fcmpq $src1, $src2",
+ [(SPcmpfcc f128:$src1, f128:$src2)]>,
+ Requires<[HasHardQuad]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Instructions for Thread Local Storage (TLS).
+//===----------------------------------------------------------------------===//
+
+def TLS_ADDrr : F3_1<2, 0b000000,
+ (outs IntRegs:$rd),
+ (ins IntRegs:$rs1, IntRegs:$rs2, TLSSym:$sym),
+ "add $rs1, $rs2, $rd, $sym",
+ [(set i32:$rd,
+ (tlsadd i32:$rs1, i32:$rs2, tglobaltlsaddr:$sym))]>;
+
+let mayLoad = 1 in
+ def TLS_LDrr : F3_1<3, 0b000000,
+ (outs IntRegs:$dst), (ins MEMrr:$addr, TLSSym:$sym),
+ "ld [$addr], $dst, $sym",
+ [(set i32:$dst,
+ (tlsld ADDRrr:$addr, tglobaltlsaddr:$sym))]>;
+
+let Uses = [O6], isCall = 1, hasDelaySlot = 1 in
+ def TLS_CALL : InstSP<(outs),
+ (ins calltarget:$disp, TLSSym:$sym, variable_ops),
+ "call $disp, $sym",
+ [(tlscall texternalsym:$disp, tglobaltlsaddr:$sym)]> {
+ bits<30> disp;
+ let op = 1;
+ let Inst{29-0} = disp;
}
//===----------------------------------------------------------------------===//
@@ -698,76 +867,110 @@ let Defs = [FCC] in {
//===----------------------------------------------------------------------===//
// V9 Conditional Moves.
-let Predicates = [HasV9], Constraints = "$T = $dst" in {
+let Predicates = [HasV9], Constraints = "$f = $rd" in {
// Move Integer Register on Condition (MOVcc) p. 194 of the V9 manual.
- // FIXME: Add instruction encodings for the JIT some day.
- let Uses = [ICC] in {
+ let Uses = [ICC], cc = 0b100 in {
def MOVICCrr
- : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, CCOp:$cc),
- "mov$cc %icc, $F, $dst",
- [(set i32:$dst, (SPselecticc i32:$F, i32:$T, imm:$cc))]>;
+ : F4_1<0b101100, (outs IntRegs:$rd),
+ (ins IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
+ "mov$cond %icc, $rs2, $rd",
+ [(set i32:$rd, (SPselecticc i32:$rs2, i32:$f, imm:$cond))]>;
+
def MOVICCri
- : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, i32imm:$F, CCOp:$cc),
- "mov$cc %icc, $F, $dst",
- [(set i32:$dst, (SPselecticc simm11:$F, i32:$T, imm:$cc))]>;
+ : F4_2<0b101100, (outs IntRegs:$rd),
+ (ins i32imm:$simm11, IntRegs:$f, CCOp:$cond),
+ "mov$cond %icc, $simm11, $rd",
+ [(set i32:$rd,
+ (SPselecticc simm11:$simm11, i32:$f, imm:$cond))]>;
}
- let Uses = [FCC] in {
+ let Uses = [FCC], cc = 0b000 in {
def MOVFCCrr
- : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, CCOp:$cc),
- "mov$cc %fcc0, $F, $dst",
- [(set i32:$dst, (SPselectfcc i32:$F, i32:$T, imm:$cc))]>;
+ : F4_1<0b101100, (outs IntRegs:$rd),
+ (ins IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
+ "mov$cond %fcc0, $rs2, $rd",
+ [(set i32:$rd, (SPselectfcc i32:$rs2, i32:$f, imm:$cond))]>;
def MOVFCCri
- : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, i32imm:$F, CCOp:$cc),
- "mov$cc %fcc0, $F, $dst",
- [(set i32:$dst, (SPselectfcc simm11:$F, i32:$T, imm:$cc))]>;
+ : F4_2<0b101100, (outs IntRegs:$rd),
+ (ins i32imm:$simm11, IntRegs:$f, CCOp:$cond),
+ "mov$cond %fcc0, $simm11, $rd",
+ [(set i32:$rd,
+ (SPselectfcc simm11:$simm11, i32:$f, imm:$cond))]>;
}
- let Uses = [ICC] in {
+ let Uses = [ICC], opf_cc = 0b100 in {
def FMOVS_ICC
- : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, CCOp:$cc),
- "fmovs$cc %icc, $F, $dst",
- [(set f32:$dst,
- (SPselecticc f32:$F, f32:$T, imm:$cc))]>;
+ : F4_3<0b110101, 0b000001, (outs FPRegs:$rd),
+ (ins FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
+ "fmovs$cond %icc, $rs2, $rd",
+ [(set f32:$rd, (SPselecticc f32:$rs2, f32:$f, imm:$cond))]>;
def FMOVD_ICC
- : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, CCOp:$cc),
- "fmovd$cc %icc, $F, $dst",
- [(set f64:$dst, (SPselecticc f64:$F, f64:$T, imm:$cc))]>;
+ : F4_3<0b110101, 0b000010, (outs DFPRegs:$rd),
+ (ins DFPRegs:$rs2, DFPRegs:$f, CCOp:$cond),
+ "fmovd$cond %icc, $rs2, $rd",
+ [(set f64:$rd, (SPselecticc f64:$rs2, f64:$f, imm:$cond))]>;
+ def FMOVQ_ICC
+ : F4_3<0b110101, 0b000011, (outs QFPRegs:$rd),
+ (ins QFPRegs:$rs2, QFPRegs:$f, CCOp:$cond),
+ "fmovd$cond %icc, $rs2, $rd",
+ [(set f128:$rd, (SPselecticc f128:$rs2, f128:$f, imm:$cond))]>;
}
- let Uses = [FCC] in {
+ let Uses = [FCC], opf_cc = 0b000 in {
def FMOVS_FCC
- : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, CCOp:$cc),
- "fmovs$cc %fcc0, $F, $dst",
- [(set f32:$dst, (SPselectfcc f32:$F, f32:$T, imm:$cc))]>;
+ : F4_3<0b110101, 0b000001, (outs FPRegs:$rd),
+ (ins FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
+ "fmovs$cond %fcc0, $rs2, $rd",
+ [(set f32:$rd, (SPselectfcc f32:$rs2, f32:$f, imm:$cond))]>;
def FMOVD_FCC
- : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, CCOp:$cc),
- "fmovd$cc %fcc0, $F, $dst",
- [(set f64:$dst, (SPselectfcc f64:$F, f64:$T, imm:$cc))]>;
+ : F4_3<0b110101, 0b000010, (outs DFPRegs:$rd),
+ (ins DFPRegs:$rs2, DFPRegs:$f, CCOp:$cond),
+ "fmovd$cond %fcc0, $rs2, $rd",
+ [(set f64:$rd, (SPselectfcc f64:$rs2, f64:$f, imm:$cond))]>;
+ def FMOVQ_FCC
+ : F4_3<0b110101, 0b000011, (outs QFPRegs:$rd),
+ (ins QFPRegs:$rs2, QFPRegs:$f, CCOp:$cond),
+ "fmovd$cond %fcc0, $rs2, $rd",
+ [(set f128:$rd, (SPselectfcc f128:$rs2, f128:$f, imm:$cond))]>;
}
}
// Floating-Point Move Instructions, p. 164 of the V9 manual.
let Predicates = [HasV9] in {
- def FMOVD : F3_3<2, 0b110100, 0b000000010,
+ def FMOVD : F3_3u<2, 0b110100, 0b000000010,
(outs DFPRegs:$dst), (ins DFPRegs:$src),
"fmovd $src, $dst", []>;
- def FNEGD : F3_3<2, 0b110100, 0b000000110,
+ def FMOVQ : F3_3u<2, 0b110100, 0b000000011,
+ (outs QFPRegs:$dst), (ins QFPRegs:$src),
+ "fmovq $src, $dst", []>,
+ Requires<[HasHardQuad]>;
+ def FNEGD : F3_3u<2, 0b110100, 0b000000110,
(outs DFPRegs:$dst), (ins DFPRegs:$src),
"fnegd $src, $dst",
[(set f64:$dst, (fneg f64:$src))]>;
- def FABSD : F3_3<2, 0b110100, 0b000001010,
+ def FNEGQ : F3_3u<2, 0b110100, 0b000000111,
+ (outs QFPRegs:$dst), (ins QFPRegs:$src),
+ "fnegq $src, $dst",
+ [(set f128:$dst, (fneg f128:$src))]>,
+ Requires<[HasHardQuad]>;
+ def FABSD : F3_3u<2, 0b110100, 0b000001010,
(outs DFPRegs:$dst), (ins DFPRegs:$src),
"fabsd $src, $dst",
[(set f64:$dst, (fabs f64:$src))]>;
+ def FABSQ : F3_3u<2, 0b110100, 0b000001011,
+ (outs QFPRegs:$dst), (ins QFPRegs:$src),
+ "fabsq $src, $dst",
+ [(set f128:$dst, (fabs f128:$src))]>,
+ Requires<[HasHardQuad]>;
}
// POPCrr - This does a ctpop of a 64-bit register. As such, we have to clear
// the top 32-bits before using it. To do this clearing, we use a SLLri X,0.
-def POPCrr : F3_1<2, 0b101110,
- (outs IntRegs:$dst), (ins IntRegs:$src),
- "popc $src, $dst", []>, Requires<[HasV9]>;
+let rs1 = 0 in
+ def POPCrr : F3_1<2, 0b101110,
+ (outs IntRegs:$dst), (ins IntRegs:$src),
+ "popc $src, $dst", []>, Requires<[HasV9]>;
def : Pat<(ctpop i32:$src),
(POPCrr (SLLri $src, 0))>;
@@ -782,11 +985,6 @@ def : Pat<(i32 simm13:$val),
def : Pat<(i32 imm:$val),
(ORri (SETHIi (HI22 imm:$val)), (LO10 imm:$val))>;
-// subc
-def : Pat<(subc i32:$b, i32:$c),
- (SUBCCrr $b, $c)>;
-def : Pat<(subc i32:$b, simm13:$val),
- (SUBCCri $b, imm:$val)>;
// Global addresses, constant pool entries
def : Pat<(SPhi tglobaladdr:$in), (SETHIi tglobaladdr:$in)>;
@@ -794,11 +992,25 @@ def : Pat<(SPlo tglobaladdr:$in), (ORri (i32 G0), tglobaladdr:$in)>;
def : Pat<(SPhi tconstpool:$in), (SETHIi tconstpool:$in)>;
def : Pat<(SPlo tconstpool:$in), (ORri (i32 G0), tconstpool:$in)>;
+// GlobalTLS addresses
+def : Pat<(SPhi tglobaltlsaddr:$in), (SETHIi tglobaltlsaddr:$in)>;
+def : Pat<(SPlo tglobaltlsaddr:$in), (ORri (i32 G0), tglobaltlsaddr:$in)>;
+def : Pat<(add (SPhi tglobaltlsaddr:$in1), (SPlo tglobaltlsaddr:$in2)),
+ (ADDri (SETHIi tglobaltlsaddr:$in1), (tglobaltlsaddr:$in2))>;
+def : Pat<(xor (SPhi tglobaltlsaddr:$in1), (SPlo tglobaltlsaddr:$in2)),
+ (XORri (SETHIi tglobaltlsaddr:$in1), (tglobaltlsaddr:$in2))>;
+
+// Blockaddress
+def : Pat<(SPhi tblockaddress:$in), (SETHIi tblockaddress:$in)>;
+def : Pat<(SPlo tblockaddress:$in), (ORri (i32 G0), tblockaddress:$in)>;
+
// Add reg, lo. This is used when taking the addr of a global/constpool entry.
def : Pat<(add iPTR:$r, (SPlo tglobaladdr:$in)), (ADDri $r, tglobaladdr:$in)>;
def : Pat<(add iPTR:$r, (SPlo tconstpool:$in)), (ADDri $r, tconstpool:$in)>;
+def : Pat<(add iPTR:$r, (SPlo tblockaddress:$in)),
+ (ADDri $r, tblockaddress:$in)>;
-// Calls:
+// Calls:
def : Pat<(call tglobaladdr:$dst),
(CALL tglobaladdr:$dst)>;
def : Pat<(call texternalsym:$dst),
@@ -816,4 +1028,8 @@ def : Pat<(i32 (extloadi16 ADDRri:$src)), (LDUHri ADDRri:$src)>;
def : Pat<(i32 (zextloadi1 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
def : Pat<(i32 (zextloadi1 ADDRri:$src)), (LDUBri ADDRri:$src)>;
+// store 0, addr -> store %g0, addr
+def : Pat<(store (i32 0), ADDRrr:$dst), (STrr ADDRrr:$dst, (i32 G0))>;
+def : Pat<(store (i32 0), ADDRri:$dst), (STri ADDRri:$dst, (i32 G0))>;
+
include "SparcInstr64Bit.td"
diff --git a/lib/Target/Sparc/SparcJITInfo.cpp b/lib/Target/Sparc/SparcJITInfo.cpp
new file mode 100644
index 000000000000..6493c7d464e8
--- /dev/null
+++ b/lib/Target/Sparc/SparcJITInfo.cpp
@@ -0,0 +1,165 @@
+//===-- SparcJITInfo.cpp - Implement the Sparc JIT Interface --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the JIT interfaces for the Sparc target.
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "jit"
+#include "SparcJITInfo.h"
+#include "SparcRelocations.h"
+
+#include "llvm/CodeGen/JITCodeEmitter.h"
+#include "llvm/Support/Memory.h"
+
+using namespace llvm;
+
+/// JITCompilerFunction - This contains the address of the JIT function used to
+/// compile a function lazily.
+static TargetJITInfo::JITCompilerFn JITCompilerFunction;
+
+extern "C" void SparcCompilationCallback();
+
+extern "C" {
+#if defined (__sparc__)
+ asm(
+ ".text\n"
+ "\t.align 4\n"
+ "\t.global SparcCompilationCallback\n"
+ "\t.type SparcCompilationCallback, #function\n"
+ "SparcCompilationCallback:\n"
+ // Save current register window.
+ "\tsave %sp, -192, %sp\n"
+ // stubaddr+4 is in %g1.
+ "\tcall SparcCompilationCallbackC\n"
+ "\t sub %g1, 4, %o0\n"
+ // restore original register window and
+ // copy %o0 to %g1
+ "\t restore %o0, 0, %g1\n"
+ // call the new stub
+ "\tjmp %g1\n"
+ "\t nop\n"
+ "\t.size SparcCompilationCallback, .-SparcCompilationCallback"
+ );
+
+#else
+ void SparcCompilationCallback() {
+ llvm_unreachable(
+ "Cannot call SparcCompilationCallback() on a non-sparc arch!");
+ }
+#endif
+}
+
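+// Helpers for composing the three-instruction stub: HI/LO split an address
+// into the 22-bit sethi immediate and the 10-bit low offset, and SETHI_INST,
+// JMP_INST and NOP_INST build the corresponding raw SPARC instruction words.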
+#define HI(Val) (((unsigned)(Val)) >> 10)
+#define LO(Val) (((unsigned)(Val)) & 0x3FF)
+
+#define SETHI_INST(imm, rd) (0x01000000 | ((rd) << 25) | ((imm) & 0x3FFFFF))
+#define JMP_INST(rs1, imm, rd) (0x80000000 | ((rd) << 25) | (0x38 << 19) \
+ | ((rs1) << 14) | (1 << 13) | ((imm) & 0x1FFF))
+#define NOP_INST SETHI_INST(0, 0)
+
+extern "C" void *SparcCompilationCallbackC(intptr_t StubAddr) {
+ // Get the address of the compiled code for this function.
+ intptr_t NewVal = (intptr_t) JITCompilerFunction((void*) StubAddr);
+
+ // Rewrite the function stub so that we don't end up here every time we
+ // execute the call. We're replacing the first three instructions of the
+ // stub with code that jumps to the compiled function:
+ // sethi %hi(NewVal), %g1
+ // jmp %g1+%lo(NewVal)
+ // nop
+
+ *(intptr_t *)(StubAddr) = SETHI_INST(HI(NewVal), 1);
+ *(intptr_t *)(StubAddr + 4) = JMP_INST(1, LO(NewVal), 0);
+ *(intptr_t *)(StubAddr + 8) = NOP_INST;
+
+ sys::Memory::InvalidateInstructionCache((void*) StubAddr, 12);
+ return (void*)StubAddr;
+}
+
+void SparcJITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
+ assert(0 && "FIXME: Implement SparcJITInfo::replaceMachineCodeForFunction");
+}
+
+
+TargetJITInfo::StubLayout SparcJITInfo::getStubLayout() {
+ // The stub contains 3 4-byte instructions, aligned at 4 bytes. See
+ // emitFunctionStub for details.
+
+ StubLayout Result = { 3*4, 4 };
+ return Result;
+}
+
+void *SparcJITInfo::emitFunctionStub(const Function *F, void *Fn,
+ JITCodeEmitter &JCE)
+{
+ JCE.emitAlignment(4);
+ void *Addr = (void*) (JCE.getCurrentPCValue());
+ if (!sys::Memory::setRangeWritable(Addr, 12))
+ llvm_unreachable("ERROR: Unable to mark stub writable.");
+
+ intptr_t EmittedAddr;
+ if (Fn != (void*)(intptr_t)SparcCompilationCallback)
+ EmittedAddr = (intptr_t)Fn;
+ else
+ EmittedAddr = (intptr_t)SparcCompilationCallback;
+
+ // sethi %hi(EmittedAddr), %g1
+ // jmp %g1+%lo(EmittedAddr), %g1
+ // nop
+
+ JCE.emitWordBE(SETHI_INST(HI(EmittedAddr), 1));
+ JCE.emitWordBE(JMP_INST(1, LO(EmittedAddr), 1));
+ JCE.emitWordBE(NOP_INST);
+
+ sys::Memory::InvalidateInstructionCache(Addr, 12);
+ if (!sys::Memory::setRangeExecutable(Addr, 12))
+ llvm_unreachable("ERROR: Unable to mark stub executable.");
+
+ return Addr;
+}
+
+TargetJITInfo::LazyResolverFn
+SparcJITInfo::getLazyResolverFunction(JITCompilerFn F) {
+ JITCompilerFunction = F;
+ return SparcCompilationCallback;
+}
+
+/// relocate - Before the JIT can run a block of code that has been emitted,
+/// it must rewrite the code to contain the actual addresses of any
+/// referenced global symbols.
+void SparcJITInfo::relocate(void *Function, MachineRelocation *MR,
+ unsigned NumRelocs, unsigned char *GOTBase) {
+ for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
+ void *RelocPos = (char*) Function + MR->getMachineCodeOffset();
+ intptr_t ResultPtr = (intptr_t) MR->getResultPointer();
+
+ switch ((SP::RelocationType) MR->getRelocationType()) {
+ case SP::reloc_sparc_hi:
+ ResultPtr = (ResultPtr >> 10) & 0x3fffff;
+ break;
+
+ case SP::reloc_sparc_lo:
+ ResultPtr = (ResultPtr & 0x3ff);
+ break;
+
+ case SP::reloc_sparc_pc30:
+ ResultPtr = ((ResultPtr - (intptr_t)RelocPos) >> 2) & 0x3fffffff;
+ break;
+
+ case SP::reloc_sparc_pc22:
+ ResultPtr = ((ResultPtr - (intptr_t)RelocPos) >> 2) & 0x3fffff;
+ break;
+
+ case SP::reloc_sparc_pc19:
+ ResultPtr = ((ResultPtr - (intptr_t)RelocPos) >> 2) & 0x7ffff;
+ break;
+ }
+ *((unsigned*) RelocPos) |= (unsigned) ResultPtr;
+ }
+}
diff --git a/lib/Target/Sparc/SparcJITInfo.h b/lib/Target/Sparc/SparcJITInfo.h
new file mode 100644
index 000000000000..9c6e488e4886
--- /dev/null
+++ b/lib/Target/Sparc/SparcJITInfo.h
@@ -0,0 +1,67 @@
+//==- SparcJITInfo.h - Sparc Implementation of the JIT Interface -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the SparcJITInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SPARCJITINFO_H
+#define SPARCJITINFO_H
+
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Target/TargetJITInfo.h"
+
+namespace llvm {
+class SparcTargetMachine;
+
+class SparcJITInfo : public TargetJITInfo {
+
+ bool IsPIC;
+
+ public:
+ explicit SparcJITInfo()
+ : IsPIC(false) {}
+
+ /// replaceMachineCodeForFunction - Make it so that calling the function
+ /// whose machine code is at OLD turns into a call to NEW, perhaps by
+ /// overwriting OLD with a branch to NEW. This is used for self-modifying
+ /// code.
+ ///
+ virtual void replaceMachineCodeForFunction(void *Old, void *New);
+
+ // getStubLayout - Returns the size and alignment of the largest call stub
+ // on Sparc.
+ virtual StubLayout getStubLayout();
+
+
+ /// emitFunctionStub - Use the specified JITCodeEmitter object to emit a
+ /// small native function that simply calls the function at the specified
+ /// address.
+ virtual void *emitFunctionStub(const Function *F, void *Fn,
+ JITCodeEmitter &JCE);
+
+ /// getLazyResolverFunction - Expose the lazy resolver to the JIT.
+ virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn);
+
+ /// relocate - Before the JIT can run a block of code that has been emitted,
+ /// it must rewrite the code to contain the actual addresses of any
+ /// referenced global symbols.
+ virtual void relocate(void *Function, MachineRelocation *MR,
+ unsigned NumRelocs, unsigned char *GOTBase);
+
+ /// Initialize - Initialize internal state for the function being JITted.
+ void Initialize(const MachineFunction &MF, bool isPIC) {
+ IsPIC = isPIC;
+ }
+
+};
+}
+
+#endif
diff --git a/lib/Target/Sparc/SparcMachineFunctionInfo.h b/lib/Target/Sparc/SparcMachineFunctionInfo.h
index 90c27a4459a1..3783c16d9922 100644
--- a/lib/Target/Sparc/SparcMachineFunctionInfo.h
+++ b/lib/Target/Sparc/SparcMachineFunctionInfo.h
@@ -28,11 +28,16 @@ namespace llvm {
/// SRetReturnReg - Holds the virtual register into which the sret
/// argument is passed.
unsigned SRetReturnReg;
+
+ /// IsLeafProc - True if the function is a leaf procedure.
+ bool IsLeafProc;
public:
SparcMachineFunctionInfo()
- : GlobalBaseReg(0), VarArgsFrameOffset(0), SRetReturnReg(0) {}
+ : GlobalBaseReg(0), VarArgsFrameOffset(0), SRetReturnReg(0),
+ IsLeafProc(false) {}
explicit SparcMachineFunctionInfo(MachineFunction &MF)
- : GlobalBaseReg(0), VarArgsFrameOffset(0), SRetReturnReg(0) {}
+ : GlobalBaseReg(0), VarArgsFrameOffset(0), SRetReturnReg(0),
+ IsLeafProc(false) {}
unsigned getGlobalBaseReg() const { return GlobalBaseReg; }
void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; }
@@ -42,6 +47,9 @@ namespace llvm {
unsigned getSRetReturnReg() const { return SRetReturnReg; }
void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
+
+ void setLeafProc(bool rhs) { IsLeafProc = rhs; }
+ bool isLeafProc() const { return IsLeafProc; }
};
}
diff --git a/lib/Target/Sparc/SparcRegisterInfo.cpp b/lib/Target/Sparc/SparcRegisterInfo.cpp
index 3af4c614cd44..c98613aa1200 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -13,6 +13,7 @@
#include "SparcRegisterInfo.h"
#include "Sparc.h"
+#include "SparcMachineFunctionInfo.h"
#include "SparcSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
@@ -20,6 +21,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Type.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -28,31 +30,59 @@
using namespace llvm;
-SparcRegisterInfo::SparcRegisterInfo(SparcSubtarget &st,
- const TargetInstrInfo &tii)
- : SparcGenRegisterInfo(SP::I7), Subtarget(st), TII(tii) {
+static cl::opt<bool>
+ReserveAppRegisters("sparc-reserve-app-registers", cl::Hidden, cl::init(false),
+ cl::desc("Reserve application registers (%g2-%g4)"));
+
+SparcRegisterInfo::SparcRegisterInfo(SparcSubtarget &st)
+ : SparcGenRegisterInfo(SP::I7), Subtarget(st) {
}
const uint16_t* SparcRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
const {
- static const uint16_t CalleeSavedRegs[] = { 0 };
- return CalleeSavedRegs;
+ return CSR_SaveList;
+}
+
+const uint32_t*
+SparcRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
+ return CSR_RegMask;
+}
+
+const uint32_t*
+SparcRegisterInfo::getRTCallPreservedMask(CallingConv::ID CC) const {
+ return RTCSR_RegMask;
}
BitVector SparcRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
// FIXME: G1 reserved for now for large imm generation by frame code.
Reserved.set(SP::G1);
- Reserved.set(SP::G2);
- Reserved.set(SP::G3);
- Reserved.set(SP::G4);
+
+ // G1-G4 can be used in applications.
+ if (ReserveAppRegisters) {
+ Reserved.set(SP::G2);
+ Reserved.set(SP::G3);
+ Reserved.set(SP::G4);
+ }
+ // G5 is not reserved in 64 bit mode.
+ if (!Subtarget.is64Bit())
+ Reserved.set(SP::G5);
+
Reserved.set(SP::O6);
Reserved.set(SP::I6);
Reserved.set(SP::I7);
Reserved.set(SP::G0);
- Reserved.set(SP::G5);
Reserved.set(SP::G6);
Reserved.set(SP::G7);
+
+ // Unaliased double registers are not available in non-V9 targets.
+ if (!Subtarget.isV9()) {
+ for (unsigned n = 0; n != 16; ++n) {
+ for (MCRegAliasIterator AI(SP::D16 + n, this, true); AI.isValid(); ++AI)
+ Reserved.set(*AI);
+ }
+ }
+
return Reserved;
}
@@ -62,6 +92,62 @@ SparcRegisterInfo::getPointerRegClass(const MachineFunction &MF,
return Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
}
+static void replaceFI(MachineFunction &MF,
+ MachineBasicBlock::iterator II,
+ MachineInstr &MI,
+ DebugLoc dl,
+ unsigned FIOperandNum, int Offset,
+ unsigned FramePtr)
+{
+ // Replace frame index with a frame pointer reference.
+ if (Offset >= -4096 && Offset <= 4095) {
+ // If the offset is small enough to fit in the immediate field, directly
+ // encode it.
+ MI.getOperand(FIOperandNum).ChangeToRegister(FramePtr, false);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+ return;
+ }
+
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ // FIXME: it would be better to scavenge a register here instead of
+ // reserving G1 all of the time.
+ if (Offset >= 0) {
+ // Emit nonnegative immediates with sethi + or.
+ // sethi %hi(Offset), %g1
+ // add %g1, %fp, %g1
+ // Insert G1+%lo(offset) into the user.
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::SETHIi), SP::G1)
+ .addImm(HI22(Offset));
+
+
+ // Emit G1 = G1 + I6
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::ADDrr), SP::G1).addReg(SP::G1)
+ .addReg(FramePtr);
+ // Insert: G1+%lo(offset) into the user.
+ MI.getOperand(FIOperandNum).ChangeToRegister(SP::G1, false);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(LO10(Offset));
+ return;
+ }
+
+ // Emit Negative numbers with sethi + xor
+ // sethi %hix(Offset), %g1
+ // xor %g1, %lox(offset), %g1
+ // add %g1, %fp, %g1
+ // Insert: G1 + 0 into the user.
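+ // (%hix/%lox with xor, rather than %hi/%lo with or, reconstructs the
+ // sign-extended upper bits of a negative offset.)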
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::SETHIi), SP::G1)
+ .addImm(HIX22(Offset));
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::XORri), SP::G1)
+ .addReg(SP::G1).addImm(LOX10(Offset));
+
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::ADDrr), SP::G1).addReg(SP::G1)
+ .addReg(FramePtr);
+ // Insert: G1+%lo(offset) into the user.
+ MI.getOperand(FIOperandNum).ChangeToRegister(SP::G1, false);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
+}
+
+
void
SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
@@ -77,35 +163,49 @@ SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int64_t Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
MI.getOperand(FIOperandNum + 1).getImm() +
Subtarget.getStackPointerBias();
+ SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
+ unsigned FramePtr = SP::I6;
+ if (FuncInfo->isLeafProc()) {
+ // Use %sp and adjust offset if needed.
+ FramePtr = SP::O6;
+ int stackSize = MF.getFrameInfo()->getStackSize();
+ Offset += (stackSize) ? Subtarget.getAdjustedFrameSize(stackSize) : 0 ;
+ }
- // Replace frame index with a frame pointer reference.
- if (Offset >= -4096 && Offset <= 4095) {
- // If the offset is small enough to fit in the immediate field, directly
- // encode it.
- MI.getOperand(FIOperandNum).ChangeToRegister(SP::I6, false);
- MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
- } else {
- // Otherwise, emit a G1 = SETHI %hi(offset). FIXME: it would be better to
- // scavenge a register here instead of reserving G1 all of the time.
- unsigned OffHi = (unsigned)Offset >> 10U;
- BuildMI(*MI.getParent(), II, dl, TII.get(SP::SETHIi), SP::G1).addImm(OffHi);
- // Emit G1 = G1 + I6
- BuildMI(*MI.getParent(), II, dl, TII.get(SP::ADDrr), SP::G1).addReg(SP::G1)
- .addReg(SP::I6);
- // Insert: G1+%lo(offset) into the user.
- MI.getOperand(FIOperandNum).ChangeToRegister(SP::G1, false);
- MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset & ((1 << 10)-1));
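+ // Without hardware quad support, a 128-bit spill/fill is split into two
+ // 64-bit accesses to the even and odd double sub-registers, 8 bytes apart.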
+ if (!Subtarget.isV9() || !Subtarget.hasHardQuad()) {
+ if (MI.getOpcode() == SP::STQFri) {
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ unsigned SrcReg = MI.getOperand(2).getReg();
+ unsigned SrcEvenReg = getSubReg(SrcReg, SP::sub_even64);
+ unsigned SrcOddReg = getSubReg(SrcReg, SP::sub_odd64);
+ MachineInstr *StMI =
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::STDFri))
+ .addReg(FramePtr).addImm(0).addReg(SrcEvenReg);
+ replaceFI(MF, II, *StMI, dl, 0, Offset, FramePtr);
+ MI.setDesc(TII.get(SP::STDFri));
+ MI.getOperand(2).setReg(SrcOddReg);
+ Offset += 8;
+ } else if (MI.getOpcode() == SP::LDQFri) {
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned DestEvenReg = getSubReg(DestReg, SP::sub_even64);
+ unsigned DestOddReg = getSubReg(DestReg, SP::sub_odd64);
+ MachineInstr *StMI =
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::LDDFri), DestEvenReg)
+ .addReg(FramePtr).addImm(0);
+ replaceFI(MF, II, *StMI, dl, 1, Offset, FramePtr);
+
+ MI.setDesc(TII.get(SP::LDDFri));
+ MI.getOperand(0).setReg(DestOddReg);
+ Offset += 8;
+ }
}
+
+ replaceFI(MF, II, MI, dl, FIOperandNum, Offset, FramePtr);
+
}
unsigned SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return SP::I6;
}
-unsigned SparcRegisterInfo::getEHExceptionRegister() const {
- llvm_unreachable("What is the exception register");
-}
-
-unsigned SparcRegisterInfo::getEHHandlerRegister() const {
- llvm_unreachable("What is the exception handler register");
-}
diff --git a/lib/Target/Sparc/SparcRegisterInfo.h b/lib/Target/Sparc/SparcRegisterInfo.h
index f91df5398953..00b5a98dd79f 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.h
+++ b/lib/Target/Sparc/SparcRegisterInfo.h
@@ -27,12 +27,14 @@ class Type;
struct SparcRegisterInfo : public SparcGenRegisterInfo {
SparcSubtarget &Subtarget;
- const TargetInstrInfo &TII;
- SparcRegisterInfo(SparcSubtarget &st, const TargetInstrInfo &tii);
+ SparcRegisterInfo(SparcSubtarget &st);
/// Code Generation virtual methods...
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+ const uint32_t* getCallPreservedMask(CallingConv::ID CC) const;
+
+ const uint32_t* getRTCallPreservedMask(CallingConv::ID CC) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
@@ -48,10 +50,6 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const;
-
- // Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
};
} // end namespace llvm
diff --git a/lib/Target/Sparc/SparcRegisterInfo.td b/lib/Target/Sparc/SparcRegisterInfo.td
index 497e7c5d5612..2a575c05a952 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.td
+++ b/lib/Target/Sparc/SparcRegisterInfo.td
@@ -8,11 +8,11 @@
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// Declarations that describe the Sparc register file
+// Declarations that describe the Sparc register file
//===----------------------------------------------------------------------===//
-class SparcReg<string n> : Register<n> {
- field bits<5> Num;
+class SparcReg<bits<16> Enc, string n> : Register<n> {
+ let HWEncoding = Enc;
let Namespace = "SP";
}
@@ -21,27 +21,33 @@ class SparcCtrlReg<string n>: Register<n> {
}
let Namespace = "SP" in {
-def sub_even : SubRegIndex;
-def sub_odd : SubRegIndex;
+def sub_even : SubRegIndex<32>;
+def sub_odd : SubRegIndex<32, 32>;
+def sub_even64 : SubRegIndex<64>;
+def sub_odd64 : SubRegIndex<64, 64>;
}
// Registers are identified with 5-bit ID numbers.
// Ri - 32-bit integer registers
-class Ri<bits<5> num, string n> : SparcReg<n> {
- let Num = num;
-}
+class Ri<bits<16> Enc, string n> : SparcReg<Enc, n>;
+
// Rf - 32-bit floating-point registers
-class Rf<bits<5> num, string n> : SparcReg<n> {
- let Num = num;
-}
+class Rf<bits<16> Enc, string n> : SparcReg<Enc, n>;
+
// Rd - Slots in the FP register file for 64-bit floating-point values.
-class Rd<bits<5> num, string n, list<Register> subregs> : SparcReg<n> {
- let Num = num;
+class Rd<bits<16> Enc, string n, list<Register> subregs> : SparcReg<Enc, n> {
let SubRegs = subregs;
let SubRegIndices = [sub_even, sub_odd];
let CoveredBySubRegs = 1;
}
+// Rq - Slots in the FP register file for 128-bit floating-point values.
+class Rq<bits<16> Enc, string n, list<Register> subregs> : SparcReg<Enc, n> {
+ let SubRegs = subregs;
+ let SubRegIndices = [sub_even64, sub_odd64];
+ let CoveredBySubRegs = 1;
+}
+
// Control Registers
def ICC : SparcCtrlReg<"ICC">; // This represents icc and xcc in 64-bit code.
def FCC : SparcCtrlReg<"FCC">;
@@ -52,68 +58,68 @@ def Y : SparcCtrlReg<"Y">;
// Integer registers
def G0 : Ri< 0, "G0">, DwarfRegNum<[0]>;
def G1 : Ri< 1, "G1">, DwarfRegNum<[1]>;
-def G2 : Ri< 2, "G2">, DwarfRegNum<[2]>;
+def G2 : Ri< 2, "G2">, DwarfRegNum<[2]>;
def G3 : Ri< 3, "G3">, DwarfRegNum<[3]>;
def G4 : Ri< 4, "G4">, DwarfRegNum<[4]>;
-def G5 : Ri< 5, "G5">, DwarfRegNum<[5]>;
+def G5 : Ri< 5, "G5">, DwarfRegNum<[5]>;
def G6 : Ri< 6, "G6">, DwarfRegNum<[6]>;
def G7 : Ri< 7, "G7">, DwarfRegNum<[7]>;
def O0 : Ri< 8, "O0">, DwarfRegNum<[8]>;
def O1 : Ri< 9, "O1">, DwarfRegNum<[9]>;
-def O2 : Ri<10, "O2">, DwarfRegNum<[10]>;
+def O2 : Ri<10, "O2">, DwarfRegNum<[10]>;
def O3 : Ri<11, "O3">, DwarfRegNum<[11]>;
def O4 : Ri<12, "O4">, DwarfRegNum<[12]>;
-def O5 : Ri<13, "O5">, DwarfRegNum<[13]>;
+def O5 : Ri<13, "O5">, DwarfRegNum<[13]>;
def O6 : Ri<14, "SP">, DwarfRegNum<[14]>;
def O7 : Ri<15, "O7">, DwarfRegNum<[15]>;
def L0 : Ri<16, "L0">, DwarfRegNum<[16]>;
def L1 : Ri<17, "L1">, DwarfRegNum<[17]>;
-def L2 : Ri<18, "L2">, DwarfRegNum<[18]>;
+def L2 : Ri<18, "L2">, DwarfRegNum<[18]>;
def L3 : Ri<19, "L3">, DwarfRegNum<[19]>;
def L4 : Ri<20, "L4">, DwarfRegNum<[20]>;
-def L5 : Ri<21, "L5">, DwarfRegNum<[21]>;
+def L5 : Ri<21, "L5">, DwarfRegNum<[21]>;
def L6 : Ri<22, "L6">, DwarfRegNum<[22]>;
def L7 : Ri<23, "L7">, DwarfRegNum<[23]>;
def I0 : Ri<24, "I0">, DwarfRegNum<[24]>;
def I1 : Ri<25, "I1">, DwarfRegNum<[25]>;
-def I2 : Ri<26, "I2">, DwarfRegNum<[26]>;
+def I2 : Ri<26, "I2">, DwarfRegNum<[26]>;
def I3 : Ri<27, "I3">, DwarfRegNum<[27]>;
def I4 : Ri<28, "I4">, DwarfRegNum<[28]>;
-def I5 : Ri<29, "I5">, DwarfRegNum<[29]>;
+def I5 : Ri<29, "I5">, DwarfRegNum<[29]>;
def I6 : Ri<30, "FP">, DwarfRegNum<[30]>;
def I7 : Ri<31, "I7">, DwarfRegNum<[31]>;
// Floating-point registers
def F0 : Rf< 0, "F0">, DwarfRegNum<[32]>;
def F1 : Rf< 1, "F1">, DwarfRegNum<[33]>;
-def F2 : Rf< 2, "F2">, DwarfRegNum<[34]>;
+def F2 : Rf< 2, "F2">, DwarfRegNum<[34]>;
def F3 : Rf< 3, "F3">, DwarfRegNum<[35]>;
def F4 : Rf< 4, "F4">, DwarfRegNum<[36]>;
-def F5 : Rf< 5, "F5">, DwarfRegNum<[37]>;
+def F5 : Rf< 5, "F5">, DwarfRegNum<[37]>;
def F6 : Rf< 6, "F6">, DwarfRegNum<[38]>;
def F7 : Rf< 7, "F7">, DwarfRegNum<[39]>;
-def F8 : Rf< 8, "F8">, DwarfRegNum<[40]>;
+def F8 : Rf< 8, "F8">, DwarfRegNum<[40]>;
def F9 : Rf< 9, "F9">, DwarfRegNum<[41]>;
def F10 : Rf<10, "F10">, DwarfRegNum<[42]>;
-def F11 : Rf<11, "F11">, DwarfRegNum<[43]>;
+def F11 : Rf<11, "F11">, DwarfRegNum<[43]>;
def F12 : Rf<12, "F12">, DwarfRegNum<[44]>;
def F13 : Rf<13, "F13">, DwarfRegNum<[45]>;
-def F14 : Rf<14, "F14">, DwarfRegNum<[46]>;
+def F14 : Rf<14, "F14">, DwarfRegNum<[46]>;
def F15 : Rf<15, "F15">, DwarfRegNum<[47]>;
def F16 : Rf<16, "F16">, DwarfRegNum<[48]>;
-def F17 : Rf<17, "F17">, DwarfRegNum<[49]>;
+def F17 : Rf<17, "F17">, DwarfRegNum<[49]>;
def F18 : Rf<18, "F18">, DwarfRegNum<[50]>;
def F19 : Rf<19, "F19">, DwarfRegNum<[51]>;
-def F20 : Rf<20, "F20">, DwarfRegNum<[52]>;
+def F20 : Rf<20, "F20">, DwarfRegNum<[52]>;
def F21 : Rf<21, "F21">, DwarfRegNum<[53]>;
def F22 : Rf<22, "F22">, DwarfRegNum<[54]>;
def F23 : Rf<23, "F23">, DwarfRegNum<[55]>;
def F24 : Rf<24, "F24">, DwarfRegNum<[56]>;
def F25 : Rf<25, "F25">, DwarfRegNum<[57]>;
-def F26 : Rf<26, "F26">, DwarfRegNum<[58]>;
+def F26 : Rf<26, "F26">, DwarfRegNum<[58]>;
def F27 : Rf<27, "F27">, DwarfRegNum<[59]>;
def F28 : Rf<28, "F28">, DwarfRegNum<[60]>;
-def F29 : Rf<29, "F29">, DwarfRegNum<[61]>;
+def F29 : Rf<29, "F29">, DwarfRegNum<[61]>;
def F30 : Rf<30, "F30">, DwarfRegNum<[62]>;
def F31 : Rf<31, "F31">, DwarfRegNum<[63]>;
@@ -135,6 +141,43 @@ def D13 : Rd<26, "F26", [F26, F27]>, DwarfRegNum<[85]>;
def D14 : Rd<28, "F28", [F28, F29]>, DwarfRegNum<[86]>;
def D15 : Rd<30, "F30", [F30, F31]>, DwarfRegNum<[87]>;
+// Unaliased double precision floating point registers.
+// FIXME: Define DwarfRegNum for these registers.
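+// In the V9 encoding, double registers %f32 and above set bit 0 of the 5-bit
+// register field, so D16 (%f32) encodes as 1, D17 (%f34) as 3, and so on.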
+def D16 : SparcReg< 1, "F32">;
+def D17 : SparcReg< 3, "F34">;
+def D18 : SparcReg< 5, "F36">;
+def D19 : SparcReg< 7, "F38">;
+def D20 : SparcReg< 9, "F40">;
+def D21 : SparcReg<11, "F42">;
+def D22 : SparcReg<13, "F44">;
+def D23 : SparcReg<15, "F46">;
+def D24 : SparcReg<17, "F48">;
+def D25 : SparcReg<19, "F50">;
+def D26 : SparcReg<21, "F52">;
+def D27 : SparcReg<23, "F54">;
+def D28 : SparcReg<25, "F56">;
+def D29 : SparcReg<27, "F58">;
+def D30 : SparcReg<29, "F60">;
+def D31 : SparcReg<31, "F62">;
+
+// Aliases of the F* registers used to hold 128-bit values (long doubles).
+def Q0 : Rq< 0, "F0", [D0, D1]>;
+def Q1 : Rq< 4, "F4", [D2, D3]>;
+def Q2 : Rq< 8, "F8", [D4, D5]>;
+def Q3 : Rq<12, "F12", [D6, D7]>;
+def Q4 : Rq<16, "F16", [D8, D9]>;
+def Q5 : Rq<20, "F20", [D10, D11]>;
+def Q6 : Rq<24, "F24", [D12, D13]>;
+def Q7 : Rq<28, "F28", [D14, D15]>;
+def Q8 : Rq< 1, "F32", [D16, D17]>;
+def Q9 : Rq< 5, "F36", [D18, D19]>;
+def Q10 : Rq< 9, "F40", [D20, D21]>;
+def Q11 : Rq<13, "F44", [D22, D23]>;
+def Q12 : Rq<17, "F48", [D24, D25]>;
+def Q13 : Rq<21, "F52", [D26, D27]>;
+def Q14 : Rq<25, "F56", [D28, D29]>;
+def Q15 : Rq<29, "F60", [D30, D31]>;
+
// Register classes.
//
// FIXME: the register order should be defined in terms of the preferred
@@ -144,19 +187,10 @@ def D15 : Rd<30, "F30", [F30, F31]>, DwarfRegNum<[87]>;
// register class for that. The i64 type is included here to allow i64 patterns
// using the integer instructions.
def IntRegs : RegisterClass<"SP", [i32, i64], 32,
- (add L0, L1, L2, L3, L4, L5, L6,
- L7, I0, I1, I2, I3, I4, I5,
- O0, O1, O2, O3, O4, O5, O7,
- G1,
- // Non-allocatable regs:
- G2, G3, G4, // FIXME: OK for use only in
- // applications, not libraries.
- O6, // stack ptr
- I6, // frame ptr
- I7, // return address
- G0, // constant zero
- G5, G6, G7 // reserved for kernel
- )>;
+ (add (sequence "I%u", 0, 7),
+ (sequence "G%u", 0, 7),
+ (sequence "L%u", 0, 7),
+ (sequence "O%u", 0, 7))>;
// Register class for 64-bit mode, with a 64-bit spill slot size.
// These are the same as the 32-bit registers, so TableGen will consider this
@@ -167,4 +201,6 @@ def I64Regs : RegisterClass<"SP", [i64], 64, (add IntRegs)>;
// Floating point register classes.
def FPRegs : RegisterClass<"SP", [f32], 32, (sequence "F%u", 0, 31)>;
-def DFPRegs : RegisterClass<"SP", [f64], 64, (sequence "D%u", 0, 15)>;
+def DFPRegs : RegisterClass<"SP", [f64], 64, (sequence "D%u", 0, 31)>;
+
+def QFPRegs : RegisterClass<"SP", [f128], 128, (sequence "Q%u", 0, 15)>;
diff --git a/lib/Target/Sparc/SparcRelocations.h b/lib/Target/Sparc/SparcRelocations.h
new file mode 100644
index 000000000000..388cfe75a5eb
--- /dev/null
+++ b/lib/Target/Sparc/SparcRelocations.h
@@ -0,0 +1,41 @@
+//===-- SparcRelocations.h - Sparc Code Relocations -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Sparc target-specific relocation types
+// (for relocation-model=static).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SPARC_RELOCATIONS_H
+#define SPARC_RELOCATIONS_H
+
+#include "llvm/CodeGen/MachineRelocation.h"
+
+namespace llvm {
+ namespace SP {
+ enum RelocationType {
+ // reloc_sparc_hi - upper 22 bits
+ reloc_sparc_hi = 1,
+
+ // reloc_sparc_lo - lower 10 bits
+ reloc_sparc_lo = 2,
+
+ // reloc_sparc_pc30 - pc rel. 30 bits for call
+ reloc_sparc_pc30 = 3,
+
+ // reloc_sparc_pc22 - pc rel. 22 bits for branch
+ reloc_sparc_pc22 = 4,
+
+ // reloc_sparc_pc19 - pc rel. 19 bits for branch with icc/xcc
+ reloc_sparc_pc19 = 5
+ };
+ }
+}
+
+#endif
diff --git a/lib/Target/Sparc/SparcSubtarget.cpp b/lib/Target/Sparc/SparcSubtarget.cpp
index e5b2aeb1bb85..7d09d0e3f7db 100644
--- a/lib/Target/Sparc/SparcSubtarget.cpp
+++ b/lib/Target/Sparc/SparcSubtarget.cpp
@@ -13,6 +13,7 @@
#include "SparcSubtarget.h"
#include "Sparc.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_SUBTARGETINFO_TARGET_DESC
@@ -29,8 +30,9 @@ SparcSubtarget::SparcSubtarget(const std::string &TT, const std::string &CPU,
IsV9(false),
V8DeprecatedInsts(false),
IsVIS(false),
- Is64Bit(is64Bit) {
-
+ Is64Bit(is64Bit),
+ HasHardQuad(false) {
+
// Determine default and user specified characteristics
std::string CPUName = CPU;
if (CPUName.empty()) {
@@ -44,3 +46,30 @@ SparcSubtarget::SparcSubtarget(const std::string &TT, const std::string &CPU,
// Parse features string.
ParseSubtargetFeatures(CPUName, FS);
}
+
+
+int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
+
+ if (is64Bit()) {
+ // All 64-bit stack frames must be 16-byte aligned, and must reserve space
+ // for spilling the 16 window registers at %sp+BIAS..%sp+BIAS+128.
+ frameSize += 128;
+ // Frames with calls must also reserve space for 6 outgoing arguments
+ // whether they are used or not. LowerCall_64 takes care of that.
+ assert(frameSize % 16 == 0 && "Stack size not 16-byte aligned");
+ } else {
+ // Emit the correct save instruction based on the number of bytes in
+ // the frame. Minimum stack frame size according to V8 ABI is:
+ // 16 words for register window spill
+ // 1 word for address of returned aggregate-value
+ // + 6 words for passing parameters on the stack
+ // ----------
+ // 23 words * 4 bytes per word = 92 bytes
+ frameSize += 92;
+
+ // Round up to next doubleword boundary -- a double-word boundary
+ // is required by the ABI.
+ frameSize = RoundUpToAlignment(frameSize, 8);
+ }
+ return frameSize;
+}
diff --git a/lib/Target/Sparc/SparcSubtarget.h b/lib/Target/Sparc/SparcSubtarget.h
index b94dd110ea9f..0f81cc960f82 100644
--- a/lib/Target/Sparc/SparcSubtarget.h
+++ b/lib/Target/Sparc/SparcSubtarget.h
@@ -29,7 +29,8 @@ class SparcSubtarget : public SparcGenSubtargetInfo {
bool V8DeprecatedInsts;
bool IsVIS;
bool Is64Bit;
-
+ bool HasHardQuad;
+
public:
SparcSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool is64bit);
@@ -37,11 +38,12 @@ public:
bool isV9() const { return IsV9; }
bool isVIS() const { return IsVIS; }
bool useDeprecatedV8Instructions() const { return V8DeprecatedInsts; }
-
- /// ParseSubtargetFeatures - Parses features string setting specified
+ bool hasHardQuad() const { return HasHardQuad; }
+
+ /// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
-
+
bool is64Bit() const { return Is64Bit; }
std::string getDataLayout() const {
const char *p;
@@ -58,6 +60,12 @@ public:
int64_t getStackPointerBias() const {
return is64Bit() ? 2047 : 0;
}
+
+ /// Given the actual stack size as determined by FrameInfo, this function
+ /// returns the adjusted frame size, which includes space for the register
+ /// window spill area and outgoing arguments.
+ int getAdjustedFrameSize(int stackSize) const;
+
};
} // end namespace llvm
diff --git a/lib/Target/Sparc/SparcTargetMachine.cpp b/lib/Target/Sparc/SparcTargetMachine.cpp
index 60bceb708fbc..0f936747cfed 100644
--- a/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -37,6 +37,7 @@ SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT,
InstrInfo(Subtarget),
TLInfo(*this), TSInfo(*this),
FrameLowering(Subtarget) {
+ initAsmInfo();
}
namespace {
@@ -64,11 +65,17 @@ bool SparcPassConfig::addInstSelector() {
return false;
}
+bool SparcTargetMachine::addCodeEmitter(PassManagerBase &PM,
+ JITCodeEmitter &JCE) {
+ // Machine code emitter pass for Sparc.
+ PM.add(createSparcJITCodeEmitterPass(*this, JCE));
+ return false;
+}
+
/// addPreEmitPass - This pass may be implemented by targets that want to run
/// passes immediately before machine code is emitted. This should return
/// true if -print-machineinstrs should print out the code after the passes.
bool SparcPassConfig::addPreEmitPass(){
- addPass(createSparcFPMoverPass(getSparcTargetMachine()));
addPass(createSparcDelaySlotFillerPass(getSparcTargetMachine()));
return true;
}
diff --git a/lib/Target/Sparc/SparcTargetMachine.h b/lib/Target/Sparc/SparcTargetMachine.h
index 081075de2dc8..8c9bcd36bf33 100644
--- a/lib/Target/Sparc/SparcTargetMachine.h
+++ b/lib/Target/Sparc/SparcTargetMachine.h
@@ -17,6 +17,7 @@
#include "SparcFrameLowering.h"
#include "SparcISelLowering.h"
#include "SparcInstrInfo.h"
+#include "SparcJITInfo.h"
#include "SparcSelectionDAGInfo.h"
#include "SparcSubtarget.h"
#include "llvm/IR/DataLayout.h"
@@ -32,6 +33,7 @@ class SparcTargetMachine : public LLVMTargetMachine {
SparcTargetLowering TLInfo;
SparcSelectionDAGInfo TSInfo;
SparcFrameLowering FrameLowering;
+ SparcJITInfo JITInfo;
public:
SparcTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
@@ -52,10 +54,14 @@ public:
virtual const SparcSelectionDAGInfo* getSelectionDAGInfo() const {
return &TSInfo;
}
+ virtual SparcJITInfo *getJITInfo() {
+ return &JITInfo;
+ }
virtual const DataLayout *getDataLayout() const { return &DL; }
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ virtual bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &JCE);
};
/// SparcV8TargetMachine - Sparc 32-bit target machine
diff --git a/lib/Target/Sparc/TargetInfo/SparcTargetInfo.cpp b/lib/Target/Sparc/TargetInfo/SparcTargetInfo.cpp
index bb714632349a..4eea16370ea1 100644
--- a/lib/Target/Sparc/TargetInfo/SparcTargetInfo.cpp
+++ b/lib/Target/Sparc/TargetInfo/SparcTargetInfo.cpp
@@ -15,7 +15,9 @@ using namespace llvm;
Target llvm::TheSparcTarget;
Target llvm::TheSparcV9Target;
-extern "C" void LLVMInitializeSparcTargetInfo() {
- RegisterTarget<Triple::sparc> X(TheSparcTarget, "sparc", "Sparc");
- RegisterTarget<Triple::sparcv9> Y(TheSparcV9Target, "sparcv9", "Sparc V9");
+extern "C" void LLVMInitializeSparcTargetInfo() {
+ RegisterTarget<Triple::sparc, /*HasJIT=*/ true>
+ X(TheSparcTarget, "sparc", "Sparc");
+ RegisterTarget<Triple::sparcv9, /*HasJIT=*/ true>
+ Y(TheSparcV9Target, "sparcv9", "Sparc V9");
}
diff --git a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
index c7725a14593c..763f40c7ff44 100644
--- a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
+++ b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
@@ -8,6 +8,8 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/SystemZMCTargetDesc.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
@@ -28,21 +30,29 @@ static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue) {
}
namespace {
+enum RegisterKind {
+ GR32Reg,
+ GRH32Reg,
+ GR64Reg,
+ GR128Reg,
+ ADDR32Reg,
+ ADDR64Reg,
+ FP32Reg,
+ FP64Reg,
+ FP128Reg
+};
+
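+// Memory operand forms: base+displacement (BD), base+displacement+index
+// (BDX), and base+displacement+length (BDL) for D(L,B)-style operands.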
+enum MemoryKind {
+ BDMem,
+ BDXMem,
+ BDLMem
+};
+
class SystemZOperand : public MCParsedAsmOperand {
public:
- enum RegisterKind {
- GR32Reg,
- GR64Reg,
- GR128Reg,
- ADDR32Reg,
- ADDR64Reg,
- FP32Reg,
- FP64Reg,
- FP128Reg
- };
-
private:
enum OperandKind {
+ KindInvalid,
KindToken,
KindReg,
KindAccessReg,
@@ -59,7 +69,15 @@ private:
unsigned Length;
};
- // LLVM register Num, which has kind Kind.
+ // LLVM register Num, which has kind Kind. In some ways it might be
+ // easier for this class to have a register bank (general, floating-point
+ // or access) and a raw register number (0-15). This would postpone the
+ // interpretation of the operand to the add*() methods and avoid the need
+ // for context-dependent parsing. However, we do things the current way
+ // because of the virtual getReg() method, which needs to distinguish
+ // between (say) %r0 used as a single register and %r0 used as a pair.
+ // Context-dependent parsing can also give us slightly better error
+ // messages when invalid pairs like %r1 are used.
struct RegOp {
RegisterKind Kind;
unsigned Num;
@@ -67,12 +85,15 @@ private:
// Base + Disp + Index, where Base and Index are LLVM registers or 0.
// RegKind says what type the registers have (ADDR32Reg or ADDR64Reg).
+ // Length is the operand length for D(L,B)-style operands, otherwise
+ // it is null.
struct MemOp {
unsigned Base : 8;
unsigned Index : 8;
unsigned RegKind : 8;
unsigned Unused : 8;
const MCExpr *Disp;
+ const MCExpr *Length;
};
union {
@@ -99,6 +120,9 @@ private:
public:
// Create particular kinds of operand.
+ static SystemZOperand *createInvalid(SMLoc StartLoc, SMLoc EndLoc) {
+ return new SystemZOperand(KindInvalid, StartLoc, EndLoc);
+ }
static SystemZOperand *createToken(StringRef Str, SMLoc Loc) {
SystemZOperand *Op = new SystemZOperand(KindToken, Loc, Loc);
Op->Token.Data = Str.data();
@@ -126,12 +150,14 @@ public:
}
static SystemZOperand *createMem(RegisterKind RegKind, unsigned Base,
const MCExpr *Disp, unsigned Index,
- SMLoc StartLoc, SMLoc EndLoc) {
+ const MCExpr *Length, SMLoc StartLoc,
+ SMLoc EndLoc) {
SystemZOperand *Op = new SystemZOperand(KindMem, StartLoc, EndLoc);
Op->Mem.RegKind = RegKind;
Op->Mem.Base = Base;
Op->Mem.Index = Index;
Op->Mem.Disp = Disp;
+ Op->Mem.Length = Length;
return Op;
}
@@ -178,16 +204,20 @@ public:
virtual bool isMem() const LLVM_OVERRIDE {
return Kind == KindMem;
}
- bool isMem(RegisterKind RegKind, bool HasIndex) const {
+ bool isMem(RegisterKind RegKind, MemoryKind MemKind) const {
return (Kind == KindMem &&
Mem.RegKind == RegKind &&
- (HasIndex || !Mem.Index));
+ (MemKind == BDXMem || !Mem.Index) &&
+ (MemKind == BDLMem) == (Mem.Length != 0));
+ }
+ bool isMemDisp12(RegisterKind RegKind, MemoryKind MemKind) const {
+ return isMem(RegKind, MemKind) && inRange(Mem.Disp, 0, 0xfff);
}
- bool isMemDisp12(RegisterKind RegKind, bool HasIndex) const {
- return isMem(RegKind, HasIndex) && inRange(Mem.Disp, 0, 0xfff);
+ bool isMemDisp20(RegisterKind RegKind, MemoryKind MemKind) const {
+ return isMem(RegKind, MemKind) && inRange(Mem.Disp, -524288, 524287);
}
- bool isMemDisp20(RegisterKind RegKind, bool HasIndex) const {
- return isMem(RegKind, HasIndex) && inRange(Mem.Disp, -524288, 524287);
+ bool isMemDisp12Len8(RegisterKind RegKind) const {
+ return isMemDisp12(RegKind, BDLMem) && inRange(Mem.Length, 1, 0x100);
}
// Override MCParsedAsmOperand.
@@ -223,9 +253,18 @@ public:
addExpr(Inst, Mem.Disp);
Inst.addOperand(MCOperand::CreateReg(Mem.Index));
}
+ void addBDLAddrOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 3 && "Invalid number of operands");
+ assert(Kind == KindMem && "Invalid operand type");
+ Inst.addOperand(MCOperand::CreateReg(Mem.Base));
+ addExpr(Inst, Mem.Disp);
+ addExpr(Inst, Mem.Length);
+ }
// Used by the TableGen code to check for particular operand types.
bool isGR32() const { return isReg(GR32Reg); }
+ bool isGRH32() const { return isReg(GRH32Reg); }
+ bool isGRX32() const { return false; }
bool isGR64() const { return isReg(GR64Reg); }
bool isGR128() const { return isReg(GR128Reg); }
bool isADDR32() const { return isReg(ADDR32Reg); }
@@ -234,12 +273,13 @@ public:
bool isFP32() const { return isReg(FP32Reg); }
bool isFP64() const { return isReg(FP64Reg); }
bool isFP128() const { return isReg(FP128Reg); }
- bool isBDAddr32Disp12() const { return isMemDisp12(ADDR32Reg, false); }
- bool isBDAddr32Disp20() const { return isMemDisp20(ADDR32Reg, false); }
- bool isBDAddr64Disp12() const { return isMemDisp12(ADDR64Reg, false); }
- bool isBDAddr64Disp20() const { return isMemDisp20(ADDR64Reg, false); }
- bool isBDXAddr64Disp12() const { return isMemDisp12(ADDR64Reg, true); }
- bool isBDXAddr64Disp20() const { return isMemDisp20(ADDR64Reg, true); }
+ bool isBDAddr32Disp12() const { return isMemDisp12(ADDR32Reg, BDMem); }
+ bool isBDAddr32Disp20() const { return isMemDisp20(ADDR32Reg, BDMem); }
+ bool isBDAddr64Disp12() const { return isMemDisp12(ADDR64Reg, BDMem); }
+ bool isBDAddr64Disp20() const { return isMemDisp20(ADDR64Reg, BDMem); }
+ bool isBDXAddr64Disp12() const { return isMemDisp12(ADDR64Reg, BDXMem); }
+ bool isBDXAddr64Disp20() const { return isMemDisp20(ADDR64Reg, BDXMem); }
+ bool isBDLAddr64Disp12Len8() const { return isMemDisp12Len8(ADDR64Reg); }
bool isU4Imm() const { return isImm(0, 15); }
bool isU6Imm() const { return isImm(0, 63); }
bool isU8Imm() const { return isImm(0, 255); }
@@ -250,46 +290,6 @@ public:
bool isS32Imm() const { return isImm(-(1LL << 31), (1LL << 31) - 1); }
};
-// Maps of asm register numbers to LLVM register numbers, with 0 indicating
-// an invalid register. We don't use register class directly because that
-// specifies the allocation order.
-static const unsigned GR32Regs[] = {
- SystemZ::R0W, SystemZ::R1W, SystemZ::R2W, SystemZ::R3W,
- SystemZ::R4W, SystemZ::R5W, SystemZ::R6W, SystemZ::R7W,
- SystemZ::R8W, SystemZ::R9W, SystemZ::R10W, SystemZ::R11W,
- SystemZ::R12W, SystemZ::R13W, SystemZ::R14W, SystemZ::R15W
-};
-static const unsigned GR64Regs[] = {
- SystemZ::R0D, SystemZ::R1D, SystemZ::R2D, SystemZ::R3D,
- SystemZ::R4D, SystemZ::R5D, SystemZ::R6D, SystemZ::R7D,
- SystemZ::R8D, SystemZ::R9D, SystemZ::R10D, SystemZ::R11D,
- SystemZ::R12D, SystemZ::R13D, SystemZ::R14D, SystemZ::R15D
-};
-static const unsigned GR128Regs[] = {
- SystemZ::R0Q, 0, SystemZ::R2Q, 0,
- SystemZ::R4Q, 0, SystemZ::R6Q, 0,
- SystemZ::R8Q, 0, SystemZ::R10Q, 0,
- SystemZ::R12Q, 0, SystemZ::R14Q, 0
-};
-static const unsigned FP32Regs[] = {
- SystemZ::F0S, SystemZ::F1S, SystemZ::F2S, SystemZ::F3S,
- SystemZ::F4S, SystemZ::F5S, SystemZ::F6S, SystemZ::F7S,
- SystemZ::F8S, SystemZ::F9S, SystemZ::F10S, SystemZ::F11S,
- SystemZ::F12S, SystemZ::F13S, SystemZ::F14S, SystemZ::F15S
-};
-static const unsigned FP64Regs[] = {
- SystemZ::F0D, SystemZ::F1D, SystemZ::F2D, SystemZ::F3D,
- SystemZ::F4D, SystemZ::F5D, SystemZ::F6D, SystemZ::F7D,
- SystemZ::F8D, SystemZ::F9D, SystemZ::F10D, SystemZ::F11D,
- SystemZ::F12D, SystemZ::F13D, SystemZ::F14D, SystemZ::F15D
-};
-static const unsigned FP128Regs[] = {
- SystemZ::F0Q, SystemZ::F1Q, 0, 0,
- SystemZ::F4Q, SystemZ::F5Q, 0, 0,
- SystemZ::F8Q, SystemZ::F9Q, 0, 0,
- SystemZ::F12Q, SystemZ::F13Q, 0, 0
-};
-
class SystemZAsmParser : public MCTargetAsmParser {
#define GET_ASSEMBLER_HEADER
#include "SystemZGenAsmMatcher.inc"
@@ -297,35 +297,42 @@ class SystemZAsmParser : public MCTargetAsmParser {
private:
MCSubtargetInfo &STI;
MCAsmParser &Parser;
+ enum RegisterGroup {
+ RegGR,
+ RegFP,
+ RegAccess
+ };
struct Register {
- char Prefix;
- unsigned Number;
+ RegisterGroup Group;
+ unsigned Num;
SMLoc StartLoc, EndLoc;
};
bool parseRegister(Register &Reg);
- OperandMatchResultTy
- parseRegister(Register &Reg, char Prefix, const unsigned *Regs,
- bool IsAddress = false);
+ bool parseRegister(Register &Reg, RegisterGroup Group, const unsigned *Regs,
+ bool IsAddress = false);
OperandMatchResultTy
parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- char Prefix, const unsigned *Regs,
- SystemZOperand::RegisterKind Kind,
- bool IsAddress = false);
+ RegisterGroup Group, const unsigned *Regs, RegisterKind Kind);
+
+ bool parseAddress(unsigned &Base, const MCExpr *&Disp,
+ unsigned &Index, const MCExpr *&Length,
+ const unsigned *Regs, RegisterKind RegKind);
OperandMatchResultTy
parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- const unsigned *Regs, SystemZOperand::RegisterKind RegKind,
- bool HasIndex);
+ const unsigned *Regs, RegisterKind RegKind,
+ MemoryKind MemKind);
bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
StringRef Mnemonic);
public:
- SystemZAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
- : MCTargetAsmParser(), STI(sti), Parser(parser) {
+ SystemZAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
+ const MCInstrInfo &MII)
+ : MCTargetAsmParser(), STI(sti), Parser(parser) {
MCAsmParserExtension::Initialize(Parser);
// Initialize the set of available features.
@@ -349,25 +356,31 @@ public:
// Used by the TableGen code to parse particular operand types.
OperandMatchResultTy
parseGR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseRegister(Operands, 'r', GR32Regs, SystemZOperand::GR32Reg);
+ return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, GR32Reg);
+ }
+ OperandMatchResultTy
+ parseGRH32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseRegister(Operands, RegGR, SystemZMC::GRH32Regs, GRH32Reg);
+ }
+ OperandMatchResultTy
+ parseGRX32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ llvm_unreachable("GRX32 should only be used for pseudo instructions");
}
OperandMatchResultTy
parseGR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseRegister(Operands, 'r', GR64Regs, SystemZOperand::GR64Reg);
+ return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, GR64Reg);
}
OperandMatchResultTy
parseGR128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseRegister(Operands, 'r', GR128Regs, SystemZOperand::GR128Reg);
+ return parseRegister(Operands, RegGR, SystemZMC::GR128Regs, GR128Reg);
}
OperandMatchResultTy
parseADDR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseRegister(Operands, 'r', GR32Regs, SystemZOperand::ADDR32Reg,
- true);
+ return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, ADDR32Reg);
}
OperandMatchResultTy
parseADDR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseRegister(Operands, 'r', GR64Regs, SystemZOperand::ADDR64Reg,
- true);
+ return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, ADDR64Reg);
}
OperandMatchResultTy
parseADDR128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -375,30 +388,45 @@ public:
}
OperandMatchResultTy
parseFP32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseRegister(Operands, 'f', FP32Regs, SystemZOperand::FP32Reg);
+ return parseRegister(Operands, RegFP, SystemZMC::FP32Regs, FP32Reg);
}
OperandMatchResultTy
parseFP64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseRegister(Operands, 'f', FP64Regs, SystemZOperand::FP64Reg);
+ return parseRegister(Operands, RegFP, SystemZMC::FP64Regs, FP64Reg);
}
OperandMatchResultTy
parseFP128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseRegister(Operands, 'f', FP128Regs, SystemZOperand::FP128Reg);
+ return parseRegister(Operands, RegFP, SystemZMC::FP128Regs, FP128Reg);
}
OperandMatchResultTy
parseBDAddr32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseAddress(Operands, GR32Regs, SystemZOperand::ADDR32Reg, false);
+ return parseAddress(Operands, SystemZMC::GR32Regs, ADDR32Reg, BDMem);
}
OperandMatchResultTy
parseBDAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseAddress(Operands, GR64Regs, SystemZOperand::ADDR64Reg, false);
+ return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDMem);
}
OperandMatchResultTy
parseBDXAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return parseAddress(Operands, GR64Regs, SystemZOperand::ADDR64Reg, true);
+ return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDXMem);
+ }
+ OperandMatchResultTy
+ parseBDLAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDLMem);
}
OperandMatchResultTy
parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ OperandMatchResultTy
+ parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ int64_t MinVal, int64_t MaxVal);
+ OperandMatchResultTy
+ parsePCRel16(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parsePCRel(Operands, -(1LL << 16), (1LL << 16) - 1);
+ }
+ OperandMatchResultTy
+ parsePCRel32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parsePCRel(Operands, -(1LL << 32), (1LL << 32) - 1);
+ }
};
}
@@ -417,122 +445,160 @@ bool SystemZAsmParser::parseRegister(Register &Reg) {
// Eat the % prefix.
if (Parser.getTok().isNot(AsmToken::Percent))
- return true;
+ return Error(Parser.getTok().getLoc(), "register expected");
Parser.Lex();
// Expect a register name.
if (Parser.getTok().isNot(AsmToken::Identifier))
- return true;
+ return Error(Reg.StartLoc, "invalid register");
- // Check the prefix.
+ // Check that there's a prefix.
StringRef Name = Parser.getTok().getString();
if (Name.size() < 2)
- return true;
- Reg.Prefix = Name[0];
+ return Error(Reg.StartLoc, "invalid register");
+ char Prefix = Name[0];
// Treat the rest of the register name as a register number.
- if (Name.substr(1).getAsInteger(10, Reg.Number))
- return true;
+ if (Name.substr(1).getAsInteger(10, Reg.Num))
+ return Error(Reg.StartLoc, "invalid register");
+
+ // Look for valid combinations of prefix and number.
+ if (Prefix == 'r' && Reg.Num < 16)
+ Reg.Group = RegGR;
+ else if (Prefix == 'f' && Reg.Num < 16)
+ Reg.Group = RegFP;
+ else if (Prefix == 'a' && Reg.Num < 16)
+ Reg.Group = RegAccess;
+ else
+ return Error(Reg.StartLoc, "invalid register");
Reg.EndLoc = Parser.getTok().getLoc();
Parser.Lex();
return false;
}
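In isolation, the prefix/number split performed above amounts to something like the following hypothetical helper (splitRegName is not part of the patch; it assumes llvm::StringRef as already included by this file):

    // Hypothetical helper. Returns true on failure, mirroring the
    // convention used by parseRegister above.
    static bool splitRegName(StringRef Name, char &Prefix, unsigned &Num) {
      if (Name.size() < 2)
        return true;             // need at least a prefix and one digit
      Prefix = Name[0];          // 'r', 'f' or 'a'
      return Name.substr(1).getAsInteger(10, Num) || Num > 15;
    }

For example, "r7" splits into Prefix == 'r' and Num == 7.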
-// Parse a register with prefix Prefix and convert it to LLVM numbering.
-// Regs maps asm register numbers to LLVM register numbers, with zero
-// entries indicating an invalid register. IsAddress says whether the
-// register appears in an address context.
-SystemZAsmParser::OperandMatchResultTy
-SystemZAsmParser::parseRegister(Register &Reg, char Prefix,
- const unsigned *Regs, bool IsAddress) {
+// Parse a register of group Group. If Regs is nonnull, use it to map
+// the raw register number to LLVM numbering, with zero entries indicating
+// an invalid register. IsAddress says whether the register appears in an
+// address context.
+bool SystemZAsmParser::parseRegister(Register &Reg, RegisterGroup Group,
+ const unsigned *Regs, bool IsAddress) {
if (parseRegister(Reg))
- return MatchOperand_NoMatch;
- if (Reg.Prefix != Prefix || Reg.Number > 15 || Regs[Reg.Number] == 0) {
- Error(Reg.StartLoc, "invalid register");
- return MatchOperand_ParseFail;
- }
- if (Reg.Number == 0 && IsAddress) {
- Error(Reg.StartLoc, "%r0 used in an address");
- return MatchOperand_ParseFail;
- }
- Reg.Number = Regs[Reg.Number];
- return MatchOperand_Success;
+ return true;
+ if (Reg.Group != Group)
+ return Error(Reg.StartLoc, "invalid operand for instruction");
+ if (Regs && Regs[Reg.Num] == 0)
+ return Error(Reg.StartLoc, "invalid register pair");
+ if (Reg.Num == 0 && IsAddress)
+ return Error(Reg.StartLoc, "%r0 used in an address");
+ if (Regs)
+ Reg.Num = Regs[Reg.Num];
+ return false;
}
-// Parse a register and add it to Operands. Prefix is 'r' for GPRs,
-// 'f' for FPRs, etc. Regs maps asm register numbers to LLVM register numbers,
-// with zero entries indicating an invalid register. Kind is the type of
-// register represented by Regs and IsAddress says whether the register is
-// being parsed in an address context, meaning that %r0 evaluates as 0.
+// Parse a register and add it to Operands. The other arguments are as above.
SystemZAsmParser::OperandMatchResultTy
SystemZAsmParser::parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- char Prefix, const unsigned *Regs,
- SystemZOperand::RegisterKind Kind,
- bool IsAddress) {
+ RegisterGroup Group, const unsigned *Regs,
+ RegisterKind Kind) {
+ if (Parser.getTok().isNot(AsmToken::Percent))
+ return MatchOperand_NoMatch;
+
Register Reg;
- OperandMatchResultTy Result = parseRegister(Reg, Prefix, Regs, IsAddress);
- if (Result == MatchOperand_Success)
- Operands.push_back(SystemZOperand::createReg(Kind, Reg.Number,
- Reg.StartLoc, Reg.EndLoc));
- return Result;
-}
+ bool IsAddress = (Kind == ADDR32Reg || Kind == ADDR64Reg);
+ if (parseRegister(Reg, Group, Regs, IsAddress))
+ return MatchOperand_ParseFail;
-// Parse a memory operand and add it to Operands. Regs maps asm register
-// numbers to LLVM address registers and RegKind says what kind of address
-// register we're using (ADDR32Reg or ADDR64Reg). HasIndex says whether
-// the address allows index registers.
-SystemZAsmParser::OperandMatchResultTy
-SystemZAsmParser::parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- const unsigned *Regs,
- SystemZOperand::RegisterKind RegKind,
- bool HasIndex) {
- SMLoc StartLoc = Parser.getTok().getLoc();
+ Operands.push_back(SystemZOperand::createReg(Kind, Reg.Num,
+ Reg.StartLoc, Reg.EndLoc));
+ return MatchOperand_Success;
+}
+// Parse a memory operand into Base, Disp, Index and Length.
+// Regs maps asm register numbers to LLVM register numbers and RegKind
+// says what kind of address register we're using (ADDR32Reg or ADDR64Reg).
+bool SystemZAsmParser::parseAddress(unsigned &Base, const MCExpr *&Disp,
+ unsigned &Index, const MCExpr *&Length,
+ const unsigned *Regs,
+ RegisterKind RegKind) {
// Parse the displacement, which must always be present.
- const MCExpr *Disp;
if (getParser().parseExpression(Disp))
- return MatchOperand_NoMatch;
+ return true;
// Parse the optional base and index.
- unsigned Index = 0;
- unsigned Base = 0;
+ Index = 0;
+ Base = 0;
+ Length = 0;
if (getLexer().is(AsmToken::LParen)) {
Parser.Lex();
- // Parse the first register.
- Register Reg;
- OperandMatchResultTy Result = parseRegister(Reg, 'r', GR64Regs, true);
- if (Result != MatchOperand_Success)
- return Result;
+ if (getLexer().is(AsmToken::Percent)) {
+ // Parse the first register and decide whether it's a base or an index.
+ Register Reg;
+ if (parseRegister(Reg, RegGR, Regs, RegKind))
+ return true;
+ if (getLexer().is(AsmToken::Comma))
+ Index = Reg.Num;
+ else
+ Base = Reg.Num;
+ } else {
+ // Parse the length.
+ if (getParser().parseExpression(Length))
+ return true;
+ }
- // Check whether there's a second register. If so, the one that we
- // just parsed was the index.
+ // Check whether there's a second register. It's the base if so.
if (getLexer().is(AsmToken::Comma)) {
Parser.Lex();
-
- if (!HasIndex) {
- Error(Reg.StartLoc, "invalid use of indexed addressing");
- return MatchOperand_ParseFail;
- }
-
- Index = Reg.Number;
- Result = parseRegister(Reg, 'r', GR64Regs, true);
- if (Result != MatchOperand_Success)
- return Result;
+ Register Reg;
+ if (parseRegister(Reg, RegGR, Regs, RegKind))
+ return true;
+ Base = Reg.Num;
}
- Base = Reg.Number;
// Consume the closing bracket.
if (getLexer().isNot(AsmToken::RParen))
- return MatchOperand_NoMatch;
+ return Error(Parser.getTok().getLoc(), "unexpected token in address");
Parser.Lex();
}
+ return false;
+}
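The address forms this routine accepts can be summarised as follows (a sketch of the grammar, not new code):

    //   D           -> Base = 0, Index = 0, Length = 0
    //   D(%rB)      -> Base = B
    //   D(%rX,%rB)  -> Index = X, Base = B
    //   D(L)        -> Length = L
    //   D(L,%rB)    -> Length = L, Base = B

Whether a given combination is legal for the instruction is decided afterwards by the Operands overload of parseAddress and by the isMem* predicates, not here.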
+
+// Parse a memory operand and add it to Operands. The other arguments
+// are as above.
+SystemZAsmParser::OperandMatchResultTy
+SystemZAsmParser::parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ const unsigned *Regs, RegisterKind RegKind,
+ MemoryKind MemKind) {
+ SMLoc StartLoc = Parser.getTok().getLoc();
+ unsigned Base, Index;
+ const MCExpr *Disp;
+ const MCExpr *Length;
+ if (parseAddress(Base, Disp, Index, Length, Regs, RegKind))
+ return MatchOperand_ParseFail;
+
+  if (Index && MemKind != BDXMem) {
+    Error(StartLoc, "invalid use of indexed addressing");
+    return MatchOperand_ParseFail;
+  }
+
+  if (Length && MemKind != BDLMem) {
+    Error(StartLoc, "invalid use of length addressing");
+    return MatchOperand_ParseFail;
+  }
+
+  if (!Length && MemKind == BDLMem) {
+    Error(StartLoc, "missing length in address");
+    return MatchOperand_ParseFail;
+  }

SMLoc EndLoc =
SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(SystemZOperand::createMem(RegKind, Base, Disp, Index,
- StartLoc, EndLoc));
+ Length, StartLoc, EndLoc));
return MatchOperand_Success;
}
@@ -544,13 +610,14 @@ bool SystemZAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
SMLoc &EndLoc) {
Register Reg;
if (parseRegister(Reg))
- return Error(Reg.StartLoc, "register expected");
- if (Reg.Prefix == 'r' && Reg.Number < 16)
- RegNo = GR64Regs[Reg.Number];
- else if (Reg.Prefix == 'f' && Reg.Number < 16)
- RegNo = FP64Regs[Reg.Number];
+ return true;
+ if (Reg.Group == RegGR)
+ RegNo = SystemZMC::GR64Regs[Reg.Num];
+ else if (Reg.Group == RegFP)
+ RegNo = SystemZMC::FP64Regs[Reg.Num];
else
- return Error(Reg.StartLoc, "invalid register");
+ // FIXME: Access registers aren't modelled as LLVM registers yet.
+ return Error(Reg.StartLoc, "invalid operand for instruction");
StartLoc = Reg.StartLoc;
EndLoc = Reg.EndLoc;
return false;
@@ -604,15 +671,33 @@ parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
if (ResTy == MatchOperand_ParseFail)
return true;
- // The only other type of operand is an immediate.
- const MCExpr *Expr;
+ // Check for a register. All real register operands should have used
+ // a context-dependent parse routine, which gives the required register
+ // class. The code is here to mop up other cases, like those where
+ // the instruction isn't recognized.
+ if (Parser.getTok().is(AsmToken::Percent)) {
+ Register Reg;
+ if (parseRegister(Reg))
+ return true;
+ Operands.push_back(SystemZOperand::createInvalid(Reg.StartLoc, Reg.EndLoc));
+ return false;
+ }
+
+ // The only other type of operand is an immediate or address. As above,
+ // real address operands should have used a context-dependent parse routine,
+ // so we treat any plain expression as an immediate.
SMLoc StartLoc = Parser.getTok().getLoc();
- if (getParser().parseExpression(Expr))
+ unsigned Base, Index;
+ const MCExpr *Expr, *Length;
+ if (parseAddress(Base, Expr, Index, Length, SystemZMC::GR64Regs, ADDR64Reg))
return true;
SMLoc EndLoc =
SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
+ if (Base || Index || Length)
+ Operands.push_back(SystemZOperand::createInvalid(StartLoc, EndLoc));
+ else
+ Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
return false;
}
@@ -671,15 +756,47 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::
parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- Register Reg;
- if (parseRegister(Reg))
+ if (Parser.getTok().isNot(AsmToken::Percent))
return MatchOperand_NoMatch;
- if (Reg.Prefix != 'a' || Reg.Number > 15) {
- Error(Reg.StartLoc, "invalid register");
+
+ Register Reg;
+ if (parseRegister(Reg, RegAccess, 0))
return MatchOperand_ParseFail;
+
+ Operands.push_back(SystemZOperand::createAccessReg(Reg.Num,
+ Reg.StartLoc,
+ Reg.EndLoc));
+ return MatchOperand_Success;
+}
+
+SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::
+parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ int64_t MinVal, int64_t MaxVal) {
+ MCContext &Ctx = getContext();
+ MCStreamer &Out = getStreamer();
+ const MCExpr *Expr;
+ SMLoc StartLoc = Parser.getTok().getLoc();
+ if (getParser().parseExpression(Expr))
+ return MatchOperand_NoMatch;
+
+ // For consistency with the GNU assembler, treat immediates as offsets
+ // from ".".
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) {
+ int64_t Value = CE->getValue();
+ if ((Value & 1) || Value < MinVal || Value > MaxVal) {
+ Error(StartLoc, "offset out of range");
+ return MatchOperand_ParseFail;
+ }
+ MCSymbol *Sym = Ctx.CreateTempSymbol();
+ Out.EmitLabel(Sym);
+ const MCExpr *Base = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None,
+ Ctx);
+ Expr = Value == 0 ? Base : MCBinaryExpr::CreateAdd(Base, Expr, Ctx);
}
- Operands.push_back(SystemZOperand::createAccessReg(Reg.Number,
- Reg.StartLoc, Reg.EndLoc));
+
+ SMLoc EndLoc =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
return MatchOperand_Success;
}
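For a constant operand such as 0x100, the code above effectively builds the expression sketched below, using the same LLVM 3.4 MC APIs:

    // Sketch: equivalent of the constant case for Value == 0x100.
    MCSymbol *Dot = Ctx.CreateTempSymbol();
    Out.EmitLabel(Dot);                      // marks the current location "."
    const MCExpr *Off = MCConstantExpr::Create(0x100, Ctx);
    const MCExpr *PCRel = MCBinaryExpr::CreateAdd(
        MCSymbolRefExpr::Create(Dot, MCSymbolRefExpr::VK_None, Ctx), Off, Ctx);

The operand therefore behaves as ". + 0x100", matching the GNU assembler.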
diff --git a/lib/Target/SystemZ/CMakeLists.txt b/lib/Target/SystemZ/CMakeLists.txt
index 67b17fcc5936..d21c0a8086fb 100644
--- a/lib/Target/SystemZ/CMakeLists.txt
+++ b/lib/Target/SystemZ/CMakeLists.txt
@@ -4,6 +4,7 @@ tablegen(LLVM SystemZGenAsmMatcher.inc -gen-asm-matcher)
tablegen(LLVM SystemZGenAsmWriter.inc -gen-asm-writer)
tablegen(LLVM SystemZGenCallingConv.inc -gen-callingconv)
tablegen(LLVM SystemZGenDAGISel.inc -gen-dag-isel)
+tablegen(LLVM SystemZGenDisassemblerTables.inc -gen-disassembler)
tablegen(LLVM SystemZGenMCCodeEmitter.inc -gen-emitter -mc-emitter)
tablegen(LLVM SystemZGenInstrInfo.inc -gen-instr-info)
tablegen(LLVM SystemZGenRegisterInfo.inc -gen-register-info)
@@ -14,19 +15,25 @@ add_llvm_target(SystemZCodeGen
SystemZAsmPrinter.cpp
SystemZCallingConv.cpp
SystemZConstantPoolValue.cpp
+ SystemZElimCompare.cpp
SystemZFrameLowering.cpp
SystemZISelDAGToDAG.cpp
SystemZISelLowering.cpp
SystemZInstrInfo.cpp
+ SystemZLongBranch.cpp
+ SystemZMachineFunctionInfo.cpp
SystemZMCInstLower.cpp
SystemZRegisterInfo.cpp
+ SystemZSelectionDAGInfo.cpp
+ SystemZShortenInst.cpp
SystemZSubtarget.cpp
SystemZTargetMachine.cpp
)
-add_dependencies(LLVMSystemZCodeGen intrinsics_gen)
+add_dependencies(LLVMSystemZCodeGen SystemZCommonTableGen intrinsics_gen)
add_subdirectory(AsmParser)
+add_subdirectory(Disassembler)
add_subdirectory(InstPrinter)
add_subdirectory(TargetInfo)
add_subdirectory(MCTargetDesc)
diff --git a/lib/Target/SystemZ/Disassembler/CMakeLists.txt b/lib/Target/SystemZ/Disassembler/CMakeLists.txt
new file mode 100644
index 000000000000..5bc1859816fa
--- /dev/null
+++ b/lib/Target/SystemZ/Disassembler/CMakeLists.txt
@@ -0,0 +1,7 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMSystemZDisassembler
+ SystemZDisassembler.cpp
+ )
+
+add_dependencies(LLVMSystemZDisassembler SystemZCommonTableGen)
diff --git a/lib/Target/MBlaze/Disassembler/LLVMBuild.txt b/lib/Target/SystemZ/Disassembler/LLVMBuild.txt
index 28dd9dc98da6..c3081f5447bc 100644
--- a/lib/Target/MBlaze/Disassembler/LLVMBuild.txt
+++ b/lib/Target/SystemZ/Disassembler/LLVMBuild.txt
@@ -1,4 +1,4 @@
-;===- ./lib/Target/MBlaze/Disassembler/LLVMBuild.txt -----------*- Conf -*--===;
+;===-- ./lib/Target/SystemZ/Disassembler/LLVMBuild.txt ---------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
@@ -17,7 +17,7 @@
[component_0]
type = Library
-name = MBlazeDisassembler
-parent = MBlaze
-required_libraries = MBlazeDesc MBlazeInfo MC Support
-add_to_library_groups = MBlaze
+name = SystemZDisassembler
+parent = SystemZ
+required_libraries = MC Support SystemZDesc SystemZInfo
+add_to_library_groups = SystemZ
diff --git a/lib/Target/MBlaze/MCTargetDesc/Makefile b/lib/Target/SystemZ/Disassembler/Makefile
index 71075ffbf47c..efc4cc8e9cb0 100644
--- a/lib/Target/MBlaze/MCTargetDesc/Makefile
+++ b/lib/Target/SystemZ/Disassembler/Makefile
@@ -1,4 +1,4 @@
-##===- lib/Target/MBlaze/TargetDesc/Makefile ---------------*- Makefile -*-===##
+##===-- lib/Target/SystemZ/Disassembler/Makefile -----------*- Makefile -*-===##
#
# The LLVM Compiler Infrastructure
#
@@ -8,9 +8,9 @@
##===----------------------------------------------------------------------===##
LEVEL = ../../../..
-LIBRARYNAME = LLVMMBlazeDesc
+LIBRARYNAME = LLVMSystemZDisassembler
-# Hack: we need to include 'main' target directory to grab private headers
+# Hack: we need to include 'main' target directory to grab private headers
CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
include $(LEVEL)/Makefile.common
diff --git a/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp b/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
new file mode 100644
index 000000000000..fc3c38d2f343
--- /dev/null
+++ b/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
@@ -0,0 +1,323 @@
+//===-- SystemZDisassembler.cpp - Disassembler for SystemZ ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZ.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+typedef MCDisassembler::DecodeStatus DecodeStatus;
+
+namespace {
+class SystemZDisassembler : public MCDisassembler {
+public:
+ SystemZDisassembler(const MCSubtargetInfo &STI)
+ : MCDisassembler(STI) {}
+ virtual ~SystemZDisassembler() {}
+
+ // Override MCDisassembler.
+ virtual DecodeStatus getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const LLVM_OVERRIDE;
+};
+} // end anonymous namespace
+
+static MCDisassembler *createSystemZDisassembler(const Target &T,
+ const MCSubtargetInfo &STI) {
+ return new SystemZDisassembler(STI);
+}
+
+extern "C" void LLVMInitializeSystemZDisassembler() {
+ // Register the disassembler.
+ TargetRegistry::RegisterMCDisassembler(TheSystemZTarget,
+ createSystemZDisassembler);
+}
+
+static DecodeStatus decodeRegisterClass(MCInst &Inst, uint64_t RegNo,
+ const unsigned *Regs) {
+ assert(RegNo < 16 && "Invalid register");
+ RegNo = Regs[RegNo];
+ if (RegNo == 0)
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::CreateReg(RegNo));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeGR32BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::GR32Regs);
+}
+
+static DecodeStatus DecodeGRH32BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::GRH32Regs);
+}
+
+static DecodeStatus DecodeGR64BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus DecodeGR128BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::GR128Regs);
+}
+
+static DecodeStatus DecodeADDR64BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus DecodeFP32BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::FP32Regs);
+}
+
+static DecodeStatus DecodeFP64BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::FP64Regs);
+}
+
+static DecodeStatus DecodeFP128BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::FP128Regs);
+}
+
+template<unsigned N>
+static DecodeStatus decodeUImmOperand(MCInst &Inst, uint64_t Imm) {
+ assert(isUInt<N>(Imm) && "Invalid immediate");
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ return MCDisassembler::Success;
+}
+
+template<unsigned N>
+static DecodeStatus decodeSImmOperand(MCInst &Inst, uint64_t Imm) {
+ assert(isUInt<N>(Imm) && "Invalid immediate");
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<N>(Imm)));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeAccessRegOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeUImmOperand<4>(Inst, Imm);
+}
+
+static DecodeStatus decodeU4ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeUImmOperand<4>(Inst, Imm);
+}
+
+static DecodeStatus decodeU6ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeUImmOperand<6>(Inst, Imm);
+}
+
+static DecodeStatus decodeU8ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeUImmOperand<8>(Inst, Imm);
+}
+
+static DecodeStatus decodeU16ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeUImmOperand<16>(Inst, Imm);
+}
+
+static DecodeStatus decodeU32ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeUImmOperand<32>(Inst, Imm);
+}
+
+static DecodeStatus decodeS8ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeSImmOperand<8>(Inst, Imm);
+}
+
+static DecodeStatus decodeS16ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeSImmOperand<16>(Inst, Imm);
+}
+
+static DecodeStatus decodeS32ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeSImmOperand<32>(Inst, Imm);
+}
+
+template<unsigned N>
+static DecodeStatus decodePCDBLOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address) {
+ assert(isUInt<N>(Imm) && "Invalid PC-relative offset");
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<N>(Imm) * 2 + Address));
+ return MCDisassembler::Success;
+}
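A worked example of the halfword scaling performed above:

    // Sketch: Imm = 0xfffe in a PC16DBL field of an instruction at 0x1000.
    //   SignExtend64<16>(0xfffe) == -2
    //   target = -2 * 2 + 0x1000 == 0xffc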
+
+static DecodeStatus decodePC16DBLOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodePCDBLOperand<16>(Inst, Imm, Address);
+}
+
+static DecodeStatus decodePC32DBLOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodePCDBLOperand<32>(Inst, Imm, Address);
+}
+
+static DecodeStatus decodeBDAddr12Operand(MCInst &Inst, uint64_t Field,
+ const unsigned *Regs) {
+ uint64_t Base = Field >> 12;
+ uint64_t Disp = Field & 0xfff;
+ assert(Base < 16 && "Invalid BDAddr12");
+ Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 0 : Regs[Base]));
+ Inst.addOperand(MCOperand::CreateImm(Disp));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeBDAddr20Operand(MCInst &Inst, uint64_t Field,
+ const unsigned *Regs) {
+ uint64_t Base = Field >> 20;
+ uint64_t Disp = ((Field << 12) & 0xff000) | ((Field >> 8) & 0xfff);
+ assert(Base < 16 && "Invalid BDAddr20");
+ Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 0 : Regs[Base]));
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<20>(Disp)));
+ return MCDisassembler::Success;
+}
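A worked example of the 20-bit displacement swizzle (DL is stored before DH in the instruction):

    // Sketch: Field = 0x500801 encodes B = 5, DL = 0x008, DH = 0x01.
    //   Base = 0x500801 >> 20        == 0x5
    //   Disp = (0x01 << 12) | 0x008  == 0x1008 (4104)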
+
+static DecodeStatus decodeBDXAddr12Operand(MCInst &Inst, uint64_t Field,
+ const unsigned *Regs) {
+ uint64_t Index = Field >> 16;
+ uint64_t Base = (Field >> 12) & 0xf;
+ uint64_t Disp = Field & 0xfff;
+ assert(Index < 16 && "Invalid BDXAddr12");
+ Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 0 : Regs[Base]));
+ Inst.addOperand(MCOperand::CreateImm(Disp));
+ Inst.addOperand(MCOperand::CreateReg(Index == 0 ? 0 : Regs[Index]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeBDXAddr20Operand(MCInst &Inst, uint64_t Field,
+ const unsigned *Regs) {
+ uint64_t Index = Field >> 24;
+ uint64_t Base = (Field >> 20) & 0xf;
+ uint64_t Disp = ((Field & 0xfff00) >> 8) | ((Field & 0xff) << 12);
+ assert(Index < 16 && "Invalid BDXAddr20");
+ Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 0 : Regs[Base]));
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<20>(Disp)));
+ Inst.addOperand(MCOperand::CreateReg(Index == 0 ? 0 : Regs[Index]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeBDLAddr12Len8Operand(MCInst &Inst, uint64_t Field,
+ const unsigned *Regs) {
+ uint64_t Length = Field >> 16;
+ uint64_t Base = (Field >> 12) & 0xf;
+ uint64_t Disp = Field & 0xfff;
+ assert(Length < 256 && "Invalid BDLAddr12Len8");
+ Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 0 : Regs[Base]));
+ Inst.addOperand(MCOperand::CreateImm(Disp));
+ Inst.addOperand(MCOperand::CreateImm(Length + 1));
+ return MCDisassembler::Success;
+}
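The +1 reflects the architectural convention that the L field holds the operand length minus one; for example:

    // Sketch: "mvc 0(8,%r2),0(%r3)" encodes L = 7, so the decoder above
    // emits a length operand of 7 + 1 == 8.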
+
+static DecodeStatus decodeBDAddr32Disp12Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDAddr12Operand(Inst, Field, SystemZMC::GR32Regs);
+}
+
+static DecodeStatus decodeBDAddr32Disp20Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDAddr20Operand(Inst, Field, SystemZMC::GR32Regs);
+}
+
+static DecodeStatus decodeBDAddr64Disp12Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDAddr12Operand(Inst, Field, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus decodeBDAddr64Disp20Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDAddr20Operand(Inst, Field, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus decodeBDXAddr64Disp12Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDXAddr12Operand(Inst, Field, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus decodeBDXAddr64Disp20Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDXAddr20Operand(Inst, Field, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus decodeBDLAddr64Disp12Len8Operand(MCInst &Inst,
+ uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDLAddr12Len8Operand(Inst, Field, SystemZMC::GR64Regs);
+}
+
+#include "SystemZGenDisassemblerTables.inc"
+
+DecodeStatus SystemZDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
+ const MemoryObject &Region,
+ uint64_t Address,
+ raw_ostream &os,
+ raw_ostream &cs) const {
+ // Get the first two bytes of the instruction.
+ uint8_t Bytes[6];
+ Size = 0;
+ if (Region.readBytes(Address, 2, Bytes) == -1)
+ return MCDisassembler::Fail;
+
+ // The top 2 bits of the first byte specify the size.
+ const uint8_t *Table;
+ if (Bytes[0] < 0x40) {
+ Size = 2;
+ Table = DecoderTable16;
+ } else if (Bytes[0] < 0xc0) {
+ Size = 4;
+ Table = DecoderTable32;
+ } else {
+ Size = 6;
+ Table = DecoderTable48;
+ }
+
+ // Read any remaining bytes.
+ if (Size > 2 && Region.readBytes(Address + 2, Size - 2, Bytes + 2) == -1)
+ return MCDisassembler::Fail;
+
+ // Construct the instruction.
+ uint64_t Inst = 0;
+ for (uint64_t I = 0; I < Size; ++I)
+ Inst = (Inst << 8) | Bytes[I];
+
+ return decodeInstruction(Table, MI, Inst, Address, this, STI);
+}
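The length rule used above follows the usual SystemZ opcode layout; a few representative first bytes:

    // Sketch: 0x07 (0b00......) -> 2 bytes, DecoderTable16  (e.g. BCR)
    //         0x58 (0b01......) -> 4 bytes, DecoderTable32  (e.g. L)
    //         0xe3 (0b11......) -> 6 bytes, DecoderTable48  (e.g. LG)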
diff --git a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
index d73cf4980891..e1e64d3ac177 100644
--- a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
+++ b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
@@ -114,10 +114,14 @@ void SystemZInstPrinter::printAccessRegOperand(const MCInst *MI, int OpNum,
O << "%a" << (unsigned int)Value;
}
-void SystemZInstPrinter::printCallOperand(const MCInst *MI, int OpNum,
- raw_ostream &O) {
- printOperand(MI, OpNum, O);
- O << "@PLT";
+void SystemZInstPrinter::printPCRelOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(OpNum);
+ if (MO.isImm()) {
+ O << "0x";
+ O.write_hex(MO.getImm());
+ } else
+ O << *MO.getExpr();
}
void SystemZInstPrinter::printOperand(const MCInst *MI, int OpNum,
@@ -138,6 +142,17 @@ void SystemZInstPrinter::printBDXAddrOperand(const MCInst *MI, int OpNum,
MI->getOperand(OpNum + 2).getReg(), O);
}
+void SystemZInstPrinter::printBDLAddrOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ unsigned Base = MI->getOperand(OpNum).getReg();
+ uint64_t Disp = MI->getOperand(OpNum + 1).getImm();
+ uint64_t Length = MI->getOperand(OpNum + 2).getImm();
+ O << Disp << '(' << Length;
+ if (Base)
+ O << ",%" << getRegisterName(Base);
+ O << ')';
+}
+
void SystemZInstPrinter::printCond4Operand(const MCInst *MI, int OpNum,
raw_ostream &O) {
static const char *const CondNames[] = {
diff --git a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
index b82e79d93cd8..734ecf0ff230 100644
--- a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
+++ b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
@@ -48,6 +48,7 @@ private:
void printOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printBDAddrOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printBDXAddrOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printBDLAddrOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printU4ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printU6ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printS8ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
@@ -56,7 +57,7 @@ private:
void printU16ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printS32ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printU32ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
- void printCallOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printPCRelOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printAccessRegOperand(const MCInst *MI, int OpNum, raw_ostream &O);
// Print the mnemonic for a condition-code mask ("ne", "lh", etc.)
diff --git a/lib/Target/SystemZ/LLVMBuild.txt b/lib/Target/SystemZ/LLVMBuild.txt
index aba0de27ac07..95e657f7bd15 100644
--- a/lib/Target/SystemZ/LLVMBuild.txt
+++ b/lib/Target/SystemZ/LLVMBuild.txt
@@ -16,7 +16,7 @@
;===------------------------------------------------------------------------===;
[common]
-subdirectories = AsmParser InstPrinter MCTargetDesc TargetInfo
+subdirectories = AsmParser Disassembler InstPrinter MCTargetDesc TargetInfo
[component_0]
type = TargetGroup
@@ -24,6 +24,7 @@ name = SystemZ
parent = Target
has_asmparser = 1
has_asmprinter = 1
+has_disassembler = 1
has_jit = 1
[component_1]
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
index e901c6c6919c..26a8faeea10b 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
@@ -35,16 +35,6 @@ static uint64_t extractBitsForFixup(MCFixupKind Kind, uint64_t Value) {
llvm_unreachable("Unknown fixup kind!");
}
-// If Opcode can be relaxed, return the relaxed form, otherwise return 0.
-static unsigned getRelaxedOpcode(unsigned Opcode) {
- switch (Opcode) {
- case SystemZ::BRC: return SystemZ::BRCL;
- case SystemZ::J: return SystemZ::JG;
- case SystemZ::BRAS: return SystemZ::BRASL;
- }
- return 0;
-}
-
namespace {
class SystemZMCAsmBackend : public MCAsmBackend {
uint8_t OSABI;
@@ -60,14 +50,20 @@ public:
LLVM_OVERRIDE;
virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const LLVM_OVERRIDE;
- virtual bool mayNeedRelaxation(const MCInst &Inst) const LLVM_OVERRIDE;
+ virtual bool mayNeedRelaxation(const MCInst &Inst) const LLVM_OVERRIDE {
+ return false;
+ }
virtual bool fixupNeedsRelaxation(const MCFixup &Fixup,
uint64_t Value,
const MCRelaxableFragment *Fragment,
const MCAsmLayout &Layout) const
- LLVM_OVERRIDE;
+ LLVM_OVERRIDE {
+ return false;
+ }
virtual void relaxInstruction(const MCInst &Inst,
- MCInst &Res) const LLVM_OVERRIDE;
+ MCInst &Res) const LLVM_OVERRIDE {
+    llvm_unreachable("SystemZ does not have assembler relaxation");
+ }
virtual bool writeNopData(uint64_t Count,
MCObjectWriter *OW) const LLVM_OVERRIDE;
virtual MCObjectWriter *createObjectWriter(raw_ostream &OS) const
@@ -115,28 +111,6 @@ void SystemZMCAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
}
}
-bool SystemZMCAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
- return getRelaxedOpcode(Inst.getOpcode()) != 0;
-}
-
-bool
-SystemZMCAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
- const MCRelaxableFragment *Fragment,
- const MCAsmLayout &Layout) const {
- // At the moment we just need to relax 16-bit fields to wider fields.
- Value = extractBitsForFixup(Fixup.getKind(), Value);
- return (int16_t)Value != (int64_t)Value;
-}
-
-void SystemZMCAsmBackend::relaxInstruction(const MCInst &Inst,
- MCInst &Res) const {
- unsigned Opcode = getRelaxedOpcode(Inst.getOpcode());
- assert(Opcode && "Unexpected insn to relax");
- Res = Inst;
- Res.setOpcode(Opcode);
-}
-
bool SystemZMCAsmBackend::writeNopData(uint64_t Count,
MCObjectWriter *OW) const {
for (uint64_t I = 0; I != Count; ++I)
@@ -144,8 +118,9 @@ bool SystemZMCAsmBackend::writeNopData(uint64_t Count,
return true;
}
-MCAsmBackend *llvm::createSystemZMCAsmBackend(const Target &T, StringRef TT,
- StringRef CPU) {
+MCAsmBackend *llvm::createSystemZMCAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
return new SystemZMCAsmBackend(OSABI);
}
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
index c96a0d4c67d5..965c41e2d151 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
@@ -13,16 +13,14 @@
using namespace llvm;
-SystemZMCAsmInfo::SystemZMCAsmInfo(const Target &T, StringRef TT) {
+SystemZMCAsmInfo::SystemZMCAsmInfo(StringRef TT) {
PointerSize = 8;
CalleeSaveStackSlotSize = 8;
IsLittleEndian = false;
CommentString = "#";
- PCSymbol = ".";
GlobalPrefix = "";
PrivateGlobalPrefix = ".L";
- WeakRefDirective = "\t.weak\t";
ZeroDirective = "\t.space\t";
Data64bitsDirective = "\t.quad\t";
UsesELFSectionDirectiveForBSS = true;
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
index bac1bca38128..b9ac92a6934f 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
@@ -10,16 +10,15 @@
#ifndef SystemZTARGETASMINFO_H
#define SystemZTARGETASMINFO_H
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmInfoELF.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
-class Target;
class StringRef;
-class SystemZMCAsmInfo : public MCAsmInfo {
+class SystemZMCAsmInfo : public MCAsmInfoELF {
public:
- explicit SystemZMCAsmInfo(const Target &T, StringRef TT);
+ explicit SystemZMCAsmInfo(StringRef TT);
  // Override MCAsmInfo.
virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
index ea2250f5469b..f07ea7b31e6a 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
@@ -45,33 +45,40 @@ private:
// Called by the TableGen code to get the binary encoding of operand
// MO in MI. Fixups is the list of fixups against MI.
- unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
SmallVectorImpl<MCFixup> &Fixups) const;
+ // Called by the TableGen code to get the binary encoding of an address.
+ // The index or length, if any, is encoded first, followed by the base,
+ // followed by the displacement. In a 20-bit displacement,
+ // the low 12 bits are encoded before the high 8 bits.
+ uint64_t getBDAddr12Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ uint64_t getBDAddr20Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ uint64_t getBDXAddr12Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ uint64_t getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ uint64_t getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
// Operand OpNum of MI needs a PC-relative fixup of kind Kind at
// Offset bytes from the start of MI. Add the fixup to Fixups
// and return the in-place addend, which since we're a RELA target
// is always 0.
- unsigned getPCRelEncoding(const MCInst &MI, unsigned int OpNum,
+ uint64_t getPCRelEncoding(const MCInst &MI, unsigned OpNum,
SmallVectorImpl<MCFixup> &Fixups,
unsigned Kind, int64_t Offset) const;
- unsigned getPC16DBLEncoding(const MCInst &MI, unsigned int OpNum,
+ uint64_t getPC16DBLEncoding(const MCInst &MI, unsigned OpNum,
SmallVectorImpl<MCFixup> &Fixups) const {
return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PC16DBL, 2);
}
- unsigned getPC32DBLEncoding(const MCInst &MI, unsigned int OpNum,
+ uint64_t getPC32DBLEncoding(const MCInst &MI, unsigned OpNum,
SmallVectorImpl<MCFixup> &Fixups) const {
return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PC32DBL, 2);
}
- unsigned getPLT16DBLEncoding(const MCInst &MI, unsigned int OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PLT16DBL, 2);
- }
- unsigned getPLT32DBLEncoding(const MCInst &MI, unsigned int OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PLT32DBL, 2);
- }
};
}
@@ -95,34 +102,83 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
}
}
-unsigned SystemZMCCodeEmitter::
+uint64_t SystemZMCCodeEmitter::
getMachineOpValue(const MCInst &MI, const MCOperand &MO,
SmallVectorImpl<MCFixup> &Fixups) const {
if (MO.isReg())
- return Ctx.getRegisterInfo().getEncodingValue(MO.getReg());
+ return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
if (MO.isImm())
- return static_cast<unsigned>(MO.getImm());
+ return static_cast<uint64_t>(MO.getImm());
llvm_unreachable("Unexpected operand type!");
}
-unsigned
-SystemZMCCodeEmitter::getPCRelEncoding(const MCInst &MI, unsigned int OpNum,
+uint64_t SystemZMCCodeEmitter::
+getBDAddr12Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ assert(isUInt<4>(Base) && isUInt<12>(Disp));
+ return (Base << 12) | Disp;
+}
+
+uint64_t SystemZMCCodeEmitter::
+getBDAddr20Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ assert(isUInt<4>(Base) && isInt<20>(Disp));
+ return (Base << 20) | ((Disp & 0xfff) << 8) | ((Disp & 0xff000) >> 12);
+}
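This is the inverse of the disassembler's 20-bit decode; a worked example:

    // Sketch: Base = 5, Disp = 0x1008.
    //   (5 << 20) | ((0x1008 & 0xfff) << 8) | ((0x1008 & 0xff000) >> 12)
    //   == 0x500000 | 0x800 | 0x1 == 0x500801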
+
+uint64_t SystemZMCCodeEmitter::
+getBDXAddr12Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups);
+ assert(isUInt<4>(Base) && isUInt<12>(Disp) && isUInt<4>(Index));
+ return (Index << 16) | (Base << 12) | Disp;
+}
+
+uint64_t SystemZMCCodeEmitter::
+getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups);
+ assert(isUInt<4>(Base) && isInt<20>(Disp) && isUInt<4>(Index));
+ return (Index << 24) | (Base << 20) | ((Disp & 0xfff) << 8)
+ | ((Disp & 0xff000) >> 12);
+}
+
+uint64_t SystemZMCCodeEmitter::
+getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ uint64_t Len = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups) - 1;
+ assert(isUInt<4>(Base) && isUInt<12>(Disp) && isUInt<8>(Len));
+ return (Len << 16) | (Base << 12) | Disp;
+}
+
+uint64_t
+SystemZMCCodeEmitter::getPCRelEncoding(const MCInst &MI, unsigned OpNum,
SmallVectorImpl<MCFixup> &Fixups,
unsigned Kind, int64_t Offset) const {
const MCOperand &MO = MI.getOperand(OpNum);
- // For compatibility with the GNU assembler, treat constant operands as
- // unadjusted PC-relative offsets.
+ const MCExpr *Expr;
if (MO.isImm())
- return MO.getImm() / 2;
-
- const MCExpr *Expr = MO.getExpr();
- if (Offset) {
- // The operand value is relative to the start of MI, but the fixup
- // is relative to the operand field itself, which is Offset bytes
- // into MI. Add Offset to the relocation value to cancel out
- // this difference.
- const MCExpr *OffsetExpr = MCConstantExpr::Create(Offset, Ctx);
- Expr = MCBinaryExpr::CreateAdd(Expr, OffsetExpr, Ctx);
+ Expr = MCConstantExpr::Create(MO.getImm() + Offset, Ctx);
+ else {
+ Expr = MO.getExpr();
+ if (Offset) {
+ // The operand value is relative to the start of MI, but the fixup
+ // is relative to the operand field itself, which is Offset bytes
+ // into MI. Add Offset to the relocation value to cancel out
+ // this difference.
+ const MCExpr *OffsetExpr = MCConstantExpr::Create(Offset, Ctx);
+ Expr = MCBinaryExpr::CreateAdd(Expr, OffsetExpr, Ctx);
+ }
}
Fixups.push_back(MCFixup::Create(Offset, Expr, (MCFixupKind)Kind));
return 0;
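A worked example of why Offset is folded into the fixup expression (a sketch; the brasl field layout is assumed from the ISA, not from this patch):

    // For "brasl %r14, foo" the PC32DBL field starts 2 bytes into the
    // instruction, so Offset == 2 and the fixup expression becomes foo + 2.
    // The fixup is evaluated relative to the field itself, i.e. relative to
    // (start of insn + 2), so the +2 cancels and the relocation encodes
    // foo - (start of insn), scaled to halfwords by the fixup kind.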
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
index 49a7f47e7d5f..9e1296b912a0 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
@@ -27,11 +27,80 @@
using namespace llvm;
-static MCAsmInfo *createSystemZMCAsmInfo(const Target &T, StringRef TT) {
- MCAsmInfo *MAI = new SystemZMCAsmInfo(T, TT);
- MachineLocation FPDst(MachineLocation::VirtualFP);
- MachineLocation FPSrc(SystemZ::R15D, -SystemZMC::CFAOffsetFromInitialSP);
- MAI->addInitialFrameState(0, FPDst, FPSrc);
+const unsigned SystemZMC::GR32Regs[16] = {
+ SystemZ::R0L, SystemZ::R1L, SystemZ::R2L, SystemZ::R3L,
+ SystemZ::R4L, SystemZ::R5L, SystemZ::R6L, SystemZ::R7L,
+ SystemZ::R8L, SystemZ::R9L, SystemZ::R10L, SystemZ::R11L,
+ SystemZ::R12L, SystemZ::R13L, SystemZ::R14L, SystemZ::R15L
+};
+
+const unsigned SystemZMC::GRH32Regs[16] = {
+ SystemZ::R0H, SystemZ::R1H, SystemZ::R2H, SystemZ::R3H,
+ SystemZ::R4H, SystemZ::R5H, SystemZ::R6H, SystemZ::R7H,
+ SystemZ::R8H, SystemZ::R9H, SystemZ::R10H, SystemZ::R11H,
+ SystemZ::R12H, SystemZ::R13H, SystemZ::R14H, SystemZ::R15H
+};
+
+const unsigned SystemZMC::GR64Regs[16] = {
+ SystemZ::R0D, SystemZ::R1D, SystemZ::R2D, SystemZ::R3D,
+ SystemZ::R4D, SystemZ::R5D, SystemZ::R6D, SystemZ::R7D,
+ SystemZ::R8D, SystemZ::R9D, SystemZ::R10D, SystemZ::R11D,
+ SystemZ::R12D, SystemZ::R13D, SystemZ::R14D, SystemZ::R15D
+};
+
+const unsigned SystemZMC::GR128Regs[16] = {
+ SystemZ::R0Q, 0, SystemZ::R2Q, 0,
+ SystemZ::R4Q, 0, SystemZ::R6Q, 0,
+ SystemZ::R8Q, 0, SystemZ::R10Q, 0,
+ SystemZ::R12Q, 0, SystemZ::R14Q, 0
+};
+
+const unsigned SystemZMC::FP32Regs[16] = {
+ SystemZ::F0S, SystemZ::F1S, SystemZ::F2S, SystemZ::F3S,
+ SystemZ::F4S, SystemZ::F5S, SystemZ::F6S, SystemZ::F7S,
+ SystemZ::F8S, SystemZ::F9S, SystemZ::F10S, SystemZ::F11S,
+ SystemZ::F12S, SystemZ::F13S, SystemZ::F14S, SystemZ::F15S
+};
+
+const unsigned SystemZMC::FP64Regs[16] = {
+ SystemZ::F0D, SystemZ::F1D, SystemZ::F2D, SystemZ::F3D,
+ SystemZ::F4D, SystemZ::F5D, SystemZ::F6D, SystemZ::F7D,
+ SystemZ::F8D, SystemZ::F9D, SystemZ::F10D, SystemZ::F11D,
+ SystemZ::F12D, SystemZ::F13D, SystemZ::F14D, SystemZ::F15D
+};
+
+const unsigned SystemZMC::FP128Regs[16] = {
+ SystemZ::F0Q, SystemZ::F1Q, 0, 0,
+ SystemZ::F4Q, SystemZ::F5Q, 0, 0,
+ SystemZ::F8Q, SystemZ::F9Q, 0, 0,
+ SystemZ::F12Q, SystemZ::F13Q, 0, 0
+};
+
+unsigned SystemZMC::getFirstReg(unsigned Reg) {
+ static unsigned Map[SystemZ::NUM_TARGET_REGS];
+ static bool Initialized = false;
+ if (!Initialized) {
+ for (unsigned I = 0; I < 16; ++I) {
+ Map[GR32Regs[I]] = I;
+ Map[GRH32Regs[I]] = I;
+ Map[GR64Regs[I]] = I;
+ Map[GR128Regs[I]] = I;
+ Map[FP32Regs[I]] = I;
+ Map[FP64Regs[I]] = I;
+ Map[FP128Regs[I]] = I;
+    }
+    Initialized = true;
+  }
+ assert(Reg < SystemZ::NUM_TARGET_REGS);
+ return Map[Reg];
+}
+
+static MCAsmInfo *createSystemZMCAsmInfo(const MCRegisterInfo &MRI,
+ StringRef TT) {
+ MCAsmInfo *MAI = new SystemZMCAsmInfo(TT);
+ MCCFIInstruction Inst =
+ MCCFIInstruction::createDefCfa(0, MRI.getDwarfRegNum(SystemZ::R15D, true),
+ SystemZMC::CFAOffsetFromInitialSP);
+ MAI->addInitialFrameState(Inst);
return MAI;
}
@@ -118,7 +187,7 @@ static MCStreamer *createSystemZMCObjectStreamer(const Target &T, StringRef TT,
MCCodeEmitter *Emitter,
bool RelaxAll,
bool NoExecStack) {
- return createELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack);
+ return createELFStreamer(Ctx, 0, MAB, OS, Emitter, RelaxAll, NoExecStack);
}
extern "C" void LLVMInitializeSystemZTargetMC() {
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
index 229912f161c8..97e325b984f6 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
@@ -34,6 +34,39 @@ namespace SystemZMC {
// The offset of the DWARF CFA from the incoming stack pointer.
const int64_t CFAOffsetFromInitialSP = CallFrameSize;
+
+ // Maps of asm register numbers to LLVM register numbers, with 0 indicating
+ // an invalid register. In principle we could use 32-bit and 64-bit register
+ // classes directly, provided that we relegated the GPR allocation order
+ // in SystemZRegisterInfo.td to an AltOrder and left the default order
+ // as %r0-%r15. It seems better to provide the same interface for
+ // all classes though.
+ extern const unsigned GR32Regs[16];
+ extern const unsigned GRH32Regs[16];
+ extern const unsigned GR64Regs[16];
+ extern const unsigned GR128Regs[16];
+ extern const unsigned FP32Regs[16];
+ extern const unsigned FP64Regs[16];
+ extern const unsigned FP128Regs[16];
+
+ // Return the 0-based number of the first architectural register that
+ // contains the given LLVM register. E.g. R1D -> 1.
+ unsigned getFirstReg(unsigned Reg);
+
+ // Return the given register as a GR64.
+ inline unsigned getRegAsGR64(unsigned Reg) {
+ return GR64Regs[getFirstReg(Reg)];
+ }
+
+ // Return the given register as a low GR32.
+ inline unsigned getRegAsGR32(unsigned Reg) {
+ return GR32Regs[getFirstReg(Reg)];
+ }
+
+ // Return the given register as a high GR32.
+ inline unsigned getRegAsGRH32(unsigned Reg) {
+ return GRH32Regs[getFirstReg(Reg)];
+ }
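Usage sketch for the helpers above:

    // Sketch: SystemZMC::getRegAsGR64(SystemZ::R5L)  == SystemZ::R5D
    //         SystemZMC::getRegAsGRH32(SystemZ::R3D) == SystemZ::R3H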
}
MCCodeEmitter *createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
@@ -41,8 +74,9 @@ MCCodeEmitter *createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
MCContext &Ctx);
-MCAsmBackend *createSystemZMCAsmBackend(const Target &T, StringRef TT,
- StringRef CPU);
+MCAsmBackend *createSystemZMCAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
MCObjectWriter *createSystemZObjectWriter(raw_ostream &OS, uint8_t OSABI);
} // end namespace llvm
diff --git a/lib/Target/SystemZ/Makefile b/lib/Target/SystemZ/Makefile
index c992584af916..445725bd1e12 100644
--- a/lib/Target/SystemZ/Makefile
+++ b/lib/Target/SystemZ/Makefile
@@ -16,13 +16,14 @@ BUILT_SOURCES = SystemZGenRegisterInfo.inc \
SystemZGenAsmWriter.inc \
SystemZGenAsmMatcher.inc \
SystemZGenCodeEmitter.inc \
+ SystemZGenDisassemblerTables.inc \
SystemZGenInstrInfo.inc \
SystemZGenDAGISel.inc \
SystemZGenSubtargetInfo.inc \
SystemZGenCallingConv.inc \
SystemZGenMCCodeEmitter.inc
-DIRS = InstPrinter AsmParser TargetInfo MCTargetDesc
+DIRS = InstPrinter AsmParser Disassembler TargetInfo MCTargetDesc
include $(LEVEL)/Makefile.common
diff --git a/lib/Target/SystemZ/README.txt b/lib/Target/SystemZ/README.txt
index d1f56a491688..afa6cf090d07 100644
--- a/lib/Target/SystemZ/README.txt
+++ b/lib/Target/SystemZ/README.txt
@@ -29,7 +29,7 @@ to load 103. This seems to be a general target-independent problem.
--
-The tuning of the choice between Load Address (LA) and addition in
+The tuning of the choice between LOAD ADDRESS (LA) and addition in
SystemZISelDAGToDAG.cpp is suspect. It should be tweaked based on
performance measurements.
@@ -39,22 +39,35 @@ There is no scheduling support.
--
-We don't use the Branch on Count or Branch on Index families of instruction.
+We don't use the BRANCH ON INDEX instructions.
--
-We don't use the condition code results of anything except comparisons.
+We might want to use BRANCH ON CONDITION for conditional indirect calls
+and conditional returns.
-Implementing this may need something more finely grained than the z_cmp
-and z_ucmp that we have now. It might (or might not) also be useful to
-have a mask of "don't care" values in conditional branches. For example,
-integer comparisons never set CC to 3, so the bottom bit of the CC mask
-isn't particularly relevant. JNLH and JE are equally good for testing
-equality after an integer comparison, etc.
+--
+
+We don't use the TEST DATA CLASS instructions.
+
+--
+
+We could use the generic floating-point forms of LOAD COMPLEMENT,
+LOAD NEGATIVE and LOAD POSITIVE in cases where we don't need the
+condition codes. For example, we could use LCDFR instead of LCDBR.
--
-We don't optimize string and block memory operations.
+We only use MVC, XC and CLC for constant-length block operations.
+We could extend them to variable-length operations too,
+using EXECUTE RELATIVE LONG.
+
+MVCIN, MVCLE and CLCLE may be worthwhile too.
+
+--
+
+We don't use CUSE or the TRANSLATE family of instructions for string
+operations. The TRANSLATE ones are probably more difficult to exploit.
--
@@ -63,9 +76,21 @@ conventions require f128s to be returned by invisible reference.
--
-DAGCombiner can detect integer absolute, but there's not yet an associated
-ISD opcode. We could add one and implement it using Load Positive.
-Negated absolutes could use Load Negative.
+ADD LOGICAL WITH SIGNED IMMEDIATE could be useful when we need to
+produce a carry. SUBTRACT LOGICAL IMMEDIATE could be useful when we
+need to produce a borrow. (Note that there are no memory forms of
+ADD LOGICAL WITH CARRY and SUBTRACT LOGICAL WITH BORROW, so the high
+part of 128-bit memory operations would probably need to be done
+via a register.)
+
+--
+
+We don't use the halfword forms of LOAD REVERSED and STORE REVERSED
+(LRVH and STRVH).
+
+--
+
+We don't use ICM or STCM.
--
@@ -142,5 +167,15 @@ See CodeGen/SystemZ/alloca-01.ll for an example.
--
Atomic loads and stores use the default compare-and-swap based implementation.
-This is probably much too conservative in practice, and the overhead is
-especially bad for 8- and 16-bit accesses.
+This is much too conservative in practice, since the architecture guarantees
+that 1-, 2-, 4- and 8-byte loads and stores to aligned addresses are
+inherently atomic.
+
+--
+
+If needed, we can support 16-byte atomics using LPQ, STPQ and CSDG.
+
+--
+
+We might want to model all access registers and use them to spill
+32-bit values.
diff --git a/lib/Target/SystemZ/SystemZ.h b/lib/Target/SystemZ/SystemZ.h
index b811cbe9f043..dcebbad59071 100644
--- a/lib/Target/SystemZ/SystemZ.h
+++ b/lib/Target/SystemZ/SystemZ.h
@@ -30,16 +30,51 @@ namespace llvm {
const unsigned CCMASK_3 = 1 << 0;
const unsigned CCMASK_ANY = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
- // Condition-code mask assignments for floating-point comparisons.
+ // Condition-code mask assignments for integer and floating-point
+ // comparisons.
const unsigned CCMASK_CMP_EQ = CCMASK_0;
const unsigned CCMASK_CMP_LT = CCMASK_1;
const unsigned CCMASK_CMP_GT = CCMASK_2;
- const unsigned CCMASK_CMP_UO = CCMASK_3;
const unsigned CCMASK_CMP_NE = CCMASK_CMP_LT | CCMASK_CMP_GT;
const unsigned CCMASK_CMP_LE = CCMASK_CMP_EQ | CCMASK_CMP_LT;
const unsigned CCMASK_CMP_GE = CCMASK_CMP_EQ | CCMASK_CMP_GT;
+
+ // Condition-code mask assignments for floating-point comparisons only.
+ const unsigned CCMASK_CMP_UO = CCMASK_3;
const unsigned CCMASK_CMP_O = CCMASK_ANY ^ CCMASK_CMP_UO;
+ // All condition-code values produced by comparisons.
+ const unsigned CCMASK_ICMP = CCMASK_0 | CCMASK_1 | CCMASK_2;
+ const unsigned CCMASK_FCMP = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
+
+ // Condition-code mask assignments for CS.
+ const unsigned CCMASK_CS_EQ = CCMASK_0;
+ const unsigned CCMASK_CS_NE = CCMASK_1;
+ const unsigned CCMASK_CS = CCMASK_0 | CCMASK_1;
+
+ // Condition-code mask assignments for a completed SRST loop.
+ const unsigned CCMASK_SRST_FOUND = CCMASK_1;
+ const unsigned CCMASK_SRST_NOTFOUND = CCMASK_2;
+ const unsigned CCMASK_SRST = CCMASK_1 | CCMASK_2;
+
+ // Condition-code mask assignments for TEST UNDER MASK.
+ const unsigned CCMASK_TM_ALL_0 = CCMASK_0;
+ const unsigned CCMASK_TM_MIXED_MSB_0 = CCMASK_1;
+ const unsigned CCMASK_TM_MIXED_MSB_1 = CCMASK_2;
+ const unsigned CCMASK_TM_ALL_1 = CCMASK_3;
+ const unsigned CCMASK_TM_SOME_0 = CCMASK_TM_ALL_1 ^ CCMASK_ANY;
+ const unsigned CCMASK_TM_SOME_1 = CCMASK_TM_ALL_0 ^ CCMASK_ANY;
+ const unsigned CCMASK_TM_MSB_0 = CCMASK_0 | CCMASK_1;
+ const unsigned CCMASK_TM_MSB_1 = CCMASK_2 | CCMASK_3;
+ const unsigned CCMASK_TM = CCMASK_ANY;
+
+ // The position of the low CC bit in an IPM result.
+ const unsigned IPM_CC = 28;
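+ // For example, the CC value in an IPM result Val can be recovered
+ // as (Val >> IPM_CC) & 3.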
+
+ // Mask assignments for PFD.
+ const unsigned PFD_READ = 1;
+ const unsigned PFD_WRITE = 2;
+
// Return true if Val fits an LLILL operand.
static inline bool isImmLL(uint64_t Val) {
return (Val & ~0x000000000000ffffULL) == 0;
@@ -73,5 +108,8 @@ namespace llvm {
FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM,
CodeGenOpt::Level OptLevel);
+ FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM);
+ FunctionPass *createSystemZShortenInstPass(SystemZTargetMachine &TM);
+ FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM);
} // end namespace llvm;
#endif
diff --git a/lib/Target/SystemZ/SystemZ.td b/lib/Target/SystemZ/SystemZ.td
index e03c32f56d80..abf5c8eb320c 100644
--- a/lib/Target/SystemZ/SystemZ.td
+++ b/lib/Target/SystemZ/SystemZ.td
@@ -14,13 +14,10 @@
include "llvm/Target/Target.td"
//===----------------------------------------------------------------------===//
-// SystemZ supported processors
+// SystemZ supported processors and features
//===----------------------------------------------------------------------===//
-class Proc<string Name, list<SubtargetFeature> Features>
- : Processor<Name, NoItineraries, Features>;
-
-def : Proc<"z10", []>;
+include "SystemZProcessors.td"
//===----------------------------------------------------------------------===//
// Register file description
diff --git a/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index 1e15ab141321..75cbda4958a2 100644
--- a/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -19,16 +19,142 @@
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/Mangler.h"
using namespace llvm;
+// Return an RI instruction like MI with opcode Opcode, but with the
+// GR64 register operands turned into GR32s.
+static MCInst lowerRILow(const MachineInstr *MI, unsigned Opcode) {
+ if (MI->isCompare())
+ return MCInstBuilder(Opcode)
+ .addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
+ .addImm(MI->getOperand(1).getImm());
+ else
+ return MCInstBuilder(Opcode)
+ .addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
+ .addReg(SystemZMC::getRegAsGR32(MI->getOperand(1).getReg()))
+ .addImm(MI->getOperand(2).getImm());
+}
+
+// Return an RI instruction like MI with opcode Opcode, but with the
+// GR64 register operands turned into GRH32s.
+static MCInst lowerRIHigh(const MachineInstr *MI, unsigned Opcode) {
+ if (MI->isCompare())
+ return MCInstBuilder(Opcode)
+ .addReg(SystemZMC::getRegAsGRH32(MI->getOperand(0).getReg()))
+ .addImm(MI->getOperand(1).getImm());
+ else
+ return MCInstBuilder(Opcode)
+ .addReg(SystemZMC::getRegAsGRH32(MI->getOperand(0).getReg()))
+ .addReg(SystemZMC::getRegAsGRH32(MI->getOperand(1).getReg()))
+ .addImm(MI->getOperand(2).getImm());
+}
+
+// Return an RIE instruction like MI with opcode Opcode, but with the
+// R2 register turned into a GR64.
+static MCInst lowerRIEfLow(const MachineInstr *MI, unsigned Opcode) {
+ return MCInstBuilder(Opcode)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addReg(SystemZMC::getRegAsGR64(MI->getOperand(2).getReg()))
+ .addImm(MI->getOperand(3).getImm())
+ .addImm(MI->getOperand(4).getImm())
+ .addImm(MI->getOperand(5).getImm());
+}
+
void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- SystemZMCInstLower Lower(Mang, MF->getContext(), *this);
+ SystemZMCInstLower Lower(MF->getContext(), *this);
MCInst LoweredMI;
- Lower.lower(MI, LoweredMI);
+ switch (MI->getOpcode()) {
+ case SystemZ::Return:
+ LoweredMI = MCInstBuilder(SystemZ::BR).addReg(SystemZ::R14D);
+ break;
+
+ case SystemZ::CallBRASL:
+ LoweredMI = MCInstBuilder(SystemZ::BRASL)
+ .addReg(SystemZ::R14D)
+ .addExpr(Lower.getExpr(MI->getOperand(0), MCSymbolRefExpr::VK_PLT));
+ break;
+
+ case SystemZ::CallBASR:
+ LoweredMI = MCInstBuilder(SystemZ::BASR)
+ .addReg(SystemZ::R14D)
+ .addReg(MI->getOperand(0).getReg());
+ break;
+
+ case SystemZ::CallJG:
+ LoweredMI = MCInstBuilder(SystemZ::JG)
+ .addExpr(Lower.getExpr(MI->getOperand(0), MCSymbolRefExpr::VK_PLT));
+ break;
+
+ case SystemZ::CallBR:
+ LoweredMI = MCInstBuilder(SystemZ::BR).addReg(SystemZ::R1D);
+ break;
+
+ case SystemZ::IILF64:
+ LoweredMI = MCInstBuilder(SystemZ::IILF)
+ .addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
+ .addImm(MI->getOperand(2).getImm());
+ break;
+
+ case SystemZ::IIHF64:
+ LoweredMI = MCInstBuilder(SystemZ::IIHF)
+ .addReg(SystemZMC::getRegAsGRH32(MI->getOperand(0).getReg()))
+ .addImm(MI->getOperand(2).getImm());
+ break;
+
+ case SystemZ::RISBHH:
+ case SystemZ::RISBHL:
+ LoweredMI = lowerRIEfLow(MI, SystemZ::RISBHG);
+ break;
+
+ case SystemZ::RISBLH:
+ case SystemZ::RISBLL:
+ LoweredMI = lowerRIEfLow(MI, SystemZ::RISBLG);
+ break;
+
+#define LOWER_LOW(NAME) \
+ case SystemZ::NAME##64: LoweredMI = lowerRILow(MI, SystemZ::NAME); break
+
+ LOWER_LOW(IILL);
+ LOWER_LOW(IILH);
+ LOWER_LOW(TMLL);
+ LOWER_LOW(TMLH);
+ LOWER_LOW(NILL);
+ LOWER_LOW(NILH);
+ LOWER_LOW(NILF);
+ LOWER_LOW(OILL);
+ LOWER_LOW(OILH);
+ LOWER_LOW(OILF);
+ LOWER_LOW(XILF);
+
+#undef LOWER_LOW
+
+#define LOWER_HIGH(NAME) \
+ case SystemZ::NAME##64: LoweredMI = lowerRIHigh(MI, SystemZ::NAME); break
+
+ LOWER_HIGH(IIHL);
+ LOWER_HIGH(IIHH);
+ LOWER_HIGH(TMHL);
+ LOWER_HIGH(TMHH);
+ LOWER_HIGH(NIHL);
+ LOWER_HIGH(NIHH);
+ LOWER_HIGH(NIHF);
+ LOWER_HIGH(OIHL);
+ LOWER_HIGH(OIHH);
+ LOWER_HIGH(OIHF);
+ LOWER_HIGH(XIHF);
+
+#undef LOWER_HIGH
+
+ default:
+ Lower.lower(MI, LoweredMI);
+ break;
+ }
OutStreamer.EmitInstruction(LoweredMI);
}
@@ -48,7 +174,7 @@ EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
static_cast<SystemZConstantPoolValue*>(MCPV);
const MCExpr *Expr =
- MCSymbolRefExpr::Create(Mang->getSymbol(ZCPV->getGlobalValue()),
+ MCSymbolRefExpr::Create(getSymbol(ZCPV->getGlobalValue()),
getModifierVariantKind(ZCPV->getModifier()),
OutContext);
uint64_t Size = TM.getDataLayout()->getTypeAllocSize(ZCPV->getType());
@@ -66,7 +192,7 @@ bool SystemZAsmPrinter::PrintAsmOperand(const MachineInstr *MI,
return true;
OS << -int64_t(MI->getOperand(OpNo).getImm());
} else {
- SystemZMCInstLower Lower(Mang, MF->getContext(), *this);
+ SystemZMCInstLower Lower(MF->getContext(), *this);
MCOperand MO(Lower.lowerOperand(MI->getOperand(OpNo)));
SystemZInstPrinter::printOperand(MO, OS);
}
@@ -100,7 +226,7 @@ void SystemZAsmPrinter::EmitEndOfAsmFile(Module &M) {
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
OutStreamer.EmitLabel(Stubs[i].first);
OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(),
- TD->getPointerSize(0), 0);
+ TD->getPointerSize(0));
}
Stubs.clear();
}
diff --git a/lib/Target/SystemZ/SystemZCallingConv.td b/lib/Target/SystemZ/SystemZCallingConv.td
index c2d727fe4d04..c4f641e7bdec 100644
--- a/lib/Target/SystemZ/SystemZCallingConv.td
+++ b/lib/Target/SystemZ/SystemZCallingConv.td
@@ -23,7 +23,7 @@ def RetCC_SystemZ : CallingConv<[
// call-clobbered argument registers available for code that doesn't
// care about the ABI. (R6 is an argument register too, but is
// call-saved and therefore not suitable for return values.)
- CCIfType<[i32], CCAssignToReg<[R2W, R3W, R4W, R5W]>>,
+ CCIfType<[i32], CCAssignToReg<[R2L, R3L, R4L, R5L]>>,
CCIfType<[i64], CCAssignToReg<[R2D, R3D, R4D, R5D]>>,
// ABI-compliant code returns float and double in F0. Make the
@@ -53,7 +53,7 @@ def CC_SystemZ : CallingConv<[
// The first 5 integer arguments are passed in R2-R6. Note that R6
// is call-saved.
- CCIfType<[i32], CCAssignToReg<[R2W, R3W, R4W, R5W, R6W]>>,
+ CCIfType<[i32], CCAssignToReg<[R2L, R3L, R4L, R5L, R6L]>>,
CCIfType<[i64], CCAssignToReg<[R2D, R3D, R4D, R5D, R6D]>>,
// The first 4 float and double arguments are passed in even registers F0-F6.
diff --git a/lib/Target/SystemZ/SystemZConstantPoolValue.cpp b/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
index e9c4f6df52c3..6c7081169a96 100644
--- a/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
+++ b/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
@@ -39,7 +39,7 @@ unsigned SystemZConstantPoolValue::getRelocationInfo() const {
int SystemZConstantPoolValue::
getExistingMachineCPValue(MachineConstantPool *CP, unsigned Alignment) {
unsigned AlignMask = Alignment - 1;
- const std::vector<MachineConstantPoolEntry> Constants = CP->getConstants();
+ const std::vector<MachineConstantPoolEntry> &Constants = CP->getConstants();
for (unsigned I = 0, E = Constants.size(); I != E; ++I) {
if (Constants[I].isMachineConstantPoolEntry() &&
(Constants[I].getAlignment() & AlignMask) == 0) {
diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp
new file mode 100644
index 000000000000..b8a77db0f845
--- /dev/null
+++ b/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -0,0 +1,471 @@
+//===-- SystemZElimCompare.cpp - Eliminate comparison instructions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass:
+// (1) tries to remove compares if CC already contains the required information
+// (2) fuses compares and branches into COMPARE AND BRANCH instructions
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "systemz-elim-compare"
+
+#include "SystemZTargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+STATISTIC(BranchOnCounts, "Number of branch-on-count instructions");
+STATISTIC(EliminatedComparisons, "Number of eliminated comparisons");
+STATISTIC(FusedComparisons, "Number of fused compare-and-branch instructions");
+
+namespace {
+ // Represents the references to a particular register in one or more
+ // instructions.
+ struct Reference {
+ Reference()
+ : Def(false), Use(false), IndirectDef(false), IndirectUse(false) {}
+
+ Reference &operator|=(const Reference &Other) {
+ Def |= Other.Def;
+ IndirectDef |= Other.IndirectDef;
+ Use |= Other.Use;
+ IndirectUse |= Other.IndirectUse;
+ return *this;
+ }
+
+ operator bool() const { return Def || Use; }
+
+ // True if the register is defined or used in some form, either directly or
+ // via a sub- or super-register.
+ bool Def;
+ bool Use;
+
+ // True if the register is defined or used indirectly, by a sub- or
+ // super-register.
+ bool IndirectDef;
+ bool IndirectUse;
+ };
+
+ class SystemZElimCompare : public MachineFunctionPass {
+ public:
+ static char ID;
+ SystemZElimCompare(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(0), TRI(0) {}
+
+ virtual const char *getPassName() const {
+ return "SystemZ Comparison Elimination";
+ }
+
+ bool processBlock(MachineBasicBlock *MBB);
+ bool runOnMachineFunction(MachineFunction &F);
+
+ private:
+ Reference getRegReferences(MachineInstr *MI, unsigned Reg);
+ bool convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool convertToLoadAndTest(MachineInstr *MI);
+ bool adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool optimizeCompareZero(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool fuseCompareAndBranch(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+
+ const SystemZInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ };
+
+ char SystemZElimCompare::ID = 0;
+} // end of anonymous namespace
+
+FunctionPass *llvm::createSystemZElimComparePass(SystemZTargetMachine &TM) {
+ return new SystemZElimCompare(TM);
+}
+
+// Return true if CC is live out of MBB.
+static bool isCCLiveOut(MachineBasicBlock *MBB) {
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end(); SI != SE; ++SI)
+ if ((*SI)->isLiveIn(SystemZ::CC))
+ return true;
+ return false;
+}
+
+// Return true if any CC result of MI would reflect the value of subreg
+// SubReg of Reg.
+static bool resultTests(MachineInstr *MI, unsigned Reg, unsigned SubReg) {
+ if (MI->getNumOperands() > 0 &&
+ MI->getOperand(0).isReg() &&
+ MI->getOperand(0).isDef() &&
+ MI->getOperand(0).getReg() == Reg &&
+ MI->getOperand(0).getSubReg() == SubReg)
+ return true;
+
+ switch (MI->getOpcode()) {
+ case SystemZ::LR:
+ case SystemZ::LGR:
+ case SystemZ::LGFR:
+ case SystemZ::LTR:
+ case SystemZ::LTGR:
+ case SystemZ::LTGFR:
+ case SystemZ::LER:
+ case SystemZ::LDR:
+ case SystemZ::LXR:
+ case SystemZ::LTEBR:
+ case SystemZ::LTDBR:
+ case SystemZ::LTXBR:
+ if (MI->getOperand(1).getReg() == Reg &&
+ MI->getOperand(1).getSubReg() == SubReg)
+ return true;
+ }
+
+ return false;
+}
+
+// Describe the references to Reg in MI, including sub- and super-registers.
+Reference SystemZElimCompare::getRegReferences(MachineInstr *MI, unsigned Reg) {
+ Reference Ref;
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ if (MO.isReg()) {
+ if (unsigned MOReg = MO.getReg()) {
+ if (MOReg == Reg || TRI->regsOverlap(MOReg, Reg)) {
+ if (MO.isUse()) {
+ Ref.Use = true;
+ Ref.IndirectUse |= (MOReg != Reg);
+ }
+ if (MO.isDef()) {
+ Ref.Def = true;
+ Ref.IndirectDef |= (MOReg != Reg);
+ }
+ }
+ }
+ }
+ }
+ return Ref;
+}
+
+// Compare compares the result of MI against zero. If MI is an addition
+// of -1 and if CCUsers is a single branch on nonzero, eliminate the addition
+// and convert the branch to a BRCT(G). Return true on success.
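+//
+// For example (illustrative pseudo-assembly), the sequence
+//
+//   AHI  %r2, -1
+//   CHI  %r2, 0
+//   JLH  .Lloop
+//
+// can be rewritten as the single instruction "BRCT %r2, .Lloop".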
+bool
+SystemZElimCompare::convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers) {
+ // Check whether we have an addition of -1.
+ unsigned Opcode = MI->getOpcode();
+ unsigned BRCT;
+ if (Opcode == SystemZ::AHI)
+ BRCT = SystemZ::BRCT;
+ else if (Opcode == SystemZ::AGHI)
+ BRCT = SystemZ::BRCTG;
+ else
+ return false;
+ if (MI->getOperand(2).getImm() != -1)
+ return false;
+
+ // Check whether we have a single JLH.
+ if (CCUsers.size() != 1)
+ return false;
+ MachineInstr *Branch = CCUsers[0];
+ if (Branch->getOpcode() != SystemZ::BRC ||
+ Branch->getOperand(0).getImm() != SystemZ::CCMASK_ICMP ||
+ Branch->getOperand(1).getImm() != SystemZ::CCMASK_CMP_NE)
+ return false;
+
+ // We already know that there are no references to the register between
+ // MI and Compare. Make sure that there are also no references between
+ // Compare and Branch.
+ unsigned SrcReg = Compare->getOperand(0).getReg();
+ MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
+ for (++MBBI; MBBI != MBBE; ++MBBI)
+ if (getRegReferences(MBBI, SrcReg))
+ return false;
+
+ // The transformation is OK. Rebuild Branch as a BRCT(G).
+ MachineOperand Target(Branch->getOperand(2));
+ Branch->RemoveOperand(2);
+ Branch->RemoveOperand(1);
+ Branch->RemoveOperand(0);
+ Branch->setDesc(TII->get(BRCT));
+ MachineInstrBuilder(*Branch->getParent()->getParent(), Branch)
+ .addOperand(MI->getOperand(0))
+ .addOperand(MI->getOperand(1))
+ .addOperand(Target)
+ .addReg(SystemZ::CC, RegState::ImplicitDefine);
+ MI->removeFromParent();
+ return true;
+}
+
+// If MI is a load instruction, try to convert it into a LOAD AND TEST.
+// Return true on success.
+bool SystemZElimCompare::convertToLoadAndTest(MachineInstr *MI) {
+ unsigned Opcode = TII->getLoadAndTest(MI->getOpcode());
+ if (!Opcode)
+ return false;
+
+ MI->setDesc(TII->get(Opcode));
+ MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ .addReg(SystemZ::CC, RegState::ImplicitDefine);
+ return true;
+}
+
+// The CC users in CCUsers are testing the result of a comparison of some
+// value X against zero and we know that any CC value produced by MI
+// would also reflect the value of X. Try to adjust CCUsers so that
+// they test the result of MI directly, returning true on success.
+// Leave everything unchanged on failure.
+bool SystemZElimCompare::
+adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers) {
+ int Opcode = MI->getOpcode();
+ const MCInstrDesc &Desc = TII->get(Opcode);
+ unsigned MIFlags = Desc.TSFlags;
+
+ // See which compare-style condition codes are available.
+ unsigned ReusableCCMask = SystemZII::getCompareZeroCCMask(MIFlags);
+
+ // For unsigned comparisons with zero, only equality makes sense.
+ unsigned CompareFlags = Compare->getDesc().TSFlags;
+ if (CompareFlags & SystemZII::IsLogical)
+ ReusableCCMask &= SystemZ::CCMASK_CMP_EQ;
+
+ if (ReusableCCMask == 0)
+ return false;
+
+ unsigned CCValues = SystemZII::getCCValues(MIFlags);
+ assert((ReusableCCMask & ~CCValues) == 0 && "Invalid CCValues");
+
+ // Now check whether these flags are enough for all users.
+ SmallVector<MachineOperand *, 4> AlterMasks;
+ for (unsigned int I = 0, E = CCUsers.size(); I != E; ++I) {
+ MachineInstr *MI = CCUsers[I];
+
+ // Fail if this isn't a use of CC that we understand.
+ unsigned Flags = MI->getDesc().TSFlags;
+ unsigned FirstOpNum;
+ if (Flags & SystemZII::CCMaskFirst)
+ FirstOpNum = 0;
+ else if (Flags & SystemZII::CCMaskLast)
+ FirstOpNum = MI->getNumExplicitOperands() - 2;
+ else
+ return false;
+
+ // Check whether the instruction predicate treats all CC values
+ // outside of ReusableCCMask in the same way. In that case it
+ // doesn't matter what those CC values mean.
+ unsigned CCValid = MI->getOperand(FirstOpNum).getImm();
+ unsigned CCMask = MI->getOperand(FirstOpNum + 1).getImm();
+ unsigned OutValid = ~ReusableCCMask & CCValid;
+ unsigned OutMask = ~ReusableCCMask & CCMask;
+ if (OutMask != 0 && OutMask != OutValid)
+ return false;
+
+ AlterMasks.push_back(&MI->getOperand(FirstOpNum));
+ AlterMasks.push_back(&MI->getOperand(FirstOpNum + 1));
+ }
+
+ // All users are OK. Adjust the masks for MI.
+ for (unsigned I = 0, E = AlterMasks.size(); I != E; I += 2) {
+ AlterMasks[I]->setImm(CCValues);
+ unsigned CCMask = AlterMasks[I + 1]->getImm();
+ if (CCMask & ~ReusableCCMask)
+ AlterMasks[I + 1]->setImm((CCMask & ReusableCCMask) |
+ (CCValues & ~ReusableCCMask));
+ }
+
+ // CC is now live after MI.
+ int CCDef = MI->findRegisterDefOperandIdx(SystemZ::CC, false, true, TRI);
+ assert(CCDef >= 0 && "Couldn't find CC set");
+ MI->getOperand(CCDef).setIsDead(false);
+
+ // Clear any intervening kills of CC.
+ MachineBasicBlock::iterator MBBI = MI, MBBE = Compare;
+ for (++MBBI; MBBI != MBBE; ++MBBI)
+ MBBI->clearRegisterKills(SystemZ::CC, TRI);
+
+ return true;
+}
+
+// Return true if Compare is a comparison against zero.
+static bool isCompareZero(MachineInstr *Compare) {
+ switch (Compare->getOpcode()) {
+ case SystemZ::LTEBRCompare:
+ case SystemZ::LTDBRCompare:
+ case SystemZ::LTXBRCompare:
+ return true;
+
+ default:
+ return (Compare->getNumExplicitOperands() == 2 &&
+ Compare->getOperand(1).isImm() &&
+ Compare->getOperand(1).getImm() == 0);
+ }
+}
+
+// Try to optimize cases where comparison instruction Compare is testing
+// a value against zero. Return true on success, in which case Compare
+// has become dead and should be deleted. CCUsers is the list of
+// instructions that use the CC value produced by Compare.
+bool SystemZElimCompare::
+optimizeCompareZero(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers) {
+ if (!isCompareZero(Compare))
+ return false;
+
+ // Search back for CC results that are based on the first operand.
+ unsigned SrcReg = Compare->getOperand(0).getReg();
+ unsigned SrcSubReg = Compare->getOperand(0).getSubReg();
+ MachineBasicBlock *MBB = Compare->getParent();
+ MachineBasicBlock::iterator MBBI = Compare, MBBE = MBB->begin();
+ Reference CCRefs;
+ Reference SrcRefs;
+ while (MBBI != MBBE) {
+ --MBBI;
+ MachineInstr *MI = MBBI;
+ if (resultTests(MI, SrcReg, SrcSubReg)) {
+ // Try to remove both MI and Compare by converting a branch to BRCT(G).
+ // We don't care in this case whether CC is modified between MI and
+ // Compare.
+ if (!CCRefs.Use && !SrcRefs && convertToBRCT(MI, Compare, CCUsers)) {
+ BranchOnCounts += 1;
+ return true;
+ }
+ // Try to eliminate Compare by reusing a CC result from MI.
+ if ((!CCRefs && convertToLoadAndTest(MI)) ||
+ (!CCRefs.Def && adjustCCMasksForInstr(MI, Compare, CCUsers))) {
+ EliminatedComparisons += 1;
+ return true;
+ }
+ }
+ SrcRefs |= getRegReferences(MI, SrcReg);
+ if (SrcRefs.Def)
+ return false;
+ CCRefs |= getRegReferences(MI, SystemZ::CC);
+ if (CCRefs.Use && CCRefs.Def)
+ return false;
+ }
+ return false;
+}
+
+// Try to fuse comparison instruction Compare into a later branch.
+// Return true on success, in which case Compare is redundant and can
+// be deleted.
+bool SystemZElimCompare::
+fuseCompareAndBranch(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers) {
+ // See whether we have a comparison that can be fused.
+ unsigned FusedOpcode = TII->getCompareAndBranch(Compare->getOpcode(),
+ Compare);
+ if (!FusedOpcode)
+ return false;
+
+ // See whether we have a single branch with which to fuse.
+ if (CCUsers.size() != 1)
+ return false;
+ MachineInstr *Branch = CCUsers[0];
+ if (Branch->getOpcode() != SystemZ::BRC)
+ return false;
+
+ // Make sure that the operands are available at the branch.
+ unsigned SrcReg = Compare->getOperand(0).getReg();
+ unsigned SrcReg2 = (Compare->getOperand(1).isReg() ?
+ Compare->getOperand(1).getReg() : 0);
+ MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
+ for (++MBBI; MBBI != MBBE; ++MBBI)
+ if (MBBI->modifiesRegister(SrcReg, TRI) ||
+ (SrcReg2 && MBBI->modifiesRegister(SrcReg2, TRI)))
+ return false;
+
+ // Read the branch mask and target.
+ MachineOperand CCMask(MBBI->getOperand(1));
+ MachineOperand Target(MBBI->getOperand(2));
+ assert((CCMask.getImm() & ~SystemZ::CCMASK_ICMP) == 0 &&
+ "Invalid condition-code mask for integer comparison");
+
+ // Clear out all current operands.
+ int CCUse = MBBI->findRegisterUseOperandIdx(SystemZ::CC, false, TRI);
+ assert(CCUse >= 0 && "BRC must use CC");
+ Branch->RemoveOperand(CCUse);
+ Branch->RemoveOperand(2);
+ Branch->RemoveOperand(1);
+ Branch->RemoveOperand(0);
+
+ // Rebuild Branch as a fused compare and branch.
+ Branch->setDesc(TII->get(FusedOpcode));
+ MachineInstrBuilder(*Branch->getParent()->getParent(), Branch)
+ .addOperand(Compare->getOperand(0))
+ .addOperand(Compare->getOperand(1))
+ .addOperand(CCMask)
+ .addOperand(Target)
+ .addReg(SystemZ::CC, RegState::ImplicitDefine);
+
+ // Clear any intervening kills of SrcReg and SrcReg2.
+ MBBI = Compare;
+ for (++MBBI; MBBI != MBBE; ++MBBI) {
+ MBBI->clearRegisterKills(SrcReg, TRI);
+ if (SrcReg2)
+ MBBI->clearRegisterKills(SrcReg2, TRI);
+ }
+ FusedComparisons += 1;
+ return true;
+}
+
+// Process all comparison instructions in MBB. Return true if something
+// changed.
+bool SystemZElimCompare::processBlock(MachineBasicBlock *MBB) {
+ bool Changed = false;
+
+ // Walk backwards through the block looking for comparisons, recording
+ // all CC users as we go. The subroutines can delete Compare and
+ // instructions before it.
+ bool CompleteCCUsers = !isCCLiveOut(MBB);
+ SmallVector<MachineInstr *, 4> CCUsers;
+ MachineBasicBlock::iterator MBBI = MBB->end();
+ while (MBBI != MBB->begin()) {
+ MachineInstr *MI = --MBBI;
+ if (CompleteCCUsers &&
+ MI->isCompare() &&
+ (optimizeCompareZero(MI, CCUsers) ||
+ fuseCompareAndBranch(MI, CCUsers))) {
+ ++MBBI;
+ MI->removeFromParent();
+ Changed = true;
+ CCUsers.clear();
+ CompleteCCUsers = true;
+ continue;
+ }
+
+ Reference CCRefs(getRegReferences(MI, SystemZ::CC));
+ if (CCRefs.Def) {
+ CCUsers.clear();
+ CompleteCCUsers = !CCRefs.IndirectDef;
+ }
+ if (CompleteCCUsers && CCRefs.Use)
+ CCUsers.push_back(MI);
+ }
+ return Changed;
+}
+
+bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) {
+ TII = static_cast<const SystemZInstrInfo *>(F.getTarget().getInstrInfo());
+ TRI = &TII->getRegisterInfo();
+
+ bool Changed = false;
+ for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
+ MFI != MFE; ++MFI)
+ Changed |= processBlock(MFI);
+
+ return Changed;
+}
diff --git a/lib/Target/SystemZ/SystemZFrameLowering.cpp b/lib/Target/SystemZ/SystemZFrameLowering.cpp
index fda33dee48f5..acfb491b953b 100644
--- a/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -14,19 +14,15 @@
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
using namespace llvm;
-SystemZFrameLowering::SystemZFrameLowering(const SystemZTargetMachine &tm,
- const SystemZSubtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8,
- -SystemZMC::CallFrameSize),
- TM(tm),
- STI(sti) {
+namespace {
// The ABI-defined register save slots, relative to the incoming stack
// pointer.
- static const unsigned SpillOffsetTable[][2] = {
+ static const TargetFrameLowering::SpillSlot SpillOffsetTable[] = {
{ SystemZ::R2D, 0x10 },
{ SystemZ::R3D, 0x18 },
{ SystemZ::R4D, 0x20 },
@@ -46,11 +42,23 @@ SystemZFrameLowering::SystemZFrameLowering(const SystemZTargetMachine &tm,
{ SystemZ::F4D, 0x90 },
{ SystemZ::F6D, 0x98 }
};
+}
+SystemZFrameLowering::SystemZFrameLowering(const SystemZTargetMachine &tm,
+ const SystemZSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8,
+ -SystemZMC::CallFrameSize, 8),
+ TM(tm), STI(sti) {
// Create a mapping from register number to save slot offset.
RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
for (unsigned I = 0, E = array_lengthof(SpillOffsetTable); I != E; ++I)
- RegSpillOffsets[SpillOffsetTable[I][0]] = SpillOffsetTable[I][1];
+ RegSpillOffsets[SpillOffsetTable[I].Reg] = SpillOffsetTable[I].Offset;
+}
+
+const TargetFrameLowering::SpillSlot *
+SystemZFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const {
+ NumEntries = array_lengthof(SpillOffsetTable);
+ return SpillOffsetTable;
}
void SystemZFrameLowering::
@@ -103,7 +111,7 @@ static void addSavedGPR(MachineBasicBlock &MBB, MachineInstrBuilder &MIB,
const SystemZTargetMachine &TM,
unsigned GPR64, bool IsImplicit) {
const SystemZRegisterInfo *RI = TM.getRegisterInfo();
- unsigned GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_32bit);
+ unsigned GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32);
bool IsLive = MBB.isLiveIn(GPR64) || MBB.isLiveIn(GPR32);
if (!IsLive || !IsImplicit) {
MIB.addReg(GPR64, getImplRegState(IsImplicit) | getKillRegState(!IsLive));
@@ -127,14 +135,12 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
// Scan the call-saved GPRs and find the bounds of the register spill area.
- unsigned SavedGPRFrameSize = 0;
unsigned LowGPR = 0;
unsigned HighGPR = SystemZ::R15D;
unsigned StartOffset = -1U;
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
unsigned Reg = CSI[I].getReg();
if (SystemZ::GR64BitRegClass.contains(Reg)) {
- SavedGPRFrameSize += 8;
unsigned Offset = RegSpillOffsets[Reg];
assert(Offset && "Unexpected GPR save");
if (StartOffset > Offset) {
@@ -144,9 +150,7 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
}
}
- // Save information about the range and location of the call-saved
- // registers, for use by the epilogue inserter.
- ZFI->setSavedGPRFrameSize(SavedGPRFrameSize);
+ // Save the range of call-saved registers, for use by the epilogue inserter.
ZFI->setLowSavedGPR(LowGPR);
ZFI->setHighSavedGPR(HighGPR);
@@ -260,6 +264,22 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
return true;
}
+void SystemZFrameLowering::
+processFunctionBeforeFrameFinalized(MachineFunction &MF,
+ RegScavenger *RS) const {
+ MachineFrameInfo *MFFrame = MF.getFrameInfo();
+ uint64_t MaxReach = (MFFrame->estimateStackSize(MF) +
+ SystemZMC::CallFrameSize * 2);
+ if (!isUInt<12>(MaxReach)) {
+ // We may need register scavenging slots if some parts of the frame
+ // are outside the reach of an unsigned 12-bit displacement.
+ // Create 2 for the case where both addresses in an MVC are
+ // out of range.
+ RS->addScavengingFrameIndex(MFFrame->CreateStackObject(8, 8, false));
+ RS->addScavengingFrameIndex(MFFrame->CreateStackObject(8, 8, false));
+ }
+}
+
// Emit instructions before MBBI (in MBB) to add NumBytes to Reg.
static void emitIncrement(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
@@ -283,7 +303,7 @@ static void emitIncrement(MachineBasicBlock &MBB,
}
MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII->get(Opcode), Reg)
.addReg(Reg).addImm(ThisVal);
- // The PSW implicit def is dead.
+ // The CC implicit def is dead.
MI->getOperand(3).setIsDead();
NumBytes -= ThisVal;
}
@@ -297,7 +317,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
const std::vector<CalleeSavedInfo> &CSI = MFFrame->getCalleeSavedInfo();
bool HasFP = hasFP(MF);
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
@@ -321,9 +341,8 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned Reg = I->getReg();
if (SystemZ::GR64BitRegClass.contains(Reg)) {
int64_t Offset = SPOffsetFromCFA + RegSpillOffsets[Reg];
- MachineLocation StackSlot(MachineLocation::VirtualFP, Offset);
- MachineLocation RegValue(Reg);
- Moves.push_back(MachineMove(GPRSaveLabel, StackSlot, RegValue));
+ MMI.addFrameInst(MCCFIInstruction::createOffset(
+ GPRSaveLabel, MRI->getDwarfRegNum(Reg, true), Offset));
}
}
}
@@ -338,9 +357,8 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
.addSym(AdjustSPLabel);
- MachineLocation FPDest(MachineLocation::VirtualFP);
- MachineLocation FPSrc(MachineLocation::VirtualFP, SPOffsetFromCFA + Delta);
- Moves.push_back(MachineMove(AdjustSPLabel, FPDest, FPSrc));
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(
+ AdjustSPLabel, SPOffsetFromCFA + Delta));
SPOffsetFromCFA += Delta;
}
@@ -353,9 +371,9 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
.addSym(SetFPLabel);
- MachineLocation HardFP(SystemZ::R11D);
- MachineLocation VirtualFP(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(SetFPLabel, HardFP, VirtualFP));
+ unsigned HardFP = MRI->getDwarfRegNum(SystemZ::R11D, true);
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(SetFPLabel, HardFP));
// Mark the FramePtr as live at the beginning of every block except
// the entry block. (We'll have marked R11 as live on entry when
@@ -381,12 +399,10 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
// Add CFI for the this save.
if (!FPRSaveLabel)
FPRSaveLabel = MMI.getContext().CreateTempSymbol();
- unsigned Reg = I->getReg();
+ unsigned Reg = MRI->getDwarfRegNum(I->getReg(), true);
int64_t Offset = getFrameIndexOffset(MF, I->getFrameIdx());
- MachineLocation Slot(MachineLocation::VirtualFP,
- SPOffsetFromCFA + Offset);
- MachineLocation RegValue(Reg);
- Moves.push_back(MachineMove(FPRSaveLabel, Slot, RegValue));
+ MMI.addFrameInst(MCCFIInstruction::createOffset(
+ FPRSaveLabel, Reg, SPOffsetFromCFA + Offset));
}
}
// Complete the CFI for the FPR saves, modelling them as taking effect
@@ -404,8 +420,7 @@ void SystemZFrameLowering::emitEpilogue(MachineFunction &MF,
SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
// Skip the return instruction.
- assert(MBBI->getOpcode() == SystemZ::RET &&
- "Can only insert epilogue into returning blocks");
+ assert(MBBI->isReturn() && "Can only insert epilogue into returning blocks");
uint64_t StackSize = getAllocatedStackSize(MF);
if (ZFI->getLowSavedGPR()) {
@@ -453,11 +468,6 @@ int SystemZFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
// offset is therefore negative.
int64_t Offset = (MFFrame->getObjectOffset(FI) +
MFFrame->getOffsetAdjustment());
- if (FI >= 0)
- // Non-fixed objects are allocated below the incoming stack pointer.
- // Account for the space at the top of the frame that we choose not
- // to allocate.
- Offset += getUnallocatedTopBytes(MF);
// Make the offset relative to the incoming stack pointer.
Offset -= getOffsetOfLocalArea();
@@ -469,23 +479,12 @@ int SystemZFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
}
uint64_t SystemZFrameLowering::
-getUnallocatedTopBytes(const MachineFunction &MF) const {
- return MF.getInfo<SystemZMachineFunctionInfo>()->getSavedGPRFrameSize();
-}
-
-uint64_t SystemZFrameLowering::
getAllocatedStackSize(const MachineFunction &MF) const {
const MachineFrameInfo *MFFrame = MF.getFrameInfo();
// Start with the size of the local variables and spill slots.
uint64_t StackSize = MFFrame->getStackSize();
- // Remove any bytes that we choose not to allocate.
- StackSize -= getUnallocatedTopBytes(MF);
-
- // Include space for an emergency spill slot, if one might be needed.
- StackSize += getEmergencySpillSlotSize(MF);
-
// We need to allocate the ABI-defined 160-byte base area whenever
// we allocate stack space for our own use and whenever we call another
// function.
@@ -495,19 +494,6 @@ getAllocatedStackSize(const MachineFunction &MF) const {
return StackSize;
}
-unsigned SystemZFrameLowering::
-getEmergencySpillSlotSize(const MachineFunction &MF) const {
- const MachineFrameInfo *MFFrame = MF.getFrameInfo();
- uint64_t MaxReach = MFFrame->getStackSize() + SystemZMC::CallFrameSize * 2;
- return isUInt<12>(MaxReach) ? 0 : 8;
-}
-
-unsigned SystemZFrameLowering::
-getEmergencySpillSlotOffset(const MachineFunction &MF) const {
- assert(getEmergencySpillSlotSize(MF) && "No emergency spill slot");
- return SystemZMC::CallFrameSize;
-}
-
bool
SystemZFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
// The ABI requires us to allocate 160 bytes of stack space for the callee,
diff --git a/lib/Target/SystemZ/SystemZFrameLowering.h b/lib/Target/SystemZ/SystemZFrameLowering.h
index 5ca049c486ce..9b0a1d5f224c 100644
--- a/lib/Target/SystemZ/SystemZFrameLowering.h
+++ b/lib/Target/SystemZ/SystemZFrameLowering.h
@@ -29,7 +29,10 @@ public:
SystemZFrameLowering(const SystemZTargetMachine &tm,
const SystemZSubtarget &sti);
- // Override FrameLowering.
+ // Override TargetFrameLowering.
+ virtual bool isFPCloseToIncomingSP() const LLVM_OVERRIDE { return false; }
+ virtual const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries) const
+ LLVM_OVERRIDE;
virtual void
processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS) const LLVM_OVERRIDE;
@@ -45,6 +48,8 @@ public:
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const
LLVM_OVERRIDE;
+ virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
+ RegScavenger *RS) const;
virtual void emitPrologue(MachineFunction &MF) const LLVM_OVERRIDE;
virtual void emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const LLVM_OVERRIDE;
@@ -59,29 +64,9 @@ public:
MachineBasicBlock::iterator MI) const
LLVM_OVERRIDE;
- // The target-independent code automatically allocates save slots for
- // call-saved GPRs. However, we don't need those slots for SystemZ,
- // because the ABI sets aside GPR save slots in the caller-allocated part
- // of the frame. Since the target-independent code puts this unneeded
- // area at the top of the callee-allocated part of frame, we choose not
- // to allocate it and adjust the offsets accordingly. Return the
- // size of this unallocated area.
- // FIXME: seems a bit hackish.
- uint64_t getUnallocatedTopBytes(const MachineFunction &MF) const;
-
// Return the number of bytes in the callee-allocated part of the frame.
uint64_t getAllocatedStackSize(const MachineFunction &MF) const;
- // Return the number of frame bytes that should be reserved for
- // an emergency spill slot, for use by the register scaveneger.
- // Return 0 if register scaveging won't be needed.
- unsigned getEmergencySpillSlotSize(const MachineFunction &MF) const;
-
- // Return the offset from the frame pointer of the emergency spill slot,
- // which always fits within a 12-bit unsigned displacement field.
- // Only valid if getEmergencySpillSlotSize(MF) returns nonzero.
- unsigned getEmergencySpillSlotOffset(const MachineFunction &MF) const;
-
// Return the byte offset from the incoming stack pointer of Reg's
// ABI-defined save slot. Return 0 if no slot is defined for Reg.
unsigned getRegSpillOffset(unsigned Reg) const {
diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index d436ba9dd078..f4a27733ce0e 100644
--- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "SystemZTargetMachine.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -91,44 +92,90 @@ struct SystemZAddressingMode {
}
};
+// Return a mask with Count low bits set.
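+// The shift is split into two steps so that Count == 64 yields an all-ones
+// mask without shifting by the full operand width (which would be undefined
+// behavior). For example, allOnes(12) == 0xfff and allOnes(64) == ~0ULL.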
+static uint64_t allOnes(unsigned int Count) {
+ return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
+}
+
+// Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation
+// given by Opcode. The operands are: Input (R2), Start (I3), End (I4) and
+// Rotate (I5). The combined operand value is effectively:
+//
+// (or (rotl Input, Rotate), ~Mask)
+//
+// for RNSBG and:
+//
+// (and (rotl Input, Rotate), Mask)
+//
+// otherwise. The output value has BitSize bits, although Input may be
+// narrower (in which case the upper bits are don't care).
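+//
+// For example (illustrative), an i64 AND with 0xffff could be described by
+// Rotate == 0, Start == 48 and End == 63: counting bits from the most
+// significant end, bits 48 to 63 are the low 16 bits of the input.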
+struct RxSBGOperands {
+ RxSBGOperands(unsigned Op, SDValue N)
+ : Opcode(Op), BitSize(N.getValueType().getSizeInBits()),
+ Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),
+ Rotate(0) {}
+
+ unsigned Opcode;
+ unsigned BitSize;
+ uint64_t Mask;
+ SDValue Input;
+ unsigned Start;
+ unsigned End;
+ unsigned Rotate;
+};
+
class SystemZDAGToDAGISel : public SelectionDAGISel {
const SystemZTargetLowering &Lowering;
const SystemZSubtarget &Subtarget;
// Used by SystemZOperands.td to create integer constants.
- inline SDValue getImm(const SDNode *Node, uint64_t Imm) {
+ inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
return CurDAG->getTargetConstant(Imm, Node->getValueType(0));
}
+ const SystemZTargetMachine &getTargetMachine() const {
+ return static_cast<const SystemZTargetMachine &>(TM);
+ }
+
+ const SystemZInstrInfo *getInstrInfo() const {
+ return getTargetMachine().getInstrInfo();
+ }
+
// Try to fold more of the base or index of AM into AM, where IsBase
// selects between the base and index.
- bool expandAddress(SystemZAddressingMode &AM, bool IsBase);
+ bool expandAddress(SystemZAddressingMode &AM, bool IsBase) const;
// Try to describe N in AM, returning true on success.
- bool selectAddress(SDValue N, SystemZAddressingMode &AM);
+ bool selectAddress(SDValue N, SystemZAddressingMode &AM) const;
// Extract individual target operands from matched address AM.
void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
- SDValue &Base, SDValue &Disp);
+ SDValue &Base, SDValue &Disp) const;
void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
- SDValue &Base, SDValue &Disp, SDValue &Index);
+ SDValue &Base, SDValue &Disp, SDValue &Index) const;
// Try to match Addr as a FormBD address with displacement type DR.
// Return true on success, storing the base and displacement in
// Base and Disp respectively.
bool selectBDAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
- SDValue &Base, SDValue &Disp);
+ SDValue &Base, SDValue &Disp) const;
+
+ // Try to match Addr as a FormBDX address with displacement type DR.
+ // Return true on success and if the result had no index. Store the
+ // base and displacement in Base and Disp respectively.
+ bool selectMVIAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
+ SDValue &Base, SDValue &Disp) const;
// Try to match Addr as a FormBDX* address of form Form with
// displacement type DR. Return true on success, storing the base,
// displacement and index in Base, Disp and Index respectively.
bool selectBDXAddr(SystemZAddressingMode::AddrForm Form,
SystemZAddressingMode::DispRange DR, SDValue Addr,
- SDValue &Base, SDValue &Disp, SDValue &Index);
+ SDValue &Base, SDValue &Disp, SDValue &Index) const;
// PC-relative address matching routines used by SystemZOperands.td.
- bool selectPCRelAddress(SDValue Addr, SDValue &Target) {
- if (Addr.getOpcode() == SystemZISD::PCREL_WRAPPER) {
+ bool selectPCRelAddress(SDValue Addr, SDValue &Target) const {
+ if (SystemZISD::isPCREL(Addr.getOpcode())) {
Target = Addr.getOperand(0);
return true;
}
@@ -136,69 +183,104 @@ class SystemZDAGToDAGISel : public SelectionDAGISel {
}
// BD matching routines used by SystemZOperands.td.
- bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) {
+ bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
return selectBDAddr(SystemZAddressingMode::Disp12Only, Addr, Base, Disp);
}
- bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) {
+ bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
return selectBDAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
}
- bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) {
+ bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
return selectBDAddr(SystemZAddressingMode::Disp20Only, Addr, Base, Disp);
}
- bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) {
+ bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
return selectBDAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
}
+ // MVI matching routines used by SystemZOperands.td.
+ bool selectMVIAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
+ return selectMVIAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
+ }
+ bool selectMVIAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
+ return selectMVIAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
+ }
+
// BDX matching routines used by SystemZOperands.td.
bool selectBDXAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
- SDValue &Index) {
+ SDValue &Index) const {
return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
SystemZAddressingMode::Disp12Only,
Addr, Base, Disp, Index);
}
bool selectBDXAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
- SDValue &Index) {
+ SDValue &Index) const {
return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
SystemZAddressingMode::Disp12Pair,
Addr, Base, Disp, Index);
}
bool selectDynAlloc12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
- SDValue &Index) {
+ SDValue &Index) const {
return selectBDXAddr(SystemZAddressingMode::FormBDXDynAlloc,
SystemZAddressingMode::Disp12Only,
Addr, Base, Disp, Index);
}
bool selectBDXAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp,
- SDValue &Index) {
+ SDValue &Index) const {
return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
SystemZAddressingMode::Disp20Only,
Addr, Base, Disp, Index);
}
bool selectBDXAddr20Only128(SDValue Addr, SDValue &Base, SDValue &Disp,
- SDValue &Index) {
+ SDValue &Index) const {
return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
SystemZAddressingMode::Disp20Only128,
Addr, Base, Disp, Index);
}
bool selectBDXAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
- SDValue &Index) {
+ SDValue &Index) const {
return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
SystemZAddressingMode::Disp20Pair,
Addr, Base, Disp, Index);
}
bool selectLAAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
- SDValue &Index) {
+ SDValue &Index) const {
return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
SystemZAddressingMode::Disp12Pair,
Addr, Base, Disp, Index);
}
bool selectLAAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
- SDValue &Index) {
+ SDValue &Index) const {
return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
SystemZAddressingMode::Disp20Pair,
Addr, Base, Disp, Index);
}
+ // Check whether (or Op (and X InsertMask)) is effectively an insertion
+ // of X into bits InsertMask of some Y != Op. Return true if so and
+ // set Op to that Y.
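+ // For example (illustrative), for a 64-bit Op with
+ // InsertMask == 0xffffffff00000000, an Op of (and Y 0xffffffff) qualifies:
+ // the OR inserts X into the high 32 bits of Y, and Op is set to Y.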
+ bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask) const;
+
+ // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used.
+ // Return true on success.
+ bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) const;
+
+ // Try to fold some of RxSBG.Input into other fields of RxSBG.
+ // Return true on success.
+ bool expandRxSBG(RxSBGOperands &RxSBG) const;
+
+ // Return an undefined value of type VT.
+ SDValue getUNDEF(SDLoc DL, EVT VT) const;
+
+ // Convert N to VT, if it isn't already.
+ SDValue convertTo(SDLoc DL, EVT VT, SDValue N) const;
+
+ // Try to implement AND or shift node N using RISBG with the zero flag set.
+ // Return the selected node on success, otherwise return null.
+ SDNode *tryRISBGZero(SDNode *N);
+
+ // Try to use RISBG or Opcode to implement OR or XOR node N.
+ // Return the selected node on success, otherwise return null.
+ SDNode *tryRxSBG(SDNode *N, unsigned Opcode);
+
// If Op0 is null, then Node is a constant that can be loaded using:
//
// (Opcode UpperVal LowerVal)
@@ -209,6 +291,26 @@ class SystemZDAGToDAGISel : public SelectionDAGISel {
SDNode *splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
uint64_t UpperVal, uint64_t LowerVal);
+ // Return true if Load and Store are loads and stores of the same size
+ // and are guaranteed not to overlap. Such operations can be implemented
+ // using block (SS-format) instructions.
+ //
+ // Partial overlap would lead to incorrect code, since the block operations
+ // are logically bytewise, even though they have a fast path for the
+ // non-overlapping case. We also need to avoid full overlap (i.e. two
+ // addresses that might be equal at run time) because although that case
+ // would be handled correctly, it might be implemented by millicode.
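+ // For example (illustrative), an 8-byte MVC whose destination starts one
+ // byte after its source would propagate the first source byte through the
+ // destination rather than performing the copy that the IR describes.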
+ bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const;
+
+ // N is a (store (load Y), X) pattern. Return true if it can use an MVC
+ // from Y to X.
+ bool storeLoadCanUseMVC(SDNode *N) const;
+
+ // N is a (store (op (load A[0]), (load A[1])), X) pattern. Return true
+ // if A[1 - I] == X and if N can use a block operation like NC from A[I]
+ // to X.
+ bool storeLoadCanUseBlockBinary(SDNode *N, unsigned I) const;
+
public:
SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
: SelectionDAGISel(TM, OptLevel),
@@ -294,9 +396,9 @@ static bool expandIndex(SystemZAddressingMode &AM, SDValue Base,
// The base or index of AM is equivalent to Op0 + Op1, where IsBase selects
// between the base and index. Try to fold Op1 into AM's displacement.
static bool expandDisp(SystemZAddressingMode &AM, bool IsBase,
- SDValue Op0, ConstantSDNode *Op1) {
+ SDValue Op0, uint64_t Op1) {
// First try adjusting the displacement.
- int64_t TestDisp = AM.Disp + Op1->getSExtValue();
+ int64_t TestDisp = AM.Disp + Op1;
if (selectDisp(AM.DR, TestDisp)) {
changeComponent(AM, IsBase, Op0);
AM.Disp = TestDisp;
@@ -309,7 +411,7 @@ static bool expandDisp(SystemZAddressingMode &AM, bool IsBase,
}
bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM,
- bool IsBase) {
+ bool IsBase) const {
SDValue N = IsBase ? AM.Base : AM.Index;
unsigned Opcode = N.getOpcode();
if (Opcode == ISD::TRUNCATE) {
@@ -329,13 +431,23 @@ bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM,
return expandAdjDynAlloc(AM, IsBase, Op0);
if (Op0Code == ISD::Constant)
- return expandDisp(AM, IsBase, Op1, cast<ConstantSDNode>(Op0));
+ return expandDisp(AM, IsBase, Op1,
+ cast<ConstantSDNode>(Op0)->getSExtValue());
if (Op1Code == ISD::Constant)
- return expandDisp(AM, IsBase, Op0, cast<ConstantSDNode>(Op1));
+ return expandDisp(AM, IsBase, Op0,
+ cast<ConstantSDNode>(Op1)->getSExtValue());
if (IsBase && expandIndex(AM, Op0, Op1))
return true;
}
+ if (Opcode == SystemZISD::PCREL_OFFSET) {
+ SDValue Full = N.getOperand(0);
+ SDValue Base = N.getOperand(1);
+ SDValue Anchor = Base.getOperand(0);
+ uint64_t Offset = (cast<GlobalAddressSDNode>(Full)->getOffset() -
+ cast<GlobalAddressSDNode>(Anchor)->getOffset());
+ return expandDisp(AM, IsBase, Base, Offset);
+ }
return false;
}
@@ -414,14 +526,15 @@ static bool shouldUseLA(SDNode *Base, int64_t Disp, SDNode *Index) {
// Return true if Addr is suitable for AM, updating AM if so.
bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
- SystemZAddressingMode &AM) {
+ SystemZAddressingMode &AM) const {
// Start out assuming that the address will need to be loaded separately,
// then try to extend it as much as we can.
AM.Base = Addr;
// First try treating the address as a constant.
if (Addr.getOpcode() == ISD::Constant &&
- expandDisp(AM, true, SDValue(), cast<ConstantSDNode>(Addr)))
+ expandDisp(AM, true, SDValue(),
+ cast<ConstantSDNode>(Addr)->getSExtValue()))
;
else
// Otherwise try expanding each component.
@@ -461,7 +574,7 @@ static void insertDAGNode(SelectionDAG *DAG, SDNode *Pos, SDValue N) {
void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
EVT VT, SDValue &Base,
- SDValue &Disp) {
+ SDValue &Disp) const {
Base = AM.Base;
if (!Base.getNode())
// Register 0 means "no base". This is mostly useful for shifts.
@@ -474,7 +587,7 @@ void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
// Truncate values from i64 to i32, for shifts.
assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 &&
"Unexpected truncation");
- DebugLoc DL = Base.getDebugLoc();
+ SDLoc DL(Base);
SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base);
insertDAGNode(CurDAG, Base.getNode(), Trunc);
Base = Trunc;
@@ -486,7 +599,8 @@ void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
EVT VT, SDValue &Base,
- SDValue &Disp, SDValue &Index) {
+ SDValue &Disp,
+ SDValue &Index) const {
getAddressOperands(AM, VT, Base, Disp);
Index = AM.Index;
@@ -497,7 +611,7 @@ void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR,
SDValue Addr, SDValue &Base,
- SDValue &Disp) {
+ SDValue &Disp) const {
SystemZAddressingMode AM(SystemZAddressingMode::FormBD, DR);
if (!selectAddress(Addr, AM))
return false;
@@ -506,10 +620,21 @@ bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR,
return true;
}
+bool SystemZDAGToDAGISel::selectMVIAddr(SystemZAddressingMode::DispRange DR,
+ SDValue Addr, SDValue &Base,
+ SDValue &Disp) const {
+ SystemZAddressingMode AM(SystemZAddressingMode::FormBDXNormal, DR);
+ if (!selectAddress(Addr, AM) || AM.Index.getNode())
+ return false;
+
+ getAddressOperands(AM, Addr.getValueType(), Base, Disp);
+ return true;
+}
+
bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form,
SystemZAddressingMode::DispRange DR,
SDValue Addr, SDValue &Base,
- SDValue &Disp, SDValue &Index) {
+ SDValue &Disp, SDValue &Index) const {
SystemZAddressingMode AM(Form, DR);
if (!selectAddress(Addr, AM))
return false;
@@ -518,11 +643,317 @@ bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form,
return true;
}
+bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
+ uint64_t InsertMask) const {
+ // We're only interested in cases where the insertion is into some operand
+ // of Op, rather than into Op itself. The only useful case is an AND.
+ if (Op.getOpcode() != ISD::AND)
+ return false;
+
+ // We need a constant mask.
+ ConstantSDNode *MaskNode =
+ dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
+ if (!MaskNode)
+ return false;
+
+ // It's not an insertion of Op.getOperand(0) if the two masks overlap.
+ uint64_t AndMask = MaskNode->getZExtValue();
+ if (InsertMask & AndMask)
+ return false;
+
+ // It's only an insertion if all bits are covered or are known to be zero.
+ // The inner check covers all cases but is more expensive.
+ uint64_t Used = allOnes(Op.getValueType().getSizeInBits());
+ if (Used != (AndMask | InsertMask)) {
+ APInt KnownZero, KnownOne;
+ CurDAG->ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne);
+ if (Used != (AndMask | InsertMask | KnownZero.getZExtValue()))
+ return false;
+ }
+
+ Op = Op.getOperand(0);
+ return true;
+}
+
+bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG,
+ uint64_t Mask) const {
+ const SystemZInstrInfo *TII = getInstrInfo();
+ if (RxSBG.Rotate != 0)
+ Mask = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate));
+ Mask &= RxSBG.Mask;
+ if (TII->isRxSBGMask(Mask, RxSBG.BitSize, RxSBG.Start, RxSBG.End)) {
+ RxSBG.Mask = Mask;
+ return true;
+ }
+ return false;
+}
+
+// Return true if any bits of (RxSBG.Input & Mask) are significant.
+static bool maskMatters(RxSBGOperands &RxSBG, uint64_t Mask) {
+ // Rotate the mask in the same way as RxSBG.Input is rotated.
+ if (RxSBG.Rotate != 0)
+ Mask = ((Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)));
+ return (Mask & RxSBG.Mask) != 0;
+}
+
+bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
+ SDValue N = RxSBG.Input;
+ unsigned Opcode = N.getOpcode();
+ switch (Opcode) {
+ case ISD::AND: {
+ if (RxSBG.Opcode == SystemZ::RNSBG)
+ return false;
+
+ ConstantSDNode *MaskNode =
+ dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ if (!MaskNode)
+ return false;
+
+ SDValue Input = N.getOperand(0);
+ uint64_t Mask = MaskNode->getZExtValue();
+ if (!refineRxSBGMask(RxSBG, Mask)) {
+ // If some bits of Input are already known zeros, those bits will have
+ // been removed from the mask. See if adding them back in makes the
+ // mask suitable.
+ APInt KnownZero, KnownOne;
+ CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ Mask |= KnownZero.getZExtValue();
+ if (!refineRxSBGMask(RxSBG, Mask))
+ return false;
+ }
+ RxSBG.Input = Input;
+ return true;
+ }
+
+ case ISD::OR: {
+ if (RxSBG.Opcode != SystemZ::RNSBG)
+ return false;
+
+ ConstantSDNode *MaskNode =
+ dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ if (!MaskNode)
+ return false;
+
+ SDValue Input = N.getOperand(0);
+ uint64_t Mask = ~MaskNode->getZExtValue();
+ if (!refineRxSBGMask(RxSBG, Mask)) {
+ // If some bits of Input are already known ones, those bits will have
+ // been removed from the mask. See if adding them back in makes the
+ // mask suitable.
+ APInt KnownZero, KnownOne;
+ CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ Mask &= ~KnownOne.getZExtValue();
+ if (!refineRxSBGMask(RxSBG, Mask))
+ return false;
+ }
+ RxSBG.Input = Input;
+ return true;
+ }
+
+ case ISD::ROTL: {
+ // Any 64-bit rotate left can be merged into the RxSBG.
+ if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
+ return false;
+ ConstantSDNode *CountNode
+ = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ if (!CountNode)
+ return false;
+
+ RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63;
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND: {
+ // Check that the extension bits are don't-care (i.e. are masked out
+ // by the final mask).
+ unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
+ if (maskMatters(RxSBG, allOnes(RxSBG.BitSize) - allOnes(InnerBitSize)))
+ return false;
+
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+
+ case ISD::SHL: {
+ ConstantSDNode *CountNode =
+ dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ if (!CountNode)
+ return false;
+
+ uint64_t Count = CountNode->getZExtValue();
+ unsigned BitSize = N.getValueType().getSizeInBits();
+ if (Count < 1 || Count >= BitSize)
+ return false;
+
+ if (RxSBG.Opcode == SystemZ::RNSBG) {
+ // Treat (shl X, count) as (rotl X, size-count) as long as the bottom
+ // count bits from RxSBG.Input are ignored.
+ if (maskMatters(RxSBG, allOnes(Count)))
+ return false;
+ } else {
+ // Treat (shl X, count) as (and (rotl X, count), ~0<<count).
+ if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
+ return false;
+ }
+
+ RxSBG.Rotate = (RxSBG.Rotate + Count) & 63;
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+
+ case ISD::SRL:
+ case ISD::SRA: {
+ ConstantSDNode *CountNode =
+ dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ if (!CountNode)
+ return false;
+
+ uint64_t Count = CountNode->getZExtValue();
+ unsigned BitSize = N.getValueType().getSizeInBits();
+ if (Count < 1 || Count >= BitSize)
+ return false;
+
+ if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
+ // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
+ // count bits from RxSBG.Input are ignored.
+ if (maskMatters(RxSBG, allOnes(Count) << (BitSize - Count)))
+ return false;
+ } else {
+ // Treat (srl X, count) as (and (rotl X, size-count), ~0>>count),
+ // which is similar to SLL above.
+ if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
+ return false;
+ }
+
+ RxSBG.Rotate = (RxSBG.Rotate - Count) & 63;
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
+SDValue SystemZDAGToDAGISel::getUNDEF(SDLoc DL, EVT VT) const {
+ SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
+ return SDValue(N, 0);
+}
+
+SDValue SystemZDAGToDAGISel::convertTo(SDLoc DL, EVT VT, SDValue N) const {
+ if (N.getValueType() == MVT::i32 && VT == MVT::i64)
+ return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
+ DL, VT, getUNDEF(DL, MVT::i64), N);
+ if (N.getValueType() == MVT::i64 && VT == MVT::i32)
+ return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
+ assert(N.getValueType() == VT && "Unexpected value types");
+ return N;
+}
+
+SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
+ EVT VT = N->getValueType(0);
+ RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
+ unsigned Count = 0;
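+ // Count the operations folded into the RISBG; an any-extend produces no
+ // code of its own, so it is not counted.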
+ while (expandRxSBG(RISBG))
+ if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND)
+ Count += 1;
+ if (Count == 0)
+ return 0;
+ if (Count == 1) {
+ // Prefer to use normal shift instructions over RISBG, since they can handle
+ // all cases and are sometimes shorter.
+ if (N->getOpcode() != ISD::AND)
+ return 0;
+
+ // Prefer register extensions like LLC over RISBG. Also prefer to start
+ // out with normal ANDs if one instruction would be enough. We can convert
+ // these ANDs into an RISBG later if a three-address instruction is useful.
+ if (VT == MVT::i32 ||
+ RISBG.Mask == 0xff ||
+ RISBG.Mask == 0xffff ||
+ SystemZ::isImmLF(~RISBG.Mask) ||
+ SystemZ::isImmHF(~RISBG.Mask)) {
+ // Force the new mask into the DAG, since it may include known-one bits.
+ ConstantSDNode *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
+ if (MaskN->getZExtValue() != RISBG.Mask) {
+ SDValue NewMask = CurDAG->getConstant(RISBG.Mask, VT);
+ N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask);
+ return SelectCode(N);
+ }
+ return 0;
+ }
+ }
+
+ unsigned Opcode = SystemZ::RISBG;
+ EVT OpcodeVT = MVT::i64;
+ if (VT == MVT::i32 && Subtarget.hasHighWord()) {
+ Opcode = SystemZ::RISBMux;
+ OpcodeVT = MVT::i32;
+ RISBG.Start &= 31;
+ RISBG.End &= 31;
+ }
+ SDValue Ops[5] = {
+ getUNDEF(SDLoc(N), OpcodeVT),
+ convertTo(SDLoc(N), OpcodeVT, RISBG.Input),
+ CurDAG->getTargetConstant(RISBG.Start, MVT::i32),
+ CurDAG->getTargetConstant(RISBG.End | 128, MVT::i32), // 128 = "zero remaining bits"
+ CurDAG->getTargetConstant(RISBG.Rotate, MVT::i32)
+ };
+ N = CurDAG->getMachineNode(Opcode, SDLoc(N), OpcodeVT, Ops);
+ return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode();
+}
+
+SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
+ // Try treating each operand of N as the second operand of the RxSBG
+ // and see which goes deepest.
+ RxSBGOperands RxSBG[] = {
+ RxSBGOperands(Opcode, N->getOperand(0)),
+ RxSBGOperands(Opcode, N->getOperand(1))
+ };
+ unsigned Count[] = { 0, 0 };
+ for (unsigned I = 0; I < 2; ++I)
+ while (expandRxSBG(RxSBG[I]))
+ if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND)
+ Count[I] += 1;
+
+ // Do nothing if neither operand is suitable.
+ if (Count[0] == 0 && Count[1] == 0)
+ return 0;
+
+ // Pick the deepest second operand.
+ unsigned I = Count[0] > Count[1] ? 0 : 1;
+ SDValue Op0 = N->getOperand(I ^ 1);
+
+ // Prefer IC for character insertions from memory.
+ if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
+ if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
+ if (Load->getMemoryVT() == MVT::i8)
+ return 0;
+
+ // See whether we can avoid an AND in the first operand by converting
+ // ROSBG to RISBG.
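+ // For example, if the first operand is (and X, ~M) and the insertion mask
+ // is M, RISBG can write the inserted bits directly into X, making the
+ // AND redundant.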
+ if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask))
+ Opcode = SystemZ::RISBG;
+
+ EVT VT = N->getValueType(0);
+ SDValue Ops[5] = {
+ convertTo(SDLoc(N), MVT::i64, Op0),
+ convertTo(SDLoc(N), MVT::i64, RxSBG[I].Input),
+ CurDAG->getTargetConstant(RxSBG[I].Start, MVT::i32),
+ CurDAG->getTargetConstant(RxSBG[I].End, MVT::i32),
+ CurDAG->getTargetConstant(RxSBG[I].Rotate, MVT::i32)
+ };
+ N = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, Ops);
+ return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode();
+}
+
SDNode *SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
SDValue Op0, uint64_t UpperVal,
uint64_t LowerVal) {
EVT VT = Node->getValueType(0);
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
SDValue Upper = CurDAG->getConstant(UpperVal, VT);
if (Op0.getNode())
Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);
@@ -533,6 +964,64 @@ SDNode *SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
return Or.getNode();
}
+bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
+ LoadSDNode *Load) const {
+ // Check that the two memory operands have the same size.
+ if (Load->getMemoryVT() != Store->getMemoryVT())
+ return false;
+
+ // Volatility stops an access from being decomposed.
+ if (Load->isVolatile() || Store->isVolatile())
+ return false;
+
+ // There's no chance of overlap if the load is invariant.
+ if (Load->isInvariant())
+ return true;
+
+ // Otherwise we need to check whether there's an alias.
+ const Value *V1 = Load->getSrcValue();
+ const Value *V2 = Store->getSrcValue();
+ if (!V1 || !V2)
+ return false;
+
+ // Reject equality.
+ uint64_t Size = Load->getMemoryVT().getStoreSize();
+ int64_t End1 = Load->getSrcValueOffset() + Size;
+ int64_t End2 = Store->getSrcValueOffset() + Size;
+ if (V1 == V2 && End1 == End2)
+ return false;
+
+ return !AA->alias(AliasAnalysis::Location(V1, End1, Load->getTBAAInfo()),
+ AliasAnalysis::Location(V2, End2, Store->getTBAAInfo()));
+}
+
+bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
+ StoreSDNode *Store = cast<StoreSDNode>(N);
+ LoadSDNode *Load = cast<LoadSDNode>(Store->getValue());
+
+ // Prefer not to use MVC if either address can use ... RELATIVE LONG
+ // instructions.
+ uint64_t Size = Load->getMemoryVT().getStoreSize();
+ if (Size > 1 && Size <= 8) {
+ // Prefer LHRL, LRL and LGRL.
+ if (SystemZISD::isPCREL(Load->getBasePtr().getOpcode()))
+ return false;
+ // Prefer STHRL, STRL and STGRL.
+ if (SystemZISD::isPCREL(Store->getBasePtr().getOpcode()))
+ return false;
+ }
+
+ return canUseBlockOperation(Store, Load);
+}
+
+bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
+ unsigned I) const {
+ StoreSDNode *StoreA = cast<StoreSDNode>(N);
+ LoadSDNode *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
+ LoadSDNode *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
+ return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
+}
+
SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
// Dump information about the Node being selected
DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n");
@@ -540,16 +1029,26 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ Node->setNodeId(-1);
return 0;
}
unsigned Opcode = Node->getOpcode();
+ SDNode *ResNode = 0;
switch (Opcode) {
case ISD::OR:
+ if (Node->getOperand(1).getOpcode() != ISD::Constant)
+ ResNode = tryRxSBG(Node, SystemZ::ROSBG);
+ goto or_xor;
+
case ISD::XOR:
+ if (Node->getOperand(1).getOpcode() != ISD::Constant)
+ ResNode = tryRxSBG(Node, SystemZ::RXSBG);
+ // Fall through.
+ or_xor:
// If this is a 64-bit operation in which both 32-bit halves are nonzero,
// split the operation into two.
- if (Node->getValueType(0) == MVT::i64)
+ if (!ResNode && Node->getValueType(0) == MVT::i64)
if (ConstantSDNode *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
uint64_t Val = Op1->getZExtValue();
if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val))
@@ -558,6 +1057,17 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
}
break;
+ case ISD::AND:
+ if (Node->getOperand(1).getOpcode() != ISD::Constant)
+ ResNode = tryRxSBG(Node, SystemZ::RNSBG);
+ // Fall through.
+ case ISD::ROTL:
+ case ISD::SHL:
+ case ISD::SRL:
+ if (!ResNode)
+ ResNode = tryRISBGZero(Node);
+ break;
+
case ISD::Constant:
// If this is a 64-bit constant that is out of the range of LLILF,
// LLIHF and LGFI, split it into two 32-bit pieces.
@@ -582,10 +1092,32 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
}
}
break;
+
+ case SystemZISD::SELECT_CCMASK: {
+ SDValue Op0 = Node->getOperand(0);
+ SDValue Op1 = Node->getOperand(1);
+ // Prefer to put any load first, so that it can be matched as a
+ // conditional load.
+ if (Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) {
+ SDValue CCValid = Node->getOperand(2);
+ SDValue CCMask = Node->getOperand(3);
+ uint64_t ConstCCValid =
+ cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
+ uint64_t ConstCCMask =
+ cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
+ // Invert the condition.
+ CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask,
+ CCMask.getValueType());
+ SDValue Op4 = Node->getOperand(4);
+ Node = CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
+ }
+ break;
+ }
}
// Select the default instruction
- SDNode *ResNode = SelectCode(Node);
+ if (!ResNode)
+ ResNode = SelectCode(Node);
DEBUG(errs() << "=> ";
if (ResNode == NULL || ResNode == Node)
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index eb21b31da827..f6e18530f4a5 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -23,8 +23,23 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include <cctype>
+
using namespace llvm;
+namespace {
+// Represents a sequence for extracting a 0/1 value from an IPM result:
+// (((X ^ XORValue) + AddValue) >> Bit)
+struct IPMConversion {
+ IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
+ : XORValue(xorValue), AddValue(addValue), Bit(bit) {}
+
+ int64_t XORValue;
+ int64_t AddValue;
+ unsigned Bit;
+};
+} // end anonymous namespace
+
// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
switch (VT.getSimpleVT().SimpleTy) {
@@ -51,7 +66,10 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
MVT PtrVT = getPointerTy();
// Set up the register classes.
- addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
+ if (Subtarget.hasHighWord())
+ addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
+ else
+ addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
@@ -67,7 +85,7 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
// TODO: It may be better to default to latency-oriented scheduling, however
// LLVM's current latency-oriented scheduler can't handle physreg definitions
- // such as SystemZ has with PSW, so set this to the register-pressure
+ // such as SystemZ has with CC, so set this to the register-pressure
// scheduler, because it can.
setSchedulingPreference(Sched::RegPressure);
@@ -83,8 +101,8 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
++I) {
MVT VT = MVT::SimpleValueType(I);
if (isTypeLegal(VT)) {
- // Expand SETCC(X, Y, COND) into SELECT_CC(X, Y, 1, 0, COND).
- setOperationAction(ISD::SETCC, VT, Expand);
+ // Lower SET_CC into an IPM-based sequence.
+ setOperationAction(ISD::SETCC, VT, Custom);
// Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
setOperationAction(ISD::SELECT, VT, Expand);
@@ -128,9 +146,11 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
- // Use *MUL_LOHI where possible and a wider multiplication otherwise.
+ // Use *MUL_LOHI where possible instead of MULH*.
setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::MULHU, VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, VT, Custom);
+ setOperationAction(ISD::UMUL_LOHI, VT, Custom);
// We have instructions for signed but not unsigned FP conversion.
setOperationAction(ISD::FP_TO_UINT, VT, Expand);
@@ -165,14 +185,6 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
// Give LowerOperation the chance to replace 64-bit ORs with subregs.
setOperationAction(ISD::OR, MVT::i64, Custom);
- // The architecture has 32-bit SMUL_LOHI and UMUL_LOHI (MR and MLR),
- // but they aren't really worth using. There is no 64-bit SMUL_LOHI,
- // but there is a 64-bit UMUL_LOHI: MLGR.
- setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
- setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom);
-
// FIXME: Can we support these natively?
setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
@@ -200,10 +212,8 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
- // Expand these using getExceptionSelectorRegister() and
- // getExceptionPointerRegister().
- setOperationAction(ISD::EXCEPTIONADDR, PtrVT, Expand);
- setOperationAction(ISD::EHSELECTION, PtrVT, Expand);
+ // Handle prefetches with PFD or PFDRL.
+ setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
// Handle floating-point types.
for (unsigned I = MVT::FIRST_FP_VALUETYPE;
@@ -214,6 +224,15 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
// We can use FI for FRINT.
setOperationAction(ISD::FRINT, VT, Legal);
+ // We can use the extended form of FI for other rounding operations.
+ if (Subtarget.hasFPExtension()) {
+ setOperationAction(ISD::FNEARBYINT, VT, Legal);
+ setOperationAction(ISD::FFLOOR, VT, Legal);
+ setOperationAction(ISD::FCEIL, VT, Legal);
+ setOperationAction(ISD::FTRUNC, VT, Legal);
+ setOperationAction(ISD::FROUND, VT, Legal);
+ }
+
// No special instructions for these.
setOperationAction(ISD::FSIN, VT, Expand);
setOperationAction(ISD::FCOS, VT, Expand);
@@ -246,6 +265,43 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction(ISD::VACOPY, MVT::Other, Custom);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
+
+ // We want to use MVC in preference to even a single load/store pair.
+ MaxStoresPerMemcpy = 0;
+ MaxStoresPerMemcpyOptSize = 0;
+
+ // The main memset sequence is a byte store followed by an MVC.
+ // Two STC or MV..I stores win over that, but the kind of fused stores
+ // generated by target-independent code don't when the byte value is
+ // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
+ // than "STC;MVC". Handle the choice in target-specific code instead.
+ MaxStoresPerMemset = 0;
+ MaxStoresPerMemsetOptSize = 0;
+}
+
+EVT SystemZTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
+ if (!VT.isVector())
+ return MVT::i32;
+ return VT.changeVectorElementTypeToInteger();
+}
+
+bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
+ VT = VT.getScalarType();
+
+ if (!VT.isSimple())
+ return false;
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::f32:
+ case MVT::f64:
+ return true;
+ case MVT::f128:
+ return false;
+ default:
+ break;
+ }
+
+ return false;
}
bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
@@ -253,6 +309,47 @@ bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
return Imm.isZero() || Imm.isNegZero();
}
+bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ bool *Fast) const {
+ // Unaligned accesses should never be slower than the expanded version.
+ // We check specifically for aligned accesses in the few cases where
+ // they are required.
+ if (Fast)
+ *Fast = true;
+ return true;
+}
+
+bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
+ // Punt on globals for now, although they can be used in limited
+ // RELATIVE LONG cases.
+ if (AM.BaseGV)
+ return false;
+
+ // Require a 20-bit signed offset.
+ if (!isInt<20>(AM.BaseOffs))
+ return false;
+
+ // Indexing is OK but no scale factor can be applied.
+ return AM.Scale == 0 || AM.Scale == 1;
+}
+
+bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
+ if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
+ return false;
+ unsigned FromBits = FromType->getPrimitiveSizeInBits();
+ unsigned ToBits = ToType->getPrimitiveSizeInBits();
+ return FromBits > ToBits;
+}
+
+bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
+ if (!FromVT.isInteger() || !ToVT.isInteger())
+ return false;
+ unsigned FromBits = FromVT.getSizeInBits();
+ unsigned ToBits = ToVT.getSizeInBits();
+ return FromBits > ToBits;
+}
+
//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//
@@ -264,6 +361,7 @@ SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
case 'a': // Address register
case 'd': // Data register (equivalent to 'r')
case 'f': // Floating-point register
+ case 'h': // High-part register
case 'r': // General-purpose register
return C_RegisterClass;
@@ -306,6 +404,7 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info,
case 'a': // Address register
case 'd': // Data register (equivalent to 'r')
+ case 'h': // High-part register
case 'r': // General-purpose register
if (CallOperandVal->getType()->isIntegerTy())
weight = CW_Register;
@@ -349,8 +448,24 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info,
return weight;
}
+// Parse a "{tNNN}" register constraint for which the register type "t"
+// has already been verified. RC is the class associated with "t" and
+// Map maps 0-based register numbers to LLVM register numbers.
+static std::pair<unsigned, const TargetRegisterClass *>
+parseRegisterNumber(const std::string &Constraint,
+ const TargetRegisterClass *RC, const unsigned *Map) {
+ assert(*(Constraint.end()-1) == '}' && "Missing '}'");
+ if (isdigit(Constraint[2])) {
+ std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
+ unsigned Index = atoi(Suffix.c_str());
+ if (Index < 16 && Map[Index])
+ return std::make_pair(Map[Index], RC);
+ }
+ return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
+}
+
std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
+getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
if (Constraint.size() == 1) {
// GCC Constraint Letters
switch (Constraint[0]) {
@@ -370,6 +485,9 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
+ case 'h': // High-part register (an LLVM extension)
+ return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
+
case 'f': // Floating-point register
if (VT == MVT::f64)
return std::make_pair(0U, &SystemZ::FP64BitRegClass);
@@ -378,6 +496,32 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
return std::make_pair(0U, &SystemZ::FP32BitRegClass);
}
}
+ if (Constraint[0] == '{') {
+ // We need to override the default register parsing for GPRs and FPRs
+ // because the interpretation depends on VT. The internal names of
+ // the registers are also different from the external names
+ // (F0D and F0S instead of F0, etc.).
+ if (Constraint[1] == 'r') {
+ if (VT == MVT::i32)
+ return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
+ SystemZMC::GR32Regs);
+ if (VT == MVT::i128)
+ return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
+ SystemZMC::GR128Regs);
+ return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
+ SystemZMC::GR64Regs);
+ }
+ if (Constraint[1] == 'f') {
+ if (VT == MVT::f32)
+ return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
+ SystemZMC::FP32Regs);
+ if (VT == MVT::f128)
+ return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
+ SystemZMC::FP128Regs);
+ return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
+ SystemZMC::FP64Regs);
+ }
+ }
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
@@ -433,10 +577,21 @@ LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
#include "SystemZGenCallingConv.inc"
+bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
+ Type *ToType) const {
+ return isTruncateFree(FromType, ToType);
+}
+
+bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
+ if (!CI->isTailCall())
+ return false;
+ return true;
+}
+
// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
-static SDValue convertLocVTToValVT(SelectionDAG &DAG, DebugLoc DL,
+static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
CCValAssign &VA, SDValue Chain,
SDValue Value) {
// If the argument has been promoted from a smaller type, insert an
@@ -461,7 +616,7 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, DebugLoc DL,
// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getValVT(). The caller is responsible for handling indirect values.
-static SDValue convertValVTToLocVT(SelectionDAG &DAG, DebugLoc DL,
+static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
CCValAssign &VA, SDValue Value) {
switch (VA.getLocInfo()) {
case CCValAssign::SExt:
@@ -480,7 +635,7 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, DebugLoc DL,
SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -595,35 +750,56 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
return Chain;
}
+static bool canUseSiblingCall(CCState ArgCCInfo,
+ SmallVectorImpl<CCValAssign> &ArgLocs) {
+ // Punt if there are any indirect or stack arguments, or if the call
+ // needs the call-saved argument register R6.
+ for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
+ CCValAssign &VA = ArgLocs[I];
+ if (VA.getLocInfo() == CCValAssign::Indirect)
+ return false;
+ if (!VA.isRegLoc())
+ return false;
+ unsigned Reg = VA.getLocReg();
+ if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
+ return false;
+ }
+ return true;
+}
+
SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &DL = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &DL = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
- bool &isTailCall = CLI.IsTailCall;
+ bool &IsTailCall = CLI.IsTailCall;
CallingConv::ID CallConv = CLI.CallConv;
bool IsVarArg = CLI.IsVarArg;
MachineFunction &MF = DAG.getMachineFunction();
EVT PtrVT = getPointerTy();
- // SystemZ target does not yet support tail call optimization.
- isTailCall = false;
-
// Analyze the operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
+ // We don't support GuaranteedTailCallOpt, only automatically-detected
+ // sibling calls.
+ if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
+ IsTailCall = false;
+
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = ArgCCInfo.getNextStackOffset();
// Mark the start of the call.
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true));
+ if (!IsTailCall)
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
+ DL);
// Copy argument values to their designated locations.
SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
@@ -672,22 +848,27 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
&MemOpChains[0], MemOpChains.size());
- // Build a sequence of copy-to-reg nodes, chained and glued together.
- SDValue Glue;
- for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
- Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
- RegsToPass[I].second, Glue);
- Glue = Chain.getValue(1);
- }
-
// Accept direct calls by converting symbolic call addresses to the
- // associated Target* opcodes.
+ // associated Target* opcodes. Force %r1 to be used for indirect
+ // tail calls.
+ SDValue Glue;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
} else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
+ } else if (IsTailCall) {
+ Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
+ Glue = Chain.getValue(1);
+ Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
+ }
+
+ // Build a sequence of copy-to-reg nodes, chained and glued together.
+ for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
+ Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
+ RegsToPass[I].second, Glue);
+ Glue = Chain.getValue(1);
}
// The first call operand is the chain and the second is the target address.
@@ -707,6 +888,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Emit the call.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ if (IsTailCall)
+ return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
Glue = Chain.getValue(1);
@@ -714,7 +897,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
Chain = DAG.getCALLSEQ_END(Chain,
DAG.getConstant(NumBytes, PtrVT, true),
DAG.getConstant(0, PtrVT, true),
- Glue);
+ Glue, DL);
Glue = Chain.getValue(1);
// Assign locations to each value returned by this call.
@@ -745,7 +928,7 @@ SystemZTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const {
+ SDLoc DL, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
// Assign locations to each returned value.
@@ -815,6 +998,96 @@ static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#undef CONV
}
+// Return a sequence for getting a 1 from an IPM result when CC has a
+// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
+// The handling of CC values outside CCValid doesn't matter.
+static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
+ // Deal with cases where the result can be taken directly from a bit
+ // of the IPM result.
+ if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
+ return IPMConversion(0, 0, SystemZ::IPM_CC);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
+ return IPMConversion(0, 0, SystemZ::IPM_CC + 1);
+
+ // Deal with cases where we can add a value to force the sign bit
+ // to contain the right value. Putting the bit in 31 means we can
+ // use SRL rather than RISBG(L), and also makes it easier to get a
+ // 0/-1 value, so it has priority over the other tests below.
+ //
+ // These sequences rely on the fact that the upper two bits of the
+ // IPM result are zero.
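+ // For example, if only CC 0 is wanted, the IPM result is less than
+ // 1 << SystemZ::IPM_CC, so adding -(1 << SystemZ::IPM_CC) sets bit 31
+ // exactly when CC is 0.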
+ uint64_t TopBit = uint64_t(1) << 31;
+ if (CCMask == (CCValid & SystemZ::CCMASK_0))
+ return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
+ return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0
+ | SystemZ::CCMASK_1
+ | SystemZ::CCMASK_2)))
+ return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & SystemZ::CCMASK_3))
+ return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_1
+ | SystemZ::CCMASK_2
+ | SystemZ::CCMASK_3)))
+ return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);
+
+ // Next try inverting the value and testing a bit. 0/1 could be
+ // handled this way too, but we dealt with that case above.
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
+ return IPMConversion(-1, 0, SystemZ::IPM_CC);
+
+ // Handle cases where adding a value forces a non-sign bit to contain
+ // the right value.
+ if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
+ return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
+ return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
+
+ // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All of these can be
+ // handled by inverting the low CC bit and applying one of the
+ // sign-based extractions above.
+ if (CCMask == (CCValid & SystemZ::CCMASK_1))
+ return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & SystemZ::CCMASK_2))
+ return IPMConversion(1 << SystemZ::IPM_CC,
+ TopBit - (3 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0
+ | SystemZ::CCMASK_1
+ | SystemZ::CCMASK_3)))
+ return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0
+ | SystemZ::CCMASK_2
+ | SystemZ::CCMASK_3)))
+ return IPMConversion(1 << SystemZ::IPM_CC,
+ TopBit - (1 << SystemZ::IPM_CC), 31);
+
+ llvm_unreachable("Unexpected CC combination");
+}
+
+// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
+// can be converted to a comparison against zero, adjust the operands
+// as necessary.
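+// For example, a signed (X > -1) becomes (X >= 0) and a signed (X < 1)
+// becomes (X <= 0).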
+static void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned,
+ SDValue &CmpOp0, SDValue &CmpOp1,
+ unsigned &CCMask) {
+ if (IsUnsigned)
+ return;
+
+ ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode());
+ if (!ConstOp1)
+ return;
+
+ int64_t Value = ConstOp1->getSExtValue();
+ if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) ||
+ (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) ||
+ (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) ||
+ (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) {
+ CCMask ^= SystemZ::CCMASK_CMP_EQ;
+ CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType());
+ }
+}
+
// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
@@ -840,7 +1113,7 @@ static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
uint64_t Mask = (1 << NumBits) - 1;
if (Load->getExtensionType() == ISD::SEXTLOAD) {
int64_t SignedValue = Constant->getSExtValue();
- if (uint64_t(SignedValue) + (1 << (NumBits - 1)) > Mask)
+ if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
return;
// Unsigned comparison between two sign-extended values is equivalent
// to unsigned comparison between two zero-extended values.
@@ -859,7 +1132,7 @@ static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
// Test whether the high bit of the byte is set.
Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
- else if (SignedValue == -1 && CCMask == SystemZ::CCMASK_CMP_GT)
+ else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE)
// Test whether the high bit of the byte is clear.
Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
else
@@ -879,7 +1152,7 @@ static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
if (CmpOp0.getValueType() != MVT::i32 ||
Load->getExtensionType() != ExtType)
- CmpOp0 = DAG.getExtLoad(ExtType, Load->getDebugLoc(), MVT::i32,
+ CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
Load->getChain(), Load->getBasePtr(),
Load->getPointerInfo(), Load->getMemoryVT(),
Load->isVolatile(), Load->isNonTemporal(),
@@ -891,67 +1164,309 @@ static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
CmpOp1 = DAG.getConstant(Value, MVT::i32);
}
-// Return true if a comparison described by CCMask, CmpOp0 and CmpOp1
-// is an equality comparison that is better implemented using unsigned
-// rather than signed comparison instructions.
-static bool preferUnsignedComparison(SelectionDAG &DAG, SDValue CmpOp0,
- SDValue CmpOp1, unsigned CCMask) {
- // The test must be for equality or inequality.
- if (CCMask != SystemZ::CCMASK_CMP_EQ && CCMask != SystemZ::CCMASK_CMP_NE)
+// Return true if Op is either an unextended load, or a load suitable
+// for integer register-memory comparisons of type ICmpType.
+static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
+ LoadSDNode *Load = dyn_cast<LoadSDNode>(Op.getNode());
+ if (Load) {
+ // There are no instructions to compare a register with a memory byte.
+ if (Load->getMemoryVT() == MVT::i8)
+ return false;
+ // Otherwise decide on extension type.
+ switch (Load->getExtensionType()) {
+ case ISD::NON_EXTLOAD:
+ return true;
+ case ISD::SEXTLOAD:
+ return ICmpType != SystemZICMP::UnsignedOnly;
+ case ISD::ZEXTLOAD:
+ return ICmpType != SystemZICMP::SignedOnly;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+// Return true if it is better to swap comparison operands Op0 and Op1.
+// ICmpType is the type of an integer comparison.
+static bool shouldSwapCmpOperands(SDValue Op0, SDValue Op1,
+ unsigned ICmpType) {
+ // Leave f128 comparisons alone, since they have no memory forms.
+ if (Op0.getValueType() == MVT::f128)
return false;
- if (CmpOp1.getOpcode() == ISD::Constant) {
- uint64_t Value = cast<ConstantSDNode>(CmpOp1)->getSExtValue();
+ // Always keep a floating-point constant second, since comparisons with
+ // zero can use LOAD TEST and comparisons with other constants make a
+ // natural memory operand.
+ if (isa<ConstantFPSDNode>(Op1))
+ return false;
- // If we're comparing with memory, prefer unsigned comparisons for
- // values that are in the unsigned 16-bit range but not the signed
- // 16-bit range. We want to use CLFHSI and CLGHSI.
- if (CmpOp0.hasOneUse() &&
- ISD::isNormalLoad(CmpOp0.getNode()) &&
- (Value >= 32768 && Value < 65536))
- return true;
+ // Never swap comparisons with zero since there are many ways to optimize
+ // those later.
+ ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
+ if (COp1 && COp1->getZExtValue() == 0)
+ return false;
- // Use unsigned comparisons for values that are in the CLGFI range
- // but not in the CGFI range.
- if (CmpOp0.getValueType() == MVT::i64 && (Value >> 31) == 1)
+ // Look for cases where Op0 is a single-use load and Op1 isn't.
+ // In that case we generally prefer the memory to be second.
+ if ((isNaturalMemoryOperand(Op0, ICmpType) && Op0.hasOneUse()) &&
+ !(isNaturalMemoryOperand(Op1, ICmpType) && Op1.hasOneUse())) {
+ // The only exceptions are when the second operand is a constant and
+ // we can use things like CHHSI.
+ if (!COp1)
return true;
+ // The unsigned memory-immediate instructions can handle 16-bit
+ // unsigned integers.
+ if (ICmpType != SystemZICMP::SignedOnly &&
+ isUInt<16>(COp1->getZExtValue()))
+ return false;
+ // The signed memory-immediate instructions can handle 16-bit
+ // signed integers.
+ if (ICmpType != SystemZICMP::UnsignedOnly &&
+ isInt<16>(COp1->getSExtValue()))
+ return false;
+ return true;
+ }
+ return false;
+}
+
+// Return true if shift operation N has an in-range constant shift value.
+// Store it in ShiftVal if so.
+static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
+ ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ if (!Shift)
+ return false;
+ uint64_t Amount = Shift->getZExtValue();
+ if (Amount >= N.getValueType().getSizeInBits())
return false;
+
+ ShiftVal = Amount;
+ return true;
+}
+
+// Check whether an AND with Mask is suitable for a TEST UNDER MASK
+// instruction and whether the CC value is descriptive enough to handle
+// a comparison of type Opcode between the AND result and CmpVal.
+// CCMask says which comparison result is being tested and BitSize is
+// the number of bits in the operands. If TEST UNDER MASK can be used,
+// return the corresponding CC mask, otherwise return 0.
+static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
+ uint64_t Mask, uint64_t CmpVal,
+ unsigned ICmpType) {
+ assert(Mask != 0 && "ANDs with zero should have been removed by now");
+
+ // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
+ if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
+ !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
+ return 0;
+
+ // Work out the masks for the lowest and highest bits.
+ unsigned HighShift = 63 - countLeadingZeros(Mask);
+ uint64_t High = uint64_t(1) << HighShift;
+ uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
+
+ // Signed ordered comparisons are effectively unsigned if the sign
+ // bit is dropped.
+ bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
+
+ // Check for equality comparisons with 0, or the equivalent.
+ if (CmpVal == 0) {
+ if (CCMask == SystemZ::CCMASK_CMP_EQ)
+ return SystemZ::CCMASK_TM_ALL_0;
+ if (CCMask == SystemZ::CCMASK_CMP_NE)
+ return SystemZ::CCMASK_TM_SOME_1;
+ }
+ if (EffectivelyUnsigned && CmpVal <= Low) {
+ if (CCMask == SystemZ::CCMASK_CMP_LT)
+ return SystemZ::CCMASK_TM_ALL_0;
+ if (CCMask == SystemZ::CCMASK_CMP_GE)
+ return SystemZ::CCMASK_TM_SOME_1;
+ }
+ if (EffectivelyUnsigned && CmpVal < Low) {
+ if (CCMask == SystemZ::CCMASK_CMP_LE)
+ return SystemZ::CCMASK_TM_ALL_0;
+ if (CCMask == SystemZ::CCMASK_CMP_GT)
+ return SystemZ::CCMASK_TM_SOME_1;
}
- // Prefer CL for zero-extended loads.
- if (CmpOp1.getOpcode() == ISD::ZERO_EXTEND ||
- ISD::isZEXTLoad(CmpOp1.getNode()))
- return true;
+ // Check for equality comparisons with the mask, or the equivalent.
+ if (CmpVal == Mask) {
+ if (CCMask == SystemZ::CCMASK_CMP_EQ)
+ return SystemZ::CCMASK_TM_ALL_1;
+ if (CCMask == SystemZ::CCMASK_CMP_NE)
+ return SystemZ::CCMASK_TM_SOME_0;
+ }
+ if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
+ if (CCMask == SystemZ::CCMASK_CMP_GT)
+ return SystemZ::CCMASK_TM_ALL_1;
+ if (CCMask == SystemZ::CCMASK_CMP_LE)
+ return SystemZ::CCMASK_TM_SOME_0;
+ }
+ if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
+ if (CCMask == SystemZ::CCMASK_CMP_GE)
+ return SystemZ::CCMASK_TM_ALL_1;
+ if (CCMask == SystemZ::CCMASK_CMP_LT)
+ return SystemZ::CCMASK_TM_SOME_0;
+ }
- // ...and for "in-register" zero extensions.
- if (CmpOp1.getOpcode() == ISD::AND && CmpOp1.getValueType() == MVT::i64) {
- SDValue Mask = CmpOp1.getOperand(1);
- if (Mask.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(Mask)->getZExtValue() == 0xffffffff)
- return true;
+ // Check for ordered comparisons with the top bit.
+ if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
+ if (CCMask == SystemZ::CCMASK_CMP_LE)
+ return SystemZ::CCMASK_TM_MSB_0;
+ if (CCMask == SystemZ::CCMASK_CMP_GT)
+ return SystemZ::CCMASK_TM_MSB_1;
+ }
+ if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
+ if (CCMask == SystemZ::CCMASK_CMP_LT)
+ return SystemZ::CCMASK_TM_MSB_0;
+ if (CCMask == SystemZ::CCMASK_CMP_GE)
+ return SystemZ::CCMASK_TM_MSB_1;
}
- return false;
+ // If there are just two bits, we can do equality checks for Low and High
+ // as well.
+ if (Mask == Low + High) {
+ if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
+ return SystemZ::CCMASK_TM_MIXED_MSB_0;
+ if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
+ return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
+ if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
+ return SystemZ::CCMASK_TM_MIXED_MSB_1;
+ if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
+ return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
+ }
+
+ // Looks like we've exhausted our options.
+ return 0;
+}
+
+// See whether the comparison (Opcode CmpOp0, CmpOp1, ICmpType) can be
+// implemented as a TEST UNDER MASK instruction when the condition being
+// tested is as described by CCValid and CCMask. Update the arguments
+// with the TM version if so.
+static void adjustForTestUnderMask(SelectionDAG &DAG, unsigned &Opcode,
+ SDValue &CmpOp0, SDValue &CmpOp1,
+ unsigned &CCValid, unsigned &CCMask,
+ unsigned &ICmpType) {
+ // Check that we have a comparison with a constant.
+ ConstantSDNode *ConstCmpOp1 = dyn_cast<ConstantSDNode>(CmpOp1);
+ if (!ConstCmpOp1)
+ return;
+ uint64_t CmpVal = ConstCmpOp1->getZExtValue();
+
+ // Check whether the nonconstant input is an AND with a constant mask.
+ if (CmpOp0.getOpcode() != ISD::AND)
+ return;
+ SDValue AndOp0 = CmpOp0.getOperand(0);
+ SDValue AndOp1 = CmpOp0.getOperand(1);
+ ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(AndOp1.getNode());
+ if (!Mask)
+ return;
+ uint64_t MaskVal = Mask->getZExtValue();
+
+ // Check whether the combination of mask, comparison value and comparison
+ // type are suitable.
+ unsigned BitSize = CmpOp0.getValueType().getSizeInBits();
+ unsigned NewCCMask, ShiftVal;
+ if (ICmpType != SystemZICMP::SignedOnly &&
+ AndOp0.getOpcode() == ISD::SHL &&
+ isSimpleShift(AndOp0, ShiftVal) &&
+ (NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal >> ShiftVal,
+ CmpVal >> ShiftVal,
+ SystemZICMP::Any))) {
+ AndOp0 = AndOp0.getOperand(0);
+ AndOp1 = DAG.getConstant(MaskVal >> ShiftVal, AndOp0.getValueType());
+ } else if (ICmpType != SystemZICMP::SignedOnly &&
+ AndOp0.getOpcode() == ISD::SRL &&
+ isSimpleShift(AndOp0, ShiftVal) &&
+ (NewCCMask = getTestUnderMaskCond(BitSize, CCMask,
+ MaskVal << ShiftVal,
+ CmpVal << ShiftVal,
+ SystemZICMP::UnsignedOnly))) {
+ AndOp0 = AndOp0.getOperand(0);
+ AndOp1 = DAG.getConstant(MaskVal << ShiftVal, AndOp0.getValueType());
+ } else {
+ NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal, CmpVal,
+ ICmpType);
+ if (!NewCCMask)
+ return;
+ }
+
+ // Go ahead and make the change.
+ Opcode = SystemZISD::TM;
+ CmpOp0 = AndOp0;
+ CmpOp1 = AndOp1;
+ ICmpType = (bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
+ bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
+ CCValid = SystemZ::CCMASK_TM;
+ CCMask = NewCCMask;
}
-// Return a target node that compares CmpOp0 and CmpOp1. Set CCMask to the
-// 4-bit condition-code mask for CC.
-static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
- ISD::CondCode CC, unsigned &CCMask) {
+// Return a target node that compares CmpOp0 with CmpOp1 and stores a
+// 2-bit result in CC. Set CCValid to the CCMASK_* of all possible
+// 2-bit results and CCMask to the subset of those results that are
+// associated with Cond.
+static SDValue emitCmp(const SystemZTargetMachine &TM, SelectionDAG &DAG,
+ SDLoc DL, SDValue CmpOp0, SDValue CmpOp1,
+ ISD::CondCode Cond, unsigned &CCValid,
+ unsigned &CCMask) {
bool IsUnsigned = false;
- CCMask = CCMaskForCondCode(CC);
- if (!CmpOp0.getValueType().isFloatingPoint()) {
+ CCMask = CCMaskForCondCode(Cond);
+ unsigned Opcode, ICmpType = 0;
+ if (CmpOp0.getValueType().isFloatingPoint()) {
+ CCValid = SystemZ::CCMASK_FCMP;
+ Opcode = SystemZISD::FCMP;
+ } else {
IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
- CCMask &= ~SystemZ::CCMASK_CMP_UO;
+ CCValid = SystemZ::CCMASK_ICMP;
+ CCMask &= CCValid;
+ adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
- if (preferUnsignedComparison(DAG, CmpOp0, CmpOp1, CCMask))
- IsUnsigned = true;
+ Opcode = SystemZISD::ICMP;
+ // Choose the type of comparison. Equality and inequality tests can
+ // use either signed or unsigned comparisons. The choice also doesn't
+ // matter if both sign bits are known to be clear. In those cases we
+ // want to give the main isel code the freedom to choose whichever
+ // form fits best.
+ if (CCMask == SystemZ::CCMASK_CMP_EQ ||
+ CCMask == SystemZ::CCMASK_CMP_NE ||
+ (DAG.SignBitIsZero(CmpOp0) && DAG.SignBitIsZero(CmpOp1)))
+ ICmpType = SystemZICMP::Any;
+ else if (IsUnsigned)
+ ICmpType = SystemZICMP::UnsignedOnly;
+ else
+ ICmpType = SystemZICMP::SignedOnly;
}
- DebugLoc DL = CmpOp0.getDebugLoc();
- return DAG.getNode((IsUnsigned ? SystemZISD::UCMP : SystemZISD::CMP),
- DL, MVT::Glue, CmpOp0, CmpOp1);
+ if (shouldSwapCmpOperands(CmpOp0, CmpOp1, ICmpType)) {
+ std::swap(CmpOp0, CmpOp1);
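+ // Swapping the operands reverses GT and LT but leaves EQ and UO unchanged.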
+ CCMask = ((CCMask & SystemZ::CCMASK_CMP_EQ) |
+ (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
+ (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
+ (CCMask & SystemZ::CCMASK_CMP_UO));
+ }
+
+ adjustForTestUnderMask(DAG, Opcode, CmpOp0, CmpOp1, CCValid, CCMask,
+ ICmpType);
+ if (Opcode == SystemZISD::ICMP || Opcode == SystemZISD::TM)
+ return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1,
+ DAG.getConstant(ICmpType, MVT::i32));
+ return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1);
+}
+
+// Implement a 32-bit *MUL_LOHI operation by extending both operands to
+// 64 bits. Extend is the extension type to use. Store the high part
+// in Hi and the low part in Lo.
+static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL,
+ unsigned Extend, SDValue Op0, SDValue Op1,
+ SDValue &Hi, SDValue &Lo) {
+ Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
+ Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
+ SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
+ Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64));
+ Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
+ Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}
// Lower a binary operation that produces two VT results, one in each
@@ -959,7 +1474,7 @@ static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
// on the extended Op0 and (unextended) Op1. Store the even register result
// in Even and the odd register result in Odd.
-static void lowerGR128Binary(SelectionDAG &DAG, DebugLoc DL, EVT VT,
+static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
unsigned Extend, unsigned Opcode,
SDValue Op0, SDValue Op1,
SDValue &Even, SDValue &Odd) {
@@ -967,14 +1482,38 @@ static void lowerGR128Binary(SelectionDAG &DAG, DebugLoc DL, EVT VT,
SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
SDValue(In128, 0), Op1);
bool Is32Bit = is32Bit(VT);
- SDValue SubReg0 = DAG.getTargetConstant(SystemZ::even128(Is32Bit), VT);
- SDValue SubReg1 = DAG.getTargetConstant(SystemZ::odd128(Is32Bit), VT);
- SDNode *Reg0 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
- VT, Result, SubReg0);
- SDNode *Reg1 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
- VT, Result, SubReg1);
- Even = SDValue(Reg0, 0);
- Odd = SDValue(Reg1, 0);
+ Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
+ Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
+}
+
+SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue CmpOp0 = Op.getOperand(0);
+ SDValue CmpOp1 = Op.getOperand(1);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ SDLoc DL(Op);
+
+ unsigned CCValid, CCMask;
+ SDValue Glue = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
+
+ IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
+ SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
+
+ if (Conversion.XORValue)
+ Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result,
+ DAG.getConstant(Conversion.XORValue, MVT::i32));
+
+ if (Conversion.AddValue)
+ Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result,
+ DAG.getConstant(Conversion.AddValue, MVT::i32));
+
+ // The SHR/AND sequence should get optimized to an RISBG.
+ Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result,
+ DAG.getConstant(Conversion.Bit, MVT::i32));
+ if (Conversion.Bit != 31)
+ Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
+ DAG.getConstant(1, MVT::i32));
+ return Result;
}
SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
@@ -983,12 +1522,13 @@ SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue CmpOp0 = Op.getOperand(2);
SDValue CmpOp1 = Op.getOperand(3);
SDValue Dest = Op.getOperand(4);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
- unsigned CCMask;
- SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask);
+ unsigned CCValid, CCMask;
+ SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
- Chain, DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
+ Chain, DAG.getConstant(CCValid, MVT::i32),
+ DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
}
SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
@@ -998,14 +1538,15 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
SDValue TrueOp = Op.getOperand(2);
SDValue FalseOp = Op.getOperand(3);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
- unsigned CCMask;
- SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask);
+ unsigned CCValid, CCMask;
+ SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
- SmallVector<SDValue, 4> Ops;
+ SmallVector<SDValue, 5> Ops;
Ops.push_back(TrueOp);
Ops.push_back(FalseOp);
+ Ops.push_back(DAG.getConstant(CCValid, MVT::i32));
Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
Ops.push_back(Flags);
@@ -1015,7 +1556,7 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
SelectionDAG &DAG) const {
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
const GlobalValue *GV = Node->getGlobal();
int64_t Offset = Node->getOffset();
EVT PtrVT = getPointerTy();
@@ -1024,18 +1565,18 @@ SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
SDValue Result;
if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
- // Make sure that the offset is aligned to a halfword. If it isn't,
- // create an "anchor" at the previous 12-bit boundary.
- // FIXME check whether there is a better way of handling this.
- if (Offset & 1) {
- Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
- Offset & ~uint64_t(0xfff));
- Offset &= 0xfff;
- } else {
- Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
+ // Assign anchors at 1<<12 byte boundaries.
+ uint64_t Anchor = Offset & ~uint64_t(0xfff);
+ Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
+ Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
+
+ // The offset can be folded into the address if it is aligned to a halfword.
+ Offset -= Anchor;
+ if (Offset != 0 && (Offset & 1) == 0) {
+ SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
+ Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
Offset = 0;
}
- Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
} else {
Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
@@ -1054,7 +1595,7 @@ SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
SelectionDAG &DAG) const {
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
const GlobalValue *GV = Node->getGlobal();
EVT PtrVT = getPointerTy();
TLSModel::Model model = TM.getTLSModel(GV);
@@ -1093,7 +1634,7 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
SelectionDAG &DAG) const {
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
const BlockAddress *BA = Node->getBlockAddress();
int64_t Offset = Node->getOffset();
EVT PtrVT = getPointerTy();
@@ -1105,7 +1646,7 @@ SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
SelectionDAG &DAG) const {
- DebugLoc DL = JT->getDebugLoc();
+ SDLoc DL(JT);
EVT PtrVT = getPointerTy();
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
@@ -1115,7 +1656,7 @@ SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
SelectionDAG &DAG) const {
- DebugLoc DL = CP->getDebugLoc();
+ SDLoc DL(CP);
EVT PtrVT = getPointerTy();
SDValue Result;
@@ -1132,29 +1673,38 @@ SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue In = Op.getOperand(0);
EVT InVT = In.getValueType();
EVT ResVT = Op.getValueType();
- SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
- SDValue Shift32 = DAG.getConstant(32, MVT::i64);
if (InVT == MVT::i32 && ResVT == MVT::f32) {
- SDValue In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
- SDValue Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, Shift32);
- SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Shift);
- SDNode *Out = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
- MVT::f32, Out64, SubReg32);
- return SDValue(Out, 0);
+ SDValue In64;
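+    // With the high-word facility the i32 can be inserted directly into the
+    // high 32 bits of a 64-bit GPR; otherwise extend it and shift it into place.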
+ if (Subtarget.hasHighWord()) {
+ SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
+ MVT::i64);
+ In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
+ MVT::i64, SDValue(U64, 0), In);
+ } else {
+ In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
+ In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
+ DAG.getConstant(32, MVT::i64));
+ }
+ SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
+ return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
+ DL, MVT::f32, Out64);
}
if (InVT == MVT::f32 && ResVT == MVT::i32) {
SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
- SDNode *In64 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
- MVT::f64, SDValue(U64, 0), In, SubReg32);
- SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, SDValue(In64, 0));
- SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, Shift32);
- SDValue Out = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
- return Out;
+ SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
+ MVT::f64, SDValue(U64, 0), In);
+ SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
+ if (Subtarget.hasHighWord())
+ return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
+ MVT::i32, Out64);
+ SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
+ DAG.getConstant(32, MVT::i64));
+ return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
}
llvm_unreachable("Unexpected bitcast combination");
}
@@ -1169,7 +1719,7 @@ SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
SDValue Chain = Op.getOperand(0);
SDValue Addr = Op.getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// The initial values of each field.
const unsigned NumFields = 4;
@@ -1203,7 +1753,7 @@ SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
SDValue SrcPtr = Op.getOperand(2);
const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32),
/*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
@@ -1214,7 +1764,7 @@ SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
unsigned SPReg = getStackPointerRegisterToSaveRestore();
@@ -1237,18 +1787,64 @@ lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
return DAG.getMergeValues(Ops, 2, DL);
}
-SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
+SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
- assert(!is32Bit(VT) && "Only support 64-bit UMUL_LOHI");
+ SDLoc DL(Op);
+ SDValue Ops[2];
+ if (is32Bit(VT))
+ // Just do a normal 64-bit multiplication and extract the results.
+ // We define this so that it can be used for constant division.
+ lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
+ Op.getOperand(1), Ops[1], Ops[0]);
+ else {
+ // Do a full 128-bit multiplication based on UMUL_LOHI64:
+ //
+ // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
+ //
+ // but using the fact that the upper halves are either all zeros
+ // or all ones:
+ //
+ // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
+ //
+    // and grouping the subtracted terms together, since computing them is
+    // quicker than the multiplication:
+ //
+ // (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
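+    //
+    // (lh and rh are 0 or -1 here, so lh * rl is either 0 or -rl, which is
+    // exactly -(lh & rl), and similarly for ll * rh.)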
+ SDValue C63 = DAG.getConstant(63, MVT::i64);
+ SDValue LL = Op.getOperand(0);
+ SDValue RL = Op.getOperand(1);
+ SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
+ SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
+ // UMUL_LOHI64 returns the low result in the odd register and the high
+ // result in the even register. SMUL_LOHI is defined to return the
+ // low half first, so the results are in reverse order.
+ lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
+ LL, RL, Ops[1], Ops[0]);
+ SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
+ SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
+ SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
+ Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
+ }
+ return DAG.getMergeValues(Ops, 2, DL);
+}
- // UMUL_LOHI64 returns the low result in the odd register and the high
- // result in the even register. UMUL_LOHI is defined to return the
- // low half first, so the results are in reverse order.
+SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
SDValue Ops[2];
- lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
- Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
+ if (is32Bit(VT))
+ // Just do a normal 64-bit multiplication and extract the results.
+ // We define this so that it can be used for constant division.
+ lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
+ Op.getOperand(1), Ops[1], Ops[0]);
+ else
+ // UMUL_LOHI64 returns the low result in the odd register and the high
+ // result in the even register. UMUL_LOHI is defined to return the
+ // low half first, so the results are in reverse order.
+ lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
+ Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
return DAG.getMergeValues(Ops, 2, DL);
}
@@ -1257,19 +1853,24 @@ SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
+ unsigned Opcode;
// We use DSGF for 32-bit division.
if (is32Bit(VT)) {
Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
- Op1 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op1);
- }
+ Opcode = SystemZISD::SDIVREM32;
+ } else if (DAG.ComputeNumSignBits(Op1) > 32) {
+ Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
+ Opcode = SystemZISD::SDIVREM32;
+ } else
+ Opcode = SystemZISD::SDIVREM64;
// DSG(F) takes a 64-bit dividend, so the even register in the GR128
// input is "don't care". The instruction returns the remainder in
// the even register and the quotient in the odd register.
SDValue Ops[2];
- lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::SDIVREM64,
+ lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
Op0, Op1, Ops[1], Ops[0]);
return DAG.getMergeValues(Ops, 2, DL);
}
@@ -1277,7 +1878,7 @@ SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// DL(G) uses a double-width dividend, so we need to clear the even
// register in the GR128 input. The instruction returns the remainder
@@ -1332,22 +1933,20 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
// high 32 bits and just masks out low bits. We can skip it if so.
if (HighOp.getOpcode() == ISD::AND &&
HighOp.getOperand(1).getOpcode() == ISD::Constant) {
- ConstantSDNode *MaskNode = cast<ConstantSDNode>(HighOp.getOperand(1));
- uint64_t Mask = MaskNode->getZExtValue() | Masks[High];
- if ((Mask >> 32) == 0xffffffff)
- HighOp = HighOp.getOperand(0);
+ SDValue HighOp0 = HighOp.getOperand(0);
+ uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
+ if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
+ HighOp = HighOp0;
}
// Take advantage of the fact that all GR32 operations only change the
// low 32 bits by truncating Low to an i32 and inserting it directly
// using a subreg. The interesting cases are those where the truncation
// can be folded.
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
- SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
- SDNode *Result = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
- MVT::i64, HighOp, Low32, SubReg32);
- return SDValue(Result, 0);
+ return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
+ MVT::i64, HighOp, Low32);
}
// Op is an 8-, 16- or 32-bit ATOMIC_LOAD_* operation. Lower the first
@@ -1368,7 +1967,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
SDValue Addr = Node->getBasePtr();
SDValue Src2 = Node->getVal();
MachineMemOperand *MMO = Node->getMemOperand();
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
EVT PtrVT = Addr.getValueType();
// Convert atomic subtracts of constants into additions.
@@ -1442,7 +2041,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
SDValue CmpVal = Node->getOperand(2);
SDValue SwapVal = Node->getOperand(3);
MachineMemOperand *MMO = Node->getMemOperand();
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
EVT PtrVT = Addr.getValueType();
// Get the address of the containing word.
@@ -1474,7 +2073,7 @@ SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
- return DAG.getCopyFromReg(Op.getOperand(0), Op.getDebugLoc(),
+ return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
SystemZ::R15D, Op.getValueType());
}
@@ -1482,10 +2081,30 @@ SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
- return DAG.getCopyToReg(Op.getOperand(0), Op.getDebugLoc(),
+ return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op),
SystemZ::R15D, Op.getOperand(1));
}
+SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
+ SelectionDAG &DAG) const {
+ bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
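+  // PFD only prefetches data, so there is nothing useful to do for
+  // instruction prefetches.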
+ if (!IsData)
+ // Just preserve the chain.
+ return Op.getOperand(0);
+
+ bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
+ MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode());
+ SDValue Ops[] = {
+ Op.getOperand(0),
+ DAG.getConstant(Code, MVT::i32),
+ Op.getOperand(1)
+ };
+ return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op),
+ Node->getVTList(), Ops, array_lengthof(Ops),
+ Node->getMemoryVT(), Node->getMemOperand());
+}
+
SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
@@ -1493,6 +2112,8 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
return lowerBR_CC(Op, DAG);
case ISD::SELECT_CC:
return lowerSELECT_CC(Op, DAG);
+ case ISD::SETCC:
+ return lowerSETCC(Op, DAG);
case ISD::GlobalAddress:
return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
case ISD::GlobalTLSAddress:
@@ -1511,6 +2132,8 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
return lowerVACOPY(Op, DAG);
case ISD::DYNAMIC_STACKALLOC:
return lowerDYNAMIC_STACKALLOC(Op, DAG);
+ case ISD::SMUL_LOHI:
+ return lowerSMUL_LOHI(Op, DAG);
case ISD::UMUL_LOHI:
return lowerUMUL_LOHI(Op, DAG);
case ISD::SDIVREM:
@@ -1547,6 +2170,8 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
return lowerSTACKSAVE(Op, DAG);
case ISD::STACKRESTORE:
return lowerSTACKRESTORE(Op, DAG);
+ case ISD::PREFETCH:
+ return lowerPREFETCH(Op, DAG);
default:
llvm_unreachable("Unexpected node to lower");
}
@@ -1557,9 +2182,12 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
OPCODE(RET_FLAG);
OPCODE(CALL);
+ OPCODE(SIBCALL);
OPCODE(PCREL_WRAPPER);
- OPCODE(CMP);
- OPCODE(UCMP);
+ OPCODE(PCREL_OFFSET);
+ OPCODE(ICMP);
+ OPCODE(FCMP);
+ OPCODE(TM);
OPCODE(BR_CCMASK);
OPCODE(SELECT_CCMASK);
OPCODE(ADJDYNALLOC);
@@ -1568,6 +2196,20 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(SDIVREM64);
OPCODE(UDIVREM32);
OPCODE(UDIVREM64);
+ OPCODE(MVC);
+ OPCODE(MVC_LOOP);
+ OPCODE(NC);
+ OPCODE(NC_LOOP);
+ OPCODE(OC);
+ OPCODE(OC_LOOP);
+ OPCODE(XC);
+ OPCODE(XC_LOOP);
+ OPCODE(CLC);
+ OPCODE(CLC_LOOP);
+ OPCODE(STRCMP);
+ OPCODE(STPCPY);
+ OPCODE(SEARCH_STRING);
+ OPCODE(IPM);
OPCODE(ATOMIC_SWAPW);
OPCODE(ATOMIC_LOADW_ADD);
OPCODE(ATOMIC_LOADW_SUB);
@@ -1580,6 +2222,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(ATOMIC_LOADW_UMIN);
OPCODE(ATOMIC_LOADW_UMAX);
OPCODE(ATOMIC_CMP_SWAPW);
+ OPCODE(PREFETCH);
}
return NULL;
#undef OPCODE
@@ -1609,6 +2252,31 @@ static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
return NewMBB;
}
+// Split MBB before MI and return the new block (the one that contains MI).
+static MachineBasicBlock *splitBlockBefore(MachineInstr *MI,
+ MachineBasicBlock *MBB) {
+ MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
+ NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
+ NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
+ return NewMBB;
+}
+
+// Force base value Base into a register before MI. Return the register.
+static unsigned forceReg(MachineInstr *MI, MachineOperand &Base,
+ const SystemZInstrInfo *TII) {
+ if (Base.isReg())
+ return Base.getReg();
+
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg)
+ .addOperand(Base).addImm(0).addReg(0);
+ return Reg;
+}
+
// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr *MI,
@@ -1618,21 +2286,20 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI,
unsigned DestReg = MI->getOperand(0).getReg();
unsigned TrueReg = MI->getOperand(1).getReg();
unsigned FalseReg = MI->getOperand(2).getReg();
- unsigned CCMask = MI->getOperand(3).getImm();
+ unsigned CCValid = MI->getOperand(3).getImm();
+ unsigned CCMask = MI->getOperand(4).getImm();
DebugLoc DL = MI->getDebugLoc();
MachineBasicBlock *StartMBB = MBB;
- MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB);
+ MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
// StartMBB:
- // ...
- // TrueVal = ...
- // cmpTY ccX, r1, r2
- // jCC JoinMBB
+ // BRC CCMask, JoinMBB
// # fallthrough to FalseMBB
MBB = StartMBB;
- BuildMI(MBB, DL, TII->get(SystemZ::BRCL)).addImm(CCMask).addMBB(JoinMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
MBB->addSuccessor(JoinMBB);
MBB->addSuccessor(FalseMBB);
@@ -1645,7 +2312,7 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI,
// %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
// ...
MBB = JoinMBB;
- BuildMI(*MBB, MBB->begin(), DL, TII->get(SystemZ::PHI), DestReg)
+ BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg)
.addReg(TrueReg).addMBB(StartMBB)
.addReg(FalseReg).addMBB(FalseMBB);
@@ -1653,6 +2320,69 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI,
return JoinMBB;
}
+// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
+// StoreOpcode is the store to use and Invert says whether the store should
+// happen when the condition is false rather than true. If a STORE ON
+// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
+MachineBasicBlock *
+SystemZTargetLowering::emitCondStore(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned StoreOpcode, unsigned STOCOpcode,
+ bool Invert) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+
+ unsigned SrcReg = MI->getOperand(0).getReg();
+ MachineOperand Base = MI->getOperand(1);
+ int64_t Disp = MI->getOperand(2).getImm();
+ unsigned IndexReg = MI->getOperand(3).getReg();
+ unsigned CCValid = MI->getOperand(4).getImm();
+ unsigned CCMask = MI->getOperand(5).getImm();
+ DebugLoc DL = MI->getDebugLoc();
+
+ StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
+
+ // Use STOCOpcode if possible. We could use different store patterns in
+ // order to avoid matching the index register, but the performance trade-offs
+ // might be more complicated in that case.
+ if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
+ if (Invert)
+ CCMask ^= CCValid;
+ BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
+ .addReg(SrcReg).addOperand(Base).addImm(Disp)
+ .addImm(CCValid).addImm(CCMask);
+ MI->eraseFromParent();
+ return MBB;
+ }
+
+ // Get the condition needed to branch around the store.
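+  // (XOR-ing the mask with CCValid inverts it over exactly the CC values
+  // that this comparison can produce.)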
+ if (!Invert)
+ CCMask ^= CCValid;
+
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
+ MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
+
+ // StartMBB:
+ // BRC CCMask, JoinMBB
+ // # fallthrough to FalseMBB
+ MBB = StartMBB;
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
+ MBB->addSuccessor(JoinMBB);
+ MBB->addSuccessor(FalseMBB);
+
+ // FalseMBB:
+ // store %SrcReg, %Disp(%Index,%Base)
+ // # fallthrough to JoinMBB
+ MBB = FalseMBB;
+ BuildMI(MBB, DL, TII->get(StoreOpcode))
+ .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
+ MBB->addSuccessor(JoinMBB);
+
+ MI->eraseFromParent();
+ return JoinMBB;
+}
+
// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
@@ -1669,7 +2399,6 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
- unsigned MaskNE = CCMaskForCondCode(ISD::SETNE);
bool IsSubWord = (BitSize < 32);
// Extract the operands. Base can be a register or a frame index.
@@ -1706,7 +2435,7 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
// Insert a basic block for the main loop.
MachineBasicBlock *StartMBB = MBB;
- MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
+ MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
// StartMBB:
@@ -1740,11 +2469,11 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
.addReg(RotatedOldVal).addOperand(Src2);
if (BitSize < 32)
// XILF with the upper BitSize bits set.
- BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
+ BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
.addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize)));
else if (BitSize == 32)
// XILF with every bit set.
- BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
+ BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
.addReg(Tmp).addImm(~uint32_t(0));
else {
// Use LCGR and add -1 to the result, which is more compact than
@@ -1769,7 +2498,8 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
.addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
.addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
- BuildMI(MBB, DL, TII->get(SystemZ::BRCL)).addImm(MaskNE).addMBB(LoopMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
@@ -1792,7 +2522,6 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
- unsigned MaskNE = CCMaskForCondCode(ISD::SETNE);
bool IsSubWord = (BitSize < 32);
// Extract the operands. Base can be a register or a frame index.
@@ -1828,7 +2557,7 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
// Insert 3 basic blocks for the loop.
MachineBasicBlock *StartMBB = MBB;
- MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
+ MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
@@ -1846,7 +2575,7 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
// %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
// %RotatedOldVal = RLL %OldVal, 0(%BitShift)
// CompareOpcode %RotatedOldVal, %Src2
- // BRCL KeepOldMask, UpdateMBB
+ // BRC KeepOldMask, UpdateMBB
MBB = LoopMBB;
BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
.addReg(OrigVal).addMBB(StartMBB)
@@ -1856,8 +2585,8 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
.addReg(OldVal).addReg(BitShift).addImm(0);
BuildMI(MBB, DL, TII->get(CompareOpcode))
.addReg(RotatedOldVal).addReg(Src2);
- BuildMI(MBB, DL, TII->get(SystemZ::BRCL))
- .addImm(KeepOldMask).addMBB(UpdateMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
MBB->addSuccessor(UpdateMBB);
MBB->addSuccessor(UseAltMBB);
@@ -1887,7 +2616,8 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
.addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
.addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
- BuildMI(MBB, DL, TII->get(SystemZ::BRCL)).addImm(MaskNE).addMBB(LoopMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
@@ -1903,7 +2633,6 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
- unsigned MaskNE = CCMaskForCondCode(ISD::SETNE);
// Extract the operands. Base can be a register or a frame index.
unsigned Dest = MI->getOperand(0).getReg();
@@ -1935,7 +2664,7 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
// Insert 2 basic blocks for the loop.
MachineBasicBlock *StartMBB = MBB;
- MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
+ MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);
@@ -1978,7 +2707,9 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
.addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
BuildMI(MBB, DL, TII->get(SystemZ::CR))
.addReg(Dest).addReg(RetryCmpVal);
- BuildMI(MBB, DL, TII->get(SystemZ::BRCL)).addImm(MaskNE).addMBB(DoneMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ICMP)
+ .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
MBB->addSuccessor(DoneMBB);
MBB->addSuccessor(SetMBB);
@@ -1998,7 +2729,8 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
.addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
.addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
- BuildMI(MBB, DL, TII->get(SystemZ::BRCL)).addImm(MaskNE).addMBB(LoopMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
@@ -2008,8 +2740,8 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
// Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true
// if the high register of the GR128 value must be cleared or false if
-// it's "don't care". SubReg is subreg_odd32 when extending a GR32
-// and subreg_odd when extending a GR64.
+// it's "don't care". SubReg is subreg_l32 when extending a GR32
+// and subreg_l64 when extending a GR64.
MachineBasicBlock *
SystemZTargetLowering::emitExt128(MachineInstr *MI,
MachineBasicBlock *MBB,
@@ -2031,7 +2763,7 @@ SystemZTargetLowering::emitExt128(MachineInstr *MI,
BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
.addImm(0);
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
- .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_high);
+ .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
In128 = NewIn128;
}
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
@@ -2041,9 +2773,238 @@ SystemZTargetLowering::emitExt128(MachineInstr *MI,
return MBB;
}
+MachineBasicBlock *
+SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned Opcode) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
+ uint64_t DestDisp = MI->getOperand(1).getImm();
+ MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
+ uint64_t SrcDisp = MI->getOperand(3).getImm();
+ uint64_t Length = MI->getOperand(4).getImm();
+
+ // When generating more than one CLC, all but the last will need to
+ // branch to the end when a difference is found.
+ MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
+ splitBlockAfter(MI, MBB) : 0);
+
+ // Check for the loop form, in which operand 5 is the trip count.
+ if (MI->getNumExplicitOperands() > 5) {
+ bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
+
+ uint64_t StartCountReg = MI->getOperand(5).getReg();
+ uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
+ uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
+ forceReg(MI, DestBase, TII));
+
+ const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
+ uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
+ uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
+ MRI.createVirtualRegister(RC));
+ uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
+ uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
+ MRI.createVirtualRegister(RC));
+
+ RC = &SystemZ::GR64BitRegClass;
+ uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
+ uint64_t NextCountReg = MRI.createVirtualRegister(RC);
+
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
+ MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
+ MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
+
+ // StartMBB:
+    //   # fall through to LoopMBB
+ MBB->addSuccessor(LoopMBB);
+
+ // LoopMBB:
+ // %ThisDestReg = phi [ %StartDestReg, StartMBB ],
+ // [ %NextDestReg, NextMBB ]
+ // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
+ // [ %NextSrcReg, NextMBB ]
+ // %ThisCountReg = phi [ %StartCountReg, StartMBB ],
+ // [ %NextCountReg, NextMBB ]
+ // ( PFD 2, 768+DestDisp(%ThisDestReg) )
+ // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
+ // ( JLH EndMBB )
+ //
+ // The prefetch is used only for MVC. The JLH is used only for CLC.
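+    // The 768 displacement makes the prefetch run three 256-byte blocks ahead
+    // of the block currently being copied.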
+ MBB = LoopMBB;
+
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
+ .addReg(StartDestReg).addMBB(StartMBB)
+ .addReg(NextDestReg).addMBB(NextMBB);
+ if (!HaveSingleBase)
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
+ .addReg(StartSrcReg).addMBB(StartMBB)
+ .addReg(NextSrcReg).addMBB(NextMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
+ .addReg(StartCountReg).addMBB(StartMBB)
+ .addReg(NextCountReg).addMBB(NextMBB);
+ if (Opcode == SystemZ::MVC)
+ BuildMI(MBB, DL, TII->get(SystemZ::PFD))
+ .addImm(SystemZ::PFD_WRITE)
+ .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
+ BuildMI(MBB, DL, TII->get(Opcode))
+ .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
+ .addReg(ThisSrcReg).addImm(SrcDisp);
+ if (EndMBB) {
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
+ .addMBB(EndMBB);
+ MBB->addSuccessor(EndMBB);
+ MBB->addSuccessor(NextMBB);
+ }
+
+ // NextMBB:
+ // %NextDestReg = LA 256(%ThisDestReg)
+ // %NextSrcReg = LA 256(%ThisSrcReg)
+ // %NextCountReg = AGHI %ThisCountReg, -1
+ // CGHI %NextCountReg, 0
+ // JLH LoopMBB
+    //   # fall through to DoneMBB
+ //
+ // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
+ MBB = NextMBB;
+
+ BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
+ .addReg(ThisDestReg).addImm(256).addReg(0);
+ if (!HaveSingleBase)
+ BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
+ .addReg(ThisSrcReg).addImm(256).addReg(0);
+ BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
+ .addReg(ThisCountReg).addImm(-1);
+ BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
+ .addReg(NextCountReg).addImm(0);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
+ .addMBB(LoopMBB);
+ MBB->addSuccessor(LoopMBB);
+ MBB->addSuccessor(DoneMBB);
+
+ DestBase = MachineOperand::CreateReg(NextDestReg, false);
+ SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
+ Length &= 255;
+ MBB = DoneMBB;
+ }
+ // Handle any remaining bytes with straight-line code.
+ while (Length > 0) {
+ uint64_t ThisLength = std::min(Length, uint64_t(256));
+ // The previous iteration might have created out-of-range displacements.
+ // Apply them using LAY if so.
+ if (!isUInt<12>(DestDisp)) {
+ unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
+ .addOperand(DestBase).addImm(DestDisp).addReg(0);
+ DestBase = MachineOperand::CreateReg(Reg, false);
+ DestDisp = 0;
+ }
+ if (!isUInt<12>(SrcDisp)) {
+ unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
+ .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
+ SrcBase = MachineOperand::CreateReg(Reg, false);
+ SrcDisp = 0;
+ }
+ BuildMI(*MBB, MI, DL, TII->get(Opcode))
+ .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
+ .addOperand(SrcBase).addImm(SrcDisp);
+ DestDisp += ThisLength;
+ SrcDisp += ThisLength;
+ Length -= ThisLength;
+ // If there's another CLC to go, branch to the end if a difference
+ // was found.
+ if (EndMBB && Length > 0) {
+ MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
+ .addMBB(EndMBB);
+ MBB->addSuccessor(EndMBB);
+ MBB->addSuccessor(NextMBB);
+ MBB = NextMBB;
+ }
+ }
+ if (EndMBB) {
+ MBB->addSuccessor(EndMBB);
+ MBB = EndMBB;
+ MBB->addLiveIn(SystemZ::CC);
+ }
+
+ MI->eraseFromParent();
+ return MBB;
+}
+
+// Decompose string pseudo-instruction MI into a loop that continually performs
+// Opcode until CC != 3.
+MachineBasicBlock *
+SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned Opcode) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ uint64_t End1Reg = MI->getOperand(0).getReg();
+ uint64_t Start1Reg = MI->getOperand(1).getReg();
+ uint64_t Start2Reg = MI->getOperand(2).getReg();
+ uint64_t CharReg = MI->getOperand(3).getReg();
+
+ const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
+ uint64_t This1Reg = MRI.createVirtualRegister(RC);
+ uint64_t This2Reg = MRI.createVirtualRegister(RC);
+ uint64_t End2Reg = MRI.createVirtualRegister(RC);
+
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
+ MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
+
+ // StartMBB:
+  //   # fall through to LoopMBB
+ MBB->addSuccessor(LoopMBB);
+
+ // LoopMBB:
+ // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
+ // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
+ // R0L = %CharReg
+ // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
+ // JO LoopMBB
+  //   # fall through to DoneMBB
+ //
+ // The load of R0L can be hoisted by post-RA LICM.
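+  // (These instructions set CC 3 when they stop after processing a
+  // CPU-determined number of bytes, so the loop simply reissues them until
+  // they complete.)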
+ MBB = LoopMBB;
+
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
+ .addReg(Start1Reg).addMBB(StartMBB)
+ .addReg(End1Reg).addMBB(LoopMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
+ .addReg(Start2Reg).addMBB(StartMBB)
+ .addReg(End2Reg).addMBB(LoopMBB);
+ BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
+ BuildMI(MBB, DL, TII->get(Opcode))
+ .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
+ .addReg(This1Reg).addReg(This2Reg);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
+ MBB->addSuccessor(LoopMBB);
+ MBB->addSuccessor(DoneMBB);
+
+ DoneMBB->addLiveIn(SystemZ::CC);
+
+ MI->eraseFromParent();
+ return DoneMBB;
+}
+
MachineBasicBlock *SystemZTargetLowering::
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
switch (MI->getOpcode()) {
+ case SystemZ::Select32Mux:
case SystemZ::Select32:
case SystemZ::SelectF32:
case SystemZ::Select64:
@@ -2051,12 +3012,45 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
case SystemZ::SelectF128:
return emitSelect(MI, MBB);
+ case SystemZ::CondStore8Mux:
+ return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
+ case SystemZ::CondStore8MuxInv:
+ return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
+ case SystemZ::CondStore16Mux:
+ return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
+ case SystemZ::CondStore16MuxInv:
+ return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
+ case SystemZ::CondStore8:
+ return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
+ case SystemZ::CondStore8Inv:
+ return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
+ case SystemZ::CondStore16:
+ return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
+ case SystemZ::CondStore16Inv:
+ return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
+ case SystemZ::CondStore32:
+ return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
+ case SystemZ::CondStore32Inv:
+ return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
+ case SystemZ::CondStore64:
+ return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
+ case SystemZ::CondStore64Inv:
+ return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
+ case SystemZ::CondStoreF32:
+ return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
+ case SystemZ::CondStoreF32Inv:
+ return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
+ case SystemZ::CondStoreF64:
+ return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
+ case SystemZ::CondStoreF64Inv:
+ return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
+
case SystemZ::AEXT128_64:
- return emitExt128(MI, MBB, false, SystemZ::subreg_low);
+ return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
case SystemZ::ZEXT128_32:
- return emitExt128(MI, MBB, true, SystemZ::subreg_low32);
+ return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
case SystemZ::ZEXT128_64:
- return emitExt128(MI, MBB, true, SystemZ::subreg_low);
+ return emitExt128(MI, MBB, true, SystemZ::subreg_l64);
case SystemZ::ATOMIC_SWAPW:
return emitAtomicLoadBinary(MI, MBB, 0, 0);
@@ -2092,98 +3086,98 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
case SystemZ::ATOMIC_LOADW_NR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
case SystemZ::ATOMIC_LOADW_NILH:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
case SystemZ::ATOMIC_LOAD_NR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
- case SystemZ::ATOMIC_LOAD_NILL32:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32);
- case SystemZ::ATOMIC_LOAD_NILH32:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32);
- case SystemZ::ATOMIC_LOAD_NILF32:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32);
- case SystemZ::ATOMIC_LOAD_NGR:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
case SystemZ::ATOMIC_LOAD_NILL:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
case SystemZ::ATOMIC_LOAD_NILH:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64);
- case SystemZ::ATOMIC_LOAD_NIHL:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64);
- case SystemZ::ATOMIC_LOAD_NIHH:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
case SystemZ::ATOMIC_LOAD_NILF:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64);
- case SystemZ::ATOMIC_LOAD_NIHF:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
+ case SystemZ::ATOMIC_LOAD_NGR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
+ case SystemZ::ATOMIC_LOAD_NILL64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
+ case SystemZ::ATOMIC_LOAD_NILH64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
+ case SystemZ::ATOMIC_LOAD_NIHL64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
+ case SystemZ::ATOMIC_LOAD_NIHH64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
+ case SystemZ::ATOMIC_LOAD_NILF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
+ case SystemZ::ATOMIC_LOAD_NIHF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
case SystemZ::ATOMIC_LOADW_OR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
case SystemZ::ATOMIC_LOADW_OILH:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 0);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
case SystemZ::ATOMIC_LOAD_OR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
- case SystemZ::ATOMIC_LOAD_OILL32:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL32, 32);
- case SystemZ::ATOMIC_LOAD_OILH32:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 32);
- case SystemZ::ATOMIC_LOAD_OILF32:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF32, 32);
- case SystemZ::ATOMIC_LOAD_OGR:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
case SystemZ::ATOMIC_LOAD_OILL:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 64);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
case SystemZ::ATOMIC_LOAD_OILH:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 64);
- case SystemZ::ATOMIC_LOAD_OIHL:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL, 64);
- case SystemZ::ATOMIC_LOAD_OIHH:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH, 64);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
case SystemZ::ATOMIC_LOAD_OILF:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 64);
- case SystemZ::ATOMIC_LOAD_OIHF:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF, 64);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
+ case SystemZ::ATOMIC_LOAD_OGR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
+ case SystemZ::ATOMIC_LOAD_OILL64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
+ case SystemZ::ATOMIC_LOAD_OILH64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
+ case SystemZ::ATOMIC_LOAD_OIHL64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
+ case SystemZ::ATOMIC_LOAD_OIHH64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
+ case SystemZ::ATOMIC_LOAD_OILF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
+ case SystemZ::ATOMIC_LOAD_OIHF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
case SystemZ::ATOMIC_LOADW_XR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
case SystemZ::ATOMIC_LOADW_XILF:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 0);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
case SystemZ::ATOMIC_LOAD_XR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
- case SystemZ::ATOMIC_LOAD_XILF32:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 32);
+ case SystemZ::ATOMIC_LOAD_XILF:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
case SystemZ::ATOMIC_LOAD_XGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
- case SystemZ::ATOMIC_LOAD_XILF:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 64);
- case SystemZ::ATOMIC_LOAD_XIHF:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF, 64);
+ case SystemZ::ATOMIC_LOAD_XILF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
+ case SystemZ::ATOMIC_LOAD_XIHF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
case SystemZ::ATOMIC_LOADW_NRi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
case SystemZ::ATOMIC_LOADW_NILHi:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0, true);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
case SystemZ::ATOMIC_LOAD_NRi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
- case SystemZ::ATOMIC_LOAD_NILL32i:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32, true);
- case SystemZ::ATOMIC_LOAD_NILH32i:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32, true);
- case SystemZ::ATOMIC_LOAD_NILF32i:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32, true);
- case SystemZ::ATOMIC_LOAD_NGRi:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
case SystemZ::ATOMIC_LOAD_NILLi:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64, true);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
case SystemZ::ATOMIC_LOAD_NILHi:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64, true);
- case SystemZ::ATOMIC_LOAD_NIHLi:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64, true);
- case SystemZ::ATOMIC_LOAD_NIHHi:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64, true);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
case SystemZ::ATOMIC_LOAD_NILFi:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64, true);
- case SystemZ::ATOMIC_LOAD_NIHFi:
- return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64, true);
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
+ case SystemZ::ATOMIC_LOAD_NGRi:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
+ case SystemZ::ATOMIC_LOAD_NILL64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
+ case SystemZ::ATOMIC_LOAD_NILH64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
+ case SystemZ::ATOMIC_LOAD_NIHL64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
+ case SystemZ::ATOMIC_LOAD_NIHH64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
+ case SystemZ::ATOMIC_LOAD_NILF64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
+ case SystemZ::ATOMIC_LOAD_NIHF64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
case SystemZ::ATOMIC_LOADW_MIN:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
@@ -2227,6 +3221,27 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
case SystemZ::ATOMIC_CMP_SWAPW:
return emitAtomicCmpSwapW(MI, MBB);
+ case SystemZ::MVCSequence:
+ case SystemZ::MVCLoop:
+ return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
+ case SystemZ::NCSequence:
+ case SystemZ::NCLoop:
+ return emitMemMemWrapper(MI, MBB, SystemZ::NC);
+ case SystemZ::OCSequence:
+ case SystemZ::OCLoop:
+ return emitMemMemWrapper(MI, MBB, SystemZ::OC);
+ case SystemZ::XCSequence:
+ case SystemZ::XCLoop:
+ return emitMemMemWrapper(MI, MBB, SystemZ::XC);
+ case SystemZ::CLCSequence:
+ case SystemZ::CLCLoop:
+ return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
+ case SystemZ::CLSTLoop:
+ return emitStringWrapper(MI, MBB, SystemZ::CLST);
+ case SystemZ::MVSTLoop:
+ return emitStringWrapper(MI, MBB, SystemZ::MVST);
+ case SystemZ::SRSTLoop:
+ return emitStringWrapper(MI, MBB, SystemZ::SRST);
default:
llvm_unreachable("Unexpected instr type to insert");
}
diff --git a/lib/Target/SystemZ/SystemZISelLowering.h b/lib/Target/SystemZ/SystemZISelLowering.h
index eea820c8e525..c6dcca6982a6 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/lib/Target/SystemZ/SystemZISelLowering.h
@@ -16,6 +16,7 @@
#define LLVM_TARGET_SystemZ_ISELLOWERING_H
#include "SystemZ.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
@@ -31,17 +32,32 @@ namespace SystemZISD {
// is the target address. The arguments start at operand 2.
// There is an optional glue operand at the end.
CALL,
+ SIBCALL,
// Wraps a TargetGlobalAddress that should be loaded using PC-relative
// accesses (LARL). Operand 0 is the address.
PCREL_WRAPPER,
- // Signed integer and floating-point comparisons. The operands are the
- // two values to compare.
- CMP,
+ // Used in cases where an offset is applied to a TargetGlobalAddress.
+ // Operand 0 is the full TargetGlobalAddress and operand 1 is a
+ // PCREL_WRAPPER for an anchor point. This is used so that we can
+ // cheaply refer to either the full address or the anchor point
+ // as a register base.
+ PCREL_OFFSET,
- // Likewise unsigned integer comparison.
- UCMP,
+ // Integer comparisons. There are three operands: the two values
+ // to compare, and an integer of type SystemZICMP.
+ ICMP,
+
+ // Floating-point comparisons. The two operands are the values to compare.
+ FCMP,
+
+ // Test under mask. The first operand is ANDed with the second operand
+ // and the condition codes are set on the result. The third operand is
+ // a boolean that is true if the condition codes need to distinguish
+ // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
+ // register forms do but the memory forms don't).
+ TM,
// Branches if a condition is true. Operand 0 is the chain operand;
// operand 1 is the 4-bit condition-code mask, with bit N in
@@ -67,10 +83,55 @@ namespace SystemZISD {
// first input operands are GR128s. The trailing numbers are the
// widths of the second operand in bits.
UMUL_LOHI64,
+ SDIVREM32,
SDIVREM64,
UDIVREM32,
UDIVREM64,
+ // Use a series of MVCs to copy bytes from one memory location to another.
+ // The operands are:
+ // - the target address
+ // - the source address
+ // - the constant length
+ //
+ // This isn't a memory opcode because we'd need to attach two
+ // MachineMemOperands rather than one.
+ MVC,
+
+ // Like MVC, but implemented as a loop that handles X*256 bytes
+ // followed by straight-line code to handle the rest (if any).
+ // The value of X is passed as an additional operand.
+ MVC_LOOP,
+
+ // Similar to MVC and MVC_LOOP, but for logic operations (AND, OR, XOR).
+ NC,
+ NC_LOOP,
+ OC,
+ OC_LOOP,
+ XC,
+ XC_LOOP,
+
+ // Use CLC to compare two blocks of memory, with the same comments
+ // as for MVC and MVC_LOOP.
+ CLC,
+ CLC_LOOP,
+
+ // Use an MVST-based sequence to implement stpcpy().
+ STPCPY,
+
+ // Use a CLST-based sequence to implement strcmp(). The two input operands
+ // are the addresses of the strings to compare.
+ STRCMP,
+
+ // Use an SRST-based sequence to search a block of memory. The first
+ // operand is the end address, the second is the start, and the third
+ // is the character to search for. CC is set to 1 on success and 2
+ // on failure.
+ SEARCH_STRING,
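+    // The divisor is known to fit in a signed 32-bit value, so the 64/32-bit
+    // form can still be used after truncating it.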
+
+ // Store the CC value in bits 29 and 28 of an integer.
+ IPM,
+
// Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
// ATOMIC_LOAD_<op>.
//
@@ -102,7 +163,27 @@ namespace SystemZISD {
// operand into the high bits
// Operand 4: the negative of operand 2, for rotating the other way
// Operand 5: the width of the field in bits (8 or 16)
- ATOMIC_CMP_SWAPW
+ ATOMIC_CMP_SWAPW,
+
+ // Prefetch from the second operand using the 4-bit control code in
+ // the first operand. The code is 1 for a load prefetch and 2 for
+ // a store prefetch.
+ PREFETCH
+ };
+
+ // Return true if OPCODE is some kind of PC-relative address.
+ inline bool isPCREL(unsigned Opcode) {
+ return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
+ }
+}
+
+namespace SystemZICMP {
+ // Describes whether an integer comparison needs to be signed or unsigned,
+ // or whether either type is OK.
+ enum {
+ Any,
+ UnsignedOnly,
+ SignedOnly
};
}
@@ -117,17 +198,19 @@ public:
virtual MVT getScalarShiftAmountTy(EVT LHSTy) const LLVM_OVERRIDE {
return MVT::i32;
}
- virtual EVT getSetCCResultType(EVT VT) const {
- return MVT::i32;
- }
- virtual bool isFMAFasterThanMulAndAdd(EVT) const LLVM_OVERRIDE {
- return true;
- }
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+ virtual EVT getSetCCResultType(LLVMContext &, EVT) const LLVM_OVERRIDE;
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const LLVM_OVERRIDE;
+ virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const LLVM_OVERRIDE;
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const
+ LLVM_OVERRIDE;
+ virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const
+ LLVM_OVERRIDE;
+ virtual bool isTruncateFree(Type *, Type *) const LLVM_OVERRIDE;
+ virtual bool isTruncateFree(EVT, EVT) const LLVM_OVERRIDE;
virtual const char *getTargetNodeName(unsigned Opcode) const LLVM_OVERRIDE;
virtual std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const LLVM_OVERRIDE;
+ MVT VT) const LLVM_OVERRIDE;
virtual TargetLowering::ConstraintType
getConstraintType(const std::string &Constraint) const LLVM_OVERRIDE;
virtual TargetLowering::ConstraintWeight
@@ -143,11 +226,13 @@ public:
MachineBasicBlock *BB) const LLVM_OVERRIDE;
virtual SDValue LowerOperation(SDValue Op,
SelectionDAG &DAG) const LLVM_OVERRIDE;
+ virtual bool allowTruncateForTailCall(Type *, Type *) const LLVM_OVERRIDE;
+ virtual bool mayBeEmittedAsTailCall(CallInst *CI) const LLVM_OVERRIDE;
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const LLVM_OVERRIDE;
virtual SDValue
LowerCall(CallLoweringInfo &CLI,
@@ -158,13 +243,14 @@ public:
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const LLVM_OVERRIDE;
+ SDLoc DL, SelectionDAG &DAG) const LLVM_OVERRIDE;
private:
const SystemZSubtarget &Subtarget;
const SystemZTargetMachine &TM;
// Implement LowerOperation for individual opcodes.
+ SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
@@ -178,6 +264,7 @@ private:
SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
@@ -188,10 +275,24 @@ private:
SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
+
+ // If the last instruction before MBBI in MBB was some form of COMPARE,
+ // try to replace it with a COMPARE AND BRANCH just before MBBI.
+ // CCMask and Target are the BRC-like operands for the branch.
+ // Return true if the change was made.
+ bool convertPrevCompareToBranch(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned CCMask,
+ MachineBasicBlock *Target) const;
// Implement EmitInstrWithCustomInserter for individual operation types.
MachineBasicBlock *emitSelect(MachineInstr *MI,
MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitCondStore(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned StoreOpcode, unsigned STOCOpcode,
+ bool Invert) const;
MachineBasicBlock *emitExt128(MachineInstr *MI,
MachineBasicBlock *MBB,
bool ClearEven, unsigned SubReg) const;
@@ -206,6 +307,12 @@ private:
unsigned BitSize) const;
MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr *MI,
MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitMemMemWrapper(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned Opcode) const;
+ MachineBasicBlock *emitStringWrapper(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned Opcode) const;
};
} // end namespace llvm
diff --git a/lib/Target/SystemZ/SystemZInstrFP.td b/lib/Target/SystemZ/SystemZInstrFP.td
index 7c9f0e668b95..60800460fca7 100644
--- a/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/lib/Target/SystemZ/SystemZInstrFP.td
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// Control-flow instructions
+// Select instructions
//===----------------------------------------------------------------------===//
// C's ?: operator for floating-point operands.
@@ -16,6 +16,11 @@ def SelectF32 : SelectWrapper<FP32>;
def SelectF64 : SelectWrapper<FP64>;
def SelectF128 : SelectWrapper<FP128>;
+defm CondStoreF32 : CondStores<FP32, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+defm CondStoreF64 : CondStores<FP64, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+
//===----------------------------------------------------------------------===//
// Move instructions
//===----------------------------------------------------------------------===//
@@ -29,57 +34,69 @@ let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
// Moves between two floating-point registers.
let neverHasSideEffects = 1 in {
- def LER : UnaryRR <"ler", 0x38, null_frag, FP32, FP32>;
- def LDR : UnaryRR <"ldr", 0x28, null_frag, FP64, FP64>;
- def LXR : UnaryRRE<"lxr", 0xB365, null_frag, FP128, FP128>;
+ def LER : UnaryRR <"le", 0x38, null_frag, FP32, FP32>;
+ def LDR : UnaryRR <"ld", 0x28, null_frag, FP64, FP64>;
+ def LXR : UnaryRRE<"lx", 0xB365, null_frag, FP128, FP128>;
+}
+
+// Moves between two floating-point registers that also set the condition
+// codes.
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ defm LTEBR : LoadAndTestRRE<"lteb", 0xB302, FP32>;
+ defm LTDBR : LoadAndTestRRE<"ltdb", 0xB312, FP64>;
+ defm LTXBR : LoadAndTestRRE<"ltxb", 0xB342, FP128>;
}
+def : CompareZeroFP<LTEBRCompare, FP32>;
+def : CompareZeroFP<LTDBRCompare, FP64>;
+def : CompareZeroFP<LTXBRCompare, FP128>;
// Moves between 64-bit integer and floating-point registers.
-def LGDR : UnaryRRE<"lgdr", 0xB3CD, bitconvert, GR64, FP64>;
-def LDGR : UnaryRRE<"ldgr", 0xB3C1, bitconvert, FP64, GR64>;
+def LGDR : UnaryRRE<"lgd", 0xB3CD, bitconvert, GR64, FP64>;
+def LDGR : UnaryRRE<"ldg", 0xB3C1, bitconvert, FP64, GR64>;
// fcopysign with an FP32 result.
let isCodeGenOnly = 1 in {
- def CPSDRss : BinaryRevRRF<"cpsdr", 0xB372, fcopysign, FP32, FP32>;
- def CPSDRsd : BinaryRevRRF<"cpsdr", 0xB372, fcopysign, FP32, FP64>;
+ def CPSDRss : BinaryRRF<"cpsd", 0xB372, fcopysign, FP32, FP32>;
+ def CPSDRsd : BinaryRRF<"cpsd", 0xB372, fcopysign, FP32, FP64>;
}
-// The sign of an FP128 is in the high register. Give the CPSDRsd
-// operands in R1, R2, R3 order.
+// The sign of an FP128 is in the high register.
def : Pat<(fcopysign FP32:$src1, FP128:$src2),
- (CPSDRsd (EXTRACT_SUBREG FP128:$src2, subreg_high), FP32:$src1)>;
+ (CPSDRsd FP32:$src1, (EXTRACT_SUBREG FP128:$src2, subreg_h64))>;
// fcopysign with an FP64 result.
let isCodeGenOnly = 1 in
- def CPSDRds : BinaryRevRRF<"cpsdr", 0xB372, fcopysign, FP64, FP32>;
-def CPSDRdd : BinaryRevRRF<"cpsdr", 0xB372, fcopysign, FP64, FP64>;
+ def CPSDRds : BinaryRRF<"cpsd", 0xB372, fcopysign, FP64, FP32>;
+def CPSDRdd : BinaryRRF<"cpsd", 0xB372, fcopysign, FP64, FP64>;
-// The sign of an FP128 is in the high register. Give the CPSDRdd
-// operands in R1, R2, R3 order.
+// The sign of an FP128 is in the high register.
def : Pat<(fcopysign FP64:$src1, FP128:$src2),
- (CPSDRdd (EXTRACT_SUBREG FP128:$src2, subreg_high), FP64:$src1)>;
+ (CPSDRdd FP64:$src1, (EXTRACT_SUBREG FP128:$src2, subreg_h64))>;
// fcopysign with an FP128 result. Use "upper" as the high half and leave
// the low half as-is.
class CopySign128<RegisterOperand cls, dag upper>
: Pat<(fcopysign FP128:$src1, cls:$src2),
- (INSERT_SUBREG FP128:$src1, upper, subreg_high)>;
+ (INSERT_SUBREG FP128:$src1, upper, subreg_h64)>;
-// Give the CPSDR* operands in R1, R2, R3 order.
-def : CopySign128<FP32, (CPSDRds FP32:$src2,
- (EXTRACT_SUBREG FP128:$src1, subreg_high))>;
-def : CopySign128<FP64, (CPSDRdd FP64:$src2,
- (EXTRACT_SUBREG FP128:$src1, subreg_high))>;
-def : CopySign128<FP128, (CPSDRdd (EXTRACT_SUBREG FP128:$src2, subreg_high),
- (EXTRACT_SUBREG FP128:$src1, subreg_high))>;
+def : CopySign128<FP32, (CPSDRds (EXTRACT_SUBREG FP128:$src1, subreg_h64),
+ FP32:$src2)>;
+def : CopySign128<FP64, (CPSDRdd (EXTRACT_SUBREG FP128:$src1, subreg_h64),
+ FP64:$src2)>;
+def : CopySign128<FP128, (CPSDRdd (EXTRACT_SUBREG FP128:$src1, subreg_h64),
+ (EXTRACT_SUBREG FP128:$src2, subreg_h64))>;
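A quick way to see why the CPSDR-based patterns above only touch subreg_h64: when an FP128 value lives in a 64-bit register pair, the IEEE sign bit is the top bit of the high half, so copying a sign into or out of an f128 never needs the low register. A standalone C++ illustration (the struct and helper are invented for this sketch, not backend code):

#include <cstdint>
#include <cstdio>

// Model an f128 as a high/low register pair; only the high half carries
// the sign bit.
struct F128Pair { uint64_t Hi, Lo; };

static F128Pair copySign(F128Pair Mag, F128Pair Sign) {
  const uint64_t SignBit = 1ULL << 63;
  Mag.Hi = (Mag.Hi & ~SignBit) | (Sign.Hi & SignBit);  // low half untouched
  return Mag;
}

int main() {
  F128Pair X{0x3fff000000000000ULL, 0}, Neg{1ULL << 63, 0};
  std::printf("%016llx\n", (unsigned long long)copySign(X, Neg).Hi);
}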
+
+defm LoadStoreF32 : MVCLoadStore<load, f32, MVCSequence, 4>;
+defm LoadStoreF64 : MVCLoadStore<load, f64, MVCSequence, 8>;
+defm LoadStoreF128 : MVCLoadStore<load, f128, MVCSequence, 16>;
//===----------------------------------------------------------------------===//
// Load instructions
//===----------------------------------------------------------------------===//
let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
- defm LE : UnaryRXPair<"le", 0x78, 0xED64, load, FP32>;
- defm LD : UnaryRXPair<"ld", 0x68, 0xED65, load, FP64>;
+ defm LE : UnaryRXPair<"le", 0x78, 0xED64, load, FP32, 4>;
+ defm LD : UnaryRXPair<"ld", 0x68, 0xED65, load, FP64, 8>;
// These instructions are split after register allocation, so we don't
// want a custom inserter.
@@ -94,8 +111,8 @@ let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
//===----------------------------------------------------------------------===//
let SimpleBDXStore = 1 in {
- defm STE : StoreRXPair<"ste", 0x70, 0xED66, store, FP32>;
- defm STD : StoreRXPair<"std", 0x60, 0xED67, store, FP64>;
+ defm STE : StoreRXPair<"ste", 0x70, 0xED66, store, FP32, 4>;
+ defm STD : StoreRXPair<"std", 0x60, 0xED67, store, FP64, 8>;
// These instructions are split after register allocation, so we don't
// want a custom inserter.
@@ -112,201 +129,232 @@ let SimpleBDXStore = 1 in {
// Convert floating-point values to narrower representations, rounding
// according to the current mode. The destination of LEXBR and LDXBR
// is a 128-bit value, but only the first register of the pair is used.
-def LEDBR : UnaryRRE<"ledbr", 0xB344, fround, FP32, FP64>;
-def LEXBR : UnaryRRE<"lexbr", 0xB346, null_frag, FP128, FP128>;
-def LDXBR : UnaryRRE<"ldxbr", 0xB345, null_frag, FP128, FP128>;
+def LEDBR : UnaryRRE<"ledb", 0xB344, fround, FP32, FP64>;
+def LEXBR : UnaryRRE<"lexb", 0xB346, null_frag, FP128, FP128>;
+def LDXBR : UnaryRRE<"ldxb", 0xB345, null_frag, FP128, FP128>;
def : Pat<(f32 (fround FP128:$src)),
- (EXTRACT_SUBREG (LEXBR FP128:$src), subreg_32bit)>;
+ (EXTRACT_SUBREG (LEXBR FP128:$src), subreg_hh32)>;
def : Pat<(f64 (fround FP128:$src)),
- (EXTRACT_SUBREG (LDXBR FP128:$src), subreg_high)>;
+ (EXTRACT_SUBREG (LDXBR FP128:$src), subreg_h64)>;
// Extend register floating-point values to wider representations.
-def LDEBR : UnaryRRE<"ldebr", 0xB304, fextend, FP64, FP32>;
-def LXEBR : UnaryRRE<"lxebr", 0xB306, fextend, FP128, FP32>;
-def LXDBR : UnaryRRE<"lxdbr", 0xB305, fextend, FP128, FP64>;
+def LDEBR : UnaryRRE<"ldeb", 0xB304, fextend, FP64, FP32>;
+def LXEBR : UnaryRRE<"lxeb", 0xB306, fextend, FP128, FP32>;
+def LXDBR : UnaryRRE<"lxdb", 0xB305, fextend, FP128, FP64>;
// Extend memory floating-point values to wider representations.
-def LDEB : UnaryRXE<"ldeb", 0xED04, extloadf32, FP64>;
-def LXEB : UnaryRXE<"lxeb", 0xED06, extloadf32, FP128>;
-def LXDB : UnaryRXE<"lxdb", 0xED05, extloadf64, FP128>;
+def LDEB : UnaryRXE<"ldeb", 0xED04, extloadf32, FP64, 4>;
+def LXEB : UnaryRXE<"lxeb", 0xED06, extloadf32, FP128, 4>;
+def LXDB : UnaryRXE<"lxdb", 0xED05, extloadf64, FP128, 8>;
// Convert a signed integer register value to a floating-point one.
-let Defs = [PSW] in {
- def CEFBR : UnaryRRE<"cefbr", 0xB394, sint_to_fp, FP32, GR32>;
- def CDFBR : UnaryRRE<"cdfbr", 0xB395, sint_to_fp, FP64, GR32>;
- def CXFBR : UnaryRRE<"cxfbr", 0xB396, sint_to_fp, FP128, GR32>;
-
- def CEGBR : UnaryRRE<"cegbr", 0xB3A4, sint_to_fp, FP32, GR64>;
- def CDGBR : UnaryRRE<"cdgbr", 0xB3A5, sint_to_fp, FP64, GR64>;
- def CXGBR : UnaryRRE<"cxgbr", 0xB3A6, sint_to_fp, FP128, GR64>;
-}
+def CEFBR : UnaryRRE<"cefb", 0xB394, sint_to_fp, FP32, GR32>;
+def CDFBR : UnaryRRE<"cdfb", 0xB395, sint_to_fp, FP64, GR32>;
+def CXFBR : UnaryRRE<"cxfb", 0xB396, sint_to_fp, FP128, GR32>;
+
+def CEGBR : UnaryRRE<"cegb", 0xB3A4, sint_to_fp, FP32, GR64>;
+def CDGBR : UnaryRRE<"cdgb", 0xB3A5, sint_to_fp, FP64, GR64>;
+def CXGBR : UnaryRRE<"cxgb", 0xB3A6, sint_to_fp, FP128, GR64>;
// Convert a floating-point register value to a signed integer value,
// with the second operand (modifier M3) specifying the rounding mode.
-let Defs = [PSW] in {
- def CFEBR : UnaryRRF<"cfebr", 0xB398, GR32, FP32>;
- def CFDBR : UnaryRRF<"cfdbr", 0xB399, GR32, FP64>;
- def CFXBR : UnaryRRF<"cfxbr", 0xB39A, GR32, FP128>;
-
- def CGEBR : UnaryRRF<"cgebr", 0xB3A8, GR64, FP32>;
- def CGDBR : UnaryRRF<"cgdbr", 0xB3A9, GR64, FP64>;
- def CGXBR : UnaryRRF<"cgxbr", 0xB3AA, GR64, FP128>;
+let Defs = [CC] in {
+ def CFEBR : UnaryRRF<"cfeb", 0xB398, GR32, FP32>;
+ def CFDBR : UnaryRRF<"cfdb", 0xB399, GR32, FP64>;
+ def CFXBR : UnaryRRF<"cfxb", 0xB39A, GR32, FP128>;
+
+ def CGEBR : UnaryRRF<"cgeb", 0xB3A8, GR64, FP32>;
+ def CGDBR : UnaryRRF<"cgdb", 0xB3A9, GR64, FP64>;
+ def CGXBR : UnaryRRF<"cgxb", 0xB3AA, GR64, FP128>;
}
// fp_to_sint always rounds towards zero, which is modifier value 5.
-def : Pat<(i32 (fp_to_sint FP32:$src)), (CFEBR FP32:$src, 5)>;
-def : Pat<(i32 (fp_to_sint FP64:$src)), (CFDBR FP64:$src, 5)>;
-def : Pat<(i32 (fp_to_sint FP128:$src)), (CFXBR FP128:$src, 5)>;
+def : Pat<(i32 (fp_to_sint FP32:$src)), (CFEBR 5, FP32:$src)>;
+def : Pat<(i32 (fp_to_sint FP64:$src)), (CFDBR 5, FP64:$src)>;
+def : Pat<(i32 (fp_to_sint FP128:$src)), (CFXBR 5, FP128:$src)>;
-def : Pat<(i64 (fp_to_sint FP32:$src)), (CGEBR FP32:$src, 5)>;
-def : Pat<(i64 (fp_to_sint FP64:$src)), (CGDBR FP64:$src, 5)>;
-def : Pat<(i64 (fp_to_sint FP128:$src)), (CGXBR FP128:$src, 5)>;
+def : Pat<(i64 (fp_to_sint FP32:$src)), (CGEBR 5, FP32:$src)>;
+def : Pat<(i64 (fp_to_sint FP64:$src)), (CGDBR 5, FP64:$src)>;
+def : Pat<(i64 (fp_to_sint FP128:$src)), (CGXBR 5, FP128:$src)>;
//===----------------------------------------------------------------------===//
// Unary arithmetic
//===----------------------------------------------------------------------===//
// Negation (Load Complement).
-let Defs = [PSW] in {
- def LCEBR : UnaryRRE<"lcebr", 0xB303, fneg, FP32, FP32>;
- def LCDBR : UnaryRRE<"lcdbr", 0xB313, fneg, FP64, FP64>;
- def LCXBR : UnaryRRE<"lcxbr", 0xB343, fneg, FP128, FP128>;
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ def LCEBR : UnaryRRE<"lceb", 0xB303, fneg, FP32, FP32>;
+ def LCDBR : UnaryRRE<"lcdb", 0xB313, fneg, FP64, FP64>;
+ def LCXBR : UnaryRRE<"lcxb", 0xB343, fneg, FP128, FP128>;
}
// Absolute value (Load Positive).
-let Defs = [PSW] in {
- def LPEBR : UnaryRRE<"lpebr", 0xB300, fabs, FP32, FP32>;
- def LPDBR : UnaryRRE<"lpdbr", 0xB310, fabs, FP64, FP64>;
- def LPXBR : UnaryRRE<"lpxbr", 0xB340, fabs, FP128, FP128>;
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ def LPEBR : UnaryRRE<"lpeb", 0xB300, fabs, FP32, FP32>;
+ def LPDBR : UnaryRRE<"lpdb", 0xB310, fabs, FP64, FP64>;
+ def LPXBR : UnaryRRE<"lpxb", 0xB340, fabs, FP128, FP128>;
}
// Negative absolute value (Load Negative).
-let Defs = [PSW] in {
- def LNEBR : UnaryRRE<"lnebr", 0xB301, fnabs, FP32, FP32>;
- def LNDBR : UnaryRRE<"lndbr", 0xB311, fnabs, FP64, FP64>;
- def LNXBR : UnaryRRE<"lnxbr", 0xB341, fnabs, FP128, FP128>;
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ def LNEBR : UnaryRRE<"lneb", 0xB301, fnabs, FP32, FP32>;
+ def LNDBR : UnaryRRE<"lndb", 0xB311, fnabs, FP64, FP64>;
+ def LNXBR : UnaryRRE<"lnxb", 0xB341, fnabs, FP128, FP128>;
}
// Square root.
-def SQEBR : UnaryRRE<"sqebr", 0xB314, fsqrt, FP32, FP32>;
-def SQDBR : UnaryRRE<"sqdbr", 0xB315, fsqrt, FP64, FP64>;
-def SQXBR : UnaryRRE<"sqxbr", 0xB316, fsqrt, FP128, FP128>;
+def SQEBR : UnaryRRE<"sqeb", 0xB314, fsqrt, FP32, FP32>;
+def SQDBR : UnaryRRE<"sqdb", 0xB315, fsqrt, FP64, FP64>;
+def SQXBR : UnaryRRE<"sqxb", 0xB316, fsqrt, FP128, FP128>;
-def SQEB : UnaryRXE<"sqeb", 0xED14, loadu<fsqrt>, FP32>;
-def SQDB : UnaryRXE<"sqdb", 0xED15, loadu<fsqrt>, FP64>;
+def SQEB : UnaryRXE<"sqeb", 0xED14, loadu<fsqrt>, FP32, 4>;
+def SQDB : UnaryRXE<"sqdb", 0xED15, loadu<fsqrt>, FP64, 8>;
// Round to an integer, with the second operand (modifier M3) specifying
-// the rounding mode.
-//
-// These forms always check for inexact conditions. z196 added versions
-// that allow this to suppressed (as for fnearbyint), but we don't yet
-// support -march=z196.
-let Defs = [PSW] in {
- def FIEBR : UnaryRRF<"fiebr", 0xB357, FP32, FP32>;
- def FIDBR : UnaryRRF<"fidbr", 0xB35F, FP64, FP64>;
- def FIXBR : UnaryRRF<"fixbr", 0xB347, FP128, FP128>;
-}
+// the rounding mode. These forms always check for inexact conditions.
+def FIEBR : UnaryRRF<"fieb", 0xB357, FP32, FP32>;
+def FIDBR : UnaryRRF<"fidb", 0xB35F, FP64, FP64>;
+def FIXBR : UnaryRRF<"fixb", 0xB347, FP128, FP128>;
+
+// Extended forms of the previous three instructions. M4 can be set to 4
+// to suppress detection of inexact conditions.
+def FIEBRA : UnaryRRF4<"fiebra", 0xB357, FP32, FP32>,
+ Requires<[FeatureFPExtension]>;
+def FIDBRA : UnaryRRF4<"fidbra", 0xB35F, FP64, FP64>,
+ Requires<[FeatureFPExtension]>;
+def FIXBRA : UnaryRRF4<"fixbra", 0xB347, FP128, FP128>,
+ Requires<[FeatureFPExtension]>;
// frint rounds according to the current mode (modifier 0) and detects
// inexact conditions.
-def : Pat<(frint FP32:$src), (FIEBR FP32:$src, 0)>;
-def : Pat<(frint FP64:$src), (FIDBR FP64:$src, 0)>;
-def : Pat<(frint FP128:$src), (FIXBR FP128:$src, 0)>;
+def : Pat<(frint FP32:$src), (FIEBR 0, FP32:$src)>;
+def : Pat<(frint FP64:$src), (FIDBR 0, FP64:$src)>;
+def : Pat<(frint FP128:$src), (FIXBR 0, FP128:$src)>;
+
+let Predicates = [FeatureFPExtension] in {
+ // fnearbyint is like frint but does not detect inexact conditions.
+ def : Pat<(fnearbyint FP32:$src), (FIEBRA 0, FP32:$src, 4)>;
+ def : Pat<(fnearbyint FP64:$src), (FIDBRA 0, FP64:$src, 4)>;
+ def : Pat<(fnearbyint FP128:$src), (FIXBRA 0, FP128:$src, 4)>;
+
+ // floor is no longer allowed to raise an inexact condition,
+ // so restrict it to the cases where the condition can be suppressed.
+ // Mode 7 is round towards -inf.
+ def : Pat<(ffloor FP32:$src), (FIEBRA 7, FP32:$src, 4)>;
+ def : Pat<(ffloor FP64:$src), (FIDBRA 7, FP64:$src, 4)>;
+ def : Pat<(ffloor FP128:$src), (FIXBRA 7, FP128:$src, 4)>;
+
+ // Same idea for ceil, where mode 6 is round towards +inf.
+ def : Pat<(fceil FP32:$src), (FIEBRA 6, FP32:$src, 4)>;
+ def : Pat<(fceil FP64:$src), (FIDBRA 6, FP64:$src, 4)>;
+ def : Pat<(fceil FP128:$src), (FIXBRA 6, FP128:$src, 4)>;
+
+ // Same idea for trunc, where mode 5 is round towards zero.
+ def : Pat<(ftrunc FP32:$src), (FIEBRA 5, FP32:$src, 4)>;
+ def : Pat<(ftrunc FP64:$src), (FIDBRA 5, FP64:$src, 4)>;
+ def : Pat<(ftrunc FP128:$src), (FIXBRA 5, FP128:$src, 4)>;
+
+ // Same idea for round, where mode 1 is round towards nearest with
+ // ties away from zero.
+ def : Pat<(frnd FP32:$src), (FIEBRA 1, FP32:$src, 4)>;
+ def : Pat<(frnd FP64:$src), (FIDBRA 1, FP64:$src, 4)>;
+ def : Pat<(frnd FP128:$src), (FIXBRA 1, FP128:$src, 4)>;
+}
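The patterns above encode the M3 rounding-mode field directly: 0 uses the current mode, 1 rounds to nearest with ties away from zero, 5 rounds toward zero, 6 toward +inf and 7 toward -inf, with M4 = 4 on the FI*BRA forms suppressing the inexact exception. A small self-contained C++ table of that mapping, taken from the comments above (the enum and helper are only illustrative):

#include <cstdio>

enum class FpOp { Rint, Nearbyint, Round, Trunc, Ceil, Floor };

// M3 modifier chosen for each libm-level operation in the patterns above.
static int m3For(FpOp Op) {
  switch (Op) {
  case FpOp::Rint:       // current mode, inexact detected
  case FpOp::Nearbyint:  // current mode, inexact suppressed via M4 = 4
    return 0;
  case FpOp::Round: return 1;  // nearest, ties away from zero
  case FpOp::Trunc: return 5;  // toward zero
  case FpOp::Ceil:  return 6;  // toward +inf
  case FpOp::Floor: return 7;  // toward -inf
  }
  return 0;
}

int main() {
  std::printf("ffloor -> M3=%d, ftrunc -> M3=%d\n",
              m3For(FpOp::Floor), m3For(FpOp::Trunc));
}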
//===----------------------------------------------------------------------===//
// Binary arithmetic
//===----------------------------------------------------------------------===//
// Addition.
-let Defs = [PSW] in {
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
let isCommutable = 1 in {
- def AEBR : BinaryRRE<"aebr", 0xB30A, fadd, FP32, FP32>;
- def ADBR : BinaryRRE<"adbr", 0xB31A, fadd, FP64, FP64>;
- def AXBR : BinaryRRE<"axbr", 0xB34A, fadd, FP128, FP128>;
+ def AEBR : BinaryRRE<"aeb", 0xB30A, fadd, FP32, FP32>;
+ def ADBR : BinaryRRE<"adb", 0xB31A, fadd, FP64, FP64>;
+ def AXBR : BinaryRRE<"axb", 0xB34A, fadd, FP128, FP128>;
}
- def AEB : BinaryRXE<"aeb", 0xED0A, fadd, FP32, load>;
- def ADB : BinaryRXE<"adb", 0xED1A, fadd, FP64, load>;
+ def AEB : BinaryRXE<"aeb", 0xED0A, fadd, FP32, load, 4>;
+ def ADB : BinaryRXE<"adb", 0xED1A, fadd, FP64, load, 8>;
}
// Subtraction.
-let Defs = [PSW] in {
- def SEBR : BinaryRRE<"sebr", 0xB30B, fsub, FP32, FP32>;
- def SDBR : BinaryRRE<"sdbr", 0xB31B, fsub, FP64, FP64>;
- def SXBR : BinaryRRE<"sxbr", 0xB34B, fsub, FP128, FP128>;
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ def SEBR : BinaryRRE<"seb", 0xB30B, fsub, FP32, FP32>;
+ def SDBR : BinaryRRE<"sdb", 0xB31B, fsub, FP64, FP64>;
+ def SXBR : BinaryRRE<"sxb", 0xB34B, fsub, FP128, FP128>;
- def SEB : BinaryRXE<"seb", 0xED0B, fsub, FP32, load>;
- def SDB : BinaryRXE<"sdb", 0xED1B, fsub, FP64, load>;
+ def SEB : BinaryRXE<"seb", 0xED0B, fsub, FP32, load, 4>;
+ def SDB : BinaryRXE<"sdb", 0xED1B, fsub, FP64, load, 8>;
}
// Multiplication.
let isCommutable = 1 in {
- def MEEBR : BinaryRRE<"meebr", 0xB317, fmul, FP32, FP32>;
- def MDBR : BinaryRRE<"mdbr", 0xB31C, fmul, FP64, FP64>;
- def MXBR : BinaryRRE<"mxbr", 0xB34C, fmul, FP128, FP128>;
+ def MEEBR : BinaryRRE<"meeb", 0xB317, fmul, FP32, FP32>;
+ def MDBR : BinaryRRE<"mdb", 0xB31C, fmul, FP64, FP64>;
+ def MXBR : BinaryRRE<"mxb", 0xB34C, fmul, FP128, FP128>;
}
-def MEEB : BinaryRXE<"meeb", 0xED17, fmul, FP32, load>;
-def MDB : BinaryRXE<"mdb", 0xED1C, fmul, FP64, load>;
+def MEEB : BinaryRXE<"meeb", 0xED17, fmul, FP32, load, 4>;
+def MDB : BinaryRXE<"mdb", 0xED1C, fmul, FP64, load, 8>;
// f64 multiplication of two FP32 registers.
-def MDEBR : BinaryRRE<"mdebr", 0xB30C, null_frag, FP64, FP32>;
+def MDEBR : BinaryRRE<"mdeb", 0xB30C, null_frag, FP64, FP32>;
def : Pat<(fmul (f64 (fextend FP32:$src1)), (f64 (fextend FP32:$src2))),
(MDEBR (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
- FP32:$src1, subreg_32bit), FP32:$src2)>;
+ FP32:$src1, subreg_h32), FP32:$src2)>;
// f64 multiplication of an FP32 register and an f32 memory.
-def MDEB : BinaryRXE<"mdeb", 0xED0C, null_frag, FP64, load>;
+def MDEB : BinaryRXE<"mdeb", 0xED0C, null_frag, FP64, load, 4>;
def : Pat<(fmul (f64 (fextend FP32:$src1)),
(f64 (extloadf32 bdxaddr12only:$addr))),
- (MDEB (INSERT_SUBREG (f64 (IMPLICIT_DEF)), FP32:$src1, subreg_32bit),
+ (MDEB (INSERT_SUBREG (f64 (IMPLICIT_DEF)), FP32:$src1, subreg_h32),
bdxaddr12only:$addr)>;
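The two patterns above fold (f64 (fextend a)) * (f64 (fextend b)) into one widening multiply. The product of two binary32 significands needs at most 48 bits, which fits exactly in binary64, so the widening form gives the same result as extending both operands first. The snippet below is just a standalone reminder of the source-level shape being matched, not backend code:

#include <cstdio>

int main() {
  float a = 1.5f, b = 1.25f;
  double wide = (double)a * (double)b;  // matched as a single widening multiply
  std::printf("%g\n", wide);            // 1.875, computed exactly
}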
// f128 multiplication of two FP64 registers.
-def MXDBR : BinaryRRE<"mxdbr", 0xB307, null_frag, FP128, FP64>;
+def MXDBR : BinaryRRE<"mxdb", 0xB307, null_frag, FP128, FP64>;
def : Pat<(fmul (f128 (fextend FP64:$src1)), (f128 (fextend FP64:$src2))),
(MXDBR (INSERT_SUBREG (f128 (IMPLICIT_DEF)),
- FP64:$src1, subreg_high), FP64:$src2)>;
+ FP64:$src1, subreg_h64), FP64:$src2)>;
// f128 multiplication of an FP64 register and an f64 memory.
-def MXDB : BinaryRXE<"mxdb", 0xED07, null_frag, FP128, load>;
+def MXDB : BinaryRXE<"mxdb", 0xED07, null_frag, FP128, load, 8>;
def : Pat<(fmul (f128 (fextend FP64:$src1)),
(f128 (extloadf64 bdxaddr12only:$addr))),
- (MXDB (INSERT_SUBREG (f128 (IMPLICIT_DEF)), FP64:$src1, subreg_high),
+ (MXDB (INSERT_SUBREG (f128 (IMPLICIT_DEF)), FP64:$src1, subreg_h64),
bdxaddr12only:$addr)>;
// Fused multiply-add.
-def MAEBR : TernaryRRD<"maebr", 0xB30E, z_fma, FP32>;
-def MADBR : TernaryRRD<"madbr", 0xB31E, z_fma, FP64>;
+def MAEBR : TernaryRRD<"maeb", 0xB30E, z_fma, FP32>;
+def MADBR : TernaryRRD<"madb", 0xB31E, z_fma, FP64>;
-def MAEB : TernaryRXF<"maeb", 0xED0E, z_fma, FP32, load>;
-def MADB : TernaryRXF<"madb", 0xED1E, z_fma, FP64, load>;
+def MAEB : TernaryRXF<"maeb", 0xED0E, z_fma, FP32, load, 4>;
+def MADB : TernaryRXF<"madb", 0xED1E, z_fma, FP64, load, 8>;
// Fused multiply-subtract.
-def MSEBR : TernaryRRD<"msebr", 0xB30F, z_fms, FP32>;
-def MSDBR : TernaryRRD<"msdbr", 0xB31F, z_fms, FP64>;
+def MSEBR : TernaryRRD<"mseb", 0xB30F, z_fms, FP32>;
+def MSDBR : TernaryRRD<"msdb", 0xB31F, z_fms, FP64>;
-def MSEB : TernaryRXF<"mseb", 0xED0F, z_fms, FP32, load>;
-def MSDB : TernaryRXF<"msdb", 0xED1F, z_fms, FP64, load>;
+def MSEB : TernaryRXF<"mseb", 0xED0F, z_fms, FP32, load, 4>;
+def MSDB : TernaryRXF<"msdb", 0xED1F, z_fms, FP64, load, 8>;
// Division.
-def DEBR : BinaryRRE<"debr", 0xB30D, fdiv, FP32, FP32>;
-def DDBR : BinaryRRE<"ddbr", 0xB31D, fdiv, FP64, FP64>;
-def DXBR : BinaryRRE<"dxbr", 0xB34D, fdiv, FP128, FP128>;
+def DEBR : BinaryRRE<"deb", 0xB30D, fdiv, FP32, FP32>;
+def DDBR : BinaryRRE<"ddb", 0xB31D, fdiv, FP64, FP64>;
+def DXBR : BinaryRRE<"dxb", 0xB34D, fdiv, FP128, FP128>;
-def DEB : BinaryRXE<"deb", 0xED0D, fdiv, FP32, load>;
-def DDB : BinaryRXE<"ddb", 0xED1D, fdiv, FP64, load>;
+def DEB : BinaryRXE<"deb", 0xED0D, fdiv, FP32, load, 4>;
+def DDB : BinaryRXE<"ddb", 0xED1D, fdiv, FP64, load, 8>;
//===----------------------------------------------------------------------===//
// Comparisons
//===----------------------------------------------------------------------===//
-let Defs = [PSW] in {
- def CEBR : CompareRRE<"cebr", 0xB309, z_cmp, FP32, FP32>;
- def CDBR : CompareRRE<"cdbr", 0xB319, z_cmp, FP64, FP64>;
- def CXBR : CompareRRE<"cxbr", 0xB349, z_cmp, FP128, FP128>;
+let Defs = [CC], CCValues = 0xF in {
+ def CEBR : CompareRRE<"ceb", 0xB309, z_fcmp, FP32, FP32>;
+ def CDBR : CompareRRE<"cdb", 0xB319, z_fcmp, FP64, FP64>;
+ def CXBR : CompareRRE<"cxb", 0xB349, z_fcmp, FP128, FP128>;
- def CEB : CompareRXE<"ceb", 0xED09, z_cmp, FP32, load>;
- def CDB : CompareRXE<"cdb", 0xED19, z_cmp, FP64, load>;
+ def CEB : CompareRXE<"ceb", 0xED09, z_fcmp, FP32, load, 4>;
+ def CDB : CompareRXE<"cdb", 0xED19, z_fcmp, FP64, load, 8>;
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/SystemZ/SystemZInstrFormats.td b/lib/Target/SystemZ/SystemZInstrFormats.td
index b32b7eb0fc67..a8efe165e36f 100644
--- a/lib/Target/SystemZ/SystemZInstrFormats.td
+++ b/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -21,12 +21,24 @@ class InstSystemZ<int size, dag outs, dag ins, string asmstr,
let Pattern = pattern;
let AsmString = asmstr;
- // Used to identify a group of related instructions, such as ST and STY.
- string Function = "";
-
- // "12" for an instruction that has a ...Y equivalent, "20" for that
- // ...Y equivalent.
- string PairType = "none";
+ // Some instructions come in pairs, one having a 12-bit displacement
+ // and the other having a 20-bit displacement. Both instructions in
+ // the pair have the same DispKey and their DispSizes are "12" and "20"
+ // respectively.
+ string DispKey = "";
+ string DispSize = "none";
+
+ // Many register-based <INSN>R instructions have a memory-based <INSN>
+ // counterpart. OpKey uniquely identifies <INSN>, while OpType is
+ // "reg" for <INSN>R and "mem" for <INSN>.
+ string OpKey = "";
+ string OpType = "none";
+
+ // Many distinct-operands instructions have older 2-operand equivalents.
+ // NumOpsKey uniquely identifies one of these 2-operand and 3-operand pairs,
+ // with NumOpsValue being "2" or "3" as appropriate.
+ string NumOpsKey = "";
+ string NumOpsValue = "none";
// True if this instruction is a simple D(X,B) load of a register
// (with no sign or zero extension).
@@ -46,11 +58,40 @@ class InstSystemZ<int size, dag outs, dag ins, string asmstr,
// operations.
bit Is128Bit = 0;
- let TSFlags{0} = SimpleBDXLoad;
- let TSFlags{1} = SimpleBDXStore;
- let TSFlags{2} = Has20BitOffset;
- let TSFlags{3} = HasIndex;
- let TSFlags{4} = Is128Bit;
+ // The access size of all memory operands in bytes, or 0 if not known.
+ bits<5> AccessBytes = 0;
+
+ // If the instruction sets CC to a useful value, this gives the mask
+ // of all possible CC results. The mask has the same form as
+ // SystemZ::CCMASK_*.
+ bits<4> CCValues = 0;
+
+ // The subset of CCValues that have the same meaning as they would after
+ // a comparison of the first operand against zero.
+ bits<4> CompareZeroCCMask = 0;
+
+ // True if the instruction is conditional and if the CC mask operand
+ // comes first (as for BRC, etc.).
+ bit CCMaskFirst = 0;
+
+ // Similar, but true if the CC mask operand comes last (as for LOC, etc.).
+ bit CCMaskLast = 0;
+
+ // True if the instruction is the "logical" rather than "arithmetic" form,
+ // in cases where a distinction exists.
+ bit IsLogical = 0;
+
+ let TSFlags{0} = SimpleBDXLoad;
+ let TSFlags{1} = SimpleBDXStore;
+ let TSFlags{2} = Has20BitOffset;
+ let TSFlags{3} = HasIndex;
+ let TSFlags{4} = Is128Bit;
+ let TSFlags{9-5} = AccessBytes;
+ let TSFlags{13-10} = CCValues;
+ let TSFlags{17-14} = CompareZeroCCMask;
+ let TSFlags{18} = CCMaskFirst;
+ let TSFlags{19} = CCMaskLast;
+ let TSFlags{20} = IsLogical;
}
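The new TSFlags fields above are packed at fixed bit positions, so target code can recover them from an opcode's flags word. A self-contained C++ sketch that decodes the exact layout defined above (the helper names are illustrative, not the backend's real accessors):

#include <cstdint>
#include <cstdio>

// Bit positions copied from the TSFlags assignments above.
static unsigned accessBytes(uint64_t F)       { return (F >> 5) & 0x1f; }
static unsigned ccValues(uint64_t F)          { return (F >> 10) & 0xf; }
static unsigned compareZeroCCMask(uint64_t F) { return (F >> 14) & 0xf; }
static bool ccMaskFirst(uint64_t F)           { return (F >> 18) & 1; }
static bool ccMaskLast(uint64_t F)            { return (F >> 19) & 1; }
static bool isLogical(uint64_t F)             { return (F >> 20) & 1; }

int main() {
  // Example flags for an 8-byte access with CCValues and CompareZeroCCMask
  // both 0xF, as set on the load-and-test style definitions.
  uint64_t Flags = (8u << 5) | (0xfu << 10) | (0xfu << 14);
  std::printf("bytes=%u cc=%#x zero-mask=%#x first=%d last=%d logical=%d\n",
              accessBytes(Flags), ccValues(Flags), compareZeroCCMask(Flags),
              (int)ccMaskFirst(Flags), (int)ccMaskLast(Flags),
              (int)isLogical(Flags));
}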
//===----------------------------------------------------------------------===//
@@ -61,8 +102,8 @@ class InstSystemZ<int size, dag outs, dag ins, string asmstr,
// displacement.
def getDisp12Opcode : InstrMapping {
let FilterClass = "InstSystemZ";
- let RowFields = ["Function"];
- let ColFields = ["PairType"];
+ let RowFields = ["DispKey"];
+ let ColFields = ["DispSize"];
let KeyCol = ["20"];
let ValueCols = [["12"]];
}
@@ -70,37 +111,54 @@ def getDisp12Opcode : InstrMapping {
// Return the version of an instruction that has a signed 20-bit displacement.
def getDisp20Opcode : InstrMapping {
let FilterClass = "InstSystemZ";
- let RowFields = ["Function"];
- let ColFields = ["PairType"];
+ let RowFields = ["DispKey"];
+ let ColFields = ["DispSize"];
let KeyCol = ["12"];
let ValueCols = [["20"]];
}
+// Return the memory form of a register instruction.
+def getMemOpcode : InstrMapping {
+ let FilterClass = "InstSystemZ";
+ let RowFields = ["OpKey"];
+ let ColFields = ["OpType"];
+ let KeyCol = ["reg"];
+ let ValueCols = [["mem"]];
+}
+
+// Return the 3-operand form of a 2-operand instruction.
+def getThreeOperandOpcode : InstrMapping {
+ let FilterClass = "InstSystemZ";
+ let RowFields = ["NumOpsKey"];
+ let ColFields = ["NumOpsValue"];
+ let KeyCol = ["2"];
+ let ValueCols = [["3"]];
+}
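Each InstrMapping above asks TableGen to generate a lookup from one opcode to a related form: the 12-bit to 20-bit displacement pairing keyed by DispKey/DispSize, the register to memory pairing keyed by OpKey/OpType, and the 2-operand to 3-operand pairing keyed by NumOpsKey/NumOpsValue. A toy C++ model of that row/column idea, with a made-up opcode enum; the real generated tables and function signatures come from the TableGen output:

#include <cstdio>

enum Opcode { ST, STY, AR, ARK, INVALID };  // illustrative opcode space

// DispKey row for "st": DispSize "12" (ST) -> "20" (STY).
static Opcode getDisp20Form(Opcode Op) {
  switch (Op) {
  case ST: return STY;
  default: return INVALID;   // no 20-bit-displacement partner
  }
}

// NumOpsKey row for "ar": NumOpsValue "2" (AR) -> "3" (ARK).
static Opcode getThreeOperandForm(Opcode Op) {
  switch (Op) {
  case AR: return ARK;
  default: return INVALID;
  }
}

int main() {
  std::printf("%d %d\n", getDisp20Form(ST) == STY, getThreeOperandForm(AR) == ARK);
}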
+
//===----------------------------------------------------------------------===//
// Instruction formats
//===----------------------------------------------------------------------===//
//
// Formats are specified using operand field declarations of the form:
//
-// bits<4> Rn : register input or output for operand n
-// bits<m> In : immediate value of width m for operand n
-// bits<4> Bn : base register for address operand n
-// bits<m> Dn : displacement value of width m for address operand n
-// bits<4> Xn : index register for address operand n
-// bits<4> Mn : mode value for operand n
+// bits<4> Rn : register input or output for operand n
+// bits<m> In : immediate value of width m for operand n
+// bits<4> BDn : address operand n, which has a base and a displacement
+// bits<m> XBDn : address operand n, which has an index, a base and a
+// displacement
+// bits<4> Xn : index register for address operand n
+// bits<4> Mn : mode value for operand n
//
-// The operand numbers ("n" in the list above) follow the architecture manual,
-// but the fields are always declared in assembly order, so there are some
-// cases where operand "2" comes after operand "3". For address operands,
-// the base register field is declared first, followed by the displacement,
-// followed by the index (if any). This matches the bdaddr* and bdxaddr*
-// orders.
+// The operand numbers ("n" in the list above) follow the architecture manual.
+// Assembly operands sometimes have a different order; in particular, R3 is
+// often written between operands 1 and 2.
//
//===----------------------------------------------------------------------===//
class InstRI<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<4, outs, ins, asmstr, pattern> {
field bits<32> Inst;
+ field bits<32> SoftFail = 0;
bits<4> R1;
bits<16> I2;
@@ -111,9 +169,64 @@ class InstRI<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
let Inst{15-0} = I2;
}
+class InstRIEb<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R2;
+ bits<4> M3;
+ bits<16> RI4;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = R2;
+ let Inst{31-16} = RI4;
+ let Inst{15-12} = M3;
+ let Inst{11-8} = 0;
+ let Inst{7-0} = op{7-0};
+}
+
+class InstRIEc<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<8> I2;
+ bits<4> M3;
+ bits<16> RI4;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = M3;
+ let Inst{31-16} = RI4;
+ let Inst{15-8} = I2;
+ let Inst{7-0} = op{7-0};
+}
+
+class InstRIEd<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R3;
+ bits<16> I2;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = R3;
+ let Inst{31-16} = I2;
+ let Inst{15-8} = 0;
+ let Inst{7-0} = op{7-0};
+}
+
class InstRIEf<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<6, outs, ins, asmstr, pattern> {
field bits<48> Inst;
+ field bits<48> SoftFail = 0;
bits<4> R1;
bits<4> R2;
@@ -133,6 +246,7 @@ class InstRIEf<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstRIL<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<6, outs, ins, asmstr, pattern> {
field bits<48> Inst;
+ field bits<48> SoftFail = 0;
bits<4> R1;
bits<32> I2;
@@ -146,6 +260,7 @@ class InstRIL<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstRR<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<2, outs, ins, asmstr, pattern> {
field bits<16> Inst;
+ field bits<16> SoftFail = 0;
bits<4> R1;
bits<4> R2;
@@ -158,6 +273,7 @@ class InstRR<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstRRD<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<4, outs, ins, asmstr, pattern> {
field bits<32> Inst;
+ field bits<32> SoftFail = 0;
bits<4> R1;
bits<4> R3;
@@ -173,6 +289,7 @@ class InstRRD<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstRRE<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<4, outs, ins, asmstr, pattern> {
field bits<32> Inst;
+ field bits<32> SoftFail = 0;
bits<4> R1;
bits<4> R2;
@@ -186,14 +303,16 @@ class InstRRE<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstRRF<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<4, outs, ins, asmstr, pattern> {
field bits<32> Inst;
+ field bits<32> SoftFail = 0;
bits<4> R1;
bits<4> R2;
bits<4> R3;
+ bits<4> R4;
let Inst{31-16} = op;
let Inst{15-12} = R3;
- let Inst{11-8} = 0;
+ let Inst{11-8} = R4;
let Inst{7-4} = R1;
let Inst{3-0} = R2;
}
@@ -201,17 +320,14 @@ class InstRRF<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstRX<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<4, outs, ins, asmstr, pattern> {
field bits<32> Inst;
+ field bits<32> SoftFail = 0;
bits<4> R1;
- bits<4> B2;
- bits<12> D2;
- bits<4> X2;
+ bits<20> XBD2;
let Inst{31-24} = op;
let Inst{23-20} = R1;
- let Inst{19-16} = X2;
- let Inst{15-12} = B2;
- let Inst{11-0} = D2;
+ let Inst{19-0} = XBD2;
let HasIndex = 1;
}
@@ -219,17 +335,14 @@ class InstRX<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstRXE<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<6, outs, ins, asmstr, pattern> {
field bits<48> Inst;
+ field bits<48> SoftFail = 0;
bits<4> R1;
- bits<4> B2;
- bits<12> D2;
- bits<4> X2;
+ bits<20> XBD2;
let Inst{47-40} = op{15-8};
let Inst{39-36} = R1;
- let Inst{35-32} = X2;
- let Inst{31-28} = B2;
- let Inst{27-16} = D2;
+ let Inst{35-16} = XBD2;
let Inst{15-8} = 0;
let Inst{7-0} = op{7-0};
@@ -239,18 +352,15 @@ class InstRXE<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstRXF<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<6, outs, ins, asmstr, pattern> {
field bits<48> Inst;
+ field bits<48> SoftFail = 0;
bits<4> R1;
bits<4> R3;
- bits<4> B2;
- bits<12> D2;
- bits<4> X2;
+ bits<20> XBD2;
let Inst{47-40} = op{15-8};
let Inst{39-36} = R3;
- let Inst{35-32} = X2;
- let Inst{31-28} = B2;
- let Inst{27-16} = D2;
+ let Inst{35-16} = XBD2;
let Inst{15-12} = R1;
let Inst{11-8} = 0;
let Inst{7-0} = op{7-0};
@@ -261,18 +371,14 @@ class InstRXF<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstRXY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<6, outs, ins, asmstr, pattern> {
field bits<48> Inst;
+ field bits<48> SoftFail = 0;
bits<4> R1;
- bits<4> B2;
- bits<20> D2;
- bits<4> X2;
+ bits<28> XBD2;
let Inst{47-40} = op{15-8};
let Inst{39-36} = R1;
- let Inst{35-32} = X2;
- let Inst{31-28} = B2;
- let Inst{27-16} = D2{11-0};
- let Inst{15-8} = D2{19-12};
+ let Inst{35-8} = XBD2;
let Inst{7-0} = op{7-0};
let Has20BitOffset = 1;
@@ -282,34 +388,31 @@ class InstRXY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstRS<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<4, outs, ins, asmstr, pattern> {
field bits<32> Inst;
+ field bits<32> SoftFail = 0;
bits<4> R1;
bits<4> R3;
- bits<4> B2;
- bits<12> D2;
+ bits<16> BD2;
let Inst{31-24} = op;
let Inst{23-20} = R1;
let Inst{19-16} = R3;
- let Inst{15-12} = B2;
- let Inst{11-0} = D2;
+ let Inst{15-0} = BD2;
}
class InstRSY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<6, outs, ins, asmstr, pattern> {
field bits<48> Inst;
+ field bits<48> SoftFail = 0;
bits<4> R1;
bits<4> R3;
- bits<4> B2;
- bits<20> D2;
+ bits<24> BD2;
let Inst{47-40} = op{15-8};
let Inst{39-36} = R1;
let Inst{35-32} = R3;
- let Inst{31-28} = B2;
- let Inst{27-16} = D2{11-0};
- let Inst{15-8} = D2{19-12};
+ let Inst{31-8} = BD2;
let Inst{7-0} = op{7-0};
let Has20BitOffset = 1;
@@ -318,60 +421,77 @@ class InstRSY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InstSI<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<4, outs, ins, asmstr, pattern> {
field bits<32> Inst;
+ field bits<32> SoftFail = 0;
- bits<4> B1;
- bits<12> D1;
+ bits<16> BD1;
bits<8> I2;
let Inst{31-24} = op;
let Inst{23-16} = I2;
- let Inst{15-12} = B1;
- let Inst{11-0} = D1;
+ let Inst{15-0} = BD1;
}
class InstSIL<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<6, outs, ins, asmstr, pattern> {
field bits<48> Inst;
+ field bits<48> SoftFail = 0;
- bits<4> B1;
- bits<12> D1;
+ bits<16> BD1;
bits<16> I2;
let Inst{47-32} = op;
- let Inst{31-28} = B1;
- let Inst{27-16} = D1;
+ let Inst{31-16} = BD1;
let Inst{15-0} = I2;
}
class InstSIY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<6, outs, ins, asmstr, pattern> {
field bits<48> Inst;
+ field bits<48> SoftFail = 0;
- bits<4> B1;
- bits<20> D1;
+ bits<24> BD1;
bits<8> I2;
let Inst{47-40} = op{15-8};
let Inst{39-32} = I2;
- let Inst{31-28} = B1;
- let Inst{27-16} = D1{11-0};
- let Inst{15-8} = D1{19-12};
+ let Inst{31-8} = BD1;
let Inst{7-0} = op{7-0};
let Has20BitOffset = 1;
}
+class InstSS<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<24> BDL1;
+ bits<16> BD2;
+
+ let Inst{47-40} = op;
+ let Inst{39-16} = BDL1;
+ let Inst{15-0} = BD2;
+}
+
//===----------------------------------------------------------------------===//
// Instruction definitions with semantics
//===----------------------------------------------------------------------===//
//
-// These classes have the form <Category><Format>, where <Format> is one
+// These classes have the form [Cond]<Category><Format>, where <Format> is one
// of the formats defined above and where <Category> describes the inputs
-// and outputs. <Category> can be one of:
+// and outputs. "Cond" is used if the instruction is conditional,
+// in which case the 4-bit condition-code mask is added as a final operand.
+// <Category> can be one of:
//
// Inherent:
// One register output operand and no input operands.
//
+// BranchUnary:
+// One register output operand, one register input operand and
+// one branch displacement. The instruction stores a modified
+// form of the source register in the destination register and
+// branches on the result.
+//
// Store:
// One register or immediate input operand and one address input operand.
// The instruction stores the first operand to the address.
@@ -420,6 +540,10 @@ class InstSIY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
// One output operand and five input operands. The first two operands
// are registers and the other three are immediates.
//
+// Prefetch:
+// One 4-bit immediate operand and one address operand. The immediate
+// operand is 1 for a load prefetch and 2 for a store prefetch.
+//
// The format determines which input operands are tied to output operands,
// and also determines the shape of any address operand.
//
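For the Prefetch category just described, the only interesting encoding detail is the 4-bit immediate. A one-line C++ illustration of the stated convention (invented helper name, values taken from the comment above):

#include <cstdio>

// 1 selects a load (read) prefetch, 2 a store (write) prefetch.
static int prefetchCode(bool IsWrite) { return IsWrite ? 2 : 1; }

int main() { std::printf("%d %d\n", prefetchCode(false), prefetchCode(true)); }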
@@ -432,23 +556,32 @@ class InstSIY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
class InherentRRE<string mnemonic, bits<16> opcode, RegisterOperand cls,
dag src>
- : InstRRE<opcode, (outs cls:$dst), (ins),
- mnemonic#"\t$dst",
- [(set cls:$dst, src)]> {
+ : InstRRE<opcode, (outs cls:$R1), (ins),
+ mnemonic#"\t$R1",
+ [(set cls:$R1, src)]> {
let R2 = 0;
}
+class BranchUnaryRI<string mnemonic, bits<12> opcode, RegisterOperand cls>
+ : InstRI<opcode, (outs cls:$R1), (ins cls:$R1src, brtarget16:$I2),
+ mnemonic##"\t$R1, $I2", []> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
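BranchUnaryRI is the shape used by branch-on-count style instructions: the tied R1 operand is updated (for example decremented) and the branch is taken depending on the result. A tiny self-contained C++ model of that decrement-and-branch behaviour, purely for illustration:

#include <cstdio>

// R1 is modified in place; the return value plays the role of "branch taken".
static bool branchOnCount(long &R1) {
  --R1;
  return R1 != 0;
}

int main() {
  long Count = 3;
  int Iterations = 0;
  do { ++Iterations; } while (branchOnCount(Count));
  std::printf("%d\n", Iterations);  // 3
}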
+
class LoadMultipleRSY<string mnemonic, bits<16> opcode, RegisterOperand cls>
- : InstRSY<opcode, (outs cls:$dst1, cls:$dst2), (ins bdaddr20only:$addr),
- mnemonic#"\t$dst1, $dst2, $addr", []> {
+ : InstRSY<opcode, (outs cls:$R1, cls:$R3), (ins bdaddr20only:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2", []> {
let mayLoad = 1;
}
class StoreRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
RegisterOperand cls>
- : InstRIL<opcode, (outs), (ins cls:$src, pcrel32:$addr),
- mnemonic#"\t$src, $addr",
- [(operator cls:$src, pcrel32:$addr)]> {
+ : InstRIL<opcode, (outs), (ins cls:$R1, pcrel32:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(operator cls:$R1, pcrel32:$I2)]> {
let mayStore = 1;
// We want PC-relative addresses to be tried ahead of BD and BDX addresses.
// However, BDXs have two extra operands and are therefore 6 units more
@@ -457,105 +590,206 @@ class StoreRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
}
class StoreRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
- RegisterOperand cls, AddressingMode mode = bdxaddr12only>
- : InstRX<opcode, (outs), (ins cls:$src, mode:$addr),
- mnemonic#"\t$src, $addr",
- [(operator cls:$src, mode:$addr)]> {
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdxaddr12only>
+ : InstRX<opcode, (outs), (ins cls:$R1, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, mode:$XBD2)]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
let mayStore = 1;
+ let AccessBytes = bytes;
}
class StoreRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls, AddressingMode mode = bdxaddr20only>
- : InstRXY<opcode, (outs), (ins cls:$src, mode:$addr),
- mnemonic#"\t$src, $addr",
- [(operator cls:$src, mode:$addr)]> {
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdxaddr20only>
+ : InstRXY<opcode, (outs), (ins cls:$R1, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, mode:$XBD2)]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
let mayStore = 1;
+ let AccessBytes = bytes;
}
multiclass StoreRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
- SDPatternOperator operator, RegisterOperand cls> {
- let Function = mnemonic ## #cls in {
- let PairType = "12" in
- def "" : StoreRX<mnemonic, rxOpcode, operator, cls, bdxaddr12pair>;
- let PairType = "20" in
- def Y : StoreRXY<mnemonic#"y", rxyOpcode, operator, cls, bdxaddr20pair>;
+ SDPatternOperator operator, RegisterOperand cls,
+ bits<5> bytes> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : StoreRX<mnemonic, rxOpcode, operator, cls, bytes, bdxaddr12pair>;
+ let DispSize = "20" in
+ def Y : StoreRXY<mnemonic#"y", rxyOpcode, operator, cls, bytes,
+ bdxaddr20pair>;
}
}
class StoreMultipleRSY<string mnemonic, bits<16> opcode, RegisterOperand cls>
- : InstRSY<opcode, (outs), (ins cls:$from, cls:$to, bdaddr20only:$addr),
- mnemonic#"\t$from, $to, $addr", []> {
+ : InstRSY<opcode, (outs), (ins cls:$R1, cls:$R3, bdaddr20only:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2", []> {
let mayStore = 1;
}
+// StoreSI* instructions are used to store an integer to memory, but the
+// addresses are more restricted than for normal stores. If we are in the
+// situation of having to force either the address into a register or the
+// constant into a register, it's usually better to do the latter.
+// We therefore match the address in the same way as a normal store and
+// only use the StoreSI* instruction if the matched address is suitable.
class StoreSI<string mnemonic, bits<8> opcode, SDPatternOperator operator,
- Immediate imm, AddressingMode mode = bdaddr12only>
- : InstSI<opcode, (outs), (ins mode:$addr, imm:$src),
- mnemonic#"\t$addr, $src",
- [(operator imm:$src, mode:$addr)]> {
+ Immediate imm>
+ : InstSI<opcode, (outs), (ins mviaddr12pair:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator imm:$I2, mviaddr12pair:$BD1)]> {
let mayStore = 1;
}
class StoreSIY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- Immediate imm, AddressingMode mode = bdaddr20only>
- : InstSIY<opcode, (outs), (ins mode:$addr, imm:$src),
- mnemonic#"\t$addr, $src",
- [(operator imm:$src, mode:$addr)]> {
+ Immediate imm>
+ : InstSIY<opcode, (outs), (ins mviaddr20pair:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator imm:$I2, mviaddr20pair:$BD1)]> {
let mayStore = 1;
}
class StoreSIL<string mnemonic, bits<16> opcode, SDPatternOperator operator,
Immediate imm>
- : InstSIL<opcode, (outs), (ins bdaddr12only:$addr, imm:$src),
- mnemonic#"\t$addr, $src",
- [(operator imm:$src, bdaddr12only:$addr)]> {
+ : InstSIL<opcode, (outs), (ins mviaddr12pair:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator imm:$I2, mviaddr12pair:$BD1)]> {
let mayStore = 1;
}
multiclass StoreSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode,
SDPatternOperator operator, Immediate imm> {
- let Function = mnemonic in {
- let PairType = "12" in
- def "" : StoreSI<mnemonic, siOpcode, operator, imm, bdaddr12pair>;
- let PairType = "20" in
- def Y : StoreSIY<mnemonic#"y", siyOpcode, operator, imm, bdaddr20pair>;
+ let DispKey = mnemonic in {
+ let DispSize = "12" in
+ def "" : StoreSI<mnemonic, siOpcode, operator, imm>;
+ let DispSize = "20" in
+ def Y : StoreSIY<mnemonic#"y", siyOpcode, operator, imm>;
}
}
+class CondStoreRSY<string mnemonic, bits<16> opcode,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2, cond4:$valid, cond4:$R3),
+ mnemonic#"$R3\t$R1, $BD2", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let mayStore = 1;
+ let AccessBytes = bytes;
+ let CCMaskLast = 1;
+}
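CondStoreRSY describes store-on-condition: the store only happens when the current condition code is selected by the trailing 4-bit mask (hence CCMaskLast = 1). A self-contained C++ model of that predicate; the bit ordering of the mask (bit 3 corresponds to CC 0) is an assumption made for this sketch:

#include <cstdio>

// Perform the store only if the mask bit for the current CC (0-3) is set.
static void condStore(int &Mem, int Value, unsigned CC, unsigned CCMask) {
  if (CC <= 3 && ((CCMask >> (3 - CC)) & 1))  // assumed mask/CC bit ordering
    Mem = Value;
}

int main() {
  int Slot = 0;
  condStore(Slot, 42, /*CC=*/0, /*CCMask=*/0x8);
  std::printf("%d\n", Slot);  // 42
}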
+
+// Like CondStoreRSY, but used for the raw assembly form. The condition-code
+// mask is the third operand rather than being part of the mnemonic.
+class AsmCondStoreRSY<string mnemonic, bits<16> opcode,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2, uimm8zx4:$R3),
+ mnemonic#"\t$R1, $BD2, $R3", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let mayStore = 1;
+ let AccessBytes = bytes;
+}
+
+// Like CondStoreRSY, but with a fixed CC mask.
+class FixedCondStoreRSY<string mnemonic, bits<16> opcode,
+ RegisterOperand cls, bits<4> ccmask, bits<5> bytes,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2),
+ mnemonic#"\t$R1, $BD2", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let mayStore = 1;
+ let AccessBytes = bytes;
+ let R3 = ccmask;
+}
+
class UnaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
RegisterOperand cls1, RegisterOperand cls2>
- : InstRR<opcode, (outs cls1:$dst), (ins cls2:$src),
- mnemonic#"\t$dst, $src",
- [(set cls1:$dst, (operator cls2:$src))]>;
+ : InstRR<opcode, (outs cls1:$R1), (ins cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(set cls1:$R1, (operator cls2:$R2))]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+}
class UnaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
RegisterOperand cls1, RegisterOperand cls2>
- : InstRRE<opcode, (outs cls1:$dst), (ins cls2:$src),
- mnemonic#"\t$dst, $src",
- [(set cls1:$dst, (operator cls2:$src))]>;
+ : InstRRE<opcode, (outs cls1:$R1), (ins cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(set cls1:$R1, (operator cls2:$R2))]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+}
class UnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2>
- : InstRRF<opcode, (outs cls1:$dst), (ins cls2:$src, uimm8zx4:$mode),
- mnemonic#"\t$dst, $mode, $src", []>;
+ : InstRRF<opcode, (outs cls1:$R1), (ins uimm8zx4:$R3, cls2:$R2),
+ mnemonic#"r\t$R1, $R3, $R2", []> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let R4 = 0;
+}
+
+class UnaryRRF4<string mnemonic, bits<16> opcode, RegisterOperand cls1,
+ RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins uimm8zx4:$R3, cls2:$R2, uimm8zx4:$R4),
+ mnemonic#"\t$R1, $R3, $R2, $R4", []>;
+
+// These instructions are generated by if conversion. The old value of R1
+// is added as an implicit use.
+class CondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
+ RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls2:$R2, cond4:$valid, cond4:$R3),
+ mnemonic#"r$R3\t$R1, $R2", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let CCMaskLast = 1;
+ let R4 = 0;
+}
+
+// Like CondUnaryRRF, but used for the raw assembly form. The condition-code
+// mask is the third operand rather than being part of the mnemonic.
+class AsmCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
+ RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2, uimm8zx4:$R3),
+ mnemonic#"r\t$R1, $R2, $R3", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let R4 = 0;
+}
+
+// Like CondUnaryRRF, but with a fixed CC mask.
+class FixedCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
+ RegisterOperand cls2, bits<4> ccmask>
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2),
+ mnemonic#"\t$R1, $R2", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let R3 = ccmask;
+ let R4 = 0;
+}
class UnaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,
RegisterOperand cls, Immediate imm>
- : InstRI<opcode, (outs cls:$dst), (ins imm:$src),
- mnemonic#"\t$dst, $src",
- [(set cls:$dst, (operator imm:$src))]>;
+ : InstRI<opcode, (outs cls:$R1), (ins imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(set cls:$R1, (operator imm:$I2))]>;
class UnaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator,
RegisterOperand cls, Immediate imm>
- : InstRIL<opcode, (outs cls:$dst), (ins imm:$src),
- mnemonic#"\t$dst, $src",
- [(set cls:$dst, (operator imm:$src))]>;
+ : InstRIL<opcode, (outs cls:$R1), (ins imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(set cls:$R1, (operator imm:$I2))]>;
class UnaryRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
RegisterOperand cls>
- : InstRIL<opcode, (outs cls:$dst), (ins pcrel32:$addr),
- mnemonic#"\t$dst, $addr",
- [(set cls:$dst, (operator pcrel32:$addr))]> {
+ : InstRIL<opcode, (outs cls:$R1), (ins pcrel32:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(set cls:$R1, (operator pcrel32:$I2))]> {
let mayLoad = 1;
// We want PC-relative addresses to be tried ahead of BD and BDX addresses.
// However, BDXs have two extra operands and are therefore 6 units more
@@ -563,148 +797,267 @@ class UnaryRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
let AddedComplexity = 7;
}
+class CondUnaryRSY<string mnemonic, bits<16> opcode,
+ SDPatternOperator operator, RegisterOperand cls,
+ bits<5> bytes, AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1),
+ (ins cls:$R1src, mode:$BD2, cond4:$valid, cond4:$R3),
+ mnemonic#"$R3\t$R1, $BD2",
+ [(set cls:$R1,
+ (z_select_ccmask (load bdaddr20only:$BD2), cls:$R1src,
+ cond4:$valid, cond4:$R3))]>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+ let CCMaskLast = 1;
+}
+
+// Like CondUnaryRSY, but used for the raw assembly form. The condition-code
+// mask is the third operand rather than being part of the mnemonic.
+class AsmCondUnaryRSY<string mnemonic, bits<16> opcode,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2, uimm8zx4:$R3),
+ mnemonic#"\t$R1, $BD2, $R3", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+// Like CondUnaryRSY, but with a fixed CC mask.
+class FixedCondUnaryRSY<string mnemonic, bits<16> opcode,
+ RegisterOperand cls, bits<4> ccmask, bits<5> bytes,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2),
+ mnemonic#"\t$R1, $BD2", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let R3 = ccmask;
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
class UnaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
- RegisterOperand cls, AddressingMode mode = bdxaddr12only>
- : InstRX<opcode, (outs cls:$dst), (ins mode:$addr),
- mnemonic#"\t$dst, $addr",
- [(set cls:$dst, (operator mode:$addr))]> {
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdxaddr12only>
+ : InstRX<opcode, (outs cls:$R1), (ins mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator mode:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
let mayLoad = 1;
+ let AccessBytes = bytes;
}
class UnaryRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls>
- : InstRXE<opcode, (outs cls:$dst), (ins bdxaddr12only:$addr),
- mnemonic#"\t$dst, $addr",
- [(set cls:$dst, (operator bdxaddr12only:$addr))]> {
+ RegisterOperand cls, bits<5> bytes>
+ : InstRXE<opcode, (outs cls:$R1), (ins bdxaddr12only:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator bdxaddr12only:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
let mayLoad = 1;
+ let AccessBytes = bytes;
}
class UnaryRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls, AddressingMode mode = bdxaddr20only>
- : InstRXY<opcode, (outs cls:$dst), (ins mode:$addr),
- mnemonic#"\t$dst, $addr",
- [(set cls:$dst, (operator mode:$addr))]> {
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdxaddr20only>
+ : InstRXY<opcode, (outs cls:$R1), (ins mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator mode:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
let mayLoad = 1;
+ let AccessBytes = bytes;
}
multiclass UnaryRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
- SDPatternOperator operator, RegisterOperand cls> {
- let Function = mnemonic ## #cls in {
- let PairType = "12" in
- def "" : UnaryRX<mnemonic, rxOpcode, operator, cls, bdxaddr12pair>;
- let PairType = "20" in
- def Y : UnaryRXY<mnemonic#"y", rxyOpcode, operator, cls, bdxaddr20pair>;
+ SDPatternOperator operator, RegisterOperand cls,
+ bits<5> bytes> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : UnaryRX<mnemonic, rxOpcode, operator, cls, bytes, bdxaddr12pair>;
+ let DispSize = "20" in
+ def Y : UnaryRXY<mnemonic#"y", rxyOpcode, operator, cls, bytes,
+ bdxaddr20pair>;
}
}
class BinaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
RegisterOperand cls1, RegisterOperand cls2>
- : InstRR<opcode, (outs cls1:$dst), (ins cls1:$src1, cls2:$src2),
- mnemonic#"\t$dst, $src2",
- [(set cls1:$dst, (operator cls1:$src1, cls2:$src2))]> {
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ : InstRR<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(set cls1:$R1, (operator cls1:$R1src, cls2:$R2))]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
}
class BinaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
RegisterOperand cls1, RegisterOperand cls2>
- : InstRRE<opcode, (outs cls1:$dst), (ins cls1:$src1, cls2:$src2),
- mnemonic#"\t$dst, $src2",
- [(set cls1:$dst, (operator cls1:$src1, cls2:$src2))]> {
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ : InstRRE<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(set cls1:$R1, (operator cls1:$R1src, cls2:$R2))]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
}
-// Here the assembly and dag operands are in natural order,
-// but the first input operand maps to R3 and the second to R2.
-// This is used for "CPSDR R1, R3, R2", which is equivalent to
-// R1 = copysign (R3, R2).
-//
-// Direct uses of the instruction must pass operands in encoding order --
-// R1, R2, R3 -- so they must pass the source operands in reverse order.
-class BinaryRevRRF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls1, RegisterOperand cls2>
- : InstRRF<opcode, (outs cls1:$dst), (ins cls2:$src2, cls1:$src1),
- mnemonic#"\t$dst, $src1, $src2",
- [(set cls1:$dst, (operator cls1:$src1, cls2:$src2))]>;
+class BinaryRRF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R3, cls2:$R2),
+ mnemonic#"r\t$R1, $R3, $R2",
+ [(set cls1:$R1, (operator cls1:$R3, cls2:$R2))]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let R4 = 0;
+}
+
+class BinaryRRFK<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R2, cls2:$R3),
+ mnemonic#"rk\t$R1, $R2, $R3",
+ [(set cls1:$R1, (operator cls1:$R2, cls2:$R3))]> {
+ let R4 = 0;
+}
+
+multiclass BinaryRRAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2,
+ SDPatternOperator operator, RegisterOperand cls1,
+ RegisterOperand cls2> {
+ let NumOpsKey = mnemonic in {
+ let NumOpsValue = "3" in
+ def K : BinaryRRFK<mnemonic, opcode2, null_frag, cls1, cls2>,
+ Requires<[FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : BinaryRR<mnemonic, opcode1, operator, cls1, cls2>;
+ }
+}
+
+multiclass BinaryRREAndK<string mnemonic, bits<16> opcode1, bits<16> opcode2,
+ SDPatternOperator operator, RegisterOperand cls1,
+ RegisterOperand cls2> {
+ let NumOpsKey = mnemonic in {
+ let NumOpsValue = "3" in
+ def K : BinaryRRFK<mnemonic, opcode2, null_frag, cls1, cls2>,
+ Requires<[FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : BinaryRRE<mnemonic, opcode1, operator, cls1, cls2>;
+ }
+}
class BinaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,
RegisterOperand cls, Immediate imm>
- : InstRI<opcode, (outs cls:$dst), (ins cls:$src1, imm:$src2),
- mnemonic#"\t$dst, $src2",
- [(set cls:$dst, (operator cls:$src1, imm:$src2))]> {
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ : InstRI<opcode, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class BinaryRIE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, Immediate imm>
+ : InstRIEd<opcode, (outs cls:$R1), (ins cls:$R3, imm:$I2),
+ mnemonic#"\t$R1, $R3, $I2",
+ [(set cls:$R1, (operator cls:$R3, imm:$I2))]>;
+
+multiclass BinaryRIAndK<string mnemonic, bits<12> opcode1, bits<16> opcode2,
+ SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm> {
+ let NumOpsKey = mnemonic in {
+ let NumOpsValue = "3" in
+ def K : BinaryRIE<mnemonic##"k", opcode2, null_frag, cls, imm>,
+ Requires<[FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : BinaryRI<mnemonic, opcode1, operator, cls, imm>;
+ }
}
class BinaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator,
RegisterOperand cls, Immediate imm>
- : InstRIL<opcode, (outs cls:$dst), (ins cls:$src1, imm:$src2),
- mnemonic#"\t$dst, $src2",
- [(set cls:$dst, (operator cls:$src1, imm:$src2))]> {
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ : InstRIL<opcode, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
}
class BinaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
- RegisterOperand cls, SDPatternOperator load,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
AddressingMode mode = bdxaddr12only>
- : InstRX<opcode, (outs cls:$dst), (ins cls:$src1, mode:$src2),
- mnemonic#"\t$dst, $src2",
- [(set cls:$dst, (operator cls:$src1, (load mode:$src2)))]> {
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ : InstRX<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator cls:$R1src, (load mode:$XBD2)))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
let mayLoad = 1;
+ let AccessBytes = bytes;
}
class BinaryRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls, SDPatternOperator load>
- : InstRXE<opcode, (outs cls:$dst), (ins cls:$src1, bdxaddr12only:$src2),
- mnemonic#"\t$dst, $src2",
- [(set cls:$dst, (operator cls:$src1,
- (load bdxaddr12only:$src2)))]> {
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes>
+ : InstRXE<opcode, (outs cls:$R1), (ins cls:$R1src, bdxaddr12only:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator cls:$R1src,
+ (load bdxaddr12only:$XBD2)))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
let mayLoad = 1;
+ let AccessBytes = bytes;
}
class BinaryRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls, SDPatternOperator load,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
AddressingMode mode = bdxaddr20only>
- : InstRXY<opcode, (outs cls:$dst), (ins cls:$src1, mode:$src2),
- mnemonic#"\t$dst, $src2",
- [(set cls:$dst, (operator cls:$src1, (load mode:$src2)))]> {
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ : InstRXY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator cls:$R1src, (load mode:$XBD2)))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
let mayLoad = 1;
+ let AccessBytes = bytes;
}
multiclass BinaryRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
SDPatternOperator operator, RegisterOperand cls,
- SDPatternOperator load> {
- let Function = mnemonic ## #cls in {
- let PairType = "12" in
- def "" : BinaryRX<mnemonic, rxOpcode, operator, cls, load, bdxaddr12pair>;
- let PairType = "20" in
- def Y : BinaryRXY<mnemonic#"y", rxyOpcode, operator, cls, load,
+ SDPatternOperator load, bits<5> bytes> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : BinaryRX<mnemonic, rxOpcode, operator, cls, load, bytes,
+ bdxaddr12pair>;
+ let DispSize = "20" in
+ def Y : BinaryRXY<mnemonic#"y", rxyOpcode, operator, cls, load, bytes,
bdxaddr20pair>;
}
}
class BinarySI<string mnemonic, bits<8> opcode, SDPatternOperator operator,
Operand imm, AddressingMode mode = bdaddr12only>
- : InstSI<opcode, (outs), (ins mode:$addr, imm:$src),
- mnemonic#"\t$addr, $src",
- [(store (operator (load mode:$addr), imm:$src), mode:$addr)]> {
+ : InstSI<opcode, (outs), (ins mode:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(store (operator (load mode:$BD1), imm:$I2), mode:$BD1)]> {
let mayLoad = 1;
let mayStore = 1;
}
class BinarySIY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
Operand imm, AddressingMode mode = bdaddr20only>
- : InstSIY<opcode, (outs), (ins mode:$addr, imm:$src),
- mnemonic#"\t$addr, $src",
- [(store (operator (load mode:$addr), imm:$src), mode:$addr)]> {
+ : InstSIY<opcode, (outs), (ins mode:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(store (operator (load mode:$BD1), imm:$I2), mode:$BD1)]> {
let mayLoad = 1;
let mayStore = 1;
}
@@ -712,59 +1065,83 @@ class BinarySIY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
multiclass BinarySIPair<string mnemonic, bits<8> siOpcode,
bits<16> siyOpcode, SDPatternOperator operator,
Operand imm> {
- let Function = mnemonic ## #cls in {
- let PairType = "12" in
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
def "" : BinarySI<mnemonic, siOpcode, operator, imm, bdaddr12pair>;
- let PairType = "20" in
+ let DispSize = "20" in
def Y : BinarySIY<mnemonic#"y", siyOpcode, operator, imm, bdaddr20pair>;
}
}
class ShiftRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
- RegisterOperand cls, AddressingMode mode>
- : InstRS<opcode, (outs cls:$dst), (ins cls:$src1, mode:$src2),
- mnemonic#"\t$dst, $src2",
- [(set cls:$dst, (operator cls:$src1, mode:$src2))]> {
+ RegisterOperand cls>
+ : InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, shift12only:$BD2),
+ mnemonic#"\t$R1, $BD2",
+ [(set cls:$R1, (operator cls:$R1src, shift12only:$BD2))]> {
let R3 = 0;
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
}
class ShiftRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls, AddressingMode mode>
- : InstRSY<opcode, (outs cls:$dst), (ins cls:$src1, mode:$src2),
- mnemonic#"\t$dst, $src1, $src2",
- [(set cls:$dst, (operator cls:$src1, mode:$src2))]>;
+ RegisterOperand cls>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R3, shift20only:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2",
+ [(set cls:$R1, (operator cls:$R3, shift20only:$BD2))]>;
+
+multiclass ShiftRSAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2,
+ SDPatternOperator operator, RegisterOperand cls> {
+ let NumOpsKey = mnemonic in {
+ let NumOpsValue = "3" in
+ def K : ShiftRSY<mnemonic##"k", opcode2, null_frag, cls>,
+ Requires<[FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : ShiftRS<mnemonic, opcode1, operator, cls>;
+ }
+}
class CompareRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
RegisterOperand cls1, RegisterOperand cls2>
- : InstRR<opcode, (outs), (ins cls1:$src1, cls2:$src2),
- mnemonic#"\t$src1, $src2",
- [(operator cls1:$src1, cls2:$src2)]>;
+ : InstRR<opcode, (outs), (ins cls1:$R1, cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(operator cls1:$R1, cls2:$R2)]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let isCompare = 1;
+}
class CompareRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
RegisterOperand cls1, RegisterOperand cls2>
- : InstRRE<opcode, (outs), (ins cls1:$src1, cls2:$src2),
- mnemonic#"\t$src1, $src2",
- [(operator cls1:$src1, cls2:$src2)]>;
+ : InstRRE<opcode, (outs), (ins cls1:$R1, cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(operator cls1:$R1, cls2:$R2)]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let isCompare = 1;
+}
class CompareRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,
RegisterOperand cls, Immediate imm>
- : InstRI<opcode, (outs), (ins cls:$src1, imm:$src2),
- mnemonic#"\t$src1, $src2",
- [(operator cls:$src1, imm:$src2)]>;
+ : InstRI<opcode, (outs), (ins cls:$R1, imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(operator cls:$R1, imm:$I2)]> {
+ let isCompare = 1;
+}
class CompareRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator,
RegisterOperand cls, Immediate imm>
- : InstRIL<opcode, (outs), (ins cls:$src1, imm:$src2),
- mnemonic#"\t$src1, $src2",
- [(operator cls:$src1, imm:$src2)]>;
+ : InstRIL<opcode, (outs), (ins cls:$R1, imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(operator cls:$R1, imm:$I2)]> {
+ let isCompare = 1;
+}
class CompareRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
RegisterOperand cls, SDPatternOperator load>
- : InstRIL<opcode, (outs), (ins cls:$src1, pcrel32:$src2),
- mnemonic#"\t$src1, $src2",
- [(operator cls:$src1, (load pcrel32:$src2))]> {
+ : InstRIL<opcode, (outs), (ins cls:$R1, pcrel32:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(operator cls:$R1, (load pcrel32:$I2))]> {
+ let isCompare = 1;
let mayLoad = 1;
// We want PC-relative addresses to be tried ahead of BD and BDX addresses.
// However, BDXs have two extra operands and are therefore 6 units more
@@ -773,77 +1150,92 @@ class CompareRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
}
class CompareRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
- RegisterOperand cls, SDPatternOperator load,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
AddressingMode mode = bdxaddr12only>
- : InstRX<opcode, (outs), (ins cls:$src1, mode:$src2),
- mnemonic#"\t$src1, $src2",
- [(operator cls:$src1, (load mode:$src2))]> {
+ : InstRX<opcode, (outs), (ins cls:$R1, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, (load mode:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let isCompare = 1;
let mayLoad = 1;
+ let AccessBytes = bytes;
}
class CompareRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls, SDPatternOperator load>
- : InstRXE<opcode, (outs), (ins cls:$src1, bdxaddr12only:$src2),
- mnemonic#"\t$src1, $src2",
- [(operator cls:$src1, (load bdxaddr12only:$src2))]> {
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes>
+ : InstRXE<opcode, (outs), (ins cls:$R1, bdxaddr12only:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, (load bdxaddr12only:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let isCompare = 1;
let mayLoad = 1;
+ let AccessBytes = bytes;
}
class CompareRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls, SDPatternOperator load,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
AddressingMode mode = bdxaddr20only>
- : InstRXY<opcode, (outs), (ins cls:$src1, mode:$src2),
- mnemonic#"\t$src1, $src2",
- [(operator cls:$src1, (load mode:$src2))]> {
+ : InstRXY<opcode, (outs), (ins cls:$R1, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, (load mode:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let isCompare = 1;
let mayLoad = 1;
+ let AccessBytes = bytes;
}
multiclass CompareRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
SDPatternOperator operator, RegisterOperand cls,
- SDPatternOperator load> {
- let Function = mnemonic ## #cls in {
- let PairType = "12" in
+ SDPatternOperator load, bits<5> bytes> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
def "" : CompareRX<mnemonic, rxOpcode, operator, cls,
- load, bdxaddr12pair>;
- let PairType = "20" in
+ load, bytes, bdxaddr12pair>;
+ let DispSize = "20" in
def Y : CompareRXY<mnemonic#"y", rxyOpcode, operator, cls,
- load, bdxaddr20pair>;
+ load, bytes, bdxaddr20pair>;
}
}
class CompareSI<string mnemonic, bits<8> opcode, SDPatternOperator operator,
SDPatternOperator load, Immediate imm,
AddressingMode mode = bdaddr12only>
- : InstSI<opcode, (outs), (ins mode:$addr, imm:$src),
- mnemonic#"\t$addr, $src",
- [(operator (load mode:$addr), imm:$src)]> {
+ : InstSI<opcode, (outs), (ins mode:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator (load mode:$BD1), imm:$I2)]> {
+ let isCompare = 1;
let mayLoad = 1;
}
class CompareSIL<string mnemonic, bits<16> opcode, SDPatternOperator operator,
SDPatternOperator load, Immediate imm>
- : InstSIL<opcode, (outs), (ins bdaddr12only:$addr, imm:$src),
- mnemonic#"\t$addr, $src",
- [(operator (load bdaddr12only:$addr), imm:$src)]> {
+ : InstSIL<opcode, (outs), (ins bdaddr12only:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator (load bdaddr12only:$BD1), imm:$I2)]> {
+ let isCompare = 1;
let mayLoad = 1;
}
class CompareSIY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
SDPatternOperator load, Immediate imm,
AddressingMode mode = bdaddr20only>
- : InstSIY<opcode, (outs), (ins mode:$addr, imm:$src),
- mnemonic#"\t$addr, $src",
- [(operator (load mode:$addr), imm:$src)]> {
+ : InstSIY<opcode, (outs), (ins mode:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator (load mode:$BD1), imm:$I2)]> {
+ let isCompare = 1;
let mayLoad = 1;
}
multiclass CompareSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode,
SDPatternOperator operator, SDPatternOperator load,
Immediate imm> {
- let Function = mnemonic in {
- let PairType = "12" in
+ let DispKey = mnemonic in {
+ let DispSize = "12" in
def "" : CompareSI<mnemonic, siOpcode, operator, load, imm, bdaddr12pair>;
- let PairType = "20" in
+ let DispSize = "20" in
def Y : CompareSIY<mnemonic#"y", siyOpcode, operator, load, imm,
bdaddr20pair>;
}
@@ -851,65 +1243,94 @@ multiclass CompareSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode,
class TernaryRRD<string mnemonic, bits<16> opcode,
SDPatternOperator operator, RegisterOperand cls>
- : InstRRD<opcode, (outs cls:$dst), (ins cls:$src1, cls:$src2, cls:$src3),
- mnemonic#"\t$dst, $src2, $src3",
- [(set cls:$dst, (operator cls:$src1, cls:$src2, cls:$src3))]> {
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ : InstRRD<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, cls:$R2),
+ mnemonic#"r\t$R1, $R3, $R2",
+ [(set cls:$R1, (operator cls:$R1src, cls:$R3, cls:$R2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "reg";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
}
class TernaryRXF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls, SDPatternOperator load>
- : InstRXF<opcode, (outs cls:$dst),
- (ins cls:$src1, cls:$src2, bdxaddr12only:$src3),
- mnemonic#"\t$dst, $src2, $src3",
- [(set cls:$dst, (operator cls:$src1, cls:$src2,
- (load bdxaddr12only:$src3)))]> {
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes>
+ : InstRXF<opcode, (outs cls:$R1),
+ (ins cls:$R1src, cls:$R3, bdxaddr12only:$XBD2),
+ mnemonic#"\t$R1, $R3, $XBD2",
+ [(set cls:$R1, (operator cls:$R1src, cls:$R3,
+ (load bdxaddr12only:$XBD2)))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
let mayLoad = 1;
+ let AccessBytes = bytes;
}
class CmpSwapRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
RegisterOperand cls, AddressingMode mode = bdaddr12only>
- : InstRS<opcode, (outs cls:$dst), (ins cls:$old, cls:$new, mode:$ptr),
- mnemonic#"\t$dst, $new, $ptr",
- [(set cls:$dst, (operator mode:$ptr, cls:$old, cls:$new))]> {
- let Constraints = "$old = $dst";
- let DisableEncoding = "$old";
+ : InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, mode:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2",
+ [(set cls:$R1, (operator mode:$BD2, cls:$R1src, cls:$R3))]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
let mayLoad = 1;
let mayStore = 1;
}
class CmpSwapRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
RegisterOperand cls, AddressingMode mode = bdaddr20only>
- : InstRSY<opcode, (outs cls:$dst), (ins cls:$old, cls:$new, mode:$ptr),
- mnemonic#"\t$dst, $new, $ptr",
- [(set cls:$dst, (operator mode:$ptr, cls:$old, cls:$new))]> {
- let Constraints = "$old = $dst";
- let DisableEncoding = "$old";
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, mode:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2",
+ [(set cls:$R1, (operator mode:$BD2, cls:$R1src, cls:$R3))]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
let mayLoad = 1;
let mayStore = 1;
}
multiclass CmpSwapRSPair<string mnemonic, bits<8> rsOpcode, bits<16> rsyOpcode,
SDPatternOperator operator, RegisterOperand cls> {
- let Function = mnemonic ## #cls in {
- let PairType = "12" in
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
def "" : CmpSwapRS<mnemonic, rsOpcode, operator, cls, bdaddr12pair>;
- let PairType = "20" in
+ let DispSize = "20" in
def Y : CmpSwapRSY<mnemonic#"y", rsyOpcode, operator, cls, bdaddr20pair>;
}
}
class RotateSelectRIEf<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2>
- : InstRIEf<opcode, (outs cls1:$dst),
- (ins cls1:$src1, cls2:$src2,
- uimm8zx6:$imm1, uimm8zx6:$imm2, uimm8zx6:$imm3),
- mnemonic#"\t$dst, $src2, $imm1, $imm2, $imm3", []> {
- let Constraints = "$src1 = $dst";
- let DisableEncoding = "$src1";
+ : InstRIEf<opcode, (outs cls1:$R1),
+ (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5),
+ mnemonic#"\t$R1, $R2, $I3, $I4, $I5", []> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class PrefetchRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator>
+ : InstRXY<opcode, (outs), (ins uimm8zx4:$R1, bdxaddr20only:$XBD2),
+ mnemonic##"\t$R1, $XBD2",
+ [(operator uimm8zx4:$R1, bdxaddr20only:$XBD2)]>;
+
+class PrefetchRILPC<string mnemonic, bits<12> opcode,
+ SDPatternOperator operator>
+ : InstRIL<opcode, (outs), (ins uimm8zx4:$R1, pcrel32:$I2),
+ mnemonic##"\t$R1, $I2",
+ [(operator uimm8zx4:$R1, pcrel32:$I2)]> {
+ // We want PC-relative addresses to be tried ahead of BD and BDX addresses.
+ // However, BDXs have two extra operands and are therefore 6 units more
+ // complex.
+ let AddedComplexity = 7;
+}
+
+// A floating-point load-and-test operation. Create both a normal unary
+// operation and one that acts as a comparison against zero.
+multiclass LoadAndTestRRE<string mnemonic, bits<16> opcode,
+ RegisterOperand cls> {
+ def "" : UnaryRRE<mnemonic, opcode, null_frag, cls, cls>;
+ let isCodeGenOnly = 1 in
+ def Compare : CompareRRE<mnemonic, opcode, null_frag, cls, cls>;
}
//===----------------------------------------------------------------------===//
@@ -928,17 +1349,130 @@ class Pseudo<dag outs, dag ins, list<dag> pattern>
let isCodeGenOnly = 1;
}
+// Like UnaryRI, but expanded after RA depending on the choice of register.
+class UnaryRIPseudo<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Pseudo<(outs cls:$R1), (ins imm:$I2),
+ [(set cls:$R1, (operator imm:$I2))]>;
+
+// Like UnaryRXY, but expanded after RA depending on the choice of register.
+class UnaryRXYPseudo<string key, SDPatternOperator operator,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdxaddr20only>
+ : Pseudo<(outs cls:$R1), (ins mode:$XBD2),
+ [(set cls:$R1, (operator mode:$XBD2))]> {
+ let OpKey = key ## cls;
+ let OpType = "mem";
+ let mayLoad = 1;
+ let Has20BitOffset = 1;
+ let HasIndex = 1;
+ let AccessBytes = bytes;
+}
+
+// Like UnaryRR, but expanded after RA depending on the choice of registers.
+class UnaryRRPseudo<string key, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : Pseudo<(outs cls1:$R1), (ins cls2:$R2),
+ [(set cls1:$R1, (operator cls2:$R2))]> {
+ let OpKey = key ## cls1;
+ let OpType = "reg";
+}
+
+// Like BinaryRI, but expanded after RA depending on the choice of register.
+class BinaryRIPseudo<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Pseudo<(outs cls:$R1), (ins cls:$R1src, imm:$I2),
+ [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+ let Constraints = "$R1 = $R1src";
+}
+
+// Like BinaryRIE, but expanded after RA depending on the choice of register.
+class BinaryRIEPseudo<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Pseudo<(outs cls:$R1), (ins cls:$R3, imm:$I2),
+ [(set cls:$R1, (operator cls:$R3, imm:$I2))]>;
+
+// Like BinaryRIAndK, but expanded after RA depending on the choice of register.
+multiclass BinaryRIAndKPseudo<string key, SDPatternOperator operator,
+ RegisterOperand cls, Immediate imm> {
+ let NumOpsKey = key in {
+ let NumOpsValue = "3" in
+ def K : BinaryRIEPseudo<null_frag, cls, imm>,
+ Requires<[FeatureHighWord, FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : BinaryRIPseudo<operator, cls, imm>,
+ Requires<[FeatureHighWord]>;
+ }
+}
+
+// Like CompareRI, but expanded after RA depending on the choice of register.
+class CompareRIPseudo<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Pseudo<(outs), (ins cls:$R1, imm:$I2), [(operator cls:$R1, imm:$I2)]>;
+
+// Like CompareRXY, but expanded after RA depending on the choice of register.
+class CompareRXYPseudo<SDPatternOperator operator, RegisterOperand cls,
+ SDPatternOperator load, bits<5> bytes,
+ AddressingMode mode = bdxaddr20only>
+ : Pseudo<(outs), (ins cls:$R1, mode:$XBD2),
+ [(operator cls:$R1, (load mode:$XBD2))]> {
+ let mayLoad = 1;
+ let Has20BitOffset = 1;
+ let HasIndex = 1;
+ let AccessBytes = bytes;
+}
+
+// Like StoreRXY, but expanded after RA depending on the choice of register.
+class StoreRXYPseudo<SDPatternOperator operator, RegisterOperand cls,
+ bits<5> bytes, AddressingMode mode = bdxaddr20only>
+ : Pseudo<(outs), (ins cls:$R1, mode:$XBD2),
+ [(operator cls:$R1, mode:$XBD2)]> {
+ let mayStore = 1;
+ let Has20BitOffset = 1;
+ let HasIndex = 1;
+ let AccessBytes = bytes;
+}
+
+// Like RotateSelectRIEf, but expanded after RA depending on the choice
+// of registers.
+class RotateSelectRIEfPseudo<RegisterOperand cls1, RegisterOperand cls2>
+ : Pseudo<(outs cls1:$R1),
+ (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5),
+ []> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
// Implements "$dst = $cc & (8 >> CC) ? $src1 : $src2", where CC is
// the value of the PSW's 2-bit condition code field.
class SelectWrapper<RegisterOperand cls>
- : Pseudo<(outs cls:$dst), (ins cls:$src1, cls:$src2, i8imm:$cc),
- [(set cls:$dst, (z_select_ccmask cls:$src1, cls:$src2, imm:$cc))]> {
+ : Pseudo<(outs cls:$dst),
+ (ins cls:$src1, cls:$src2, uimm8zx4:$valid, uimm8zx4:$cc),
+ [(set cls:$dst, (z_select_ccmask cls:$src1, cls:$src2,
+ uimm8zx4:$valid, uimm8zx4:$cc))]> {
let usesCustomInserter = 1;
// Although the instructions used by these nodes do not in themselves
- // change the PSW, the insertion requires new blocks, and the PSW cannot
- // be live across them.
- let Defs = [PSW];
- let Uses = [PSW];
+ // change CC, the insertion requires new blocks, and CC cannot be live
+ // across them.
+ let Defs = [CC];
+ let Uses = [CC];
+}
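
A minimal sketch (not part of the original patch) of how the 4-bit $valid/$cc masks used by SelectWrapper and CondStores are interpreted, assuming the usual SystemZ convention that condition code n corresponds to mask bit (8 >> n); the helper name ccTaken is illustrative only.

    #include <cassert>

    // Returns true if a select/branch with mask CCMask is taken when the
    // PSW condition code currently holds CC (0..3).
    static bool ccTaken(unsigned CCMask, unsigned CC) {
      assert(CC < 4 && CCMask < 16 && "CC is 2 bits, the mask is 4 bits");
      return (CCMask & (8 >> CC)) != 0;
    }

    int main() {
      // CCMASK_CMP_EQ == 8: taken only when CC == 0.
      assert(ccTaken(8, 0) && !ccTaken(8, 1));
      // CCMASK_CMP_NE == 6: taken when CC == 1 or CC == 2.
      assert(!ccTaken(6, 0) && ccTaken(6, 1) && ccTaken(6, 2));
    }
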
+
+// Stores $new to $addr if $cc is true ("" case) or false (Inv case).
+multiclass CondStores<RegisterOperand cls, SDPatternOperator store,
+ SDPatternOperator load, AddressingMode mode> {
+ let Defs = [CC], Uses = [CC], usesCustomInserter = 1 in {
+ def "" : Pseudo<(outs),
+ (ins cls:$new, mode:$addr, uimm8zx4:$valid, uimm8zx4:$cc),
+ [(store (z_select_ccmask cls:$new, (load mode:$addr),
+ uimm8zx4:$valid, uimm8zx4:$cc),
+ mode:$addr)]>;
+ def Inv : Pseudo<(outs),
+ (ins cls:$new, mode:$addr, uimm8zx4:$valid, uimm8zx4:$cc),
+ [(store (z_select_ccmask (load mode:$addr), cls:$new,
+ uimm8zx4:$valid, uimm8zx4:$cc),
+ mode:$addr)]>;
+ }
}
// OPERATOR is ATOMIC_SWAP or an ATOMIC_LOAD_* operation. PAT and OPERAND
@@ -947,7 +1481,7 @@ class AtomicLoadBinary<SDPatternOperator operator, RegisterOperand cls,
dag pat, DAGOperand operand>
: Pseudo<(outs cls:$dst), (ins bdaddr20only:$ptr, operand:$src2),
[(set cls:$dst, (operator bdaddr20only:$ptr, pat))]> {
- let Defs = [PSW];
+ let Defs = [CC];
let Has20BitOffset = 1;
let mayLoad = 1;
let mayStore = 1;
@@ -973,7 +1507,7 @@ class AtomicLoadWBinary<SDPatternOperator operator, dag pat,
ADDR32:$negbitshift, uimm32:$bitsize),
[(set GR32:$dst, (operator bdaddr20only:$ptr, pat, ADDR32:$bitshift,
ADDR32:$negbitshift, uimm32:$bitsize))]> {
- let Defs = [PSW];
+ let Defs = [CC];
let Has20BitOffset = 1;
let mayLoad = 1;
let mayStore = 1;
@@ -985,3 +1519,85 @@ class AtomicLoadWBinaryReg<SDPatternOperator operator>
: AtomicLoadWBinary<operator, (i32 GR32:$src2), GR32>;
class AtomicLoadWBinaryImm<SDPatternOperator operator, Immediate imm>
: AtomicLoadWBinary<operator, (i32 imm:$src2), imm>;
+
+// Define an instruction that operates on two fixed-length blocks of memory,
+// and associated pseudo instructions for operating on blocks of any size.
+// The Sequence form uses a straight-line sequence of instructions and
+// the Loop form uses a loop of length-256 instructions followed by
+// another instruction to handle the excess.
+multiclass MemorySS<string mnemonic, bits<8> opcode,
+ SDPatternOperator sequence, SDPatternOperator loop> {
+ def "" : InstSS<opcode, (outs), (ins bdladdr12onlylen8:$BDL1,
+ bdaddr12only:$BD2),
+ mnemonic##"\t$BDL1, $BD2", []>;
+ let usesCustomInserter = 1 in {
+ def Sequence : Pseudo<(outs), (ins bdaddr12only:$dest, bdaddr12only:$src,
+ imm64:$length),
+ [(sequence bdaddr12only:$dest, bdaddr12only:$src,
+ imm64:$length)]>;
+ def Loop : Pseudo<(outs), (ins bdaddr12only:$dest, bdaddr12only:$src,
+ imm64:$length, GR64:$count256),
+ [(loop bdaddr12only:$dest, bdaddr12only:$src,
+ imm64:$length, GR64:$count256)]>;
+ }
+}
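
A minimal sketch (not part of the original patch) of the length split implied by the comment above: the Loop form copies full 256-byte blocks in a loop and one trailing instruction handles the excess. The helper below is illustrative only and is not the actual pseudo expansion.

    #include <cassert>
    #include <cstdint>
    #include <utility>

    // Split a block operation of Length bytes into (full 256-byte blocks,
    // leftover bytes for the final instruction).
    static std::pair<uint64_t, uint64_t> splitBlockOp(uint64_t Length) {
      return {Length / 256, Length % 256};
    }

    int main() {
      // 1000 bytes: three full blocks in the loop, 232 bytes afterwards.
      assert(splitBlockOp(1000).first == 3 && splitBlockOp(1000).second == 232);
    }
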
+
+// Define an instruction that operates on two strings, both terminated
+// by the character in R0. The instruction processes a CPU-determined
+// number of bytes at a time and sets CC to 3 if the instruction needs
+// to be repeated. Also define a pseudo instruction that represents
+// the full loop (the main instruction plus the branch on CC==3).
+multiclass StringRRE<string mnemonic, bits<16> opcode,
+ SDPatternOperator operator> {
+ def "" : InstRRE<opcode, (outs GR64:$R1, GR64:$R2),
+ (ins GR64:$R1src, GR64:$R2src),
+ mnemonic#"\t$R1, $R2", []> {
+ let Constraints = "$R1 = $R1src, $R2 = $R2src";
+ let DisableEncoding = "$R1src, $R2src";
+ }
+ let usesCustomInserter = 1 in
+ def Loop : Pseudo<(outs GR64:$end),
+ (ins GR64:$start1, GR64:$start2, GR32:$char),
+ [(set GR64:$end, (operator GR64:$start1, GR64:$start2,
+ GR32:$char))]>;
+}
+
+// A pseudo instruction that is a direct alias of a real instruction.
+// These aliases are used in cases where a particular register operand is
+// fixed or where the same instruction is used with different register sizes.
+// The size parameter is the size in bytes of the associated real instruction.
+class Alias<int size, dag outs, dag ins, list<dag> pattern>
+ : InstSystemZ<size, outs, ins, "", pattern> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
+
+// An alias of a BinaryRI, but with different register sizes.
+class BinaryAliasRI<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Alias<4, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+ [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+ let Constraints = "$R1 = $R1src";
+}
+
+// An alias of a BinaryRIL, but with different register sizes.
+class BinaryAliasRIL<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Alias<6, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+ [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+ let Constraints = "$R1 = $R1src";
+}
+
+// An alias of a CompareRI, but with different register sizes.
+class CompareAliasRI<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Alias<4, (outs), (ins cls:$R1, imm:$I2), [(operator cls:$R1, imm:$I2)]> {
+ let isCompare = 1;
+}
+
+// An alias of a RotateSelectRIEf, but with different register sizes.
+class RotateSelectAliasRIEf<RegisterOperand cls1, RegisterOperand cls2>
+ : Alias<6, (outs cls1:$R1),
+ (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5), []> {
+ let Constraints = "$R1 = $R1src";
+}
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 0718c83fc728..acfeed80b54a 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -12,17 +12,37 @@
//===----------------------------------------------------------------------===//
#include "SystemZInstrInfo.h"
+#include "SystemZTargetMachine.h"
#include "SystemZInstrBuilder.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"
using namespace llvm;
+// Return a mask with Count low bits set.
+static uint64_t allOnes(unsigned int Count) {
+ return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
+}
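
A minimal standalone sketch (not part of the original patch) of why allOnes() shifts twice instead of computing (uint64_t(1) << Count) - 1 directly: a single shift by 64 would be undefined behaviour, while the two-step form stays defined for Count up to 64 and Count == 0 is handled explicitly. The helper name allOnesRef is illustrative only.

    #include <cassert>
    #include <cstdint>

    static uint64_t allOnesRef(unsigned Count) {
      return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
    }

    int main() {
      assert(allOnesRef(0) == 0);
      assert(allOnesRef(1) == 1);
      assert(allOnesRef(16) == 0xFFFF);
      assert(allOnesRef(64) == ~uint64_t(0));  // would be UB with a single shift
    }
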
+
+// Reg should be a 32-bit GPR. Return true if it is a high register rather
+// than a low register.
+static bool isHighReg(unsigned int Reg) {
+ if (SystemZ::GRH32BitRegClass.contains(Reg))
+ return true;
+ assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
+ return false;
+}
+
+// Pin the vtable to this file.
+void SystemZInstrInfo::anchor() {}
+
SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
: SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
- RI(tm, *this) {
+ RI(tm), TM(tm) {
}
// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
@@ -40,8 +60,8 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
// Set up the two 64-bit registers.
MachineOperand &HighRegOp = EarlierMI->getOperand(0);
MachineOperand &LowRegOp = MI->getOperand(0);
- HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_high));
- LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_low));
+ HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
+ LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));
// The address in the first (high) instruction is already correct.
// Adjust the offset in the second (low) instruction.
@@ -74,12 +94,104 @@ void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
OffsetMO.setImm(Offset);
}
+// MI is an RI-style pseudo instruction. Replace it with LowOpcode
+// if the first operand is a low GR32 and HighOpcode if the first operand
+// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
+// and HighOpcode takes an unsigned 32-bit operand. In those cases,
+// MI has the same kind of operand as LowOpcode, so needs to be converted
+// if HighOpcode is used.
+void SystemZInstrInfo::expandRIPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned HighOpcode,
+ bool ConvertHigh) const {
+ unsigned Reg = MI->getOperand(0).getReg();
+ bool IsHigh = isHighReg(Reg);
+ MI->setDesc(get(IsHigh ? HighOpcode : LowOpcode));
+ if (IsHigh && ConvertHigh)
+ MI->getOperand(1).setImm(uint32_t(MI->getOperand(1).getImm()));
+}
+
+// MI is a three-operand RIE-style pseudo instruction. Replace it with
+// LowOpcodeK if the registers are both low GR32s, otherwise use a move
+// followed by HighOpcode or LowOpcode, depending on whether the target
+// is a high or low GR32.
+void SystemZInstrInfo::expandRIEPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned LowOpcodeK,
+ unsigned HighOpcode) const {
+ unsigned DestReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ bool DestIsHigh = isHighReg(DestReg);
+ bool SrcIsHigh = isHighReg(SrcReg);
+ if (!DestIsHigh && !SrcIsHigh)
+ MI->setDesc(get(LowOpcodeK));
+ else {
+ emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
+ DestReg, SrcReg, SystemZ::LR, 32,
+ MI->getOperand(1).isKill());
+ MI->setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
+ MI->getOperand(1).setReg(DestReg);
+ }
+}
+
+// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
+// if the first operand is a low GR32 and HighOpcode if the first operand
+// is a high GR32.
+void SystemZInstrInfo::expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned HighOpcode) const {
+ unsigned Reg = MI->getOperand(0).getReg();
+ unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
+ MI->getOperand(2).getImm());
+ MI->setDesc(get(Opcode));
+}
+
+// MI is an RR-style pseudo instruction that zero-extends the low Size bits
+// of one GRX32 into another. Replace it with LowOpcode if both operands
+// are low registers, otherwise use RISB[LH]G.
+void SystemZInstrInfo::expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned Size) const {
+ emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
+ MI->getOperand(0).getReg(), MI->getOperand(1).getReg(),
+ LowOpcode, Size, MI->getOperand(1).isKill());
+ MI->eraseFromParent();
+}
+
+// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
+// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
+// are low registers, otherwise use RISB[LH]G. Size is the number of bits
+// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
+// KillSrc is true if this move is the last use of SrcReg.
+void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, unsigned DestReg,
+ unsigned SrcReg, unsigned LowLowOpcode,
+ unsigned Size, bool KillSrc) const {
+ unsigned Opcode;
+ bool DestIsHigh = isHighReg(DestReg);
+ bool SrcIsHigh = isHighReg(SrcReg);
+ if (DestIsHigh && SrcIsHigh)
+ Opcode = SystemZ::RISBHH;
+ else if (DestIsHigh && !SrcIsHigh)
+ Opcode = SystemZ::RISBHL;
+ else if (!DestIsHigh && SrcIsHigh)
+ Opcode = SystemZ::RISBLH;
+ else {
+ BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+ unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
+ BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
+ .addReg(DestReg, RegState::Undef)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
+}
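
A minimal sketch (not part of the original patch) of the RISB[LH]* operand computation used above, under the assumption that I3/I4 select the destination bit range (the 0x80 flag in I4 meaning "zero the unselected bits") and I5 is the rotate needed to line the source half up with the destination half. Names below are illustrative only.

    #include <cassert>

    struct RisbOperands { unsigned I3, I4, I5; };

    static RisbOperands zextMoveOperands(unsigned Size, bool DestIsHigh,
                                         bool SrcIsHigh) {
      unsigned I3 = 32 - Size;      // keep only the low Size bits of the source
      unsigned I4 = 128 + 31;       // ...up to bit 31, zeroing everything else
      unsigned I5 = (DestIsHigh != SrcIsHigh) ? 32 : 0;  // cross-half rotate
      return {I3, I4, I5};
    }

    int main() {
      // 8-bit zero extension between different register halves:
      RisbOperands Ops = zextMoveOperands(8, /*DestIsHigh=*/true, /*SrcIsHigh=*/false);
      assert(Ops.I3 == 24 && Ops.I4 == 159 && Ops.I5 == 32);
    }
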
+
// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
-static int isSimpleMove(const MachineInstr *MI, int &FrameIndex, int Flag) {
+static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
+ unsigned Flag) {
const MCInstrDesc &MCID = MI->getDesc();
if ((MCID.TSFlags & Flag) &&
MI->getOperand(1).isFI() &&
@@ -101,6 +213,31 @@ unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}
+bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
+ int &DestFrameIndex,
+ int &SrcFrameIndex) const {
+ // Check for MVC 0(Length,FI1),0(FI2)
+ const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
+ if (MI->getOpcode() != SystemZ::MVC ||
+ !MI->getOperand(0).isFI() ||
+ MI->getOperand(1).getImm() != 0 ||
+ !MI->getOperand(3).isFI() ||
+ MI->getOperand(4).getImm() != 0)
+ return false;
+
+ // Check that Length covers the full slots.
+ int64_t Length = MI->getOperand(2).getImm();
+ unsigned FI1 = MI->getOperand(0).getIndex();
+ unsigned FI2 = MI->getOperand(3).getIndex();
+ if (MFI->getObjectSize(FI1) != Length ||
+ MFI->getObjectSize(FI2) != Length)
+ return false;
+
+ DestFrameIndex = FI1;
+ SrcFrameIndex = FI2;
+ return true;
+}
+
bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
@@ -123,19 +260,22 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// A terminator that isn't a branch can't easily be handled by this
// analysis.
- unsigned ThisCond;
- const MachineOperand *ThisTarget;
- if (!isBranch(I, ThisCond, ThisTarget))
+ if (!I->isBranch())
return true;
// Can't handle indirect branches.
- if (!ThisTarget->isMBB())
+ SystemZII::Branch Branch(getBranchInfo(I));
+ if (!Branch.Target->isMBB())
+ return true;
+
+ // Punt on compound branches.
+ if (Branch.Type != SystemZII::BranchNormal)
return true;
- if (ThisCond == SystemZ::CCMASK_ANY) {
+ if (Branch.CCMask == SystemZ::CCMASK_ANY) {
// Handle unconditional branches.
if (!AllowModify) {
- TBB = ThisTarget->getMBB();
+ TBB = Branch.Target->getMBB();
continue;
}
@@ -147,7 +287,7 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
FBB = 0;
// Delete the JMP if it's equivalent to a fall-through.
- if (MBB.isLayoutSuccessor(ThisTarget->getMBB())) {
+ if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
TBB = 0;
I->eraseFromParent();
I = MBB.end();
@@ -155,7 +295,7 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
    // TBB is used to indicate the unconditional destination.
- TBB = ThisTarget->getMBB();
+ TBB = Branch.Target->getMBB();
continue;
}
@@ -163,26 +303,28 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
if (Cond.empty()) {
// FIXME: add X86-style branch swap
FBB = TBB;
- TBB = ThisTarget->getMBB();
- Cond.push_back(MachineOperand::CreateImm(ThisCond));
+ TBB = Branch.Target->getMBB();
+ Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
+ Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
continue;
}
// Handle subsequent conditional branches.
- assert(Cond.size() == 1);
- assert(TBB);
+ assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");
// Only handle the case where all conditional branches branch to the same
// destination.
- if (TBB != ThisTarget->getMBB())
+ if (TBB != Branch.Target->getMBB())
return true;
// If the conditions are the same, we can leave them alone.
- unsigned OldCond = Cond[0].getImm();
- if (OldCond == ThisCond)
+ unsigned OldCCValid = Cond[0].getImm();
+ unsigned OldCCMask = Cond[1].getImm();
+ if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
continue;
// FIXME: Try combining conditions like X86 does. Should be easy on Z!
+ return false;
}
return false;
@@ -197,11 +339,9 @@ unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
--I;
if (I->isDebugValue())
continue;
- unsigned Cond;
- const MachineOperand *Target;
- if (!isBranch(I, Cond, Target))
+ if (!I->isBranch())
break;
- if (!Target->isMBB())
+ if (!getBranchInfo(I).Target->isMBB())
break;
// Remove the branch.
I->eraseFromParent();
@@ -212,6 +352,13 @@ unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return Count;
}
+bool SystemZInstrInfo::
+ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
+ assert(Cond.size() == 2 && "Invalid condition");
+ Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
+ return false;
+}
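
A minimal sketch (not part of the original patch) of the condition reversal above: XOR-ing the CC mask with the valid mask flips exactly the condition codes the comparison can produce. The constants follow the SystemZ convention CCMASK_0 = 8, CCMASK_1 = 4, CCMASK_2 = 2, CCMASK_3 = 1 and are restated locally for illustration.

    #include <cassert>

    int main() {
      const unsigned CCMASK_ICMP   = 8 | 4 | 2; // integer compares never set CC 3
      const unsigned CCMASK_CMP_EQ = 8;
      const unsigned CCMASK_CMP_NE = 4 | 2;
      // Reversing "branch if equal" yields "branch if not equal".
      assert((CCMASK_CMP_EQ ^ CCMASK_ICMP) == CCMASK_CMP_NE);
    }
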
+
unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
@@ -223,30 +370,185 @@ SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- assert((Cond.size() == 1 || Cond.size() == 0) &&
+ assert((Cond.size() == 2 || Cond.size() == 0) &&
"SystemZ branch conditions have one component!");
if (Cond.empty()) {
// Unconditional branch?
assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, DL, get(SystemZ::JG)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
return 1;
}
// Conditional branch.
unsigned Count = 0;
- unsigned CC = Cond[0].getImm();
- BuildMI(&MBB, DL, get(SystemZ::BRCL)).addImm(CC).addMBB(TBB);
+ unsigned CCValid = Cond[0].getImm();
+ unsigned CCMask = Cond[1].getImm();
+ BuildMI(&MBB, DL, get(SystemZ::BRC))
+ .addImm(CCValid).addImm(CCMask).addMBB(TBB);
++Count;
if (FBB) {
// Two-way Conditional branch. Insert the second branch.
- BuildMI(&MBB, DL, get(SystemZ::JG)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
++Count;
}
return Count;
}
+bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
+ unsigned &SrcReg, unsigned &SrcReg2,
+ int &Mask, int &Value) const {
+ assert(MI->isCompare() && "Caller should have checked for a comparison");
+
+ if (MI->getNumExplicitOperands() == 2 &&
+ MI->getOperand(0).isReg() &&
+ MI->getOperand(1).isImm()) {
+ SrcReg = MI->getOperand(0).getReg();
+ SrcReg2 = 0;
+ Value = MI->getOperand(1).getImm();
+ Mask = ~0;
+ return true;
+ }
+
+ return false;
+}
+
+// If Reg is a virtual register, return its definition, otherwise return null.
+static MachineInstr *getDef(unsigned Reg,
+ const MachineRegisterInfo *MRI) {
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ return 0;
+ return MRI->getUniqueVRegDef(Reg);
+}
+
+// Return true if MI is a shift of type Opcode by Imm bits.
+static bool isShift(MachineInstr *MI, int Opcode, int64_t Imm) {
+ return (MI->getOpcode() == Opcode &&
+ !MI->getOperand(2).getReg() &&
+ MI->getOperand(3).getImm() == Imm);
+}
+
+// If the destination of MI has no uses, delete it as dead.
+static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
+ if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
+ MI->eraseFromParent();
+}
+
+// Compare compares SrcReg against zero. Check whether SrcReg contains
+// the result of an IPM sequence whose input CC survives until Compare,
+// and whether Compare is therefore redundant. Delete it and return
+// true if so.
+static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
+ const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI) {
+ MachineInstr *LGFR = 0;
+ MachineInstr *RLL = getDef(SrcReg, MRI);
+ if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
+ LGFR = RLL;
+ RLL = getDef(LGFR->getOperand(1).getReg(), MRI);
+ }
+ if (!RLL || !isShift(RLL, SystemZ::RLL, 31))
+ return false;
+
+ MachineInstr *SRL = getDef(RLL->getOperand(1).getReg(), MRI);
+ if (!SRL || !isShift(SRL, SystemZ::SRL, SystemZ::IPM_CC))
+ return false;
+
+ MachineInstr *IPM = getDef(SRL->getOperand(1).getReg(), MRI);
+ if (!IPM || IPM->getOpcode() != SystemZ::IPM)
+ return false;
+
+  // Check that there are no assignments to CC between the IPM and Compare.
+ if (IPM->getParent() != Compare->getParent())
+ return false;
+ MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare;
+ for (++MBBI; MBBI != MBBE; ++MBBI) {
+ MachineInstr *MI = MBBI;
+ if (MI->modifiesRegister(SystemZ::CC, TRI))
+ return false;
+ }
+
+ Compare->eraseFromParent();
+ if (LGFR)
+ eraseIfDead(LGFR, MRI);
+ eraseIfDead(RLL, MRI);
+ eraseIfDead(SRL, MRI);
+ eraseIfDead(IPM, MRI);
+
+ return true;
+}
+
+bool
+SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
+ unsigned SrcReg, unsigned SrcReg2,
+ int Mask, int Value,
+ const MachineRegisterInfo *MRI) const {
+ assert(!SrcReg2 && "Only optimizing constant comparisons so far");
+ bool IsLogical = (Compare->getDesc().TSFlags & SystemZII::IsLogical) != 0;
+ if (Value == 0 &&
+ !IsLogical &&
+ removeIPMBasedCompare(Compare, SrcReg, MRI, TM.getRegisterInfo()))
+ return true;
+ return false;
+}
+
+// If Opcode is a move that has a conditional variant, return that variant,
+// otherwise return 0.
+static unsigned getConditionalMove(unsigned Opcode) {
+ switch (Opcode) {
+ case SystemZ::LR: return SystemZ::LOCR;
+ case SystemZ::LGR: return SystemZ::LOCGR;
+ default: return 0;
+ }
+}
+
+bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
+ unsigned Opcode = MI->getOpcode();
+ if (TM.getSubtargetImpl()->hasLoadStoreOnCond() &&
+ getConditionalMove(Opcode))
+ return true;
+ return false;
+}
+
+bool SystemZInstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &MBB,
+ unsigned NumCycles, unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const {
+ // For now only convert single instructions.
+ return NumCycles == 1;
+}
+
+bool SystemZInstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumCyclesT, unsigned ExtraPredCyclesT,
+ MachineBasicBlock &FMBB,
+ unsigned NumCyclesF, unsigned ExtraPredCyclesF,
+ const BranchProbability &Probability) const {
+ // For now avoid converting mutually-exclusive cases.
+ return false;
+}
+
+bool SystemZInstrInfo::
+PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Pred) const {
+ assert(Pred.size() == 2 && "Invalid condition");
+ unsigned CCValid = Pred[0].getImm();
+ unsigned CCMask = Pred[1].getImm();
+ assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
+ unsigned Opcode = MI->getOpcode();
+ if (TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
+ if (unsigned CondOpcode = getConditionalMove(Opcode)) {
+ MI->setDesc(get(CondOpcode));
+ MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ .addImm(CCValid).addImm(CCMask)
+        .addReg(SystemZ::CC, RegState::Implicit);
+ return true;
+ }
+ }
+ return false;
+}
+
void
SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, DebugLoc DL,
@@ -254,18 +556,21 @@ SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
bool KillSrc) const {
// Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too.
if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
- copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_high),
- RI.getSubReg(SrcReg, SystemZ::subreg_high), KillSrc);
- copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_low),
- RI.getSubReg(SrcReg, SystemZ::subreg_low), KillSrc);
+ copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
+ RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
+ copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
+ RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
+ return;
+ }
+
+ if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
+ emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc);
return;
}
// Everything else needs only one instruction.
unsigned Opcode;
- if (SystemZ::GR32BitRegClass.contains(DestReg, SrcReg))
- Opcode = SystemZ::LR;
- else if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
+ if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::LGR;
else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::LER;
@@ -313,6 +618,256 @@ SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
FrameIdx);
}
+// Return true if MI is a simple load or store with a 12-bit displacement
+// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
+static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
+ const MCInstrDesc &MCID = MI->getDesc();
+ return ((MCID.TSFlags & Flag) &&
+ isUInt<12>(MI->getOperand(2).getImm()) &&
+ MI->getOperand(3).getReg() == 0);
+}
+
+namespace {
+ struct LogicOp {
+ LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
+ LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
+ : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
+
+ operator bool() const { return RegSize; }
+
+ unsigned RegSize, ImmLSB, ImmSize;
+ };
+}
+
+static LogicOp interpretAndImmediate(unsigned Opcode) {
+ switch (Opcode) {
+ case SystemZ::NILMux: return LogicOp(32, 0, 16);
+ case SystemZ::NIHMux: return LogicOp(32, 16, 16);
+ case SystemZ::NILL64: return LogicOp(64, 0, 16);
+ case SystemZ::NILH64: return LogicOp(64, 16, 16);
+ case SystemZ::NIHL64: return LogicOp(64, 32, 16);
+ case SystemZ::NIHH64: return LogicOp(64, 48, 16);
+ case SystemZ::NIFMux: return LogicOp(32, 0, 32);
+ case SystemZ::NILF64: return LogicOp(64, 0, 32);
+ case SystemZ::NIHF64: return LogicOp(64, 32, 32);
+ default: return LogicOp();
+ }
+}
+
+// Used to return from convertToThreeAddress after replacing two-address
+// instruction OldMI with three-address instruction NewMI.
+static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
+ MachineInstr *NewMI,
+ LiveVariables *LV) {
+ if (LV) {
+ unsigned NumOps = OldMI->getNumOperands();
+ for (unsigned I = 1; I < NumOps; ++I) {
+ MachineOperand &Op = OldMI->getOperand(I);
+ if (Op.isReg() && Op.isKill())
+ LV->replaceKillInstruction(Op.getReg(), OldMI, NewMI);
+ }
+ }
+ return NewMI;
+}
+
+MachineInstr *
+SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const {
+ MachineInstr *MI = MBBI;
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+
+ unsigned Opcode = MI->getOpcode();
+ unsigned NumOps = MI->getNumOperands();
+
+ // Try to convert something like SLL into SLLK, if supported.
+ // We prefer to keep the two-operand form where possible both
+ // because it tends to be shorter and because some instructions
+ // have memory forms that can be used during spilling.
+ if (TM.getSubtargetImpl()->hasDistinctOps()) {
+ MachineOperand &Dest = MI->getOperand(0);
+ MachineOperand &Src = MI->getOperand(1);
+ unsigned DestReg = Dest.getReg();
+ unsigned SrcReg = Src.getReg();
+ // AHIMux is only really a three-operand instruction when both operands
+ // are low registers. Try to constrain both operands to be low if
+ // possible.
+ if (Opcode == SystemZ::AHIMux &&
+ TargetRegisterInfo::isVirtualRegister(DestReg) &&
+ TargetRegisterInfo::isVirtualRegister(SrcReg) &&
+ MRI.getRegClass(DestReg)->contains(SystemZ::R1L) &&
+ MRI.getRegClass(SrcReg)->contains(SystemZ::R1L)) {
+ MRI.constrainRegClass(DestReg, &SystemZ::GR32BitRegClass);
+ MRI.constrainRegClass(SrcReg, &SystemZ::GR32BitRegClass);
+ }
+ int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
+ if (ThreeOperandOpcode >= 0) {
+ MachineInstrBuilder MIB =
+ BuildMI(*MBB, MBBI, MI->getDebugLoc(), get(ThreeOperandOpcode))
+ .addOperand(Dest);
+ // Keep the kill state, but drop the tied flag.
+ MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
+ // Keep the remaining operands as-is.
+ for (unsigned I = 2; I < NumOps; ++I)
+ MIB.addOperand(MI->getOperand(I));
+ return finishConvertToThreeAddress(MI, MIB, LV);
+ }
+ }
+
+ // Try to convert an AND into an RISBG-type instruction.
+ if (LogicOp And = interpretAndImmediate(Opcode)) {
+ uint64_t Imm = MI->getOperand(2).getImm() << And.ImmLSB;
+ // AND IMMEDIATE leaves the other bits of the register unchanged.
+ Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
+ unsigned Start, End;
+ if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
+ unsigned NewOpcode;
+ if (And.RegSize == 64)
+ NewOpcode = SystemZ::RISBG;
+ else {
+ NewOpcode = SystemZ::RISBMux;
+ Start &= 31;
+ End &= 31;
+ }
+ MachineOperand &Dest = MI->getOperand(0);
+ MachineOperand &Src = MI->getOperand(1);
+ MachineInstrBuilder MIB =
+ BuildMI(*MBB, MI, MI->getDebugLoc(), get(NewOpcode))
+ .addOperand(Dest).addReg(0)
+ .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg())
+ .addImm(Start).addImm(End + 128).addImm(0);
+ return finishConvertToThreeAddress(MI, MIB, LV);
+ }
+ }
+ return 0;
+}
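
A minimal sketch (not part of the original patch) of the immediate widening performed when converting an AND-immediate into a RISBG-style instruction: the partial immediate is shifted into place and the untouched bit positions are filled with ones, because AND IMMEDIATE leaves them unchanged. The helper names are illustrative; the field layouts match the LogicOp table above.

    #include <cassert>
    #include <cstdint>

    static uint64_t allOnesBits(unsigned Count) {
      return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
    }

    // RegSize/ImmLSB/ImmSize describe which field of the register the
    // instruction's immediate covers (e.g. NILL64: 64-bit reg, bits 0-15).
    static uint64_t widenAndImmediate(uint64_t Imm, unsigned RegSize,
                                      unsigned ImmLSB, unsigned ImmSize) {
      uint64_t Full = Imm << ImmLSB;
      Full |= allOnesBits(RegSize) & ~(allOnesBits(ImmSize) << ImmLSB);
      return Full;
    }

    int main() {
      // NILL64 with immediate 0xFFFC clears only bits 0 and 1 of a 64-bit GPR.
      assert(widenAndImmediate(0xFFFC, 64, 0, 16) == 0xFFFFFFFFFFFFFFFCULL);
      // NIHL64 (bits 32-47) with 0x00FF keeps the rest of the register intact.
      assert(widenAndImmediate(0x00FF, 64, 32, 16) == 0xFFFF00FFFFFFFFFFULL);
    }
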
+
+MachineInstr *
+SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ unsigned Size = MFI->getObjectSize(FrameIndex);
+ unsigned Opcode = MI->getOpcode();
+
+ if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+ if ((Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
+ isInt<8>(MI->getOperand(2).getImm()) &&
+ !MI->getOperand(3).getReg()) {
+ // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
+ return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::AGSI))
+ .addFrameIndex(FrameIndex).addImm(0)
+ .addImm(MI->getOperand(2).getImm());
+ }
+ return 0;
+ }
+
+ // All other cases require a single operand.
+ if (Ops.size() != 1)
+ return 0;
+
+ unsigned OpNum = Ops[0];
+ assert(Size == MF.getRegInfo()
+ .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
+ "Invalid size combination");
+
+ if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) &&
+ OpNum == 0 &&
+ isInt<8>(MI->getOperand(2).getImm())) {
+ // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
+ Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
+ return BuildMI(MF, MI->getDebugLoc(), get(Opcode))
+ .addFrameIndex(FrameIndex).addImm(0)
+ .addImm(MI->getOperand(2).getImm());
+ }
+
+ if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
+ bool Op0IsGPR = (Opcode == SystemZ::LGDR);
+ bool Op1IsGPR = (Opcode == SystemZ::LDGR);
+ // If we're spilling the destination of an LDGR or LGDR, store the
+ // source register instead.
+ if (OpNum == 0) {
+ unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
+ return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
+ .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
+ .addImm(0).addReg(0);
+ }
+ // If we're spilling the source of an LDGR or LGDR, load the
+ // destination register instead.
+ if (OpNum == 1) {
+ unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
+ unsigned Dest = MI->getOperand(0).getReg();
+ return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
+ .addFrameIndex(FrameIndex).addImm(0).addReg(0);
+ }
+ }
+
+ // Look for cases where the source of a simple store or the destination
+ // of a simple load is being spilled. Try to use MVC instead.
+ //
+ // Although MVC is in practice a fast choice in these cases, it is still
+ // logically a bytewise copy. This means that we cannot use it if the
+ // load or store is volatile. We also wouldn't be able to use MVC if
+ // the two memories partially overlap, but that case cannot occur here,
+ // because we know that one of the memories is a full frame index.
+ //
+ // For performance reasons, we also want to avoid using MVC if the addresses
+ // might be equal. We don't worry about that case here, because spill slot
+ // coloring happens later, and because we have special code to remove
+ // MVCs that turn out to be redundant.
+ if (OpNum == 0 && MI->hasOneMemOperand()) {
+ MachineMemOperand *MMO = *MI->memoperands_begin();
+ if (MMO->getSize() == Size && !MMO->isVolatile()) {
+ // Handle conversion of loads.
+ if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
+ return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
+ .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
+ .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
+ .addMemOperand(MMO);
+ }
+ // Handle conversion of stores.
+ if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
+ return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
+ .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
+ .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
+ .addMemOperand(MMO);
+ }
+ }
+ }
+
+ // If the spilled operand is the final one, try to change <INSN>R
+ // into <INSN>.
+ int MemOpcode = SystemZ::getMemOpcode(Opcode);
+ if (MemOpcode >= 0) {
+ unsigned NumOps = MI->getNumExplicitOperands();
+ if (OpNum == NumOps - 1) {
+ const MCInstrDesc &MemDesc = get(MemOpcode);
+ uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
+ assert(AccessBytes != 0 && "Size of access should be known");
+ assert(AccessBytes <= Size && "Access outside the frame index");
+ uint64_t Offset = Size - AccessBytes;
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
+ for (unsigned I = 0; I < OpNum; ++I)
+ MIB.addOperand(MI->getOperand(I));
+ MIB.addFrameIndex(FrameIndex).addImm(Offset);
+ if (MemDesc.TSFlags & SystemZII::HasIndex)
+ MIB.addReg(0);
+ return MIB;
+ }
+ }
+
+ return 0;
+}
+
+MachineInstr *
+SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const {
+ return 0;
+}
+
bool
SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
switch (MI->getOpcode()) {
@@ -332,6 +887,138 @@ SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
splitMove(MI, SystemZ::STD);
return true;
+ case SystemZ::LBMux:
+ expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
+ return true;
+
+ case SystemZ::LHMux:
+ expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
+ return true;
+
+ case SystemZ::LLCRMux:
+ expandZExtPseudo(MI, SystemZ::LLCR, 8);
+ return true;
+
+ case SystemZ::LLHRMux:
+ expandZExtPseudo(MI, SystemZ::LLHR, 16);
+ return true;
+
+ case SystemZ::LLCMux:
+ expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
+ return true;
+
+ case SystemZ::LLHMux:
+ expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
+ return true;
+
+ case SystemZ::LMux:
+ expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
+ return true;
+
+ case SystemZ::STCMux:
+ expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
+ return true;
+
+ case SystemZ::STHMux:
+ expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
+ return true;
+
+ case SystemZ::STMux:
+ expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
+ return true;
+
+ case SystemZ::LHIMux:
+ expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
+ return true;
+
+ case SystemZ::IIFMux:
+ expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
+ return true;
+
+ case SystemZ::IILMux:
+ expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
+ return true;
+
+ case SystemZ::IIHMux:
+ expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
+ return true;
+
+ case SystemZ::NIFMux:
+ expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
+ return true;
+
+ case SystemZ::NILMux:
+ expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
+ return true;
+
+ case SystemZ::NIHMux:
+ expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
+ return true;
+
+ case SystemZ::OIFMux:
+ expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
+ return true;
+
+ case SystemZ::OILMux:
+ expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
+ return true;
+
+ case SystemZ::OIHMux:
+ expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
+ return true;
+
+ case SystemZ::XIFMux:
+ expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
+ return true;
+
+ case SystemZ::TMLMux:
+ expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
+ return true;
+
+ case SystemZ::TMHMux:
+ expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
+ return true;
+
+ case SystemZ::AHIMux:
+ expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
+ return true;
+
+ case SystemZ::AHIMuxK:
+ expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
+ return true;
+
+ case SystemZ::AFIMux:
+ expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
+ return true;
+
+ case SystemZ::CFIMux:
+ expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
+ return true;
+
+ case SystemZ::CLFIMux:
+ expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
+ return true;
+
+ case SystemZ::CMux:
+ expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
+ return true;
+
+ case SystemZ::CLMux:
+ expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
+ return true;
+
+ case SystemZ::RISBMux: {
+ bool DestIsHigh = isHighReg(MI->getOperand(0).getReg());
+ bool SrcIsHigh = isHighReg(MI->getOperand(2).getReg());
+ if (SrcIsHigh == DestIsHigh)
+ MI->setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
+ else {
+ MI->setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
+ MI->getOperand(5).setImm(MI->getOperand(5).getImm() ^ 32);
+ }
+ return true;
+ }
+
case SystemZ::ADJDYNALLOC:
splitAdjDynAlloc(MI);
return true;
@@ -341,32 +1028,60 @@ SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
}
}
-bool SystemZInstrInfo::
-ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
- assert(Cond.size() == 1 && "Invalid branch condition!");
- Cond[0].setImm(Cond[0].getImm() ^ SystemZ::CCMASK_ANY);
- return false;
+uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
+ if (MI->getOpcode() == TargetOpcode::INLINEASM) {
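+ // Inline asm has no fixed encoding; estimate its size from the assembly
+ // string using the target's MCAsmInfo.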
+ const MachineFunction *MF = MI->getParent()->getParent();
+ const char *AsmStr = MI->getOperand(0).getSymbolName();
+ return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
+ }
+ return MI->getDesc().getSize();
}
-bool SystemZInstrInfo::isBranch(const MachineInstr *MI, unsigned &Cond,
- const MachineOperand *&Target) const {
+SystemZII::Branch
+SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
case SystemZ::BR:
case SystemZ::J:
case SystemZ::JG:
- Cond = SystemZ::CCMASK_ANY;
- Target = &MI->getOperand(0);
- return true;
+ return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
+ SystemZ::CCMASK_ANY, &MI->getOperand(0));
case SystemZ::BRC:
case SystemZ::BRCL:
- Cond = MI->getOperand(0).getImm();
- Target = &MI->getOperand(1);
- return true;
+ return SystemZII::Branch(SystemZII::BranchNormal,
+ MI->getOperand(0).getImm(),
+ MI->getOperand(1).getImm(), &MI->getOperand(2));
+
+ case SystemZ::BRCT:
+ return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
+ SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));
+
+ case SystemZ::BRCTG:
+ return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
+ SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));
+
+ case SystemZ::CIJ:
+ case SystemZ::CRJ:
+ return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
+ MI->getOperand(2).getImm(), &MI->getOperand(3));
+
+ case SystemZ::CLIJ:
+ case SystemZ::CLRJ:
+ return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
+ MI->getOperand(2).getImm(), &MI->getOperand(3));
+
+ case SystemZ::CGIJ:
+ case SystemZ::CGRJ:
+ return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
+ MI->getOperand(2).getImm(), &MI->getOperand(3));
+
+ case SystemZ::CLGIJ:
+ case SystemZ::CLGRJ:
+ return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
+ MI->getOperand(2).getImm(), &MI->getOperand(3));
default:
- assert(!MI->getDesc().isBranch() && "Unknown branch opcode");
- return false;
+ llvm_unreachable("Unrecognized branch opcode");
}
}
@@ -375,7 +1090,13 @@ void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
unsigned &StoreOpcode) const {
if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
LoadOpcode = SystemZ::L;
- StoreOpcode = SystemZ::ST32;
+ StoreOpcode = SystemZ::ST;
+ } else if (RC == &SystemZ::GRH32BitRegClass) {
+ LoadOpcode = SystemZ::LFH;
+ StoreOpcode = SystemZ::STFH;
+ } else if (RC == &SystemZ::GRX32BitRegClass) {
+ LoadOpcode = SystemZ::LMux;
+ StoreOpcode = SystemZ::STMux;
} else if (RC == &SystemZ::GR64BitRegClass ||
RC == &SystemZ::ADDR64BitRegClass) {
LoadOpcode = SystemZ::LG;
@@ -424,6 +1145,88 @@ unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
return 0;
}
+unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
+ switch (Opcode) {
+ case SystemZ::L: return SystemZ::LT;
+ case SystemZ::LY: return SystemZ::LT;
+ case SystemZ::LG: return SystemZ::LTG;
+ case SystemZ::LGF: return SystemZ::LTGF;
+ case SystemZ::LR: return SystemZ::LTR;
+ case SystemZ::LGFR: return SystemZ::LTGFR;
+ case SystemZ::LGR: return SystemZ::LTGR;
+ case SystemZ::LER: return SystemZ::LTEBR;
+ case SystemZ::LDR: return SystemZ::LTDBR;
+ case SystemZ::LXR: return SystemZ::LTXBR;
+ default: return 0;
+ }
+}
+
+// Return true if Mask matches the regexp 0*1+0*, given that zero masks
+// have already been filtered out. Store the first set bit in LSB and
+// the number of set bits in Length if so.
+static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
+ unsigned First = findFirstSet(Mask);
+ uint64_t Top = (Mask >> First) + 1;
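+ // If the set bits in Mask form a single contiguous run, shifting out the
+ // trailing zeros and adding 1 gives a power of two, which is exactly what
+ // the (Top & -Top) == Top test below checks.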
+ if ((Top & -Top) == Top) {
+ LSB = First;
+ Length = findFirstSet(Top);
+ return true;
+ }
+ return false;
+}
+
+bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
+ unsigned &Start, unsigned &End) const {
+ // Reject trivial all-zero masks.
+ if (Mask == 0)
+ return false;
+
+ // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
+ // the msb and End specifies the index of the lsb.
+ unsigned LSB, Length;
+ if (isStringOfOnes(Mask, LSB, Length)) {
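+ // The I3 and I4 fields of the RxSBG instructions number bits from the
+ // MSB of the 64-bit register, hence the conversion from the LSB-based
+ // positions returned by isStringOfOnes.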
+ Start = 63 - (LSB + Length - 1);
+ End = 63 - LSB;
+ return true;
+ }
+
+ // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
+ // of the low 1s and End specifies the lsb of the high 1s.
+ if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
+ assert(LSB > 0 && "Bottom bit must be set");
+ assert(LSB + Length < BitSize && "Top bit must be set");
+ Start = 63 - (LSB - 1);
+ End = 63 - (LSB + Length);
+ return true;
+ }
+
+ return false;
+}
+
+unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
+ const MachineInstr *MI) const {
+ switch (Opcode) {
+ case SystemZ::CR:
+ return SystemZ::CRJ;
+ case SystemZ::CGR:
+ return SystemZ::CGRJ;
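+ // The immediate compare-and-branch forms only encode 8-bit immediates,
+ // so they can be used only when the compare's immediate fits that range.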
+ case SystemZ::CHI:
+ return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
+ case SystemZ::CGHI:
+ return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
+ case SystemZ::CLR:
+ return SystemZ::CLRJ;
+ case SystemZ::CLGR:
+ return SystemZ::CLGRJ;
+ case SystemZ::CLFI:
+ return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLIJ : 0;
+ case SystemZ::CLGFI:
+ return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLGIJ : 0;
+ default:
+ return 0;
+ }
+}
+
void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned Reg, uint64_t Value) const {
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.h b/lib/Target/SystemZ/SystemZInstrInfo.h
index 0fc47617f0a2..be4c8fe2add2 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -28,12 +28,31 @@ class SystemZTargetMachine;
namespace SystemZII {
enum {
// See comments in SystemZInstrFormats.td.
- SimpleBDXLoad = (1 << 0),
- SimpleBDXStore = (1 << 1),
- Has20BitOffset = (1 << 2),
- HasIndex = (1 << 3),
- Is128Bit = (1 << 4)
+ SimpleBDXLoad = (1 << 0),
+ SimpleBDXStore = (1 << 1),
+ Has20BitOffset = (1 << 2),
+ HasIndex = (1 << 3),
+ Is128Bit = (1 << 4),
+ AccessSizeMask = (31 << 5),
+ AccessSizeShift = 5,
+ CCValuesMask = (15 << 10),
+ CCValuesShift = 10,
+ CompareZeroCCMaskMask = (15 << 14),
+ CompareZeroCCMaskShift = 14,
+ CCMaskFirst = (1 << 18),
+ CCMaskLast = (1 << 19),
+ IsLogical = (1 << 20)
};
+ static inline unsigned getAccessSize(unsigned int Flags) {
+ return (Flags & AccessSizeMask) >> AccessSizeShift;
+ }
+ static inline unsigned getCCValues(unsigned int Flags) {
+ return (Flags & CCValuesMask) >> CCValuesShift;
+ }
+ static inline unsigned getCompareZeroCCMask(unsigned int Flags) {
+ return (Flags & CompareZeroCCMaskMask) >> CompareZeroCCMaskShift;
+ }
+
// SystemZ MachineOperand target flags.
enum {
// Masks out the bits for the access model.
@@ -42,14 +61,74 @@ namespace SystemZII {
// @GOT (aka @GOTENT)
MO_GOT = (1 << 0)
};
+ // Classifies a branch.
+ enum BranchType {
+ // An instruction that branches on the current value of CC.
+ BranchNormal,
+
+ // An instruction that performs a 32-bit signed comparison and branches
+ // on the result.
+ BranchC,
+
+ // An instruction that performs a 32-bit unsigned comparison and branches
+ // on the result.
+ BranchCL,
+
+ // An instruction that performs a 64-bit signed comparison and branches
+ // on the result.
+ BranchCG,
+
+ // An instruction that performs a 64-bit unsigned comparison and branches
+ // on the result.
+ BranchCLG,
+
+ // An instruction that decrements a 32-bit register and branches if
+ // the result is nonzero.
+ BranchCT,
+
+ // An instruction that decrements a 64-bit register and branches if
+ // the result is nonzero.
+ BranchCTG
+ };
+ // Information about a branch instruction.
+ struct Branch {
+ // The type of the branch.
+ BranchType Type;
+
+ // CCMASK_<N> is set if CC might be equal to N.
+ unsigned CCValid;
+
+ // CCMASK_<N> is set if the branch should be taken when CC == N.
+ unsigned CCMask;
+
+ // The target of the branch.
+ const MachineOperand *Target;
+
+ Branch(BranchType type, unsigned ccValid, unsigned ccMask,
+ const MachineOperand *target)
+ : Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {}
+ };
}
class SystemZInstrInfo : public SystemZGenInstrInfo {
const SystemZRegisterInfo RI;
+ SystemZTargetMachine &TM;
void splitMove(MachineBasicBlock::iterator MI, unsigned NewOpcode) const;
void splitAdjDynAlloc(MachineBasicBlock::iterator MI) const;
-
+ void expandRIPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned HighOpcode, bool ConvertHigh) const;
+ void expandRIEPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned LowOpcodeK, unsigned HighOpcode) const;
+ void expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned HighOpcode) const;
+ void expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned Size) const;
+ void emitGRX32Move(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ unsigned LowLowOpcode, unsigned Size, bool KillSrc) const;
+ virtual void anchor();
+
public:
explicit SystemZInstrInfo(SystemZTargetMachine &TM);
@@ -58,6 +137,8 @@ public:
int &FrameIndex) const LLVM_OVERRIDE;
virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const LLVM_OVERRIDE;
+ virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
+ int &SrcFrameIndex) const LLVM_OVERRIDE;
virtual bool AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
@@ -68,6 +149,29 @@ public:
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const LLVM_OVERRIDE;
+ bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &Mask, int &Value) const
+ LLVM_OVERRIDE;
+ bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int Mask, int Value,
+ const MachineRegisterInfo *MRI) const LLVM_OVERRIDE;
+ virtual bool isPredicable(MachineInstr *MI) const LLVM_OVERRIDE;
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const
+ LLVM_OVERRIDE;
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumCyclesT,
+ unsigned ExtraPredCyclesT,
+ MachineBasicBlock &FMBB,
+ unsigned NumCyclesF,
+ unsigned ExtraPredCyclesF,
+ const BranchProbability &Probability) const
+ LLVM_OVERRIDE;
+ virtual bool
+ PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Pred) const
+ LLVM_OVERRIDE;
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
@@ -84,6 +188,18 @@ public:
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const LLVM_OVERRIDE;
+ virtual MachineInstr *
+ convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const;
+ virtual MachineInstr *
+ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const;
+ virtual MachineInstr *
+ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const;
virtual bool
expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const LLVM_OVERRIDE;
virtual bool
@@ -93,13 +209,15 @@ public:
// Return the SystemZRegisterInfo, which this class owns.
const SystemZRegisterInfo &getRegisterInfo() const { return RI; }
+ // Return the size in bytes of MI.
+ uint64_t getInstSizeInBytes(const MachineInstr *MI) const;
+
// Return a description of conditional or unconditional branch MI,
// giving the mask of condition-code values on which it will branch
// and the operand that contains the branch target. This target
// can be a register or a basic block.
- bool isBranch(const MachineInstr *MI, unsigned &Cond,
- const MachineOperand *&Target) const;
+ SystemZII::Branch getBranchInfo(const MachineInstr *MI) const;
// Get the load and store opcodes for a given register class.
void getLoadStoreOpcodes(const TargetRegisterClass *RC,
@@ -112,6 +230,22 @@ public:
// exists.
unsigned getOpcodeForOffset(unsigned Opcode, int64_t Offset) const;
+ // If Opcode is a load instruction that has a LOAD AND TEST form,
+ // return the opcode for the testing form, otherwise return 0.
+ unsigned getLoadAndTest(unsigned Opcode) const;
+
+ // Return true if ROTATE AND ... SELECTED BITS can be used to select bits
+ // Mask of the R2 operand, given that only the low BitSize bits of Mask are
+ // significant. Set Start and End to the I3 and I4 operands if so.
+ bool isRxSBGMask(uint64_t Mask, unsigned BitSize,
+ unsigned &Start, unsigned &End) const;
+
+ // If Opcode is a COMPARE opcode for which an associated COMPARE AND
+ // BRANCH exists, return the opcode for the latter, otherwise return 0.
+ // MI, if nonnull, is the compare instruction.
+ unsigned getCompareAndBranch(unsigned Opcode,
+ const MachineInstr *MI = 0) const;
+
// Emit code before MBBI in MI to move immediate value Value into
// physical register Reg.
void loadImmediate(MachineBasicBlock &MBB,
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td
index 7ffa382d36a5..6524e442b63d 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -32,77 +32,202 @@ let neverHasSideEffects = 1 in {
// Control flow instructions
//===----------------------------------------------------------------------===//
-// A return instruction. R1 is the condition-code mask (all 1s)
-// and R2 is the target address, which is always stored in %r14.
-let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1,
- R1 = 15, R2 = 14, isCodeGenOnly = 1 in {
- def RET : InstRR<0x07, (outs), (ins), "br\t%r14", [(z_retflag)]>;
-}
+// A return instruction (br %r14).
+let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
+ def Return : Alias<2, (outs), (ins), [(z_retflag)]>;
// Unconditional branches. R1 is the condition-code mask (all 1s).
let isBranch = 1, isTerminator = 1, isBarrier = 1, R1 = 15 in {
let isIndirectBranch = 1 in
- def BR : InstRR<0x07, (outs), (ins ADDR64:$dst),
- "br\t$dst", [(brind ADDR64:$dst)]>;
+ def BR : InstRR<0x07, (outs), (ins ADDR64:$R2),
+ "br\t$R2", [(brind ADDR64:$R2)]>;
- // An assembler extended mnemonic for BRC. Use a separate instruction for
- // the asm parser, so that we don't relax Js to external symbols into JGs.
- let isCodeGenOnly = 1 in
- def J : InstRI<0xA74, (outs), (ins brtarget16:$dst), "j\t$dst", []>;
- let isAsmParserOnly = 1 in
- def AsmJ : InstRI<0xA74, (outs), (ins brtarget16:$dst), "j\t$dst", []>;
+ // An assembler extended mnemonic for BRC.
+ def J : InstRI<0xA74, (outs), (ins brtarget16:$I2), "j\t$I2",
+ [(br bb:$I2)]>;
// An assembler extended mnemonic for BRCL. (The extension is "G"
// rather than "L" because "JL" is "Jump if Less".)
- def JG : InstRIL<0xC04, (outs), (ins brtarget32:$dst),
- "jg\t$dst", [(br bb:$dst)]>;
+ def JG : InstRIL<0xC04, (outs), (ins brtarget32:$I2), "jg\t$I2", []>;
}
// Conditional branches. It's easier for LLVM to handle these branches
// in their raw BRC/BRCL form, with the 4-bit condition-code mask being
// the first operand. It seems friendlier to use mnemonic forms like
// JE and JLH when writing out the assembly though.
-multiclass CondBranches<Operand imm, string short, string long> {
- let isBranch = 1, isTerminator = 1, Uses = [PSW] in {
- def "" : InstRI<0xA74, (outs), (ins imm:$cond, brtarget16:$dst), short, []>;
- def L : InstRIL<0xC04, (outs), (ins imm:$cond, brtarget32:$dst), long, []>;
+let isBranch = 1, isTerminator = 1, Uses = [CC] in {
+ let isCodeGenOnly = 1, CCMaskFirst = 1 in {
+ def BRC : InstRI<0xA74, (outs), (ins cond4:$valid, cond4:$R1,
+ brtarget16:$I2), "j$R1\t$I2",
+ [(z_br_ccmask cond4:$valid, cond4:$R1, bb:$I2)]>;
+ def BRCL : InstRIL<0xC04, (outs), (ins cond4:$valid, cond4:$R1,
+ brtarget32:$I2), "jg$R1\t$I2", []>;
+ }
+ def AsmBRC : InstRI<0xA74, (outs), (ins uimm8zx4:$R1, brtarget16:$I2),
+ "brc\t$R1, $I2", []>;
+ def AsmBRCL : InstRIL<0xC04, (outs), (ins uimm8zx4:$R1, brtarget32:$I2),
+ "brcl\t$R1, $I2", []>;
+ def AsmBCR : InstRR<0x07, (outs), (ins uimm8zx4:$R1, GR64:$R2),
+ "bcr\t$R1, $R2", []>;
+}
+
+// Fused compare-and-branch instructions. As for normal branches,
+// we handle these instructions internally in their raw CRJ-like form,
+// but use assembly macros like CRJE when writing them out.
+//
+// These instructions do not use or clobber the condition codes.
+// We nevertheless pretend that they clobber CC, so that we can lower
+// them to separate comparisons and BRCLs if the branch ends up being
+// out of range.
+multiclass CompareBranches<Operand ccmask, string pos1, string pos2> {
+ let isBranch = 1, isTerminator = 1, Defs = [CC] in {
+ def RJ : InstRIEb<0xEC76, (outs), (ins GR32:$R1, GR32:$R2, ccmask:$M3,
+ brtarget16:$RI4),
+ "crj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ def GRJ : InstRIEb<0xEC64, (outs), (ins GR64:$R1, GR64:$R2, ccmask:$M3,
+ brtarget16:$RI4),
+ "cgrj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ def IJ : InstRIEc<0xEC7E, (outs), (ins GR32:$R1, imm32sx8:$I2, ccmask:$M3,
+ brtarget16:$RI4),
+ "cij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ def GIJ : InstRIEc<0xEC7C, (outs), (ins GR64:$R1, imm64sx8:$I2, ccmask:$M3,
+ brtarget16:$RI4),
+ "cgij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ def LRJ : InstRIEb<0xEC77, (outs), (ins GR32:$R1, GR32:$R2, ccmask:$M3,
+ brtarget16:$RI4),
+ "clrj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ def LGRJ : InstRIEb<0xEC65, (outs), (ins GR64:$R1, GR64:$R2, ccmask:$M3,
+ brtarget16:$RI4),
+ "clgrj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ def LIJ : InstRIEc<0xEC7F, (outs), (ins GR32:$R1, imm32zx8:$I2, ccmask:$M3,
+ brtarget16:$RI4),
+ "clij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ def LGIJ : InstRIEc<0xEC7D, (outs), (ins GR64:$R1, imm64zx8:$I2, ccmask:$M3,
+ brtarget16:$RI4),
+ "clgij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
}
}
let isCodeGenOnly = 1 in
- defm BRC : CondBranches<cond4, "j$cond\t$dst", "jg$cond\t$dst">;
-let isAsmParserOnly = 1 in
- defm AsmBRC : CondBranches<uimm8zx4, "brc\t$cond, $dst", "brcl\t$cond, $dst">;
-
-def : Pat<(z_br_ccmask cond4:$cond, bb:$dst), (BRCL cond4:$cond, bb:$dst)>;
-
-// Define AsmParser mnemonics for each condition code.
-multiclass CondExtendedMnemonic<bits<4> Cond, string name> {
- let R1 = Cond in {
- def "" : InstRI<0xA74, (outs), (ins brtarget16:$dst),
- "j"##name##"\t$dst", []>;
- def L : InstRIL<0xC04, (outs), (ins brtarget32:$dst),
- "jg"##name##"\t$dst", []>;
+ defm C : CompareBranches<cond4, "$M3", "">;
+defm AsmC : CompareBranches<uimm8zx4, "", "$M3, ">;
+
+// Define AsmParser mnemonics for each general condition-code mask
+// (integer or floating-point)
+multiclass CondExtendedMnemonic<bits<4> ccmask, string name> {
+ let R1 = ccmask in {
+ def J : InstRI<0xA74, (outs), (ins brtarget16:$I2),
+ "j"##name##"\t$I2", []>;
+ def JG : InstRIL<0xC04, (outs), (ins brtarget32:$I2),
+ "jg"##name##"\t$I2", []>;
+ def BR : InstRR<0x07, (outs), (ins ADDR64:$R2), "b"##name##"r\t$R2", []>;
+ }
+ def LOCR : FixedCondUnaryRRF<"locr"##name, 0xB9F2, GR32, GR32, ccmask>;
+ def LOCGR : FixedCondUnaryRRF<"locgr"##name, 0xB9E2, GR64, GR64, ccmask>;
+ def LOC : FixedCondUnaryRSY<"loc"##name, 0xEBF2, GR32, ccmask, 4>;
+ def LOCG : FixedCondUnaryRSY<"locg"##name, 0xEBE2, GR64, ccmask, 8>;
+ def STOC : FixedCondStoreRSY<"stoc"##name, 0xEBF3, GR32, ccmask, 4>;
+ def STOCG : FixedCondStoreRSY<"stocg"##name, 0xEBE3, GR64, ccmask, 8>;
+}
+defm AsmO : CondExtendedMnemonic<1, "o">;
+defm AsmH : CondExtendedMnemonic<2, "h">;
+defm AsmNLE : CondExtendedMnemonic<3, "nle">;
+defm AsmL : CondExtendedMnemonic<4, "l">;
+defm AsmNHE : CondExtendedMnemonic<5, "nhe">;
+defm AsmLH : CondExtendedMnemonic<6, "lh">;
+defm AsmNE : CondExtendedMnemonic<7, "ne">;
+defm AsmE : CondExtendedMnemonic<8, "e">;
+defm AsmNLH : CondExtendedMnemonic<9, "nlh">;
+defm AsmHE : CondExtendedMnemonic<10, "he">;
+defm AsmNL : CondExtendedMnemonic<11, "nl">;
+defm AsmLE : CondExtendedMnemonic<12, "le">;
+defm AsmNH : CondExtendedMnemonic<13, "nh">;
+defm AsmNO : CondExtendedMnemonic<14, "no">;
+
+// Define AsmParser mnemonics for each integer condition-code mask.
+// This is like the list above, except that condition 3 is not possible
+// and that the low bit of the mask is therefore always 0. This means
+// that each condition has two names. Conditions "o" and "no" are not used.
+//
+// We don't make one of the two names an alias of the other because
+// we need the custom parsing routines to select the correct register class.
+multiclass IntCondExtendedMnemonicA<bits<4> ccmask, string name> {
+ let M3 = ccmask in {
+ def CR : InstRIEb<0xEC76, (outs), (ins GR32:$R1, GR32:$R2,
+ brtarget16:$RI4),
+ "crj"##name##"\t$R1, $R2, $RI4", []>;
+ def CGR : InstRIEb<0xEC64, (outs), (ins GR64:$R1, GR64:$R2,
+ brtarget16:$RI4),
+ "cgrj"##name##"\t$R1, $R2, $RI4", []>;
+ def CI : InstRIEc<0xEC7E, (outs), (ins GR32:$R1, imm32sx8:$I2,
+ brtarget16:$RI4),
+ "cij"##name##"\t$R1, $I2, $RI4", []>;
+ def CGI : InstRIEc<0xEC7C, (outs), (ins GR64:$R1, imm64sx8:$I2,
+ brtarget16:$RI4),
+ "cgij"##name##"\t$R1, $I2, $RI4", []>;
+ def CLR : InstRIEb<0xEC77, (outs), (ins GR32:$R1, GR32:$R2,
+ brtarget16:$RI4),
+ "clrj"##name##"\t$R1, $R2, $RI4", []>;
+ def CLGR : InstRIEb<0xEC65, (outs), (ins GR64:$R1, GR64:$R2,
+ brtarget16:$RI4),
+ "clgrj"##name##"\t$R1, $R2, $RI4", []>;
+ def CLI : InstRIEc<0xEC7F, (outs), (ins GR32:$R1, imm32zx8:$I2,
+ brtarget16:$RI4),
+ "clij"##name##"\t$R1, $I2, $RI4", []>;
+ def CLGI : InstRIEc<0xEC7D, (outs), (ins GR64:$R1, imm64zx8:$I2,
+ brtarget16:$RI4),
+ "clgij"##name##"\t$R1, $I2, $RI4", []>;
}
}
-let isAsmParserOnly = 1 in {
- defm AsmJO : CondExtendedMnemonic<1, "o">;
- defm AsmJH : CondExtendedMnemonic<2, "h">;
- defm AsmJNLE : CondExtendedMnemonic<3, "nle">;
- defm AsmJL : CondExtendedMnemonic<4, "l">;
- defm AsmJNHE : CondExtendedMnemonic<5, "nhe">;
- defm AsmJLH : CondExtendedMnemonic<6, "lh">;
- defm AsmJNE : CondExtendedMnemonic<7, "ne">;
- defm AsmJE : CondExtendedMnemonic<8, "e">;
- defm AsmJNLH : CondExtendedMnemonic<9, "nlh">;
- defm AsmJHE : CondExtendedMnemonic<10, "he">;
- defm AsmJNL : CondExtendedMnemonic<11, "nl">;
- defm AsmJLE : CondExtendedMnemonic<12, "le">;
- defm AsmJNH : CondExtendedMnemonic<13, "nh">;
- defm AsmJNO : CondExtendedMnemonic<14, "no">;
+multiclass IntCondExtendedMnemonic<bits<4> ccmask, string name1, string name2>
+ : IntCondExtendedMnemonicA<ccmask, name1> {
+ let isAsmParserOnly = 1 in
+ defm Alt : IntCondExtendedMnemonicA<ccmask, name2>;
+}
+defm AsmJH : IntCondExtendedMnemonic<2, "h", "nle">;
+defm AsmJL : IntCondExtendedMnemonic<4, "l", "nhe">;
+defm AsmJLH : IntCondExtendedMnemonic<6, "lh", "ne">;
+defm AsmJE : IntCondExtendedMnemonic<8, "e", "nlh">;
+defm AsmJHE : IntCondExtendedMnemonic<10, "he", "nl">;
+defm AsmJLE : IntCondExtendedMnemonic<12, "le", "nh">;
+
+// Decrement a register and branch if it is nonzero. These don't clobber CC,
+// but we might need to split long branches into sequences that do.
+let Defs = [CC] in {
+ def BRCT : BranchUnaryRI<"brct", 0xA76, GR32>;
+ def BRCTG : BranchUnaryRI<"brctg", 0xA77, GR64>;
}
-def Select32 : SelectWrapper<GR32>;
-def Select64 : SelectWrapper<GR64>;
+//===----------------------------------------------------------------------===//
+// Select instructions
+//===----------------------------------------------------------------------===//
+
+def Select32Mux : SelectWrapper<GRX32>, Requires<[FeatureHighWord]>;
+def Select32 : SelectWrapper<GR32>;
+def Select64 : SelectWrapper<GR64>;
+
+// We don't define 32-bit Mux stores because the low-only STOC should
+// always be used if possible.
+defm CondStore8Mux : CondStores<GRX32, nonvolatile_truncstorei8,
+ nonvolatile_anyextloadi8, bdxaddr20only>,
+ Requires<[FeatureHighWord]>;
+defm CondStore16Mux : CondStores<GRX32, nonvolatile_truncstorei16,
+ nonvolatile_anyextloadi16, bdxaddr20only>,
+ Requires<[FeatureHighWord]>;
+defm CondStore8 : CondStores<GR32, nonvolatile_truncstorei8,
+ nonvolatile_anyextloadi8, bdxaddr20only>;
+defm CondStore16 : CondStores<GR32, nonvolatile_truncstorei16,
+ nonvolatile_anyextloadi16, bdxaddr20only>;
+defm CondStore32 : CondStores<GR32, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+
+defm : CondStores64<CondStore8, CondStore8Inv, nonvolatile_truncstorei8,
+ nonvolatile_anyextloadi8, bdxaddr20only>;
+defm : CondStores64<CondStore16, CondStore16Inv, nonvolatile_truncstorei16,
+ nonvolatile_anyextloadi16, bdxaddr20only>;
+defm : CondStores64<CondStore32, CondStore32Inv, nonvolatile_truncstorei32,
+ nonvolatile_anyextloadi32, bdxaddr20only>;
+defm CondStore64 : CondStores<GR64, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
//===----------------------------------------------------------------------===//
// Call instructions
@@ -110,26 +235,30 @@ def Select64 : SelectWrapper<GR64>;
// The definitions here are for the call-clobbered registers.
let isCall = 1, Defs = [R0D, R1D, R2D, R3D, R4D, R5D, R14D,
- F0D, F1D, F2D, F3D, F4D, F5D, F6D, F7D],
- R1 = 14, isCodeGenOnly = 1 in {
- def BRAS : InstRI<0xA75, (outs), (ins pcrel16call:$dst, variable_ops),
- "bras\t%r14, $dst", []>;
- def BRASL : InstRIL<0xC05, (outs), (ins pcrel32call:$dst, variable_ops),
- "brasl\t%r14, $dst", [(z_call pcrel32call:$dst)]>;
- def BASR : InstRR<0x0D, (outs), (ins ADDR64:$dst, variable_ops),
- "basr\t%r14, $dst", [(z_call ADDR64:$dst)]>;
+ F0D, F1D, F2D, F3D, F4D, F5D, F6D, F7D, CC] in {
+ def CallBRASL : Alias<6, (outs), (ins pcrel32:$I2, variable_ops),
+ [(z_call pcrel32:$I2)]>;
+ def CallBASR : Alias<2, (outs), (ins ADDR64:$R2, variable_ops),
+ [(z_call ADDR64:$R2)]>;
+}
+
+// Sibling calls. Indirect sibling calls must be via R1, since R2 upwards
+// are argument registers and since branching to R0 is a no-op.
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
+ def CallJG : Alias<6, (outs), (ins pcrel32:$I2),
+ [(z_sibcall pcrel32:$I2)]>;
+ let Uses = [R1D] in
+ def CallBR : Alias<2, (outs), (ins), [(z_sibcall R1D)]>;
}
// Define the general form of the call instructions for the asm parser.
// These instructions don't hard-code %r14 as the return address register.
-let isAsmParserOnly = 1 in {
- def AsmBRAS : InstRI<0xA75, (outs), (ins GR64:$save, brtarget16:$dst),
- "bras\t$save, $dst", []>;
- def AsmBRASL : InstRIL<0xC05, (outs), (ins GR64:$save, brtarget32:$dst),
- "brasl\t$save, $dst", []>;
- def AsmBASR : InstRR<0x0D, (outs), (ins GR64:$save, ADDR64:$dst),
- "basr\t$save, $dst", []>;
-}
+def BRAS : InstRI<0xA75, (outs), (ins GR64:$R1, brtarget16:$I2),
+ "bras\t$R1, $I2", []>;
+def BRASL : InstRIL<0xC05, (outs), (ins GR64:$R1, brtarget32:$I2),
+ "brasl\t$R1, $I2", []>;
+def BASR : InstRR<0x0D, (outs), (ins GR64:$R1, ADDR64:$R2),
+ "basr\t$R1, $R2", []>;
//===----------------------------------------------------------------------===//
// Move instructions
@@ -137,13 +266,34 @@ let isAsmParserOnly = 1 in {
// Register moves.
let neverHasSideEffects = 1 in {
- def LR : UnaryRR <"lr", 0x18, null_frag, GR32, GR32>;
- def LGR : UnaryRRE<"lgr", 0xB904, null_frag, GR64, GR64>;
+ // Expands to LR, RISBHG or RISBLG, depending on the choice of registers.
+ def LRMux : UnaryRRPseudo<"l", null_frag, GRX32, GRX32>,
+ Requires<[FeatureHighWord]>;
+ def LR : UnaryRR <"l", 0x18, null_frag, GR32, GR32>;
+ def LGR : UnaryRRE<"lg", 0xB904, null_frag, GR64, GR64>;
+}
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
+ def LTR : UnaryRR <"lt", 0x12, null_frag, GR32, GR32>;
+ def LTGR : UnaryRRE<"ltg", 0xB902, null_frag, GR64, GR64>;
+}
+
+// Move on condition.
+let isCodeGenOnly = 1, Uses = [CC] in {
+ def LOCR : CondUnaryRRF<"loc", 0xB9F2, GR32, GR32>;
+ def LOCGR : CondUnaryRRF<"locg", 0xB9E2, GR64, GR64>;
+}
+let Uses = [CC] in {
+ def AsmLOCR : AsmCondUnaryRRF<"loc", 0xB9F2, GR32, GR32>;
+ def AsmLOCGR : AsmCondUnaryRRF<"locg", 0xB9E2, GR64, GR64>;
}
// Immediate moves.
-let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
- // 16-bit sign-extended immediates.
+let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
+ isReMaterializable = 1 in {
+ // 16-bit sign-extended immediates. LHIMux expands to LHI or IIHF,
+ // depending on the choice of register.
+ def LHIMux : UnaryRIPseudo<bitconvert, GRX32, imm32sx16>,
+ Requires<[FeatureHighWord]>;
def LHI : UnaryRI<"lhi", 0xA78, bitconvert, GR32, imm32sx16>;
def LGHI : UnaryRI<"lghi", 0xA79, bitconvert, GR64, imm64sx16>;
@@ -161,11 +311,13 @@ let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
// Register loads.
let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
- defm L : UnaryRXPair<"l", 0x58, 0xE358, load, GR32>;
- def LRL : UnaryRILPC<"lrl", 0xC4D, aligned_load, GR32>;
-
- def LG : UnaryRXY<"lg", 0xE304, load, GR64>;
- def LGRL : UnaryRILPC<"lgrl", 0xC48, aligned_load, GR64>;
+ // Expands to L, LY or LFH, depending on the choice of register.
+ def LMux : UnaryRXYPseudo<"l", load, GRX32, 4>,
+ Requires<[FeatureHighWord]>;
+ defm L : UnaryRXPair<"l", 0x58, 0xE358, load, GR32, 4>;
+ def LFH : UnaryRXY<"lfh", 0xE3CA, load, GRH32, 4>,
+ Requires<[FeatureHighWord]>;
+ def LG : UnaryRXY<"lg", 0xE304, load, GR64, 8>;
// These instructions are split after register allocation, so we don't
// want a custom inserter.
@@ -174,16 +326,35 @@ let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
[(set GR128:$dst, (load bdxaddr20only128:$src))]>;
}
}
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
+ def LT : UnaryRXY<"lt", 0xE312, load, GR32, 4>;
+ def LTG : UnaryRXY<"ltg", 0xE302, load, GR64, 8>;
+}
+
+let canFoldAsLoad = 1 in {
+ def LRL : UnaryRILPC<"lrl", 0xC4D, aligned_load, GR32>;
+ def LGRL : UnaryRILPC<"lgrl", 0xC48, aligned_load, GR64>;
+}
+
+// Load on condition.
+let isCodeGenOnly = 1, Uses = [CC] in {
+ def LOC : CondUnaryRSY<"loc", 0xEBF2, nonvolatile_load, GR32, 4>;
+ def LOCG : CondUnaryRSY<"locg", 0xEBE2, nonvolatile_load, GR64, 8>;
+}
+let Uses = [CC] in {
+ def AsmLOC : AsmCondUnaryRSY<"loc", 0xEBF2, GR32, 4>;
+ def AsmLOCG : AsmCondUnaryRSY<"locg", 0xEBE2, GR64, 8>;
+}
// Register stores.
let SimpleBDXStore = 1 in {
- let isCodeGenOnly = 1 in {
- defm ST32 : StoreRXPair<"st", 0x50, 0xE350, store, GR32>;
- def STRL32 : StoreRILPC<"strl", 0xC4F, aligned_store, GR32>;
- }
-
- def STG : StoreRXY<"stg", 0xE324, store, GR64>;
- def STGRL : StoreRILPC<"stgrl", 0xC4B, aligned_store, GR64>;
+ // Expands to ST, STY or STFH, depending on the choice of register.
+ def STMux : StoreRXYPseudo<store, GRX32, 4>,
+ Requires<[FeatureHighWord]>;
+ defm ST : StoreRXPair<"st", 0x50, 0xE350, store, GR32, 4>;
+ def STFH : StoreRXY<"stfh", 0xE3CB, store, GRH32, 4>,
+ Requires<[FeatureHighWord]>;
+ def STG : StoreRXY<"stg", 0xE324, store, GR64, 8>;
// These instructions are split after register allocation, so we don't
// want a custom inserter.
@@ -192,6 +363,18 @@ let SimpleBDXStore = 1 in {
[(store GR128:$src, bdxaddr20only128:$dst)]>;
}
}
+def STRL : StoreRILPC<"strl", 0xC4F, aligned_store, GR32>;
+def STGRL : StoreRILPC<"stgrl", 0xC4B, aligned_store, GR64>;
+
+// Store on condition.
+let isCodeGenOnly = 1, Uses = [CC] in {
+ def STOC : CondStoreRSY<"stoc", 0xEBF3, GR32, 4>;
+ def STOCG : CondStoreRSY<"stocg", 0xEBE3, GR64, 8>;
+}
+let Uses = [CC] in {
+ def AsmSTOC : AsmCondStoreRSY<"stoc", 0xEBF3, GR32, 4>;
+ def AsmSTOCG : AsmCondStoreRSY<"stocg", 0xEBE3, GR64, 8>;
+}
// 8-bit immediate stores to 8-bit fields.
defm MVI : StoreSIPair<"mvi", 0x92, 0xEB52, truncstorei8, imm32zx8trunc>;
@@ -201,50 +384,70 @@ def MVHHI : StoreSIL<"mvhhi", 0xE544, truncstorei16, imm32sx16trunc>;
def MVHI : StoreSIL<"mvhi", 0xE54C, store, imm32sx16>;
def MVGHI : StoreSIL<"mvghi", 0xE548, store, imm64sx16>;
+// Memory-to-memory moves.
+let mayLoad = 1, mayStore = 1 in
+ defm MVC : MemorySS<"mvc", 0xD2, z_mvc, z_mvc_loop>;
+
+// String moves.
+let mayLoad = 1, mayStore = 1, Defs = [CC], Uses = [R0L] in
+ defm MVST : StringRRE<"mvst", 0xB255, z_stpcpy>;
+
//===----------------------------------------------------------------------===//
// Sign extensions
//===----------------------------------------------------------------------===//
+//
+// Note that putting these before zero extensions means that we will prefer
+// them for anyextload*. There's not really much to choose between the two
+// either way, but signed-extending loads have a short LH and a long LHY,
+// while zero-extending loads have only the long LLH.
+//
+//===----------------------------------------------------------------------===//
// 32-bit extensions from registers.
let neverHasSideEffects = 1 in {
- def LBR : UnaryRRE<"lbr", 0xB926, sext8, GR32, GR32>;
- def LHR : UnaryRRE<"lhr", 0xB927, sext16, GR32, GR32>;
+ def LBR : UnaryRRE<"lb", 0xB926, sext8, GR32, GR32>;
+ def LHR : UnaryRRE<"lh", 0xB927, sext16, GR32, GR32>;
}
// 64-bit extensions from registers.
let neverHasSideEffects = 1 in {
- def LGBR : UnaryRRE<"lgbr", 0xB906, sext8, GR64, GR64>;
- def LGHR : UnaryRRE<"lghr", 0xB907, sext16, GR64, GR64>;
- def LGFR : UnaryRRE<"lgfr", 0xB914, sext32, GR64, GR32>;
+ def LGBR : UnaryRRE<"lgb", 0xB906, sext8, GR64, GR64>;
+ def LGHR : UnaryRRE<"lgh", 0xB907, sext16, GR64, GR64>;
+ def LGFR : UnaryRRE<"lgf", 0xB914, sext32, GR64, GR32>;
}
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def LTGFR : UnaryRRE<"ltgf", 0xB912, null_frag, GR64, GR64>;
// Match 32-to-64-bit sign extensions in which the source is already
// in a 64-bit register.
def : Pat<(sext_inreg GR64:$src, i32),
- (LGFR (EXTRACT_SUBREG GR64:$src, subreg_32bit))>;
-
-// 32-bit extensions from memory.
-def LB : UnaryRXY<"lb", 0xE376, sextloadi8, GR32>;
-defm LH : UnaryRXPair<"lh", 0x48, 0xE378, sextloadi16, GR32>;
-def LHRL : UnaryRILPC<"lhrl", 0xC45, aligned_sextloadi16, GR32>;
+ (LGFR (EXTRACT_SUBREG GR64:$src, subreg_l32))>;
+
+// 32-bit extensions from 8-bit memory. LBMux expands to LB or LBH,
+// depending on the choice of register.
+def LBMux : UnaryRXYPseudo<"lb", asextloadi8, GRX32, 1>,
+ Requires<[FeatureHighWord]>;
+def LB : UnaryRXY<"lb", 0xE376, asextloadi8, GR32, 1>;
+def LBH : UnaryRXY<"lbh", 0xE3C0, asextloadi8, GRH32, 1>,
+ Requires<[FeatureHighWord]>;
+
+// 32-bit extensions from 16-bit memory. LHMux expands to LH or LHH,
+// depending on the choice of register.
+def LHMux : UnaryRXYPseudo<"lh", asextloadi16, GRX32, 2>,
+ Requires<[FeatureHighWord]>;
+defm LH : UnaryRXPair<"lh", 0x48, 0xE378, asextloadi16, GR32, 2>;
+def LHH : UnaryRXY<"lhh", 0xE3C4, asextloadi16, GRH32, 2>,
+ Requires<[FeatureHighWord]>;
+def LHRL : UnaryRILPC<"lhrl", 0xC45, aligned_asextloadi16, GR32>;
// 64-bit extensions from memory.
-def LGB : UnaryRXY<"lgb", 0xE377, sextloadi8, GR64>;
-def LGH : UnaryRXY<"lgh", 0xE315, sextloadi16, GR64>;
-def LGF : UnaryRXY<"lgf", 0xE314, sextloadi32, GR64>;
-def LGHRL : UnaryRILPC<"lghrl", 0xC44, aligned_sextloadi16, GR64>;
-def LGFRL : UnaryRILPC<"lgfrl", 0xC4C, aligned_sextloadi32, GR64>;
-
-// If the sign of a load-extend operation doesn't matter, use the signed ones.
-// There's not really much to choose between the sign and zero extensions,
-// but LH is more compact than LLH for small offsets.
-def : Pat<(i32 (extloadi8 bdxaddr20only:$src)), (LB bdxaddr20only:$src)>;
-def : Pat<(i32 (extloadi16 bdxaddr12pair:$src)), (LH bdxaddr12pair:$src)>;
-def : Pat<(i32 (extloadi16 bdxaddr20pair:$src)), (LHY bdxaddr20pair:$src)>;
-
-def : Pat<(i64 (extloadi8 bdxaddr20only:$src)), (LGB bdxaddr20only:$src)>;
-def : Pat<(i64 (extloadi16 bdxaddr20only:$src)), (LGH bdxaddr20only:$src)>;
-def : Pat<(i64 (extloadi32 bdxaddr20only:$src)), (LGF bdxaddr20only:$src)>;
+def LGB : UnaryRXY<"lgb", 0xE377, asextloadi8, GR64, 1>;
+def LGH : UnaryRXY<"lgh", 0xE315, asextloadi16, GR64, 2>;
+def LGF : UnaryRXY<"lgf", 0xE314, asextloadi32, GR64, 4>;
+def LGHRL : UnaryRILPC<"lghrl", 0xC44, aligned_asextloadi16, GR64>;
+def LGFRL : UnaryRILPC<"lgfrl", 0xC4C, aligned_asextloadi32, GR64>;
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def LTGF : UnaryRXY<"ltgf", 0xE332, asextloadi32, GR64, 4>;
//===----------------------------------------------------------------------===//
// Zero extensions
@@ -252,33 +455,51 @@ def : Pat<(i64 (extloadi32 bdxaddr20only:$src)), (LGF bdxaddr20only:$src)>;
// 32-bit extensions from registers.
let neverHasSideEffects = 1 in {
- def LLCR : UnaryRRE<"llcr", 0xB994, zext8, GR32, GR32>;
- def LLHR : UnaryRRE<"llhr", 0xB995, zext16, GR32, GR32>;
+ // Expands to LLCR or RISB[LH]G, depending on the choice of registers.
+ def LLCRMux : UnaryRRPseudo<"llc", zext8, GRX32, GRX32>,
+ Requires<[FeatureHighWord]>;
+ def LLCR : UnaryRRE<"llc", 0xB994, zext8, GR32, GR32>;
+ // Expands to LLHR or RISB[LH]G, depending on the choice of registers.
+ def LLHRMux : UnaryRRPseudo<"llh", zext16, GRX32, GRX32>,
+ Requires<[FeatureHighWord]>;
+ def LLHR : UnaryRRE<"llh", 0xB995, zext16, GR32, GR32>;
}
// 64-bit extensions from registers.
let neverHasSideEffects = 1 in {
- def LLGCR : UnaryRRE<"llgcr", 0xB984, zext8, GR64, GR64>;
- def LLGHR : UnaryRRE<"llghr", 0xB985, zext16, GR64, GR64>;
- def LLGFR : UnaryRRE<"llgfr", 0xB916, zext32, GR64, GR32>;
+ def LLGCR : UnaryRRE<"llgc", 0xB984, zext8, GR64, GR64>;
+ def LLGHR : UnaryRRE<"llgh", 0xB985, zext16, GR64, GR64>;
+ def LLGFR : UnaryRRE<"llgf", 0xB916, zext32, GR64, GR32>;
}
// Match 32-to-64-bit zero extensions in which the source is already
// in a 64-bit register.
def : Pat<(and GR64:$src, 0xffffffff),
- (LLGFR (EXTRACT_SUBREG GR64:$src, subreg_32bit))>;
-
-// 32-bit extensions from memory.
-def LLC : UnaryRXY<"llc", 0xE394, zextloadi8, GR32>;
-def LLH : UnaryRXY<"llh", 0xE395, zextloadi16, GR32>;
-def LLHRL : UnaryRILPC<"llhrl", 0xC42, aligned_zextloadi16, GR32>;
+ (LLGFR (EXTRACT_SUBREG GR64:$src, subreg_l32))>;
+
+// 32-bit extensions from 8-bit memory. LLCMux expands to LLC or LLCH,
+// depending on the choice of register.
+def LLCMux : UnaryRXYPseudo<"llc", azextloadi8, GRX32, 1>,
+ Requires<[FeatureHighWord]>;
+def LLC : UnaryRXY<"llc", 0xE394, azextloadi8, GR32, 1>;
+def LLCH : UnaryRXY<"llch", 0xE3C2, azextloadi8, GR32, 1>,
+ Requires<[FeatureHighWord]>;
+
+// 32-bit extensions from 16-bit memory. LLHMux expands to LLH or LLHH,
+// depending on the choice of register.
+def LLHMux : UnaryRXYPseudo<"llh", azextloadi16, GRX32, 2>,
+ Requires<[FeatureHighWord]>;
+def LLH : UnaryRXY<"llh", 0xE395, azextloadi16, GR32, 2>;
+def LLHH : UnaryRXY<"llhh", 0xE3C6, azextloadi16, GR32, 2>,
+ Requires<[FeatureHighWord]>;
+def LLHRL : UnaryRILPC<"llhrl", 0xC42, aligned_azextloadi16, GR32>;
// 64-bit extensions from memory.
-def LLGC : UnaryRXY<"llgc", 0xE390, zextloadi8, GR64>;
-def LLGH : UnaryRXY<"llgh", 0xE391, zextloadi16, GR64>;
-def LLGF : UnaryRXY<"llgf", 0xE316, zextloadi32, GR64>;
-def LLGHRL : UnaryRILPC<"llghrl", 0xC46, aligned_zextloadi16, GR64>;
-def LLGFRL : UnaryRILPC<"llgfrl", 0xC4E, aligned_zextloadi32, GR64>;
+def LLGC : UnaryRXY<"llgc", 0xE390, azextloadi8, GR64, 1>;
+def LLGH : UnaryRXY<"llgh", 0xE391, azextloadi16, GR64, 2>;
+def LLGF : UnaryRXY<"llgf", 0xE316, azextloadi32, GR64, 4>;
+def LLGHRL : UnaryRILPC<"llghrl", 0xC46, aligned_azextloadi16, GR64>;
+def LLGFRL : UnaryRILPC<"llgfrl", 0xC4E, aligned_azextloadi32, GR64>;
//===----------------------------------------------------------------------===//
// Truncations
@@ -286,21 +507,31 @@ def LLGFRL : UnaryRILPC<"llgfrl", 0xC4E, aligned_zextloadi32, GR64>;
// Truncations of 64-bit registers to 32-bit registers.
def : Pat<(i32 (trunc GR64:$src)),
- (EXTRACT_SUBREG GR64:$src, subreg_32bit)>;
-
-// Truncations of 32-bit registers to memory.
-let isCodeGenOnly = 1 in {
- defm STC32 : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR32>;
- defm STH32 : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR32>;
- def STHRL32 : StoreRILPC<"sthrl", 0xC47, aligned_truncstorei16, GR32>;
-}
+ (EXTRACT_SUBREG GR64:$src, subreg_l32)>;
+
+// Truncations of 32-bit registers to 8-bit memory. STCMux expands to
+// STC, STCY or STCH, depending on the choice of register.
+def STCMux : StoreRXYPseudo<truncstorei8, GRX32, 1>,
+ Requires<[FeatureHighWord]>;
+defm STC : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR32, 1>;
+def STCH : StoreRXY<"stch", 0xE3C3, truncstorei8, GRH32, 1>,
+ Requires<[FeatureHighWord]>;
+
+// Truncations of 32-bit registers to 16-bit memory. STHMux expands to
+// STH, STHY or STHH, depending on the choice of register.
+def STHMux : StoreRXYPseudo<truncstorei16, GRX32, 2>,
+ Requires<[FeatureHighWord]>;
+defm STH : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR32, 2>;
+def STHH : StoreRXY<"sthh", 0xE3C7, truncstorei16, GRH32, 2>,
+ Requires<[FeatureHighWord]>;
+def STHRL : StoreRILPC<"sthrl", 0xC47, aligned_truncstorei16, GR32>;
// Truncations of 64-bit registers to memory.
-defm STC : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR64>;
-defm STH : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR64>;
-def STHRL : StoreRILPC<"sthrl", 0xC47, aligned_truncstorei16, GR64>;
-defm ST : StoreRXPair<"st", 0x50, 0xE350, truncstorei32, GR64>;
-def STRL : StoreRILPC<"strl", 0xC4F, aligned_truncstorei32, GR64>;
+defm : StoreGR64Pair<STC, STCY, truncstorei8>;
+defm : StoreGR64Pair<STH, STHY, truncstorei16>;
+def : StoreGR64PC<STHRL, aligned_truncstorei16>;
+defm : StoreGR64Pair<ST, STY, truncstorei32>;
+def : StoreGR64PC<STRL, aligned_truncstorei32>;
//===----------------------------------------------------------------------===//
// Multi-register moves
@@ -318,50 +549,77 @@ def STMG : StoreMultipleRSY<"stmg", 0xEB24, GR64>;
// Byte-swapping register moves.
let neverHasSideEffects = 1 in {
- def LRVR : UnaryRRE<"lrvr", 0xB91F, bswap, GR32, GR32>;
- def LRVGR : UnaryRRE<"lrvgr", 0xB90F, bswap, GR64, GR64>;
+ def LRVR : UnaryRRE<"lrv", 0xB91F, bswap, GR32, GR32>;
+ def LRVGR : UnaryRRE<"lrvg", 0xB90F, bswap, GR64, GR64>;
}
-// Byte-swapping loads.
-def LRV : UnaryRXY<"lrv", 0xE31E, loadu<bswap>, GR32>;
-def LRVG : UnaryRXY<"lrvg", 0xE30F, loadu<bswap>, GR64>;
+// Byte-swapping loads. Unlike normal loads, these instructions are
+// allowed to access storage more than once.
+def LRV : UnaryRXY<"lrv", 0xE31E, loadu<bswap, nonvolatile_load>, GR32, 4>;
+def LRVG : UnaryRXY<"lrvg", 0xE30F, loadu<bswap, nonvolatile_load>, GR64, 8>;
-// Byte-swapping stores.
-def STRV : StoreRXY<"strv", 0xE33E, storeu<bswap>, GR32>;
-def STRVG : StoreRXY<"strvg", 0xE32F, storeu<bswap>, GR64>;
+// Likewise byte-swapping stores.
+def STRV : StoreRXY<"strv", 0xE33E, storeu<bswap, nonvolatile_store>, GR32, 4>;
+def STRVG : StoreRXY<"strvg", 0xE32F, storeu<bswap, nonvolatile_store>,
+ GR64, 8>;
//===----------------------------------------------------------------------===//
// Load address instructions
//===----------------------------------------------------------------------===//
// Load BDX-style addresses.
-let neverHasSideEffects = 1, Function = "la" in {
- let PairType = "12" in
- def LA : InstRX<0x41, (outs GR64:$dst), (ins laaddr12pair:$src),
- "la\t$dst, $src",
- [(set GR64:$dst, laaddr12pair:$src)]>;
- let PairType = "20" in
- def LAY : InstRXY<0xE371, (outs GR64:$dst), (ins laaddr20pair:$src),
- "lay\t$dst, $src",
- [(set GR64:$dst, laaddr20pair:$src)]>;
+let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isReMaterializable = 1,
+ DispKey = "la" in {
+ let DispSize = "12" in
+ def LA : InstRX<0x41, (outs GR64:$R1), (ins laaddr12pair:$XBD2),
+ "la\t$R1, $XBD2",
+ [(set GR64:$R1, laaddr12pair:$XBD2)]>;
+ let DispSize = "20" in
+ def LAY : InstRXY<0xE371, (outs GR64:$R1), (ins laaddr20pair:$XBD2),
+ "lay\t$R1, $XBD2",
+ [(set GR64:$R1, laaddr20pair:$XBD2)]>;
}
// Load a PC-relative address. There's no version of this instruction
// with a 16-bit offset, so there's no relaxation.
-let neverHasSideEffects = 1 in {
- def LARL : InstRIL<0xC00, (outs GR64:$dst), (ins pcrel32:$src),
- "larl\t$dst, $src",
- [(set GR64:$dst, pcrel32:$src)]>;
+let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
+ isReMaterializable = 1 in {
+ def LARL : InstRIL<0xC00, (outs GR64:$R1), (ins pcrel32:$I2),
+ "larl\t$R1, $I2",
+ [(set GR64:$R1, pcrel32:$I2)]>;
}
//===----------------------------------------------------------------------===//
-// Negation
+// Absolute and Negation
//===----------------------------------------------------------------------===//
-let Defs = [PSW] in {
- def LCR : UnaryRR <"lcr", 0x13, ineg, GR32, GR32>;
- def LCGR : UnaryRRE<"lcgr", 0xB903, ineg, GR64, GR64>;
- def LCGFR : UnaryRRE<"lcgfr", 0xB913, null_frag, GR64, GR32>;
+let Defs = [CC] in {
+ let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
+ def LPR : UnaryRR <"lp", 0x10, z_iabs32, GR32, GR32>;
+ def LPGR : UnaryRRE<"lpg", 0xB900, z_iabs64, GR64, GR64>;
+ }
+ let CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def LPGFR : UnaryRRE<"lpgf", 0xB910, null_frag, GR64, GR32>;
+}
+defm : SXU<z_iabs64, LPGFR>;
+
+let Defs = [CC] in {
+ let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
+ def LNR : UnaryRR <"ln", 0x11, z_inegabs32, GR32, GR32>;
+ def LNGR : UnaryRRE<"lng", 0xB901, z_inegabs64, GR64, GR64>;
+ }
+ let CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def LNGFR : UnaryRRE<"lngf", 0xB911, null_frag, GR64, GR32>;
+}
+defm : SXU<z_inegabs64, LNGFR>;
+
+let Defs = [CC] in {
+ let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
+ def LCR : UnaryRR <"lc", 0x13, ineg, GR32, GR32>;
+ def LCGR : UnaryRRE<"lcg", 0xB903, ineg, GR64, GR64>;
+ }
+ let CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def LCGFR : UnaryRRE<"lcgf", 0xB913, null_frag, GR64, GR32>;
}
defm : SXU<ineg, LCGFR>;
@@ -370,69 +628,83 @@ defm : SXU<ineg, LCGFR>;
//===----------------------------------------------------------------------===//
let isCodeGenOnly = 1 in
- defm IC32 : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR32, zextloadi8>;
-defm IC : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR64, zextloadi8>;
+ defm IC32 : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR32, azextloadi8, 1>;
+defm IC : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR64, azextloadi8, 1>;
-defm : InsertMem<"inserti8", IC32, GR32, zextloadi8, bdxaddr12pair>;
-defm : InsertMem<"inserti8", IC32Y, GR32, zextloadi8, bdxaddr20pair>;
+defm : InsertMem<"inserti8", IC32, GR32, azextloadi8, bdxaddr12pair>;
+defm : InsertMem<"inserti8", IC32Y, GR32, azextloadi8, bdxaddr20pair>;
-defm : InsertMem<"inserti8", IC, GR64, zextloadi8, bdxaddr12pair>;
-defm : InsertMem<"inserti8", ICY, GR64, zextloadi8, bdxaddr20pair>;
+defm : InsertMem<"inserti8", IC, GR64, azextloadi8, bdxaddr12pair>;
+defm : InsertMem<"inserti8", ICY, GR64, azextloadi8, bdxaddr20pair>;
// Insertions of a 16-bit immediate, leaving other bits unaffected.
// We don't have or_as_insert equivalents of these operations because
// OI is available instead.
-let isCodeGenOnly = 1 in {
- def IILL32 : BinaryRI<"iill", 0xA53, insertll, GR32, imm32ll16>;
- def IILH32 : BinaryRI<"iilh", 0xA52, insertlh, GR32, imm32lh16>;
-}
-def IILL : BinaryRI<"iill", 0xA53, insertll, GR64, imm64ll16>;
-def IILH : BinaryRI<"iilh", 0xA52, insertlh, GR64, imm64lh16>;
-def IIHL : BinaryRI<"iihl", 0xA51, inserthl, GR64, imm64hl16>;
-def IIHH : BinaryRI<"iihh", 0xA50, inserthh, GR64, imm64hh16>;
+//
+// IIxMux expands to II[LH]x, depending on the choice of register.
+def IILMux : BinaryRIPseudo<insertll, GRX32, imm32ll16>,
+ Requires<[FeatureHighWord]>;
+def IIHMux : BinaryRIPseudo<insertlh, GRX32, imm32lh16>,
+ Requires<[FeatureHighWord]>;
+def IILL : BinaryRI<"iill", 0xA53, insertll, GR32, imm32ll16>;
+def IILH : BinaryRI<"iilh", 0xA52, insertlh, GR32, imm32lh16>;
+def IIHL : BinaryRI<"iihl", 0xA51, insertll, GRH32, imm32ll16>;
+def IIHH : BinaryRI<"iihh", 0xA50, insertlh, GRH32, imm32lh16>;
+def IILL64 : BinaryAliasRI<insertll, GR64, imm64ll16>;
+def IILH64 : BinaryAliasRI<insertlh, GR64, imm64lh16>;
+def IIHL64 : BinaryAliasRI<inserthl, GR64, imm64hl16>;
+def IIHH64 : BinaryAliasRI<inserthh, GR64, imm64hh16>;
// ...likewise for 32-bit immediates. For GR32s this is a general
// full-width move. (We use IILF rather than something like LLILF
// for 32-bit moves because IILF leaves the upper 32 bits of the
// GR64 unchanged.)
-let isCodeGenOnly = 1 in {
- def IILF32 : UnaryRIL<"iilf", 0xC09, bitconvert, GR32, uimm32>;
+let isAsCheapAsAMove = 1, isMoveImm = 1, isReMaterializable = 1 in {
+ def IIFMux : UnaryRIPseudo<bitconvert, GRX32, uimm32>,
+ Requires<[FeatureHighWord]>;
+ def IILF : UnaryRIL<"iilf", 0xC09, bitconvert, GR32, uimm32>;
+ def IIHF : UnaryRIL<"iihf", 0xC08, bitconvert, GRH32, uimm32>;
}
-def IILF : BinaryRIL<"iilf", 0xC09, insertlf, GR64, imm64lf32>;
-def IIHF : BinaryRIL<"iihf", 0xC08, inserthf, GR64, imm64hf32>;
+def IILF64 : BinaryAliasRIL<insertlf, GR64, imm64lf32>;
+def IIHF64 : BinaryAliasRIL<inserthf, GR64, imm64hf32>;
// An alternative model of inserthf, with the first operand being
// a zero-extended value.
def : Pat<(or (zext32 GR32:$src), imm64hf32:$imm),
- (IIHF (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_32bit),
- imm64hf32:$imm)>;
+ (IIHF64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32),
+ imm64hf32:$imm)>;
//===----------------------------------------------------------------------===//
// Addition
//===----------------------------------------------------------------------===//
// Plain addition.
-let Defs = [PSW] in {
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
// Addition of a register.
let isCommutable = 1 in {
- def AR : BinaryRR <"ar", 0x1A, add, GR32, GR32>;
- def AGR : BinaryRRE<"agr", 0xB908, add, GR64, GR64>;
+ defm AR : BinaryRRAndK<"a", 0x1A, 0xB9F8, add, GR32, GR32>;
+ defm AGR : BinaryRREAndK<"ag", 0xB908, 0xB9E8, add, GR64, GR64>;
}
- def AGFR : BinaryRRE<"agfr", 0xB918, null_frag, GR64, GR32>;
+ def AGFR : BinaryRRE<"agf", 0xB918, null_frag, GR64, GR32>;
// Addition of signed 16-bit immediates.
- def AHI : BinaryRI<"ahi", 0xA7A, add, GR32, imm32sx16>;
- def AGHI : BinaryRI<"aghi", 0xA7B, add, GR64, imm64sx16>;
+ defm AHIMux : BinaryRIAndKPseudo<"ahimux", add, GRX32, imm32sx16>;
+ defm AHI : BinaryRIAndK<"ahi", 0xA7A, 0xECD8, add, GR32, imm32sx16>;
+ defm AGHI : BinaryRIAndK<"aghi", 0xA7B, 0xECD9, add, GR64, imm64sx16>;
// Addition of signed 32-bit immediates.
+ def AFIMux : BinaryRIPseudo<add, GRX32, simm32>,
+ Requires<[FeatureHighWord]>;
def AFI : BinaryRIL<"afi", 0xC29, add, GR32, simm32>;
+ def AIH : BinaryRIL<"aih", 0xCC8, add, GRH32, simm32>,
+ Requires<[FeatureHighWord]>;
def AGFI : BinaryRIL<"agfi", 0xC28, add, GR64, imm64sx32>;
// Addition of memory.
- defm AH : BinaryRXPair<"ah", 0x4A, 0xE37A, add, GR32, sextloadi16>;
- defm A : BinaryRXPair<"a", 0x5A, 0xE35A, add, GR32, load>;
- def AGF : BinaryRXY<"agf", 0xE318, add, GR64, sextloadi32>;
- def AG : BinaryRXY<"ag", 0xE308, add, GR64, load>;
+ defm AH : BinaryRXPair<"ah", 0x4A, 0xE37A, add, GR32, asextloadi16, 2>;
+ defm A : BinaryRXPair<"a", 0x5A, 0xE35A, add, GR32, load, 4>;
+ def AGF : BinaryRXY<"agf", 0xE318, add, GR64, asextloadi32, 4>;
+ def AG : BinaryRXY<"ag", 0xE308, add, GR64, load, 8>;
// Addition to memory.
def ASI : BinarySIY<"asi", 0xEB6A, add, imm32sx8>;
@@ -441,34 +713,40 @@ let Defs = [PSW] in {
defm : SXB<add, GR64, AGFR>;
// Addition producing a carry.
-let Defs = [PSW] in {
+let Defs = [CC] in {
// Addition of a register.
let isCommutable = 1 in {
- def ALR : BinaryRR <"alr", 0x1E, addc, GR32, GR32>;
- def ALGR : BinaryRRE<"algr", 0xB90A, addc, GR64, GR64>;
+ defm ALR : BinaryRRAndK<"al", 0x1E, 0xB9FA, addc, GR32, GR32>;
+ defm ALGR : BinaryRREAndK<"alg", 0xB90A, 0xB9EA, addc, GR64, GR64>;
}
- def ALGFR : BinaryRRE<"algfr", 0xB91A, null_frag, GR64, GR32>;
+ def ALGFR : BinaryRRE<"algf", 0xB91A, null_frag, GR64, GR32>;
+
+ // Addition of signed 16-bit immediates.
+ def ALHSIK : BinaryRIE<"alhsik", 0xECDA, addc, GR32, imm32sx16>,
+ Requires<[FeatureDistinctOps]>;
+ def ALGHSIK : BinaryRIE<"alghsik", 0xECDB, addc, GR64, imm64sx16>,
+ Requires<[FeatureDistinctOps]>;
// Addition of unsigned 32-bit immediates.
def ALFI : BinaryRIL<"alfi", 0xC2B, addc, GR32, uimm32>;
def ALGFI : BinaryRIL<"algfi", 0xC2A, addc, GR64, imm64zx32>;
// Addition of memory.
- defm AL : BinaryRXPair<"al", 0x5E, 0xE35E, addc, GR32, load>;
- def ALGF : BinaryRXY<"algf", 0xE31A, addc, GR64, zextloadi32>;
- def ALG : BinaryRXY<"alg", 0xE30A, addc, GR64, load>;
+ defm AL : BinaryRXPair<"al", 0x5E, 0xE35E, addc, GR32, load, 4>;
+ def ALGF : BinaryRXY<"algf", 0xE31A, addc, GR64, azextloadi32, 4>;
+ def ALG : BinaryRXY<"alg", 0xE30A, addc, GR64, load, 8>;
}
defm : ZXB<addc, GR64, ALGFR>;
// Addition producing and using a carry.
-let Defs = [PSW], Uses = [PSW] in {
+let Defs = [CC], Uses = [CC] in {
// Addition of a register.
- def ALCR : BinaryRRE<"alcr", 0xB998, adde, GR32, GR32>;
- def ALCGR : BinaryRRE<"alcgr", 0xB988, adde, GR64, GR64>;
+ def ALCR : BinaryRRE<"alc", 0xB998, adde, GR32, GR32>;
+ def ALCGR : BinaryRRE<"alcg", 0xB988, adde, GR64, GR64>;
// Addition of memory.
- def ALC : BinaryRXY<"alc", 0xE398, adde, GR32, load>;
- def ALCG : BinaryRXY<"alcg", 0xE388, adde, GR64, load>;
+ def ALC : BinaryRXY<"alc", 0xE398, adde, GR32, load, 4>;
+ def ALCG : BinaryRXY<"alcg", 0xE388, adde, GR64, load, 8>;
}
//===----------------------------------------------------------------------===//
@@ -477,25 +755,26 @@ let Defs = [PSW], Uses = [PSW] in {
// Plain subtraction. Although immediate forms exist, we use the
// add-immediate instruction instead.
-let Defs = [PSW] in {
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
// Subtraction of a register.
- def SR : BinaryRR <"sr", 0x1B, sub, GR32, GR32>;
- def SGFR : BinaryRRE<"sgfr", 0xB919, null_frag, GR64, GR32>;
- def SGR : BinaryRRE<"sgr", 0xB909, sub, GR64, GR64>;
+ defm SR : BinaryRRAndK<"s", 0x1B, 0xB9F9, sub, GR32, GR32>;
+ def SGFR : BinaryRRE<"sgf", 0xB919, null_frag, GR64, GR32>;
+ defm SGR : BinaryRREAndK<"sg", 0xB909, 0xB9E9, sub, GR64, GR64>;
// Subtraction of memory.
- defm S : BinaryRXPair<"s", 0x5B, 0xE35B, sub, GR32, load>;
- def SGF : BinaryRXY<"sgf", 0xE319, sub, GR64, sextloadi32>;
- def SG : BinaryRXY<"sg", 0xE309, sub, GR64, load>;
+ defm SH : BinaryRXPair<"sh", 0x4B, 0xE37B, sub, GR32, asextloadi16, 2>;
+ defm S : BinaryRXPair<"s", 0x5B, 0xE35B, sub, GR32, load, 4>;
+ def SGF : BinaryRXY<"sgf", 0xE319, sub, GR64, asextloadi32, 4>;
+ def SG : BinaryRXY<"sg", 0xE309, sub, GR64, load, 8>;
}
defm : SXB<sub, GR64, SGFR>;
// Subtraction producing a carry.
-let Defs = [PSW] in {
+let Defs = [CC] in {
// Subtraction of a register.
- def SLR : BinaryRR <"slr", 0x1F, subc, GR32, GR32>;
- def SLGFR : BinaryRRE<"slgfr", 0xB91B, null_frag, GR64, GR32>;
- def SLGR : BinaryRRE<"slgr", 0xB90B, subc, GR64, GR64>;
+ defm SLR : BinaryRRAndK<"sl", 0x1F, 0xB9FB, subc, GR32, GR32>;
+ def SLGFR : BinaryRRE<"slgf", 0xB91B, null_frag, GR64, GR32>;
+ defm SLGR : BinaryRREAndK<"slg", 0xB90B, 0xB9EB, subc, GR64, GR64>;
// Subtraction of unsigned 32-bit immediates. These don't match
// subc because we prefer addc for constants.
@@ -503,56 +782,78 @@ let Defs = [PSW] in {
def SLGFI : BinaryRIL<"slgfi", 0xC24, null_frag, GR64, imm64zx32>;
// Subtraction of memory.
- defm SL : BinaryRXPair<"sl", 0x5F, 0xE35F, subc, GR32, load>;
- def SLGF : BinaryRXY<"slgf", 0xE31B, subc, GR64, zextloadi32>;
- def SLG : BinaryRXY<"slg", 0xE30B, subc, GR64, load>;
+ defm SL : BinaryRXPair<"sl", 0x5F, 0xE35F, subc, GR32, load, 4>;
+ def SLGF : BinaryRXY<"slgf", 0xE31B, subc, GR64, azextloadi32, 4>;
+ def SLG : BinaryRXY<"slg", 0xE30B, subc, GR64, load, 8>;
}
defm : ZXB<subc, GR64, SLGFR>;
// Subtraction producing and using a carry.
-let Defs = [PSW], Uses = [PSW] in {
+let Defs = [CC], Uses = [CC] in {
// Subtraction of a register.
- def SLBR : BinaryRRE<"slbr", 0xB999, sube, GR32, GR32>;
- def SLGBR : BinaryRRE<"slbgr", 0xB989, sube, GR64, GR64>;
+ def SLBR : BinaryRRE<"slb", 0xB999, sube, GR32, GR32>;
+ def SLGBR : BinaryRRE<"slbg", 0xB989, sube, GR64, GR64>;
// Subtraction of memory.
- def SLB : BinaryRXY<"slb", 0xE399, sube, GR32, load>;
- def SLBG : BinaryRXY<"slbg", 0xE389, sube, GR64, load>;
+ def SLB : BinaryRXY<"slb", 0xE399, sube, GR32, load, 4>;
+ def SLBG : BinaryRXY<"slbg", 0xE389, sube, GR64, load, 8>;
}
//===----------------------------------------------------------------------===//
// AND
//===----------------------------------------------------------------------===//
-let Defs = [PSW] in {
+let Defs = [CC] in {
// ANDs of a register.
- let isCommutable = 1 in {
- def NR : BinaryRR <"nr", 0x14, and, GR32, GR32>;
- def NGR : BinaryRRE<"ngr", 0xB980, and, GR64, GR64>;
+ let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm NR : BinaryRRAndK<"n", 0x14, 0xB9F4, and, GR32, GR32>;
+ defm NGR : BinaryRREAndK<"ng", 0xB980, 0xB9E4, and, GR64, GR64>;
}
- // ANDs of a 16-bit immediate, leaving other bits unaffected.
- let isCodeGenOnly = 1 in {
- def NILL32 : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>;
- def NILH32 : BinaryRI<"nilh", 0xA56, and, GR32, imm32lh16c>;
+ let isConvertibleToThreeAddress = 1 in {
+ // ANDs of a 16-bit immediate, leaving other bits unaffected.
+ // The CC result only reflects the 16-bit field, not the full register.
+ //
+ // NIxMux expands to NI[LH]x, depending on the choice of register.
+ def NILMux : BinaryRIPseudo<and, GRX32, imm32ll16c>,
+ Requires<[FeatureHighWord]>;
+ def NIHMux : BinaryRIPseudo<and, GRX32, imm32lh16c>,
+ Requires<[FeatureHighWord]>;
+ def NILL : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>;
+ def NILH : BinaryRI<"nilh", 0xA56, and, GR32, imm32lh16c>;
+ def NIHL : BinaryRI<"nihl", 0xA55, and, GRH32, imm32ll16c>;
+ def NIHH : BinaryRI<"nihh", 0xA54, and, GRH32, imm32lh16c>;
+ def NILL64 : BinaryAliasRI<and, GR64, imm64ll16c>;
+ def NILH64 : BinaryAliasRI<and, GR64, imm64lh16c>;
+ def NIHL64 : BinaryAliasRI<and, GR64, imm64hl16c>;
+ def NIHH64 : BinaryAliasRI<and, GR64, imm64hh16c>;
+
+ // ANDs of a 32-bit immediate, leaving other bits unaffected.
+ // The CC result only reflects the 32-bit field, which means we can
+ // use it as a zero indicator for i32 operations but not otherwise.
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ // Expands to NILF or NIHF, depending on the choice of register.
+ def NIFMux : BinaryRIPseudo<and, GRX32, uimm32>,
+ Requires<[FeatureHighWord]>;
+ def NILF : BinaryRIL<"nilf", 0xC0B, and, GR32, uimm32>;
+ def NIHF : BinaryRIL<"nihf", 0xC0A, and, GRH32, uimm32>;
+ }
+ def NILF64 : BinaryAliasRIL<and, GR64, imm64lf32c>;
+ def NIHF64 : BinaryAliasRIL<and, GR64, imm64hf32c>;
}
- def NILL : BinaryRI<"nill", 0xA57, and, GR64, imm64ll16c>;
- def NILH : BinaryRI<"nilh", 0xA56, and, GR64, imm64lh16c>;
- def NIHL : BinaryRI<"nihl", 0xA55, and, GR64, imm64hl16c>;
- def NIHH : BinaryRI<"nihh", 0xA54, and, GR64, imm64hh16c>;
-
- // ANDs of a 32-bit immediate, leaving other bits unaffected.
- let isCodeGenOnly = 1 in
- def NILF32 : BinaryRIL<"nilf", 0xC0B, and, GR32, uimm32>;
- def NILF : BinaryRIL<"nilf", 0xC0B, and, GR64, imm64lf32c>;
- def NIHF : BinaryRIL<"nihf", 0xC0A, and, GR64, imm64hf32c>;
// ANDs of memory.
- defm N : BinaryRXPair<"n", 0x54, 0xE354, and, GR32, load>;
- def NG : BinaryRXY<"ng", 0xE380, and, GR64, load>;
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm N : BinaryRXPair<"n", 0x54, 0xE354, and, GR32, load, 4>;
+ def NG : BinaryRXY<"ng", 0xE380, and, GR64, load, 8>;
+ }
// AND to memory
defm NI : BinarySIPair<"ni", 0x94, 0xEB54, null_frag, uimm8>;
+
+ // Block AND.
+ let mayLoad = 1, mayStore = 1 in
+ defm NC : MemorySS<"nc", 0xD4, z_nc, z_nc_loop>;
}
defm : RMWIByte<and, bdaddr12pair, NI>;
defm : RMWIByte<and, bdaddr20pair, NIY>;
@@ -561,35 +862,55 @@ defm : RMWIByte<and, bdaddr20pair, NIY>;
// OR
//===----------------------------------------------------------------------===//
-let Defs = [PSW] in {
+let Defs = [CC] in {
// ORs of a register.
- let isCommutable = 1 in {
- def OR : BinaryRR <"or", 0x16, or, GR32, GR32>;
- def OGR : BinaryRRE<"ogr", 0xB981, or, GR64, GR64>;
+ let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm OR : BinaryRRAndK<"o", 0x16, 0xB9F6, or, GR32, GR32>;
+ defm OGR : BinaryRREAndK<"og", 0xB981, 0xB9E6, or, GR64, GR64>;
}
// ORs of a 16-bit immediate, leaving other bits unaffected.
- let isCodeGenOnly = 1 in {
- def OILL32 : BinaryRI<"oill", 0xA5B, or, GR32, imm32ll16>;
- def OILH32 : BinaryRI<"oilh", 0xA5A, or, GR32, imm32lh16>;
- }
- def OILL : BinaryRI<"oill", 0xA5B, or, GR64, imm64ll16>;
- def OILH : BinaryRI<"oilh", 0xA5A, or, GR64, imm64lh16>;
- def OIHL : BinaryRI<"oihl", 0xA59, or, GR64, imm64hl16>;
- def OIHH : BinaryRI<"oihh", 0xA58, or, GR64, imm64hh16>;
+ // The CC result only reflects the 16-bit field, not the full register.
+ //
+ // OIxMux expands to OI[LH]x, depending on the choice of register.
+ def OILMux : BinaryRIPseudo<or, GRX32, imm32ll16>,
+ Requires<[FeatureHighWord]>;
+ def OIHMux : BinaryRIPseudo<or, GRX32, imm32lh16>,
+ Requires<[FeatureHighWord]>;
+ def OILL : BinaryRI<"oill", 0xA5B, or, GR32, imm32ll16>;
+ def OILH : BinaryRI<"oilh", 0xA5A, or, GR32, imm32lh16>;
+ def OIHL : BinaryRI<"oihl", 0xA59, or, GRH32, imm32ll16>;
+ def OIHH : BinaryRI<"oihh", 0xA58, or, GRH32, imm32lh16>;
+ def OILL64 : BinaryAliasRI<or, GR64, imm64ll16>;
+ def OILH64 : BinaryAliasRI<or, GR64, imm64lh16>;
+ def OIHL64 : BinaryAliasRI<or, GR64, imm64hl16>;
+ def OIHH64 : BinaryAliasRI<or, GR64, imm64hh16>;
// ORs of a 32-bit immediate, leaving other bits unaffected.
- let isCodeGenOnly = 1 in
- def OILF32 : BinaryRIL<"oilf", 0xC0D, or, GR32, uimm32>;
- def OILF : BinaryRIL<"oilf", 0xC0D, or, GR64, imm64lf32>;
- def OIHF : BinaryRIL<"oihf", 0xC0C, or, GR64, imm64hf32>;
+ // The CC result only reflects the 32-bit field, which means we can
+ // use it as a zero indicator for i32 operations but not otherwise.
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ // Expands to OILF or OIHF, depending on the choice of register.
+ def OIFMux : BinaryRIPseudo<or, GRX32, uimm32>,
+ Requires<[FeatureHighWord]>;
+ def OILF : BinaryRIL<"oilf", 0xC0D, or, GR32, uimm32>;
+ def OIHF : BinaryRIL<"oihf", 0xC0C, or, GRH32, uimm32>;
+ }
+ def OILF64 : BinaryAliasRIL<or, GR64, imm64lf32>;
+ def OIHF64 : BinaryAliasRIL<or, GR64, imm64hf32>;
// ORs of memory.
- defm O : BinaryRXPair<"o", 0x56, 0xE356, or, GR32, load>;
- def OG : BinaryRXY<"og", 0xE381, or, GR64, load>;
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm O : BinaryRXPair<"o", 0x56, 0xE356, or, GR32, load, 4>;
+ def OG : BinaryRXY<"og", 0xE381, or, GR64, load, 8>;
+ }
// OR to memory
defm OI : BinarySIPair<"oi", 0x96, 0xEB56, null_frag, uimm8>;
+
+ // Block OR.
+ let mayLoad = 1, mayStore = 1 in
+ defm OC : MemorySS<"oc", 0xD6, z_oc, z_oc_loop>;
}
defm : RMWIByte<or, bdaddr12pair, OI>;
defm : RMWIByte<or, bdaddr20pair, OIY>;
@@ -598,25 +919,38 @@ defm : RMWIByte<or, bdaddr20pair, OIY>;
// XOR
//===----------------------------------------------------------------------===//
-let Defs = [PSW] in {
+let Defs = [CC] in {
// XORs of a register.
- let isCommutable = 1 in {
- def XR : BinaryRR <"xr", 0x17, xor, GR32, GR32>;
- def XGR : BinaryRRE<"xgr", 0xB982, xor, GR64, GR64>;
+ let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm XR : BinaryRRAndK<"x", 0x17, 0xB9F7, xor, GR32, GR32>;
+ defm XGR : BinaryRREAndK<"xg", 0xB982, 0xB9E7, xor, GR64, GR64>;
}
// XORs of a 32-bit immediate, leaving other bits unaffected.
- let isCodeGenOnly = 1 in
- def XILF32 : BinaryRIL<"xilf", 0xC07, xor, GR32, uimm32>;
- def XILF : BinaryRIL<"xilf", 0xC07, xor, GR64, imm64lf32>;
- def XIHF : BinaryRIL<"xihf", 0xC06, xor, GR64, imm64hf32>;
+ // The CC result only reflects the 32-bit field, which means we can
+ // use it as a zero indicator for i32 operations but not otherwise.
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ // Expands to XILF or XIHF, depending on the choice of register.
+ def XIFMux : BinaryRIPseudo<xor, GRX32, uimm32>,
+ Requires<[FeatureHighWord]>;
+ def XILF : BinaryRIL<"xilf", 0xC07, xor, GR32, uimm32>;
+ def XIHF : BinaryRIL<"xihf", 0xC06, xor, GRH32, uimm32>;
+ }
+ def XILF64 : BinaryAliasRIL<xor, GR64, imm64lf32>;
+ def XIHF64 : BinaryAliasRIL<xor, GR64, imm64hf32>;
// XORs of memory.
- defm X : BinaryRXPair<"x",0x57, 0xE357, xor, GR32, load>;
- def XG : BinaryRXY<"xg", 0xE382, xor, GR64, load>;
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm X : BinaryRXPair<"x",0x57, 0xE357, xor, GR32, load, 4>;
+ def XG : BinaryRXY<"xg", 0xE382, xor, GR64, load, 8>;
+ }
// XOR to memory
defm XI : BinarySIPair<"xi", 0x97, 0xEB57, null_frag, uimm8>;
+
+ // Block XOR.
+ let mayLoad = 1, mayStore = 1 in
+ defm XC : MemorySS<"xc", 0xD7, z_xc, z_xc_loop>;
}
defm : RMWIByte<xor, bdaddr12pair, XI>;
defm : RMWIByte<xor, bdaddr20pair, XIY>;
@@ -627,10 +961,10 @@ defm : RMWIByte<xor, bdaddr20pair, XIY>;
// Multiplication of a register.
let isCommutable = 1 in {
- def MSR : BinaryRRE<"msr", 0xB252, mul, GR32, GR32>;
- def MSGR : BinaryRRE<"msgr", 0xB90C, mul, GR64, GR64>;
+ def MSR : BinaryRRE<"ms", 0xB252, mul, GR32, GR32>;
+ def MSGR : BinaryRRE<"msg", 0xB90C, mul, GR64, GR64>;
}
-def MSGFR : BinaryRRE<"msgfr", 0xB91C, null_frag, GR64, GR32>;
+def MSGFR : BinaryRRE<"msgf", 0xB91C, null_frag, GR64, GR32>;
defm : SXB<mul, GR64, MSGFR>;
// Multiplication of a signed 16-bit immediate.
@@ -642,33 +976,32 @@ def MSFI : BinaryRIL<"msfi", 0xC21, mul, GR32, simm32>;
def MSGFI : BinaryRIL<"msgfi", 0xC20, mul, GR64, imm64sx32>;
// Multiplication of memory.
-defm MH : BinaryRXPair<"mh", 0x4C, 0xE37C, mul, GR32, sextloadi16>;
-defm MS : BinaryRXPair<"ms", 0x71, 0xE351, mul, GR32, load>;
-def MSGF : BinaryRXY<"msgf", 0xE31C, mul, GR64, sextloadi32>;
-def MSG : BinaryRXY<"msg", 0xE30C, mul, GR64, load>;
+defm MH : BinaryRXPair<"mh", 0x4C, 0xE37C, mul, GR32, asextloadi16, 2>;
+defm MS : BinaryRXPair<"ms", 0x71, 0xE351, mul, GR32, load, 4>;
+def MSGF : BinaryRXY<"msgf", 0xE31C, mul, GR64, asextloadi32, 4>;
+def MSG : BinaryRXY<"msg", 0xE30C, mul, GR64, load, 8>;
// Multiplication of a register, producing two results.
-def MLGR : BinaryRRE<"mlgr", 0xB986, z_umul_lohi64, GR128, GR64>;
+def MLGR : BinaryRRE<"mlg", 0xB986, z_umul_lohi64, GR128, GR64>;
// Multiplication of memory, producing two results.
-def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load>;
+def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load, 8>;
//===----------------------------------------------------------------------===//
// Division and remainder
//===----------------------------------------------------------------------===//
// Division and remainder, from registers.
-def DSGFR : BinaryRRE<"dsgfr", 0xB91D, null_frag, GR128, GR32>;
-def DSGR : BinaryRRE<"dsgr", 0xB90D, z_sdivrem64, GR128, GR64>;
-def DLR : BinaryRRE<"dlr", 0xB997, z_udivrem32, GR128, GR32>;
-def DLGR : BinaryRRE<"dlgr", 0xB987, z_udivrem64, GR128, GR64>;
-defm : SXB<z_sdivrem64, GR128, DSGFR>;
+def DSGFR : BinaryRRE<"dsgf", 0xB91D, z_sdivrem32, GR128, GR32>;
+def DSGR : BinaryRRE<"dsg", 0xB90D, z_sdivrem64, GR128, GR64>;
+def DLR : BinaryRRE<"dl", 0xB997, z_udivrem32, GR128, GR32>;
+def DLGR : BinaryRRE<"dlg", 0xB987, z_udivrem64, GR128, GR64>;
// Division and remainder, from memory.
-def DSGF : BinaryRXY<"dsgf", 0xE31D, z_sdivrem64, GR128, sextloadi32>;
-def DSG : BinaryRXY<"dsg", 0xE30D, z_sdivrem64, GR128, load>;
-def DL : BinaryRXY<"dl", 0xE397, z_udivrem32, GR128, load>;
-def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load>;
+def DSGF : BinaryRXY<"dsgf", 0xE31D, z_sdivrem32, GR128, load, 4>;
+def DSG : BinaryRXY<"dsg", 0xE30D, z_sdivrem64, GR128, load, 8>;
+def DL : BinaryRXY<"dl", 0xE397, z_udivrem32, GR128, load, 4>;
+def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load, 8>;
//===----------------------------------------------------------------------===//
// Shifts
@@ -676,111 +1009,188 @@ def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load>;
// Shift left.
let neverHasSideEffects = 1 in {
- def SLL : ShiftRS <"sll", 0x89, shl, GR32, shift12only>;
- def SLLG : ShiftRSY<"sllg", 0xEB0D, shl, GR64, shift20only>;
+ defm SLL : ShiftRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
+ def SLLG : ShiftRSY<"sllg", 0xEB0D, shl, GR64>;
}
// Logical shift right.
let neverHasSideEffects = 1 in {
- def SRL : ShiftRS <"srl", 0x88, srl, GR32, shift12only>;
- def SRLG : ShiftRSY<"srlg", 0xEB0C, srl, GR64, shift20only>;
+ defm SRL : ShiftRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>;
+ def SRLG : ShiftRSY<"srlg", 0xEB0C, srl, GR64>;
}
// Arithmetic shift right.
-let Defs = [PSW] in {
- def SRA : ShiftRS <"sra", 0x8A, sra, GR32, shift12only>;
- def SRAG : ShiftRSY<"srag", 0xEB0A, sra, GR64, shift20only>;
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
+ defm SRA : ShiftRSAndK<"sra", 0x8A, 0xEBDC, sra, GR32>;
+ def SRAG : ShiftRSY<"srag", 0xEB0A, sra, GR64>;
}
// Rotate left.
let neverHasSideEffects = 1 in {
- def RLL : ShiftRSY<"rll", 0xEB1D, rotl, GR32, shift20only>;
- def RLLG : ShiftRSY<"rllg", 0xEB1C, rotl, GR64, shift20only>;
+ def RLL : ShiftRSY<"rll", 0xEB1D, rotl, GR32>;
+ def RLLG : ShiftRSY<"rllg", 0xEB1C, rotl, GR64>;
}
// Rotate second operand left and insert selected bits into first operand.
// These can act like 32-bit operands provided that the constant start and
-// end bits (operands 2 and 3) are in the range [32, 64)
-let Defs = [PSW] in {
+// end bits (operands 2 and 3) are in the range [32, 64).
+let Defs = [CC] in {
let isCodeGenOnly = 1 in
- def RISBG32 : RotateSelectRIEf<"risbg", 0xEC55, GR32, GR32>;
- def RISBG : RotateSelectRIEf<"risbg", 0xEC55, GR64, GR64>;
+ def RISBG32 : RotateSelectRIEf<"risbg", 0xEC55, GR32, GR32>;
+ let CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def RISBG : RotateSelectRIEf<"risbg", 0xEC55, GR64, GR64>;
+}
+
+// Forms of RISBG that only affect one word of the destination register.
+// They do not set CC.
+def RISBMux : RotateSelectRIEfPseudo<GRX32, GRX32>, Requires<[FeatureHighWord]>;
+def RISBLL : RotateSelectAliasRIEf<GR32, GR32>, Requires<[FeatureHighWord]>;
+def RISBLH : RotateSelectAliasRIEf<GR32, GRH32>, Requires<[FeatureHighWord]>;
+def RISBHL : RotateSelectAliasRIEf<GRH32, GR32>, Requires<[FeatureHighWord]>;
+def RISBHH : RotateSelectAliasRIEf<GRH32, GRH32>, Requires<[FeatureHighWord]>;
+def RISBLG : RotateSelectRIEf<"risblg", 0xEC51, GR32, GR64>,
+ Requires<[FeatureHighWord]>;
+def RISBHG : RotateSelectRIEf<"risbhg", 0xEC5D, GRH32, GR64>,
+ Requires<[FeatureHighWord]>;
+
+// Rotate second operand left and perform a logical operation with selected
+// bits of the first operand. The CC result only describes the selected bits,
+// so isn't useful for a full comparison against zero.
+let Defs = [CC] in {
+ def RNSBG : RotateSelectRIEf<"rnsbg", 0xEC54, GR64, GR64>;
+ def ROSBG : RotateSelectRIEf<"rosbg", 0xEC56, GR64, GR64>;
+ def RXSBG : RotateSelectRIEf<"rxsbg", 0xEC57, GR64, GR64>;
}
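
The rotate-and-insert comments above are easier to follow with a concrete model. The following standalone C++ sketch is not part of this patch: the function names are invented, the bit numbering is the documented z/Architecture MSB-0 convention (bit 0 is the most significant bit of the 64-bit register), and the real instruction's zero-remaining-bits flag and wrap-around masks are deliberately left out. It only illustrates the basic rotate-then-insert-selected-bits behaviour that RISBG and the RISBLG/RISBHG variants provide.

#include <cassert>
#include <cstdint>

// Rotate Val left by Amt within 64 bits.
static uint64_t rotl64(uint64_t Val, unsigned Amt) {
  Amt &= 63;
  return Amt ? (Val << Amt) | (Val >> (64 - Amt)) : Val;
}

// Rotate Src left by Rotate, then replace bits Start..End of Dst (inclusive,
// MSB-0 numbering) with the corresponding bits of the rotated value.  All
// other bits of Dst are unchanged.  Simplification: Start <= End, so the
// selected field does not wrap around.
static uint64_t rotateThenInsertSelected(uint64_t Dst, uint64_t Src,
                                         unsigned Start, unsigned End,
                                         unsigned Rotate) {
  assert(Start < 64 && End < 64 && Start <= End);
  uint64_t Mask = (~uint64_t(0) >> Start) & (~uint64_t(0) << (63 - End));
  return (Dst & ~Mask) | (rotl64(Src, Rotate) & Mask);
}

int main() {
  // Rotate Src left by 8 and drop its low byte into bits 48..55 of Dst
  // (the second-lowest byte in ordinary LSB-0 terms).
  uint64_t Dst = 0x1111111111111111ULL, Src = 0xABULL;
  assert(rotateThenInsertSelected(Dst, Src, 48, 55, 8) ==
         0x111111111111AB11ULL);
  return 0;
}

Under the same model, RNSBG, ROSBG and RXSBG replace the final insert with an AND, OR or XOR of the selected bits, which is why the comment above notes that their CC result only describes the selected field rather than the whole register.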
//===----------------------------------------------------------------------===//
// Comparison
//===----------------------------------------------------------------------===//
-// Signed comparisons.
-let Defs = [PSW] in {
+// Signed comparisons. We put these before the unsigned comparisons because
+// some of the signed forms have COMPARE AND BRANCH equivalents whereas none
+// of the unsigned forms do.
+let Defs = [CC], CCValues = 0xE in {
// Comparison with a register.
- def CR : CompareRR <"cr", 0x19, z_cmp, GR32, GR32>;
- def CGFR : CompareRRE<"cgfr", 0xB930, null_frag, GR64, GR32>;
- def CGR : CompareRRE<"cgr", 0xB920, z_cmp, GR64, GR64>;
+ def CR : CompareRR <"c", 0x19, z_scmp, GR32, GR32>;
+ def CGFR : CompareRRE<"cgf", 0xB930, null_frag, GR64, GR32>;
+ def CGR : CompareRRE<"cg", 0xB920, z_scmp, GR64, GR64>;
// Comparison with a signed 16-bit immediate.
- def CHI : CompareRI<"chi", 0xA7E, z_cmp, GR32, imm32sx16>;
- def CGHI : CompareRI<"cghi", 0xA7F, z_cmp, GR64, imm64sx16>;
-
- // Comparison with a signed 32-bit immediate.
- def CFI : CompareRIL<"cfi", 0xC2D, z_cmp, GR32, simm32>;
- def CGFI : CompareRIL<"cgfi", 0xC2C, z_cmp, GR64, imm64sx32>;
+ def CHI : CompareRI<"chi", 0xA7E, z_scmp, GR32, imm32sx16>;
+ def CGHI : CompareRI<"cghi", 0xA7F, z_scmp, GR64, imm64sx16>;
+
+ // Comparison with a signed 32-bit immediate. CFIMux expands to CFI or CIH,
+ // depending on the choice of register.
+ def CFIMux : CompareRIPseudo<z_scmp, GRX32, simm32>,
+ Requires<[FeatureHighWord]>;
+ def CFI : CompareRIL<"cfi", 0xC2D, z_scmp, GR32, simm32>;
+ def CIH : CompareRIL<"cih", 0xCCD, z_scmp, GRH32, simm32>,
+ Requires<[FeatureHighWord]>;
+ def CGFI : CompareRIL<"cgfi", 0xC2C, z_scmp, GR64, imm64sx32>;
// Comparison with memory.
- defm CH : CompareRXPair<"ch", 0x49, 0xE379, z_cmp, GR32, sextloadi16>;
- defm C : CompareRXPair<"c", 0x59, 0xE359, z_cmp, GR32, load>;
- def CGH : CompareRXY<"cgh", 0xE334, z_cmp, GR64, sextloadi16>;
- def CGF : CompareRXY<"cgf", 0xE330, z_cmp, GR64, sextloadi32>;
- def CG : CompareRXY<"cg", 0xE320, z_cmp, GR64, load>;
- def CHRL : CompareRILPC<"chrl", 0xC65, z_cmp, GR32, aligned_sextloadi16>;
- def CRL : CompareRILPC<"crl", 0xC6D, z_cmp, GR32, aligned_load>;
- def CGHRL : CompareRILPC<"cghrl", 0xC64, z_cmp, GR64, aligned_sextloadi16>;
- def CGFRL : CompareRILPC<"cgfrl", 0xC6C, z_cmp, GR64, aligned_sextloadi32>;
- def CGRL : CompareRILPC<"cgrl", 0xC68, z_cmp, GR64, aligned_load>;
+ defm CH : CompareRXPair<"ch", 0x49, 0xE379, z_scmp, GR32, asextloadi16, 2>;
+ def CMux : CompareRXYPseudo<z_scmp, GRX32, load, 4>,
+ Requires<[FeatureHighWord]>;
+ defm C : CompareRXPair<"c", 0x59, 0xE359, z_scmp, GR32, load, 4>;
+ def CHF : CompareRXY<"chf", 0xE3CD, z_scmp, GRH32, load, 4>,
+ Requires<[FeatureHighWord]>;
+ def CGH : CompareRXY<"cgh", 0xE334, z_scmp, GR64, asextloadi16, 2>;
+ def CGF : CompareRXY<"cgf", 0xE330, z_scmp, GR64, asextloadi32, 4>;
+ def CG : CompareRXY<"cg", 0xE320, z_scmp, GR64, load, 8>;
+ def CHRL : CompareRILPC<"chrl", 0xC65, z_scmp, GR32, aligned_asextloadi16>;
+ def CRL : CompareRILPC<"crl", 0xC6D, z_scmp, GR32, aligned_load>;
+ def CGHRL : CompareRILPC<"cghrl", 0xC64, z_scmp, GR64, aligned_asextloadi16>;
+ def CGFRL : CompareRILPC<"cgfrl", 0xC6C, z_scmp, GR64, aligned_asextloadi32>;
+ def CGRL : CompareRILPC<"cgrl", 0xC68, z_scmp, GR64, aligned_load>;
// Comparison between memory and a signed 16-bit immediate.
- def CHHSI : CompareSIL<"chhsi", 0xE554, z_cmp, sextloadi16, imm32sx16>;
- def CHSI : CompareSIL<"chsi", 0xE55C, z_cmp, load, imm32sx16>;
- def CGHSI : CompareSIL<"cghsi", 0xE558, z_cmp, load, imm64sx16>;
+ def CHHSI : CompareSIL<"chhsi", 0xE554, z_scmp, asextloadi16, imm32sx16>;
+ def CHSI : CompareSIL<"chsi", 0xE55C, z_scmp, load, imm32sx16>;
+ def CGHSI : CompareSIL<"cghsi", 0xE558, z_scmp, load, imm64sx16>;
}
-defm : SXB<z_cmp, GR64, CGFR>;
+defm : SXB<z_scmp, GR64, CGFR>;
// Unsigned comparisons.
-let Defs = [PSW] in {
+let Defs = [CC], CCValues = 0xE, IsLogical = 1 in {
// Comparison with a register.
- def CLR : CompareRR <"clr", 0x15, z_ucmp, GR32, GR32>;
- def CLGFR : CompareRRE<"clgfr", 0xB931, null_frag, GR64, GR32>;
- def CLGR : CompareRRE<"clgr", 0xB921, z_ucmp, GR64, GR64>;
-
- // Comparison with a signed 32-bit immediate.
+ def CLR : CompareRR <"cl", 0x15, z_ucmp, GR32, GR32>;
+ def CLGFR : CompareRRE<"clgf", 0xB931, null_frag, GR64, GR32>;
+ def CLGR : CompareRRE<"clg", 0xB921, z_ucmp, GR64, GR64>;
+
+ // Comparison with an unsigned 32-bit immediate. CLFIMux expands to CLFI
+ // or CLIH, depending on the choice of register.
+ def CLFIMux : CompareRIPseudo<z_ucmp, GRX32, uimm32>,
+ Requires<[FeatureHighWord]>;
def CLFI : CompareRIL<"clfi", 0xC2F, z_ucmp, GR32, uimm32>;
+  def CLIH : CompareRIL<"clih", 0xCCF, z_ucmp, GRH32, uimm32>,
+ Requires<[FeatureHighWord]>;
def CLGFI : CompareRIL<"clgfi", 0xC2E, z_ucmp, GR64, imm64zx32>;
// Comparison with memory.
- defm CL : CompareRXPair<"cl", 0x55, 0xE355, z_ucmp, GR32, load>;
- def CLGF : CompareRXY<"clgf", 0xE331, z_ucmp, GR64, zextloadi32>;
- def CLG : CompareRXY<"clg", 0xE321, z_ucmp, GR64, load>;
+ def CLMux : CompareRXYPseudo<z_ucmp, GRX32, load, 4>,
+ Requires<[FeatureHighWord]>;
+ defm CL : CompareRXPair<"cl", 0x55, 0xE355, z_ucmp, GR32, load, 4>;
+ def CLHF : CompareRXY<"clhf", 0xE3CF, z_ucmp, GRH32, load, 4>,
+ Requires<[FeatureHighWord]>;
+ def CLGF : CompareRXY<"clgf", 0xE331, z_ucmp, GR64, azextloadi32, 4>;
+ def CLG : CompareRXY<"clg", 0xE321, z_ucmp, GR64, load, 8>;
def CLHRL : CompareRILPC<"clhrl", 0xC67, z_ucmp, GR32,
- aligned_zextloadi16>;
+ aligned_azextloadi16>;
def CLRL : CompareRILPC<"clrl", 0xC6F, z_ucmp, GR32,
aligned_load>;
def CLGHRL : CompareRILPC<"clghrl", 0xC66, z_ucmp, GR64,
- aligned_zextloadi16>;
+ aligned_azextloadi16>;
def CLGFRL : CompareRILPC<"clgfrl", 0xC6E, z_ucmp, GR64,
- aligned_zextloadi32>;
+ aligned_azextloadi32>;
def CLGRL : CompareRILPC<"clgrl", 0xC6A, z_ucmp, GR64,
aligned_load>;
// Comparison between memory and an unsigned 8-bit immediate.
- defm CLI : CompareSIPair<"cli", 0x95, 0xEB55, z_ucmp, zextloadi8, imm32zx8>;
+ defm CLI : CompareSIPair<"cli", 0x95, 0xEB55, z_ucmp, azextloadi8, imm32zx8>;
// Comparison between memory and an unsigned 16-bit immediate.
- def CLHHSI : CompareSIL<"clhhsi", 0xE555, z_ucmp, zextloadi16, imm32zx16>;
- def CLFHSI : CompareSIL<"clfhsi", 0xE55D, z_ucmp, load, imm32zx16>;
- def CLGHSI : CompareSIL<"clghsi", 0xE559, z_ucmp, load, imm64zx16>;
+ def CLHHSI : CompareSIL<"clhhsi", 0xE555, z_ucmp, azextloadi16, imm32zx16>;
+ def CLFHSI : CompareSIL<"clfhsi", 0xE55D, z_ucmp, load, imm32zx16>;
+ def CLGHSI : CompareSIL<"clghsi", 0xE559, z_ucmp, load, imm64zx16>;
}
defm : ZXB<z_ucmp, GR64, CLGFR>;
+// Memory-to-memory comparison.
+let mayLoad = 1, Defs = [CC] in
+ defm CLC : MemorySS<"clc", 0xD5, z_clc, z_clc_loop>;
+
+// String comparison.
+let mayLoad = 1, Defs = [CC], Uses = [R0L] in
+ defm CLST : StringRRE<"clst", 0xB25D, z_strcmp>;
+
+// Test under mask.
+let Defs = [CC] in {
+ // TMxMux expands to TM[LH]x, depending on the choice of register.
+ def TMLMux : CompareRIPseudo<z_tm_reg, GRX32, imm32ll16>,
+ Requires<[FeatureHighWord]>;
+ def TMHMux : CompareRIPseudo<z_tm_reg, GRX32, imm32lh16>,
+ Requires<[FeatureHighWord]>;
+ def TMLL : CompareRI<"tmll", 0xA71, z_tm_reg, GR32, imm32ll16>;
+ def TMLH : CompareRI<"tmlh", 0xA70, z_tm_reg, GR32, imm32lh16>;
+ def TMHL : CompareRI<"tmhl", 0xA73, z_tm_reg, GRH32, imm32ll16>;
+ def TMHH : CompareRI<"tmhh", 0xA72, z_tm_reg, GRH32, imm32lh16>;
+
+ def TMLL64 : CompareAliasRI<z_tm_reg, GR64, imm64ll16>;
+ def TMLH64 : CompareAliasRI<z_tm_reg, GR64, imm64lh16>;
+ def TMHL64 : CompareAliasRI<z_tm_reg, GR64, imm64hl16>;
+ def TMHH64 : CompareAliasRI<z_tm_reg, GR64, imm64hh16>;
+
+ defm TM : CompareSIPair<"tm", 0x91, 0xEB51, z_tm_mem, anyextloadi8, imm32zx8>;
+}
+
+//===----------------------------------------------------------------------===//
+// Prefetch
+//===----------------------------------------------------------------------===//
+
+def PFD : PrefetchRXY<"pfd", 0xE336, z_prefetch>;
+def PFDRL : PrefetchRILPC<"pfdrl", 0xC62, z_prefetch>;
+
//===----------------------------------------------------------------------===//
// Atomic operations
//===----------------------------------------------------------------------===//
@@ -805,60 +1215,60 @@ def ATOMIC_LOAD_SGR : AtomicLoadBinaryReg64<atomic_load_sub_64>;
def ATOMIC_LOADW_NR : AtomicLoadWBinaryReg<z_atomic_loadw_and>;
def ATOMIC_LOADW_NILH : AtomicLoadWBinaryImm<z_atomic_loadw_and, imm32lh16c>;
def ATOMIC_LOAD_NR : AtomicLoadBinaryReg32<atomic_load_and_32>;
-def ATOMIC_LOAD_NILL32 : AtomicLoadBinaryImm32<atomic_load_and_32, imm32ll16c>;
-def ATOMIC_LOAD_NILH32 : AtomicLoadBinaryImm32<atomic_load_and_32, imm32lh16c>;
-def ATOMIC_LOAD_NILF32 : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
+def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm32<atomic_load_and_32, imm32ll16c>;
+def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm32<atomic_load_and_32, imm32lh16c>;
+def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
def ATOMIC_LOAD_NGR : AtomicLoadBinaryReg64<atomic_load_and_64>;
-def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm64<atomic_load_and_64, imm64ll16c>;
-def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lh16c>;
-def ATOMIC_LOAD_NIHL : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hl16c>;
-def ATOMIC_LOAD_NIHH : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hh16c>;
-def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lf32c>;
-def ATOMIC_LOAD_NIHF : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hf32c>;
+def ATOMIC_LOAD_NILL64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64ll16c>;
+def ATOMIC_LOAD_NILH64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lh16c>;
+def ATOMIC_LOAD_NIHL64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hl16c>;
+def ATOMIC_LOAD_NIHH64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hh16c>;
+def ATOMIC_LOAD_NILF64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lf32c>;
+def ATOMIC_LOAD_NIHF64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hf32c>;
def ATOMIC_LOADW_OR : AtomicLoadWBinaryReg<z_atomic_loadw_or>;
def ATOMIC_LOADW_OILH : AtomicLoadWBinaryImm<z_atomic_loadw_or, imm32lh16>;
def ATOMIC_LOAD_OR : AtomicLoadBinaryReg32<atomic_load_or_32>;
-def ATOMIC_LOAD_OILL32 : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
-def ATOMIC_LOAD_OILH32 : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
-def ATOMIC_LOAD_OILF32 : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
+def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
+def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
+def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
def ATOMIC_LOAD_OGR : AtomicLoadBinaryReg64<atomic_load_or_64>;
-def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
-def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
-def ATOMIC_LOAD_OIHL : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hl16>;
-def ATOMIC_LOAD_OIHH : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hh16>;
-def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
-def ATOMIC_LOAD_OIHF : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hf32>;
+def ATOMIC_LOAD_OILL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
+def ATOMIC_LOAD_OILH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
+def ATOMIC_LOAD_OIHL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hl16>;
+def ATOMIC_LOAD_OIHH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hh16>;
+def ATOMIC_LOAD_OILF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
+def ATOMIC_LOAD_OIHF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hf32>;
def ATOMIC_LOADW_XR : AtomicLoadWBinaryReg<z_atomic_loadw_xor>;
def ATOMIC_LOADW_XILF : AtomicLoadWBinaryImm<z_atomic_loadw_xor, uimm32>;
def ATOMIC_LOAD_XR : AtomicLoadBinaryReg32<atomic_load_xor_32>;
-def ATOMIC_LOAD_XILF32 : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
+def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
def ATOMIC_LOAD_XGR : AtomicLoadBinaryReg64<atomic_load_xor_64>;
-def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
-def ATOMIC_LOAD_XIHF : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64hf32>;
+def ATOMIC_LOAD_XILF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
+def ATOMIC_LOAD_XIHF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64hf32>;
def ATOMIC_LOADW_NRi : AtomicLoadWBinaryReg<z_atomic_loadw_nand>;
def ATOMIC_LOADW_NILHi : AtomicLoadWBinaryImm<z_atomic_loadw_nand,
imm32lh16c>;
def ATOMIC_LOAD_NRi : AtomicLoadBinaryReg32<atomic_load_nand_32>;
-def ATOMIC_LOAD_NILL32i : AtomicLoadBinaryImm32<atomic_load_nand_32,
+def ATOMIC_LOAD_NILLi : AtomicLoadBinaryImm32<atomic_load_nand_32,
imm32ll16c>;
-def ATOMIC_LOAD_NILH32i : AtomicLoadBinaryImm32<atomic_load_nand_32,
+def ATOMIC_LOAD_NILHi : AtomicLoadBinaryImm32<atomic_load_nand_32,
imm32lh16c>;
-def ATOMIC_LOAD_NILF32i : AtomicLoadBinaryImm32<atomic_load_nand_32, uimm32>;
+def ATOMIC_LOAD_NILFi : AtomicLoadBinaryImm32<atomic_load_nand_32, uimm32>;
def ATOMIC_LOAD_NGRi : AtomicLoadBinaryReg64<atomic_load_nand_64>;
-def ATOMIC_LOAD_NILLi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NILL64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
imm64ll16c>;
-def ATOMIC_LOAD_NILHi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NILH64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
imm64lh16c>;
-def ATOMIC_LOAD_NIHLi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NIHL64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
imm64hl16c>;
-def ATOMIC_LOAD_NIHHi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NIHH64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
imm64hh16c>;
-def ATOMIC_LOAD_NILFi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NILF64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
imm64lf32c>;
-def ATOMIC_LOAD_NIHFi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NIHF64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
imm64hf32c>;
def ATOMIC_LOADW_MIN : AtomicLoadWBinaryReg<z_atomic_loadw_min>;
@@ -885,13 +1295,13 @@ def ATOMIC_CMP_SWAPW
(z_atomic_cmp_swapw bdaddr20only:$addr, GR32:$cmp, GR32:$swap,
ADDR32:$bitshift, ADDR32:$negbitshift,
uimm32:$bitsize))]> {
- let Defs = [PSW];
+ let Defs = [CC];
let mayLoad = 1;
let mayStore = 1;
let usesCustomInserter = 1;
}
-let Defs = [PSW] in {
+let Defs = [CC] in {
defm CS : CmpSwapRSPair<"cs", 0xBA, 0xEB14, atomic_cmp_swap_32, GR32>;
def CSG : CmpSwapRSY<"csg", 0xEB30, atomic_cmp_swap_64, GR64>;
}
@@ -900,34 +1310,30 @@ let Defs = [PSW] in {
// Miscellaneous Instructions.
//===----------------------------------------------------------------------===//
+// Extract CC into bits 29 and 28 of a register.
+let Uses = [CC] in
+ def IPM : InherentRRE<"ipm", 0xB222, GR32, (z_ipm)>;
+
// Read a 32-bit access register into a GR32. As with all GR32 operations,
// the upper 32 bits of the enclosing GR64 remain unchanged, which is useful
// when a 64-bit address is stored in a pair of access registers.
-def EAR : InstRRE<0xB24F, (outs GR32:$dst), (ins access_reg:$src),
- "ear\t$dst, $src",
- [(set GR32:$dst, (z_extract_access access_reg:$src))]>;
+def EAR : InstRRE<0xB24F, (outs GR32:$R1), (ins access_reg:$R2),
+ "ear\t$R1, $R2",
+ [(set GR32:$R1, (z_extract_access access_reg:$R2))]>;
// Find leftmost one, AKA count leading zeros. The instruction actually
// returns a pair of GR64s, the first giving the number of leading zeros
// and the second giving a copy of the source with the leftmost one bit
// cleared. We only use the first result here.
-let Defs = [PSW] in {
- def FLOGR : UnaryRRE<"flogr", 0xB983, null_frag, GR128, GR64>;
+let Defs = [CC] in {
+ def FLOGR : UnaryRRE<"flog", 0xB983, null_frag, GR128, GR64>;
}
def : Pat<(ctlz GR64:$src),
- (EXTRACT_SUBREG (FLOGR GR64:$src), subreg_high)>;
+ (EXTRACT_SUBREG (FLOGR GR64:$src), subreg_h64)>;
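
As a quick, hedged illustration of the FLOGR comment above (a standalone emulation of the assumed semantics, not LLVM code): the operation produces both the leading-zero count and a copy of the source with that leftmost one bit cleared, and the ctlz pattern only consumes the first half of the pair. The zero-input case, which the real instruction distinguishes via the condition code, is simply reported as a count of 64 here.

#include <cassert>
#include <cstdint>
#include <utility>

// Emulated "find leftmost one": {leading-zero count, source with the
// leftmost one bit cleared}.
static std::pair<unsigned, uint64_t> findLeftmostOne(uint64_t Src) {
  if (Src == 0)
    return {64, 0};
  unsigned N = 0;
  while (!((Src >> (63 - N)) & 1))
    ++N;
  return {N, Src & ~(uint64_t(1) << (63 - N))};
}

int main() {
  std::pair<unsigned, uint64_t> R = findLeftmostOne(0x0000800000000001ULL);
  assert(R.first == 16 && R.second == 0x0000000000000001ULL);
  return 0;
}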
// Use subregs to populate the "don't care" bits in a 32-bit to 64-bit anyext.
def : Pat<(i64 (anyext GR32:$src)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_32bit)>;
-
-// There are no 32-bit equivalents of LLILL and LLILH, so use a full
-// 64-bit move followed by a subreg. This preserves the invariant that
-// all GR32 operations only modify the low 32 bits.
-def : Pat<(i32 imm32ll16:$src),
- (EXTRACT_SUBREG (LLILL (LL16 imm:$src)), subreg_32bit)>;
-def : Pat<(i32 imm32lh16:$src),
- (EXTRACT_SUBREG (LLILH (LH16 imm:$src)), subreg_32bit)>;
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32)>;
// Extend GR32s and GR64s to GR128s.
let usesCustomInserter = 1 in {
@@ -936,6 +1342,10 @@ let usesCustomInserter = 1 in {
def ZEXT128_64 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>;
}
+// Search a block of memory for a character.
+let mayLoad = 1, Defs = [CC], Uses = [R0L] in
+ defm SRST : StringRRE<"srst", 0xb25e, z_search_string>;
+
//===----------------------------------------------------------------------===//
// Peepholes.
//===----------------------------------------------------------------------===//
@@ -944,12 +1354,40 @@ let usesCustomInserter = 1 in {
defm : ZXB<add, GR64, ALGFR>;
def : Pat<(add GR64:$src1, imm64zx32:$src2),
(ALGFI GR64:$src1, imm64zx32:$src2)>;
-def : Pat<(add GR64:$src1, (zextloadi32 bdxaddr20only:$addr)),
+def : Pat<(add GR64:$src1, (azextloadi32 bdxaddr20only:$addr)),
(ALGF GR64:$src1, bdxaddr20only:$addr)>;
// Use SL* for GR64 subtractions of unsigned 32-bit values.
defm : ZXB<sub, GR64, SLGFR>;
def : Pat<(add GR64:$src1, imm64zx32n:$src2),
(SLGFI GR64:$src1, imm64zx32n:$src2)>;
-def : Pat<(sub GR64:$src1, (zextloadi32 bdxaddr20only:$addr)),
+def : Pat<(sub GR64:$src1, (azextloadi32 bdxaddr20only:$addr)),
(SLGF GR64:$src1, bdxaddr20only:$addr)>;
+
+// Optimize sign-extended 1/0 selects to -1/0 selects. This is important
+// for vector legalization.
+def : Pat<(sra (shl (i32 (z_select_ccmask 1, 0, uimm8zx4:$valid, uimm8zx4:$cc)),
+ (i32 31)),
+ (i32 31)),
+ (Select32 (LHI -1), (LHI 0), uimm8zx4:$valid, uimm8zx4:$cc)>;
+def : Pat<(sra (shl (i64 (anyext (i32 (z_select_ccmask 1, 0, uimm8zx4:$valid,
+ uimm8zx4:$cc)))),
+ (i32 63)),
+ (i32 63)),
+ (Select64 (LGHI -1), (LGHI 0), uimm8zx4:$valid, uimm8zx4:$cc)>;
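
These two patterns rest on a small arithmetic identity: shifting a 1/0 value into the sign bit and arithmetically shifting it back yields -1/0, so the shl/sra pair can be folded into a select of -1/0 directly. A minimal standalone check of the identity follows (illustrative only; it assumes the usual sign-filling arithmetic right shift, which is exactly what SRA provides on the target).

#include <cassert>
#include <cstdint>

// For X in {0, 1}: move the low bit into the sign position, then
// arithmetic-shift it back down, giving 0 or -1.
static int32_t signExtendLowBit(uint32_t X) {
  return int32_t(X << 31) >> 31;
}

int main() {
  assert(signExtendLowBit(0) == 0);
  assert(signExtendLowBit(1) == -1);
  return 0;
}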
+
+// Peepholes for turning scalar operations into block operations.
+defm : BlockLoadStore<anyextloadi8, i32, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 1>;
+defm : BlockLoadStore<anyextloadi16, i32, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 2>;
+defm : BlockLoadStore<load, i32, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 4>;
+defm : BlockLoadStore<anyextloadi8, i64, MVCSequence, NCSequence,
+ OCSequence, XCSequence, 1>;
+defm : BlockLoadStore<anyextloadi16, i64, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 2>;
+defm : BlockLoadStore<anyextloadi32, i64, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 4>;
+defm : BlockLoadStore<load, i64, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 8>;
diff --git a/lib/Target/SystemZ/SystemZLongBranch.cpp b/lib/Target/SystemZ/SystemZLongBranch.cpp
new file mode 100644
index 000000000000..ba027d460440
--- /dev/null
+++ b/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -0,0 +1,462 @@
+//===-- SystemZLongBranch.cpp - Branch lengthening for SystemZ ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass makes sure that all branches are in range. There are several ways
+// in which this could be done. One aggressive approach is to assume that all
+// branches are in range and successively replace those that turn out not
+// to be in range with a longer form (branch relaxation). A simple
+// implementation is to continually walk through the function relaxing
+// branches until no more changes are needed and a fixed point is reached.
+// However, in the pathological worst case, this implementation is
+// quadratic in the number of blocks; relaxing branch N can make branch N-1
+// go out of range, which in turn can make branch N-2 go out of range,
+// and so on.
+//
+// An alternative approach is to assume that all branches must be
+// converted to their long forms, then reinstate the short forms of
+// branches that, even under this pessimistic assumption, turn out to be
+// in range (branch shortening). This too can be implemented as a function
+// walk that is repeated until a fixed point is reached. In general,
+// the result of shortening is not as good as that of relaxation, and
+// shortening is also quadratic in the worst case; shortening branch N
+// can bring branch N-1 in range of the short form, which in turn can do
+// the same for branch N-2, and so on. The main advantage of shortening
+// is that each walk through the function produces valid code, so it is
+// possible to stop at any point after the first walk. The quadraticness
+// could therefore be handled with a maximum pass count, although the
+// question then becomes: what maximum count should be used?
+//
+// On SystemZ, long branches are only needed for functions bigger than 64k,
+// which are relatively rare to begin with, and the long branch sequences
+// are actually relatively cheap. It therefore doesn't seem worth spending
+// much compilation time on the problem. Instead, the approach we take is:
+//
+// (1) Work out the address that each block would have if no branches
+// need relaxing. Exit the pass early if all branches are in range
+// according to this assumption.
+//
+// (2) Work out the address that each block would have if all branches
+// need relaxing.
+//
+// (3) Walk through the blocks calculating the final address of each instruction
+// and relaxing those that need to be relaxed. For backward branches,
+// this check uses the final address of the target block, as calculated
+// earlier in the walk. For forward branches, this check uses the
+// address of the target block that was calculated in (2). Both checks
+// give a conservatively-correct range.
+//
+//===----------------------------------------------------------------------===//
+
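
Before the implementation itself, a compact standalone model of the three-step strategy described above may help. The program below is not part of this file or of LLVM: the block sizes, the 4-byte short and 6-byte long branch encodings, and the single symmetric range limit are simplified assumptions chosen only to mirror the optimistic layout, the worst-case layout, and the final relaxation walk.

#include <cstdint>
#include <cstdio>
#include <vector>

// A simplified block: Size bytes of body followed by at most one branch to
// block Target (-1 for none).  Short branch = 4 bytes, long branch = 6.
struct SimpleBlock {
  uint64_t Size;
  int Target;
  bool Relaxed;
  uint64_t Address;
};

static const uint64_t ShortBranch = 4, LongBranch = 6;
static const uint64_t MaxRange = 0xfffe;

// Lay the blocks out, optionally assuming every branch takes its long form.
static void layout(std::vector<SimpleBlock> &Blocks, bool AssumeRelaxed) {
  uint64_t Address = 0;
  for (std::vector<SimpleBlock>::iterator B = Blocks.begin(),
                                          E = Blocks.end(); B != E; ++B) {
    B->Address = Address;
    Address += B->Size;
    if (B->Target >= 0)
      Address += (B->Relaxed || AssumeRelaxed) ? LongBranch : ShortBranch;
  }
}

static bool outOfRange(uint64_t From, uint64_t To) {
  return (From >= To ? From - To : To - From) > MaxRange;
}

int main() {
  // Block 0 branches forward over a 70000-byte block to block 2.
  std::vector<SimpleBlock> Blocks;
  SimpleBlock B0 = { 16, 2, false, 0 };
  SimpleBlock B1 = { 70000, -1, false, 0 };
  SimpleBlock B2 = { 16, -1, false, 0 };
  Blocks.push_back(B0); Blocks.push_back(B1); Blocks.push_back(B2);

  // Step 1: optimistic layout; stop here if every branch is already in range.
  layout(Blocks, false);
  bool NeedRelax = false;
  for (unsigned I = 0; I < Blocks.size(); ++I)
    if (Blocks[I].Target >= 0 &&
        outOfRange(Blocks[I].Address + Blocks[I].Size,
                   Blocks[Blocks[I].Target].Address))
      NeedRelax = true;

  if (NeedRelax) {
    // Step 2: pessimistic layout, giving safe bounds for forward targets.
    layout(Blocks, true);
    std::vector<uint64_t> WorstCase;
    for (unsigned I = 0; I < Blocks.size(); ++I)
      WorstCase.push_back(Blocks[I].Address);

    // Step 3: final walk.  Backward branches check the final address of
    // their target (already computed); forward branches check the worst-case
    // address from step 2.  Both are conservatively correct.
    uint64_t Address = 0;
    for (unsigned I = 0; I < Blocks.size(); ++I) {
      SimpleBlock &B = Blocks[I];
      B.Address = Address;
      Address += B.Size;
      if (B.Target >= 0) {
        uint64_t TargetAddr = unsigned(B.Target) <= I
                                  ? Blocks[B.Target].Address
                                  : WorstCase[B.Target];
        if (outOfRange(Address, TargetAddr))
          B.Relaxed = true;
        Address += B.Relaxed ? LongBranch : ShortBranch;
      }
    }
  }

  for (unsigned I = 0; I < Blocks.size(); ++I)
    std::printf("block %u at %llu, relaxed=%d\n", I,
                (unsigned long long)Blocks[I].Address, int(Blocks[I].Relaxed));
  return 0;
}

With the sizes above, block 0's forward branch gets relaxed: even against the worst-case address of block 2 it is out of range, so only its long form is safe.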
+#define DEBUG_TYPE "systemz-long-branch"
+
+#include "SystemZTargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+STATISTIC(LongBranches, "Number of long branches.");
+
+namespace {
+ // Represents positional information about a basic block.
+ struct MBBInfo {
+ // The address that we currently assume the block has.
+ uint64_t Address;
+
+ // The size of the block in bytes, excluding terminators.
+ // This value never changes.
+ uint64_t Size;
+
+ // The minimum alignment of the block, as a log2 value.
+ // This value never changes.
+ unsigned Alignment;
+
+ // The number of terminators in this block. This value never changes.
+ unsigned NumTerminators;
+
+ MBBInfo()
+ : Address(0), Size(0), Alignment(0), NumTerminators(0) {}
+ };
+
+ // Represents the state of a block terminator.
+ struct TerminatorInfo {
+ // If this terminator is a relaxable branch, this points to the branch
+ // instruction, otherwise it is null.
+ MachineInstr *Branch;
+
+ // The address that we currently assume the terminator has.
+ uint64_t Address;
+
+ // The current size of the terminator in bytes.
+ uint64_t Size;
+
+ // If Branch is nonnull, this is the number of the target block,
+ // otherwise it is unused.
+ unsigned TargetBlock;
+
+ // If Branch is nonnull, this is the length of the longest relaxed form,
+ // otherwise it is zero.
+ unsigned ExtraRelaxSize;
+
+ TerminatorInfo() : Branch(0), Size(0), TargetBlock(0), ExtraRelaxSize(0) {}
+ };
+
+ // Used to keep track of the current position while iterating over the blocks.
+ struct BlockPosition {
+ // The address that we assume this position has.
+ uint64_t Address;
+
+ // The number of low bits in Address that are known to be the same
+ // as the runtime address.
+ unsigned KnownBits;
+
+ BlockPosition(unsigned InitialAlignment)
+ : Address(0), KnownBits(InitialAlignment) {}
+ };
+
+ class SystemZLongBranch : public MachineFunctionPass {
+ public:
+ static char ID;
+ SystemZLongBranch(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(0) {}
+
+ virtual const char *getPassName() const {
+ return "SystemZ Long Branch";
+ }
+
+ bool runOnMachineFunction(MachineFunction &F);
+
+ private:
+ void skipNonTerminators(BlockPosition &Position, MBBInfo &Block);
+ void skipTerminator(BlockPosition &Position, TerminatorInfo &Terminator,
+ bool AssumeRelaxed);
+ TerminatorInfo describeTerminator(MachineInstr *MI);
+ uint64_t initMBBInfo();
+ bool mustRelaxBranch(const TerminatorInfo &Terminator, uint64_t Address);
+ bool mustRelaxABranch();
+ void setWorstCaseAddresses();
+ void splitBranchOnCount(MachineInstr *MI, unsigned AddOpcode);
+ void splitCompareBranch(MachineInstr *MI, unsigned CompareOpcode);
+ void relaxBranch(TerminatorInfo &Terminator);
+ void relaxBranches();
+
+ const SystemZInstrInfo *TII;
+ MachineFunction *MF;
+ SmallVector<MBBInfo, 16> MBBs;
+ SmallVector<TerminatorInfo, 16> Terminators;
+ };
+
+ char SystemZLongBranch::ID = 0;
+
+ const uint64_t MaxBackwardRange = 0x10000;
+ const uint64_t MaxForwardRange = 0xfffe;
+} // end of anonymous namespace
+
+FunctionPass *llvm::createSystemZLongBranchPass(SystemZTargetMachine &TM) {
+ return new SystemZLongBranch(TM);
+}
+
+// Position describes the state immediately before Block. Update Block
+// accordingly and move Position to the end of the block's non-terminator
+// instructions.
+void SystemZLongBranch::skipNonTerminators(BlockPosition &Position,
+ MBBInfo &Block) {
+ if (Block.Alignment > Position.KnownBits) {
+ // When calculating the address of Block, we need to conservatively
+ // assume that Block had the worst possible misalignment.
+ Position.Address += ((uint64_t(1) << Block.Alignment) -
+ (uint64_t(1) << Position.KnownBits));
+ Position.KnownBits = Block.Alignment;
+ }
+
+ // Align the addresses.
+ uint64_t AlignMask = (uint64_t(1) << Block.Alignment) - 1;
+ Position.Address = (Position.Address + AlignMask) & ~AlignMask;
+
+ // Record the block's position.
+ Block.Address = Position.Address;
+
+ // Move past the non-terminators in the block.
+ Position.Address += Block.Size;
+}
+
+// Position describes the state immediately before Terminator.
+// Update Terminator accordingly and move Position past it.
+// Assume that Terminator will be relaxed if AssumeRelaxed.
+void SystemZLongBranch::skipTerminator(BlockPosition &Position,
+ TerminatorInfo &Terminator,
+ bool AssumeRelaxed) {
+ Terminator.Address = Position.Address;
+ Position.Address += Terminator.Size;
+ if (AssumeRelaxed)
+ Position.Address += Terminator.ExtraRelaxSize;
+}
+
+// Return a description of terminator instruction MI.
+TerminatorInfo SystemZLongBranch::describeTerminator(MachineInstr *MI) {
+ TerminatorInfo Terminator;
+ Terminator.Size = TII->getInstSizeInBytes(MI);
+ if (MI->isConditionalBranch() || MI->isUnconditionalBranch()) {
+ switch (MI->getOpcode()) {
+ case SystemZ::J:
+ // Relaxes to JG, which is 2 bytes longer.
+ Terminator.ExtraRelaxSize = 2;
+ break;
+ case SystemZ::BRC:
+ // Relaxes to BRCL, which is 2 bytes longer.
+ Terminator.ExtraRelaxSize = 2;
+ break;
+ case SystemZ::BRCT:
+ case SystemZ::BRCTG:
+ // Relaxes to A(G)HI and BRCL, which is 6 bytes longer.
+ Terminator.ExtraRelaxSize = 6;
+ break;
+ case SystemZ::CRJ:
+ case SystemZ::CLRJ:
+ // Relaxes to a C(L)R/BRCL sequence, which is 2 bytes longer.
+ Terminator.ExtraRelaxSize = 2;
+ break;
+ case SystemZ::CGRJ:
+ case SystemZ::CLGRJ:
+ // Relaxes to a C(L)GR/BRCL sequence, which is 4 bytes longer.
+ Terminator.ExtraRelaxSize = 4;
+ break;
+ case SystemZ::CIJ:
+ case SystemZ::CGIJ:
+ // Relaxes to a C(G)HI/BRCL sequence, which is 4 bytes longer.
+ Terminator.ExtraRelaxSize = 4;
+ break;
+ case SystemZ::CLIJ:
+ case SystemZ::CLGIJ:
+ // Relaxes to a CL(G)FI/BRCL sequence, which is 6 bytes longer.
+ Terminator.ExtraRelaxSize = 6;
+ break;
+ default:
+ llvm_unreachable("Unrecognized branch instruction");
+ }
+ Terminator.Branch = MI;
+ Terminator.TargetBlock =
+ TII->getBranchInfo(MI).Target->getMBB()->getNumber();
+ }
+ return Terminator;
+}
+
+// Fill MBBs and Terminators, setting the addresses on the assumption
+// that no branches need relaxation. Return the size of the function under
+// this assumption.
+uint64_t SystemZLongBranch::initMBBInfo() {
+ MF->RenumberBlocks();
+ unsigned NumBlocks = MF->size();
+
+ MBBs.clear();
+ MBBs.resize(NumBlocks);
+
+ Terminators.clear();
+ Terminators.reserve(NumBlocks);
+
+ BlockPosition Position(MF->getAlignment());
+ for (unsigned I = 0; I < NumBlocks; ++I) {
+ MachineBasicBlock *MBB = MF->getBlockNumbered(I);
+ MBBInfo &Block = MBBs[I];
+
+ // Record the alignment, for quick access.
+ Block.Alignment = MBB->getAlignment();
+
+ // Calculate the size of the fixed part of the block.
+ MachineBasicBlock::iterator MI = MBB->begin();
+ MachineBasicBlock::iterator End = MBB->end();
+ while (MI != End && !MI->isTerminator()) {
+ Block.Size += TII->getInstSizeInBytes(MI);
+ ++MI;
+ }
+ skipNonTerminators(Position, Block);
+
+ // Add the terminators.
+ while (MI != End) {
+ if (!MI->isDebugValue()) {
+ assert(MI->isTerminator() && "Terminator followed by non-terminator");
+ Terminators.push_back(describeTerminator(MI));
+ skipTerminator(Position, Terminators.back(), false);
+ ++Block.NumTerminators;
+ }
+ ++MI;
+ }
+ }
+
+ return Position.Address;
+}
+
+// Return true if, under current assumptions, Terminator would need to be
+// relaxed if it were placed at address Address.
+bool SystemZLongBranch::mustRelaxBranch(const TerminatorInfo &Terminator,
+ uint64_t Address) {
+ if (!Terminator.Branch)
+ return false;
+
+ const MBBInfo &Target = MBBs[Terminator.TargetBlock];
+ if (Address >= Target.Address) {
+ if (Address - Target.Address <= MaxBackwardRange)
+ return false;
+ } else {
+ if (Target.Address - Address <= MaxForwardRange)
+ return false;
+ }
+
+ return true;
+}
+
+// Return true if, under current assumptions, any terminator needs
+// to be relaxed.
+bool SystemZLongBranch::mustRelaxABranch() {
+ for (SmallVectorImpl<TerminatorInfo>::iterator TI = Terminators.begin(),
+ TE = Terminators.end(); TI != TE; ++TI)
+ if (mustRelaxBranch(*TI, TI->Address))
+ return true;
+ return false;
+}
+
+// Set the address of each block on the assumption that all branches
+// must be long.
+void SystemZLongBranch::setWorstCaseAddresses() {
+ SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
+ BlockPosition Position(MF->getAlignment());
+ for (SmallVectorImpl<MBBInfo>::iterator BI = MBBs.begin(), BE = MBBs.end();
+ BI != BE; ++BI) {
+ skipNonTerminators(Position, *BI);
+ for (unsigned BTI = 0, BTE = BI->NumTerminators; BTI != BTE; ++BTI) {
+ skipTerminator(Position, *TI, true);
+ ++TI;
+ }
+ }
+}
+
+// Split BRANCH ON COUNT MI into the addition given by AddOpcode followed
+// by a BRCL on the result.
+void SystemZLongBranch::splitBranchOnCount(MachineInstr *MI,
+ unsigned AddOpcode) {
+ MachineBasicBlock *MBB = MI->getParent();
+ DebugLoc DL = MI->getDebugLoc();
+ BuildMI(*MBB, MI, DL, TII->get(AddOpcode))
+ .addOperand(MI->getOperand(0))
+ .addOperand(MI->getOperand(1))
+ .addImm(-1);
+ MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL))
+ .addImm(SystemZ::CCMASK_ICMP)
+ .addImm(SystemZ::CCMASK_CMP_NE)
+ .addOperand(MI->getOperand(2));
+ // The implicit use of CC is a killing use.
+ BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo());
+ MI->eraseFromParent();
+}
+
+// Split MI into the comparison given by CompareOpcode followed
+// by a BRCL on the result.
+void SystemZLongBranch::splitCompareBranch(MachineInstr *MI,
+ unsigned CompareOpcode) {
+ MachineBasicBlock *MBB = MI->getParent();
+ DebugLoc DL = MI->getDebugLoc();
+ BuildMI(*MBB, MI, DL, TII->get(CompareOpcode))
+ .addOperand(MI->getOperand(0))
+ .addOperand(MI->getOperand(1));
+ MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL))
+ .addImm(SystemZ::CCMASK_ICMP)
+ .addOperand(MI->getOperand(2))
+ .addOperand(MI->getOperand(3));
+ // The implicit use of CC is a killing use.
+ BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo());
+ MI->eraseFromParent();
+}
+
+// Relax the branch described by Terminator.
+void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
+ MachineInstr *Branch = Terminator.Branch;
+ switch (Branch->getOpcode()) {
+ case SystemZ::J:
+ Branch->setDesc(TII->get(SystemZ::JG));
+ break;
+ case SystemZ::BRC:
+ Branch->setDesc(TII->get(SystemZ::BRCL));
+ break;
+ case SystemZ::BRCT:
+ splitBranchOnCount(Branch, SystemZ::AHI);
+ break;
+ case SystemZ::BRCTG:
+ splitBranchOnCount(Branch, SystemZ::AGHI);
+ break;
+ case SystemZ::CRJ:
+ splitCompareBranch(Branch, SystemZ::CR);
+ break;
+ case SystemZ::CGRJ:
+ splitCompareBranch(Branch, SystemZ::CGR);
+ break;
+ case SystemZ::CIJ:
+ splitCompareBranch(Branch, SystemZ::CHI);
+ break;
+ case SystemZ::CGIJ:
+ splitCompareBranch(Branch, SystemZ::CGHI);
+ break;
+ case SystemZ::CLRJ:
+ splitCompareBranch(Branch, SystemZ::CLR);
+ break;
+ case SystemZ::CLGRJ:
+ splitCompareBranch(Branch, SystemZ::CLGR);
+ break;
+ case SystemZ::CLIJ:
+ splitCompareBranch(Branch, SystemZ::CLFI);
+ break;
+ case SystemZ::CLGIJ:
+ splitCompareBranch(Branch, SystemZ::CLGFI);
+ break;
+ default:
+ llvm_unreachable("Unrecognized branch");
+ }
+
+ Terminator.Size += Terminator.ExtraRelaxSize;
+ Terminator.ExtraRelaxSize = 0;
+ Terminator.Branch = 0;
+
+ ++LongBranches;
+}
+
+// Run a shortening pass and relax any branches that need to be relaxed.
+void SystemZLongBranch::relaxBranches() {
+ SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
+ BlockPosition Position(MF->getAlignment());
+ for (SmallVectorImpl<MBBInfo>::iterator BI = MBBs.begin(), BE = MBBs.end();
+ BI != BE; ++BI) {
+ skipNonTerminators(Position, *BI);
+ for (unsigned BTI = 0, BTE = BI->NumTerminators; BTI != BTE; ++BTI) {
+ assert(Position.Address <= TI->Address &&
+ "Addresses shouldn't go forwards");
+ if (mustRelaxBranch(*TI, Position.Address))
+ relaxBranch(*TI);
+ skipTerminator(Position, *TI, false);
+ ++TI;
+ }
+ }
+}
+
+bool SystemZLongBranch::runOnMachineFunction(MachineFunction &F) {
+ TII = static_cast<const SystemZInstrInfo *>(F.getTarget().getInstrInfo());
+ MF = &F;
+ uint64_t Size = initMBBInfo();
+ if (Size <= MaxForwardRange || !mustRelaxABranch())
+ return false;
+
+ setWorstCaseAddresses();
+ relaxBranches();
+ return true;
+}
diff --git a/lib/Target/SystemZ/SystemZMCInstLower.cpp b/lib/Target/SystemZ/SystemZMCInstLower.cpp
index 5d833213e6f1..ff9a6c0a221f 100644
--- a/lib/Target/SystemZ/SystemZMCInstLower.cpp
+++ b/lib/Target/SystemZ/SystemZMCInstLower.cpp
@@ -15,20 +15,6 @@
using namespace llvm;
-// Where relaxable pairs of reloc-generating instructions exist,
-// we tend to use the longest form by default, since that produces
-// correct assembly in cases where no relaxation is performed.
-// If Opcode is one such instruction, return the opcode for the
-// shortest possible form instead, otherwise return Opcode itself.
-static unsigned getShortenedInstr(unsigned Opcode) {
- switch (Opcode) {
- case SystemZ::BRCL: return SystemZ::BRC;
- case SystemZ::JG: return SystemZ::J;
- case SystemZ::BRASL: return SystemZ::BRAS;
- }
- return Opcode;
-}
-
// Return the VK_* enumeration for MachineOperand target flags Flags.
static MCSymbolRefExpr::VariantKind getVariantKind(unsigned Flags) {
switch (Flags & SystemZII::MO_SYMBOL_MODIFIER) {
@@ -40,77 +26,75 @@ static MCSymbolRefExpr::VariantKind getVariantKind(unsigned Flags) {
llvm_unreachable("Unrecognised MO_ACCESS_MODEL");
}
-SystemZMCInstLower::SystemZMCInstLower(Mangler *mang, MCContext &ctx,
+SystemZMCInstLower::SystemZMCInstLower(MCContext &ctx,
SystemZAsmPrinter &asmprinter)
- : Mang(mang), Ctx(ctx), AsmPrinter(asmprinter) {}
+ : Ctx(ctx), AsmPrinter(asmprinter) {}
-MCOperand SystemZMCInstLower::lowerSymbolOperand(const MachineOperand &MO,
- const MCSymbol *Symbol,
- int64_t Offset) const {
- MCSymbolRefExpr::VariantKind Kind = getVariantKind(MO.getTargetFlags());
- const MCExpr *Expr = MCSymbolRefExpr::Create(Symbol, Kind, Ctx);
- if (Offset) {
- const MCExpr *OffsetExpr = MCConstantExpr::Create(Offset, Ctx);
- Expr = MCBinaryExpr::CreateAdd(Expr, OffsetExpr, Ctx);
+const MCExpr *
+SystemZMCInstLower::getExpr(const MachineOperand &MO,
+ MCSymbolRefExpr::VariantKind Kind) const {
+ const MCSymbol *Symbol;
+ bool HasOffset = true;
+ switch (MO.getType()) {
+ case MachineOperand::MO_MachineBasicBlock:
+ Symbol = MO.getMBB()->getSymbol();
+ HasOffset = false;
+ break;
+
+ case MachineOperand::MO_GlobalAddress:
+ Symbol = AsmPrinter.getSymbol(MO.getGlobal());
+ break;
+
+ case MachineOperand::MO_ExternalSymbol:
+ Symbol = AsmPrinter.GetExternalSymbolSymbol(MO.getSymbolName());
+ break;
+
+ case MachineOperand::MO_JumpTableIndex:
+ Symbol = AsmPrinter.GetJTISymbol(MO.getIndex());
+ HasOffset = false;
+ break;
+
+ case MachineOperand::MO_ConstantPoolIndex:
+ Symbol = AsmPrinter.GetCPISymbol(MO.getIndex());
+ break;
+
+ case MachineOperand::MO_BlockAddress:
+ Symbol = AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress());
+ break;
+
+ default:
+ llvm_unreachable("unknown operand type");
}
- return MCOperand::CreateExpr(Expr);
+ const MCExpr *Expr = MCSymbolRefExpr::Create(Symbol, Kind, Ctx);
+ if (HasOffset)
+ if (int64_t Offset = MO.getOffset()) {
+ const MCExpr *OffsetExpr = MCConstantExpr::Create(Offset, Ctx);
+ Expr = MCBinaryExpr::CreateAdd(Expr, OffsetExpr, Ctx);
+ }
+ return Expr;
}
MCOperand SystemZMCInstLower::lowerOperand(const MachineOperand &MO) const {
switch (MO.getType()) {
- default:
- llvm_unreachable("unknown operand type");
-
case MachineOperand::MO_Register:
- // Ignore all implicit register operands.
- if (MO.isImplicit())
- return MCOperand();
return MCOperand::CreateReg(MO.getReg());
case MachineOperand::MO_Immediate:
return MCOperand::CreateImm(MO.getImm());
- case MachineOperand::MO_MachineBasicBlock:
- return lowerSymbolOperand(MO, MO.getMBB()->getSymbol(),
- /* MO has no offset field */0);
-
- case MachineOperand::MO_GlobalAddress:
- return lowerSymbolOperand(MO, Mang->getSymbol(MO.getGlobal()),
- MO.getOffset());
-
- case MachineOperand::MO_ExternalSymbol: {
- StringRef Name = MO.getSymbolName();
- return lowerSymbolOperand(MO, AsmPrinter.GetExternalSymbolSymbol(Name),
- MO.getOffset());
- }
-
- case MachineOperand::MO_JumpTableIndex:
- return lowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()),
- /* MO has no offset field */0);
-
- case MachineOperand::MO_ConstantPoolIndex:
- return lowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()),
- MO.getOffset());
-
- case MachineOperand::MO_BlockAddress: {
- const BlockAddress *BA = MO.getBlockAddress();
- return lowerSymbolOperand(MO, AsmPrinter.GetBlockAddressSymbol(BA),
- MO.getOffset());
+ default: {
+ MCSymbolRefExpr::VariantKind Kind = getVariantKind(MO.getTargetFlags());
+ return MCOperand::CreateExpr(getExpr(MO, Kind));
}
}
}
void SystemZMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
- unsigned Opcode = MI->getOpcode();
- // When emitting binary code, start with the shortest form of an instruction
- // and then relax it where necessary.
- if (!AsmPrinter.OutStreamer.hasRawTextSupport())
- Opcode = getShortenedInstr(Opcode);
- OutMI.setOpcode(Opcode);
+ OutMI.setOpcode(MI->getOpcode());
for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
const MachineOperand &MO = MI->getOperand(I);
- MCOperand MCOp = lowerOperand(MO);
- if (MCOp.isValid())
- OutMI.addOperand(MCOp);
+ // Ignore all implicit register operands.
+ if (!MO.isReg() || !MO.isImplicit())
+ OutMI.addOperand(lowerOperand(MO));
}
}
diff --git a/lib/Target/SystemZ/SystemZMCInstLower.h b/lib/Target/SystemZ/SystemZMCInstLower.h
index afa72f38add3..f6d5ac8c285d 100644
--- a/lib/Target/SystemZ/SystemZMCInstLower.h
+++ b/lib/Target/SystemZ/SystemZMCInstLower.h
@@ -10,37 +10,34 @@
#ifndef LLVM_SYSTEMZMCINSTLOWER_H
#define LLVM_SYSTEMZMCINSTLOWER_H
+#include "llvm/MC/MCExpr.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
-class MCContext;
class MCInst;
class MCOperand;
-class MCSymbol;
class MachineInstr;
class MachineOperand;
class Mangler;
class SystemZAsmPrinter;
class LLVM_LIBRARY_VISIBILITY SystemZMCInstLower {
- Mangler *Mang;
MCContext &Ctx;
SystemZAsmPrinter &AsmPrinter;
public:
- SystemZMCInstLower(Mangler *mang, MCContext &ctx,
- SystemZAsmPrinter &asmPrinter);
+ SystemZMCInstLower(MCContext &ctx, SystemZAsmPrinter &asmPrinter);
// Lower MachineInstr MI to MCInst OutMI.
void lower(const MachineInstr *MI, MCInst &OutMI) const;
- // Return an MCOperand for MO. Return an empty operand if MO is implicit.
+ // Return an MCOperand for MO.
MCOperand lowerOperand(const MachineOperand& MO) const;
- // Return an MCOperand for MO, given that it equals Symbol + Offset.
- MCOperand lowerSymbolOperand(const MachineOperand &MO,
- const MCSymbol *Symbol, int64_t Offset) const;
+ // Return an MCExpr for symbolic operand MO with variant kind Kind.
+ const MCExpr *getExpr(const MachineOperand &MO,
+ MCSymbolRefExpr::VariantKind Kind) const;
};
} // end namespace llvm
diff --git a/lib/Target/NVPTX/NVPTXNumRegisters.h b/lib/Target/SystemZ/SystemZMachineFunctionInfo.cpp
index a95c16b1e67e..00572d0b9d79 100644
--- a/lib/Target/NVPTX/NVPTXNumRegisters.h
+++ b/lib/Target/SystemZ/SystemZMachineFunctionInfo.cpp
@@ -1,5 +1,4 @@
-
-//===-- NVPTXNumRegisters.h - PTX Register Info ---------------------------===//
+//== SystemZMachineFunctionInfo.cpp - SystemZ machine function info-*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -8,9 +7,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef NVPTX_NUM_REGISTERS_H
-#define NVPTX_NUM_REGISTERS_H
+#include "SystemZMachineFunctionInfo.h"
+
+using namespace llvm;
+
-namespace llvm { const unsigned NVPTXNumRegisters = 396; }
+// pin vtable to this file
+void SystemZMachineFunctionInfo::anchor() {}
-#endif
diff --git a/lib/Target/SystemZ/SystemZMachineFunctionInfo.h b/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
index 1dc05a7e12b6..845291f4f3fa 100644
--- a/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
+++ b/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
@@ -15,7 +15,7 @@
namespace llvm {
class SystemZMachineFunctionInfo : public MachineFunctionInfo {
- unsigned SavedGPRFrameSize;
+ virtual void anchor();
unsigned LowSavedGPR;
unsigned HighSavedGPR;
unsigned VarArgsFirstGPR;
@@ -26,14 +26,8 @@ class SystemZMachineFunctionInfo : public MachineFunctionInfo {
public:
explicit SystemZMachineFunctionInfo(MachineFunction &MF)
- : SavedGPRFrameSize(0), LowSavedGPR(0), HighSavedGPR(0), VarArgsFirstGPR(0),
- VarArgsFirstFPR(0), VarArgsFrameIndex(0), RegSaveFrameIndex(0),
- ManipulatesSP(false) {}
-
- // Get and set the number of bytes allocated by generic code to store
- // call-saved GPRs.
- unsigned getSavedGPRFrameSize() const { return SavedGPRFrameSize; }
- void setSavedGPRFrameSize(unsigned bytes) { SavedGPRFrameSize = bytes; }
+ : LowSavedGPR(0), HighSavedGPR(0), VarArgsFirstGPR(0), VarArgsFirstFPR(0),
+ VarArgsFrameIndex(0), RegSaveFrameIndex(0), ManipulatesSP(false) {}
// Get and set the first call-saved GPR that should be saved and restored
// by this function. This is 0 if no GPRs need to be saved or restored.
diff --git a/lib/Target/SystemZ/SystemZOperands.td b/lib/Target/SystemZ/SystemZOperands.td
index 0abc3f7517e0..3ad146c57d92 100644
--- a/lib/Target/SystemZ/SystemZOperands.td
+++ b/lib/Target/SystemZ/SystemZOperands.td
@@ -24,57 +24,93 @@ class ImmediateAsmOperand<string name>
class Immediate<ValueType vt, code pred, SDNodeXForm xform, string asmop>
: PatLeaf<(vt imm), pred, xform>, Operand<vt> {
let PrintMethod = "print"##asmop##"Operand";
+ let DecoderMethod = "decode"##asmop##"Operand";
let ParserMatchClass = !cast<AsmOperandClass>(asmop);
}
+// Constructs an asm operand for a PC-relative address. SIZE says how
+// many bits there are.
+class PCRelAsmOperand<string size> : ImmediateAsmOperand<"PCRel"##size> {
+ let PredicateMethod = "isImm";
+ let ParserMethod = "parsePCRel"##size;
+}
+
+// Constructs an operand for a PC-relative address with address type VT.
+// ASMOP is the associated asm operand.
+class PCRelOperand<ValueType vt, AsmOperandClass asmop> : Operand<vt> {
+ let PrintMethod = "printPCRelOperand";
+ let ParserMatchClass = asmop;
+}
+
// Constructs both a DAG pattern and instruction operand for a PC-relative
-// address with address size VT. SELF is the name of the operand.
-class PCRelAddress<ValueType vt, string self>
- : ComplexPattern<vt, 1, "selectPCRelAddress", [z_pcrel_wrapper]>,
- Operand<vt> {
+// address with address size VT. SELF is the name of the operand and
+// ASMOP is the associated asm operand.
+class PCRelAddress<ValueType vt, string self, AsmOperandClass asmop>
+ : ComplexPattern<vt, 1, "selectPCRelAddress",
+ [z_pcrel_wrapper, z_pcrel_offset]>,
+ PCRelOperand<vt, asmop> {
let MIOperandInfo = (ops !cast<Operand>(self));
}
// Constructs an AsmOperandClass for addressing mode FORMAT, treating the
// registers as having BITSIZE bits and displacements as having DISPSIZE bits.
-class AddressAsmOperand<string format, string bitsize, string dispsize>
+// LENGTH is "LenN" for addresses with an N-bit length field, otherwise it
+// is "".
+class AddressAsmOperand<string format, string bitsize, string dispsize,
+ string length = "">
: AsmOperandClass {
- let Name = format##bitsize##"Disp"##dispsize;
+ let Name = format##bitsize##"Disp"##dispsize##length;
let ParserMethod = "parse"##format##bitsize;
let RenderMethod = "add"##format##"Operands";
}
// Constructs both a DAG pattern and instruction operand for an addressing mode.
-// The mode is selected by custom code in selectTYPE...SUFFIX(). The address
-// registers have BITSIZE bits and displacements have DISPSIZE bits. NUMOPS is
-// the number of operands that make up an address and OPERANDS lists the types
-// of those operands using (ops ...). FORMAT is the type of addressing mode,
-// which needs to match the names used in AddressAsmOperand.
-class AddressingMode<string type, string bitsize, string dispsize,
- string suffix, int numops, string format, dag operands>
+// FORMAT, BITSIZE, DISPSIZE and LENGTH are the parameters to an associated
+// AddressAsmOperand. OPERANDS is a list of NUMOPS individual operands
+// (base register, displacement, etc.). SELTYPE is the type of the memory
+// operand for selection purposes; sometimes we want different selection
+// choices for the same underlying addressing mode. SUFFIX is similarly
+// a suffix appended to the displacement for selection purposes;
+// e.g. we want to reject small 20-bit displacements if a 12-bit form
+// also exists, but we want to accept them otherwise.
+class AddressingMode<string seltype, string bitsize, string dispsize,
+ string suffix, string length, int numops, string format,
+ dag operands>
: ComplexPattern<!cast<ValueType>("i"##bitsize), numops,
- "select"##type##dispsize##suffix,
+ "select"##seltype##dispsize##suffix##length,
[add, sub, or, frameindex, z_adjdynalloc]>,
Operand<!cast<ValueType>("i"##bitsize)> {
let PrintMethod = "print"##format##"Operand";
+ let EncoderMethod = "get"##format##dispsize##length##"Encoding";
+ let DecoderMethod =
+ "decode"##format##bitsize##"Disp"##dispsize##length##"Operand";
let MIOperandInfo = operands;
let ParserMatchClass =
- !cast<AddressAsmOperand>(format##bitsize##"Disp"##dispsize);
+ !cast<AddressAsmOperand>(format##bitsize##"Disp"##dispsize##length);
}
// An addressing mode with a base and displacement but no index.
class BDMode<string type, string bitsize, string dispsize, string suffix>
- : AddressingMode<type, bitsize, dispsize, suffix, 2, "BDAddr",
+ : AddressingMode<type, bitsize, dispsize, suffix, "", 2, "BDAddr",
(ops !cast<RegisterOperand>("ADDR"##bitsize),
!cast<Immediate>("disp"##dispsize##"imm"##bitsize))>;
// An addressing mode with a base, displacement and index.
class BDXMode<string type, string bitsize, string dispsize, string suffix>
- : AddressingMode<type, bitsize, dispsize, suffix, 3, "BDXAddr",
+ : AddressingMode<type, bitsize, dispsize, suffix, "", 3, "BDXAddr",
(ops !cast<RegisterOperand>("ADDR"##bitsize),
!cast<Immediate>("disp"##dispsize##"imm"##bitsize),
!cast<RegisterOperand>("ADDR"##bitsize))>;
+// A BDMode paired with an immediate length operand of LENSIZE bits.
+class BDLMode<string type, string bitsize, string dispsize, string suffix,
+ string lensize>
+ : AddressingMode<type, bitsize, dispsize, suffix, "Len"##lensize, 3,
+ "BDLAddr",
+ (ops !cast<RegisterOperand>("ADDR"##bitsize),
+ !cast<Immediate>("disp"##dispsize##"imm"##bitsize),
+ !cast<Immediate>("imm"##bitsize))>;
+
//===----------------------------------------------------------------------===//
// Extracting immediate operands from nodes
// These all create MVT::i64 nodes to ensure the value is not sign-extended
@@ -298,6 +334,10 @@ def imm64sx8 : Immediate<i64, [{
return isInt<8>(N->getSExtValue());
}], SIMM8, "S8Imm">;
+def imm64zx8 : Immediate<i64, [{
+ return isUInt<8>(N->getSExtValue());
+}], UIMM8, "U8Imm">;
+
def imm64sx16 : Immediate<i64, [{
return isInt<16>(N->getSExtValue());
}], SIMM16, "S16Imm">;
@@ -318,7 +358,7 @@ def imm64zx32n : Immediate<i64, [{
return isUInt<32>(-N->getSExtValue());
}], NEGIMM32, "U32Imm">;
-def imm64 : ImmLeaf<i64, [{}]>;
+def imm64 : ImmLeaf<i64, [{}]>, Operand<i64>;
//===----------------------------------------------------------------------===//
// Floating-point immediates
@@ -334,30 +374,26 @@ def fpimmneg0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(-0.0); }]>;
// Symbolic address operands
//===----------------------------------------------------------------------===//
+// PC-relative asm operands.
+def PCRel16 : PCRelAsmOperand<"16">;
+def PCRel32 : PCRelAsmOperand<"32">;
+
// PC-relative offsets of a basic block. The offset is sign-extended
// and multiplied by 2.
-def brtarget16 : Operand<OtherVT> {
+def brtarget16 : PCRelOperand<OtherVT, PCRel16> {
let EncoderMethod = "getPC16DBLEncoding";
+ let DecoderMethod = "decodePC16DBLOperand";
}
-def brtarget32 : Operand<OtherVT> {
+def brtarget32 : PCRelOperand<OtherVT, PCRel32> {
let EncoderMethod = "getPC32DBLEncoding";
+ let DecoderMethod = "decodePC32DBLOperand";
}
// A PC-relative offset of a global value. The offset is sign-extended
// and multiplied by 2.
-def pcrel32 : PCRelAddress<i64, "pcrel32"> {
+def pcrel32 : PCRelAddress<i64, "pcrel32", PCRel32> {
let EncoderMethod = "getPC32DBLEncoding";
-}
-
-// A PC-relative offset of a global value when the value is used as a
-// call target. The offset is sign-extended and multiplied by 2.
-def pcrel16call : PCRelAddress<i64, "pcrel16call"> {
- let PrintMethod = "printCallOperand";
- let EncoderMethod = "getPLT16DBLEncoding";
-}
-def pcrel32call : PCRelAddress<i64, "pcrel32call"> {
- let PrintMethod = "printCallOperand";
- let EncoderMethod = "getPLT32DBLEncoding";
+ let DecoderMethod = "decodePC32DBLOperand";
}
//===----------------------------------------------------------------------===//
@@ -372,22 +408,25 @@ def disp12imm64 : Operand<i64>;
def disp20imm32 : Operand<i32>;
def disp20imm64 : Operand<i64>;
-def BDAddr32Disp12 : AddressAsmOperand<"BDAddr", "32", "12">;
-def BDAddr32Disp20 : AddressAsmOperand<"BDAddr", "32", "20">;
-def BDAddr64Disp12 : AddressAsmOperand<"BDAddr", "64", "12">;
-def BDAddr64Disp20 : AddressAsmOperand<"BDAddr", "64", "20">;
-def BDXAddr64Disp12 : AddressAsmOperand<"BDXAddr", "64", "12">;
-def BDXAddr64Disp20 : AddressAsmOperand<"BDXAddr", "64", "20">;
+def BDAddr32Disp12 : AddressAsmOperand<"BDAddr", "32", "12">;
+def BDAddr32Disp20 : AddressAsmOperand<"BDAddr", "32", "20">;
+def BDAddr64Disp12 : AddressAsmOperand<"BDAddr", "64", "12">;
+def BDAddr64Disp20 : AddressAsmOperand<"BDAddr", "64", "20">;
+def BDXAddr64Disp12 : AddressAsmOperand<"BDXAddr", "64", "12">;
+def BDXAddr64Disp20 : AddressAsmOperand<"BDXAddr", "64", "20">;
+def BDLAddr64Disp12Len8 : AddressAsmOperand<"BDLAddr", "64", "12", "Len8">;
// DAG patterns and operands for addressing modes. Each mode has
-// the form <type><range><group> where:
+// the form <type><range><group>[<len>] where:
//
// <type> is one of:
// shift : base + displacement (32-bit)
// bdaddr : base + displacement
+// mviaddr : like bdaddr, but reject cases with a natural index
// bdxaddr : base + displacement + index
// laaddr : like bdxaddr, but used for Load Address operations
// dynalloc : base + displacement + index + ADJDYNALLOC
+// bdladdr : base + displacement with a length field
//
// <range> is one of:
// 12 : the displacement is an unsigned 12-bit value
@@ -398,20 +437,28 @@ def BDXAddr64Disp20 : AddressAsmOperand<"BDXAddr", "64", "20">;
// range value (12 or 20)
// only : used when there is no equivalent instruction with the opposite
// range value
-def shift12only : BDMode <"BDAddr", "32", "12", "Only">;
-def shift20only : BDMode <"BDAddr", "32", "20", "Only">;
-def bdaddr12only : BDMode <"BDAddr", "64", "12", "Only">;
-def bdaddr12pair : BDMode <"BDAddr", "64", "12", "Pair">;
-def bdaddr20only : BDMode <"BDAddr", "64", "20", "Only">;
-def bdaddr20pair : BDMode <"BDAddr", "64", "20", "Pair">;
-def bdxaddr12only : BDXMode<"BDXAddr", "64", "12", "Only">;
-def bdxaddr12pair : BDXMode<"BDXAddr", "64", "12", "Pair">;
-def bdxaddr20only : BDXMode<"BDXAddr", "64", "20", "Only">;
-def bdxaddr20only128 : BDXMode<"BDXAddr", "64", "20", "Only128">;
-def bdxaddr20pair : BDXMode<"BDXAddr", "64", "20", "Pair">;
-def dynalloc12only : BDXMode<"DynAlloc", "64", "12", "Only">;
-def laaddr12pair : BDXMode<"LAAddr", "64", "12", "Pair">;
-def laaddr20pair : BDXMode<"LAAddr", "64", "20", "Pair">;
+//
+// <len> is one of:
+//
+// <empty> : there is no length field
+// len8 : the length field is 8 bits, with a range of [1, 0x100].
+def shift12only : BDMode <"BDAddr", "32", "12", "Only">;
+def shift20only : BDMode <"BDAddr", "32", "20", "Only">;
+def bdaddr12only : BDMode <"BDAddr", "64", "12", "Only">;
+def bdaddr12pair : BDMode <"BDAddr", "64", "12", "Pair">;
+def bdaddr20only : BDMode <"BDAddr", "64", "20", "Only">;
+def bdaddr20pair : BDMode <"BDAddr", "64", "20", "Pair">;
+def mviaddr12pair : BDMode <"MVIAddr", "64", "12", "Pair">;
+def mviaddr20pair : BDMode <"MVIAddr", "64", "20", "Pair">;
+def bdxaddr12only : BDXMode<"BDXAddr", "64", "12", "Only">;
+def bdxaddr12pair : BDXMode<"BDXAddr", "64", "12", "Pair">;
+def bdxaddr20only : BDXMode<"BDXAddr", "64", "20", "Only">;
+def bdxaddr20only128 : BDXMode<"BDXAddr", "64", "20", "Only128">;
+def bdxaddr20pair : BDXMode<"BDXAddr", "64", "20", "Pair">;
+def dynalloc12only : BDXMode<"DynAlloc", "64", "12", "Only">;
+def laaddr12pair : BDXMode<"LAAddr", "64", "12", "Pair">;
+def laaddr20pair : BDXMode<"LAAddr", "64", "20", "Pair">;
+def bdladdr12onlylen8 : BDLMode<"BDLAddr", "64", "12", "Only", "8">;
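+// bdladdr12onlylen8 corresponds to the first operand of SS-format
+// instructions such as MVC: a base register, a 12-bit displacement and an
+// 8-bit length field.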
//===----------------------------------------------------------------------===//
// Miscellaneous
diff --git a/lib/Target/SystemZ/SystemZOperators.td b/lib/Target/SystemZ/SystemZOperators.td
index 8c4df56b4615..31cabaa3413f 100644
--- a/lib/Target/SystemZ/SystemZOperators.td
+++ b/lib/Target/SystemZ/SystemZOperators.td
@@ -15,16 +15,25 @@ def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i64>,
SDTCisVT<1, i64>]>;
def SDT_ZCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
def SDT_ZCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
-def SDT_ZBRCCMask : SDTypeProfile<0, 2,
+def SDT_ZICmp : SDTypeProfile<0, 3,
+ [SDTCisSameAs<0, 1>,
+ SDTCisVT<2, i32>]>;
+def SDT_ZBRCCMask : SDTypeProfile<0, 3,
[SDTCisVT<0, i8>,
- SDTCisVT<1, OtherVT>]>;
-def SDT_ZSelectCCMask : SDTypeProfile<1, 3,
+ SDTCisVT<1, i8>,
+ SDTCisVT<2, OtherVT>]>;
+def SDT_ZSelectCCMask : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>,
SDTCisSameAs<1, 2>,
- SDTCisVT<3, i8>]>;
+ SDTCisVT<3, i8>,
+ SDTCisVT<4, i8>]>;
def SDT_ZWrapPtr : SDTypeProfile<1, 1,
[SDTCisSameAs<0, 1>,
SDTCisPtrTy<0>]>;
+def SDT_ZWrapOffset : SDTypeProfile<1, 2,
+ [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisPtrTy<0>]>;
def SDT_ZAdjDynAlloc : SDTypeProfile<1, 0, [SDTCisVT<0, i64>]>;
def SDT_ZExtractAccess : SDTypeProfile<1, 1,
[SDTCisVT<0, i32>,
@@ -52,6 +61,24 @@ def SDT_ZAtomicCmpSwapW : SDTypeProfile<1, 6,
SDTCisVT<4, i32>,
SDTCisVT<5, i32>,
SDTCisVT<6, i32>]>;
+def SDT_ZMemMemLength : SDTypeProfile<0, 3,
+ [SDTCisPtrTy<0>,
+ SDTCisPtrTy<1>,
+ SDTCisVT<2, i64>]>;
+def SDT_ZMemMemLoop : SDTypeProfile<0, 4,
+ [SDTCisPtrTy<0>,
+ SDTCisPtrTy<1>,
+ SDTCisVT<2, i64>,
+ SDTCisVT<3, i64>]>;
+def SDT_ZString : SDTypeProfile<1, 3,
+ [SDTCisPtrTy<0>,
+ SDTCisPtrTy<1>,
+ SDTCisPtrTy<2>,
+ SDTCisVT<3, i32>]>;
+def SDT_ZI32Intrinsic : SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>;
+def SDT_ZPrefetch : SDTypeProfile<0, 2,
+ [SDTCisVT<0, i8>,
+ SDTCisPtrTy<1>]>;
//===----------------------------------------------------------------------===//
// Node definitions
@@ -70,9 +97,15 @@ def z_retflag : SDNode<"SystemZISD::RET_FLAG", SDTNone,
def z_call : SDNode<"SystemZISD::CALL", SDT_ZCall,
[SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
SDNPVariadic]>;
+def z_sibcall : SDNode<"SystemZISD::SIBCALL", SDT_ZCall,
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
+ SDNPVariadic]>;
def z_pcrel_wrapper : SDNode<"SystemZISD::PCREL_WRAPPER", SDT_ZWrapPtr, []>;
-def z_cmp : SDNode<"SystemZISD::CMP", SDT_ZCmp, [SDNPOutGlue]>;
-def z_ucmp : SDNode<"SystemZISD::UCMP", SDT_ZCmp, [SDNPOutGlue]>;
+def z_pcrel_offset : SDNode<"SystemZISD::PCREL_OFFSET",
+ SDT_ZWrapOffset, []>;
+def z_icmp : SDNode<"SystemZISD::ICMP", SDT_ZICmp, [SDNPOutGlue]>;
+def z_fcmp : SDNode<"SystemZISD::FCMP", SDT_ZCmp, [SDNPOutGlue]>;
+def z_tm : SDNode<"SystemZISD::TM", SDT_ZICmp, [SDNPOutGlue]>;
def z_br_ccmask : SDNode<"SystemZISD::BR_CCMASK", SDT_ZBRCCMask,
[SDNPHasChain, SDNPInGlue]>;
def z_select_ccmask : SDNode<"SystemZISD::SELECT_CCMASK", SDT_ZSelectCCMask,
@@ -81,6 +114,7 @@ def z_adjdynalloc : SDNode<"SystemZISD::ADJDYNALLOC", SDT_ZAdjDynAlloc>;
def z_extract_access : SDNode<"SystemZISD::EXTRACT_ACCESS",
SDT_ZExtractAccess>;
def z_umul_lohi64 : SDNode<"SystemZISD::UMUL_LOHI64", SDT_ZGR128Binary64>;
+def z_sdivrem32 : SDNode<"SystemZISD::SDIVREM32", SDT_ZGR128Binary32>;
def z_sdivrem64 : SDNode<"SystemZISD::SDIVREM64", SDT_ZGR128Binary64>;
def z_udivrem32 : SDNode<"SystemZISD::UDIVREM32", SDT_ZGR128Binary32>;
def z_udivrem64 : SDNode<"SystemZISD::UDIVREM64", SDT_ZGR128Binary64>;
@@ -102,10 +136,56 @@ def z_atomic_loadw_umin : AtomicWOp<"ATOMIC_LOADW_UMIN">;
def z_atomic_loadw_umax : AtomicWOp<"ATOMIC_LOADW_UMAX">;
def z_atomic_cmp_swapw : AtomicWOp<"ATOMIC_CMP_SWAPW", SDT_ZAtomicCmpSwapW>;
+def z_mvc : SDNode<"SystemZISD::MVC", SDT_ZMemMemLength,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_mvc_loop : SDNode<"SystemZISD::MVC_LOOP", SDT_ZMemMemLoop,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_nc : SDNode<"SystemZISD::NC", SDT_ZMemMemLength,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_nc_loop : SDNode<"SystemZISD::NC_LOOP", SDT_ZMemMemLoop,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_oc : SDNode<"SystemZISD::OC", SDT_ZMemMemLength,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_oc_loop : SDNode<"SystemZISD::OC_LOOP", SDT_ZMemMemLoop,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_xc : SDNode<"SystemZISD::XC", SDT_ZMemMemLength,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_xc_loop : SDNode<"SystemZISD::XC_LOOP", SDT_ZMemMemLoop,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_clc : SDNode<"SystemZISD::CLC", SDT_ZMemMemLength,
+ [SDNPHasChain, SDNPOutGlue, SDNPMayLoad]>;
+def z_clc_loop : SDNode<"SystemZISD::CLC_LOOP", SDT_ZMemMemLoop,
+ [SDNPHasChain, SDNPOutGlue, SDNPMayLoad]>;
+def z_strcmp : SDNode<"SystemZISD::STRCMP", SDT_ZString,
+ [SDNPHasChain, SDNPOutGlue, SDNPMayLoad]>;
+def z_stpcpy : SDNode<"SystemZISD::STPCPY", SDT_ZString,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_search_string : SDNode<"SystemZISD::SEARCH_STRING", SDT_ZString,
+ [SDNPHasChain, SDNPOutGlue, SDNPMayLoad]>;
+def z_ipm : SDNode<"SystemZISD::IPM", SDT_ZI32Intrinsic,
+ [SDNPInGlue]>;
+def z_prefetch : SDNode<"SystemZISD::PREFETCH", SDT_ZPrefetch,
+ [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
+ SDNPMemOperand]>;
+
//===----------------------------------------------------------------------===//
// Pattern fragments
//===----------------------------------------------------------------------===//
+// Signed and unsigned comparisons.
+def z_scmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, imm), [{
+ unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ return Type != SystemZICMP::UnsignedOnly;
+}]>;
+def z_ucmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, imm), [{
+ unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ return Type != SystemZICMP::SignedOnly;
+}]>;
+
+// Register- and memory-based TEST UNDER MASK.
+def z_tm_reg : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, imm)>;
+def z_tm_mem : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, 0)>;
+
// Register sign-extend operations. Sub-32-bit values are represented as i32s.
def sext8 : PatFrag<(ops node:$src), (sext_inreg node:$src, i8)>;
def sext16 : PatFrag<(ops node:$src), (sext_inreg node:$src, i16)>;
@@ -120,17 +200,61 @@ def zext32 : PatFrag<(ops node:$src), (zext (i32 node:$src))>;
def loadf32 : PatFrag<(ops node:$src), (f32 (load node:$src))>;
def loadf64 : PatFrag<(ops node:$src), (f64 (load node:$src))>;
+// Extending loads in which the extension type can be signed.
+def asextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ unsigned Type = cast<LoadSDNode>(N)->getExtensionType();
+ return Type == ISD::EXTLOAD || Type == ISD::SEXTLOAD;
+}]>;
+def asextloadi8 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def asextloadi16 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def asextloadi32 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+// Extending loads in which the extension type can be unsigned.
+def azextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ unsigned Type = cast<LoadSDNode>(N)->getExtensionType();
+ return Type == ISD::EXTLOAD || Type == ISD::ZEXTLOAD;
+}]>;
+def azextloadi8 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def azextloadi16 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def azextloadi32 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+// Extending loads in which the extension type doesn't matter.
+def anyextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getExtensionType() != ISD::NON_EXTLOAD;
+}]>;
+def anyextloadi8 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def anyextloadi16 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def anyextloadi32 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
// Aligned loads.
class AlignedLoad<SDPatternOperator load>
: PatFrag<(ops node:$addr), (load node:$addr), [{
LoadSDNode *Load = cast<LoadSDNode>(N);
return Load->getAlignment() >= Load->getMemoryVT().getStoreSize();
}]>;
-def aligned_load : AlignedLoad<load>;
-def aligned_sextloadi16 : AlignedLoad<sextloadi16>;
-def aligned_sextloadi32 : AlignedLoad<sextloadi32>;
-def aligned_zextloadi16 : AlignedLoad<zextloadi16>;
-def aligned_zextloadi32 : AlignedLoad<zextloadi32>;
+def aligned_load : AlignedLoad<load>;
+def aligned_asextloadi16 : AlignedLoad<asextloadi16>;
+def aligned_asextloadi32 : AlignedLoad<asextloadi32>;
+def aligned_azextloadi16 : AlignedLoad<azextloadi16>;
+def aligned_azextloadi32 : AlignedLoad<azextloadi32>;
// Aligned stores.
class AlignedStore<SDPatternOperator store>
@@ -142,6 +266,54 @@ def aligned_store : AlignedStore<store>;
def aligned_truncstorei16 : AlignedStore<truncstorei16>;
def aligned_truncstorei32 : AlignedStore<truncstorei32>;
+// Non-volatile loads. Used for instructions that might access the storage
+// location multiple times.
+class NonvolatileLoad<SDPatternOperator load>
+ : PatFrag<(ops node:$addr), (load node:$addr), [{
+ LoadSDNode *Load = cast<LoadSDNode>(N);
+ return !Load->isVolatile();
+}]>;
+def nonvolatile_load : NonvolatileLoad<load>;
+def nonvolatile_anyextloadi8 : NonvolatileLoad<anyextloadi8>;
+def nonvolatile_anyextloadi16 : NonvolatileLoad<anyextloadi16>;
+def nonvolatile_anyextloadi32 : NonvolatileLoad<anyextloadi32>;
+
+// Non-volatile stores.
+class NonvolatileStore<SDPatternOperator store>
+ : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
+ StoreSDNode *Store = cast<StoreSDNode>(N);
+ return !Store->isVolatile();
+}]>;
+def nonvolatile_store : NonvolatileStore<store>;
+def nonvolatile_truncstorei8 : NonvolatileStore<truncstorei8>;
+def nonvolatile_truncstorei16 : NonvolatileStore<truncstorei16>;
+def nonvolatile_truncstorei32 : NonvolatileStore<truncstorei32>;
+
+// A store of a load that can be implemented using MVC.
+def mvc_store : PatFrag<(ops node:$value, node:$addr),
+ (unindexedstore node:$value, node:$addr),
+ [{ return storeLoadCanUseMVC(N); }]>;
+
+// Binary read-modify-write operations on memory in which the other
+// operand is also memory and for which block operations like NC can
+// be used. There are two patterns for each operator, depending on
+// which operand contains the "other" load.
+multiclass block_op<SDPatternOperator operator> {
+ def "1" : PatFrag<(ops node:$value, node:$addr),
+ (unindexedstore (operator node:$value,
+ (unindexedload node:$addr)),
+ node:$addr),
+ [{ return storeLoadCanUseBlockBinary(N, 0); }]>;
+ def "2" : PatFrag<(ops node:$value, node:$addr),
+ (unindexedstore (operator (unindexedload node:$addr),
+ node:$value),
+ node:$addr),
+ [{ return storeLoadCanUseBlockBinary(N, 1); }]>;
+}
+defm block_and : block_op<and>;
+defm block_or : block_op<or>;
+defm block_xor : block_op<xor>;
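+// For example, block_and1 matches (store (and $value, (load $addr)), $addr)
+// and block_and2 matches the commuted (store (and (load $addr), $value), $addr);
+// in both cases the predicate checks that $value is itself a suitable load.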
+
// Insertions.
def inserti8 : PatFrag<(ops node:$src1, node:$src2),
(or (and node:$src1, -256), node:$src2)>;
@@ -174,6 +346,16 @@ def or_as_revinserti8 : PatFrag<(ops node:$src1, node:$src2),
APInt::getLowBitsSet(BitWidth, 8));
}]>;
+// Integer absolute, matching the canonical form generated by DAGCombiner.
+def z_iabs32 : PatFrag<(ops node:$src),
+ (xor (add node:$src, (sra node:$src, (i32 31))),
+ (sra node:$src, (i32 31)))>;
+def z_iabs64 : PatFrag<(ops node:$src),
+ (xor (add node:$src, (sra node:$src, (i32 63))),
+ (sra node:$src, (i32 63)))>;
+def z_inegabs32 : PatFrag<(ops node:$src), (ineg (z_iabs32 node:$src))>;
+def z_inegabs64 : PatFrag<(ops node:$src), (ineg (z_iabs64 node:$src))>;
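+// (For negative SRC the shifts yield all ones, so the pattern computes
+// ~(SRC - 1) = -SRC; for non-negative SRC they yield zero and SRC is
+// returned unchanged.)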
+
// Fused multiply-add and multiply-subtract, but with the order of the
// operands matching SystemZ's MA and MS instructions.
def z_fma : PatFrag<(ops node:$src1, node:$src2, node:$src3),
@@ -186,11 +368,11 @@ def fnabs : PatFrag<(ops node:$ptr), (fneg (fabs node:$ptr))>;
// Create a unary operator that loads from memory and then performs
// the given operation on it.
-class loadu<SDPatternOperator operator>
+class loadu<SDPatternOperator operator, SDPatternOperator load = load>
: PatFrag<(ops node:$addr), (operator (load node:$addr))>;
// Create a store operator that performs the given unary operation
// on the value before storing it.
-class storeu<SDPatternOperator operator>
+class storeu<SDPatternOperator operator, SDPatternOperator store = store>
: PatFrag<(ops node:$value, node:$addr),
(store (operator node:$value), node:$addr)>;
diff --git a/lib/Target/SystemZ/SystemZPatterns.td b/lib/Target/SystemZ/SystemZPatterns.td
index 3689f74bfd4f..7706351e54b3 100644
--- a/lib/Target/SystemZ/SystemZPatterns.td
+++ b/lib/Target/SystemZ/SystemZPatterns.td
@@ -13,7 +13,7 @@ multiclass SXU<SDPatternOperator operator, Instruction insn> {
def : Pat<(operator (sext (i32 GR32:$src))),
(insn GR32:$src)>;
def : Pat<(operator (sext_inreg GR64:$src, i32)),
- (insn (EXTRACT_SUBREG GR64:$src, subreg_32bit))>;
+ (insn (EXTRACT_SUBREG GR64:$src, subreg_l32))>;
}
// Record that INSN performs a 64-bit version of binary operator OPERATOR
@@ -24,7 +24,7 @@ multiclass SXB<SDPatternOperator operator, RegisterOperand cls,
def : Pat<(operator cls:$src1, (sext GR32:$src2)),
(insn cls:$src1, GR32:$src2)>;
def : Pat<(operator cls:$src1, (sext_inreg GR64:$src2, i32)),
- (insn cls:$src1, (EXTRACT_SUBREG GR64:$src2, subreg_32bit))>;
+ (insn cls:$src1, (EXTRACT_SUBREG GR64:$src2, subreg_l32))>;
}
// Like SXB, but for zero extension.
@@ -33,7 +33,7 @@ multiclass ZXB<SDPatternOperator operator, RegisterOperand cls,
def : Pat<(operator cls:$src1, (zext GR32:$src2)),
(insn cls:$src1, GR32:$src2)>;
def : Pat<(operator cls:$src1, (and GR64:$src2, 0xffffffff)),
- (insn cls:$src1, (EXTRACT_SUBREG GR64:$src2, subreg_32bit))>;
+ (insn cls:$src1, (EXTRACT_SUBREG GR64:$src2, subreg_l32))>;
}
// Record that INSN performs a binary read-modify-write operation,
@@ -50,12 +50,8 @@ class RMWI<SDPatternOperator load, SDPatternOperator operator,
// memory location. IMM is the type of the second operand.
multiclass RMWIByte<SDPatternOperator operator, AddressingMode mode,
Instruction insn> {
- def : RMWI<zextloadi8, operator, truncstorei8, mode, imm32, insn>;
- def : RMWI<zextloadi8, operator, truncstorei8, mode, imm64, insn>;
- def : RMWI<sextloadi8, operator, truncstorei8, mode, imm32, insn>;
- def : RMWI<sextloadi8, operator, truncstorei8, mode, imm64, insn>;
- def : RMWI<extloadi8, operator, truncstorei8, mode, imm32, insn>;
- def : RMWI<extloadi8, operator, truncstorei8, mode, imm64, insn>;
+ def : RMWI<anyextloadi8, operator, truncstorei8, mode, imm32, insn>;
+ def : RMWI<anyextloadi8, operator, truncstorei8, mode, imm64, insn>;
}
// Record that INSN performs insertion TYPE into a register of class CLS.
@@ -69,3 +65,88 @@ multiclass InsertMem<string type, Instruction insn, RegisterOperand cls,
(load mode:$src2), cls:$src1),
(insn cls:$src1, mode:$src2)>;
}
+
+// INSN stores the low 32 bits of a GPR to a memory with addressing mode MODE.
+// Record that it is equivalent to using OPERATOR to store a GR64.
+class StoreGR64<Instruction insn, SDPatternOperator operator,
+ AddressingMode mode>
+ : Pat<(operator GR64:$R1, mode:$XBD2),
+ (insn (EXTRACT_SUBREG GR64:$R1, subreg_l32), mode:$XBD2)>;
+
+// INSN and INSNY are an RX/RXY pair of instructions that store the low
+// 32 bits of a GPR to memory. Record that they are equivalent to using
+// OPERATOR to store a GR64.
+multiclass StoreGR64Pair<Instruction insn, Instruction insny,
+ SDPatternOperator operator> {
+ def : StoreGR64<insn, operator, bdxaddr12pair>;
+ def : StoreGR64<insny, operator, bdxaddr20pair>;
+}
+
+// INSN stores the low 32 bits of a GPR using PC-relative addressing.
+// Record that it is equivalent to using OPERATOR to store a GR64.
+class StoreGR64PC<Instruction insn, SDPatternOperator operator>
+ : Pat<(operator GR64:$R1, pcrel32:$XBD2),
+ (insn (EXTRACT_SUBREG GR64:$R1, subreg_l32), pcrel32:$XBD2)> {
+ // We want PC-relative addresses to be tried ahead of BD and BDX addresses.
+ // However, BDXs have two extra operands and are therefore 6 units more
+ // complex.
+ let AddedComplexity = 7;
+}
+
+// INSN and INSNINV conditionally store the low 32 bits of a GPR to memory,
+// with INSN storing when the condition is true and INSNINV storing when the
+// condition is false. Record that they are equivalent to a LOAD/select/STORE
+// sequence for GR64s.
+multiclass CondStores64<Instruction insn, Instruction insninv,
+ SDPatternOperator store, SDPatternOperator load,
+ AddressingMode mode> {
+ def : Pat<(store (z_select_ccmask GR64:$new, (load mode:$addr),
+ uimm8zx4:$valid, uimm8zx4:$cc),
+ mode:$addr),
+ (insn (EXTRACT_SUBREG GR64:$new, subreg_l32), mode:$addr,
+ uimm8zx4:$valid, uimm8zx4:$cc)>;
+ def : Pat<(store (z_select_ccmask (load mode:$addr), GR64:$new,
+ uimm8zx4:$valid, uimm8zx4:$cc),
+ mode:$addr),
+ (insninv (EXTRACT_SUBREG GR64:$new, subreg_l32), mode:$addr,
+ uimm8zx4:$valid, uimm8zx4:$cc)>;
+}
+
+// Try to use MVC instruction INSN for a load of type LOAD followed by a store
+// of the same size. VT is the type of the intermediate (legalized) value and
+// LENGTH is the number of bytes loaded by LOAD.
+multiclass MVCLoadStore<SDPatternOperator load, ValueType vt, Instruction insn,
+ bits<5> length> {
+ def : Pat<(mvc_store (vt (load bdaddr12only:$src)), bdaddr12only:$dest),
+ (insn bdaddr12only:$dest, bdaddr12only:$src, length)>;
+}
+
+// Use NC-like instruction INSN for block_op operation OPERATOR.
+// The other operand is a load of type LOAD, which accesses LENGTH bytes.
+// VT is the intermediate legalized type in which the binary operation
+// is actually done.
+multiclass BinaryLoadStore<SDPatternOperator operator, SDPatternOperator load,
+ ValueType vt, Instruction insn, bits<5> length> {
+ def : Pat<(operator (vt (load bdaddr12only:$src)), bdaddr12only:$dest),
+ (insn bdaddr12only:$dest, bdaddr12only:$src, length)>;
+}
+
+// A convenient way of generating all block peepholes for a particular
+// LOAD/VT/LENGTH combination.
+multiclass BlockLoadStore<SDPatternOperator load, ValueType vt,
+ Instruction mvc, Instruction nc, Instruction oc,
+ Instruction xc, bits<5> length> {
+ defm : MVCLoadStore<load, vt, mvc, length>;
+ defm : BinaryLoadStore<block_and1, load, vt, nc, length>;
+ defm : BinaryLoadStore<block_and2, load, vt, nc, length>;
+ defm : BinaryLoadStore<block_or1, load, vt, oc, length>;
+ defm : BinaryLoadStore<block_or2, load, vt, oc, length>;
+ defm : BinaryLoadStore<block_xor1, load, vt, xc, length>;
+ defm : BinaryLoadStore<block_xor2, load, vt, xc, length>;
+}
+
+// Record that INSN is a LOAD AND TEST that can be used to compare
+// registers in CLS against zero. The instruction has separate R1 and R2
+// operands, but they must be the same when the instruction is used like this.
+class CompareZeroFP<Instruction insn, RegisterOperand cls>
+ : Pat<(z_fcmp cls:$reg, (fpimm0)), (insn cls:$reg, cls:$reg)>;
diff --git a/lib/Target/SystemZ/SystemZProcessors.td b/lib/Target/SystemZ/SystemZProcessors.td
new file mode 100644
index 000000000000..f241fb0c2222
--- /dev/null
+++ b/lib/Target/SystemZ/SystemZProcessors.td
@@ -0,0 +1,46 @@
+//===-- SystemZ.td - SystemZ processors and features ---------*- tblgen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Processor and feature definitions.
+//
+//===----------------------------------------------------------------------===//
+
+class SystemZFeature<string extname, string intname, string desc>
+ : Predicate<"Subtarget.has"##intname##"()">,
+ AssemblerPredicate<"Feature"##intname, extname>,
+ SubtargetFeature<extname, "Has"##intname, "true", desc>;
+
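+// For example, the distinct-ops definition below provides a
+// Subtarget.hasDistinctOps() predicate, a FeatureDistinctOps assembler
+// predicate and a "distinct-ops" subtarget feature string.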
+def FeatureDistinctOps : SystemZFeature<
+ "distinct-ops", "DistinctOps",
+ "Assume that the distinct-operands facility is installed"
+>;
+
+def FeatureLoadStoreOnCond : SystemZFeature<
+ "load-store-on-cond", "LoadStoreOnCond",
+ "Assume that the load/store-on-condition facility is installed"
+>;
+
+def FeatureHighWord : SystemZFeature<
+ "high-word", "HighWord",
+ "Assume that the high-word facility is installed"
+>;
+
+def FeatureFPExtension : SystemZFeature<
+ "fp-extension", "FPExtension",
+ "Assume that the floating-point extension facility is installed"
+>;
+
+def : Processor<"generic", NoItineraries, []>;
+def : Processor<"z10", NoItineraries, []>;
+def : Processor<"z196", NoItineraries,
+ [FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord,
+ FeatureFPExtension]>;
+def : Processor<"zEC12", NoItineraries,
+ [FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord,
+ FeatureFPExtension]>;
diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index a0ae7ed00017..b61ae88f733c 100644
--- a/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -17,9 +17,8 @@
using namespace llvm;
-SystemZRegisterInfo::SystemZRegisterInfo(SystemZTargetMachine &tm,
- const SystemZInstrInfo &tii)
- : SystemZGenRegisterInfo(SystemZ::R14D), TM(tm), TII(tii) {}
+SystemZRegisterInfo::SystemZRegisterInfo(SystemZTargetMachine &tm)
+ : SystemZGenRegisterInfo(SystemZ::R14D), TM(tm) {}
const uint16_t*
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
@@ -43,41 +42,19 @@ SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
if (TFI->hasFP(MF)) {
// R11D is the frame pointer. Reserve all aliases.
Reserved.set(SystemZ::R11D);
- Reserved.set(SystemZ::R11W);
+ Reserved.set(SystemZ::R11L);
+ Reserved.set(SystemZ::R11H);
Reserved.set(SystemZ::R10Q);
}
// R15D is the stack pointer. Reserve all aliases.
Reserved.set(SystemZ::R15D);
- Reserved.set(SystemZ::R15W);
+ Reserved.set(SystemZ::R15L);
+ Reserved.set(SystemZ::R15H);
Reserved.set(SystemZ::R14Q);
return Reserved;
}
-bool
-SystemZRegisterInfo::saveScavengerRegister(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator SaveMBBI,
- MachineBasicBlock::iterator &UseMBBI,
- const TargetRegisterClass *RC,
- unsigned Reg) const {
- MachineFunction &MF = *MBB.getParent();
- const SystemZFrameLowering *TFI =
- static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());
- unsigned Base = getFrameRegister(MF);
- uint64_t Offset = TFI->getEmergencySpillSlotOffset(MF);
- DebugLoc DL;
-
- unsigned LoadOpcode, StoreOpcode;
- TII.getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
-
- // The offset must always be in range of a 12-bit unsigned displacement.
- BuildMI(MBB, SaveMBBI, DL, TII.get(StoreOpcode))
- .addReg(Reg, RegState::Kill).addReg(Base).addImm(Offset).addReg(0);
- BuildMI(MBB, UseMBBI, DL, TII.get(LoadOpcode), Reg)
- .addReg(Base).addImm(Offset).addReg(0);
- return true;
-}
-
void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, unsigned FIOperandNum,
@@ -86,6 +63,8 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
MachineBasicBlock &MBB = *MI->getParent();
MachineFunction &MF = *MBB.getParent();
+ const SystemZInstrInfo &TII =
+ *static_cast<const SystemZInstrInfo*>(TM.getInstrInfo());
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
DebugLoc DL = MI->getDebugLoc();
diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.h b/lib/Target/SystemZ/SystemZRegisterInfo.h
index 91a70dea29c9..13f45faba076 100644
--- a/lib/Target/SystemZ/SystemZRegisterInfo.h
+++ b/lib/Target/SystemZ/SystemZRegisterInfo.h
@@ -22,10 +22,10 @@ namespace SystemZ {
// Return the subreg to use for referring to the even and odd registers
// in a GR128 pair. Is32Bit says whether we want a GR32 or GR64.
inline unsigned even128(bool Is32bit) {
- return Is32bit ? subreg_32bit : subreg_high;
+ return Is32bit ? subreg_hl32 : subreg_h64;
}
inline unsigned odd128(bool Is32bit) {
- return Is32bit ? subreg_low32 : subreg_low;
+ return Is32bit ? subreg_l32 : subreg_l64;
}
}
@@ -35,10 +35,9 @@ class SystemZInstrInfo;
struct SystemZRegisterInfo : public SystemZGenRegisterInfo {
private:
SystemZTargetMachine &TM;
- const SystemZInstrInfo &TII;
public:
- SystemZRegisterInfo(SystemZTargetMachine &tm, const SystemZInstrInfo &tii);
+ SystemZRegisterInfo(SystemZTargetMachine &tm);
// Override TargetRegisterInfo.h.
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const
@@ -49,15 +48,14 @@ public:
LLVM_OVERRIDE {
return true;
}
+ virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const
+ LLVM_OVERRIDE {
+ return true;
+ }
virtual const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0)
const LLVM_OVERRIDE;
virtual BitVector getReservedRegs(const MachineFunction &MF)
const LLVM_OVERRIDE;
- virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator SaveMBBI,
- MachineBasicBlock::iterator &UseMBBI,
- const TargetRegisterClass *RC,
- unsigned Reg) const LLVM_OVERRIDE;
virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS) const LLVM_OVERRIDE;
diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.td b/lib/Target/SystemZ/SystemZRegisterInfo.td
index bd1b563b9aab..93d7c8375b3d 100644
--- a/lib/Target/SystemZ/SystemZRegisterInfo.td
+++ b/lib/Target/SystemZ/SystemZRegisterInfo.td
@@ -21,10 +21,12 @@ class SystemZRegWithSubregs<string n, list<Register> subregs>
}
let Namespace = "SystemZ" in {
-def subreg_32bit : SubRegIndex; // could also be known as "subreg_high32"
-def subreg_high : SubRegIndex;
-def subreg_low : SubRegIndex;
-def subreg_low32 : SubRegIndex<[subreg_low, subreg_32bit]>;
+def subreg_l32 : SubRegIndex<32, 0>; // Also acts as subreg_ll32.
+def subreg_h32 : SubRegIndex<32, 32>; // Also acts as subreg_lh32.
+def subreg_l64 : SubRegIndex<64, 0>;
+def subreg_h64 : SubRegIndex<64, 64>;
+def subreg_hh32 : ComposedSubRegIndex<subreg_h64, subreg_h32>;
+def subreg_hl32 : ComposedSubRegIndex<subreg_h64, subreg_l32>;
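+// In a 128-bit pair, subreg_hl32 therefore names the low 32 bits of the
+// high 64-bit half and subreg_hh32 the high 32 bits of that half.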
}
// Define a register class that contains values of type TYPE and an
@@ -54,36 +56,49 @@ class GPR32<bits<16> num, string n> : SystemZReg<n> {
}
// One of the 16 64-bit general-purpose registers.
-class GPR64<bits<16> num, string n, GPR32 low>
- : SystemZRegWithSubregs<n, [low]> {
+class GPR64<bits<16> num, string n, GPR32 low, GPR32 high>
+ : SystemZRegWithSubregs<n, [low, high]> {
let HWEncoding = num;
- let SubRegIndices = [subreg_32bit];
+ let SubRegIndices = [subreg_l32, subreg_h32];
}
// 8 even-odd pairs of GPR64s.
-class GPR128<bits<16> num, string n, GPR64 high, GPR64 low>
- : SystemZRegWithSubregs<n, [high, low]> {
+class GPR128<bits<16> num, string n, GPR64 low, GPR64 high>
+ : SystemZRegWithSubregs<n, [low, high]> {
let HWEncoding = num;
- let SubRegIndices = [subreg_high, subreg_low];
+ let SubRegIndices = [subreg_l64, subreg_h64];
}
// General-purpose registers
foreach I = 0-15 in {
- def R#I#W : GPR32<I, "r"#I>;
- def R#I#D : GPR64<I, "r"#I, !cast<GPR32>("R"#I#"W")>, DwarfRegNum<[I]>;
+ def R#I#L : GPR32<I, "r"#I>;
+ def R#I#H : GPR32<I, "r"#I>;
+ def R#I#D : GPR64<I, "r"#I, !cast<GPR32>("R"#I#"L"), !cast<GPR32>("R"#I#"H")>,
+ DwarfRegNum<[I]>;
}
foreach I = [0, 2, 4, 6, 8, 10, 12, 14] in {
- def R#I#Q : GPR128<I, "r"#I, !cast<GPR64>("R"#I#"D"),
- !cast<GPR64>("R"#!add(I, 1)#"D")>;
+ def R#I#Q : GPR128<I, "r"#I, !cast<GPR64>("R"#!add(I, 1)#"D"),
+ !cast<GPR64>("R"#I#"D")>;
}
/// Allocate the callee-saved R6-R13 backwards. That way they can be saved
/// together with R14 and R15 in one prolog instruction.
-defm GR32 : SystemZRegClass<"GR32", i32, 32, (add (sequence "R%uW", 0, 5),
- (sequence "R%uW", 15, 6))>;
-defm GR64 : SystemZRegClass<"GR64", i64, 64, (add (sequence "R%uD", 0, 5),
- (sequence "R%uD", 15, 6))>;
+defm GR32 : SystemZRegClass<"GR32", i32, 32, (add (sequence "R%uL", 0, 5),
+ (sequence "R%uL", 15, 6))>;
+defm GRH32 : SystemZRegClass<"GRH32", i32, 32, (add (sequence "R%uH", 0, 5),
+ (sequence "R%uH", 15, 6))>;
+defm GR64 : SystemZRegClass<"GR64", i64, 64, (add (sequence "R%uD", 0, 5),
+ (sequence "R%uD", 15, 6))>;
+
+// Combine the low and high GR32s into a single class. This can only be
+// used for virtual registers if the high-word facility is available.
+defm GRX32 : SystemZRegClass<"GRX32", i32, 32,
+ (add (sequence "R%uL", 0, 5),
+ (sequence "R%uH", 0, 5),
+ R15L, R15H, R14L, R14H, R13L, R13H,
+ R12L, R12H, R11L, R11H, R10L, R10H,
+ R9L, R9H, R8L, R8H, R7L, R7H, R6L, R6H)>;
// The architecture doesn't really have any i128 support, so model the
// register pairs as untyped instead.
@@ -93,7 +108,7 @@ defm GR128 : SystemZRegClass<"GR128", untyped, 128, (add R0Q, R2Q, R4Q,
// Base and index registers. Everything except R0, which in an address
// context evaluates as 0.
-defm ADDR32 : SystemZRegClass<"ADDR32", i32, 32, (sub GR32Bit, R0W)>;
+defm ADDR32 : SystemZRegClass<"ADDR32", i32, 32, (sub GR32Bit, R0L)>;
defm ADDR64 : SystemZRegClass<"ADDR64", i64, 64, (sub GR64Bit, R0D)>;
// Not used directly, but needs to exist for ADDR32 and ADDR64 subregs
@@ -113,14 +128,14 @@ class FPR32<bits<16> num, string n> : SystemZReg<n> {
class FPR64<bits<16> num, string n, FPR32 low>
: SystemZRegWithSubregs<n, [low]> {
let HWEncoding = num;
- let SubRegIndices = [subreg_32bit];
+ let SubRegIndices = [subreg_h32];
}
// 8 pairs of FPR64s, with a one-register gap in between.
-class FPR128<bits<16> num, string n, FPR64 high, FPR64 low>
- : SystemZRegWithSubregs<n, [high, low]> {
+class FPR128<bits<16> num, string n, FPR64 low, FPR64 high>
+ : SystemZRegWithSubregs<n, [low, high]> {
let HWEncoding = num;
- let SubRegIndices = [subreg_high, subreg_low];
+ let SubRegIndices = [subreg_l64, subreg_h64];
}
// Floating-point registers
@@ -131,8 +146,8 @@ foreach I = 0-15 in {
}
foreach I = [0, 1, 4, 5, 8, 9, 12, 13] in {
- def F#I#Q : FPR128<I, "f"#I, !cast<FPR64>("F"#I#"D"),
- !cast<FPR64>("F"#!add(I, 2)#"D")>;
+ def F#I#Q : FPR128<I, "f"#I, !cast<FPR64>("F"#!add(I, 2)#"D"),
+ !cast<FPR64>("F"#I#"D")>;
}
// There's no store-multiple instruction for FPRs, so we're not fussy
@@ -146,5 +161,7 @@ defm FP128 : SystemZRegClass<"FP128", f128, 128, (add F0Q, F1Q, F4Q, F5Q,
// Other registers
//===----------------------------------------------------------------------===//
-// Status register
-def PSW : SystemZReg<"psw">;
+// The 2-bit condition code field of the PSW. Every register named in an
+// inline asm needs a class associated with it.
+def CC : SystemZReg<"cc">;
+def CCRegs : RegisterClass<"SystemZ", [i32], 32, (add CC)>;
diff --git a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
new file mode 100644
index 000000000000..c7ebb5d6b4ec
--- /dev/null
+++ b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -0,0 +1,293 @@
+//===-- SystemZSelectionDAGInfo.cpp - SystemZ SelectionDAG Info -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SystemZSelectionDAGInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "systemz-selectiondag-info"
+#include "SystemZTargetMachine.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+
+using namespace llvm;
+
+SystemZSelectionDAGInfo::
+SystemZSelectionDAGInfo(const SystemZTargetMachine &TM)
+ : TargetSelectionDAGInfo(TM) {
+}
+
+SystemZSelectionDAGInfo::~SystemZSelectionDAGInfo() {
+}
+
+// Decide whether it is best to use a loop or straight-line code for
+// a block operation of Size bytes with source address Src and destination
+// address Dest. Sequence is the opcode to use for straight-line code
+// (such as MVC) and Loop is the opcode to use for loops (such as MVC_LOOP).
+// Return the chain for the completed operation.
+static SDValue emitMemMem(SelectionDAG &DAG, SDLoc DL, unsigned Sequence,
+ unsigned Loop, SDValue Chain, SDValue Dst,
+ SDValue Src, uint64_t Size) {
+ EVT PtrVT = Src.getValueType();
+ // The heuristic we use is to prefer loops for anything that would
+ // require 7 or more MVCs. With these kinds of sizes there isn't
+ // much to choose between straight-line code and looping code,
+ // since the time will be dominated by the MVCs themselves.
+ // However, the loop has 4 or 5 instructions (depending on whether
+ // the base addresses can be proved equal), so there doesn't seem
+  // much point in using a loop for 5 * 256 bytes or fewer. Anything in
+ // the range (5 * 256, 6 * 256) will need another instruction after
+ // the loop, so it doesn't seem worth using a loop then either.
+ // The next value up, 6 * 256, can be implemented in the same
+ // number of straight-line MVCs as 6 * 256 - 1.
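+  // For example, a 1537-byte copy takes the loop form, passing both the
+  // total size and Size / 256 = 6 to the loop opcode, while a 1536-byte
+  // copy stays with the straight-line opcode (six MVCs, as noted above).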
+ if (Size > 6 * 256)
+ return DAG.getNode(Loop, DL, MVT::Other, Chain, Dst, Src,
+ DAG.getConstant(Size, PtrVT),
+ DAG.getConstant(Size / 256, PtrVT));
+ return DAG.getNode(Sequence, DL, MVT::Other, Chain, Dst, Src,
+ DAG.getConstant(Size, PtrVT));
+}
+
+SDValue SystemZSelectionDAGInfo::
+EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dst, SDValue Src, SDValue Size, unsigned Align,
+ bool IsVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
+ if (IsVolatile)
+ return SDValue();
+
+ if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size))
+ return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
+ Chain, Dst, Src, CSize->getZExtValue());
+ return SDValue();
+}
+
+// Handle a memset of 1, 2, 4 or 8 bytes with the operands given by
+// Chain, Dst, ByteVal and Size. These cases are expected to use
+// MVI, MVHHI, MVHI and MVGHI respectively.
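+// For example, ByteVal = 0xAB with Size = 4 produces StoreVal = 0xABABABAB,
+// which is stored as a single i32 (the MVHI case).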
+static SDValue memsetStore(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dst, uint64_t ByteVal, uint64_t Size,
+ unsigned Align,
+ MachinePointerInfo DstPtrInfo) {
+ uint64_t StoreVal = ByteVal;
+ for (unsigned I = 1; I < Size; ++I)
+ StoreVal |= ByteVal << (I * 8);
+ return DAG.getStore(Chain, DL,
+ DAG.getConstant(StoreVal, MVT::getIntegerVT(Size * 8)),
+ Dst, DstPtrInfo, false, false, Align);
+}
+
+SDValue SystemZSelectionDAGInfo::
+EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dst, SDValue Byte, SDValue Size,
+ unsigned Align, bool IsVolatile,
+ MachinePointerInfo DstPtrInfo) const {
+ EVT PtrVT = Dst.getValueType();
+
+ if (IsVolatile)
+ return SDValue();
+
+ if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+ uint64_t Bytes = CSize->getZExtValue();
+ if (Bytes == 0)
+ return SDValue();
+ if (ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte)) {
+ // Handle cases that can be done using at most two of
+ // MVI, MVHI, MVHHI and MVGHI. The latter two can only be
+      // used if ByteVal is all zeros or all ones; in other cases,
+ // we can move at most 2 halfwords.
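+      // For example, a 12-byte memset of zero becomes an 8-byte store
+      // followed by a 4-byte store (MVGHI then MVHI), while a 3-byte
+      // memset of a nonzero byte becomes a 2-byte store and a 1-byte store.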
+ uint64_t ByteVal = CByte->getZExtValue();
+ if (ByteVal == 0 || ByteVal == 255 ?
+ Bytes <= 16 && CountPopulation_64(Bytes) <= 2 :
+ Bytes <= 4) {
+ unsigned Size1 = Bytes == 16 ? 8 : 1 << findLastSet(Bytes);
+ unsigned Size2 = Bytes - Size1;
+ SDValue Chain1 = memsetStore(DAG, DL, Chain, Dst, ByteVal, Size1,
+ Align, DstPtrInfo);
+ if (Size2 == 0)
+ return Chain1;
+ Dst = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
+ DAG.getConstant(Size1, PtrVT));
+ DstPtrInfo = DstPtrInfo.getWithOffset(Size1);
+ SDValue Chain2 = memsetStore(DAG, DL, Chain, Dst, ByteVal, Size2,
+ std::min(Align, Size1), DstPtrInfo);
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chain1, Chain2);
+ }
+ } else {
+ // Handle one and two bytes using STC.
+ if (Bytes <= 2) {
+ SDValue Chain1 = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo,
+ false, false, Align);
+ if (Bytes == 1)
+ return Chain1;
+ SDValue Dst2 = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
+ DAG.getConstant(1, PtrVT));
+ SDValue Chain2 = DAG.getStore(Chain, DL, Byte, Dst2,
+ DstPtrInfo.getWithOffset(1),
+ false, false, 1);
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chain1, Chain2);
+ }
+ }
+ assert(Bytes >= 2 && "Should have dealt with 0- and 1-byte cases already");
+
+ // Handle the special case of a memset of 0, which can use XC.
+ ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte);
+ if (CByte && CByte->getZExtValue() == 0)
+ return emitMemMem(DAG, DL, SystemZISD::XC, SystemZISD::XC_LOOP,
+ Chain, Dst, Dst, Bytes);
+
+ // Copy the byte to the first location and then use MVC to copy
+ // it to the rest.
+ Chain = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo,
+ false, false, Align);
+ SDValue DstPlus1 = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
+ DAG.getConstant(1, PtrVT));
+ return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
+ Chain, DstPlus1, Dst, Bytes - 1);
+ }
+ return SDValue();
+}
+
+// Use CLC to compare [Src1, Src1 + Size) with [Src2, Src2 + Size),
+// deciding whether to use a loop or straight-line code.
+static SDValue emitCLC(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src1, SDValue Src2, uint64_t Size) {
+ SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ EVT PtrVT = Src1.getValueType();
+ // A two-CLC sequence is a clear win over a loop, not least because it
+ // needs only one branch. A three-CLC sequence needs the same number
+ // of branches as a loop (i.e. 2), but is shorter. That brings us to
+ // lengths greater than 768 bytes. It seems relatively likely that
+ // a difference will be found within the first 768 bytes, so we just
+ // optimize for the smallest number of branch instructions, in order
+ // to avoid polluting the prediction buffer too much. A loop only ever
+ // needs 2 branches, whereas a straight-line sequence would need 3 or more.
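+  // For example, a 512-byte compare takes the straight-line path (two CLCs
+  // once expanded), while anything above 3 * 256 bytes uses CLC_LOOP with
+  // Size / 256 passed alongside the total size.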
+ if (Size > 3 * 256)
+ return DAG.getNode(SystemZISD::CLC_LOOP, DL, VTs, Chain, Src1, Src2,
+ DAG.getConstant(Size, PtrVT),
+ DAG.getConstant(Size / 256, PtrVT));
+ return DAG.getNode(SystemZISD::CLC, DL, VTs, Chain, Src1, Src2,
+ DAG.getConstant(Size, PtrVT));
+}
+
+// Convert the current CC value into an integer that is 0 if CC == 0,
+// less than zero if CC == 1 and greater than zero if CC >= 2.
+// The sequence starts with IPM, which puts CC into bits 29 and 28
+// of an integer and clears bits 30 and 31.
+static SDValue addIPMSequence(SDLoc DL, SDValue Glue, SelectionDAG &DAG) {
+ SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
+ SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
+ DAG.getConstant(SystemZ::IPM_CC, MVT::i32));
+ SDValue ROTL = DAG.getNode(ISD::ROTL, DL, MVT::i32, SRL,
+ DAG.getConstant(31, MVT::i32));
+ return ROTL;
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src1, SDValue Src2, SDValue Size,
+ MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const {
+ if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+ uint64_t Bytes = CSize->getZExtValue();
+ assert(Bytes > 0 && "Caller should have handled 0-size case");
+ Chain = emitCLC(DAG, DL, Chain, Src1, Src2, Bytes);
+ SDValue Glue = Chain.getValue(1);
+ return std::make_pair(addIPMSequence(DL, Glue, DAG), Chain);
+ }
+ return std::make_pair(SDValue(), SDValue());
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, SDValue Char, SDValue Length,
+ MachinePointerInfo SrcPtrInfo) const {
+ // Use SRST to find the character. End is its address on success.
+ EVT PtrVT = Src.getValueType();
+ SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other, MVT::Glue);
+ Length = DAG.getZExtOrTrunc(Length, DL, PtrVT);
+ Char = DAG.getZExtOrTrunc(Char, DL, MVT::i32);
+ Char = DAG.getNode(ISD::AND, DL, MVT::i32, Char,
+ DAG.getConstant(255, MVT::i32));
+ SDValue Limit = DAG.getNode(ISD::ADD, DL, PtrVT, Src, Length);
+ SDValue End = DAG.getNode(SystemZISD::SEARCH_STRING, DL, VTs, Chain,
+ Limit, Src, Char);
+ Chain = End.getValue(1);
+ SDValue Glue = End.getValue(2);
+
+ // Now select between End and null, depending on whether the character
+ // was found.
+ SmallVector<SDValue, 5> Ops;
+ Ops.push_back(End);
+ Ops.push_back(DAG.getConstant(0, PtrVT));
+ Ops.push_back(DAG.getConstant(SystemZ::CCMASK_SRST, MVT::i32));
+ Ops.push_back(DAG.getConstant(SystemZ::CCMASK_SRST_FOUND, MVT::i32));
+ Ops.push_back(Glue);
+ VTs = DAG.getVTList(PtrVT, MVT::Glue);
+ End = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
+ return std::make_pair(End, Chain);
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dest, SDValue Src,
+ MachinePointerInfo DestPtrInfo,
+ MachinePointerInfo SrcPtrInfo, bool isStpcpy) const {
+ SDVTList VTs = DAG.getVTList(Dest.getValueType(), MVT::Other);
+ SDValue EndDest = DAG.getNode(SystemZISD::STPCPY, DL, VTs, Chain, Dest, Src,
+ DAG.getConstant(0, MVT::i32));
+ return std::make_pair(isStpcpy ? EndDest : Dest, EndDest.getValue(1));
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src1, SDValue Src2,
+ MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const {
+ SDVTList VTs = DAG.getVTList(Src1.getValueType(), MVT::Other, MVT::Glue);
+ SDValue Unused = DAG.getNode(SystemZISD::STRCMP, DL, VTs, Chain, Src1, Src2,
+ DAG.getConstant(0, MVT::i32));
+ Chain = Unused.getValue(1);
+ SDValue Glue = Chain.getValue(2);
+ return std::make_pair(addIPMSequence(DL, Glue, DAG), Chain);
+}
+
+// Search from Src for a null character, stopping once Src reaches Limit.
+// Return a pair of values, the first being the number of nonnull characters
+// and the second being the out chain.
+//
+// This can be used for strlen by setting Limit to 0.
+static std::pair<SDValue, SDValue> getBoundedStrlen(SelectionDAG &DAG, SDLoc DL,
+ SDValue Chain, SDValue Src,
+ SDValue Limit) {
+ EVT PtrVT = Src.getValueType();
+ SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other, MVT::Glue);
+ SDValue End = DAG.getNode(SystemZISD::SEARCH_STRING, DL, VTs, Chain,
+ Limit, Src, DAG.getConstant(0, MVT::i32));
+ Chain = End.getValue(1);
+ SDValue Len = DAG.getNode(ISD::SUB, DL, PtrVT, End, Src);
+ return std::make_pair(Len, Chain);
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, MachinePointerInfo SrcPtrInfo) const {
+ EVT PtrVT = Src.getValueType();
+ return getBoundedStrlen(DAG, DL, Chain, Src, DAG.getConstant(0, PtrVT));
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, SDValue MaxLength,
+ MachinePointerInfo SrcPtrInfo) const {
+ EVT PtrVT = Src.getValueType();
+ MaxLength = DAG.getZExtOrTrunc(MaxLength, DL, PtrVT);
+ SDValue Limit = DAG.getNode(ISD::ADD, DL, PtrVT, Src, MaxLength);
+ return getBoundedStrlen(DAG, DL, Chain, Src, Limit);
+}
diff --git a/lib/Target/SystemZ/SystemZSelectionDAGInfo.h b/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
new file mode 100644
index 000000000000..281d1e291dc9
--- /dev/null
+++ b/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
@@ -0,0 +1,80 @@
+//===-- SystemZSelectionDAGInfo.h - SystemZ SelectionDAG Info ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SystemZ subclass for TargetSelectionDAGInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZSELECTIONDAGINFO_H
+#define SYSTEMZSELECTIONDAGINFO_H
+
+#include "llvm/Target/TargetSelectionDAGInfo.h"
+
+namespace llvm {
+
+class SystemZTargetMachine;
+
+class SystemZSelectionDAGInfo : public TargetSelectionDAGInfo {
+public:
+ explicit SystemZSelectionDAGInfo(const SystemZTargetMachine &TM);
+ ~SystemZSelectionDAGInfo();
+
+ virtual
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align,
+ bool IsVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const
+ LLVM_OVERRIDE;
+
+ virtual SDValue
+ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL,
+ SDValue Chain, SDValue Dst, SDValue Byte,
+ SDValue Size, unsigned Align, bool IsVolatile,
+ MachinePointerInfo DstPtrInfo) const LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src1, SDValue Src2, SDValue Size,
+ MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, SDValue Char, SDValue Length,
+ MachinePointerInfo SrcPtrInfo) const LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dest, SDValue Src,
+ MachinePointerInfo DestPtrInfo,
+ MachinePointerInfo SrcPtrInfo,
+ bool isStpcpy) const LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src1, SDValue Src2,
+ MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, MachinePointerInfo SrcPtrInfo) const
+ LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, SDValue MaxLength,
+ MachinePointerInfo SrcPtrInfo) const LLVM_OVERRIDE;
+};
+
+}
+
+#endif
diff --git a/lib/Target/SystemZ/SystemZShortenInst.cpp b/lib/Target/SystemZ/SystemZShortenInst.cpp
new file mode 100644
index 000000000000..537a54554045
--- /dev/null
+++ b/lib/Target/SystemZ/SystemZShortenInst.cpp
@@ -0,0 +1,163 @@
+//===-- SystemZShortenInst.cpp - Instruction-shortening pass --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass tries to replace instructions with shorter forms. For example,
+// IILF can be replaced with LLILL or LLILH if the constant fits and if the
+// other 32 bits of the GR64 destination are not live.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "systemz-shorten-inst"
+
+#include "SystemZTargetMachine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+using namespace llvm;
+
+namespace {
+ class SystemZShortenInst : public MachineFunctionPass {
+ public:
+ static char ID;
+ SystemZShortenInst(const SystemZTargetMachine &tm);
+
+ virtual const char *getPassName() const {
+ return "SystemZ Instruction Shortening";
+ }
+
+ bool processBlock(MachineBasicBlock *MBB);
+ bool runOnMachineFunction(MachineFunction &F);
+
+ private:
+ bool shortenIIF(MachineInstr &MI, unsigned *GPRMap, unsigned LiveOther,
+ unsigned LLIxL, unsigned LLIxH);
+
+ const SystemZInstrInfo *TII;
+
+ // LowGPRs[I] has bit N set if LLVM register I includes the low
+ // word of GPR N. HighGPRs is the same for the high word.
+ unsigned LowGPRs[SystemZ::NUM_TARGET_REGS];
+ unsigned HighGPRs[SystemZ::NUM_TARGET_REGS];
+ };
+
+ char SystemZShortenInst::ID = 0;
+} // end of anonymous namespace
+
+FunctionPass *llvm::createSystemZShortenInstPass(SystemZTargetMachine &TM) {
+ return new SystemZShortenInst(TM);
+}
+
+SystemZShortenInst::SystemZShortenInst(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(0), LowGPRs(), HighGPRs() {
+ // Set up LowGPRs and HighGPRs.
+ for (unsigned I = 0; I < 16; ++I) {
+ LowGPRs[SystemZMC::GR32Regs[I]] |= 1 << I;
+ LowGPRs[SystemZMC::GR64Regs[I]] |= 1 << I;
+ HighGPRs[SystemZMC::GRH32Regs[I]] |= 1 << I;
+ HighGPRs[SystemZMC::GR64Regs[I]] |= 1 << I;
+ if (unsigned GR128 = SystemZMC::GR128Regs[I]) {
+ LowGPRs[GR128] |= 3 << I;
+ HighGPRs[GR128] |= 3 << I;
+ }
+ }
+}
+
+// MI loads one word of a GPR using an IIxF instruction and LLIxL and LLIxH
+// are the halfword immediate loads for the same word. Try to use one of them
+// instead of IIxF. If MI loads the high word, GPRMap[X] is the set of high
+// words referenced by LLVM register X while LiveOther is the mask of low
+// words that are currently live, and vice versa.
+bool SystemZShortenInst::shortenIIF(MachineInstr &MI, unsigned *GPRMap,
+ unsigned LiveOther, unsigned LLIxL,
+ unsigned LLIxH) {
+ unsigned Reg = MI.getOperand(0).getReg();
+ assert(Reg < SystemZ::NUM_TARGET_REGS && "Invalid register number");
+ unsigned GPRs = GPRMap[Reg];
+ assert(GPRs != 0 && "Register must be a GPR");
+ if (GPRs & LiveOther)
+ return false;
+
+ uint64_t Imm = MI.getOperand(1).getImm();
+ if (SystemZ::isImmLL(Imm)) {
+ MI.setDesc(TII->get(LLIxL));
+ MI.getOperand(0).setReg(SystemZMC::getRegAsGR64(Reg));
+ return true;
+ }
+ if (SystemZ::isImmLH(Imm)) {
+ MI.setDesc(TII->get(LLIxH));
+ MI.getOperand(0).setReg(SystemZMC::getRegAsGR64(Reg));
+ MI.getOperand(1).setImm(Imm >> 16);
+ return true;
+ }
+ return false;
+}
+
+// Process all instructions in MBB. Return true if something changed.
+bool SystemZShortenInst::processBlock(MachineBasicBlock *MBB) {
+ bool Changed = false;
+
+ // Work out which words are live on exit from the block.
+ unsigned LiveLow = 0;
+ unsigned LiveHigh = 0;
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end(); SI != SE; ++SI) {
+ for (MachineBasicBlock::livein_iterator LI = (*SI)->livein_begin(),
+ LE = (*SI)->livein_end(); LI != LE; ++LI) {
+ unsigned Reg = *LI;
+ assert(Reg < SystemZ::NUM_TARGET_REGS && "Invalid register number");
+ LiveLow |= LowGPRs[Reg];
+ LiveHigh |= HighGPRs[Reg];
+ }
+ }
+
+ // Iterate backwards through the block looking for instructions to change.
+ for (MachineBasicBlock::reverse_iterator MBBI = MBB->rbegin(),
+ MBBE = MBB->rend(); MBBI != MBBE; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == SystemZ::IILF)
+ Changed |= shortenIIF(MI, LowGPRs, LiveHigh, SystemZ::LLILL,
+ SystemZ::LLILH);
+ else if (Opcode == SystemZ::IIHF)
+ Changed |= shortenIIF(MI, HighGPRs, LiveLow, SystemZ::LLIHL,
+ SystemZ::LLIHH);
+ unsigned UsedLow = 0;
+ unsigned UsedHigh = 0;
+ for (MachineInstr::mop_iterator MOI = MI.operands_begin(),
+ MOE = MI.operands_end(); MOI != MOE; ++MOI) {
+ MachineOperand &MO = *MOI;
+ if (MO.isReg()) {
+ if (unsigned Reg = MO.getReg()) {
+ assert(Reg < SystemZ::NUM_TARGET_REGS && "Invalid register number");
+ if (MO.isDef()) {
+ LiveLow &= ~LowGPRs[Reg];
+ LiveHigh &= ~HighGPRs[Reg];
+ } else if (!MO.isUndef()) {
+ UsedLow |= LowGPRs[Reg];
+ UsedHigh |= HighGPRs[Reg];
+ }
+ }
+ }
+ }
+ LiveLow |= UsedLow;
+ LiveHigh |= UsedHigh;
+ }
+
+ return Changed;
+}
+
+bool SystemZShortenInst::runOnMachineFunction(MachineFunction &F) {
+ TII = static_cast<const SystemZInstrInfo *>(F.getTarget().getInstrInfo());
+
+ bool Changed = false;
+ for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
+ MFI != MFE; ++MFI)
+ Changed |= processBlock(MFI);
+
+ return Changed;
+}
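
The isImmLL and isImmLH predicates used by shortenIIF are defined elsewhere in the backend; the sketch below shows the checks they are assumed to perform and the rewrite the pass applies when the other word of the 64-bit register is dead (illustrative only, not the pass itself):

#include <cstdint>

// Assumed meaning of the immediate predicates:
//   isImmLL: the value occupies only bits 0-15 of the word being loaded;
//   isImmLH: the value occupies only bits 16-31.
static bool fitsImmLL(uint64_t Imm) { return (Imm & ~0xffffULL) == 0; }
static bool fitsImmLH(uint64_t Imm) { return (Imm & ~0xffff0000ULL) == 0; }

// Rewrite applied when the opposite word of the GR64 is not live:
//   IILF gr32, Imm   ->  LLILL gr64, Imm          (fitsImmLL)
//   IILF gr32, Imm   ->  LLILH gr64, Imm >> 16    (fitsImmLH)
// and analogously IIHF -> LLIHL / LLIHH for the high word.
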
diff --git a/lib/Target/SystemZ/SystemZSubtarget.cpp b/lib/Target/SystemZ/SystemZSubtarget.cpp
index cfd3324aadb7..3971d5e2a5fa 100644
--- a/lib/Target/SystemZ/SystemZSubtarget.cpp
+++ b/lib/Target/SystemZ/SystemZSubtarget.cpp
@@ -9,6 +9,8 @@
#include "SystemZSubtarget.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/Host.h"
+#include "MCTargetDesc/SystemZMCTargetDesc.h"
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
@@ -16,13 +18,22 @@
using namespace llvm;
+// Pin the vtable to this file.
+void SystemZSubtarget::anchor() {}
+
SystemZSubtarget::SystemZSubtarget(const std::string &TT,
const std::string &CPU,
const std::string &FS)
- : SystemZGenSubtargetInfo(TT, CPU, FS), TargetTriple(TT) {
+ : SystemZGenSubtargetInfo(TT, CPU, FS), HasDistinctOps(false),
+ HasLoadStoreOnCond(false), HasHighWord(false), HasFPExtension(false),
+ TargetTriple(TT) {
std::string CPUName = CPU;
if (CPUName.empty())
- CPUName = "z10";
+ CPUName = "generic";
+#if defined(__linux__) && defined(__s390x__)
+ if (CPUName == "generic")
+ CPUName = sys::getHostCPUName();
+#endif
// Parse features string.
ParseSubtargetFeatures(CPUName, FS);
diff --git a/lib/Target/SystemZ/SystemZSubtarget.h b/lib/Target/SystemZ/SystemZSubtarget.h
index 8d4d450844df..5817491d4585 100644
--- a/lib/Target/SystemZ/SystemZSubtarget.h
+++ b/lib/Target/SystemZ/SystemZSubtarget.h
@@ -26,6 +26,13 @@ class GlobalValue;
class StringRef;
class SystemZSubtarget : public SystemZGenSubtargetInfo {
+ virtual void anchor();
+protected:
+ bool HasDistinctOps;
+ bool HasLoadStoreOnCond;
+ bool HasHighWord;
+ bool HasFPExtension;
+
private:
Triple TargetTriple;
@@ -33,9 +40,24 @@ public:
SystemZSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS);
+ // This is important for reducing register pressure in vector code.
+ virtual bool useAA() const LLVM_OVERRIDE { return true; }
+
// Automatically generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+ // Return true if the target has the distinct-operands facility.
+ bool hasDistinctOps() const { return HasDistinctOps; }
+
+ // Return true if the target has the load/store-on-condition facility.
+ bool hasLoadStoreOnCond() const { return HasLoadStoreOnCond; }
+
+ // Return true if the target has the high-word facility.
+ bool hasHighWord() const { return HasHighWord; }
+
+ // Return true if the target has the floating-point extension facility.
+ bool hasFPExtension() const { return HasFPExtension; }
+
// Return true if GV can be accessed using LARL for reloc model RM
// and code model CM.
bool isPC32DBLSymbol(const GlobalValue *GV, Reloc::Model RM,
diff --git a/lib/Target/SystemZ/SystemZTargetMachine.cpp b/lib/Target/SystemZ/SystemZTargetMachine.cpp
index 8c4c456ef5b7..dee92e960c54 100644
--- a/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -10,6 +10,7 @@
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Transforms/Scalar.h"
using namespace llvm;
@@ -33,6 +34,7 @@ SystemZTargetMachine::SystemZTargetMachine(const Target &T, StringRef TT,
"-f32:32-f64:64-f128:64-a0:8:16-n32:64"),
InstrInfo(*this), TLInfo(*this), TSInfo(*this),
FrameLowering(*this, Subtarget) {
+ initAsmInfo();
}
namespace {
@@ -46,15 +48,61 @@ public:
return getTM<SystemZTargetMachine>();
}
- virtual bool addInstSelector();
+ virtual void addIRPasses() LLVM_OVERRIDE;
+ virtual bool addInstSelector() LLVM_OVERRIDE;
+ virtual bool addPreSched2() LLVM_OVERRIDE;
+ virtual bool addPreEmitPass() LLVM_OVERRIDE;
};
} // end anonymous namespace
+void SystemZPassConfig::addIRPasses() {
+ TargetPassConfig::addIRPasses();
+ addPass(createPartiallyInlineLibCallsPass());
+}
+
bool SystemZPassConfig::addInstSelector() {
addPass(createSystemZISelDag(getSystemZTargetMachine(), getOptLevel()));
return false;
}
+bool SystemZPassConfig::addPreSched2() {
+ if (getSystemZTargetMachine().getSubtargetImpl()->hasLoadStoreOnCond())
+ addPass(&IfConverterID);
+ return true;
+}
+
+bool SystemZPassConfig::addPreEmitPass() {
+ // We eliminate comparisons here rather than earlier because some
+ // transformations can change the set of available CC values and we
+ // generally want those transformations to have priority. This is
+ // especially true in the commonest case where the result of the comparison
+ // is used by a single in-range branch instruction, since we will then
+ // be able to fuse the compare and the branch instead.
+ //
+ // For example, two-address NILF can sometimes be converted into
+ // three-address RISBLG. NILF produces a CC value that indicates whether
+ // the low word is zero, but RISBLG does not modify CC at all. On the
+ // other hand, 64-bit ANDs like NILL can sometimes be converted to RISBG.
+ // The CC value produced by NILL isn't useful for our purposes, but the
+ // value produced by RISBG can be used for any comparison with zero
+ // (not just equality). So there are some transformations that lose
+ // CC values (while still being worthwhile) and others that happen to make
+ // the CC result more useful than it was originally.
+ //
+ // Another reason is that we only want to use BRANCH ON COUNT in cases
+ // where we know that the count register is not going to be spilled.
+ //
+ // Doing it so late makes it more likely that a register will be reused
+ // between the comparison and the branch, but it isn't clear whether
+ // preventing that would be a win or not.
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createSystemZElimComparePass(getSystemZTargetMachine()));
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createSystemZShortenInstPass(getSystemZTargetMachine()));
+ addPass(createSystemZLongBranchPass(getSystemZTargetMachine()));
+ return true;
+}
+
TargetPassConfig *SystemZTargetMachine::createPassConfig(PassManagerBase &PM) {
return new SystemZPassConfig(this, PM);
}
diff --git a/lib/Target/SystemZ/SystemZTargetMachine.h b/lib/Target/SystemZ/SystemZTargetMachine.h
index 98614e7b7e2c..a99a98e08477 100644
--- a/lib/Target/SystemZ/SystemZTargetMachine.h
+++ b/lib/Target/SystemZ/SystemZTargetMachine.h
@@ -20,10 +20,10 @@
#include "SystemZInstrInfo.h"
#include "SystemZRegisterInfo.h"
#include "SystemZSubtarget.h"
+#include "SystemZSelectionDAGInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetSelectionDAGInfo.h"
namespace llvm {
@@ -32,7 +32,7 @@ class SystemZTargetMachine : public LLVMTargetMachine {
const DataLayout DL;
SystemZInstrInfo InstrInfo;
SystemZTargetLowering TLInfo;
- TargetSelectionDAGInfo TSInfo;
+ SystemZSelectionDAGInfo TSInfo;
SystemZFrameLowering FrameLowering;
public:
diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp
index 3d92f297be21..2190198d8c90 100644
--- a/lib/Target/Target.cpp
+++ b/lib/Target/Target.cpp
@@ -88,6 +88,14 @@ LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) {
return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), AS));
}
+LLVMTypeRef LLVMIntPtrTypeInContext(LLVMContextRef C, LLVMTargetDataRef TD) {
+ return wrap(unwrap(TD)->getIntPtrType(*unwrap(C)));
+}
+
+LLVMTypeRef LLVMIntPtrTypeForASInContext(LLVMContextRef C, LLVMTargetDataRef TD, unsigned AS) {
+ return wrap(unwrap(TD)->getIntPtrType(*unwrap(C), AS));
+}
+
unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef TD, LLVMTypeRef Ty) {
return unwrap(TD)->getTypeSizeInBits(unwrap(Ty));
}
diff --git a/lib/Target/TargetLibraryInfo.cpp b/lib/Target/TargetLibraryInfo.cpp
index ee88ce77c09f..3e68fe16d4a4 100644
--- a/lib/Target/TargetLibraryInfo.cpp
+++ b/lib/Target/TargetLibraryInfo.cpp
@@ -27,7 +27,9 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"_IO_getc",
"_IO_putc",
"_ZdaPv",
+ "_ZdaPvRKSt9nothrow_t",
"_ZdlPv",
+ "_ZdlPvRKSt9nothrow_t",
"_Znaj",
"_ZnajRKSt9nothrow_t",
"_Znam",
@@ -36,6 +38,8 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"_ZnwjRKSt9nothrow_t",
"_Znwm",
"_ZnwmRKSt9nothrow_t",
+ "__cospi",
+ "__cospif",
"__cxa_atexit",
"__cxa_guard_abort",
"__cxa_guard_acquire",
@@ -43,6 +47,13 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"__isoc99_scanf",
"__isoc99_sscanf",
"__memcpy_chk",
+ "__sincospi_stret",
+ "__sincospi_stretf",
+ "__sinpi",
+ "__sinpif",
+ "__sqrt_finite",
+ "__sqrtf_finite",
+ "__sqrtl_finite",
"__strdup",
"__strndup",
"__strtok_r",
@@ -165,6 +176,7 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"getlogin_r",
"getpwnam",
"gets",
+ "gettimeofday",
"htonl",
"htons",
"iprintf",
@@ -325,6 +337,24 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"write"
};
+static bool hasSinCosPiStret(const Triple &T) {
+ // Only Darwin variants have _stret versions of combined trig functions.
+ if (!T.isMacOSX() && T.getOS() != Triple::IOS)
+ return false;
+
+ // The ABI is rather complicated on x86, so don't do anything special there.
+ if (T.getArch() == Triple::x86)
+ return false;
+
+ if (T.isMacOSX() && T.isMacOSXVersionLT(10, 9))
+ return false;
+
+ if (T.getOS() == Triple::IOS && T.isOSVersionLT(7, 0))
+ return false;
+
+ return true;
+}
+
/// initialize - Initialize the set of available library functions based on the
/// specified target triple. This should be carefully written so that a missing
/// target triple gets a sane set of defaults.
@@ -344,13 +374,22 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
if (T.isMacOSX()) {
if (T.isMacOSXVersionLT(10, 5))
TLI.setUnavailable(LibFunc::memset_pattern16);
- } else if (T.getOS() == Triple::IOS) {
+ } else if (T.isiOS()) {
if (T.isOSVersionLT(3, 0))
TLI.setUnavailable(LibFunc::memset_pattern16);
} else {
TLI.setUnavailable(LibFunc::memset_pattern16);
}
+ if (!hasSinCosPiStret(T)) {
+ TLI.setUnavailable(LibFunc::sinpi);
+ TLI.setUnavailable(LibFunc::sinpif);
+ TLI.setUnavailable(LibFunc::cospi);
+ TLI.setUnavailable(LibFunc::cospif);
+ TLI.setUnavailable(LibFunc::sincospi_stret);
+ TLI.setUnavailable(LibFunc::sincospi_stretf);
+ }
+
if (T.isMacOSX() && T.getArch() == Triple::x86 &&
!T.isMacOSXVersionLT(10, 7)) {
// x86-32 OSX has a scheme where fwrite and fputs (and some other functions
@@ -487,6 +526,7 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
TLI.setUnavailable(LibFunc::getitimer);
TLI.setUnavailable(LibFunc::getlogin_r);
TLI.setUnavailable(LibFunc::getpwnam);
+ TLI.setUnavailable(LibFunc::gettimeofday);
TLI.setUnavailable(LibFunc::htonl);
TLI.setUnavailable(LibFunc::htons);
TLI.setUnavailable(LibFunc::lchown);
@@ -555,7 +595,7 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
}
// The following functions are available on at least Linux:
- if (T.getOS() != Triple::Linux) {
+ if (!T.isOSLinux()) {
TLI.setUnavailable(LibFunc::dunder_strdup);
TLI.setUnavailable(LibFunc::dunder_strtok_r);
TLI.setUnavailable(LibFunc::dunder_isoc99_scanf);
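
The availability set up here is consumed through the usual TargetLibraryInfo query interface; a minimal sketch of a client-side check (the specific call sites in the optimizers are not part of this patch):

#include "llvm/Target/TargetLibraryInfo.h"

// A transform that wants to form one of the newly-listed calls first asks
// TargetLibraryInfo whether the target triple actually provides it.
static bool canFormSinPi(const llvm::TargetLibraryInfo &TLI) {
  return TLI.has(llvm::LibFunc::sinpi) && TLI.has(llvm::LibFunc::sinpif);
}
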
diff --git a/lib/Target/TargetLoweringObjectFile.cpp b/lib/Target/TargetLoweringObjectFile.cpp
index f5121e34f77f..7b8d1108f1bf 100644
--- a/lib/Target/TargetLoweringObjectFile.cpp
+++ b/lib/Target/TargetLoweringObjectFile.cpp
@@ -97,10 +97,20 @@ static bool IsNullTerminatedString(const Constant *C) {
return false;
}
+/// Return the MCSymbol for the specified global value. This
+/// symbol is the main label that is the address of the global.
+MCSymbol *TargetLoweringObjectFile::getSymbol(Mangler &M,
+ const GlobalValue *GV) const {
+ SmallString<60> NameStr;
+ M.getNameWithPrefix(NameStr, GV, false);
+ return Ctx->GetOrCreateSymbol(NameStr.str());
+}
+
+
MCSymbol *TargetLoweringObjectFile::
getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
MachineModuleInfo *MMI) const {
- return Mang->getSymbol(GV);
+ return getSymbol(*Mang, GV);
}
void TargetLoweringObjectFile::emitPersonalityValue(MCStreamer &Streamer,
@@ -293,7 +303,7 @@ getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
MachineModuleInfo *MMI, unsigned Encoding,
MCStreamer &Streamer) const {
const MCSymbolRefExpr *Ref =
- MCSymbolRefExpr::Create(Mang->getSymbol(GV), getContext());
+ MCSymbolRefExpr::Create(getSymbol(*Mang, GV), getContext());
return getTTypeReference(Ref, Encoding, Streamer);
}
@@ -317,3 +327,9 @@ getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
}
}
}
+
+const MCExpr *TargetLoweringObjectFile::getDebugThreadLocalSymbol(const MCSymbol *Sym) const {
+ // FIXME: It's not clear what, if any, default this should have - perhaps a
+ // null return could mean 'no location' & we should just do that here.
+ return MCSymbolRefExpr::Create(Sym, *Ctx);
+}
diff --git a/lib/Target/TargetMachine.cpp b/lib/Target/TargetMachine.cpp
index e7282519d597..cb42e8311b87 100644
--- a/lib/Target/TargetMachine.cpp
+++ b/lib/Target/TargetMachine.cpp
@@ -78,7 +78,6 @@ void TargetMachine::resetTargetOptions(const MachineFunction *MF) const {
} while (0)
RESET_OPTION(NoFramePointerElim, "no-frame-pointer-elim");
- RESET_OPTION(NoFramePointerElimNonLeaf, "no-frame-pointer-elim-non-leaf");
RESET_OPTION(LessPreciseFPMADOption, "less-precise-fpmad");
RESET_OPTION(UnsafeFPMath, "unsafe-fp-math");
RESET_OPTION(NoInfsFPMath, "no-infs-fp-math");
@@ -165,6 +164,11 @@ CodeGenOpt::Level TargetMachine::getOptLevel() const {
return CodeGenInfo->getOptLevel();
}
+void TargetMachine::setOptLevel(CodeGenOpt::Level Level) const {
+ if (CodeGenInfo)
+ CodeGenInfo->setOptLevel(Level);
+}
+
bool TargetMachine::getAsmVerbosityDefault() {
return AsmVerbosityDefault;
}
diff --git a/lib/Target/TargetMachineC.cpp b/lib/Target/TargetMachineC.cpp
index 01d12e8ba5c0..3d5f8277f00e 100644
--- a/lib/Target/TargetMachineC.cpp
+++ b/lib/Target/TargetMachineC.cpp
@@ -21,6 +21,7 @@
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Host.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdlib>
@@ -60,13 +61,44 @@ inline LLVMTargetRef wrap(const Target * P) {
}
LLVMTargetRef LLVMGetFirstTarget() {
- const Target* target = &*TargetRegistry::begin();
- return wrap(target);
+ if(TargetRegistry::begin() == TargetRegistry::end()) {
+ return NULL;
+ }
+
+ const Target* target = &*TargetRegistry::begin();
+ return wrap(target);
}
LLVMTargetRef LLVMGetNextTarget(LLVMTargetRef T) {
return wrap(unwrap(T)->getNext());
}
+LLVMTargetRef LLVMGetTargetFromName(const char *Name) {
+ StringRef NameRef = Name;
+ for (TargetRegistry::iterator IT = TargetRegistry::begin(),
+ IE = TargetRegistry::end(); IT != IE; ++IT) {
+ if (IT->getName() == NameRef)
+ return wrap(&*IT);
+ }
+
+ return NULL;
+}
+
+LLVMBool LLVMGetTargetFromTriple(const char* TripleStr, LLVMTargetRef *T,
+ char **ErrorMessage) {
+ std::string Error;
+
+ *T = wrap(TargetRegistry::lookupTarget(TripleStr, Error));
+
+ if (!*T) {
+ if (ErrorMessage)
+ *ErrorMessage = strdup(Error.c_str());
+
+ return 1;
+ }
+
+ return 0;
+}
+
const char * LLVMGetTargetName(LLVMTargetRef T) {
return unwrap(T)->getName();
}
@@ -87,9 +119,10 @@ LLVMBool LLVMTargetHasAsmBackend(LLVMTargetRef T) {
return unwrap(T)->hasMCAsmBackend();
}
-LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T, char* Triple,
- char* CPU, char* Features, LLVMCodeGenOptLevel Level, LLVMRelocMode Reloc,
- LLVMCodeModel CodeModel) {
+LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T,
+ const char* Triple, const char* CPU, const char* Features,
+ LLVMCodeGenOptLevel Level, LLVMRelocMode Reloc,
+ LLVMCodeModel CodeModel) {
Reloc::Model RM;
switch (Reloc){
case LLVMRelocStatic:
@@ -158,6 +191,11 @@ LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T) {
return wrap(unwrap(T)->getDataLayout());
}
+void LLVMSetTargetMachineAsmVerbosity(LLVMTargetMachineRef T,
+ LLVMBool VerboseAsm) {
+ unwrap(T)->setAsmVerbosityDefault(VerboseAsm);
+}
+
static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M,
formatted_raw_ostream &OS, LLVMCodeGenFileType codegen, char **ErrorMessage) {
TargetMachine* TM = unwrap(T);
@@ -200,12 +238,12 @@ static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M,
LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
char* Filename, LLVMCodeGenFileType codegen, char** ErrorMessage) {
std::string error;
- raw_fd_ostream dest(Filename, error, raw_fd_ostream::F_Binary);
- formatted_raw_ostream destf(dest);
+ raw_fd_ostream dest(Filename, error, sys::fs::F_Binary);
if (!error.empty()) {
*ErrorMessage = strdup(error.c_str());
return true;
}
+ formatted_raw_ostream destf(dest);
bool Result = LLVMTargetMachineEmit(T, M, destf, codegen, ErrorMessage);
dest.flush();
return Result;
@@ -225,3 +263,7 @@ LLVMBool LLVMTargetMachineEmitToMemoryBuffer(LLVMTargetMachineRef T,
Data.length(), "");
return Result;
}
+
+char *LLVMGetDefaultTargetTriple(void) {
+ return strdup(sys::getDefaultTargetTriple().c_str());
+}
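
Taken together, the additions to the C API in this file support a straightforward target-machine setup from client code; a small sketch follows (the initialization and disposal calls are standard LLVM-C usage, assumed rather than introduced by this patch):

#include <llvm-c/Target.h>
#include <llvm-c/TargetMachine.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  LLVMInitializeAllTargetInfos();
  LLVMInitializeAllTargets();
  LLVMInitializeAllTargetMCs();

  char *Triple = LLVMGetDefaultTargetTriple();         // added above
  LLVMTargetRef T;
  char *Error = NULL;
  if (LLVMGetTargetFromTriple(Triple, &T, &Error)) {   // added above
    fprintf(stderr, "no target for %s: %s\n", Triple, Error);
    free(Error);
    free(Triple);
    return 1;
  }

  LLVMTargetMachineRef TM = LLVMCreateTargetMachine(
      T, Triple, "", "", LLVMCodeGenLevelDefault,
      LLVMRelocDefault, LLVMCodeModelDefault);
  LLVMSetTargetMachineAsmVerbosity(TM, 1);             // added above

  LLVMDisposeTargetMachine(TM);
  free(Triple);
  return 0;
}
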
diff --git a/lib/Target/TargetSubtargetInfo.cpp b/lib/Target/TargetSubtargetInfo.cpp
index af0cef62d552..10e8db5925de 100644
--- a/lib/Target/TargetSubtargetInfo.cpp
+++ b/lib/Target/TargetSubtargetInfo.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;
@@ -22,6 +23,21 @@ TargetSubtargetInfo::TargetSubtargetInfo() {}
TargetSubtargetInfo::~TargetSubtargetInfo() {}
+// Temporary option to compare overall performance change when moving from the
+// SD scheduler to the MachineScheduler pass pipeline. It should be removed
+// before 3.4. The normal way to enable/disable the MachineScheduling pass
+// itself is by using -enable-misched. For targets that already use MI sched
+// (via MySubTarget::enableMachineScheduler()) -misched-bench=false negates the
+// subtarget hook.
+static cl::opt<bool> BenchMachineSched("misched-bench", cl::Hidden,
+ cl::desc("Migrate from the target's default SD scheduler to MI scheduler"));
+
+bool TargetSubtargetInfo::useMachineScheduler() const {
+ if (BenchMachineSched.getNumOccurrences())
+ return BenchMachineSched;
+ return enableMachineScheduler();
+}
+
bool TargetSubtargetInfo::enableMachineScheduler() const {
return false;
}
@@ -35,3 +51,6 @@ bool TargetSubtargetInfo::enablePostRAScheduler(
return false;
}
+bool TargetSubtargetInfo::useAA() const {
+ return false;
+}
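
Both new hooks are meant to be overridden per target, in the same way the SystemZ subtarget change earlier in this patch overrides useAA(); a minimal sketch for a hypothetical subtarget (class and names are illustrative):

#include "llvm/Target/TargetSubtargetInfo.h"

// Hypothetical subtarget that opts in to the MachineScheduler pipeline and
// to alias analysis during code generation.  When -misched-bench is given,
// useMachineScheduler() above overrides this choice from the command line.
class ExampleSubtarget : public llvm::TargetSubtargetInfo {
public:
  virtual bool enableMachineScheduler() const { return true; }
  virtual bool useAA() const { return true; }
};
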
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 68908abb5764..bc8f367e9255 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -9,6 +9,7 @@
#include "MCTargetDesc/X86BaseInfo.h"
#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
@@ -79,7 +80,7 @@ private:
PostfixStack.push_back(std::make_pair(Op, Val));
}
- void popOperator() { InfixOperatorStack.pop_back_val(); }
+ void popOperator() { InfixOperatorStack.pop_back(); }
void pushOperator(InfixCalculatorTok Op) {
// Push the new operator if the stack is empty.
if (InfixOperatorStack.empty()) {
@@ -117,12 +118,12 @@ private:
if (StackOp == IC_RPAREN) {
++ParenCount;
- InfixOperatorStack.pop_back_val();
+ InfixOperatorStack.pop_back();
} else if (StackOp == IC_LPAREN) {
--ParenCount;
- InfixOperatorStack.pop_back_val();
+ InfixOperatorStack.pop_back();
} else {
- InfixOperatorStack.pop_back_val();
+ InfixOperatorStack.pop_back();
PostfixStack.push_back(std::make_pair(StackOp, 0));
}
}
@@ -219,7 +220,9 @@ private:
const MCExpr *getSym() { return Sym; }
StringRef getSymName() { return SymName; }
int64_t getImm() { return Imm + IC.execute(); }
- bool isValidEndState() { return State == IES_RBRAC; }
+ bool isValidEndState() {
+ return State == IES_RBRAC || State == IES_INTEGER;
+ }
bool getStopOnLBrac() { return StopOnLBrac; }
bool getAddImmPrefix() { return AddImmPrefix; }
bool hadError() { return State == IES_ERROR; }
@@ -492,16 +495,17 @@ private:
X86Operand *ParseATTOperand();
X86Operand *ParseIntelOperand();
X86Operand *ParseIntelOffsetOfOperator();
- X86Operand *ParseIntelDotOperator(const MCExpr *Disp, const MCExpr *&NewDisp);
+ bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr *&NewDisp);
X86Operand *ParseIntelOperator(unsigned OpKind);
- X86Operand *ParseIntelMemOperand(unsigned SegReg, int64_t ImmDisp,
- SMLoc StartLoc);
- X86Operand *ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
+ X86Operand *ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start, unsigned Size);
+ X86Operand *ParseIntelMemOperand(int64_t ImmDisp, SMLoc StartLoc,
+ unsigned Size);
+ bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
X86Operand *ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
int64_t ImmDisp, unsigned Size);
- X86Operand *ParseIntelIdentifier(const MCExpr *&Val, StringRef &Identifier,
- InlineAsmIdentifierInfo &Info,
- bool IsUnevaluatedOperand, SMLoc &End);
+ bool ParseIntelIdentifier(const MCExpr *&Val, StringRef &Identifier,
+ InlineAsmIdentifierInfo &Info,
+ bool IsUnevaluatedOperand, SMLoc &End);
X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc);
@@ -552,8 +556,9 @@ private:
/// }
public:
- X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
- : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) {
+ X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
+ const MCInstrInfo &MII)
+ : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) {
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
@@ -811,6 +816,9 @@ struct X86Operand : public MCParsedAsmOperand {
bool isMem256() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 256);
}
+ bool isMem512() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 512);
+ }
bool isMemVX32() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
@@ -828,14 +836,45 @@ struct X86Operand : public MCParsedAsmOperand {
return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
}
+ bool isMemVZ32() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
+ getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
+ }
+ bool isMemVZ64() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
+ getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
+ }
bool isAbsMem() const {
return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
!getMemIndexReg() && getMemScale() == 1;
}
+ bool isMemOffs8() const {
+ return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
+ !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 8);
+ }
+ bool isMemOffs16() const {
+ return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
+ !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 16);
+ }
+ bool isMemOffs32() const {
+ return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
+ !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 32);
+ }
+ bool isMemOffs64() const {
+ return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
+ !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 64);
+ }
+
bool isReg() const { return Kind == Register; }
+ bool isGR32orGR64() const {
+ return Kind == Register &&
+ (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
+ X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
+ }
+
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediates when possible.
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
@@ -849,43 +888,40 @@ struct X86Operand : public MCParsedAsmOperand {
Inst.addOperand(MCOperand::CreateReg(getReg()));
}
- void addImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
+ static unsigned getGR32FromGR64(unsigned RegNo) {
+ switch (RegNo) {
+ default: llvm_unreachable("Unexpected register");
+ case X86::RAX: return X86::EAX;
+ case X86::RCX: return X86::ECX;
+ case X86::RDX: return X86::EDX;
+ case X86::RBX: return X86::EBX;
+ case X86::RBP: return X86::EBP;
+ case X86::RSP: return X86::ESP;
+ case X86::RSI: return X86::ESI;
+ case X86::RDI: return X86::EDI;
+ case X86::R8: return X86::R8D;
+ case X86::R9: return X86::R9D;
+ case X86::R10: return X86::R10D;
+ case X86::R11: return X86::R11D;
+ case X86::R12: return X86::R12D;
+ case X86::R13: return X86::R13D;
+ case X86::R14: return X86::R14D;
+ case X86::R15: return X86::R15D;
+ case X86::RIP: return X86::EIP;
+ }
}
- void addMem8Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
- }
- void addMem16Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
- }
- void addMem32Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
- }
- void addMem64Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
- }
- void addMem80Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
- }
- void addMem128Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
- }
- void addMem256Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
- }
- void addMemVX32Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
- }
- void addMemVY32Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
- }
- void addMemVX64Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
+ void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ unsigned RegNo = getReg();
+ if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
+ RegNo = getGR32FromGR64(RegNo);
+ Inst.addOperand(MCOperand::CreateReg(RegNo));
}
- void addMemVY64Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
+
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
}
void addMemOperands(MCInst &Inst, unsigned N) const {
@@ -906,6 +942,15 @@ struct X86Operand : public MCParsedAsmOperand {
Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
}
+ void addMemOffsOperands(MCInst &Inst, unsigned N) const {
+ assert((N == 1) && "Invalid number of operands!");
+ // Add as immediates when possible.
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
+ }
+
static X86Operand *CreateToken(StringRef Str, SMLoc Loc) {
SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
X86Operand *Res = new X86Operand(Token, Loc, EndLoc);
@@ -1194,6 +1239,7 @@ RewriteIntelBracExpression(SmallVectorImpl<AsmRewrite> *AsmRewrites,
}
}
assert (Found && "Unable to rewrite ImmDisp.");
+ (void)Found;
} else {
// We have a symbolic and an immediate displacement, but no displacement
// before the bracketed expression. Put the immediate displacement
@@ -1223,8 +1269,7 @@ RewriteIntelBracExpression(SmallVectorImpl<AsmRewrite> *AsmRewrites,
}
}
-X86Operand *
-X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
+bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
const AsmToken &Tok = Parser.getTok();
bool Done = false;
@@ -1246,7 +1291,7 @@ X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
Done = true;
break;
}
- return ErrorOperand(Tok.getLoc(), "Unexpected token!");
+ return Error(Tok.getLoc(), "unknown token in expression");
}
case AsmToken::EndOfStatement: {
Done = true;
@@ -1265,18 +1310,18 @@ X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
} else {
if (!isParsingInlineAsm()) {
if (getParser().parsePrimaryExpr(Val, End))
- return ErrorOperand(Tok.getLoc(), "Unexpected identifier!");
+ return Error(Tok.getLoc(), "Unexpected identifier!");
} else {
InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo();
- if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info,
- /*Unevaluated*/ false, End))
- return Err;
+ if (ParseIntelIdentifier(Val, Identifier, Info,
+ /*Unevaluated=*/false, End))
+ return true;
}
SM.onIdentifierExpr(Val, Identifier);
UpdateLocLex = false;
break;
}
- return ErrorOperand(Tok.getLoc(), "Unexpected identifier!");
+ return Error(Tok.getLoc(), "Unexpected identifier!");
}
case AsmToken::Integer:
if (isParsingInlineAsm() && SM.getAddImmPrefix())
@@ -1294,14 +1339,14 @@ X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
case AsmToken::RParen: SM.onRParen(); break;
}
if (SM.hadError())
- return ErrorOperand(Tok.getLoc(), "Unexpected token!");
+ return Error(Tok.getLoc(), "unknown token in expression");
if (!Done && UpdateLocLex) {
End = Tok.getLoc();
Parser.Lex(); // Consume the token.
}
}
- return 0;
+ return false;
}
X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
@@ -1318,8 +1363,8 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
// may have already parsed an immediate displacement before the bracketed
// expression.
IntelExprStateMachine SM(ImmDisp, /*StopOnLBrac=*/false, /*AddImmPrefix=*/true);
- if (X86Operand *Err = ParseIntelExpression(SM, End))
- return Err;
+ if (ParseIntelExpression(SM, End))
+ return 0;
const MCExpr *Disp;
if (const MCExpr *Sym = SM.getSym()) {
@@ -1337,8 +1382,8 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
// Parse the dot operator (e.g., [ebx].foo.bar).
if (Tok.getString().startswith(".")) {
const MCExpr *NewDisp;
- if (X86Operand *Err = ParseIntelDotOperator(Disp, NewDisp))
- return Err;
+ if (ParseIntelDotOperator(Disp, NewDisp))
+ return 0;
End = Tok.getEndLoc();
Parser.Lex(); // Eat the field.
@@ -1366,11 +1411,10 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
}
// Inline assembly may use variable names with namespace alias qualifiers.
-X86Operand *X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val,
- StringRef &Identifier,
- InlineAsmIdentifierInfo &Info,
- bool IsUnevaluatedOperand,
- SMLoc &End) {
+bool X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val,
+ StringRef &Identifier,
+ InlineAsmIdentifierInfo &Info,
+ bool IsUnevaluatedOperand, SMLoc &End) {
assert (isParsingInlineAsm() && "Expected to be parsing inline assembly.");
Val = 0;
@@ -1395,68 +1439,89 @@ X86Operand *X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val,
MCSymbol *Sym = getContext().GetOrCreateSymbol(Identifier);
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
Val = MCSymbolRefExpr::Create(Sym, Variant, getParser().getContext());
- return 0;
+ return false;
}
-/// ParseIntelMemOperand - Parse intel style memory operand.
-X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg,
- int64_t ImmDisp,
- SMLoc Start) {
- const AsmToken &Tok = Parser.getTok();
- SMLoc End;
-
- unsigned Size = getIntelMemOperandSize(Tok.getString());
- if (Size) {
- Parser.Lex(); // Eat operand size (e.g., byte, word).
- if (Tok.getString() != "PTR" && Tok.getString() != "ptr")
- return ErrorOperand(Start, "Expected 'PTR' or 'ptr' token!");
- Parser.Lex(); // Eat ptr.
- }
-
- // Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ].
+/// \brief Parse intel style segment override.
+X86Operand *X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg,
+ SMLoc Start,
+ unsigned Size) {
+ assert(SegReg != 0 && "Tried to parse a segment override without a segment!");
+ const AsmToken &Tok = Parser.getTok(); // Eat colon.
+ if (Tok.isNot(AsmToken::Colon))
+ return ErrorOperand(Tok.getLoc(), "Expected ':' token!");
+ Parser.Lex(); // Eat ':'
+
+ int64_t ImmDisp = 0;
if (getLexer().is(AsmToken::Integer)) {
+ ImmDisp = Tok.getIntVal();
+ AsmToken ImmDispToken = Parser.Lex(); // Eat the integer.
+
if (isParsingInlineAsm())
- InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix,
- Tok.getLoc()));
- int64_t ImmDisp = Tok.getIntVal();
- Parser.Lex(); // Eat the integer.
- if (getLexer().isNot(AsmToken::LBrac))
- return ErrorOperand(Start, "Expected '[' token!");
- return ParseIntelBracExpression(SegReg, Start, ImmDisp, Size);
+ InstInfo->AsmRewrites->push_back(
+ AsmRewrite(AOK_ImmPrefix, ImmDispToken.getLoc()));
+
+ if (getLexer().isNot(AsmToken::LBrac)) {
+ // An immediate following a 'segment register', 'colon' token sequence can
+ // be followed by a bracketed expression. If it isn't, we know we have our
+ // final segment override.
+ const MCExpr *Disp = MCConstantExpr::Create(ImmDisp, getContext());
+ return X86Operand::CreateMem(SegReg, Disp, /*BaseReg=*/0, /*IndexReg=*/0,
+ /*Scale=*/1, Start, ImmDispToken.getEndLoc(),
+ Size);
+ }
}
if (getLexer().is(AsmToken::LBrac))
return ParseIntelBracExpression(SegReg, Start, ImmDisp, Size);
- if (!ParseRegister(SegReg, Start, End)) {
- // Handel SegReg : [ ... ]
- if (getLexer().isNot(AsmToken::Colon))
- return ErrorOperand(Start, "Expected ':' token!");
- Parser.Lex(); // Eat :
- if (getLexer().isNot(AsmToken::LBrac))
- return ErrorOperand(Start, "Expected '[' token!");
- return ParseIntelBracExpression(SegReg, Start, ImmDisp, Size);
+ const MCExpr *Val;
+ SMLoc End;
+ if (!isParsingInlineAsm()) {
+ if (getParser().parsePrimaryExpr(Val, End))
+ return ErrorOperand(Tok.getLoc(), "unknown token in expression");
+
+ return X86Operand::CreateMem(Val, Start, End, Size);
}
+ InlineAsmIdentifierInfo Info;
+ StringRef Identifier = Tok.getString();
+ if (ParseIntelIdentifier(Val, Identifier, Info,
+ /*Unevaluated=*/false, End))
+ return 0;
+ return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0,/*IndexReg=*/0,
+ /*Scale=*/1, Start, End, Size, Identifier, Info);
+}
+
+/// ParseIntelMemOperand - Parse intel style memory operand.
+X86Operand *X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp, SMLoc Start,
+ unsigned Size) {
+ const AsmToken &Tok = Parser.getTok();
+ SMLoc End;
+
+ // Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ].
+ if (getLexer().is(AsmToken::LBrac))
+ return ParseIntelBracExpression(/*SegReg=*/0, Start, ImmDisp, Size);
+
const MCExpr *Val;
if (!isParsingInlineAsm()) {
if (getParser().parsePrimaryExpr(Val, End))
- return ErrorOperand(Tok.getLoc(), "Unexpected token!");
+ return ErrorOperand(Tok.getLoc(), "unknown token in expression");
return X86Operand::CreateMem(Val, Start, End, Size);
}
InlineAsmIdentifierInfo Info;
StringRef Identifier = Tok.getString();
- if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info,
- /*Unevaluated*/ false, End))
- return Err;
- return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0,/*IndexReg=*/0,
+ if (ParseIntelIdentifier(Val, Identifier, Info,
+ /*Unevaluated=*/false, End))
+ return 0;
+ return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0, /*IndexReg=*/0,
/*Scale=*/1, Start, End, Size, Identifier, Info);
}
/// Parse the '.' operator.
-X86Operand *X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
+bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
const MCExpr *&NewDisp) {
const AsmToken &Tok = Parser.getTok();
int64_t OrigDispVal, DotDispVal;
@@ -1465,7 +1530,7 @@ X86Operand *X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
if (const MCConstantExpr *OrigDisp = dyn_cast<MCConstantExpr>(Disp))
OrigDispVal = OrigDisp->getValue();
else
- return ErrorOperand(Tok.getLoc(), "Non-constant offsets are not supported!");
+ return Error(Tok.getLoc(), "Non-constant offsets are not supported!");
// Drop the '.'.
StringRef DotDispStr = Tok.getString().drop_front(1);
@@ -1480,10 +1545,10 @@ X86Operand *X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second,
DotDisp))
- return ErrorOperand(Tok.getLoc(), "Unable to lookup field reference!");
+ return Error(Tok.getLoc(), "Unable to lookup field reference!");
DotDispVal = DotDisp;
} else
- return ErrorOperand(Tok.getLoc(), "Unexpected token type!");
+ return Error(Tok.getLoc(), "Unexpected token type!");
if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) {
SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data());
@@ -1494,7 +1559,7 @@ X86Operand *X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
}
NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext());
- return 0;
+ return false;
}
/// Parse the 'offset' operator. This operator is used to specify the
@@ -1508,9 +1573,9 @@ X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() {
InlineAsmIdentifierInfo Info;
SMLoc Start = Tok.getLoc(), End;
StringRef Identifier = Tok.getString();
- if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info,
- /*Unevaluated*/ false, End))
- return Err;
+ if (ParseIntelIdentifier(Val, Identifier, Info,
+ /*Unevaluated=*/false, End))
+ return 0;
// Don't emit the offset operator.
InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7));
@@ -1544,9 +1609,12 @@ X86Operand *X86AsmParser::ParseIntelOperator(unsigned OpKind) {
InlineAsmIdentifierInfo Info;
SMLoc Start = Tok.getLoc(), End;
StringRef Identifier = Tok.getString();
- if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info,
- /*Unevaluated*/ true, End))
- return Err;
+ if (ParseIntelIdentifier(Val, Identifier, Info,
+ /*Unevaluated=*/true, End))
+ return 0;
+
+ if (!Info.OpDecl)
+ return ErrorOperand(Start, "unable to lookup expression");
unsigned CVal = 0;
switch(OpKind) {
@@ -1567,7 +1635,7 @@ X86Operand *X86AsmParser::ParseIntelOperator(unsigned OpKind) {
X86Operand *X86AsmParser::ParseIntelOperand() {
const AsmToken &Tok = Parser.getTok();
- SMLoc Start = Tok.getLoc(), End;
+ SMLoc Start, End;
// Offset, length, type and size operators.
if (isParsingInlineAsm()) {
@@ -1582,14 +1650,23 @@ X86Operand *X86AsmParser::ParseIntelOperand() {
return ParseIntelOperator(IOK_TYPE);
}
+ unsigned Size = getIntelMemOperandSize(Tok.getString());
+ if (Size) {
+ Parser.Lex(); // Eat operand size (e.g., byte, word).
+ if (Tok.getString() != "PTR" && Tok.getString() != "ptr")
+ return ErrorOperand(Start, "Expected 'PTR' or 'ptr' token!");
+ Parser.Lex(); // Eat ptr.
+ }
+ Start = Tok.getLoc();
+
// Immediate.
if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Minus) ||
getLexer().is(AsmToken::LParen)) {
AsmToken StartTok = Tok;
IntelExprStateMachine SM(/*Imm=*/0, /*StopOnLBrac=*/true,
/*AddImmPrefix=*/false);
- if (X86Operand *Err = ParseIntelExpression(SM, End))
- return Err;
+ if (ParseIntelExpression(SM, End))
+ return 0;
int64_t Imm = SM.getImm();
if (isParsingInlineAsm()) {
@@ -1613,23 +1690,22 @@ X86Operand *X86AsmParser::ParseIntelOperand() {
"before bracketed expr.");
// Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ].
- return ParseIntelMemOperand(/*SegReg=*/0, Imm, Start);
+ return ParseIntelMemOperand(Imm, Start, Size);
}
// Register.
unsigned RegNo = 0;
if (!ParseRegister(RegNo, Start, End)) {
// If this is a segment register followed by a ':', then this is the start
- // of a memory reference, otherwise this is a normal register reference.
+ // of a segment override, otherwise this is a normal register reference.
if (getLexer().isNot(AsmToken::Colon))
return X86Operand::CreateReg(RegNo, Start, End);
- getParser().Lex(); // Eat the colon.
- return ParseIntelMemOperand(/*SegReg=*/RegNo, /*Disp=*/0, Start);
+ return ParseIntelSegmentOverride(/*SegReg=*/RegNo, Start, Size);
}
// Memory operand.
- return ParseIntelMemOperand(/*SegReg=*/0, /*Disp=*/0, Start);
+ return ParseIntelMemOperand(/*Disp=*/0, Start, Size);
}
X86Operand *X86AsmParser::ParseATTOperand() {
@@ -1941,6 +2017,47 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
}
}
+ if (STI.getFeatureBits() & X86::FeatureAVX512) {
+ // Parse mask register {%k1}
+ if (getLexer().is(AsmToken::LCurly)) {
+ SMLoc Loc = Parser.getTok().getLoc();
+ Operands.push_back(X86Operand::CreateToken("{", Loc));
+ Parser.Lex(); // Eat the {
+ if (X86Operand *Op = ParseOperand()) {
+ Operands.push_back(Op);
+ if (!getLexer().is(AsmToken::RCurly)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.eatToEndOfStatement();
+ return Error(Loc, "Expected } at this point");
+ }
+ Loc = Parser.getTok().getLoc();
+ Operands.push_back(X86Operand::CreateToken("}", Loc));
+ Parser.Lex(); // Eat the }
+ } else {
+ Parser.eatToEndOfStatement();
+ return true;
+ }
+ }
+ // Parse "zeroing non-masked" semantic {z}
+ if (getLexer().is(AsmToken::LCurly)) {
+ SMLoc Loc = Parser.getTok().getLoc();
+ Operands.push_back(X86Operand::CreateToken("{z}", Loc));
+ Parser.Lex(); // Eat the {
+ if (!getLexer().is(AsmToken::Identifier) || getLexer().getTok().getIdentifier() != "z") {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.eatToEndOfStatement();
+ return Error(Loc, "Expected z at this point");
+ }
+ Parser.Lex(); // Eat the z
+ if (!getLexer().is(AsmToken::RCurly)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.eatToEndOfStatement();
+ return Error(Loc, "Expected } at this point");
+ }
+ Parser.Lex(); // Eat the }
+ }
+ }
+
if (getLexer().isNot(AsmToken::EndOfStatement)) {
SMLoc Loc = getLexer().getLoc();
Parser.eatToEndOfStatement();
@@ -2192,6 +2309,55 @@ processInstruction(MCInst &Inst,
case X86::SBB16i16: return convert16i16to16ri8(Inst, X86::SBB16ri8);
case X86::SBB32i32: return convert32i32to32ri8(Inst, X86::SBB32ri8);
case X86::SBB64i32: return convert64i32to64ri8(Inst, X86::SBB64ri8);
+ case X86::VMOVAPDrr:
+ case X86::VMOVAPDYrr:
+ case X86::VMOVAPSrr:
+ case X86::VMOVAPSYrr:
+ case X86::VMOVDQArr:
+ case X86::VMOVDQAYrr:
+ case X86::VMOVDQUrr:
+ case X86::VMOVDQUYrr:
+ case X86::VMOVUPDrr:
+ case X86::VMOVUPDYrr:
+ case X86::VMOVUPSrr:
+ case X86::VMOVUPSYrr: {
+ if (X86II::isX86_64ExtendedReg(Inst.getOperand(0).getReg()) ||
+ !X86II::isX86_64ExtendedReg(Inst.getOperand(1).getReg()))
+ return false;
+
+ unsigned NewOpc;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
+ case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
+ case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
+ case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
+ case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
+ case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
+ case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
+ case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
+ case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
+ case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
+ case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
+ case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
+ }
+ Inst.setOpcode(NewOpc);
+ return true;
+ }
+ case X86::VMOVSDrr:
+ case X86::VMOVSSrr: {
+ if (X86II::isX86_64ExtendedReg(Inst.getOperand(0).getReg()) ||
+ !X86II::isX86_64ExtendedReg(Inst.getOperand(2).getReg()))
+ return false;
+ unsigned NewOpc;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
+ case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
+ }
+ Inst.setOpcode(NewOpc);
+ return true;
+ }
}
}
@@ -2306,25 +2472,25 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
unsigned Match1, Match2, Match3, Match4;
Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
- isParsingIntelSyntax());
+ MatchingInlineAsm, isParsingIntelSyntax());
// If this returned as a missing feature failure, remember that.
if (Match1 == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
Tmp[Base.size()] = Suffixes[1];
Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
- isParsingIntelSyntax());
+ MatchingInlineAsm, isParsingIntelSyntax());
// If this returned as a missing feature failure, remember that.
if (Match2 == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
Tmp[Base.size()] = Suffixes[2];
Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
- isParsingIntelSyntax());
+ MatchingInlineAsm, isParsingIntelSyntax());
// If this returned as a missing feature failure, remember that.
if (Match3 == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
Tmp[Base.size()] = Suffixes[3];
Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
- isParsingIntelSyntax());
+ MatchingInlineAsm, isParsingIntelSyntax());
// If this returned as a missing feature failure, remember that.
if (Match4 == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
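
The brace handling added to ParseInstruction above accepts the AVX-512 write-mask and zeroing annotations; the strings below illustrate the AT&T-syntax forms it parses (the mnemonic and register choices are examples, not taken from this patch):

// After the ordinary operands, the parser accepts an optional "{" k-register
// "}" group (merge-masking) and an optional "{z}" token (zeroing-masking).
const char *MaskedForm  = "vaddps %zmm0, %zmm1, %zmm2 {%k1}";
const char *ZeroingForm = "vaddps %zmm0, %zmm1, %zmm2 {%k1} {z}";
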
diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt
index 7cb71f066cca..7e20151a19fc 100644
--- a/lib/Target/X86/CMakeLists.txt
+++ b/lib/Target/X86/CMakeLists.txt
@@ -53,7 +53,7 @@ endif()
add_llvm_target(X86CodeGen ${sources})
-add_dependencies(LLVMX86CodeGen intrinsics_gen)
+add_dependencies(LLVMX86CodeGen X86CommonTableGen intrinsics_gen)
add_subdirectory(AsmParser)
add_subdirectory(Disassembler)
diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp
index ca6f80ce3e58..903e36cfe6ce 100644
--- a/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -190,94 +190,8 @@ static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch,
uint64_t Address, uint64_t Offset,
uint64_t Width, MCInst &MI,
const MCDisassembler *Dis) {
- LLVMOpInfoCallback getOpInfo = Dis->getLLVMOpInfoCallback();
- struct LLVMOpInfo1 SymbolicOp;
- memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1));
- SymbolicOp.Value = Value;
- void *DisInfo = Dis->getDisInfoBlock();
-
- if (!getOpInfo ||
- !getOpInfo(DisInfo, Address, Offset, Width, 1, &SymbolicOp)) {
- // Clear SymbolicOp.Value from above and also all other fields.
- memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1));
- LLVMSymbolLookupCallback SymbolLookUp = Dis->getLLVMSymbolLookupCallback();
- if (!SymbolLookUp)
- return false;
- uint64_t ReferenceType;
- if (isBranch)
- ReferenceType = LLVMDisassembler_ReferenceType_In_Branch;
- else
- ReferenceType = LLVMDisassembler_ReferenceType_InOut_None;
- const char *ReferenceName;
- const char *Name = SymbolLookUp(DisInfo, Value, &ReferenceType, Address,
- &ReferenceName);
- if (Name) {
- SymbolicOp.AddSymbol.Name = Name;
- SymbolicOp.AddSymbol.Present = true;
- }
- // For branches always create an MCExpr so it gets printed as hex address.
- else if (isBranch) {
- SymbolicOp.Value = Value;
- }
- if(ReferenceType == LLVMDisassembler_ReferenceType_Out_SymbolStub)
- (*Dis->CommentStream) << "symbol stub for: " << ReferenceName;
- if (!Name && !isBranch)
- return false;
- }
-
- MCContext *Ctx = Dis->getMCContext();
- const MCExpr *Add = NULL;
- if (SymbolicOp.AddSymbol.Present) {
- if (SymbolicOp.AddSymbol.Name) {
- StringRef Name(SymbolicOp.AddSymbol.Name);
- MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
- Add = MCSymbolRefExpr::Create(Sym, *Ctx);
- } else {
- Add = MCConstantExpr::Create((int)SymbolicOp.AddSymbol.Value, *Ctx);
- }
- }
-
- const MCExpr *Sub = NULL;
- if (SymbolicOp.SubtractSymbol.Present) {
- if (SymbolicOp.SubtractSymbol.Name) {
- StringRef Name(SymbolicOp.SubtractSymbol.Name);
- MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
- Sub = MCSymbolRefExpr::Create(Sym, *Ctx);
- } else {
- Sub = MCConstantExpr::Create((int)SymbolicOp.SubtractSymbol.Value, *Ctx);
- }
- }
-
- const MCExpr *Off = NULL;
- if (SymbolicOp.Value != 0)
- Off = MCConstantExpr::Create(SymbolicOp.Value, *Ctx);
-
- const MCExpr *Expr;
- if (Sub) {
- const MCExpr *LHS;
- if (Add)
- LHS = MCBinaryExpr::CreateSub(Add, Sub, *Ctx);
- else
- LHS = MCUnaryExpr::CreateMinus(Sub, *Ctx);
- if (Off != 0)
- Expr = MCBinaryExpr::CreateAdd(LHS, Off, *Ctx);
- else
- Expr = LHS;
- } else if (Add) {
- if (Off != 0)
- Expr = MCBinaryExpr::CreateAdd(Add, Off, *Ctx);
- else
- Expr = Add;
- } else {
- if (Off != 0)
- Expr = Off;
- else
- Expr = MCConstantExpr::Create(0, *Ctx);
- }
-
- MI.addOperand(MCOperand::CreateExpr(Expr));
-
- return true;
+ return Dis->tryAddingSymbolicOperand(MI, Value, Address, isBranch,
+ Offset, Width);
}
 /// tryAddingPcLoadReferenceComment - tries to add a comment as to what is being
@@ -290,15 +204,7 @@ static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch,
static void tryAddingPcLoadReferenceComment(uint64_t Address, uint64_t Value,
const void *Decoder) {
const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
- LLVMSymbolLookupCallback SymbolLookUp = Dis->getLLVMSymbolLookupCallback();
- if (SymbolLookUp) {
- void *DisInfo = Dis->getDisInfoBlock();
- uint64_t ReferenceType = LLVMDisassembler_ReferenceType_In_PCrel_Load;
- const char *ReferenceName;
- (void)SymbolLookUp(DisInfo, Value, &ReferenceType, Address, &ReferenceName);
- if(ReferenceType == LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr)
- (*Dis->CommentStream) << "literal pool for: " << ReferenceName;
- }
+ Dis->tryAddingPcLoadReferenceComment(Value, Address);
}
/// translateImmediate - Appends an immediate operand to an MCInst.
@@ -325,16 +231,18 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
default:
break;
case 1:
- type = TYPE_MOFFS8;
+ if(immediate & 0x80)
+ immediate |= ~(0xffull);
break;
case 2:
- type = TYPE_MOFFS16;
+ if(immediate & 0x8000)
+ immediate |= ~(0xffffull);
break;
case 4:
- type = TYPE_MOFFS32;
+ if(immediate & 0x80000000)
+ immediate |= ~(0xffffffffull);
break;
case 8:
- type = TYPE_MOFFS64;
break;
}
}
@@ -357,16 +265,18 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
Opcode != X86::VMPSADBWrri && Opcode != X86::VDPPSYrri &&
Opcode != X86::VDPPSYrmi && Opcode != X86::VDPPDrri &&
Opcode != X86::VINSERTPSrr)
- type = TYPE_MOFFS8;
+ if(immediate & 0x80)
+ immediate |= ~(0xffull);
break;
case ENCODING_IW:
- type = TYPE_MOFFS16;
+ if(immediate & 0x8000)
+ immediate |= ~(0xffffull);
break;
case ENCODING_ID:
- type = TYPE_MOFFS32;
+ if(immediate & 0x80000000)
+ immediate |= ~(0xffffffffull);
break;
case ENCODING_IO:
- type = TYPE_MOFFS64;
break;
}
}
@@ -380,33 +290,27 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
case TYPE_XMM256:
mcInst.addOperand(MCOperand::CreateReg(X86::YMM0 + (immediate >> 4)));
return;
+ case TYPE_XMM512:
+ mcInst.addOperand(MCOperand::CreateReg(X86::ZMM0 + (immediate >> 4)));
+ return;
case TYPE_REL8:
isBranch = true;
pcrel = insn.startLocation + insn.immediateOffset + insn.immediateSize;
- // fall through to sign extend the immediate if needed.
- case TYPE_MOFFS8:
if(immediate & 0x80)
immediate |= ~(0xffull);
break;
- case TYPE_MOFFS16:
- if(immediate & 0x8000)
- immediate |= ~(0xffffull);
- break;
case TYPE_REL32:
case TYPE_REL64:
isBranch = true;
pcrel = insn.startLocation + insn.immediateOffset + insn.immediateSize;
- // fall through to sign extend the immediate if needed.
- case TYPE_MOFFS32:
if(immediate & 0x80000000)
immediate |= ~(0xffffffffull);
break;
- case TYPE_MOFFS64:
default:
// operand is 64 bits wide. Do nothing.
break;
}
-
+
if(!tryAddingSymbolicOperand(immediate + pcrel, isBranch, insn.startLocation,
insn.immediateOffset, insn.immediateSize,
mcInst, Dis))
@@ -537,6 +441,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
EA_BASES_64BIT
REGS_XMM
REGS_YMM
+ REGS_ZMM
#undef ENTRY
}
} else {
@@ -659,6 +564,7 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
case TYPE_XMM64:
case TYPE_XMM128:
case TYPE_XMM256:
+ case TYPE_XMM512:
case TYPE_DEBUGREG:
case TYPE_CONTROLREG:
return translateRMRegister(mcInst, insn);
@@ -777,6 +683,15 @@ static bool translateInstruction(MCInst &mcInst,
}
mcInst.setOpcode(insn.instructionID);
+  // If, when reading the prefix bytes, we determined that the overlapping 0xf2
+  // or 0xf3 prefix byte should be disassembled as xrelease or xacquire, then
+  // set the opcode to those instead of the rep and repne opcodes.
+ if (insn.xAcquireRelease) {
+ if(mcInst.getOpcode() == X86::REP_PREFIX)
+ mcInst.setOpcode(X86::XRELEASE_PREFIX);
+ else if(mcInst.getOpcode() == X86::REPNE_PREFIX)
+ mcInst.setOpcode(X86::XACQUIRE_PREFIX);
+ }
int index;
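The translateImmediate() hunks above drop the old TYPE_MOFFS* re-typing and instead sign-extend the raw immediate in place before it reaches tryAddingSymbolicOperand(). A minimal standalone sketch of that sign-extension pattern (the function name is illustrative, not from the patch):

    #include <cstdint>

    // Sign-extend an N-byte immediate (N = 1, 2 or 4) to 64 bits, mirroring the
    // "if (immediate & 0x80) immediate |= ~(0xffull);" pattern used above.
    static uint64_t signExtendImmediate(uint64_t immediate, unsigned size) {
      switch (size) {
      case 1: if (immediate & 0x80)       immediate |= ~0xffull;       break;
      case 2: if (immediate & 0x8000)     immediate |= ~0xffffull;     break;
      case 4: if (immediate & 0x80000000) immediate |= ~0xffffffffull; break;
      default: break; // 8-byte immediates already occupy the full width.
      }
      return immediate;
    }

    // e.g. signExtendImmediate(0xfe, 1) == 0xfffffffffffffffe, i.e. -2.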
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
index e40edba6d689..c81a85755f82 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
@@ -25,8 +25,6 @@
#define TRUE 1
#define FALSE 0
-typedef int8_t bool;
-
#ifndef NDEBUG
#define debug(s) do { x86DisassemblerDebug(__FILE__, __LINE__, s); } while (0)
#else
@@ -81,6 +79,15 @@ static int modRMRequired(OpcodeType type,
case THREEBYTE_A7:
decision = &THREEBYTEA7_SYM;
break;
+ case XOP8_MAP:
+ decision = &XOP8_MAP_SYM;
+ break;
+ case XOP9_MAP:
+ decision = &XOP9_MAP_SYM;
+ break;
+ case XOPA_MAP:
+ decision = &XOPA_MAP_SYM;
+ break;
}
return decision->opcodeDecisions[insnContext].modRMDecisions[opcode].
@@ -122,6 +129,15 @@ static InstrUID decode(OpcodeType type,
case THREEBYTE_A7:
dec = &THREEBYTEA7_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
break;
+ case XOP8_MAP:
+ dec = &XOP8_MAP_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
+ break;
+ case XOP9_MAP:
+ dec = &XOP9_MAP_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
+ break;
+ case XOPA_MAP:
+ dec = &XOPA_MAP_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
+ break;
}
switch (dec->modrm_type) {
@@ -305,6 +321,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
BOOL prefixGroups[4] = { FALSE };
uint64_t prefixLocation;
uint8_t byte = 0;
+ uint8_t nextByte;
BOOL hasAdSize = FALSE;
BOOL hasOpSize = FALSE;
@@ -314,20 +331,42 @@ static int readPrefixes(struct InternalInstruction* insn) {
while (isPrefix) {
prefixLocation = insn->readerCursor;
+ /* If we fail reading prefixes, just stop here and let the opcode reader deal with it */
if (consumeByte(insn, &byte))
- return -1;
+ break;
/*
* If the byte is a LOCK/REP/REPNE prefix and not a part of the opcode, then
* break and let it be disassembled as a normal "instruction".
*/
+ if (insn->readerCursor - 1 == insn->startLocation && byte == 0xf0)
+ break;
+
if (insn->readerCursor - 1 == insn->startLocation
- && (byte == 0xf0 || byte == 0xf2 || byte == 0xf3)) {
- uint8_t nextByte;
- if (byte == 0xf0)
- break;
- if (lookAtByte(insn, &nextByte))
- return -1;
+ && (byte == 0xf2 || byte == 0xf3)
+ && !lookAtByte(insn, &nextByte))
+ {
+ /*
+ * If the byte is 0xf2 or 0xf3, and any of the following conditions are
+ * met:
+ * - it is followed by a LOCK (0xf0) prefix
+ * - it is followed by an xchg instruction
+       * then it should be disassembled as an xacquire/xrelease, not repne/rep.
+ */
+ if ((byte == 0xf2 || byte == 0xf3) &&
+          ((nextByte == 0xf0) ||
+ ((nextByte & 0xfe) == 0x86 || (nextByte & 0xf8) == 0x90)))
+ insn->xAcquireRelease = TRUE;
+ /*
+       * Also, if the byte is 0xf3 and the following condition is met:
+       *   - it is followed by a "mov mem, reg" (opcode 0x88/0x89) or
+       *     "mov mem, imm" (opcode 0xc6/0xc7) instruction,
+       * then it should be disassembled as an xrelease, not rep.
+ */
+ if (byte == 0xf3 &&
+ (nextByte == 0x88 || nextByte == 0x89 ||
+ nextByte == 0xc6 || nextByte == 0xc7))
+ insn->xAcquireRelease = TRUE;
if (insn->mode == MODE_64BIT && (nextByte & 0xf0) == 0x40) {
if (consumeByte(insn, &nextByte))
return -1;
@@ -405,7 +444,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
dbgprintf(insn, "Found prefix 0x%hhx", byte);
}
- insn->vexSize = 0;
+ insn->vexXopType = TYPE_NO_VEX_XOP;
if (byte == 0xc4) {
uint8_t byte1;
@@ -416,7 +455,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
}
if (insn->mode == MODE_64BIT || (byte1 & 0xc0) == 0xc0) {
- insn->vexSize = 3;
+ insn->vexXopType = TYPE_VEX_3B;
insn->necessaryPrefixLocation = insn->readerCursor - 1;
}
else {
@@ -424,22 +463,22 @@ static int readPrefixes(struct InternalInstruction* insn) {
insn->necessaryPrefixLocation = insn->readerCursor - 1;
}
- if (insn->vexSize == 3) {
- insn->vexPrefix[0] = byte;
- consumeByte(insn, &insn->vexPrefix[1]);
- consumeByte(insn, &insn->vexPrefix[2]);
+ if (insn->vexXopType == TYPE_VEX_3B) {
+ insn->vexXopPrefix[0] = byte;
+ consumeByte(insn, &insn->vexXopPrefix[1]);
+ consumeByte(insn, &insn->vexXopPrefix[2]);
/* We simulate the REX prefix for simplicity's sake */
if (insn->mode == MODE_64BIT) {
insn->rexPrefix = 0x40
- | (wFromVEX3of3(insn->vexPrefix[2]) << 3)
- | (rFromVEX2of3(insn->vexPrefix[1]) << 2)
- | (xFromVEX2of3(insn->vexPrefix[1]) << 1)
- | (bFromVEX2of3(insn->vexPrefix[1]) << 0);
+ | (wFromVEX3of3(insn->vexXopPrefix[2]) << 3)
+ | (rFromVEX2of3(insn->vexXopPrefix[1]) << 2)
+ | (xFromVEX2of3(insn->vexXopPrefix[1]) << 1)
+ | (bFromVEX2of3(insn->vexXopPrefix[1]) << 0);
}
- switch (ppFromVEX3of3(insn->vexPrefix[2]))
+ switch (ppFromVEX3of3(insn->vexXopPrefix[2]))
{
default:
break;
@@ -448,7 +487,9 @@ static int readPrefixes(struct InternalInstruction* insn) {
break;
}
- dbgprintf(insn, "Found VEX prefix 0x%hhx 0x%hhx 0x%hhx", insn->vexPrefix[0], insn->vexPrefix[1], insn->vexPrefix[2]);
+ dbgprintf(insn, "Found VEX prefix 0x%hhx 0x%hhx 0x%hhx",
+ insn->vexXopPrefix[0], insn->vexXopPrefix[1],
+ insn->vexXopPrefix[2]);
}
}
else if (byte == 0xc5) {
@@ -460,22 +501,22 @@ static int readPrefixes(struct InternalInstruction* insn) {
}
if (insn->mode == MODE_64BIT || (byte1 & 0xc0) == 0xc0) {
- insn->vexSize = 2;
+ insn->vexXopType = TYPE_VEX_2B;
}
else {
unconsumeByte(insn);
}
- if (insn->vexSize == 2) {
- insn->vexPrefix[0] = byte;
- consumeByte(insn, &insn->vexPrefix[1]);
+ if (insn->vexXopType == TYPE_VEX_2B) {
+ insn->vexXopPrefix[0] = byte;
+ consumeByte(insn, &insn->vexXopPrefix[1]);
if (insn->mode == MODE_64BIT) {
insn->rexPrefix = 0x40
- | (rFromVEX2of2(insn->vexPrefix[1]) << 2);
+ | (rFromVEX2of2(insn->vexXopPrefix[1]) << 2);
}
- switch (ppFromVEX2of2(insn->vexPrefix[1]))
+ switch (ppFromVEX2of2(insn->vexXopPrefix[1]))
{
default:
break;
@@ -484,7 +525,53 @@ static int readPrefixes(struct InternalInstruction* insn) {
break;
}
- dbgprintf(insn, "Found VEX prefix 0x%hhx 0x%hhx", insn->vexPrefix[0], insn->vexPrefix[1]);
+ dbgprintf(insn, "Found VEX prefix 0x%hhx 0x%hhx", insn->vexXopPrefix[0], insn->vexXopPrefix[1]);
+ }
+ }
+ else if (byte == 0x8f) {
+ uint8_t byte1;
+
+ if (lookAtByte(insn, &byte1)) {
+ dbgprintf(insn, "Couldn't read second byte of XOP");
+ return -1;
+ }
+
+ if ((byte1 & 0x38) != 0x0) { /* 0 in these 3 bits is a POP instruction. */
+ insn->vexXopType = TYPE_XOP;
+ insn->necessaryPrefixLocation = insn->readerCursor - 1;
+ }
+ else {
+ unconsumeByte(insn);
+ insn->necessaryPrefixLocation = insn->readerCursor - 1;
+ }
+
+ if (insn->vexXopType == TYPE_XOP) {
+ insn->vexXopPrefix[0] = byte;
+ consumeByte(insn, &insn->vexXopPrefix[1]);
+ consumeByte(insn, &insn->vexXopPrefix[2]);
+
+ /* We simulate the REX prefix for simplicity's sake */
+
+ if (insn->mode == MODE_64BIT) {
+ insn->rexPrefix = 0x40
+ | (wFromXOP3of3(insn->vexXopPrefix[2]) << 3)
+ | (rFromXOP2of3(insn->vexXopPrefix[1]) << 2)
+ | (xFromXOP2of3(insn->vexXopPrefix[1]) << 1)
+ | (bFromXOP2of3(insn->vexXopPrefix[1]) << 0);
+ }
+
+ switch (ppFromXOP3of3(insn->vexXopPrefix[2]))
+ {
+ default:
+ break;
+ case VEX_PREFIX_66:
+ hasOpSize = TRUE;
+ break;
+ }
+
+ dbgprintf(insn, "Found XOP prefix 0x%hhx 0x%hhx 0x%hhx",
+ insn->vexXopPrefix[0], insn->vexXopPrefix[1],
+ insn->vexXopPrefix[2]);
}
}
else {
@@ -559,37 +646,49 @@ static int readOpcode(struct InternalInstruction* insn) {
insn->opcodeType = ONEBYTE;
- if (insn->vexSize == 3)
+ if (insn->vexXopType == TYPE_VEX_3B)
{
- switch (mmmmmFromVEX2of3(insn->vexPrefix[1]))
+ switch (mmmmmFromVEX2of3(insn->vexXopPrefix[1]))
{
default:
- dbgprintf(insn, "Unhandled m-mmmm field for instruction (0x%hhx)", mmmmmFromVEX2of3(insn->vexPrefix[1]));
+ dbgprintf(insn, "Unhandled m-mmmm field for instruction (0x%hhx)",
+ mmmmmFromVEX2of3(insn->vexXopPrefix[1]));
return -1;
- case 0:
- break;
case VEX_LOB_0F:
- insn->twoByteEscape = 0x0f;
insn->opcodeType = TWOBYTE;
return consumeByte(insn, &insn->opcode);
case VEX_LOB_0F38:
- insn->twoByteEscape = 0x0f;
- insn->threeByteEscape = 0x38;
insn->opcodeType = THREEBYTE_38;
return consumeByte(insn, &insn->opcode);
case VEX_LOB_0F3A:
- insn->twoByteEscape = 0x0f;
- insn->threeByteEscape = 0x3a;
insn->opcodeType = THREEBYTE_3A;
return consumeByte(insn, &insn->opcode);
}
}
- else if (insn->vexSize == 2)
+ else if (insn->vexXopType == TYPE_VEX_2B)
{
- insn->twoByteEscape = 0x0f;
insn->opcodeType = TWOBYTE;
return consumeByte(insn, &insn->opcode);
}
+ else if (insn->vexXopType == TYPE_XOP)
+ {
+ switch (mmmmmFromXOP2of3(insn->vexXopPrefix[1]))
+ {
+ default:
+ dbgprintf(insn, "Unhandled m-mmmm field for instruction (0x%hhx)",
+                mmmmmFromXOP2of3(insn->vexXopPrefix[1]));
+ return -1;
+ case XOP_MAP_SELECT_8:
+ insn->opcodeType = XOP8_MAP;
+ return consumeByte(insn, &insn->opcode);
+ case XOP_MAP_SELECT_9:
+ insn->opcodeType = XOP9_MAP;
+ return consumeByte(insn, &insn->opcode);
+ case XOP_MAP_SELECT_A:
+ insn->opcodeType = XOPA_MAP;
+ return consumeByte(insn, &insn->opcode);
+ }
+ }
if (consumeByte(insn, &current))
return -1;
@@ -597,16 +696,12 @@ static int readOpcode(struct InternalInstruction* insn) {
if (current == 0x0f) {
dbgprintf(insn, "Found a two-byte escape prefix (0x%hhx)", current);
- insn->twoByteEscape = current;
-
if (consumeByte(insn, &current))
return -1;
if (current == 0x38) {
dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current);
- insn->threeByteEscape = current;
-
if (consumeByte(insn, &current))
return -1;
@@ -614,8 +709,6 @@ static int readOpcode(struct InternalInstruction* insn) {
} else if (current == 0x3a) {
dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current);
- insn->threeByteEscape = current;
-
if (consumeByte(insn, &current))
return -1;
@@ -623,8 +716,6 @@ static int readOpcode(struct InternalInstruction* insn) {
} else if (current == 0xa6) {
dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current);
- insn->threeByteEscape = current;
-
if (consumeByte(insn, &current))
return -1;
@@ -632,8 +723,6 @@ static int readOpcode(struct InternalInstruction* insn) {
} else if (current == 0xa7) {
dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current);
- insn->threeByteEscape = current;
-
if (consumeByte(insn, &current))
return -1;
@@ -747,11 +836,27 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
if (insn->mode == MODE_64BIT)
attrMask |= ATTR_64BIT;
- if (insn->vexSize) {
+ if (insn->vexXopType != TYPE_NO_VEX_XOP) {
attrMask |= ATTR_VEX;
- if (insn->vexSize == 3) {
- switch (ppFromVEX3of3(insn->vexPrefix[2])) {
+ if (insn->vexXopType == TYPE_VEX_3B) {
+ switch (ppFromVEX3of3(insn->vexXopPrefix[2])) {
+ case VEX_PREFIX_66:
+ attrMask |= ATTR_OPSIZE;
+ break;
+ case VEX_PREFIX_F3:
+ attrMask |= ATTR_XS;
+ break;
+ case VEX_PREFIX_F2:
+ attrMask |= ATTR_XD;
+ break;
+ }
+
+ if (lFromVEX3of3(insn->vexXopPrefix[2]))
+ attrMask |= ATTR_VEXL;
+ }
+ else if (insn->vexXopType == TYPE_VEX_2B) {
+ switch (ppFromVEX2of2(insn->vexXopPrefix[1])) {
case VEX_PREFIX_66:
attrMask |= ATTR_OPSIZE;
break;
@@ -763,11 +868,11 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
break;
}
- if (lFromVEX3of3(insn->vexPrefix[2]))
+ if (lFromVEX2of2(insn->vexXopPrefix[1]))
attrMask |= ATTR_VEXL;
}
- else if (insn->vexSize == 2) {
- switch (ppFromVEX2of2(insn->vexPrefix[1])) {
+ else if (insn->vexXopType == TYPE_XOP) {
+ switch (ppFromXOP3of3(insn->vexXopPrefix[2])) {
case VEX_PREFIX_66:
attrMask |= ATTR_OPSIZE;
break;
@@ -779,7 +884,7 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
break;
}
- if (lFromVEX2of2(insn->vexPrefix[1]))
+ if (lFromXOP3of3(insn->vexXopPrefix[2]))
attrMask |= ATTR_VEXL;
}
else {
@@ -805,42 +910,6 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
/* The following clauses compensate for limitations of the tables. */
- if ((attrMask & ATTR_VEXL) && (attrMask & ATTR_REXW) &&
- !(attrMask & ATTR_OPSIZE)) {
- /*
- * Some VEX instructions ignore the L-bit, but use the W-bit. Normally L-bit
- * has precedence since there are no L-bit with W-bit entries in the tables.
- * So if the L-bit isn't significant we should use the W-bit instead.
- * We only need to do this if the instruction doesn't specify OpSize since
- * there is a VEX_L_W_OPSIZE table.
- */
-
- const struct InstructionSpecifier *spec;
- uint16_t instructionIDWithWBit;
- const struct InstructionSpecifier *specWithWBit;
-
- spec = specifierForUID(instructionID);
-
- if (getIDWithAttrMask(&instructionIDWithWBit,
- insn,
- (attrMask & (~ATTR_VEXL)) | ATTR_REXW)) {
- insn->instructionID = instructionID;
- insn->spec = spec;
- return 0;
- }
-
- specWithWBit = specifierForUID(instructionIDWithWBit);
-
- if (instructionID != instructionIDWithWBit) {
- insn->instructionID = instructionIDWithWBit;
- insn->spec = specWithWBit;
- } else {
- insn->instructionID = instructionID;
- insn->spec = spec;
- }
- return 0;
- }
-
if (insn->prefixPresent[0x66] && !(attrMask & ATTR_OPSIZE)) {
/*
* The instruction tables make no distinction between instructions that
@@ -1234,6 +1303,8 @@ static int readModRM(struct InternalInstruction* insn) {
return prefix##_EAX + index; \
case TYPE_R64: \
return prefix##_RAX + index; \
+ case TYPE_XMM512: \
+ return prefix##_ZMM0 + index; \
case TYPE_XMM256: \
return prefix##_YMM0 + index; \
case TYPE_XMM128: \
@@ -1479,10 +1550,12 @@ static int readImmediate(struct InternalInstruction* insn, uint8_t size) {
static int readVVVV(struct InternalInstruction* insn) {
dbgprintf(insn, "readVVVV()");
- if (insn->vexSize == 3)
- insn->vvvv = vvvvFromVEX3of3(insn->vexPrefix[2]);
- else if (insn->vexSize == 2)
- insn->vvvv = vvvvFromVEX2of2(insn->vexPrefix[1]);
+ if (insn->vexXopType == TYPE_VEX_3B)
+ insn->vvvv = vvvvFromVEX3of3(insn->vexXopPrefix[2]);
+ else if (insn->vexXopType == TYPE_VEX_2B)
+ insn->vvvv = vvvvFromVEX2of2(insn->vexXopPrefix[1]);
+ else if (insn->vexXopType == TYPE_XOP)
+ insn->vvvv = vvvvFromXOP3of3(insn->vexXopPrefix[2]);
else
return -1;
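readVVVV() now also covers the XOP form; as with VEX, the extra source register lives in the third prefix byte and is stored ones-complemented. A standalone sketch of the extraction, matching the vvvvFromVEX3of3/vvvvFromXOP3of3 macros added in the header below (the function name is mine):

    #include <cstdint>

    // Bits 6:3 of the third VEX/XOP byte hold ~vvvv; complementing recovers
    // the register number 0..15.
    static uint8_t vvvvFromThirdByte(uint8_t b) {
      return (uint8_t)((~b & 0x78) >> 3);
    }

    // e.g. vvvvFromThirdByte(0xA0) == 11: the encoded extra operand is
    // register 11 (xmm11/ymm11 for a vector instruction).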
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index 407ead3cafa9..6d03d5ca5f36 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -59,6 +59,15 @@ extern "C" {
#define lFromVEX2of2(vex) (((vex) & 0x4) >> 2)
#define ppFromVEX2of2(vex) ((vex) & 0x3)
+#define rFromXOP2of3(xop) (((~(xop)) & 0x80) >> 7)
+#define xFromXOP2of3(xop) (((~(xop)) & 0x40) >> 6)
+#define bFromXOP2of3(xop) (((~(xop)) & 0x20) >> 5)
+#define mmmmmFromXOP2of3(xop) ((xop) & 0x1f)
+#define wFromXOP3of3(xop) (((xop) & 0x80) >> 7)
+#define vvvvFromXOP3of3(vex) (((~(vex)) & 0x78) >> 3)
+#define lFromXOP3of3(xop) (((xop) & 0x4) >> 2)
+#define ppFromXOP3of3(xop) ((xop) & 0x3)
+
/*
* These enums represent Intel registers for use by the decoder.
*/
@@ -219,7 +228,23 @@ extern "C" {
ENTRY(XMM12) \
ENTRY(XMM13) \
ENTRY(XMM14) \
- ENTRY(XMM15)
+ ENTRY(XMM15) \
+ ENTRY(XMM16) \
+ ENTRY(XMM17) \
+ ENTRY(XMM18) \
+ ENTRY(XMM19) \
+ ENTRY(XMM20) \
+ ENTRY(XMM21) \
+ ENTRY(XMM22) \
+ ENTRY(XMM23) \
+ ENTRY(XMM24) \
+ ENTRY(XMM25) \
+ ENTRY(XMM26) \
+ ENTRY(XMM27) \
+ ENTRY(XMM28) \
+ ENTRY(XMM29) \
+ ENTRY(XMM30) \
+ ENTRY(XMM31)
#define REGS_YMM \
ENTRY(YMM0) \
@@ -237,7 +262,57 @@ extern "C" {
ENTRY(YMM12) \
ENTRY(YMM13) \
ENTRY(YMM14) \
- ENTRY(YMM15)
+ ENTRY(YMM15) \
+ ENTRY(YMM16) \
+ ENTRY(YMM17) \
+ ENTRY(YMM18) \
+ ENTRY(YMM19) \
+ ENTRY(YMM20) \
+ ENTRY(YMM21) \
+ ENTRY(YMM22) \
+ ENTRY(YMM23) \
+ ENTRY(YMM24) \
+ ENTRY(YMM25) \
+ ENTRY(YMM26) \
+ ENTRY(YMM27) \
+ ENTRY(YMM28) \
+ ENTRY(YMM29) \
+ ENTRY(YMM30) \
+ ENTRY(YMM31)
+
+#define REGS_ZMM \
+ ENTRY(ZMM0) \
+ ENTRY(ZMM1) \
+ ENTRY(ZMM2) \
+ ENTRY(ZMM3) \
+ ENTRY(ZMM4) \
+ ENTRY(ZMM5) \
+ ENTRY(ZMM6) \
+ ENTRY(ZMM7) \
+ ENTRY(ZMM8) \
+ ENTRY(ZMM9) \
+ ENTRY(ZMM10) \
+ ENTRY(ZMM11) \
+ ENTRY(ZMM12) \
+ ENTRY(ZMM13) \
+ ENTRY(ZMM14) \
+ ENTRY(ZMM15) \
+ ENTRY(ZMM16) \
+ ENTRY(ZMM17) \
+ ENTRY(ZMM18) \
+ ENTRY(ZMM19) \
+ ENTRY(ZMM20) \
+ ENTRY(ZMM21) \
+ ENTRY(ZMM22) \
+ ENTRY(ZMM23) \
+ ENTRY(ZMM24) \
+ ENTRY(ZMM25) \
+ ENTRY(ZMM26) \
+ ENTRY(ZMM27) \
+ ENTRY(ZMM28) \
+ ENTRY(ZMM29) \
+ ENTRY(ZMM30) \
+ ENTRY(ZMM31)
#define REGS_SEGMENT \
ENTRY(ES) \
@@ -285,6 +360,7 @@ extern "C" {
REGS_MMX \
REGS_XMM \
REGS_YMM \
+ REGS_ZMM \
REGS_SEGMENT \
REGS_DEBUG \
REGS_CONTROL \
@@ -319,6 +395,7 @@ typedef enum {
ALL_EA_BASES
REGS_XMM
REGS_YMM
+ REGS_ZMM
#undef ENTRY
SIB_INDEX_max
} SIBIndex;
@@ -379,6 +456,12 @@ typedef enum {
VEX_LOB_0F3A = 0x3
} VEXLeadingOpcodeByte;
+typedef enum {
+ XOP_MAP_SELECT_8 = 0x8,
+ XOP_MAP_SELECT_9 = 0x9,
+ XOP_MAP_SELECT_A = 0xA
+} XOPMapSelect;
+
/*
* VEXPrefixCode - Possible values for the VEX.pp field
*/
@@ -390,6 +473,13 @@ typedef enum {
VEX_PREFIX_F2 = 0x3
} VEXPrefixCode;
+typedef enum {
+ TYPE_NO_VEX_XOP = 0x0,
+ TYPE_VEX_2B = 0x1,
+ TYPE_VEX_3B = 0x2,
+ TYPE_XOP = 0x3
+} VEXXOPType;
+
typedef uint8_t BOOL;
/*
@@ -446,10 +536,10 @@ struct InternalInstruction {
uint8_t prefixPresent[0x100];
/* contains the location (for use with the reader) of the prefix byte */
uint64_t prefixLocations[0x100];
- /* The value of the VEX prefix, if present */
- uint8_t vexPrefix[3];
+ /* The value of the VEX/XOP prefix, if present */
+ uint8_t vexXopPrefix[3];
/* The length of the VEX prefix (0 if not present) */
- uint8_t vexSize;
+ VEXXOPType vexXopType;
/* The value of the REX prefix, if present */
uint8_t rexPrefix;
/* The location where a mandatory prefix would have to be (i.e., right before
@@ -457,6 +547,8 @@ struct InternalInstruction {
uint64_t necessaryPrefixLocation;
/* The segment override type */
SegmentOverride segmentOverride;
+  /* 1 if the prefix byte (0xf2 or 0xf3) is xacquire or xrelease */
+ BOOL xAcquireRelease;
/* Sizes of various critical pieces of data, in bytes */
uint8_t registerSize;
@@ -471,10 +563,6 @@ struct InternalInstruction {
/* opcode state */
- /* The value of the two-byte escape prefix (usually 0x0f) */
- uint8_t twoByteEscape;
- /* The value of the three-byte escape prefix (usually 0x38 or 0x3a) */
- uint8_t threeByteEscape;
/* The last byte of the opcode, not counting any ModR/M extension */
uint8_t opcode;
/* The ModR/M byte of the instruction, if it is an opcode extension */
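The REGS_XMM/REGS_YMM/REGS_ZMM lists above are X-macros: each expansion site defines ENTRY() to build an enum, a string table, or a switch from the same list. A tiny self-contained illustration of the pattern (the names here are hypothetical, not from this header):

    // One list, two expansions: an enum of ids and a parallel name table.
    #define DEMO_REGS \
      ENTRY(ZMM0)     \
      ENTRY(ZMM1)     \
      ENTRY(ZMM2)

    #define ENTRY(x) DEMO_REG_##x,
    enum DemoReg { DEMO_REGS DEMO_REG_max };
    #undef ENTRY

    #define ENTRY(x) #x,
    static const char *DemoRegNames[] = { DEMO_REGS };
    #undef ENTRY
    // DemoRegNames[DEMO_REG_ZMM1] == "ZMM1"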
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
index 23dfe4b5b5f4..dd1719c64d76 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
@@ -32,6 +32,9 @@
#define THREEBYTE3A_SYM x86DisassemblerThreeByte3AOpcodes
#define THREEBYTEA6_SYM x86DisassemblerThreeByteA6Opcodes
#define THREEBYTEA7_SYM x86DisassemblerThreeByteA7Opcodes
+#define XOP8_MAP_SYM x86DisassemblerXOP8Opcodes
+#define XOP9_MAP_SYM x86DisassemblerXOP9Opcodes
+#define XOPA_MAP_SYM x86DisassemblerXOPAOpcodes
#define INSTRUCTIONS_STR "x86DisassemblerInstrSpecifiers"
#define CONTEXTS_STR "x86DisassemblerContexts"
@@ -41,6 +44,9 @@
#define THREEBYTE3A_STR "x86DisassemblerThreeByte3AOpcodes"
#define THREEBYTEA6_STR "x86DisassemblerThreeByteA6Opcodes"
#define THREEBYTEA7_STR "x86DisassemblerThreeByteA7Opcodes"
+#define XOP8_MAP_STR "x86DisassemblerXOP8Opcodes"
+#define XOP9_MAP_STR "x86DisassemblerXOP9Opcodes"
+#define XOPA_MAP_STR "x86DisassemblerXOPAOpcodes"
/*
* Attributes of an instruction that must be known before the opcode can be
@@ -116,8 +122,154 @@ enum attributeBits {
ENUM_ENTRY(IC_VEX_L_XS, 4, "requires VEX and the L and XS prefix")\
ENUM_ENTRY(IC_VEX_L_XD, 4, "requires VEX and the L and XD prefix")\
ENUM_ENTRY(IC_VEX_L_OPSIZE, 4, "requires VEX, L, and OpSize") \
- ENUM_ENTRY(IC_VEX_L_W_OPSIZE, 5, "requires VEX, L, W and OpSize")
-
+ ENUM_ENTRY(IC_VEX_L_W, 4, "requires VEX, L and W") \
+ ENUM_ENTRY(IC_VEX_L_W_XS, 5, "requires VEX, L, W and XS prefix") \
+ ENUM_ENTRY(IC_VEX_L_W_XD, 5, "requires VEX, L, W and XD prefix") \
+ ENUM_ENTRY(IC_VEX_L_W_OPSIZE, 5, "requires VEX, L, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX, 1, "requires an EVEX prefix") \
+ ENUM_ENTRY(IC_EVEX_XS, 2, "requires EVEX and the XS prefix") \
+ ENUM_ENTRY(IC_EVEX_XD, 2, "requires EVEX and the XD prefix") \
+ ENUM_ENTRY(IC_EVEX_OPSIZE, 2, "requires EVEX and the OpSize prefix") \
+ ENUM_ENTRY(IC_EVEX_W, 3, "requires EVEX and the W prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XS, 4, "requires EVEX, W, and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XD, 4, "requires EVEX, W, and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_W_OPSIZE, 4, "requires EVEX, W, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L, 3, "requires EVEX and the L prefix") \
+ ENUM_ENTRY(IC_EVEX_L_XS, 4, "requires EVEX and the L and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L_XD, 4, "requires EVEX and the L and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L_OPSIZE, 4, "requires EVEX, L, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_W, 3, "requires EVEX, L and W") \
+ ENUM_ENTRY(IC_EVEX_L_W_XS, 4, "requires EVEX, L, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_XD, 4, "requires EVEX, L, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_OPSIZE, 4, "requires EVEX, L, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2, 3, "requires EVEX and the L2 prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_XS, 4, "requires EVEX and the L2 and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_XD, 4, "requires EVEX and the L2 and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_OPSIZE, 4, "requires EVEX, L2, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_W, 3, "requires EVEX, L2 and W") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XS, 4, "requires EVEX, L2, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XD, 4, "requires EVEX, L2, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE, 4, "requires EVEX, L2, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_K, 1, "requires an EVEX_K prefix") \
+ ENUM_ENTRY(IC_EVEX_XS_K, 2, "requires EVEX_K and the XS prefix") \
+ ENUM_ENTRY(IC_EVEX_XD_K, 2, "requires EVEX_K and the XD prefix") \
+ ENUM_ENTRY(IC_EVEX_OPSIZE_K, 2, "requires EVEX_K and the OpSize prefix") \
+ ENUM_ENTRY(IC_EVEX_W_K, 3, "requires EVEX_K and the W prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XS_K, 4, "requires EVEX_K, W, and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XD_K, 4, "requires EVEX_K, W, and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_W_OPSIZE_K, 4, "requires EVEX_K, W, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_K, 3, "requires EVEX_K and the L prefix") \
+ ENUM_ENTRY(IC_EVEX_L_XS_K, 4, "requires EVEX_K and the L and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L_XD_K, 4, "requires EVEX_K and the L and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L_OPSIZE_K, 4, "requires EVEX_K, L, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_W_K, 3, "requires EVEX_K, L and W") \
+ ENUM_ENTRY(IC_EVEX_L_W_XS_K, 4, "requires EVEX_K, L, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_XD_K, 4, "requires EVEX_K, L, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K, 4, "requires EVEX_K, L, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_K, 3, "requires EVEX_K and the L2 prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_XS_K, 4, "requires EVEX_K and the L2 and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_XD_K, 4, "requires EVEX_K and the L2 and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K, 4, "requires EVEX_K, L2, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_W_K, 3, "requires EVEX_K, L2 and W") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XS_K, 4, "requires EVEX_K, L2, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XD_K, 4, "requires EVEX_K, L2, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K, 4, "requires EVEX_K, L2, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_B, 1, "requires an EVEX_B prefix") \
+ ENUM_ENTRY(IC_EVEX_XS_B, 2, "requires EVEX_B and the XS prefix") \
+ ENUM_ENTRY(IC_EVEX_XD_B, 2, "requires EVEX_B and the XD prefix") \
+ ENUM_ENTRY(IC_EVEX_OPSIZE_B, 2, "requires EVEX_B and the OpSize prefix") \
+ ENUM_ENTRY(IC_EVEX_W_B, 3, "requires EVEX_B and the W prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XS_B, 4, "requires EVEX_B, W, and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XD_B, 4, "requires EVEX_B, W, and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_W_OPSIZE_B, 4, "requires EVEX_B, W, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_B, 3, "requires EVEX_B and the L prefix") \
+ ENUM_ENTRY(IC_EVEX_L_XS_B, 4, "requires EVEX_B and the L and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L_XD_B, 4, "requires EVEX_B and the L and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L_OPSIZE_B, 4, "requires EVEX_B, L, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_W_B, 3, "requires EVEX_B, L and W") \
+ ENUM_ENTRY(IC_EVEX_L_W_XS_B, 4, "requires EVEX_B, L, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_XD_B, 4, "requires EVEX_B, L, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_B, 4, "requires EVEX_B, L, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_B, 3, "requires EVEX_B and the L2 prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_XS_B, 4, "requires EVEX_B and the L2 and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_XD_B, 4, "requires EVEX_B and the L2 and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_OPSIZE_B, 4, "requires EVEX_B, L2, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_W_B, 3, "requires EVEX_B, L2 and W") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XS_B, 4, "requires EVEX_B, L2, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XD_B, 4, "requires EVEX_B, L2, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_B, 4, "requires EVEX_B, L2, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_K_B, 1, "requires EVEX_B and EVEX_K prefix") \
+ ENUM_ENTRY(IC_EVEX_XS_K_B, 2, "requires EVEX_B, EVEX_K and the XS prefix") \
+ ENUM_ENTRY(IC_EVEX_XD_K_B, 2, "requires EVEX_B, EVEX_K and the XD prefix") \
+ ENUM_ENTRY(IC_EVEX_OPSIZE_K_B, 2, "requires EVEX_B, EVEX_K and the OpSize prefix") \
+ ENUM_ENTRY(IC_EVEX_W_K_B, 3, "requires EVEX_B, EVEX_K and the W prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, W, and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, W, and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, W, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_K_B, 3, "requires EVEX_B, EVEX_K and the L prefix") \
+ ENUM_ENTRY(IC_EVEX_L_XS_K_B, 4, "requires EVEX_B, EVEX_K and the L and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L_XD_K_B, 4, "requires EVEX_B, EVEX_K and the L and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_W_K_B, 3, "requires EVEX_B, EVEX_K, L and W") \
+ ENUM_ENTRY(IC_EVEX_L_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, L, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, L, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_K_B, 3, "requires EVEX_B, EVEX_K and the L2 prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_XS_K_B, 4, "requires EVEX_B, EVEX_K and the L2 and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_XD_K_B, 4, "requires EVEX_B, EVEX_K and the L2 and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L2, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_W_K_B, 3, "requires EVEX_B, EVEX_K, L2 and W") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_KZ_B, 1, "requires EVEX_B and EVEX_KZ prefix") \
+ ENUM_ENTRY(IC_EVEX_XS_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the XS prefix") \
+ ENUM_ENTRY(IC_EVEX_XD_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the XD prefix") \
+ ENUM_ENTRY(IC_EVEX_OPSIZE_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the OpSize prefix") \
+ ENUM_ENTRY(IC_EVEX_W_KZ_B, 3, "requires EVEX_B, EVEX_KZ and the W prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XS_KZ_B, 4, "requires EVEX_B, EVEX_KZ, W, and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XD_KZ_B, 4, "requires EVEX_B, EVEX_KZ, W, and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ_B, 4, "requires EVEX_B, EVEX_KZ, W, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_KZ_B, 3, "requires EVEX_B, EVEX_KZ and the L prefix") \
+ ENUM_ENTRY(IC_EVEX_L_XS_KZ_B, 4, "requires EVEX_B, EVEX_KZ and the L and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L_XD_KZ_B, 4, "requires EVEX_B, EVEX_KZ and the L and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L_OPSIZE_KZ_B, 4, "requires EVEX_B, EVEX_KZ, L, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_W_KZ_B, 3, "requires EVEX_B, EVEX_KZ, L and W") \
+ ENUM_ENTRY(IC_EVEX_L_W_XS_KZ_B, 4, "requires EVEX_B, EVEX_KZ, L, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_XD_KZ_B, 4, "requires EVEX_B, EVEX_KZ, L, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_KZ_B, 4, "requires EVEX_B, EVEX_KZ, L, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_KZ_B, 3, "requires EVEX_B, EVEX_KZ and the L2 prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_XS_KZ_B, 4, "requires EVEX_B, EVEX_KZ and the L2 and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_XD_KZ_B, 4, "requires EVEX_B, EVEX_KZ and the L2 and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_OPSIZE_KZ_B, 4, "requires EVEX_B, EVEX_KZ, L2, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_W_KZ_B, 3, "requires EVEX_B, EVEX_KZ, L2 and W") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ_B, 4, "requires EVEX_B, EVEX_KZ, L2, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ_B, 4, "requires EVEX_B, EVEX_KZ, L2, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ_B, 4, "requires EVEX_B, EVEX_KZ, L2, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_KZ, 1, "requires an EVEX_KZ prefix") \
+ ENUM_ENTRY(IC_EVEX_XS_KZ, 2, "requires EVEX_KZ and the XS prefix") \
+ ENUM_ENTRY(IC_EVEX_XD_KZ, 2, "requires EVEX_KZ and the XD prefix") \
+ ENUM_ENTRY(IC_EVEX_OPSIZE_KZ, 2, "requires EVEX_KZ and the OpSize prefix") \
+ ENUM_ENTRY(IC_EVEX_W_KZ, 3, "requires EVEX_KZ and the W prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XS_KZ, 4, "requires EVEX_KZ, W, and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XD_KZ, 4, "requires EVEX_KZ, W, and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ, 4, "requires EVEX_KZ, W, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_KZ, 3, "requires EVEX_KZ and the L prefix") \
+ ENUM_ENTRY(IC_EVEX_L_XS_KZ, 4, "requires EVEX_KZ and the L and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L_XD_KZ, 4, "requires EVEX_KZ and the L and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L_OPSIZE_KZ, 4, "requires EVEX_KZ, L, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_W_KZ, 3, "requires EVEX_KZ, L and W") \
+ ENUM_ENTRY(IC_EVEX_L_W_XS_KZ, 4, "requires EVEX_KZ, L, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_XD_KZ, 4, "requires EVEX_KZ, L, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_KZ, 4, "requires EVEX_KZ, L, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_KZ, 3, "requires EVEX_KZ and the L2 prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_XS_KZ, 4, "requires EVEX_KZ and the L2 and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_XD_KZ, 4, "requires EVEX_KZ and the L2 and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_OPSIZE_KZ, 4, "requires EVEX_KZ, L2, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_W_KZ, 3, "requires EVEX_KZ, L2 and W") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ, 4, "requires EVEX_KZ, L2, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ, 4, "requires EVEX_KZ, L2, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ, 4, "requires EVEX_KZ, L2, W and OpSize")
#define ENUM_ENTRY(n, r, d) n,
typedef enum {
@@ -136,7 +288,10 @@ typedef enum {
THREEBYTE_38 = 2,
THREEBYTE_3A = 3,
THREEBYTE_A6 = 4,
- THREEBYTE_A7 = 5
+ THREEBYTE_A7 = 5,
+ XOP8_MAP = 6,
+ XOP9_MAP = 7,
+ XOPA_MAP = 8
} OpcodeType;
/*
@@ -224,6 +379,7 @@ struct ContextDecision {
ENUM_ENTRY(ENCODING_REG, "Register operand in ModR/M byte.") \
ENUM_ENTRY(ENCODING_RM, "R/M operand in ModR/M byte.") \
ENUM_ENTRY(ENCODING_VVVV, "Register operand in VEX.vvvv byte.") \
+ ENUM_ENTRY(ENCODING_WRITEMASK, "Register operand in EVEX.aaa byte.") \
ENUM_ENTRY(ENCODING_CB, "1-byte code offset (possible new CS value)") \
ENUM_ENTRY(ENCODING_CW, "2-byte") \
ENUM_ENTRY(ENCODING_CD, "4-byte") \
@@ -321,6 +477,9 @@ struct ContextDecision {
ENUM_ENTRY(TYPE_XMM64, "8-byte") \
ENUM_ENTRY(TYPE_XMM128, "16-byte") \
ENUM_ENTRY(TYPE_XMM256, "32-byte") \
+ ENUM_ENTRY(TYPE_XMM512, "64-byte") \
+ ENUM_ENTRY(TYPE_VK8, "8-bit") \
+ ENUM_ENTRY(TYPE_VK16, "16-bit") \
ENUM_ENTRY(TYPE_XMM0, "Implicit use of XMM0") \
ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand") \
ENUM_ENTRY(TYPE_DEBUGREG, "Debug register operand") \
diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
index e357710b20eb..44393115cc46 100644
--- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
+++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
@@ -50,7 +50,7 @@ void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
// Try to print any aliases first.
if (!printAliasInstr(MI, OS))
printInstruction(MI, OS);
-
+
// Next always print the annotation.
printAnnotation(OS, Annot);
@@ -139,8 +139,7 @@ void X86ATTInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo,
const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr());
int64_t Address;
if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) {
- O << "0x";
- O.write_hex(Address);
+ O << formatHex((uint64_t)Address);
}
else {
// Otherwise, just print the expression.
@@ -159,10 +158,10 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
O << markup("<imm:")
<< '$' << formatImm((int64_t)Op.getImm())
<< markup(">");
-
+
if (CommentStream && (Op.getImm() > 255 || Op.getImm() < -256))
*CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Op.getImm());
-
+
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
O << markup("<imm:")
@@ -177,7 +176,7 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
const MCOperand &IndexReg = MI->getOperand(Op+2);
const MCOperand &DispSpec = MI->getOperand(Op+3);
const MCOperand &SegReg = MI->getOperand(Op+4);
-
+
O << markup("<mem:");
// If this has a segment register, print it.
@@ -185,7 +184,7 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
printOperand(MI, Op+4, O);
O << ':';
}
-
+
if (DispSpec.isImm()) {
int64_t DispVal = DispSpec.getImm();
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg()))
@@ -194,21 +193,21 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
assert(DispSpec.isExpr() && "non-immediate displacement for LEA?");
O << *DispSpec.getExpr();
}
-
+
if (IndexReg.getReg() || BaseReg.getReg()) {
O << '(';
if (BaseReg.getReg())
printOperand(MI, Op, O);
-
+
if (IndexReg.getReg()) {
O << ',';
printOperand(MI, Op+2, O);
unsigned ScaleVal = MI->getOperand(Op+1).getImm();
if (ScaleVal != 1) {
O << ','
- << markup("<imm:")
+ << markup("<imm:")
<< ScaleVal // never printed in hex.
- << markup(">");
+ << markup(">");
}
}
O << ')';
@@ -216,3 +215,19 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
O << markup(">");
}
+
+void X86ATTInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ const MCOperand &DispSpec = MI->getOperand(Op);
+
+ O << markup("<mem:");
+
+ if (DispSpec.isImm()) {
+ O << formatImm(DispSpec.getImm());
+ } else {
+ assert(DispSpec.isExpr() && "non-immediate displacement?");
+ O << *DispSpec.getExpr();
+ }
+
+ O << markup(">");
+}
diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
index 8e09183dccc9..a8fab72bc0d6 100644
--- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
+++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
@@ -42,7 +42,8 @@ public:
void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
-
+ void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
+
void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
@@ -65,6 +66,9 @@ public:
void printi256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
+ void printi512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printMemReference(MI, OpNo, O);
+ }
void printf32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
@@ -80,6 +84,22 @@ public:
void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
+ void printf512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printMemReference(MI, OpNo, O);
+ }
+
+ void printMemOffs8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printMemOffset(MI, OpNo, O);
+ }
+ void printMemOffs16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printMemOffset(MI, OpNo, O);
+ }
+ void printMemOffs32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printMemOffset(MI, OpNo, O);
+ }
+ void printMemOffs64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printMemOffset(MI, OpNo, O);
+ }
};
}
diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
index 141f4a4dd856..e7e7b151c3bd 100644
--- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
+++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
@@ -119,7 +119,7 @@ void X86IntelInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isImm())
- O << Op.getImm();
+ O << formatImm(Op.getImm());
else {
assert(Op.isExpr() && "unknown pcrel immediate operand");
// If a symbolic branch target was added as a constant expression then print
@@ -127,8 +127,7 @@ void X86IntelInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo,
const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr());
int64_t Address;
if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) {
- O << "0x";
- O.write_hex(Address);
+ O << formatHex((uint64_t)Address);
}
else {
// Otherwise, just print the expression.
@@ -137,18 +136,13 @@ void X86IntelInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo,
}
}
-static void PrintRegName(raw_ostream &O, StringRef RegName) {
- for (unsigned i = 0, e = RegName.size(); i != e; ++i)
- O << (char)toupper(RegName[i]);
-}
-
void X86IntelInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
- PrintRegName(O, getRegisterName(Op.getReg()));
+ printRegName(O, Op.getReg());
} else if (Op.isImm()) {
- O << Op.getImm();
+ O << formatImm((int64_t)Op.getImm());
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
O << *Op.getExpr();
@@ -200,9 +194,25 @@ void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
DispVal = -DispVal;
}
}
- O << DispVal;
+ O << formatImm(DispVal);
}
}
O << ']';
}
+
+void X86IntelInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ const MCOperand &DispSpec = MI->getOperand(Op);
+
+ O << '[';
+
+ if (DispSpec.isImm()) {
+ O << formatImm(DispSpec.getImm());
+ } else {
+ assert(DispSpec.isExpr() && "non-immediate displacement?");
+ O << *DispSpec.getExpr();
+ }
+
+ O << ']';
+}
diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
index bb769eb52e4f..590bf6812417 100644
--- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
+++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
@@ -39,56 +39,82 @@ public:
void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &O);
void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &O);
void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-
+ void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+
void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "OPAQUE PTR ";
+ O << "opaque ptr ";
printMemReference(MI, OpNo, O);
}
void printi8mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "BYTE PTR ";
+ O << "byte ptr ";
printMemReference(MI, OpNo, O);
}
void printi16mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "WORD PTR ";
+ O << "word ptr ";
printMemReference(MI, OpNo, O);
}
void printi32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "DWORD PTR ";
+ O << "dword ptr ";
printMemReference(MI, OpNo, O);
}
void printi64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "QWORD PTR ";
+ O << "qword ptr ";
printMemReference(MI, OpNo, O);
}
void printi128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "XMMWORD PTR ";
+ O << "xmmword ptr ";
printMemReference(MI, OpNo, O);
}
void printi256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "YMMWORD PTR ";
+ O << "ymmword ptr ";
+ printMemReference(MI, OpNo, O);
+ }
+ void printi512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "zmmword ptr ";
printMemReference(MI, OpNo, O);
}
void printf32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "DWORD PTR ";
+ O << "dword ptr ";
printMemReference(MI, OpNo, O);
}
void printf64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "QWORD PTR ";
+ O << "qword ptr ";
printMemReference(MI, OpNo, O);
}
void printf80mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "XWORD PTR ";
+ O << "xword ptr ";
printMemReference(MI, OpNo, O);
}
void printf128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "XMMWORD PTR ";
+ O << "xmmword ptr ";
printMemReference(MI, OpNo, O);
}
void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "YMMWORD PTR ";
+ O << "ymmword ptr ";
+ printMemReference(MI, OpNo, O);
+ }
+ void printf512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "zmmword ptr ";
printMemReference(MI, OpNo, O);
}
+
+ void printMemOffs8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "byte ptr ";
+ printMemOffset(MI, OpNo, O);
+ }
+ void printMemOffs16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "word ptr ";
+ printMemOffset(MI, OpNo, O);
+ }
+ void printMemOffs32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "dword ptr ";
+ printMemOffset(MI, OpNo, O);
+ }
+ void printMemOffs64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "qword ptr ";
+ printMemOffset(MI, OpNo, O);
+ }
};
}
diff --git a/lib/Target/X86/MCTargetDesc/CMakeLists.txt b/lib/Target/X86/MCTargetDesc/CMakeLists.txt
index 1c240e52a37d..2eb5f25ffd44 100644
--- a/lib/Target/X86/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/X86/MCTargetDesc/CMakeLists.txt
@@ -6,6 +6,8 @@ add_llvm_library(LLVMX86Desc
X86MachObjectWriter.cpp
X86ELFObjectWriter.cpp
X86WinCOFFObjectWriter.cpp
+ X86MachORelocationInfo.cpp
+ X86ELFRelocationInfo.cpp
)
add_dependencies(LLVMX86Desc X86CommonTableGen)
diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 598ddee56d21..f8e359b160f3 100644
--- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -9,6 +9,7 @@
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCELFObjectWriter.h"
@@ -19,10 +20,10 @@
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/Object/MachOFormat.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -67,9 +68,16 @@ public:
class X86AsmBackend : public MCAsmBackend {
StringRef CPU;
+ bool HasNopl;
public:
X86AsmBackend(const Target &T, StringRef _CPU)
- : MCAsmBackend(), CPU(_CPU) {}
+ : MCAsmBackend(), CPU(_CPU) {
+ HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" &&
+ CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" &&
+ CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" &&
+ CPU != "geode" && CPU != "winchip-c6" && CPU != "winchip2" &&
+ CPU != "c3" && CPU != "c3-2";
+ }
unsigned getNumFixupKinds() const {
return X86::NumTargetFixupKinds;
@@ -308,8 +316,8 @@ bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
// This CPU doesn't support long nops. If needed, add more.
// FIXME: Can we get this from the subtarget somehow?
- if (CPU == "generic" || CPU == "i386" || CPU == "i486" || CPU == "i586" ||
- CPU == "pentium" || CPU == "pentium-mmx" || CPU == "geode") {
+  // FIXME: We could generate something better than plain 0x90.
+ if (!HasNopl) {
for (uint64_t i = 0; i < Count; ++i)
OW->Write8(0x90);
return true;
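The constructor change above hoists the long-NOP capability check into HasNopl, and writeNopData() falls back to 1-byte 0x90 padding whenever it is false. A hedged sketch of that fallback path, written against a plain buffer instead of an MCObjectWriter (so the names are mine):

    #include <cstdint>
    #include <cstring>

    // Pad a code buffer with single-byte NOPs (0x90), the only form that is
    // safe on CPUs without NOPL (0x0f 0x1f) support.
    static void padWithShortNops(uint8_t *buf, uint64_t count) {
      memset(buf, 0x90, count);
    }

CPUs that do support NOPL can be padded more densely with the standard multi-byte encodings such as 0x66 0x90 and the 0x0f 0x1f family.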
@@ -334,6 +342,7 @@ bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
/* *** */
namespace {
+
class ELFX86AsmBackend : public X86AsmBackend {
public:
uint8_t OSABI;
@@ -382,35 +391,368 @@ public:
}
};
+namespace CU {
+
+ /// Compact unwind encoding values.
+ enum CompactUnwindEncodings {
+    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
+ /// the return address, then [RE]SP is moved to [RE]BP.
+ UNWIND_MODE_BP_FRAME = 0x01000000,
+
+ /// A frameless function with a small constant stack size.
+ UNWIND_MODE_STACK_IMMD = 0x02000000,
+
+ /// A frameless function with a large constant stack size.
+ UNWIND_MODE_STACK_IND = 0x03000000,
+
+ /// No compact unwind encoding is available.
+ UNWIND_MODE_DWARF = 0x04000000,
+
+ /// Mask for encoding the frame registers.
+ UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,
+
+ /// Mask for encoding the frameless registers.
+ UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
+ };
+
+} // end CU namespace
+
class DarwinX86AsmBackend : public X86AsmBackend {
+ const MCRegisterInfo &MRI;
+
+ /// \brief Number of registers that can be saved in a compact unwind encoding.
+ enum { CU_NUM_SAVED_REGS = 6 };
+
+ mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
+ bool Is64Bit;
+
+  unsigned OffsetSize;                   ///< Stack offset of a single "push" (8 or 4 bytes).
+ unsigned PushInstrSize; ///< Size of a "push" instruction.
+ unsigned MoveInstrSize; ///< Size of a "move" instruction.
+  unsigned StackDivide;                  ///< Amount to adjust stack size by.
+protected:
+ /// \brief Implementation of algorithm to generate the compact unwind encoding
+ /// for the CFI instructions.
+ uint32_t
+ generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const {
+ if (Instrs.empty()) return 0;
+
+ // Reset the saved registers.
+ unsigned SavedRegIdx = 0;
+ memset(SavedRegs, 0, sizeof(SavedRegs));
+
+ bool HasFP = false;
+
+ // Encode that we are using EBP/RBP as the frame pointer.
+ uint32_t CompactUnwindEncoding = 0;
+
+ unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
+ unsigned InstrOffset = 0;
+ unsigned StackAdjust = 0;
+ unsigned StackSize = 0;
+ unsigned PrevStackSize = 0;
+ unsigned NumDefCFAOffsets = 0;
+
+ for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
+ const MCCFIInstruction &Inst = Instrs[i];
+
+ switch (Inst.getOperation()) {
+ default:
+ // Any other CFI directives indicate a frame that we aren't prepared
+ // to represent via compact unwind, so just bail out.
+ return 0;
+ case MCCFIInstruction::OpDefCfaRegister: {
+ // Defines a frame pointer. E.g.
+ //
+ // movq %rsp, %rbp
+ // L0:
+ // .cfi_def_cfa_register %rbp
+ //
+ HasFP = true;
+ assert(MRI.getLLVMRegNum(Inst.getRegister(), true) ==
+ (Is64Bit ? X86::RBP : X86::EBP) && "Invalid frame pointer!");
+
+ // Reset the counts.
+ memset(SavedRegs, 0, sizeof(SavedRegs));
+ StackAdjust = 0;
+ SavedRegIdx = 0;
+ InstrOffset += MoveInstrSize;
+ break;
+ }
+ case MCCFIInstruction::OpDefCfaOffset: {
+ // Defines a new offset for the CFA. E.g.
+ //
+ // With frame:
+ //
+ // pushq %rbp
+ // L0:
+ // .cfi_def_cfa_offset 16
+ //
+ // Without frame:
+ //
+ // subq $72, %rsp
+ // L0:
+ // .cfi_def_cfa_offset 80
+ //
+ PrevStackSize = StackSize;
+ StackSize = std::abs(Inst.getOffset()) / StackDivide;
+ ++NumDefCFAOffsets;
+ break;
+ }
+ case MCCFIInstruction::OpOffset: {
+ // Defines a "push" of a callee-saved register. E.g.
+ //
+ // pushq %r15
+ // pushq %r14
+ // pushq %rbx
+ // L0:
+ // subq $120, %rsp
+ // L1:
+ // .cfi_offset %rbx, -40
+ // .cfi_offset %r14, -32
+ // .cfi_offset %r15, -24
+ //
+ if (SavedRegIdx == CU_NUM_SAVED_REGS)
+ // If there are too many saved registers, we cannot use a compact
+ // unwind encoding.
+ return CU::UNWIND_MODE_DWARF;
+
+ unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
+ SavedRegs[SavedRegIdx++] = Reg;
+ StackAdjust += OffsetSize;
+ InstrOffset += PushInstrSize;
+ break;
+ }
+ }
+ }
+
+ StackAdjust /= StackDivide;
+
+ if (HasFP) {
+ if ((StackAdjust & 0xFF) != StackAdjust)
+ // Offset was too big for a compact unwind encoding.
+ return CU::UNWIND_MODE_DWARF;
+
+ // Get the encoding of the saved registers when we have a frame pointer.
+ uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
+ if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
+
+ CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
+ CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
+ CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
+ } else {
+ // If the amount of the stack allocation is the size of a register, then
+ // we "push" the RAX/EAX register onto the stack instead of adjusting the
+ // stack pointer with a SUB instruction. We don't support the push of the
+ // RAX/EAX register with compact unwind. So we check for that situation
+ // here.
+ if ((NumDefCFAOffsets == SavedRegIdx + 1 &&
+ StackSize - PrevStackSize == 1) ||
+ (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2))
+ return CU::UNWIND_MODE_DWARF;
+
+ SubtractInstrIdx += InstrOffset;
+ ++StackAdjust;
+
+ if ((StackSize & 0xFF) == StackSize) {
+ // Frameless stack with a small stack size.
+ CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;
+
+ // Encode the stack size.
+ CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
+ } else {
+ if ((StackAdjust & 0x7) != StackAdjust)
+ // The extra stack adjustments are too big for us to handle.
+ return CU::UNWIND_MODE_DWARF;
+
+ // Frameless stack with an offset too large for us to encode compactly.
+ CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;
+
+ // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
+ // instruction.
+ CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
+
+      // Encode any extra stack adjustments (done via push
+ // instructions).
+ CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
+ }
+
+ // Encode the number of registers saved. (Reverse the list first.)
+ std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
+ CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;
+
+ // Get the encoding of the saved registers when we don't have a frame
+ // pointer.
+ uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
+ if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
+
+ // Encode the register encoding.
+ CompactUnwindEncoding |=
+ RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
+ }
+
+ return CompactUnwindEncoding;
+ }
+
+private:
+ /// \brief Get the compact unwind number for a given register. The number
+ /// corresponds to the enum lists in compact_unwind_encoding.h.
+ int getCompactUnwindRegNum(unsigned Reg) const {
+ static const uint16_t CU32BitRegs[7] = {
+ X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
+ };
+ static const uint16_t CU64BitRegs[] = {
+ X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
+ };
+ const uint16_t *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
+ for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
+ if (*CURegs == Reg)
+ return Idx;
+
+ return -1;
+ }
+
+ /// \brief Return the registers encoded for a compact encoding with a frame
+ /// pointer.
+ uint32_t encodeCompactUnwindRegistersWithFrame() const {
+ // Encode the registers in the order they were saved --- 3-bits per
+ // register. The list of saved registers is assumed to be in reverse
+ // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
+ uint32_t RegEnc = 0;
+ for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
+ unsigned Reg = SavedRegs[i];
+ if (Reg == 0) break;
+
+ int CURegNum = getCompactUnwindRegNum(Reg);
+ if (CURegNum == -1) return ~0U;
+
+ // Encode the 3-bit register number in order, skipping over 3-bits for
+ // each register.
+ RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
+ }
+
+ assert((RegEnc & 0x3FFFF) == RegEnc &&
+ "Invalid compact register encoding!");
+ return RegEnc;
+ }
+
+ /// \brief Create the permutation encoding used with frameless stacks. It is
+ /// passed the number of registers to be saved and an array of the registers
+ /// saved.
+ uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
+ // The saved registers are numbered from 1 to 6. In order to encode the
+ // order in which they were saved, we re-number them according to their
+ // place in the register order. The re-numbering is relative to the last
+ // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
+ // that order:
+ //
+ // Orig Re-Num
+ // ---- ------
+ // 6 6
+ // 2 2
+ // 4 3
+ // 5 3
+ //
+ for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
+ int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
+ if (CUReg == -1) return ~0U;
+ SavedRegs[i] = CUReg;
+ }
+
+ // Reverse the list.
+ std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);
+
+ uint32_t RenumRegs[CU_NUM_SAVED_REGS];
+ for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
+ unsigned Countless = 0;
+ for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
+ if (SavedRegs[j] < SavedRegs[i])
+ ++Countless;
+
+ RenumRegs[i] = SavedRegs[i] - Countless - 1;
+ }
+
+ // Take the renumbered values and encode them into a 10-bit number.
+ uint32_t permutationEncoding = 0;
+ switch (RegCount) {
+ case 6:
+ permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
+ + 6 * RenumRegs[2] + 2 * RenumRegs[3]
+ + RenumRegs[4];
+ break;
+ case 5:
+ permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
+ + 6 * RenumRegs[3] + 2 * RenumRegs[4]
+ + RenumRegs[5];
+ break;
+ case 4:
+ permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
+ + 3 * RenumRegs[4] + RenumRegs[5];
+ break;
+ case 3:
+ permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
+ + RenumRegs[5];
+ break;
+ case 2:
+ permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
+ break;
+ case 1:
+ permutationEncoding |= RenumRegs[5];
+ break;
+ }
+
+ assert((permutationEncoding & 0x3FF) == permutationEncoding &&
+ "Invalid compact register encoding!");
+ return permutationEncoding;
+ }
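
The frameless permutation can be illustrated the same way — a sketch assuming three saved registers with hypothetical compact-unwind numbers {6, 2, 4}, already reversed as in the code above: each value is reduced by one plus the count of smaller values to its left, and the results are combined with the factorial-style weights of the RegCount == 3 case:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical compact-unwind numbers, in the (reversed) order used above.
  const unsigned Saved[3] = {6, 2, 4};
  unsigned Renum[3];
  for (unsigned i = 0; i != 3; ++i) {
    unsigned Countless = 0;              // smaller values to the left
    for (unsigned j = 0; j != i; ++j)
      if (Saved[j] < Saved[i])
        ++Countless;
    Renum[i] = Saved[i] - Countless - 1; // 6 -> 5, 2 -> 1, 4 -> 2
  }
  // Weights for three registers, mirroring the RegCount == 3 case above.
  uint32_t Enc = 20 * Renum[0] + 4 * Renum[1] + Renum[2];
  assert((Enc & 0x3FF) == Enc && "Invalid compact register encoding!");
  std::printf("permutation = %u\n", Enc); // prints permutation = 106
  return 0;
}
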
+
public:
- DarwinX86AsmBackend(const Target &T, StringRef CPU)
- : X86AsmBackend(T, CPU) { }
+ DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, StringRef CPU,
+ bool Is64Bit)
+ : X86AsmBackend(T, CPU), MRI(MRI), Is64Bit(Is64Bit) {
+ memset(SavedRegs, 0, sizeof(SavedRegs));
+ OffsetSize = Is64Bit ? 8 : 4;
+ MoveInstrSize = Is64Bit ? 3 : 2;
+ StackDivide = Is64Bit ? 8 : 4;
+ PushInstrSize = 1;
+ }
};
class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
+ bool SupportsCU;
public:
- DarwinX86_32AsmBackend(const Target &T, StringRef CPU)
- : DarwinX86AsmBackend(T, CPU) {}
+ DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef CPU, bool SupportsCU)
+ : DarwinX86AsmBackend(T, MRI, CPU, false), SupportsCU(SupportsCU) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
- object::mach::CTM_i386,
- object::mach::CSX86_ALL);
+ MachO::CPU_TYPE_I386,
+ MachO::CPU_SUBTYPE_I386_ALL);
+ }
+
+ /// \brief Generate the compact unwind encoding for the CFI instructions.
+ virtual uint32_t
+ generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const {
+ return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
}
};
class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
+ bool SupportsCU;
+ const MachO::CPUSubTypeX86 Subtype;
public:
- DarwinX86_64AsmBackend(const Target &T, StringRef CPU)
- : DarwinX86AsmBackend(T, CPU) {
+ DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef CPU, bool SupportsCU,
+ MachO::CPUSubTypeX86 st)
+ : DarwinX86AsmBackend(T, MRI, CPU, true), SupportsCU(SupportsCU),
+ Subtype(st) {
HasReliableSymbolDifference = true;
}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
- object::mach::CTM_x86_64,
- object::mach::CSX86_ALL);
+ MachO::CPU_TYPE_X86_64, Subtype);
}
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
@@ -445,15 +787,26 @@ public:
return false;
}
}
+
+ /// \brief Generate the compact unwind encoding for the CFI instructions.
+ virtual uint32_t
+ generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const {
+ return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
+ }
};
} // end anonymous namespace
-MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, StringRef TT, StringRef CPU) {
+MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
+ StringRef CPU) {
Triple TheTriple(TT);
if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
- return new DarwinX86_32AsmBackend(T, CPU);
+ return new DarwinX86_32AsmBackend(T, MRI, CPU,
+ TheTriple.isMacOSX() &&
+ !TheTriple.isMacOSXVersionLT(10, 7));
if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
return new WindowsX86AsmBackend(T, false, CPU);
@@ -462,11 +815,21 @@ MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, StringRef TT, String
return new ELFX86_32AsmBackend(T, OSABI, CPU);
}
-MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, StringRef TT, StringRef CPU) {
+MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
+ StringRef CPU) {
Triple TheTriple(TT);
- if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
- return new DarwinX86_64AsmBackend(T, CPU);
+ if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) {
+ MachO::CPUSubTypeX86 CS =
+ StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName())
+ .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H)
+ .Default(MachO::CPU_SUBTYPE_X86_64_ALL);
+ return new DarwinX86_64AsmBackend(T, MRI, CPU,
+ TheTriple.isMacOSX() &&
+ !TheTriple.isMacOSXVersionLT(10, 7), CS);
+ }
if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
return new WindowsX86AsmBackend(T, true, CPU);
diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index d8f727887f23..1ef98141f82b 100644
--- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -354,6 +354,9 @@ namespace X86II {
// XOP9 - Prefix to exclude use of imm byte.
XOP9 = 21 << Op0Shift,
+ // XOPA - Prefix to encode 0xA in VEX.MMMM of XOP instructions.
+ XOPA = 22 << Op0Shift,
+
//===------------------------------------------------------------------===//
// REX_W - REX prefixes are instruction prefixes used in 64-bit mode.
// They are used to specify GPRs and SSE registers, 64-bit operand size,
@@ -462,20 +465,54 @@ namespace X86II {
// prefix. Usually used for scalar instructions. Needed by disassembler.
VEX_LIG = 1U << 6,
+    // TODO: we should combine VEX_L and VEX_LIG together to form a 2-bit field
+    // with the following encoding:
+    // - 00 V128
+    // - 01 V256
+    // - 10 V512
+    // - 11 LIG (but, in the instruction encoding, leave VEX.L and EVEX.L' as
+    //   zeros); this will save 1 tsflag bit
+
+    // EVEX - Specifies that this instruction uses the EVEX form, which
+    // provides syntax support for up to 32 512-bit register operands and up
+    // to 7 16-bit mask operands, as well as source operand data
+    // swizzling/memory operand conversion, eviction hints, and rounding modes.
+ EVEX = 1U << 7,
+
+ // EVEX_K - Set if this instruction requires masking
+ EVEX_K = 1U << 8,
+
+ // EVEX_Z - Set if this instruction has EVEX.Z field set.
+ EVEX_Z = 1U << 9,
+
+ // EVEX_L2 - Set if this instruction has EVEX.L' field set.
+ EVEX_L2 = 1U << 10,
+
+ // EVEX_B - Set if this instruction has EVEX.B field set.
+ EVEX_B = 1U << 11,
+
+ // EVEX_CD8E - compressed disp8 form, element-size
+ EVEX_CD8EShift = VEXShift + 12,
+ EVEX_CD8EMask = 3,
+
+ // EVEX_CD8V - compressed disp8 form, vector-width
+ EVEX_CD8VShift = EVEX_CD8EShift + 2,
+ EVEX_CD8VMask = 7,
+
/// Has3DNow0F0FOpcode - This flag indicates that the instruction uses the
/// wacky 0x0F 0x0F prefix for 3DNow! instructions. The manual documents
/// this as having a 0x0F prefix with a 0x0F opcode, and each instruction
/// storing a classifier in the imm8 field. To simplify our implementation,
     /// we handle this by storing the classifier in the opcode field and using
/// this flag to indicate that the encoder should do the wacky 3DNow! thing.
- Has3DNow0F0FOpcode = 1U << 7,
+ Has3DNow0F0FOpcode = 1U << 17,
/// MemOp4 - Used to indicate swapping of operand 3 and 4 to be encoded in
/// ModRM or I8IMM. This is used for FMA4 and XOP instructions.
- MemOp4 = 1U << 8,
+ MemOp4 = 1U << 18,
/// XOP - Opcode prefix used by XOP instructions.
- XOP = 1U << 9
+ XOP = 1U << 19
};
@@ -533,12 +570,19 @@ namespace X86II {
unsigned CurOp = 0;
if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0)
++CurOp;
- else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0) {
- assert(Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1);
+ else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
+ Desc.getOperandConstraint(3, MCOI::TIED_TO) == 1)
+ // Special case for AVX-512 GATHER with 2 TIED_TO operands
+ // Skip the first 2 operands: dst, mask_wb
+ CurOp += 2;
+ else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
+ Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1)
// Special case for GATHER with 2 TIED_TO operands
// Skip the first 2 operands: dst, mask_wb
CurOp += 2;
- }
+ else if (NumOps > 2 && Desc.getOperandConstraint(NumOps - 2, MCOI::TIED_TO) == 0)
+ // SCATTER
+ ++CurOp;
return CurOp;
}
@@ -569,12 +613,15 @@ namespace X86II {
case X86II::MRMSrcMem: {
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
+ bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
+ bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
unsigned FirstMemOp = 1;
if (HasVEX_4V)
++FirstMemOp;// Skip the register source (which is encoded in VEX_VVVV).
if (HasMemOp4)
++FirstMemOp;// Skip the register source (which is encoded in I8IMM).
-
+ if (HasEVEX_K)
+ ++FirstMemOp;// Skip the mask register
// FIXME: Maybe lea should have its own form? This is a horrible hack.
//if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
// Opcode == X86::LEA16r || Opcode == X86::LEA32r)
@@ -611,6 +658,14 @@ namespace X86II {
/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended (r8 or
/// higher) register? e.g. r8, xmm8, xmm13, etc.
inline bool isX86_64ExtendedReg(unsigned RegNo) {
+ if ((RegNo > X86::XMM7 && RegNo <= X86::XMM15) ||
+ (RegNo > X86::XMM23 && RegNo <= X86::XMM31) ||
+ (RegNo > X86::YMM7 && RegNo <= X86::YMM15) ||
+ (RegNo > X86::YMM23 && RegNo <= X86::YMM31) ||
+ (RegNo > X86::ZMM7 && RegNo <= X86::ZMM15) ||
+ (RegNo > X86::ZMM23 && RegNo <= X86::ZMM31))
+ return true;
+
switch (RegNo) {
default: break;
case X86::R8: case X86::R9: case X86::R10: case X86::R11:
@@ -621,16 +676,21 @@ namespace X86II {
case X86::R12W: case X86::R13W: case X86::R14W: case X86::R15W:
case X86::R8B: case X86::R9B: case X86::R10B: case X86::R11B:
case X86::R12B: case X86::R13B: case X86::R14B: case X86::R15B:
- case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
- case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
- case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
- case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
case X86::CR8: case X86::CR9: case X86::CR10: case X86::CR11:
case X86::CR12: case X86::CR13: case X86::CR14: case X86::CR15:
return true;
}
return false;
}
+
+  /// is32ExtendedReg - Is the MachineOperand an AVX-512 extended register
+  /// (zmm16 or higher)? e.g. zmm21, etc.
+ static inline bool is32ExtendedReg(unsigned RegNo) {
+ return ((RegNo > X86::XMM15 && RegNo <= X86::XMM31) ||
+ (RegNo > X86::YMM15 && RegNo <= X86::YMM31) ||
+ (RegNo > X86::ZMM15 && RegNo <= X86::ZMM31));
+ }
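
The split between the two predicates can also be read off the hardware register number (an assumption for illustration only; the checks above compare LLVM enum values, not raw encodings): bit 3 is what REX/VEX can express and isX86_64ExtendedReg tests for, while bit 4 needs the extra EVEX bit that is32ExtendedReg corresponds to. For zmm21, for example:

#include <cstdio>

int main() {
  unsigned HwNum = 21;              // hardware encoding of zmm21 (assumed)
  bool RexBit  = (HwNum >> 3) & 1;  // expressible via REX/VEX (bit 3)
  bool EvexBit = (HwNum >> 4) & 1;  // needs the extra EVEX bit (bit 4)
  std::printf("zmm21: bit3=%d bit4=%d\n", RexBit, EvexBit); // bit3=0 bit4=1
  return 0;
}
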
+
inline bool isX86_64NonExtLowByteReg(unsigned reg) {
return (reg == X86::SPL || reg == X86::BPL ||
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index de80dd835e99..3ddd86599eb7 100644
--- a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -101,7 +101,27 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
} else {
switch ((unsigned)Fixup.getKind()) {
default: llvm_unreachable("invalid fixup kind!");
- case FK_Data_8: Type = ELF::R_X86_64_64; break;
+ case FK_Data_8:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_X86_64_64;
+ break;
+ case MCSymbolRefExpr::VK_GOT:
+ Type = ELF::R_X86_64_GOT64;
+ break;
+ case MCSymbolRefExpr::VK_GOTOFF:
+ Type = ELF::R_X86_64_GOTOFF64;
+ break;
+ case MCSymbolRefExpr::VK_TPOFF:
+ Type = ELF::R_X86_64_TPOFF64;
+ break;
+ case MCSymbolRefExpr::VK_DTPOFF:
+ Type = ELF::R_X86_64_DTPOFF64;
+ break;
+ }
+ break;
case X86::reloc_signed_4byte:
switch (Modifier) {
default:
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
new file mode 100644
index 000000000000..a3eb4fbe4936
--- /dev/null
+++ b/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
@@ -0,0 +1,135 @@
+//===-- X86ELFRelocationInfo.cpp ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/X86MCTargetDesc.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCRelocationInfo.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/ELF.h"
+
+using namespace llvm;
+using namespace object;
+using namespace ELF;
+
+namespace {
+class X86_64ELFRelocationInfo : public MCRelocationInfo {
+public:
+ X86_64ELFRelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
+
+ const MCExpr *createExprForRelocation(RelocationRef Rel) {
+ uint64_t RelType; Rel.getType(RelType);
+ symbol_iterator SymI = Rel.getSymbol();
+
+ StringRef SymName; SymI->getName(SymName);
+ uint64_t SymAddr; SymI->getAddress(SymAddr);
+ uint64_t SymSize; SymI->getSize(SymSize);
+ int64_t Addend; getELFRelocationAddend(Rel, Addend);
+
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(SymName);
+ // FIXME: check that the value is actually the same.
+ if (Sym->isVariable() == false)
+ Sym->setVariableValue(MCConstantExpr::Create(SymAddr, Ctx));
+
+ const MCExpr *Expr = 0;
+ // If hasAddend is true, then we need to add Addend (r_addend) to Expr.
+ bool hasAddend = false;
+
+ // The AMD64 SysV ABI says:
+ // A: the addend used to compute the value of the relocatable field.
+ // B: the base address at which a shared object has been loaded into memory
+ // during execution. Generally, a shared object is built with a 0 base
+ // virtual address, but the execution address will be different.
+ // G: the offset into the global offset table at which the relocation
+ // entry's symbol will reside during execution.
+ // GOT: the address of the global offset table.
+ // L: the place (section offset or address) of the Procedure Linkage Table
+ // entry for a symbol.
+ // P: the place (section offset or address) of the storage unit being
+ // relocated (computed using r_offset).
+ // S: the value of the symbol whose index resides in the relocation entry.
+ // Z: the size of the symbol whose index resides in the relocation entry.
+
+ switch(RelType) {
+ case R_X86_64_NONE:
+ case R_X86_64_COPY:
+ // none
+ break;
+ case R_X86_64_64:
+ case R_X86_64_16:
+ case R_X86_64_8:
+ // S + A
+ case R_X86_64_32:
+ case R_X86_64_32S:
+ // S + A (We don't care about the result not fitting in 32 bits.)
+ case R_X86_64_PC32:
+ case R_X86_64_PC16:
+ case R_X86_64_PC8:
+ case R_X86_64_PC64:
+ // S + A - P (P/pcrel is implicit)
+ hasAddend = true;
+ Expr = MCSymbolRefExpr::Create(Sym, Ctx);
+ break;
+ case R_X86_64_GOT32:
+ case R_X86_64_GOT64:
+ case R_X86_64_GOTPC32:
+ case R_X86_64_GOTPC64:
+ case R_X86_64_GOTPLT64:
+ // G + A
+ hasAddend = true;
+ Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOT, Ctx);
+ break;
+ case R_X86_64_PLT32:
+ // L + A - P -> S@PLT + A
+ hasAddend = true;
+ Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_PLT, Ctx);
+ break;
+ case R_X86_64_GLOB_DAT:
+ case R_X86_64_JUMP_SLOT:
+ // S
+ Expr = MCSymbolRefExpr::Create(Sym, Ctx);
+ break;
+ case R_X86_64_GOTPCREL:
+ case R_X86_64_GOTPCREL64:
+ // G + GOT + A - P -> S@GOTPCREL + A
+ hasAddend = true;
+ Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTPCREL, Ctx);
+ break;
+ case R_X86_64_GOTOFF64:
+ // S + A - GOT
+ Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTOFF, Ctx);
+ break;
+ case R_X86_64_PLTOFF64:
+ // L + A - GOT
+ break;
+ case R_X86_64_SIZE32:
+ case R_X86_64_SIZE64:
+ // Z + A
+ Expr = MCConstantExpr::Create(SymSize, Ctx);
+ break;
+ default:
+ Expr = MCSymbolRefExpr::Create(Sym, Ctx);
+ break;
+ }
+ if (Expr && hasAddend && Addend != 0)
+ Expr = MCBinaryExpr::CreateAdd(Expr,
+ MCConstantExpr::Create(Addend, Ctx),
+ Ctx);
+ return Expr;
+ }
+};
+} // End unnamed namespace
+
+/// createX86_64ELFRelocationInfo - Construct an X86-64 ELF RelocationInfo.
+MCRelocationInfo *llvm::createX86_64ELFRelocationInfo(MCContext &Ctx) {
+ // We only handle x86-64 for now.
+ return new X86_64ELFRelocationInfo(Ctx);
+}
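
To make the "S + A - P" rule from the legend above concrete, a tiny numeric sketch with made-up addresses (hypothetical values, not taken from any real object file):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t S = 0x401000;   // value of the symbol (made up)
  int64_t  A = -4;         // r_addend
  uint64_t P = 0x400f80;   // address of the field being relocated
  // R_X86_64_PC32: the stored word is S + A - P, truncated to 32 bits.
  uint32_t Word = static_cast<uint32_t>(S + A - P);
  std::printf("stored = 0x%x\n", Word); // prints stored = 0x7c
  return 0;
}
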
diff --git a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index 7815ae98c9bd..3861e1ce290a 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -59,10 +59,8 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) {
// for .S files on other systems. Perhaps this is because the file system
// wasn't always case preserving or something.
CommentString = "##";
- PCSymbol = ".";
SupportsDebugInformation = true;
- DwarfUsesInlineInfoSection = true;
UseDataRegionDirectives = MarkedJTDataRegions;
// Exceptions handling
@@ -92,8 +90,6 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
TextAlignFillValue = 0x90;
PrivateGlobalPrefix = ".L";
- WeakRefDirective = "\t.weak\t";
- PCSymbol = ".";
// Set up DWARF directives
HasLEB128 = true; // Target asm supports leb128 directives (little-endian)
@@ -139,6 +135,8 @@ X86MCAsmInfoMicrosoft::X86MCAsmInfoMicrosoft(const Triple &Triple) {
AssemblerDialect = AsmWriterFlavor;
TextAlignFillValue = 0x90;
+
+ AllowAtInName = true;
}
void X86MCAsmInfoGNUCOFF::anchor() { }
diff --git a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
index b6b70fd3e855..80979dda6770 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
+++ b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
@@ -17,6 +17,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmInfoCOFF.h"
#include "llvm/MC/MCAsmInfoDarwin.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
class Triple;
@@ -35,7 +36,7 @@ namespace llvm {
MCStreamer &Streamer) const;
};
- class X86ELFMCAsmInfo : public MCAsmInfo {
+ class X86ELFMCAsmInfo : public MCAsmInfoELF {
virtual void anchor();
public:
explicit X86ELFMCAsmInfo(const Triple &Triple);
diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 016af71501aa..7952607aca0e 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -53,7 +53,7 @@ public:
}
unsigned GetX86RegNum(const MCOperand &MO) const {
- return Ctx.getRegisterInfo().getEncodingValue(MO.getReg()) & 0x7;
+ return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
}
// On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
@@ -77,6 +77,14 @@ public:
return (~SrcRegNum) & 0xf;
}
+ unsigned char getWriteMaskRegisterEncoding(const MCInst &MI,
+ unsigned OpNum) const {
+ assert(X86::K0 != MI.getOperand(OpNum).getReg() &&
+ "Invalid mask register as write-mask!");
+ unsigned MaskRegNum = GetX86RegNum(MI.getOperand(OpNum));
+ return MaskRegNum;
+ }
+
void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
OS << (char)C;
++CurByte;
@@ -152,6 +160,52 @@ static bool isDisp8(int Value) {
return Value == (signed char)Value;
}
+/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
+/// compressed displacement field.
+static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
+ assert(((TSFlags >> X86II::VEXShift) & X86II::EVEX) &&
+ "Compressed 8-bit displacement is only valid for EVEX inst.");
+
+ unsigned CD8E = (TSFlags >> X86II::EVEX_CD8EShift) & X86II::EVEX_CD8EMask;
+ unsigned CD8V = (TSFlags >> X86II::EVEX_CD8VShift) & X86II::EVEX_CD8VMask;
+
+ if (CD8V == 0 && CD8E == 0) {
+ CValue = Value;
+ return isDisp8(Value);
+ }
+
+ unsigned MemObjSize = 1U << CD8E;
+ if (CD8V & 4) {
+ // Fixed vector length
+ MemObjSize *= 1U << (CD8V & 0x3);
+ } else {
+ // Modified vector length
+ bool EVEX_b = (TSFlags >> X86II::VEXShift) & X86II::EVEX_B;
+ if (!EVEX_b) {
+ unsigned EVEX_LL = ((TSFlags >> X86II::VEXShift) & X86II::VEX_L) ? 1 : 0;
+ EVEX_LL += ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2) ? 2 : 0;
+      assert(EVEX_LL < 3 && "Invalid vector length!");
+
+ unsigned NumElems = (1U << (EVEX_LL + 4)) / MemObjSize;
+ NumElems /= 1U << (CD8V & 0x3);
+
+ MemObjSize *= NumElems;
+ }
+ }
+
+ unsigned MemObjMask = MemObjSize - 1;
+ assert((MemObjSize & MemObjMask) == 0 && "Invalid memory object size.");
+
+ if (Value & MemObjMask) // Unaligned offset
+ return false;
+ Value /= MemObjSize;
+ bool Ret = (Value == (signed char)Value);
+
+ if (Ret)
+ CValue = Value;
+ return Ret;
+}
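
For intuition, the compressed-disp8 rule can be checked in isolation — a sketch that mirrors the tail of isCDisp8, assuming a hypothetical memory object size of 64 bytes (a full ZMM access):

#include <cstdio>

// Can Disp be stored as a compressed 8-bit displacement for a memory object
// of MemObjSize bytes? On success, CDisp receives the scaled value.
static bool fitsCDisp8(int Disp, unsigned MemObjSize, int &CDisp) {
  if (Disp & (int)(MemObjSize - 1)) // must be a multiple of the object size
    return false;
  int Scaled = Disp / (int)MemObjSize;
  if (Scaled != (signed char)Scaled) // the scaled value must fit in 8 bits
    return false;
  CDisp = Scaled;
  return true;
}

int main() {
  int CDisp = 0;
  std::printf("%d\n", fitsCDisp8(4096, 64, CDisp));  // 1 (CDisp == 64)
  std::printf("%d\n", fitsCDisp8(4100, 64, CDisp));  // 0, unaligned
  std::printf("%d\n", fitsCDisp8(16384, 64, CDisp)); // 0, 256 overflows 8 bits
  return 0;
}
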
+
/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
/// in an instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
@@ -318,6 +372,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
unsigned BaseReg = Base.getReg();
+ bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
// Handle %rip relative addressing.
if (BaseReg == X86::RIP) { // [disp32+RIP] in X86-64 mode
@@ -378,10 +433,21 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
}
// Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
- if (Disp.isImm() && isDisp8(Disp.getImm())) {
- EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
- EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
- return;
+ if (Disp.isImm()) {
+ if (!HasEVEX && isDisp8(Disp.getImm())) {
+ EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
+ EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
+ return;
+ }
+    // Try EVEX compressed 8-bit displacement first; if that fails, fall back
+    // to a 32-bit displacement.
+ int CDisp8 = 0;
+ if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
+ EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
+ EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
+ CDisp8 - Disp.getImm());
+ return;
+ }
}
// Otherwise, emit the most general non-SIB encoding: [REG+disp32]
@@ -397,6 +463,8 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
bool ForceDisp32 = false;
bool ForceDisp8 = false;
+ int CDisp8 = 0;
+ int ImmOffset = 0;
if (BaseReg == 0) {
// If there is no base register, we emit the special case SIB byte with
// MOD=0, BASE=5, to JUST get the index, scale, and displacement.
@@ -412,10 +480,15 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
BaseRegNo != N86::EBP) {
// Emit no displacement ModR/M byte
EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
- } else if (isDisp8(Disp.getImm())) {
+ } else if (!HasEVEX && isDisp8(Disp.getImm())) {
// Emit the disp8 encoding.
EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
+ } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
+ // Emit the disp8 encoding.
+ EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
+ ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
+ ImmOffset = CDisp8 - Disp.getImm();
} else {
// Emit the normal disp32 encoding.
EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
@@ -445,7 +518,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// Do we need to output a displacement?
if (ForceDisp8)
- EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
+ EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups, ImmOffset);
else if (ForceDisp32 || Disp.getImm() != 0)
EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
CurByte, OS, Fixups);
@@ -457,6 +530,8 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const MCInstrDesc &Desc,
raw_ostream &OS) const {
+ bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
+ bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
@@ -468,6 +543,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 0: Same as REX_R=1 (64 bit mode only)
//
unsigned char VEX_R = 0x1;
+ unsigned char EVEX_R2 = 0x1;
// VEX_X: equivalent to REX.X, only used when a
// register is used for index in SIB Byte.
@@ -488,7 +564,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
unsigned char VEX_W = 0;
// XOP: Use XOP prefix byte 0x8f instead of VEX.
- unsigned char XOP = 0;
+ bool XOP = false;
// VEX_5M (VEX m-mmmmm field):
//
@@ -498,12 +574,14 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 0b00011: implied 0F 3A leading opcode bytes
// 0b00100-0b11111: Reserved for future use
// 0b01000: XOP map select - 08h instructions with imm byte
- // 0b10001: XOP map select - 09h instructions with no imm byte
+ // 0b01001: XOP map select - 09h instructions with no imm byte
+ // 0b01010: XOP map select - 0Ah instructions with imm dword
unsigned char VEX_5M = 0x1;
// VEX_4V (VEX vvvv field): a register specifier
// (in 1's complement form) or 1111 if unused.
unsigned char VEX_4V = 0xf;
+ unsigned char EVEX_V2 = 0x1;
// VEX_L (Vector Length):
//
@@ -511,6 +589,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 1: 256-bit vector
//
unsigned char VEX_L = 0;
+ unsigned char EVEX_L2 = 0;
// VEX_PP: opcode extension providing equivalent
// functionality of a SIMD prefix
@@ -522,6 +601,18 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
//
unsigned char VEX_PP = 0;
+ // EVEX_U
+ unsigned char EVEX_U = 1; // Always '1' so far
+
+ // EVEX_z
+ unsigned char EVEX_z = 0;
+
+ // EVEX_b
+ unsigned char EVEX_b = 0;
+
+ // EVEX_aaa
+ unsigned char EVEX_aaa = 0;
+
// Encode the operand size opcode prefix as needed.
if (TSFlags & X86II::OpSize)
VEX_PP = 0x01;
@@ -530,10 +621,18 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
VEX_W = 1;
if ((TSFlags >> X86II::VEXShift) & X86II::XOP)
- XOP = 1;
+ XOP = true;
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
VEX_L = 1;
+ if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2))
+ EVEX_L2 = 1;
+
+ if (HasEVEX_K && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_Z))
+ EVEX_z = 1;
+
+ if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_B))
+ EVEX_b = 1;
switch (TSFlags & X86II::Op0Mask) {
default: llvm_unreachable("Invalid prefix!");
@@ -567,11 +666,11 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
case X86II::XOP9:
VEX_5M = 0x9;
break;
- case X86II::A6: // Bypass: Not used by VEX
- case X86II::A7: // Bypass: Not used by VEX
- case X86II::TB: // Bypass: Not used by VEX
- case 0:
- break; // No prefix!
+ case X86II::XOPA:
+ VEX_5M = 0xA;
+ break;
+ case X86II::TB: // VEX_5M/VEX_PP already correct
+ break;
}
@@ -580,12 +679,19 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
unsigned CurOp = 0;
if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0)
++CurOp;
- else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0) {
- assert(Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1);
+ else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
+ Desc.getOperandConstraint(3, MCOI::TIED_TO) == 1)
+ // Special case for AVX-512 GATHER with 2 TIED_TO operands
+ // Skip the first 2 operands: dst, mask_wb
+ CurOp += 2;
+ else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
+ Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1)
// Special case for GATHER with 2 TIED_TO operands
// Skip the first 2 operands: dst, mask_wb
CurOp += 2;
- }
+ else if (NumOps > 2 && Desc.getOperandConstraint(NumOps - 2, MCOI::TIED_TO) == 0)
+ // SCATTER
+ ++CurOp;
switch (TSFlags & X86II::FormMask) {
case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this!");
@@ -595,18 +701,35 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// MemAddr, src1(VEX_4V), src2(ModR/M)
// MemAddr, src1(ModR/M), imm8
//
- if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrBaseReg).getReg()))
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
+ X86::AddrBaseReg).getReg()))
VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrIndexReg).getReg()))
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
+ X86::AddrIndexReg).getReg()))
VEX_X = 0x0;
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand +
+ X86::AddrIndexReg).getReg()))
+ EVEX_V2 = 0x0;
+
+ CurOp += X86::AddrNumOperands;
- CurOp = X86::AddrNumOperands;
- if (HasVEX_4V)
- VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
+ if (HasEVEX_K)
+ EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+
+ if (HasVEX_4V) {
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ EVEX_V2 = 0x0;
+ CurOp++;
+ }
const MCOperand &MO = MI.getOperand(CurOp);
- if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
- VEX_R = 0x0;
+ if (MO.isReg()) {
+ if (X86II::isX86_64ExtendedReg(MO.getReg()))
+ VEX_R = 0x0;
+ if (HasEVEX && X86II::is32ExtendedReg(MO.getReg()))
+ EVEX_R2 = 0x0;
+ }
break;
}
case X86II::MRMSrcMem:
@@ -619,11 +742,21 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// FMA4:
// dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp++).getReg()))
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_R = 0x0;
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ EVEX_R2 = 0x0;
+ CurOp++;
+
+ if (HasEVEX_K)
+ EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
- if (HasVEX_4V)
+ if (HasVEX_4V) {
VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ EVEX_V2 = 0x0;
+ CurOp++;
+ }
if (X86II::isX86_64ExtendedReg(
MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
@@ -631,6 +764,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if (X86II::isX86_64ExtendedReg(
MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
VEX_X = 0x0;
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand +
+ X86::AddrIndexReg).getReg()))
+ EVEX_V2 = 0x0;
if (HasVEX_4VOp3)
// Instruction format for 4VOp3:
@@ -647,8 +783,15 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// MRM[0-9]m instructions forms:
// MemAddr
// src1(VEX_4V), MemAddr
- if (HasVEX_4V)
- VEX_4V = getVEXRegisterEncoding(MI, 0);
+ if (HasVEX_4V) {
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ EVEX_V2 = 0x0;
+ CurOp++;
+ }
+
+ if (HasEVEX_K)
+ EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
if (X86II::isX86_64ExtendedReg(
MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
@@ -669,16 +812,27 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_R = 0x0;
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ EVEX_R2 = 0x0;
CurOp++;
- if (HasVEX_4V)
- VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
+ if (HasEVEX_K)
+ EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+
+ if (HasVEX_4V) {
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ EVEX_V2 = 0x0;
+ CurOp++;
+ }
if (HasMemOp4) // Skip second register source (encoded in I8IMM)
CurOp++;
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_B = 0x0;
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_X = 0x0;
CurOp++;
if (HasVEX_4VOp3)
VEX_4V = getVEXRegisterEncoding(MI, CurOp);
@@ -690,13 +844,24 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// dst(ModR/M), src1(VEX_4V), src2(ModR/M)
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_B = 0x0;
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_X = 0x0;
CurOp++;
- if (HasVEX_4V)
- VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
+ if (HasEVEX_K)
+ EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+
+ if (HasVEX_4V) {
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ EVEX_V2 = 0x0;
+ CurOp++;
+ }
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_R = 0x0;
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ EVEX_R2 = 0x0;
break;
case X86II::MRM0r: case X86II::MRM1r:
case X86II::MRM2r: case X86II::MRM3r:
@@ -704,9 +869,19 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
case X86II::MRM6r: case X86II::MRM7r:
// MRM0r-MRM7r instructions forms:
// dst(VEX_4V), src(ModR/M), imm8
- VEX_4V = getVEXRegisterEncoding(MI, 0);
- if (X86II::isX86_64ExtendedReg(MI.getOperand(1).getReg()))
+ if (HasVEX_4V) {
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ EVEX_V2 = 0x0;
+ CurOp++;
+ }
+ if (HasEVEX_K)
+ EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_B = 0x0;
+ if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_X = 0x0;
break;
default: // RawFrm
break;
@@ -715,29 +890,58 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// Emit segment override opcode prefix as needed.
EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
- // VEX opcode prefix can have 2 or 3 bytes
- //
- // 3 bytes:
- // +-----+ +--------------+ +-------------------+
- // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
- // +-----+ +--------------+ +-------------------+
- // 2 bytes:
- // +-----+ +-------------------+
- // | C5h | | R | vvvv | L | pp |
- // +-----+ +-------------------+
- //
- unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
+ if (!HasEVEX) {
+ // VEX opcode prefix can have 2 or 3 bytes
+ //
+ // 3 bytes:
+ // +-----+ +--------------+ +-------------------+
+ // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
+ // +-----+ +--------------+ +-------------------+
+ // 2 bytes:
+ // +-----+ +-------------------+
+ // | C5h | | R | vvvv | L | pp |
+ // +-----+ +-------------------+
+ //
+ unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
- if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
- EmitByte(0xC5, CurByte, OS);
- EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
- return;
- }
+ if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
+ EmitByte(0xC5, CurByte, OS);
+ EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
+ return;
+ }
- // 3 byte VEX prefix
- EmitByte(XOP ? 0x8F : 0xC4, CurByte, OS);
- EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
- EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
+ // 3 byte VEX prefix
+ EmitByte(XOP ? 0x8F : 0xC4, CurByte, OS);
+ EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
+ EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
+ } else {
+ // EVEX opcode prefix can have 4 bytes
+ //
+ // +-----+ +--------------+ +-------------------+ +------------------------+
+ // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
+ // +-----+ +--------------+ +-------------------+ +------------------------+
+ assert((VEX_5M & 0x3) == VEX_5M
+ && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");
+
+ VEX_5M &= 0x3;
+
+ EmitByte(0x62, CurByte, OS);
+ EmitByte((VEX_R << 7) |
+ (VEX_X << 6) |
+ (VEX_B << 5) |
+ (EVEX_R2 << 4) |
+ VEX_5M, CurByte, OS);
+ EmitByte((VEX_W << 7) |
+ (VEX_4V << 3) |
+ (EVEX_U << 2) |
+ VEX_PP, CurByte, OS);
+ EmitByte((EVEX_z << 7) |
+ (EVEX_L2 << 6) |
+ (VEX_L << 5) |
+ (EVEX_b << 4) |
+ (EVEX_V2 << 3) |
+ EVEX_aaa, CurByte, OS);
+ }
}
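
As a quick sanity check of the 2-byte VEX layout drawn above, with hypothetical field values VEX.R = 1, vvvv = 0b1111, L = 0 and pp = 01 (the 0x66 prefix), the two emitted bytes come out as C5 F9:

#include <cstdio>

int main() {
  // Hypothetical 2-byte VEX prefix fields (see the diagram above).
  unsigned VEX_R = 1, VEX_4V = 0xF, VEX_L = 0, VEX_PP = 1;
  unsigned LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
  std::printf("%02X %02X\n", 0xC5, LastByte | (VEX_R << 7)); // prints C5 F9
  return 0;
}
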
/// DetermineREXPrefix - Determine if the MCInst has to be encoded with a X86-64
@@ -1007,6 +1211,10 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
const unsigned MemOp4_I8IMMOperand = 2;
+  // Does this instruction use the EVEX write-mask (EVEX.aaa) field?
+ bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
+ bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
+
// Determine where the memory operand starts, if present.
int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
if (MemoryOperand != -1) MemoryOperand += CurOp;
@@ -1057,6 +1265,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(BaseOpcode, CurByte, OS);
SrcRegNum = CurOp + 1;
+ if (HasEVEX_K) // Skip writemask
+ SrcRegNum++;
+
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
@@ -1069,6 +1280,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(BaseOpcode, CurByte, OS);
SrcRegNum = CurOp + X86::AddrNumOperands;
+ if (HasEVEX_K) // Skip writemask
+ SrcRegNum++;
+
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
@@ -1082,6 +1296,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(BaseOpcode, CurByte, OS);
SrcRegNum = CurOp + 1;
+ if (HasEVEX_K) // Skip writemask
+ SrcRegNum++;
+
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
@@ -1100,6 +1317,12 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRMSrcMem: {
int AddrOperands = X86::AddrNumOperands;
unsigned FirstMemOp = CurOp+1;
+
+ if (HasEVEX_K) { // Skip writemask
+ ++AddrOperands;
+ ++FirstMemOp;
+ }
+
if (HasVEX_4V) {
++AddrOperands;
++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 5e84530cd729..1cbdafdf151d 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -263,7 +263,7 @@ static MCRegisterInfo *createX86MCRegisterInfo(StringRef TT) {
return X;
}
-static MCAsmInfo *createX86MCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) {
Triple TheTriple(TT);
bool is64Bit = TheTriple.getArch() == Triple::x86_64;
@@ -290,14 +290,16 @@ static MCAsmInfo *createX86MCAsmInfo(const Target &T, StringRef TT) {
int stackGrowth = is64Bit ? -8 : -4;
// Initial state of the frame pointer is esp+stackGrowth.
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(is64Bit ? X86::RSP : X86::ESP, stackGrowth);
- MAI->addInitialFrameState(0, Dst, Src);
+ unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(
+ 0, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
+ MAI->addInitialFrameState(Inst);
// Add return address to move list
- MachineLocation CSDst(is64Bit ? X86::RSP : X86::ESP, stackGrowth);
- MachineLocation CSSrc(is64Bit ? X86::RIP : X86::EIP);
- MAI->addInitialFrameState(0, CSDst, CSSrc);
+ unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
+ MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
+ 0, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
+ MAI->addInitialFrameState(Inst2);
return MAI;
}
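
Taken together, the two MCCFIInstructions created above say that at function entry the CFA is rsp + 8 (in the 64-bit case) and the return address lives at CFA - 8. A trivial arithmetic sketch with a made-up stack pointer value:

#include <cstdint>
#include <cstdio>

int main() {
  const int stackGrowth = -8;        // 64-bit: the stack grows down by 8
  uint64_t rsp = 0x7fffffffe000ULL;  // made-up rsp value at function entry
  uint64_t cfa = rsp - stackGrowth;  // .cfi_def_cfa rsp, 8
  uint64_t ra  = cfa + stackGrowth;  // return address slot: .cfi_offset rip, -8
  std::printf("cfa = %#llx, return address at %#llx\n",
              (unsigned long long)cfa, (unsigned long long)ra);
  return 0;
}
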
@@ -366,7 +368,7 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
return createWinCOFFStreamer(Ctx, MAB, *_Emitter, _OS, RelaxAll);
- return createELFStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll, NoExecStack);
+ return createELFStreamer(Ctx, 0, MAB, _OS, _Emitter, RelaxAll, NoExecStack);
}
static MCInstPrinter *createX86MCInstPrinter(const Target &T,
@@ -382,6 +384,17 @@ static MCInstPrinter *createX86MCInstPrinter(const Target &T,
return 0;
}
+static MCRelocationInfo *createX86MCRelocationInfo(StringRef TT,
+ MCContext &Ctx) {
+ Triple TheTriple(TT);
+ if (TheTriple.isEnvironmentMachO() && TheTriple.getArch() == Triple::x86_64)
+ return createX86_64MachORelocationInfo(Ctx);
+ else if (TheTriple.isOSBinFormatELF())
+ return createX86_64ELFRelocationInfo(Ctx);
+ // Default to the stock relocation info.
+ return llvm::createMCRelocationInfo(TT, Ctx);
+}
+
static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
return new MCInstrAnalysis(Info);
}
@@ -439,4 +452,10 @@ extern "C" void LLVMInitializeX86TargetMC() {
createX86MCInstPrinter);
TargetRegistry::RegisterMCInstPrinter(TheX86_64Target,
createX86MCInstPrinter);
+
+ // Register the MC relocation info.
+ TargetRegistry::RegisterMCRelocationInfo(TheX86_32Target,
+ createX86MCRelocationInfo);
+ TargetRegistry::RegisterMCRelocationInfo(TheX86_64Target,
+ createX86MCRelocationInfo);
}
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index 981aa1a2b911..41ae4354cbfe 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -25,6 +25,7 @@ class MCInstrInfo;
class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
+class MCRelocationInfo;
class Target;
class StringRef;
class raw_ostream;
@@ -78,8 +79,10 @@ MCCodeEmitter *createX86MCCodeEmitter(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
MCContext &Ctx);
-MCAsmBackend *createX86_32AsmBackend(const Target &T, StringRef TT, StringRef CPU);
-MCAsmBackend *createX86_64AsmBackend(const Target &T, StringRef TT, StringRef CPU);
+MCAsmBackend *createX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+MCAsmBackend *createX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
/// createX86MachObjectWriter - Construct an X86 Mach-O object writer.
MCObjectWriter *createX86MachObjectWriter(raw_ostream &OS,
@@ -94,6 +97,12 @@ MCObjectWriter *createX86ELFObjectWriter(raw_ostream &OS,
uint16_t EMachine);
/// createX86WinCOFFObjectWriter - Construct an X86 Win COFF object writer.
MCObjectWriter *createX86WinCOFFObjectWriter(raw_ostream &OS, bool Is64Bit);
+
+/// createX86_64MachORelocationInfo - Construct X86-64 Mach-O relocation info.
+MCRelocationInfo *createX86_64MachORelocationInfo(MCContext &Ctx);
+
+/// createX86_64ELFRelocationInfo - Construct X86-64 ELF relocation info.
+MCRelocationInfo *createX86_64ELFRelocationInfo(MCContext &Ctx);
} // End llvm namespace
diff --git a/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
new file mode 100644
index 000000000000..209b1d0ee8bf
--- /dev/null
+++ b/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
@@ -0,0 +1,116 @@
+//===-- X86MachORelocationInfo.cpp ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/X86MCTargetDesc.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCRelocationInfo.h"
+#include "llvm/Object/MachO.h"
+
+using namespace llvm;
+using namespace object;
+using namespace MachO;
+
+namespace {
+class X86_64MachORelocationInfo : public MCRelocationInfo {
+public:
+ X86_64MachORelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
+
+ const MCExpr *createExprForRelocation(RelocationRef Rel) {
+ const MachOObjectFile *Obj = cast<MachOObjectFile>(Rel.getObjectFile());
+
+ uint64_t RelType; Rel.getType(RelType);
+ symbol_iterator SymI = Rel.getSymbol();
+
+ StringRef SymName; SymI->getName(SymName);
+ uint64_t SymAddr; SymI->getAddress(SymAddr);
+
+ any_relocation_info RE = Obj->getRelocation(Rel.getRawDataRefImpl());
+ bool isPCRel = Obj->getAnyRelocationPCRel(RE);
+
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(SymName);
+ // FIXME: check that the value is actually the same.
+ if (Sym->isVariable() == false)
+ Sym->setVariableValue(MCConstantExpr::Create(SymAddr, Ctx));
+ const MCExpr *Expr = 0;
+
+ switch(RelType) {
+ case X86_64_RELOC_TLV:
+ Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
+ break;
+ case X86_64_RELOC_SIGNED_4:
+ Expr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(Sym, Ctx),
+ MCConstantExpr::Create(4, Ctx),
+ Ctx);
+ break;
+ case X86_64_RELOC_SIGNED_2:
+ Expr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(Sym, Ctx),
+ MCConstantExpr::Create(2, Ctx),
+ Ctx);
+ break;
+ case X86_64_RELOC_SIGNED_1:
+ Expr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(Sym, Ctx),
+ MCConstantExpr::Create(1, Ctx),
+ Ctx);
+ break;
+ case X86_64_RELOC_GOT_LOAD:
+ Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTPCREL, Ctx);
+ break;
+ case X86_64_RELOC_GOT:
+ Expr = MCSymbolRefExpr::Create(Sym, isPCRel ?
+ MCSymbolRefExpr::VK_GOTPCREL :
+ MCSymbolRefExpr::VK_GOT,
+ Ctx);
+ break;
+ case X86_64_RELOC_SUBTRACTOR:
+ {
+ RelocationRef RelNext;
+ Obj->getRelocationNext(Rel.getRawDataRefImpl(), RelNext);
+ any_relocation_info RENext = Obj->getRelocation(RelNext.getRawDataRefImpl());
+
+ // X86_64_SUBTRACTOR must be followed by a relocation of type
+ // X86_64_RELOC_UNSIGNED.
+ // NOTE: Scattered relocations don't exist on x86_64.
+ unsigned RType = Obj->getAnyRelocationType(RENext);
+ if (RType != X86_64_RELOC_UNSIGNED)
+ report_fatal_error("Expected X86_64_RELOC_UNSIGNED after "
+ "X86_64_RELOC_SUBTRACTOR.");
+
+ const MCExpr *LHS = MCSymbolRefExpr::Create(Sym, Ctx);
+
+ symbol_iterator RSymI = RelNext.getSymbol();
+ uint64_t RSymAddr;
+ RSymI->getAddress(RSymAddr);
+ StringRef RSymName;
+ RSymI->getName(RSymName);
+
+ MCSymbol *RSym = Ctx.GetOrCreateSymbol(RSymName);
+ if (RSym->isVariable() == false)
+ RSym->setVariableValue(MCConstantExpr::Create(RSymAddr, Ctx));
+
+ const MCExpr *RHS = MCSymbolRefExpr::Create(RSym, Ctx);
+
+ Expr = MCBinaryExpr::CreateSub(LHS, RHS, Ctx);
+ break;
+ }
+ default:
+ Expr = MCSymbolRefExpr::Create(Sym, Ctx);
+ break;
+ }
+ return Expr;
+ }
+};
+} // End unnamed namespace
+
+/// createX86_64MachORelocationInfo - Construct an X86-64 Mach-O RelocationInfo.
+MCRelocationInfo *llvm::createX86_64MachORelocationInfo(MCContext &Ctx) {
+ return new X86_64MachORelocationInfo(Ctx);
+}
diff --git a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index 64f005c469bc..eb7c0b1a9965 100644
--- a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -16,12 +16,11 @@
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCValue.h"
-#include "llvm/Object/MachOFormat.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/MachO.h"
using namespace llvm;
-using namespace llvm::object;
namespace {
class X86MachObjectWriter : public MCMachObjectTargetWriter {
@@ -132,7 +131,7 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
if (Target.isAbsolute()) { // constant
// SymbolNum of 0 indicates the absolute section.
- Type = macho::RIT_X86_64_Unsigned;
+ Type = MachO::X86_64_RELOC_UNSIGNED;
Index = 0;
// FIXME: I believe this is broken, I don't think the linker can understand
@@ -141,26 +140,31 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
// is to use an absolute symbol (which we don't support yet).
if (IsPCRel) {
IsExtern = 1;
- Type = macho::RIT_X86_64_Branch;
+ Type = MachO::X86_64_RELOC_BRANCH;
}
} else if (Target.getSymB()) { // A - B + constant
const MCSymbol *A = &Target.getSymA()->getSymbol();
+ if (A->isTemporary())
+ A = &A->AliasedSymbol();
MCSymbolData &A_SD = Asm.getSymbolData(*A);
const MCSymbolData *A_Base = Asm.getAtom(&A_SD);
const MCSymbol *B = &Target.getSymB()->getSymbol();
+ if (B->isTemporary())
+ B = &B->AliasedSymbol();
MCSymbolData &B_SD = Asm.getSymbolData(*B);
const MCSymbolData *B_Base = Asm.getAtom(&B_SD);
// Neither symbol can be modified.
if (Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None ||
Target.getSymB()->getKind() != MCSymbolRefExpr::VK_None)
- report_fatal_error("unsupported relocation of modified symbol");
+ report_fatal_error("unsupported relocation of modified symbol", false);
// We don't support PCrel relocations of differences. Darwin 'as' doesn't
// implement most of these correctly.
if (IsPCRel)
- report_fatal_error("unsupported pc-relative relocation of difference");
+ report_fatal_error("unsupported pc-relative relocation of difference",
+ false);
// The support for the situation where one or both of the symbols would
// require a local relocation is handled just like if the symbols were
@@ -173,7 +177,13 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
// single SIGNED relocation); reject it for now. Except the case where both
// symbols don't have a base, equal but both NULL.
if (A_Base == B_Base && A_Base)
- report_fatal_error("unsupported relocation with identical base");
+ report_fatal_error("unsupported relocation with identical base", false);
+
+ // A subtraction expression where both symbols are undefined is a
+ // non-relocatable expression.
+ if (A->isUndefined() && B->isUndefined())
+ report_fatal_error("unsupported relocation with subtraction expression",
+ false);
Value += Writer->getSymbolAddress(&A_SD, Layout) -
(A_Base == NULL ? 0 : Writer->getSymbolAddress(A_Base, Layout));
@@ -188,15 +198,15 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
Index = A_SD.getFragment()->getParent()->getOrdinal() + 1;
IsExtern = 0;
}
- Type = macho::RIT_X86_64_Unsigned;
-
- macho::RelocationEntry MRE;
- MRE.Word0 = FixupOffset;
- MRE.Word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (IsExtern << 27) |
- (Type << 28));
+ Type = MachO::X86_64_RELOC_UNSIGNED;
+
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (IsExtern << 27) |
+ (Type << 28));
Writer->addRelocation(Fragment->getParent(), MRE);
if (B_Base) {
@@ -207,7 +217,7 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
Index = B_SD.getFragment()->getParent()->getOrdinal() + 1;
IsExtern = 0;
}
- Type = macho::RIT_X86_64_Subtractor;
+ Type = MachO::X86_64_RELOC_SUBTRACTOR;
} else {
const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
MCSymbolData &SD = Asm.getSymbolData(*Symbol);
@@ -252,11 +262,11 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
return;
} else {
report_fatal_error("unsupported relocation of variable '" +
- Symbol->getName() + "'");
+ Symbol->getName() + "'", false);
}
} else {
report_fatal_error("unsupported relocation of undefined symbol '" +
- Symbol->getName() + "'");
+ Symbol->getName() + "'", false);
}
MCSymbolRefExpr::VariantKind Modifier = Target.getSymA()->getKind();
@@ -267,15 +277,16 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
// rewrite the movq to an leaq at link time if the symbol ends up in
// the same linkage unit.
if (unsigned(Fixup.getKind()) == X86::reloc_riprel_4byte_movq_load)
- Type = macho::RIT_X86_64_GOTLoad;
+ Type = MachO::X86_64_RELOC_GOT_LOAD;
else
- Type = macho::RIT_X86_64_GOT;
+ Type = MachO::X86_64_RELOC_GOT;
} else if (Modifier == MCSymbolRefExpr::VK_TLVP) {
- Type = macho::RIT_X86_64_TLV;
+ Type = MachO::X86_64_RELOC_TLV;
} else if (Modifier != MCSymbolRefExpr::VK_None) {
- report_fatal_error("unsupported symbol modifier in relocation");
+ report_fatal_error("unsupported symbol modifier in relocation",
+ false);
} else {
- Type = macho::RIT_X86_64_Signed;
+ Type = MachO::X86_64_RELOC_SIGNED;
// The Darwin x86_64 relocation format has a problem where it cannot
// encode an address (L<foo> + <constant>) which is outside the atom
@@ -292,34 +303,40 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
// (the additional bias), but instead appear to just look at the final
// offset.
switch (-(Target.getConstant() + (1LL << Log2Size))) {
- case 1: Type = macho::RIT_X86_64_Signed1; break;
- case 2: Type = macho::RIT_X86_64_Signed2; break;
- case 4: Type = macho::RIT_X86_64_Signed4; break;
+ case 1: Type = MachO::X86_64_RELOC_SIGNED_1; break;
+ case 2: Type = MachO::X86_64_RELOC_SIGNED_2; break;
+ case 4: Type = MachO::X86_64_RELOC_SIGNED_4; break;
}
}
} else {
if (Modifier != MCSymbolRefExpr::VK_None)
report_fatal_error("unsupported symbol modifier in branch "
- "relocation");
+ "relocation", false);
- Type = macho::RIT_X86_64_Branch;
+ Type = MachO::X86_64_RELOC_BRANCH;
}
} else {
if (Modifier == MCSymbolRefExpr::VK_GOT) {
- Type = macho::RIT_X86_64_GOT;
+ Type = MachO::X86_64_RELOC_GOT;
} else if (Modifier == MCSymbolRefExpr::VK_GOTPCREL) {
// GOTPCREL is allowed as a modifier on non-PCrel instructions, in which
// case all we do is set the PCrel bit in the relocation entry; this is
// used with exception handling, for example. The source is required to
// include any necessary offset directly.
- Type = macho::RIT_X86_64_GOT;
+ Type = MachO::X86_64_RELOC_GOT;
IsPCRel = 1;
} else if (Modifier == MCSymbolRefExpr::VK_TLVP) {
- report_fatal_error("TLVP symbol modifier should have been rip-rel");
+ report_fatal_error("TLVP symbol modifier should have been rip-rel",
+ false);
} else if (Modifier != MCSymbolRefExpr::VK_None)
- report_fatal_error("unsupported symbol modifier in relocation");
- else
- Type = macho::RIT_X86_64_Unsigned;
+ report_fatal_error("unsupported symbol modifier in relocation", false);
+ else {
+ Type = MachO::X86_64_RELOC_UNSIGNED;
+ unsigned Kind = Fixup.getKind();
+ if (Kind == X86::reloc_signed_4byte)
+ report_fatal_error("32-bit absolute addressing is not supported in "
+ "64-bit mode", false);
+ }
}
}
@@ -327,13 +344,13 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
FixedValue = Value;
// struct relocation_info (8 bytes)
- macho::RelocationEntry MRE;
- MRE.Word0 = FixupOffset;
- MRE.Word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (IsExtern << 27) |
- (Type << 28));
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (IsExtern << 27) |
+ (Type << 28));
Writer->addRelocation(Fragment->getParent(), MRE);
}
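
For reference, the r_word1 packing used in the relocation_info writes above can be reproduced on its own — hypothetical field values, e.g. a pc-relative, 4-byte, extern branch relocation against symbol index 3:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical fields: symbol index 3, pc-relative, log2(size) = 2,
  // extern, type X86_64_RELOC_BRANCH (2).
  uint32_t Index = 3, IsPCRel = 1, Log2Size = 2, IsExtern = 1, Type = 2;
  uint32_t Word1 = (Index    << 0)  |
                   (IsPCRel  << 24) |
                   (Log2Size << 25) |
                   (IsExtern << 27) |
                   (Type     << 28);
  std::printf("r_word1 = 0x%08X\n", Word1); // prints r_word1 = 0x2D000003
  return 0;
}
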
@@ -347,7 +364,7 @@ bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
uint64_t &FixedValue) {
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
- unsigned Type = macho::RIT_Vanilla;
+ unsigned Type = MachO::GENERIC_RELOC_VANILLA;
// See <reloc.h>.
const MCSymbol *A = &Target.getSymA()->getSymbol();
@@ -355,7 +372,8 @@ bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
if (!A_SD->getFragment())
report_fatal_error("symbol '" + A->getName() +
- "' can not be undefined in a subtraction expression");
+ "' can not be undefined in a subtraction expression",
+ false);
uint32_t Value = Writer->getSymbolAddress(A_SD, Layout);
uint64_t SecAddr = Writer->getSectionAddress(A_SD->getFragment()->getParent());
@@ -367,22 +385,23 @@ bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
if (!B_SD->getFragment())
report_fatal_error("symbol '" + B->getSymbol().getName() +
- "' can not be undefined in a subtraction expression");
+ "' can not be undefined in a subtraction expression",
+ false);
// Select the appropriate difference relocation type.
//
// Note that there is no longer any semantic difference between these two
// relocation types from the linkers point of view, this is done solely for
// pedantic compatibility with 'as'.
- Type = A_SD->isExternal() ? (unsigned)macho::RIT_Difference :
- (unsigned)macho::RIT_Generic_LocalDifference;
+ Type = A_SD->isExternal() ? (unsigned)MachO::GENERIC_RELOC_SECTDIFF :
+ (unsigned)MachO::GENERIC_RELOC_LOCAL_SECTDIFF;
Value2 = Writer->getSymbolAddress(B_SD, Layout);
FixedValue -= Writer->getSectionAddress(B_SD->getFragment()->getParent());
}
// Relocations are written out in reverse order, so the PAIR comes first.
- if (Type == macho::RIT_Difference ||
- Type == macho::RIT_Generic_LocalDifference) {
+ if (Type == MachO::GENERIC_RELOC_SECTDIFF ||
+ Type == MachO::GENERIC_RELOC_LOCAL_SECTDIFF) {
// If the offset is too large to fit in a scattered relocation,
// we're hosed. It's an unfortunate limitation of the MachO format.
if (FixupOffset > 0xffffff) {
@@ -396,13 +415,13 @@ bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
llvm_unreachable("fatal error returned?!");
}
- macho::RelocationEntry MRE;
- MRE.Word0 = ((0 << 0) |
- (macho::RIT_Pair << 24) |
- (Log2Size << 28) |
- (IsPCRel << 30) |
- macho::RF_Scattered);
- MRE.Word1 = Value2;
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = ((0 << 0) | // r_address
+ (MachO::GENERIC_RELOC_PAIR << 24) | // r_type
+ (Log2Size << 28) |
+ (IsPCRel << 30) |
+ MachO::R_SCATTERED);
+ MRE.r_word1 = Value2;
Writer->addRelocation(Fragment->getParent(), MRE);
} else {
// If the offset is more than 24-bits, it won't fit in a scattered
@@ -416,13 +435,13 @@ bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
return false;
}
- macho::RelocationEntry MRE;
- MRE.Word0 = ((FixupOffset << 0) |
- (Type << 24) |
- (Log2Size << 28) |
- (IsPCRel << 30) |
- macho::RF_Scattered);
- MRE.Word1 = Value;
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = ((FixupOffset << 0) |
+ (Type << 24) |
+ (Log2Size << 28) |
+ (IsPCRel << 30) |
+ MachO::R_SCATTERED);
+ MRE.r_word1 = Value;
Writer->addRelocation(Fragment->getParent(), MRE);
return true;
}
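
The scattered-relocation paths above pack the fixup into r_word0 by hand. As a reader's aid only (the helper name and its standalone form are illustrative, not part of this patch), the sketch below restates the layout implied by that code: a 24-bit r_address, 4-bit r_type, 2-bit r_length, the r_pcrel bit, and the MachO::R_SCATTERED flag in the top bit, which is why offsets above 0xffffff cannot be encoded.

// Illustrative sketch only; mirrors the r_word0 packing used above.
#include <cstdint>

static bool packScatteredWord0(uint32_t FixupOffset, unsigned Type,
                               unsigned Log2Size, bool IsPCRel,
                               uint32_t &Word0) {
  if (FixupOffset > 0xffffff)         // r_address is only 24 bits wide
    return false;                     // caller must report the error instead
  Word0 = (FixupOffset << 0) |        // r_address
          (Type << 24) |              // r_type
          (Log2Size << 28) |          // r_length
          ((uint32_t)IsPCRel << 30) | // r_pcrel
          0x80000000u;                // MachO::R_SCATTERED
  return true;
}
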
@@ -464,13 +483,13 @@ void X86MachObjectWriter::RecordTLVPRelocation(MachObjectWriter *Writer,
}
// struct relocation_info (8 bytes)
- macho::RelocationEntry MRE;
- MRE.Word0 = Value;
- MRE.Word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (1 << 27) | // Extern
- (macho::RIT_Generic_TLV << 28)); // Type
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = Value;
+ MRE.r_word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (1 << 27) | // r_extern
+ (MachO::GENERIC_RELOC_TLV << 28)); // r_type
Writer->addRelocation(Fragment->getParent(), MRE);
}
@@ -530,7 +549,7 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
//
// FIXME: Currently, these are never generated (see code below). I cannot
// find a case where they are actually emitted.
- Type = macho::RIT_Vanilla;
+ Type = MachO::GENERIC_RELOC_VANILLA;
} else {
// Resolve constant variables.
if (SD->getSymbol().isVariable()) {
@@ -561,17 +580,17 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
if (IsPCRel)
FixedValue -= Writer->getSectionAddress(Fragment->getParent());
- Type = macho::RIT_Vanilla;
+ Type = MachO::GENERIC_RELOC_VANILLA;
}
// struct relocation_info (8 bytes)
- macho::RelocationEntry MRE;
- MRE.Word0 = FixupOffset;
- MRE.Word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (IsExtern << 27) |
- (Type << 28));
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (IsExtern << 27) |
+ (Type << 28));
Writer->addRelocation(Fragment->getParent(), MRE);
}
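
Every non-scattered path in this file builds r_word1 with the same shift pattern. The helper below is a sketch for the reader (its name is made up, it is not part of the patch) that spells out the <mach-o/reloc.h> field widths behind that pattern.

// Illustrative sketch only; matches the (Index | IsPCRel | Log2Size | ...)
// packing repeated in RecordX86_64Relocation and RecordX86Relocation above.
#include <cstdint>

static uint32_t packRelocWord1(uint32_t Index, bool IsPCRel, unsigned Log2Size,
                               bool IsExtern, unsigned Type) {
  return (Index << 0) |               // r_symbolnum: 24 bits
         ((uint32_t)IsPCRel << 24) |  // r_pcrel:      1 bit
         (Log2Size << 25) |           // r_length:     2 bits
         ((uint32_t)IsExtern << 27) | // r_extern:     1 bit
         (Type << 28);                // r_type:       4 bits
}
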
diff --git a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
index ed64a32eeff2..6da414287cfc 100644
--- a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
@@ -27,7 +27,7 @@ namespace {
public:
X86WinCOFFObjectWriter(bool Is64Bit_);
- ~X86WinCOFFObjectWriter();
+ virtual ~X86WinCOFFObjectWriter();
virtual unsigned getRelocType(const MCValue &Target,
const MCFixup &Fixup,
diff --git a/lib/Target/X86/README-SSE.txt b/lib/Target/X86/README-SSE.txt
index 496b704ee85f..adfa7fa1232c 100644
--- a/lib/Target/X86/README-SSE.txt
+++ b/lib/Target/X86/README-SSE.txt
@@ -517,37 +517,6 @@ to <2 x i64> ops being so bad.
//===---------------------------------------------------------------------===//
-'select' on vectors and scalars could be a whole lot better. We currently
-lower them to conditional branches. On x86-64 for example, we compile this:
-
-double test(double a, double b, double c, double d) { return a<b ? c : d; }
-
-to:
-
-_test:
- ucomisd %xmm0, %xmm1
- ja LBB1_2 # entry
-LBB1_1: # entry
- movapd %xmm3, %xmm2
-LBB1_2: # entry
- movapd %xmm2, %xmm0
- ret
-
-instead of:
-
-_test:
- cmpltsd %xmm1, %xmm0
- andpd %xmm0, %xmm2
- andnpd %xmm3, %xmm0
- orpd %xmm2, %xmm0
- ret
-
-For unpredictable branches, the later is much more efficient. This should
-just be a matter of having scalar sse map to SELECT_CC and custom expanding
-or iseling it.
-
-//===---------------------------------------------------------------------===//
-
LLVM currently generates stack realignment code when it is not necessarily
needed. The problem is that we need to know about stack alignment too early,
before RA runs.
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index c865500deb30..65c5552de2af 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -50,10 +50,10 @@ def FeatureSSE3 : SubtargetFeature<"sse3", "X86SSELevel", "SSE3",
def FeatureSSSE3 : SubtargetFeature<"ssse3", "X86SSELevel", "SSSE3",
"Enable SSSE3 instructions",
[FeatureSSE3]>;
-def FeatureSSE41 : SubtargetFeature<"sse41", "X86SSELevel", "SSE41",
+def FeatureSSE41 : SubtargetFeature<"sse4.1", "X86SSELevel", "SSE41",
"Enable SSE 4.1 instructions",
[FeatureSSSE3]>;
-def FeatureSSE42 : SubtargetFeature<"sse42", "X86SSELevel", "SSE42",
+def FeatureSSE42 : SubtargetFeature<"sse4.2", "X86SSELevel", "SSE42",
"Enable SSE 4.2 instructions",
[FeatureSSE41]>;
def Feature3DNow : SubtargetFeature<"3dnow", "X863DNowLevel", "ThreeDNow",
@@ -68,7 +68,7 @@ def Feature3DNowA : SubtargetFeature<"3dnowa", "X863DNowLevel", "ThreeDNowA",
def Feature64Bit : SubtargetFeature<"64bit", "HasX86_64", "true",
"Support 64-bit instructions",
[FeatureCMOV]>;
-def FeatureCMPXCHG16B : SubtargetFeature<"cmpxchg16b", "HasCmpxchg16b", "true",
+def FeatureCMPXCHG16B : SubtargetFeature<"cx16", "HasCmpxchg16b", "true",
"64-bit with cmpxchg16b",
[Feature64Bit]>;
def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
@@ -86,6 +86,19 @@ def FeatureAVX : SubtargetFeature<"avx", "X86SSELevel", "AVX",
def FeatureAVX2 : SubtargetFeature<"avx2", "X86SSELevel", "AVX2",
"Enable AVX2 instructions",
[FeatureAVX]>;
+def FeatureAVX512 : SubtargetFeature<"avx512f", "X86SSELevel", "AVX512F",
+ "Enable AVX-512 instructions",
+ [FeatureAVX2]>;
+def FeatureERI : SubtargetFeature<"avx512er", "HasERI", "true",
+ "Enable AVX-512 Exponential and Reciprocal Instructions",
+ [FeatureAVX512]>;
+def FeatureCDI : SubtargetFeature<"avx512cd", "HasCDI", "true",
+ "Enable AVX-512 Conflict Detection Instructions",
+ [FeatureAVX512]>;
+def FeaturePFI : SubtargetFeature<"avx512pf", "HasPFI", "true",
+ "Enable AVX-512 PreFetch Instructions",
+ [FeatureAVX512]>;
+
def FeaturePCLMUL : SubtargetFeature<"pclmul", "HasPCLMUL", "true",
"Enable packed carry-less multiplication instructions",
[FeatureSSE2]>;
@@ -104,12 +117,15 @@ def FeatureVectorUAMem : SubtargetFeature<"vector-unaligned-mem",
def FeatureAES : SubtargetFeature<"aes", "HasAES", "true",
"Enable AES instructions",
[FeatureSSE2]>;
+def FeatureTBM : SubtargetFeature<"tbm", "HasTBM", "true",
+ "Enable TBM instructions">;
def FeatureMOVBE : SubtargetFeature<"movbe", "HasMOVBE", "true",
"Support MOVBE instruction">;
-def FeatureRDRAND : SubtargetFeature<"rdrand", "HasRDRAND", "true",
+def FeatureRDRAND : SubtargetFeature<"rdrnd", "HasRDRAND", "true",
"Support RDRAND instruction">;
def FeatureF16C : SubtargetFeature<"f16c", "HasF16C", "true",
- "Support 16-bit floating point conversion instructions">;
+ "Support 16-bit floating point conversion instructions",
+ [FeatureAVX]>;
def FeatureFSGSBase : SubtargetFeature<"fsgsbase", "HasFSGSBase", "true",
"Support FS/GS Base instructions">;
def FeatureLZCNT : SubtargetFeature<"lzcnt", "HasLZCNT", "true",
@@ -124,6 +140,9 @@ def FeatureHLE : SubtargetFeature<"hle", "HasHLE", "true",
"Support HLE">;
def FeatureADX : SubtargetFeature<"adx", "HasADX", "true",
"Support ADX instructions">;
+def FeatureSHA : SubtargetFeature<"sha", "HasSHA", "true",
+ "Enable SHA instructions",
+ [FeatureSSE2]>;
def FeaturePRFCHW : SubtargetFeature<"prfchw", "HasPRFCHW", "true",
"Support PRFCHW instructions">;
def FeatureRDSEED : SubtargetFeature<"rdseed", "HasRDSEED", "true",
@@ -150,6 +169,8 @@ include "X86Schedule.td"
def ProcIntelAtom : SubtargetFeature<"atom", "X86ProcFamily", "IntelAtom",
"Intel Atom processors">;
+def ProcIntelSLM : SubtargetFeature<"slm", "X86ProcFamily", "IntelSLM",
+ "Intel Silvermont processors">;
class Proc<string Name, list<SubtargetFeature> Features>
: ProcessorModel<Name, GenericModel, Features>;
@@ -193,6 +214,14 @@ def : ProcessorModel<"atom", AtomModel,
FeatureLEAUsesAG,
FeaturePadShortFunctions]>;
+// Atom Silvermont.
+def : ProcessorModel<"slm", SLMModel, [ProcIntelSLM,
+ FeatureSSE42, FeatureCMPXCHG16B,
+ FeatureMOVBE, FeaturePOPCNT,
+ FeaturePCLMUL, FeatureAES,
+ FeatureCallRegIndirect,
+ FeaturePRFCHW,
+ FeatureSlowBTMem]>;
// "Arrandale" along with corei3 and corei5
def : ProcessorModel<"corei7", SandyBridgeModel,
[FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem,
@@ -227,6 +256,15 @@ def : ProcessorModel<"core-avx2", HaswellModel,
FeatureBMI, FeatureBMI2, FeatureFMA, FeatureRTM,
FeatureHLE]>;
+// KNL
+// FIXME: define KNL model
+def : ProcessorModel<"knl", HaswellModel,
+ [FeatureAVX512, FeatureERI, FeatureCDI, FeaturePFI,
+ FeatureCMPXCHG16B, FeatureFastUAMem, FeaturePOPCNT,
+ FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,
+ FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,
+ FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE]>;
+
def : Proc<"k6", [FeatureMMX]>;
def : Proc<"k6-2", [Feature3DNow]>;
def : Proc<"k6-3", [Feature3DNow]>;
@@ -254,21 +292,30 @@ def : Proc<"amdfam10", [FeatureSSE4A,
FeaturePOPCNT, FeatureSlowBTMem]>;
// Bobcat
def : Proc<"btver1", [FeatureSSSE3, FeatureSSE4A, FeatureCMPXCHG16B,
- FeatureLZCNT, FeaturePOPCNT]>;
+ FeaturePRFCHW, FeatureLZCNT, FeaturePOPCNT]>;
// Jaguar
def : Proc<"btver2", [FeatureAVX, FeatureSSE4A, FeatureCMPXCHG16B,
- FeatureAES, FeaturePCLMUL, FeatureBMI,
- FeatureF16C, FeatureMOVBE, FeatureLZCNT,
- FeaturePOPCNT]>;
+ FeaturePRFCHW, FeatureAES, FeaturePCLMUL,
+ FeatureBMI, FeatureF16C, FeatureMOVBE,
+ FeatureLZCNT, FeaturePOPCNT]>;
// Bulldozer
def : Proc<"bdver1", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
- FeatureAES, FeaturePCLMUL,
+ FeatureAES, FeaturePRFCHW, FeaturePCLMUL,
FeatureLZCNT, FeaturePOPCNT]>;
// Piledriver
def : Proc<"bdver2", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
- FeatureAES, FeaturePCLMUL,
+ FeatureAES, FeaturePRFCHW, FeaturePCLMUL,
+ FeatureF16C, FeatureLZCNT,
+ FeaturePOPCNT, FeatureBMI, FeatureTBM,
+ FeatureFMA]>;
+
+// Steamroller
+def : Proc<"bdver3", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
+ FeatureAES, FeaturePRFCHW, FeaturePCLMUL,
FeatureF16C, FeatureLZCNT,
- FeaturePOPCNT, FeatureBMI, FeatureFMA]>;
+ FeaturePOPCNT, FeatureBMI, FeatureTBM,
+ FeatureFMA, FeatureFSGSBase]>;
+
def : Proc<"geode", [Feature3DNowA]>;
def : Proc<"winchip-c6", [FeatureMMX]>;
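
The feature-name changes above ("sse4.1", "sse4.2", "cx16", "rdrnd") affect what clients pass when assembling a target feature string. A hypothetical example follows, assuming the llvm::SubtargetFeatures API of this release; the function and the particular feature set are illustrative, not part of the patch.

// Sketch only: builds a feature string using the renamed attribute names.
#include "llvm/MC/SubtargetFeature.h"
#include <string>

static std::string silvermontLikeFeatures() {
  llvm::SubtargetFeatures Features;
  Features.AddFeature("sse4.2");   // formerly "sse42"
  Features.AddFeature("cx16");     // formerly "cmpxchg16b"
  Features.AddFeature("movbe");
  return Features.getString();     // e.g. "+sse4.2,+cx16,+movbe"
}
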
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index 6b228b0b0329..12584411509d 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -96,7 +96,7 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
GVSym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
else
- GVSym = Mang->getSymbol(GV);
+ GVSym = getSymbol(GV);
// Handle dllimport linkage.
if (MO.getTargetFlags() == X86II::MO_DLLIMPORT)
@@ -109,21 +109,21 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(Sym);
if (StubSym.getPointer() == 0)
StubSym = MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
+ StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
} else if (MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE){
MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
MachineModuleInfoImpl::StubValueTy &StubSym =
MMI->getObjFileInfo<MachineModuleInfoMachO>().getHiddenGVStubEntry(Sym);
if (StubSym.getPointer() == 0)
StubSym = MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
+ StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
} else if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) {
MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$stub");
MachineModuleInfoImpl::StubValueTy &StubSym =
MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
if (StubSym.getPointer() == 0)
StubSym = MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
+ StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
}
// If the name begins with a dollar-sign, enclose it in parens. We do this
@@ -333,21 +333,21 @@ void X86AsmPrinter::printIntelMemReference(const MachineInstr *MI, unsigned Op,
const MachineOperand &IndexReg = MI->getOperand(Op+2);
const MachineOperand &DispSpec = MI->getOperand(Op+3);
const MachineOperand &SegReg = MI->getOperand(Op+4);
-
+
// If this has a segment register, print it.
if (SegReg.getReg()) {
printOperand(MI, Op+4, O, Modifier, AsmVariant);
O << ':';
}
-
+
O << '[';
-
+
bool NeedPlus = false;
if (BaseReg.getReg()) {
printOperand(MI, Op, O, Modifier, AsmVariant);
NeedPlus = true;
}
-
+
if (IndexReg.getReg()) {
if (NeedPlus) O << " + ";
if (ScaleVal != 1)
@@ -394,6 +394,7 @@ bool X86AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
Reg = getX86SubSuperRegister(Reg, MVT::i32);
break;
case 'q': // Print DImode register
+ // FIXME: gcc will actually print e instead of r for 32-bit.
Reg = getX86SubSuperRegister(Reg, MVT::i64);
break;
}
@@ -518,6 +519,27 @@ bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
if (Subtarget->isTargetEnvMacho())
OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
+
+ if (Subtarget->isTargetCOFF()) {
+ // Emit an absolute @feat.00 symbol. This appears to be some kind of
+ // compiler features bitfield read by link.exe.
+ if (!Subtarget->is64Bit()) {
+ MCSymbol *S = MMI->getContext().GetOrCreateSymbol(StringRef("@feat.00"));
+ OutStreamer.BeginCOFFSymbolDef(S);
+ OutStreamer.EmitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_STATIC);
+ OutStreamer.EmitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_NULL);
+ OutStreamer.EndCOFFSymbolDef();
+ // According to the PE-COFF spec, the LSB of this value marks the object
+ // for "registered SEH". This means that all SEH handler entry points
+ // must be registered in .sxdata. Use of any unregistered handlers will
+ // cause the process to terminate immediately. LLVM does not know how to
+ // register any SEH handlers, so its object files should be safe.
+ S->setAbsolute();
+ OutStreamer.EmitSymbolAttribute(S, MCSA_Global);
+ OutStreamer.EmitAssignment(
+ S, MCConstantExpr::Create(int64_t(1), MMI->getContext()));
+ }
+ }
}
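
As the comment above notes, only the least significant bit of @feat.00 matters for "registered SEH". The check below is purely illustrative (the function and its caller are hypothetical, not part of LLVM or this patch); it shows what a consumer of the absolute symbol value would test.

// Sketch: testing the registered-SEH marker that the code above sets to 1.
#include <cstdint>

static bool objectIsSafeSEH(uint64_t FeatDotZeroZeroValue) {
  return (FeatDotZeroZeroValue & 1) != 0;   // LSB == registered-SEH marker
}
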
@@ -606,6 +628,8 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
OutStreamer.AddBlankLine();
}
+ SM.serializeToStackMapSection();
+
// Funny Darwin hack: This flag tells the linker that no global symbols
// contain code that falls through to other global symbols (e.g. the obvious
// implementation of multiple entry points). If this doesn't occur, the
@@ -645,12 +669,12 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
if (I->hasDLLExportLinkage())
- DLLExportedFns.push_back(Mang->getSymbol(I));
+ DLLExportedFns.push_back(getSymbol(I));
for (Module::const_global_iterator I = M.global_begin(),
E = M.global_end(); I != E; ++I)
if (I->hasDLLExportLinkage())
- DLLExportedGlobals.push_back(Mang->getSymbol(I));
+ DLLExportedGlobals.push_back(getSymbol(I));
// Output linker support code for dllexported globals on windows.
if (!DLLExportedGlobals.empty() || !DLLExportedFns.empty()) {
@@ -702,48 +726,6 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
}
}
-MachineLocation
-X86AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
- MachineLocation Location;
- assert (MI->getNumOperands() == 7 && "Invalid no. of machine operands!");
- // Frame address. Currently handles register +- offset only.
-
- if (MI->getOperand(0).isReg() && MI->getOperand(3).isImm())
- Location.set(MI->getOperand(0).getReg(), MI->getOperand(3).getImm());
- else {
- DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n");
- }
- return Location;
-}
-
-void X86AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
- raw_ostream &O) {
- // Only the target-dependent form of DBG_VALUE should get here.
- // Referencing the offset and metadata as NOps-2 and NOps-1 is
- // probably portable to other targets; frame pointer location is not.
- unsigned NOps = MI->getNumOperands();
- assert(NOps==7);
- O << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
- // cast away const; DIetc do not take const operands for some reason.
- DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata()));
- if (V.getContext().isSubprogram())
- O << DISubprogram(V.getContext()).getDisplayName() << ":";
- O << V.getName();
- O << " <- ";
- // Frame address. Currently handles register +- offset only.
- O << '[';
- if (MI->getOperand(0).isReg() && MI->getOperand(0).getReg())
- printOperand(MI, 0, O);
- else
- O << "undef";
- O << '+'; printOperand(MI, 3, O);
- O << ']';
- O << "+";
- printOperand(MI, NOps-2, O);
-}
-
-
-
//===----------------------------------------------------------------------===//
// Target Registry Stuff
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h
index bc7496bad144..24a768b933bd 100644
--- a/lib/Target/X86/X86AsmPrinter.h
+++ b/lib/Target/X86/X86AsmPrinter.h
@@ -16,6 +16,7 @@
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -24,9 +25,21 @@ class MCStreamer;
class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget;
+ StackMaps SM;
+
+ // Parses operands of PATCHPOINT and STACKMAP to produce stack map Location
+ // structures. Returns a result location and an iterator to the operand
+ // immediately following the operands consumed.
+ //
+ // This method is implemented in X86MCInstLower.cpp.
+ static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
+ stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE,
+ const TargetMachine &TM);
+
public:
explicit X86AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {
+ : AsmPrinter(TM, Streamer), SM(*this, stackmapOperandParser) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
}
@@ -67,11 +80,6 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
unsigned AsmVariant = 1);
virtual bool runOnMachineFunction(MachineFunction &F) LLVM_OVERRIDE;
-
- void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
-
- virtual MachineLocation
- getDebugValueLocation(const MachineInstr *MI) const LLVM_OVERRIDE;
};
} // end namespace llvm
diff --git a/lib/Target/X86/X86CallingConv.h b/lib/Target/X86/X86CallingConv.h
new file mode 100644
index 000000000000..e76f9fda2db9
--- /dev/null
+++ b/lib/Target/X86/X86CallingConv.h
@@ -0,0 +1,35 @@
+//=== X86CallingConv.h - X86 Custom Calling Convention Routines -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the custom routines for the X86 Calling Convention that
+// aren't done by tablegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86CALLINGCONV_H
+#define X86CALLINGCONV_H
+
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/IR/CallingConv.h"
+
+namespace llvm {
+
+inline bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &,
+ CCValAssign::LocInfo &, ISD::ArgFlagsTy &,
+ CCState &) {
+ llvm_unreachable("The AnyReg calling convention is only supported by the " \
+ "stackmap and patchpoint intrinsics.");
+ // Gracefully fall back to the X86 C calling convention on Release builds.
+ return false;
+}
+
+} // End llvm namespace
+
+#endif
+
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index 9eafbd55a5ae..a78b5c0a7967 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -49,6 +49,12 @@ def RetCC_X86Common : CallingConv<[
CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
+ // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
+ // can only be used by ABI non-compliant code. These vector types are only
+ // supported when the AVX-512 target feature is enabled.
+ CCIfType<[v16i32, v8i64, v16f32, v8f64],
+ CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
+
// MMX vector types are always returned in MM0. If the target doesn't have
// MM0, it doesn't support these vector types.
CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,
@@ -99,6 +105,10 @@ def RetCC_Intel_OCL_BI : CallingConv<[
CCIfType<[v8f32, v4f64, v8i32, v4i64],
CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
+ // 512-bit FP vectors
+ CCIfType<[v16f32, v8f64, v16i32, v8i64],
+ CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
+
// i32, i64 in the standard way
CCDelegateTo<RetCC_X86Common>
]>;
@@ -141,6 +151,26 @@ def RetCC_X86_64_HiPE : CallingConv<[
CCIfType<[i64], CCAssignToReg<[R15, RBP, RAX, RDX]>>
]>;
+// X86-64 WebKit_JS return-value convention.
+def RetCC_X86_64_WebKit_JS : CallingConv<[
+ // Promote all types to i64
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
+
+ // Return: RAX
+ CCIfType<[i64], CCAssignToReg<[RAX]>>
+]>;
+
+// X86-64 AnyReg return-value convention. No explicit register is specified for
+// the return-value. The register allocator is allowed and expected to choose
+// any free register.
+//
+// This calling convention is currently only supported by the stackmap and
+// patchpoint intrinsics. All other uses will result in an assert on Debug
+// builds. On Release builds we fall back to the X86 C calling convention.
+def RetCC_X86_64_AnyReg : CallingConv<[
+ CCCustom<"CC_X86_AnyReg_Error">
+]>;
+
// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
// If FastCC, use RetCC_X86_32_Fast.
@@ -156,6 +186,15 @@ def RetCC_X86_32 : CallingConv<[
def RetCC_X86_64 : CallingConv<[
// HiPE uses RetCC_X86_64_HiPE
CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,
+
+ // Handle JavaScript calls.
+ CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>,
+ CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,
+
+ // Handle explicit CC selection
+ CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
+ CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
+
// Mingw64 and native Win64 use Win64 CC
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,
@@ -208,10 +247,15 @@ def CC_X86_64_C : CallingConv<[
// fixed arguments to vararg functions are supposed to be passed in
// registers. Actually modeling that would be a lot of work, though.
CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
- CCIfSubtarget<"hasAVX()",
+ CCIfSubtarget<"hasFp256()",
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
YMM4, YMM5, YMM6, YMM7]>>>>,
+ // The first 8 512-bit vector arguments are passed in ZMM registers.
+ CCIfNotVarArg<CCIfType<[v16i32, v8i64, v16f32, v8f64],
+ CCIfSubtarget<"hasAVX512()",
+ CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,
+
// Integer/FP values get stored in stack slots that are 8 bytes in size and
// 8-byte aligned if there are no more registers to hold them.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
@@ -225,7 +269,11 @@ def CC_X86_64_C : CallingConv<[
// 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
- CCAssignToStack<32, 32>>
+ CCAssignToStack<32, 32>>,
+
+ // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
+ CCIfType<[v16i32, v8i64, v16f32, v8f64],
+ CCAssignToStack<64, 64>>
]>;
// Calling convention used on Win64
@@ -246,16 +294,19 @@ def CC_X86_Win64_C : CallingConv<[
// 256 bit vectors are passed by pointer
CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,
+ // 512 bit vectors are passed by pointer
+ CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
+
// The first 4 MMX vector arguments are passed in GPRs.
CCIfType<[x86mmx], CCBitConvertToType<i64>>,
// The first 4 integer arguments are passed in integer registers.
CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
[XMM0, XMM1, XMM2, XMM3]>>,
-
+
// Do not pass the sret argument in RCX, the Win64 thiscall calling
- // convention requires "this" to be passed in RCX.
- CCIfCC<"CallingConv::X86_ThisCall",
+ // convention requires "this" to be passed in RCX.
+ CCIfCC<"CallingConv::X86_ThisCall",
CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8 , R9 ],
[XMM1, XMM2, XMM3]>>>>,
@@ -302,6 +353,25 @@ def CC_X86_64_HiPE : CallingConv<[
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;
+def CC_X86_64_WebKit_JS : CallingConv<[
+ // Promote i8/i16 arguments to i32.
+ CCIfType<[i8, i16], CCPromoteToType<i32>>,
+
+ // Integer/FP values are always stored in stack slots that are 8 bytes in size
+ // and 8-byte aligned.
+ CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
+]>;
+
+// No explicit register is specified for the AnyReg calling convention. The
+// register allocator may assign the arguments to any free register.
+//
+// This calling convention is currently only supported by the stackmap and
+// patchpoint intrinsics. All other uses will result in an assert on Debug
+// builds. On Release builds we fall back to the X86 C calling convention.
+def CC_X86_64_AnyReg : CallingConv<[
+ CCCustom<"CC_X86_AnyReg_Error">
+]>;
+
//===----------------------------------------------------------------------===//
// X86 C Calling Convention
//===----------------------------------------------------------------------===//
@@ -327,7 +397,7 @@ def CC_X86_32_Common : CallingConv<[
// Integer/Float values get stored in stack slots that are 4 bytes in
// size and 4-byte aligned.
CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
-
+
// Doubles get 8-byte slots that are 4-byte aligned.
CCIfType<[f64], CCAssignToStack<8, 4>>,
@@ -340,7 +410,7 @@ def CC_X86_32_Common : CallingConv<[
// The first 4 AVX 256-bit vector arguments are passed in YMM registers.
CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
- CCIfSubtarget<"hasAVX()",
+ CCIfSubtarget<"hasFp256()",
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
// Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
@@ -464,6 +534,10 @@ def CC_Intel_OCL_BI : CallingConv<[
CCIfType<[v8f32, v4f64, v8i32, v4i64],
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,
+ // The 512-bit vector arguments are passed in ZMM registers.
+ CCIfType<[v16f32, v8f64, v16i32, v8i64],
+ CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,
+
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64_C>>,
CCDelegateTo<CC_X86_32_C>
@@ -489,6 +563,10 @@ def CC_X86_32 : CallingConv<[
def CC_X86_64 : CallingConv<[
CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
+ CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<CC_X86_64_WebKit_JS>>,
+ CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
+ CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<CC_X86_Win64_C>>,
+ CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
// Mingw64 and native Win64 use Win64 CC
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
@@ -525,9 +603,13 @@ def CSR_MostRegs_64 : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
// Standard C + YMM6-15
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
- R13, R14, R15,
+ R13, R14, R15,
(sequence "YMM%u", 6, 15))>;
+def CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI,
+ R12, R13, R14, R15,
+ (sequence "ZMM%u", 6, 21),
+ K4, K5, K6, K7)>;
//Standard C + XMM 8-15
def CSR_64_Intel_OCL_BI : CalleeSavedRegs<(add CSR_64,
(sequence "XMM%u", 8, 15))>;
@@ -535,3 +617,7 @@ def CSR_64_Intel_OCL_BI : CalleeSavedRegs<(add CSR_64,
//Standard C + YMM 8-15
def CSR_64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add CSR_64,
(sequence "YMM%u", 8, 15))>;
+
+def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add CSR_64,
+ (sequence "ZMM%u", 16, 31),
+ K4, K5, K6, K7)>;
diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp
index 8fea6edc8d78..14385edb1474 100644
--- a/lib/Target/X86/X86CodeEmitter.cpp
+++ b/lib/Target/X86/X86CodeEmitter.cpp
@@ -53,13 +53,8 @@ namespace {
static char ID;
explicit Emitter(X86TargetMachine &tm, CodeEmitter &mce)
: MachineFunctionPass(ID), II(0), TD(0), TM(tm),
- MCE(mce), PICBaseOffset(0), Is64BitMode(false),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
- Emitter(X86TargetMachine &tm, CodeEmitter &mce,
- const X86InstrInfo &ii, const DataLayout &td, bool is64)
- : MachineFunctionPass(ID), II(&ii), TD(&td), TM(tm),
- MCE(mce), PICBaseOffset(0), Is64BitMode(is64),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
+ MCE(mce), PICBaseOffset(0), Is64BitMode(false),
+ IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
bool runOnMachineFunction(MachineFunction &MF);
@@ -845,7 +840,7 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
unsigned char VEX_W = 0;
// XOP: Use XOP prefix byte 0x8f instead of VEX.
- unsigned char XOP = 0;
+ bool XOP = false;
// VEX_5M (VEX m-mmmmm field):
//
@@ -855,7 +850,8 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
// 0b00011: implied 0F 3A leading opcode bytes
// 0b00100-0b11111: Reserved for future use
// 0b01000: XOP map select - 08h instructions with imm byte
- // 0b10001: XOP map select - 09h instructions with no imm byte
+ // 0b01001: XOP map select - 09h instructions with no imm byte
+ // 0b01010: XOP map select - 0Ah instructions with imm dword
unsigned char VEX_5M = 0x1;
// VEX_4V (VEX vvvv field): a register specifier
@@ -887,7 +883,7 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
VEX_W = 1;
if ((TSFlags >> X86II::VEXShift) & X86II::XOP)
- XOP = 1;
+ XOP = true;
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
VEX_L = 1;
@@ -924,11 +920,11 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
case X86II::XOP9:
VEX_5M = 0x9;
break;
- case X86II::A6: // Bypass: Not used by VEX
- case X86II::A7: // Bypass: Not used by VEX
- case X86II::TB: // Bypass: Not used by VEX
- case 0:
- break; // No prefix!
+ case X86II::XOPA:
+ VEX_5M = 0xA;
+ break;
+ case X86II::TB: // VEX_5M/VEX_PP already correct
+ break;
}
@@ -987,11 +983,14 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
// FMA4:
// dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
- if (X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_R = 0x0;
+ CurOp++;
- if (HasVEX_4V)
- VEX_4V = getVEXRegisterEncoding(MI, 1);
+ if (HasVEX_4V) {
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ CurOp++;
+ }
if (X86II::isX86_64ExtendedReg(
MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
@@ -1001,7 +1000,7 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
VEX_X = 0x0;
if (HasVEX_4VOp3)
- VEX_4V = getVEXRegisterEncoding(MI, X86::AddrNumOperands+1);
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
break;
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
@@ -1011,7 +1010,7 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
// MemAddr
// src1(VEX_4V), MemAddr
if (HasVEX_4V)
- VEX_4V = getVEXRegisterEncoding(MI, 0);
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
if (X86II::isX86_64ExtendedReg(
MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
@@ -1064,8 +1063,10 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
case X86II::MRM6r: case X86II::MRM7r:
// MRM0r-MRM7r instructions forms:
// dst(VEX_4V), src(ModR/M), imm8
- VEX_4V = getVEXRegisterEncoding(MI, 0);
- if (X86II::isX86_64ExtendedReg(MI.getOperand(1).getReg()))
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ CurOp++;
+
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_B = 0x0;
break;
default: // RawFrm
@@ -1270,7 +1271,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
: (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
- if (Opcode == X86::MOV64ri64i32)
+ if (Opcode == X86::MOV32ri64)
rt = X86::reloc_absolute_word; // FIXME: add X86II flag?
// This should not occur on Darwin for relocatable objects.
if (Opcode == X86::MOV64ri)
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index cf44bd033bf3..97f96ab72c24 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "X86.h"
+#include "X86CallingConv.h"
#include "X86ISelLowering.h"
#include "X86InstrBuilder.h"
#include "X86RegisterInfo.h"
@@ -45,10 +46,6 @@ class X86FastISel : public FastISel {
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
- /// RegInfo - X86 register info.
- ///
- const X86RegisterInfo *RegInfo;
-
/// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
/// floating point ops.
/// When SSE is available, use it for f32 operations.
@@ -63,7 +60,6 @@ public:
Subtarget = &TM.getSubtarget<X86Subtarget>();
X86ScalarSSEf64 = Subtarget->hasSSE2();
X86ScalarSSEf32 = Subtarget->hasSSE1();
- RegInfo = static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
}
virtual bool TargetSelectInstruction(const Instruction *I);
@@ -84,8 +80,10 @@ private:
bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);
- bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM);
- bool X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM);
+ bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM,
+ bool Aligned = false);
+ bool X86FastEmitStore(EVT VT, unsigned ValReg, const X86AddressMode &AM,
+ bool Aligned = false);
bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
unsigned &ResultReg);
@@ -128,6 +126,8 @@ private:
return static_cast<const X86TargetMachine *>(&TM);
}
+ bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
+
unsigned TargetMaterializeConstant(const Constant *C);
unsigned TargetMaterializeAlloca(const AllocaInst *C);
@@ -238,7 +238,8 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
/// and a displacement offset, or a GlobalAddress,
/// i.e. V. Return true if it is possible.
bool
-X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
+X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg,
+ const X86AddressMode &AM, bool Aligned) {
// Get opcode and regclass of the output for the given store instruction.
unsigned Opc = 0;
switch (VT.getSimpleVT().SimpleTy) {
@@ -248,8 +249,8 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
// Mask out all but lowest bit.
unsigned AndResult = createResultReg(&X86::GR8RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
- Val = AndResult;
+ TII.get(X86::AND8ri), AndResult).addReg(ValReg).addImm(1);
+ ValReg = AndResult;
}
// FALLTHROUGH, handling i1 as i8.
case MVT::i8: Opc = X86::MOV8mr; break;
@@ -265,26 +266,35 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
(Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
break;
case MVT::v4f32:
- Opc = X86::MOVAPSmr;
+ if (Aligned)
+ Opc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
+ else
+ Opc = Subtarget->hasAVX() ? X86::VMOVUPSmr : X86::MOVUPSmr;
break;
case MVT::v2f64:
- Opc = X86::MOVAPDmr;
+ if (Aligned)
+ Opc = Subtarget->hasAVX() ? X86::VMOVAPDmr : X86::MOVAPDmr;
+ else
+ Opc = Subtarget->hasAVX() ? X86::VMOVUPDmr : X86::MOVUPDmr;
break;
case MVT::v4i32:
case MVT::v2i64:
case MVT::v8i16:
case MVT::v16i8:
- Opc = X86::MOVDQAmr;
+ if (Aligned)
+ Opc = Subtarget->hasAVX() ? X86::VMOVDQAmr : X86::MOVDQAmr;
+ else
+ Opc = Subtarget->hasAVX() ? X86::VMOVDQUmr : X86::MOVDQUmr;
break;
}
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc)), AM).addReg(Val);
+ DL, TII.get(Opc)), AM).addReg(ValReg);
return true;
}
bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
- const X86AddressMode &AM) {
+ const X86AddressMode &AM, bool Aligned) {
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Val))
Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
@@ -319,7 +329,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
if (ValReg == 0)
return false;
- return X86FastEmitStore(VT, ValReg, AM);
+ return X86FastEmitStore(VT, ValReg, AM, Aligned);
}
/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
@@ -337,9 +347,126 @@ bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
return true;
}
+bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
+ // Handle constant address.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ // Can't handle alternate code models yet.
+ if (TM.getCodeModel() != CodeModel::Small)
+ return false;
+
+ // Can't handle TLS yet.
+ if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (GVar->isThreadLocal())
+ return false;
+
+ // Can't handle TLS yet, part 2 (this is slightly crazy, but this is how
+ // it works...).
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
+ if (const GlobalVariable *GVar =
+ dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false)))
+ if (GVar->isThreadLocal())
+ return false;
+
+ // RIP-relative addresses can't have additional register operands, so if
+ // we've already folded stuff into the addressing mode, just force the
+ // global value into its own register, which we can use as the basereg.
+ if (!Subtarget->isPICStyleRIPRel() ||
+ (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
+ // Okay, we've committed to selecting this global. Set up the address.
+ AM.GV = GV;
+
+ // Allow the subtarget to classify the global.
+ unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);
+
+ // If this reference is relative to the pic base, set it now.
+ if (isGlobalRelativeToPICBase(GVFlags)) {
+ // FIXME: How do we know Base.Reg is free??
+ AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
+ }
+
+ // Unless the ABI requires an extra load, return a direct reference to
+ // the global.
+ if (!isGlobalStubReference(GVFlags)) {
+ if (Subtarget->isPICStyleRIPRel()) {
+ // Use rip-relative addressing if we can. Above we verified that the
+ // base and index registers are unused.
+ assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
+ AM.Base.Reg = X86::RIP;
+ }
+ AM.GVOpFlags = GVFlags;
+ return true;
+ }
+
+ // Ok, we need to do a load from a stub. If we've already loaded from
+ // this stub, reuse the loaded pointer, otherwise emit the load now.
+ DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
+ unsigned LoadReg;
+ if (I != LocalValueMap.end() && I->second != 0) {
+ LoadReg = I->second;
+ } else {
+ // Issue load from stub.
+ unsigned Opc = 0;
+ const TargetRegisterClass *RC = NULL;
+ X86AddressMode StubAM;
+ StubAM.Base.Reg = AM.Base.Reg;
+ StubAM.GV = GV;
+ StubAM.GVOpFlags = GVFlags;
+
+ // Prepare for inserting code in the local-value area.
+ SavePoint SaveInsertPt = enterLocalValueArea();
+
+ if (TLI.getPointerTy() == MVT::i64) {
+ Opc = X86::MOV64rm;
+ RC = &X86::GR64RegClass;
+
+ if (Subtarget->isPICStyleRIPRel())
+ StubAM.Base.Reg = X86::RIP;
+ } else {
+ Opc = X86::MOV32rm;
+ RC = &X86::GR32RegClass;
+ }
+
+ LoadReg = createResultReg(RC);
+ MachineInstrBuilder LoadMI =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
+ addFullAddress(LoadMI, StubAM);
+
+ // Ok, back to normal mode.
+ leaveLocalValueArea(SaveInsertPt);
+
+ // Prevent loading GV stub multiple times in same MBB.
+ LocalValueMap[V] = LoadReg;
+ }
+
+ // Now construct the final address. Note that the Disp, Scale,
+ // and Index values may already be set here.
+ AM.Base.Reg = LoadReg;
+ AM.GV = 0;
+ return true;
+ }
+ }
+
+ // If all else fails, try to materialize the value in a register.
+ if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
+ if (AM.Base.Reg == 0) {
+ AM.Base.Reg = getRegForValue(V);
+ return AM.Base.Reg != 0;
+ }
+ if (AM.IndexReg == 0) {
+ assert(AM.Scale == 1 && "Scale with no index!");
+ AM.IndexReg = getRegForValue(V);
+ return AM.IndexReg != 0;
+ }
+ }
+
+ return false;
+}
+
/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
+ SmallVector<const Value *, 32> GEPs;
+redo_gep:
const User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
if (const Instruction *I = dyn_cast<Instruction>(V)) {
@@ -434,13 +561,8 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
Disp += CI->getSExtValue() * S;
break;
}
- if (isa<AddOperator>(Op) &&
- (!isa<Instruction>(Op) ||
- FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
- == FuncInfo.MBB) &&
- isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
- // An add (in the same block) with a constant operand. Fold the
- // constant.
+ if (canFoldAddIntoGEP(U, Op)) {
+ // A compatible add with a constant operand. Fold the constant.
ConstantInt *CI =
cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
Disp += CI->getSExtValue() * S;
@@ -462,139 +584,43 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
goto unsupported_gep;
}
}
+
// Check for displacement overflow.
if (!isInt<32>(Disp))
break;
- // Ok, the GEP indices were covered by constant-offset and scaled-index
- // addressing. Update the address state and move on to examining the base.
+
AM.IndexReg = IndexReg;
AM.Scale = Scale;
AM.Disp = (uint32_t)Disp;
- if (X86SelectAddress(U->getOperand(0), AM))
+ GEPs.push_back(V);
+
+ if (const GetElementPtrInst *GEP =
+ dyn_cast<GetElementPtrInst>(U->getOperand(0))) {
+ // Ok, the GEP indices were covered by constant-offset and scaled-index
+ // addressing. Update the address state and move on to examining the base.
+ V = GEP;
+ goto redo_gep;
+ } else if (X86SelectAddress(U->getOperand(0), AM)) {
return true;
+ }
// If we couldn't merge the gep value into this addr mode, revert back to
// our address and just match the value instead of completely failing.
AM = SavedAM;
- break;
- unsupported_gep:
- // Ok, the GEP indices weren't all covered.
- break;
- }
- }
-
- // Handle constant address.
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
- // Can't handle alternate code models yet.
- if (TM.getCodeModel() != CodeModel::Small)
- return false;
- // Can't handle TLS yet.
- if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
- if (GVar->isThreadLocal())
- return false;
-
- // Can't handle TLS yet, part 2 (this is slightly crazy, but this is how
- // it works...).
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
- if (const GlobalVariable *GVar =
- dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false)))
- if (GVar->isThreadLocal())
- return false;
-
- // RIP-relative addresses can't have additional register operands, so if
- // we've already folded stuff into the addressing mode, just force the
- // global value into its own register, which we can use as the basereg.
- if (!Subtarget->isPICStyleRIPRel() ||
- (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
- // Okay, we've committed to selecting this global. Set up the address.
- AM.GV = GV;
-
- // Allow the subtarget to classify the global.
- unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);
-
- // If this reference is relative to the pic base, set it now.
- if (isGlobalRelativeToPICBase(GVFlags)) {
- // FIXME: How do we know Base.Reg is free??
- AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- }
-
- // Unless the ABI requires an extra load, return a direct reference to
- // the global.
- if (!isGlobalStubReference(GVFlags)) {
- if (Subtarget->isPICStyleRIPRel()) {
- // Use rip-relative addressing if we can. Above we verified that the
- // base and index registers are unused.
- assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
- AM.Base.Reg = X86::RIP;
- }
- AM.GVOpFlags = GVFlags;
+ for (SmallVectorImpl<const Value *>::reverse_iterator
+ I = GEPs.rbegin(), E = GEPs.rend(); I != E; ++I)
+ if (handleConstantAddresses(*I, AM))
return true;
- }
- // Ok, we need to do a load from a stub. If we've already loaded from
- // this stub, reuse the loaded pointer, otherwise emit the load now.
- DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
- unsigned LoadReg;
- if (I != LocalValueMap.end() && I->second != 0) {
- LoadReg = I->second;
- } else {
- // Issue load from stub.
- unsigned Opc = 0;
- const TargetRegisterClass *RC = NULL;
- X86AddressMode StubAM;
- StubAM.Base.Reg = AM.Base.Reg;
- StubAM.GV = GV;
- StubAM.GVOpFlags = GVFlags;
-
- // Prepare for inserting code in the local-value area.
- SavePoint SaveInsertPt = enterLocalValueArea();
-
- if (TLI.getPointerTy() == MVT::i64) {
- Opc = X86::MOV64rm;
- RC = &X86::GR64RegClass;
-
- if (Subtarget->isPICStyleRIPRel())
- StubAM.Base.Reg = X86::RIP;
- } else {
- Opc = X86::MOV32rm;
- RC = &X86::GR32RegClass;
- }
-
- LoadReg = createResultReg(RC);
- MachineInstrBuilder LoadMI =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
- addFullAddress(LoadMI, StubAM);
-
- // Ok, back to normal mode.
- leaveLocalValueArea(SaveInsertPt);
-
- // Prevent loading GV stub multiple times in same MBB.
- LocalValueMap[V] = LoadReg;
- }
-
- // Now construct the final address. Note that the Disp, Scale,
- // and Index values may already be set here.
- AM.Base.Reg = LoadReg;
- AM.GV = 0;
- return true;
- }
+ return false;
+ unsupported_gep:
+ // Ok, the GEP indices weren't all covered.
+ break;
}
-
- // If all else fails, try to materialize the value in a register.
- if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
- if (AM.Base.Reg == 0) {
- AM.Base.Reg = getRegForValue(V);
- return AM.Base.Reg != 0;
- }
- if (AM.IndexReg == 0) {
- assert(AM.Scale == 1 && "Scale with no index!");
- AM.IndexReg = getRegForValue(V);
- return AM.IndexReg != 0;
- }
}
- return false;
+ return handleConstantAddresses(V, AM);
}
/// X86SelectCallAddress - Attempt to fill in an address from the given value.
@@ -602,9 +628,35 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
const User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
- if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ const Instruction *I = dyn_cast<Instruction>(V);
+ // Record if the value is defined in the same basic block.
+ //
+ // This information is crucial for knowing whether or not folding an
+ // operand is valid.
+ // Indeed, FastISel generates or reuses a virtual register for all
+ // operands of all instructions it selects. Obviously, the definition and
+ // its uses must use the same virtual register; otherwise the produced
+ // code is incorrect.
+ // Before instruction selection, FunctionLoweringInfo::set sets the virtual
+ // registers for values that are live across basic blocks. This ensures
+ // that the values are consistently set between basic blocks, even
+ // if different instruction selection mechanisms are used (e.g., a mix of
+ // SDISel and FastISel).
+ // For values local to a basic block, the instruction selection process
+ // generates these virtual registers with whatever method is appropriate
+ // for its needs. In particular, FastISel and SDISel do not share the way
+ // local virtual registers are set.
+ // Therefore, it is impossible (or at least unsafe) to share values
+ // between basic blocks unless they use the same instruction selection
+ // method, which is not guaranteed for X86.
+ // Moreover, things like hasOneUse could not be used accurately if we
+ // allowed values to be referenced across basic blocks when they were not
+ // originally live across basic blocks.
+ bool InMBB = true;
+ if (I) {
Opcode = I->getOpcode();
U = I;
+ InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
} else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
Opcode = C->getOpcode();
U = C;
@@ -613,18 +665,22 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
switch (Opcode) {
default: break;
case Instruction::BitCast:
- // Look past bitcasts.
- return X86SelectCallAddress(U->getOperand(0), AM);
+ // Look past bitcasts if its operand is in the same BB.
+ if (InMBB)
+ return X86SelectCallAddress(U->getOperand(0), AM);
+ break;
case Instruction::IntToPtr:
- // Look past no-op inttoptrs.
- if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
+ // Look past no-op inttoptrs if its operand is in the same BB.
+ if (InMBB &&
+ TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
return X86SelectCallAddress(U->getOperand(0), AM);
break;
case Instruction::PtrToInt:
- // Look past no-op ptrtoints.
- if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
+ // Look past no-op ptrtoints if its operand is in the same BB.
+ if (InMBB &&
+ TLI.getValueType(U->getType()) == TLI.getPointerTy())
return X86SelectCallAddress(U->getOperand(0), AM);
break;
}
@@ -693,6 +749,10 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
if (S->isAtomic())
return false;
+ unsigned SABIAlignment =
+ TD.getABITypeAlignment(S->getValueOperand()->getType());
+ bool Aligned = S->getAlignment() == 0 || S->getAlignment() >= SABIAlignment;
+
MVT VT;
if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true))
return false;
@@ -701,7 +761,7 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
if (!X86SelectAddress(I->getOperand(1), AM))
return false;
- return X86FastEmitStore(VT, I->getOperand(0), AM);
+ return X86FastEmitStore(VT, I->getOperand(0), AM, Aligned);
}
/// X86SelectRet - Select and emit code to implement ret instructions.
@@ -717,10 +777,11 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
CallingConv::ID CC = F.getCallingConv();
if (CC != CallingConv::C &&
CC != CallingConv::Fast &&
- CC != CallingConv::X86_FastCall)
+ CC != CallingConv::X86_FastCall &&
+ CC != CallingConv::X86_64_SysV)
return false;
- if (Subtarget->isTargetWin64())
+ if (Subtarget->isCallingConvWin64(CC))
return false;
// Don't handle popping bytes on return for now.
@@ -1005,10 +1066,6 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
}
bool X86FastISel::X86SelectZExt(const Instruction *I) {
- // Handle zero-extension from i1 to i8, which is common.
- if (!I->getOperand(0)->getType()->isIntegerTy(1))
- return false;
-
EVT DstVT = TLI.getValueType(I->getType());
if (!TLI.isTypeLegal(DstVT))
return false;
@@ -1017,12 +1074,37 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
if (ResultReg == 0)
return false;
- // Set the high bits to zero.
- ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
- if (ResultReg == 0)
- return false;
+ // Handle zero-extension from i1 to i8, which is common.
+ MVT SrcVT = TLI.getSimpleValueType(I->getOperand(0)->getType());
+ if (SrcVT.SimpleTy == MVT::i1) {
+ // Set the high bits to zero.
+ ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
+ SrcVT = MVT::i8;
+
+ if (ResultReg == 0)
+ return false;
+ }
+
+ if (DstVT == MVT::i64) {
+ // Handle extension to 64-bits via sub-register shenanigans.
+ unsigned MovInst;
+
+ switch (SrcVT.SimpleTy) {
+ case MVT::i8: MovInst = X86::MOVZX32rr8; break;
+ case MVT::i16: MovInst = X86::MOVZX32rr16; break;
+ case MVT::i32: MovInst = X86::MOV32rr; break;
+ default: llvm_unreachable("Unexpected zext to i64 source type");
+ }
+
+ unsigned Result32 = createResultReg(&X86::GR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovInst), Result32)
+ .addReg(ResultReg);
- if (DstVT != MVT::i8) {
+ ResultReg = createResultReg(&X86::GR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::SUBREG_TO_REG),
+ ResultReg)
+ .addImm(0).addReg(Result32).addImm(X86::sub_32bit);
+ } else if (DstVT != MVT::i8) {
ResultReg = FastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
ResultReg, /*Kill=*/true);
if (ResultReg == 0)
@@ -1273,8 +1355,8 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
{ &X86::GR16RegClass, X86::AX, X86::DX, {
{ X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv
{ X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem
- { X86::DIV16r, X86::MOV16r0, Copy, X86::AX, U }, // UDiv
- { X86::DIV16r, X86::MOV16r0, Copy, X86::DX, U }, // URem
+ { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv
+ { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem
}
}, // i16
{ &X86::GR32RegClass, X86::EAX, X86::EDX, {
@@ -1287,8 +1369,8 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
{ &X86::GR64RegClass, X86::RAX, X86::RDX, {
{ X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv
{ X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem
- { X86::DIV64r, X86::MOV64r0, Copy, X86::RAX, U }, // UDiv
- { X86::DIV64r, X86::MOV64r0, Copy, X86::RDX, U }, // URem
+ { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv
+ { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem
}
}, // i64
};
@@ -1334,17 +1416,63 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
if (OpEntry.IsOpSigned)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(OpEntry.OpSignExtend));
- else
+ else {
+ unsigned Zero32 = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(OpEntry.OpSignExtend), TypeEntry.HighInReg);
+ TII.get(X86::MOV32r0), Zero32);
+
+ // Copy the zero into the appropriate sub/super/identical physical
+ // register. Unfortunately the operations needed are not uniform enough to
+ // fit neatly into the table above.
+ if (VT.SimpleTy == MVT::i16) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Copy), TypeEntry.HighInReg)
+ .addReg(Zero32, 0, X86::sub_16bit);
+ } else if (VT.SimpleTy == MVT::i32) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Copy), TypeEntry.HighInReg)
+ .addReg(Zero32);
+ } else if (VT.SimpleTy == MVT::i64) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
+ .addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
+ }
+ }
}
// Generate the DIV/IDIV instruction.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
- // Copy output register into result register.
- unsigned ResultReg = createResultReg(TypeEntry.RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Copy), ResultReg).addReg(OpEntry.DivRemResultReg);
+ // For i8 remainder, we can't reference AH directly, as we'll end
+ // up with bogus copies like %R9B = COPY %AH. Reference AX
+ // instead to prevent AH references in a REX instruction.
+ //
+ // The current assumption of the fast register allocator is that isel
+ // won't generate explicit references to the GPR8_NOREX registers. If
+ // the allocator and/or the backend get enhanced to be more robust in
+ // that regard, this can be, and should be, removed.
+ unsigned ResultReg = 0;
+ if ((I->getOpcode() == Instruction::SRem ||
+ I->getOpcode() == Instruction::URem) &&
+ OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
+ unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);
+ unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Copy), SourceSuperReg).addReg(X86::AX);
+
+ // Shift AX right by 8 bits instead of using AH.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SHR16ri),
+ ResultSuperReg).addReg(SourceSuperReg).addImm(8);
+
+ // Now reference the 8-bit subreg of the result.
+ ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
+ /*Kill=*/true, X86::sub_8bit);
+ }
+ // Copy the result out of the physreg if we haven't already.
+ if (!ResultReg) {
+ ResultReg = createResultReg(TypeEntry.RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Copy), ResultReg)
+ .addReg(OpEntry.DivRemResultReg);
+ }
UpdateValueMap(I, ResultReg);
return true;
@@ -1643,9 +1771,6 @@ bool X86FastISel::FastLowerArguments() {
if (!FuncInfo.CanLowerReturn)
return false;
- if (Subtarget->isTargetWin64())
- return false;
-
const Function *F = FuncInfo.Fn;
if (F->isVarArg())
return false;
@@ -1653,7 +1778,10 @@ bool X86FastISel::FastLowerArguments() {
CallingConv::ID CC = F->getCallingConv();
if (CC != CallingConv::C)
return false;
-
+
+ if (Subtarget->isCallingConvWin64(CC))
+ return false;
+
if (!Subtarget->is64Bit())
return false;
@@ -1697,8 +1825,6 @@ bool X86FastISel::FastLowerArguments() {
const TargetRegisterClass *RC64 = TLI.getRegClassFor(MVT::i64);
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I, ++Idx) {
- if (I->use_empty())
- continue;
bool is32Bit = TLI.getValueType(I->getType()) == MVT::i32;
const TargetRegisterClass *RC = is32Bit ? RC32 : RC64;
unsigned SrcReg = is32Bit ? GPR32ArgRegs[Idx] : GPR64ArgRegs[Idx];
@@ -1757,8 +1883,10 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Handle only C and fastcc calling conventions for now.
ImmutableCallSite CS(CI);
CallingConv::ID CC = CS.getCallingConv();
+ bool isWin64 = Subtarget->isCallingConvWin64(CC);
if (CC != CallingConv::C && CC != CallingConv::Fast &&
- CC != CallingConv::X86_FastCall)
+ CC != CallingConv::X86_FastCall && CC != CallingConv::X86_64_Win64 &&
+ CC != CallingConv::X86_64_SysV)
return false;
// fastcc with -tailcallopt is intended to provide a guaranteed
@@ -1772,7 +1900,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Don't know how to handle Win64 varargs yet. Nothing special needed for
// x86-32. Special handling for x86-64 is implemented.
- if (isVarArg && Subtarget->isTargetWin64())
+ if (isVarArg && isWin64)
return false;
// Fast-isel doesn't know about callee-pop yet.
@@ -1902,7 +2030,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
I->getParent()->getContext());
// Allocate shadow area for Win64
- if (Subtarget->isTargetWin64())
+ if (isWin64)
CCInfo.AllocateStack(32, 8);
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86);
@@ -1985,6 +2113,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
} else {
unsigned LocMemOffset = VA.getLocMemOffset();
X86AddressMode AM;
+ const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo*>(
+ getTargetMachine()->getRegisterInfo());
AM.Base.Reg = RegInfo->getStackRegister();
AM.Disp = LocMemOffset;
const Value *ArgVal = ArgVals[VA.getValNo()];
@@ -2016,7 +2146,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
X86::EBX).addReg(Base);
}
- if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64()) {
+ if (Subtarget->is64Bit() && isVarArg && !isWin64) {
// Count the number of XMM registers allocated.
static const uint16_t XMMArgRegs[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
@@ -2085,7 +2215,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
if (Subtarget->isPICStyleGOT())
MIB.addReg(X86::EBX, RegState::Implicit);
- if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64())
+ if (Subtarget->is64Bit() && isVarArg && !isWin64)
MIB.addReg(X86::AL, RegState::Implicit);
// Add implicit physical register uses to the call.
diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index 0dd034c45098..38a835175378 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -125,6 +125,15 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
// which requires isImm() to be true
return 0;
}
+ break;
+ case X86::ADD16rr:
+ case X86::ADD16rr_DB:
+ if (MI->getOperand(1).getReg() != MI->getOperand(2).getReg()) {
+ // if src1 != src2, then convertToThreeAddress will
+ // need to create a Virtual register, which we cannot do
+ // after register allocation.
+ return 0;
+ }
}
return TII->convertToThreeAddress(MFI, MBBI, 0);
}
@@ -135,8 +144,8 @@ FunctionPass *llvm::createX86FixupLEAs() {
bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
MF = &Func;
- TII = Func.getTarget().getInstrInfo();
TM = &MF->getTarget();
+ TII = TM->getInstrInfo();
DEBUG(dbgs() << "Start X86FixupLEAs\n";);
// Process all basic blocks.
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index 0585b43a4640..48470da0164a 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -115,9 +115,10 @@ namespace {
unsigned Mask = 0;
for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
E = MBB->livein_end(); I != E; ++I) {
- unsigned Reg = *I - X86::FP0;
- if (Reg < 8)
- Mask |= 1 << Reg;
+ unsigned Reg = *I;
+ if (Reg < X86::FP0 || Reg > X86::FP6)
+ continue;
+ Mask |= 1 << (Reg - X86::FP0);
}
return Mask;
}
@@ -893,8 +894,8 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Produce implicit-defs for free by using killed registers.
while (Kills && Defs) {
- unsigned KReg = CountTrailingZeros_32(Kills);
- unsigned DReg = CountTrailingZeros_32(Defs);
+ unsigned KReg = countTrailingZeros(Kills);
+ unsigned DReg = countTrailingZeros(Defs);
DEBUG(dbgs() << "Renaming %FP" << KReg << " as imp %FP" << DReg << "\n");
std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]);
std::swap(RegMap[KReg], RegMap[DReg]);
@@ -917,7 +918,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Manually kill the rest.
while (Kills) {
- unsigned KReg = CountTrailingZeros_32(Kills);
+ unsigned KReg = countTrailingZeros(Kills);
DEBUG(dbgs() << "Killing %FP" << KReg << "\n");
freeStackSlotBefore(I, KReg);
Kills &= ~(1 << KReg);
@@ -925,7 +926,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Load zeros for all the imp-defs.
while(Defs) {
- unsigned DReg = CountTrailingZeros_32(Defs);
+ unsigned DReg = countTrailingZeros(Defs);
DEBUG(dbgs() << "Defining %FP" << DReg << " as 0\n");
BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0));
pushReg(DReg);
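
The loops above were switched from CountTrailingZeros_32 to the renamed countTrailingZeros helper; the iteration pattern itself is unchanged. A small self-contained sketch (ordinary C++20, not the LLVM helper) of that peel-off-the-lowest-set-bit walk over a register mask:

  #include <bit>
  #include <cstdint>
  #include <cstdio>

  int main() {
    uint32_t kills = 0x52;  // pretend FP stack slots 1, 4 and 6 are to be killed
    while (kills) {
      unsigned reg = std::countr_zero(kills);  // index of the lowest set bit
      std::printf("killing %%FP%u\n", reg);
      kills &= kills - 1;                      // clear that bit and continue
    }
    return 0;
  }
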
@@ -1636,7 +1637,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
// Note: this might be a non-optimal pop sequence. We might be able to do
// better by trying to pop in stack order or something.
while (FPKills) {
- unsigned FPReg = CountTrailingZeros_32(FPKills);
+ unsigned FPReg = countTrailingZeros(FPKills);
if (isLive(FPReg))
freeStackSlotAfter(InsertPt, FPReg);
FPKills &= ~(1U << FPReg);
@@ -1661,6 +1662,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
BuildMI(*MBB, I, MI->getDebugLoc(), TII->get(X86::CALLpcrel32))
.addExternalSymbol("_ftol2")
.addReg(X86::ST0, RegState::ImplicitKill)
+ .addReg(X86::ECX, RegState::ImplicitDefine)
.addReg(X86::EAX, RegState::Define | RegState::Implicit)
.addReg(X86::EDX, RegState::Define | RegState::Implicit)
.addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index 42b4e735096d..a06ba9d750a9 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -307,12 +307,12 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
unsigned FramePtr) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
// Add callee saved registers to move list.
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
if (CSI.empty()) return;
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
bool HasFP = hasFP(MF);
@@ -360,276 +360,18 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
if (HasFP && FramePtr == Reg)
continue;
- MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
- MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(Label, CSDst, CSSrc));
+ unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(Label, DwarfReg, Offset));
}
}
-/// getCompactUnwindRegNum - Get the compact unwind number for a given
-/// register. The number corresponds to the enum lists in
-/// compact_unwind_encoding.h.
-static int getCompactUnwindRegNum(unsigned Reg, bool is64Bit) {
- static const uint16_t CU32BitRegs[] = {
- X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
- };
- static const uint16_t CU64BitRegs[] = {
- X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
- };
- const uint16_t *CURegs = is64Bit ? CU64BitRegs : CU32BitRegs;
- for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
- if (*CURegs == Reg)
- return Idx;
-
- return -1;
-}
-
-// Number of registers that can be saved in a compact unwind encoding.
-#define CU_NUM_SAVED_REGS 6
-
-/// encodeCompactUnwindRegistersWithoutFrame - Create the permutation encoding
-/// used with frameless stacks. It is passed the number of registers to be saved
-/// and an array of the registers saved.
-static uint32_t
-encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
- unsigned RegCount, bool Is64Bit) {
- // The saved registers are numbered from 1 to 6. In order to encode the order
- // in which they were saved, we re-number them according to their place in the
- // register order. The re-numbering is relative to the last re-numbered
- // register. E.g., if we have registers {6, 2, 4, 5} saved in that order:
- //
- // Orig Re-Num
- // ---- ------
- // 6 6
- // 2 2
- // 4 3
- // 5 3
- //
- for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
- int CUReg = getCompactUnwindRegNum(SavedRegs[i], Is64Bit);
- if (CUReg == -1) return ~0U;
- SavedRegs[i] = CUReg;
- }
-
- // Reverse the list.
- std::swap(SavedRegs[0], SavedRegs[5]);
- std::swap(SavedRegs[1], SavedRegs[4]);
- std::swap(SavedRegs[2], SavedRegs[3]);
-
- uint32_t RenumRegs[CU_NUM_SAVED_REGS];
- for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) {
- unsigned Countless = 0;
- for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
- if (SavedRegs[j] < SavedRegs[i])
- ++Countless;
-
- RenumRegs[i] = SavedRegs[i] - Countless - 1;
- }
-
- // Take the renumbered values and encode them into a 10-bit number.
- uint32_t permutationEncoding = 0;
- switch (RegCount) {
- case 6:
- permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
- + 6 * RenumRegs[2] + 2 * RenumRegs[3]
- + RenumRegs[4];
- break;
- case 5:
- permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
- + 6 * RenumRegs[3] + 2 * RenumRegs[4]
- + RenumRegs[5];
- break;
- case 4:
- permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
- + 3 * RenumRegs[4] + RenumRegs[5];
- break;
- case 3:
- permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
- + RenumRegs[5];
- break;
- case 2:
- permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
- break;
- case 1:
- permutationEncoding |= RenumRegs[5];
- break;
- }
-
- assert((permutationEncoding & 0x3FF) == permutationEncoding &&
- "Invalid compact register encoding!");
- return permutationEncoding;
-}
-
-/// encodeCompactUnwindRegistersWithFrame - Return the registers encoded for a
-/// compact encoding with a frame pointer.
-static uint32_t
-encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
- bool Is64Bit) {
- // Encode the registers in the order they were saved, 3-bits per register. The
- // registers are numbered from 1 to CU_NUM_SAVED_REGS.
- uint32_t RegEnc = 0;
- for (int I = CU_NUM_SAVED_REGS - 1, Idx = 0; I != -1; --I) {
- unsigned Reg = SavedRegs[I];
- if (Reg == 0) continue;
-
- int CURegNum = getCompactUnwindRegNum(Reg, Is64Bit);
- if (CURegNum == -1) return ~0U;
-
- // Encode the 3-bit register number in order, skipping over 3-bits for each
- // register.
- RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
- }
-
- assert((RegEnc & 0x3FFFF) == RegEnc && "Invalid compact register encoding!");
- return RegEnc;
-}
-
-uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
- const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
- unsigned FramePtr = RegInfo->getFrameRegister(MF);
- unsigned StackPtr = RegInfo->getStackRegister();
-
- bool Is64Bit = STI.is64Bit();
- bool HasFP = hasFP(MF);
-
- unsigned SavedRegs[CU_NUM_SAVED_REGS] = { 0, 0, 0, 0, 0, 0 };
- unsigned SavedRegIdx = 0;
-
- unsigned OffsetSize = (Is64Bit ? 8 : 4);
-
- unsigned PushInstr = (Is64Bit ? X86::PUSH64r : X86::PUSH32r);
- unsigned PushInstrSize = 1;
- unsigned MoveInstr = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
- unsigned MoveInstrSize = (Is64Bit ? 3 : 2);
- unsigned SubtractInstrIdx = (Is64Bit ? 3 : 2);
-
- unsigned StackDivide = (Is64Bit ? 8 : 4);
-
- unsigned InstrOffset = 0;
- unsigned StackAdjust = 0;
- unsigned StackSize = 0;
-
- MachineBasicBlock &MBB = MF.front(); // Prologue is in entry BB.
- bool ExpectEnd = false;
- for (MachineBasicBlock::iterator
- MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ++MBBI) {
- MachineInstr &MI = *MBBI;
- unsigned Opc = MI.getOpcode();
- if (Opc == X86::PROLOG_LABEL) continue;
- if (!MI.getFlag(MachineInstr::FrameSetup)) break;
-
- // We don't expect any more prolog instructions.
- if (ExpectEnd) return CU::UNWIND_MODE_DWARF;
-
- if (Opc == PushInstr) {
- // If there are too many saved registers, we cannot use compact encoding.
- if (SavedRegIdx >= CU_NUM_SAVED_REGS) return CU::UNWIND_MODE_DWARF;
-
- unsigned Reg = MI.getOperand(0).getReg();
- if (Reg == (Is64Bit ? X86::RAX : X86::EAX)) {
- ExpectEnd = true;
- continue;
- }
-
- SavedRegs[SavedRegIdx++] = MI.getOperand(0).getReg();
- StackAdjust += OffsetSize;
- InstrOffset += PushInstrSize;
- } else if (Opc == MoveInstr) {
- unsigned SrcReg = MI.getOperand(1).getReg();
- unsigned DstReg = MI.getOperand(0).getReg();
-
- if (DstReg != FramePtr || SrcReg != StackPtr)
- return CU::UNWIND_MODE_DWARF;
-
- StackAdjust = 0;
- memset(SavedRegs, 0, sizeof(SavedRegs));
- SavedRegIdx = 0;
- InstrOffset += MoveInstrSize;
- } else if (Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
- Opc == X86::SUB32ri || Opc == X86::SUB32ri8) {
- if (StackSize)
- // We already have a stack size.
- return CU::UNWIND_MODE_DWARF;
-
- if (!MI.getOperand(0).isReg() ||
- MI.getOperand(0).getReg() != MI.getOperand(1).getReg() ||
- MI.getOperand(0).getReg() != StackPtr || !MI.getOperand(2).isImm())
- // We need this to be a stack adjustment pointer. Something like:
- //
- // %RSP<def> = SUB64ri8 %RSP, 48
- return CU::UNWIND_MODE_DWARF;
-
- StackSize = MI.getOperand(2).getImm() / StackDivide;
- SubtractInstrIdx += InstrOffset;
- ExpectEnd = true;
- }
- }
-
- // Encode that we are using EBP/RBP as the frame pointer.
- uint32_t CompactUnwindEncoding = 0;
- StackAdjust /= StackDivide;
- if (HasFP) {
- if ((StackAdjust & 0xFF) != StackAdjust)
- // Offset was too big for compact encoding.
- return CU::UNWIND_MODE_DWARF;
-
- // Get the encoding of the saved registers when we have a frame pointer.
- uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(SavedRegs, Is64Bit);
- if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
-
- CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
- CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
- CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
- } else {
- ++StackAdjust;
- uint32_t TotalStackSize = StackAdjust + StackSize;
- if ((TotalStackSize & 0xFF) == TotalStackSize) {
- // Frameless stack with a small stack size.
- CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;
-
- // Encode the stack size.
- CompactUnwindEncoding |= (TotalStackSize & 0xFF) << 16;
- } else {
- if ((StackAdjust & 0x7) != StackAdjust)
- // The extra stack adjustments are too big for us to handle.
- return CU::UNWIND_MODE_DWARF;
-
- // Frameless stack with an offset too large for us to encode compactly.
- CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;
-
- // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
- // instruction.
- CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
-
- // Encode any extra stack adjustments (done via push instructions).
- CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
- }
-
- // Encode the number of registers saved.
- CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;
-
- // Get the encoding of the saved registers when we don't have a frame
- // pointer.
- uint32_t RegEnc =
- encodeCompactUnwindRegistersWithoutFrame(SavedRegs, SavedRegIdx,
- Is64Bit);
- if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
-
- // Encode the register encoding.
- CompactUnwindEncoding |=
- RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
- }
-
- return CompactUnwindEncoding;
-}
-
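// Rough sketch (plain C++; field positions taken from the code being removed
// above, not from an external spec) of how the frameless "small stack" compact
// unwind word was packed before this encoding left the target:
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t UNWIND_MODE_STACK_IMMD = 0x02000000;
  uint32_t totalStackSize = 5;  // stack size, already divided by the slot size
  uint32_t savedRegIdx = 3;     // number of pushed callee-saved registers
  uint32_t permutation = 0x12;  // hypothetical 10-bit register permutation

  uint32_t encoding = UNWIND_MODE_STACK_IMMD
                    | ((totalStackSize & 0xFF) << 16)
                    | ((savedRegIdx & 0x7) << 10)
                    | (permutation & 0x3FF);
  assert(encoding == 0x02050C12);
  return 0;
}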
/// usesTheStack - This function checks if any of the users of EFLAGS
/// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
-static bool usesTheStack(MachineFunction &MF) {
- MachineRegisterInfo &MRI = MF.getRegInfo();
+static bool usesTheStack(const MachineFunction &MF) {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
for (MachineRegisterInfo::reg_iterator ri = MRI.reg_begin(X86::EFLAGS),
re = MRI.reg_end(); ri != re; ++ri)
@@ -732,7 +474,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// REG < 64 => DW_CFA_offset + Reg
// ELSE => DW_CFA_offset_extended
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
uint64_t NumBytes = 0;
int stackGrowth = -SlotSize;
@@ -765,20 +506,14 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
.addSym(FrameLabel);
// Define the current CFA rule to use the provided offset.
- if (StackSize) {
- MachineLocation SPDst(MachineLocation::VirtualFP);
- MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
- } else {
- MachineLocation SPDst(StackPtr);
- MachineLocation SPSrc(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
- }
+ assert(StackSize);
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(FrameLabel, 2 * stackGrowth));
// Change the rule for the FramePtr to be an "offset" rule.
- MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
- MachineLocation FPSrc(FramePtr);
- Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
+ unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(FrameLabel, DwarfFramePtr,
+ 2 * stackGrowth));
}
// Update EBP with the new base value.
@@ -794,9 +529,9 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
.addSym(FrameLabel);
// Define the current CFA to use the EBP/RBP register.
- MachineLocation FPDst(FramePtr);
- MachineLocation FPSrc(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
+ unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(FrameLabel, DwarfFramePtr));
}
// Mark the FramePtr as live-in in every block except the entry.
@@ -824,10 +559,9 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);
// Define the current CFA rule to use the provided offset.
- unsigned Ptr = StackSize ? MachineLocation::VirtualFP : StackPtr;
- MachineLocation SPDst(Ptr);
- MachineLocation SPSrc(Ptr, StackOffset);
- Moves.push_back(MachineMove(Label, SPDst, SPSrc));
+ assert(StackSize);
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(Label, StackOffset));
StackOffset += stackGrowth;
}
}
@@ -872,7 +606,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// responsible for adjusting the stack pointer. Touching the stack at 4K
// increments is necessary to ensure that the guard pages used by the OS
// virtual memory manager are allocated in correct sequence.
- if (NumBytes >= 4096 && STI.isTargetCOFF() && !STI.isTargetEnvMacho()) {
+ if (NumBytes >= 4096 && STI.isOSWindows() && !STI.isTargetEnvMacho()) {
const char *StackProbeSymbol;
bool isSPUpdateNeeded = false;
@@ -923,11 +657,14 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
.setMIFlag(MachineInstr::FrameSetup);
- // MSVC x64's __chkstk needs to adjust %rsp.
- // FIXME: %rax preserves the offset and should be available.
- if (isSPUpdateNeeded)
- emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, IsLP64,
- UseLEA, TII, *RegInfo);
+ // MSVC x64's __chkstk does not adjust %rsp itself.
+ // It also does not clobber %rax so we can reuse it when adjusting %rsp.
+ if (isSPUpdateNeeded) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), StackPtr)
+ .addReg(StackPtr)
+ .addReg(X86::RAX)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
if (isEAXAlive) {
// Restore EAX
@@ -961,27 +698,15 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
if (!HasFP && NumBytes) {
// Define the current CFA rule to use the provided offset.
- if (StackSize) {
- MachineLocation SPDst(MachineLocation::VirtualFP);
- MachineLocation SPSrc(MachineLocation::VirtualFP,
- -StackSize + stackGrowth);
- Moves.push_back(MachineMove(Label, SPDst, SPSrc));
- } else {
- MachineLocation SPDst(StackPtr);
- MachineLocation SPSrc(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(Label, SPDst, SPSrc));
- }
+ assert(StackSize);
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(
+ Label, -StackSize + stackGrowth));
}
// Emit DWARF info specifying the offsets of the callee-saved registers.
if (PushedRegs)
emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
}
-
- // Darwin 10.7 and greater has support for compact unwind encoding.
- if (STI.getTargetTriple().isMacOSX() &&
- !STI.getTargetTriple().isMacOSXVersionLT(10, 7))
- MMI.setCompactUnwindEncoding(getCompactUnwindEncoding(MF));
}
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
@@ -1336,7 +1061,7 @@ X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
unsigned SlotSize = RegInfo->getSlotSize();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+ int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
if (TailCallReturnAddrDelta < 0) {
// create RETURNADDR area
@@ -1349,7 +1074,7 @@ X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// }
// [EBP]
MFI->CreateFixedObject(-TailCallReturnAddrDelta,
- (-1U*SlotSize)+TailCallReturnAddrDelta, true);
+ TailCallReturnAddrDelta - SlotSize, true);
}
if (hasFP(MF)) {
diff --git a/lib/Target/X86/X86FrameLowering.h b/lib/Target/X86/X86FrameLowering.h
index 6e309d88327b..3d3b0114ace6 100644
--- a/lib/Target/X86/X86FrameLowering.h
+++ b/lib/Target/X86/X86FrameLowering.h
@@ -20,32 +20,6 @@
namespace llvm {
-namespace CU {
-
- /// Compact unwind encoding values.
- enum CompactUnwindEncodings {
- /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
- /// the return address, then [RE]SP is moved to [RE]BP.
- UNWIND_MODE_BP_FRAME = 0x01000000,
-
- /// A frameless function with a small constant stack size.
- UNWIND_MODE_STACK_IMMD = 0x02000000,
-
- /// A frameless function with a large constant stack size.
- UNWIND_MODE_STACK_IND = 0x03000000,
-
- /// No compact unwind encoding is available.
- UNWIND_MODE_DWARF = 0x04000000,
-
- /// Mask for encoding the frame registers.
- UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,
-
- /// Mask for encoding the frameless registers.
- UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
- };
-
-} // end CU namespace
-
class MCSymbol;
class X86TargetMachine;
@@ -91,7 +65,6 @@ public:
int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const;
- uint32_t getCompactUnwindEncoding(MachineFunction &MF) const;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 968b3583c36d..36d16907bfef 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -79,7 +79,8 @@ namespace {
}
bool hasBaseOrIndexReg() const {
- return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
+ return BaseType == FrameIndexBase ||
+ IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
}
/// isRIPRelative - Return true if this addressing mode is already RIP
@@ -141,10 +142,6 @@ namespace {
/// SelectionDAG operations.
///
class X86DAGToDAGISel : public SelectionDAGISel {
- /// X86Lowering - This object fully describes how to lower LLVM code to an
- /// X86-specific SelectionDAG.
- const X86TargetLowering &X86Lowering;
-
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
@@ -156,7 +153,6 @@ namespace {
public:
explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
: SelectionDAGISel(tm, OptLevel),
- X86Lowering(*tm.getTargetLowering()),
Subtarget(&tm.getSubtarget<X86Subtarget>()),
OptForSize(false) {}
@@ -188,7 +184,7 @@ namespace {
SDNode *Select(SDNode *N);
SDNode *SelectGather(SDNode *N, unsigned Opc);
SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
- SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);
+ SDNode *SelectAtomicLoadArith(SDNode *Node, MVT NVT);
bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
@@ -200,9 +196,13 @@ namespace {
bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
+ bool SelectMOV64Imm32(SDValue N, SDValue &Imm);
bool SelectLEAAddr(SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
+ bool SelectLEA64_32Addr(SDValue N, SDValue &Base,
+ SDValue &Scale, SDValue &Index, SDValue &Disp,
+ SDValue &Segment);
bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
@@ -229,14 +229,15 @@ namespace {
SDValue &Scale, SDValue &Index,
SDValue &Disp, SDValue &Segment) {
Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
- CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
+ CurDAG->getTargetFrameIndex(AM.Base_FrameIndex,
+ getTargetLowering()->getPointerTy()) :
AM.Base_Reg;
Scale = getI8Imm(AM.Scale);
Index = AM.IndexReg;
// These are 32-bit even in 64-bit mode since RIP relative offset
// is 32-bit.
if (AM.GV)
- Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
+ Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
MVT::i32, AM.Disp,
AM.SymbolFlags);
else if (AM.CP)
@@ -373,7 +374,7 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
else
Ops.push_back(Chain.getOperand(i));
SDValue NewChain =
- CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
+ CurDAG->getNode(ISD::TokenFactor, SDLoc(Load),
MVT::Other, &Ops[0], Ops.size());
Ops.clear();
Ops.push_back(NewChain);
@@ -491,8 +492,8 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
continue;
- EVT SrcVT = N->getOperand(0).getValueType();
- EVT DstVT = N->getValueType(0);
+ MVT SrcVT = N->getOperand(0).getSimpleValueType();
+ MVT DstVT = N->getSimpleValueType(0);
// If any of the sources are vectors, no fp stack involved.
if (SrcVT.isVector() || DstVT.isVector())
@@ -500,8 +501,10 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
// If the source and destination are SSE registers, then this is a legal
// conversion that should not be lowered.
- bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
- bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
+ const X86TargetLowering *X86Lowering =
+ static_cast<const X86TargetLowering *>(getTargetLowering());
+ bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
+ bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
if (SrcIsSSE && DstIsSSE)
continue;
@@ -517,14 +520,14 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
// Here we could have an FP stack truncation or an FPStack <-> SSE convert.
// FPStack has extload and truncstore. SSE can fold direct loads into other
// operations. Based on this, decide what we want to do.
- EVT MemVT;
+ MVT MemVT;
if (N->getOpcode() == ISD::FP_ROUND)
MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
else
MemVT = SrcIsSSE ? SrcVT : DstVT;
SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
// FIXME: optimize the case where the src/dest is a load or store?
SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
@@ -781,8 +784,8 @@ static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
Mask != (0xffu << ScaleLog))
return true;
- EVT VT = N.getValueType();
- DebugLoc DL = N.getDebugLoc();
+ MVT VT = N.getSimpleValueType();
+ SDLoc DL(N);
SDValue Eight = DAG.getConstant(8, MVT::i8);
SDValue NewMask = DAG.getConstant(0xff, VT);
SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
@@ -829,8 +832,8 @@ static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
return true;
- EVT VT = N.getValueType();
- DebugLoc DL = N.getDebugLoc();
+ MVT VT = N.getSimpleValueType();
+ SDLoc DL(N);
SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
@@ -886,8 +889,8 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
return true;
unsigned ShiftAmt = Shift.getConstantOperandVal(1);
- unsigned MaskLZ = CountLeadingZeros_64(Mask);
- unsigned MaskTZ = CountTrailingZeros_64(Mask);
+ unsigned MaskLZ = countLeadingZeros(Mask);
+ unsigned MaskTZ = countTrailingZeros(Mask);
// The amount of shift we're trying to fit into the addressing mode is taken
// from the trailing zeros of the mask.
@@ -902,7 +905,7 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
// Scale the leading zero count down based on the actual size of the value.
// Also scale it down based on the size of the shift.
- MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;
+ MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
// The final check is to ensure that any masked out high bits of X are
// already known to be zero. Otherwise, the mask has a semantic impact
@@ -912,31 +915,31 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
// replace them with zero extensions cheaply if necessary.
bool ReplacingAnyExtend = false;
if (X.getOpcode() == ISD::ANY_EXTEND) {
- unsigned ExtendBits =
- X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
+ unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
+ X.getOperand(0).getSimpleValueType().getSizeInBits();
// Assume that we'll replace the any-extend with a zero-extend, and
// narrow the search to the extended value.
X = X.getOperand(0);
MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
ReplacingAnyExtend = true;
}
- APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
- MaskLZ);
+ APInt MaskedHighBits =
+ APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
APInt KnownZero, KnownOne;
DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
if (MaskedHighBits != KnownZero) return true;
// We've identified a pattern that can be transformed into a single shift
// and an addressing mode. Make it so.
- EVT VT = N.getValueType();
+ MVT VT = N.getSimpleValueType();
if (ReplacingAnyExtend) {
assert(X.getValueType() != VT);
// We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
- SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
+ SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
InsertDAGNode(DAG, N, NewX);
X = NewX;
}
- DebugLoc DL = N.getDebugLoc();
+ SDLoc DL(N);
SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
@@ -960,7 +963,7 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
unsigned Depth) {
- DebugLoc dl = N.getDebugLoc();
+ SDLoc dl(N);
DEBUG({
dbgs() << "MatchAddress: ";
AM.dump();
@@ -1057,7 +1060,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// We only handle up to 64-bit values here as those are what matter for
// addressing mode optimizations.
- if (X.getValueSizeInBits() > 64) break;
+ if (X.getSimpleValueType().getSizeInBits() > 64) break;
// The mask used for the transform is expected to be post-shift, but we
// found the shift first so just apply the shift to the mask before passing
@@ -1242,7 +1245,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// We only handle up to 64-bit values here as those are what matter for
// addressing mode optimizations.
- if (X.getValueSizeInBits() > 64) break;
+ if (X.getSimpleValueType().getSizeInBits() > 64) break;
if (!isa<ConstantSDNode>(N.getOperand(1)))
break;
@@ -1321,7 +1324,7 @@ bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
if (MatchAddress(N, AM))
return false;
- EVT VT = N.getValueType();
+ MVT VT = N.getSimpleValueType();
if (AM.BaseType == X86ISelAddressMode::RegBase) {
if (!AM.Base_Reg.getNode())
AM.Base_Reg = CurDAG->getRegister(0, VT);
@@ -1380,6 +1383,71 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
}
+bool X86DAGToDAGISel::SelectMOV64Imm32(SDValue N, SDValue &Imm) {
+ if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
+ uint64_t ImmVal = CN->getZExtValue();
+ if ((uint32_t)ImmVal != (uint64_t)ImmVal)
+ return false;
+
+ Imm = CurDAG->getTargetConstant(ImmVal, MVT::i64);
+ return true;
+ }
+
+ // In static codegen with small code model, we can get the address of a label
+ // into a register with 'movl'. TableGen has already made sure we're looking
+ // at a label of some kind.
+ assert(N->getOpcode() == X86ISD::Wrapper &&
+ "Unexpected node type for MOV32ri64");
+ N = N.getOperand(0);
+
+ if (N->getOpcode() != ISD::TargetConstantPool &&
+ N->getOpcode() != ISD::TargetJumpTable &&
+ N->getOpcode() != ISD::TargetGlobalAddress &&
+ N->getOpcode() != ISD::TargetExternalSymbol &&
+ N->getOpcode() != ISD::TargetBlockAddress)
+ return false;
+
+ Imm = N;
+ return TM.getCodeModel() == CodeModel::Small;
+}
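// Standalone sketch (not the selector itself) of the immediate check above:
// a 64-bit constant can be loaded with a 32-bit 'movl' only when its upper
// 32 bits are zero, since the hardware zero-extends the 32-bit write.
#include <cassert>
#include <cstdint>

static bool fitsInZeroExtended32(uint64_t value) {
  return static_cast<uint32_t>(value) == value;
}

int main() {
  assert(fitsInZeroExtended32(0x00000000FFFFFFFFull));
  assert(!fitsInZeroExtended32(0x0000000100000000ull));
  return 0;
}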
+
+bool X86DAGToDAGISel::SelectLEA64_32Addr(SDValue N, SDValue &Base,
+ SDValue &Scale, SDValue &Index,
+ SDValue &Disp, SDValue &Segment) {
+ if (!SelectLEAAddr(N, Base, Scale, Index, Disp, Segment))
+ return false;
+
+ SDLoc DL(N);
+ RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
+ if (RN && RN->getReg() == 0)
+ Base = CurDAG->getRegister(0, MVT::i64);
+ else if (Base.getValueType() == MVT::i32 && !dyn_cast<FrameIndexSDNode>(N)) {
+ // Base could already be %rip, particularly in the x32 ABI.
+ Base = SDValue(CurDAG->getMachineNode(
+ TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
+ CurDAG->getTargetConstant(0, MVT::i64),
+ Base,
+ CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
+ 0);
+ }
+
+ RN = dyn_cast<RegisterSDNode>(Index);
+ if (RN && RN->getReg() == 0)
+ Index = CurDAG->getRegister(0, MVT::i64);
+ else {
+ assert(Index.getValueType() == MVT::i32 &&
+ "Expect to be extending 32-bit registers for use in LEA");
+ Index = SDValue(CurDAG->getMachineNode(
+ TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
+ CurDAG->getTargetConstant(0, MVT::i64),
+ Index,
+ CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
+ 0);
+ }
+
+ return true;
+}
+
/// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
@@ -1398,7 +1466,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
assert (T == AM.Segment);
AM.Segment = Copy;
- EVT VT = N.getValueType();
+ MVT VT = N.getSimpleValueType();
unsigned Complexity = 0;
if (AM.BaseType == X86ISelAddressMode::RegBase)
if (AM.Base_Reg.getNode())
@@ -1487,7 +1555,8 @@ bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
- return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
+ return CurDAG->getRegister(GlobalBaseReg,
+ getTargetLowering()->getPointerTy()).getNode();
}
SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
@@ -1502,7 +1571,7 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
- SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
+ SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node),
MVT::i32, MVT::i32, MVT::Other, Ops);
cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
return ResNode;
@@ -1637,8 +1706,8 @@ static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
// + empty, the operand is not needed any more with the new op selected.
// + non-empty, otherwise.
static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
- DebugLoc dl,
- enum AtomicOpc &Op, EVT NVT,
+ SDLoc dl,
+ enum AtomicOpc &Op, MVT NVT,
SDValue Val) {
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
int64_t CNVal = CN->getSExtValue();
@@ -1685,11 +1754,11 @@ static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
return Val;
}
-SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
+SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) {
if (Node->hasAnyUseOfValue(0))
return 0;
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
// Optimize common patterns for __sync_or_and_fetch and similar arith
// operations where the result is not used. This allows us to use the "lock"
@@ -1725,7 +1794,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);
unsigned Opc = 0;
- switch (NVT.getSimpleVT().SimpleTy) {
+ switch (NVT.SimpleTy) {
default: return 0;
case MVT::i8:
if (isCN)
@@ -1920,7 +1989,7 @@ static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
if (ChainCheck)
// Make a new TokenFactor with all the other input chains except
// for the load.
- InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(),
+ InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
MVT::Other, &ChainOps[0], ChainOps.size());
}
if (!ChainCheck)
@@ -1968,7 +2037,7 @@ SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
SDValue Segment = CurDAG->getRegister(0, MVT::i32);
const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
Disp, Segment, VMask, Chain};
- SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), VTs, Ops);
+ SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), VTs, Ops);
// Node has 2 outputs: VDst and MVT::Other.
// ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
// We replace VDst of Node with VDst of ResNode, and Other of Node with Other
@@ -1979,15 +2048,16 @@ SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
}
SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
- EVT NVT = Node->getValueType(0);
+ MVT NVT = Node->getSimpleValueType(0);
unsigned Opc, MOpc;
unsigned Opcode = Node->getOpcode();
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
if (Node->isMachineOpcode()) {
DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
+ Node->setNodeId(-1);
return NULL; // Already selected.
}
@@ -2013,6 +2083,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::x86_avx2_gather_d_d_256:
case Intrinsic::x86_avx2_gather_q_d:
case Intrinsic::x86_avx2_gather_q_d_256: {
+ if (!Subtarget->hasAVX2())
+ break;
unsigned Opc;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic");
@@ -2117,7 +2189,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
break;
unsigned ShlOp, Op;
- EVT CstVT = NVT;
+ MVT CstVT = NVT;
// Check the minimum bitwidth for the new constant.
// TODO: AND32ri is the same as AND64ri32 with zext imm.
@@ -2132,7 +2204,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
if (NVT == CstVT)
break;
- switch (NVT.getSimpleVT().SimpleTy) {
+ switch (NVT.SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case MVT::i32:
assert(CstVT == MVT::i8);
@@ -2169,7 +2241,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue N1 = Node->getOperand(1);
unsigned LoReg;
- switch (NVT.getSimpleVT().SimpleTy) {
+ switch (NVT.SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case MVT::i8: LoReg = X86::AL; Opc = X86::MUL8r; break;
case MVT::i16: LoReg = X86::AX; Opc = X86::MUL16r; break;
@@ -2198,7 +2270,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
bool isSigned = Opcode == ISD::SMUL_LOHI;
bool hasBMI2 = Subtarget->hasBMI2();
if (!isSigned) {
- switch (NVT.getSimpleVT().SimpleTy) {
+ switch (NVT.SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
@@ -2208,7 +2280,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
}
} else {
- switch (NVT.getSimpleVT().SimpleTy) {
+ switch (NVT.SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
@@ -2335,9 +2407,6 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
}
- // Propagate ordering to the last node, for now.
- CurDAG->AssignOrdering(InFlag.getNode(), CurDAG->GetOrdering(Node));
-
return NULL;
}
@@ -2348,7 +2417,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
bool isSigned = Opcode == ISD::SDIVREM;
if (!isSigned) {
- switch (NVT.getSimpleVT().SimpleTy) {
+ switch (NVT.SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
@@ -2356,7 +2425,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
}
} else {
- switch (NVT.getSimpleVT().SimpleTy) {
+ switch (NVT.SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
@@ -2366,27 +2435,24 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
unsigned LoReg, HiReg, ClrReg;
- unsigned ClrOpcode, SExtOpcode;
- switch (NVT.getSimpleVT().SimpleTy) {
+ unsigned SExtOpcode;
+ switch (NVT.SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case MVT::i8:
LoReg = X86::AL; ClrReg = HiReg = X86::AH;
- ClrOpcode = 0;
SExtOpcode = X86::CBW;
break;
case MVT::i16:
LoReg = X86::AX; HiReg = X86::DX;
- ClrOpcode = X86::MOV16r0; ClrReg = X86::DX;
+ ClrReg = X86::DX;
SExtOpcode = X86::CWD;
break;
case MVT::i32:
LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
- ClrOpcode = X86::MOV32r0;
SExtOpcode = X86::CDQ;
break;
case MVT::i64:
LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
- ClrOpcode = X86::MOV64r0;
SExtOpcode = X86::CQO;
break;
}
@@ -2424,8 +2490,29 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
} else {
// Zero out the high part, effectively zero extending the input.
- SDValue ClrNode =
- SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
+ SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
+ switch (NVT.SimpleTy) {
+ case MVT::i16:
+ ClrNode =
+ SDValue(CurDAG->getMachineNode(
+ TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
+ CurDAG->getTargetConstant(X86::sub_16bit, MVT::i32)),
+ 0);
+ break;
+ case MVT::i32:
+ break;
+ case MVT::i64:
+ ClrNode =
+ SDValue(CurDAG->getMachineNode(
+ TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
+ CurDAG->getTargetConstant(0, MVT::i64), ClrNode,
+ CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
+ 0);
+ break;
+ default:
+ llvm_unreachable("Unexpected division source");
+ }
+
InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
ClrNode, InFlag).getValue(1);
}
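
The rewritten zero-extension above always materializes a 32-bit zero with MOV32r0 and then widens or narrows it to the register holding the high half of the dividend. A brief standalone illustration (plain C++, not LLVM code) of why that high half must be zero for unsigned division — DIV divides the double-width pair, so clearing the high register makes the dividend equal to the zero-extended low half:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t eax = 123456789;  // low half of the dividend
    uint32_t edx = 0;          // high half, cleared by the MOV32r0-based sequence
    uint32_t divisor = 1000;

    uint64_t dividend = (static_cast<uint64_t>(edx) << 32) | eax;  // EDX:EAX
    assert(dividend == eax);   // zeroed high half == zero extension of EAX

    uint32_t quotient  = static_cast<uint32_t>(dividend / divisor);  // -> EAX
    uint32_t remainder = static_cast<uint32_t>(dividend % divisor);  // -> EDX
    assert(quotient == eax / divisor && remainder == eax % divisor);
    return 0;
  }
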
@@ -2446,6 +2533,11 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// Prevent use of AH in a REX instruction by referencing AX instead.
// Shift it down 8 bits.
+ //
+ // The current assumption of the register allocator is that isel
+ // won't generate explicit references to the GPR8_NOREX registers. If
+ // the allocator and/or the backend get enhanced to be more robust in
+ // that regard, this can be, and should be, removed.
if (HiReg == X86::AH && Subtarget->is64Bit() &&
!SDValue(Node, 1).use_empty()) {
SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
@@ -2519,7 +2611,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// On x86-32, only the ABCD registers have 8-bit subregisters.
if (!Subtarget->is64Bit()) {
const TargetRegisterClass *TRC;
- switch (N0.getValueType().getSimpleVT().SimpleTy) {
+ switch (N0.getSimpleValueType().SimpleTy) {
case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
default: llvm_unreachable("Unsupported TEST operand type!");
@@ -2554,7 +2646,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// Put the value in an ABCD register.
const TargetRegisterClass *TRC;
- switch (N0.getValueType().getSimpleVT().SimpleTy) {
+ switch (N0.getSimpleValueType().SimpleTy) {
case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
@@ -2666,7 +2758,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
EVT LdVT = LoadNode->getMemoryVT();
unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
- Node->getDebugLoc(),
+ SDLoc(Node),
MVT::i32, MVT::Other, Ops);
Result->setMemRefs(MemOp, MemOp + 2);
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index f69f5d85f76e..76eeb64650ba 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -16,6 +16,7 @@
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86.h"
+#include "X86CallingConv.h"
#include "X86InstrBuilder.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
@@ -55,20 +56,17 @@ using namespace llvm;
STATISTIC(NumTailCalls, "Number of tail calls");
// Forward declarations.
-static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
+static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
SDValue V2);
-/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
-/// sets things up to match to an AVX VEXTRACTF128 instruction or a
-/// simple subregister reference. Idx is an index in the 128 bits we
-/// want. It need not be aligned to a 128-bit boundary. That makes
-/// lowering EXTRACT_VECTOR_ELT operations easier.
-static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, DebugLoc dl) {
+static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, SDLoc dl,
+ unsigned vectorWidth) {
+ assert((vectorWidth == 128 || vectorWidth == 256) &&
+ "Unsupported vector width");
EVT VT = Vec.getValueType();
- assert(VT.is256BitVector() && "Unexpected vector size!");
EVT ElVT = VT.getVectorElementType();
- unsigned Factor = VT.getSizeInBits()/128;
+ unsigned Factor = VT.getSizeInBits()/vectorWidth;
EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
VT.getVectorNumElements()/Factor);
@@ -76,13 +74,12 @@ static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
if (Vec.getOpcode() == ISD::UNDEF)
return DAG.getUNDEF(ResultVT);
- // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR
- // we can match to VEXTRACTF128.
- unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();
+ // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
+ unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
- // This is the index of the first element of the 128-bit chunk
+ // This is the index of the first element of the vectorWidth-bit chunk
// we want.
- unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
+ unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
* ElemsPerChunk);
// If the input is a buildvector just emit a smaller one.
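
The normalization just above rounds an element index down to the first element of the 128-bit (now also 256-bit) chunk that contains it. A tiny sketch of that arithmetic (plain C++, values chosen only for illustration):

  #include <cassert>

  int main() {
    unsigned elemBits = 32;      // e.g. elements of a v8i32 source vector
    unsigned vectorWidth = 128;  // width of the chunk being extracted
    unsigned elemsPerChunk = vectorWidth / elemBits;  // 4 elements per chunk

    unsigned idx = 6;            // the element we care about
    unsigned normalized = ((idx * elemBits) / vectorWidth) * elemsPerChunk;
    assert(normalized == 4);     // element 6 lives in the chunk starting at 4
    return 0;
  }
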
@@ -95,38 +92,71 @@ static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
VecIdx);
return Result;
+
+}
+/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
+/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
+/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
+/// instructions or a simple subregister reference. Idx is an index in the
+/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
+/// lowering EXTRACT_VECTOR_ELT operations easier.
+static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, SDLoc dl) {
+ assert((Vec.getValueType().is256BitVector() ||
+ Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
+ return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}
-/// Generate a DAG to put 128-bits into a vector > 128 bits. This
-/// sets things up to match to an AVX VINSERTF128 instruction or a
-/// simple superregister reference. Idx is an index in the 128 bits
-/// we want. It need not be aligned to a 128-bit bounday. That makes
-/// lowering INSERT_VECTOR_ELT operations easier.
-static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
- unsigned IdxVal, SelectionDAG &DAG,
- DebugLoc dl) {
+/// Generate a DAG to grab 256-bits from a 512-bit vector.
+static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, SDLoc dl) {
+ assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
+ return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
+}
+
+static SDValue InsertSubVector(SDValue Result, SDValue Vec,
+ unsigned IdxVal, SelectionDAG &DAG,
+ SDLoc dl, unsigned vectorWidth) {
+ assert((vectorWidth == 128 || vectorWidth == 256) &&
+ "Unsupported vector width");
// Inserting UNDEF is Result
if (Vec.getOpcode() == ISD::UNDEF)
return Result;
-
EVT VT = Vec.getValueType();
- assert(VT.is128BitVector() && "Unexpected vector size!");
-
EVT ElVT = VT.getVectorElementType();
EVT ResultVT = Result.getValueType();
- // Insert the relevant 128 bits.
- unsigned ElemsPerChunk = 128/ElVT.getSizeInBits();
+ // Insert the relevant vectorWidth bits.
+ unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
- // This is the index of the first element of the 128-bit chunk
+ // This is the index of the first element of the vectorWidth-bit chunk
// we want.
- unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
+ unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
* ElemsPerChunk);
SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
VecIdx);
}
+/// Generate a DAG to put 128-bits into a vector > 128 bits. This
+/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
+/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
+/// simple superregister reference. Idx is an index in the 128 bits
+/// we want. It need not be aligned to a 128-bit boundary. That makes
+/// lowering INSERT_VECTOR_ELT operations easier.
+static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
+ unsigned IdxVal, SelectionDAG &DAG,
+ SDLoc dl) {
+ assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
+ return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
+}
+
+static SDValue Insert256BitVector(SDValue Result, SDValue Vec,
+ unsigned IdxVal, SelectionDAG &DAG,
+ SDLoc dl) {
+ assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
+ return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
+}
/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
@@ -134,11 +164,18 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
unsigned NumElems, SelectionDAG &DAG,
- DebugLoc dl) {
+ SDLoc dl) {
SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}
+static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
+ unsigned NumElems, SelectionDAG &DAG,
+ SDLoc dl) {
+ SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
+ return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
+}
+
static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
bool is64Bit = Subtarget->is64Bit();
@@ -163,7 +200,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
Subtarget = &TM.getSubtarget<X86Subtarget>();
X86ScalarSSEf64 = Subtarget->hasSSE2();
X86ScalarSSEf32 = Subtarget->hasSSE1();
- RegInfo = TM.getRegisterInfo();
TD = getDataLayout();
resetOperationActions();
@@ -202,6 +238,8 @@ void X86TargetLowering::resetOperationActions() {
setSchedulingPreference(Sched::ILP);
else
setSchedulingPreference(Sched::RegPressure);
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
// Bypass expensive divides on Atom when compiling with O2
@@ -562,10 +600,6 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
}
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
if (Subtarget->is64Bit()) {
setExceptionPointerRegister(X86::RAX);
setExceptionSelectorRegister(X86::RDX);
@@ -585,10 +619,12 @@ void X86TargetLowering::resetOperationActions() {
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
- if (Subtarget->is64Bit()) {
+ if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
+ // TargetInfo::X86_64ABIBuiltinVaList
setOperationAction(ISD::VAARG , MVT::Other, Custom);
setOperationAction(ISD::VACOPY , MVT::Other, Custom);
} else {
+ // TargetInfo::CharPtrBuiltinVaList
setOperationAction(ISD::VAARG , MVT::Other, Expand);
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
}
@@ -596,7 +632,7 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
+ if (Subtarget->isOSWindows() && !Subtarget->isTargetEnvMacho())
setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
MVT::i64 : MVT::i32, Custom);
else if (TM.Options.EnableSegmentedStacks)
@@ -999,7 +1035,7 @@ void X86TargetLowering::resetOperationActions() {
setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
}
- if (Subtarget->hasSSE41()) {
+ if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
setOperationAction(ISD::FCEIL, MVT::f32, Legal);
setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
@@ -1115,9 +1151,6 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
setOperationAction(ISD::FABS, MVT::v4f64, Custom);
- setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
- setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
-
setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
@@ -1125,7 +1158,6 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
- setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
@@ -1158,10 +1190,16 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
+ setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
+ setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
+ setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
+ setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
setOperationAction(ISD::FMA, MVT::v8f32, Legal);
@@ -1262,6 +1300,152 @@ void X86TargetLowering::resetOperationActions() {
}
}
+ if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
+ addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
+ addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
+ addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
+ addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
+
+ addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
+ addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, Legal);
+ setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
+ setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
+ setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
+ setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
+ setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
+
+ setOperationAction(ISD::FADD, MVT::v16f32, Legal);
+ setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
+ setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
+ setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
+ setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
+
+ setOperationAction(ISD::FADD, MVT::v8f64, Legal);
+ setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
+ setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
+ setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
+ setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
+ setOperationAction(ISD::FMA, MVT::v8f64, Legal);
+ setOperationAction(ISD::FMA, MVT::v16f32, Legal);
+ setOperationAction(ISD::SDIV, MVT::v16i32, Custom);
+
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
+ }
+ setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
+ setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
+ setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
+ setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
+
+ setOperationAction(ISD::TRUNCATE, MVT::i1, Legal);
+ setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
+ setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
+ setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
+ setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
+
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
+
+ setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
+ setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
+
+ setOperationAction(ISD::MUL, MVT::v8i64, Custom);
+
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
+ setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
+ setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
+ setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
+
+ setOperationAction(ISD::ADD, MVT::v8i64, Legal);
+ setOperationAction(ISD::ADD, MVT::v16i32, Legal);
+
+ setOperationAction(ISD::SUB, MVT::v8i64, Legal);
+ setOperationAction(ISD::SUB, MVT::v16i32, Legal);
+
+ setOperationAction(ISD::MUL, MVT::v16i32, Legal);
+
+ setOperationAction(ISD::SRL, MVT::v8i64, Custom);
+ setOperationAction(ISD::SRL, MVT::v16i32, Custom);
+
+ setOperationAction(ISD::SHL, MVT::v8i64, Custom);
+ setOperationAction(ISD::SHL, MVT::v16i32, Custom);
+
+ setOperationAction(ISD::SRA, MVT::v8i64, Custom);
+ setOperationAction(ISD::SRA, MVT::v16i32, Custom);
+
+ setOperationAction(ISD::AND, MVT::v8i64, Legal);
+ setOperationAction(ISD::OR, MVT::v8i64, Legal);
+ setOperationAction(ISD::XOR, MVT::v8i64, Legal);
+ setOperationAction(ISD::AND, MVT::v16i32, Legal);
+ setOperationAction(ISD::OR, MVT::v16i32, Legal);
+ setOperationAction(ISD::XOR, MVT::v16i32, Legal);
+
+ // Custom lower several nodes.
+ for (int i = MVT::FIRST_VECTOR_VALUETYPE;
+ i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
+ MVT VT = (MVT::SimpleValueType)i;
+
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ // Extract subvector is special because the value type
+ // (result) is 256/128-bit but the source is 512-bit wide.
+ if (VT.is128BitVector() || VT.is256BitVector())
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
+
+ if (VT.getVectorElementType() == MVT::i1)
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
+
+ // Do not attempt to custom lower other non-512-bit vectors
+ if (!VT.is512BitVector())
+ continue;
+
+ if ( EltSize >= 32) {
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Legal);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
+ setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
+ }
+ }
+ for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
+ MVT VT = (MVT::SimpleValueType)i;
+
+    // Do not attempt to promote non-512-bit vectors
+ if (!VT.is512BitVector())
+ continue;
+
+ setOperationAction(ISD::SELECT, VT, Promote);
+ AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
+ }
+ }// has AVX-512
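[Editor's illustration, not part of the patch] As a reader's aid, a minimal sketch of the bookkeeping the long run of setOperationAction calls above performs; the names OpActionTable/Action are invented for this sketch, and the real tables are dense per-type arrays rather than a map:

#include <cstdint>
#include <map>
#include <utility>

enum class Action : uint8_t { Legal, Promote, Expand, Custom };

struct OpActionTable {
  std::map<std::pair<unsigned, unsigned>, Action> Table; // (opcode, MVT) key

  void set(unsigned Op, unsigned VT, Action A) { Table[{Op, VT}] = A; }

  Action get(unsigned Op, unsigned VT) const {
    auto It = Table.find({Op, VT});
    return It == Table.end() ? Action::Legal : It->second; // unset means Legal
  }
};
// The legalizer later queries get(ISD::FADD, MVT::v16f32) and either keeps the
// node (Legal), switches it to a larger type (Promote), breaks it into other
// nodes (Expand), or calls back into the target's LowerOperation hook (Custom).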
+
// SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
// of this type with custom code.
for (int VT = MVT::FIRST_VECTOR_VALUETYPE;
@@ -1273,6 +1457,7 @@ void X86TargetLowering::resetOperationActions() {
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
// Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
// handle type legalization for these operations here.
@@ -1361,8 +1546,17 @@ void X86TargetLowering::resetOperationActions() {
setPrefFunctionAlignment(4); // 2^4 bytes.
}
-EVT X86TargetLowering::getSetCCResultType(EVT VT) const {
- if (!VT.isVector()) return MVT::i8;
+EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
+ if (!VT.isVector())
+ return MVT::i8;
+
+ const TargetMachine &TM = getTargetMachine();
+ if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512())
+ switch(VT.getVectorNumElements()) {
+ case 8: return MVT::v8i1;
+ case 16: return MVT::v16i1;
+ }
+
return VT.changeVectorElementTypeToInteger();
}
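[Editor's illustration, not part of the patch] To make the new v8i1/v16i1 result types concrete, a hedged source-level sketch (requires compiling with -mavx512f): a 16-wide float compare produces a 16-bit k-register mask rather than a full-width vector of 0/-1 lanes, and that mask is what the blend consumes.

#include <immintrin.h>

// a where a >= 0.0f, b elsewhere; the compare result is the 16-bit mask
// __mmask16, matching the MVT::v16i1 returned by getSetCCResultType above.
__m512 blend_nonnegative(__m512 a, __m512 b) {
  __mmask16 m = _mm512_cmp_ps_mask(a, _mm512_setzero_ps(), _CMP_GE_OQ);
  return _mm512_mask_blend_ps(m, b, a);
}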
@@ -1504,9 +1698,9 @@ X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const {
if (!Subtarget->is64Bit())
- // This doesn't have DebugLoc associated with it, but is not really the
+ // This doesn't have SDLoc associated with it, but is not really the
// same as a Register.
- return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy());
+ return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
return Table;
}
@@ -1571,6 +1765,13 @@ bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
return true;
}
+bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
+ unsigned DestAS) const {
+ assert(SrcAS != DestAS && "Expected different address spaces!");
+
+ return SrcAS < 256 && DestAS < 256;
+}
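[Editor's illustration, not part of the patch] A hedged example of why the cut-off is 256, using the Clang address_space extension: the x86 backend conventionally reserves address spaces 256 and 257 for gs- and fs-relative pointers, so only casts where both sides stay below 256 are guaranteed bit-identical no-ops.

// Dereferencing a pointer in address space 257 is lowered as an fs-relative
// load, so casting it to or from a flat pointer is not a no-op; two flat
// pointers (address spaces < 256) refer to the same memory and differ only
// in metadata.
int read_fs_slot(int __attribute__((address_space(257))) *p) {
  return *p;
}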
+
//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -1588,12 +1789,17 @@ X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
return CCInfo.CheckReturn(Outs, RetCC_X86);
}
+const uint16_t *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
+ static const uint16_t ScratchRegs[] = { X86::R11, 0 };
+ return ScratchRegs;
+}
+
SDValue
X86TargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+ SDLoc dl, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
@@ -1761,7 +1967,7 @@ SDValue
X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
@@ -1868,7 +2074,7 @@ argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
- DebugLoc dl) {
+ SDLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
@@ -1883,13 +2089,19 @@ static bool IsTailCallConvention(CallingConv::ID CC) {
CC == CallingConv::HiPE);
}
+/// \brief Return true if the calling convention is a C calling convention.
+static bool IsCCallConvention(CallingConv::ID CC) {
+ return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
+ CC == CallingConv::X86_64_SysV);
+}
+
bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
return false;
CallSite CS(CI);
CallingConv::ID CalleeCC = CS.getCallingConv();
- if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
+ if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
return false;
return true;
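[Editor's illustration, not part of the patch] The explicit conventions the new IsCCallConvention predicate accepts can be requested from C source; a small sketch using the usual Clang/GCC attribute spellings (nothing here is defined by this patch):

__attribute__((ms_abi))   void win64_call(int x) { (void)x; } // CallingConv::X86_64_Win64
__attribute__((sysv_abi)) void sysv_call(int x)  { (void)x; } // CallingConv::X86_64_SysV

void caller(void) {
  win64_call(1); // first integer argument in RCX under the Win64 rules
  sysv_call(2);  // first integer argument in RDI under the SysV rules
}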
@@ -1906,7 +2118,7 @@ SDValue
X86TargetLowering::LowerMemArgument(SDValue Chain,
CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
MachineFrameInfo *MFI,
unsigned i) const {
@@ -1948,7 +2160,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl,
+ SDLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
@@ -1964,7 +2176,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
MachineFrameInfo *MFI = MF.getFrameInfo();
bool Is64Bit = Subtarget->is64Bit();
bool IsWindows = Subtarget->isTargetWindows();
- bool IsWin64 = Subtarget->isTargetWin64();
+ bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
"Var args not supported with calling convention fastcc, ghc or hipe");
@@ -1975,9 +2187,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64
- if (IsWin64) {
+ if (IsWin64)
CCInfo.AllocateStack(32, 8);
- }
CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
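[Editor's illustration, not part of the patch] The AllocateStack(32, 8) call above reserves the Win64 "shadow area". A small model of that layout (the struct is an illustration, not an LLVM type):

struct Win64ArgArea {
  unsigned long long HomeSlots[4]; // 32-byte shadow area for RCX, RDX, R8, R9
  // fifth and later arguments, if any, are passed at offset 32 and up
};

static_assert(sizeof(Win64ArgArea::HomeSlots) == 32,
              "the shadow area is always four 8-byte slots");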
@@ -2003,12 +2214,18 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
RC = &X86::FR32RegClass;
else if (RegVT == MVT::f64)
RC = &X86::FR64RegClass;
+ else if (RegVT.is512BitVector())
+ RC = &X86::VR512RegClass;
else if (RegVT.is256BitVector())
RC = &X86::VR256RegClass;
else if (RegVT.is128BitVector())
RC = &X86::VR128RegClass;
else if (RegVT == MVT::x86mmx)
RC = &X86::VR64RegClass;
+ else if (RegVT == MVT::v8i1)
+ RC = &X86::VK8RegClass;
+ else if (RegVT == MVT::v16i1)
+ RC = &X86::VK16RegClass;
else
llvm_unreachable("Unknown argument type!");
@@ -2225,7 +2442,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
SDValue
X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
SDValue StackPtr, SDValue Arg,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const {
unsigned LocMemOffset = VA.getLocMemOffset();
@@ -2245,7 +2462,7 @@ SDValue
X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
SDValue &OutRetAddr, SDValue Chain,
bool IsTailCall, bool Is64Bit,
- int FPDiff, DebugLoc dl) const {
+ int FPDiff, SDLoc dl) const {
// Adjust the Return address stack slot.
EVT VT = getPointerTy();
OutRetAddr = getReturnAddressFrameIndex(DAG);
@@ -2261,12 +2478,13 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
static SDValue
EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT,
- unsigned SlotSize, int FPDiff, DebugLoc dl) {
+ unsigned SlotSize, int FPDiff, SDLoc dl) {
// Store the return address to the appropriate stack slot.
if (!FPDiff) return Chain;
// Calculate the new stack slot for the return address.
int NewReturnAddrFI =
- MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false);
+ MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
+ false);
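[Editor's illustration, not part of the patch] The (int64_t) cast added above matters because FPDiff is signed while SlotSize is unsigned; this standalone snippet reproduces the wrap-around the cast avoids:

#include <cstdint>
#include <cstdio>

int main() {
  int      FPDiff   = 0; // e.g. a tail call needing no extra stack adjustment
  unsigned SlotSize = 8; // return-address slot size on x86-64

  unsigned wrapped = FPDiff - SlotSize;          // promotes to unsigned: 4294967288
  int64_t  fixed   = (int64_t)FPDiff - SlotSize; // stays signed: -8

  std::printf("%u vs %lld\n", wrapped, (long long)fixed);
  return 0;
}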
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
MachinePointerInfo::getFixedStack(NewReturnAddrFI),
@@ -2278,10 +2496,10 @@ SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
CallingConv::ID CallConv = CLI.CallConv;
@@ -2290,7 +2508,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
MachineFunction &MF = DAG.getMachineFunction();
bool Is64Bit = Subtarget->is64Bit();
- bool IsWin64 = Subtarget->isTargetWin64();
+ bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
bool IsWindows = Subtarget->isTargetWindows();
StructReturnType SR = callIsStructReturn(Outs);
bool IsSibcall = false;
@@ -2323,9 +2541,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64
- if (IsWin64) {
+ if (IsWin64)
CCInfo.AllocateStack(32, 8);
- }
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
@@ -2354,7 +2571,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
if (!IsSibcall)
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ dl);
SDValue RetAddrFrIdx;
// Load return address for tail calls.
@@ -2368,6 +2586,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Walk the register/memloc assignments, inserting copies/loads. In the case
// of tail call optimization arguments are handle later.
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
EVT RegVT = VA.getLocVT();
@@ -2443,7 +2663,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// GOT pointer.
if (!isTailCall) {
RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
- DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy())));
+ DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
} else {
// If we are tail calling and generating PIC/GOT style code load the
// address of the callee into ECX. The value in ecx is used as target of
@@ -2640,7 +2860,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (!IsSibcall && isTailCall) {
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag);
+ DAG.getIntPtrConstant(0, true), InFlag, dl);
InFlag = Chain.getValue(1);
}
@@ -2699,7 +2919,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(NumBytesForCalleeToPush,
true),
- InFlag);
+ InFlag, dl);
InFlag = Chain.getValue(1);
}
@@ -2747,6 +2967,8 @@ X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG& DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
const TargetMachine &TM = MF.getTarget();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
const TargetFrameLowering &TFI = *TM.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
uint64_t AlignMask = StackAlignment - 1;
@@ -2831,13 +3053,12 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG &DAG) const {
- if (!IsTailCallConvention(CalleeCC) &&
- CalleeCC != CallingConv::C)
+ if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
return false;
// If -tailcallopt is specified, make fastcc functions tail-callable.
const MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = DAG.getMachineFunction().getFunction();
+ const Function *CallerF = MF.getFunction();
// If the function return type is x86_fp80 and the callee return type is not,
// then the FP_EXTEND of the call result is not a nop. It's not safe to
@@ -2847,6 +3068,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
CallingConv::ID CallerCC = CallerF->getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
+ bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
+ bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
if (getTargetMachine().Options.GuaranteedTailCallOpt) {
if (IsTailCallConvention(CalleeCC) && CCMatch)
@@ -2859,6 +3082,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
// emit a special epilogue.
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
if (RegInfo->needsStackRealignment(MF))
return false;
@@ -2878,7 +3103,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// Optimizing for varargs on Win64 is unlikely to be safe without
// additional testing.
- if (Subtarget->isTargetWin64())
+ if (IsCalleeWin64 || IsCallerWin64)
return false;
SmallVector<CCValAssign, 16> ArgLocs;
@@ -2953,9 +3178,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
getTargetMachine(), ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64
- if (Subtarget->isTargetWin64()) {
+ if (IsCalleeWin64)
CCInfo.AllocateStack(32, 8);
- }
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
if (CCInfo.getNextStackOffset()) {
@@ -3062,7 +3286,7 @@ static bool isTargetShuffle(unsigned Opcode) {
}
}
-static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
+static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
SDValue V1, SelectionDAG &DAG) {
switch(Opc) {
default: llvm_unreachable("Unknown x86 shuffle node");
@@ -3073,7 +3297,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
}
}
-static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
+static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
SDValue V1, unsigned TargetMask,
SelectionDAG &DAG) {
switch(Opc) {
@@ -3087,7 +3311,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
}
}
-static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
+static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
SDValue V1, SDValue V2, unsigned TargetMask,
SelectionDAG &DAG) {
switch(Opc) {
@@ -3100,7 +3324,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
}
}
-static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
+static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
SDValue V1, SDValue V2, SelectionDAG &DAG) {
switch(Opc) {
default: llvm_unreachable("Unknown x86 shuffle node");
@@ -3119,13 +3343,16 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
int ReturnAddrIndex = FuncInfo->getRAIndex();
if (ReturnAddrIndex == 0) {
// Set up a frame object for the return address.
unsigned SlotSize = RegInfo->getSlotSize();
- ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
+ ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
+ -(int64_t)SlotSize,
false);
FuncInfo->setRAIndex(ReturnAddrIndex);
}
@@ -3332,7 +3559,7 @@ static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
/// the second operand.
-static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) {
+static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT) {
if (VT == MVT::v4f32 || VT == MVT::v4i32 )
return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
if (VT == MVT::v2f64 || VT == MVT::v2i64)
@@ -3342,7 +3569,7 @@ static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) {
/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFHW.
-static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT, bool HasInt256) {
+static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
return false;
@@ -3371,7 +3598,7 @@ static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT, bool HasInt256) {
/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFLW.
-static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT, bool HasInt256) {
+static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
return false;
@@ -3400,14 +3627,14 @@ static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT, bool HasInt256) {
/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PALIGNR.
-static bool isPALIGNRMask(ArrayRef<int> Mask, EVT VT,
+static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
const X86Subtarget *Subtarget) {
if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
(VT.is256BitVector() && !Subtarget->hasInt256()))
return false;
unsigned NumElts = VT.getVectorNumElements();
- unsigned NumLanes = VT.getSizeInBits()/128;
+ unsigned NumLanes = VT.is512BitVector() ? 1: VT.getSizeInBits()/128;
unsigned NumLaneElts = NumElts/NumLanes;
// Do not handle 64-bit element shuffles with palignr.
@@ -3490,10 +3717,7 @@ static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
/// specifies a shuffle of elements that is suitable for input to 128/256-bit
/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
/// reverse of what x86 shuffles want.
-static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasFp256,
- bool Commuted = false) {
- if (!HasFp256 && VT.is256BitVector())
- return false;
+static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
unsigned NumElems = VT.getVectorNumElements();
unsigned NumLanes = VT.getSizeInBits()/128;
@@ -3502,6 +3726,10 @@ static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasFp256,
if (NumLaneElems != 2 && NumLaneElems != 4)
return false;
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ bool symetricMaskRequired =
+ (VT.getSizeInBits() >= 256) && (EltSize == 32);
+
// VSHUFPSY divides the resulting vector into 4 chunks.
// The sources are also split into 4 chunks, and each destination
// chunk must come from a different source chunk.
@@ -3521,6 +3749,7 @@ static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasFp256,
//
// DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
//
+ SmallVector<int, 4> MaskVal(NumLaneElems, -1);
unsigned HalfLaneElems = NumLaneElems/2;
for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
for (unsigned i = 0; i != NumLaneElems; ++i) {
@@ -3531,9 +3760,13 @@ static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasFp256,
// For VSHUFPSY, the mask of the second half must be the same as the
// first but with the appropriate offsets. This works in the same way as
// VPERMILPS works with masks.
- if (NumElems != 8 || l == 0 || Mask[i] < 0)
+ if (!symetricMaskRequired || Idx < 0)
continue;
- if (!isUndefOrEqual(Idx, Mask[i]+l))
+ if (MaskVal[i] < 0) {
+ MaskVal[i] = Idx - l;
+ continue;
+ }
+ if ((signed)(Idx - l) != MaskVal[i])
return false;
}
}
@@ -3543,7 +3776,7 @@ static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasFp256,
/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
-static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) {
+static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
if (!VT.is128BitVector())
return false;
@@ -3562,7 +3795,7 @@ static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) {
/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>
-static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) {
+static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
if (!VT.is128BitVector())
return false;
@@ -3579,7 +3812,7 @@ static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) {
/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
-static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) {
+static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
if (!VT.is128BitVector())
return false;
@@ -3601,7 +3834,7 @@ static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) {
/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLHPS.
-static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) {
+static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
if (!VT.is128BitVector())
return false;
@@ -3627,8 +3860,8 @@ static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) {
static
SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
SelectionDAG &DAG) {
- MVT VT = SVOp->getValueType(0).getSimpleVT();
- DebugLoc dl = SVOp->getDebugLoc();
+ MVT VT = SVOp->getSimpleValueType(0);
+ SDLoc dl(SVOp);
if (VT != MVT::v8i32 && VT != MVT::v8f32)
return SDValue();
@@ -3670,73 +3903,92 @@ SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
-static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT,
+static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
bool HasInt256, bool V2IsSplat = false) {
- unsigned NumElts = VT.getVectorNumElements();
- assert((VT.is128BitVector() || VT.is256BitVector()) &&
- "Unsupported vector type for unpckh");
+ assert(VT.getSizeInBits() >= 128 &&
+ "Unsupported vector type for unpckl");
- if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
- (!HasInt256 || (NumElts != 16 && NumElts != 32)))
+ // AVX defines UNPCK* to operate independently on 128-bit lanes.
+ unsigned NumLanes;
+ unsigned NumOf256BitLanes;
+ unsigned NumElts = VT.getVectorNumElements();
+ if (VT.is256BitVector()) {
+ if (NumElts != 4 && NumElts != 8 &&
+ (!HasInt256 || (NumElts != 16 && NumElts != 32)))
return false;
+ NumLanes = 2;
+ NumOf256BitLanes = 1;
+ } else if (VT.is512BitVector()) {
+ assert(VT.getScalarType().getSizeInBits() >= 32 &&
+ "Unsupported vector type for unpckh");
+ NumLanes = 2;
+ NumOf256BitLanes = 2;
+ } else {
+ NumLanes = 1;
+ NumOf256BitLanes = 1;
+ }
- // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
- // independently on 128-bit lanes.
- unsigned NumLanes = VT.getSizeInBits()/128;
- unsigned NumLaneElts = NumElts/NumLanes;
+ unsigned NumEltsInStride = NumElts/NumOf256BitLanes;
+ unsigned NumLaneElts = NumEltsInStride/NumLanes;
- for (unsigned l = 0; l != NumLanes; ++l) {
- for (unsigned i = l*NumLaneElts, j = l*NumLaneElts;
- i != (l+1)*NumLaneElts;
- i += 2, ++j) {
- int BitI = Mask[i];
- int BitI1 = Mask[i+1];
- if (!isUndefOrEqual(BitI, j))
- return false;
- if (V2IsSplat) {
- if (!isUndefOrEqual(BitI1, NumElts))
+ for (unsigned l256 = 0; l256 < NumOf256BitLanes; l256 += 1) {
+ for (unsigned l = 0; l != NumEltsInStride; l += NumLaneElts) {
+ for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
+ int BitI = Mask[l256*NumEltsInStride+l+i];
+ int BitI1 = Mask[l256*NumEltsInStride+l+i+1];
+ if (!isUndefOrEqual(BitI, j+l256*NumElts))
return false;
- } else {
- if (!isUndefOrEqual(BitI1, j + NumElts))
+ if (V2IsSplat && !isUndefOrEqual(BitI1, NumElts))
+ return false;
+ if (!isUndefOrEqual(BitI1, j+l256*NumElts+NumEltsInStride))
return false;
}
}
}
-
return true;
}
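[Editor's illustration, not part of the patch] Two concrete masks the rewritten predicate accepts, worked out by hand: because AVX unpacks operate per 128-bit lane, the 256-bit pattern repeats the 128-bit one with a per-lane offset instead of interleaving across the whole register.

// v4f32 unpcklps: result = <V1[0], V2[0], V1[1], V2[1]>
static const int UnpckLo4f32[4] = {0, 4, 1, 5};
// v8f32 vunpcklps: the same pattern, repeated independently in each lane
static const int UnpckLo8f32[8] = {0, 8, 1, 9, 4, 12, 5, 13};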
/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
-static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT,
+static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
bool HasInt256, bool V2IsSplat = false) {
- unsigned NumElts = VT.getVectorNumElements();
-
- assert((VT.is128BitVector() || VT.is256BitVector()) &&
+ assert(VT.getSizeInBits() >= 128 &&
"Unsupported vector type for unpckh");
- if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
- (!HasInt256 || (NumElts != 16 && NumElts != 32)))
+ // AVX defines UNPCK* to operate independently on 128-bit lanes.
+ unsigned NumLanes;
+ unsigned NumOf256BitLanes;
+ unsigned NumElts = VT.getVectorNumElements();
+ if (VT.is256BitVector()) {
+ if (NumElts != 4 && NumElts != 8 &&
+ (!HasInt256 || (NumElts != 16 && NumElts != 32)))
return false;
+ NumLanes = 2;
+ NumOf256BitLanes = 1;
+ } else if (VT.is512BitVector()) {
+ assert(VT.getScalarType().getSizeInBits() >= 32 &&
+ "Unsupported vector type for unpckh");
+ NumLanes = 2;
+ NumOf256BitLanes = 2;
+ } else {
+ NumLanes = 1;
+ NumOf256BitLanes = 1;
+ }
- // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
- // independently on 128-bit lanes.
- unsigned NumLanes = VT.getSizeInBits()/128;
- unsigned NumLaneElts = NumElts/NumLanes;
+ unsigned NumEltsInStride = NumElts/NumOf256BitLanes;
+ unsigned NumLaneElts = NumEltsInStride/NumLanes;
- for (unsigned l = 0; l != NumLanes; ++l) {
- for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2;
- i != (l+1)*NumLaneElts; i += 2, ++j) {
- int BitI = Mask[i];
- int BitI1 = Mask[i+1];
- if (!isUndefOrEqual(BitI, j))
- return false;
- if (V2IsSplat) {
- if (isUndefOrEqual(BitI1, NumElts))
+ for (unsigned l256 = 0; l256 < NumOf256BitLanes; l256 += 1) {
+ for (unsigned l = 0; l != NumEltsInStride; l += NumLaneElts) {
+ for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
+ int BitI = Mask[l256*NumEltsInStride+l+i];
+ int BitI1 = Mask[l256*NumEltsInStride+l+i+1];
+ if (!isUndefOrEqual(BitI, j+l256*NumElts))
return false;
- } else {
- if (!isUndefOrEqual(BitI1, j+NumElts))
+ if (V2IsSplat && !isUndefOrEqual(BitI1, NumElts))
+ return false;
+ if (!isUndefOrEqual(BitI1, j+l256*NumElts+NumEltsInStride))
return false;
}
}
@@ -3747,10 +3999,12 @@ static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT,
/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
-static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) {
+static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
unsigned NumElts = VT.getVectorNumElements();
bool Is256BitVec = VT.is256BitVector();
+ if (VT.is512BitVector())
+ return false;
assert((VT.is128BitVector() || VT.is256BitVector()) &&
"Unsupported vector type for unpckh");
@@ -3770,12 +4024,10 @@ static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) {
unsigned NumLanes = VT.getSizeInBits()/128;
unsigned NumLaneElts = NumElts/NumLanes;
- for (unsigned l = 0; l != NumLanes; ++l) {
- for (unsigned i = l*NumLaneElts, j = l*NumLaneElts;
- i != (l+1)*NumLaneElts;
- i += 2, ++j) {
- int BitI = Mask[i];
- int BitI1 = Mask[i+1];
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
+ int BitI = Mask[l+i];
+ int BitI1 = Mask[l+i+1];
if (!isUndefOrEqual(BitI, j))
return false;
@@ -3790,9 +4042,12 @@ static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) {
/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>
-static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) {
+static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
unsigned NumElts = VT.getVectorNumElements();
+ if (VT.is512BitVector())
+ return false;
+
assert((VT.is128BitVector() || VT.is256BitVector()) &&
"Unsupported vector type for unpckh");
@@ -3805,11 +4060,10 @@ static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) {
unsigned NumLanes = VT.getSizeInBits()/128;
unsigned NumLaneElts = NumElts/NumLanes;
- for (unsigned l = 0; l != NumLanes; ++l) {
- for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2;
- i != (l+1)*NumLaneElts; i += 2, ++j) {
- int BitI = Mask[i];
- int BitI1 = Mask[i+1];
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
+ int BitI = Mask[l+i];
+ int BitI1 = Mask[l+i+1];
if (!isUndefOrEqual(BitI, j))
return false;
if (!isUndefOrEqual(BitI1, j))
@@ -3846,7 +4100,7 @@ static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
/// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
/// The first half comes from the second half of V1 and the second half from
/// the second half of V2.
-static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasFp256) {
+static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
if (!HasFp256 || !VT.is256BitVector())
return false;
@@ -3878,7 +4132,7 @@ static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasFp256) {
/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
- MVT VT = SVOp->getValueType(0).getSimpleVT();
+ MVT VT = SVOp->getSimpleValueType(0);
unsigned HalfSize = VT.getVectorNumElements()/2;
@@ -3899,6 +4153,44 @@ static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
return (FstHalf | (SndHalf << 4));
}
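[Editor's illustration, not part of the patch] A worked example for the immediate computed above: the mask <4,5,6,7, 12,13,14,15> from the comment selects the high half of V1 and the high half of V2, i.e. half selectors 1 and 3.

unsigned vperm2x128_imm_example() {
  unsigned FstHalf = 1;             // destination low 128 bits  <- V1 high half
  unsigned SndHalf = 3;             // destination high 128 bits <- V2 high half
  return FstHalf | (SndHalf << 4);  // == 0x31
}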
+// Symmetric in-lane mask. Each lane has 4 elements (for imm8)
+static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ if (EltSize < 32)
+ return false;
+
+ unsigned NumElts = VT.getVectorNumElements();
+ Imm8 = 0;
+ if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
+ for (unsigned i = 0; i != NumElts; ++i) {
+ if (Mask[i] < 0)
+ continue;
+ Imm8 |= Mask[i] << (i*2);
+ }
+ return true;
+ }
+
+ unsigned LaneSize = 4;
+ SmallVector<int, 4> MaskVal(LaneSize, -1);
+
+ for (unsigned l = 0; l != NumElts; l += LaneSize) {
+ for (unsigned i = 0; i != LaneSize; ++i) {
+ if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
+ return false;
+ if (Mask[i+l] < 0)
+ continue;
+ if (MaskVal[i] < 0) {
+ MaskVal[i] = Mask[i+l] - l;
+ Imm8 |= MaskVal[i] << (i*2);
+ continue;
+ }
+ if (Mask[i+l] != (signed)(MaskVal[i]+l))
+ return false;
+ }
+ }
+ return true;
+}
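[Editor's illustration, not part of the patch] A worked encoding for the helper above: a v8f32 shuffle whose per-lane pattern is <2,1,0,3> packs two bits per element into imm8, giving 0xC6.

#include <cstdio>

int main() {
  const int Lane[4] = {2, 1, 0, 3}; // same pattern in both 128-bit lanes
  unsigned Imm8 = 0;
  for (unsigned i = 0; i != 4; ++i)
    Imm8 |= Lane[i] << (i * 2);
  std::printf("imm8 = 0x%02X\n", Imm8); // prints imm8 = 0xC6
  return 0;
}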
+
/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
/// Note that VPERMIL mask matching is different depending on whether the underlying
@@ -3906,38 +4198,39 @@ static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
/// to the same elements of the low, but to the higher half of the source.
/// In VPERMILPD the two lanes could be shuffled independently of each other
/// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
-static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasFp256) {
- if (!HasFp256)
+static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ if (VT.getSizeInBits() < 256 || EltSize < 32)
return false;
-
+ bool symetricMaskRequired = (EltSize == 32);
unsigned NumElts = VT.getVectorNumElements();
- // Only match 256-bit with 32/64-bit types
- if (!VT.is256BitVector() || (NumElts != 4 && NumElts != 8))
- return false;
unsigned NumLanes = VT.getSizeInBits()/128;
unsigned LaneSize = NumElts/NumLanes;
+ // 2 or 4 elements in one lane
+
+ SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
for (unsigned l = 0; l != NumElts; l += LaneSize) {
for (unsigned i = 0; i != LaneSize; ++i) {
if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
return false;
- if (NumElts != 8 || l == 0)
- continue;
- // VPERMILPS handling
- if (Mask[i] < 0)
- continue;
- if (!isUndefOrEqual(Mask[i+l], Mask[i]+l))
- return false;
+ if (symetricMaskRequired) {
+ if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
+ ExpectedMaskVal[i] = Mask[i+l] - l;
+ continue;
+ }
+ if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
+ return false;
+ }
}
}
-
return true;
}
/// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
/// what x86 movss wants: the lowest element must be the lowest element of
/// vector 2, and the other elements must come from vector 1 in order.
-static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT,
+static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
bool V2IsSplat = false, bool V2IsUndef = false) {
if (!VT.is128BitVector())
return false;
@@ -3961,7 +4254,7 @@ static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT,
/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
-static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT,
+static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
const X86Subtarget *Subtarget) {
if (!Subtarget->hasSSE3())
return false;
@@ -3969,7 +4262,8 @@ static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT,
unsigned NumElems = VT.getVectorNumElements();
if ((VT.is128BitVector() && NumElems != 4) ||
- (VT.is256BitVector() && NumElems != 8))
+ (VT.is256BitVector() && NumElems != 8) ||
+ (VT.is512BitVector() && NumElems != 16))
return false;
// "i+1" is the value the indexed mask element must have
@@ -3984,7 +4278,7 @@ static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT,
/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
-static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT,
+static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
const X86Subtarget *Subtarget) {
if (!Subtarget->hasSSE3())
return false;
@@ -3992,7 +4286,8 @@ static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT,
unsigned NumElems = VT.getVectorNumElements();
if ((VT.is128BitVector() && NumElems != 4) ||
- (VT.is256BitVector() && NumElems != 8))
+ (VT.is256BitVector() && NumElems != 8) ||
+ (VT.is512BitVector() && NumElems != 16))
return false;
// "i" is the value the indexed mask element must have
@@ -4007,7 +4302,7 @@ static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT,
/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to 256-bit
/// version of MOVDDUP.
-static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasFp256) {
+static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
if (!HasFp256 || !VT.is256BitVector())
return false;
@@ -4027,7 +4322,7 @@ static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasFp256) {
/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to 128-bit
/// version of MOVDDUP.
-static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) {
+static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
if (!VT.is128BitVector())
return false;
@@ -4041,49 +4336,66 @@ static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) {
return true;
}
-/// isVEXTRACTF128Index - Return true if the specified
+/// isVEXTRACTIndex - Return true if the specified
/// EXTRACT_SUBVECTOR operand specifies a vector extract that is
-/// suitable for input to VEXTRACTF128.
-bool X86::isVEXTRACTF128Index(SDNode *N) {
+/// suitable for instruction that extract 128 or 256 bit vectors
+static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
+ assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
return false;
- // The index should be aligned on a 128-bit boundary.
+ // The index should be aligned on a vecWidth-bit boundary.
uint64_t Index =
cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
- MVT VT = N->getValueType(0).getSimpleVT();
+ MVT VT = N->getSimpleValueType(0);
unsigned ElSize = VT.getVectorElementType().getSizeInBits();
- bool Result = (Index * ElSize) % 128 == 0;
+ bool Result = (Index * ElSize) % vecWidth == 0;
return Result;
}
-/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR
+/// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
/// operand specifies a subvector insert that is suitable for input to
-/// VINSERTF128.
-bool X86::isVINSERTF128Index(SDNode *N) {
+/// insertion of 128 or 256-bit subvectors
+static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
+ assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
return false;
-
- // The index should be aligned on a 128-bit boundary.
+ // The index should be aligned on a vecWidth-bit boundary.
uint64_t Index =
cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
- MVT VT = N->getValueType(0).getSimpleVT();
+ MVT VT = N->getSimpleValueType(0);
unsigned ElSize = VT.getVectorElementType().getSizeInBits();
- bool Result = (Index * ElSize) % 128 == 0;
+ bool Result = (Index * ElSize) % vecWidth == 0;
return Result;
}
+bool X86::isVINSERT128Index(SDNode *N) {
+ return isVINSERTIndex(N, 128);
+}
+
+bool X86::isVINSERT256Index(SDNode *N) {
+ return isVINSERTIndex(N, 256);
+}
+
+bool X86::isVEXTRACT128Index(SDNode *N) {
+ return isVEXTRACTIndex(N, 128);
+}
+
+bool X86::isVEXTRACT256Index(SDNode *N) {
+ return isVEXTRACTIndex(N, 256);
+}
+
/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
/// Handles 128-bit and 256-bit.
static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
- MVT VT = N->getValueType(0).getSimpleVT();
+ MVT VT = N->getSimpleValueType(0);
- assert((VT.is128BitVector() || VT.is256BitVector()) &&
+ assert((VT.getSizeInBits() >= 128) &&
"Unsupported vector type for PSHUF/SHUFP");
// Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
@@ -4092,10 +4404,10 @@ static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
unsigned NumLanes = VT.getSizeInBits()/128;
unsigned NumLaneElts = NumElts/NumLanes;
- assert((NumLaneElts == 2 || NumLaneElts == 4) &&
- "Only supports 2 or 4 elements per lane");
+ assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
+ "Only supports 2, 4 or 8 elements per lane");
- unsigned Shift = (NumLaneElts == 4) ? 1 : 0;
+ unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
unsigned Mask = 0;
for (unsigned i = 0; i != NumElts; ++i) {
int Elt = N->getMaskElt(i);
@@ -4111,7 +4423,7 @@ static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
- MVT VT = N->getValueType(0).getSimpleVT();
+ MVT VT = N->getSimpleValueType(0);
assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
"Unsupported vector type for PSHUFHW");
@@ -4135,7 +4447,7 @@ static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
- MVT VT = N->getValueType(0).getSimpleVT();
+ MVT VT = N->getSimpleValueType(0);
assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
"Unsupported vector type for PSHUFHW");
@@ -4159,11 +4471,12 @@ static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
- MVT VT = SVOp->getValueType(0).getSimpleVT();
- unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3;
+ MVT VT = SVOp->getSimpleValueType(0);
+ unsigned EltSize = VT.is512BitVector() ? 1 :
+ VT.getVectorElementType().getSizeInBits() >> 3;
unsigned NumElts = VT.getVectorNumElements();
- unsigned NumLanes = VT.getSizeInBits()/128;
+ unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
unsigned NumLaneElts = NumElts/NumLanes;
int Val = 0;
@@ -4180,61 +4493,64 @@ static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
return (Val - i) * EltSize;
}
-/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate
-/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
-/// instructions.
-unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) {
+static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
+ assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
- llvm_unreachable("Illegal extract subvector for VEXTRACTF128");
+ llvm_unreachable("Illegal extract subvector for VEXTRACT");
uint64_t Index =
cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
- MVT VecVT = N->getOperand(0).getValueType().getSimpleVT();
+ MVT VecVT = N->getOperand(0).getSimpleValueType();
MVT ElVT = VecVT.getVectorElementType();
- unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits();
+ unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
return Index / NumElemsPerChunk;
}
-/// getInsertVINSERTF128Immediate - Return the appropriate immediate
-/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
-/// instructions.
-unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) {
+static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
+ assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
- llvm_unreachable("Illegal insert subvector for VINSERTF128");
+ llvm_unreachable("Illegal insert subvector for VINSERT");
uint64_t Index =
cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
- MVT VecVT = N->getValueType(0).getSimpleVT();
+ MVT VecVT = N->getSimpleValueType(0);
MVT ElVT = VecVT.getVectorElementType();
- unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits();
+ unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
return Index / NumElemsPerChunk;
}
-/// getShuffleCLImmediate - Return the appropriate immediate to shuffle
-/// the specified VECTOR_SHUFFLE mask with VPERMQ and VPERMPD instructions.
-/// Handles 256-bit.
-static unsigned getShuffleCLImmediate(ShuffleVectorSDNode *N) {
- MVT VT = N->getValueType(0).getSimpleVT();
-
- unsigned NumElts = VT.getVectorNumElements();
+/// getExtractVEXTRACT128Immediate - Return the appropriate immediate
+/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
+/// and VEXTRACTI128 instructions.
+unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
+ return getExtractVEXTRACTImmediate(N, 128);
+}
- assert((VT.is256BitVector() && NumElts == 4) &&
- "Unsupported vector type for VPERMQ/VPERMPD");
+/// getExtractVEXTRACT256Immediate - Return the appropriate immediate
+/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
+/// and VEXTRACTI64x4 instructions.
+unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
+ return getExtractVEXTRACTImmediate(N, 256);
+}
- unsigned Mask = 0;
- for (unsigned i = 0; i != NumElts; ++i) {
- int Elt = N->getMaskElt(i);
- if (Elt < 0)
- continue;
- Mask |= Elt << (i*2);
- }
+/// getInsertVINSERT128Immediate - Return the appropriate immediate
+/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
+/// and VINSERTI128 instructions.
+unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
+ return getInsertVINSERTImmediate(N, 128);
+}
- return Mask;
+/// getInsertVINSERT256Immediate - Return the appropriate immediate
+/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
+/// and VINSERTI64x4 instructions.
+unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
+ return getInsertVINSERTImmediate(N, 256);
}
+
/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
@@ -4249,7 +4565,7 @@ bool X86::isZeroNode(SDValue Elt) {
/// their permute mask.
static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
SelectionDAG &DAG) {
- MVT VT = SVOp->getValueType(0).getSimpleVT();
+ MVT VT = SVOp->getSimpleValueType(0);
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> MaskVec;
@@ -4263,7 +4579,7 @@ static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
}
MaskVec.push_back(Idx);
}
- return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1),
+ return DAG.getVectorShuffle(VT, SDLoc(SVOp), SVOp->getOperand(1),
SVOp->getOperand(0), &MaskVec[0]);
}
@@ -4271,7 +4587,7 @@ static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
/// match movhlps. The lower half elements should come from upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
-static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, EVT VT) {
+static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
if (!VT.is128BitVector())
return false;
if (VT.getVectorNumElements() != 4)
@@ -4328,7 +4644,7 @@ static bool WillBeConstantPoolLoad(SDNode *N) {
/// half of V2 (and in order). And since V1 will become the source of the
/// MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
- ArrayRef<int> Mask, EVT VT) {
+ ArrayRef<int> Mask, MVT VT) {
if (!VT.is128BitVector())
return false;
@@ -4396,7 +4712,7 @@ static bool isZeroShuffle(ShuffleVectorSDNode *N) {
/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
- SelectionDAG &DAG, DebugLoc dl) {
+ SelectionDAG &DAG, SDLoc dl) {
assert(VT.isVector() && "Expected a vector type");
// Always build SSE zero vectors as <4 x i32> bitcasted
@@ -4424,6 +4740,11 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops,
array_lengthof(Ops));
}
+ } else if (VT.is512BitVector()) { // AVX-512
+ SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
+ SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
+ Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops, 16);
} else
llvm_unreachable("Unexpected vector type");
@@ -4435,7 +4756,7 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
/// no AVX2 support, use two <4 x i32> inserted in a <8 x i32> appropriately.
/// Then bitcast to their original type, ensuring they get CSE'd.
static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
- DebugLoc dl) {
+ SDLoc dl) {
assert(VT.isVector() && "Expected a vector type");
SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
@@ -4469,7 +4790,7 @@ static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
/// operation of specified width.
-static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
+static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> Mask;
@@ -4480,7 +4801,7 @@ static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
}
/// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
-static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
+static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> Mask;
@@ -4492,7 +4813,7 @@ static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
}
/// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
-static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
+static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> Mask;
@@ -4508,9 +4829,9 @@ static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
// Generate shuffles which repeat i16 and i8 several times until they can be
// represented by v4f32 and then be manipulated by target supported shuffles.
static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
- EVT VT = V.getValueType();
+ MVT VT = V.getSimpleValueType();
int NumElems = VT.getVectorNumElements();
- DebugLoc dl = V.getDebugLoc();
+ SDLoc dl(V);
while (NumElems > 4) {
if (EltNo < NumElems/2) {
@@ -4526,8 +4847,8 @@ static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
/// getLegalSplat - Generate a legal splat with supported x86 shuffles
static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
- EVT VT = V.getValueType();
- DebugLoc dl = V.getDebugLoc();
+ MVT VT = V.getSimpleValueType();
+ SDLoc dl(V);
if (VT.is128BitVector()) {
V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
@@ -4552,9 +4873,9 @@ static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
/// PromoteSplat - Splat is promoted to target supported vector shuffles.
static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
- EVT SrcVT = SV->getValueType(0);
+ MVT SrcVT = SV->getSimpleValueType(0);
SDValue V1 = SV->getOperand(0);
- DebugLoc dl = SV->getDebugLoc();
+ SDLoc dl(SV);
int EltNo = SV->getSplatIndex();
int NumElems = SrcVT.getVectorNumElements();
@@ -4575,7 +4896,7 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
// instruction because the target has no such instruction. Generate shuffles
// which repeat i16 and i8 several times until they fit in i32, and then can
// be manipulated by target supported shuffles.
- EVT EltVT = SrcVT.getVectorElementType();
+ MVT EltVT = SrcVT.getVectorElementType();
if (EltVT == MVT::i8 || EltVT == MVT::i16)
V1 = PromoteSplati8i16(V1, DAG, EltNo);
@@ -4597,15 +4918,15 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
bool IsZero,
const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
- EVT VT = V2.getValueType();
+ MVT VT = V2.getSimpleValueType();
SDValue V1 = IsZero
- ? getZeroVector(VT, Subtarget, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
+ ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 16> MaskVec;
for (unsigned i = 0; i != NumElems; ++i)
// If this is the insertion idx, put the low elt of V2 here.
MaskVec.push_back(i == Idx ? NumElems : i);
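// e.g. with NumElems == 4 and Idx == 2 this builds the mask <0, 1, 4, 3>,
// i.e. only lane 2 is taken from V2 (its element 0).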
- return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]);
+ return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
}
/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
@@ -4715,7 +5036,7 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
// Recurse into target specific vector shuffles to find scalars.
if (isTargetShuffle(Opcode)) {
- MVT ShufVT = V.getValueType().getSimpleVT();
+ MVT ShufVT = V.getSimpleValueType();
unsigned NumElems = ShufVT.getVectorNumElements();
SmallVector<int, 16> ShuffleMask;
bool IsUnary;
@@ -4756,19 +5077,27 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
/// getNumOfConsecutiveZeros - Return the number of elements of a vector
/// shuffle operation that are consecutively zero. The
/// search can start in two different directions, from left or right.
-static
-unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, unsigned NumElems,
- bool ZerosFromLeft, SelectionDAG &DAG) {
- unsigned i;
- for (i = 0; i != NumElems; ++i) {
- unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
+/// We count undefs as zeros until PreferredNum is reached.
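+/// With the default PreferredNum of -1U, undefs are effectively always counted
+/// as zeros.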
+static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
+ unsigned NumElems, bool ZerosFromLeft,
+ SelectionDAG &DAG,
+ unsigned PreferredNum = -1U) {
+ unsigned NumZeros = 0;
+ for (unsigned i = 0; i != NumElems; ++i) {
+ unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
- if (!(Elt.getNode() &&
- (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
+ if (!Elt.getNode())
+ break;
+
+ if (X86::isZeroNode(Elt))
+ ++NumZeros;
+ else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
+ NumZeros = std::min(NumZeros + 1, PreferredNum);
+ else
break;
}
- return i;
+ return NumZeros;
}
/// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
@@ -4805,9 +5134,11 @@ bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
/// logical right shift of a vector.
static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
- unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
- unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
- false /* check zeros from right */, DAG);
+ unsigned NumElems =
+ SVOp->getSimpleValueType(0).getVectorNumElements();
+ unsigned NumZeros = getNumOfConsecutiveZeros(
+ SVOp, NumElems, false /* check zeros from right */, DAG,
+ SVOp->getMaskElt(0));
unsigned OpSrc;
if (!NumZeros)
@@ -4838,9 +5169,11 @@ static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
/// logical left shift of a vector.
static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
- unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
- unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
- true /* check zeros from left */, DAG);
+ unsigned NumElems =
+ SVOp->getSimpleValueType(0).getVectorNumElements();
+ unsigned NumZeros = getNumOfConsecutiveZeros(
+ SVOp, NumElems, true /* check zeros from left */, DAG,
+ NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
unsigned OpSrc;
if (!NumZeros)
@@ -4873,7 +5206,7 @@ static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
// Although the logic below supports any bit width, there are no
// shift instructions which handle more than 128-bit vectors.
- if (!SVOp->getValueType(0).is128BitVector())
+ if (!SVOp->getSimpleValueType(0).is128BitVector())
return false;
if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
@@ -4893,7 +5226,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
if (NumNonZero > 8)
return SDValue();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue V(0, 0);
bool First = true;
for (unsigned i = 0; i < 16; ++i) {
@@ -4941,7 +5274,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
if (NumNonZero > 4)
return SDValue();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue V(0, 0);
bool First = true;
for (unsigned i = 0; i < 8; ++i) {
@@ -4967,7 +5300,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
///
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
unsigned NumBits, SelectionDAG &DAG,
- const TargetLowering &TLI, DebugLoc dl) {
+ const TargetLowering &TLI, SDLoc dl) {
assert(VT.is128BitVector() && "Unknown type for VShift");
EVT ShVT = MVT::v2i64;
unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
@@ -4978,9 +5311,8 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
TLI.getScalarShiftAmountTy(SrcOp.getValueType()))));
}
-SDValue
-X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
- SelectionDAG &DAG) const {
+static SDValue
+LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
// Check if the scalar load can be widened into a vector load. And if
// the address is "base + cst" see if the cst can be "absorbed" into
@@ -5032,7 +5364,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
return SDValue();
int64_t StartOffset = Offset & ~(RequiredAlign-1);
if (StartOffset)
- Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(),
+ Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
int EltNo = (Offset - StartOffset) >> 2;
@@ -5063,7 +5395,8 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
/// There's even a handy isZeroNode for that purpose.
static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
- DebugLoc &DL, SelectionDAG &DAG) {
+ SDLoc &DL, SelectionDAG &DAG,
+ bool isAfterLegalize) {
EVT EltVT = VT.getVectorElementType();
unsigned NumElems = Elts.size();
@@ -5099,15 +5432,33 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
// load of the entire vector width starting at the base pointer. If we found
// consecutive loads for the low half, generate a vzext_load node.
if (LastLoadedElt == NumElems - 1) {
+
+ if (isAfterLegalize &&
+ !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
+ return SDValue();
+
+ SDValue NewLd = SDValue();
+
if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16)
- return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
- LDBase->getPointerInfo(),
- LDBase->isVolatile(), LDBase->isNonTemporal(),
- LDBase->isInvariant(), 0);
- return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
- LDBase->getPointerInfo(),
- LDBase->isVolatile(), LDBase->isNonTemporal(),
- LDBase->isInvariant(), LDBase->getAlignment());
+ NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
+ LDBase->getPointerInfo(),
+ LDBase->isVolatile(), LDBase->isNonTemporal(),
+ LDBase->isInvariant(), 0);
+ NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
+ LDBase->getPointerInfo(),
+ LDBase->isVolatile(), LDBase->isNonTemporal(),
+ LDBase->isInvariant(), LDBase->getAlignment());
+
+ if (LDBase->hasAnyUseOfValue(1)) {
+ SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+ SDValue(LDBase, 1),
+ SDValue(NewLd.getNode(), 1));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
+ DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
+ SDValue(NewLd.getNode(), 1));
+ }
+
+ return NewLd;
}
if (NumElems == 4 && LastLoadedElt == 1 &&
DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
@@ -5144,15 +5495,15 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
/// a scalar load, or a constant.
/// The VBROADCAST node is returned when a pattern is found,
/// or SDValue() otherwise.
-SDValue
-X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
+ SelectionDAG &DAG) {
if (!Subtarget->hasFp256())
return SDValue();
- MVT VT = Op.getValueType().getSimpleVT();
- DebugLoc dl = Op.getDebugLoc();
+ MVT VT = Op.getSimpleValueType();
+ SDLoc dl(Op);
- assert((VT.is128BitVector() || VT.is256BitVector()) &&
+ assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
"Unsupported vector type for broadcast.");
SDValue Ld;
@@ -5196,7 +5547,7 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
// Use the register form of the broadcast instruction available on AVX2.
- if (VT.is256BitVector())
+ if (VT.getSizeInBits() >= 256)
Sc = Extract128BitVector(Sc, 0, DAG, dl);
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
}
@@ -5208,13 +5559,18 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const {
// The scalar_to_vector node and the suspected
// load node must have exactly one user.
// Constants may have multiple users.
- if (!ConstSplatVal && (!Sc.hasOneUse() || !Ld.hasOneUse()))
+
+ // AVX-512 has a register version of the broadcast.
+ bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
+ Ld.getValueType().getSizeInBits() >= 32;
+ if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
+ !hasRegVer))
return SDValue();
break;
}
}
- bool Is256 = VT.is256BitVector();
+ bool IsGE256 = (VT.getSizeInBits() >= 256);
// Handle the broadcasting a single constant scalar from the constant pool
// into a vector. On Sandybridge it is still better to load a constant vector
@@ -5224,7 +5580,7 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const {
assert(!CVT.isVector() && "Must not broadcast a vector type");
unsigned ScalarSize = CVT.getSizeInBits();
- if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) {
+ if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)) {
const Constant *C = 0;
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
C = CI->getConstantIntValue();
@@ -5233,7 +5589,8 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const {
assert(C && "Invalid constant type");
- SDValue CP = DAG.getConstantPool(C, getPointerTy());
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
MachinePointerInfo::getConstantPool(),
@@ -5248,14 +5605,14 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const {
// Handle AVX2 in-register broadcasts.
if (!IsLoad && Subtarget->hasInt256() &&
- (ScalarSize == 32 || (Is256 && ScalarSize == 64)))
+ (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
// The scalar source must be a normal load.
if (!IsLoad)
return SDValue();
- if (ScalarSize == 32 || (Is256 && ScalarSize == 64))
+ if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
// The integer check is needed for the 64-bit into 128-bit so it doesn't match
@@ -5269,15 +5626,15 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
-SDValue
-X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
+static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
// Skip if insert_vec_elt is not supported.
- if (!isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
return SDValue();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
unsigned NumElems = Op.getNumOperands();
SDValue VecIn1;
@@ -5343,19 +5700,128 @@ X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const {
return NV;
}
+// Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
+SDValue
+X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
+
+ MVT VT = Op.getSimpleValueType();
+ assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
+ "Unexpected type in LowerBUILD_VECTORvXi1!");
+
+ SDLoc dl(Op);
+ if (ISD::isBuildVectorAllZeros(Op.getNode())) {
+ SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
+ SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
+ Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT,
+ Ops, VT.getVectorNumElements());
+ }
+
+ if (ISD::isBuildVectorAllOnes(Op.getNode())) {
+ SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
+ SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
+ Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT,
+ Ops, VT.getVectorNumElements());
+ }
+
+ bool AllContants = true;
+ uint64_t Immediate = 0;
+ for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
+ SDValue In = Op.getOperand(idx);
+ if (In.getOpcode() == ISD::UNDEF)
+ continue;
+ if (!isa<ConstantSDNode>(In)) {
+ AllContants = false;
+ break;
+ }
+ if (cast<ConstantSDNode>(In)->getZExtValue())
+ Immediate |= (1ULL << idx);
+ }
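+ // Illustration: operands <1, 0, undef, 1, ...> set bits 0 and 3 above, giving
+ // Immediate == 0b1001 == 9.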
+
+ if (AllContants) {
+ SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
+ DAG.getConstant(Immediate, MVT::i16));
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
+ DAG.getIntPtrConstant(0));
+ }
+
+ // Splat vector (with undefs)
+ SDValue In = Op.getOperand(0);
+ for (unsigned i = 1, e = Op.getNumOperands(); i != e; ++i) {
+ if (Op.getOperand(i) != In && Op.getOperand(i).getOpcode() != ISD::UNDEF)
+ llvm_unreachable("Unsupported predicate operation");
+ }
+
+ SDValue EFLAGS, X86CC;
+ if (In.getOpcode() == ISD::SETCC) {
+ SDValue Op0 = In.getOperand(0);
+ SDValue Op1 = In.getOperand(1);
+ ISD::CondCode CC = cast<CondCodeSDNode>(In.getOperand(2))->get();
+ bool isFP = Op1.getValueType().isFloatingPoint();
+ unsigned X86CCVal = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
+
+ assert(X86CCVal != X86::COND_INVALID && "Unsupported predicate operation");
+
+ X86CC = DAG.getConstant(X86CCVal, MVT::i8);
+ EFLAGS = EmitCmp(Op0, Op1, X86CCVal, DAG);
+ EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
+ } else if (In.getOpcode() == X86ISD::SETCC) {
+ X86CC = In.getOperand(0);
+ EFLAGS = In.getOperand(1);
+ } else {
+ // The algorithm:
+ // Bit1 = In & 0x1
+ // if (Bit1 != 0)
+ // ZF = 0
+ // else
+ // ZF = 1
+ // if (ZF == 0)
+ // res = allOnes ### CMOVNE -1, %res
+ // else
+ // res = allZero
+ MVT InVT = In.getSimpleValueType();
+ SDValue Bit1 = DAG.getNode(ISD::AND, dl, InVT, In, DAG.getConstant(1, InVT));
+ EFLAGS = EmitTest(Bit1, X86::COND_NE, DAG);
+ X86CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+ }
+
+ if (VT == MVT::v16i1) {
+ SDValue Cst1 = DAG.getConstant(-1, MVT::i16);
+ SDValue Cst0 = DAG.getConstant(0, MVT::i16);
+ SDValue CmovOp = DAG.getNode(X86ISD::CMOV, dl, MVT::i16,
+ Cst0, Cst1, X86CC, EFLAGS);
+ return DAG.getNode(ISD::BITCAST, dl, VT, CmovOp);
+ }
+
+ if (VT == MVT::v8i1) {
+ SDValue Cst1 = DAG.getConstant(-1, MVT::i32);
+ SDValue Cst0 = DAG.getConstant(0, MVT::i32);
+ SDValue CmovOp = DAG.getNode(X86ISD::CMOV, dl, MVT::i32,
+ Cst0, Cst1, X86CC, EFLAGS);
+ CmovOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CmovOp);
+ return DAG.getNode(ISD::BITCAST, dl, VT, CmovOp);
+ }
+ llvm_unreachable("Unsupported predicate operation");
+}
+
SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
- MVT VT = Op.getValueType().getSimpleVT();
+ MVT VT = Op.getSimpleValueType();
MVT ExtVT = VT.getVectorElementType();
unsigned NumElems = Op.getNumOperands();
+ // Generate vectors for predicate vectors.
+ if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
+ return LowerBUILD_VECTORvXi1(Op, DAG);
+
// Vectors containing all zeros can be matched by pxor and xorps later
if (ISD::isBuildVectorAllZeros(Op.getNode())) {
// Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
// and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
- if (VT == MVT::v4i32 || VT == MVT::v8i32)
+ if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
return Op;
return getZeroVector(VT, Subtarget, DAG, dl);
@@ -5368,10 +5834,11 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
return Op;
- return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
+ if (!VT.is512BitVector())
+ return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
}
- SDValue Broadcast = LowerVectorBroadcast(Op, DAG);
+ SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
if (Broadcast.getNode())
return Broadcast;
@@ -5404,7 +5871,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// Special case for single non-zero, non-undef, element.
if (NumNonZero == 1) {
- unsigned Idx = CountTrailingZeros_32(NonZeros);
+ unsigned Idx = countTrailingZeros(NonZeros);
SDValue Item = Op.getOperand(Idx);
// If this is an insertion of an i64 value on x86-32, and if the top bits of
@@ -5450,7 +5917,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
(ExtVT == MVT::i64 && Subtarget->is64Bit())) {
- if (VT.is256BitVector()) {
+ if (VT.is256BitVector() || VT.is512BitVector()) {
SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
Item, DAG.getIntPtrConstant(0));
@@ -5513,7 +5980,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
// Check if it's possible to issue this instead.
// shuffle (vload ptr)), undef, <1, 1, 1, 1>
- unsigned Idx = CountTrailingZeros_32(NonZeros);
+ unsigned Idx = countTrailingZeros(NonZeros);
SDValue Item = Op.getOperand(Idx);
if (Op.getNode()->isOnlyUserOf(Item.getNode()))
return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
@@ -5548,7 +6015,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (EVTBits == 64) {
if (NumNonZero == 1) {
// One half is zero or undef.
- unsigned Idx = CountTrailingZeros_32(NonZeros);
+ unsigned Idx = countTrailingZeros(NonZeros);
SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
Op.getOperand(Idx));
return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
@@ -5615,7 +6082,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
V[i] = Op.getOperand(i);
// Check for elements which are consecutive loads.
- SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG);
+ SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
if (LD.getNode())
return LD;
@@ -5678,22 +6145,25 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
// to create 256-bit vectors from two other 128-bit ones.
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
- MVT ResVT = Op.getValueType().getSimpleVT();
+ SDLoc dl(Op);
+ MVT ResVT = Op.getSimpleValueType();
- assert(ResVT.is256BitVector() && "Value type must be 256-bit wide");
+ assert((ResVT.is256BitVector() ||
+ ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
unsigned NumElems = ResVT.getVectorNumElements();
+ if (ResVT.is256BitVector())
+ return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
- return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
+ return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
}
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
assert(Op.getNumOperands() == 2);
- // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors
+ // AVX/AVX-512 can use the vinsertf128 instruction to create 256-bit vectors
// from two other 128-bit ones.
return LowerAVXCONCAT_VECTORS(Op, DAG);
}
@@ -5704,11 +6174,15 @@ LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
const X86Subtarget *Subtarget, SelectionDAG &DAG) {
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
- DebugLoc dl = SVOp->getDebugLoc();
- MVT VT = SVOp->getValueType(0).getSimpleVT();
+ SDLoc dl(SVOp);
+ MVT VT = SVOp->getSimpleValueType(0);
MVT EltVT = VT.getVectorElementType();
unsigned NumElems = VT.getVectorNumElements();
+ // There is no blend with immediate in AVX-512.
+ if (VT.is512BitVector())
+ return SDValue();
+
if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
return SDValue();
if (!Subtarget->hasInt256() && VT == MVT::v16i16)
@@ -5765,7 +6239,7 @@ LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
- DebugLoc dl = SVOp->getDebugLoc();
+ SDLoc dl(SVOp);
SmallVector<int, 8> MaskVals;
// Determine if more than 1 of the words in each of the low and high quadwords
@@ -6014,13 +6488,13 @@ LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
// 1. [ssse3] 1 x pshufb
// 2. [ssse3] 2 x pshufb + 1 x por
// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
-static
-SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
- SelectionDAG &DAG,
- const X86TargetLowering &TLI) {
+static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
+ const X86Subtarget* Subtarget,
+ SelectionDAG &DAG) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
- DebugLoc dl = SVOp->getDebugLoc();
+ SDLoc dl(SVOp);
ArrayRef<int> MaskVals = SVOp->getMask();
// Promote splats to a larger type which usually leads to more efficient code.
@@ -6033,7 +6507,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
// present, fall back to case 3.
// If SSSE3, use 1 pshufb instruction per vector with elements in the result.
- if (TLI.getSubtarget()->hasSSSE3()) {
+ if (Subtarget->hasSSSE3()) {
SmallVector<SDValue,16> pshufbMask;
// If all result elements are from one input vector, then only translate
@@ -6146,10 +6620,10 @@ static
SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
- MVT VT = SVOp->getValueType(0).getSimpleVT();
+ MVT VT = SVOp->getSimpleValueType(0);
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
- DebugLoc dl = SVOp->getDebugLoc();
+ SDLoc dl(SVOp);
SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
@@ -6194,8 +6668,8 @@ SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
SelectionDAG &DAG) {
- MVT VT = SVOp->getValueType(0).getSimpleVT();
- DebugLoc dl = SVOp->getDebugLoc();
+ MVT VT = SVOp->getSimpleValueType(0);
+ SDLoc dl(SVOp);
unsigned NumElems = VT.getVectorNumElements();
MVT NewVT;
unsigned Scale;
@@ -6231,9 +6705,9 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
/// getVZextMovL - Return a zero-extending vector move low node.
///
-static SDValue getVZextMovL(MVT VT, EVT OpVT,
+static SDValue getVZextMovL(MVT VT, MVT OpVT,
SDValue SrcOp, SelectionDAG &DAG,
- const X86Subtarget *Subtarget, DebugLoc dl) {
+ const X86Subtarget *Subtarget, SDLoc dl) {
if (VT == MVT::v2f64 || VT == MVT::v4f32) {
LoadSDNode *LD = NULL;
if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
@@ -6273,12 +6747,12 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
if (NewOp.getNode())
return NewOp;
- MVT VT = SVOp->getValueType(0).getSimpleVT();
+ MVT VT = SVOp->getSimpleValueType(0);
unsigned NumElems = VT.getVectorNumElements();
unsigned NumLaneElems = NumElems / 2;
- DebugLoc dl = SVOp->getDebugLoc();
+ SDLoc dl(SVOp);
MVT EltVT = VT.getVectorElementType();
MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
SDValue Output[2];
@@ -6384,8 +6858,8 @@ static SDValue
LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
- DebugLoc dl = SVOp->getDebugLoc();
- MVT VT = SVOp->getValueType(0).getSimpleVT();
+ SDLoc dl(SVOp);
+ MVT VT = SVOp->getSimpleValueType(0);
assert(VT.is128BitVector() && "Unsupported vector size");
@@ -6535,8 +7009,8 @@ static bool MayFoldVectorLoad(SDValue V) {
}
static
-SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
- EVT VT = Op.getValueType();
+SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
// Canonicalize to v2f64.
V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
@@ -6546,11 +7020,11 @@ SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
}
static
-SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
+SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
bool HasSSE2) {
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
assert(VT != MVT::v2i64 && "unsupported shuffle type");
@@ -6565,10 +7039,10 @@ SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
}
static
-SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) {
+SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
"unsupported shuffle type");
@@ -6581,10 +7055,10 @@ SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) {
}
static
-SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
+SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
unsigned NumElems = VT.getVectorNumElements();
// Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
@@ -6638,20 +7112,20 @@ SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
}
// Reduce a vector shuffle to zext.
-SDValue
-X86TargetLowering::LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
// PMOVZX is only available from SSE41.
if (!Subtarget->hasSSE41())
return SDValue();
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
// Only AVX2 supports 256-bit vector integer extending.
if (!Subtarget->hasInt256() && VT.is256BitVector())
return SDValue();
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
unsigned NumElems = VT.getVectorNumElements();
@@ -6683,12 +7157,11 @@ X86TargetLowering::LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
- LLVMContext *Context = DAG.getContext();
unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
- EVT NeVT = EVT::getIntegerVT(*Context, NBits);
- EVT NVT = EVT::getVectorVT(*Context, NeVT, NumElems >> Shift);
+ MVT NeVT = MVT::getIntegerVT(NBits);
+ MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
- if (!isTypeLegal(NVT))
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
return SDValue();
// Simplify the operand as it's prepared to be fed into shuffle.
@@ -6696,8 +7169,8 @@ X86TargetLowering::LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const {
if (V1.getOpcode() == ISD::BITCAST &&
V1.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
V1.getOperand(0).getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
- V1.getOperand(0)
- .getOperand(0).getValueType().getSizeInBits() == SignificantBits) {
+ V1.getOperand(0).getOperand(0)
+ .getSimpleValueType().getSizeInBits() == SignificantBits) {
// (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
SDValue V = V1.getOperand(0).getOperand(0).getOperand(0);
ConstantSDNode *CIdx =
@@ -6706,19 +7179,19 @@ X86TargetLowering::LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const {
// selection to fold it. Otherwise, we will shorten the conversion sequence.
if (CIdx && CIdx->getZExtValue() == 0 &&
(!ISD::isNormalLoad(V.getNode()) || !V.hasOneUse())) {
- if (V.getValueSizeInBits() > V1.getValueSizeInBits()) {
+ MVT FullVT = V.getSimpleValueType();
+ MVT V1VT = V1.getSimpleValueType();
+ if (FullVT.getSizeInBits() > V1VT.getSizeInBits()) {
// The "ext_vec_elt" node is wider than the result node.
// In this case we should extract subvector from V.
// (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast (extract_subvector x)).
- unsigned Ratio = V.getValueSizeInBits() / V1.getValueSizeInBits();
- EVT FullVT = V.getValueType();
- EVT SubVecVT = EVT::getVectorVT(*Context,
- FullVT.getVectorElementType(),
+ unsigned Ratio = FullVT.getSizeInBits() / V1VT.getSizeInBits();
+ MVT SubVecVT = MVT::getVectorVT(FullVT.getVectorElementType(),
FullVT.getVectorNumElements()/Ratio);
- V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, V,
+ V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, V,
DAG.getIntPtrConstant(0));
}
- V1 = DAG.getNode(ISD::BITCAST, DL, V1.getValueType(), V);
+ V1 = DAG.getNode(ISD::BITCAST, DL, V1VT, V);
}
}
@@ -6726,11 +7199,12 @@ X86TargetLowering::LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const {
DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
}
-SDValue
-X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const {
+static SDValue
+NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- MVT VT = Op.getValueType().getSimpleVT();
- DebugLoc dl = Op.getDebugLoc();
+ MVT VT = Op.getSimpleValueType();
+ SDLoc dl(Op);
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
@@ -6740,13 +7214,13 @@ X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const {
// Handle splat operations
if (SVOp->isSplat()) {
// Use vbroadcast whenever the splat comes from a foldable load
- SDValue Broadcast = LowerVectorBroadcast(Op, DAG);
+ SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
if (Broadcast.getNode())
return Broadcast;
}
// Check integer expanding shuffles.
- SDValue NewOp = LowerVectorIntExtend(Op, DAG);
+ SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
if (NewOp.getNode())
return NewOp;
@@ -6764,7 +7238,7 @@ X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const {
if (ISD::isBuildVectorAllZeros(V2.getNode())) {
SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
if (NewOp.getNode()) {
- MVT NewVT = NewOp.getValueType().getSimpleVT();
+ MVT NewVT = NewOp.getSimpleValueType();
if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
NewVT, true, false))
return getVZextMovL(VT, NewVT, NewOp.getOperand(0),
@@ -6773,7 +7247,7 @@ X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const {
} else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
if (NewOp.getNode()) {
- MVT NewVT = NewOp.getValueType().getSimpleVT();
+ MVT NewVT = NewOp.getSimpleValueType();
if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
return getVZextMovL(VT, NewVT, NewOp.getOperand(1),
DAG, Subtarget, dl);
@@ -6788,8 +7262,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
- MVT VT = Op.getValueType().getSimpleVT();
- DebugLoc dl = Op.getDebugLoc();
+ MVT VT = Op.getSimpleValueType();
+ SDLoc dl(Op);
unsigned NumElems = VT.getVectorNumElements();
bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
@@ -6826,7 +7300,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
// Normalize the input vectors. Here splats, zeroed vectors, profitable
// narrowing and commutation of operands should be handled. The actual code
// doesn't include all of those, work in progress...
- SDValue NewOp = NormalizeVectorShuffle(Op, DAG);
+ SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
if (NewOp.getNode())
return NewOp;
@@ -6871,6 +7345,11 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
TargetMask, DAG);
}
+ if (isPALIGNRMask(M, VT, Subtarget))
+ return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
+ getShufflePALIGNRImmediate(SVOp),
+ DAG);
+
// Check if this can be converted into a logical shift.
bool isLeft = false;
unsigned ShAmt = 0;
@@ -6981,18 +7460,13 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
}
// Normalize the node to match x86 shuffle ops if needed
- if (!V2IsUndef && (isSHUFPMask(M, VT, HasFp256, /* Commuted */ true)))
+ if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
return CommuteVectorShuffle(SVOp, DAG);
// The checks below are all present in isShuffleMaskLegal, but they are
// inlined here right now to enable us to directly emit target specific
// nodes, and remove one by one until they don't return Op anymore.
- if (isPALIGNRMask(M, VT, Subtarget))
- return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
- getShufflePALIGNRImmediate(SVOp),
- DAG);
-
if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
SVOp->getSplatIndex() == 0 && V2IsUndef) {
if (VT == MVT::v2f64 || VT == MVT::v2i64)
@@ -7009,7 +7483,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
getShufflePSHUFLWImmediate(SVOp),
DAG);
- if (isSHUFPMask(M, VT, HasFp256))
+ if (isSHUFPMask(M, VT))
return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
getShuffleSHUFImmediate(SVOp), DAG);
@@ -7028,8 +7502,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
// Handle VPERMILPS/D* permutations
- if (isVPERMILPMask(M, VT, HasFp256)) {
- if (HasInt256 && VT == MVT::v8i32)
+ if (isVPERMILPMask(M, VT)) {
+ if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
getShuffleSHUFImmediate(SVOp), DAG);
return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1,
@@ -7045,21 +7519,28 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
if (BlendOp.getNode())
return BlendOp;
- if (V2IsUndef && HasInt256 && (VT == MVT::v8i32 || VT == MVT::v8f32)) {
- SmallVector<SDValue, 8> permclMask;
- for (unsigned i = 0; i != 8; ++i) {
- permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MVT::i32));
+ unsigned Imm8;
+ if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
+ return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
+
+ if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
+ VT.is512BitVector()) {
+ MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
+ MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
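+ // e.g. for a v16f32 shuffle the mask below is built as a v16i32 BUILD_VECTOR.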
+ SmallVector<SDValue, 16> permclMask;
+ for (unsigned i = 0; i != NumElems; ++i) {
+ permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
}
- SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32,
- &permclMask[0], 8);
- // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
- return DAG.getNode(X86ISD::VPERMV, dl, VT,
- DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
- }
- if (V2IsUndef && HasInt256 && (VT == MVT::v4i64 || VT == MVT::v4f64))
- return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1,
- getShuffleCLImmediate(SVOp), DAG);
+ SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT,
+ &permclMask[0], NumElems);
+ if (V2IsUndef)
+ // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
+ return DAG.getNode(X86ISD::VPERMV, dl, VT,
+ DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
+ return DAG.getNode(X86ISD::VPERMV3, dl, VT,
+ DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1, V2);
+ }
//===--------------------------------------------------------------------===//
// Since no target specific shuffle was selected for this generic one,
@@ -7075,7 +7556,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
}
if (VT == MVT::v16i8) {
- SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this);
+ SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
if (NewOp.getNode())
return NewOp;
}
@@ -7099,10 +7580,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
}
static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
- MVT VT = Op.getValueType().getSimpleVT();
- DebugLoc dl = Op.getDebugLoc();
+ MVT VT = Op.getSimpleValueType();
+ SDLoc dl(Op);
- if (!Op.getOperand(0).getValueType().getSimpleVT().is128BitVector())
+ if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
return SDValue();
if (VT.getSizeInBits() == 8) {
@@ -7163,25 +7644,45 @@ static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
- if (!isa<ConstantSDNode>(Op.getOperand(1)))
- return SDValue();
-
+ SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
- MVT VecVT = Vec.getValueType().getSimpleVT();
+ MVT VecVT = Vec.getSimpleValueType();
+ SDValue Idx = Op.getOperand(1);
+ if (!isa<ConstantSDNode>(Idx)) {
+ if (VecVT.is512BitVector() ||
+ (VecVT.is256BitVector() && Subtarget->hasInt256() &&
+ VecVT.getVectorElementType().getSizeInBits() == 32)) {
+
+ MVT MaskEltVT =
+ MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
+ MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
+ MaskEltVT.getSizeInBits());
+
+ Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
+ SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
+ getZeroVector(MaskVT, Subtarget, DAG, dl),
+ Idx, DAG.getConstant(0, getPointerTy()));
+ SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
+ Perm, DAG.getConstant(0, getPointerTy()));
+ }
+ return SDValue();
+ }
// If this is a 256-bit vector result, first extract the 128-bit vector and
// then extract the element from the 128-bit vector.
- if (VecVT.is256BitVector()) {
- DebugLoc dl = Op.getNode()->getDebugLoc();
- unsigned NumElems = VecVT.getVectorNumElements();
- SDValue Idx = Op.getOperand(1);
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
// Get the 128-bit vector.
Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
+ MVT EltVT = VecVT.getVectorElementType();
- if (IdxVal >= NumElems/2)
- IdxVal -= NumElems/2;
+ unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
+
+ // Reduce IdxVal to the element's position within the extracted 128-bit chunk.
+ IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
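+ // e.g. extracting element 6 of a v8f32: ElemsPerChunk == 4, the upper 128-bit
+ // half was taken above and IdxVal becomes 2.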
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
DAG.getConstant(IdxVal, MVT::i32));
}
@@ -7194,8 +7695,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
return Res;
}
- MVT VT = Op.getValueType().getSimpleVT();
- DebugLoc dl = Op.getDebugLoc();
+ MVT VT = Op.getSimpleValueType();
// TODO: handle v16i8.
if (VT.getSizeInBits() == 16) {
SDValue Vec = Op.getOperand(0);
@@ -7222,7 +7722,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// SHUFPS the element to the lowest double word, then movss.
int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
- MVT VVT = Op.getOperand(0).getValueType().getSimpleVT();
+ MVT VVT = Op.getOperand(0).getSimpleValueType();
SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
DAG.getUNDEF(VVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
@@ -7241,7 +7741,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// Note if the lower 64 bits of the result of the UNPCKHPD is then stored
// to a f64mem, the whole operation is folded into a single MOVHPDmr.
int Mask[2] = { 1, -1 };
- MVT VVT = Op.getOperand(0).getValueType().getSimpleVT();
+ MVT VVT = Op.getOperand(0).getSimpleValueType();
SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
DAG.getUNDEF(VVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
@@ -7252,9 +7752,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
}
static SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
- MVT VT = Op.getValueType().getSimpleVT();
+ MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
@@ -7306,29 +7806,30 @@ static SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
- MVT VT = Op.getValueType().getSimpleVT();
+ MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
SDValue N2 = Op.getOperand(2);
// If this is a 256-bit vector result, first extract the 128-bit vector,
// insert the element into the extracted half and then place it back.
- if (VT.is256BitVector()) {
+ if (VT.is256BitVector() || VT.is512BitVector()) {
if (!isa<ConstantSDNode>(N2))
return SDValue();
// Get the desired 128-bit vector half.
- unsigned NumElems = VT.getVectorNumElements();
unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue();
SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
// Insert the element into the desired half.
- bool Upper = IdxVal >= NumElems/2;
+ unsigned NumEltsIn128 = 128/EltVT.getSizeInBits();
+ unsigned IdxIn128 = IdxVal - (IdxVal/NumEltsIn128) * NumEltsIn128;
+
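+ // e.g. inserting into element 5 of a v8i32: NumEltsIn128 == 4, so the value
+ // goes into lane 1 of the upper 128-bit half extracted above.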
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
- DAG.getConstant(Upper ? IdxVal-NumElems/2 : IdxVal, MVT::i32));
+ DAG.getConstant(IdxIn128, MVT::i32));
// Insert the changed part back to the 256-bit vector
return Insert128BitVector(N0, V, IdxVal, DAG, dl);
@@ -7353,17 +7854,16 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
}
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
- LLVMContext *Context = DAG.getContext();
- DebugLoc dl = Op.getDebugLoc();
- MVT OpVT = Op.getValueType().getSimpleVT();
+ SDLoc dl(Op);
+ MVT OpVT = Op.getSimpleValueType();
// If this is a 256-bit vector result, first insert into a 128-bit
// vector and then insert into the 256-bit vector.
if (!OpVT.is128BitVector()) {
// Insert into a 128-bit vector.
- EVT VT128 = EVT::getVectorVT(*Context,
- OpVT.getVectorElementType(),
- OpVT.getVectorNumElements() / 2);
+ unsigned SizeFactor = OpVT.getSizeInBits()/128;
+ MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
+ OpVT.getVectorNumElements() / SizeFactor);
Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
@@ -7386,16 +7886,22 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
// upper bits of a vector.
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
- if (Subtarget->hasFp256()) {
- DebugLoc dl = Op.getNode()->getDebugLoc();
- SDValue Vec = Op.getNode()->getOperand(0);
- SDValue Idx = Op.getNode()->getOperand(1);
+ SDLoc dl(Op);
+ SDValue In = Op.getOperand(0);
+ SDValue Idx = Op.getOperand(1);
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ MVT ResVT = Op.getSimpleValueType();
+ MVT InVT = In.getSimpleValueType();
- if (Op.getNode()->getValueType(0).is128BitVector() &&
- Vec.getNode()->getValueType(0).is256BitVector() &&
+ if (Subtarget->hasFp256()) {
+ if (ResVT.is128BitVector() &&
+ (InVT.is256BitVector() || InVT.is512BitVector()) &&
isa<ConstantSDNode>(Idx)) {
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
- return Extract128BitVector(Vec, IdxVal, DAG, dl);
+ return Extract128BitVector(In, IdxVal, DAG, dl);
+ }
+ if (ResVT.is256BitVector() && InVT.is512BitVector() &&
+ isa<ConstantSDNode>(Idx)) {
+ return Extract256BitVector(In, IdxVal, DAG, dl);
}
}
return SDValue();
@@ -7407,17 +7913,25 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
if (Subtarget->hasFp256()) {
- DebugLoc dl = Op.getNode()->getDebugLoc();
+ SDLoc dl(Op.getNode());
SDValue Vec = Op.getNode()->getOperand(0);
SDValue SubVec = Op.getNode()->getOperand(1);
SDValue Idx = Op.getNode()->getOperand(2);
- if (Op.getNode()->getValueType(0).is256BitVector() &&
- SubVec.getNode()->getValueType(0).is128BitVector() &&
+ if ((Op.getNode()->getSimpleValueType(0).is256BitVector() ||
+ Op.getNode()->getSimpleValueType(0).is512BitVector()) &&
+ SubVec.getNode()->getSimpleValueType(0).is128BitVector() &&
isa<ConstantSDNode>(Idx)) {
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
}
+
+ if (Op.getNode()->getSimpleValueType(0).is512BitVector() &&
+ SubVec.getNode()->getSimpleValueType(0).is256BitVector() &&
+ isa<ConstantSDNode>(Idx)) {
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
+ }
}
return SDValue();
}
@@ -7449,13 +7963,13 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
CP->getAlignment(),
CP->getOffset(), OpFlag);
- DebugLoc DL = CP->getDebugLoc();
+ SDLoc DL(CP);
Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc(), getPointerTy()),
+ SDLoc(), getPointerTy()),
Result);
}
@@ -7481,14 +7995,14 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
OpFlag);
- DebugLoc DL = JT->getDebugLoc();
+ SDLoc DL(JT);
Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag)
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc(), getPointerTy()),
+ SDLoc(), getPointerTy()),
Result);
return Result;
@@ -7519,7 +8033,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
@@ -7527,7 +8041,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
!Subtarget->is64Bit()) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc(), getPointerTy()),
+ SDLoc(), getPointerTy()),
Result);
}
@@ -7548,7 +8062,7 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
CodeModel::Model M = getTargetMachine().getCodeModel();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
OpFlags);
@@ -7569,7 +8083,7 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
}
SDValue
-X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
+X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
int64_t Offset, SelectionDAG &DAG) const {
// Create the TargetGlobalAddress node, folding in the constant
// offset if it is legal.
@@ -7618,7 +8132,7 @@ SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
- return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
+ return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
}
static SDValue
@@ -7627,7 +8141,7 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
unsigned char OperandFlags, bool LocalDynamic = false) {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- DebugLoc dl = GA->getDebugLoc();
+ SDLoc dl(GA);
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(),
@@ -7656,10 +8170,10 @@ static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT) {
SDValue InFlag;
- DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better
+ SDLoc dl(GA); // ? function entry point might be better
SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc(), PtrVT), InFlag);
+ SDLoc(), PtrVT), InFlag);
InFlag = Chain.getValue(1);
return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
@@ -7677,7 +8191,7 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
SelectionDAG &DAG,
const EVT PtrVT,
bool is64Bit) {
- DebugLoc dl = GA->getDebugLoc();
+ SDLoc dl(GA);
// Get the start address of the TLS block for this module.
X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
@@ -7691,7 +8205,7 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
} else {
SDValue InFlag;
SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
- DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), InFlag);
+ DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
InFlag = Chain.getValue(1);
Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
X86II::MO_TLSLDM, /*LocalDynamic=*/true);
@@ -7716,16 +8230,15 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT, TLSModel::Model model,
bool is64Bit, bool isPIC) {
- DebugLoc dl = GA->getDebugLoc();
+ SDLoc dl(GA);
// Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
is64Bit ? 257 : 256));
- SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
- DAG.getIntPtrConstant(0),
- MachinePointerInfo(Ptr),
- false, false, false, 0);
+ SDValue ThreadPointer =
+ DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
+ MachinePointerInfo(Ptr), false, false, false, 0);
unsigned char OperandFlags = 0;
// Most TLS accesses are not RIP relative, even on x86-64. One exception is
@@ -7747,21 +8260,20 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
// emit "addl x@ntpoff,%eax" (local exec)
// or "addl x@indntpoff,%eax" (initial exec)
// or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
- SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
- GA->getValueType(0),
- GA->getOffset(), OperandFlags);
+ SDValue TGA =
+ DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
+ GA->getOffset(), OperandFlags);
SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
if (model == TLSModel::InitialExec) {
if (isPIC && !is64Bit) {
Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
- DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT),
+ DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
Offset);
}
Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
- MachinePointerInfo::getGOT(), false, false, false,
- 0);
+ MachinePointerInfo::getGOT(), false, false, false, 0);
}
// The address of the thread local variable is the add of the thread
@@ -7809,7 +8321,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
OpFlag = X86II::MO_TLVP_PIC_BASE;
else
OpFlag = X86II::MO_TLVP;
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
GA->getValueType(0),
GA->getOffset(), OpFlag);
@@ -7819,7 +8331,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
if (PIC32)
Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc(), getPointerTy()),
+ SDLoc(), getPointerTy()),
Offset);
// Lowering the machine isd will make sure everything is in the right
@@ -7856,7 +8368,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
// thread-localness.
if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
GV = GA->resolveAliasedGlobal(false);
- DebugLoc dl = GA->getDebugLoc();
+ SDLoc dl(GA);
SDValue Chain = DAG.getEntryNode();
// Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
@@ -7914,11 +8426,16 @@ SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
EVT VT = Op.getValueType();
unsigned VTBits = VT.getSizeInBits();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
SDValue ShOpLo = Op.getOperand(0);
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
+ // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
+ // generic ISD nodes do not. Insert an AND to be safe; it is optimized away
+ // during isel.
+ SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
+ DAG.getConstant(VTBits - 1, MVT::i8));
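+ // e.g. with 32-bit parts (VTBits == 32) a shift amount of 40 is masked to
+ // 40 & 31 == 8 before feeding the plain SHL/SRA/SRL below.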
SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
DAG.getConstant(VTBits - 1, MVT::i8))
: DAG.getConstant(0, VT);
@@ -7926,12 +8443,15 @@ SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{
SDValue Tmp2, Tmp3;
if (Op.getOpcode() == ISD::SHL_PARTS) {
Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
- Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+ Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
} else {
Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
- Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt);
+ Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
}
+ // If the shift amount is larger than or equal to the width of a part, we can't
+ // rely on the results of shld/shrd. Insert a test and select the appropriate
+ // values for large shift amounts.
SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
DAG.getConstant(VTBits, MVT::i8));
SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
@@ -7973,7 +8493,7 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
return Op;
}
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned Size = SrcVT.getSizeInBits()/8;
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
@@ -7989,7 +8509,7 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
SDValue StackSlot,
SelectionDAG &DAG) const {
// Build the FILD
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDVTList Tys;
bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
if (useSSE)
@@ -8064,11 +8584,11 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
#endif
*/
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
LLVMContext *Context = DAG.getContext();
// Build some magic constants.
- const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
+ static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
Constant *C0 = ConstantDataVector::get(*Context, CV0);
SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
@@ -8118,7 +8638,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// FP constant to bias correct the final result.
SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
MVT::f64);
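
Illustration only: both conversions above rely on the classic trick that a double with exponent 2^52 (high word 0x43300000) carries a 32-bit integer exactly in its mantissa, and 2^84 (0x45300000) does the same for the high half scaled by 2^32; subtracting the exponent-only constant leaves the integer value behind. A scalar sketch of the i32 and i64 variants:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // u32 -> f64: OR the value into the mantissa of 2^52, then subtract 2^52.
    static double u32_to_f64(uint32_t x) {
      uint64_t bits = 0x4330000000000000ULL | x;   // the bias constant used above
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - 0x1p52;
    }

    // u64 -> f64: treat the halves the same way (2^52 for the low half, 2^84
    // for the high half); the partial results are exact and the final add rounds.
    static double u64_to_f64(uint64_t x) {
      uint64_t lo = 0x4330000000000000ULL | (x & 0xFFFFFFFFULL);
      uint64_t hi = 0x4530000000000000ULL | (x >> 32);
      double dlo, dhi;
      std::memcpy(&dlo, &lo, sizeof dlo);
      std::memcpy(&dhi, &hi, sizeof dhi);
      return (dhi - 0x1p84) + (dlo - 0x1p52);
    }

    int main() {
      printf("%a\n%a\n", u32_to_f64(0xDEADBEEFu), (double)0xDEADBEEFu);
      printf("%a\n%a\n", u64_to_f64(0x123456789ABCDEF5ULL),
                         (double)0x123456789ABCDEF5ULL);
      return 0;
    }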
@@ -8166,7 +8686,7 @@ SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
SelectionDAG &DAG) const {
SDValue N0 = Op.getOperand(0);
EVT SVT = N0.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 ||
SVT == MVT::v8i8 || SVT == MVT::v8i16) &&
@@ -8181,7 +8701,7 @@ SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
SDValue N0 = Op.getOperand(0);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (Op.getValueType().isVector())
return lowerUINT_TO_FP_vec(Op, DAG);
@@ -8240,7 +8760,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
APInt FF(32, 0x5F800000ULL);
// Check whether the sign bit is set.
- SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
+ SDValue SignSet = DAG.getSetCC(dl,
+ getSetCCResultType(*DAG.getContext(), MVT::i64),
Op.getOperand(0), DAG.getConstant(0, MVT::i64),
ISD::SETLT);
@@ -8269,7 +8790,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
std::pair<SDValue,SDValue>
X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
bool IsSigned, bool IsReplace) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT DstTy = Op.getValueType();
@@ -8363,10 +8884,10 @@ X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
- MVT VT = Op->getValueType(0).getSimpleVT();
+ MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
- MVT InVT = In.getValueType().getSimpleVT();
- DebugLoc dl = Op->getDebugLoc();
+ MVT InVT = In.getSimpleValueType();
+ SDLoc dl(Op);
// Optimize vectors in AVX mode:
//
@@ -8381,7 +8902,8 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
// Concat upper and lower parts.
//
- if (((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
+ if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
+ ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
return SDValue();
@@ -8403,8 +8925,39 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
-SDValue X86TargetLowering::LowerANY_EXTEND(SDValue Op,
- SelectionDAG &DAG) const {
+static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
+ SelectionDAG &DAG) {
+ MVT VT = Op->getValueType(0).getSimpleVT();
+ SDValue In = Op->getOperand(0);
+ MVT InVT = In.getValueType().getSimpleVT();
+ SDLoc DL(Op);
+ unsigned int NumElts = VT.getVectorNumElements();
+ if (NumElts != 8 && NumElts != 16)
+ return SDValue();
+
+ if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
+ return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
+
+ EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ // Now we have only mask extension
+ assert(InVT.getVectorElementType() == MVT::i1);
+ SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
+ const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+ SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
+ unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
+ SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
+ MachinePointerInfo::getConstantPool(),
+ false, false, false, Alignment);
+
+ SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
+ if (VT.is512BitVector())
+ return Brcst;
+ return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
+}
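
Aside: the only non-trivial path above is the i1-mask case, where the constant 1 is broadcast under the mask via X86ISD::VBROADCASTM. The per-lane reading below is inferred from this use rather than taken from a reference, so treat it as an assumption:

    #include <cstdint>
    #include <cstdio>

    // Assumed lane-wise behaviour of the VBROADCASTM use above:
    //   out[i] = mask[i] ? splat : 0
    // Broadcasting 1 therefore gives zext(mask); an all-ones splat (used by the
    // sign-extend path later in this patch) gives sext(mask).
    static void broadcast_by_mask(const bool *mask, uint32_t splat,
                                  uint32_t *out, int n) {
      for (int i = 0; i < n; ++i)
        out[i] = mask[i] ? splat : 0;
    }

    int main() {
      bool mask[8] = {true, false, true, true, false, false, true, false};
      uint32_t zext[8], sext[8];
      broadcast_by_mask(mask, 1u, zext, 8);
      broadcast_by_mask(mask, ~0u, sext, 8);
      for (int i = 0; i < 8; ++i)
        printf("%u/%d ", zext[i], (int32_t)sext[i]);
      printf("\n");
      return 0;
    }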
+
+static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
if (Subtarget->hasFp256()) {
SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
if (Res.getNode())
@@ -8413,12 +8966,16 @@ SDValue X86TargetLowering::LowerANY_EXTEND(SDValue Op,
return SDValue();
}
-SDValue X86TargetLowering::LowerZERO_EXTEND(SDValue Op,
- SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
- MVT VT = Op.getValueType().getSimpleVT();
+
+static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
- MVT SVT = In.getValueType().getSimpleVT();
+ MVT SVT = In.getSimpleValueType();
+
+ if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
+ return LowerZERO_EXTEND_AVX512(Op, DAG);
if (Subtarget->hasFp256()) {
SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
@@ -8426,33 +8983,44 @@ SDValue X86TargetLowering::LowerZERO_EXTEND(SDValue Op,
return Res;
}
- if (!VT.is256BitVector() || !SVT.is128BitVector() ||
- VT.getVectorNumElements() != SVT.getVectorNumElements())
- return SDValue();
-
- assert(Subtarget->hasFp256() && "256-bit vector is observed without AVX!");
-
- // AVX2 has better support of integer extending.
- if (Subtarget->hasInt256())
- return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
-
- SDValue Lo = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, In);
- static const int Mask[] = {4, 5, 6, 7, -1, -1, -1, -1};
- SDValue Hi = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32,
- DAG.getVectorShuffle(MVT::v8i16, DL, In,
- DAG.getUNDEF(MVT::v8i16),
- &Mask[0]));
-
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i32, Lo, Hi);
+ assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
+ VT.getVectorNumElements() != SVT.getVectorNumElements());
+ return SDValue();
}
SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
- MVT VT = Op.getValueType().getSimpleVT();
+ SDLoc DL(Op);
+ MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
- MVT SVT = In.getValueType().getSimpleVT();
-
- if ((VT == MVT::v4i32) && (SVT == MVT::v4i64)) {
+ MVT InVT = In.getSimpleValueType();
+ assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
+ "Invalid TRUNCATE operation");
+
+ if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
+ if (VT.getVectorElementType().getSizeInBits() >=8)
+ return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
+
+ assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
+ unsigned NumElts = InVT.getVectorNumElements();
+ assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
+ if (InVT.getSizeInBits() < 512) {
+ MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
+ In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
+ InVT = ExtVT;
+ }
+ SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
+ const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+ SDValue CP = DAG.getConstantPool(C, getPointerTy());
+ unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
+ SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
+ MachinePointerInfo::getConstantPool(),
+ false, false, false, Alignment);
+ SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
+ SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
+ return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
+ }
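
The i1 branch just above keeps only bit 0 of each lane (the broadcast-1 AND) and lets X86ISD::TESTM turn "lane is non-zero" into the mask bit. A scalar equivalent, for illustration only:

    #include <cstdint>
    #include <cstdio>

    // Per-lane truncate-to-i1 as done above: mask only bit 0, then test non-zero.
    static bool trunc_to_i1(uint32_t lane) { return (lane & 1u) != 0; }

    int main() {
      for (uint32_t v : {0u, 1u, 2u, 3u, 0xFFFFFFFEu})
        printf("%u -> %d\n", v, (int)trunc_to_i1(v));
      return 0;
    }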
+
+ if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
// On AVX2, v4i64 -> v4i32 becomes VPERMD.
if (Subtarget->hasInt256()) {
static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
@@ -8483,7 +9051,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask2);
}
- if ((VT == MVT::v8i16) && (SVT == MVT::v8i32)) {
+ if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
// On AVX2, v8i32 -> v8i16 becomes PSHUFB.
if (Subtarget->hasInt256()) {
In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
@@ -8541,11 +9109,9 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
}
// Handle truncation of V256 to V128 using shuffles.
- if (!VT.is128BitVector() || !SVT.is256BitVector())
+ if (!VT.is128BitVector() || !InVT.is256BitVector())
return SDValue();
- assert(VT.getVectorNumElements() != SVT.getVectorNumElements() &&
- "Invalid op");
assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
unsigned NumElems = VT.getVectorNumElements();
@@ -8565,11 +9131,11 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
SelectionDAG &DAG) const {
- MVT VT = Op.getValueType().getSimpleVT();
+ MVT VT = Op.getSimpleValueType();
if (VT.isVector()) {
if (VT == MVT::v8i16)
- return DAG.getNode(ISD::TRUNCATE, Op.getDebugLoc(), VT,
- DAG.getNode(ISD::FP_TO_SINT, Op.getDebugLoc(),
+ return DAG.getNode(ISD::TRUNCATE, SDLoc(Op), VT,
+ DAG.getNode(ISD::FP_TO_SINT, SDLoc(Op),
MVT::v8i32, Op.getOperand(0)));
return SDValue();
}
@@ -8582,7 +9148,7 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
if (StackSlot.getNode())
// Load the result.
- return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
+ return DAG.getLoad(Op.getValueType(), SDLoc(Op),
FIST, StackSlot, MachinePointerInfo(),
false, false, false, 0);
@@ -8599,7 +9165,7 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
if (StackSlot.getNode())
// Load the result.
- return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
+ return DAG.getLoad(Op.getValueType(), SDLoc(Op),
FIST, StackSlot, MachinePointerInfo(),
false, false, false, 0);
@@ -8608,10 +9174,10 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
}
static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
- DebugLoc DL = Op.getDebugLoc();
- MVT VT = Op.getValueType().getSimpleVT();
+ SDLoc DL(Op);
+ MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
- MVT SVT = In.getValueType().getSimpleVT();
+ MVT SVT = In.getSimpleValueType();
assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
@@ -8622,8 +9188,8 @@ static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const {
LLVMContext *Context = DAG.getContext();
- DebugLoc dl = Op.getDebugLoc();
- MVT VT = Op.getValueType().getSimpleVT();
+ SDLoc dl(Op);
+ MVT VT = Op.getSimpleValueType();
MVT EltVT = VT;
unsigned NumElts = VT == MVT::f64 ? 2 : 4;
if (VT.isVector()) {
@@ -8656,8 +9222,8 @@ SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
LLVMContext *Context = DAG.getContext();
- DebugLoc dl = Op.getDebugLoc();
- MVT VT = Op.getValueType().getSimpleVT();
+ SDLoc dl(Op);
+ MVT VT = Op.getSimpleValueType();
MVT EltVT = VT;
unsigned NumElts = VT == MVT::f64 ? 2 : 4;
if (VT.isVector()) {
@@ -8678,7 +9244,7 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo::getConstantPool(),
false, false, false, Alignment);
if (VT.isVector()) {
- MVT XORVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
+ MVT XORVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits()/64);
return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(ISD::XOR, dl, XORVT,
DAG.getNode(ISD::BITCAST, dl, XORVT,
@@ -8693,9 +9259,9 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
LLVMContext *Context = DAG.getContext();
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
- DebugLoc dl = Op.getDebugLoc();
- MVT VT = Op.getValueType().getSimpleVT();
- MVT SrcVT = Op1.getValueType().getSimpleVT();
+ SDLoc dl(Op);
+ MVT VT = Op.getSimpleValueType();
+ MVT SrcVT = Op1.getSimpleValueType();
// If second operand is smaller, extend it first.
if (SrcVT.bitsLT(VT)) {
@@ -8770,8 +9336,8 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue N0 = Op.getOperand(0);
- DebugLoc dl = Op.getDebugLoc();
- MVT VT = Op.getValueType().getSimpleVT();
+ SDLoc dl(Op);
+ MVT VT = Op.getSimpleValueType();
// Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
@@ -8781,8 +9347,8 @@ static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
// LowerVectorAllZeroTest - Check whether an OR'd tree is PTEST-able.
//
-SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op,
- SelectionDAG &DAG) const {
+static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
if (!Subtarget->hasSSE41())
@@ -8792,7 +9358,7 @@ SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op,
return SDValue();
SDNode *N = Op.getNode();
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SmallVector<SDValue, 8> Opnds;
DenseMap<SDValue, unsigned> VecInMap;
@@ -8804,7 +9370,7 @@ SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op,
Opnds.push_back(N->getOperand(1));
for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
- SmallVector<SDValue, 8>::const_iterator I = Opnds.begin() + Slot;
+ SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
// BFS traverse all OR'd operands.
if (I->getOpcode() == ISD::OR) {
Opnds.push_back(I->getOperand(0));
@@ -8876,7 +9442,7 @@ SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op,
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// CF and OF aren't always set the way we want. Determine which
// of these we need.
@@ -8907,7 +9473,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
unsigned NumOperands = 0;
// Truncate operations may prevent the merge of the SETCC instruction
- // and the arithmetic intruction before it. Attempt to truncate the operands
+ // and the arithmetic instruction before it. Attempt to truncate the operands
// of the arithmetic instruction and use a reduced bit-width instruction.
bool NeedTruncation = false;
SDValue ArithOp = Op;
@@ -9015,7 +9581,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
case ISD::AND: Opcode = X86ISD::AND; break;
case ISD::OR: {
if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
- SDValue EFLAGS = LowerVectorAllZeroTest(Op, DAG);
+ SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
if (EFLAGS.getNode())
return EFLAGS;
}
@@ -9091,7 +9657,7 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
if (C->getAPIntValue() == 0)
return EmitTest(Op0, X86CC, DAG);
- DebugLoc dl = Op0.getDebugLoc();
+ SDLoc dl(Op0);
if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
// Use SUB instead of CMP to enable CSE between SUB and CMP.
@@ -9118,7 +9684,7 @@ SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
// FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
// build an SDNode sequence that transfers the result from FPSW into EFLAGS:
// (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
- DebugLoc dl = Cmp.getDebugLoc();
+ SDLoc dl(Cmp);
SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
@@ -9135,7 +9701,7 @@ static bool isAllOnes(SDValue V) {
/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
/// if it's possible.
SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
- DebugLoc dl, SelectionDAG &DAG) const {
+ SDLoc dl, SelectionDAG &DAG) const {
SDValue Op0 = And.getOperand(0);
SDValue Op1 = And.getOperand(1);
if (Op0.getOpcode() == ISD::TRUNCATE)
@@ -9203,16 +9769,61 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
return SDValue();
}
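
Illustration only: the pattern LowerToBT recognises. Comparing (x & (1 << n)) against zero asks about a single bit, which is exactly what BT computes (the tested bit lands in CF and is read back with a setcc):

    #include <cstdint>
    #include <cstdio>

    // Scalar equivalence between the and-compare form and a bit test.
    static bool bt(uint64_t x, unsigned n) { return (x >> n) & 1; }

    int main() {
      uint64_t x = 0x28;   // bits 3 and 5 set
      for (unsigned n : {3u, 4u, 5u})
        printf("bit %u: and-compare=%d bt=%d\n", n,
               (int)((x & (1ULL << n)) != 0), (int)bt(x, n));
      return 0;
    }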
+/// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point
+/// mask CMPs.
+static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
+ SDValue &Op1) {
+ unsigned SSECC;
+ bool Swap = false;
+
+ // SSE Condition code mapping:
+ // 0 - EQ
+ // 1 - LT
+ // 2 - LE
+ // 3 - UNORD
+ // 4 - NEQ
+ // 5 - NLT
+ // 6 - NLE
+ // 7 - ORD
+ switch (SetCCOpcode) {
+ default: llvm_unreachable("Unexpected SETCC condition");
+ case ISD::SETOEQ:
+ case ISD::SETEQ: SSECC = 0; break;
+ case ISD::SETOGT:
+ case ISD::SETGT: Swap = true; // Fallthrough
+ case ISD::SETLT:
+ case ISD::SETOLT: SSECC = 1; break;
+ case ISD::SETOGE:
+ case ISD::SETGE: Swap = true; // Fallthrough
+ case ISD::SETLE:
+ case ISD::SETOLE: SSECC = 2; break;
+ case ISD::SETUO: SSECC = 3; break;
+ case ISD::SETUNE:
+ case ISD::SETNE: SSECC = 4; break;
+ case ISD::SETULE: Swap = true; // Fallthrough
+ case ISD::SETUGE: SSECC = 5; break;
+ case ISD::SETULT: Swap = true; // Fallthrough
+ case ISD::SETUGT: SSECC = 6; break;
+ case ISD::SETO: SSECC = 7; break;
+ case ISD::SETUEQ:
+ case ISD::SETONE: SSECC = 8; break;
+ }
+ if (Swap)
+ std::swap(Op0, Op1);
+
+ return SSECC;
+}
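
For reference while reading the table above, a standalone sketch of the predicate each CMPPS/CMPPD immediate 0..7 evaluates, plus the two conditions (SETUEQ, SETONE) that have no single immediate and are emitted later as two compares combined with OR or AND (the SSECC == 8 sentinel). Illustration only:

    #include <cmath>
    #include <cstdio>

    // Scalar meaning of the SSE compare immediates used above.
    static bool cmp_imm(int imm, double a, double b) {
      bool unord = std::isnan(a) || std::isnan(b);
      switch (imm) {
      case 0: return !unord && a == b;   // EQ
      case 1: return !unord && a <  b;   // LT
      case 2: return !unord && a <= b;   // LE
      case 3: return unord;              // UNORD
      case 4: return unord || a != b;    // NEQ
      case 5: return unord || !(a <  b); // NLT
      case 6: return unord || !(a <= b); // NLE
      case 7: return !unord;             // ORD
      }
      return false;
    }

    int main() {
      double qnan = std::nan("");
      // SETUEQ == UNORD or EQ: immediates 3 and 0 combined with OR.
      printf("UEQ(1,NaN) = %d\n", (int)(cmp_imm(3, 1.0, qnan) || cmp_imm(0, 1.0, qnan)));
      // SETONE == ORD and NEQ: immediates 7 and 4 combined with AND.
      printf("ONE(1,2)   = %d\n", (int)(cmp_imm(7, 1.0, 2.0) && cmp_imm(4, 1.0, 2.0)));
      return 0;
    }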
+
// Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128
// ones, and then concatenate the result back.
static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
- MVT VT = Op.getValueType().getSimpleVT();
+ MVT VT = Op.getSimpleValueType();
assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
"Unsupported value type for operation");
unsigned NumElems = VT.getVectorNumElements();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue CC = Op.getOperand(2);
// Extract the LHS vectors
@@ -9233,61 +9844,62 @@ static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
}
+static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+ SDValue CC = Op.getOperand(2);
+ MVT VT = Op.getSimpleValueType();
+
+ assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 32 &&
+ Op.getValueType().getScalarType() == MVT::i1 &&
+ "Cannot set masked compare for this operation");
+
+ ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
+ SDLoc dl(Op);
+
+ bool Unsigned = false;
+ unsigned SSECC;
+ switch (SetCCOpcode) {
+ default: llvm_unreachable("Unexpected SETCC condition");
+ case ISD::SETNE: SSECC = 4; break;
+ case ISD::SETEQ: SSECC = 0; break;
+ case ISD::SETUGT: Unsigned = true;
+ case ISD::SETGT: SSECC = 6; break; // NLE
+ case ISD::SETULT: Unsigned = true;
+ case ISD::SETLT: SSECC = 1; break;
+ case ISD::SETUGE: Unsigned = true;
+ case ISD::SETGE: SSECC = 5; break; // NLT
+ case ISD::SETULE: Unsigned = true;
+ case ISD::SETLE: SSECC = 2; break;
+ }
+ unsigned Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
+ return DAG.getNode(Opc, dl, VT, Op0, Op1,
+ DAG.getConstant(SSECC, MVT::i8));
+
+}
+
static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
- SDValue Cond;
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
- MVT VT = Op.getValueType().getSimpleVT();
+ MVT VT = Op.getSimpleValueType();
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
- bool isFP = Op.getOperand(1).getValueType().getSimpleVT().isFloatingPoint();
- DebugLoc dl = Op.getDebugLoc();
+ bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
+ SDLoc dl(Op);
if (isFP) {
#ifndef NDEBUG
- MVT EltVT = Op0.getValueType().getVectorElementType().getSimpleVT();
+ MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
assert(EltVT == MVT::f32 || EltVT == MVT::f64);
#endif
- unsigned SSECC;
- bool Swap = false;
-
- // SSE Condition code mapping:
- // 0 - EQ
- // 1 - LT
- // 2 - LE
- // 3 - UNORD
- // 4 - NEQ
- // 5 - NLT
- // 6 - NLE
- // 7 - ORD
- switch (SetCCOpcode) {
- default: llvm_unreachable("Unexpected SETCC condition");
- case ISD::SETOEQ:
- case ISD::SETEQ: SSECC = 0; break;
- case ISD::SETOGT:
- case ISD::SETGT: Swap = true; // Fallthrough
- case ISD::SETLT:
- case ISD::SETOLT: SSECC = 1; break;
- case ISD::SETOGE:
- case ISD::SETGE: Swap = true; // Fallthrough
- case ISD::SETLE:
- case ISD::SETOLE: SSECC = 2; break;
- case ISD::SETUO: SSECC = 3; break;
- case ISD::SETUNE:
- case ISD::SETNE: SSECC = 4; break;
- case ISD::SETULE: Swap = true; // Fallthrough
- case ISD::SETUGE: SSECC = 5; break;
- case ISD::SETULT: Swap = true; // Fallthrough
- case ISD::SETUGT: SSECC = 6; break;
- case ISD::SETO: SSECC = 7; break;
- case ISD::SETUEQ:
- case ISD::SETONE: SSECC = 8; break;
- }
- if (Swap)
- std::swap(Op0, Op1);
-
+ unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
+ unsigned Opc = X86ISD::CMPP;
+ if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
+ assert(VT.getVectorNumElements() <= 16);
+ Opc = X86ISD::CMPM;
+ }
// In the two special cases we can't handle, emit two comparisons.
if (SSECC == 8) {
unsigned CC0, CC1;
@@ -9299,14 +9911,14 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
}
- SDValue Cmp0 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
+ SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CC0, MVT::i8));
- SDValue Cmp1 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
+ SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CC1, MVT::i8));
return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
}
// Handle all other FP comparisons here.
- return DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
+ return DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(SSECC, MVT::i8));
}
@@ -9314,25 +9926,63 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
if (VT.is256BitVector() && !Subtarget->hasInt256())
return Lower256IntVSETCC(Op, DAG);
+ bool MaskResult = (VT.getVectorElementType() == MVT::i1);
+ EVT OpVT = Op1.getValueType();
+ if (Subtarget->hasAVX512()) {
+ if (Op1.getValueType().is512BitVector() ||
+ (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
+ return LowerIntVSETCC_AVX512(Op, DAG);
+
+ // In the AVX-512 architecture setcc returns a mask with i1 elements,
+ // but there is no compare instruction for i8 and i16 elements.
+ // We are not talking about 512-bit operands in this case; these
+ // types are illegal.
+ if (MaskResult &&
+ (OpVT.getVectorElementType().getSizeInBits() < 32 &&
+ OpVT.getVectorElementType().getSizeInBits() >= 8))
+ return DAG.getNode(ISD::TRUNCATE, dl, VT,
+ DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
+ }
+
// We are handling one of the integer comparisons here. Since SSE only has
// GT and EQ comparisons for integer, swapping operands and multiple
// operations may be required for some comparisons.
unsigned Opc;
- bool Swap = false, Invert = false, FlipSigns = false;
-
+ bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
+
switch (SetCCOpcode) {
default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETNE: Invert = true;
- case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
+ case ISD::SETEQ: Opc = MaskResult? X86ISD::PCMPEQM: X86ISD::PCMPEQ; break;
case ISD::SETLT: Swap = true;
- case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
+ case ISD::SETGT: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT; break;
case ISD::SETGE: Swap = true;
- case ISD::SETLE: Opc = X86ISD::PCMPGT; Invert = true; break;
+ case ISD::SETLE: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT;
+ Invert = true; break;
case ISD::SETULT: Swap = true;
- case ISD::SETUGT: Opc = X86ISD::PCMPGT; FlipSigns = true; break;
+ case ISD::SETUGT: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT;
+ FlipSigns = true; break;
case ISD::SETUGE: Swap = true;
- case ISD::SETULE: Opc = X86ISD::PCMPGT; FlipSigns = true; Invert = true; break;
+ case ISD::SETULE: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT;
+ FlipSigns = true; Invert = true; break;
+ }
+
+ // Special case: Use min/max operations for SETULE/SETUGE
+ MVT VET = VT.getVectorElementType();
+ bool hasMinMax =
+ (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
+ || (Subtarget->hasSSE2() && (VET == MVT::i8));
+
+ if (hasMinMax) {
+ switch (SetCCOpcode) {
+ default: break;
+ case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
+ case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
+ }
+
+ if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
}
+
if (Swap)
std::swap(Op0, Op1);
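
Illustration only: the identity behind the min/max special case above. SSE has no unsigned vector compares, but a <=u b exactly when umin(a, b) == a, and a >=u b exactly when umax(a, b) == a; the PCMPEQ against the min/max result further down completes the compare.

    #include <algorithm>
    #include <cstdio>

    // Scalar check of the SETULE/SETUGE min-max identity used above.
    int main() {
      unsigned tests[][2] = {{3, 200}, {200, 3}, {7, 7}};
      for (auto &t : tests) {
        unsigned a = t[0], b = t[1];
        bool ule = (std::min(a, b) == a);   // SETULE via UMIN + PCMPEQ
        bool uge = (std::max(a, b) == a);   // SETUGE via UMAX + PCMPEQ
        printf("a=%3u b=%3u  a<=b: %d/%d  a>=b: %d/%d\n",
               a, b, (int)ule, (int)(a <= b), (int)uge, (int)(a >= b));
      }
      return 0;
    }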
@@ -9366,8 +10016,8 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
// Create masks for only the low parts/high parts of the 64 bit integers.
- const int MaskHi[] = { 1, 1, 3, 3 };
- const int MaskLo[] = { 0, 0, 2, 2 };
+ static const int MaskHi[] = { 1, 1, 3, 3 };
+ static const int MaskLo[] = { 0, 0, 2, 2 };
SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
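
The surrounding v2i64 emulation builds a 64-bit signed compare out of 32-bit EQ/GT results on the halves. The identity it implements, checked in scalar form (illustration only):

    #include <cstdint>
    #include <cstdio>

    // a >s b (64-bit)  <=>  (ahi >s bhi) || (ahi == bhi && alo >u blo)
    static bool sgt64_from_halves(int64_t a, int64_t b) {
      int32_t  ahi = (int32_t)(a >> 32), bhi = (int32_t)(b >> 32);
      uint32_t alo = (uint32_t)a,        blo = (uint32_t)b;
      return (ahi > bhi) || (ahi == bhi && alo > blo);
    }

    int main() {
      int64_t vals[] = {-1, 0, 1, INT64_MIN, INT64_MAX, 1LL << 32, -(1LL << 32)};
      for (int64_t a : vals)
        for (int64_t b : vals)
          if (sgt64_from_halves(a, b) != (a > b))
            printf("mismatch at %lld, %lld\n", (long long)a, (long long)b);
      printf("done\n");
      return 0;
    }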
@@ -9394,7 +10044,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
// Make sure the lower and upper halves are both all-ones.
- const int Mask[] = { 1, 0, 3, 2 };
+ static const int Mask[] = { 1, 0, 3, 2 };
SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
@@ -9419,20 +10069,23 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// If the logical-not of the result is required, perform that now.
if (Invert)
Result = DAG.getNOT(dl, Result, VT);
+
+ if (MinMax)
+ Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
return Result;
}
SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
- MVT VT = Op.getValueType().getSimpleVT();
+ MVT VT = Op.getSimpleValueType();
if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
// Optimize to BT if possible.
@@ -9469,7 +10122,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
}
}
- bool isFP = Op1.getValueType().getSimpleVT().isFloatingPoint();
+ bool isFP = Op1.getSimpleValueType().isFloatingPoint();
unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
if (X86CC == X86::COND_INVALID)
return SDValue();
@@ -9526,9 +10179,31 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue Cond = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue Op2 = Op.getOperand(2);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
+ EVT VT = Op1.getValueType();
SDValue CC;
+ // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
+ // are available. Otherwise fp cmovs get lowered into a less efficient branch
+ // sequence later on.
+ if (Cond.getOpcode() == ISD::SETCC &&
+ ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
+ (Subtarget->hasSSE1() && VT == MVT::f32)) &&
+ VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
+ SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
+ int SSECC = translateX86FSETCC(
+ cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
+
+ if (SSECC != 8) {
+ unsigned Opcode = VT == MVT::f32 ? X86ISD::FSETCCss : X86ISD::FSETCCsd;
+ SDValue Cmp = DAG.getNode(Opcode, DL, VT, CondOp0, CondOp1,
+ DAG.getConstant(SSECC, MVT::i8));
+ SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
+ SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
+ return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
+ }
+ }
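
A scalar picture of the branchless sequence built above, for illustration: the compare materialises an all-ones or all-zeros mask, and the FAND/FANDN/FOR triple blends the two select arms through it.

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Branchless fp select: (mask & a) | (~mask & b), mask from the compare.
    static float select_lt(float x, float y, float a, float b) {
      uint32_t mask = (x < y) ? 0xFFFFFFFFu : 0u;  // what cmpss leaves behind
      uint32_t ai, bi, ri;
      std::memcpy(&ai, &a, 4);
      std::memcpy(&bi, &b, 4);
      ri = (mask & ai) | (~mask & bi);             // FAND / FANDN / FOR
      float r;
      std::memcpy(&r, &ri, 4);
      return r;
    }

    int main() {
      printf("%g %g\n", select_lt(1, 2, 10, 20), select_lt(3, 2, 10, 20));
      return 0;
    }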
+
if (Cond.getOpcode() == ISD::SETCC) {
SDValue NewCond = LowerSETCC(Cond, DAG);
if (NewCond.getNode())
@@ -9602,7 +10277,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
- MVT VT = Op.getValueType().getSimpleVT();
+ MVT VT = Op.getSimpleValueType();
bool IllegalFPCMov = false;
if (VT.isFloatingPoint() && !VT.isVector() &&
@@ -9711,15 +10386,50 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops));
}
-SDValue X86TargetLowering::LowerSIGN_EXTEND(SDValue Op,
- SelectionDAG &DAG) const {
- MVT VT = Op->getValueType(0).getSimpleVT();
+static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, SelectionDAG &DAG) {
+ MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
- MVT InVT = In.getValueType().getSimpleVT();
- DebugLoc dl = Op->getDebugLoc();
+ MVT InVT = In.getSimpleValueType();
+ SDLoc dl(Op);
+
+ unsigned int NumElts = VT.getVectorNumElements();
+ if (NumElts != 8 && NumElts != 16)
+ return SDValue();
+
+ if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
+ return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
+
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
+
+ MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
+ Constant *C = ConstantInt::get(*DAG.getContext(),
+ APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
+
+ SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
+ unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
+ SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
+ MachinePointerInfo::getConstantPool(),
+ false, false, false, Alignment);
+ SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
+ if (VT.is512BitVector())
+ return Brcst;
+ return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
+}
+
+static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ MVT VT = Op->getSimpleValueType(0);
+ SDValue In = Op->getOperand(0);
+ MVT InVT = In.getSimpleValueType();
+ SDLoc dl(Op);
+
+ if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
+ return LowerSIGN_EXTEND_AVX512(Op, DAG);
if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
- (VT != MVT::v8i32 || InVT != MVT::v8i16))
+ (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
+ (VT != MVT::v16i16 || InVT != MVT::v16i8))
return SDValue();
if (Subtarget->hasInt256())
@@ -9789,7 +10499,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Cond = Op.getOperand(1);
SDValue Dest = Op.getOperand(2);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue CC;
bool Inverted = false;
@@ -10059,12 +10769,13 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
"This should be used only on Windows targets or when segmented stacks "
"are being used");
assert(!Subtarget->isTargetEnvMacho() && "Not implemented");
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// Get the inputs.
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
- // FIXME: Ensure alignment here
+ unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ EVT VT = Op.getNode()->getValueType(0);
bool Is64Bit = Subtarget->is64Bit();
EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32;
@@ -10102,12 +10813,20 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
- Flag = Chain.getValue(1);
- Chain = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
- SPTy).getValue(1);
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ unsigned SPReg = RegInfo->getStackRegister();
+ SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
+ Chain = SP.getValue(1);
- SDValue Ops1[2] = { Chain.getValue(0), Chain };
+ if (Align) {
+ SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
+ DAG.getConstant(-(uint64_t)Align, VT));
+ Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
+ }
+
+ SDValue Ops1[2] = { SP, Chain };
return DAG.getMergeValues(Ops1, 2, dl);
}
}
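
The alignment step above is the usual trick for a downward-growing stack: AND-ing the fresh stack pointer with -Align clears the low bits and rounds the address down to an aligned boundary. A small numeric illustration (the pointer value is made up):

    #include <cstdint>
    #include <cstdio>

    // Rounding a downward-growing stack pointer: SP & -Align clears the low bits.
    int main() {
      uint64_t sp = 0x7fffffffe528ULL;             // hypothetical stack pointer
      for (uint64_t align : {16u, 32u, 64u}) {
        uint64_t aligned = sp & -align;            // the AND the lowering emits
        printf("align %2llu: %#llx -> %#llx\n", (unsigned long long)align,
               (unsigned long long)sp, (unsigned long long)aligned);
      }
      return 0;
    }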
@@ -10117,7 +10836,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
@@ -10184,7 +10903,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
SDValue SrcPtr = Op.getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
unsigned Align = Op.getConstantOperandVal(3);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT ArgVT = Op.getNode()->getValueType(0);
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
@@ -10250,7 +10969,7 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
SDValue SrcPtr = Op.getOperand(2);
const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
@@ -10258,25 +10977,37 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
+// getTargetVShiftByConstNode - Handle vector element shifts where the shift
+// amount is a constant. Takes immediate version of shift as input.
+static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, EVT VT,
+ SDValue SrcOp, uint64_t ShiftAmt,
+ SelectionDAG &DAG) {
+
+ // Check for ShiftAmt >= element width
+ if (ShiftAmt >= VT.getVectorElementType().getSizeInBits()) {
+ if (Opc == X86ISD::VSRAI)
+ ShiftAmt = VT.getVectorElementType().getSizeInBits() - 1;
+ else
+ return DAG.getConstant(0, VT);
+ }
+
+ assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
+ && "Unknown target vector shift-by-constant node");
+
+ return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
+}
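
The clamp above gives vector shifts by constant a defined out-of-range behaviour: logical shifts by at least the element width collapse to zero, while arithmetic shifts pin the count at width-1 so the sign fill survives. In scalar form, assuming the usual arithmetic behaviour of >> on signed values (illustration only):

    #include <cstdint>
    #include <cstdio>

    // Out-of-range shift handling matching the helper above (32-bit elements).
    static uint32_t vsrli(uint32_t x, unsigned amt) {
      return amt >= 32 ? 0 : x >> amt;          // logical shifts become zero
    }
    static int32_t vsrai(int32_t x, unsigned amt) {
      if (amt >= 32) amt = 31;                  // clamp, keeping the sign fill
      return x >> amt;
    }

    int main() {
      printf("%u %d\n", vsrli(1000u, 40), vsrai(-1000, 40));  // prints: 0 -1
      return 0;
    }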
+
// getTargetVShiftNode - Handle vector element shifts where the shift amount
// may or may not be a constant. Takes immediate version of shift as input.
-static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT,
+static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, EVT VT,
SDValue SrcOp, SDValue ShAmt,
SelectionDAG &DAG) {
assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32");
- if (isa<ConstantSDNode>(ShAmt)) {
- // Constant may be a TargetConstant. Use a regular constant.
- uint32_t ShiftAmt = cast<ConstantSDNode>(ShAmt)->getZExtValue();
- switch (Opc) {
- default: llvm_unreachable("Unknown target vector shift node");
- case X86ISD::VSHLI:
- case X86ISD::VSRLI:
- case X86ISD::VSRAI:
- return DAG.getNode(Opc, dl, VT, SrcOp,
- DAG.getConstant(ShiftAmt, MVT::i32));
- }
- }
+ // Catch shift-by-constant.
+ if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
+ return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
+ CShAmt->getZExtValue(), DAG);
// Change opcode to non-immediate version
switch (Opc) {
@@ -10304,7 +11035,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT,
}
static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
@@ -10479,24 +11210,32 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_pmaxu_b:
case Intrinsic::x86_avx2_pmaxu_w:
case Intrinsic::x86_avx2_pmaxu_d:
+ case Intrinsic::x86_avx512_pmaxu_d:
+ case Intrinsic::x86_avx512_pmaxu_q:
case Intrinsic::x86_sse2_pminu_b:
case Intrinsic::x86_sse41_pminuw:
case Intrinsic::x86_sse41_pminud:
case Intrinsic::x86_avx2_pminu_b:
case Intrinsic::x86_avx2_pminu_w:
case Intrinsic::x86_avx2_pminu_d:
+ case Intrinsic::x86_avx512_pminu_d:
+ case Intrinsic::x86_avx512_pminu_q:
case Intrinsic::x86_sse41_pmaxsb:
case Intrinsic::x86_sse2_pmaxs_w:
case Intrinsic::x86_sse41_pmaxsd:
case Intrinsic::x86_avx2_pmaxs_b:
case Intrinsic::x86_avx2_pmaxs_w:
case Intrinsic::x86_avx2_pmaxs_d:
+ case Intrinsic::x86_avx512_pmaxs_d:
+ case Intrinsic::x86_avx512_pmaxs_q:
case Intrinsic::x86_sse41_pminsb:
case Intrinsic::x86_sse2_pmins_w:
case Intrinsic::x86_sse41_pminsd:
case Intrinsic::x86_avx2_pmins_b:
case Intrinsic::x86_avx2_pmins_w:
- case Intrinsic::x86_avx2_pmins_d: {
+ case Intrinsic::x86_avx2_pmins_d:
+ case Intrinsic::x86_avx512_pmins_d:
+ case Intrinsic::x86_avx512_pmins_q: {
unsigned Opcode;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
@@ -10506,6 +11245,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_pmaxu_b:
case Intrinsic::x86_avx2_pmaxu_w:
case Intrinsic::x86_avx2_pmaxu_d:
+ case Intrinsic::x86_avx512_pmaxu_d:
+ case Intrinsic::x86_avx512_pmaxu_q:
Opcode = X86ISD::UMAX;
break;
case Intrinsic::x86_sse2_pminu_b:
@@ -10514,6 +11255,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_pminu_b:
case Intrinsic::x86_avx2_pminu_w:
case Intrinsic::x86_avx2_pminu_d:
+ case Intrinsic::x86_avx512_pminu_d:
+ case Intrinsic::x86_avx512_pminu_q:
Opcode = X86ISD::UMIN;
break;
case Intrinsic::x86_sse41_pmaxsb:
@@ -10522,6 +11265,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_pmaxs_b:
case Intrinsic::x86_avx2_pmaxs_w:
case Intrinsic::x86_avx2_pmaxs_d:
+ case Intrinsic::x86_avx512_pmaxs_d:
+ case Intrinsic::x86_avx512_pmaxs_q:
Opcode = X86ISD::SMAX;
break;
case Intrinsic::x86_sse41_pminsb:
@@ -10530,6 +11275,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_pmins_b:
case Intrinsic::x86_avx2_pmins_w:
case Intrinsic::x86_avx2_pmins_d:
+ case Intrinsic::x86_avx512_pmins_d:
+ case Intrinsic::x86_avx512_pmins_q:
Opcode = X86ISD::SMIN;
break;
}
@@ -10542,10 +11289,14 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_sse2_max_pd:
case Intrinsic::x86_avx_max_ps_256:
case Intrinsic::x86_avx_max_pd_256:
+ case Intrinsic::x86_avx512_max_ps_512:
+ case Intrinsic::x86_avx512_max_pd_512:
case Intrinsic::x86_sse_min_ps:
case Intrinsic::x86_sse2_min_pd:
case Intrinsic::x86_avx_min_ps_256:
- case Intrinsic::x86_avx_min_pd_256: {
+ case Intrinsic::x86_avx_min_pd_256:
+ case Intrinsic::x86_avx512_min_ps_512:
+ case Intrinsic::x86_avx512_min_pd_512: {
unsigned Opcode;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
@@ -10553,12 +11304,16 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_sse2_max_pd:
case Intrinsic::x86_avx_max_ps_256:
case Intrinsic::x86_avx_max_pd_256:
+ case Intrinsic::x86_avx512_max_ps_512:
+ case Intrinsic::x86_avx512_max_pd_512:
Opcode = X86ISD::FMAX;
break;
case Intrinsic::x86_sse_min_ps:
case Intrinsic::x86_sse2_min_pd:
case Intrinsic::x86_avx_min_ps_256:
case Intrinsic::x86_avx_min_pd_256:
+ case Intrinsic::x86_avx512_min_ps_512:
+ case Intrinsic::x86_avx512_min_pd_512:
Opcode = X86ISD::FMIN;
break;
}
@@ -10629,7 +11384,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_permd:
case Intrinsic::x86_avx2_permps:
// Operands intentionally swapped. Mask is last operand to intrinsic,
- // but second operand for node/intruction.
+ // but second operand for node/instruction.
return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(),
Op.getOperand(2), Op.getOperand(1));
@@ -10704,6 +11459,16 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
+ case Intrinsic::x86_avx512_kortestz:
+ case Intrinsic::x86_avx512_kortestc: {
+ unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz)? X86::COND_E: X86::COND_B;
+ SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
+ SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
+ SDValue CC = DAG.getConstant(X86CC, MVT::i8);
+ SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
+ SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
+ return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
+ }
// SSE/AVX shift intrinsics
case Intrinsic::x86_sse2_psll_w:
@@ -10854,8 +11619,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
X86CC = X86::COND_E;
break;
}
- SmallVector<SDValue, 5> NewOps;
- NewOps.append(Op->op_begin()+1, Op->op_end());
+ SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size());
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
@@ -10872,8 +11636,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
else
Opcode = X86ISD::PCMPESTRI;
- SmallVector<SDValue, 5> NewOps;
- NewOps.append(Op->op_begin()+1, Op->op_end());
+ SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size());
}
@@ -10900,7 +11663,19 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_fma_vfmaddsub_ps_256:
case Intrinsic::x86_fma_vfmaddsub_pd_256:
case Intrinsic::x86_fma_vfmsubadd_ps_256:
- case Intrinsic::x86_fma_vfmsubadd_pd_256: {
+ case Intrinsic::x86_fma_vfmsubadd_pd_256:
+ case Intrinsic::x86_fma_vfmadd_ps_512:
+ case Intrinsic::x86_fma_vfmadd_pd_512:
+ case Intrinsic::x86_fma_vfmsub_ps_512:
+ case Intrinsic::x86_fma_vfmsub_pd_512:
+ case Intrinsic::x86_fma_vfnmadd_ps_512:
+ case Intrinsic::x86_fma_vfnmadd_pd_512:
+ case Intrinsic::x86_fma_vfnmsub_ps_512:
+ case Intrinsic::x86_fma_vfnmsub_pd_512:
+ case Intrinsic::x86_fma_vfmaddsub_ps_512:
+ case Intrinsic::x86_fma_vfmaddsub_pd_512:
+ case Intrinsic::x86_fma_vfmsubadd_ps_512:
+ case Intrinsic::x86_fma_vfmsubadd_pd_512: {
unsigned Opc;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
@@ -10908,36 +11683,48 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_fma_vfmadd_pd:
case Intrinsic::x86_fma_vfmadd_ps_256:
case Intrinsic::x86_fma_vfmadd_pd_256:
+ case Intrinsic::x86_fma_vfmadd_ps_512:
+ case Intrinsic::x86_fma_vfmadd_pd_512:
Opc = X86ISD::FMADD;
break;
case Intrinsic::x86_fma_vfmsub_ps:
case Intrinsic::x86_fma_vfmsub_pd:
case Intrinsic::x86_fma_vfmsub_ps_256:
case Intrinsic::x86_fma_vfmsub_pd_256:
+ case Intrinsic::x86_fma_vfmsub_ps_512:
+ case Intrinsic::x86_fma_vfmsub_pd_512:
Opc = X86ISD::FMSUB;
break;
case Intrinsic::x86_fma_vfnmadd_ps:
case Intrinsic::x86_fma_vfnmadd_pd:
case Intrinsic::x86_fma_vfnmadd_ps_256:
case Intrinsic::x86_fma_vfnmadd_pd_256:
+ case Intrinsic::x86_fma_vfnmadd_ps_512:
+ case Intrinsic::x86_fma_vfnmadd_pd_512:
Opc = X86ISD::FNMADD;
break;
case Intrinsic::x86_fma_vfnmsub_ps:
case Intrinsic::x86_fma_vfnmsub_pd:
case Intrinsic::x86_fma_vfnmsub_ps_256:
case Intrinsic::x86_fma_vfnmsub_pd_256:
+ case Intrinsic::x86_fma_vfnmsub_ps_512:
+ case Intrinsic::x86_fma_vfnmsub_pd_512:
Opc = X86ISD::FNMSUB;
break;
case Intrinsic::x86_fma_vfmaddsub_ps:
case Intrinsic::x86_fma_vfmaddsub_pd:
case Intrinsic::x86_fma_vfmaddsub_ps_256:
case Intrinsic::x86_fma_vfmaddsub_pd_256:
+ case Intrinsic::x86_fma_vfmaddsub_ps_512:
+ case Intrinsic::x86_fma_vfmaddsub_pd_512:
Opc = X86ISD::FMADDSUB;
break;
case Intrinsic::x86_fma_vfmsubadd_ps:
case Intrinsic::x86_fma_vfmsubadd_pd:
case Intrinsic::x86_fma_vfmsubadd_ps_256:
case Intrinsic::x86_fma_vfmsubadd_pd_256:
+ case Intrinsic::x86_fma_vfmsubadd_ps_512:
+ case Intrinsic::x86_fma_vfmsubadd_pd_512:
Opc = X86ISD::FMSUBADD;
break;
}
@@ -10948,8 +11735,88 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
}
}
-static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
+static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
+ SDValue Base, SDValue Index,
+ SDValue ScaleOp, SDValue Chain,
+ const X86Subtarget * Subtarget) {
+ SDLoc dl(Op);
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
+ assert(C && "Invalid scale type");
+ SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
+ SDValue Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
+ EVT MaskVT = MVT::getVectorVT(MVT::i1,
+ Index.getValueType().getVectorNumElements());
+ SDValue MaskInReg = DAG.getConstant(~0, MaskVT);
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
+ SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
+ SDValue Segment = DAG.getRegister(0, MVT::i32);
+ SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
+ SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
+ SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
+ return DAG.getMergeValues(RetOps, array_lengthof(RetOps), dl);
+}
+
+static SDValue getMGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
+ SDValue Src, SDValue Mask, SDValue Base,
+ SDValue Index, SDValue ScaleOp, SDValue Chain,
+ const X86Subtarget * Subtarget) {
+ SDLoc dl(Op);
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
+ assert(C && "Invalid scale type");
+ SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
+ EVT MaskVT = MVT::getVectorVT(MVT::i1,
+ Index.getValueType().getVectorNumElements());
+ SDValue MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
+ SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
+ SDValue Segment = DAG.getRegister(0, MVT::i32);
+ if (Src.getOpcode() == ISD::UNDEF)
+ Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
+ SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
+ SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
+ SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
+ return DAG.getMergeValues(RetOps, array_lengthof(RetOps), dl);
+}
+
+static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
+ SDValue Src, SDValue Base, SDValue Index,
+ SDValue ScaleOp, SDValue Chain) {
+ SDLoc dl(Op);
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
+ assert(C && "Invalid scale type");
+ SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
+ SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
+ SDValue Segment = DAG.getRegister(0, MVT::i32);
+ EVT MaskVT = MVT::getVectorVT(MVT::i1,
+ Index.getValueType().getVectorNumElements());
+ SDValue MaskInReg = DAG.getConstant(~0, MaskVT);
+ SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
+ SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
+ SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
+ return SDValue(Res, 1);
+}
+
+static SDValue getMScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
+ SDValue Src, SDValue Mask, SDValue Base,
+ SDValue Index, SDValue ScaleOp, SDValue Chain) {
+ SDLoc dl(Op);
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
+ assert(C && "Invalid scale type");
+ SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
+ SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
+ SDValue Segment = DAG.getRegister(0, MVT::i32);
+ EVT MaskVT = MVT::getVectorVT(MVT::i1,
+ Index.getValueType().getVectorNumElements());
+ SDValue MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
+ SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
+ SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
+ SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
+ return SDValue(Res, 1);
+}
+
+static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDLoc dl(Op);
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
@@ -10983,7 +11850,144 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
SDValue(Result.getNode(), 2));
}
-
+ //int_gather(index, base, scale);
+ case Intrinsic::x86_avx512_gather_qpd_512:
+ case Intrinsic::x86_avx512_gather_qps_512:
+ case Intrinsic::x86_avx512_gather_dpd_512:
+ case Intrinsic::x86_avx512_gather_qpi_512:
+ case Intrinsic::x86_avx512_gather_qpq_512:
+ case Intrinsic::x86_avx512_gather_dpq_512:
+ case Intrinsic::x86_avx512_gather_dps_512:
+ case Intrinsic::x86_avx512_gather_dpi_512: {
+ unsigned Opc;
+ switch (IntNo) {
+ default: llvm_unreachable("Unexpected intrinsic!");
+ case Intrinsic::x86_avx512_gather_qps_512: Opc = X86::VGATHERQPSZrm; break;
+ case Intrinsic::x86_avx512_gather_qpd_512: Opc = X86::VGATHERQPDZrm; break;
+ case Intrinsic::x86_avx512_gather_dpd_512: Opc = X86::VGATHERDPDZrm; break;
+ case Intrinsic::x86_avx512_gather_dps_512: Opc = X86::VGATHERDPSZrm; break;
+ case Intrinsic::x86_avx512_gather_qpi_512: Opc = X86::VPGATHERQDZrm; break;
+ case Intrinsic::x86_avx512_gather_qpq_512: Opc = X86::VPGATHERQQZrm; break;
+ case Intrinsic::x86_avx512_gather_dpi_512: Opc = X86::VPGATHERDDZrm; break;
+ case Intrinsic::x86_avx512_gather_dpq_512: Opc = X86::VPGATHERDQZrm; break;
+ }
+ SDValue Chain = Op.getOperand(0);
+ SDValue Index = Op.getOperand(2);
+ SDValue Base = Op.getOperand(3);
+ SDValue Scale = Op.getOperand(4);
+ return getGatherNode(Opc, Op, DAG, Base, Index, Scale, Chain, Subtarget);
+ }
+ //int_gather_mask(v1, mask, index, base, scale);
+ case Intrinsic::x86_avx512_gather_qps_mask_512:
+ case Intrinsic::x86_avx512_gather_qpd_mask_512:
+ case Intrinsic::x86_avx512_gather_dpd_mask_512:
+ case Intrinsic::x86_avx512_gather_dps_mask_512:
+ case Intrinsic::x86_avx512_gather_qpi_mask_512:
+ case Intrinsic::x86_avx512_gather_qpq_mask_512:
+ case Intrinsic::x86_avx512_gather_dpi_mask_512:
+ case Intrinsic::x86_avx512_gather_dpq_mask_512: {
+ unsigned Opc;
+ switch (IntNo) {
+ default: llvm_unreachable("Unexpected intrinsic!");
+ case Intrinsic::x86_avx512_gather_qps_mask_512:
+ Opc = X86::VGATHERQPSZrm; break;
+ case Intrinsic::x86_avx512_gather_qpd_mask_512:
+ Opc = X86::VGATHERQPDZrm; break;
+ case Intrinsic::x86_avx512_gather_dpd_mask_512:
+ Opc = X86::VGATHERDPDZrm; break;
+ case Intrinsic::x86_avx512_gather_dps_mask_512:
+ Opc = X86::VGATHERDPSZrm; break;
+ case Intrinsic::x86_avx512_gather_qpi_mask_512:
+ Opc = X86::VPGATHERQDZrm; break;
+ case Intrinsic::x86_avx512_gather_qpq_mask_512:
+ Opc = X86::VPGATHERQQZrm; break;
+ case Intrinsic::x86_avx512_gather_dpi_mask_512:
+ Opc = X86::VPGATHERDDZrm; break;
+ case Intrinsic::x86_avx512_gather_dpq_mask_512:
+ Opc = X86::VPGATHERDQZrm; break;
+ }
+ SDValue Chain = Op.getOperand(0);
+ SDValue Src = Op.getOperand(2);
+ SDValue Mask = Op.getOperand(3);
+ SDValue Index = Op.getOperand(4);
+ SDValue Base = Op.getOperand(5);
+ SDValue Scale = Op.getOperand(6);
+ return getMGatherNode(Opc, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
+ Subtarget);
+ }
+ //int_scatter(base, index, v1, scale);
+ case Intrinsic::x86_avx512_scatter_qpd_512:
+ case Intrinsic::x86_avx512_scatter_qps_512:
+ case Intrinsic::x86_avx512_scatter_dpd_512:
+ case Intrinsic::x86_avx512_scatter_qpi_512:
+ case Intrinsic::x86_avx512_scatter_qpq_512:
+ case Intrinsic::x86_avx512_scatter_dpq_512:
+ case Intrinsic::x86_avx512_scatter_dps_512:
+ case Intrinsic::x86_avx512_scatter_dpi_512: {
+ unsigned Opc;
+ switch (IntNo) {
+ default: llvm_unreachable("Unexpected intrinsic!");
+ case Intrinsic::x86_avx512_scatter_qpd_512:
+ Opc = X86::VSCATTERQPDZmr; break;
+ case Intrinsic::x86_avx512_scatter_qps_512:
+ Opc = X86::VSCATTERQPSZmr; break;
+ case Intrinsic::x86_avx512_scatter_dpd_512:
+ Opc = X86::VSCATTERDPDZmr; break;
+ case Intrinsic::x86_avx512_scatter_dps_512:
+ Opc = X86::VSCATTERDPSZmr; break;
+ case Intrinsic::x86_avx512_scatter_qpi_512:
+ Opc = X86::VPSCATTERQDZmr; break;
+ case Intrinsic::x86_avx512_scatter_qpq_512:
+ Opc = X86::VPSCATTERQQZmr; break;
+ case Intrinsic::x86_avx512_scatter_dpq_512:
+ Opc = X86::VPSCATTERDQZmr; break;
+ case Intrinsic::x86_avx512_scatter_dpi_512:
+ Opc = X86::VPSCATTERDDZmr; break;
+ }
+ SDValue Chain = Op.getOperand(0);
+ SDValue Base = Op.getOperand(2);
+ SDValue Index = Op.getOperand(3);
+ SDValue Src = Op.getOperand(4);
+ SDValue Scale = Op.getOperand(5);
+ return getScatterNode(Opc, Op, DAG, Src, Base, Index, Scale, Chain);
+ }
+ //int_scatter_mask(base, mask, index, v1, scale);
+ case Intrinsic::x86_avx512_scatter_qps_mask_512:
+ case Intrinsic::x86_avx512_scatter_qpd_mask_512:
+ case Intrinsic::x86_avx512_scatter_dpd_mask_512:
+ case Intrinsic::x86_avx512_scatter_dps_mask_512:
+ case Intrinsic::x86_avx512_scatter_qpi_mask_512:
+ case Intrinsic::x86_avx512_scatter_qpq_mask_512:
+ case Intrinsic::x86_avx512_scatter_dpi_mask_512:
+ case Intrinsic::x86_avx512_scatter_dpq_mask_512: {
+ unsigned Opc;
+ switch (IntNo) {
+ default: llvm_unreachable("Unexpected intrinsic!");
+ case Intrinsic::x86_avx512_scatter_qpd_mask_512:
+ Opc = X86::VSCATTERQPDZmr; break;
+ case Intrinsic::x86_avx512_scatter_qps_mask_512:
+ Opc = X86::VSCATTERQPSZmr; break;
+ case Intrinsic::x86_avx512_scatter_dpd_mask_512:
+ Opc = X86::VSCATTERDPDZmr; break;
+ case Intrinsic::x86_avx512_scatter_dps_mask_512:
+ Opc = X86::VSCATTERDPSZmr; break;
+ case Intrinsic::x86_avx512_scatter_qpi_mask_512:
+ Opc = X86::VPSCATTERQDZmr; break;
+ case Intrinsic::x86_avx512_scatter_qpq_mask_512:
+ Opc = X86::VPSCATTERQQZmr; break;
+ case Intrinsic::x86_avx512_scatter_dpq_mask_512:
+ Opc = X86::VPSCATTERDQZmr; break;
+ case Intrinsic::x86_avx512_scatter_dpi_mask_512:
+ Opc = X86::VPSCATTERDDZmr; break;
+ }
+ SDValue Chain = Op.getOperand(0);
+ SDValue Base = Op.getOperand(2);
+ SDValue Mask = Op.getOperand(3);
+ SDValue Index = Op.getOperand(4);
+ SDValue Src = Op.getOperand(5);
+ SDValue Scale = Op.getOperand(6);
+ return getMScatterNode(Opc, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
+ }
// XTEST intrinsics.
case Intrinsic::x86_xtest: {
SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
@@ -11004,13 +12008,14 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
MFI->setReturnAddressIsTaken(true);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT PtrVT = getPointerTy();
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
- SDValue Offset =
- DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, PtrVT,
FrameAddr, Offset),
@@ -11028,8 +12033,10 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful
+ SDLoc dl(Op); // FIXME probably not meaningful
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
(FrameReg == X86::EBP && VT == MVT::i32)) &&
@@ -11044,6 +12051,8 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
}
@@ -11051,9 +12060,11 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl (Op);
EVT PtrVT = getPointerTy();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
(FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
@@ -11074,7 +12085,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
DAG.getVTList(MVT::i32, MVT::Other),
Op.getOperand(0), Op.getOperand(1));
@@ -11082,7 +12093,7 @@ SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
Op.getOperand(0), Op.getOperand(1));
}
@@ -11097,7 +12108,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SDValue Trmp = Op.getOperand(1); // trampoline
SDValue FPtr = Op.getOperand(2); // nested function
SDValue Nest = Op.getOperand(3); // 'nest' parameter value
- DebugLoc dl = Op.getDebugLoc();
+  SDLoc dl(Op);
const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo();
@@ -11267,7 +12278,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
const TargetFrameLowering &TFI = *TM.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// Save FP Control Word to stack slot
int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
@@ -11314,7 +12325,7 @@ static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
Op = Op.getOperand(0);
if (VT == MVT::i8) {
@@ -11348,7 +12359,7 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
Op = Op.getOperand(0);
if (VT == MVT::i8) {
@@ -11372,7 +12383,7 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
unsigned NumBits = VT.getSizeInBits();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
Op = Op.getOperand(0);
// Issue a bsf (scan bits forward) which also sets EFLAGS.
@@ -11398,7 +12409,7 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
"Unsupported value type for operation");
unsigned NumElems = VT.getVectorNumElements();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
@@ -11434,7 +12445,7 @@ static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT VT = Op.getValueType();
// Decompose 256-bit ops into smaller 128-bit ops.
@@ -11450,7 +12461,7 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
"Should not custom lower when pmuldq is available!");
// Extract the odd parts.
- const int UnpackMask[] = { 1, -1, 3, -1 };
+ static const int UnpackMask[] = { 1, -1, 3, -1 };
SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
@@ -11464,12 +12475,12 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
// Merge the two vectors back together with a shuffle. This expands into 2
// shuffles.
- const int ShufMask[] = { 0, 4, 2, 6 };
+ static const int ShufMask[] = { 0, 4, 2, 6 };
return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
}
- assert((VT == MVT::v2i64 || VT == MVT::v4i64) &&
- "Only know how to lower V2I64/V4I64 multiply");
+ assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
+ "Only know how to lower V2I64/V4I64/V8I64 multiply");
// Ahi = psrlqi(a, 32);
// Bhi = psrlqi(b, 32);
@@ -11482,13 +12493,12 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
// AhiBlo = psllqi(AhiBlo, 32);
// return AloBlo + AloBhi + AhiBlo;
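  //
  // This works because A*B = (Alo + Ahi*2^32) * (Blo + Bhi*2^32)
  //                        = Alo*Blo + (Alo*Bhi + Ahi*Blo)*2^32  (mod 2^64),
  // and each 32x32->64 partial product is a PMULUDQ.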
- SDValue ShAmt = DAG.getConstant(32, MVT::i32);
-
- SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, ShAmt);
- SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, ShAmt);
+ SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
+ SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
// Bit cast to 32-bit vectors for MULUDQ
- EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 : MVT::v8i32;
+ EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
+ (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
@@ -11498,19 +12508,19 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
- AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, ShAmt);
- AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, ShAmt);
+ AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
+ AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
}
-SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
EVT EltTy = VT.getVectorElementType();
unsigned NumElts = VT.getVectorNumElements();
SDValue N0 = Op.getOperand(0);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// Lower sdiv X, pow2-const.
BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(Op.getOperand(1));
@@ -11518,23 +12528,35 @@ SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
APInt SplatValue, SplatUndef;
- unsigned MinSplatBits;
+ unsigned SplatBitSize;
bool HasAnyUndefs;
- if (!C->isConstantSplat(SplatValue, SplatUndef, MinSplatBits, HasAnyUndefs))
+ if (!C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs) ||
+ EltTy.getSizeInBits() < SplatBitSize)
return SDValue();
if ((SplatValue != 0) &&
(SplatValue.isPowerOf2() || (-SplatValue).isPowerOf2())) {
- unsigned lg2 = SplatValue.countTrailingZeros();
+ unsigned Lg2 = SplatValue.countTrailingZeros();
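+    // Standard sdiv-by-power-of-2 expansion:
+    //   result = (N0 + ((N0 >>s (Bits-1)) >>u (Bits-Lg2))) >>s Lg2,
+    // i.e. bias a negative dividend by abs(divisor)-1 before the final shift.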
// Splat the sign bit.
- SDValue Sz = DAG.getConstant(EltTy.getSizeInBits()-1, MVT::i32);
- SDValue SGN = getTargetVShiftNode(X86ISD::VSRAI, dl, VT, N0, Sz, DAG);
+ SmallVector<SDValue, 16> Sz(NumElts,
+ DAG.getConstant(EltTy.getSizeInBits() - 1,
+ EltTy));
+ SDValue SGN = DAG.getNode(ISD::SRA, dl, VT, N0,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Sz[0],
+ NumElts));
// Add (N0 < 0) ? abs2 - 1 : 0;
- SDValue Amt = DAG.getConstant(EltTy.getSizeInBits() - lg2, MVT::i32);
- SDValue SRL = getTargetVShiftNode(X86ISD::VSRLI, dl, VT, SGN, Amt, DAG);
+ SmallVector<SDValue, 16> Amt(NumElts,
+ DAG.getConstant(EltTy.getSizeInBits() - Lg2,
+ EltTy));
+ SDValue SRL = DAG.getNode(ISD::SRL, dl, VT, SGN,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Amt[0],
+ NumElts));
SDValue ADD = DAG.getNode(ISD::ADD, dl, VT, N0, SRL);
- SDValue Lg2Amt = DAG.getConstant(lg2, MVT::i32);
- SDValue SRA = getTargetVShiftNode(X86ISD::VSRAI, dl, VT, ADD, Lg2Amt, DAG);
+ SmallVector<SDValue, 16> Lg2Amt(NumElts, DAG.getConstant(Lg2, EltTy));
+ SDValue SRA = DAG.getNode(ISD::SRA, dl, VT, ADD,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Lg2Amt[0],
+ NumElts));
// If we're dividing by a positive value, we're done. Otherwise, we must
// negate the result.
@@ -11551,7 +12573,7 @@ SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
@@ -11563,23 +12585,26 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
(Subtarget->hasInt256() &&
- (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16))) {
+ (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
+ (Subtarget->hasAVX512() &&
+ (VT == MVT::v8i64 || VT == MVT::v16i32))) {
if (Op.getOpcode() == ISD::SHL)
- return DAG.getNode(X86ISD::VSHLI, dl, VT, R,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
+ DAG);
if (Op.getOpcode() == ISD::SRL)
- return DAG.getNode(X86ISD::VSRLI, dl, VT, R,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
+ DAG);
if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
- return DAG.getNode(X86ISD::VSRAI, dl, VT, R,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
+ DAG);
}
if (VT == MVT::v16i8) {
if (Op.getOpcode() == ISD::SHL) {
// Make a large shift.
- SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, R,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
+ MVT::v8i16, R, ShiftAmt,
+ DAG);
SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
// Zero out the rightmost bits.
SmallVector<SDValue, 16> V(16,
@@ -11590,8 +12615,9 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
}
if (Op.getOpcode() == ISD::SRL) {
// Make a large shift.
- SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v8i16, R,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
+ MVT::v8i16, R, ShiftAmt,
+ DAG);
SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
// Zero out the leftmost bits.
SmallVector<SDValue, 16> V(16,
@@ -11622,8 +12648,9 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
if (Subtarget->hasInt256() && VT == MVT::v32i8) {
if (Op.getOpcode() == ISD::SHL) {
// Make a large shift.
- SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v16i16, R,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
+ MVT::v16i16, R, ShiftAmt,
+ DAG);
SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
// Zero out the rightmost bits.
SmallVector<SDValue, 32> V(32,
@@ -11634,8 +12661,9 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
}
if (Op.getOpcode() == ISD::SRL) {
// Make a large shift.
- SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v16i16, R,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
+ MVT::v16i16, R, ShiftAmt,
+ DAG);
SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
// Zero out the leftmost bits.
SmallVector<SDValue, 32> V(32,
@@ -11700,14 +12728,14 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
default:
llvm_unreachable("Unknown shift opcode!");
case ISD::SHL:
- return DAG.getNode(X86ISD::VSHLI, dl, VT, R,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
+ DAG);
case ISD::SRL:
- return DAG.getNode(X86ISD::VSRLI, dl, VT, R,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
+ DAG);
case ISD::SRA:
- return DAG.getNode(X86ISD::VSRAI, dl, VT, R,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
+ DAG);
}
}
@@ -11717,7 +12745,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
const X86Subtarget* Subtarget) {
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
@@ -11725,7 +12753,8 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
VT == MVT::v4i32 || VT == MVT::v8i16 ||
(Subtarget->hasInt256() &&
((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
- VT == MVT::v8i32 || VT == MVT::v16i16))) {
+ VT == MVT::v8i32 || VT == MVT::v16i16)) ||
+ (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
SDValue BaseShAmt;
EVT EltVT = VT.getVectorElementType();
@@ -11793,6 +12822,8 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
case MVT::v4i64:
case MVT::v8i32:
case MVT::v16i16:
+ case MVT::v16i32:
+ case MVT::v8i64:
return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
}
case ISD::SRA:
@@ -11802,6 +12833,8 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
case MVT::v8i16:
case MVT::v8i32:
case MVT::v16i16:
+ case MVT::v16i32:
+ case MVT::v8i64:
return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
}
case ISD::SRL:
@@ -11813,6 +12846,8 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
case MVT::v4i64:
case MVT::v8i32:
case MVT::v16i16:
+ case MVT::v16i32:
+ case MVT::v8i64:
return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
}
}
@@ -11821,7 +12856,8 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
// Special case in 32-bit mode, where i64 is expanded into high and low parts.
if (!Subtarget->is64Bit() &&
- (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
+ (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
+ (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
Amt.getOpcode() == ISD::BITCAST &&
Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
Amt = Amt.getOperand(0);
@@ -11850,10 +12886,11 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
return SDValue();
}
-SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
+ SelectionDAG &DAG) {
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
SDValue V;
@@ -11869,6 +12906,8 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
if (V.getNode())
return V;
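+  // AVX-512 has native variable vector shifts (VPSLLV/VPSRLV/VPSRAV) for
+  // v16i32 and v8i64, so the operation can be left as-is.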
+ if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
+ return Op;
// AVX2 has VPSLLV/VPSRAV/VPSRLV.
if (Subtarget->hasInt256()) {
if (Op.getOpcode() == ISD::SRL &&
@@ -11909,8 +12948,7 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
// r = VSELECT(r, psllw(r & (char16)15, 4), a);
SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
- M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M,
- DAG.getConstant(4, MVT::i32), DAG);
+ M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
M = DAG.getNode(ISD::BITCAST, dl, VT, M);
R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
@@ -11921,8 +12959,7 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
// r = VSELECT(r, psllw(r & (char16)63, 2), a);
M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
- M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M,
- DAG.getConstant(2, MVT::i32), DAG);
+ M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
M = DAG.getNode(ISD::BITCAST, dl, VT, M);
R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
@@ -11989,7 +13026,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
SDValue RHS = N->getOperand(1);
unsigned BaseOp = 0;
unsigned Cond = 0;
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
switch (Op.getOpcode()) {
default: llvm_unreachable("Unknown ovf instruction!");
case ISD::SADDO:
@@ -12056,7 +13093,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
EVT VT = Op.getValueType();
@@ -12065,7 +13102,6 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
ExtraVT.getScalarType().getSizeInBits();
- SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32);
switch (VT.getSimpleVT().SimpleTy) {
default: return SDValue();
@@ -12099,31 +13135,41 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
// fall through
case MVT::v4i32:
case MVT::v8i16: {
- // (sext (vzext x)) -> (vsext x)
SDValue Op0 = Op.getOperand(0);
SDValue Op00 = Op0.getOperand(0);
SDValue Tmp1;
// Hopefully, this VECTOR_SHUFFLE is just a VZEXT.
if (Op0.getOpcode() == ISD::BITCAST &&
- Op00.getOpcode() == ISD::VECTOR_SHUFFLE)
- Tmp1 = LowerVectorIntExtend(Op00, DAG);
- if (Tmp1.getNode()) {
- SDValue Tmp1Op0 = Tmp1.getOperand(0);
- assert(Tmp1Op0.getOpcode() == X86ISD::VZEXT &&
- "This optimization is invalid without a VZEXT.");
- return DAG.getNode(X86ISD::VSEXT, dl, VT, Tmp1Op0.getOperand(0));
+ Op00.getOpcode() == ISD::VECTOR_SHUFFLE) {
+ // (sext (vzext x)) -> (vsext x)
+ Tmp1 = LowerVectorIntExtend(Op00, Subtarget, DAG);
+ if (Tmp1.getNode()) {
+ EVT ExtraEltVT = ExtraVT.getVectorElementType();
+ // This folding is only valid when the in-reg type is a vector of i8,
+ // i16, or i32.
+ if (ExtraEltVT == MVT::i8 || ExtraEltVT == MVT::i16 ||
+ ExtraEltVT == MVT::i32) {
+ SDValue Tmp1Op0 = Tmp1.getOperand(0);
+ assert(Tmp1Op0.getOpcode() == X86ISD::VZEXT &&
+ "This optimization is invalid without a VZEXT.");
+ return DAG.getNode(X86ISD::VSEXT, dl, VT, Tmp1Op0.getOperand(0));
+ }
+ Op0 = Tmp1;
+ }
}
// If the above didn't work, then just use Shift-Left + Shift-Right.
- Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT, Op0, ShAmt, DAG);
- return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG);
+ Tmp1 = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0, BitsDiff,
+ DAG);
+ return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Tmp1, BitsDiff,
+ DAG);
}
}
}
static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
@@ -12160,7 +13206,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
EVT T = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
unsigned Reg = 0;
unsigned size = 0;
switch(T.getSimpleVT().SimpleTy) {
@@ -12194,7 +13240,7 @@ static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
assert(Subtarget->is64Bit() && "Result not type legalized?");
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue TheChain = Op.getOperand(0);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1));
SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64,
@@ -12208,9 +13254,10 @@ static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getMergeValues(Ops, array_lengthof(Ops), dl);
}
-SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
- EVT SrcVT = Op.getOperand(0).getValueType();
- EVT DstVT = Op.getValueType();
+static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ MVT SrcVT = Op.getOperand(0).getSimpleValueType();
+ MVT DstVT = Op.getSimpleValueType();
assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
Subtarget->hasMMX() && "Unexpected custom BITCAST");
assert((DstVT == MVT::i64 ||
@@ -12230,7 +13277,7 @@ SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
SDNode *Node = Op.getNode();
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
EVT T = Node->getValueType(0);
SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
DAG.getConstant(0, T), Node->getOperand(2));
@@ -12246,7 +13293,7 @@ static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
SDNode *Node = Op.getNode();
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
// Convert seq_cst store -> xchg
@@ -12289,25 +13336,26 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
}
if (!ExtraOp)
- return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
+ return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
Op.getOperand(1));
- return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
+ return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
Op.getOperand(1), Op.getOperand(2));
}
-SDValue X86TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
// For MacOSX, we want to call an alternative entry point: __sincos_stret,
// which returns the values as { float, float } (in XMM0) or
// { double, double } (which is returned in XMM0, XMM1).
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue Arg = Op.getOperand(0);
EVT ArgVT = Arg.getValueType();
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
- ArgListTy Args;
- ArgListEntry Entry;
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
Entry.Node = Arg;
Entry.Ty = ArgTy;
@@ -12320,7 +13368,8 @@ SDValue X86TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
// the small struct {f32, f32} is returned in (eax, edx). For f64,
// the results are returned via SRet in memory.
const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
- SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy());
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
Type *RetTy = isF64
? (Type*)StructType::get(ArgTy, ArgTy, NULL)
@@ -12331,7 +13380,7 @@ SDValue X86TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
                           CallingConv::C, /*isTailCall=*/false,
/*doesNotRet=*/false, /*isReturnValueUsed*/true,
Callee, Args, DAG, dl);
- std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
+ std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
if (isF64)
// Returned in xmm0 and xmm1.
@@ -12375,9 +13424,9 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
- case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, DAG);
- case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG);
- case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, DAG);
+ case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
+ case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
+ case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
@@ -12393,7 +13442,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
- case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
+ case ISD::INTRINSIC_VOID:
+ case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::FRAME_TO_ARGS_OFFSET:
@@ -12411,7 +13461,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
case ISD::SRA:
case ISD::SRL:
- case ISD::SHL: return LowerShift(Op, DAG);
+ case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
case ISD::SADDO:
case ISD::UADDO:
case ISD::SSUBO:
@@ -12419,7 +13469,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SMULO:
case ISD::UMULO: return LowerXALUO(Op, DAG);
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
- case ISD::BITCAST: return LowerBITCAST(Op, DAG);
+ case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
case ISD::ADDC:
case ISD::ADDE:
case ISD::SUBC:
@@ -12427,14 +13477,14 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ADD: return LowerADD(Op, DAG);
case ISD::SUB: return LowerSUB(Op, DAG);
case ISD::SDIV: return LowerSDIV(Op, DAG);
- case ISD::FSINCOS: return LowerFSINCOS(Op, DAG);
+ case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
}
}
static void ReplaceATOMIC_LOAD(SDNode *Node,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) {
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
// Convert wide load -> cmpxchg8b/cmpxchg16b
@@ -12455,7 +13505,7 @@ static void ReplaceATOMIC_LOAD(SDNode *Node,
static void
ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG, unsigned NewOp) {
- DebugLoc dl = Node->getDebugLoc();
+ SDLoc dl(Node);
assert (Node->getValueType(0) == MVT::i64 &&
"Only know how to expand i64 atomics");
@@ -12480,7 +13530,7 @@ ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
switch (N->getOpcode()) {
default:
@@ -12664,6 +13714,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::SHLD: return "X86ISD::SHLD";
case X86ISD::SHRD: return "X86ISD::SHRD";
case X86ISD::FAND: return "X86ISD::FAND";
+ case X86ISD::FANDN: return "X86ISD::FANDN";
case X86ISD::FOR: return "X86ISD::FOR";
case X86ISD::FXOR: return "X86ISD::FXOR";
case X86ISD::FSRL: return "X86ISD::FSRL";
@@ -12680,6 +13731,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::CMP: return "X86ISD::CMP";
case X86ISD::COMI: return "X86ISD::COMI";
case X86ISD::UCOMI: return "X86ISD::UCOMI";
+ case X86ISD::CMPM: return "X86ISD::CMPM";
+ case X86ISD::CMPMU: return "X86ISD::CMPMU";
case X86ISD::SETCC: return "X86ISD::SETCC";
case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd";
@@ -12739,6 +13792,9 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
case X86ISD::VZEXT: return "X86ISD::VZEXT";
case X86ISD::VSEXT: return "X86ISD::VSEXT";
+ case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
+ case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
+ case X86ISD::VINSERT: return "X86ISD::VINSERT";
case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
@@ -12752,6 +13808,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::CMPP: return "X86ISD::CMPP";
case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
+ case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
+ case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
case X86ISD::ADD: return "X86ISD::ADD";
case X86ISD::SUB: return "X86ISD::SUB";
case X86ISD::ADC: return "X86ISD::ADC";
@@ -12766,9 +13824,14 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::BLSI: return "X86ISD::BLSI";
case X86ISD::BLSMSK: return "X86ISD::BLSMSK";
case X86ISD::BLSR: return "X86ISD::BLSR";
+ case X86ISD::BZHI: return "X86ISD::BZHI";
+ case X86ISD::BEXTR: return "X86ISD::BEXTR";
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
case X86ISD::PTEST: return "X86ISD::PTEST";
case X86ISD::TESTP: return "X86ISD::TESTP";
+ case X86ISD::TESTM: return "X86ISD::TESTM";
+ case X86ISD::KORTEST: return "X86ISD::KORTEST";
+ case X86ISD::KTEST: return "X86ISD::KTEST";
case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
@@ -12787,9 +13850,11 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
+ case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
case X86ISD::VPERMILP: return "X86ISD::VPERMILP";
case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
case X86ISD::VPERMV: return "X86ISD::VPERMV";
+ case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
case X86ISD::VPERMI: return "X86ISD::VPERMI";
case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
@@ -12875,6 +13940,20 @@ bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
return NumBits1 > NumBits2;
}
+bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
+ if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
+ return false;
+
+ if (!isTypeLegal(EVT::getEVT(Ty1)))
+ return false;
+
+ assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
+
+ // Assuming the caller doesn't have a zeroext or signext return parameter,
+ // truncation all the way down to i1 is valid.
+ return true;
+}
+
bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
return isInt<32>(Imm);
}
@@ -12926,6 +14005,27 @@ bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
return false;
}
+bool
+X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
+ if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
+ return false;
+
+ VT = VT.getScalarType();
+
+ if (!VT.isSimple())
+ return false;
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::f32:
+ case MVT::f64:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
// i16 instructions are longer (0x66 prefix) and potentially slower.
return !(VT1 == MVT::i32 && VT2 == MVT::i16);
@@ -12938,37 +14038,46 @@ bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
bool
X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
EVT VT) const {
+ if (!VT.isSimple())
+ return false;
+
+ MVT SVT = VT.getSimpleVT();
+
// Very little shuffling can be done for 64-bit vectors right now.
if (VT.getSizeInBits() == 64)
return false;
// FIXME: pshufb, blends, shifts.
- return (VT.getVectorNumElements() == 2 ||
+ return (SVT.getVectorNumElements() == 2 ||
ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
- isMOVLMask(M, VT) ||
- isSHUFPMask(M, VT, Subtarget->hasFp256()) ||
- isPSHUFDMask(M, VT) ||
- isPSHUFHWMask(M, VT, Subtarget->hasInt256()) ||
- isPSHUFLWMask(M, VT, Subtarget->hasInt256()) ||
- isPALIGNRMask(M, VT, Subtarget) ||
- isUNPCKLMask(M, VT, Subtarget->hasInt256()) ||
- isUNPCKHMask(M, VT, Subtarget->hasInt256()) ||
- isUNPCKL_v_undef_Mask(M, VT, Subtarget->hasInt256()) ||
- isUNPCKH_v_undef_Mask(M, VT, Subtarget->hasInt256()));
+ isMOVLMask(M, SVT) ||
+ isSHUFPMask(M, SVT) ||
+ isPSHUFDMask(M, SVT) ||
+ isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
+ isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
+ isPALIGNRMask(M, SVT, Subtarget) ||
+ isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
+ isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
+ isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
+ isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()));
}
bool
X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
EVT VT) const {
- unsigned NumElts = VT.getVectorNumElements();
+ if (!VT.isSimple())
+ return false;
+
+ MVT SVT = VT.getSimpleVT();
+ unsigned NumElts = SVT.getVectorNumElements();
// FIXME: This collection of masks seems suspect.
if (NumElts == 2)
return true;
- if (NumElts == 4 && VT.is128BitVector()) {
- return (isMOVLMask(Mask, VT) ||
- isCommutedMOVLMask(Mask, VT, true) ||
- isSHUFPMask(Mask, VT, Subtarget->hasFp256()) ||
- isSHUFPMask(Mask, VT, Subtarget->hasFp256(), /* Commuted */ true));
+ if (NumElts == 4 && SVT.is128BitVector()) {
+ return (isMOVLMask(Mask, SVT) ||
+ isCommutedMOVLMask(Mask, SVT, true) ||
+ isSHUFPMask(Mask, SVT) ||
+ isSHUFPMask(Mask, SVT, /* Commuted */ true));
}
return false;
}
@@ -14392,12 +15501,11 @@ X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
} else {
// __chkstk(MSVCRT): does not update stack pointer.
// Clobbers R10, R11 and EFLAGS.
- // FIXME: RAX(allocated size) might be reused and not killed.
BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
.addExternalSymbol("__chkstk")
.addReg(X86::RAX, RegState::Implicit)
.addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
- // RAX has the offset to subtracted from RSP.
+ // RAX has the offset to be subtracted from RSP.
BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP)
.addReg(X86::RSP)
.addReg(X86::RAX);
@@ -14589,6 +15697,9 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// Setup
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
.addMBB(restoreMBB);
+
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
MIB.addRegMask(RegInfo->getNoPreservedMask());
thisMBB->addSuccessor(mainMBB);
thisMBB->addSuccessor(restoreMBB);
@@ -14634,6 +15745,8 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
(PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
unsigned Tmp = MRI.createVirtualRegister(RC);
// Since FP is only updated here but NOT referenced, it's treated as GPR.
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
unsigned SP = RegInfo->getStackRegister();
@@ -14706,6 +15819,9 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::CMOV_V8F32:
case X86::CMOV_V4F64:
case X86::CMOV_V4I64:
+ case X86::CMOV_V16F32:
+ case X86::CMOV_V8F64:
+ case X86::CMOV_V8I64:
case X86::CMOV_GR16:
case X86::CMOV_GR32:
case X86::CMOV_RFP32:
@@ -15034,7 +16150,7 @@ static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget* Subtarget) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
@@ -15130,7 +16246,7 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
EVT VT = N->getValueType(0);
// Don't create instructions with illegal types after legalize types has run.
@@ -15154,7 +16270,7 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
- return EltsFromConsecutiveLoads(VT, Elts, dl, DAG);
+ return EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
}
/// PerformTruncateCombine - Converts truncate operation to
@@ -15249,7 +16365,7 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
// All checks match so transform back to vector_shuffle so that DAG combiner
// can finish the job
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
// Create shuffle node taking into account the case that its a unary shuffle
SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1);
@@ -15261,6 +16377,44 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
EltNo);
}
+/// Extract one bit from a mask vector, such as v16i1 or v8i1.
+/// AVX-512 feature.
+static SDValue ExtractBitFromMaskVector(SDNode *N, SelectionDAG &DAG) {
+ SDValue Vec = N->getOperand(0);
+ SDLoc dl(Vec);
+ MVT VecVT = Vec.getSimpleValueType();
+ SDValue Idx = N->getOperand(1);
+ MVT EltVT = N->getSimpleValueType(0);
+
+  assert(VecVT.getVectorElementType() == MVT::i1 && EltVT == MVT::i8 &&
+         "Unexpected operands in ExtractBitFromMaskVector");
+
+  // Variable index: zero-extend the mask to a wide integer vector and extract.
+ if (!isa<ConstantSDNode>(Idx)) {
+ MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
+ SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
+    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
+                              ExtVT.getVectorElementType(), Ext, Idx);
+ return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
+ }
+
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+
+ MVT ScalarVT = MVT::getIntegerVT(VecVT.getSizeInBits());
+ unsigned MaxShift = VecVT.getSizeInBits() - 1;
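+  // Constant index: bitcast the whole mask to a scalar integer, shift the
+  // requested bit into the MSB, then logically shift it back down to bit 0.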
+ Vec = DAG.getNode(ISD::BITCAST, dl, ScalarVT, Vec);
+ Vec = DAG.getNode(ISD::SHL, dl, ScalarVT, Vec,
+ DAG.getConstant(MaxShift - IdxVal, ScalarVT));
+ Vec = DAG.getNode(ISD::SRL, dl, ScalarVT, Vec,
+ DAG.getConstant(MaxShift, ScalarVT));
+
+ if (VecVT == MVT::v16i1) {
+ Vec = DAG.getNode(ISD::BITCAST, dl, MVT::i16, Vec);
+ return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Vec);
+ }
+ return DAG.getNode(ISD::BITCAST, dl, MVT::i8, Vec);
+}
+
/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
/// generation and convert it from being a bunch of shuffles and extracts
/// to a simple store and scalar loads to extract the elements.
@@ -15271,12 +16425,17 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
return NewOp;
SDValue InputVector = N->getOperand(0);
+
+ if (InputVector.getValueType().getVectorElementType() == MVT::i1 &&
+ !DCI.isBeforeLegalize())
+ return ExtractBitFromMaskVector(N, DAG);
+
// Detect whether we are trying to convert from mmx to i32 and the bitcast
// from mmx to v2i32 has a single usage.
if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST &&
InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx &&
InputVector.hasOneUse() && N->getValueType(0) == MVT::i32)
- return DAG.getNode(X86ISD::MMX_MOVD2W, InputVector.getDebugLoc(),
+ return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
N->getValueType(0),
InputVector.getNode()->getOperand(0));
@@ -15321,7 +16480,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
// Ok, we've now decided to do the transformation.
- DebugLoc dl = InputVector.getDebugLoc();
+ SDLoc dl(InputVector);
// Store the value to a temporary stack slot.
SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
@@ -15358,24 +16517,28 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
}
/// \brief Matches a VSELECT onto min/max or return 0 if the node doesn't match.
-static unsigned matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS,
- SDValue RHS, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static std::pair<unsigned, bool>
+matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
+ SelectionDAG &DAG, const X86Subtarget *Subtarget) {
if (!VT.isVector())
- return 0;
+ return std::make_pair(0, false);
+ bool NeedSplit = false;
switch (VT.getSimpleVT().SimpleTy) {
- default: return 0;
+ default: return std::make_pair(0, false);
case MVT::v32i8:
case MVT::v16i16:
case MVT::v8i32:
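      // Without AVX2 these 256-bit integer min/max operations are only
      // available as two 128-bit halves; without AVX at all, give up.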
if (!Subtarget->hasAVX2())
- return 0;
+ NeedSplit = true;
+ if (!Subtarget->hasAVX())
+ return std::make_pair(0, false);
+ break;
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
if (!Subtarget->hasSSE2())
- return 0;
+ return std::make_pair(0, false);
}
// SSE2 has only a small subset of the operations.
@@ -15386,6 +16549,7 @@ static unsigned matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS,
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
+ unsigned Opc = 0;
// Check for x CC y ? x : y.
if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
DAG.isEqualTo(RHS, Cond.getOperand(1))) {
@@ -15393,16 +16557,16 @@ static unsigned matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS,
default: break;
case ISD::SETULT:
case ISD::SETULE:
- return hasUnsigned ? X86ISD::UMIN : 0;
+ Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
case ISD::SETUGT:
case ISD::SETUGE:
- return hasUnsigned ? X86ISD::UMAX : 0;
+ Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
case ISD::SETLT:
case ISD::SETLE:
- return hasSigned ? X86ISD::SMIN : 0;
+ Opc = hasSigned ? X86ISD::SMIN : 0; break;
case ISD::SETGT:
case ISD::SETGE:
- return hasSigned ? X86ISD::SMAX : 0;
+ Opc = hasSigned ? X86ISD::SMAX : 0; break;
}
// Check for x CC y ? y : x -- a min/max with reversed arms.
} else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
@@ -15411,20 +16575,20 @@ static unsigned matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS,
default: break;
case ISD::SETULT:
case ISD::SETULE:
- return hasUnsigned ? X86ISD::UMAX : 0;
+ Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
case ISD::SETUGT:
case ISD::SETUGE:
- return hasUnsigned ? X86ISD::UMIN : 0;
+ Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
case ISD::SETLT:
case ISD::SETLE:
- return hasSigned ? X86ISD::SMAX : 0;
+ Opc = hasSigned ? X86ISD::SMAX : 0; break;
case ISD::SETGT:
case ISD::SETGE:
- return hasSigned ? X86ISD::SMIN : 0;
+ Opc = hasSigned ? X86ISD::SMIN : 0; break;
}
}
- return 0;
+ return std::make_pair(Opc, NeedSplit);
}
/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
@@ -15432,19 +16596,20 @@ static unsigned matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS,
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDValue Cond = N->getOperand(0);
// Get the LHS/RHS of the select.
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
EVT VT = LHS.getValueType();
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// If we have SSE[12] support, try to form min/max nodes. SSE min/max
// instructions match the semantics of the common C idiom x<y?x:y but not
// x<=y?x:y, because of how they handle negative zero (which can be
// ignored in unsafe-math mode).
if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
- VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
+ VT != MVT::f80 && TLI.isTypeLegal(VT) &&
(Subtarget->hasSSE2() ||
(Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
@@ -15583,6 +16748,22 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
}
+ EVT CondVT = Cond.getValueType();
+ if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
+ CondVT.getVectorElementType() == MVT::i1) {
+ // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
+ // lowering on AVX-512. In this case we convert it to
+    // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
+    // The same applies to all 128-bit and 256-bit vectors of i8 and i16.
+ EVT OpVT = LHS.getValueType();
+ if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
+ (OpVT.getVectorElementType() == MVT::i8 ||
+ OpVT.getVectorElementType() == MVT::i16)) {
+ Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
+ DCI.AddToWorklist(Cond.getNode());
+ return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
+ }
+ }
// If this is a select between two integer constants, try to do some
// optimizations.
if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
@@ -15700,16 +16881,19 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
case ISD::SETLT:
case ISD::SETGT: {
ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
- Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(),
+ Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
Cond.getOperand(0), Cond.getOperand(1), NewCC);
return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
}
}
}
+  // Early exit: the combines below require a legal type.
+ if (!TLI.isTypeLegal(VT))
+ return SDValue();
+
// Match VSELECTs into subs with unsigned saturation.
- if (!DCI.isBeforeLegalize() &&
- N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
+ if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
// psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
(Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
@@ -15763,14 +16947,35 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
}
// Try to match a min/max vector operation.
- if (!DCI.isBeforeLegalize() &&
- N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC)
- if (unsigned Op = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget))
- return DAG.getNode(Op, DL, N->getValueType(0), LHS, RHS);
+ if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
+    std::pair<unsigned, bool> ret =
+        matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
+ unsigned Opc = ret.first;
+ bool NeedSplit = ret.second;
+
+ if (Opc && NeedSplit) {
+ unsigned NumElems = VT.getVectorNumElements();
+ // Extract the LHS vectors
+ SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
+ SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
+
+ // Extract the RHS vectors
+ SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
+ SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
+
+ // Create min/max for each subvector
+ LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
+ RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
+
+ // Merge the result
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
+ } else if (Opc)
+ return DAG.getNode(Opc, DL, VT, LHS, RHS);
+ }
// Simplify vector selection if the selector will be produced by CMPP*/PCMP*.
- if (!DCI.isBeforeLegalize() && N->getOpcode() == ISD::VSELECT &&
- Cond.getOpcode() == ISD::SETCC) {
+ if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
+ // Check if SETCC has already been promoted
+ TLI.getSetCCResultType(*DAG.getContext(), VT) == Cond.getValueType()) {
assert(Cond.getValueType().isVector() &&
"vector select expects a vector selector!");
@@ -15817,7 +17022,6 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// matched by one of the SSE/AVX BLEND instructions. These instructions only
// depend on the highest bit in each word. Try to use SimplifyDemandedBits
// to simplify previous instructions.
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
!DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) {
unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
@@ -15826,6 +17030,15 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (BitWidth == 1)
return SDValue();
+    // Check all uses of the condition operand to see whether it is consumed
+    // by any non-BLEND instruction, which may depend on all of the bits
+    // being set properly.
+ for (SDNode::use_iterator I = Cond->use_begin(),
+ E = Cond->use_end(); I != E; ++I)
+ if (I->getOpcode() != ISD::VSELECT)
+ // TODO: Add other opcodes eventually lowered into BLEND.
+ return SDValue();
+
assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
@@ -15976,7 +17189,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
// If the flag operand isn't dead, don't touch this CMOV.
if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
@@ -16179,7 +17392,7 @@ static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
}
if (MulAmt2 &&
(isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
if (isPowerOf2_64(MulAmt2) &&
!(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
@@ -16229,7 +17442,7 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
APInt ShAmt = N1C->getAPIntValue();
Mask = Mask.shl(ShAmt);
if (Mask != 0)
- return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
+ return DAG.getNode(ISD::AND, SDLoc(N), VT,
N00, DAG.getConstant(Mask, VT));
}
}
@@ -16245,7 +17458,39 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
// hardware support for this operation. This is better expressed as an ADD
// of two values.
if (N1C && (1 == N1C->getZExtValue())) {
- return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0);
+ return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
+ }
+ }
+
+ return SDValue();
+}
+
+/// \brief Returns a vector of 0s if the input node is a vector logical
+/// shift by a constant amount which is known to be greater than or equal
+/// to the vector element size in bits.
+static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
+ EVT VT = N->getValueType(0);
+
+ if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
+ (!Subtarget->hasInt256() ||
+ (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
+ return SDValue();
+
+ SDValue Amt = N->getOperand(1);
+ SDLoc DL(N);
+ if (isSplatVector(Amt.getNode())) {
+ SDValue SclrAmt = Amt->getOperand(0);
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) {
+ APInt ShiftAmt = C->getAPIntValue();
+ unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
+
+ // SSE2/AVX2 logical shifts always return a vector of 0s
+      // if the shift amount is greater than or equal to
+      // the element size. The constant shift amount will be
+      // encoded as an 8-bit immediate.
+ if (ShiftAmt.trunc(8).uge(MaxAmount))
+ return getZeroVector(VT, Subtarget, DAG, DL);
}
}
@@ -16261,6 +17506,12 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
if (V.getNode()) return V;
}
+ if (N->getOpcode() != ISD::SRA) {
+ // Try to fold this logical shift into a zero vector.
+ SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
+ if (V.getNode()) return V;
+ }
+
return SDValue();
}
@@ -16279,7 +17530,7 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
SDValue N1 = N->getOperand(1);
SDValue CMP0 = N0->getOperand(1);
SDValue CMP1 = N1->getOperand(1);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
// The SETCCs should both refer to the same CMP.
if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
@@ -16398,7 +17649,7 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
SDValue N0 = Narrow->getOperand(0);
SDValue N1 = Narrow->getOperand(1);
- DebugLoc DL = Narrow->getDebugLoc();
+ SDLoc DL(Narrow);
// The Left side has to be a trunc.
if (N0.getOpcode() != ISD::TRUNCATE)
@@ -16464,33 +17715,80 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
if (R.getNode())
return R;
- // Create BLSI, and BLSR instructions
+ // Create BLSI, BLSR, and BZHI instructions
// BLSI is X & (-X)
// BLSR is X & (X-1)
- if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) {
+ // BZHI is X & ((1 << Y) - 1)
+ // BEXTR is ((X >> imm) & (2**size-1))
+ if (VT == MVT::i32 || VT == MVT::i64) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- DebugLoc DL = N->getDebugLoc();
-
- // Check LHS for neg
- if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 &&
- isZero(N0.getOperand(0)))
- return DAG.getNode(X86ISD::BLSI, DL, VT, N1);
-
- // Check RHS for neg
- if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 &&
- isZero(N1.getOperand(0)))
- return DAG.getNode(X86ISD::BLSI, DL, VT, N0);
+ SDLoc DL(N);
+
+ if (Subtarget->hasBMI()) {
+ // Check LHS for neg
+ if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 &&
+ isZero(N0.getOperand(0)))
+ return DAG.getNode(X86ISD::BLSI, DL, VT, N1);
+
+ // Check RHS for neg
+ if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 &&
+ isZero(N1.getOperand(0)))
+ return DAG.getNode(X86ISD::BLSI, DL, VT, N0);
+
+ // Check LHS for X-1
+ if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
+ isAllOnes(N0.getOperand(1)))
+ return DAG.getNode(X86ISD::BLSR, DL, VT, N1);
+
+ // Check RHS for X-1
+ if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 &&
+ isAllOnes(N1.getOperand(1)))
+ return DAG.getNode(X86ISD::BLSR, DL, VT, N0);
+ }
+
+ if (Subtarget->hasBMI2()) {
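+      // BZHI computes X & ((1 << Y) - 1): it zeroes all bits of X at
+      // positions greater than or equal to Y.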
+ // Check for (and (add (shl 1, Y), -1), X)
+ if (N0.getOpcode() == ISD::ADD && isAllOnes(N0.getOperand(1))) {
+ SDValue N00 = N0.getOperand(0);
+ if (N00.getOpcode() == ISD::SHL) {
+ SDValue N001 = N00.getOperand(1);
+ assert(N001.getValueType() == MVT::i8 && "unexpected type");
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(N00.getOperand(0));
+ if (C && C->getZExtValue() == 1)
+ return DAG.getNode(X86ISD::BZHI, DL, VT, N1, N001);
+ }
+ }
- // Check LHS for X-1
- if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
- isAllOnes(N0.getOperand(1)))
- return DAG.getNode(X86ISD::BLSR, DL, VT, N1);
+ // Check for (and X, (add (shl 1, Y), -1))
+ if (N1.getOpcode() == ISD::ADD && isAllOnes(N1.getOperand(1))) {
+ SDValue N10 = N1.getOperand(0);
+ if (N10.getOpcode() == ISD::SHL) {
+ SDValue N101 = N10.getOperand(1);
+ assert(N101.getValueType() == MVT::i8 && "unexpected type");
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(N10.getOperand(0));
+ if (C && C->getZExtValue() == 1)
+ return DAG.getNode(X86ISD::BZHI, DL, VT, N0, N101);
+ }
+ }
+ }
- // Check RHS for X-1
- if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 &&
- isAllOnes(N1.getOperand(1)))
- return DAG.getNode(X86ISD::BLSR, DL, VT, N0);
+ // Check for BEXTR.
+ if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
+ (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
+ ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
+ ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ if (MaskNode && ShiftNode) {
+ uint64_t Mask = MaskNode->getZExtValue();
+ uint64_t Shift = ShiftNode->getZExtValue();
+ if (isMask_64(Mask)) {
+ uint64_t MaskSize = CountPopulation_64(Mask);
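+            // BEXTR control operand: bits [7:0] hold the start bit index and
+            // bits [15:8] hold the number of bits to extract.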
+ if (Shift + MaskSize <= VT.getSizeInBits())
+ return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
+ DAG.getConstant(Shift | (MaskSize << 8), VT));
+ }
+ }
+ } // BEXTR
return SDValue();
}
@@ -16504,7 +17802,7 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
// Check LHS for vnot
if (N0.getOpcode() == ISD::XOR &&
@@ -16588,7 +17886,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
if ((SraAmt + 1) != EltBits)
return SDValue();
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
// Now we know we at least have a plendvb with the mask val. See if
// we can form a psignb/w/d.
@@ -16637,7 +17935,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
if (ShAmt1.getOpcode() == ISD::TRUNCATE)
ShAmt1 = ShAmt1.getOperand(0);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
unsigned Opc = X86ISD::SHLD;
SDValue Op0 = N0.getOperand(0);
SDValue Op1 = N1.getOperand(0);
@@ -16684,7 +17982,7 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
// Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
// and change it to SUB and CMOV.
@@ -16734,7 +18032,7 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
// Create BLSMSK instructions by finding X ^ (X-1)
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
isAllOnes(N0.getOperand(1)))
@@ -16754,7 +18052,7 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
LoadSDNode *Ld = cast<LoadSDNode>(N);
EVT RegVT = Ld->getValueType(0);
EVT MemVT = Ld->getMemoryVT();
- DebugLoc dl = Ld->getDebugLoc();
+ SDLoc dl(Ld);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned RegSz = RegVT.getSizeInBits();
@@ -16949,7 +18247,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
StoreSDNode *St = cast<StoreSDNode>(N);
EVT VT = St->getValue().getValueType();
EVT StVT = St->getMemoryVT();
- DebugLoc dl = St->getDebugLoc();
+ SDLoc dl(St);
SDValue StoredVal = St->getOperand(1);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -17112,8 +18410,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
return SDValue();
- DebugLoc LdDL = Ld->getDebugLoc();
- DebugLoc StDL = N->getDebugLoc();
+ SDLoc LdDL(Ld);
+ SDLoc StDL(N);
// If we are a 64-bit capable x86, lower to a single movq load/store pair.
// Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
// pair instead.
@@ -17206,7 +18504,7 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
return false;
- EVT VT = LHS.getValueType();
+ MVT VT = LHS.getSimpleValueType();
assert((VT.is128BitVector() || VT.is256BitVector()) &&
"Unsupported vector type for horizontal add/sub");
@@ -17277,23 +18575,24 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
// LHS = VECTOR_SHUFFLE A, B, LMask
// RHS = VECTOR_SHUFFLE A, B, RMask
// Check that the masks correspond to performing a horizontal operation.
- for (unsigned i = 0; i != NumElts; ++i) {
- int LIdx = LMask[i], RIdx = RMask[i];
-
- // Ignore any UNDEF components.
- if (LIdx < 0 || RIdx < 0 ||
- (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
- (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
- continue;
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = 0; i != NumLaneElts; ++i) {
+ int LIdx = LMask[i+l], RIdx = RMask[i+l];
+
+ // Ignore any UNDEF components.
+ if (LIdx < 0 || RIdx < 0 ||
+ (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
+ (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
+ continue;
- // Check that successive elements are being operated on. If not, this is
- // not a horizontal operation.
- unsigned Src = (i/HalfLaneElts) % 2; // each lane is split between srcs
- unsigned LaneStart = (i/NumLaneElts) * NumLaneElts;
- int Index = 2*(i%HalfLaneElts) + NumElts*Src + LaneStart;
- if (!(LIdx == Index && RIdx == Index + 1) &&
- !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
- return false;
+ // Check that successive elements are being operated on. If not, this is
+ // not a horizontal operation.
+ unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
+ int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
+ if (!(LIdx == Index && RIdx == Index + 1) &&
+ !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
+ return false;
+ }
}
LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
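To make the lane-split check above concrete, take a 256-bit v8f32 horizontal add: NumElts = 8, NumLaneElts = 4, HalfLaneElts = 2, with both sources present. A small sketch of the index the loop expects, not part of the patch:

// Expected left index for element i (0..3) of the lane starting at l (0 or 4),
// mirroring Index = 2*(i % HalfLaneElts) + NumElts*Src + l from the loop above.
int ExpectedIndex(unsigned l, unsigned i) {
  unsigned Src = i / 2;              // first half of the lane reads A, second half reads B
  return 2 * (i % 2) + 8 * Src + l;  // lane 0: 0, 2, 8, 10; lane 1: 4, 6, 12, 14
}
// The masks must then pair (Index, Index + 1), or the swapped pair when IsCommutative.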
@@ -17312,7 +18611,7 @@ static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
(Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
isHorizontalBinOp(LHS, RHS, true))
- return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS);
+ return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
return SDValue();
}
@@ -17327,7 +18626,7 @@ static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
(Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
isHorizontalBinOp(LHS, RHS, false))
- return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS);
+ return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
return SDValue();
}
@@ -17364,7 +18663,7 @@ static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
}
- return DAG.getNode(NewOp, N->getDebugLoc(), N->getValueType(0),
+ return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
N->getOperand(0), N->getOperand(1));
}
@@ -17381,6 +18680,19 @@ static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
+/// PerformFANDNCombine - Do target-specific dag combines on X86ISD::FANDN nodes
+static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
+ // FANDN(x, 0.0) -> 0.0
+ // FANDN(0.0, x) -> x
+ if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
+ if (C->getValueAPF().isPosZero())
+ return N->getOperand(1);
+ if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
+ if (C->getValueAPF().isPosZero())
+ return N->getOperand(1);
+ return SDValue();
+}
+
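X86ISD::FANDN computes (~LHS) & RHS on the raw floating-point bit patterns, so both zero folds above follow directly. A bitwise reference model, illustrative only and not part of the patch:

#include <cstdint>

// And-not of the operands' bit patterns, the operation FANDN performs.
uint32_t FandnBits(uint32_t LHS, uint32_t RHS) { return ~LHS & RHS; }
// FandnBits(0x00000000, x) == x  ->  FANDN(+0.0, x) -> x
// FandnBits(x, 0x00000000) == 0  ->  FANDN(x, +0.0) -> +0.0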
static SDValue PerformBTCombine(SDNode *N,
SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
@@ -17408,12 +18720,12 @@ static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
VT.getVectorElementType().getSizeInBits() ==
OpVT.getVectorElementType().getSizeInBits()) {
- return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
+ return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}
return SDValue();
}
-static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
+static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
EVT VT = N->getValueType(0);
if (!VT.isVector())
@@ -17422,7 +18734,7 @@ static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
  // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on
  // both SSE and AVX2 since there is no sign-extended shift right
@@ -17433,14 +18745,14 @@ static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
N0.getOpcode() == ISD::SIGN_EXTEND)) {
SDValue N00 = N0.getOperand(0);
- // EXTLOAD has a better solution on AVX2,
+ // EXTLOAD has a better solution on AVX2,
// it may be replaced with X86ISD::VSEXT node.
if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
if (!ISD::isNormalLoad(N00.getNode()))
return SDValue();
if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
- SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
+ SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
N00, N1);
return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
}
@@ -17469,7 +18781,7 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget* Subtarget) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
EVT VT = N->getValueType(0);
// Let legalize expand this if it isn't a legal type yet.
@@ -17514,7 +18826,7 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
// (and (i32 x86isd::setcc_carry), 1)
// This eliminates the zext. This transformation is necessary because
// ISD::SETCC is always legalized to i8.
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -17552,17 +18864,17 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) {
if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
- SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(),
+ SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
LHS.getValueType(), RHS, LHS.getOperand(1));
- return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0),
+ return DAG.getSetCC(SDLoc(N), N->getValueType(0),
addV, DAG.getConstant(0, addV.getValueType()), CC);
}
if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
- SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(),
+ SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
RHS.getValueType(), LHS, RHS.getOperand(1));
- return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0),
+ return DAG.getSetCC(SDLoc(N), N->getValueType(0),
addV, DAG.getConstant(0, addV.getValueType()), CC);
}
return SDValue();
@@ -17571,7 +18883,7 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) {
// Helper function of PerformSETCCCombine. It materializes "setb reg"
// as "sbb reg,reg", since the latter can be extended without a zext and
// produces an all-ones value, which is more useful than 0/1 in some cases.
-static SDValue MaterializeSETB(DebugLoc DL, SDValue EFLAGS, SelectionDAG &DAG) {
+static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG) {
return DAG.getNode(ISD::AND, DL, MVT::i8,
DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
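The value of going through sbb is that the carry flag is replicated into every bit of the result, so the compare materializes as an all-ones/all-zeros mask rather than 0/1. A scalar sketch of the same idea, illustrative only:

#include <cstdint>

// Materialize a compare as a full-width mask, the way setb followed by
// sbb reg,reg does: all ones when the subtraction would borrow, zero otherwise.
uint32_t CarryMask(uint32_t A, uint32_t B) {
  return -static_cast<uint32_t>(A < B);
}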
@@ -17582,7 +18894,7 @@ static SDValue MaterializeSETB(DebugLoc DL, SDValue EFLAGS, SelectionDAG &DAG) {
static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
SDValue EFLAGS = N->getOperand(1);
@@ -17596,7 +18908,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
EFLAGS.getValueType().isInteger() &&
!isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
- SDValue NewSub = DAG.getNode(X86ISD::SUB, EFLAGS.getDebugLoc(),
+ SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
EFLAGS.getNode()->getVTList(),
EFLAGS.getOperand(1), EFLAGS.getOperand(0));
SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
@@ -17626,7 +18938,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Dest = N->getOperand(1);
SDValue EFLAGS = N->getOperand(3);
@@ -17651,7 +18963,7 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
// SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
@@ -17665,7 +18977,7 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
!XTLI->getSubtarget()->is64Bit() &&
- !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
+ VT == MVT::i64) {
SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0),
Ld->getChain(), Op0, DAG);
DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
@@ -17686,7 +18998,7 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
// We don't have a good way to replace an EFLAGS use, so only do this when
// dead right now.
SDValue(N, 1).use_empty()) {
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
@@ -17705,7 +19017,7 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
// (sub (sete X, 0), Y) -> sbb 0, Y
// (sub (setne X, 0), Y) -> adc -1, Y
static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
// Look through ZExts.
SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
@@ -17751,7 +19063,7 @@ static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
(Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
isHorizontalBinOp(Op0, Op1, true))
- return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1);
+ return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
return OptimizeConditionalInDecrement(N, DAG);
}
@@ -17771,10 +19083,10 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
isa<ConstantSDNode>(Op1.getOperand(1))) {
APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
EVT VT = Op0.getValueType();
- SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT,
+ SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
Op1.getOperand(0),
DAG.getConstant(~XorC, VT));
- return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor,
+ return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
DAG.getConstant(C->getAPIntValue()+1, VT));
}
}
@@ -17784,7 +19096,7 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
(Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
isHorizontalBinOp(Op0, Op1, true))
- return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1);
+ return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
return OptimizeConditionalInDecrement(N, DAG);
}
@@ -17801,7 +19113,7 @@ static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
if (In.getOpcode() != X86ISD::VZEXT)
return SDValue();
- return DAG.getNode(X86ISD::VZEXT, N->getDebugLoc(), N->getValueType(0),
+ return DAG.getNode(X86ISD::VZEXT, SDLoc(N), N->getValueType(0),
In.getOperand(0));
}
@@ -17835,6 +19147,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::FMIN:
case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
case X86ISD::FAND: return PerformFANDCombine(N, DAG);
+ case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
case ISD::ANY_EXTEND:
@@ -17990,6 +19303,22 @@ namespace {
const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
}
+static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
+
+ if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
+ if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
+ std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
+ std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
+
+ if (AsmPieces.size() == 3)
+ return true;
+ else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
+ return true;
+ }
+ }
+ return false;
+}
+
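clobbersFlagRegisters only looks at the sorted clobber list split out of the constraint string. A standalone model of the same check, using std::string in place of StringRef and not part of the patch:

#include <algorithm>
#include <string>
#include <vector>

// Accepts the clobber lists produced for a bswap-style asm: ~{cc}, ~{flags}
// and ~{fpsr} must all be present, and if there are four entries one of them
// must be ~{dirflag}.
bool ClobbersFlagRegistersModel(std::vector<std::string> Pieces) {
  std::sort(Pieces.begin(), Pieces.end());
  if (Pieces.size() != 3 && Pieces.size() != 4)
    return false;
  if (!std::count(Pieces.begin(), Pieces.end(), "~{cc}") ||
      !std::count(Pieces.begin(), Pieces.end(), "~{flags}") ||
      !std::count(Pieces.begin(), Pieces.end(), "~{fpsr}"))
    return false;
  return Pieces.size() == 3 ||
         std::count(Pieces.begin(), Pieces.end(), "~{dirflag}") != 0;
}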
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
@@ -18031,12 +19360,8 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
const std::string &ConstraintsStr = IA->getConstraintString();
SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
array_pod_sort(AsmPieces.begin(), AsmPieces.end());
- if (AsmPieces.size() == 4 &&
- AsmPieces[0] == "~{cc}" &&
- AsmPieces[1] == "~{dirflag}" &&
- AsmPieces[2] == "~{flags}" &&
- AsmPieces[3] == "~{fpsr}")
- return IntrinsicLowering::LowerToByteSwap(CI);
+ if (clobbersFlagRegisters(AsmPieces))
+ return IntrinsicLowering::LowerToByteSwap(CI);
}
break;
case 3:
@@ -18049,11 +19374,7 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
const std::string &ConstraintsStr = IA->getConstraintString();
SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
array_pod_sort(AsmPieces.begin(), AsmPieces.end());
- if (AsmPieces.size() == 4 &&
- AsmPieces[0] == "~{cc}" &&
- AsmPieces[1] == "~{dirflag}" &&
- AsmPieces[2] == "~{flags}" &&
- AsmPieces[3] == "~{fpsr}")
+ if (clobbersFlagRegisters(AsmPieces))
return IntrinsicLowering::LowerToByteSwap(CI);
}
@@ -18361,7 +19682,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
getTargetMachine())))
return;
- Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
+ Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
GA->getValueType(0), Offset);
break;
}
@@ -18376,7 +19697,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
// First, see if this is a constraint that directly corresponds to an LLVM
// register class.
if (Constraint.size() == 1) {
@@ -18443,7 +19764,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
if (!Subtarget->hasSSE1()) break;
- switch (VT.getSimpleVT().SimpleTy) {
+ switch (VT.SimpleTy) {
default: break;
// Scalar SSE types.
case MVT::f32:
@@ -18468,6 +19789,11 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case MVT::v8f32:
case MVT::v4f64:
return std::make_pair(0U, &X86::VR256RegClass);
+ case MVT::v8f64:
+ case MVT::v16f32:
+ case MVT::v16i32:
+ case MVT::v8i64:
+ return std::make_pair(0U, &X86::VR512RegClass);
}
break;
}
@@ -18578,7 +19904,13 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
}
} else if (Res.second == &X86::FR32RegClass ||
Res.second == &X86::FR64RegClass ||
- Res.second == &X86::VR128RegClass) {
+ Res.second == &X86::VR128RegClass ||
+ Res.second == &X86::VR256RegClass ||
+ Res.second == &X86::FR32XRegClass ||
+ Res.second == &X86::FR64XRegClass ||
+ Res.second == &X86::VR128XRegClass ||
+ Res.second == &X86::VR256XRegClass ||
+ Res.second == &X86::VR512RegClass) {
// Handle references to XMM physical registers that got mapped into the
// wrong class. This can happen with constraints like {xmm0} where the
// target independent register mapper will just pick the first match it can
@@ -18592,6 +19924,8 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
Res.second = &X86::VR128RegClass;
else if (X86::VR256RegClass.hasType(VT))
Res.second = &X86::VR256RegClass;
+ else if (X86::VR512RegClass.hasType(VT))
+ Res.second = &X86::VR512RegClass;
}
return Res;
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 2727e220d360..bc3dd608da52 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -53,6 +53,10 @@ namespace llvm {
/// to X86::XORPS or X86::XORPD.
FXOR,
+ /// FANDN - Bitwise logical ANDNOT of floating point values. This
+ /// corresponds to X86::ANDNPS or X86::ANDNPD.
+ FANDN,
+
/// FSRL - Bitwise logical right shift of floating point values. These
/// corresponds to X86::PSRLDQ.
FSRL,
@@ -250,6 +254,12 @@ namespace llvm {
// VSEXT - Vector integer signed-extend.
VSEXT,
+ // VTRUNC - Vector integer truncate.
+ VTRUNC,
+
+      // VTRUNCM - Vector integer truncate with mask.
+ VTRUNCM,
+
// VFPEXT - Vector FP extend.
VFPEXT,
@@ -270,6 +280,13 @@ namespace llvm {
// PCMP* - Vector integer comparisons.
PCMPEQ, PCMPGT,
+ // PCMP*M - Vector integer comparisons, the result is in a mask vector.
+ PCMPEQM, PCMPGTM,
+
+ /// CMPM, CMPMU - Vector comparison generating mask bits for fp and
+ /// integer signed and unsigned data types.
+ CMPM,
+ CMPMU,
// ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
ADD, SUB, ADC, SBB, SMUL,
@@ -278,18 +295,27 @@ namespace llvm {
BLSI, // BLSI - Extract lowest set isolated bit
BLSMSK, // BLSMSK - Get mask up to lowest set bit
BLSR, // BLSR - Reset lowest set bit
+ BZHI, // BZHI - Zero high bits
+ BEXTR, // BEXTR - Bit field extract
UMUL, // LOW, HI, FLAGS = umul LHS, RHS
// MUL_IMM - X86 specific multiply by immediate.
MUL_IMM,
- // PTEST - Vector bitwise comparisons
+ // PTEST - Vector bitwise comparisons.
PTEST,
- // TESTP - Vector packed fp sign bitwise comparisons
+ // TESTP - Vector packed fp sign bitwise comparisons.
TESTP,
+ // TESTM - Vector "test" in AVX-512, the result is in a mask vector.
+ TESTM,
+
+ // OR/AND test for masks
+ KORTEST,
+ KTEST,
+
// Several flavors of instructions with vector shuffle behaviors.
PALIGNR,
PSHUFD,
@@ -310,9 +336,13 @@ namespace llvm {
UNPCKH,
VPERMILP,
VPERMV,
+ VPERMV3,
VPERMI,
VPERM2X128,
VBROADCAST,
+ // masked broadcast
+ VBROADCASTM,
+ VINSERT,
// PMULUDQ - Vector multiply packed unsigned doubleword integers
PMULUDQ,
@@ -434,25 +464,45 @@ namespace llvm {
/// Define some predicates that are used for node matching.
namespace X86 {
- /// isVEXTRACTF128Index - Return true if the specified
+ /// isVEXTRACT128Index - Return true if the specified
+ /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
+ /// suitable for input to VEXTRACTF128, VEXTRACTI128 instructions.
+ bool isVEXTRACT128Index(SDNode *N);
+
+ /// isVINSERT128Index - Return true if the specified
+ /// INSERT_SUBVECTOR operand specifies a subvector insert that is
+ /// suitable for input to VINSERTF128, VINSERTI128 instructions.
+ bool isVINSERT128Index(SDNode *N);
+
+ /// isVEXTRACT256Index - Return true if the specified
/// EXTRACT_SUBVECTOR operand specifies a vector extract that is
- /// suitable for input to VEXTRACTF128.
- bool isVEXTRACTF128Index(SDNode *N);
+ /// suitable for input to VEXTRACTF64X4, VEXTRACTI64X4 instructions.
+ bool isVEXTRACT256Index(SDNode *N);
- /// isVINSERTF128Index - Return true if the specified
+ /// isVINSERT256Index - Return true if the specified
/// INSERT_SUBVECTOR operand specifies a subvector insert that is
- /// suitable for input to VINSERTF128.
- bool isVINSERTF128Index(SDNode *N);
+ /// suitable for input to VINSERTF64X4, VINSERTI64X4 instructions.
+ bool isVINSERT256Index(SDNode *N);
- /// getExtractVEXTRACTF128Immediate - Return the appropriate
+ /// getExtractVEXTRACT128Immediate - Return the appropriate
/// immediate to extract the specified EXTRACT_SUBVECTOR index
- /// with VEXTRACTF128 instructions.
- unsigned getExtractVEXTRACTF128Immediate(SDNode *N);
+ /// with VEXTRACTF128, VEXTRACTI128 instructions.
+ unsigned getExtractVEXTRACT128Immediate(SDNode *N);
- /// getInsertVINSERTF128Immediate - Return the appropriate
+ /// getInsertVINSERT128Immediate - Return the appropriate
/// immediate to insert at the specified INSERT_SUBVECTOR index
- /// with VINSERTF128 instructions.
- unsigned getInsertVINSERTF128Immediate(SDNode *N);
+    /// with VINSERTF128, VINSERTI128 instructions.
+ unsigned getInsertVINSERT128Immediate(SDNode *N);
+
+ /// getExtractVEXTRACT256Immediate - Return the appropriate
+ /// immediate to extract the specified EXTRACT_SUBVECTOR index
+ /// with VEXTRACTF64X4, VEXTRACTI64x4 instructions.
+ unsigned getExtractVEXTRACT256Immediate(SDNode *N);
+
+ /// getInsertVINSERT256Immediate - Return the appropriate
+ /// immediate to insert at the specified INSERT_SUBVECTOR index
+ /// with VINSERTF64x4, VINSERTI64x4 instructions.
+ unsigned getInsertVINSERT256Immediate(SDNode *N);
/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
@@ -511,7 +561,7 @@ namespace llvm {
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
virtual EVT
- getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+ getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
MachineFunction &MF) const;
@@ -563,7 +613,7 @@ namespace llvm {
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - Return the value type to use for ISD::SETCC.
- virtual EVT getSetCCResultType(EVT VT) const;
+ virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
@@ -610,7 +660,7 @@ namespace llvm {
/// error, this returns a register number of 0.
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
+ MVT VT) const;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
@@ -634,6 +684,8 @@ namespace llvm {
virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+ virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
+
/// isZExtFree - Return true if any actual instruction that defines a
/// value of type Ty1 implicit zero-extends the value to Ty2 in the result
/// register. This does not necessarily include registers defined in
@@ -646,11 +698,11 @@ namespace llvm {
virtual bool isZExtFree(EVT VT1, EVT VT2) const;
virtual bool isZExtFree(SDValue Val, EVT VT2) const;
- /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
- /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
- /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
- /// is expanded to mul + add.
- virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; }
+ /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
+ /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
+ /// expanded to FMAs when this method returns true, otherwise fmuladd is
+ /// expanded to fmul + fadd.
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
/// isNarrowingProfitable - Return true if it's profitable to narrow
/// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
@@ -723,6 +775,8 @@ namespace llvm {
SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
SelectionDAG &DAG) const;
+ virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const LLVM_OVERRIDE;
+
/// \brief Reset the operation actions based on target options.
virtual void resetOperationActions();
@@ -734,7 +788,6 @@ namespace llvm {
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
- const X86RegisterInfo *RegInfo;
const DataLayout *TD;
/// Used to store the TargetOptions so that we don't waste time resetting
@@ -760,16 +813,16 @@ namespace llvm {
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerMemArgument(SDValue Chain,
CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &ArgInfo,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
const CCValAssign &VA, MachineFrameInfo *MFI,
unsigned i) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const;
@@ -791,7 +844,7 @@ namespace llvm {
bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
SDValue Chain, bool IsTailCall, bool Is64Bit,
- int FPDiff, DebugLoc dl) const;
+ int FPDiff, SDLoc dl) const;
unsigned GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG &DAG) const;
@@ -800,37 +853,32 @@ namespace llvm {
bool isSigned,
bool isReplace) const;
- SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
- SelectionDAG &DAG) const;
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
+ SDValue LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
int64_t Offset, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerToBT(SDValue And, ISD::CondCode CC,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
@@ -847,25 +895,13 @@ namespace llvm {
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
-
- // Utility functions to help LowerVECTOR_SHUFFLE & LowerBUILD_VECTOR
- SDValue LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const;
- SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const;
- SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const;
-
- SDValue LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const;
-
- SDValue LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerCall(CallLoweringInfo &CLI,
@@ -876,7 +912,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
@@ -891,6 +927,8 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const;
+ virtual const uint16_t *getScratchRegisters(CallingConv::ID CC) const;
+
/// Utility function to emit atomic-load-arith operations (and, or, xor,
/// nand, max, min, umax, umin). It takes the corresponding instruction to
/// expand, the associated machine basic block, and the associated X86
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
new file mode 100644
index 000000000000..cb19fbd5638e
--- /dev/null
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -0,0 +1,3526 @@
+// Bitcasts between 512-bit vector types. Return the original type since
+// no instruction is needed for the conversion
+let Predicates = [HasAVX512] in {
+ def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
+ def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
+ def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
+ def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
+ def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
+ def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
+ def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
+ def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
+ def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
+ def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
+ def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
+ def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
+ def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
+
+ def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
+ def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
+ def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
+ def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
+ def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
+ def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
+ def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
+ def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
+ def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
+ def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
+ def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
+ def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
+ def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
+ def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
+ def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
+ def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
+ def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
+ def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
+ def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
+ def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
+ def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
+ def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
+ def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
+ def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
+ def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
+ def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
+ def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
+ def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
+ def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
+ def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
+
+// Bitcasts between 256-bit vector types. Return the original type since
+// no instruction is needed for the conversion
+ def : Pat<(v4f64 (bitconvert (v8f32 VR256X:$src))), (v4f64 VR256X:$src)>;
+ def : Pat<(v4f64 (bitconvert (v8i32 VR256X:$src))), (v4f64 VR256X:$src)>;
+ def : Pat<(v4f64 (bitconvert (v4i64 VR256X:$src))), (v4f64 VR256X:$src)>;
+ def : Pat<(v4f64 (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
+ def : Pat<(v4f64 (bitconvert (v32i8 VR256X:$src))), (v4f64 VR256X:$src)>;
+ def : Pat<(v8f32 (bitconvert (v8i32 VR256X:$src))), (v8f32 VR256X:$src)>;
+ def : Pat<(v8f32 (bitconvert (v4i64 VR256X:$src))), (v8f32 VR256X:$src)>;
+ def : Pat<(v8f32 (bitconvert (v4f64 VR256X:$src))), (v8f32 VR256X:$src)>;
+ def : Pat<(v8f32 (bitconvert (v32i8 VR256X:$src))), (v8f32 VR256X:$src)>;
+ def : Pat<(v8f32 (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
+ def : Pat<(v4i64 (bitconvert (v8f32 VR256X:$src))), (v4i64 VR256X:$src)>;
+ def : Pat<(v4i64 (bitconvert (v8i32 VR256X:$src))), (v4i64 VR256X:$src)>;
+ def : Pat<(v4i64 (bitconvert (v4f64 VR256X:$src))), (v4i64 VR256X:$src)>;
+ def : Pat<(v4i64 (bitconvert (v32i8 VR256X:$src))), (v4i64 VR256X:$src)>;
+ def : Pat<(v4i64 (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
+ def : Pat<(v32i8 (bitconvert (v4f64 VR256X:$src))), (v32i8 VR256X:$src)>;
+ def : Pat<(v32i8 (bitconvert (v4i64 VR256X:$src))), (v32i8 VR256X:$src)>;
+ def : Pat<(v32i8 (bitconvert (v8f32 VR256X:$src))), (v32i8 VR256X:$src)>;
+ def : Pat<(v32i8 (bitconvert (v8i32 VR256X:$src))), (v32i8 VR256X:$src)>;
+ def : Pat<(v32i8 (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
+ def : Pat<(v8i32 (bitconvert (v32i8 VR256X:$src))), (v8i32 VR256X:$src)>;
+ def : Pat<(v8i32 (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
+ def : Pat<(v8i32 (bitconvert (v8f32 VR256X:$src))), (v8i32 VR256X:$src)>;
+ def : Pat<(v8i32 (bitconvert (v4i64 VR256X:$src))), (v8i32 VR256X:$src)>;
+ def : Pat<(v8i32 (bitconvert (v4f64 VR256X:$src))), (v8i32 VR256X:$src)>;
+ def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))), (v16i16 VR256X:$src)>;
+ def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))), (v16i16 VR256X:$src)>;
+ def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))), (v16i16 VR256X:$src)>;
+ def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))), (v16i16 VR256X:$src)>;
+ def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))), (v16i16 VR256X:$src)>;
+}
+
+//
+// AVX-512: the VPXOR instruction writes zero to its upper part, so it is safe
+// to use it to build zeros.
+//
+
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+ isPseudo = 1, Predicates = [HasAVX512] in {
+def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
+ [(set VR512:$dst, (v16f32 immAllZerosV))]>;
+}
+
+def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
+def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
+def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
+def : Pat<(v16f32 immAllZerosV), (AVX512_512_SET0)>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 - VECTOR INSERT
+//
+// -- 32x8 form --
+let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
+def VINSERTF32x4rr : AVX512AIi8<0x18, MRMSrcReg, (outs VR512:$dst),
+ (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
+ "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, EVEX_4V, EVEX_V512;
+let mayLoad = 1 in
+def VINSERTF32x4rm : AVX512AIi8<0x18, MRMSrcMem, (outs VR512:$dst),
+ (ins VR512:$src1, f128mem:$src2, i8imm:$src3),
+ "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
+}
+
+// -- 64x4 fp form --
+let neverHasSideEffects = 1, ExeDomain = SSEPackedDouble in {
+def VINSERTF64x4rr : AVX512AIi8<0x1a, MRMSrcReg, (outs VR512:$dst),
+ (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
+ "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, EVEX_4V, EVEX_V512, VEX_W;
+let mayLoad = 1 in
+def VINSERTF64x4rm : AVX512AIi8<0x1a, MRMSrcMem, (outs VR512:$dst),
+ (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
+ "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
+}
+// -- 32x4 integer form --
+let neverHasSideEffects = 1 in {
+def VINSERTI32x4rr : AVX512AIi8<0x38, MRMSrcReg, (outs VR512:$dst),
+ (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
+ "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, EVEX_4V, EVEX_V512;
+let mayLoad = 1 in
+def VINSERTI32x4rm : AVX512AIi8<0x38, MRMSrcMem, (outs VR512:$dst),
+ (ins VR512:$src1, i128mem:$src2, i8imm:$src3),
+ "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
+
+}
+
+let neverHasSideEffects = 1 in {
+// -- 64x4 form --
+def VINSERTI64x4rr : AVX512AIi8<0x3a, MRMSrcReg, (outs VR512:$dst),
+ (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
+ "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, EVEX_4V, EVEX_V512, VEX_W;
+let mayLoad = 1 in
+def VINSERTI64x4rm : AVX512AIi8<0x3a, MRMSrcMem, (outs VR512:$dst),
+ (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
+ "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
+}
+
+def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (v4f32 VR128X:$src2),
+ (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
+ (INSERT_get_vinsert128_imm VR512:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (v2f64 VR128X:$src2),
+ (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
+ (INSERT_get_vinsert128_imm VR512:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (v2i64 VR128X:$src2),
+ (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
+ (INSERT_get_vinsert128_imm VR512:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1), (v4i32 VR128X:$src2),
+ (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
+ (INSERT_get_vinsert128_imm VR512:$ins))>;
+
+def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (loadv4f32 addr:$src2),
+ (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
+ (INSERT_get_vinsert128_imm VR512:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1),
+ (bc_v4i32 (loadv2i64 addr:$src2)),
+ (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
+ (INSERT_get_vinsert128_imm VR512:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (loadv2f64 addr:$src2),
+ (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
+ (INSERT_get_vinsert128_imm VR512:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (loadv2i64 addr:$src2),
+ (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
+ (INSERT_get_vinsert128_imm VR512:$ins))>;
+
+def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (v8f32 VR256X:$src2),
+ (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
+ (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (v4f64 VR256X:$src2),
+ (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
+ (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (v4i64 VR256X:$src2),
+          (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
+                        (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1), (v8i32 VR256X:$src2),
+          (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
+                        (INSERT_get_vinsert256_imm VR512:$ins))>;
+
+def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (loadv8f32 addr:$src2),
+ (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
+ (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (loadv4f64 addr:$src2),
+ (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
+ (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (loadv4i64 addr:$src2),
+ (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
+ (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1),
+ (bc_v8i32 (loadv4i64 addr:$src2)),
+ (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
+ (INSERT_get_vinsert256_imm VR512:$ins))>;
+
+// vinsertps - insert f32 to XMM
+def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
+ (ins VR128X:$src1, VR128X:$src2, u32u8imm:$src3),
+ "vinsertps{z}\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ [(set VR128X:$dst, (X86insrtps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
+ EVEX_4V;
+def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
+ (ins VR128X:$src1, f32mem:$src2, u32u8imm:$src3),
+ "vinsertps{z}\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ [(set VR128X:$dst, (X86insrtps VR128X:$src1,
+ (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
+ imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 VECTOR EXTRACT
+//---
+let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
+// -- 32x4 form --
+def VEXTRACTF32x4rr : AVX512AIi8<0x19, MRMDestReg, (outs VR128X:$dst),
+ (ins VR512:$src1, i8imm:$src2),
+ "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX, EVEX_V512;
+def VEXTRACTF32x4mr : AVX512AIi8<0x19, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR512:$src1, i8imm:$src2),
+ "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
+
+// -- 64x4 form --
+def VEXTRACTF64x4rr : AVX512AIi8<0x1b, MRMDestReg, (outs VR256X:$dst),
+ (ins VR512:$src1, i8imm:$src2),
+ "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX, EVEX_V512, VEX_W;
+let mayStore = 1 in
+def VEXTRACTF64x4mr : AVX512AIi8<0x1b, MRMDestMem, (outs),
+ (ins f256mem:$dst, VR512:$src1, i8imm:$src2),
+ "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
+}
+
+let neverHasSideEffects = 1 in {
+// -- 32x4 form --
+def VEXTRACTI32x4rr : AVX512AIi8<0x39, MRMDestReg, (outs VR128X:$dst),
+ (ins VR512:$src1, i8imm:$src2),
+ "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX, EVEX_V512;
+def VEXTRACTI32x4mr : AVX512AIi8<0x39, MRMDestMem, (outs),
+ (ins i128mem:$dst, VR512:$src1, i8imm:$src2),
+ "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
+
+// -- 64x4 form --
+def VEXTRACTI64x4rr : AVX512AIi8<0x3b, MRMDestReg, (outs VR256X:$dst),
+ (ins VR512:$src1, i8imm:$src2),
+ "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX, EVEX_V512, VEX_W;
+let mayStore = 1 in
+def VEXTRACTI64x4mr : AVX512AIi8<0x3b, MRMDestMem, (outs),
+ (ins i256mem:$dst, VR512:$src1, i8imm:$src2),
+ "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
+}
+
+def : Pat<(vextract128_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
+ (v4f32 (VEXTRACTF32x4rr VR512:$src1,
+ (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
+
+def : Pat<(vextract128_extract:$ext VR512:$src1, (iPTR imm)),
+ (v4i32 (VEXTRACTF32x4rr VR512:$src1,
+ (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
+
+def : Pat<(vextract128_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
+ (v2f64 (VEXTRACTF32x4rr VR512:$src1,
+ (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
+
+def : Pat<(vextract128_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
+ (v2i64 (VEXTRACTI32x4rr VR512:$src1,
+ (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
+
+
+def : Pat<(vextract256_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
+ (v8f32 (VEXTRACTF64x4rr VR512:$src1,
+ (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
+
+def : Pat<(vextract256_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
+ (v8i32 (VEXTRACTI64x4rr VR512:$src1,
+ (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
+
+def : Pat<(vextract256_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
+ (v4f64 (VEXTRACTF64x4rr VR512:$src1,
+ (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
+
+def : Pat<(vextract256_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
+ (v4i64 (VEXTRACTI64x4rr VR512:$src1,
+ (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
+
+// A 256-bit subvector extract from the first 512-bit vector position
+// is a subregister copy that needs no instruction.
+def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
+ (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
+def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
+ (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
+def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
+ (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
+def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
+ (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
+
+// zmm -> xmm
+def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
+ (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
+def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
+ (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
+def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
+ (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
+def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
+ (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
+
+
+// A 128-bit subvector insert to the first 512-bit vector position
+// is a subregister copy that needs no instruction.
+def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
+ (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
+ sub_ymm)>;
+def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
+ (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
+ (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
+ sub_ymm)>;
+def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
+ (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
+ sub_ymm)>;
+def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
+ (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
+ (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
+ sub_ymm)>;
+
+def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
+def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
+ (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
+def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
+def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
+ (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
+
+// vextractps - extract 32 bits from XMM
+def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
+ (ins VR128X:$src1, u32u8imm:$src2),
+ "vextractps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
+ EVEX;
+
+def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
+ (ins f32mem:$dst, VR128X:$src1, u32u8imm:$src2),
+ "vextractps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
+ addr:$dst)]>, EVEX;
+
+//===---------------------------------------------------------------------===//
+// AVX-512 BROADCAST
+//---
+multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr,
+ RegisterClass DestRC,
+ RegisterClass SrcRC, X86MemOperand x86memop> {
+ def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ []>, EVEX;
+ def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),[]>, EVEX;
+}
+let ExeDomain = SSEPackedSingle in {
+ defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss{z}", VR512,
+ VR128X, f32mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
+}
+
+let ExeDomain = SSEPackedDouble in {
+ defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd{z}", VR512,
+ VR128X, f64mem>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+}
+
+def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
+ (VBROADCASTSSZrm addr:$src)>;
+def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
+ (VBROADCASTSDZrm addr:$src)>;
+
+def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
+ (VBROADCASTSSZrm addr:$src)>;
+def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
+ (VBROADCASTSDZrm addr:$src)>;
+
+multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
+ RegisterClass SrcRC, RegisterClass KRC> {
+ def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ []>, EVEX, EVEX_V512;
+ def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
+ (ins KRC:$mask, SrcRC:$src),
+ !strconcat(OpcodeStr,
+ "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
+ []>, EVEX, EVEX_V512, EVEX_KZ;
+}
+
+defm VPBROADCASTDr : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>;
+defm VPBROADCASTQr : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>,
+ VEX_W;
+
+def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
+ (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
+
+def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
+ (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
+
+def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
+ (VPBROADCASTDrZrr GR32:$src)>;
+def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
+ (VPBROADCASTQrZrr GR64:$src)>;
+def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
+ (VPBROADCASTQrZkrr VK8WM:$mask, GR64:$src)>;
+
+def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
+ (VPBROADCASTDrZrr GR32:$src)>;
+def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
+ (VPBROADCASTQrZrr GR64:$src)>;
+
+multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
+ X86MemOperand x86memop, PatFrag ld_frag,
+ RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
+ RegisterClass KRC> {
+ def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst,
+ (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
+ def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
+ VR128X:$src),
+ !strconcat(OpcodeStr,
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ [(set DstRC:$dst,
+ (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
+ EVEX, EVEX_KZ;
+ let mayLoad = 1 in {
+ def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst,
+ (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
+ def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
+ x86memop:$src),
+ !strconcat(OpcodeStr,
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
+ (ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
+ }
+}
+
+defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
+ loadi32, VR512, v16i32, v4i32, VK16WM>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
+defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
+ loadi64, VR512, v8i64, v2i64, VK8WM>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VT1>;
+
+def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
+ (VPBROADCASTDZrr VR128X:$src)>;
+def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
+ (VPBROADCASTQZrr VR128X:$src)>;
+
+def : Pat<(v16f32 (X86VBroadcast (v4f32 VR128X:$src))),
+ (VBROADCASTSSZrr VR128X:$src)>;
+def : Pat<(v8f64 (X86VBroadcast (v2f64 VR128X:$src))),
+ (VBROADCASTSDZrr VR128X:$src)>;
+
+def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
+ (VBROADCASTSSZrr VR128X:$src)>;
+def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
+ (VBROADCASTSDZrr VR128X:$src)>;
+
+// Provide fallback in case the load node that is used in the patterns above
+// is used by additional users, which prevents the pattern selection.
+def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
+ (VBROADCASTSSZrr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
+def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
+ (VBROADCASTSDZrr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
+
+
+let Predicates = [HasAVX512] in {
+def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
+ (EXTRACT_SUBREG
+ (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
+ addr:$src)), sub_ymm)>;
+}
+//===----------------------------------------------------------------------===//
+// AVX-512 BROADCAST MASK TO VECTOR REGISTER
+//---
+
+multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
+ RegisterClass DstRC, RegisterClass KRC,
+ ValueType OpVT, ValueType SrcVT> {
+def rr : AVX512XS8I<opc, MRMDestReg, (outs DstRC:$dst), (ins KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ []>, EVEX;
+}
+
+defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d", VR512,
+ VK16, v16i32, v16i1>, EVEX_V512;
+defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q", VR512,
+ VK8, v8i64, v8i1>, EVEX_V512, VEX_W;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 - VPERM
+//
+// -- immediate form --
+multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ SDNode OpNode, PatFrag mem_frag,
+ X86MemOperand x86memop, ValueType OpVT> {
+ def ri : AVX512AIi8<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
+ EVEX;
+ def mi : AVX512AIi8<opc, MRMSrcMem, (outs RC:$dst),
+ (ins x86memop:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (OpVT (OpNode (mem_frag addr:$src1),
+ (i8 imm:$src2))))]>, EVEX;
+}
+
+defm VPERMQZ : avx512_perm_imm<0x00, "vpermq", VR512, X86VPermi, memopv8i64,
+ i512mem, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+let ExeDomain = SSEPackedDouble in
+defm VPERMPDZ : avx512_perm_imm<0x01, "vpermpd", VR512, X86VPermi, memopv8f64,
+ f512mem, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+// -- VPERM - register form --
+multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {
+
+ def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;
+
+ def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
+ EVEX_4V;
+}
+
+defm VPERMDZ : avx512_perm<0x36, "vpermd", VR512, memopv16i32, i512mem,
+ v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPERMQZ : avx512_perm<0x36, "vpermq", VR512, memopv8i64, i512mem,
+ v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+let ExeDomain = SSEPackedSingle in
+defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, memopv16f32, f512mem,
+ v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+let ExeDomain = SSEPackedDouble in
+defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64, f512mem,
+ v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+// -- VPERM2I - 3 source operands form --
+multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ PatFrag mem_frag, X86MemOperand x86memop,
+ ValueType OpVT> {
+let Constraints = "$src1 = $dst" in {
+ def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, RC:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set RC:$dst,
+ (OpVT (X86VPermv3 RC:$src1, RC:$src2, RC:$src3)))]>,
+ EVEX_4V;
+
+ def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, x86memop:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set RC:$dst,
+ (OpVT (X86VPermv3 RC:$src1, RC:$src2,
+ (mem_frag addr:$src3))))]>, EVEX_4V;
+ }
+}
+defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, memopv16i32, i512mem,
+ v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, memopv8i64, i512mem,
+ v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32, i512mem,
+ v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64, i512mem,
+ v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 - BLEND using mask
+//
+multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, Intrinsic Int,
+ RegisterClass KRC, RegisterClass RC,
+ X86MemOperand x86memop, PatFrag mem_frag,
+ SDNode OpNode, ValueType vt> {
+ def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
+ [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
+ (vt RC:$src1)))]>, EVEX_4V, EVEX_K;
+ def rr_Int : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
+ [(set RC:$dst, (Int KRC:$mask, (vt RC:$src2),
+ (vt RC:$src1)))]>, EVEX_4V, EVEX_K;
+
+ let mayLoad = 1 in {
+ def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $mask, $dst|$dst, $mask, $src1, $src2}"),
+ []>,
+ EVEX_4V, EVEX_K;
+
+ def rm_Int : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $mask, $dst|$dst, $mask, $src1, $src2}"),
+ [(set RC:$dst, (Int KRC:$mask, (vt RC:$src1),
+ (mem_frag addr:$src2)))]>,
+ EVEX_4V, EVEX_K;
+ }
+}
+
+let ExeDomain = SSEPackedSingle in
+defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps",
+ int_x86_avx512_mskblend_ps_512,
+ VK16WM, VR512, f512mem,
+ memopv16f32, vselect, v16f32>,
+ EVEX_CD8<32, CD8VF>, EVEX_V512;
+let ExeDomain = SSEPackedDouble in
+defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd",
+ int_x86_avx512_mskblend_pd_512,
+ VK8WM, VR512, f512mem,
+ memopv8f64, vselect, v8f64>,
+ VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
+
+defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd",
+ int_x86_avx512_mskblend_d_512,
+ VK16WM, VR512, f512mem,
+ memopv16i32, vselect, v16i32>,
+ EVEX_CD8<32, CD8VF>, EVEX_V512;
+
+defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq",
+ int_x86_avx512_mskblend_q_512,
+ VK8WM, VR512, f512mem,
+ memopv8i64, vselect, v8i64>,
+ VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
+
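+// A 256-bit vselect on a v8i1 mask is implemented by widening the operands to
+// 512 bits, performing the 512-bit masked blend, and extracting the low 256
+// bits of the result.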
+let Predicates = [HasAVX512] in {
+def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
+ (v8f32 VR256X:$src2))),
+ (EXTRACT_SUBREG
+ (v16f32 (VBLENDMPSZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
+ (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
+ (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
+
+def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
+ (v8i32 VR256X:$src2))),
+ (EXTRACT_SUBREG
+ (v16i32 (VPBLENDMDZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
+}
+
+multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, RegisterClass KRC,
+ RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
+ SDNode OpNode, ValueType vt> {
+ def rr : AVX512BI<opc, MRMSrcReg,
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
+ IIC_SSE_ALU_F32P_RR>, EVEX_4V;
+ def rm : AVX512BI<opc, MRMSrcMem,
+ (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2)))],
+ IIC_SSE_ALU_F32P_RM>, EVEX_4V;
+}
+
+defm VPCMPEQDZ : avx512_icmp_packed<0x76, "vpcmpeqd", VK16, VR512, i512mem,
+ memopv16i32, X86pcmpeqm, v16i32>, EVEX_V512;
+defm VPCMPEQQZ : avx512_icmp_packed<0x29, "vpcmpeqq", VK8, VR512, i512mem,
+ memopv8i64, X86pcmpeqm, v8i64>, T8, EVEX_V512, VEX_W;
+
+defm VPCMPGTDZ : avx512_icmp_packed<0x66, "vpcmpgtd", VK16, VR512, i512mem,
+ memopv16i32, X86pcmpgtm, v16i32>, EVEX_V512;
+defm VPCMPGTQZ : avx512_icmp_packed<0x37, "vpcmpgtq", VK8, VR512, i512mem,
+ memopv8i64, X86pcmpgtm, v8i64>, T8, EVEX_V512, VEX_W;
+
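+// 256-bit integer compares are lowered by widening the operands to 512 bits
+// and copying the resulting 16-bit mask into VK8.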
+def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
+ (COPY_TO_REGCLASS (VPCMPGTDZrr
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
+
+def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
+ (COPY_TO_REGCLASS (VPCMPEQDZrr
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
+
+multiclass avx512_icmp_cc<bits<8> opc, RegisterClass KRC,
+ RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
+ SDNode OpNode, ValueType vt, Operand CC, string asm,
+ string asm_alt> {
+ def rri : AVX512AIi8<opc, MRMSrcReg,
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
+ [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))],
+ IIC_SSE_ALU_F32P_RR>, EVEX_4V;
+ def rmi : AVX512AIi8<opc, MRMSrcMem,
+ (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
+ [(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2),
+ imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
+ // Accept explicit immediate argument form instead of comparison code.
+ let neverHasSideEffects = 1 in {
+ def rri_alt : AVX512AIi8<opc, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
+ asm_alt, [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
+ def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
+ asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
+ }
+}
+
+defm VPCMPDZ : avx512_icmp_cc<0x1F, VK16, VR512, i512mem, memopv16i32,
+ X86cmpm, v16i32, AVXCC,
+ "vpcmp${cc}d\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vpcmpd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPCMPUDZ : avx512_icmp_cc<0x1E, VK16, VR512, i512mem, memopv16i32,
+ X86cmpmu, v16i32, AVXCC,
+ "vpcmp${cc}ud\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vpcmpud\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+defm VPCMPQZ : avx512_icmp_cc<0x1F, VK8, VR512, i512mem, memopv8i64,
+ X86cmpm, v8i64, AVXCC,
+ "vpcmp${cc}q\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vpcmpq\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
+ VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+defm VPCMPUQZ : avx512_icmp_cc<0x1E, VK8, VR512, i512mem, memopv8i64,
+ X86cmpmu, v8i64, AVXCC,
+ "vpcmp${cc}uq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vpcmpuq\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
+ VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+// avx512_cmp_packed - AVX-512 packed FP compare instructions; the comparison
+// result is written to a mask register.
+multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
+ X86MemOperand x86memop, Operand CC,
+ SDNode OpNode, ValueType vt, string asm,
+ string asm_alt, Domain d> {
+ def rri : AVX512PIi8<0xC2, MRMSrcReg,
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
+ [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
+ def rmi : AVX512PIi8<0xC2, MRMSrcMem,
+ (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
+ [(set KRC:$dst,
+ (OpNode (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;
+
+ // Accept explicit immediate argument form instead of comparison code.
+ let neverHasSideEffects = 1 in {
+ def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
+ asm_alt, [], d>;
+ def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
+ asm_alt, [], d>;
+ }
+}
+
+defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, AVXCC, X86cmpm, v16f32,
+ "vcmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vcmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
+ SSEPackedSingle>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, AVXCC, X86cmpm, v8f64,
+ "vcmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vcmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
+ SSEPackedDouble>, OpSize, EVEX_4V, VEX_W, EVEX_V512,
+ EVEX_CD8<64, CD8VF>;
+
+def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
+ (COPY_TO_REGCLASS (VCMPPSZrri
+ (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
+ (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
+ imm:$cc), VK8)>;
+def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
+ (COPY_TO_REGCLASS (VPCMPDZrri
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
+ imm:$cc), VK8)>;
+def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
+ (COPY_TO_REGCLASS (VPCMPUDZrri
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
+ imm:$cc), VK8)>;
+
+// Mask register copy, including
+// - copy between mask registers
+// - load/store mask registers
+// - copy from GPR to mask register and vice versa
+//
+multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
+ string OpcodeStr, RegisterClass KRC,
+ ValueType vt, X86MemOperand x86memop> {
+ let neverHasSideEffects = 1 in {
+ def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+ let mayLoad = 1 in
+ def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set KRC:$dst, (vt (load addr:$src)))]>;
+ let mayStore = 1 in
+ def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+ }
+}
+
+multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
+ string OpcodeStr,
+ RegisterClass KRC, RegisterClass GRC> {
+ let neverHasSideEffects = 1 in {
+ def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+ def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+ }
+}
+
+let Predicates = [HasAVX512] in {
+ defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
+ VEX, TB;
+ defm KMOVW : avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
+ VEX, TB;
+}
+
+let Predicates = [HasAVX512] in {
+ // GR16 from/to 16-bit mask
+ def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
+ (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
+ def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
+ (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
+
+ // Store kreg in memory
+ def : Pat<(store (v16i1 VK16:$src), addr:$dst),
+ (KMOVWmk addr:$dst, VK16:$src)>;
+
+ def : Pat<(store (v8i1 VK8:$src), addr:$dst),
+ (KMOVWmk addr:$dst, (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16)))>;
+}
+// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
+let Predicates = [HasAVX512] in {
+ // GR from/to 8-bit mask without native support
+ def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
+ (COPY_TO_REGCLASS
+ (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
+ VK8)>;
+ def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
+ (EXTRACT_SUBREG
+ (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
+ sub_8bit)>;
+}
+
+// Mask unary operation
+// - KNOT
+multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
+ RegisterClass KRC, SDPatternOperator OpNode> {
+ let Predicates = [HasAVX512] in
+ def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set KRC:$dst, (OpNode KRC:$src))]>;
+}
+
+multiclass avx512_mask_unop_w<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode> {
+ defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
+ VEX, TB;
+}
+
+defm KNOT : avx512_mask_unop_w<0x44, "knot", not>;
+
+def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
+def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
+ (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
+
+// With AVX-512, 8-bit mask is promoted to 16-bit mask.
+def : Pat<(not VK8:$src),
+ (COPY_TO_REGCLASS
+ (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
+
+// Mask binary operation
+// - KADD, KAND, KANDN, KOR, KXNOR, KXOR
+multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
+ RegisterClass KRC, SDPatternOperator OpNode> {
+ let Predicates = [HasAVX512] in
+ def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
+}
+
+multiclass avx512_mask_binop_w<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode> {
+ defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
+ VEX_4V, VEX_L, TB;
+}
+
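+// andn and xnor model the KANDN (~src1 & src2) and KXNOR (~(src1 ^ src2))
+// operations.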
+def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
+def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
+
+let isCommutable = 1 in {
+ defm KADD : avx512_mask_binop_w<0x4a, "kadd", add>;
+ defm KAND : avx512_mask_binop_w<0x41, "kand", and>;
+ let isCommutable = 0 in
+ defm KANDN : avx512_mask_binop_w<0x42, "kandn", andn>;
+ defm KOR : avx512_mask_binop_w<0x45, "kor", or>;
+ defm KXNOR : avx512_mask_binop_w<0x46, "kxnor", xnor>;
+ defm KXOR : avx512_mask_binop_w<0x47, "kxor", xor>;
+}
+
+multiclass avx512_mask_binop_int<string IntName, string InstName> {
+ let Predicates = [HasAVX512] in
+ def : Pat<(!cast<Intrinsic>("int_x86_"##IntName##"_v16i1")
+ VK16:$src1, VK16:$src2),
+ (!cast<Instruction>(InstName##"Wrr") VK16:$src1, VK16:$src2)>;
+}
+
+defm : avx512_mask_binop_int<"kadd", "KADD">;
+defm : avx512_mask_binop_int<"kand", "KAND">;
+defm : avx512_mask_binop_int<"kandn", "KANDN">;
+defm : avx512_mask_binop_int<"kor", "KOR">;
+defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
+defm : avx512_mask_binop_int<"kxor", "KXOR">;
+// With AVX-512, 8-bit mask is promoted to 16-bit mask.
+multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
+ let Predicates = [HasAVX512] in
+ def : Pat<(OpNode VK8:$src1, VK8:$src2),
+ (COPY_TO_REGCLASS
+ (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
+ (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
+}
+
+defm : avx512_binop_pat<and, KANDWrr>;
+defm : avx512_binop_pat<andn, KANDNWrr>;
+defm : avx512_binop_pat<or, KORWrr>;
+defm : avx512_binop_pat<xnor, KXNORWrr>;
+defm : avx512_binop_pat<xor, KXORWrr>;
+
+// Mask unpacking
+multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
+ RegisterClass KRC1, RegisterClass KRC2> {
+ let Predicates = [HasAVX512] in
+ def rr : I<opc, MRMSrcReg, (outs KRC1:$dst), (ins KRC2:$src1, KRC2:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+}
+
+multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
+ defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16, VK8>,
+ VEX_4V, VEX_L, OpSize, TB;
+}
+
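+// kunpckbw forms a 16-bit mask by concatenating the low 8 bits of the two
+// source masks.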
+defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
+
+multiclass avx512_mask_unpck_int<string IntName, string InstName> {
+ let Predicates = [HasAVX512] in
+ def : Pat<(!cast<Intrinsic>("int_x86_"##IntName##"_v16i1")
+ VK8:$src1, VK8:$src2),
+ (!cast<Instruction>(InstName##"BWrr") VK8:$src1, VK8:$src2)>;
+}
+
+defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
+// Mask bit testing
+multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
+ SDNode OpNode> {
+ let Predicates = [HasAVX512], Defs = [EFLAGS] in
+ def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
+ [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
+}
+
+multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+ defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
+ VEX, TB;
+}
+
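+// kortestw ORs the two source masks and sets ZF if the result is all zeros
+// and CF if it is all ones.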
+defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
+defm KTEST : avx512_mask_testop_w<0x99, "ktest", X86ktest>;
+
+// Mask shift
+multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
+ SDNode OpNode> {
+ let Predicates = [HasAVX512] in
+ def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
+ !strconcat(OpcodeStr,
+ "\t{$imm, $src, $dst|$dst, $src, $imm}"),
+ [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
+}
+
+multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
+ SDNode OpNode> {
+ defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
+ VEX, OpSize, TA, VEX_W;
+}
+
+defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", shl>;
+defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", srl>;
+
+// Mask setting all 0s or 1s
+multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
+ let Predicates = [HasAVX512] in
+ let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
+ def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
+ [(set KRC:$dst, (VT Val))]>;
+}
+
+multiclass avx512_mask_setop_w<PatFrag Val> {
+ defm B : avx512_mask_setop<VK8, v8i1, Val>;
+ defm W : avx512_mask_setop<VK16, v16i1, Val>;
+}
+
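+// KSET0/KSET1 are pseudo-instructions; the all-zeros / all-ones mask is
+// materialized later by the backend (for example with kxorw / kxnorw of a
+// register with itself).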
+defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
+defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
+
+// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
+let Predicates = [HasAVX512] in {
+ def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
+ def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
+}
+def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
+ (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
+
+def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
+ (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
+
+def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
+ (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 - Aligned and unaligned load and store
+//
+
+multiclass avx512_mov_packed<bits<8> opc, RegisterClass RC, RegisterClass KRC,
+ X86MemOperand x86memop, PatFrag ld_frag,
+ string asm, Domain d> {
+let neverHasSideEffects = 1 in
+ def rr : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>,
+ EVEX;
+let canFoldAsLoad = 1 in
+ def rm : AVX512PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (ld_frag addr:$src))], d>, EVEX;
+let Constraints = "$src1 = $dst" in {
+ def rrk : AVX512PI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, RC:$src2),
+ !strconcat(asm,
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
+ EVEX, EVEX_K;
+ def rmk : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, x86memop:$src2),
+ !strconcat(asm,
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ [], d>, EVEX, EVEX_K;
+}
+}
+
+defm VMOVAPSZ : avx512_mov_packed<0x28, VR512, VK16WM, f512mem, alignedloadv16f32,
+ "vmovaps", SSEPackedSingle>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VMOVAPDZ : avx512_mov_packed<0x28, VR512, VK8WM, f512mem, alignedloadv8f64,
+ "vmovapd", SSEPackedDouble>,
+ OpSize, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+defm VMOVUPSZ : avx512_mov_packed<0x10, VR512, VK16WM, f512mem, loadv16f32,
+ "vmovups", SSEPackedSingle>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VMOVUPDZ : avx512_mov_packed<0x10, VR512, VK8WM, f512mem, loadv8f64,
+ "vmovupd", SSEPackedDouble>,
+ OpSize, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+def VMOVAPSZmr : AVX512PI<0x29, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
+ "vmovaps\t{$src, $dst|$dst, $src}",
+ [(alignedstore512 (v16f32 VR512:$src), addr:$dst)],
+ SSEPackedSingle>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
+def VMOVAPDZmr : AVX512PI<0x29, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
+ "vmovapd\t{$src, $dst|$dst, $src}",
+ [(alignedstore512 (v8f64 VR512:$src), addr:$dst)],
+ SSEPackedDouble>, EVEX, EVEX_V512,
+ OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+def VMOVUPSZmr : AVX512PI<0x11, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
+ "vmovups\t{$src, $dst|$dst, $src}",
+ [(store (v16f32 VR512:$src), addr:$dst)],
+ SSEPackedSingle>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
+def VMOVUPDZmr : AVX512PI<0x11, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
+ "vmovupd\t{$src, $dst|$dst, $src}",
+ [(store (v8f64 VR512:$src), addr:$dst)],
+ SSEPackedDouble>, EVEX, EVEX_V512,
+ OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+
+let neverHasSideEffects = 1 in {
+ def VMOVDQA32rr : AVX512BI<0x6F, MRMSrcReg, (outs VR512:$dst),
+ (ins VR512:$src),
+ "vmovdqa32\t{$src, $dst|$dst, $src}", []>,
+ EVEX, EVEX_V512;
+ def VMOVDQA64rr : AVX512BI<0x6F, MRMSrcReg, (outs VR512:$dst),
+ (ins VR512:$src),
+ "vmovdqa64\t{$src, $dst|$dst, $src}", []>,
+ EVEX, EVEX_V512, VEX_W;
+let mayStore = 1 in {
+ def VMOVDQA32mr : AVX512BI<0x7F, MRMDestMem, (outs),
+ (ins i512mem:$dst, VR512:$src),
+ "vmovdqa32\t{$src, $dst|$dst, $src}", []>,
+ EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ def VMOVDQA64mr : AVX512BI<0x7F, MRMDestMem, (outs),
+ (ins i512mem:$dst, VR512:$src),
+ "vmovdqa64\t{$src, $dst|$dst, $src}", []>,
+ EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+}
+let mayLoad = 1 in {
+def VMOVDQA32rm : AVX512BI<0x6F, MRMSrcMem, (outs VR512:$dst),
+ (ins i512mem:$src),
+ "vmovdqa32\t{$src, $dst|$dst, $src}", []>,
+ EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
+def VMOVDQA64rm : AVX512BI<0x6F, MRMSrcMem, (outs VR512:$dst),
+ (ins i512mem:$src),
+ "vmovdqa64\t{$src, $dst|$dst, $src}", []>,
+ EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+}
+}
+
+// 512-bit aligned load/store
+def : Pat<(alignedloadv8i64 addr:$src), (VMOVDQA64rm addr:$src)>;
+def : Pat<(alignedloadv16i32 addr:$src), (VMOVDQA32rm addr:$src)>;
+
+def : Pat<(alignedstore512 (v8i64 VR512:$src), addr:$dst),
+ (VMOVDQA64mr addr:$dst, VR512:$src)>;
+def : Pat<(alignedstore512 (v16i32 VR512:$src), addr:$dst),
+ (VMOVDQA32mr addr:$dst, VR512:$src)>;
+
+multiclass avx512_mov_int<bits<8> load_opc, bits<8> store_opc, string asm,
+ RegisterClass RC, RegisterClass KRC,
+ PatFrag ld_frag, X86MemOperand x86memop> {
+let neverHasSideEffects = 1 in
+ def rr : AVX512XSI<load_opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"), []>, EVEX;
+let canFoldAsLoad = 1 in
+ def rm : AVX512XSI<load_opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (ld_frag addr:$src))]>, EVEX;
+let mayStore = 1 in
+ def mr : AVX512XSI<store_opc, MRMDestMem, (outs),
+ (ins x86memop:$dst, VR512:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"), []>, EVEX;
+let Constraints = "$src1 = $dst" in {
+ def rrk : AVX512XSI<load_opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, RC:$src2),
+ !strconcat(asm,
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), []>,
+ EVEX, EVEX_K;
+ def rmk : AVX512XSI<load_opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, x86memop:$src2),
+ !strconcat(asm,
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ []>, EVEX, EVEX_K;
+}
+}
+
+defm VMOVDQU32 : avx512_mov_int<0x6F, 0x7F, "vmovdqu32", VR512, VK16WM,
+ memopv16i32, i512mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VMOVDQU64 : avx512_mov_int<0x6F, 0x7F, "vmovdqu64", VR512, VK8WM,
+ memopv8i64, i512mem>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+// 512-bit unaligned load/store
+def : Pat<(loadv8i64 addr:$src), (VMOVDQU64rm addr:$src)>;
+def : Pat<(loadv16i32 addr:$src), (VMOVDQU32rm addr:$src)>;
+
+def : Pat<(store (v8i64 VR512:$src), addr:$dst),
+ (VMOVDQU64mr addr:$dst, VR512:$src)>;
+def : Pat<(store (v16i32 VR512:$src), addr:$dst),
+ (VMOVDQU32mr addr:$dst, VR512:$src)>;
+
+let AddedComplexity = 20 in {
+def : Pat<(v16f32 (vselect VK16WM:$mask, (v16f32 VR512:$src1),
+ (v16f32 VR512:$src2))),
+ (VMOVUPSZrrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
+def : Pat<(v8f64 (vselect VK8WM:$mask, (v8f64 VR512:$src1),
+ (v8f64 VR512:$src2))),
+ (VMOVUPDZrrk VR512:$src2, VK8WM:$mask, VR512:$src1)>;
+def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src1),
+ (v16i32 VR512:$src2))),
+ (VMOVDQU32rrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
+def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src1),
+ (v8i64 VR512:$src2))),
+ (VMOVDQU64rrk VR512:$src2, VK8WM:$mask, VR512:$src1)>;
+}
+// Move Int Doubleword to Packed Double Int
+//
+def VMOVDI2PDIZrr : AVX512SI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
+ "vmovd{z}\t{$src, $dst|$dst, $src}",
+ [(set VR128X:$dst,
+ (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
+ EVEX, VEX_LIG;
+def VMOVDI2PDIZrm : AVX512SI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
+ "vmovd{z}\t{$src, $dst|$dst, $src}",
+ [(set VR128X:$dst,
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
+ IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+def VMOV64toPQIZrr : AVX512SI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
+ "vmovq{z}\t{$src, $dst|$dst, $src}",
+ [(set VR128X:$dst,
+ (v2i64 (scalar_to_vector GR64:$src)))],
+ IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
+let isCodeGenOnly = 1 in {
+def VMOV64toSDZrr : AVX512SI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
+ "vmovq{z}\t{$src, $dst|$dst, $src}",
+ [(set FR64:$dst, (bitconvert GR64:$src))],
+ IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
+def VMOVSDto64Zrr : AVX512SI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
+ "vmovq{z}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (bitconvert FR64:$src))],
+ IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
+}
+def VMOVSDto64Zmr : AVX512SI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
+ "vmovq{z}\t{$src, $dst|$dst, $src}",
+ [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
+ IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
+ EVEX_CD8<64, CD8VT1>;
+
+// Move Int Doubleword to Single Scalar
+//
+let isCodeGenOnly = 1 in {
+def VMOVDI2SSZrr : AVX512SI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
+ "vmovd{z}\t{$src, $dst|$dst, $src}",
+ [(set FR32X:$dst, (bitconvert GR32:$src))],
+ IIC_SSE_MOVDQ>, EVEX, VEX_LIG;
+
+def VMOVDI2SSZrm : AVX512SI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
+ "vmovd{z}\t{$src, $dst|$dst, $src}",
+ [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
+ IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+}
+
+// Move Packed Doubleword Int to Packed Double Int
+//
+def VMOVPDI2DIZrr : AVX512SI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
+ "vmovd{z}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (vector_extract (v4i32 VR128X:$src),
+ (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
+ EVEX, VEX_LIG;
+def VMOVPDI2DIZmr : AVX512SI<0x7E, MRMDestMem, (outs),
+ (ins i32mem:$dst, VR128X:$src),
+ "vmovd{z}\t{$src, $dst|$dst, $src}",
+ [(store (i32 (vector_extract (v4i32 VR128X:$src),
+ (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
+ EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+
+// Move Packed Doubleword Int first element to Doubleword Int
+//
+def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
+ "vmovq{z}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
+ (iPTR 0)))],
+ IIC_SSE_MOVD_ToGP>, TB, OpSize, EVEX, VEX_LIG, VEX_W,
+ Requires<[HasAVX512, In64BitMode]>;
+
+def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
+ (ins i64mem:$dst, VR128X:$src),
+ "vmovq{z}\t{$src, $dst|$dst, $src}",
+ [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
+ addr:$dst)], IIC_SSE_MOVDQ>,
+ EVEX, OpSize, VEX_LIG, VEX_W, TB, EVEX_CD8<64, CD8VT1>,
+ Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
+
+// Move Scalar Single to Double Int
+//
+let isCodeGenOnly = 1 in {
+def VMOVSS2DIZrr : AVX512SI<0x7E, MRMDestReg, (outs GR32:$dst),
+ (ins FR32X:$src),
+ "vmovd{z}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (bitconvert FR32X:$src))],
+ IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG;
+def VMOVSS2DIZmr : AVX512SI<0x7E, MRMDestMem, (outs),
+ (ins i32mem:$dst, FR32X:$src),
+ "vmovd{z}\t{$src, $dst|$dst, $src}",
+ [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
+ IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+}
+
+// Move Quadword Int to Packed Quadword Int
+//
+def VMOVQI2PQIZrm : AVX512SI<0x6E, MRMSrcMem, (outs VR128X:$dst),
+ (ins i64mem:$src),
+ "vmovq{z}\t{$src, $dst|$dst, $src}",
+ [(set VR128X:$dst,
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
+ EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 MOVSS, MOVSD
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_move_scalar <string asm, RegisterClass RC,
+ SDNode OpNode, ValueType vt,
+ X86MemOperand x86memop, PatFrag mem_pat> {
+ def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128X:$dst, (vt (OpNode VR128X:$src1,
+ (scalar_to_vector RC:$src2))))],
+ IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
+ def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
+ EVEX, VEX_LIG;
+ def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
+ EVEX, VEX_LIG;
+}
+
+let ExeDomain = SSEPackedSingle in
+defm VMOVSSZ : avx512_move_scalar<"movss{z}", FR32X, X86Movss, v4f32, f32mem,
+ loadf32>, XS, EVEX_CD8<32, CD8VT1>;
+
+let ExeDomain = SSEPackedDouble in
+defm VMOVSDZ : avx512_move_scalar<"movsd{z}", FR64X, X86Movsd, v2f64, f64mem,
+ loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
+
+
+// For the disassembler
+let isCodeGenOnly = 1 in {
+ def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
+ (ins VR128X:$src1, FR32X:$src2),
+ "movss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
+ IIC_SSE_MOV_S_RR>,
+ XS, EVEX_4V, VEX_LIG;
+ def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
+ (ins VR128X:$src1, FR64X:$src2),
+ "movsd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
+ IIC_SSE_MOV_S_RR>,
+ XD, EVEX_4V, VEX_LIG, VEX_W;
+}
+
+let Predicates = [HasAVX512] in {
+ let AddedComplexity = 15 in {
+  // Move a scalar to XMM zero-extended: zero a VR128X, then do a
+  // MOVS{S,D} to the lower bits.
+ def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
+ (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>;
+ def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
+ (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
+ def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
+ (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
+ def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
+ (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;
+
+ // Move low f32 and clear high bits.
+ def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSSZrr (v4f32 (V_SET0)),
+ (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
+ def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSSZrr (v4i32 (V_SET0)),
+ (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
+ }
+
+ let AddedComplexity = 20 in {
+ // MOVSSrm zeros the high parts of the register; represent this
+ // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
+ def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
+ (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
+ def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
+ (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
+ def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
+ (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
+
+ // MOVSDrm zeros the high parts of the register; represent this
+ // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
+ def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
+ (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
+ def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
+ (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
+ def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
+ (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
+ def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
+ (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
+ def : Pat<(v2f64 (X86vzload addr:$src)),
+ (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
+
+ // Represent the same patterns above but in the form they appear for
+ // 256-bit types
+ def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
+ def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
+ (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
+ def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
+ (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
+ }
+ def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
+ (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)),
+ FR32X:$src)), sub_xmm)>;
+ def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
+ (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
+ (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)),
+ FR64X:$src)), sub_xmm)>;
+ def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
+
+ // Move low f64 and clear high bits.
+ def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSDZrr (v2f64 (V_SET0)),
+ (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
+
+ def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
+ (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
+ (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
+
+ // Extract and store.
+ def : Pat<(store (f32 (vector_extract (v4f32 VR128X:$src), (iPTR 0))),
+ addr:$dst),
+ (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
+ def : Pat<(store (f64 (vector_extract (v2f64 VR128X:$src), (iPTR 0))),
+ addr:$dst),
+ (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;
+
+ // Shuffle with VMOVSS
+ def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
+ (VMOVSSZrr (v4i32 VR128X:$src1),
+ (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
+ def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
+ (VMOVSSZrr (v4f32 VR128X:$src1),
+ (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;
+
+ // 256-bit variants
+ def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
+ (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
+ sub_xmm)>;
+ def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
+ (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
+ sub_xmm)>;
+
+ // Shuffle with VMOVSD
+ def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
+ (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
+ def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
+ (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
+ def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)),
+ (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
+ def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)),
+ (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
+
+ // 256-bit variants
+ def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
+ (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
+ sub_xmm)>;
+ def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
+ (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
+ sub_xmm)>;
+
+ def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
+ (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
+ def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
+ (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
+ def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
+ (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
+ def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
+ (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
+}
+
+let AddedComplexity = 15 in
+def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
+ (ins VR128X:$src),
+ "vmovq{z}\t{$src, $dst|$dst, $src}",
+ [(set VR128X:$dst, (v2i64 (X86vzmovl
+ (v2i64 VR128X:$src))))],
+ IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
+
+let AddedComplexity = 20 in
+def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
+ (ins i128mem:$src),
+ "vmovq{z}\t{$src, $dst|$dst, $src}",
+ [(set VR128X:$dst, (v2i64 (X86vzmovl
+ (loadv2i64 addr:$src))))],
+ IIC_SSE_MOVDQ>, EVEX, VEX_W,
+ EVEX_CD8<8, CD8VT8>;
+
+let Predicates = [HasAVX512] in {
+  // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
+ let AddedComplexity = 20 in {
+ def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
+ (VMOVDI2PDIZrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
+ (VMOV64toPQIZrr GR64:$src)>;
+ def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
+ (VMOVDI2PDIZrr GR32:$src)>;
+
+ def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
+ (VMOVDI2PDIZrm addr:$src)>;
+ def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
+ (VMOVDI2PDIZrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
+ (VMOVZPQILo2PQIZrm addr:$src)>;
+ def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
+ (VMOVZPQILo2PQIZrr VR128X:$src)>;
+ }
+
+ // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
+ def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
+ (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
+ def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
+ (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
+ (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
+}
+
+def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
+ (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
+
+def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
+ (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
+
+def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
+ (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
+
+def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
+ (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 - Integer arithmetic
+//
+multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
+ X86MemOperand x86memop, PatFrag scalar_mfrag,
+ X86MemOperand x86scalar_mop, string BrdcstStr,
+ OpndItins itins, bit IsCommutable = 0> {
+ let isCommutable = IsCommutable in
+ def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
+ itins.rr>, EVEX_4V;
+ def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (memop_frag addr:$src2))))],
+ itins.rm>, EVEX_4V;
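+  // The rmb form uses EVEX embedded broadcast (EVEX.b): a single scalar
+  // memory operand is broadcast to every vector element, written "{1toN}"
+  // in the assembly string.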
+ def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86scalar_mop:$src2),
+ !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
+ ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
+ [(set RC:$dst, (OpNode RC:$src1,
+ (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))))],
+ itins.rm>, EVEX_4V, EVEX_B;
+}
+multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr,
+ ValueType DstVT, ValueType SrcVT, RegisterClass RC,
+ PatFrag memop_frag, X86MemOperand x86memop,
+ OpndItins itins,
+ bit IsCommutable = 0> {
+ let isCommutable = IsCommutable in
+ def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX_4V, VEX_W;
+ def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX_4V, VEX_W;
+}
+
+defm VPADDDZ : avx512_binop_rm<0xFE, "vpaddd", add, v16i32, VR512, memopv16i32,
+ i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+defm VPSUBDZ : avx512_binop_rm<0xFA, "vpsubd", sub, v16i32, VR512, memopv16i32,
+ i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 0>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+defm VPMULLDZ : avx512_binop_rm<0x40, "vpmulld", mul, v16i32, VR512, memopv16i32,
+ i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
+ T8, EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+defm VPADDQZ : avx512_binop_rm<0xD4, "vpaddq", add, v8i64, VR512, memopv8i64,
+ i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 1>,
+ EVEX_CD8<64, CD8VF>, EVEX_V512, VEX_W;
+
+defm VPSUBQZ : avx512_binop_rm<0xFB, "vpsubq", sub, v8i64, VR512, memopv8i64,
+ i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 0>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
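+// vpmuldq / vpmuludq multiply the even-numbered 32-bit elements of the
+// sources and produce 64-bit products, hence they use avx512_binop_rm2,
+// which takes distinct source and destination types.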
+defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32,
+ VR512, memopv8i64, i512mem, SSE_INTALU_ITINS_P, 1>, T8,
+ EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32,
+ VR512, memopv8i64, i512mem, SSE_INTMUL_ITINS_P, 1>, EVEX_V512,
+ EVEX_CD8<64, CD8VF>;
+
+def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
+ (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
+
+defm VPMAXUDZ : avx512_binop_rm<0x3F, "vpmaxud", X86umax, v16i32, VR512, memopv16i32,
+ i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
+ T8, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPMAXUQZ : avx512_binop_rm<0x3F, "vpmaxuq", X86umax, v8i64, VR512, memopv8i64,
+ i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 0>,
+ T8, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VPMAXSDZ : avx512_binop_rm<0x3D, "vpmaxsd", X86smax, v16i32, VR512, memopv16i32,
+ i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPMAXSQZ : avx512_binop_rm<0x3D, "vpmaxsq", X86smax, v8i64, VR512, memopv8i64,
+ i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 0>,
+ T8, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VPMINUDZ : avx512_binop_rm<0x3B, "vpminud", X86umin, v16i32, VR512, memopv16i32,
+ i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
+ T8, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPMINUQZ : avx512_binop_rm<0x3B, "vpminuq", X86umin, v8i64, VR512, memopv8i64,
+ i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 0>,
+ T8, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VPMINSDZ : avx512_binop_rm<0x39, "vpminsd", X86smin, v16i32, VR512, memopv16i32,
+ i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
+ T8, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPMINSQZ : avx512_binop_rm<0x39, "vpminsq", X86smin, v8i64, VR512, memopv8i64,
+ i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 0>,
+ T8, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 - Unpack Instructions
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
+ PatFrag mem_frag, RegisterClass RC,
+ X86MemOperand x86memop, string asm,
+ Domain d> {
+ def rr : AVX512PI<opc, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ asm, [(set RC:$dst,
+ (vt (OpNode RC:$src1, RC:$src2)))],
+ d>, EVEX_4V;
+ def rm : AVX512PI<opc, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ asm, [(set RC:$dst,
+ (vt (OpNode RC:$src1,
+ (bitconvert (mem_frag addr:$src2)))))],
+ d>, EVEX_4V;
+}
+
+defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
+ VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
+ VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, OpSize, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
+ VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
+ VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, OpSize, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
+ X86MemOperand x86memop> {
+ def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
+ IIC_SSE_UNPCK>, EVEX_4V;
+ def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
+ (bitconvert (memop_frag addr:$src2)))))],
+ IIC_SSE_UNPCK>, EVEX_4V;
+}
+defm VPUNPCKLDQZ : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
+ VR512, memopv16i32, i512mem>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
+ VR512, memopv8i64, i512mem>, EVEX_V512,
+ VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPUNPCKHDQZ : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
+ VR512, memopv16i32, i512mem>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
+ VR512, memopv8i64, i512mem>, EVEX_V512,
+ VEX_W, EVEX_CD8<64, CD8VF>;
+//===----------------------------------------------------------------------===//
+// AVX-512 - PSHUFD
+//
+
+multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ SDNode OpNode, PatFrag mem_frag,
+ X86MemOperand x86memop, ValueType OpVT> {
+ def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
+ EVEX;
+ def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
+ (ins x86memop:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (OpVT (OpNode (mem_frag addr:$src1),
+ (i8 imm:$src2))))]>, EVEX;
+}
+
+defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, memopv16i32,
+ i512mem, v16i32>, OpSize, EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+let ExeDomain = SSEPackedSingle in
+defm VPERMILPSZ : avx512_pshuf_imm<0x04, "vpermilps", VR512, X86VPermilp,
+ memopv16f32, i512mem, v16f32>, OpSize, TA, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+let ExeDomain = SSEPackedDouble in
+defm VPERMILPDZ : avx512_pshuf_imm<0x05, "vpermilpd", VR512, X86VPermilp,
+ memopv8f64, i512mem, v8f64>, OpSize, TA, EVEX_V512,
+ VEX_W, EVEX_CD8<32, CD8VF>;
+
+def : Pat<(v16i32 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
+ (VPERMILPSZri VR512:$src1, imm:$imm)>;
+def : Pat<(v8i64 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
+ (VPERMILPDZri VR512:$src1, imm:$imm)>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 Logical Instructions
+//===----------------------------------------------------------------------===//
+
+defm VPANDDZ : avx512_binop_rm<0xDB, "vpandd", and, v16i32, VR512, memopv16i32,
+ i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPANDQZ : avx512_binop_rm<0xDB, "vpandq", and, v8i64, VR512, memopv8i64,
+ i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPORDZ : avx512_binop_rm<0xEB, "vpord", or, v16i32, VR512, memopv16i32,
+ i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPORQZ : avx512_binop_rm<0xEB, "vporq", or, v8i64, VR512, memopv8i64,
+ i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPXORDZ : avx512_binop_rm<0xEF, "vpxord", xor, v16i32, VR512, memopv16i32,
+ i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPXORQZ : avx512_binop_rm<0xEF, "vpxorq", xor, v8i64, VR512, memopv8i64,
+ i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPANDNDZ : avx512_binop_rm<0xDF, "vpandnd", X86andnp, v16i32, VR512,
+ memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
+ SSE_BIT_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPANDNQZ : avx512_binop_rm<0xDF, "vpandnq", X86andnp, v8i64, VR512, memopv8i64,
+ i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 0>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 FP arithmetic
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ SizeItins itins> {
+ defm SSZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss{z}"), OpNode, FR32X,
+ f32mem, itins.s, 0>, XS, EVEX_4V, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+ defm SDZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd{z}"), OpNode, FR64X,
+ f64mem, itins.d, 0>, XD, VEX_W, EVEX_4V, VEX_LIG,
+ EVEX_CD8<64, CD8VT1>;
+}
+
+let isCommutable = 1 in {
+defm VADD : avx512_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>;
+defm VMUL : avx512_binop_s<0x59, "mul", fmul, SSE_ALU_ITINS_S>;
+defm VMIN : avx512_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>;
+defm VMAX : avx512_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>;
+}
+let isCommutable = 0 in {
+defm VSUB : avx512_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>;
+defm VDIV : avx512_binop_s<0x5E, "div", fdiv, SSE_ALU_ITINS_S>;
+}
+
+multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, ValueType vt,
+ X86MemOperand x86memop, PatFrag mem_frag,
+ X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
+ string BrdcstStr,
+ Domain d, OpndItins itins, bit commutable> {
+ let isCommutable = commutable in
+ def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
+ EVEX_4V, TB;
+ let mayLoad = 1 in {
+ def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
+ itins.rm, d>, EVEX_4V, TB;
+ def rmb : PI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86scalar_mop:$src2),
+ !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
+ ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
+ [(set RC:$dst, (OpNode RC:$src1,
+ (vt (X86VBroadcast (scalar_mfrag addr:$src2)))))],
+ itins.rm, d>, EVEX_4V, EVEX_B, TB;
+ }
+}
+
+defm VADDPSZ : avx512_fp_packed<0x58, "addps", fadd, VR512, v16f32, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
+ SSE_ALU_ITINS_P.s, 1>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+defm VADDPDZ : avx512_fp_packed<0x58, "addpd", fadd, VR512, v8f64, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
+ SSE_ALU_ITINS_P.d, 1>,
+ EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VMULPSZ : avx512_fp_packed<0x59, "mulps", fmul, VR512, v16f32, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
+ SSE_ALU_ITINS_P.s, 1>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VMULPDZ : avx512_fp_packed<0x59, "mulpd", fmul, VR512, v8f64, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
+ SSE_ALU_ITINS_P.d, 1>,
+ EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VMINPSZ : avx512_fp_packed<0x5D, "minps", X86fmin, VR512, v16f32, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
+ SSE_ALU_ITINS_P.s, 1>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VMAXPSZ : avx512_fp_packed<0x5F, "maxps", X86fmax, VR512, v16f32, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
+ SSE_ALU_ITINS_P.s, 1>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+defm VMINPDZ : avx512_fp_packed<0x5D, "minpd", X86fmin, VR512, v8f64, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
+ SSE_ALU_ITINS_P.d, 1>,
+ EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VMAXPDZ : avx512_fp_packed<0x5F, "maxpd", X86fmax, VR512, v8f64, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
+ SSE_ALU_ITINS_P.d, 1>,
+ EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VSUBPSZ : avx512_fp_packed<0x5C, "subps", fsub, VR512, v16f32, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
+ SSE_ALU_ITINS_P.s, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VDIVPSZ : avx512_fp_packed<0x5E, "divps", fdiv, VR512, v16f32, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
+ SSE_ALU_ITINS_P.s, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+defm VSUBPDZ : avx512_fp_packed<0x5C, "subpd", fsub, VR512, v8f64, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
+ SSE_ALU_ITINS_P.d, 0>,
+ EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VDIVPDZ : avx512_fp_packed<0x5E, "divpd", fdiv, VR512, v8f64, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
+ SSE_ALU_ITINS_P.d, 0>,
+ EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 VPTESTM instructions
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_vptest<bits<8> opc, string OpcodeStr, RegisterClass KRC,
+ RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
+ SDNode OpNode, ValueType vt> {
+ def rr : AVX5128I<opc, MRMSrcReg,
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))]>, EVEX_4V;
+ def rm : AVX5128I<opc, MRMSrcMem,
+ (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set KRC:$dst, (OpNode (vt RC:$src1),
+ (bitconvert (memop_frag addr:$src2))))]>, EVEX_4V;
+}
+
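+// vptestmd/vptestmq AND the two sources element-wise and set the
+// corresponding bit of the destination mask (k) register when the result is
+// nonzero, hence the KRC (VK16/VK8) destination class.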
+defm VPTESTMDZ : avx512_vptest<0x27, "vptestmd", VK16, VR512, f512mem,
+ memopv16i32, X86testm, v16i32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VPTESTMQZ : avx512_vptest<0x27, "vptestmq", VK8, VR512, f512mem,
+ memopv8i64, X86testm, v8i64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 Shift instructions
+//===----------------------------------------------------------------------===//
+multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
+ string OpcodeStr, SDNode OpNode, RegisterClass RC,
+ ValueType vt, X86MemOperand x86memop, PatFrag mem_frag,
+ RegisterClass KRC> {
+ def ri : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
+ (ins RC:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (vt (OpNode RC:$src1, (i8 imm:$src2))))],
+ SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
+ def rik : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
+ def mi: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
+ (ins x86memop:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (OpNode (mem_frag addr:$src1),
+ (i8 imm:$src2)))], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
+ def mik: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
+ (ins KRC:$mask, x86memop:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
+}
+
+multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, ValueType vt, ValueType SrcVT,
+ PatFrag bc_frag, RegisterClass KRC> {
+ // src2 is always 128-bit
+ def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, VR128X:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (vt (OpNode RC:$src1, (SrcVT VR128X:$src2))))],
+ SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
+ def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, VR128X:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
+ def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, i128mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (vt (OpNode RC:$src1,
+ (bc_frag (memopv2i64 addr:$src2)))))],
+ SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
+ def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, i128mem:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
+}
+
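+// Uniform-count shifts: the ri/mi forms shift every element by an 8-bit
+// immediate, while the rr/rm forms shift every element by the count held in
+// the low 64 bits of an XMM register. The *k variants additionally take a
+// mask register and write the result under that writemask.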
+defm VPSRLDZ : avx512_shift_rmi<0x72, MRM2r, MRM2m, "vpsrld", X86vsrli,
+ VR512, v16i32, i512mem, memopv16i32, VK16WM>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPSRLDZ : avx512_shift_rrm<0xD2, "vpsrld", X86vsrl,
+ VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
+ EVEX_CD8<32, CD8VQ>;
+
+defm VPSRLQZ : avx512_shift_rmi<0x73, MRM2r, MRM2m, "vpsrlq", X86vsrli,
+ VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
+ EVEX_CD8<64, CD8VF>, VEX_W;
+defm VPSRLQZ : avx512_shift_rrm<0xD3, "vpsrlq", X86vsrl,
+ VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
+ EVEX_CD8<64, CD8VQ>, VEX_W;
+
+defm VPSLLDZ : avx512_shift_rmi<0x72, MRM6r, MRM6m, "vpslld", X86vshli,
+ VR512, v16i32, i512mem, memopv16i32, VK16WM>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VPSLLDZ : avx512_shift_rrm<0xF2, "vpslld", X86vshl,
+ VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
+ EVEX_CD8<32, CD8VQ>;
+
+defm VPSLLQZ : avx512_shift_rmi<0x73, MRM6r, MRM6m, "vpsllq", X86vshli,
+ VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
+ EVEX_CD8<64, CD8VF>, VEX_W;
+defm VPSLLQZ : avx512_shift_rrm<0xF3, "vpsllq", X86vshl,
+ VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
+ EVEX_CD8<64, CD8VQ>, VEX_W;
+
+defm VPSRADZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsrad", X86vsrai,
+ VR512, v16i32, i512mem, memopv16i32, VK16WM>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPSRADZ : avx512_shift_rrm<0xE2, "vpsrad", X86vsra,
+ VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
+ EVEX_CD8<32, CD8VQ>;
+
+defm VPSRAQZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsraq", X86vsrai,
+ VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
+ EVEX_CD8<64, CD8VF>, VEX_W;
+defm VPSRAQZ : avx512_shift_rrm<0xE2, "vpsraq", X86vsra,
+ VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
+ EVEX_CD8<64, CD8VQ>, VEX_W;
+
+//===-------------------------------------------------------------------===//
+// Variable Bit Shifts
+//===-------------------------------------------------------------------===//
+multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, ValueType vt,
+ X86MemOperand x86memop, PatFrag mem_frag> {
+ def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (vt (OpNode RC:$src1, (vt RC:$src2))))]>,
+ EVEX_4V;
+ def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (vt (OpNode RC:$src1, (mem_frag addr:$src2))))]>,
+ EVEX_4V;
+}
+
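+// Variable shifts (vpsllv/vpsrlv/vpsrav) shift each element by the count in
+// the corresponding element of the second source vector, rather than by a
+// single shared count.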
+defm VPSLLVDZ : avx512_var_shift<0x47, "vpsllvd", shl, VR512, v16i32,
+ i512mem, memopv16i32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VPSLLVQZ : avx512_var_shift<0x47, "vpsllvq", shl, VR512, v8i64,
+ i512mem, memopv8i64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+defm VPSRLVDZ : avx512_var_shift<0x45, "vpsrlvd", srl, VR512, v16i32,
+ i512mem, memopv16i32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VPSRLVQZ : avx512_var_shift<0x45, "vpsrlvq", srl, VR512, v8i64,
+ i512mem, memopv8i64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+defm VPSRAVDZ : avx512_var_shift<0x46, "vpsravd", sra, VR512, v16i32,
+ i512mem, memopv16i32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VPSRAVQZ : avx512_var_shift<0x46, "vpsravq", sra, VR512, v8i64,
+ i512mem, memopv8i64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 - MOVDDUP
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_movddup<string OpcodeStr, RegisterClass RC, ValueType VT,
+ X86MemOperand x86memop, PatFrag memop_frag> {
+def rr : AVX512PDI<0x12, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (VT (X86Movddup RC:$src)))]>, EVEX;
+def rm : AVX512PDI<0x12, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst,
+ (VT (X86Movddup (memop_frag addr:$src))))]>, EVEX;
+}
+
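+// vmovddup replicates the even-numbered double-precision elements of the
+// source into each even/odd pair of the destination.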
+defm VMOVDDUPZ : avx512_movddup<"vmovddup", VR512, v8f64, f512mem, memopv8f64>,
+ VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+def : Pat<(X86Movddup (v8f64 (scalar_to_vector (loadf64 addr:$src)))),
+ (VMOVDDUPZrm addr:$src)>;
+
+//===---------------------------------------------------------------------===//
+// Replicate Single FP - MOVSHDUP and MOVSLDUP
+//===---------------------------------------------------------------------===//
+multiclass avx512_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
+ ValueType vt, RegisterClass RC, PatFrag mem_frag,
+ X86MemOperand x86memop> {
+ def rr : AVX512XSI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (vt (OpNode RC:$src)))]>, EVEX;
+ let mayLoad = 1 in
+ def rm : AVX512XSI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>, EVEX;
+}
+
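+// vmovshdup replicates the odd-indexed single-precision elements of the
+// source; vmovsldup replicates the even-indexed elements.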
+defm VMOVSHDUPZ : avx512_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
+ v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VMOVSLDUPZ : avx512_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
+ v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+
+def : Pat<(v16i32 (X86Movshdup VR512:$src)), (VMOVSHDUPZrr VR512:$src)>;
+def : Pat<(v16i32 (X86Movshdup (memopv16i32 addr:$src))),
+ (VMOVSHDUPZrm addr:$src)>;
+def : Pat<(v16i32 (X86Movsldup VR512:$src)), (VMOVSLDUPZrr VR512:$src)>;
+def : Pat<(v16i32 (X86Movsldup (memopv16i32 addr:$src))),
+ (VMOVSLDUPZrm addr:$src)>;
+
+//===----------------------------------------------------------------------===//
+// Move Low to High and High to Low packed FP Instructions
+//===----------------------------------------------------------------------===//
+def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
+ (ins VR128X:$src1, VR128X:$src2),
+ "vmovlhps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
+ IIC_SSE_MOV_LH>, EVEX_4V;
+def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
+ (ins VR128X:$src1, VR128X:$src2),
+ "vmovhlps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
+ IIC_SSE_MOV_LH>, EVEX_4V;
+
+let Predicates = [HasAVX512] in {
+ // MOVLHPS patterns
+ def : Pat<(v4i32 (X86Movlhps VR128X:$src1, VR128X:$src2)),
+ (VMOVLHPSZrr VR128X:$src1, VR128X:$src2)>;
+ def : Pat<(v2i64 (X86Movlhps VR128X:$src1, VR128X:$src2)),
+ (VMOVLHPSZrr (v2i64 VR128X:$src1), VR128X:$src2)>;
+
+ // MOVHLPS patterns
+ def : Pat<(v4i32 (X86Movhlps VR128X:$src1, VR128X:$src2)),
+ (VMOVHLPSZrr VR128X:$src1, VR128X:$src2)>;
+}
+
+//===----------------------------------------------------------------------===//
+// FMA - Fused Multiply Operations
+//
+let Constraints = "$src1 = $dst" in {
+multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr,
+ RegisterClass RC, X86MemOperand x86memop,
+ PatFrag mem_frag, X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
+ string BrdcstStr, SDNode OpNode, ValueType OpVT> {
+ def r: AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, RC:$src3),
+ !strconcat(OpcodeStr,"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set RC:$dst, (OpVT(OpNode RC:$src1, RC:$src2, RC:$src3)))]>;
+
+ let mayLoad = 1 in
+ def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, x86memop:$src3),
+ !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2,
+ (mem_frag addr:$src3))))]>;
+ def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, x86scalar_mop:$src3),
+ !strconcat(OpcodeStr, "\t{${src3}", BrdcstStr,
+ ", $src2, $dst|$dst, $src2, ${src3}", BrdcstStr, "}"),
+ [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
+ (OpVT (X86VBroadcast (scalar_mfrag addr:$src3)))))]>, EVEX_B;
+}
+} // Constraints = "$src1 = $dst"
+
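+// In the FMA3 naming scheme the three-digit suffix encodes which operands
+// feed the multiply and which is the addend: the 213 forms multiply src1 and
+// src2 and add (or subtract) src3, the 132 forms multiply the destination by
+// the last operand and use the middle operand as the addend. The packed 213
+// forms below keep dst tied to src1 and allow a memory or broadcast operand
+// in the addend position.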
+let ExeDomain = SSEPackedSingle in {
+ defm VFMADD213PSZ : avx512_fma3p_rm<0xA8, "vfmadd213ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fmadd, v16f32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+ defm VFMSUB213PSZ : avx512_fma3p_rm<0xAA, "vfmsub213ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fmsub, v16f32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+ defm VFMADDSUB213PSZ : avx512_fma3p_rm<0xA6, "vfmaddsub213ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fmaddsub, v16f32>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+ defm VFMSUBADD213PSZ : avx512_fma3p_rm<0xA7, "vfmsubadd213ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fmsubadd, v16f32>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+ defm VFNMADD213PSZ : avx512_fma3p_rm<0xAC, "vfnmadd213ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fnmadd, v16f32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+ defm VFNMSUB213PSZ : avx512_fma3p_rm<0xAE, "vfnmsub213ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fnmsub, v16f32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+}
+let ExeDomain = SSEPackedDouble in {
+ defm VFMADD213PDZ : avx512_fma3p_rm<0xA8, "vfmadd213pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fmadd, v8f64>, EVEX_V512,
+ VEX_W, EVEX_CD8<64, CD8VF>;
+ defm VFMSUB213PDZ : avx512_fma3p_rm<0xAA, "vfmsub213pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fmsub, v8f64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+ defm VFMADDSUB213PDZ : avx512_fma3p_rm<0xA6, "vfmaddsub213pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fmaddsub, v8f64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+ defm VFMSUBADD213PDZ : avx512_fma3p_rm<0xA7, "vfmsubadd213pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fmsubadd, v8f64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+ defm VFNMADD213PDZ : avx512_fma3p_rm<0xAC, "vfnmadd213pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fnmadd, v8f64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+ defm VFNMSUB213PDZ : avx512_fma3p_rm<0xAE, "vfnmsub213pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fnmsub, v8f64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+}
+
+let Constraints = "$src1 = $dst" in {
+multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr,
+ RegisterClass RC, X86MemOperand x86memop,
+ PatFrag mem_frag, X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
+ string BrdcstStr, SDNode OpNode, ValueType OpVT> {
+ let mayLoad = 1 in
+ def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, RC:$src3, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src3, $dst|$dst, $src3, $src2}"),
+ [(set RC:$dst, (OpVT (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3)))]>;
+ def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, RC:$src3, x86scalar_mop:$src2),
+ !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
+ ", $src3, $dst|$dst, $src3, ${src2}", BrdcstStr, "}"),
+ [(set RC:$dst, (OpNode RC:$src1,
+ (OpVT (X86VBroadcast (scalar_mfrag addr:$src2))), RC:$src3))]>, EVEX_B;
+}
+} // Constraints = "$src1 = $dst"
+
+
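+// Only memory/broadcast forms of the 132 encoding are defined here; with all
+// operands in registers the 213 forms above already cover the computation,
+// while the 132 encoding lets the loaded or broadcast operand act as a
+// multiplicand rather than the addend.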
+let ExeDomain = SSEPackedSingle in {
+ defm VFMADD132PSZ : avx512_fma3p_m132<0x98, "vfmadd132ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fmadd, v16f32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+ defm VFMSUB132PSZ : avx512_fma3p_m132<0x9A, "vfmsub132ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fmsub, v16f32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+ defm VFMADDSUB132PSZ : avx512_fma3p_m132<0x96, "vfmaddsub132ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fmaddsub, v16f32>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+ defm VFMSUBADD132PSZ : avx512_fma3p_m132<0x97, "vfmsubadd132ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fmsubadd, v16f32>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+ defm VFNMADD132PSZ : avx512_fma3p_m132<0x9C, "vfnmadd132ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fnmadd, v16f32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+ defm VFNMSUB132PSZ : avx512_fma3p_m132<0x9E, "vfnmsub132ps", VR512, f512mem,
+ memopv16f32, f32mem, loadf32, "{1to16}",
+ X86Fnmsub, v16f32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+}
+let ExeDomain = SSEPackedDouble in {
+ defm VFMADD132PDZ : avx512_fma3p_m132<0x98, "vfmadd132pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fmadd, v8f64>, EVEX_V512,
+ VEX_W, EVEX_CD8<64, CD8VF>;
+ defm VFMSUB132PDZ : avx512_fma3p_m132<0x9A, "vfmsub132pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fmsub, v8f64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+ defm VFMADDSUB132PDZ : avx512_fma3p_m132<0x96, "vfmaddsub132pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fmaddsub, v8f64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+ defm VFMSUBADD132PDZ : avx512_fma3p_m132<0x97, "vfmsubadd132pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fmsubadd, v8f64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+ defm VFNMADD132PDZ : avx512_fma3p_m132<0x9C, "vfnmadd132pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fnmadd, v8f64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+ defm VFNMSUB132PDZ : avx512_fma3p_m132<0x9E, "vfnmsub132pd", VR512, f512mem,
+ memopv8f64, f64mem, loadf64, "{1to8}",
+ X86Fnmsub, v8f64>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+}
+
+// Scalar FMA
+let Constraints = "$src1 = $dst" in {
+multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, ValueType OpVT,
+ X86MemOperand x86memop, Operand memop,
+ PatFrag mem_frag> {
+ let isCommutable = 1 in
+ def r : AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, RC:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
+ let mayLoad = 1 in
+ def m : AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, f128mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src2, RC:$src1,
+ (mem_frag addr:$src3))))]>;
+}
+
+} // Constraints = "$src1 = $dst"
+
+defm VFMADDSSZ : avx512_fma3s_rm<0xA9, "vfmadd213ss{z}", X86Fmadd, FR32X,
+ f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
+defm VFMADDSDZ : avx512_fma3s_rm<0xA9, "vfmadd213sd{z}", X86Fmadd, FR64X,
+ f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VFMSUBSSZ : avx512_fma3s_rm<0xAB, "vfmsub213ss{z}", X86Fmsub, FR32X,
+ f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
+defm VFMSUBSDZ : avx512_fma3s_rm<0xAB, "vfmsub213sd{z}", X86Fmsub, FR64X,
+ f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VFNMADDSSZ : avx512_fma3s_rm<0xAD, "vfnmadd213ss{z}", X86Fnmadd, FR32X,
+ f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
+defm VFNMADDSDZ : avx512_fma3s_rm<0xAD, "vfnmadd213sd{z}", X86Fnmadd, FR64X,
+ f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VFNMSUBSSZ : avx512_fma3s_rm<0xAF, "vfnmsub213ss{z}", X86Fnmsub, FR32X,
+ f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
+defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd{z}", X86Fnmsub, FR64X,
+ f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 Scalar convert from sign integer to float/double
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ X86MemOperand x86memop, string asm> {
+let neverHasSideEffects = 1 in {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
+ !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
+ EVEX_4V;
+ let mayLoad = 1 in
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins DstRC:$src1, x86memop:$src),
+ !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
+ EVEX_4V;
+} // neverHasSideEffects = 1
+}
+let Predicates = [HasAVX512] in {
+defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}{z}">,
+ XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}{z}">,
+ XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
+defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}{z}">,
+ XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}{z}">,
+ XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
+
+def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
+ (VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
+def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
+ (VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
+def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
+ (VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
+def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
+ (VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
+
+def : Pat<(f32 (sint_to_fp GR32:$src)),
+ (VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
+def : Pat<(f32 (sint_to_fp GR64:$src)),
+ (VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
+def : Pat<(f64 (sint_to_fp GR32:$src)),
+ (VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
+def : Pat<(f64 (sint_to_fp GR64:$src)),
+ (VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
+
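+// Unsigned integer to FP converts (vcvtusi2ss/vcvtusi2sd) have no SSE/AVX
+// counterpart; they are new EVEX-only encodings.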
+defm VCVTUSI2SSZ : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}{z}">,
+ XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}{z}">,
+ XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
+defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}{z}">,
+ XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}{z}">,
+ XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
+
+def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
+ (VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
+def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))),
+ (VCVTUSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
+def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))),
+ (VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
+def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))),
+ (VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
+
+def : Pat<(f32 (uint_to_fp GR32:$src)),
+ (VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
+def : Pat<(f32 (uint_to_fp GR64:$src)),
+ (VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
+def : Pat<(f64 (uint_to_fp GR32:$src)),
+ (VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
+def : Pat<(f64 (uint_to_fp GR64:$src)),
+ (VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
+}
+
+//===----------------------------------------------------------------------===//
+// AVX-512 Scalar convert from float/double to integer
+//===----------------------------------------------------------------------===//
+multiclass avx512_cvt_s_int<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
+ string asm> {
+let neverHasSideEffects = 1 in {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG;
+ let mayLoad = 1 in
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG;
+} // neverHasSideEffects = 1
+}
+let Predicates = [HasAVX512] in {
+// Convert float/double to signed/unsigned int 32/64
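+// The cvt*2usi and cvtt*2usi unsigned forms are AVX-512-only encodings.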
+defm VCVTSS2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
+ ssmem, sse_load_f32, "cvtss2si{z}">,
+ XS, EVEX_CD8<32, CD8VT1>;
+defm VCVTSS2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse_cvtss2si64,
+ ssmem, sse_load_f32, "cvtss2si{z}">,
+ XS, VEX_W, EVEX_CD8<32, CD8VT1>;
+defm VCVTSS2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtss2usi,
+ ssmem, sse_load_f32, "cvtss2usi{z}">,
+ XS, EVEX_CD8<32, CD8VT1>;
+defm VCVTSS2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
+ int_x86_avx512_cvtss2usi64, ssmem,
+ sse_load_f32, "cvtss2usi{z}">, XS, VEX_W,
+ EVEX_CD8<32, CD8VT1>;
+defm VCVTSD2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
+ sdmem, sse_load_f64, "cvtsd2si{z}">,
+ XD, EVEX_CD8<64, CD8VT1>;
+defm VCVTSD2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse2_cvtsd2si64,
+ sdmem, sse_load_f64, "cvtsd2si{z}">,
+ XD, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VCVTSD2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtsd2usi,
+ sdmem, sse_load_f64, "cvtsd2usi{z}">,
+ XD, EVEX_CD8<64, CD8VT1>;
+defm VCVTSD2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
+ int_x86_avx512_cvtsd2usi64, sdmem,
+ sse_load_f64, "cvtsd2usi{z}">, XD, VEX_W,
+ EVEX_CD8<64, CD8VT1>;
+
+defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
+ int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}{z}",
+ SSE_CVT_Scalar, 0>, XS, EVEX_4V;
+defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
+ int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}{z}",
+ SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
+defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
+ int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}{z}",
+ SSE_CVT_Scalar, 0>, XD, EVEX_4V;
+defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
+ int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}{z}",
+ SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
+
+defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
+ int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}{z}",
+ SSE_CVT_Scalar, 0>, XS, EVEX_4V;
+defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
+ int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}{z}",
+ SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
+defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
+ int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}{z}",
+ SSE_CVT_Scalar, 0>, XD, EVEX_4V;
+defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
+ int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}{z}",
+ SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
+
+// Convert float/double to signed/unsigned int 32/64 with truncation
+defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
+ ssmem, sse_load_f32, "cvttss2si{z}">,
+ XS, EVEX_CD8<32, CD8VT1>;
+defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
+ int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
+ "cvttss2si{z}">, XS, VEX_W,
+ EVEX_CD8<32, CD8VT1>;
+defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
+ sdmem, sse_load_f64, "cvttsd2si{z}">, XD,
+ EVEX_CD8<64, CD8VT1>;
+defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
+ int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
+ "cvttsd2si{z}">, XD, VEX_W,
+ EVEX_CD8<64, CD8VT1>;
+defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
+                          int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
+                          "cvttss2usi{z}">, XS, EVEX_CD8<32, CD8VT1>;
+defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
+ int_x86_avx512_cvttss2usi64, ssmem,
+ sse_load_f32, "cvttss2usi{z}">, XS, VEX_W,
+ EVEX_CD8<32, CD8VT1>;
+defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
+ int_x86_avx512_cvttsd2usi,
+ sdmem, sse_load_f64, "cvttsd2usi{z}">, XD,
+ EVEX_CD8<64, CD8VT1>;
+defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
+ int_x86_avx512_cvttsd2usi64, sdmem,
+ sse_load_f64, "cvttsd2usi{z}">, XD, VEX_W,
+ EVEX_CD8<64, CD8VT1>;
+}
+
+multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm> {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst, (OpNode SrcRC:$src))]>, EVEX;
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>, EVEX;
+}
+
+defm VCVTTSS2SIZ : avx512_cvt_s<0x2C, FR32X, GR32, fp_to_sint, f32mem,
+ loadf32, "cvttss2si{z}">, XS,
+ EVEX_CD8<32, CD8VT1>;
+defm VCVTTSS2USIZ : avx512_cvt_s<0x78, FR32X, GR32, fp_to_uint, f32mem,
+ loadf32, "cvttss2usi{z}">, XS,
+ EVEX_CD8<32, CD8VT1>;
+defm VCVTTSS2SI64Z : avx512_cvt_s<0x2C, FR32X, GR64, fp_to_sint, f32mem,
+ loadf32, "cvttss2si{z}">, XS, VEX_W,
+ EVEX_CD8<32, CD8VT1>;
+defm VCVTTSS2USI64Z : avx512_cvt_s<0x78, FR32X, GR64, fp_to_uint, f32mem,
+ loadf32, "cvttss2usi{z}">, XS, VEX_W,
+ EVEX_CD8<32, CD8VT1>;
+defm VCVTTSD2SIZ : avx512_cvt_s<0x2C, FR64X, GR32, fp_to_sint, f64mem,
+ loadf64, "cvttsd2si{z}">, XD,
+ EVEX_CD8<64, CD8VT1>;
+defm VCVTTSD2USIZ : avx512_cvt_s<0x78, FR64X, GR32, fp_to_uint, f64mem,
+ loadf64, "cvttsd2usi{z}">, XD,
+ EVEX_CD8<64, CD8VT1>;
+defm VCVTTSD2SI64Z : avx512_cvt_s<0x2C, FR64X, GR64, fp_to_sint, f64mem,
+ loadf64, "cvttsd2si{z}">, XD, VEX_W,
+ EVEX_CD8<64, CD8VT1>;
+defm VCVTTSD2USI64Z : avx512_cvt_s<0x78, FR64X, GR64, fp_to_uint, f64mem,
+ loadf64, "cvttsd2usi{z}">, XD, VEX_W,
+ EVEX_CD8<64, CD8VT1>;
+//===----------------------------------------------------------------------===//
+// AVX-512 Convert from float to double and back
+//===----------------------------------------------------------------------===//
+let neverHasSideEffects = 1 in {
+def VCVTSS2SDZrr : AVX512XSI<0x5A, MRMSrcReg, (outs FR64X:$dst),
+ (ins FR32X:$src1, FR32X:$src2),
+ "vcvtss2sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
+let mayLoad = 1 in
+def VCVTSS2SDZrm : AVX512XSI<0x5A, MRMSrcMem, (outs FR64X:$dst),
+ (ins FR32X:$src1, f32mem:$src2),
+ "vcvtss2sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
+ EVEX_CD8<32, CD8VT1>;
+
+// Convert scalar double to scalar single
+def VCVTSD2SSZrr : AVX512XDI<0x5A, MRMSrcReg, (outs FR32X:$dst),
+ (ins FR64X:$src1, FR64X:$src2),
+ "vcvtsd2ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX_4V, VEX_LIG, VEX_W, Sched<[WriteCvtF2F]>;
+let mayLoad = 1 in
+def VCVTSD2SSZrm : AVX512XDI<0x5A, MRMSrcMem, (outs FR32X:$dst),
+ (ins FR64X:$src1, f64mem:$src2),
+ "vcvtsd2ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX_4V, VEX_LIG, VEX_W,
+ Sched<[WriteCvtF2FLd, ReadAfterLd]>, EVEX_CD8<64, CD8VT1>;
+}
+
+def : Pat<(f64 (fextend FR32X:$src)), (VCVTSS2SDZrr FR32X:$src, FR32X:$src)>,
+ Requires<[HasAVX512]>;
+def : Pat<(fextend (loadf32 addr:$src)),
+ (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512]>;
+
+def : Pat<(extloadf32 addr:$src),
+ (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[HasAVX512, OptForSize]>;
+
+def : Pat<(extloadf32 addr:$src),
+ (VCVTSS2SDZrr (f32 (IMPLICIT_DEF)), (VMOVSSZrm addr:$src))>,
+ Requires<[HasAVX512, OptForSpeed]>;
+
+def : Pat<(f32 (fround FR64X:$src)), (VCVTSD2SSZrr FR64X:$src, FR64X:$src)>,
+ Requires<[HasAVX512]>;
+
+multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
+ RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
+ X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
+ Domain d> {
+let neverHasSideEffects = 1 in {
+ def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst,
+ (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
+ let mayLoad = 1 in
+ def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst,
+ (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
+} // neverHasSideEffects = 1
+}
+
+defm VCVTPD2PSZ : avx512_vcvt_fp<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
+ memopv8f64, f512mem, v8f32, v8f64,
+ SSEPackedSingle>, EVEX_V512, VEX_W, OpSize,
+ EVEX_CD8<64, CD8VF>;
+
+defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
+ memopv4f64, f256mem, v8f64, v8f32,
+ SSEPackedDouble>, EVEX_V512, EVEX_CD8<32, CD8VH>;
+def : Pat<(v8f64 (extloadv8f32 addr:$src)),
+ (VCVTPS2PDZrm addr:$src)>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 Vector convert from sign integer to float/double
+//===----------------------------------------------------------------------===//
+
+defm VCVTDQ2PSZ : avx512_vcvt_fp<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
+ memopv8i64, i512mem, v16f32, v16i32,
+ SSEPackedSingle>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
+ memopv4i64, i256mem, v8f64, v8i32,
+ SSEPackedDouble>, EVEX_V512, XS,
+ EVEX_CD8<32, CD8VH>;
+
+defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint,
+ memopv16f32, f512mem, v16i32, v16f32,
+ SSEPackedSingle>, EVEX_V512, XS,
+ EVEX_CD8<32, CD8VF>;
+
+defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,
+ memopv8f64, f512mem, v8i32, v8f64,
+ SSEPackedDouble>, EVEX_V512, OpSize, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+
+defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
+ memopv16f32, f512mem, v16i32, v16f32,
+ SSEPackedSingle>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+
+defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
+ memopv8f64, f512mem, v8i32, v8f64,
+ SSEPackedDouble>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+
+defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp,
+ memopv4i64, f256mem, v8f64, v8i32,
+ SSEPackedDouble>, EVEX_V512, XS,
+ EVEX_CD8<32, CD8VH>;
+
+defm VCVTUDQ2PSZ : avx512_vcvt_fp<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
+ memopv16i32, f512mem, v16f32, v16i32,
+ SSEPackedSingle>, EVEX_V512, XD,
+ EVEX_CD8<32, CD8VF>;
+
+def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
+ (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
+ (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
+
+
+def : Pat<(int_x86_avx512_cvtdq2_ps_512 VR512:$src),
+ (VCVTDQ2PSZrr VR512:$src)>;
+def : Pat<(int_x86_avx512_cvtdq2_ps_512 (bitconvert (memopv8i64 addr:$src))),
+ (VCVTDQ2PSZrm addr:$src)>;
+
+def VCVTPS2DQZrr : AVX512BI<0x5B, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
+ "vcvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR512:$dst,
+ (int_x86_avx512_cvt_ps2dq_512 VR512:$src))],
+ IIC_SSE_CVT_PS_RR>, EVEX, EVEX_V512;
+def VCVTPS2DQZrm : AVX512BI<0x5B, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
+ "vcvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR512:$dst,
+ (int_x86_avx512_cvt_ps2dq_512 (memopv16f32 addr:$src)))],
+ IIC_SSE_CVT_PS_RM>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+
+let Predicates = [HasAVX512] in {
+ def : Pat<(v8f32 (fround (loadv8f64 addr:$src))),
+ (VCVTPD2PSZrm addr:$src)>;
+ def : Pat<(v8f64 (extloadv8f32 addr:$src)),
+ (VCVTPS2PDZrm addr:$src)>;
+}
+
+//===----------------------------------------------------------------------===//
+// Half precision conversion instructions
+//===----------------------------------------------------------------------===//
+multiclass avx512_f16c_ph2ps<RegisterClass destRC, RegisterClass srcRC,
+ X86MemOperand x86memop, Intrinsic Int> {
+ def rr : AVX5128I<0x13, MRMSrcReg, (outs destRC:$dst), (ins srcRC:$src),
+ "vcvtph2ps\t{$src, $dst|$dst, $src}",
+ [(set destRC:$dst, (Int srcRC:$src))]>, EVEX;
+ let neverHasSideEffects = 1, mayLoad = 1 in
+ def rm : AVX5128I<0x13, MRMSrcMem, (outs destRC:$dst), (ins x86memop:$src),
+ "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, EVEX;
+}
+
+multiclass avx512_f16c_ps2ph<RegisterClass destRC, RegisterClass srcRC,
+ X86MemOperand x86memop, Intrinsic Int> {
+ def rr : AVX512AIi8<0x1D, MRMDestReg, (outs destRC:$dst),
+ (ins srcRC:$src1, i32i8imm:$src2),
+ "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set destRC:$dst, (Int srcRC:$src1, imm:$src2))]>, EVEX;
+ let neverHasSideEffects = 1, mayStore = 1 in
+ def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
+ (ins x86memop:$dst, srcRC:$src1, i32i8imm:$src2),
+ "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
+}
+
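+// vcvtph2ps widens packed half-precision values from a YMM/m256 source to
+// packed single precision in a ZMM; vcvtps2ph narrows back, with the imm8
+// selecting the rounding control.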
+defm VCVTPH2PSZ : avx512_f16c_ph2ps<VR512, VR256X, f256mem,
+ int_x86_avx512_vcvtph2ps_512>, EVEX_V512,
+ EVEX_CD8<32, CD8VH>;
+defm VCVTPS2PHZ : avx512_f16c_ps2ph<VR256X, VR512, f256mem,
+ int_x86_avx512_vcvtps2ph_512>, EVEX_V512,
+ EVEX_CD8<32, CD8VH>;
+
+let Defs = [EFLAGS], Predicates = [HasAVX512] in {
+ defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
+ "ucomiss{z}">, TB, EVEX, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+ defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
+ "ucomisd{z}">, TB, OpSize, EVEX,
+ VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
+ let Pattern = []<dag> in {
+ defm VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, undef, v4f32, f128mem, load,
+ "comiss{z}">, TB, EVEX, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+ defm VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, undef, v2f64, f128mem, load,
+ "comisd{z}">, TB, OpSize, EVEX,
+ VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
+ }
+ defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
+ load, "ucomiss">, TB, EVEX, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+ defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
+ load, "ucomisd">, TB, OpSize, EVEX,
+ VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
+
+ defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
+ load, "comiss">, TB, EVEX, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+ defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
+ load, "comisd">, TB, OpSize, EVEX,
+ VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
+}
+
+/// avx512_fp_unop_p - AVX-512 unops in packed form.
+multiclass avx512_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+ def PSZr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
+ !strconcat(OpcodeStr,
+ "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (v16f32 (OpNode VR512:$src)))]>,
+ EVEX, EVEX_V512;
+  def PSZm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
+ !strconcat(OpcodeStr,
+ "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (OpNode (memopv16f32 addr:$src)))]>,
+ EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ def PDZr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
+ !strconcat(OpcodeStr,
+ "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (v8f64 (OpNode VR512:$src)))]>,
+ EVEX, EVEX_V512, VEX_W;
+ def PDZm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
+ !strconcat(OpcodeStr,
+ "pd\t{$src, $dst|$dst, $src}"),
+                      [(set VR512:$dst, (OpNode (memopv8f64 addr:$src)))]>,
+ EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+}
+
+/// avx512_fp_unop_p_int - AVX-512 intrinsics unops in packed forms.
+multiclass avx512_fp_unop_p_int<bits<8> opc, string OpcodeStr,
+ Intrinsic V16F32Int, Intrinsic V8F64Int> {
+ def PSZr_Int : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
+ !strconcat(OpcodeStr,
+ "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (V16F32Int VR512:$src))]>,
+ EVEX, EVEX_V512;
+ def PSZm_Int : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
+ !strconcat(OpcodeStr,
+ "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst,
+ (V16F32Int (memopv16f32 addr:$src)))]>, EVEX,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+ def PDZr_Int : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
+ !strconcat(OpcodeStr,
+ "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (V8F64Int VR512:$src))]>,
+ EVEX, EVEX_V512, VEX_W;
+ def PDZm_Int : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
+ !strconcat(OpcodeStr,
+ "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst,
+ (V8F64Int (memopv8f64 addr:$src)))]>,
+ EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+}
+
+/// avx512_fp_unop_s - AVX-512 unops in scalar form.
+multiclass avx512_fp_unop_s<bits<8> opc, string OpcodeStr> {
+ let hasSideEffects = 0 in {
+ def SSZr : AVX5128I<opc, MRMSrcReg, (outs FR32X:$dst),
+ (ins FR32X:$src1, FR32X:$src2),
+ !strconcat(OpcodeStr,
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX_4V;
+ let mayLoad = 1 in {
+ def SSZm : AVX5128I<opc, MRMSrcMem, (outs FR32X:$dst),
+ (ins FR32X:$src1, f32mem:$src2),
+ !strconcat(OpcodeStr,
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+ def SSZm_Int : AVX5128I<opc, MRMSrcMem, (outs VR128X:$dst),
+ (ins VR128X:$src1, ssmem:$src2),
+ !strconcat(OpcodeStr,
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+ }
+ def SDZr : AVX5128I<opc, MRMSrcReg, (outs FR64X:$dst),
+ (ins FR64X:$src1, FR64X:$src2),
+ !strconcat(OpcodeStr,
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
+ EVEX_4V, VEX_W;
+ let mayLoad = 1 in {
+ def SDZm : AVX5128I<opc, MRMSrcMem, (outs FR64X:$dst),
+ (ins FR64X:$src1, f64mem:$src2),
+ !strconcat(OpcodeStr,
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
+ EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
+ def SDZm_Int : AVX5128I<opc, MRMSrcMem, (outs VR128X:$dst),
+ (ins VR128X:$src1, sdmem:$src2),
+ !strconcat(OpcodeStr,
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
+ }
+}
+}
+
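+// vrcp14/vrsqrt14 are approximate reciprocal and reciprocal square root with
+// a maximum relative error of 2^-14; the vrcp28/vrsqrt28 variants further
+// below (gated on HasERI) tighten this to roughly 2^-28.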
+defm VRCP14 : avx512_fp_unop_s<0x4D, "vrcp14">,
+ avx512_fp_unop_p<0x4C, "vrcp14", X86frcp>,
+ avx512_fp_unop_p_int<0x4C, "vrcp14",
+ int_x86_avx512_rcp14_ps_512, int_x86_avx512_rcp14_pd_512>;
+
+defm VRSQRT14 : avx512_fp_unop_s<0x4F, "vrsqrt14">,
+ avx512_fp_unop_p<0x4E, "vrsqrt14", X86frsqrt>,
+ avx512_fp_unop_p_int<0x4E, "vrsqrt14",
+ int_x86_avx512_rsqrt14_ps_512, int_x86_avx512_rsqrt14_pd_512>;
+
+def : Pat<(int_x86_avx512_rsqrt14_ss VR128X:$src),
+ (COPY_TO_REGCLASS (VRSQRT14SSZr (f32 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS VR128X:$src, FR32)),
+ VR128X)>;
+def : Pat<(int_x86_avx512_rsqrt14_ss sse_load_f32:$src),
+ (VRSQRT14SSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
+
+def : Pat<(int_x86_avx512_rcp14_ss VR128X:$src),
+ (COPY_TO_REGCLASS (VRCP14SSZr (f32 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS VR128X:$src, FR32)),
+ VR128X)>;
+def : Pat<(int_x86_avx512_rcp14_ss sse_load_f32:$src),
+ (VRCP14SSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
+
+let AddedComplexity = 20, Predicates = [HasERI] in {
+defm VRCP28 : avx512_fp_unop_s<0xCB, "vrcp28">,
+ avx512_fp_unop_p<0xCA, "vrcp28", X86frcp>,
+ avx512_fp_unop_p_int<0xCA, "vrcp28",
+ int_x86_avx512_rcp28_ps_512, int_x86_avx512_rcp28_pd_512>;
+
+defm VRSQRT28 : avx512_fp_unop_s<0xCD, "vrsqrt28">,
+ avx512_fp_unop_p<0xCC, "vrsqrt28", X86frsqrt>,
+ avx512_fp_unop_p_int<0xCC, "vrsqrt28",
+ int_x86_avx512_rsqrt28_ps_512, int_x86_avx512_rsqrt28_pd_512>;
+}
+
+let Predicates = [HasERI] in {
+ def : Pat<(int_x86_avx512_rsqrt28_ss VR128X:$src),
+ (COPY_TO_REGCLASS (VRSQRT28SSZr (f32 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS VR128X:$src, FR32)),
+ VR128X)>;
+ def : Pat<(int_x86_avx512_rsqrt28_ss sse_load_f32:$src),
+ (VRSQRT28SSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
+
+ def : Pat<(int_x86_avx512_rcp28_ss VR128X:$src),
+ (COPY_TO_REGCLASS (VRCP28SSZr (f32 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS VR128X:$src, FR32)),
+ VR128X)>;
+ def : Pat<(int_x86_avx512_rcp28_ss sse_load_f32:$src),
+ (VRCP28SSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
+}
+multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ Intrinsic V16F32Int, Intrinsic V8F64Int,
+ OpndItins itins_s, OpndItins itins_d> {
+ def PSZrr :AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (v16f32 (OpNode VR512:$src)))], itins_s.rr>,
+ EVEX, EVEX_V512;
+
+ let mayLoad = 1 in
+ def PSZrm : AVX512PSI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst,
+ (OpNode (v16f32 (bitconvert (memopv16f32 addr:$src)))))],
+ itins_s.rm>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+ def PDZrr : AVX512PDI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (v8f64 (OpNode VR512:$src)))], itins_d.rr>,
+ EVEX, EVEX_V512;
+
+ let mayLoad = 1 in
+ def PDZrm : AVX512PDI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (OpNode
+ (v8f64 (bitconvert (memopv16f32 addr:$src)))))],
+ itins_d.rm>, EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+ def PSZr_Int : AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
+ !strconcat(OpcodeStr,
+ "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (V16F32Int VR512:$src))]>,
+ EVEX, EVEX_V512;
+ def PSZm_Int : AVX512PSI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst,
+ (V16F32Int (memopv16f32 addr:$src)))]>, EVEX,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+ def PDZr_Int : AVX512PDI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
+ !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (V8F64Int VR512:$src))]>,
+ EVEX, EVEX_V512, VEX_W;
+ def PDZm_Int : AVX512PDI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
+ !strconcat(OpcodeStr,
+ "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR512:$dst, (V8F64Int (memopv8f64 addr:$src)))]>,
+ EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+}
+
+multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
+ Intrinsic F32Int, Intrinsic F64Int,
+ OpndItins itins_s, OpndItins itins_d> {
+ def SSZr : SI<opc, MRMSrcReg, (outs FR32X:$dst),
+ (ins FR32X:$src1, FR32X:$src2),
+ !strconcat(OpcodeStr,
+ "ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [], itins_s.rr>, XS, EVEX_4V;
+ def SSZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
+ (ins VR128X:$src1, VR128X:$src2),
+ !strconcat(OpcodeStr,
+ "ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128X:$dst,
+ (F32Int VR128X:$src1, VR128X:$src2))],
+ itins_s.rr>, XS, EVEX_4V;
+ let mayLoad = 1 in {
+ def SSZm : SI<opc, MRMSrcMem, (outs FR32X:$dst),
+ (ins FR32X:$src1, f32mem:$src2),
+ !strconcat(OpcodeStr,
+ "ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [], itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+ def SSZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
+ (ins VR128X:$src1, ssmem:$src2),
+ !strconcat(OpcodeStr,
+ "ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128X:$dst,
+ (F32Int VR128X:$src1, sse_load_f32:$src2))],
+ itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+ }
+ def SDZr : SI<opc, MRMSrcReg, (outs FR64X:$dst),
+ (ins FR64X:$src1, FR64X:$src2),
+ !strconcat(OpcodeStr,
+ "sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
+ XD, EVEX_4V, VEX_W;
+ def SDZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
+ (ins VR128X:$src1, VR128X:$src2),
+ !strconcat(OpcodeStr,
+ "sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128X:$dst,
+ (F64Int VR128X:$src1, VR128X:$src2))],
+ itins_s.rr>, XD, EVEX_4V, VEX_W;
+ let mayLoad = 1 in {
+ def SDZm : SI<opc, MRMSrcMem, (outs FR64X:$dst),
+ (ins FR64X:$src1, f64mem:$src2),
+ !strconcat(OpcodeStr,
+ "sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
+ XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
+ def SDZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
+ (ins VR128X:$src1, sdmem:$src2),
+ !strconcat(OpcodeStr,
+ "sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128X:$dst,
+ (F64Int VR128X:$src1, sse_load_f64:$src2))]>,
+ XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
+ }
+}
+
+
+defm VSQRT : avx512_sqrt_scalar<0x51, "sqrt",
+ int_x86_avx512_sqrt_ss, int_x86_avx512_sqrt_sd,
+ SSE_SQRTSS, SSE_SQRTSD>,
+ avx512_sqrt_packed<0x51, "vsqrt", fsqrt,
+ int_x86_avx512_sqrt_ps_512, int_x86_avx512_sqrt_pd_512,
+ SSE_SQRTPS, SSE_SQRTPD>;
+
+let Predicates = [HasAVX512] in {
+ def : Pat<(f32 (fsqrt FR32X:$src)),
+ (VSQRTSSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
+ def : Pat<(f32 (fsqrt (load addr:$src))),
+ (VSQRTSSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[OptForSize]>;
+ def : Pat<(f64 (fsqrt FR64X:$src)),
+ (VSQRTSDZr (f64 (IMPLICIT_DEF)), FR64X:$src)>;
+ def : Pat<(f64 (fsqrt (load addr:$src))),
+ (VSQRTSDZm (f64 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[OptForSize]>;
+
+ def : Pat<(f32 (X86frsqrt FR32X:$src)),
+ (VRSQRT14SSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
+ def : Pat<(f32 (X86frsqrt (load addr:$src))),
+ (VRSQRT14SSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[OptForSize]>;
+
+ def : Pat<(f32 (X86frcp FR32X:$src)),
+ (VRCP14SSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
+ def : Pat<(f32 (X86frcp (load addr:$src))),
+ (VRCP14SSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[OptForSize]>;
+
+ def : Pat<(int_x86_sse_sqrt_ss VR128X:$src),
+ (COPY_TO_REGCLASS (VSQRTSSZr (f32 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS VR128X:$src, FR32)),
+ VR128X)>;
+ def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
+ (VSQRTSSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
+
+ def : Pat<(int_x86_sse2_sqrt_sd VR128X:$src),
+ (COPY_TO_REGCLASS (VSQRTSDZr (f64 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS VR128X:$src, FR64)),
+ VR128X)>;
+ def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
+ (VSQRTSDZm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
+}
+
+
+multiclass avx512_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
+ X86MemOperand x86memop, RegisterClass RC,
+ PatFrag mem_frag32, PatFrag mem_frag64,
+ Intrinsic V4F32Int, Intrinsic V2F64Int,
+ CD8VForm VForm> {
+let ExeDomain = SSEPackedSingle in {
+  // Vector intrinsic operation, reg
+ def PSr : AVX512AIi8<opcps, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>;
+
+ // Vector intrinsic operation, mem
+ def PSm : AVX512AIi8<opcps, MRMSrcMem,
+ (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
+ EVEX_CD8<32, VForm>;
+} // ExeDomain = SSEPackedSingle
+
+let ExeDomain = SSEPackedDouble in {
+ // Vector intrinsic operation, reg
+ def PDr : AVX512AIi8<opcpd, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>;
+
+ // Vector intrinsic operation, mem
+ def PDm : AVX512AIi8<opcpd, MRMSrcMem,
+ (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
+ EVEX_CD8<64, VForm>;
+} // ExeDomain = SSEPackedDouble
+}
+
+multiclass avx512_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
+ string OpcodeStr,
+ Intrinsic F32Int,
+ Intrinsic F64Int> {
+let ExeDomain = GenericDomain in {
+ // Operation, reg.
+ let hasSideEffects = 0 in
+ def SSr : AVX512AIi8<opcss, MRMSrcReg,
+ (outs FR32X:$dst), (ins FR32X:$src1, FR32X:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>;
+
+ // Intrinsic operation, reg.
+ def SSr_Int : AVX512AIi8<opcss, MRMSrcReg,
+ (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128X:$dst, (F32Int VR128X:$src1, VR128X:$src2, imm:$src3))]>;
+
+ // Intrinsic operation, mem.
+ def SSm : AVX512AIi8<opcss, MRMSrcMem, (outs VR128X:$dst),
+ (ins VR128X:$src1, ssmem:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128X:$dst, (F32Int VR128X:$src1,
+ sse_load_f32:$src2, imm:$src3))]>,
+ EVEX_CD8<32, CD8VT1>;
+
+ // Operation, reg.
+ let hasSideEffects = 0 in
+ def SDr : AVX512AIi8<opcsd, MRMSrcReg,
+ (outs FR64X:$dst), (ins FR64X:$src1, FR64X:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, VEX_W;
+
+ // Intrinsic operation, reg.
+ def SDr_Int : AVX512AIi8<opcsd, MRMSrcReg,
+ (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128X:$dst, (F64Int VR128X:$src1, VR128X:$src2, imm:$src3))]>,
+ VEX_W;
+
+ // Intrinsic operation, mem.
+ def SDm : AVX512AIi8<opcsd, MRMSrcMem,
+ (outs VR128X:$dst), (ins VR128X:$src1, sdmem:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128X:$dst,
+ (F64Int VR128X:$src1, sse_load_f64:$src2, imm:$src3))]>,
+ VEX_W, EVEX_CD8<64, CD8VT1>;
+} // ExeDomain = GenericDomain
+}
+
+let Predicates = [HasAVX512] in {
+ defm VRNDSCALE : avx512_fp_binop_rm<0x0A, 0x0B, "vrndscale",
+ int_x86_avx512_rndscale_ss,
+ int_x86_avx512_rndscale_sd>, EVEX_4V;
+
+ defm VRNDSCALEZ : avx512_fp_unop_rm<0x08, 0x09, "vrndscale", f256mem, VR512,
+ memopv16f32, memopv8f64,
+ int_x86_avx512_rndscale_ps_512,
+ int_x86_avx512_rndscale_pd_512, CD8VF>,
+ EVEX, EVEX_V512;
+}
+
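+// In the vrndscale immediate, bits 7:4 select the scale and bits 3:0 follow
+// the SSE4.1 ROUND* encoding, which is why the patterns below use 0x1
+// (floor), 0x2 (ceil), 0x3 (truncate), 0x4 (current rounding mode) and 0xC
+// (current mode with precision exceptions suppressed).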
+def : Pat<(ffloor FR32X:$src),
+ (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x1))>;
+def : Pat<(f64 (ffloor FR64X:$src)),
+ (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x1))>;
+def : Pat<(f32 (fnearbyint FR32X:$src)),
+ (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0xC))>;
+def : Pat<(f64 (fnearbyint FR64X:$src)),
+ (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0xC))>;
+def : Pat<(f32 (fceil FR32X:$src)),
+ (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x2))>;
+def : Pat<(f64 (fceil FR64X:$src)),
+ (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x2))>;
+def : Pat<(f32 (frint FR32X:$src)),
+ (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x4))>;
+def : Pat<(f64 (frint FR64X:$src)),
+ (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x4))>;
+def : Pat<(f32 (ftrunc FR32X:$src)),
+ (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x3))>;
+def : Pat<(f64 (ftrunc FR64X:$src)),
+ (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x3))>;
+
+def : Pat<(v16f32 (ffloor VR512:$src)),
+ (VRNDSCALEZPSr VR512:$src, (i32 0x1))>;
+def : Pat<(v16f32 (fnearbyint VR512:$src)),
+ (VRNDSCALEZPSr VR512:$src, (i32 0xC))>;
+def : Pat<(v16f32 (fceil VR512:$src)),
+ (VRNDSCALEZPSr VR512:$src, (i32 0x2))>;
+def : Pat<(v16f32 (frint VR512:$src)),
+ (VRNDSCALEZPSr VR512:$src, (i32 0x4))>;
+def : Pat<(v16f32 (ftrunc VR512:$src)),
+ (VRNDSCALEZPSr VR512:$src, (i32 0x3))>;
+
+def : Pat<(v8f64 (ffloor VR512:$src)),
+ (VRNDSCALEZPDr VR512:$src, (i32 0x1))>;
+def : Pat<(v8f64 (fnearbyint VR512:$src)),
+ (VRNDSCALEZPDr VR512:$src, (i32 0xC))>;
+def : Pat<(v8f64 (fceil VR512:$src)),
+ (VRNDSCALEZPDr VR512:$src, (i32 0x2))>;
+def : Pat<(v8f64 (frint VR512:$src)),
+ (VRNDSCALEZPDr VR512:$src, (i32 0x4))>;
+def : Pat<(v8f64 (ftrunc VR512:$src)),
+ (VRNDSCALEZPDr VR512:$src, (i32 0x3))>;
+
+//===----------------------------------------------------------------------===//
+// Integer truncate and extend operations
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_trunc_sat<bits<8> opc, string OpcodeStr,
+ RegisterClass dstRC, RegisterClass srcRC,
+ RegisterClass KRC, X86MemOperand x86memop> {
+ def rr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
+ (ins srcRC:$src),
+ !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
+ []>, EVEX;
+
+ def krr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
+ (ins KRC:$mask, srcRC:$src),
+ !strconcat(OpcodeStr,
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ []>, EVEX, EVEX_KZ;
+
+ def mr : AVX512XS8I<opc, MRMDestMem, (outs), (ins x86memop:$dst, srcRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ []>, EVEX;
+}
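+// vpmov* down-converts truncate each element to the narrower type; the
+// vpmovs*/vpmovus* variants saturate as signed/unsigned instead. The krr
+// form writes under a zeroing k-mask ({z}) and the mr form stores the
+// narrowed result directly to memory.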
+defm VPMOVQB : avx512_trunc_sat<0x32, "vpmovqb", VR128X, VR512, VK8WM,
+ i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
+defm VPMOVSQB : avx512_trunc_sat<0x22, "vpmovsqb", VR128X, VR512, VK8WM,
+ i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
+defm VPMOVUSQB : avx512_trunc_sat<0x12, "vpmovusqb", VR128X, VR512, VK8WM,
+ i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
+defm VPMOVQW : avx512_trunc_sat<0x34, "vpmovqw", VR128X, VR512, VK8WM,
+ i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
+defm VPMOVSQW : avx512_trunc_sat<0x24, "vpmovsqw", VR128X, VR512, VK8WM,
+ i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
+defm VPMOVUSQW : avx512_trunc_sat<0x14, "vpmovusqw", VR128X, VR512, VK8WM,
+ i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
+defm VPMOVQD : avx512_trunc_sat<0x35, "vpmovqd", VR256X, VR512, VK8WM,
+ i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
+defm VPMOVSQD : avx512_trunc_sat<0x25, "vpmovsqd", VR256X, VR512, VK8WM,
+ i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
+defm VPMOVUSQD : avx512_trunc_sat<0x15, "vpmovusqd", VR256X, VR512, VK8WM,
+ i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
+defm VPMOVDW : avx512_trunc_sat<0x33, "vpmovdw", VR256X, VR512, VK16WM,
+ i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
+defm VPMOVSDW : avx512_trunc_sat<0x23, "vpmovsdw", VR256X, VR512, VK16WM,
+ i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
+defm VPMOVUSDW : avx512_trunc_sat<0x13, "vpmovusdw", VR256X, VR512, VK16WM,
+ i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
+defm VPMOVDB : avx512_trunc_sat<0x31, "vpmovdb", VR128X, VR512, VK16WM,
+ i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
+defm VPMOVSDB : avx512_trunc_sat<0x21, "vpmovsdb", VR128X, VR512, VK16WM,
+ i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
+defm VPMOVUSDB : avx512_trunc_sat<0x11, "vpmovusdb", VR128X, VR512, VK16WM,
+ i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
+
+def : Pat<(v16i8 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQBrr VR512:$src)>;
+def : Pat<(v8i16 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQWrr VR512:$src)>;
+def : Pat<(v16i16 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDWrr VR512:$src)>;
+def : Pat<(v16i8 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDBrr VR512:$src)>;
+def : Pat<(v8i32 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQDrr VR512:$src)>;
+
+def : Pat<(v16i8 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
+ (VPMOVDBkrr VK16WM:$mask, VR512:$src)>;
+def : Pat<(v16i16 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
+ (VPMOVDWkrr VK16WM:$mask, VR512:$src)>;
+def : Pat<(v8i16 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
+ (VPMOVQWkrr VK8WM:$mask, VR512:$src)>;
+def : Pat<(v8i32 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
+ (VPMOVQDkrr VK8WM:$mask, VR512:$src)>;
+
+
+multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass DstRC,
+ RegisterClass SrcRC, SDNode OpNode, PatFrag mem_frag,
+ X86MemOperand x86memop, ValueType OpVT, ValueType InVT> {
+
+ def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
+ (ins SrcRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))]>, EVEX;
+ def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins x86memop:$src),
+ !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst,
+ (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))]>,
+ EVEX;
+}
+
+defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VR512, VR128X, X86vzext,
+ memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
+ EVEX_CD8<8, CD8VQ>;
+defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VR512, VR128X, X86vzext,
+ memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
+ EVEX_CD8<8, CD8VO>;
+defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VR512, VR256X, X86vzext,
+ memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
+ EVEX_CD8<16, CD8VH>;
+defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VR512, VR128X, X86vzext,
+ memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
+ EVEX_CD8<16, CD8VQ>;
+defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VR512, VR256X, X86vzext,
+ memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
+ EVEX_CD8<32, CD8VH>;
+
+defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VR512, VR128X, X86vsext,
+ memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
+ EVEX_CD8<8, CD8VQ>;
+defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VR512, VR128X, X86vsext,
+ memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
+ EVEX_CD8<8, CD8VO>;
+defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VR512, VR256X, X86vsext,
+ memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
+ EVEX_CD8<16, CD8VH>;
+defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VR512, VR128X, X86vsext,
+ memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
+ EVEX_CD8<16, CD8VQ>;
+defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VR512, VR256X, X86vsext,
+ memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
+ EVEX_CD8<32, CD8VH>;
+
+//===----------------------------------------------------------------------===//
+// GATHER - SCATTER Operations
+
+multiclass avx512_gather<bits<8> opc, string OpcodeStr, RegisterClass KRC,
+ RegisterClass RC, X86MemOperand memop> {
+let mayLoad = 1,
+ Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb" in
+ def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst, KRC:$mask_wb),
+ (ins RC:$src1, KRC:$mask, memop:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ []>, EVEX, EVEX_K;
+}
+defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", VK8WM, VR512, vy64xmem>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", VK16WM, VR512, vz32mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", VK8WM, VR512, vz64mem>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", VK8WM, VR256X, vz64mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", VK8WM, VR512, vy64xmem>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VPGATHERDDZ : avx512_gather<0x90, "vpgatherdd", VK16WM, VR512, vz32mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+defm VPGATHERQQZ : avx512_gather<0x91, "vpgatherqq", VK8WM, VR512, vz64mem>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VPGATHERQDZ : avx512_gather<0x91, "vpgatherqd", VK8WM, VR256X, vz64mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+multiclass avx512_scatter<bits<8> opc, string OpcodeStr, RegisterClass KRC,
+ RegisterClass RC, X86MemOperand memop> {
+let mayStore = 1, Constraints = "$mask = $mask_wb" in
+ def mr : AVX5128I<opc, MRMDestMem, (outs KRC:$mask_wb),
+ (ins memop:$dst, KRC:$mask, RC:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ []>, EVEX, EVEX_K;
+}
+
+defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", VK8WM, VR512, vy64xmem>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", VK16WM, VR512, vz32mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", VK8WM, VR512, vz64mem>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", VK8WM, VR256X, vz64mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", VK8WM, VR512, vy64xmem>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", VK16WM, VR512, vz32mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", VK8WM, VR512, vz64mem>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", VK8WM, VR256X, vz64mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+//===----------------------------------------------------------------------===//
+// VSHUFPS - VSHUFPD Operations
+
+multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
+ ValueType vt, string OpcodeStr, PatFrag mem_frag,
+ Domain d> {
+ def rmi : AVX512PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2, i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
+ (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
+ EVEX_4V, Sched<[WriteShuffleLd, ReadAfterLd]>;
+ def rri : AVX512PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
+ (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
+ EVEX_4V, Sched<[WriteShuffle]>;
+}
+
+defm VSHUFPSZ : avx512_shufp<VR512, f512mem, v16f32, "vshufps", memopv16f32,
+ SSEPackedSingle>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VSHUFPDZ : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", memopv8f64,
+ SSEPackedDouble>, OpSize, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+def : Pat<(v16i32 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
+ (VSHUFPSZrri VR512:$src1, VR512:$src2, imm:$imm)>;
+def : Pat<(v16i32 (X86Shufp VR512:$src1,
+ (memopv16i32 addr:$src2), (i8 imm:$imm))),
+ (VSHUFPSZrmi VR512:$src1, addr:$src2, imm:$imm)>;
+
+def : Pat<(v8i64 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
+ (VSHUFPDZrri VR512:$src1, VR512:$src2, imm:$imm)>;
+def : Pat<(v8i64 (X86Shufp VR512:$src1,
+ (memopv8i64 addr:$src2), (i8 imm:$imm))),
+ (VSHUFPDZrmi VR512:$src1, addr:$src2, imm:$imm)>;
+
+multiclass avx512_alignr<string OpcodeStr, RegisterClass RC,
+ X86MemOperand x86memop> {
+ def rri : AVX512AIi8<0x03, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, EVEX_4V;
+ let mayLoad = 1 in
+ def rmi : AVX512AIi8<0x03, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2, i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, EVEX_4V;
+}
+defm VALIGND : avx512_alignr<"valignd", VR512, i512mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VALIGNQ : avx512_alignr<"valignq", VR512, i512mem>,
+ VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+def : Pat<(v16f32 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
+ (VALIGNDrri VR512:$src2, VR512:$src1, imm:$imm)>;
+def : Pat<(v8f64 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
+ (VALIGNQrri VR512:$src2, VR512:$src1, imm:$imm)>;
+def : Pat<(v16i32 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
+ (VALIGNDrri VR512:$src2, VR512:$src1, imm:$imm)>;
+def : Pat<(v8i64 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
+ (VALIGNQrri VR512:$src2, VR512:$src1, imm:$imm)>;
+
+multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ X86MemOperand x86memop> {
+ def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
+ EVEX;
+ def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
+ (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
+ EVEX;
+}
+
+defm VPABSD : avx512_vpabs<0x1E, "vpabsd", VR512, i512mem>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VPABSQ : avx512_vpabs<0x1F, "vpabsq", VR512, i512mem>, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+
+multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
+ RegisterClass RC, RegisterClass KRC, PatFrag memop_frag,
+ X86MemOperand x86memop, PatFrag scalar_mfrag,
+ X86MemOperand x86scalar_mop, string BrdcstStr,
+ Intrinsic Int, Intrinsic maskInt, Intrinsic maskzInt> {
+ def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src),
+ !strconcat(OpcodeStr, "\t{$src, ${dst}|${dst}, $src}"),
+ [(set RC:$dst, (Int RC:$src))]>, EVEX;
+ def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, ${dst}|${dst}, $src}"),
+ [(set RC:$dst, (Int (memop_frag addr:$src)))]>, EVEX;
+ def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins x86scalar_mop:$src),
+ !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
+ ", ${dst}|${dst}, ${src}", BrdcstStr, "}"),
+ []>, EVEX, EVEX_B;
+ def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src),
+ !strconcat(OpcodeStr,
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ [(set RC:$dst, (maskzInt KRC:$mask, RC:$src))]>, EVEX, EVEX_KZ;
+ def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, x86memop:$src),
+ !strconcat(OpcodeStr,
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ [(set RC:$dst, (maskzInt KRC:$mask, (memop_frag addr:$src)))]>,
+ EVEX, EVEX_KZ;
+ def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, x86scalar_mop:$src),
+ !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
+ ", ${dst} {${mask}} {z}|${dst} {${mask}} {z}, ${src}",
+ BrdcstStr, "}"),
+ []>, EVEX, EVEX_KZ, EVEX_B;
+
+ let Constraints = "$src1 = $dst" in {
+ def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, RC:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ [(set RC:$dst, (maskInt RC:$src1, KRC:$mask, RC:$src2))]>, EVEX, EVEX_K;
+ def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, x86memop:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ [(set RC:$dst, (maskInt RC:$src1, KRC:$mask, (memop_frag addr:$src2)))]>, EVEX, EVEX_K;
+ def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, x86scalar_mop:$src2),
+ !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
+ ", ${dst} {${mask}}|${dst} {${mask}}, ${src2}", BrdcstStr, "}"),
+ []>, EVEX, EVEX_K, EVEX_B;
+ }
+}
+
+let Predicates = [HasCDI] in {
+defm VPCONFLICTD : avx512_conflict<0xC4, "vpconflictd", VR512, VK16WM,
+ memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
+ int_x86_avx512_conflict_d_512,
+ int_x86_avx512_conflict_d_mask_512,
+ int_x86_avx512_conflict_d_maskz_512>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+defm VPCONFLICTQ : avx512_conflict<0xC4, "vpconflictq", VR512, VK8WM,
+ memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ int_x86_avx512_conflict_q_512,
+ int_x86_avx512_conflict_q_mask_512,
+ int_x86_avx512_conflict_q_maskz_512>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+}
diff --git a/lib/Target/X86/X86InstrArithmetic.td b/lib/Target/X86/X86InstrArithmetic.td
index 225e9720da0c..7fc9c443373d 100644
--- a/lib/Target/X86/X86InstrArithmetic.td
+++ b/lib/Target/X86/X86InstrArithmetic.td
@@ -294,7 +294,7 @@ def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
// unsigned division/remainder
let hasSideEffects = 1 in { // so that we don't speculatively execute
let SchedRW = [WriteIDiv] in {
-let Defs = [AL,EFLAGS,AX], Uses = [AX] in
+let Defs = [AL,AH,EFLAGS], Uses = [AX] in
def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
"div{b}\t$src", [], IIC_DIV8_REG>;
let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
@@ -310,7 +310,7 @@ def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
} // SchedRW
let mayLoad = 1 in {
-let Defs = [AL,EFLAGS,AX], Uses = [AX] in
+let Defs = [AL,AH,EFLAGS], Uses = [AX] in
def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
"div{b}\t$src", [], IIC_DIV8_MEM>,
SchedLoadReg<WriteIDivLd>;
@@ -331,7 +331,7 @@ def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
// Signed division/remainder.
let SchedRW = [WriteIDiv] in {
-let Defs = [AL,EFLAGS,AX], Uses = [AX] in
+let Defs = [AL,AH,EFLAGS], Uses = [AX] in
def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
"idiv{b}\t$src", [], IIC_IDIV8>;
let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
@@ -347,7 +347,7 @@ def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
} // SchedRW
let mayLoad = 1 in {
-let Defs = [AL,EFLAGS,AX], Uses = [AX] in
+let Defs = [AL,AH,EFLAGS], Uses = [AX] in
def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
"idiv{b}\t$src", [], IIC_IDIV8>,
SchedLoadReg<WriteIDivLd>;
@@ -497,6 +497,21 @@ def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress = 1, CodeSize = 2
+let isCodeGenOnly = 1, CodeSize = 2 in {
+def INC32_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
+ "inc{w}\t$dst", [], IIC_UNARY_REG>,
+ OpSize, Requires<[In32BitMode]>;
+def INC32_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
+ "inc{l}\t$dst", [], IIC_UNARY_REG>,
+ Requires<[In32BitMode]>;
+def DEC32_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
+ "dec{w}\t$dst", [], IIC_UNARY_REG>,
+ OpSize, Requires<[In32BitMode]>;
+def DEC32_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
+ "dec{l}\t$dst", [], IIC_UNARY_REG>,
+ Requires<[In32BitMode]>;
+} // isCodeGenOnly = 1, CodeSize = 2
+
} // Constraints = "$src1 = $dst", SchedRW
let CodeSize = 2, SchedRW = [WriteALULd, WriteRMW] in {
@@ -578,7 +593,6 @@ let CodeSize = 2, SchedRW = [WriteALULd, WriteRMW] in {
} // CodeSize = 2, SchedRW
} // Defs = [EFLAGS]
-
/// X86TypeInfo - This is a bunch of information that describes relevant X86
/// information about value types. For example, it can tell you what
/// register class and preferred load to use.
@@ -726,20 +740,25 @@ class BinOpRR_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
: BinOpRR<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
[(set typeinfo.RegClass:$dst, EFLAGS,
(opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2,
- EFLAGS))], IIC_BIN_NONMEM>;
+ EFLAGS))], IIC_BIN_CARRY_NONMEM>;
// BinOpRR_Rev - Instructions like "add reg, reg, reg" (reversed encoding).
-class BinOpRR_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo>
+class BinOpRR_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ InstrItinClass itin = IIC_BIN_NONMEM>
: ITy<opcode, MRMSrcReg, typeinfo,
(outs typeinfo.RegClass:$dst),
(ins typeinfo.RegClass:$src1, typeinfo.RegClass:$src2),
- mnemonic, "{$src2, $dst|$dst, $src2}", [], IIC_BIN_NONMEM>,
+ mnemonic, "{$src2, $dst|$dst, $src2}", [], itin>,
Sched<[WriteALU]> {
// The disassembler should know about this, but not the asmparser.
let isCodeGenOnly = 1;
let hasSideEffects = 0;
}
+// BinOpRR_RFF_Rev - Instructions like "adc reg, reg, reg" (reversed encoding).
+class BinOpRR_RFF_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo>
+ : BinOpRR_Rev<opcode, mnemonic, typeinfo, IIC_BIN_CARRY_NONMEM>;
+
// BinOpRR_F_Rev - Instructions like "cmp reg, reg" (reversed encoding).
class BinOpRR_F_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo>
: ITy<opcode, MRMSrcReg, typeinfo, (outs),
@@ -753,10 +772,11 @@ class BinOpRR_F_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo>
// BinOpRM - Instructions like "add reg, reg, [mem]".
class BinOpRM<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- dag outlist, list<dag> pattern>
+ dag outlist, list<dag> pattern,
+ InstrItinClass itin = IIC_BIN_MEM>
: ITy<opcode, MRMSrcMem, typeinfo, outlist,
(ins typeinfo.RegClass:$src1, typeinfo.MemOperand:$src2),
- mnemonic, "{$src2, $src1|$src1, $src2}", pattern, IIC_BIN_NONMEM>,
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern, itin>,
Sched<[WriteALULd, ReadAfterLd]>;
// BinOpRM_R - Instructions like "add reg, reg, [mem]".
@@ -786,14 +806,15 @@ class BinOpRM_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
: BinOpRM<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
[(set typeinfo.RegClass:$dst, EFLAGS,
(opnode typeinfo.RegClass:$src1, (typeinfo.LoadNode addr:$src2),
- EFLAGS))]>;
+ EFLAGS))], IIC_BIN_CARRY_MEM>;
// BinOpRI - Instructions like "add reg, reg, imm".
class BinOpRI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- Format f, dag outlist, list<dag> pattern>
+ Format f, dag outlist, list<dag> pattern,
+ InstrItinClass itin = IIC_BIN_NONMEM>
: ITy<opcode, f, typeinfo, outlist,
(ins typeinfo.RegClass:$src1, typeinfo.ImmOperand:$src2),
- mnemonic, "{$src2, $src1|$src1, $src2}", pattern, IIC_BIN_NONMEM>,
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern, itin>,
Sched<[WriteALU]> {
let ImmT = typeinfo.ImmEncoding;
}
@@ -824,14 +845,15 @@ class BinOpRI_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
: BinOpRI<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
[(set typeinfo.RegClass:$dst, EFLAGS,
(opnode typeinfo.RegClass:$src1, typeinfo.ImmOperator:$src2,
- EFLAGS))]>;
+ EFLAGS))], IIC_BIN_CARRY_NONMEM>;
// BinOpRI8 - Instructions like "add reg, reg, imm8".
class BinOpRI8<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- Format f, dag outlist, list<dag> pattern>
+ Format f, dag outlist, list<dag> pattern,
+ InstrItinClass itin = IIC_BIN_NONMEM>
: ITy<opcode, f, typeinfo, outlist,
(ins typeinfo.RegClass:$src1, typeinfo.Imm8Operand:$src2),
- mnemonic, "{$src2, $src1|$src1, $src2}", pattern, IIC_BIN_NONMEM>,
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern, itin>,
Sched<[WriteALU]> {
let ImmT = Imm8; // Always 8-bit immediate.
}
@@ -863,14 +885,14 @@ class BinOpRI8_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
: BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
[(set typeinfo.RegClass:$dst, EFLAGS,
(opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2,
- EFLAGS))]>;
+ EFLAGS))], IIC_BIN_CARRY_NONMEM>;
// BinOpMR - Instructions like "add [mem], reg".
class BinOpMR<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- list<dag> pattern>
+ list<dag> pattern, InstrItinClass itin = IIC_BIN_MEM>
: ITy<opcode, MRMDestMem, typeinfo,
(outs), (ins typeinfo.MemOperand:$dst, typeinfo.RegClass:$src),
- mnemonic, "{$src, $dst|$dst, $src}", pattern, IIC_BIN_MEM>,
+ mnemonic, "{$src, $dst|$dst, $src}", pattern, itin>,
Sched<[WriteALULd, WriteRMW]>;
// BinOpMR_RMW - Instructions like "add [mem], reg".
@@ -886,7 +908,7 @@ class BinOpMR_RMW_FF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
: BinOpMR<opcode, mnemonic, typeinfo,
[(store (opnode (load addr:$dst), typeinfo.RegClass:$src, EFLAGS),
addr:$dst),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_BIN_CARRY_MEM>;
// BinOpMR_F - Instructions like "cmp [mem], reg".
class BinOpMR_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
@@ -896,10 +918,11 @@ class BinOpMR_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
// BinOpMI - Instructions like "add [mem], imm".
class BinOpMI<string mnemonic, X86TypeInfo typeinfo,
- Format f, list<dag> pattern, bits<8> opcode = 0x80>
+ Format f, list<dag> pattern, bits<8> opcode = 0x80,
+ InstrItinClass itin = IIC_BIN_MEM>
: ITy<opcode, f, typeinfo,
(outs), (ins typeinfo.MemOperand:$dst, typeinfo.ImmOperand:$src),
- mnemonic, "{$src, $dst|$dst, $src}", pattern, IIC_BIN_MEM>,
+ mnemonic, "{$src, $dst|$dst, $src}", pattern, itin>,
Sched<[WriteALULd, WriteRMW]> {
let ImmT = typeinfo.ImmEncoding;
}
@@ -917,7 +940,7 @@ class BinOpMI_RMW_FF<string mnemonic, X86TypeInfo typeinfo,
: BinOpMI<mnemonic, typeinfo, f,
[(store (opnode (typeinfo.VT (load addr:$dst)),
typeinfo.ImmOperator:$src, EFLAGS), addr:$dst),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], 0x80, IIC_BIN_CARRY_MEM>;
// BinOpMI_F - Instructions like "cmp [mem], imm".
class BinOpMI_F<string mnemonic, X86TypeInfo typeinfo,
@@ -929,10 +952,11 @@ class BinOpMI_F<string mnemonic, X86TypeInfo typeinfo,
// BinOpMI8 - Instructions like "add [mem], imm8".
class BinOpMI8<string mnemonic, X86TypeInfo typeinfo,
- Format f, list<dag> pattern>
+ Format f, list<dag> pattern,
+ InstrItinClass itin = IIC_BIN_MEM>
: ITy<0x82, f, typeinfo,
(outs), (ins typeinfo.MemOperand:$dst, typeinfo.Imm8Operand:$src),
- mnemonic, "{$src, $dst|$dst, $src}", pattern, IIC_BIN_MEM>,
+ mnemonic, "{$src, $dst|$dst, $src}", pattern, itin>,
Sched<[WriteALULd, WriteRMW]> {
let ImmT = Imm8; // Always 8-bit immediate.
}
@@ -951,7 +975,7 @@ class BinOpMI8_RMW_FF<string mnemonic, X86TypeInfo typeinfo,
: BinOpMI8<mnemonic, typeinfo, f,
[(store (opnode (load addr:$dst),
typeinfo.Imm8Operator:$src, EFLAGS), addr:$dst),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_BIN_CARRY_MEM>;
// BinOpMI8_F - Instructions like "cmp [mem], imm8".
class BinOpMI8_F<string mnemonic, X86TypeInfo typeinfo,
@@ -960,18 +984,28 @@ class BinOpMI8_F<string mnemonic, X86TypeInfo typeinfo,
[(set EFLAGS, (opnode (load addr:$dst),
typeinfo.Imm8Operator:$src))]>;
-// BinOpAI - Instructions like "add %eax, %eax, imm".
+// BinOpAI - Instructions like "add %eax, %eax, imm", that imp-def EFLAGS.
class BinOpAI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- Register areg, string operands>
+ Register areg, string operands,
+ InstrItinClass itin = IIC_BIN_NONMEM>
: ITy<opcode, RawFrm, typeinfo,
(outs), (ins typeinfo.ImmOperand:$src),
- mnemonic, operands, []>, Sched<[WriteALU]> {
+ mnemonic, operands, [], itin>, Sched<[WriteALU]> {
let ImmT = typeinfo.ImmEncoding;
let Uses = [areg];
- let Defs = [areg];
+ let Defs = [areg, EFLAGS];
let hasSideEffects = 0;
}
+// BinOpAI_FF - Instructions like "adc %eax, %eax, imm", that implicitly define
+// and use EFLAGS.
+class BinOpAI_FF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ Register areg, string operands>
+ : BinOpAI<opcode, mnemonic, typeinfo, areg, operands,
+ IIC_BIN_CARRY_NONMEM> {
+ let Uses = [areg, EFLAGS];
+}
+
/// ArithBinOp_RF - This is an arithmetic binary operator where the pattern is
/// defined with "(set GPR:$dst, EFLAGS, (...".
///
@@ -1030,16 +1064,16 @@ multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#16mi : BinOpMI_RMW<mnemonic, Xi16, opnode, MemMRM>;
def NAME#32mi : BinOpMI_RMW<mnemonic, Xi32, opnode, MemMRM>;
def NAME#64mi32 : BinOpMI_RMW<mnemonic, Xi64, opnode, MemMRM>;
-
- def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL,
- "{$src, %al|AL, $src}">;
- def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX,
- "{$src, %ax|AX, $src}">;
- def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX,
- "{$src, %eax|EAX, $src}">;
- def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX,
- "{$src, %rax|RAX, $src}">;
- }
+ } // Defs = [EFLAGS]
+
+ def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL,
+ "{$src, %al|al, $src}">;
+ def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX,
+ "{$src, %ax|ax, $src}">;
+ def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX,
+ "{$src, %eax|eax, $src}">;
+ def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX,
+ "{$src, %rax|rax, $src}">;
}
/// ArithBinOp_RFF - This is an arithmetic binary operator where the pattern is
@@ -1052,7 +1086,7 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
string mnemonic, Format RegMRM, Format MemMRM,
SDNode opnode, bit CommutableRR,
bit ConvertibleToThreeAddress> {
- let Defs = [EFLAGS] in {
+ let Uses = [EFLAGS], Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
let isCommutable = CommutableRR,
isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
@@ -1062,10 +1096,10 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#64rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi64, opnode>;
} // isCommutable
- def NAME#8rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi8>;
- def NAME#16rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi16>;
- def NAME#32rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi32>;
- def NAME#64rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi64>;
+ def NAME#8rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi8>;
+ def NAME#16rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi16>;
+ def NAME#32rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi32>;
+ def NAME#64rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi64>;
def NAME#8rm : BinOpRM_RFF<BaseOpc2, mnemonic, Xi8 , opnode>;
def NAME#16rm : BinOpRM_RFF<BaseOpc2, mnemonic, Xi16, opnode>;
@@ -1101,16 +1135,16 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#16mi : BinOpMI_RMW_FF<mnemonic, Xi16, opnode, MemMRM>;
def NAME#32mi : BinOpMI_RMW_FF<mnemonic, Xi32, opnode, MemMRM>;
def NAME#64mi32 : BinOpMI_RMW_FF<mnemonic, Xi64, opnode, MemMRM>;
-
- def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL,
- "{$src, %al|AL, $src}">;
- def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX,
- "{$src, %ax|AX, $src}">;
- def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX,
- "{$src, %eax|EAX, $src}">;
- def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX,
- "{$src, %rax|RAX, $src}">;
- }
+ } // Uses = [EFLAGS], Defs = [EFLAGS]
+
+ def NAME#8i8 : BinOpAI_FF<BaseOpc4, mnemonic, Xi8 , AL,
+ "{$src, %al|al, $src}">;
+ def NAME#16i16 : BinOpAI_FF<BaseOpc4, mnemonic, Xi16, AX,
+ "{$src, %ax|ax, $src}">;
+ def NAME#32i32 : BinOpAI_FF<BaseOpc4, mnemonic, Xi32, EAX,
+ "{$src, %eax|eax, $src}">;
+ def NAME#64i32 : BinOpAI_FF<BaseOpc4, mnemonic, Xi64, RAX,
+ "{$src, %rax|rax, $src}">;
}
/// ArithBinOp_F - This is an arithmetic binary operator where the pattern is
@@ -1168,16 +1202,16 @@ multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#16mi : BinOpMI_F<mnemonic, Xi16, opnode, MemMRM>;
def NAME#32mi : BinOpMI_F<mnemonic, Xi32, opnode, MemMRM>;
def NAME#64mi32 : BinOpMI_F<mnemonic, Xi64, opnode, MemMRM>;
-
- def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL,
- "{$src, %al|AL, $src}">;
- def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX,
- "{$src, %ax|AX, $src}">;
- def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX,
- "{$src, %eax|EAX, $src}">;
- def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX,
- "{$src, %rax|RAX, $src}">;
- }
+ } // Defs = [EFLAGS]
+
+ def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL,
+ "{$src, %al|al, $src}">;
+ def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX,
+ "{$src, %ax|ax, $src}">;
+ def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX,
+ "{$src, %eax|eax, $src}">;
+ def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX,
+ "{$src, %rax|rax, $src}">;
}
@@ -1195,12 +1229,10 @@ defm SUB : ArithBinOp_RF<0x28, 0x2A, 0x2C, "sub", MRM5r, MRM5m,
}
// Arithmetic.
-let Uses = [EFLAGS] in {
- defm ADC : ArithBinOp_RFF<0x10, 0x12, 0x14, "adc", MRM2r, MRM2m, X86adc_flag,
- 1, 0>;
- defm SBB : ArithBinOp_RFF<0x18, 0x1A, 0x1C, "sbb", MRM3r, MRM3m, X86sbb_flag,
- 0, 0>;
-}
+defm ADC : ArithBinOp_RFF<0x10, 0x12, 0x14, "adc", MRM2r, MRM2m, X86adc_flag,
+ 1, 0>;
+defm SBB : ArithBinOp_RFF<0x18, 0x1A, 0x1C, "sbb", MRM3r, MRM3m, X86sbb_flag,
+ 0, 0>;
let isCompare = 1 in {
defm CMP : ArithBinOp_F<0x38, 0x3A, 0x3C, "cmp", MRM7r, MRM7m, X86cmp, 0, 0>;
@@ -1215,44 +1247,46 @@ defm CMP : ArithBinOp_F<0x38, 0x3A, 0x3C, "cmp", MRM7r, MRM7m, X86cmp, 0, 0>;
def X86testpat : PatFrag<(ops node:$lhs, node:$rhs),
(X86cmp (and_su node:$lhs, node:$rhs), 0)>;
-let isCompare = 1, Defs = [EFLAGS] in {
- let isCommutable = 1 in {
- def TEST8rr : BinOpRR_F<0x84, "test", Xi8 , X86testpat, MRMSrcReg>;
- def TEST16rr : BinOpRR_F<0x84, "test", Xi16, X86testpat, MRMSrcReg>;
- def TEST32rr : BinOpRR_F<0x84, "test", Xi32, X86testpat, MRMSrcReg>;
- def TEST64rr : BinOpRR_F<0x84, "test", Xi64, X86testpat, MRMSrcReg>;
- } // isCommutable
-
- def TEST8rm : BinOpRM_F<0x84, "test", Xi8 , X86testpat>;
- def TEST16rm : BinOpRM_F<0x84, "test", Xi16, X86testpat>;
- def TEST32rm : BinOpRM_F<0x84, "test", Xi32, X86testpat>;
- def TEST64rm : BinOpRM_F<0x84, "test", Xi64, X86testpat>;
-
- def TEST8ri : BinOpRI_F<0xF6, "test", Xi8 , X86testpat, MRM0r>;
- def TEST16ri : BinOpRI_F<0xF6, "test", Xi16, X86testpat, MRM0r>;
- def TEST32ri : BinOpRI_F<0xF6, "test", Xi32, X86testpat, MRM0r>;
- def TEST64ri32 : BinOpRI_F<0xF6, "test", Xi64, X86testpat, MRM0r>;
-
- def TEST8mi : BinOpMI_F<"test", Xi8 , X86testpat, MRM0m, 0xF6>;
- def TEST16mi : BinOpMI_F<"test", Xi16, X86testpat, MRM0m, 0xF6>;
- def TEST32mi : BinOpMI_F<"test", Xi32, X86testpat, MRM0m, 0xF6>;
- def TEST64mi32 : BinOpMI_F<"test", Xi64, X86testpat, MRM0m, 0xF6>;
+let isCompare = 1 in {
+ let Defs = [EFLAGS] in {
+ let isCommutable = 1 in {
+ def TEST8rr : BinOpRR_F<0x84, "test", Xi8 , X86testpat, MRMSrcReg>;
+ def TEST16rr : BinOpRR_F<0x84, "test", Xi16, X86testpat, MRMSrcReg>;
+ def TEST32rr : BinOpRR_F<0x84, "test", Xi32, X86testpat, MRMSrcReg>;
+ def TEST64rr : BinOpRR_F<0x84, "test", Xi64, X86testpat, MRMSrcReg>;
+ } // isCommutable
+
+ def TEST8rm : BinOpRM_F<0x84, "test", Xi8 , X86testpat>;
+ def TEST16rm : BinOpRM_F<0x84, "test", Xi16, X86testpat>;
+ def TEST32rm : BinOpRM_F<0x84, "test", Xi32, X86testpat>;
+ def TEST64rm : BinOpRM_F<0x84, "test", Xi64, X86testpat>;
+
+ def TEST8ri : BinOpRI_F<0xF6, "test", Xi8 , X86testpat, MRM0r>;
+ def TEST16ri : BinOpRI_F<0xF6, "test", Xi16, X86testpat, MRM0r>;
+ def TEST32ri : BinOpRI_F<0xF6, "test", Xi32, X86testpat, MRM0r>;
+ def TEST64ri32 : BinOpRI_F<0xF6, "test", Xi64, X86testpat, MRM0r>;
+
+ def TEST8mi : BinOpMI_F<"test", Xi8 , X86testpat, MRM0m, 0xF6>;
+ def TEST16mi : BinOpMI_F<"test", Xi16, X86testpat, MRM0m, 0xF6>;
+ def TEST32mi : BinOpMI_F<"test", Xi32, X86testpat, MRM0m, 0xF6>;
+ def TEST64mi32 : BinOpMI_F<"test", Xi64, X86testpat, MRM0m, 0xF6>;
+
+ // When testing the result of EXTRACT_SUBREG sub_8bit_hi, make sure the
+ // register class is constrained to GR8_NOREX.
+ let isPseudo = 1 in
+ def TEST8ri_NOREX : I<0, Pseudo, (outs), (ins GR8_NOREX:$src, i8imm:$mask),
+ "", [], IIC_BIN_NONMEM>, Sched<[WriteALU]>;
+ } // Defs = [EFLAGS]
def TEST8i8 : BinOpAI<0xA8, "test", Xi8 , AL,
- "{$src, %al|AL, $src}">;
+ "{$src, %al|al, $src}">;
def TEST16i16 : BinOpAI<0xA8, "test", Xi16, AX,
- "{$src, %ax|AX, $src}">;
+ "{$src, %ax|ax, $src}">;
def TEST32i32 : BinOpAI<0xA8, "test", Xi32, EAX,
- "{$src, %eax|EAX, $src}">;
+ "{$src, %eax|eax, $src}">;
def TEST64i32 : BinOpAI<0xA8, "test", Xi64, RAX,
- "{$src, %rax|RAX, $src}">;
-
- // When testing the result of EXTRACT_SUBREG sub_8bit_hi, make sure the
- // register class is constrained to GR8_NOREX.
- let isPseudo = 1 in
- def TEST8ri_NOREX : I<0, Pseudo, (outs), (ins GR8_NOREX:$src, i8imm:$mask),
- "", [], IIC_BIN_NONMEM>, Sched<[WriteALU]>;
-}
+ "{$src, %rax|rax, $src}">;
+} // isCompare
//===----------------------------------------------------------------------===//
// ANDN Instruction
@@ -1294,12 +1328,12 @@ let neverHasSideEffects = 1 in {
let isCommutable = 1 in
def rr : I<0xF6, MRMSrcReg, (outs RC:$dst1, RC:$dst2), (ins RC:$src),
!strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"),
- [], IIC_MUL8>, T8XD, VEX_4V, Sched<[WriteIMul]>;
+ [], IIC_MUL8>, T8XD, VEX_4V, Sched<[WriteIMul, WriteIMulH]>;
let mayLoad = 1 in
def rm : I<0xF6, MRMSrcMem, (outs RC:$dst1, RC:$dst2), (ins x86memop:$src),
!strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"),
- [], IIC_MUL8>, T8XD, VEX_4V, Sched<[WriteIMulLd]>;
+ [], IIC_MUL8>, T8XD, VEX_4V, Sched<[WriteIMulLd, WriteIMulH]>;
}
}
@@ -1328,7 +1362,7 @@ let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in {
def ADCX32rm : I<0xF6, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"adcx{l}\t{$src, $dst|$dst, $src}",
[], IIC_BIN_MEM>, T8, OpSize;
-
+
def ADCX64rm : I<0xF6, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"adcx{q}\t{$src, $dst|$dst, $src}",
[], IIC_BIN_MEM>, T8, OpSize, REX_W, Requires<[In64BitMode]>;
@@ -1353,7 +1387,7 @@ let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in {
def ADOX32rm : I<0xF6, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"adox{l}\t{$src, $dst|$dst, $src}",
[], IIC_BIN_MEM>, T8XS;
-
+
def ADOX64rm : I<0xF6, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"adox{q}\t{$src, $dst|$dst, $src}",
[], IIC_BIN_MEM>, T8XS, REX_W, Requires<[In64BitMode]>;
diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index d9ff0c63c55f..7d10b67bfe6d 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -129,12 +129,13 @@ def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
// The MSVC runtime contains an _ftol2 routine for converting floating-point
// to integer values. It has a strange calling convention: the input is
-// popped from the x87 stack, and the return value is given in EDX:EAX. No
-// other registers (aside from flags) are touched.
+// popped from the x87 stack, and the return value is given in EDX:EAX. ECX is
+// used as a temporary register. No other registers (aside from flags) are
+// touched.
// Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80
// variant is unnecessary.
-let Defs = [EAX, EDX, EFLAGS], FPForm = SpecialFP in {
+let Defs = [EAX, EDX, ECX, EFLAGS], FPForm = SpecialFP in {
def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),
"# win32 fptoui",
[(X86WinFTOL RFP32:$src)]>,
@@ -216,48 +217,38 @@ def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
// Alias Instructions
//===----------------------------------------------------------------------===//
-// Alias instructions that map movr0 to xor.
+// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: Set encoding to pseudo.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
- isCodeGenOnly = 1 in {
-def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
- [(set GR8:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
-
-// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
-// encoding and avoids a partial-register update sometimes, but doing so
-// at isel time interferes with rematerialization in the current register
-// allocator. For now, this is rewritten when the instruction is lowered
-// to an MCInst.
-def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
- "",
- [(set GR16:$dst, 0)], IIC_ALU_NONMEM>, OpSize,
- Sched<[WriteZero]>;
-
-// FIXME: Set encoding to pseudo.
+ isCodeGenOnly = 1 in
def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
[(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
-}
-// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
-// smaller encoding, but doing so at isel time interferes with rematerialization
-// in the current register allocator. For now, this is rewritten when the
-// instruction is lowered to an MCInst.
-// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
-// when we have a better way to specify isel priority.
-let Defs = [EFLAGS], isCodeGenOnly=1,
- AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
- [(set GR64:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
+// Other widths can also make use of the 32-bit xor, which may have a smaller
+// encoding and avoid partial register updates.
+def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
+def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
+def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
+ let AddedComplexity = 20;
+}
// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
- isCodeGenOnly = 1 in
-def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
- "", [(set GR64:$dst, i64immZExt32:$src)],
- IIC_ALU_NONMEM>, Sched<[WriteALU]>;
+ isCodeGenOnly = 1, neverHasSideEffects = 1 in
+def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src),
+ "", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>;
+
+// This 64-bit pseudo-move can be used for both a 64-bit constant that is
+// actually the zero-extension of a 32-bit constant, and for labels in the
+// x86-64 small code model.
+def mov64imm32 : ComplexPattern<i64, 1, "SelectMOV64Imm32", [imm, X86Wrapper]>;
+
+let AddedComplexity = 1 in
+def : Pat<(i64 mov64imm32:$src),
+ (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>;
// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in {
@@ -893,6 +884,24 @@ let Uses = [EFLAGS], usesCustomInserter = 1 in {
[(set VR256:$dst,
(v4i64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
EFLAGS)))]>;
+ def CMOV_V8I64 : I<0, Pseudo,
+ (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
+ "#CMOV_V8I64 PSEUDO!",
+ [(set VR512:$dst,
+ (v8i64 (X86cmov VR512:$t, VR512:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V8F64 : I<0, Pseudo,
+ (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
+ "#CMOV_V8F64 PSEUDO!",
+ [(set VR512:$dst,
+ (v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V16F32 : I<0, Pseudo,
+ (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
+ "#CMOV_V16F32 PSEUDO!",
+ [(set VR512:$dst,
+ (v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond,
+ EFLAGS)))]>;
}
@@ -926,8 +935,6 @@ def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
(MOV32mi addr:$dst, tblockaddress:$src)>;
-
-
// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable when not in small
// code model mode, should use 'movabs'. FIXME: This is really a hack, the
// 'movabs' predicate should handle this sort of thing.
@@ -942,20 +949,6 @@ def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
(MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;
-// In static codegen with small code model, we can get the address of a label
-// into a register with 'movl'. FIXME: This is a hack, the 'imm' predicate of
-// the MOV64ri64i32 should accept these.
-def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
- (MOV64ri64i32 tconstpool :$dst)>, Requires<[SmallCode]>;
-def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
- (MOV64ri64i32 tjumptable :$dst)>, Requires<[SmallCode]>;
-def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
- (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
-def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
- (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
-def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
- (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;
-
// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
@@ -989,14 +982,12 @@ def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tblockaddress:$src)>,
Requires<[NearData, IsStatic]>;
-
-
// Calls
// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
- (MOV64ri tglobaltlsaddr :$dst)>;
+ (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
(ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
@@ -1119,7 +1110,8 @@ defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;
def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
-def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
+def : Pat<(zextloadi64i1 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
@@ -1133,14 +1125,16 @@ def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
-def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
-def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
-def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
+def : Pat<(extloadi64i1 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
+def : Pat<(extloadi64i8 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
+def : Pat<(extloadi64i16 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i32 addr:$src),
- (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
- sub_32bit)>;
+ (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
@@ -1152,8 +1146,10 @@ def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
def : Pat<(i32 (anyext GR16:$src)),
(INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;
-def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
-def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
+def : Pat<(i64 (anyext GR8 :$src)),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
+def : Pat<(i64 (anyext GR16:$src)),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
(SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
@@ -1318,13 +1314,19 @@ def : Pat<(and GR16:$src1, 0xff),
// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
- (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
+ (SUBREG_TO_REG (i64 0),
+ (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
+ sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
- (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
+ (SUBREG_TO_REG (i64 0),
+ (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
+ sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
- (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
+ (SUBREG_TO_REG (i64 0),
+ (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
+ sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
(MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
diff --git a/lib/Target/X86/X86InstrControl.td b/lib/Target/X86/X86InstrControl.td
index 0e696513d47c..e4ccc06feb89 100644
--- a/lib/Target/X86/X86InstrControl.td
+++ b/lib/Target/X86/X86InstrControl.td
@@ -49,10 +49,12 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1,
let isBarrier = 1, isBranch = 1, isTerminator = 1, SchedRW = [WriteJump] in {
def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
"jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>;
+ let hasSideEffects = 0 in
def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
"jmp\t$dst", [], IIC_JMP_REL>;
// FIXME: Intel syntax for JMP64pcrel32 such that it is not ambiguous
// with JMP_1.
+ let hasSideEffects = 0 in
def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
"jmpq\t$dst", [], IIC_JMP_REL>;
}
@@ -60,6 +62,7 @@ let isBarrier = 1, isBranch = 1, isTerminator = 1, SchedRW = [WriteJump] in {
// Conditional Branches.
let isBranch = 1, isTerminator = 1, Uses = [EFLAGS], SchedRW = [WriteJump] in {
multiclass ICBr<bits<8> opc1, bits<8> opc4, string asm, PatFrag Cond> {
+ let hasSideEffects = 0 in
def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm, [],
IIC_Jcc>;
def _4 : Ii32PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
@@ -85,7 +88,7 @@ defm JLE : ICBr<0x7E, 0x8E, "jle\t$dst", X86_COND_LE>;
defm JG : ICBr<0x7F, 0x8F, "jg\t$dst" , X86_COND_G>;
// jcx/jecx/jrcx instructions.
-let isBranch = 1, isTerminator = 1, SchedRW = [WriteJump] in {
+let isBranch = 1, isTerminator = 1, hasSideEffects = 0, SchedRW = [WriteJump] in {
// These are the 32-bit versions of this instruction for the asmparser. In
// 32-bit mode, the address size prefix is jcxz and the unprefixed version is
// jecxz.
diff --git a/lib/Target/X86/X86InstrExtension.td b/lib/Target/X86/X86InstrExtension.td
index 6dc7175357b3..4090550cfc05 100644
--- a/lib/Target/X86/X86InstrExtension.td
+++ b/lib/Target/X86/X86InstrExtension.td
@@ -14,26 +14,26 @@
let neverHasSideEffects = 1 in {
let Defs = [AX], Uses = [AL] in
def CBW : I<0x98, RawFrm, (outs), (ins),
- "{cbtw|cbw}", []>, OpSize; // AX = signext(AL)
+ "{cbtw|cbw}", [], IIC_CBW>, OpSize; // AX = signext(AL)
let Defs = [EAX], Uses = [AX] in
def CWDE : I<0x98, RawFrm, (outs), (ins),
- "{cwtl|cwde}", []>; // EAX = signext(AX)
+ "{cwtl|cwde}", [], IIC_CBW>; // EAX = signext(AX)
let Defs = [AX,DX], Uses = [AX] in
def CWD : I<0x99, RawFrm, (outs), (ins),
- "{cwtd|cwd}", []>, OpSize; // DX:AX = signext(AX)
+ "{cwtd|cwd}", [], IIC_CBW>, OpSize; // DX:AX = signext(AX)
let Defs = [EAX,EDX], Uses = [EAX] in
def CDQ : I<0x99, RawFrm, (outs), (ins),
- "{cltd|cdq}", []>; // EDX:EAX = signext(EAX)
+ "{cltd|cdq}", [], IIC_CBW>; // EDX:EAX = signext(EAX)
let Defs = [RAX], Uses = [EAX] in
def CDQE : RI<0x98, RawFrm, (outs), (ins),
- "{cltq|cdqe}", []>; // RAX = signext(EAX)
+ "{cltq|cdqe}", [], IIC_CBW>; // RAX = signext(EAX)
let Defs = [RAX,RDX], Uses = [RAX] in
def CQO : RI<0x99, RawFrm, (outs), (ins),
- "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
+ "{cqto|cqo}", [], IIC_CBW>; // RDX:RAX = signext(RAX)
}
@@ -149,38 +149,24 @@ def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
"movz{wq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>,
TB, Sched<[WriteALULd]>;
-// FIXME: These should be Pat patterns.
-let isCodeGenOnly = 1 in {
-
-// Use movzbl instead of movzbq when the destination is a register; it's
-// equivalent due to implicit zero-extending, and it has a smaller encoding.
-def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
- "", [(set GR64:$dst, (zext GR8:$src))], IIC_MOVZX>, TB,
- Sched<[WriteALU]>;
-def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
- "", [(set GR64:$dst, (zextloadi64i8 addr:$src))], IIC_MOVZX>,
- TB, Sched<[WriteALULd]>;
-// Use movzwl instead of movzwq when the destination is a register; it's
-// equivalent due to implicit zero-extending, and it has a smaller encoding.
-def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
- "", [(set GR64:$dst, (zext GR16:$src))], IIC_MOVZX>, TB,
- Sched<[WriteALU]>;
-def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
- "", [(set GR64:$dst, (zextloadi64i16 addr:$src))],
- IIC_MOVZX>, TB, Sched<[WriteALULd]>;
-
-// There's no movzlq instruction, but movl can be used for this purpose, using
-// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
-// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
-// zero-extension, however this isn't possible when the 32-bit value is
-// defined by a truncate or is copied from something where the high bits aren't
-// necessarily all zero. In such cases, we fall back to these explicit zext
-// instructions.
-def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
- "", [(set GR64:$dst, (zext GR32:$src))], IIC_MOVZX>,
- Sched<[WriteALU]>;
-def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
- "", [(set GR64:$dst, (zextloadi64i32 addr:$src))],
- IIC_MOVZX>, Sched<[WriteALULd]>;
-}
-
+// 64-bit zero-extension patterns use SUBREG_TO_REG and an operation writing a
+// 32-bit register.
+def : Pat<(i64 (zext GR8:$src)),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8:$src), sub_32bit)>;
+def : Pat<(zextloadi64i8 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
+
+def : Pat<(i64 (zext GR16:$src)),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16:$src), sub_32bit)>;
+def : Pat<(zextloadi64i16 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
+
+// The preferred way to do 32-bit-to-64-bit zero extension on x86-64 is to use a
+// SUBREG_TO_REG to utilize implicit zero-extension, however this isn't possible
+// when the 32-bit value is defined by a truncate or is copied from something
+// where the high bits aren't necessarily all zero. In such cases, we fall back
+// to these explicit zext instructions.
+def : Pat<(i64 (zext GR32:$src)),
+ (SUBREG_TO_REG (i64 0), (MOV32rr GR32:$src), sub_32bit)>;
+def : Pat<(i64 (zextloadi64i32 addr:$src)),
+ (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
diff --git a/lib/Target/X86/X86InstrFMA.td b/lib/Target/X86/X86InstrFMA.td
index 7759a8a2dabb..69cd5a568ba8 100644
--- a/lib/Target/X86/X86InstrFMA.td
+++ b/lib/Target/X86/X86InstrFMA.td
@@ -74,43 +74,43 @@ let neverHasSideEffects = 1 in {
// Fused Multiply-Add
let ExeDomain = SSEPackedSingle in {
- defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", memopv4f32,
- memopv8f32, X86Fmadd, v4f32, v8f32>;
- defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", memopv4f32,
- memopv8f32, X86Fmsub, v4f32, v8f32>;
+ defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", loadv4f32,
+ loadv8f32, X86Fmadd, v4f32, v8f32>;
+ defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", loadv4f32,
+ loadv8f32, X86Fmsub, v4f32, v8f32>;
defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps",
- memopv4f32, memopv8f32, X86Fmaddsub,
+ loadv4f32, loadv8f32, X86Fmaddsub,
v4f32, v8f32>;
defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps",
- memopv4f32, memopv8f32, X86Fmsubadd,
+ loadv4f32, loadv8f32, X86Fmsubadd,
v4f32, v8f32>;
}
let ExeDomain = SSEPackedDouble in {
- defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", memopv2f64,
- memopv4f64, X86Fmadd, v2f64, v4f64>, VEX_W;
- defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", memopv2f64,
- memopv4f64, X86Fmsub, v2f64, v4f64>, VEX_W;
+ defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", loadv2f64,
+ loadv4f64, X86Fmadd, v2f64, v4f64>, VEX_W;
+ defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", loadv2f64,
+ loadv4f64, X86Fmsub, v2f64, v4f64>, VEX_W;
defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd",
- memopv2f64, memopv4f64, X86Fmaddsub,
+ loadv2f64, loadv4f64, X86Fmaddsub,
v2f64, v4f64>, VEX_W;
defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd",
- memopv2f64, memopv4f64, X86Fmsubadd,
+ loadv2f64, loadv4f64, X86Fmsubadd,
v2f64, v4f64>, VEX_W;
}
// Fused Negative Multiply-Add
let ExeDomain = SSEPackedSingle in {
- defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", memopv4f32,
- memopv8f32, X86Fnmadd, v4f32, v8f32>;
- defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", memopv4f32,
- memopv8f32, X86Fnmsub, v4f32, v8f32>;
+ defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", loadv4f32,
+ loadv8f32, X86Fnmadd, v4f32, v8f32>;
+ defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", loadv4f32,
+ loadv8f32, X86Fnmsub, v4f32, v8f32>;
}
let ExeDomain = SSEPackedDouble in {
- defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", memopv2f64,
- memopv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W;
+ defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", loadv2f64,
+ loadv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W;
defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd",
- memopv2f64, memopv4f64, X86Fnmsub, v2f64,
+ loadv2f64, loadv4f64, X86Fnmsub, v2f64,
v4f64>, VEX_W;
}
@@ -206,25 +206,26 @@ multiclass fma4s<bits<8> opc, string OpcodeStr, RegisterClass RC,
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst,
- (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>, VEX_W, MemOp4;
+ (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>, VEX_W, VEX_LIG, MemOp4;
def rm : FMA4<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, RC:$src2, x86memop:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst, (OpNode RC:$src1, RC:$src2,
- (mem_frag addr:$src3)))]>, VEX_W, MemOp4;
+ (mem_frag addr:$src3)))]>, VEX_W, VEX_LIG, MemOp4;
def mr : FMA4<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2, RC:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst,
- (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>;
+ (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>, VEX_LIG;
// For disassembler
let isCodeGenOnly = 1, hasSideEffects = 0 in
def rr_REV : FMA4<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
+ VEX_LIG;
}
multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
@@ -235,19 +236,19 @@ multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, MemOp4;
+ (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, VEX_LIG, MemOp4;
def rm_Int : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, memop:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst, (Int VR128:$src1, VR128:$src2,
- mem_cpat:$src3))]>, VEX_W, MemOp4;
+ mem_cpat:$src3))]>, VEX_W, VEX_LIG, MemOp4;
def mr_Int : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, memop:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (Int VR128:$src1, mem_cpat:$src2, VR128:$src3))]>;
+ (Int VR128:$src1, mem_cpat:$src2, VR128:$src3))]>, VEX_LIG;
}
multiclass fma4p<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -338,31 +339,31 @@ defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", FR64, f64mem, f64,
let ExeDomain = SSEPackedSingle in {
defm VFMADDPS4 : fma4p<0x68, "vfmaddps", X86Fmadd, v4f32, v8f32,
- memopv4f32, memopv8f32>;
+ loadv4f32, loadv8f32>;
defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", X86Fmsub, v4f32, v8f32,
- memopv4f32, memopv8f32>;
+ loadv4f32, loadv8f32>;
defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32,
- memopv4f32, memopv8f32>;
+ loadv4f32, loadv8f32>;
defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32,
- memopv4f32, memopv8f32>;
+ loadv4f32, loadv8f32>;
defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", X86Fmaddsub, v4f32, v8f32,
- memopv4f32, memopv8f32>;
+ loadv4f32, loadv8f32>;
defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", X86Fmsubadd, v4f32, v8f32,
- memopv4f32, memopv8f32>;
+ loadv4f32, loadv8f32>;
}
let ExeDomain = SSEPackedDouble in {
defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64,
- memopv2f64, memopv4f64>;
+ loadv2f64, loadv4f64>;
defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64,
- memopv2f64, memopv4f64>;
+ loadv2f64, loadv4f64>;
defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64,
- memopv2f64, memopv4f64>;
+ loadv2f64, loadv4f64>;
defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64,
- memopv2f64, memopv4f64>;
+ loadv2f64, loadv4f64>;
defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", X86Fmaddsub, v2f64, v4f64,
- memopv2f64, memopv4f64>;
+ loadv2f64, loadv4f64>;
defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", X86Fmsubadd, v2f64, v4f64,
- memopv2f64, memopv4f64>;
+ loadv2f64, loadv4f64>;
}
diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td
index 2224a08d59f4..7c3788865c1e 100644
--- a/lib/Target/X86/X86InstrFPStack.td
+++ b/lib/Target/X86/X86InstrFPStack.td
@@ -229,22 +229,22 @@ class FPrST0PInst<bits<8> o, string asm>
// of some of the 'reverse' forms of the fsub and fdiv instructions. As such,
// we have to put some 'r's in and take them out of weird places.
def ADD_FST0r : FPST0rInst <0xC0, "fadd\t$op">;
-def ADD_FrST0 : FPrST0Inst <0xC0, "fadd\t{%st(0), $op|$op, ST(0)}">;
+def ADD_FrST0 : FPrST0Inst <0xC0, "fadd\t{%st(0), $op|$op, st(0)}">;
def ADD_FPrST0 : FPrST0PInst<0xC0, "faddp\t$op">;
def SUBR_FST0r : FPST0rInst <0xE8, "fsubr\t$op">;
-def SUB_FrST0 : FPrST0Inst <0xE8, "fsub{r}\t{%st(0), $op|$op, ST(0)}">;
+def SUB_FrST0 : FPrST0Inst <0xE8, "fsub{r}\t{%st(0), $op|$op, st(0)}">;
def SUB_FPrST0 : FPrST0PInst<0xE8, "fsub{r}p\t$op">;
def SUB_FST0r : FPST0rInst <0xE0, "fsub\t$op">;
-def SUBR_FrST0 : FPrST0Inst <0xE0, "fsub{|r}\t{%st(0), $op|$op, ST(0)}">;
+def SUBR_FrST0 : FPrST0Inst <0xE0, "fsub{|r}\t{%st(0), $op|$op, st(0)}">;
def SUBR_FPrST0 : FPrST0PInst<0xE0, "fsub{|r}p\t$op">;
def MUL_FST0r : FPST0rInst <0xC8, "fmul\t$op">;
-def MUL_FrST0 : FPrST0Inst <0xC8, "fmul\t{%st(0), $op|$op, ST(0)}">;
+def MUL_FrST0 : FPrST0Inst <0xC8, "fmul\t{%st(0), $op|$op, st(0)}">;
def MUL_FPrST0 : FPrST0PInst<0xC8, "fmulp\t$op">;
def DIVR_FST0r : FPST0rInst <0xF8, "fdivr\t$op">;
-def DIV_FrST0 : FPrST0Inst <0xF8, "fdiv{r}\t{%st(0), $op|$op, ST(0)}">;
+def DIV_FrST0 : FPrST0Inst <0xF8, "fdiv{r}\t{%st(0), $op|$op, st(0)}">;
def DIV_FPrST0 : FPrST0PInst<0xF8, "fdiv{r}p\t$op">;
def DIV_FST0r : FPST0rInst <0xF0, "fdiv\t$op">;
-def DIVR_FrST0 : FPrST0Inst <0xF0, "fdiv{|r}\t{%st(0), $op|$op, ST(0)}">;
+def DIVR_FrST0 : FPrST0Inst <0xF0, "fdiv{|r}\t{%st(0), $op|$op, st(0)}">;
def DIVR_FPrST0 : FPrST0PInst<0xF0, "fdiv{|r}p\t$op">;
def COM_FST0r : FPST0rInst <0xD0, "fcom\t$op">;
@@ -337,21 +337,21 @@ defm CMOVNP : FPCMov<X86_COND_NP>;
let Predicates = [HasCMov] in {
// These are not factored because there's no clean way to pass DA/DB.
def CMOVB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovb\t{$op, %st(0)|ST(0), $op}">, DA;
+ "fcmovb\t{$op, %st(0)|st(0), $op}">, DA;
def CMOVBE_F : FPI<0xD0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovbe\t{$op, %st(0)|ST(0), $op}">, DA;
+ "fcmovbe\t{$op, %st(0)|st(0), $op}">, DA;
def CMOVE_F : FPI<0xC8, AddRegFrm, (outs RST:$op), (ins),
- "fcmove\t{$op, %st(0)|ST(0), $op}">, DA;
+ "fcmove\t{$op, %st(0)|st(0), $op}">, DA;
def CMOVP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins),
- "fcmovu\t {$op, %st(0)|ST(0), $op}">, DA;
+ "fcmovu\t{$op, %st(0)|st(0), $op}">, DA;
def CMOVNB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovnb\t{$op, %st(0)|ST(0), $op}">, DB;
+ "fcmovnb\t{$op, %st(0)|st(0), $op}">, DB;
def CMOVNBE_F: FPI<0xD0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovnbe\t{$op, %st(0)|ST(0), $op}">, DB;
+ "fcmovnbe\t{$op, %st(0)|st(0), $op}">, DB;
def CMOVNE_F : FPI<0xC8, AddRegFrm, (outs RST:$op), (ins),
- "fcmovne\t{$op, %st(0)|ST(0), $op}">, DB;
+ "fcmovne\t{$op, %st(0)|st(0), $op}">, DB;
def CMOVNP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins),
- "fcmovnu\t{$op, %st(0)|ST(0), $op}">, DB;
+ "fcmovnu\t{$op, %st(0)|st(0), $op}">, DB;
} // Predicates = [HasCMov]
// Floating point loads & stores.
@@ -578,7 +578,7 @@ def COM_FIPr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg),
let SchedRW = [WriteALU] in {
let Defs = [AX], Uses = [FPSW] in
def FNSTSW16r : I<0xE0, RawFrm, // AX = fp flags
- (outs), (ins), "fnstsw %ax",
+ (outs), (ins), "fnstsw\t{%ax|ax}",
[(set AX, (X86fp_stsw FPSW))], IIC_FNSTSW>, DF;
def FNSTCW16m  : I<0xD9, MRM7m,                 // [mem16] = X87 control word
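The asm strings touched above use TableGen's assembler-variant brace syntax: inside "{...|...}" the text before the '|' is emitted by the AT&T-syntax printer and the text after it by the Intel-syntax printer, which is why only the Intel half changes from "ST(0)" to "st(0)" and why "fnstsw %ax" gains an "{%ax|ax}" alternative. A minimal stand-alone sketch of that variant selection (illustrative only, not LLVM's actual asm-writer expansion):

    #include <cassert>
    #include <iostream>
    #include <string>

    // Pick one variant out of an "{att|intel}" template: variant 0 keeps the
    // text before '|' inside each brace group, variant 1 keeps the text after.
    std::string pickVariant(const std::string &Tmpl, unsigned Variant) {
      std::string Out;
      for (std::string::size_type i = 0; i < Tmpl.size(); ++i) {
        if (Tmpl[i] != '{') { Out += Tmpl[i]; continue; }
        std::string::size_type Bar = Tmpl.find('|', i);
        std::string::size_type End = Tmpl.find('}', i);
        assert(Bar != std::string::npos && End != std::string::npos && Bar < End);
        Out += (Variant == 0) ? Tmpl.substr(i + 1, Bar - i - 1)
                              : Tmpl.substr(Bar + 1, End - Bar - 1);
        i = End;
      }
      return Out;
    }

    int main() {
      std::string T = "fadd\t{%st(0), $op|$op, st(0)}";
      std::cout << pickVariant(T, 0) << "\n"; // AT&T:  fadd  %st(0), $op
      std::cout << pickVariant(T, 1) << "\n"; // Intel: fadd  $op, st(0)
    }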
diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td
index a71e024f4e28..0fd9011338b4 100644
--- a/lib/Target/X86/X86InstrFormats.td
+++ b/lib/Target/X86/X86InstrFormats.td
@@ -96,6 +96,20 @@ def SSEPackedSingle : Domain<1>;
def SSEPackedDouble : Domain<2>;
def SSEPackedInt : Domain<3>;
+// Class specifying the vector form used when decompressing the 8-bit
+// displacement (EVEX disp8*N).
+class CD8VForm<bits<3> val> {
+ bits<3> Value = val;
+}
+def CD8VF : CD8VForm<0>; // v := VL
+def CD8VH : CD8VForm<1>; // v := VL/2
+def CD8VQ : CD8VForm<2>; // v := VL/4
+def CD8VO : CD8VForm<3>; // v := VL/8
+def CD8VT1 : CD8VForm<4>; // v := 1
+def CD8VT2 : CD8VForm<5>; // v := 2
+def CD8VT4 : CD8VForm<6>; // v := 4
+def CD8VT8 : CD8VForm<7>; // v := 8
+
// Prefix byte classes which are used to indicate to the ad-hoc machine code
// emitter that various prefix bytes are required.
class OpSize { bit hasOpSizePrefix = 1; }
@@ -125,6 +139,7 @@ class T8XS { bits<5> Prefix = 18; }
class TAXD { bits<5> Prefix = 19; }
class XOP8 { bits<5> Prefix = 20; }
class XOP9 { bits<5> Prefix = 21; }
+class XOPA { bits<5> Prefix = 22; }
class VEX { bit hasVEXPrefix = 1; }
class VEX_W { bit hasVEX_WPrefix = 1; }
class VEX_4V : VEX { bit hasVEX_4VPrefix = 1; }
@@ -132,6 +147,19 @@ class VEX_4VOp3 : VEX { bit hasVEX_4VOp3Prefix = 1; }
class VEX_I8IMM { bit hasVEX_i8ImmReg = 1; }
class VEX_L { bit hasVEX_L = 1; }
class VEX_LIG { bit ignoresVEX_L = 1; }
+class EVEX : VEX { bit hasEVEXPrefix = 1; }
+class EVEX_4V : VEX_4V { bit hasEVEXPrefix = 1; }
+class EVEX_K { bit hasEVEX_K = 1; }
+class EVEX_KZ : EVEX_K { bit hasEVEX_Z = 1; }
+class EVEX_B { bit hasEVEX_B = 1; }
+class EVEX_V512 { bit hasEVEX_L2 = 1; bit hasVEX_L = 0; }
+class EVEX_CD8<int esize, CD8VForm form> {
+ bits<2> EVEX_CD8E = !if(!eq(esize, 8), 0b00,
+ !if(!eq(esize, 16), 0b01,
+ !if(!eq(esize, 32), 0b10,
+ !if(!eq(esize, 64), 0b11, ?))));
+ bits<3> EVEX_CD8V = form.Value;
+}
class Has3DNow0F0FOpcode { bit has3DNow0F0FOpcode = 1; }
class MemOp4 { bit hasMemOp4Prefix = 1; }
class XOP { bit hasXOP_Prefix = 1; }
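CD8VForm and EVEX_CD8 together describe AVX-512's compressed 8-bit displacement (disp8*N): the byte displacement is divided by a scale N derived from the element size and from the vector form, so a one-byte field can still cover a useful range for 64-byte operands. A rough sketch of how N could be derived under the usual reading of the forms above (full, half, quarter or eighth of the vector length in bytes, or a fixed tuple of 1/2/4/8 elements); the helper is illustrative and is not the backend's encoder:

    #include <cassert>
    #include <cstdio>

    enum CD8VForm { CD8VF, CD8VH, CD8VQ, CD8VO, CD8VT1, CD8VT2, CD8VT4, CD8VT8 };

    // Disp8 scale factor N in bytes for a given element size (bytes) and
    // vector length VL (16/32/64 bytes). Broadcast and other special cases
    // are ignored here.
    unsigned disp8ScaleBytes(unsigned ElemBytes, unsigned VLBytes, CD8VForm Form) {
      switch (Form) {
      case CD8VF:  return VLBytes;       // whole vector
      case CD8VH:  return VLBytes / 2;
      case CD8VQ:  return VLBytes / 4;
      case CD8VO:  return VLBytes / 8;
      case CD8VT1: return 1 * ElemBytes; // fixed tuple of elements
      case CD8VT2: return 2 * ElemBytes;
      case CD8VT4: return 4 * ElemBytes;
      case CD8VT8: return 8 * ElemBytes;
      }
      return 0;
    }

    int main() {
      // Full 512-bit (64-byte) operand: disp8 is scaled by 64, so a byte
      // offset of 128 is encoded as disp8 = 2.
      unsigned N = disp8ScaleBytes(/*ElemBytes=*/4, /*VLBytes=*/64, CD8VF);
      assert(N == 64);
      std::printf("offset 128 -> disp8 %u\n", 128 / N);
    }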
@@ -177,6 +205,13 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
// to be encoded in an immediate field?
bit hasVEX_L = 0; // Does this inst use large (256-bit) registers?
bit ignoresVEX_L = 0; // Does this instruction ignore the L-bit
+ bit hasEVEXPrefix = 0; // Does this inst require EVEX form?
+ bit hasEVEX_K = 0; // Does this inst require masking?
+ bit hasEVEX_Z = 0; // Does this inst set the EVEX_Z field?
+ bit hasEVEX_L2 = 0; // Does this inst set the EVEX_L2 field?
+ bit hasEVEX_B = 0; // Does this inst set the EVEX_B field?
+ bits<2> EVEX_CD8E = 0; // Compressed disp8 form - element-size.
+ bits<3> EVEX_CD8V = 0; // Compressed disp8 form - vector-width.
bit has3DNow0F0FOpcode =0;// Wacky 3dNow! encoding?
bit hasMemOp4Prefix = 0; // Same bit as VEX_W, but used for swapping operands
bit hasXOP_Prefix = 0; // Does this inst require an XOP prefix?
@@ -200,9 +235,16 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
let TSFlags{37} = hasVEX_i8ImmReg;
let TSFlags{38} = hasVEX_L;
let TSFlags{39} = ignoresVEX_L;
- let TSFlags{40} = has3DNow0F0FOpcode;
- let TSFlags{41} = hasMemOp4Prefix;
- let TSFlags{42} = hasXOP_Prefix;
+ let TSFlags{40} = hasEVEXPrefix;
+ let TSFlags{41} = hasEVEX_K;
+ let TSFlags{42} = hasEVEX_Z;
+ let TSFlags{43} = hasEVEX_L2;
+ let TSFlags{44} = hasEVEX_B;
+ let TSFlags{46-45} = EVEX_CD8E;
+ let TSFlags{49-47} = EVEX_CD8V;
+ let TSFlags{50} = has3DNow0F0FOpcode;
+ let TSFlags{51} = hasMemOp4Prefix;
+ let TSFlags{52} = hasXOP_Prefix;
}
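After this renumbering, TSFlags bits 40-44 carry the new EVEX properties, bits 45-46 the compressed-disp8 element size, bits 47-49 the vector form, and the 3DNow!/MemOp4/XOP flags move up to bits 50-52. A small self-contained sketch of packing and reading fields at those positions (the names in the comments are just labels for the example, not real accessors):

    #include <cassert>
    #include <cstdint>

    // Extract the bit-field [Lo, Lo+Width) from a 64-bit TSFlags word.
    static uint64_t getField(uint64_t TSFlags, unsigned Lo, unsigned Width) {
      return (TSFlags >> Lo) & ((uint64_t(1) << Width) - 1);
    }

    int main() {
      uint64_t TSFlags = 0;
      TSFlags |= uint64_t(1) << 40; // hasEVEXPrefix
      TSFlags |= uint64_t(1) << 41; // hasEVEX_K
      TSFlags |= uint64_t(2) << 45; // EVEX_CD8E: 32-bit elements
      TSFlags |= uint64_t(0) << 47; // EVEX_CD8V: CD8VF
      TSFlags |= uint64_t(1) << 52; // hasXOP_Prefix, moved up from bit 42

      assert(getField(TSFlags, 40, 1) == 1); // EVEX-encoded
      assert(getField(TSFlags, 45, 2) == 2); // element-size selector
      assert(getField(TSFlags, 47, 3) == 0); // vector form
      assert(getField(TSFlags, 52, 1) == 1); // XOP bit in its new slot
      return 0;
    }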
class PseudoI<dag oops, dag iops, list<dag> pattern>
@@ -292,13 +334,17 @@ class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm,
}
def __xs : XS;
+def __xd : XD;
// SI - SSE 1 & 2 scalar instructions
class SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin> {
- let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
- !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2]));
+ let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512],
+ !if(hasVEXPrefix /* VEX */, [UseAVX],
+ !if(!eq(Prefix, __xs.Prefix), [UseSSE1],
+ !if(!eq(Prefix, __xd.Prefix), [UseSSE2],
+ !if(hasOpSizePrefix, [UseSSE2], [UseSSE1])))));
// AVX instructions have a 'v' prefix in the mnemonic
let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
@@ -308,8 +354,9 @@ class SI<bits<8> o, Format F, dag outs, dag ins, string asm,
class SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin> {
- let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
- !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2]));
+ let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512],
+ !if(hasVEXPrefix /* VEX */, [UseAVX],
+ !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2])));
// AVX instructions have a 'v' prefix in the mnemonic
let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
@@ -319,8 +366,9 @@ class SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class PI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
InstrItinClass itin, Domain d>
: I<o, F, outs, ins, asm, pattern, itin, d> {
- let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
- !if(hasOpSizePrefix /* OpSize */, [UseSSE2], [UseSSE1]));
+ let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512],
+ !if(hasVEXPrefix /* VEX */, [HasAVX],
+ !if(hasOpSizePrefix /* OpSize */, [UseSSE2], [UseSSE1])));
// AVX instructions have a 'v' prefix in the mnemonic
let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
@@ -337,11 +385,12 @@ class MMXPI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> patter
class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin, Domain d>
: Ii8<o, F, outs, ins, asm, pattern, itin, d> {
- let Predicates = !if(hasVEX_4VPrefix /* VEX */, [HasAVX],
- !if(hasOpSizePrefix /* OpSize */, [UseSSE2], [UseSSE1]));
+ let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512],
+ !if(hasVEXPrefix /* VEX */, [HasAVX],
+ !if(hasOpSizePrefix /* OpSize */, [UseSSE2], [UseSSE1])));
// AVX instructions have a 'v' prefix in the mnemonic
- let AsmString = !if(hasVEX_4VPrefix, !strconcat("v", asm), asm);
+ let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
}
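The nested !if chains above derive an instruction's predicate list from its encoding bits: EVEX-encoded templates require HasAVX512, VEX-encoded ones UseAVX (HasAVX for the packed templates), and legacy encodings fall back to UseSSE1/UseSSE2 according to the XS/XD/OpSize prefix. The same decision for the scalar SI template, written out as a plain function purely as an illustration (the predicate names come from the diff; the function itself is not LLVM code):

    #include <iostream>
    #include <string>

    enum Prefix { PFX_NONE, PFX_XS, PFX_XD, PFX_OPSIZE };

    // Mirrors the SI template: EVEX -> HasAVX512, VEX -> UseAVX, otherwise
    // pick an SSE level from the legacy prefix byte.
    std::string scalarPredicate(bool HasEVEX, bool HasVEX, Prefix P) {
      if (HasEVEX) return "HasAVX512";
      if (HasVEX)  return "UseAVX";
      if (P == PFX_XS)     return "UseSSE1";
      if (P == PFX_XD)     return "UseSSE2";
      if (P == PFX_OPSIZE) return "UseSSE2";
      return "UseSSE1";
    }

    int main() {
      std::cout << scalarPredicate(false, false, PFX_XD) << "\n"; // UseSSE2
      std::cout << scalarPredicate(false, true,  PFX_XD) << "\n"; // UseAVX
      std::cout << scalarPredicate(true,  true,  PFX_XD) << "\n"; // HasAVX512
    }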
// SSE1 Instruction Templates:
@@ -350,7 +399,7 @@ class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
// PSI - SSE1 instructions with TB prefix.
// PSIi8 - SSE1 instructions with ImmT == Imm8 and TB prefix.
// VSSI - SSE1 instructions with XS prefix in AVX form.
-// VPSI - SSE1 instructions with TB prefix in AVX form.
+// VPSI - SSE1 instructions with TB prefix in AVX form, packed single.
class SSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
@@ -381,10 +430,13 @@ class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
// SDIi8 - SSE2 instructions with ImmT == Imm8 and XD prefix.
// S2SI - SSE2 instructions with XS prefix.
// SSDIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix.
-// PDI - SSE2 instructions with TB and OpSize prefixes.
+// PDI - SSE2 instructions with TB and OpSize prefixes, packed double domain.
// PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes.
-// VSDI - SSE2 instructions with XD prefix in AVX form.
-// VPDI - SSE2 instructions with TB and OpSize prefixes in AVX form.
+// VSDI - SSE2 scalar instructions with XD prefix in AVX form.
+// VPDI - SSE2 vector instructions with TB and OpSize prefixes in AVX form,
+// packed double domain.
+// VS2I - SSE2 scalar instructions with TB and OpSize prefixes in AVX form.
+// S2I - SSE2 scalar instructions with TB and OpSize prefixes.
// MMXSDIi8 - SSE2 instructions with ImmT == Imm8 and XD prefix as well as
// MMX operands.
// MMXSSDIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix as well as
@@ -413,7 +465,7 @@ class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class VSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XD,
- Requires<[HasAVX]>;
+ Requires<[UseAVX]>;
class VS2SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XS,
@@ -422,6 +474,14 @@ class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedDouble>, TB,
OpSize, Requires<[HasAVX]>;
+class VS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, TB,
+ OpSize, Requires<[UseAVX]>;
+class S2I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin>, TB,
+ OpSize, Requires<[UseSSE2]>;
class MMXSDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasSSE2]>;
@@ -539,12 +599,80 @@ class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize,
Requires<[HasAVX2]>;
+
+// AVX-512 Instruction Templates:
+// Instructions introduced in AVX-512 (no SSE equivalent forms)
+//
+// AVX5128I - AVX-512 instructions with T8 and OpSize prefix.
+// AVX512AIi8 - AVX-512 instructions with TA, OpSize prefix and ImmT = Imm8.
+// AVX512PDI - AVX-512 instructions with TB, OpSize, double packed.
+// AVX512PSI - AVX-512 instructions with TB, single packed.
+// AVX512XS8I - AVX-512 instructions with T8 and XS prefixes.
+// AVX512XSI - AVX-512 instructions with XS prefix, generic domain.
+// AVX512BI - AVX-512 instructions with TB, OpSize, int packed domain.
+// AVX512SI - AVX-512 scalar instructions with TB and OpSize prefixes.
+
+class AVX5128I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, OpSize,
+ Requires<[HasAVX512]>;
+class AVX512XS8I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8XS,
+ Requires<[HasAVX512]>;
+class AVX512XSI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin>, XS,
+ Requires<[HasAVX512]>;
+class AVX512XDI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, XD,
+ Requires<[HasAVX512]>;
+class AVX512BI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize,
+ Requires<[HasAVX512]>;
+class AVX512BIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize,
+ Requires<[HasAVX512]>;
+class AVX512SI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize,
+ Requires<[HasAVX512]>;
+class AVX512AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize,
+ Requires<[HasAVX512]>;
+class AVX512Ii8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB,
+ Requires<[HasAVX512]>;
+class AVX512PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB,
+ OpSize, Requires<[HasAVX512]>;
+class AVX512PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
+ Requires<[HasAVX512]>;
+class AVX512PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, d>, TB, Requires<[HasAVX512]>;
+class AVX512PI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin, d>, TB, Requires<[HasAVX512]>;
+class AVX512FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag>pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin>, T8,
+ OpSize, EVEX_4V, Requires<[HasAVX512]>;
+
// AES Instruction Templates:
//
// AES8I
// These use the same encoding as the SSE4.2 T8 and TA encodings.
class AES8I<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag>pattern, InstrItinClass itin = NoItinerary>
+ list<dag>pattern, InstrItinClass itin = IIC_AES>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
Requires<[HasAES]>;
@@ -614,6 +742,13 @@ class RIi64<bits<8> o, Format f, dag outs, dag ins, string asm,
let CodeSize = 3;
}
+class RIi64_NOREX<bits<8> o, Format f, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : X86Inst<o, f, Imm64, outs, ins, asm, itin> {
+ let Pattern = pattern;
+ let CodeSize = 3;
+}
+
class RSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: SSI<o, F, outs, ins, asm, pattern, itin>, REX_W;
@@ -626,11 +761,18 @@ class RPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
class VRPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: VPDI<o, F, outs, ins, asm, pattern, itin>, VEX_W;
+class RS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : S2I<o, F, outs, ins, asm, pattern, itin>, REX_W;
+class VRS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : VS2I<o, F, outs, ins, asm, pattern, itin>, VEX_W;
// MMX Instruction templates
//
// MMXI - MMX instructions with TB prefix.
+// MMXI32 - MMX instructions with TB prefix valid only in 32 bit mode.
// MMXI64 - MMX instructions with TB prefix valid only in 64 bit mode.
// MMX2I - MMX / SSE2 instructions with TB and OpSize prefixes.
// MMXIi8 - MMX instructions with ImmT == Imm8 and TB prefix.
@@ -640,6 +782,9 @@ class VRPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX]>;
+class MMXI32<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX,In32BitMode]>;
class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX,In64BitMode]>;
diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td
index 2a72fb6f7b2a..1fed424fd6e0 100644
--- a/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -47,6 +47,8 @@ def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
[SDNPCommutative, SDNPAssociative]>;
def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
[SDNPCommutative, SDNPAssociative]>;
+def X86fandn : SDNode<"X86ISD::FANDN", SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
@@ -103,6 +105,13 @@ def X86vsext : SDNode<"X86ISD::VSEXT",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
SDTCisInt<0>, SDTCisInt<1>]>>;
+def X86vtrunc : SDNode<"X86ISD::VTRUNC",
+ SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisInt<0>, SDTCisInt<1>]>>;
+def X86vtruncm : SDNode<"X86ISD::VTRUNCM",
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisInt<0>, SDTCisInt<1>,
+ SDTCisVec<2>, SDTCisInt<2>]>>;
def X86vfpext : SDNode<"X86ISD::VFPEXT",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
SDTCisFP<0>, SDTCisFP<1>]>>;
@@ -116,6 +125,15 @@ def X86cmpp : SDNode<"X86ISD::CMPP", SDTX86VFCMP>;
def X86pcmpeq : SDNode<"X86ISD::PCMPEQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgt : SDNode<"X86ISD::PCMPGT", SDTIntBinOp>;
+def X86IntCmpMask : SDTypeProfile<1, 2,
+ [SDTCisVec<0>, SDTCisSameAs<1, 2>, SDTCisInt<1>]>;
+def X86pcmpeqm : SDNode<"X86ISD::PCMPEQM", X86IntCmpMask, [SDNPCommutative]>;
+def X86pcmpgtm : SDNode<"X86ISD::PCMPGTM", X86IntCmpMask>;
+
+def X86CmpMaskCC : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
+def X86cmpm : SDNode<"X86ISD::CMPM", X86CmpMaskCC>;
+def X86cmpmu : SDNode<"X86ISD::CMPMU", X86CmpMaskCC>;
+
def X86vshl : SDNode<"X86ISD::VSHL",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisVec<2>]>>;
@@ -136,6 +154,11 @@ def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
def X86subus : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
+def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>;
+def X86ktest : SDNode<"X86ISD::KTEST", SDTX86CmpPTest>;
+def X86testm : SDNode<"X86ISD::TESTM", SDTypeProfile<1, 2, [SDTCisVec<0>,
+ SDTCisVec<1>,
+ SDTCisSameAs<2, 1>]>>;
def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
@@ -147,13 +170,17 @@ def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>]>;
+def SDTShuff3Op : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>, SDTCisSameAs<0,3>]>;
def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>, SDTCisInt<3>]>;
-def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
+def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
+def SDTVBroadcastm : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>]>;
+
def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<1,2>, SDTCisVT<3, i32>]>;
@@ -188,10 +215,14 @@ def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>;
def X86VPermilp : SDNode<"X86ISD::VPERMILP", SDTShuff2OpI>;
def X86VPermv : SDNode<"X86ISD::VPERMV", SDTShuff2Op>;
def X86VPermi : SDNode<"X86ISD::VPERMI", SDTShuff2OpI>;
+def X86VPermv3 : SDNode<"X86ISD::VPERMV3", SDTShuff3Op>;
def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;
def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;
+def X86VBroadcastm : SDNode<"X86ISD::VBROADCASTM", SDTVBroadcastm>;
+def X86Vinsert : SDNode<"X86ISD::VINSERT", SDTypeProfile<1, 3,
+ [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
def X86Blendi : SDNode<"X86ISD::BLENDI", SDTBlend>;
def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>;
@@ -229,13 +260,13 @@ def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
def ssmem : Operand<v4f32> {
let PrintMethod = "printf32mem";
let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
+ let ParserMatchClass = X86Mem32AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
def sdmem : Operand<v2f64> {
let PrintMethod = "printf64mem";
let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
+ let ParserMatchClass = X86Mem64AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -255,9 +286,16 @@ def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
-// 128-/256-bit extload pattern fragments
+// 512-bit load pattern fragments
+def loadv16f32 : PatFrag<(ops node:$ptr), (v16f32 (load node:$ptr))>;
+def loadv8f64 : PatFrag<(ops node:$ptr), (v8f64 (load node:$ptr))>;
+def loadv16i32 : PatFrag<(ops node:$ptr), (v16i32 (load node:$ptr))>;
+def loadv8i64 : PatFrag<(ops node:$ptr), (v8i64 (load node:$ptr))>;
+
+// 128-/256-/512-bit extload pattern fragments
def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
def extloadv4f32 : PatFrag<(ops node:$ptr), (v4f64 (extloadvf32 node:$ptr))>;
+def extloadv8f32 : PatFrag<(ops node:$ptr), (v8f64 (extloadvf32 node:$ptr))>;
// Like 'store', but always requires 128-bit vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
@@ -271,6 +309,12 @@ def alignedstore256 : PatFrag<(ops node:$val, node:$ptr),
return cast<StoreSDNode>(N)->getAlignment() >= 32;
}]>;
+// Like 'store', but always requires 512-bit vector alignment.
+def alignedstore512 : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getAlignment() >= 64;
+}]>;
+
// Like 'load', but always requires 128-bit vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 16;
@@ -286,6 +330,11 @@ def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 32;
}]>;
+// Like 'load', but always requires 512-bit vector alignment.
+def alignedload512 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() >= 64;
+}]>;
+
def alignedloadfsf32 : PatFrag<(ops node:$ptr),
(f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
@@ -309,6 +358,16 @@ def alignedloadv4f64 : PatFrag<(ops node:$ptr),
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
(v4i64 (alignedload256 node:$ptr))>;
+// 512-bit aligned load pattern fragments
+def alignedloadv16f32 : PatFrag<(ops node:$ptr),
+ (v16f32 (alignedload512 node:$ptr))>;
+def alignedloadv16i32 : PatFrag<(ops node:$ptr),
+ (v16i32 (alignedload512 node:$ptr))>;
+def alignedloadv8f64 : PatFrag<(ops node:$ptr),
+ (v8f64 (alignedload512 node:$ptr))>;
+def alignedloadv8i64 : PatFrag<(ops node:$ptr),
+ (v8i64 (alignedload512 node:$ptr))>;
+
// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
@@ -320,6 +379,16 @@ def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
|| cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
+def memop4 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return Subtarget->hasVectorUAMem()
+ || cast<LoadSDNode>(N)->getAlignment() >= 4;
+}]>;
+
+def memop8 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return Subtarget->hasVectorUAMem()
+ || cast<LoadSDNode>(N)->getAlignment() >= 8;
+}]>;
+
def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
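memop keeps the long-standing folding rule (the subtarget tolerates unaligned vector memory operands, or the load is at least 16-byte aligned), whereas the new memop4/memop8 fragments only require 4- or 8-byte alignment, matching the element granularity used by the 512-bit fragments defined just below. A stand-alone sketch of that predicate with the alignment check isolated from the SelectionDAG plumbing:

    #include <cassert>

    // True if a load with the given alignment may be folded into an
    // instruction whose memory operand needs MinAlign bytes, or if the
    // subtarget allows unaligned vector memory accesses.
    bool memopPredicate(bool HasVectorUAMem, unsigned LoadAlign, unsigned MinAlign) {
      return HasVectorUAMem || LoadAlign >= MinAlign;
    }

    int main() {
      // memop (128-bit SSE): needs 16-byte alignment unless UAMem is allowed.
      assert(!memopPredicate(false, 8, 16));
      assert( memopPredicate(true,  8, 16));
      // memop4 (32-bit elements): 4-byte alignment is enough.
      assert( memopPredicate(false, 8, 4));
      // memop8 (64-bit elements): 8-byte alignment is enough.
      assert( memopPredicate(false, 8, 8));
      return 0;
    }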
@@ -335,6 +404,12 @@ def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;
+// 512-bit memop pattern fragments
+def memopv16f32 : PatFrag<(ops node:$ptr), (v16f32 (memop4 node:$ptr))>;
+def memopv8f64 : PatFrag<(ops node:$ptr), (v8f64 (memop8 node:$ptr))>;
+def memopv16i32 : PatFrag<(ops node:$ptr), (v16i32 (memop4 node:$ptr))>;
+def memopv8i64 : PatFrag<(ops node:$ptr), (v8i64 (memop8 node:$ptr))>;
+
// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
@@ -384,6 +459,10 @@ def bc_v16i16 : PatFrag<(ops node:$in), (v16i16 (bitconvert node:$in))>;
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
def bc_v4i64 : PatFrag<(ops node:$in), (v4i64 (bitconvert node:$in))>;
+// 512-bit bitconvert pattern fragments
+def bc_v16i32 : PatFrag<(ops node:$in), (v16i32 (bitconvert node:$in))>;
+def bc_v8i64 : PatFrag<(ops node:$in), (v8i64 (bitconvert node:$in))>;
+
def vzmovl_v2i64 : PatFrag<(ops node:$src),
(bitconvert (v2i64 (X86vzmovl
(v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
@@ -405,28 +484,54 @@ def BYTE_imm : SDNodeXForm<imm, [{
return getI32Imm(N->getZExtValue() >> 3);
}]>;
-// EXTRACT_get_vextractf128_imm xform function: convert extract_subvector index
-// to VEXTRACTF128 imm.
-def EXTRACT_get_vextractf128_imm : SDNodeXForm<extract_subvector, [{
- return getI8Imm(X86::getExtractVEXTRACTF128Immediate(N));
+// EXTRACT_get_vextract128_imm xform function: convert extract_subvector index
+// to VEXTRACTF128/VEXTRACTI128 imm.
+def EXTRACT_get_vextract128_imm : SDNodeXForm<extract_subvector, [{
+ return getI8Imm(X86::getExtractVEXTRACT128Immediate(N));
+}]>;
+
+// INSERT_get_vinsert128_imm xform function: convert insert_subvector index to
+// VINSERTF128/VINSERTI128 imm.
+def INSERT_get_vinsert128_imm : SDNodeXForm<insert_subvector, [{
+ return getI8Imm(X86::getInsertVINSERT128Immediate(N));
+}]>;
+
+// EXTRACT_get_vextract256_imm xform function: convert extract_subvector index
+// to VEXTRACTF64x4 imm.
+def EXTRACT_get_vextract256_imm : SDNodeXForm<extract_subvector, [{
+ return getI8Imm(X86::getExtractVEXTRACT256Immediate(N));
}]>;
-// INSERT_get_vinsertf128_imm xform function: convert insert_subvector index to
-// VINSERTF128 imm.
-def INSERT_get_vinsertf128_imm : SDNodeXForm<insert_subvector, [{
- return getI8Imm(X86::getInsertVINSERTF128Immediate(N));
+// INSERT_get_vinsert256_imm xform function: convert insert_subvector index to
+// VINSERTF64x4 imm.
+def INSERT_get_vinsert256_imm : SDNodeXForm<insert_subvector, [{
+ return getI8Imm(X86::getInsertVINSERT256Immediate(N));
}]>;
-def vextractf128_extract : PatFrag<(ops node:$bigvec, node:$index),
+def vextract128_extract : PatFrag<(ops node:$bigvec, node:$index),
+ (extract_subvector node:$bigvec,
+ node:$index), [{
+ return X86::isVEXTRACT128Index(N);
+}], EXTRACT_get_vextract128_imm>;
+
+def vinsert128_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
+ node:$index),
+ (insert_subvector node:$bigvec, node:$smallvec,
+ node:$index), [{
+ return X86::isVINSERT128Index(N);
+}], INSERT_get_vinsert128_imm>;
+
+
+def vextract256_extract : PatFrag<(ops node:$bigvec, node:$index),
(extract_subvector node:$bigvec,
node:$index), [{
- return X86::isVEXTRACTF128Index(N);
-}], EXTRACT_get_vextractf128_imm>;
+ return X86::isVEXTRACT256Index(N);
+}], EXTRACT_get_vextract256_imm>;
-def vinsertf128_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
+def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
node:$index),
(insert_subvector node:$bigvec, node:$smallvec,
node:$index), [{
- return X86::isVINSERTF128Index(N);
-}], INSERT_get_vinsertf128_imm>;
+ return X86::isVINSERT256Index(N);
+}], INSERT_get_vinsert256_imm>;
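The renamed xform functions map an extract_subvector/insert_subvector element index onto the lane immediate carried by VEXTRACTF128/VINSERTF128 (128-bit lanes) and by the new 256-bit VEXTRACTF64x4/VINSERTF64x4 forms. The immediate is the element index divided by the number of elements in the narrower vector; a worked sketch, with the helper name invented for the example:

    #include <cassert>

    // Lane immediate for extracting a SubElts-element subvector that starts at
    // element Index of a wider vector. E.g. taking elements [4..7] of a v8i32
    // as a 128-bit (4 x i32) half yields immediate 1.
    unsigned subvectorLaneImm(unsigned Index, unsigned SubElts) {
      assert(Index % SubElts == 0 && "subvector must be lane-aligned");
      return Index / SubElts;
    }

    int main() {
      assert(subvectorLaneImm(4, 4) == 1); // upper 128-bit lane of a 256-bit vector
      assert(subvectorLaneImm(0, 8) == 0); // lower 256-bit half of a 512-bit vector
      assert(subvectorLaneImm(8, 8) == 1); // upper 256-bit half (VEXTRACTF64x4 imm 1)
      return 0;
    }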
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 7c0423f81825..24617737420b 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -24,6 +24,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -35,7 +36,7 @@
#include "llvm/Target/TargetOptions.h"
#include <limits>
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"
using namespace llvm;
@@ -81,6 +82,7 @@ enum {
TB_ALIGN_NONE = 0 << TB_ALIGN_SHIFT,
TB_ALIGN_16 = 16 << TB_ALIGN_SHIFT,
TB_ALIGN_32 = 32 << TB_ALIGN_SHIFT,
+ TB_ALIGN_64 = 64 << TB_ALIGN_SHIFT,
TB_ALIGN_MASK = 0xff << TB_ALIGN_SHIFT
};
@@ -90,6 +92,9 @@ struct X86OpTblEntry {
uint16_t Flags;
};
+// Pin the vtable to this file.
+void X86InstrInfo::anchor() {}
+
X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
: X86GenInstrInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
? X86::ADJCALLSTACKDOWN64
@@ -97,7 +102,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
(tm.getSubtarget<X86Subtarget>().is64Bit()
? X86::ADJCALLSTACKUP64
: X86::ADJCALLSTACKUP32)),
- TM(tm), RI(tm, *this) {
+ TM(tm), RI(tm) {
static const X86OpTblEntry OpTbl2Addr[] = {
{ X86::ADC32ri, X86::ADC32mi, 0 },
@@ -298,8 +303,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::DIV64r, X86::DIV64m, TB_FOLDED_LOAD },
{ X86::DIV8r, X86::DIV8m, TB_FOLDED_LOAD },
{ X86::EXTRACTPSrr, X86::EXTRACTPSmr, TB_FOLDED_STORE },
- { X86::FsMOVAPDrr, X86::MOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE },
- { X86::FsMOVAPSrr, X86::MOVSSmr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::IDIV16r, X86::IDIV16m, TB_FOLDED_LOAD },
{ X86::IDIV32r, X86::IDIV32m, TB_FOLDED_LOAD },
{ X86::IDIV64r, X86::IDIV64m, TB_FOLDED_LOAD },
@@ -356,8 +359,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::TEST8ri, X86::TEST8mi, TB_FOLDED_LOAD },
// AVX 128-bit versions of foldable instructions
{ X86::VEXTRACTPSrr,X86::VEXTRACTPSmr, TB_FOLDED_STORE },
- { X86::FsVMOVAPDrr, X86::VMOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE },
- { X86::FsVMOVAPSrr, X86::VMOVSSmr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::VEXTRACTF128rr, X86::VEXTRACTF128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
{ X86::VMOVAPDrr, X86::VMOVAPDmr, TB_FOLDED_STORE | TB_ALIGN_16 },
{ X86::VMOVAPSrr, X86::VMOVAPSmr, TB_FOLDED_STORE | TB_ALIGN_16 },
@@ -374,7 +375,9 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMOVAPSYrr, X86::VMOVAPSYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
{ X86::VMOVDQAYrr, X86::VMOVDQAYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
{ X86::VMOVUPDYrr, X86::VMOVUPDYmr, TB_FOLDED_STORE },
- { X86::VMOVUPSYrr, X86::VMOVUPSYmr, TB_FOLDED_STORE }
+ { X86::VMOVUPSYrr, X86::VMOVUPSYmr, TB_FOLDED_STORE },
+ // AVX-512 foldable instructions
+ { X86::VMOVPDI2DIZrr,X86::VMOVPDI2DIZmr, TB_FOLDED_STORE }
};
for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
@@ -400,8 +403,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::CVTTSD2SIrr, X86::CVTTSD2SIrm, 0 },
{ X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm, 0 },
{ X86::CVTTSS2SIrr, X86::CVTTSS2SIrm, 0 },
- { X86::FsMOVAPDrr, X86::MOVSDrm, TB_NO_REVERSE },
- { X86::FsMOVAPSrr, X86::MOVSSrm, TB_NO_REVERSE },
{ X86::IMUL16rri, X86::IMUL16rmi, 0 },
{ X86::IMUL16rri8, X86::IMUL16rmi8, 0 },
{ X86::IMUL32rri, X86::IMUL32rmi, 0 },
@@ -444,16 +445,12 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::MOVSX64rr8, X86::MOVSX64rm8, 0 },
{ X86::MOVUPDrr, X86::MOVUPDrm, TB_ALIGN_16 },
{ X86::MOVUPSrr, X86::MOVUPSrm, 0 },
- { X86::MOVZDI2PDIrr, X86::MOVZDI2PDIrm, 0 },
{ X86::MOVZQI2PQIrr, X86::MOVZQI2PQIrm, 0 },
{ X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm, TB_ALIGN_16 },
{ X86::MOVZX16rr8, X86::MOVZX16rm8, 0 },
{ X86::MOVZX32rr16, X86::MOVZX32rm16, 0 },
{ X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8, 0 },
{ X86::MOVZX32rr8, X86::MOVZX32rm8, 0 },
- { X86::MOVZX64rr16, X86::MOVZX64rm16, 0 },
- { X86::MOVZX64rr32, X86::MOVZX64rm32, 0 },
- { X86::MOVZX64rr8, X86::MOVZX64rm8, 0 },
{ X86::PABSBrr128, X86::PABSBrm128, TB_ALIGN_16 },
{ X86::PABSDrr128, X86::PABSDrm128, TB_ALIGN_16 },
{ X86::PABSWrr128, X86::PABSWrm128, TB_ALIGN_16 },
@@ -496,8 +493,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VCVTSD2SIrr, X86::VCVTSD2SIrm, 0 },
{ X86::VCVTSS2SI64rr, X86::VCVTSS2SI64rm, 0 },
{ X86::VCVTSS2SIrr, X86::VCVTSS2SIrm, 0 },
- { X86::FsVMOVAPDrr, X86::VMOVSDrm, TB_NO_REVERSE },
- { X86::FsVMOVAPSrr, X86::VMOVSSrm, TB_NO_REVERSE },
{ X86::VMOV64toPQIrr, X86::VMOVQI2PQIrm, 0 },
{ X86::VMOV64toSDrr, X86::VMOV64toSDrm, 0 },
{ X86::VMOVAPDrr, X86::VMOVAPDrm, TB_ALIGN_16 },
@@ -510,7 +505,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMOVSHDUPrr, X86::VMOVSHDUPrm, TB_ALIGN_16 },
{ X86::VMOVUPDrr, X86::VMOVUPDrm, 0 },
{ X86::VMOVUPSrr, X86::VMOVUPSrm, 0 },
- { X86::VMOVZDI2PDIrr, X86::VMOVZDI2PDIrm, 0 },
{ X86::VMOVZQI2PQIrr, X86::VMOVZQI2PQIrm, 0 },
{ X86::VMOVZPQILo2PQIrr,X86::VMOVZPQILo2PQIrm, TB_ALIGN_16 },
{ X86::VPABSBrr128, X86::VPABSBrm128, 0 },
@@ -555,11 +549,27 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrm, TB_NO_REVERSE },
{ X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrm, TB_NO_REVERSE },
- // BMI/BMI2/LZCNT/POPCNT foldable instructions
+ // BMI/BMI2/LZCNT/POPCNT/TBM foldable instructions
{ X86::BEXTR32rr, X86::BEXTR32rm, 0 },
{ X86::BEXTR64rr, X86::BEXTR64rm, 0 },
+ { X86::BEXTRI32ri, X86::BEXTRI32mi, 0 },
+ { X86::BEXTRI64ri, X86::BEXTRI64mi, 0 },
+ { X86::BLCFILL32rr, X86::BLCFILL32rm, 0 },
+ { X86::BLCFILL64rr, X86::BLCFILL64rm, 0 },
+ { X86::BLCI32rr, X86::BLCI32rm, 0 },
+ { X86::BLCI64rr, X86::BLCI64rm, 0 },
+ { X86::BLCIC32rr, X86::BLCIC32rm, 0 },
+ { X86::BLCIC64rr, X86::BLCIC64rm, 0 },
+ { X86::BLCMSK32rr, X86::BLCMSK32rm, 0 },
+ { X86::BLCMSK64rr, X86::BLCMSK64rm, 0 },
+ { X86::BLCS32rr, X86::BLCS32rm, 0 },
+ { X86::BLCS64rr, X86::BLCS64rm, 0 },
+ { X86::BLSFILL32rr, X86::BLSFILL32rm, 0 },
+ { X86::BLSFILL64rr, X86::BLSFILL64rm, 0 },
{ X86::BLSI32rr, X86::BLSI32rm, 0 },
{ X86::BLSI64rr, X86::BLSI64rm, 0 },
+ { X86::BLSIC32rr, X86::BLSIC32rm, 0 },
+ { X86::BLSIC64rr, X86::BLSIC64rm, 0 },
{ X86::BLSMSK32rr, X86::BLSMSK32rm, 0 },
{ X86::BLSMSK64rr, X86::BLSMSK64rm, 0 },
{ X86::BLSR32rr, X86::BLSR32rm, 0 },
@@ -580,9 +590,27 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::SHRX64rr, X86::SHRX64rm, 0 },
{ X86::SHLX32rr, X86::SHLX32rm, 0 },
{ X86::SHLX64rr, X86::SHLX64rm, 0 },
+ { X86::T1MSKC32rr, X86::T1MSKC32rm, 0 },
+ { X86::T1MSKC64rr, X86::T1MSKC64rm, 0 },
{ X86::TZCNT16rr, X86::TZCNT16rm, 0 },
{ X86::TZCNT32rr, X86::TZCNT32rm, 0 },
{ X86::TZCNT64rr, X86::TZCNT64rm, 0 },
+ { X86::TZMSK32rr, X86::TZMSK32rm, 0 },
+ { X86::TZMSK64rr, X86::TZMSK64rm, 0 },
+
+ // AVX-512 foldable instructions
+ { X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
+ { X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
+ { X86::VMOVDQA32rr, X86::VMOVDQA32rm, TB_ALIGN_64 },
+ { X86::VMOVDQA64rr, X86::VMOVDQA64rm, TB_ALIGN_64 },
+ { X86::VMOVDQU32rr, X86::VMOVDQU32rm, 0 },
+ { X86::VMOVDQU64rr, X86::VMOVDQU64rm, 0 },
+
+ // AES foldable instructions
+ { X86::AESIMCrr, X86::AESIMCrm, TB_ALIGN_16 },
+ { X86::AESKEYGENASSIST128rr, X86::AESKEYGENASSIST128rm, TB_ALIGN_16 },
+ { X86::VAESIMCrr, X86::VAESIMCrm, TB_ALIGN_16 },
+ { X86::VAESKEYGENASSIST128rr, X86::VAESKEYGENASSIST128rm, TB_ALIGN_16 },
};
for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
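Each entry in these tables pairs a register-form opcode with its memory-folded counterpart plus flags: TB_FOLDED_LOAD/TB_FOLDED_STORE say which operand the memory reference replaces, and TB_ALIGN_16/32/64 record the alignment the folded operand must satisfy (TB_ALIGN_64 is introduced above for 512-bit moves such as VMOVDQA32rr -> VMOVDQA32rm). A toy version of the lookup the constructor builds from these tables, with invented opcode numbers standing in for the real X86:: enumerators:

    #include <cstdint>
    #include <cstdio>
    #include <map>

    struct FoldEntry {
      uint16_t MemOpc;
      uint16_t MinAlign; // required alignment of the folded memory operand
    };

    int main() {
      // Hypothetical opcode numbers; the real tables use X86:: enumerators.
      enum { VMOVDQA32rr = 1, VMOVDQA32rm = 2, VADDPSZrr = 3, VADDPSZrm = 4 };

      std::map<uint16_t, FoldEntry> RegToMem;
      RegToMem[VMOVDQA32rr] = FoldEntry{VMOVDQA32rm, 64}; // TB_ALIGN_64
      RegToMem[VADDPSZrr]   = FoldEntry{VADDPSZrm,   1};  // no alignment flag

      auto It = RegToMem.find(VADDPSZrr);
      if (It != RegToMem.end())
        std::printf("fold %u -> %u (align %u)\n", unsigned(It->first),
                    unsigned(It->second.MemOpc), unsigned(It->second.MinAlign));
      return 0;
    }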
@@ -1180,6 +1208,52 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::PDEP64rr, X86::PDEP64rm, 0 },
{ X86::PEXT32rr, X86::PEXT32rm, 0 },
{ X86::PEXT64rr, X86::PEXT64rm, 0 },
+
+ // AVX-512 foldable instructions
+ { X86::VPADDDZrr, X86::VPADDDZrm, 0 },
+ { X86::VPADDQZrr, X86::VPADDQZrm, 0 },
+ { X86::VADDPSZrr, X86::VADDPSZrm, 0 },
+ { X86::VADDPDZrr, X86::VADDPDZrm, 0 },
+ { X86::VSUBPSZrr, X86::VSUBPSZrm, 0 },
+ { X86::VSUBPDZrr, X86::VSUBPDZrm, 0 },
+ { X86::VMULPSZrr, X86::VMULPSZrm, 0 },
+ { X86::VMULPDZrr, X86::VMULPDZrm, 0 },
+ { X86::VDIVPSZrr, X86::VDIVPSZrm, 0 },
+ { X86::VDIVPDZrr, X86::VDIVPDZrm, 0 },
+ { X86::VMINPSZrr, X86::VMINPSZrm, 0 },
+ { X86::VMINPDZrr, X86::VMINPDZrm, 0 },
+ { X86::VMAXPSZrr, X86::VMAXPSZrm, 0 },
+ { X86::VMAXPDZrr, X86::VMAXPDZrm, 0 },
+ { X86::VPERMPDZri, X86::VPERMPDZmi, 0 },
+ { X86::VPERMPSZrr, X86::VPERMPSZrm, 0 },
+ { X86::VPSLLVDZrr, X86::VPSLLVDZrm, 0 },
+ { X86::VPSLLVQZrr, X86::VPSLLVQZrm, 0 },
+ { X86::VPSRAVDZrr, X86::VPSRAVDZrm, 0 },
+ { X86::VPSRLVDZrr, X86::VPSRLVDZrm, 0 },
+ { X86::VPSRLVQZrr, X86::VPSRLVQZrm, 0 },
+ { X86::VSHUFPDZrri, X86::VSHUFPDZrmi, 0 },
+ { X86::VSHUFPSZrri, X86::VSHUFPSZrmi, 0 },
+ { X86::VALIGNQrri, X86::VALIGNQrmi, 0 },
+ { X86::VALIGNDrri, X86::VALIGNDrmi, 0 },
+
+ // AES foldable instructions
+ { X86::AESDECLASTrr, X86::AESDECLASTrm, TB_ALIGN_16 },
+ { X86::AESDECrr, X86::AESDECrm, TB_ALIGN_16 },
+ { X86::AESENCLASTrr, X86::AESENCLASTrm, TB_ALIGN_16 },
+ { X86::AESENCrr, X86::AESENCrm, TB_ALIGN_16 },
+ { X86::VAESDECLASTrr, X86::VAESDECLASTrm, TB_ALIGN_16 },
+ { X86::VAESDECrr, X86::VAESDECrm, TB_ALIGN_16 },
+ { X86::VAESENCLASTrr, X86::VAESENCLASTrm, TB_ALIGN_16 },
+ { X86::VAESENCrr, X86::VAESENCrm, TB_ALIGN_16 },
+
+ // SHA foldable instructions
+ { X86::SHA1MSG1rr, X86::SHA1MSG1rm, TB_ALIGN_16 },
+ { X86::SHA1MSG2rr, X86::SHA1MSG2rm, TB_ALIGN_16 },
+ { X86::SHA1NEXTErr, X86::SHA1NEXTErm, TB_ALIGN_16 },
+ { X86::SHA1RNDS4rri, X86::SHA1RNDS4rmi, TB_ALIGN_16 },
+ { X86::SHA256MSG1rr, X86::SHA256MSG1rm, TB_ALIGN_16 },
+ { X86::SHA256MSG2rr, X86::SHA256MSG2rm, TB_ALIGN_16 },
+ { X86::SHA256RNDS2rr, X86::SHA256RNDS2rm, TB_ALIGN_16 },
};
for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
@@ -1341,6 +1415,11 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4rm, TB_ALIGN_16 },
{ X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4rmY, TB_ALIGN_32 },
{ X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4rmY, TB_ALIGN_32 },
+ // AVX-512 VPERMI instructions with 3 source operands.
+ { X86::VPERMI2Drr, X86::VPERMI2Drm, 0 },
+ { X86::VPERMI2Qrr, X86::VPERMI2Qrm, 0 },
+ { X86::VPERMI2PSrr, X86::VPERMI2PSrm, 0 },
+ { X86::VPERMI2PDrr, X86::VPERMI2PDrm, 0 },
};
for (unsigned i = 0, e = array_lengthof(OpTbl3); i != e; ++i) {
@@ -1381,7 +1460,6 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
case X86::MOVSX32rr8:
case X86::MOVZX32rr8:
case X86::MOVSX64rr8:
- case X86::MOVZX64rr8:
if (!TM.getSubtarget<X86Subtarget>().is64Bit())
// It's not always legal to reference the low 8-bit of the larger
// register in 32-bit mode.
@@ -1389,9 +1467,7 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
case X86::MOVSX32rr16:
case X86::MOVZX32rr16:
case X86::MOVSX64rr16:
- case X86::MOVZX64rr16:
- case X86::MOVSX64rr32:
- case X86::MOVZX64rr32: {
+ case X86::MOVSX64rr32: {
if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
// Be conservative.
return false;
@@ -1404,17 +1480,14 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
case X86::MOVSX32rr8:
case X86::MOVZX32rr8:
case X86::MOVSX64rr8:
- case X86::MOVZX64rr8:
SubIdx = X86::sub_8bit;
break;
case X86::MOVSX32rr16:
case X86::MOVZX32rr16:
case X86::MOVSX64rr16:
- case X86::MOVZX64rr16:
SubIdx = X86::sub_16bit;
break;
case X86::MOVSX64rr32:
- case X86::MOVZX64rr32:
SubIdx = X86::sub_32bit;
break;
}
@@ -1463,6 +1536,8 @@ static bool isFrameLoadOpcode(int Opcode) {
case X86::VMOVDQAYrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
+ case X86::VMOVDQA32rm:
+ case X86::VMOVDQA64rm:
return true;
}
}
@@ -1722,37 +1797,16 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
const TargetRegisterInfo &TRI) const {
- DebugLoc DL = Orig->getDebugLoc();
-
- // MOV32r0 etc. are implemented with xor which clobbers condition code.
- // Re-materialize them as movri instructions to avoid side effects.
- bool Clone = true;
+  // MOV32r0 is implemented with an xor which clobbers the condition code.
+  // Re-materialize it as a movri instruction to avoid side effects.
unsigned Opc = Orig->getOpcode();
- switch (Opc) {
- default: break;
- case X86::MOV8r0:
- case X86::MOV16r0:
- case X86::MOV32r0:
- case X86::MOV64r0: {
- if (!isSafeToClobberEFLAGS(MBB, I)) {
- switch (Opc) {
- default: llvm_unreachable("Unreachable!");
- case X86::MOV8r0: Opc = X86::MOV8ri; break;
- case X86::MOV16r0: Opc = X86::MOV16ri; break;
- case X86::MOV32r0: Opc = X86::MOV32ri; break;
- case X86::MOV64r0: Opc = X86::MOV64ri64i32; break;
- }
- Clone = false;
- }
- break;
- }
- }
-
- if (Clone) {
+ if (Opc == X86::MOV32r0 && !isSafeToClobberEFLAGS(MBB, I)) {
+ DebugLoc DL = Orig->getDebugLoc();
+ BuildMI(MBB, I, DL, get(X86::MOV32ri)).addOperand(Orig->getOperand(0))
+ .addImm(0);
+ } else {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
MBB.insert(I, MI);
- } else {
- BuildMI(MBB, I, DL, get(Opc)).addOperand(Orig->getOperand(0)).addImm(0);
}
MachineInstr *NewMI = prior(I);
@@ -1772,6 +1826,98 @@ static bool hasLiveCondCodeDef(MachineInstr *MI) {
return false;
}
+/// getTruncatedShiftCount - return the shift count for a machine operand,
+/// truncated to the number of bits the hardware actually uses.
+inline static unsigned getTruncatedShiftCount(MachineInstr *MI,
+ unsigned ShiftAmtOperandIdx) {
+ // The shift count is six bits with the REX.W prefix and five bits without.
+ unsigned ShiftCountMask = (MI->getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
+ unsigned Imm = MI->getOperand(ShiftAmtOperandIdx).getImm();
+ return Imm & ShiftCountMask;
+}
+
+/// isTruncatedShiftCountForLEA - check whether the given shift count can be
+/// represented by the scale field of a LEA instruction.
+inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
+ // Left shift instructions can be transformed into load-effective-address
+ // instructions if we can encode them appropriately.
+  // A LEA instruction utilizes a SIB byte to encode its scale factor.
+  // The SIB.scale field is two bits wide, which means that we can encode any
+  // shift amount less than 4.
+ return ShAmt < 4 && ShAmt > 0;
+}
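Together these two helpers decide when a left shift can become an LEA: the immediate is first truncated the way the hardware truncates it (6 bits with REX.W, 5 bits without), and the result must be 1, 2 or 3 so that 1 << ShAmt fits the two-bit SIB scale field; e.g. shl $3, %reg turns into an LEA with scale 8. A tiny stand-alone restatement of the same checks:

    #include <cassert>

    // The hardware truncates the shift count: 6 bits with REX.W, 5 without.
    unsigned truncatedShiftCount(unsigned Imm, bool HasREXW) {
      return Imm & (HasREXW ? 63u : 31u);
    }

    // A truncated shift fits an LEA only if 1 << ShAmt is a SIB-encodable
    // scale (2, 4 or 8), i.e. 0 < ShAmt < 4.
    bool shiftFitsLEA(unsigned ShAmt) {
      return ShAmt > 0 && ShAmt < 4;
    }

    int main() {
      assert(truncatedShiftCount(33, /*HasREXW=*/false) == 1); // shl $33 acts as shl $1
      assert(shiftFitsLEA(3));  // shl $3, %reg -> lea (,%reg,8), %dst
      assert(!shiftFitsLEA(4)); // scale 16 cannot be encoded
      return 0;
    }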
+
+bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
+ unsigned Opc, bool AllowSP,
+ unsigned &NewSrc, bool &isKill, bool &isUndef,
+ MachineOperand &ImplicitOp) const {
+ MachineFunction &MF = *MI->getParent()->getParent();
+ const TargetRegisterClass *RC;
+ if (AllowSP) {
+ RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
+ } else {
+ RC = Opc != X86::LEA32r ?
+ &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
+ }
+ unsigned SrcReg = Src.getReg();
+
+  // For both LEA64 and LEA32 the register already has essentially the right
+  // type (32-bit or 64-bit); we may just need to forbid SP.
+ if (Opc != X86::LEA64_32r) {
+ NewSrc = SrcReg;
+ isKill = Src.isKill();
+ isUndef = Src.isUndef();
+
+ if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
+ !MF.getRegInfo().constrainRegClass(NewSrc, RC))
+ return false;
+
+ return true;
+ }
+
+ // This is for an LEA64_32r and incoming registers are 32-bit. One way or
+ // another we need to add 64-bit registers to the final MI.
+ if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+ ImplicitOp = Src;
+ ImplicitOp.setImplicit();
+
+ NewSrc = getX86SubSuperRegister(Src.getReg(), MVT::i64);
+ MachineBasicBlock::LivenessQueryResult LQR =
+ MI->getParent()->computeRegisterLiveness(&getRegisterInfo(), NewSrc, MI);
+
+ switch (LQR) {
+ case MachineBasicBlock::LQR_Unknown:
+ // We can't give sane liveness flags to the instruction, abandon LEA
+ // formation.
+ return false;
+ case MachineBasicBlock::LQR_Live:
+ isKill = MI->killsRegister(SrcReg);
+ isUndef = false;
+ break;
+ default:
+ // The physreg itself is dead, so we have to use it as an <undef>.
+ isKill = false;
+ isUndef = true;
+ break;
+ }
+ } else {
+    // Virtual register of the wrong class; we have to create a temporary 64-bit
+    // vreg to feed into the LEA.
+ NewSrc = MF.getRegInfo().createVirtualRegister(RC);
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ get(TargetOpcode::COPY))
+ .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
+ .addOperand(Src);
+
+ // Which is obviously going to be dead after we're done with it.
+ isKill = true;
+ isUndef = false;
+ }
+
+ // We've set all the parameters without issue.
+ return true;
+}
+
/// convertToThreeAddressWithLEA - Helper for convertToThreeAddress when
/// 16-bit LEA is disabled, use 32-bit LEA to form 3-address code by promoting
/// to a 32-bit superregister and then truncating back down to a 16-bit
@@ -1787,11 +1933,16 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
bool isDead = MI->getOperand(0).isDead();
bool isKill = MI->getOperand(1).isKill();
- unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
- ? X86::LEA64_32r : X86::LEA32r;
MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
- unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
+ unsigned Opc, leaInReg;
+ if (TM.getSubtarget<X86Subtarget>().is64Bit()) {
+ Opc = X86::LEA64_32r;
+ leaInReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
+ } else {
+ Opc = X86::LEA32r;
+ leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
+ }
// Build and insert into an implicit UNDEF value. This is OK because
  // we'll be shifting and then extracting the lower 16 bits.
@@ -1841,7 +1992,10 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
// just a single insert_subreg.
addRegReg(MIB, leaInReg, true, leaInReg, false);
} else {
- leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
+ if (TM.getSubtarget<X86Subtarget>().is64Bit())
+ leaInReg2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
+ else
+ leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
// Build and insert into an implicit UNDEF value. This is OK because
      // we'll be shifting and then extracting the lower 16 bits.
BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF),leaInReg2);
@@ -1891,6 +2045,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const {
MachineInstr *MI = MBBI;
+
+  // The following opcodes also set the condition code register(s). Only
+  // convert them to an equivalent LEA if the condition code register defs
+  // are dead!
+ if (hasLiveCondCodeDef(MI))
+ return 0;
+
MachineFunction &MF = *MI->getParent()->getParent();
// All instructions input are two-addr instructions. Get the known operands.
const MachineOperand &Dest = MI->getOperand(0);
@@ -1935,10 +2096,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
case X86::SHL64ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
- // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
- // the flags produced by a shift yet, so this is safe.
- unsigned ShAmt = MI->getOperand(2).getImm();
- if (ShAmt == 0 || ShAmt >= 4) return 0;
+ unsigned ShAmt = getTruncatedShiftCount(MI, 2);
+ if (!isTruncatedShiftCountForLEA(ShAmt)) return 0;
// LEA can't handle RSP.
if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
@@ -1953,29 +2112,34 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
case X86::SHL32ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
- // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
- // the flags produced by a shift yet, so this is safe.
- unsigned ShAmt = MI->getOperand(2).getImm();
- if (ShAmt == 0 || ShAmt >= 4) return 0;
+ unsigned ShAmt = getTruncatedShiftCount(MI, 2);
+ if (!isTruncatedShiftCountForLEA(ShAmt)) return 0;
+
+ unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
// LEA can't handle ESP.
- if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
- !MF.getRegInfo().constrainRegClass(Src.getReg(),
- &X86::GR32_NOSPRegClass))
+ bool isKill, isUndef;
+ unsigned SrcReg;
+ MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
+ SrcReg, isKill, isUndef, ImplicitOp))
return 0;
- unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addOperand(Dest)
- .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
+ .addReg(0).addImm(1 << ShAmt)
+ .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
+ .addImm(0).addReg(0);
+ if (ImplicitOp.getReg() != 0)
+ MIB.addOperand(ImplicitOp);
+ NewMI = MIB;
+
break;
}
case X86::SHL16ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
- // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
- // the flags produced by a shift yet, so this is safe.
- unsigned ShAmt = MI->getOperand(2).getImm();
- if (ShAmt == 0 || ShAmt >= 4) return 0;
+ unsigned ShAmt = getTruncatedShiftCount(MI, 2);
+ if (!isTruncatedShiftCountForLEA(ShAmt)) return 0;
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
@@ -1985,11 +2149,6 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
break;
}
default: {
- // The following opcodes also sets the condition code register(s). Only
- // convert them to equivalent lea if the condition code register def's
- // are dead!
- if (hasLiveCondCodeDef(MI))
- return 0;
switch (MIOpc) {
default: return 0;
@@ -1999,17 +2158,20 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- const TargetRegisterClass *RC = MIOpc == X86::INC64r ?
- (const TargetRegisterClass*)&X86::GR64_NOSPRegClass :
- (const TargetRegisterClass*)&X86::GR32_NOSPRegClass;
-
- // LEA can't handle RSP.
- if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
- !MF.getRegInfo().constrainRegClass(Src.getReg(), RC))
+ bool isKill, isUndef;
+ unsigned SrcReg;
+ MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
+ SrcReg, isKill, isUndef, ImplicitOp))
return 0;
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest).addOperand(Src), 1);
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef));
+ if (ImplicitOp.getReg() != 0)
+ MIB.addOperand(ImplicitOp);
+
+ NewMI = addOffset(MIB, 1);
break;
}
case X86::INC16r:
@@ -2026,16 +2188,22 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- const TargetRegisterClass *RC = MIOpc == X86::DEC64r ?
- (const TargetRegisterClass*)&X86::GR64_NOSPRegClass :
- (const TargetRegisterClass*)&X86::GR32_NOSPRegClass;
- // LEA can't handle RSP.
- if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
- !MF.getRegInfo().constrainRegClass(Src.getReg(), RC))
+
+ bool isKill, isUndef;
+ unsigned SrcReg;
+ MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
+ SrcReg, isKill, isUndef, ImplicitOp))
return 0;
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest).addOperand(Src), -1);
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
+ if (ImplicitOp.getReg() != 0)
+ MIB.addOperand(ImplicitOp);
+
+ NewMI = addOffset(MIB, -1);
+
break;
}
case X86::DEC16r:
@@ -2052,36 +2220,41 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD32rr_DB: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc;
- const TargetRegisterClass *RC;
- if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) {
+ if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
Opc = X86::LEA64r;
- RC = &X86::GR64_NOSPRegClass;
- } else {
+ else
Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
- RC = &X86::GR32_NOSPRegClass;
- }
-
- unsigned Src2 = MI->getOperand(2).getReg();
- bool isKill2 = MI->getOperand(2).isKill();
+ bool isKill, isUndef;
+ unsigned SrcReg;
+ MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
+ SrcReg, isKill, isUndef, ImplicitOp))
+ return 0;
- // LEA can't handle RSP.
- if (TargetRegisterInfo::isVirtualRegister(Src2) &&
- !MF.getRegInfo().constrainRegClass(Src2, RC))
+ const MachineOperand &Src2 = MI->getOperand(2);
+ bool isKill2, isUndef2;
+ unsigned SrcReg2;
+ MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
+ SrcReg2, isKill2, isUndef2, ImplicitOp2))
return 0;
- NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest),
- Src.getReg(), Src.isKill(), Src2, isKill2);
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest);
+ if (ImplicitOp.getReg() != 0)
+ MIB.addOperand(ImplicitOp);
+ if (ImplicitOp2.getReg() != 0)
+ MIB.addOperand(ImplicitOp2);
+
+ NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
// Preserve undefness of the operands.
- bool isUndef = MI->getOperand(1).isUndef();
- bool isUndef2 = MI->getOperand(2).isUndef();
NewMI->getOperand(1).setIsUndef(isUndef);
NewMI->getOperand(3).setIsUndef(isUndef2);
- if (LV && isKill2)
- LV->replaceKillInstruction(Src2, MI, NewMI);
+ if (LV && Src2.isKill())
+ LV->replaceKillInstruction(SrcReg2, MI, NewMI);
break;
}
case X86::ADD16rr:
@@ -2120,9 +2293,21 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD32ri8_DB: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest).addOperand(Src),
- MI->getOperand(2).getImm());
+
+ bool isKill, isUndef;
+ unsigned SrcReg;
+ MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
+ SrcReg, isKill, isUndef, ImplicitOp))
+ return 0;
+
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
+ if (ImplicitOp.getReg() != 0)
+ MIB.addOperand(ImplicitOp);
+
+ NewMI = addOffset(MIB, MI->getOperand(2).getImm());
break;
}
case X86::ADD16ri:
@@ -2789,23 +2974,29 @@ static bool isHReg(unsigned Reg) {
// Try and copy between VR128/VR64 and GR64 registers.
static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
- bool HasAVX) {
+ const X86Subtarget& Subtarget) {
+
+
// SrcReg(VR128) -> DestReg(GR64)
// SrcReg(VR64) -> DestReg(GR64)
// SrcReg(GR64) -> DestReg(VR128)
// SrcReg(GR64) -> DestReg(VR64)
+ bool HasAVX = Subtarget.hasAVX();
+ bool HasAVX512 = Subtarget.hasAVX512();
if (X86::GR64RegClass.contains(DestReg)) {
- if (X86::VR128RegClass.contains(SrcReg))
+ if (X86::VR128XRegClass.contains(SrcReg))
// Copy from a VR128 register to a GR64 register.
- return HasAVX ? X86::VMOVPQIto64rr : X86::MOVPQIto64rr;
+ return HasAVX512 ? X86::VMOVPQIto64Zrr: (HasAVX ? X86::VMOVPQIto64rr :
+ X86::MOVPQIto64rr);
if (X86::VR64RegClass.contains(SrcReg))
// Copy from a VR64 register to a GR64 register.
return X86::MOVSDto64rr;
} else if (X86::GR64RegClass.contains(SrcReg)) {
// Copy from a GR64 register to a VR128 register.
- if (X86::VR128RegClass.contains(DestReg))
- return HasAVX ? X86::VMOV64toPQIrr : X86::MOV64toPQIrr;
+ if (X86::VR128XRegClass.contains(DestReg))
+ return HasAVX512 ? X86::VMOV64toPQIZrr: (HasAVX ? X86::VMOV64toPQIrr :
+ X86::MOV64toPQIrr);
// Copy from a GR64 register to a VR64 register.
if (X86::VR64RegClass.contains(DestReg))
return X86::MOV64toSDrr;
@@ -2814,14 +3005,30 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
// SrcReg(FR32) -> DestReg(GR32)
// SrcReg(GR32) -> DestReg(FR32)
- if (X86::GR32RegClass.contains(DestReg) && X86::FR32RegClass.contains(SrcReg))
+ if (X86::GR32RegClass.contains(DestReg) && X86::FR32XRegClass.contains(SrcReg))
// Copy from a FR32 register to a GR32 register.
- return HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr;
+ return HasAVX512 ? X86::VMOVSS2DIZrr : (HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr);
- if (X86::FR32RegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg))
+ if (X86::FR32XRegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg))
// Copy from a GR32 register to a FR32 register.
- return HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr;
+ return HasAVX512 ? X86::VMOVDI2SSZrr : (HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr);
+ return 0;
+}
+static
+unsigned copyPhysRegOpcode_AVX512(unsigned& DestReg, unsigned& SrcReg) {
+ if (X86::VR128XRegClass.contains(DestReg, SrcReg) ||
+ X86::VR256XRegClass.contains(DestReg, SrcReg) ||
+ X86::VR512RegClass.contains(DestReg, SrcReg)) {
+ DestReg = get512BitSuperRegister(DestReg);
+ SrcReg = get512BitSuperRegister(SrcReg);
+ return X86::VMOVAPSZrr;
+ }
+ if ((X86::VK8RegClass.contains(DestReg) ||
+ X86::VK16RegClass.contains(DestReg)) &&
+ (X86::VK8RegClass.contains(SrcReg) ||
+ X86::VK16RegClass.contains(SrcReg)))
+ return X86::KMOVWkk;
return 0;
}
@@ -2831,7 +3038,8 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
bool KillSrc) const {
// First deal with the normal symmetric copies.
bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
- unsigned Opc;
+ bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();
+ unsigned Opc = 0;
if (X86::GR64RegClass.contains(DestReg, SrcReg))
Opc = X86::MOV64rr;
else if (X86::GR32RegClass.contains(DestReg, SrcReg))
@@ -2849,14 +3057,17 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
"8-bit H register can not be copied outside GR8_NOREX");
} else
Opc = X86::MOV8rr;
- } else if (X86::VR128RegClass.contains(DestReg, SrcReg))
+ }
+ else if (X86::VR64RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MMX_MOVQ64rr;
+ else if (HasAVX512)
+ Opc = copyPhysRegOpcode_AVX512(DestReg, SrcReg);
+ else if (X86::VR128RegClass.contains(DestReg, SrcReg))
Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
else if (X86::VR256RegClass.contains(DestReg, SrcReg))
Opc = X86::VMOVAPSYrr;
- else if (X86::VR64RegClass.contains(DestReg, SrcReg))
- Opc = X86::MMX_MOVQ64rr;
- else
- Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, HasAVX);
+ if (!Opc)
+ Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, TM.getSubtarget<X86Subtarget>());
if (Opc) {
BuildMI(MBB, MI, DL, get(Opc), DestReg)
@@ -2904,6 +3115,18 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
bool isStackAligned,
const TargetMachine &TM,
bool load) {
+ if (TM.getSubtarget<X86Subtarget>().hasAVX512()) {
+ if (X86::VK8RegClass.hasSubClassEq(RC) ||
+ X86::VK16RegClass.hasSubClassEq(RC))
+ return load ? X86::KMOVWkm : X86::KMOVWmk;
+ if (RC->getSize() == 4 && X86::FR32XRegClass.hasSubClassEq(RC))
+ return load ? X86::VMOVSSZrm : X86::VMOVSSZmr;
+ if (RC->getSize() == 8 && X86::FR64XRegClass.hasSubClassEq(RC))
+ return load ? X86::VMOVSDZrm : X86::VMOVSDZmr;
+ if (X86::VR512RegClass.hasSubClassEq(RC))
+ return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
+ }
+
bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
switch (RC->getSize()) {
default:
@@ -2945,7 +3168,8 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
return load ? X86::LD_Fp80m : X86::ST_FpP80m;
case 16: {
- assert(X86::VR128RegClass.hasSubClassEq(RC) && "Unknown 16-byte regclass");
+ assert((X86::VR128RegClass.hasSubClassEq(RC) ||
+ X86::VR128XRegClass.hasSubClassEq(RC))&& "Unknown 16-byte regclass");
// If stack is realigned we can use aligned stores.
if (isStackAligned)
return load ?
@@ -2957,12 +3181,19 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
(HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
}
case 32:
- assert(X86::VR256RegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
+ assert((X86::VR256RegClass.hasSubClassEq(RC) ||
+ X86::VR256XRegClass.hasSubClassEq(RC)) && "Unknown 32-byte regclass");
// If stack is realigned we can use aligned stores.
if (isStackAligned)
return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
else
return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
+ case 64:
+ assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
+ if (isStackAligned)
+ return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
+ else
+ return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
}
}
@@ -2989,7 +3220,7 @@ void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
const MachineFunction &MF = *MBB.getParent();
assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() &&
"Stack slot too small for store");
- unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
+ unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
bool isAligned = (TM.getFrameLowering()->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
@@ -3005,7 +3236,7 @@ void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
+ unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
bool isAligned = MMOBegin != MMOEnd &&
(*MMOBegin)->getAlignment() >= Alignment;
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
@@ -3025,7 +3256,7 @@ void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
- unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
+ unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
bool isAligned = (TM.getFrameLowering()->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
@@ -3039,7 +3270,7 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
+ unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
bool isAligned = MMOBegin != MMOEnd &&
(*MMOBegin)->getAlignment() >= Alignment;
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
@@ -3171,6 +3402,25 @@ inline static bool isRedundantFlagInstr(MachineInstr *FlagI, unsigned SrcReg,
inline static bool isDefConvertible(MachineInstr *MI) {
switch (MI->getOpcode()) {
default: return false;
+
+ // The shift instructions only modify ZF if their shift count is non-zero.
+ // N.B.: The processor truncates the shift count depending on the encoding.
+ case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri:
+ case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri:
+ return getTruncatedShiftCount(MI, 2) != 0;
+
+ // Some left shift instructions can be turned into LEA instructions but only
+ // if their flags aren't used. Avoid transforming such instructions.
+ case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{
+ unsigned ShAmt = getTruncatedShiftCount(MI, 2);
+ if (isTruncatedShiftCountForLEA(ShAmt)) return false;
+ return ShAmt != 0;
+ }
+
+ case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8:
+ case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8:
+ return getTruncatedShiftCount(MI, 3) != 0;
+
case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8:
case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr:
@@ -3200,8 +3450,37 @@ inline static bool isDefConvertible(MachineInstr *MI) {
case X86::OR8ri: case X86::OR64rr: case X86::OR32rr:
case X86::OR16rr: case X86::OR8rr: case X86::OR64rm:
case X86::OR32rm: case X86::OR16rm: case X86::OR8rm:
+ case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
+ case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1:
+ case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1:
+ case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1:
+ case X86::ADC32ri: case X86::ADC32ri8:
+ case X86::ADC32rr: case X86::ADC64ri32:
+ case X86::ADC64ri8: case X86::ADC64rr:
+ case X86::SBB32ri: case X86::SBB32ri8:
+ case X86::SBB32rr: case X86::SBB64ri32:
+ case X86::SBB64ri8: case X86::SBB64rr:
case X86::ANDN32rr: case X86::ANDN32rm:
case X86::ANDN64rr: case X86::ANDN64rm:
+ case X86::BEXTR32rr: case X86::BEXTR64rr:
+ case X86::BEXTR32rm: case X86::BEXTR64rm:
+ case X86::BLSI32rr: case X86::BLSI32rm:
+ case X86::BLSI64rr: case X86::BLSI64rm:
+ case X86::BLSMSK32rr:case X86::BLSMSK32rm:
+ case X86::BLSMSK64rr:case X86::BLSMSK64rm:
+ case X86::BLSR32rr: case X86::BLSR32rm:
+ case X86::BLSR64rr: case X86::BLSR64rm:
+ case X86::BZHI32rr: case X86::BZHI32rm:
+ case X86::BZHI64rr: case X86::BZHI64rm:
+ case X86::LZCNT16rr: case X86::LZCNT16rm:
+ case X86::LZCNT32rr: case X86::LZCNT32rm:
+ case X86::LZCNT64rr: case X86::LZCNT64rm:
+ case X86::POPCNT16rr:case X86::POPCNT16rm:
+ case X86::POPCNT32rr:case X86::POPCNT32rm:
+ case X86::POPCNT64rr:case X86::POPCNT64rm:
+ case X86::TZCNT16rr: case X86::TZCNT16rm:
+ case X86::TZCNT32rr: case X86::TZCNT32rm:
+ case X86::TZCNT64rr: case X86::TZCNT64rm:
return true;
}
}
@@ -3308,10 +3587,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// MOV32r0 etc. are implemented with xor which clobbers condition code.
// They are safe to move up, if the definition to EFLAGS is dead and
// earlier instructions do not read or write EFLAGS.
- if (!Movr0Inst && (Instr->getOpcode() == X86::MOV8r0 ||
- Instr->getOpcode() == X86::MOV16r0 ||
- Instr->getOpcode() == X86::MOV32r0 ||
- Instr->getOpcode() == X86::MOV64r0) &&
+ if (!Movr0Inst && Instr->getOpcode() == X86::MOV32r0 &&
Instr->registerDefIsDead(X86::EFLAGS, TRI)) {
Movr0Inst = Instr;
continue;
@@ -3420,20 +3696,38 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// The instruction to be updated is either Sub or MI.
Sub = IsCmpZero ? MI : Sub;
- // Move Movr0Inst to the place right before Sub.
+ // Move Movr0Inst to the appropriate place before Sub.
if (Movr0Inst) {
- Sub->getParent()->remove(Movr0Inst);
- Sub->getParent()->insert(MachineBasicBlock::iterator(Sub), Movr0Inst);
+ // Look backwards until we find a def that doesn't use the current EFLAGS.
+ Def = Sub;
+ MachineBasicBlock::reverse_iterator
+ InsertI = MachineBasicBlock::reverse_iterator(++Def),
+ InsertE = Sub->getParent()->rend();
+ for (; InsertI != InsertE; ++InsertI) {
+ MachineInstr *Instr = &*InsertI;
+ if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
+ Instr->modifiesRegister(X86::EFLAGS, TRI)) {
+ Sub->getParent()->remove(Movr0Inst);
+ Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
+ Movr0Inst);
+ break;
+ }
+ }
+ if (InsertI == InsertE)
+ return false;
}
// Make sure Sub instruction defines EFLAGS and mark the def live.
- unsigned LastOperand = Sub->getNumOperands() - 1;
- assert(Sub->getNumOperands() >= 2 &&
- Sub->getOperand(LastOperand).isReg() &&
- Sub->getOperand(LastOperand).getReg() == X86::EFLAGS &&
- "EFLAGS should be the last operand of SUB, ADD, OR, XOR, AND");
- Sub->getOperand(LastOperand).setIsDef(true);
- Sub->getOperand(LastOperand).setIsDead(false);
+ unsigned i = 0, e = Sub->getNumOperands();
+ for (; i != e; ++i) {
+ MachineOperand &MO = Sub->getOperand(i);
+ if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) {
+ MO.setIsDead(false);
+ break;
+ }
+ }
+ assert(i != e && "Unable to locate a def EFLAGS operand");
+
CmpInstr->eraseFromParent();
// Modify the condition code of instructions in OpsToUpdate.
@@ -3558,6 +3852,8 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
case X86::AVX_SET0:
assert(HasAVX && "AVX not supported");
return Expand2AddrUndef(MIB, get(X86::VXORPSYrr));
+ case X86::AVX512_512_SET0:
+ return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
case X86::V_SETALLONES:
return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
case X86::AVX2_SETALLONES:
@@ -3565,23 +3861,13 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
case X86::TEST8ri_NOREX:
MI->setDesc(get(X86::TEST8ri));
return true;
+ case X86::KSET0W: return Expand2AddrUndef(MIB, get(X86::KXORWrr));
+ case X86::KSET1B:
+ case X86::KSET1W: return Expand2AddrUndef(MIB, get(X86::KXNORWrr));
}
return false;
}
-MachineInstr*
-X86InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx, uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const {
- X86AddressMode AM;
- AM.BaseType = X86AddressMode::FrameIndexBase;
- AM.Base.FrameIndex = FrameIx;
- MachineInstrBuilder MIB = BuildMI(MF, DL, get(X86::DBG_VALUE));
- addFullAddress(MIB, AM).addImm(Offset).addMetadata(MDPtr);
- return &*MIB;
-}
-
static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
const SmallVectorImpl<MachineOperand> &MOs,
MachineInstr *MI,
@@ -3686,18 +3972,11 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
isTwoAddrFold = true;
} else if (i == 0) { // If operand 0
- unsigned Opc = 0;
- switch (MI->getOpcode()) {
- default: break;
- case X86::MOV64r0: Opc = X86::MOV64mi32; break;
- case X86::MOV32r0: Opc = X86::MOV32mi; break;
- case X86::MOV16r0: Opc = X86::MOV16mi; break;
- case X86::MOV8r0: Opc = X86::MOV8mi; break;
+ if (MI->getOpcode() == X86::MOV32r0) {
+ NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
+ if (NewMI)
+ return NewMI;
}
- if (Opc)
- NewMI = MakeM0Inst(*this, Opc, MOs, MI);
- if (NewMI)
- return NewMI;
OpcodeTablePtr = &RegOp2MemOpTable0;
} else if (i == 1) {
@@ -3798,18 +4077,6 @@ static bool hasPartialRegUpdate(unsigned Opcode) {
case X86::RSQRTSSr_Int:
case X86::SQRTSSr:
case X86::SQRTSSr_Int:
- // AVX encoded versions
- case X86::VCVTSD2SSrr:
- case X86::Int_VCVTSD2SSrr:
- case X86::VCVTSS2SDrr:
- case X86::Int_VCVTSS2SDrr:
- case X86::VRCPSSr:
- case X86::VROUNDSDr:
- case X86::VROUNDSDr_Int:
- case X86::VROUNDSSr:
- case X86::VROUNDSSr_Int:
- case X86::VRSQRTSSr:
- case X86::VSQRTSSr:
return true;
}
@@ -3841,10 +4108,77 @@ getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
return 16;
}
+// Return true for any instruction that copies the high bits of the first source
+// operand into the unused high bits of the destination operand.
+static bool hasUndefRegUpdate(unsigned Opcode) {
+ switch (Opcode) {
+ case X86::VCVTSI2SSrr:
+ case X86::Int_VCVTSI2SSrr:
+ case X86::VCVTSI2SS64rr:
+ case X86::Int_VCVTSI2SS64rr:
+ case X86::VCVTSI2SDrr:
+ case X86::Int_VCVTSI2SDrr:
+ case X86::VCVTSI2SD64rr:
+ case X86::Int_VCVTSI2SD64rr:
+ case X86::VCVTSD2SSrr:
+ case X86::Int_VCVTSD2SSrr:
+ case X86::VCVTSS2SDrr:
+ case X86::Int_VCVTSS2SDrr:
+ case X86::VRCPSSr:
+ case X86::VROUNDSDr:
+ case X86::VROUNDSDr_Int:
+ case X86::VROUNDSSr:
+ case X86::VROUNDSSr_Int:
+ case X86::VRSQRTSSr:
+ case X86::VSQRTSSr:
+
+ // AVX-512
+ case X86::VCVTSD2SSZrr:
+ case X86::VCVTSS2SDZrr:
+ return true;
+ }
+
+ return false;
+}
+
+/// Inform the ExeDepsFix pass how many idle instructions we would like before
+/// certain undef register reads.
+///
+/// This catches the VCVTSI2SD family of instructions:
+///
+/// vcvtsi2sdq %rax, %xmm0<undef>, %xmm14
+///
+/// We should be careful *not* to catch VXOR idioms which are presumably
+/// handled specially in the pipeline:
+///
+/// vxorps %xmm1<undef>, %xmm1<undef>, %xmm1
+///
+/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
+/// high bits that are passed-through are not live.
+unsigned X86InstrInfo::
+getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
+ const TargetRegisterInfo *TRI) const {
+ if (!hasUndefRegUpdate(MI->getOpcode()))
+ return 0;
+
+ // Set the OpNum parameter to the first source operand.
+ OpNum = 1;
+
+ const MachineOperand &MO = MI->getOperand(OpNum);
+ if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ // Use the same magic number as getPartialRegUpdateClearance.
+ return 16;
+ }
+ return 0;
+}
+
void X86InstrInfo::
breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {
unsigned Reg = MI->getOperand(OpNum).getReg();
+ // If MI kills this register, the false dependence is already broken.
+ if (MI->killsRegister(Reg, TRI))
+ return;
if (X86::VR128RegClass.contains(Reg)) {
// These instructions are all floating point domain, so xorps is the best
// choice.
@@ -3864,10 +4198,75 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
MI->addRegisterKilled(Reg, TRI, true);
}
-MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
+static MachineInstr* foldPatchpoint(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex,
+ const TargetInstrInfo &TII) {
+ unsigned StartIdx = 0;
+ switch (MI->getOpcode()) {
+ case TargetOpcode::STACKMAP:
+ StartIdx = 2; // Skip ID, nShadowBytes.
+ break;
+ case TargetOpcode::PATCHPOINT: {
+ // For PatchPoint, the call args are not foldable.
+ PatchPointOpers opers(MI);
+ StartIdx = opers.getVarIdx();
+ break;
+ }
+ default:
+ llvm_unreachable("unexpected stackmap opcode");
+ }
+
+ // Return 0 if any operands requested for folding are not foldable (not
+ // part of the stackmap's live values).
+ for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
+ I != E; ++I) {
+ if (*I < StartIdx)
+ return 0;
+ }
+
+ MachineInstr *NewMI =
+ MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
+ MachineInstrBuilder MIB(MF, NewMI);
+
+ // No need to fold the return, the metadata, and the function arguments
+ for (unsigned i = 0; i < StartIdx; ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
+ assert(MO.getReg() && "patchpoint can only fold a vreg operand");
+ // Compute the spill slot size and offset.
+ const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(MO.getReg());
+ unsigned SpillSize;
+ unsigned SpillOffset;
+ bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
+ SpillOffset, &MF.getTarget());
+ if (!Valid)
+ report_fatal_error("cannot spill patchpoint subregister operand");
+
+ MIB.addOperand(MachineOperand::CreateImm(StackMaps::IndirectMemRefOp));
+ MIB.addOperand(MachineOperand::CreateImm(SpillSize));
+ MIB.addOperand(MachineOperand::CreateFI(FrameIndex));
+ addOffset(MIB, SpillOffset);
+ }
+ else
+ MIB.addOperand(MO);
+ }
+ return NewMI;
+}
+
+MachineInstr*
+X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const {
+ // Special case stack map and patch point intrinsics.
+ if (MI->getOpcode() == TargetOpcode::STACKMAP
+ || MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+ return foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
+ }
// Check switch flag
if (NoFusing) return NULL;
@@ -3881,6 +4280,10 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
const MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned Size = MFI->getObjectSize(FrameIndex);
unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
+ // If the function stack isn't realigned we don't want to fold instructions
+ // that need increased alignment.
+ if (!RI.needsStackRealignment(MF))
+ Alignment = std::min(Alignment, TM.getFrameLowering()->getStackAlignment());
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
unsigned RCSize = 0;
@@ -3910,6 +4313,12 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr *LoadMI) const {
+ // If loading from a FrameIndex, fold directly from the FrameIndex.
+ unsigned NumOps = LoadMI->getDesc().getNumOperands();
+ int FrameIndex;
+ if (isLoadFromStackSlot(LoadMI, FrameIndex))
+ return foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
+
// Check switch flag
if (NoFusing) return NULL;
@@ -4035,7 +4444,6 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
return NULL;
// Folding a normal load. Just copy the load's address operands.
- unsigned NumOps = LoadMI->getDesc().getNumOperands();
for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
MOs.push_back(LoadMI->getOperand(i));
break;
@@ -4083,13 +4491,9 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
} else if (OpNum == 0) { // If operand 0
- switch (Opc) {
- case X86::MOV8r0:
- case X86::MOV16r0:
- case X86::MOV32r0:
- case X86::MOV64r0: return true;
- default: break;
- }
+ if (Opc == X86::MOV32r0)
+ return true;
+
OpcodeTablePtr = &RegOp2MemOpTable0;
} else if (OpNum == 1) {
OpcodeTablePtr = &RegOp2MemOpTable1;
@@ -4250,7 +4654,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
std::vector<SDValue> AddrOps;
std::vector<SDValue> BeforeOps;
std::vector<SDValue> AfterOps;
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
unsigned NumOps = N->getNumOperands();
for (unsigned i = 0; i != NumOps-1; ++i) {
SDValue Op = N->getOperand(i);
@@ -4507,6 +4911,167 @@ bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
return true;
}
+bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
+ MachineInstr *Second) const {
+ // Check if this processor supports macro-fusion. Since this is a minor
+ // heuristic, we haven't specifically reserved a feature. hasAVX is a decent
+ // proxy for SandyBridge+.
+ if (!TM.getSubtarget<X86Subtarget>().hasAVX())
+ return false;
+
+ enum {
+ FuseTest,
+ FuseCmp,
+ FuseInc
+ } FuseKind;
+
+ switch(Second->getOpcode()) {
+ default:
+ return false;
+ case X86::JE_4:
+ case X86::JNE_4:
+ case X86::JL_4:
+ case X86::JLE_4:
+ case X86::JG_4:
+ case X86::JGE_4:
+ FuseKind = FuseInc;
+ break;
+ case X86::JB_4:
+ case X86::JBE_4:
+ case X86::JA_4:
+ case X86::JAE_4:
+ FuseKind = FuseCmp;
+ break;
+ case X86::JS_4:
+ case X86::JNS_4:
+ case X86::JP_4:
+ case X86::JNP_4:
+ case X86::JO_4:
+ case X86::JNO_4:
+ FuseKind = FuseTest;
+ break;
+ }
+ switch (First->getOpcode()) {
+ default:
+ return false;
+ case X86::TEST8rr:
+ case X86::TEST16rr:
+ case X86::TEST32rr:
+ case X86::TEST64rr:
+ case X86::TEST8ri:
+ case X86::TEST16ri:
+ case X86::TEST32ri:
+ case X86::TEST32i32:
+ case X86::TEST64i32:
+ case X86::TEST64ri32:
+ case X86::TEST8rm:
+ case X86::TEST16rm:
+ case X86::TEST32rm:
+ case X86::TEST64rm:
+ case X86::AND16i16:
+ case X86::AND16ri:
+ case X86::AND16ri8:
+ case X86::AND16rm:
+ case X86::AND16rr:
+ case X86::AND32i32:
+ case X86::AND32ri:
+ case X86::AND32ri8:
+ case X86::AND32rm:
+ case X86::AND32rr:
+ case X86::AND64i32:
+ case X86::AND64ri32:
+ case X86::AND64ri8:
+ case X86::AND64rm:
+ case X86::AND64rr:
+ case X86::AND8i8:
+ case X86::AND8ri:
+ case X86::AND8rm:
+ case X86::AND8rr:
+ return true;
+ case X86::CMP16i16:
+ case X86::CMP16ri:
+ case X86::CMP16ri8:
+ case X86::CMP16rm:
+ case X86::CMP16rr:
+ case X86::CMP32i32:
+ case X86::CMP32ri:
+ case X86::CMP32ri8:
+ case X86::CMP32rm:
+ case X86::CMP32rr:
+ case X86::CMP64i32:
+ case X86::CMP64ri32:
+ case X86::CMP64ri8:
+ case X86::CMP64rm:
+ case X86::CMP64rr:
+ case X86::CMP8i8:
+ case X86::CMP8ri:
+ case X86::CMP8rm:
+ case X86::CMP8rr:
+ case X86::ADD16i16:
+ case X86::ADD16ri:
+ case X86::ADD16ri8:
+ case X86::ADD16ri8_DB:
+ case X86::ADD16ri_DB:
+ case X86::ADD16rm:
+ case X86::ADD16rr:
+ case X86::ADD16rr_DB:
+ case X86::ADD32i32:
+ case X86::ADD32ri:
+ case X86::ADD32ri8:
+ case X86::ADD32ri8_DB:
+ case X86::ADD32ri_DB:
+ case X86::ADD32rm:
+ case X86::ADD32rr:
+ case X86::ADD32rr_DB:
+ case X86::ADD64i32:
+ case X86::ADD64ri32:
+ case X86::ADD64ri32_DB:
+ case X86::ADD64ri8:
+ case X86::ADD64ri8_DB:
+ case X86::ADD64rm:
+ case X86::ADD64rr:
+ case X86::ADD64rr_DB:
+ case X86::ADD8i8:
+ case X86::ADD8mi:
+ case X86::ADD8mr:
+ case X86::ADD8ri:
+ case X86::ADD8rm:
+ case X86::ADD8rr:
+ case X86::SUB16i16:
+ case X86::SUB16ri:
+ case X86::SUB16ri8:
+ case X86::SUB16rm:
+ case X86::SUB16rr:
+ case X86::SUB32i32:
+ case X86::SUB32ri:
+ case X86::SUB32ri8:
+ case X86::SUB32rm:
+ case X86::SUB32rr:
+ case X86::SUB64i32:
+ case X86::SUB64ri32:
+ case X86::SUB64ri8:
+ case X86::SUB64rm:
+ case X86::SUB64rr:
+ case X86::SUB8i8:
+ case X86::SUB8ri:
+ case X86::SUB8rm:
+ case X86::SUB8rr:
+ return FuseKind == FuseCmp || FuseKind == FuseInc;
+ case X86::INC16r:
+ case X86::INC32r:
+ case X86::INC64_16r:
+ case X86::INC64_32r:
+ case X86::INC64r:
+ case X86::INC8r:
+ case X86::DEC16r:
+ case X86::DEC32r:
+ case X86::DEC64_16r:
+ case X86::DEC64_32r:
+ case X86::DEC64r:
+ case X86::DEC8r:
+ return FuseKind == FuseInc;
+ }
+}
bool X86InstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
@@ -4700,6 +5265,37 @@ bool X86InstrInfo::isHighLatencyDef(int opc) const {
case X86::VSQRTSSm:
case X86::VSQRTSSm_Int:
case X86::VSQRTSSr:
+ case X86::VSQRTPDZrm:
+ case X86::VSQRTPDZrr:
+ case X86::VSQRTPSZrm:
+ case X86::VSQRTPSZrr:
+ case X86::VSQRTSDZm:
+ case X86::VSQRTSDZm_Int:
+ case X86::VSQRTSDZr:
+ case X86::VSQRTSSZm_Int:
+ case X86::VSQRTSSZr:
+ case X86::VSQRTSSZm:
+ case X86::VDIVSDZrm:
+ case X86::VDIVSDZrr:
+ case X86::VDIVSSZrm:
+ case X86::VDIVSSZrr:
+
+ case X86::VGATHERQPSZrm:
+ case X86::VGATHERQPDZrm:
+ case X86::VGATHERDPDZrm:
+ case X86::VGATHERDPSZrm:
+ case X86::VPGATHERQDZrm:
+ case X86::VPGATHERQQZrm:
+ case X86::VPGATHERDDZrm:
+ case X86::VPGATHERDQZrm:
+ case X86::VSCATTERQPDZmr:
+ case X86::VSCATTERQPSZmr:
+ case X86::VSCATTERDPDZmr:
+ case X86::VSCATTERDPSZmr:
+ case X86::VPSCATTERQDZmr:
+ case X86::VPSCATTERQQZmr:
+ case X86::VPSCATTERDDZmr:
+ case X86::VPSCATTERDQZmr:
return true;
}
}
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index 260f054d69cb..600e3922a71e 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -152,6 +152,8 @@ class X86InstrInfo : public X86GenInstrInfo {
MemOp2RegOpTableType &M2RTable,
unsigned RegOp, unsigned MemOp, unsigned Flags);
+ virtual void anchor();
+
public:
explicit X86InstrInfo(X86TargetMachine &tm);
@@ -192,6 +194,19 @@ public:
const MachineInstr *Orig,
const TargetRegisterInfo &TRI) const;
+ /// Given an operand within a MachineInstr, insert preceding code to put it
+ /// into the right format for a particular kind of LEA instruction. This may
+ /// involve using an appropriate super-register instead (with an implicit use
+ /// of the original) or creating a new virtual register and inserting COPY
+ /// instructions to get the data into the right class.
+ ///
+ /// Reference parameters are set to indicate how the caller should add this
+ /// operand to the LEA instruction.
+ bool classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
+ unsigned LEAOpcode, bool AllowSP,
+ unsigned &NewSrc, bool &isKill,
+ bool &isUndef, MachineOperand &ImplicitOp) const;
+
/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
@@ -262,12 +277,6 @@ public:
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
- virtual
- MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx, uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const;
-
/// foldMemoryOperand - If this target supports it, fold a load or store of
/// the specified stack slot into the specified machine instruction for the
/// specified operand(s). If this is possible, the target should perform the
@@ -332,6 +341,9 @@ public:
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const;
+ virtual bool shouldScheduleAdjacent(MachineInstr* First,
+ MachineInstr *Second) const LLVM_OVERRIDE;
+
virtual void getNoopForMachoTarget(MCInst &NopInst) const;
virtual
@@ -359,6 +371,8 @@ public:
unsigned getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const;
+ unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
+ const TargetRegisterInfo *TRI) const;
void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const;
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 3380d8c64ea7..6e5d54349faa 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -248,11 +248,12 @@ def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
-def X86andn_flag : SDNode<"X86ISD::ANDN", SDTBinaryArithWithFlags>;
def X86blsi : SDNode<"X86ISD::BLSI", SDTIntUnaryOp>;
def X86blsmsk : SDNode<"X86ISD::BLSMSK", SDTIntUnaryOp>;
def X86blsr : SDNode<"X86ISD::BLSR", SDTIntUnaryOp>;
+def X86bzhi : SDNode<"X86ISD::BZHI", SDTIntShiftOp>;
+def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
@@ -278,43 +279,52 @@ def ptr_rc_nosp : PointerLikeRegClass<1>;
// *mem - Operand definitions for the funky X86 addressing mode operands.
//
-def X86MemAsmOperand : AsmOperandClass {
- let Name = "Mem"; let PredicateMethod = "isMem";
+def X86MemAsmOperand : AsmOperandClass {
+ let Name = "Mem";
}
-def X86Mem8AsmOperand : AsmOperandClass {
- let Name = "Mem8"; let PredicateMethod = "isMem8";
+def X86Mem8AsmOperand : AsmOperandClass {
+ let Name = "Mem8"; let RenderMethod = "addMemOperands";
}
-def X86Mem16AsmOperand : AsmOperandClass {
- let Name = "Mem16"; let PredicateMethod = "isMem16";
+def X86Mem16AsmOperand : AsmOperandClass {
+ let Name = "Mem16"; let RenderMethod = "addMemOperands";
}
-def X86Mem32AsmOperand : AsmOperandClass {
- let Name = "Mem32"; let PredicateMethod = "isMem32";
+def X86Mem32AsmOperand : AsmOperandClass {
+ let Name = "Mem32"; let RenderMethod = "addMemOperands";
}
-def X86Mem64AsmOperand : AsmOperandClass {
- let Name = "Mem64"; let PredicateMethod = "isMem64";
+def X86Mem64AsmOperand : AsmOperandClass {
+ let Name = "Mem64"; let RenderMethod = "addMemOperands";
}
-def X86Mem80AsmOperand : AsmOperandClass {
- let Name = "Mem80"; let PredicateMethod = "isMem80";
+def X86Mem80AsmOperand : AsmOperandClass {
+ let Name = "Mem80"; let RenderMethod = "addMemOperands";
}
-def X86Mem128AsmOperand : AsmOperandClass {
- let Name = "Mem128"; let PredicateMethod = "isMem128";
+def X86Mem128AsmOperand : AsmOperandClass {
+ let Name = "Mem128"; let RenderMethod = "addMemOperands";
}
-def X86Mem256AsmOperand : AsmOperandClass {
- let Name = "Mem256"; let PredicateMethod = "isMem256";
+def X86Mem256AsmOperand : AsmOperandClass {
+ let Name = "Mem256"; let RenderMethod = "addMemOperands";
+}
+def X86Mem512AsmOperand : AsmOperandClass {
+ let Name = "Mem512"; let RenderMethod = "addMemOperands";
}
// Gather mem operands
def X86MemVX32Operand : AsmOperandClass {
- let Name = "MemVX32"; let PredicateMethod = "isMemVX32";
+ let Name = "MemVX32"; let RenderMethod = "addMemOperands";
}
def X86MemVY32Operand : AsmOperandClass {
- let Name = "MemVY32"; let PredicateMethod = "isMemVY32";
+ let Name = "MemVY32"; let RenderMethod = "addMemOperands";
+}
+def X86MemVZ32Operand : AsmOperandClass {
+ let Name = "MemVZ32"; let RenderMethod = "addMemOperands";
}
def X86MemVX64Operand : AsmOperandClass {
- let Name = "MemVX64"; let PredicateMethod = "isMemVX64";
+ let Name = "MemVX64"; let RenderMethod = "addMemOperands";
}
def X86MemVY64Operand : AsmOperandClass {
- let Name = "MemVY64"; let PredicateMethod = "isMemVY64";
+ let Name = "MemVY64"; let RenderMethod = "addMemOperands";
+}
+def X86MemVZ64Operand : AsmOperandClass {
+ let Name = "MemVZ64"; let RenderMethod = "addMemOperands";
}
def X86AbsMemAsmOperand : AsmOperandClass {
@@ -333,28 +343,36 @@ def opaque48mem : X86MemOperand<"printopaquemem">;
def opaque80mem : X86MemOperand<"printopaquemem">;
def opaque512mem : X86MemOperand<"printopaquemem">;
-def i8mem : X86MemOperand<"printi8mem"> {
+def i8mem : X86MemOperand<"printi8mem"> {
let ParserMatchClass = X86Mem8AsmOperand; }
-def i16mem : X86MemOperand<"printi16mem"> {
+def i16mem : X86MemOperand<"printi16mem"> {
let ParserMatchClass = X86Mem16AsmOperand; }
-def i32mem : X86MemOperand<"printi32mem"> {
+def i32mem : X86MemOperand<"printi32mem"> {
let ParserMatchClass = X86Mem32AsmOperand; }
-def i64mem : X86MemOperand<"printi64mem"> {
+def i64mem : X86MemOperand<"printi64mem"> {
let ParserMatchClass = X86Mem64AsmOperand; }
-def i128mem : X86MemOperand<"printi128mem"> {
+def i128mem : X86MemOperand<"printi128mem"> {
let ParserMatchClass = X86Mem128AsmOperand; }
-def i256mem : X86MemOperand<"printi256mem"> {
+def i256mem : X86MemOperand<"printi256mem"> {
let ParserMatchClass = X86Mem256AsmOperand; }
-def f32mem : X86MemOperand<"printf32mem"> {
+def i512mem : X86MemOperand<"printi512mem"> {
+ let ParserMatchClass = X86Mem512AsmOperand; }
+def f32mem : X86MemOperand<"printf32mem"> {
let ParserMatchClass = X86Mem32AsmOperand; }
-def f64mem : X86MemOperand<"printf64mem"> {
+def f64mem : X86MemOperand<"printf64mem"> {
let ParserMatchClass = X86Mem64AsmOperand; }
-def f80mem : X86MemOperand<"printf80mem"> {
+def f80mem : X86MemOperand<"printf80mem"> {
let ParserMatchClass = X86Mem80AsmOperand; }
-def f128mem : X86MemOperand<"printf128mem"> {
+def f128mem : X86MemOperand<"printf128mem"> {
let ParserMatchClass = X86Mem128AsmOperand; }
-def f256mem : X86MemOperand<"printf256mem">{
+def f256mem : X86MemOperand<"printf256mem">{
let ParserMatchClass = X86Mem256AsmOperand; }
+def f512mem : X86MemOperand<"printf512mem">{
+ let ParserMatchClass = X86Mem512AsmOperand; }
+def v512mem : Operand<iPTR> {
+ let PrintMethod = "printf512mem";
+ let MIOperandInfo = (ops ptr_rc, i8imm, VR512, i32imm, i8imm);
+ let ParserMatchClass = X86Mem512AsmOperand; }
// Gather mem operands
def vx32mem : X86MemOperand<"printi32mem">{
@@ -369,6 +387,15 @@ def vx64mem : X86MemOperand<"printi64mem">{
def vy64mem : X86MemOperand<"printi64mem">{
let MIOperandInfo = (ops ptr_rc, i8imm, VR256, i32imm, i8imm);
let ParserMatchClass = X86MemVY64Operand; }
+def vy64xmem : X86MemOperand<"printi64mem">{
+ let MIOperandInfo = (ops ptr_rc, i8imm, VR256X, i32imm, i8imm);
+ let ParserMatchClass = X86MemVY64Operand; }
+def vz32mem : X86MemOperand<"printi32mem">{
+ let MIOperandInfo = (ops ptr_rc, i16imm, VR512, i32imm, i8imm);
+ let ParserMatchClass = X86MemVZ32Operand; }
+def vz64mem : X86MemOperand<"printi64mem">{
+ let MIOperandInfo = (ops ptr_rc, i8imm, VR512, i32imm, i8imm);
+ let ParserMatchClass = X86MemVZ64Operand; }
}
// A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
@@ -412,17 +439,49 @@ let OperandType = "OPERAND_PCREL",
def i32imm_pcrel : Operand<i32>;
def i16imm_pcrel : Operand<i16>;
-def offset8 : Operand<i64>;
-def offset16 : Operand<i64>;
-def offset32 : Operand<i64>;
-def offset64 : Operand<i64>;
-
// Branch targets have OtherVT type and print as pc-relative values.
def brtarget : Operand<OtherVT>;
def brtarget8 : Operand<OtherVT>;
}
+def X86MemOffs8AsmOperand : AsmOperandClass {
+ let Name = "MemOffs8";
+ let RenderMethod = "addMemOffsOperands";
+ let SuperClasses = [X86Mem8AsmOperand];
+}
+def X86MemOffs16AsmOperand : AsmOperandClass {
+ let Name = "MemOffs16";
+ let RenderMethod = "addMemOffsOperands";
+ let SuperClasses = [X86Mem16AsmOperand];
+}
+def X86MemOffs32AsmOperand : AsmOperandClass {
+ let Name = "MemOffs32";
+ let RenderMethod = "addMemOffsOperands";
+ let SuperClasses = [X86Mem32AsmOperand];
+}
+def X86MemOffs64AsmOperand : AsmOperandClass {
+ let Name = "MemOffs64";
+ let RenderMethod = "addMemOffsOperands";
+ let SuperClasses = [X86Mem64AsmOperand];
+}
+
+let OperandType = "OPERAND_MEMORY" in {
+def offset8 : Operand<i64> {
+ let ParserMatchClass = X86MemOffs8AsmOperand;
+ let PrintMethod = "printMemOffs8"; }
+def offset16 : Operand<i64> {
+ let ParserMatchClass = X86MemOffs16AsmOperand;
+ let PrintMethod = "printMemOffs16"; }
+def offset32 : Operand<i64> {
+ let ParserMatchClass = X86MemOffs32AsmOperand;
+ let PrintMethod = "printMemOffs32"; }
+def offset64 : Operand<i64> {
+ let ParserMatchClass = X86MemOffs64AsmOperand;
+ let PrintMethod = "printMemOffs64"; }
+}
+
+
def SSECC : Operand<i8> {
let PrintMethod = "printSSECC";
let OperandType = "OPERAND_IMMEDIATE";
@@ -443,6 +502,14 @@ class ImmZExtAsmOperandClass : AsmOperandClass {
let RenderMethod = "addImmOperands";
}
+def X86GR32orGR64AsmOperand : AsmOperandClass {
+ let Name = "GR32orGR64";
+}
+
+def GR32orGR64 : RegisterOperand<GR32> {
+ let ParserMatchClass = X86GR32orGR64AsmOperand;
+}
+
// Sign-extended immediate classes. We don't need to define the full lattice
// here because there is no instruction with an ambiguity between ImmSExti64i32
// and ImmSExti32i8.
@@ -523,8 +590,7 @@ def i64i8imm : Operand<i64> {
def lea64_32mem : Operand<i32> {
let PrintMethod = "printi32mem";
- let AsmOperandLowerMethod = "lower_lea64_32mem";
- let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm);
+ let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, i8imm);
let ParserMatchClass = X86MemAsmOperand;
}
@@ -546,7 +612,7 @@ def lea32addr : ComplexPattern<i32, 5, "SelectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex],
[]>;
// In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
-def lea64_32addr : ComplexPattern<i32, 5, "SelectLEAAddr",
+def lea64_32addr : ComplexPattern<i32, 5, "SelectLEA64_32Addr",
[add, sub, mul, X86mul_imm, shl, or,
frameindex, X86WrapperRIP],
[]>;
@@ -591,13 +657,22 @@ def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
def HasAVX : Predicate<"Subtarget->hasAVX()">;
def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
+def HasAVX512 : Predicate<"Subtarget->hasAVX512()">;
+def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
+def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
+def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
+def HasCDI : Predicate<"Subtarget->hasCDI()">;
+def HasPFI : Predicate<"Subtarget->hasPFI()">;
+def HasERI : Predicate<"Subtarget->hasERI()">;
def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
def HasAES : Predicate<"Subtarget->hasAES()">;
def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">;
def HasFMA : Predicate<"Subtarget->hasFMA()">;
+def UseFMAOnAVX : Predicate<"Subtarget->hasFMA() && !Subtarget->hasAVX512()">;
def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
def HasXOP : Predicate<"Subtarget->hasXOP()">;
+def HasTBM : Predicate<"Subtarget->hasTBM()">;
def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
def HasRDRAND : Predicate<"Subtarget->hasRDRAND()">;
def HasF16C : Predicate<"Subtarget->hasF16C()">;
@@ -609,9 +684,10 @@ def HasRTM : Predicate<"Subtarget->hasRTM()">;
def HasHLE : Predicate<"Subtarget->hasHLE()">;
def HasTSX : Predicate<"Subtarget->hasRTM() || Subtarget->hasHLE()">;
def HasADX : Predicate<"Subtarget->hasADX()">;
+def HasSHA : Predicate<"Subtarget->hasSHA()">;
def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">;
def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
-def HasPrefetchW : Predicate<"Subtarget->has3DNow() || Subtarget->hasPRFCHW()">;
+def HasPrefetchW : Predicate<"Subtarget->hasPRFCHW()">;
def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
@@ -804,11 +880,11 @@ def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", [],
IIC_POP_REG>;
def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
IIC_POP_REG>, OpSize;
-def POP16rmm: I<0x8F, MRM0m, (outs i16mem:$dst), (ins), "pop{w}\t$dst", [],
+def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", [],
IIC_POP_MEM>, OpSize;
def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", [],
IIC_POP_REG>;
-def POP32rmm: I<0x8F, MRM0m, (outs i32mem:$dst), (ins), "pop{l}\t$dst", [],
+def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", [],
IIC_POP_MEM>;
def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>, OpSize;
@@ -852,7 +928,7 @@ def POP64r : I<0x58, AddRegFrm,
(outs GR64:$reg), (ins), "pop{q}\t$reg", [], IIC_POP_REG>;
def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", [],
IIC_POP_REG>;
-def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", [],
+def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", [],
IIC_POP_MEM>;
} // mayLoad, SchedRW
let mayStore = 1, SchedRW = [WriteStore] in {
@@ -884,12 +960,12 @@ def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", [], IIC_PUSH_F>,
let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
mayLoad = 1, neverHasSideEffects = 1, SchedRW = [WriteLoad] in {
-def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l|d}", [], IIC_POP_A>,
+def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l}", [], IIC_POP_A>,
Requires<[In32BitMode]>;
}
let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
mayStore = 1, neverHasSideEffects = 1, SchedRW = [WriteStore] in {
-def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l|d}", [], IIC_PUSH_A>,
+def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l}", [], IIC_PUSH_A>,
Requires<[In32BitMode]>;
}
@@ -910,53 +986,56 @@ let Defs = [EFLAGS] in {
def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))],
- IIC_BSF>, TB, OpSize, Sched<[WriteShift]>;
+ IIC_BIT_SCAN_REG>, TB, OpSize, Sched<[WriteShift]>;
def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))],
- IIC_BSF>, TB, OpSize, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, TB, OpSize, Sched<[WriteShiftLd]>;
def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))], IIC_BSF>, TB,
+ [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))],
+ IIC_BIT_SCAN_REG>, TB,
Sched<[WriteShift]>;
def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))],
- IIC_BSF>, TB, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>;
def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))],
- IIC_BSF>, TB, Sched<[WriteShift]>;
+ IIC_BIT_SCAN_REG>, TB, Sched<[WriteShift]>;
def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))],
- IIC_BSF>, TB, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>;
def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))], IIC_BSR>,
+ [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))],
+ IIC_BIT_SCAN_REG>,
TB, OpSize, Sched<[WriteShift]>;
def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))],
- IIC_BSR>, TB,
+ IIC_BIT_SCAN_MEM>, TB,
OpSize, Sched<[WriteShiftLd]>;
def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))], IIC_BSR>, TB,
+ [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))],
+ IIC_BIT_SCAN_REG>, TB,
Sched<[WriteShift]>;
def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))],
- IIC_BSR>, TB, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>;
def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))], IIC_BSR>, TB,
+ [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))], IIC_BIT_SCAN_REG>, TB,
Sched<[WriteShift]>;
def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))],
- IIC_BSR>, TB, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>;
} // Defs = [EFLAGS]
let SchedRW = [WriteMicrocoded] in {
@@ -1038,44 +1117,67 @@ def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
[(store i64immSExt32:$src, addr:$dst)], IIC_MOV_MEM>;
} // SchedRW
+let hasSideEffects = 0 in {
+
/// moffs8, moffs16 and moffs32 versions of moves. The immediate is a
/// 32-bit offset from the PC. These are only valid in x86-32 mode.
let SchedRW = [WriteALU] in {
+let mayLoad = 1 in {
def MOV8o8a : Ii32 <0xA0, RawFrm, (outs), (ins offset8:$src),
- "mov{b}\t{$src, %al|AL, $src}", [], IIC_MOV_MEM>,
+ "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>,
Requires<[In32BitMode]>;
def MOV16o16a : Ii32 <0xA1, RawFrm, (outs), (ins offset16:$src),
- "mov{w}\t{$src, %ax|AL, $src}", [], IIC_MOV_MEM>, OpSize,
+ "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>, OpSize,
Requires<[In32BitMode]>;
def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins offset32:$src),
- "mov{l}\t{$src, %eax|EAX, $src}", [], IIC_MOV_MEM>,
+ "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
Requires<[In32BitMode]>;
+}
+let mayStore = 1 in {
def MOV8ao8 : Ii32 <0xA2, RawFrm, (outs offset8:$dst), (ins),
- "mov{b}\t{%al, $dst|$dst, AL}", [], IIC_MOV_MEM>,
+ "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>,
Requires<[In32BitMode]>;
def MOV16ao16 : Ii32 <0xA3, RawFrm, (outs offset16:$dst), (ins),
- "mov{w}\t{%ax, $dst|$dst, AL}", [], IIC_MOV_MEM>, OpSize,
+ "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>, OpSize,
Requires<[In32BitMode]>;
def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins),
- "mov{l}\t{%eax, $dst|$dst, EAX}", [], IIC_MOV_MEM>,
+ "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
Requires<[In32BitMode]>;
}
+}
-// FIXME: These definitions are utterly broken
-// Just leave them commented out for now because they're useless outside
-// of the large code model, and most compilers won't generate the instructions
-// in question.
-/*
-def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
- "mov{q}\t{$src, %rax|RAX, $src}", []>;
-def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
- "mov{q}\t{$src, %rax|RAX, $src}", []>;
-def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
- "mov{q}\t{%rax, $dst|$dst, RAX}", []>;
-def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
- "mov{q}\t{%rax, $dst|$dst, RAX}", []>;
-*/
-
+// These forms all have full 64-bit absolute addresses in their instructions
+// and use the movabs mnemonic to indicate this specific form.
+let mayLoad = 1 in {
+def MOV64o8a : RIi64_NOREX<0xA0, RawFrm, (outs), (ins offset8:$src),
+ "movabs{b}\t{$src, %al|al, $src}", []>,
+ Requires<[In64BitMode]>;
+def MOV64o16a : RIi64_NOREX<0xA1, RawFrm, (outs), (ins offset16:$src),
+ "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize,
+ Requires<[In64BitMode]>;
+def MOV64o32a : RIi64_NOREX<0xA1, RawFrm, (outs), (ins offset32:$src),
+ "movabs{l}\t{$src, %eax|eax, $src}", []>,
+ Requires<[In64BitMode]>;
+def MOV64o64a : RIi64<0xA1, RawFrm, (outs), (ins offset64:$src),
+ "movabs{q}\t{$src, %rax|rax, $src}", []>,
+ Requires<[In64BitMode]>;
+}
+
+let mayStore = 1 in {
+def MOV64ao8 : RIi64_NOREX<0xA2, RawFrm, (outs offset8:$dst), (ins),
+ "movabs{b}\t{%al, $dst|$dst, al}", []>,
+ Requires<[In64BitMode]>;
+def MOV64ao16 : RIi64_NOREX<0xA3, RawFrm, (outs offset16:$dst), (ins),
+ "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize,
+ Requires<[In64BitMode]>;
+def MOV64ao32 : RIi64_NOREX<0xA3, RawFrm, (outs offset32:$dst), (ins),
+ "movabs{l}\t{%eax, $dst|$dst, eax}", []>,
+ Requires<[In64BitMode]>;
+def MOV64ao64 : RIi64<0xA3, RawFrm, (outs offset64:$dst), (ins),
+ "movabs{q}\t{%rax, $dst|$dst, rax}", []>,
+ Requires<[In64BitMode]>;
+}
+} // hasSideEffects = 0
let isCodeGenOnly = 1, hasSideEffects = 0, SchedRW = [WriteMove] in {
def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
@@ -1127,7 +1229,7 @@ def MOV8rr_NOREX : I<0x88, MRMDestReg,
(outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
"mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], IIC_MOV>,
Sched<[WriteMove]>;
-let mayStore = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def MOV8mr_NOREX : I<0x88, MRMDestMem,
(outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
"mov{b}\t{$src, $dst|$dst, $src} # NOREX", [],
@@ -1408,17 +1510,17 @@ def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
// Swap between EAX and other registers.
def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src),
- "xchg{w}\t{$src, %ax|AX, $src}", [], IIC_XCHG_REG>, OpSize;
+ "xchg{w}\t{$src, %ax|ax, $src}", [], IIC_XCHG_REG>, OpSize;
def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src),
- "xchg{l}\t{$src, %eax|EAX, $src}", [], IIC_XCHG_REG>,
+ "xchg{l}\t{$src, %eax|eax, $src}", [], IIC_XCHG_REG>,
Requires<[In32BitMode]>;
// Uses GR32_NOAX in 64-bit mode to prevent encoding using the 0x90 NOP encoding.
// xchg %eax, %eax needs to clear upper 32-bits of RAX so is not a NOP.
def XCHG32ar64 : I<0x90, AddRegFrm, (outs), (ins GR32_NOAX:$src),
- "xchg{l}\t{$src, %eax|EAX, $src}", [], IIC_XCHG_REG>,
+ "xchg{l}\t{$src, %eax|eax, $src}", [], IIC_XCHG_REG>,
Requires<[In64BitMode]>;
def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
- "xchg{q}\t{$src, %rax|RAX, $src}", [], IIC_XCHG_REG>;
+ "xchg{q}\t{$src, %rax|rax, $src}", [], IIC_XCHG_REG>;
} // SchedRW
let SchedRW = [WriteALU] in {
@@ -1768,6 +1870,30 @@ let Predicates = [HasBMI2], Defs = [EFLAGS] in {
int_x86_bmi_bzhi_64, loadi64>, VEX_W;
}
+def : Pat<(X86bzhi GR32:$src1, GR8:$src2),
+ (BZHI32rr GR32:$src1,
+ (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+def : Pat<(X86bzhi (loadi32 addr:$src1), GR8:$src2),
+ (BZHI32rm addr:$src1,
+ (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+def : Pat<(X86bzhi GR64:$src1, GR8:$src2),
+ (BZHI64rr GR64:$src1,
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+def : Pat<(X86bzhi (loadi64 addr:$src1), GR8:$src2),
+ (BZHI64rm addr:$src1,
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+
+let Predicates = [HasBMI] in {
+ def : Pat<(X86bextr GR32:$src1, GR32:$src2),
+ (BEXTR32rr GR32:$src1, GR32:$src2)>;
+ def : Pat<(X86bextr (loadi32 addr:$src1), GR32:$src2),
+ (BEXTR32rm addr:$src1, GR32:$src2)>;
+ def : Pat<(X86bextr GR64:$src1, GR64:$src2),
+ (BEXTR64rr GR64:$src1, GR64:$src2)>;
+ def : Pat<(X86bextr (loadi64 addr:$src1), GR64:$src2),
+ (BEXTR64rm addr:$src1, GR64:$src2)>;
+} // HasBMI
+
multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
X86MemOperand x86memop, Intrinsic Int,
PatFrag ld_frag> {
@@ -1792,6 +1918,134 @@ let Predicates = [HasBMI2] in {
}
//===----------------------------------------------------------------------===//
+// TBM Instructions
+//
+let Predicates = [HasTBM], Defs = [EFLAGS] in {
+
+multiclass tbm_ternary_imm_intr<bits<8> opc, RegisterClass RC, string OpcodeStr,
+ X86MemOperand x86memop, PatFrag ld_frag,
+ Intrinsic Int, Operand immtype,
+ SDPatternOperator immoperator> {
+ def ri : Ii32<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, immtype:$cntl),
+ !strconcat(OpcodeStr,
+ "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
+ [(set RC:$dst, (Int RC:$src1, immoperator:$cntl))]>,
+ XOP, XOPA, VEX;
+ def mi : Ii32<opc, MRMSrcMem, (outs RC:$dst),
+ (ins x86memop:$src1, immtype:$cntl),
+ !strconcat(OpcodeStr,
+ "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
+ [(set RC:$dst, (Int (ld_frag addr:$src1), immoperator:$cntl))]>,
+ XOP, XOPA, VEX;
+}
+
+defm BEXTRI32 : tbm_ternary_imm_intr<0x10, GR32, "bextr", i32mem, loadi32,
+ int_x86_tbm_bextri_u32, i32imm, imm>;
+defm BEXTRI64 : tbm_ternary_imm_intr<0x10, GR64, "bextr", i64mem, loadi64,
+ int_x86_tbm_bextri_u64, i64i32imm,
+ i64immSExt32>, VEX_W;
+
+multiclass tbm_binary_rm<bits<8> opc, Format FormReg, Format FormMem,
+ RegisterClass RC, string OpcodeStr,
+ X86MemOperand x86memop, PatFrag ld_frag> {
+let hasSideEffects = 0 in {
+ def rr : I<opc, FormReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
+ []>, XOP, XOP9, VEX_4V;
+ let mayLoad = 1 in
+ def rm : I<opc, FormMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
+ []>, XOP, XOP9, VEX_4V;
+}
+}
+
+multiclass tbm_binary_intr<bits<8> opc, string OpcodeStr,
+ Format FormReg, Format FormMem> {
+ defm NAME#32 : tbm_binary_rm<opc, FormReg, FormMem, GR32, OpcodeStr, i32mem,
+ loadi32>;
+ defm NAME#64 : tbm_binary_rm<opc, FormReg, FormMem, GR64, OpcodeStr, i64mem,
+ loadi64>, VEX_W;
+}
+
+defm BLCFILL : tbm_binary_intr<0x01, "blcfill", MRM1r, MRM1m>;
+defm BLCI : tbm_binary_intr<0x02, "blci", MRM6r, MRM6m>;
+defm BLCIC : tbm_binary_intr<0x01, "blcic", MRM5r, MRM5m>;
+defm BLCMSK : tbm_binary_intr<0x02, "blcmsk", MRM1r, MRM1m>;
+defm BLCS : tbm_binary_intr<0x01, "blcs", MRM3r, MRM3m>;
+defm BLSFILL : tbm_binary_intr<0x01, "blsfill", MRM2r, MRM2m>;
+defm BLSIC : tbm_binary_intr<0x01, "blsic", MRM6r, MRM6m>;
+defm T1MSKC : tbm_binary_intr<0x01, "t1mskc", MRM7r, MRM7m>;
+defm TZMSK : tbm_binary_intr<0x01, "tzmsk", MRM4r, MRM4m>;
+} // HasTBM, EFLAGS
+
+//===----------------------------------------------------------------------===//
+// Pattern fragments to auto generate TBM instructions.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasTBM] in {
+ def : Pat<(X86bextr GR32:$src1, (i32 imm:$src2)),
+ (BEXTRI32ri GR32:$src1, imm:$src2)>;
+ def : Pat<(X86bextr (loadi32 addr:$src1), (i32 imm:$src2)),
+ (BEXTRI32mi addr:$src1, imm:$src2)>;
+ def : Pat<(X86bextr GR64:$src1, i64immSExt32:$src2),
+ (BEXTRI64ri GR64:$src1, i64immSExt32:$src2)>;
+ def : Pat<(X86bextr (loadi64 addr:$src1), i64immSExt32:$src2),
+ (BEXTRI64mi addr:$src1, i64immSExt32:$src2)>;
+
+ // FIXME: patterns for the load versions are not implemented
+ def : Pat<(and GR32:$src, (add GR32:$src, 1)),
+ (BLCFILL32rr GR32:$src)>;
+ def : Pat<(and GR64:$src, (add GR64:$src, 1)),
+ (BLCFILL64rr GR64:$src)>;
+
+ def : Pat<(or GR32:$src, (not (add GR32:$src, 1))),
+ (BLCI32rr GR32:$src)>;
+ def : Pat<(or GR64:$src, (not (add GR64:$src, 1))),
+ (BLCI64rr GR64:$src)>;
+
+ // Extra patterns because opt can optimize the above patterns to this.
+ def : Pat<(or GR32:$src, (sub -2, GR32:$src)),
+ (BLCI32rr GR32:$src)>;
+ def : Pat<(or GR64:$src, (sub -2, GR64:$src)),
+ (BLCI64rr GR64:$src)>;
+
+ def : Pat<(and (not GR32:$src), (add GR32:$src, 1)),
+ (BLCIC32rr GR32:$src)>;
+ def : Pat<(and (not GR64:$src), (add GR64:$src, 1)),
+ (BLCIC64rr GR64:$src)>;
+
+ def : Pat<(xor GR32:$src, (add GR32:$src, 1)),
+ (BLCMSK32rr GR32:$src)>;
+ def : Pat<(xor GR64:$src, (add GR64:$src, 1)),
+ (BLCMSK64rr GR64:$src)>;
+
+ def : Pat<(or GR32:$src, (add GR32:$src, 1)),
+ (BLCS32rr GR32:$src)>;
+ def : Pat<(or GR64:$src, (add GR64:$src, 1)),
+ (BLCS64rr GR64:$src)>;
+
+ def : Pat<(or GR32:$src, (add GR32:$src, -1)),
+ (BLSFILL32rr GR32:$src)>;
+ def : Pat<(or GR64:$src, (add GR64:$src, -1)),
+ (BLSFILL64rr GR64:$src)>;
+
+ def : Pat<(or (not GR32:$src), (add GR32:$src, -1)),
+ (BLSIC32rr GR32:$src)>;
+ def : Pat<(or (not GR64:$src), (add GR64:$src, -1)),
+ (BLSIC64rr GR64:$src)>;
+
+ def : Pat<(or (not GR32:$src), (add GR32:$src, 1)),
+ (T1MSKC32rr GR32:$src)>;
+ def : Pat<(or (not GR64:$src), (add GR64:$src, 1)),
+ (T1MSKC64rr GR64:$src)>;
+
+ def : Pat<(and (not GR32:$src), (add GR32:$src, -1)),
+ (TZMSK32rr GR32:$src)>;
+ def : Pat<(and (not GR64:$src), (add GR64:$src, -1)),
+ (TZMSK64rr GR64:$src)>;
+} // HasTBM
+
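For reference, scalar C equivalents of the TBM selection patterns above. This
is only a sketch that restates the 32-bit DAG patterns in plain C (the 64-bit
forms are analogous); it is not a normative description of the ISA.

    #include <stdint.h>

    static uint32_t blcfill(uint32_t x) { return  x & (x + 1); }  /* BLCFILL32rr */
    static uint32_t blci(uint32_t x)    { return  x | ~(x + 1); } /* BLCI32rr    */
    static uint32_t blcic(uint32_t x)   { return ~x & (x + 1); }  /* BLCIC32rr   */
    static uint32_t blcmsk(uint32_t x)  { return  x ^ (x + 1); }  /* BLCMSK32rr  */
    static uint32_t blcs(uint32_t x)    { return  x | (x + 1); }  /* BLCS32rr    */
    static uint32_t blsfill(uint32_t x) { return  x | (x - 1); }  /* BLSFILL32rr */
    static uint32_t blsic(uint32_t x)   { return ~x | (x - 1); }  /* BLSIC32rr   */
    static uint32_t t1mskc(uint32_t x)  { return ~x | (x + 1); }  /* T1MSKC32rr  */
    static uint32_t tzmsk(uint32_t x)   { return ~x & (x - 1); }  /* TZMSK32rr   */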
+//===----------------------------------------------------------------------===//
// Subsystems.
//===----------------------------------------------------------------------===//
@@ -1815,6 +2069,7 @@ include "X86InstrXOP.td"
// SSE, MMX and 3DNow! vector support.
include "X86InstrSSE.td"
+include "X86InstrAVX512.td"
include "X86InstrMMX.td"
include "X86Instr3DNow.td"
@@ -1867,6 +2122,9 @@ def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushfd", "pushfl", "att">;
+def : MnemonicAlias<"popad", "popa", "intel">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"pushad", "pusha", "intel">, Requires<[In32BitMode]>;
+
def : MnemonicAlias<"repe", "rep", "att">;
def : MnemonicAlias<"repz", "rep", "att">;
def : MnemonicAlias<"repnz", "repne", "att">;
@@ -1919,29 +2177,31 @@ def : MnemonicAlias<"fucomip", "fucompi", "att">;
def : MnemonicAlias<"fwait", "wait", "att">;
-class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond>
+class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond,
+ string VariantName>
: MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
- !strconcat(Prefix, NewCond, Suffix)>;
+ !strconcat(Prefix, NewCond, Suffix), VariantName>;
/// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
/// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
/// example "setz" -> "sete".
-multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix> {
- def C : CondCodeAlias<Prefix, Suffix, "c", "b">; // setc -> setb
- def Z : CondCodeAlias<Prefix, Suffix, "z" , "e">; // setz -> sete
- def NA : CondCodeAlias<Prefix, Suffix, "na", "be">; // setna -> setbe
- def NB : CondCodeAlias<Prefix, Suffix, "nb", "ae">; // setnb -> setae
- def NC : CondCodeAlias<Prefix, Suffix, "nc", "ae">; // setnc -> setae
- def NG : CondCodeAlias<Prefix, Suffix, "ng", "le">; // setng -> setle
- def NL : CondCodeAlias<Prefix, Suffix, "nl", "ge">; // setnl -> setge
- def NZ : CondCodeAlias<Prefix, Suffix, "nz", "ne">; // setnz -> setne
- def PE : CondCodeAlias<Prefix, Suffix, "pe", "p">; // setpe -> setp
- def PO : CondCodeAlias<Prefix, Suffix, "po", "np">; // setpo -> setnp
-
- def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b">; // setnae -> setb
- def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a">; // setnbe -> seta
- def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l">; // setnge -> setl
- def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g">; // setnle -> setg
+multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix,
+ string V = ""> {
+ def C : CondCodeAlias<Prefix, Suffix, "c", "b", V>; // setc -> setb
+ def Z : CondCodeAlias<Prefix, Suffix, "z" , "e", V>; // setz -> sete
+ def NA : CondCodeAlias<Prefix, Suffix, "na", "be", V>; // setna -> setbe
+ def NB : CondCodeAlias<Prefix, Suffix, "nb", "ae", V>; // setnb -> setae
+ def NC : CondCodeAlias<Prefix, Suffix, "nc", "ae", V>; // setnc -> setae
+ def NG : CondCodeAlias<Prefix, Suffix, "ng", "le", V>; // setng -> setle
+ def NL : CondCodeAlias<Prefix, Suffix, "nl", "ge", V>; // setnl -> setge
+ def NZ : CondCodeAlias<Prefix, Suffix, "nz", "ne", V>; // setnz -> setne
+ def PE : CondCodeAlias<Prefix, Suffix, "pe", "p", V>; // setpe -> setp
+ def PO : CondCodeAlias<Prefix, Suffix, "po", "np", V>; // setpo -> setnp
+
+ def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b", V>; // setnae -> setb
+ def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a", V>; // setnbe -> seta
+ def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l", V>; // setnge -> setl
+ def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g", V>; // setnle -> setg
}
// Aliases for set<CC>
@@ -1949,9 +2209,11 @@ defm : IntegerCondCodeMnemonicAlias<"set", "">;
// Aliases for j<CC>
defm : IntegerCondCodeMnemonicAlias<"j", "">;
// Aliases for cmov<CC>{w,l,q}
-defm : IntegerCondCodeMnemonicAlias<"cmov", "w">;
-defm : IntegerCondCodeMnemonicAlias<"cmov", "l">;
-defm : IntegerCondCodeMnemonicAlias<"cmov", "q">;
+defm : IntegerCondCodeMnemonicAlias<"cmov", "w", "att">;
+defm : IntegerCondCodeMnemonicAlias<"cmov", "l", "att">;
+defm : IntegerCondCodeMnemonicAlias<"cmov", "q", "att">;
+// No size suffix for intel-style asm.
+defm : IntegerCondCodeMnemonicAlias<"cmov", "", "intel">;
//===----------------------------------------------------------------------===//
@@ -1963,75 +2225,83 @@ def : InstAlias<"aad", (AAD8i8 10)>;
def : InstAlias<"aam", (AAM8i8 10)>;
// Disambiguate the mem/imm form of bt-without-a-suffix as btl.
-def : InstAlias<"bt $imm, $mem", (BT32mi8 i32mem:$mem, i32i8imm:$imm)>;
+// Likewise for btc/btr/bts.
+def : InstAlias<"bt {$imm, $mem|$mem, $imm}",
+ (BT32mi8 i32mem:$mem, i32i8imm:$imm), 0>;
+def : InstAlias<"btc {$imm, $mem|$mem, $imm}",
+ (BTC32mi8 i32mem:$mem, i32i8imm:$imm), 0>;
+def : InstAlias<"btr {$imm, $mem|$mem, $imm}",
+ (BTR32mi8 i32mem:$mem, i32i8imm:$imm), 0>;
+def : InstAlias<"bts {$imm, $mem|$mem, $imm}",
+ (BTS32mi8 i32mem:$mem, i32i8imm:$imm), 0>;
// clr aliases.
-def : InstAlias<"clrb $reg", (XOR8rr GR8 :$reg, GR8 :$reg)>;
-def : InstAlias<"clrw $reg", (XOR16rr GR16:$reg, GR16:$reg)>;
-def : InstAlias<"clrl $reg", (XOR32rr GR32:$reg, GR32:$reg)>;
-def : InstAlias<"clrq $reg", (XOR64rr GR64:$reg, GR64:$reg)>;
+def : InstAlias<"clrb $reg", (XOR8rr GR8 :$reg, GR8 :$reg), 0>;
+def : InstAlias<"clrw $reg", (XOR16rr GR16:$reg, GR16:$reg), 0>;
+def : InstAlias<"clrl $reg", (XOR32rr GR32:$reg, GR32:$reg), 0>;
+def : InstAlias<"clrq $reg", (XOR64rr GR64:$reg, GR64:$reg), 0>;
// div and idiv aliases for explicit A register.
-def : InstAlias<"divb $src, %al", (DIV8r GR8 :$src)>;
-def : InstAlias<"divw $src, %ax", (DIV16r GR16:$src)>;
-def : InstAlias<"divl $src, %eax", (DIV32r GR32:$src)>;
-def : InstAlias<"divq $src, %rax", (DIV64r GR64:$src)>;
-def : InstAlias<"divb $src, %al", (DIV8m i8mem :$src)>;
-def : InstAlias<"divw $src, %ax", (DIV16m i16mem:$src)>;
-def : InstAlias<"divl $src, %eax", (DIV32m i32mem:$src)>;
-def : InstAlias<"divq $src, %rax", (DIV64m i64mem:$src)>;
-def : InstAlias<"idivb $src, %al", (IDIV8r GR8 :$src)>;
-def : InstAlias<"idivw $src, %ax", (IDIV16r GR16:$src)>;
-def : InstAlias<"idivl $src, %eax", (IDIV32r GR32:$src)>;
-def : InstAlias<"idivq $src, %rax", (IDIV64r GR64:$src)>;
-def : InstAlias<"idivb $src, %al", (IDIV8m i8mem :$src)>;
-def : InstAlias<"idivw $src, %ax", (IDIV16m i16mem:$src)>;
-def : InstAlias<"idivl $src, %eax", (IDIV32m i32mem:$src)>;
-def : InstAlias<"idivq $src, %rax", (IDIV64m i64mem:$src)>;
+def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>;
+def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16r GR16:$src)>;
+def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32r GR32:$src)>;
+def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64r GR64:$src)>;
+def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8m i8mem :$src)>;
+def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16m i16mem:$src)>;
+def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32m i32mem:$src)>;
+def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64m i64mem:$src)>;
+def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8r GR8 :$src)>;
+def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16r GR16:$src)>;
+def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32r GR32:$src)>;
+def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64r GR64:$src)>;
+def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8m i8mem :$src)>;
+def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16m i16mem:$src)>;
+def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32m i32mem:$src)>;
+def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64m i64mem:$src)>;
// Various unary fpstack operations default to operating on ST1.
// For example, "fxch" -> "fxch %st(1)"
def : InstAlias<"faddp", (ADD_FPrST0 ST1), 0>;
-def : InstAlias<"fsubp", (SUBR_FPrST0 ST1)>;
-def : InstAlias<"fsubrp", (SUB_FPrST0 ST1)>;
-def : InstAlias<"fmulp", (MUL_FPrST0 ST1)>;
-def : InstAlias<"fdivp", (DIVR_FPrST0 ST1)>;
-def : InstAlias<"fdivrp", (DIV_FPrST0 ST1)>;
-def : InstAlias<"fxch", (XCH_F ST1)>;
-def : InstAlias<"fcom", (COM_FST0r ST1)>;
-def : InstAlias<"fcomp", (COMP_FST0r ST1)>;
-def : InstAlias<"fcomi", (COM_FIr ST1)>;
-def : InstAlias<"fcompi", (COM_FIPr ST1)>;
-def : InstAlias<"fucom", (UCOM_Fr ST1)>;
-def : InstAlias<"fucomp", (UCOM_FPr ST1)>;
-def : InstAlias<"fucomi", (UCOM_FIr ST1)>;
-def : InstAlias<"fucompi", (UCOM_FIPr ST1)>;
+def : InstAlias<"fsub{|r}p", (SUBR_FPrST0 ST1), 0>;
+def : InstAlias<"fsub{r|}p", (SUB_FPrST0 ST1), 0>;
+def : InstAlias<"fmulp", (MUL_FPrST0 ST1), 0>;
+def : InstAlias<"fdiv{|r}p", (DIVR_FPrST0 ST1), 0>;
+def : InstAlias<"fdiv{r|}p", (DIV_FPrST0 ST1), 0>;
+def : InstAlias<"fxch", (XCH_F ST1), 0>;
+def : InstAlias<"fcom", (COM_FST0r ST1), 0>;
+def : InstAlias<"fcomp", (COMP_FST0r ST1), 0>;
+def : InstAlias<"fcomi", (COM_FIr ST1), 0>;
+def : InstAlias<"fcompi", (COM_FIPr ST1), 0>;
+def : InstAlias<"fucom", (UCOM_Fr ST1), 0>;
+def : InstAlias<"fucomp", (UCOM_FPr ST1), 0>;
+def : InstAlias<"fucomi", (UCOM_FIr ST1), 0>;
+def : InstAlias<"fucompi", (UCOM_FIPr ST1), 0>;
// Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
// For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
// instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// gas.
multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> {
- def : InstAlias<!strconcat(Mnemonic, " $op, %st(0)"),
+ def : InstAlias<!strconcat(Mnemonic, "\t{$op, %st(0)|st(0), $op}"),
(Inst RST:$op), EmitAlias>;
- def : InstAlias<!strconcat(Mnemonic, " %st(0), %st(0)"),
+ def : InstAlias<!strconcat(Mnemonic, "\t{%st(0), %st(0)|st(0), st(0)}"),
(Inst ST0), EmitAlias>;
}
defm : FpUnaryAlias<"fadd", ADD_FST0r>;
defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>;
defm : FpUnaryAlias<"fsub", SUB_FST0r>;
-defm : FpUnaryAlias<"fsubp", SUBR_FPrST0>;
+defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0>;
defm : FpUnaryAlias<"fsubr", SUBR_FST0r>;
-defm : FpUnaryAlias<"fsubrp", SUB_FPrST0>;
+defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0>;
defm : FpUnaryAlias<"fmul", MUL_FST0r>;
defm : FpUnaryAlias<"fmulp", MUL_FPrST0>;
defm : FpUnaryAlias<"fdiv", DIV_FST0r>;
-defm : FpUnaryAlias<"fdivp", DIVR_FPrST0>;
+defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0>;
defm : FpUnaryAlias<"fdivr", DIVR_FST0r>;
-defm : FpUnaryAlias<"fdivrp", DIV_FPrST0>;
+defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0>;
defm : FpUnaryAlias<"fcomi", COM_FIr, 0>;
defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>;
defm : FpUnaryAlias<"fcompi", COM_FIPr>;
@@ -2041,16 +2311,16 @@ defm : FpUnaryAlias<"fucompi", UCOM_FIPr>;
// Handle "f{mulp,addp} st(0), $op" the same as "f{mulp,addp} $op", since they
// commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
// solely because gas supports it.
-def : InstAlias<"faddp %st(0), $op", (ADD_FPrST0 RST:$op), 0>;
-def : InstAlias<"fmulp %st(0), $op", (MUL_FPrST0 RST:$op)>;
-def : InstAlias<"fsubp %st(0), $op", (SUBR_FPrST0 RST:$op)>;
-def : InstAlias<"fsubrp %st(0), $op", (SUB_FPrST0 RST:$op)>;
-def : InstAlias<"fdivp %st(0), $op", (DIVR_FPrST0 RST:$op)>;
-def : InstAlias<"fdivrp %st(0), $op", (DIV_FPrST0 RST:$op)>;
+def : InstAlias<"faddp\t{%st(0), $op|$op, st(0)}", (ADD_FPrST0 RST:$op), 0>;
+def : InstAlias<"fmulp\t{%st(0), $op|$op, st(0)}", (MUL_FPrST0 RST:$op)>;
+def : InstAlias<"fsub{|r}p\t{%st(0), $op|$op, st(0)}", (SUBR_FPrST0 RST:$op)>;
+def : InstAlias<"fsub{r|}p\t{%st(0), $op|$op, st(0)}", (SUB_FPrST0 RST:$op)>;
+def : InstAlias<"fdiv{|r}p\t{%st(0), $op|$op, st(0)}", (DIVR_FPrST0 RST:$op)>;
+def : InstAlias<"fdiv{r|}p\t{%st(0), $op|$op, st(0)}", (DIV_FPrST0 RST:$op)>;
// We accept "fnstsw %eax" even though it only writes %ax.
-def : InstAlias<"fnstsw %eax", (FNSTSW16r)>;
-def : InstAlias<"fnstsw %al" , (FNSTSW16r)>;
+def : InstAlias<"fnstsw\t{%eax|eax}", (FNSTSW16r)>;
+def : InstAlias<"fnstsw\t{%al|al}" , (FNSTSW16r)>;
def : InstAlias<"fnstsw" , (FNSTSW16r)>;
// lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
@@ -2069,12 +2339,12 @@ def : InstAlias<"imulq $imm, $r",(IMUL64rri32 GR64:$r, GR64:$r,i64i32imm:$imm)>;
def : InstAlias<"imulq $imm, $r", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm)>;
// inb %dx -> inb %al, %dx
-def : InstAlias<"inb %dx", (IN8rr)>;
-def : InstAlias<"inw %dx", (IN16rr)>;
-def : InstAlias<"inl %dx", (IN32rr)>;
-def : InstAlias<"inb $port", (IN8ri i8imm:$port)>;
-def : InstAlias<"inw $port", (IN16ri i8imm:$port)>;
-def : InstAlias<"inl $port", (IN32ri i8imm:$port)>;
+def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
+def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
+def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>;
+def : InstAlias<"inb\t$port", (IN8ri i8imm:$port), 0>;
+def : InstAlias<"inw\t$port", (IN16ri i8imm:$port), 0>;
+def : InstAlias<"inl\t$port", (IN32ri i8imm:$port), 0>;
// jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
@@ -2102,7 +2372,7 @@ def : InstAlias<"movq $src, $dst",
// movsd with no operands (as opposed to the SSE scalar move of a double) is an
// alias for movsl. (as in rep; movsd)
-def : InstAlias<"movsd", (MOVSD)>;
+def : InstAlias<"movsd", (MOVSD), 0>;
// movsx aliases
def : InstAlias<"movsx $src, $dst", (MOVSX16rr8 GR16:$dst, GR8:$src), 0>;
@@ -2123,12 +2393,12 @@ def : InstAlias<"movzx $src, $dst", (MOVZX64rr16_Q GR64:$dst, GR16:$src), 0>;
// Note: No GR32->GR64 movzx form.
// outb %dx -> outb %al, %dx
-def : InstAlias<"outb %dx", (OUT8rr)>;
-def : InstAlias<"outw %dx", (OUT16rr)>;
-def : InstAlias<"outl %dx", (OUT32rr)>;
-def : InstAlias<"outb $port", (OUT8ir i8imm:$port)>;
-def : InstAlias<"outw $port", (OUT16ir i8imm:$port)>;
-def : InstAlias<"outl $port", (OUT32ir i8imm:$port)>;
+def : InstAlias<"outb\t{%dx|dx}", (OUT8rr), 0>;
+def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>;
+def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>;
+def : InstAlias<"outb\t$port", (OUT8ir i8imm:$port), 0>;
+def : InstAlias<"outw\t$port", (OUT16ir i8imm:$port), 0>;
+def : InstAlias<"outl\t$port", (OUT32ir i8imm:$port), 0>;
// 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
// effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
@@ -2136,19 +2406,19 @@ def : InstAlias<"outl $port", (OUT32ir i8imm:$port)>;
def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem)>;
// shld/shrd op,op -> shld op, op, CL
-def : InstAlias<"shldw $r2, $r1", (SHLD16rrCL GR16:$r1, GR16:$r2)>;
-def : InstAlias<"shldl $r2, $r1", (SHLD32rrCL GR32:$r1, GR32:$r2)>;
-def : InstAlias<"shldq $r2, $r1", (SHLD64rrCL GR64:$r1, GR64:$r2)>;
-def : InstAlias<"shrdw $r2, $r1", (SHRD16rrCL GR16:$r1, GR16:$r2)>;
-def : InstAlias<"shrdl $r2, $r1", (SHRD32rrCL GR32:$r1, GR32:$r2)>;
-def : InstAlias<"shrdq $r2, $r1", (SHRD64rrCL GR64:$r1, GR64:$r2)>;
-
-def : InstAlias<"shldw $reg, $mem", (SHLD16mrCL i16mem:$mem, GR16:$reg)>;
-def : InstAlias<"shldl $reg, $mem", (SHLD32mrCL i32mem:$mem, GR32:$reg)>;
-def : InstAlias<"shldq $reg, $mem", (SHLD64mrCL i64mem:$mem, GR64:$reg)>;
-def : InstAlias<"shrdw $reg, $mem", (SHRD16mrCL i16mem:$mem, GR16:$reg)>;
-def : InstAlias<"shrdl $reg, $mem", (SHRD32mrCL i32mem:$mem, GR32:$reg)>;
-def : InstAlias<"shrdq $reg, $mem", (SHRD64mrCL i64mem:$mem, GR64:$reg)>;
+def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
+def : InstAlias<"shld{l}\t{$r2, $r1|$r1, $r2}", (SHLD32rrCL GR32:$r1, GR32:$r2), 0>;
+def : InstAlias<"shld{q}\t{$r2, $r1|$r1, $r2}", (SHLD64rrCL GR64:$r1, GR64:$r2), 0>;
+def : InstAlias<"shrd{w}\t{$r2, $r1|$r1, $r2}", (SHRD16rrCL GR16:$r1, GR16:$r2), 0>;
+def : InstAlias<"shrd{l}\t{$r2, $r1|$r1, $r2}", (SHRD32rrCL GR32:$r1, GR32:$r2), 0>;
+def : InstAlias<"shrd{q}\t{$r2, $r1|$r1, $r2}", (SHRD64rrCL GR64:$r1, GR64:$r2), 0>;
+
+def : InstAlias<"shld{w}\t{$reg, $mem|$mem, $reg}", (SHLD16mrCL i16mem:$mem, GR16:$reg), 0>;
+def : InstAlias<"shld{l}\t{$reg, $mem|$mem, $reg}", (SHLD32mrCL i32mem:$mem, GR32:$reg), 0>;
+def : InstAlias<"shld{q}\t{$reg, $mem|$mem, $reg}", (SHLD64mrCL i64mem:$mem, GR64:$reg), 0>;
+def : InstAlias<"shrd{w}\t{$reg, $mem|$mem, $reg}", (SHRD16mrCL i16mem:$mem, GR16:$reg), 0>;
+def : InstAlias<"shrd{l}\t{$reg, $mem|$mem, $reg}", (SHRD32mrCL i32mem:$mem, GR32:$reg), 0>;
+def : InstAlias<"shrd{q}\t{$reg, $mem|$mem, $reg}", (SHRD64mrCL i64mem:$mem, GR64:$reg), 0>;
/* FIXME: This is disabled because the asm matcher is currently incapable of
* matching a fixed immediate like $1.
@@ -2179,19 +2449,19 @@ defm : ShiftRotateByOneAlias<"ror", "ROR">;
FIXME */
// test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
-def : InstAlias<"testb $val, $mem", (TEST8rm GR8 :$val, i8mem :$mem)>;
-def : InstAlias<"testw $val, $mem", (TEST16rm GR16:$val, i16mem:$mem)>;
-def : InstAlias<"testl $val, $mem", (TEST32rm GR32:$val, i32mem:$mem)>;
-def : InstAlias<"testq $val, $mem", (TEST64rm GR64:$val, i64mem:$mem)>;
+def : InstAlias<"test{b}\t{$val, $mem|$mem, $val}", (TEST8rm GR8 :$val, i8mem :$mem)>;
+def : InstAlias<"test{w}\t{$val, $mem|$mem, $val}", (TEST16rm GR16:$val, i16mem:$mem)>;
+def : InstAlias<"test{l}\t{$val, $mem|$mem, $val}", (TEST32rm GR32:$val, i32mem:$mem)>;
+def : InstAlias<"test{q}\t{$val, $mem|$mem, $val}", (TEST64rm GR64:$val, i64mem:$mem)>;
// xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
-def : InstAlias<"xchgb $mem, $val", (XCHG8rm GR8 :$val, i8mem :$mem)>;
-def : InstAlias<"xchgw $mem, $val", (XCHG16rm GR16:$val, i16mem:$mem)>;
-def : InstAlias<"xchgl $mem, $val", (XCHG32rm GR32:$val, i32mem:$mem)>;
-def : InstAlias<"xchgq $mem, $val", (XCHG64rm GR64:$val, i64mem:$mem)>;
+def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}", (XCHG8rm GR8 :$val, i8mem :$mem)>;
+def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}", (XCHG16rm GR16:$val, i16mem:$mem)>;
+def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}", (XCHG32rm GR32:$val, i32mem:$mem)>;
+def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}", (XCHG64rm GR64:$val, i64mem:$mem)>;
// xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
-def : InstAlias<"xchgw %ax, $src", (XCHG16ar GR16:$src)>;
-def : InstAlias<"xchgl %eax, $src", (XCHG32ar GR32:$src)>, Requires<[In32BitMode]>;
-def : InstAlias<"xchgl %eax, $src", (XCHG32ar64 GR32_NOAX:$src)>, Requires<[In64BitMode]>;
-def : InstAlias<"xchgq %rax, $src", (XCHG64ar GR64:$src)>;
+def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src)>;
+def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src)>, Requires<[In32BitMode]>;
+def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar64 GR32_NOAX:$src)>, Requires<[In64BitMode]>;
+def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src)>;
diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td
index 49721df7c118..ba58143e89ec 100644
--- a/lib/Target/X86/X86InstrMMX.td
+++ b/lib/Target/X86/X86InstrMMX.td
@@ -189,13 +189,14 @@ multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
PatFrag ld_frag, string asm, Domain d> {
- def irr : PI<opc, MRMSrcReg, (outs DstRC:$dst),(ins DstRC:$src1, SrcRC:$src2),
- asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
- NoItinerary, d>;
- def irm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
- (ins DstRC:$src1, x86memop:$src2), asm,
- [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
- NoItinerary, d>;
+ def irr : MMXPI<opc, MRMSrcReg, (outs DstRC:$dst),
+ (ins DstRC:$src1, SrcRC:$src2), asm,
+ [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
+ NoItinerary, d>;
+ def irm : MMXPI<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins DstRC:$src1, x86memop:$src2), asm,
+ [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
+ NoItinerary, d>;
}
//===----------------------------------------------------------------------===//
@@ -203,7 +204,7 @@ multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
//===----------------------------------------------------------------------===//
def MMX_EMMS : MMXI<0x77, RawFrm, (outs), (ins), "emms",
- [(int_x86_mmx_emms)]>;
+ [(int_x86_mmx_emms)], IIC_MMX_EMMS>;
//===----------------------------------------------------------------------===//
// MMX Scalar Instructions
@@ -235,10 +236,10 @@ def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR64:$src),
(MMX_X86movd2w (x86mmx VR64:$src)))],
IIC_MMX_MOV_REG_MM>, Sched<[WriteMove]>;
-let neverHasSideEffects = 1 in
def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
"movd\t{$src, $dst|$dst, $src}",
- [], IIC_MMX_MOV_MM_RM>, Sched<[WriteMove]>;
+ [(set VR64:$dst, (bitconvert GR64:$src))],
+ IIC_MMX_MOV_MM_RM>, Sched<[WriteMove]>;
// These are 64 bit moves, but since the OS X assembler doesn't
// recognize a register-register movq, we write them as
@@ -249,10 +250,6 @@ def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg,
"movd\t{$src, $dst|$dst, $src}",
[(set GR64:$dst,
(bitconvert VR64:$src))], IIC_MMX_MOV_REG_MM>;
-def MMX_MOVD64rrv164 : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst,
- (bitconvert GR64:$src))], IIC_MMX_MOV_MM_RM>;
let neverHasSideEffects = 1 in
def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
"movq\t{$src, $dst|$dst, $src}", [],
@@ -288,7 +285,7 @@ def MMX_MOVQ2DQrr : MMXS2SIi8<0xD6, MRMSrcReg, (outs VR128:$dst),
(i64 (bitconvert (x86mmx VR64:$src))))))],
IIC_MMX_MOVQ_RR>;
-let neverHasSideEffects = 1 in
+let isCodeGenOnly = 1, hasSideEffects = 1 in {
def MMX_MOVQ2FR64rr: MMXS2SIi8<0xD6, MRMSrcReg, (outs FR64:$dst),
(ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}",
[], IIC_MMX_MOVQ_RR>;
@@ -296,6 +293,7 @@ def MMX_MOVQ2FR64rr: MMXS2SIi8<0xD6, MRMSrcReg, (outs FR64:$dst),
def MMX_MOVFR642Qrr: MMXSDIi8<0xD6, MRMSrcReg, (outs VR64:$dst),
(ins FR64:$src), "movdq2q\t{$src, $dst|$dst, $src}",
[], IIC_MMX_MOVQ_RR>;
+}
} // SchedRW
def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
@@ -303,21 +301,15 @@ def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
[(int_x86_mmx_movnt_dq addr:$dst, VR64:$src)],
IIC_MMX_MOVQ_RM>, Sched<[WriteStore]>;
-let AddedComplexity = 15 in
-// movd to MMX register zero-extends
-def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst,
- (x86mmx (X86vzmovl (x86mmx (scalar_to_vector GR32:$src)))))],
- IIC_MMX_MOV_MM_RM>, Sched<[WriteMove]>;
-let AddedComplexity = 20 in
-def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst),
- (ins i32mem:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst,
- (x86mmx (X86vzmovl (x86mmx
- (scalar_to_vector (loadi32 addr:$src))))))],
- IIC_MMX_MOV_MM_RM>, Sched<[WriteLoad]>;
+let Predicates = [HasMMX] in {
+ let AddedComplexity = 15 in
+ // movd to MMX register zero-extends
+ def : Pat<(x86mmx (X86vzmovl (x86mmx (scalar_to_vector GR32:$src)))),
+ (MMX_MOVD64rr GR32:$src)>;
+ let AddedComplexity = 20 in
+ def : Pat<(x86mmx (X86vzmovl (x86mmx (scalar_to_vector (loadi32 addr:$src))))),
+ (MMX_MOVD64rm addr:$src)>;
+}
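For context, a minimal scalar sketch (not part of the patch) of what the two
zero-extending movd patterns above select: the 32-bit source occupies the low
half of the 64-bit MMX register and the upper half is cleared, whether the
source comes from a GPR (MMX_MOVD64rr) or from memory (MMX_MOVD64rm).

    #include <stdint.h>

    /* Sketch of the zero-extending movd: register form and load form. */
    static uint64_t mmx_movd_from_gpr(uint32_t src)        { return (uint64_t)src; }
    static uint64_t mmx_movd_from_mem(const uint32_t *src) { return (uint64_t)*src; }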
// Arithmetic Instructions
defm MMX_PABSB : SS3I_unop_rm_int_mm<0x1C, "pabsb", int_x86_ssse3_pabs_b,
@@ -357,21 +349,21 @@ defm MMX_PHADDSW : SS3I_binop_rm_int_mm<0x03, "phaddsw",int_x86_ssse3_phadd_sw,
defm MMX_PSUBB : MMXI_binop_rm_int<0xF8, "psubb", int_x86_mmx_psub_b,
MMX_INTALU_ITINS>;
defm MMX_PSUBW : MMXI_binop_rm_int<0xF9, "psubw", int_x86_mmx_psub_w,
- MMX_INTALU_ITINS, 1>;
+ MMX_INTALU_ITINS>;
defm MMX_PSUBD : MMXI_binop_rm_int<0xFA, "psubd", int_x86_mmx_psub_d,
- MMX_INTALU_ITINS, 1>;
+ MMX_INTALU_ITINS>;
defm MMX_PSUBQ : MMXI_binop_rm_int<0xFB, "psubq", int_x86_mmx_psub_q,
- MMX_INTALUQ_ITINS, 1>;
+ MMX_INTALUQ_ITINS>;
defm MMX_PSUBSB : MMXI_binop_rm_int<0xE8, "psubsb" , int_x86_mmx_psubs_b,
- MMX_INTALU_ITINS, 1>;
+ MMX_INTALU_ITINS>;
defm MMX_PSUBSW : MMXI_binop_rm_int<0xE9, "psubsw" , int_x86_mmx_psubs_w,
- MMX_INTALU_ITINS, 1>;
+ MMX_INTALU_ITINS>;
defm MMX_PSUBUSB : MMXI_binop_rm_int<0xD8, "psubusb", int_x86_mmx_psubus_b,
- MMX_INTALU_ITINS, 1>;
+ MMX_INTALU_ITINS>;
defm MMX_PSUBUSW : MMXI_binop_rm_int<0xD9, "psubusw", int_x86_mmx_psubus_w,
- MMX_INTALU_ITINS, 1>;
+ MMX_INTALU_ITINS>;
defm MMX_PHSUBW : SS3I_binop_rm_int_mm<0x05, "phsubw", int_x86_ssse3_phsub_w,
MMX_PHADDSUBW>;
@@ -554,18 +546,18 @@ let Constraints = "$src1 = $dst" in {
// Extract / Insert
def MMX_PEXTRWirri: MMXIi8<0xC5, MRMSrcReg,
- (outs GR32:$dst), (ins VR64:$src1, i32i8imm:$src2),
- "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (int_x86_mmx_pextr_w VR64:$src1,
- (iPTR imm:$src2)))],
- IIC_MMX_PEXTR>, Sched<[WriteShuffle]>;
+ (outs GR32orGR64:$dst), (ins VR64:$src1, i32i8imm:$src2),
+ "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32orGR64:$dst, (int_x86_mmx_pextr_w VR64:$src1,
+ (iPTR imm:$src2)))],
+ IIC_MMX_PEXTR>, Sched<[WriteShuffle]>;
let Constraints = "$src1 = $dst" in {
def MMX_PINSRWirri : MMXIi8<0xC4, MRMSrcReg,
(outs VR64:$dst),
- (ins VR64:$src1, GR32:$src2, i32i8imm:$src3),
+ (ins VR64:$src1, GR32orGR64:$src2, i32i8imm:$src3),
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR64:$dst, (int_x86_mmx_pinsr_w VR64:$src1,
- GR32:$src2, (iPTR imm:$src3)))],
+ GR32orGR64:$src2, (iPTR imm:$src3)))],
IIC_MMX_PINSRW>, Sched<[WriteShuffle]>;
def MMX_PINSRWirmi : MMXIi8<0xC4, MRMSrcMem,
@@ -579,9 +571,10 @@ let Constraints = "$src1 = $dst" in {
}
// Mask creation
-def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src),
+def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
+ (ins VR64:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst,
+ [(set GR32orGR64:$dst,
(int_x86_mmx_pmovmskb VR64:$src))]>;
@@ -598,10 +591,10 @@ def : Pat<(x86mmx (MMX_X86movdq2q (loadv2i64 addr:$src))),
// Misc.
let SchedRW = [WriteShuffle] in {
let Uses = [EDI] in
-def MMX_MASKMOVQ : MMXI<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask),
- "maskmovq\t{$mask, $src|$src, $mask}",
- [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, EDI)],
- IIC_MMX_MASKMOV>;
+def MMX_MASKMOVQ : MMXI32<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask),
+ "maskmovq\t{$mask, $src|$src, $mask}",
+ [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, EDI)],
+ IIC_MMX_MASKMOV>;
let Uses = [RDI] in
def MMX_MASKMOVQ64: MMXI64<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask),
"maskmovq\t{$mask, $src|$src, $mask}",
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index cce938baafe1..a5debc025690 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -151,6 +151,34 @@ def SSE_MOVU_ITINS : OpndItins<
IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
>;
+def SSE_DPPD_ITINS : OpndItins<
+ IIC_SSE_DPPD_RR, IIC_SSE_DPPD_RM
+>;
+
+def SSE_DPPS_ITINS : OpndItins<
+ IIC_SSE_DPPS_RR, IIC_SSE_DPPD_RM
+>;
+
+def DEFAULT_ITINS : OpndItins<
+ IIC_ALU_NONMEM, IIC_ALU_MEM
+>;
+
+def SSE_EXTRACT_ITINS : OpndItins<
+ IIC_SSE_EXTRACTPS_RR, IIC_SSE_EXTRACTPS_RM
+>;
+
+def SSE_INSERT_ITINS : OpndItins<
+ IIC_SSE_INSERTPS_RR, IIC_SSE_INSERTPS_RM
+>;
+
+def SSE_MPSADBW_ITINS : OpndItins<
+ IIC_SSE_MPSADBW_RR, IIC_SSE_MPSADBW_RM
+>;
+
+def SSE_PMULLD_ITINS : OpndItins<
+ IIC_SSE_PMULLD_RR, IIC_SSE_PMULLD_RM
+>;
+
//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//
@@ -455,10 +483,10 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
-// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
-// is used instead. Register-to-register movss/movsd is not modeled as an
-// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
-// in terms of a copy, and just mentioned, we don't use movss/movsd for copies.
+// register copies because it's a partial register update; Register-to-register
+// movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
+// that the insert be implementable in terms of a copy, and, as just mentioned, we
+// don't use movss/movsd for copies.
//===----------------------------------------------------------------------===//
multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
@@ -526,7 +554,7 @@ let canFoldAsLoad = 1, isReMaterializable = 1 in {
}
// Patterns
-let Predicates = [HasAVX] in {
+let Predicates = [UseAVX] in {
let AddedComplexity = 15 in {
// Move scalar to XMM zero-extended, zeroing a VR128 then do a
// MOVS{S,D} to the lower bits.
@@ -1074,23 +1102,6 @@ let Predicates = [UseSSE1] in {
(MOVUPSmr addr:$dst, VR128:$src)>;
}
-// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
-// bits are disregarded. FIXME: Set encoding to pseudo!
-let neverHasSideEffects = 1, SchedRW = [WriteMove] in {
-def FsVMOVAPSrr : VPSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
- "movaps\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, VEX;
-def FsVMOVAPDrr : VPDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
- "movapd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, VEX;
-def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
- "movaps\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>;
-def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
- "movapd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>;
-}
-
// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
@@ -1103,15 +1114,15 @@ let isCodeGenOnly = 1 in {
"movapd\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (alignedloadfsf64 addr:$src))],
IIC_SSE_MOVA_P_RM>, VEX;
+ def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
+ "movaps\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
+ IIC_SSE_MOVA_P_RM>;
+ def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
+ "movapd\t{$src, $dst|$dst, $src}",
+ [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
+ IIC_SSE_MOVA_P_RM>;
}
-def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
- "movaps\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
- IIC_SSE_MOVA_P_RM>;
-def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
- "movapd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
- IIC_SSE_MOVA_P_RM>;
}
//===----------------------------------------------------------------------===//
@@ -1327,7 +1338,7 @@ let Predicates = [UseSSE2] in {
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//
-let AddedComplexity = 20 in {
+let AddedComplexity = 20, Predicates = [UseAVX] in {
def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
"movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -1358,7 +1369,7 @@ let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
IIC_SSE_MOV_LH>, Sched<[WriteShuffle]>;
}
-let Predicates = [HasAVX] in {
+let Predicates = [UseAVX] in {
// MOVLHPS patterns
def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
(VMOVLHPSrr VR128:$src1, VR128:$src2)>;
@@ -1440,7 +1451,7 @@ let neverHasSideEffects = 1 in {
multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
X86MemOperand x86memop, string asm> {
-let neverHasSideEffects = 1 in {
+let neverHasSideEffects = 1, Predicates = [UseAVX] in {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
!strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
Sched<[WriteCvtI2F]>;
@@ -1452,6 +1463,7 @@ let neverHasSideEffects = 1 in {
} // neverHasSideEffects = 1
}
+let Predicates = [UseAVX] in {
defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
"cvttss2si\t{$src, $dst|$dst, $src}",
SSE_CVT_SS2SI_32>,
@@ -1485,7 +1497,7 @@ def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
(VCVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
(VCVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
-
+}
// The assembler can recognize rr 64-bit instructions by seeing an rxx
// register, but the same isn't true when only using memory operands, so
// provide other assembly "l" and "q" forms to address this explicitly
@@ -1499,12 +1511,12 @@ defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">,
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
XD, VEX_4V, VEX_W, VEX_LIG;
-def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
+let Predicates = [UseAVX] in {
+ def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
(VCVTSI2SSrm FR64:$dst, FR64:$src1, i32mem:$src)>;
-def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
+ def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
(VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src)>;
-let Predicates = [HasAVX] in {
def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
(VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
@@ -1606,19 +1618,21 @@ multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
+let Predicates = [UseAVX] in {
defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si",
SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si",
SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
-
+}
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD, REX_W;
+let Predicates = [UseAVX] in {
defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
SSE_CVT_Scalar, 0>, XS, VEX_4V;
@@ -1633,7 +1647,7 @@ defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
SSE_CVT_Scalar, 0>, XD,
VEX_4V, VEX_W;
-
+}
let Constraints = "$src1 = $dst" in {
defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse_cvtsi2ss, i32mem, loadi32,
@@ -1652,6 +1666,7 @@ let Constraints = "$src1 = $dst" in {
/// SSE 1 Only
// Aliases for intrinsics
+let Predicates = [UseAVX] in {
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
ssmem, sse_load_f32, "cvttss2si",
SSE_CVT_SS2SI_32>, XS, VEX;
@@ -1666,6 +1681,7 @@ defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
"cvttsd2si", SSE_CVT_SD2SI>,
XD, VEX, VEX_W;
+}
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
ssmem, sse_load_f32, "cvttss2si",
SSE_CVT_SS2SI_32>, XS;
@@ -1679,13 +1695,14 @@ defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
"cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
+let Predicates = [UseAVX] in {
defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
ssmem, sse_load_f32, "cvtss2si",
SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
ssmem, sse_load_f32, "cvtss2si",
SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
-
+}
defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
ssmem, sse_load_f32, "cvtss2si",
SSE_CVT_SS2SI_32>, XS;
@@ -1707,6 +1724,7 @@ defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
SSEPackedSingle, SSE_CVT_PS>,
TB, Requires<[UseSSE2]>;
+let Predicates = [UseAVX] in {
def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
(VCVTSS2SIrr GR32:$dst, VR128:$src), 0>;
def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
@@ -1723,6 +1741,7 @@ def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
(VCVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
(VCVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
+}
def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
(CVTSS2SIrr GR32:$dst, VR128:$src), 0>;
@@ -1744,7 +1763,7 @@ def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
/// SSE 2 Only
// Convert scalar double to scalar single
-let neverHasSideEffects = 1 in {
+let neverHasSideEffects = 1, Predicates = [UseAVX] in {
def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
(ins FR64:$src1, FR64:$src2),
"cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
@@ -1760,7 +1779,7 @@ def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
}
def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
- Requires<[HasAVX]>;
+ Requires<[UseAVX]>;
def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
@@ -1778,27 +1797,27 @@ def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
- IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>,
+ IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[UseAVX]>,
Sched<[WriteCvtF2F]>;
def Int_VCVTSD2SSrm: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtsd2ss
VR128:$src1, sse_load_f64:$src2))],
- IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>,
+ IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[UseAVX]>,
Sched<[WriteCvtF2FLd, ReadAfterLd]>;
let Constraints = "$src1 = $dst" in {
def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>,
Sched<[WriteCvtF2F]>;
def Int_CVTSD2SSrm: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
- "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtsd2ss
VR128:$src1, sse_load_f64:$src2))],
IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
@@ -1807,7 +1826,7 @@ def Int_CVTSD2SSrm: I<0x5A, MRMSrcReg,
// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
-let neverHasSideEffects = 1 in {
+let neverHasSideEffects = 1, Predicates = [UseAVX] in {
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
(ins FR32:$src1, FR32:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -1824,16 +1843,16 @@ def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
}
def : Pat<(f64 (fextend FR32:$src)),
- (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[HasAVX]>;
+ (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(fextend (loadf32 addr:$src)),
- (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX]>;
+ (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX]>;
def : Pat<(extloadf32 addr:$src),
(VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
- Requires<[HasAVX, OptForSize]>;
+ Requires<[UseAVX, OptForSize]>;
def : Pat<(extloadf32 addr:$src),
(VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
- Requires<[HasAVX, OptForSpeed]>;
+ Requires<[UseAVX, OptForSpeed]>;
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
@@ -1861,14 +1880,14 @@ def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
- IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>,
+ IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[UseAVX]>,
Sched<[WriteCvtF2F]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
- IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>,
+ IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[UseAVX]>,
Sched<[WriteCvtF2FLd, ReadAfterLd]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
@@ -1895,7 +1914,7 @@ def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
+ (int_x86_sse2_cvtps2dq (loadv4f32 addr:$src)))],
IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
@@ -1905,7 +1924,7 @@ def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
- (int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)))],
+ (int_x86_avx_cvt_ps2dq_256 (loadv8f32 addr:$src)))],
IIC_SSE_CVT_PS_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
@@ -1934,7 +1953,7 @@ def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))]>, VEX,
+ (int_x86_sse2_cvtpd2dq (loadv2f64 addr:$src)))]>, VEX,
Sched<[WriteCvtF2ILd]>;
// YMM only
@@ -1946,7 +1965,7 @@ def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)))]>,
+ (int_x86_avx_cvt_pd2dq_256 (loadv4f64 addr:$src)))]>,
VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
(VCVTPD2DQYrr VR128:$dst, VR256:$src)>;
@@ -1972,7 +1991,7 @@ def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttps2dq
- (memopv4f32 addr:$src)))],
+ (loadv4f32 addr:$src)))],
IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
@@ -1982,7 +2001,7 @@ def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
- (memopv8f32 addr:$src)))],
+ (loadv8f32 addr:$src)))],
IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
Sched<[WriteCvtF2ILd]>;
@@ -1999,27 +2018,27 @@ def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
let Predicates = [HasAVX] in {
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
(VCVTDQ2PSrr VR128:$src)>;
- def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
+ def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
(VCVTDQ2PSrm addr:$src)>;
def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
(VCVTDQ2PSrr VR128:$src)>;
- def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
+ def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (loadv2i64 addr:$src))),
(VCVTDQ2PSrm addr:$src)>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
(VCVTTPS2DQrr VR128:$src)>;
- def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
+ def : Pat<(v4i32 (fp_to_sint (loadv4f32 addr:$src))),
(VCVTTPS2DQrm addr:$src)>;
def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
(VCVTDQ2PSYrr VR256:$src)>;
- def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (memopv4i64 addr:$src)))),
+ def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (loadv4i64 addr:$src)))),
(VCVTDQ2PSYrm addr:$src)>;
def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
(VCVTTPS2DQYrr VR256:$src)>;
- def : Pat<(v8i32 (fp_to_sint (memopv8f32 addr:$src))),
+ def : Pat<(v8i32 (fp_to_sint (loadv8f32 addr:$src))),
(VCVTTPS2DQYrm addr:$src)>;
}
@@ -2056,7 +2075,7 @@ def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttpd2dqx\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq
- (memopv2f64 addr:$src)))],
+ (loadv2f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2ILd]>;
// YMM only
@@ -2068,7 +2087,7 @@ def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)))],
+ (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
(VCVTTPD2DQYrr VR128:$dst, VR256:$src)>;
@@ -2076,7 +2095,7 @@ def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
let Predicates = [HasAVX] in {
def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
(VCVTTPD2DQYrr VR256:$src)>;
- def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
+ def : Pat<(v4i32 (fp_to_sint (loadv4f64 addr:$src))),
(VCVTTPD2DQYrm addr:$src)>;
} // Predicates = [HasAVX]
@@ -2110,7 +2129,7 @@ def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
- (int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)))],
+ (int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
IIC_SSE_CVT_PD_RM>, TB, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
}
@@ -2140,7 +2159,7 @@ def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvtdq2_pd_256
- (bitconvert (memopv2i64 addr:$src))))]>, VEX, VEX_L,
+ (bitconvert (loadv2i64 addr:$src))))]>, VEX, VEX_L,
Sched<[WriteCvtI2FLd]>;
def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
@@ -2162,7 +2181,7 @@ def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
let Predicates = [HasAVX] in {
def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
(VCVTDQ2PDYrr VR128:$src)>;
- def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
+ def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
(VCVTDQ2PDYrm addr:$src)>;
} // Predicates = [HasAVX]
@@ -2181,7 +2200,7 @@ def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2psx\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
+ (int_x86_sse2_cvtpd2ps (loadv2f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2FLd]>;
// YMM only
@@ -2193,7 +2212,7 @@ def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)))],
+ (int_x86_avx_cvt_pd2_ps_256 (loadv4f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
(VCVTPD2PSYrr VR128:$dst, VR256:$src)>;
@@ -2215,13 +2234,13 @@ def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
let Predicates = [HasAVX] in {
def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
(VCVTDQ2PSYrr VR256:$src)>;
- def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (memopv4i64 addr:$src))),
+ def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (loadv4i64 addr:$src))),
(VCVTDQ2PSYrm addr:$src)>;
// Match fround and fextend for 128/256-bit conversions
def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
(VCVTPD2PSrr VR128:$src)>;
- def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
+ def : Pat<(v4f32 (X86vfpround (loadv2f64 addr:$src))),
(VCVTPD2PSXrm addr:$src)>;
def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
(VCVTPD2PSYrr VR256:$src)>;
@@ -2299,7 +2318,7 @@ let Constraints = "$src1 = $dst" in {
defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmpsd, f64, loadf64,
"cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
"cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SSE_ALU_F32S>, // same latency as 32 bit compare
+ SSE_ALU_F64S>,
XD;
}
@@ -2334,7 +2353,7 @@ let Constraints = "$src1 = $dst" in {
SSE_ALU_F32S>, XS;
defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
"cmp${cc}sd\t{$src, $dst|$dst, $src}",
- SSE_ALU_F32S>, // same latency as f32
+ SSE_ALU_F64S>,
XD;
}
@@ -2342,90 +2361,88 @@ let Constraints = "$src1 = $dst" in {
// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
ValueType vt, X86MemOperand x86memop,
- PatFrag ld_frag, string OpcodeStr, Domain d> {
- def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
+ PatFrag ld_frag, string OpcodeStr> {
+ def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
[(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
- IIC_SSE_COMIS_RR, d>,
+ IIC_SSE_COMIS_RR>,
Sched<[WriteFAdd]>;
- def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
+ def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
[(set EFLAGS, (OpNode (vt RC:$src1),
(ld_frag addr:$src2)))],
- IIC_SSE_COMIS_RM, d>,
+ IIC_SSE_COMIS_RM>,
Sched<[WriteFAddLd, ReadAfterLd]>;
}
let Defs = [EFLAGS] in {
defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
- "ucomiss", SSEPackedSingle>, TB, VEX, VEX_LIG;
+ "ucomiss">, TB, VEX, VEX_LIG;
defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
- "ucomisd", SSEPackedDouble>, TB, OpSize, VEX,
- VEX_LIG;
+ "ucomisd">, TB, OpSize, VEX, VEX_LIG;
let Pattern = []<dag> in {
defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
- "comiss", SSEPackedSingle>, TB, VEX,
- VEX_LIG;
+ "comiss">, TB, VEX, VEX_LIG;
defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
- "comisd", SSEPackedDouble>, TB, OpSize, VEX,
- VEX_LIG;
+ "comisd">, TB, OpSize, VEX, VEX_LIG;
}
defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
- load, "ucomiss", SSEPackedSingle>, TB, VEX;
+ load, "ucomiss">, TB, VEX;
defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
- load, "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;
+ load, "ucomisd">, TB, OpSize, VEX;
defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
- load, "comiss", SSEPackedSingle>, TB, VEX;
+ load, "comiss">, TB, VEX;
defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
- load, "comisd", SSEPackedDouble>, TB, OpSize, VEX;
+ load, "comisd">, TB, OpSize, VEX;
defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
- "ucomiss", SSEPackedSingle>, TB;
+ "ucomiss">, TB;
defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
- "ucomisd", SSEPackedDouble>, TB, OpSize;
+ "ucomisd">, TB, OpSize;
let Pattern = []<dag> in {
defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
- "comiss", SSEPackedSingle>, TB;
+ "comiss">, TB;
defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
- "comisd", SSEPackedDouble>, TB, OpSize;
+ "comisd">, TB, OpSize;
}
defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
- load, "ucomiss", SSEPackedSingle>, TB;
+ load, "ucomiss">, TB;
defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
- load, "ucomisd", SSEPackedDouble>, TB, OpSize;
+ load, "ucomisd">, TB, OpSize;
defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
- "comiss", SSEPackedSingle>, TB;
+ "comiss">, TB;
defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
- "comisd", SSEPackedDouble>, TB, OpSize;
+ "comisd">, TB, OpSize;
} // Defs = [EFLAGS]
// sse12_cmp_packed - sse 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
Operand CC, Intrinsic Int, string asm,
- string asm_alt, Domain d> {
+ string asm_alt, Domain d,
+ OpndItins itins = SSE_ALU_F32P> {
def rri : PIi8<0xC2, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
[(set RC:$dst, (Int RC:$src1, RC:$src2, imm:$cc))],
- IIC_SSE_CMPP_RR, d>,
+ itins.rr, d>,
Sched<[WriteFAdd]>;
def rmi : PIi8<0xC2, MRMSrcMem,
(outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
[(set RC:$dst, (Int RC:$src1, (memop addr:$src2), imm:$cc))],
- IIC_SSE_CMPP_RM, d>,
+ itins.rm, d>,
Sched<[WriteFAddLd, ReadAfterLd]>;
// Accept explicit immediate argument form instead of comparison code.
let neverHasSideEffects = 1 in {
def rri_alt : PIi8<0xC2, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
- asm_alt, [], IIC_SSE_CMPP_RR, d>, Sched<[WriteFAdd]>;
+ asm_alt, [], itins.rr, d>, Sched<[WriteFAdd]>;
def rmi_alt : PIi8<0xC2, MRMSrcMem,
(outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
- asm_alt, [], IIC_SSE_CMPP_RM, d>,
+ asm_alt, [], itins.rm, d>,
Sched<[WriteFAddLd, ReadAfterLd]>;
}
}
@@ -2450,11 +2467,11 @@ let Constraints = "$src1 = $dst" in {
defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
"cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
"cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SSEPackedSingle>, TB;
+ SSEPackedSingle, SSE_ALU_F32P>, TB;
defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
"cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
"cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SSEPackedDouble>, TB, OpSize;
+ SSEPackedDouble, SSE_ALU_F64P>, TB, OpSize;
}
let Predicates = [HasAVX] in {
@@ -2514,16 +2531,16 @@ multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- memopv4f32, SSEPackedSingle>, TB, VEX_4V;
+ loadv4f32, SSEPackedSingle>, TB, VEX_4V;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- memopv8f32, SSEPackedSingle>, TB, VEX_4V, VEX_L;
+ loadv8f32, SSEPackedSingle>, TB, VEX_4V, VEX_L;
defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
- "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
- memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
+ "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ loadv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
- "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
- memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
+ "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ loadv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
@@ -2538,13 +2555,13 @@ let Constraints = "$src1 = $dst" in {
let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86Shufp VR128:$src1,
- (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
+ (bc_v4i32 (loadv2i64 addr:$src2)), (i8 imm:$imm))),
(VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
(VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v2i64 (X86Shufp VR128:$src1,
- (memopv2i64 addr:$src2), (i8 imm:$imm))),
+ (loadv2i64 addr:$src2), (i8 imm:$imm))),
(VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
(VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
@@ -2553,13 +2570,13 @@ let Predicates = [HasAVX] in {
def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v8i32 (X86Shufp VR256:$src1,
- (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
+ (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
(VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86Shufp VR256:$src1,
- (memopv4i64 addr:$src2), (i8 imm:$imm))),
+ (loadv4i64 addr:$src2), (i8 imm:$imm))),
(VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
}
@@ -2603,29 +2620,29 @@ multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
-defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
+defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, loadv4f32,
VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, TB, VEX_4V;
-defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
+defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, loadv2f64,
VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, TB, OpSize, VEX_4V;
-defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
+defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, loadv4f32,
VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, TB, VEX_4V;
-defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
+defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, loadv2f64,
VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, TB, OpSize, VEX_4V;
-defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, memopv8f32,
+defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, loadv8f32,
VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, TB, VEX_4V, VEX_L;
-defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, memopv4f64,
+defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, loadv4f64,
VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
-defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, memopv8f32,
+defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, loadv8f32,
VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, TB, VEX_4V, VEX_L;
-defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, memopv4f64,
+defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
@@ -2645,20 +2662,20 @@ let Constraints = "$src1 = $dst" in {
} // Constraints = "$src1 = $dst"
let Predicates = [HasAVX1Only] in {
- def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
+ def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
(VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
(VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
+ def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
(VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
(VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v4i64 (X86Unpckl VR256:$src1, (memopv4i64 addr:$src2))),
+ def : Pat<(v4i64 (X86Unpckl VR256:$src1, (loadv4i64 addr:$src2))),
(VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
(VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v4i64 (X86Unpckh VR256:$src1, (memopv4i64 addr:$src2))),
+ def : Pat<(v4i64 (X86Unpckh VR256:$src1, (loadv4i64 addr:$src2))),
(VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
(VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
@@ -2689,13 +2706,10 @@ let Predicates = [UseSSE2] in {
/// sse12_extr_sign_mask - sse 1 & 2 extract sign mask
multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
Domain d> {
- def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set GR32:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
- Sched<[WriteVecLogic]>;
- def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [],
- IIC_SSE_MOVMSK, d>, REX_W, Sched<[WriteVecLogic]>;
+ def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(set GR32orGR64:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
+ Sched<[WriteVecLogic]>;
}
let Predicates = [HasAVX] in {
@@ -2712,29 +2726,15 @@ let Predicates = [HasAVX] in {
OpSize, VEX, VEX_L;
def : Pat<(i32 (X86fgetsign FR32:$src)),
- (VMOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
- (VMOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ (SUBREG_TO_REG (i64 0),
+ (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
- (VMOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
- (VMOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>;
-
- // Assembler Only
- def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
- "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
- SSEPackedSingle>, TB, VEX, Sched<[WriteVecLogic]>;
- def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
- "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
- SSEPackedDouble>, TB,
- OpSize, VEX, Sched<[WriteVecLogic]>;
- def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
- "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
- SSEPackedSingle>, TB, VEX, VEX_L, Sched<[WriteVecLogic]>;
- def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
- "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
- SSEPackedDouble>, TB,
- OpSize, VEX, VEX_L, Sched<[WriteVecLogic]>;
+ (SUBREG_TO_REG (i64 0),
+ (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
}
defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
@@ -2743,16 +2743,18 @@ defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
SSEPackedDouble>, TB, OpSize;
def : Pat<(i32 (X86fgetsign FR32:$src)),
- (MOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>,
+ (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
Requires<[UseSSE1]>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
- (MOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>,
+ (SUBREG_TO_REG (i64 0),
+ (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
Requires<[UseSSE1]>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
- (MOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>,
+ (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>,
Requires<[UseSSE2]>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
- (MOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>,
+ (SUBREG_TO_REG (i64 0),
+ (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
Requires<[UseSSE2]>;
//===---------------------------------------------------------------------===//
@@ -2791,7 +2793,7 @@ multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
OpndItins itins, bit IsCommutable = 0> {
let Predicates = [HasAVX] in
defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
- VR128, memopv2i64, i128mem, itins, IsCommutable, 0>, VEX_4V;
+ VR128, loadv2i64, i128mem, itins, IsCommutable, 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
@@ -2799,7 +2801,7 @@ let Constraints = "$src1 = $dst" in
let Predicates = [HasAVX2] in
defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
- OpVT256, VR256, memopv4i64, i256mem, itins,
+ OpVT256, VR256, loadv4i64, i256mem, itins,
IsCommutable, 0>, VEX_4V, VEX_L;
}
@@ -2839,16 +2841,18 @@ multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
}
// Alias bitwise logical operations using SSE logical ops on packed FP values.
-defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand,
- SSE_BIT_ITINS_P>;
-defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for,
- SSE_BIT_ITINS_P>;
-defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor,
- SSE_BIT_ITINS_P>;
-
-let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
- defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef,
+let isCodeGenOnly = 1 in {
+ defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand,
SSE_BIT_ITINS_P>;
+ defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for,
+ SSE_BIT_ITINS_P>;
+ defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor,
+ SSE_BIT_ITINS_P>;
+
+ let isCommutable = 0 in
+ defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", X86fandn,
+ SSE_BIT_ITINS_P>;
+}
/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
///
@@ -2858,14 +2862,14 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "ps"), f256mem,
[(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
[(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
- (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V, VEX_L;
+ (loadv4i64 addr:$src2)))], 0>, TB, VEX_4V, VEX_L;
defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f256mem,
[(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
(bc_v4i64 (v4f64 VR256:$src2))))],
[(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
- (memopv4i64 addr:$src2)))], 0>,
+ (loadv4i64 addr:$src2)))], 0>,
TB, OpSize, VEX_4V, VEX_L;
// In AVX no need to add a pattern for 128-bit logical rr ps, because they
@@ -2875,14 +2879,14 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem, [],
[(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
- (memopv2i64 addr:$src2)))], 0>, TB, VEX_4V;
+ (loadv2i64 addr:$src2)))], 0>, TB, VEX_4V;
defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem,
[(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
(bc_v2i64 (v2f64 VR128:$src2))))],
[(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
- (memopv2i64 addr:$src2)))], 0>,
+ (loadv2i64 addr:$src2)))], 0>,
TB, OpSize, VEX_4V;
let Constraints = "$src1 = $dst" in {
@@ -2924,120 +2928,93 @@ let isCommutable = 0 in
/// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
/// classes below
-multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
- SizeItins itins,
- bit Is2Addr = 1> {
- defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
- OpNode, FR32, f32mem,
- itins.s, Is2Addr>, XS;
- defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
- OpNode, FR64, f64mem,
- itins.d, Is2Addr>, XD;
-}
-
multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
SDNode OpNode, SizeItins itins> {
-let Predicates = [HasAVX] in {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
- VR128, v4f32, f128mem, memopv4f32,
+ VR128, v4f32, f128mem, loadv4f32,
SSEPackedSingle, itins.s, 0>, TB, VEX_4V;
defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
- VR128, v2f64, f128mem, memopv2f64,
+ VR128, v2f64, f128mem, loadv2f64,
SSEPackedDouble, itins.d, 0>, TB, OpSize, VEX_4V;
defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
- OpNode, VR256, v8f32, f256mem, memopv8f32,
+ OpNode, VR256, v8f32, f256mem, loadv8f32,
SSEPackedSingle, itins.s, 0>, TB, VEX_4V, VEX_L;
defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
- OpNode, VR256, v4f64, f256mem, memopv4f64,
+ OpNode, VR256, v4f64, f256mem, loadv4f64,
SSEPackedDouble, itins.d, 0>, TB, OpSize, VEX_4V, VEX_L;
-}
-let Constraints = "$src1 = $dst" in {
- defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
- v4f32, f128mem, memopv4f32, SSEPackedSingle,
- itins.s, 1>, TB;
- defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
- v2f64, f128mem, memopv2f64, SSEPackedDouble,
- itins.d, 1>, TB, OpSize;
-}
-}
-
-multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
- SizeItins itins,
- bit Is2Addr = 1> {
- defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
- !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
- itins.s, Is2Addr>, XS;
- defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
- !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
- itins.d, Is2Addr>, XD;
+ let Constraints = "$src1 = $dst" in {
+ defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
+ v4f32, f128mem, memopv4f32, SSEPackedSingle,
+ itins.s>, TB;
+ defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
+ v2f64, f128mem, memopv2f64, SSEPackedDouble,
+ itins.d>, TB, OpSize;
+ }
}
-// Binary Arithmetic instructions
-defm ADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>;
-defm MUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>;
-let isCommutable = 0 in {
- defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>;
- defm DIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>;
- defm MAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>;
- defm MIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>;
-}
+multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ SizeItins itins> {
+ defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
+ OpNode, FR32, f32mem, itins.s, 0>, XS, VEX_4V, VEX_LIG;
+ defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
+ OpNode, FR64, f64mem, itins.d, 0>, XD, VEX_4V, VEX_LIG;
-let isCodeGenOnly = 1 in {
- defm MAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>;
- defm MINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>;
+ let Constraints = "$src1 = $dst" in {
+ defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
+ OpNode, FR32, f32mem, itins.s>, XS;
+ defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
+ OpNode, FR64, f64mem, itins.d>, XD;
+ }
}
-defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S, 0>,
- basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S, 0>,
- VEX_4V, VEX_LIG;
-defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S, 0>,
- basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S, 0>,
- VEX_4V, VEX_LIG;
+multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
+ SizeItins itins> {
+ defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
+ itins.s, 0>, XS, VEX_4V, VEX_LIG;
+ defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
+ itins.d, 0>, XD, VEX_4V, VEX_LIG;
-let isCommutable = 0 in {
- defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S, 0>,
- basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S, 0>,
- VEX_4V, VEX_LIG;
- defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S, 0>,
- basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S, 0>,
- VEX_4V, VEX_LIG;
- defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S, 0>,
- basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S, 0>,
- VEX_4V, VEX_LIG;
- defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S, 0>,
- basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S, 0>,
- VEX_4V, VEX_LIG;
+ let Constraints = "$src1 = $dst" in {
+ defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
+ itins.s>, XS;
+ defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
+ itins.d>, XD;
+ }
}
-let Constraints = "$src1 = $dst" in {
- defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
- basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
- defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
- basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
-
- let isCommutable = 0 in {
- defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
- basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
- defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
- basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
- defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
- basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>;
- defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
- basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>;
- }
+// Binary Arithmetic instructions
+defm ADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
+defm MUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
+ basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
+ basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
+let isCommutable = 0 in {
+ defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
+ defm DIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
+ basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
+ basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
+ defm MAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>;
+ defm MIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>;
}
let isCodeGenOnly = 1 in {
- defm VMAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S, 0>,
- VEX_4V, VEX_LIG;
- defm VMINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S, 0>,
- VEX_4V, VEX_LIG;
- let Constraints = "$src1 = $dst" in {
- defm MAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>;
- defm MINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>;
- }
+ defm MAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>;
+ defm MINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>;
}
/// Unop Arithmetic
@@ -3049,12 +3026,20 @@ let isCodeGenOnly = 1 in {
/// And, we have a special variant form for a full-vector intrinsic form.
let Sched = WriteFSqrt in {
-def SSE_SQRTP : OpndItins<
- IIC_SSE_SQRTP_RR, IIC_SSE_SQRTP_RM
+def SSE_SQRTPS : OpndItins<
+ IIC_SSE_SQRTPS_RR, IIC_SSE_SQRTPS_RM
+>;
+
+def SSE_SQRTSS : OpndItins<
+ IIC_SSE_SQRTSS_RR, IIC_SSE_SQRTSS_RM
>;
-def SSE_SQRTS : OpndItins<
- IIC_SSE_SQRTS_RR, IIC_SSE_SQRTS_RM
+def SSE_SQRTPD : OpndItins<
+ IIC_SSE_SQRTPD_RR, IIC_SSE_SQRTPD_RM
+>;
+
+def SSE_SQRTSD : OpndItins<
+ IIC_SSE_SQRTSD_RR, IIC_SSE_SQRTSD_RM
>;
}
@@ -3175,7 +3160,7 @@ let Predicates = [HasAVX] in {
def V#NAME#PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat("v", OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))],
+ [(set VR128:$dst, (OpNode (loadv4f32 addr:$src)))],
itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
def V#NAME#PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat("v", OpcodeStr,
@@ -3185,7 +3170,7 @@ let Predicates = [HasAVX] in {
def V#NAME#PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat("v", OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))],
+ [(set VR256:$dst, (OpNode (loadv8f32 addr:$src)))],
itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
}
@@ -3212,7 +3197,7 @@ let Predicates = [HasAVX] in {
def V#NAME#PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat("v", OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))],
+ [(set VR128:$dst, (V4F32Int (loadv4f32 addr:$src)))],
itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
def V#NAME#PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat("v", OpcodeStr,
@@ -3223,7 +3208,7 @@ let Predicates = [HasAVX] in {
(ins f256mem:$src),
!strconcat("v", OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (V8F32Int (memopv8f32 addr:$src)))],
+ [(set VR256:$dst, (V8F32Int (loadv8f32 addr:$src)))],
itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
}
@@ -3293,7 +3278,7 @@ let Predicates = [HasAVX] in {
def V#NAME#PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat("v", OpcodeStr,
"pd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))],
+ [(set VR128:$dst, (OpNode (loadv2f64 addr:$src)))],
itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
def V#NAME#PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat("v", OpcodeStr,
@@ -3303,7 +3288,7 @@ let Predicates = [HasAVX] in {
def V#NAME#PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat("v", OpcodeStr,
"pd\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))],
+ [(set VR256:$dst, (OpNode (loadv4f64 addr:$src)))],
itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
}
@@ -3319,47 +3304,48 @@ let Predicates = [HasAVX] in {
// Square root.
defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss,
- SSE_SQRTS>,
- sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTP>,
+ SSE_SQRTSS>,
+ sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPS>,
sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd,
- SSE_SQRTS>,
- sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTP>;
+ SSE_SQRTSD>,
+ sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPD>;
// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
-defm RSQRT : sse1_fp_unop_rw<0x52, "rsqrt", X86frsqrt, SSE_SQRTS>,
- sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_SQRTP>,
+defm RSQRT : sse1_fp_unop_rw<0x52, "rsqrt", X86frsqrt, SSE_SQRTSS>,
+ sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_SQRTPS>,
sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps,
- int_x86_avx_rsqrt_ps_256, SSE_SQRTP>;
+ int_x86_avx_rsqrt_ps_256, SSE_SQRTPS>;
defm RCP : sse1_fp_unop_rw<0x53, "rcp", X86frcp, SSE_RCPS>,
sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPP>,
sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps,
int_x86_avx_rcp_ps_256, SSE_RCPP>;
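
As the comment above notes, RSQRTPS/RCPPS are approximations (roughly 12 bits of precision) and are normally followed by a refinement step when more accuracy is wanted. As a rough illustration only, and not part of this patch (the helper name is made up), one Newton-Raphson step applied to the RSQRTPS estimate looks like this in C with SSE intrinsics:

    #include <xmmintrin.h>

    /* Refine y ~= 1/sqrt(a) with one Newton-Raphson step:
     *   y' = y * (1.5 - 0.5 * a * y * y)
     * bringing the ~12-bit RSQRTPS estimate close to full single precision. */
    static __m128 rsqrt_refined(__m128 a) {
      const __m128 half   = _mm_set1_ps(0.5f);
      const __m128 onep5  = _mm_set1_ps(1.5f);
      __m128 y   = _mm_rsqrt_ps(a);                    /* ~12-bit estimate */
      __m128 ayy = _mm_mul_ps(_mm_mul_ps(a, y), y);    /* a * y * y        */
      return _mm_mul_ps(y, _mm_sub_ps(onep5, _mm_mul_ps(half, ayy)));
    }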
-def : Pat<(f32 (fsqrt FR32:$src)),
- (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
-def : Pat<(f32 (fsqrt (load addr:$src))),
- (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
- Requires<[HasAVX, OptForSize]>;
-def : Pat<(f64 (fsqrt FR64:$src)),
- (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
-def : Pat<(f64 (fsqrt (load addr:$src))),
- (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
- Requires<[HasAVX, OptForSize]>;
-
-def : Pat<(f32 (X86frsqrt FR32:$src)),
- (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
-def : Pat<(f32 (X86frsqrt (load addr:$src))),
- (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
- Requires<[HasAVX, OptForSize]>;
-
-def : Pat<(f32 (X86frcp FR32:$src)),
- (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
-def : Pat<(f32 (X86frcp (load addr:$src))),
- (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
- Requires<[HasAVX, OptForSize]>;
-
-let Predicates = [HasAVX] in {
+let Predicates = [UseAVX] in {
+ def : Pat<(f32 (fsqrt FR32:$src)),
+ (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
+ def : Pat<(f32 (fsqrt (load addr:$src))),
+ (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[HasAVX, OptForSize]>;
+ def : Pat<(f64 (fsqrt FR64:$src)),
+ (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
+ def : Pat<(f64 (fsqrt (load addr:$src))),
+ (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[HasAVX, OptForSize]>;
+
+ def : Pat<(f32 (X86frsqrt FR32:$src)),
+ (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
+ def : Pat<(f32 (X86frsqrt (load addr:$src))),
+ (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[HasAVX, OptForSize]>;
+
+ def : Pat<(f32 (X86frcp FR32:$src)),
+ (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
+ def : Pat<(f32 (X86frcp (load addr:$src))),
+ (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[HasAVX, OptForSize]>;
+}
+let Predicates = [UseAVX] in {
def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
(COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)),
(COPY_TO_REGCLASS VR128:$src, FR32)),
@@ -3373,7 +3359,9 @@ let Predicates = [HasAVX] in {
VR128)>;
def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
(VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
+}
+let Predicates = [HasAVX] in {
def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
(COPY_TO_REGCLASS (VRSQRTSSr (f32 (IMPLICIT_DEF)),
(COPY_TO_REGCLASS VR128:$src, FR32)),
@@ -3657,7 +3645,7 @@ def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
XS, Requires<[UseSSE2]>;
}
-let mayStore = 1, SchedRW = [WriteStore] in {
+let mayStore = 1, neverHasSideEffects = 1, SchedRW = [WriteStore] in {
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}",
[/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
@@ -3720,7 +3708,7 @@ multiclass PDI_binop_all_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
bit IsCommutable = 0> {
let Predicates = [HasAVX] in
defm V#NAME : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId128,
- VR128, memopv2i64, i128mem, itins,
+ VR128, loadv2i64, i128mem, itins,
IsCommutable, 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
@@ -3729,7 +3717,7 @@ let Constraints = "$src1 = $dst" in
let Predicates = [HasAVX2] in
defm V#NAME#Y : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId256,
- VR256, memopv4i64, i256mem, itins,
+ VR256, loadv4i64, i256mem, itins,
IsCommutable, 0>, VEX_4V, VEX_L;
}
@@ -3756,11 +3744,11 @@ multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
(bc_frag (memopv2i64 addr:$src2)))))], itins.rm>,
Sched<[WriteVecShiftLd, ReadAfterLd]>;
def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
- (ins RC:$src1, i32i8imm:$src2),
+ (ins RC:$src1, i8imm:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i32 imm:$src2))))], itins.ri>,
+ [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i8 imm:$src2))))], itins.ri>,
Sched<[WriteVecShift]>;
}
@@ -3844,15 +3832,15 @@ defm PAVGB : PDI_binop_all_int<0xE0, "pavgb", int_x86_sse2_pavg_b,
defm PAVGW : PDI_binop_all_int<0xE3, "pavgw", int_x86_sse2_pavg_w,
int_x86_avx2_pavg_w, SSE_INTALU_ITINS_P, 1>;
defm PSADBW : PDI_binop_all_int<0xF6, "psadbw", int_x86_sse2_psad_bw,
- int_x86_avx2_psad_bw, SSE_INTALU_ITINS_P, 1>;
+ int_x86_avx2_psad_bw, SSE_PMADD, 1>;
let Predicates = [HasAVX] in
defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
- memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
+ loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
VEX_4V;
let Predicates = [HasAVX2] in
defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
- VR256, memopv4i64, i256mem,
+ VR256, loadv4i64, i256mem,
SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in
defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
@@ -3988,12 +3976,14 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
(outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
"pslldq\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>;
+ (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))],
+ IIC_SSE_INTSHDQ_P_RI>;
def PSRLDQri : PDIi8<0x73, MRM3r,
(outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
"psrldq\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>;
+ (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))],
+ IIC_SSE_INTSHDQ_P_RI>;
// PSRADQri doesn't exist in SSE[1-3].
}
} // Constraints = "$src1 = $dst"
@@ -4077,14 +4067,14 @@ let Predicates = [HasAVX] in {
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
- IIC_SSE_PSHUF>, VEX, Sched<[WriteShuffle]>;
+ IIC_SSE_PSHUF_RI>, VEX, Sched<[WriteShuffle]>;
def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1, i8imm:$src2),
!strconcat("v", OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
- (vt128 (OpNode (bitconvert (memopv2i64 addr:$src1)),
- (i8 imm:$src2))))], IIC_SSE_PSHUF>, VEX,
+ (vt128 (OpNode (bitconvert (loadv2i64 addr:$src1)),
+ (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX,
Sched<[WriteShuffleLd]>;
}
@@ -4095,14 +4085,14 @@ let Predicates = [HasAVX2] in {
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(vt256 (OpNode VR256:$src1, (i8 imm:$src2))))],
- IIC_SSE_PSHUF>, VEX, VEX_L, Sched<[WriteShuffle]>;
+ IIC_SSE_PSHUF_RI>, VEX, VEX_L, Sched<[WriteShuffle]>;
def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
(ins i256mem:$src1, i8imm:$src2),
!strconcat("v", OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
- (vt256 (OpNode (bitconvert (memopv4i64 addr:$src1)),
- (i8 imm:$src2))))], IIC_SSE_PSHUF>, VEX, VEX_L,
+ (vt256 (OpNode (bitconvert (loadv4i64 addr:$src1)),
+ (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX, VEX_L,
Sched<[WriteShuffleLd]>;
}
@@ -4113,14 +4103,14 @@ let Predicates = [UseSSE2] in {
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
- IIC_SSE_PSHUF>, Sched<[WriteShuffle]>;
+ IIC_SSE_PSHUF_RI>, Sched<[WriteShuffle]>;
def mi : Ii8<0x70, MRMSrcMem,
(outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode (bitconvert (memopv2i64 addr:$src1)),
- (i8 imm:$src2))))], IIC_SSE_PSHUF>,
+ (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>,
Sched<[WriteShuffleLd]>;
}
}
@@ -4131,7 +4121,7 @@ defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw>, XS;
defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw>, XD;
let Predicates = [HasAVX] in {
- def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
+ def : Pat<(v4f32 (X86PShufd (loadv4f32 addr:$src1), (i8 imm:$imm))),
(VPSHUFDmi addr:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
(VPSHUFDri VR128:$src1, imm:$imm)>;
@@ -4254,13 +4244,13 @@ let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
def rri : Ii8<0xC4, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1,
- GR32:$src2, i32i8imm:$src3),
+ GR32orGR64:$src2, i32i8imm:$src3),
!if(Is2Addr,
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
"vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))], IIC_SSE_PINSRW>,
- Sched<[WriteShuffle]>;
+ (X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))],
+ IIC_SSE_PINSRW>, Sched<[WriteShuffle]>;
def rmi : Ii8<0xC4, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1,
i16mem:$src2, i32i8imm:$src3),
@@ -4276,29 +4266,24 @@ multiclass sse2_pinsrw<bit Is2Addr = 1> {
// Extract
let Predicates = [HasAVX] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
- (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ (outs GR32orGR64:$dst), (ins VR128:$src1, i32i8imm:$src2),
"vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
- imm:$src2))]>, TB, OpSize, VEX,
+ [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
+ imm:$src2))]>, TB, OpSize, VEX,
Sched<[WriteShuffle]>;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
- (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ (outs GR32orGR64:$dst), (ins VR128:$src1, i32i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
- imm:$src2))], IIC_SSE_PEXTRW>,
+ [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
+ imm:$src2))], IIC_SSE_PEXTRW>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
// Insert
-let Predicates = [HasAVX] in {
- defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
- def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
- "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, TB, OpSize, VEX_4V, Sched<[WriteShuffle]>;
-}
+let Predicates = [HasAVX] in
+defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
-let Constraints = "$src1 = $dst" in
- defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[UseSSE2]>;
+let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
+defm PINSRW : sse2_pinsrw, TB, OpSize;
} // ExeDomain = SSEPackedInt
@@ -4308,24 +4293,23 @@ let Constraints = "$src1 = $dst" in
let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
-def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
+def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
+ (ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
+ [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
IIC_SSE_MOVMSK>, VEX;
-def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
- "pmovmskb\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK>, VEX;
let Predicates = [HasAVX2] in {
-def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
+def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
+ (ins VR256:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>, VEX, VEX_L;
-def VPMOVMSKBYr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
- "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
+ [(set GR32orGR64:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>,
+ VEX, VEX_L;
}
-def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
+def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
+ [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
IIC_SSE_MOVMSK>;
} // ExeDomain = SSEPackedInt
@@ -4336,25 +4320,25 @@ def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
-let Uses = [EDI] in
+let Uses = [EDI], Predicates = [HasAVX,In32BitMode] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
(ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
[(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
IIC_SSE_MASKMOV>, VEX;
-let Uses = [RDI] in
+let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
(ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
[(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
IIC_SSE_MASKMOV>, VEX;
-let Uses = [EDI] in
+let Uses = [EDI], Predicates = [UseSSE2,In32BitMode] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
[(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
IIC_SSE_MASKMOV>;
-let Uses = [RDI] in
+let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
[(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
@@ -4369,43 +4353,45 @@ def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
//===---------------------------------------------------------------------===//
// Move Int Doubleword to Packed Double Int
//
-def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
+def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
VEX, Sched<[WriteMove]>;
-def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
+def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (scalar_to_vector (loadi32 addr:$src))))],
IIC_SSE_MOVDQ>,
VEX, Sched<[WriteLoad]>;
-def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+ "movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (scalar_to_vector GR64:$src)))],
IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
-def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+let isCodeGenOnly = 1 in
+def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
+ "movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert GR64:$src))],
IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
-def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
+def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
Sched<[WriteMove]>;
-def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
+def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (scalar_to_vector (loadi32 addr:$src))))],
IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
-def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (scalar_to_vector GR64:$src)))],
IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
-def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
+let isCodeGenOnly = 1 in
+def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert GR64:$src))],
IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
@@ -4413,63 +4399,77 @@ def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar
//
-def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert GR32:$src))],
- IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
-
-def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
- IIC_SSE_MOVDQ>,
- VEX, Sched<[WriteLoad]>;
-def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert GR32:$src))],
- IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
-
-def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
- IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
+let isCodeGenOnly = 1 in {
+ def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (bitconvert GR32:$src))],
+ IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
+
+ def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
+ IIC_SSE_MOVDQ>,
+ VEX, Sched<[WriteLoad]>;
+ def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (bitconvert GR32:$src))],
+ IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
+
+ def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
+ IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
+}
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Doubleword Int
//
-def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
+def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
(iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX,
Sched<[WriteMove]>;
-def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
+def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
(ins i32mem:$dst, VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (vector_extract (v4i32 VR128:$src),
(iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
VEX, Sched<[WriteLoad]>;
-def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
+def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
(iPTR 0)))], IIC_SSE_MOVD_ToGP>,
Sched<[WriteMove]>;
-def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
+def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (vector_extract (v4i32 VR128:$src),
(iPTR 0))), addr:$dst)],
IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
+def : Pat<(v8i32 (X86Vinsert (v8i32 immAllZerosV), GR32:$src2, (iPTR 0))),
+ (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
+
+def : Pat<(v4i64 (X86Vinsert (bc_v4i64 (v8i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
+ (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;
+
+def : Pat<(v8i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
+ (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
+
+def : Pat<(v4i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
+ (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;
+
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int first element to Doubleword Int
//
let SchedRW = [WriteMove] in {
-def VMOVPQIto64rr : VRPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
(iPTR 0)))],
IIC_SSE_MOVD_ToGP>,
VEX;
-def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
+def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
(iPTR 0)))],
@@ -4479,121 +4479,112 @@ def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
//===---------------------------------------------------------------------===//
// Bitcast FR64 <-> GR64
//
-let Predicates = [HasAVX] in
-def VMOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
- "vmovq\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
- VEX, Sched<[WriteLoad]>;
-def VMOVSDto64rr : VRPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
+let isCodeGenOnly = 1 in {
+ let Predicates = [UseAVX] in
+ def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
+ VEX, Sched<[WriteLoad]>;
+ def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (bitconvert FR64:$src))],
+ IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
+ def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
+ IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
+
+ def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
+ IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
+ def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64:$src))],
- IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
-def VMOVSDto64mr : VRPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
+ IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
+ def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (bitconvert FR64:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
-
-def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
- IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
-def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (bitconvert FR64:$src))],
- IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
-def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
+ IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
+}
//===---------------------------------------------------------------------===//
// Move Scalar Single to Double Int
//
-def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (bitconvert FR32:$src))],
- IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>;
-def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
-def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (bitconvert FR32:$src))],
- IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
-def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
+let isCodeGenOnly = 1 in {
+ def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (bitconvert FR32:$src))],
+ IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>;
+ def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
+ IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
+ def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (bitconvert FR32:$src))],
+ IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
+ def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
+ IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
+}
//===---------------------------------------------------------------------===//
// Patterns and instructions to describe movd/movq to XMM register zero-extends
//
-let SchedRW = [WriteMove] in {
+let isCodeGenOnly = 1, SchedRW = [WriteMove] in {
let AddedComplexity = 15 in {
-def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v4i32 (X86vzmovl
- (v4i32 (scalar_to_vector GR32:$src)))))],
- IIC_SSE_MOVDQ>, VEX;
-def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
+def VMOVZQI2PQIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+ "movq\t{$src, $dst|$dst, $src}", // X86-64 only
[(set VR128:$dst, (v2i64 (X86vzmovl
(v2i64 (scalar_to_vector GR64:$src)))))],
IIC_SSE_MOVDQ>,
VEX, VEX_W;
-}
-let AddedComplexity = 15 in {
-def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v4i32 (X86vzmovl
- (v4i32 (scalar_to_vector GR32:$src)))))],
- IIC_SSE_MOVDQ>;
-def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+def MOVZQI2PQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
[(set VR128:$dst, (v2i64 (X86vzmovl
(v2i64 (scalar_to_vector GR64:$src)))))],
IIC_SSE_MOVDQ>;
}
-} // SchedRW
+} // isCodeGenOnly, SchedRW
-let AddedComplexity = 20, SchedRW = [WriteLoad] in {
-def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
- (loadi32 addr:$src))))))],
- IIC_SSE_MOVDQ>, VEX;
-def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
- (loadi32 addr:$src))))))],
- IIC_SSE_MOVDQ>;
-} // AddedComplexity, SchedRW
+let Predicates = [UseAVX] in {
+ let AddedComplexity = 15 in
+ def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
+ (VMOVDI2PDIrr GR32:$src)>;
-let Predicates = [HasAVX] in {
// AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
let AddedComplexity = 20 in {
+ def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
+ (VMOVDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
- (VMOVZDI2PDIrm addr:$src)>;
+ (VMOVDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
- (VMOVZDI2PDIrm addr:$src)>;
+ (VMOVDI2PDIrm addr:$src)>;
}
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
(v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
(v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
(SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
}
-let Predicates = [UseSSE2], AddedComplexity = 20 in {
- def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
- (MOVZDI2PDIrm addr:$src)>;
- def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
- (MOVZDI2PDIrm addr:$src)>;
+let Predicates = [UseSSE2] in {
+ let AddedComplexity = 15 in
+ def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
+ (MOVDI2PDIrr GR32:$src)>;
+
+ let AddedComplexity = 20 in {
+ def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
+ (MOVDI2PDIrm addr:$src)>;
+ def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
+ (MOVDI2PDIrm addr:$src)>;
+ def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
+ (MOVDI2PDIrm addr:$src)>;
+ }
}
// These are the correct encodings of the instructions so that we know how to
@@ -4602,15 +4593,12 @@ let Predicates = [UseSSE2], AddedComplexity = 20 in {
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
(MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
- (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
(MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
- (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
- (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
- (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
+// Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
+def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
+ (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
+def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
+ (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;
//===---------------------------------------------------------------------===//
// SSE2 - Move Quadword
@@ -4625,7 +4613,7 @@ def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
- VEX, Requires<[HasAVX]>;
+ VEX, Requires<[UseAVX]>;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -4638,12 +4626,12 @@ def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
// Move Packed Quadword Int to Quadword Int
//
let SchedRW = [WriteStore] in {
-def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (vector_extract (v2i64 VR128:$src),
(iPTR 0))), addr:$dst)],
IIC_SSE_MOVDQ>, VEX;
-def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (vector_extract (v2i64 VR128:$src),
(iPTR 0))), addr:$dst)],
@@ -4653,25 +4641,24 @@ def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
//===---------------------------------------------------------------------===//
// Store / copy lower 64-bits of a XMM register.
//
-def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+def VMOVLQ128mr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX,
Sched<[WriteStore]>;
-def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+def MOVLQ128mr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(int_x86_sse2_storel_dq addr:$dst, VR128:$src)],
IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
-let AddedComplexity = 20 in
+let isCodeGenOnly = 1, AddedComplexity = 20 in {
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (X86vzmovl (v2i64 (scalar_to_vector
(loadi64 addr:$src))))))],
IIC_SSE_MOVDQ>,
- XS, VEX, Requires<[HasAVX]>, Sched<[WriteLoad]>;
+ XS, VEX, Requires<[UseAVX]>, Sched<[WriteLoad]>;
-let AddedComplexity = 20 in
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -4679,10 +4666,9 @@ def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
(loadi64 addr:$src))))))],
IIC_SSE_MOVDQ>,
XS, Requires<[UseSSE2]>, Sched<[WriteLoad]>;
+}
-let Predicates = [HasAVX], AddedComplexity = 20 in {
- def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
- (VMOVZQI2PQIrm addr:$src)>;
+let Predicates = [UseAVX], AddedComplexity = 20 in {
def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
(VMOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzload addr:$src)),
@@ -4690,8 +4676,6 @@ let Predicates = [HasAVX], AddedComplexity = 20 in {
}
let Predicates = [UseSSE2], AddedComplexity = 20 in {
- def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
- (MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
(MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
@@ -4714,7 +4698,7 @@ def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
IIC_SSE_MOVQ_RR>,
- XS, VEX, Requires<[HasAVX]>;
+ XS, VEX, Requires<[UseAVX]>;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
@@ -4723,14 +4707,14 @@ def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
XS, Requires<[UseSSE2]>;
} // SchedRW
-let SchedRW = [WriteVecLogicLd] in {
+let isCodeGenOnly = 1, SchedRW = [WriteVecLogicLd] in {
let AddedComplexity = 20 in
def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl
(loadv2i64 addr:$src))))],
IIC_SSE_MOVDQ>,
- XS, VEX, Requires<[HasAVX]>;
+ XS, VEX, Requires<[UseAVX]>;
let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movq\t{$src, $dst|$dst, $src}",
@@ -4739,49 +4723,19 @@ def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
IIC_SSE_MOVDQ>,
XS, Requires<[UseSSE2]>;
}
-} // SchedRW
+} // isCodeGenOnly, SchedRW
let AddedComplexity = 20 in {
- let Predicates = [HasAVX] in {
- def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
- (VMOVZPQILo2PQIrm addr:$src)>;
+ let Predicates = [UseAVX] in {
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
(VMOVZPQILo2PQIrr VR128:$src)>;
}
let Predicates = [UseSSE2] in {
- def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
- (MOVZPQILo2PQIrm addr:$src)>;
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
(MOVZPQILo2PQIrr VR128:$src)>;
}
}
-// Instructions to match in the assembler
-let SchedRW = [WriteMove] in {
-def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "movq\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVDQ>, VEX, VEX_W;
-def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
- "movq\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVDQ>, VEX, VEX_W;
-// Recognize "movd" with GR64 destination, but encode as a "movq"
-def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
- "movd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVDQ>, VEX, VEX_W;
-} // SchedRW
-
-// Instructions for the disassembler
-// xr = XMM register
-// xm = mem64
-
-let SchedRW = [WriteMove] in {
-let Predicates = [HasAVX] in
-def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
-def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, XS;
-} // SchedRW
-
//===---------------------------------------------------------------------===//
// SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
@@ -4800,13 +4754,13 @@ def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
let Predicates = [HasAVX] in {
defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
- v4f32, VR128, memopv4f32, f128mem>, VEX;
+ v4f32, VR128, loadv4f32, f128mem>, VEX;
defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
- v4f32, VR128, memopv4f32, f128mem>, VEX;
+ v4f32, VR128, loadv4f32, f128mem>, VEX;
defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
- v8f32, VR256, memopv8f32, f256mem>, VEX, VEX_L;
+ v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
- v8f32, VR256, memopv8f32, f256mem>, VEX, VEX_L;
+ v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
}
defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
memopv4f32, f128mem>;
@@ -4816,19 +4770,19 @@ defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86Movshdup VR128:$src)),
(VMOVSHDUPrr VR128:$src)>;
- def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
+ def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (loadv2i64 addr:$src)))),
(VMOVSHDUPrm addr:$src)>;
def : Pat<(v4i32 (X86Movsldup VR128:$src)),
(VMOVSLDUPrr VR128:$src)>;
- def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
+ def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (loadv2i64 addr:$src)))),
(VMOVSLDUPrm addr:$src)>;
def : Pat<(v8i32 (X86Movshdup VR256:$src)),
(VMOVSHDUPYrr VR256:$src)>;
- def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
+ def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (loadv4i64 addr:$src)))),
(VMOVSHDUPYrm addr:$src)>;
def : Pat<(v8i32 (X86Movsldup VR256:$src)),
(VMOVSLDUPYrr VR256:$src)>;
- def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
+ def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (loadv4i64 addr:$src)))),
(VMOVSLDUPYrm addr:$src)>;
}
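The MOVSHDUP/MOVSLDUP patterns above replicate, respectively, the odd (high) and even (low) single-precision lanes of the source. A standalone C++ sketch of that behaviour using the SSE3 intrinsics (illustrative only, not part of this patch):

// movshdup / movsldup semantics shown with the SSE3 intrinsics.
// Compile with -msse3 (or any later -m level).
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128 v  = _mm_setr_ps(0.f, 1.f, 2.f, 3.f);   // lanes {0,1,2,3}
  __m128 hi = _mm_movehdup_ps(v);                // movshdup -> {1,1,3,3}
  __m128 lo = _mm_moveldup_ps(v);                // movsldup -> {0,0,2,2}
  float h[4], l[4];
  _mm_storeu_ps(h, hi);
  _mm_storeu_ps(l, lo);
  std::printf("movshdup: %g %g %g %g\n", h[0], h[1], h[2], h[3]);
  std::printf("movsldup: %g %g %g %g\n", l[0], l[1], l[2], l[3]);
  return 0;
}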
@@ -4882,20 +4836,20 @@ let Predicates = [HasAVX] in {
defm MOVDDUP : sse3_replicate_dfp<"movddup">;
let Predicates = [HasAVX] in {
- def : Pat<(X86Movddup (memopv2f64 addr:$src)),
+ def : Pat<(X86Movddup (loadv2f64 addr:$src)),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
- def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
+ def : Pat<(X86Movddup (bc_v2f64 (loadv4f32 addr:$src))),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
- def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
+ def : Pat<(X86Movddup (bc_v2f64 (loadv2i64 addr:$src))),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64
(v2i64 (scalar_to_vector (loadi64 addr:$src))))),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
// 256-bit version
- def : Pat<(X86Movddup (memopv4f64 addr:$src)),
+ def : Pat<(X86Movddup (loadv4f64 addr:$src)),
(VMOVDDUPYrm addr:$src)>;
- def : Pat<(X86Movddup (memopv4i64 addr:$src)),
+ def : Pat<(X86Movddup (loadv4i64 addr:$src)),
(VMOVDDUPYrm addr:$src)>;
def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
(VMOVDDUPYrm addr:$src)>;
@@ -5097,12 +5051,12 @@ multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
// Helper fragments to match sext vXi1 to vXiY.
def v16i1sextv16i8 : PatLeaf<(v16i8 (X86pcmpgt (bc_v16i8 (v4i32 immAllZerosV)),
VR128:$src))>;
-def v8i1sextv8i16 : PatLeaf<(v8i16 (X86vsrai VR128:$src, (i32 15)))>;
-def v4i1sextv4i32 : PatLeaf<(v4i32 (X86vsrai VR128:$src, (i32 31)))>;
+def v8i1sextv8i16 : PatLeaf<(v8i16 (X86vsrai VR128:$src, (i8 15)))>;
+def v4i1sextv4i32 : PatLeaf<(v4i32 (X86vsrai VR128:$src, (i8 31)))>;
def v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)),
VR256:$src))>;
-def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i32 15)))>;
-def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i32 31)))>;
+def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i8 15)))>;
+def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i8 31)))>;
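These PatLeaf fragments recognise the "sign mask" idiom: an arithmetic right shift by the element width minus one yields 0 or all-ones per lane, i.e. a sign-extended vXi1, which is how abs is commonly open-coded before PABS is available. A rough C++ equivalent of the pattern being matched (a sketch, not part of this patch):

// (x ^ (x >> 15)) - (x >> 15) == |x| for 16-bit lanes; the shift is the
// vXi1 sign extension these fragments match when forming PABS.
// Compile with -mssse3.
#include <immintrin.h>
#include <cassert>

int main() {
  __m128i x    = _mm_setr_epi16(-5, 7, -32768, 0, 1, -1, 100, -100);
  __m128i mask = _mm_srai_epi16(x, 15);              // 0x0000 or 0xFFFF per lane
  __m128i abs_manual = _mm_sub_epi16(_mm_xor_si128(x, mask), mask);
  __m128i abs_ssse3  = _mm_abs_epi16(x);             // pabsw
  assert(_mm_movemask_epi8(_mm_cmpeq_epi16(abs_manual, abs_ssse3)) == 0xFFFF);
  return 0;
}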
let Predicates = [HasAVX] in {
defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb",
@@ -5258,34 +5212,34 @@ multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(IntId256 VR256:$src1,
- (bitconvert (memopv4i64 addr:$src2))))]>, OpSize;
+ (bitconvert (loadv4i64 addr:$src2))))]>, OpSize;
}
let ImmT = NoImm, Predicates = [HasAVX] in {
let isCommutable = 0 in {
defm VPHADDW : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
- memopv2i64, i128mem,
+ loadv2i64, i128mem,
SSE_PHADDSUBW, 0>, VEX_4V;
defm VPHADDD : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
- memopv2i64, i128mem,
+ loadv2i64, i128mem,
SSE_PHADDSUBD, 0>, VEX_4V;
defm VPHSUBW : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
- memopv2i64, i128mem,
+ loadv2i64, i128mem,
SSE_PHADDSUBW, 0>, VEX_4V;
defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
- memopv2i64, i128mem,
+ loadv2i64, i128mem,
SSE_PHADDSUBD, 0>, VEX_4V;
defm VPSIGNB : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
- memopv2i64, i128mem,
+ loadv2i64, i128mem,
SSE_PSIGN, 0>, VEX_4V;
defm VPSIGNW : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
- memopv2i64, i128mem,
+ loadv2i64, i128mem,
SSE_PSIGN, 0>, VEX_4V;
defm VPSIGND : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
- memopv2i64, i128mem,
+ loadv2i64, i128mem,
SSE_PSIGN, 0>, VEX_4V;
defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
- memopv2i64, i128mem,
+ loadv2i64, i128mem,
SSE_PSHUFB, 0>, VEX_4V;
defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
int_x86_ssse3_phadd_sw_128,
@@ -5305,28 +5259,28 @@ defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
let ImmT = NoImm, Predicates = [HasAVX2] in {
let isCommutable = 0 in {
defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
- memopv4i64, i256mem,
+ loadv4i64, i256mem,
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
- memopv4i64, i256mem,
+ loadv4i64, i256mem,
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
- memopv4i64, i256mem,
+ loadv4i64, i256mem,
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
- memopv4i64, i256mem,
+ loadv4i64, i256mem,
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
- memopv4i64, i256mem,
+ loadv4i64, i256mem,
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
- memopv4i64, i256mem,
+ loadv4i64, i256mem,
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
- memopv4i64, i256mem,
+ loadv4i64, i256mem,
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
- memopv4i64, i256mem,
+ loadv4i64, i256mem,
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
int_x86_avx2_phadd_sw>, VEX_4V, VEX_L;
@@ -5384,7 +5338,7 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
- [], IIC_SSE_PALIGNR>, OpSize, Sched<[WriteShuffle]>;
+ [], IIC_SSE_PALIGNRR>, OpSize, Sched<[WriteShuffle]>;
let mayLoad = 1 in
def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
@@ -5392,7 +5346,7 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
- [], IIC_SSE_PALIGNR>, OpSize, Sched<[WriteShuffleLd, ReadAfterLd]>;
+ [], IIC_SSE_PALIGNRM>, OpSize, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
}
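For reference, the operation modelled by the palignr multiclass above: the two sources are concatenated (first source in the high half), the 32-byte value is shifted right by the immediate number of bytes, and the low 16 bytes are kept. A small C++ sketch, assuming the SSSE3 intrinsic (not part of this patch):

// Compile with -mssse3.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128i hi = _mm_setr_epi8(16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31);
  __m128i lo = _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15);
  __m128i r  = _mm_alignr_epi8(hi, lo, 4);      // bytes 4..19 of lo:hi
  unsigned char out[16];
  _mm_storeu_si128((__m128i *)out, r);
  for (int i = 0; i < 16; ++i)
    std::printf("%d ", out[i]);                 // prints 4 5 ... 19
  std::printf("\n");
  return 0;
}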
@@ -5472,28 +5426,29 @@ def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
TB, Requires<[HasSSE3]>;
} // SchedRW
-def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
-def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
+def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[In32BitMode]>;
+def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>;
-def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
+def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORrrr)>,
Requires<[In32BitMode]>;
-def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
+def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>,
Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//
-multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
+multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId,
+ OpndItins itins = DEFAULT_ITINS> {
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
+ [(set VR128:$dst, (IntId VR128:$src))], itins.rr>, OpSize;
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
- (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
- OpSize;
+ (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))],
+ itins.rm>, OpSize;
}
multiclass SS41I_binop_rm_int16_y<bits<8> opc, string OpcodeStr,
@@ -5504,22 +5459,23 @@ multiclass SS41I_binop_rm_int16_y<bits<8> opc, string OpcodeStr,
def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId (load addr:$src)))]>, OpSize;
+ [(set VR256:$dst, (IntId (load addr:$src)))]>,
+ OpSize;
}
let Predicates = [HasAVX] in {
-defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
- VEX;
-defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
- VEX;
-defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
- VEX;
-defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
- VEX;
-defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
- VEX;
-defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
- VEX;
+defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw",
+ int_x86_sse41_pmovsxbw>, VEX;
+defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd",
+ int_x86_sse41_pmovsxwd>, VEX;
+defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq",
+ int_x86_sse41_pmovsxdq>, VEX;
+defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw",
+ int_x86_sse41_pmovzxbw>, VEX;
+defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd",
+ int_x86_sse41_pmovzxwd>, VEX;
+defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq",
+ int_x86_sse41_pmovzxdq>, VEX;
}
let Predicates = [HasAVX2] in {
@@ -5537,12 +5493,12 @@ defm VPMOVZXDQ : SS41I_binop_rm_int16_y<0x35, "vpmovzxdq",
int_x86_avx2_pmovzxdq>, VEX, VEX_L;
}
-defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
-defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
-defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
-defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
-defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
-defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
+defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw, SSE_INTALU_ITINS_P>;
+defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd, SSE_INTALU_ITINS_P>;
+defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq, SSE_INTALU_ITINS_P>;
+defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw, SSE_INTALU_ITINS_P>;
+defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd, SSE_INTALU_ITINS_P>;
+defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq, SSE_INTALU_ITINS_P>;
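The SS41I_binop_rm_int8 instances above cover the pmovsx/pmovzx extensions, which widen the low elements of the source with sign or zero extension. A minimal C++ sketch of the byte-to-word forms, using the SSE4.1 intrinsics (illustrative only):

// Compile with -msse4.1.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128i b = _mm_setr_epi8(-1, 2, -3, 4, -5, 6, -7, 8, 0,0,0,0,0,0,0,0);
  __m128i z = _mm_cvtepu8_epi16(b);   // pmovzxbw: 255, 2, 253, 4, ...
  __m128i s = _mm_cvtepi8_epi16(b);   // pmovsxbw:  -1, 2,  -3, 4, ...
  short zo[8], so[8];
  _mm_storeu_si128((__m128i *)zo, z);
  _mm_storeu_si128((__m128i *)so, s);
  std::printf("zero-extended: %d %d %d\n", zo[0], zo[1], zo[2]);
  std::printf("sign-extended: %d %d %d\n", so[0], so[1], so[2]);
  return 0;
}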
let Predicates = [HasAVX] in {
// Common patterns involving scalar load.
@@ -5640,32 +5596,39 @@ let Predicates = [HasAVX2] in {
(VPMOVZXDQYrr VR128:$src)>;
def : Pat<(v8i32 (X86vzmovly (v8i16 VR128:$src))),
(VPMOVZXWDYrr VR128:$src)>;
+ def : Pat<(v16i16 (X86vzmovly (v16i8 VR128:$src))),
+ (VPMOVZXBWYrr VR128:$src)>;
}
def : Pat<(v4i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQYrr VR128:$src)>;
def : Pat<(v8i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDYrr VR128:$src)>;
+ def : Pat<(v16i16 (X86vsmovl (v16i8 VR128:$src))), (VPMOVSXBWYrr VR128:$src)>;
}
let Predicates = [HasAVX] in {
def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQrr VR128:$src)>;
def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>;
+ def : Pat<(v8i16 (X86vsmovl (v16i8 VR128:$src))), (VPMOVSXBWrr VR128:$src)>;
}
let Predicates = [UseSSE41] in {
def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>;
def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>;
+ def : Pat<(v8i16 (X86vsmovl (v16i8 VR128:$src))), (PMOVSXBWrr VR128:$src)>;
}
-multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
+multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId,
+ OpndItins itins = DEFAULT_ITINS> {
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
+ [(set VR128:$dst, (IntId VR128:$src))], itins.rr>, OpSize;
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
- (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
+ (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))],
+ itins.rm>,
OpSize;
}
@@ -5704,10 +5667,14 @@ defm VPMOVZXWQ : SS41I_binop_rm_int8_y<0x34, "vpmovzxwq",
int_x86_avx2_pmovzxwq>, VEX, VEX_L;
}
-defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
-defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
-defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
-defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
+defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd,
+ SSE_INTALU_ITINS_P>;
+defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq,
+ SSE_INTALU_ITINS_P>;
+defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd,
+ SSE_INTALU_ITINS_P>;
+defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq,
+ SSE_INTALU_ITINS_P>;
let Predicates = [HasAVX] in {
// Common patterns involving scalar load
@@ -5735,7 +5702,8 @@ let Predicates = [UseSSE41] in {
(PMOVZXWQrm addr:$src)>;
}
-multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
+multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId,
+ OpndItins itins = DEFAULT_ITINS> {
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
@@ -5774,8 +5742,10 @@ defm VPMOVSXBQ : SS41I_binop_rm_int4_y<0x22, "vpmovsxbq",
defm VPMOVZXBQ : SS41I_binop_rm_int4_y<0x32, "vpmovzxbq",
int_x86_avx2_pmovzxbq>, VEX, VEX_L;
}
-defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
-defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
+defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq,
+ SSE_INTALU_ITINS_P>;
+defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq,
+ SSE_INTALU_ITINS_P>;
let Predicates = [HasAVX2] in {
def : Pat<(v16i16 (X86vsext (v16i8 VR128:$src))), (VPMOVSXBWYrr VR128:$src)>;
@@ -6027,35 +5997,39 @@ let Predicates = [UseSSE41] in {
/// SS41I_binop_ext8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
+ def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
(ins VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
+ imm:$src2))]>,
OpSize;
let neverHasSideEffects = 1, mayStore = 1 in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX] in
defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
- def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
- "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
-}
defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
+ let isCodeGenOnly = 1, hasSideEffects = 0 in
+ def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, OpSize;
+
let neverHasSideEffects = 1, mayStore = 1 in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
@@ -6117,31 +6091,28 @@ defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
-multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
+multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
+ OpndItins itins = DEFAULT_ITINS> {
+ def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
(ins VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set GR32:$dst,
- (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
+ [(set GR32orGR64:$dst,
+ (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))],
+ itins.rr>,
OpSize;
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
- addr:$dst)]>, OpSize;
+ addr:$dst)], itins.rm>, OpSize;
}
let ExeDomain = SSEPackedSingle in {
- let Predicates = [HasAVX] in {
+ let Predicates = [UseAVX] in
defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
- def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
- "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, OpSize, VEX;
- }
- defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
+ defm EXTRACTPS : SS41I_extractf32<0x17, "extractps", SSE_EXTRACT_ITINS>;
}
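EXTRACTPS, defined above with a GR32orGR64 destination, copies one single-precision lane into a 32-bit integer register (as a bit pattern) or to memory. A short C++ sketch with the SSE4.1 intrinsic (illustrative only):

// Compile with -msse4.1.
#include <immintrin.h>
#include <cstring>
#include <cstdio>

int main() {
  __m128 v = _mm_setr_ps(1.0f, 2.5f, -3.0f, 4.0f);
  int bits = _mm_extract_ps(v, 2);     // bit pattern of lane 2 (-3.0f)
  float f;
  std::memcpy(&f, &bits, sizeof f);
  std::printf("lane 2 = %g\n", f);     // -3
  return 0;
}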
// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
@@ -6162,13 +6133,13 @@ def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
+ (ins VR128:$src1, GR32orGR64:$src2, i32i8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
- (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
+ (X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>, OpSize;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
!if(Is2Addr,
@@ -6241,7 +6212,8 @@ let Constraints = "$src1 = $dst" in
// are optimized inserts that won't zero arbitrary elements in the destination
// vector. The next one matches the intrinsic and could zero arbitrary elements
// in the target vector.
-multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
+multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
+ OpndItins itins = DEFAULT_ITINS> {
def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
!if(Is2Addr,
@@ -6249,7 +6221,7 @@ multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
- (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
+ (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))], itins.rr>,
OpSize;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
@@ -6260,14 +6232,14 @@ multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
[(set VR128:$dst,
(X86insrtps VR128:$src1,
(v4f32 (scalar_to_vector (loadf32 addr:$src2))),
- imm:$src3))]>, OpSize;
+ imm:$src3))], itins.rm>, OpSize;
}
let ExeDomain = SSEPackedSingle in {
- let Predicates = [HasAVX] in
+ let Predicates = [UseAVX] in
defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
- defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
+ defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1, SSE_INSERT_ITINS>;
}
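For the insertps forms above, the immediate packs three fields: bits [7:6] select the source lane, bits [5:4] the destination lane, bits [3:0] a zero mask. A small C++ sketch of that encoding (assumptions: the SSE4.1 intrinsic, not part of this patch):

// Compile with -msse4.1.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128 dst = _mm_setr_ps(1.f, 2.f, 3.f, 4.f);
  __m128 src = _mm_setr_ps(9.f, 8.f, 7.f, 6.f);
  // 0x20: take src lane 0 (9.0) and place it in dst lane 2; zero no lanes.
  __m128 r = _mm_insert_ps(dst, src, 0x20);
  float o[4];
  _mm_storeu_ps(o, r);
  std::printf("%g %g %g %g\n", o[0], o[1], o[2], o[3]);   // 1 2 9 4
  return 0;
}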
//===----------------------------------------------------------------------===//
@@ -6285,7 +6257,8 @@ let ExeDomain = SSEPackedSingle in {
(outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
+ [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))],
+ IIC_SSE_ROUNDPS_REG>,
OpSize;
// Vector intrinsic operation, mem
@@ -6294,7 +6267,8 @@ let ExeDomain = SSEPackedSingle in {
!strconcat(OpcodeStr,
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
- (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
+ (V4F32Int (mem_frag32 addr:$src1),imm:$src2))],
+ IIC_SSE_ROUNDPS_MEM>,
OpSize;
} // ExeDomain = SSEPackedSingle
@@ -6304,7 +6278,8 @@ let ExeDomain = SSEPackedDouble in {
(outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
+ [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))],
+ IIC_SSE_ROUNDPS_REG>,
OpSize;
// Vector intrinsic operation, mem
@@ -6313,7 +6288,8 @@ let ExeDomain = SSEPackedDouble in {
!strconcat(OpcodeStr,
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
- (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
+ (V2F64Int (mem_frag64 addr:$src1),imm:$src2))],
+ IIC_SSE_ROUNDPS_REG>,
OpSize;
} // ExeDomain = SSEPackedDouble
}
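The roundps/roundpd forms above take a rounding-control immediate. A brief C++ sketch of two common controls, using the SSE4.1 intrinsic (illustrative only):

// Compile with -msse4.1.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128 v = _mm_setr_ps(1.7f, -1.7f, 2.5f, -2.5f);
  __m128 t = _mm_round_ps(v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
  __m128 n = _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  float to[4], no[4];
  _mm_storeu_ps(to, t);
  _mm_storeu_ps(no, n);
  std::printf("trunc:   %g %g %g %g\n", to[0], to[1], to[2], to[3]); // 1 -1 2 -2
  std::printf("nearest: %g %g %g %g\n", no[0], no[1], no[2], no[3]); // 2 -2 2 -2
  return 0;
}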
@@ -6397,11 +6373,11 @@ let ExeDomain = GenericDomain in {
let Predicates = [HasAVX] in {
// Intrinsic form
defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
- memopv4f32, memopv2f64,
+ loadv4f32, loadv2f64,
int_x86_sse41_round_ps,
int_x86_sse41_round_pd>, VEX;
defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
- memopv8f32, memopv4f64,
+ loadv8f32, loadv4f64,
int_x86_avx_round_ps_256,
int_x86_avx_round_pd_256>, VEX, VEX_L;
defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
@@ -6539,7 +6515,7 @@ def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
OpSize, VEX;
def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS,(X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
+ [(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
OpSize, VEX;
def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
@@ -6548,7 +6524,7 @@ def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
OpSize, VEX, VEX_L;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
+ [(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
OpSize, VEX, VEX_L;
}
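The ptest patterns above feed EFLAGS: ZF is set when the AND of the two sources is all zero. A minimal C++ sketch via the SSE4.1 intrinsic that reads ZF (illustrative only):

// Compile with -msse4.1.
#include <immintrin.h>
#include <cassert>

int main() {
  __m128i a = _mm_set_epi64x(0, 0x00FF);
  __m128i b = _mm_set_epi64x(0, 0xFF00);
  assert(_mm_testz_si128(a, b) == 1);   // no common bits -> ZF set
  assert(_mm_testz_si128(a, a) == 0);   // a AND a != 0    -> ZF clear
  return 0;
}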
@@ -6577,13 +6553,13 @@ multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
let Defs = [EFLAGS], Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
-defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
-defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>,
+defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, loadv4f32, v4f32>;
+defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, loadv8f32, v8f32>,
VEX_L;
}
let ExeDomain = SSEPackedDouble in {
-defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
-defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>,
+defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, loadv2f64, v2f64>;
+defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, loadv4f64, v4f64>,
VEX_L;
}
}
@@ -6595,30 +6571,33 @@ defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>,
let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"popcnt{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)]>,
+ [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)],
+ IIC_SSE_POPCNT_RR>,
OpSize, XS;
def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"popcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctpop (loadi16 addr:$src))),
- (implicit EFLAGS)]>, OpSize, XS;
+ (implicit EFLAGS)], IIC_SSE_POPCNT_RM>, OpSize, XS;
def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"popcnt{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)]>,
+ [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)],
+ IIC_SSE_POPCNT_RR>,
XS;
def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"popcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (ctpop (loadi32 addr:$src))),
- (implicit EFLAGS)]>, XS;
+ (implicit EFLAGS)], IIC_SSE_POPCNT_RM>, XS;
def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"popcnt{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)]>,
+ [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)],
+ IIC_SSE_POPCNT_RR>,
XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"popcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (ctpop (loadi64 addr:$src))),
- (implicit EFLAGS)]>, XS;
+ (implicit EFLAGS)], IIC_SSE_POPCNT_RM>, XS;
}
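popcnt counts set bits and also defines EFLAGS, which the patterns above model with "(implicit EFLAGS)". A trivial C++ sketch (illustrative only; the 64-bit intrinsic needs a 64-bit target):

// Compile with -mpopcnt (or -msse4.2).
#include <immintrin.h>
#include <cassert>

int main() {
  assert(_mm_popcnt_u32(0xF0F0u) == 8);                   // popcnt{l}
  assert(_mm_popcnt_u64(0xFFFFFFFFFFFFFFFFull) == 64);    // popcnt{q}, 64-bit only
  return 0;
}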
@@ -6646,14 +6625,16 @@ defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Is2Addr = 1> {
+ Intrinsic IntId128, bit Is2Addr = 1,
+ OpndItins itins = DEFAULT_ITINS> {
let isCommutable = 1 in
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
+ [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))],
+ itins.rr>, OpSize;
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
!if(Is2Addr,
@@ -6661,7 +6642,8 @@ multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
(IntId128 VR128:$src1,
- (bitconvert (memopv2i64 addr:$src2))))]>, OpSize;
+ (bitconvert (memopv2i64 addr:$src2))))],
+ itins.rm>, OpSize;
}
/// SS41I_binop_rm_int_y - Simple SSE 4.1 binary operator
@@ -6677,14 +6659,15 @@ multiclass SS41I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(IntId256 VR256:$src1,
- (bitconvert (memopv4i64 addr:$src2))))]>, OpSize;
+ (bitconvert (loadv4i64 addr:$src2))))]>, OpSize;
}
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
- X86MemOperand x86memop, bit Is2Addr = 1> {
+ X86MemOperand x86memop, bit Is2Addr = 1,
+ OpndItins itins = DEFAULT_ITINS> {
let isCommutable = 1 in
def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
@@ -6707,21 +6690,21 @@ let Predicates = [HasAVX] in {
defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
0>, VEX_4V;
defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", X86smin, v16i8, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0>, VEX_4V;
defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", X86smin, v4i32, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0>, VEX_4V;
defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", X86umin, v4i32, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0>, VEX_4V;
defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v8i16, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0>, VEX_4V;
defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v16i8, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0>, VEX_4V;
defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v4i32, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0>, VEX_4V;
defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v4i32, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0>, VEX_4V;
defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v8i16, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0>, VEX_4V;
defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
0>, VEX_4V;
}
@@ -6731,21 +6714,21 @@ let Predicates = [HasAVX2] in {
defm VPACKUSDW : SS41I_binop_rm_int_y<0x2B, "vpackusdw",
int_x86_avx2_packusdw>, VEX_4V, VEX_L;
defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", X86smin, v32i8, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", X86smin, v8i32, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", X86umin, v8i32, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v16i16, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v32i8, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v8i32, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v8i32, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v16i16, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VPMULDQ : SS41I_binop_rm_int_y<0x28, "vpmuldq",
int_x86_avx2_pmul_dq>, VEX_4V, VEX_L;
}
@@ -6754,22 +6737,23 @@ let Constraints = "$src1 = $dst" in {
let isCommutable = 0 in
defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
defm PMINSB : SS48I_binop_rm<0x38, "pminsb", X86smin, v16i8, VR128,
- memopv2i64, i128mem>;
+ memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
defm PMINSD : SS48I_binop_rm<0x39, "pminsd", X86smin, v4i32, VR128,
- memopv2i64, i128mem>;
+ memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
defm PMINUD : SS48I_binop_rm<0x3B, "pminud", X86umin, v4i32, VR128,
- memopv2i64, i128mem>;
+ memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
defm PMINUW : SS48I_binop_rm<0x3A, "pminuw", X86umin, v8i16, VR128,
- memopv2i64, i128mem>;
+ memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
defm PMAXSB : SS48I_binop_rm<0x3C, "pmaxsb", X86smax, v16i8, VR128,
- memopv2i64, i128mem>;
+ memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
defm PMAXSD : SS48I_binop_rm<0x3D, "pmaxsd", X86smax, v4i32, VR128,
- memopv2i64, i128mem>;
+ memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
defm PMAXUD : SS48I_binop_rm<0x3F, "pmaxud", X86umax, v4i32, VR128,
- memopv2i64, i128mem>;
+ memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
defm PMAXUW : SS48I_binop_rm<0x3E, "pmaxuw", X86umax, v8i16, VR128,
- memopv2i64, i128mem>;
- defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
+ memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
+ defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq,
+ 1, SSE_INTMUL_ITINS_P>;
}
let Predicates = [HasAVX] in {
@@ -6787,15 +6771,16 @@ let Predicates = [HasAVX2] in {
let Constraints = "$src1 = $dst" in {
defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
- memopv2i64, i128mem>;
+ memopv2i64, i128mem, 1, SSE_PMULLD_ITINS>;
defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
- memopv2i64, i128mem>;
+ memopv2i64, i128mem, 1, SSE_INTALUQ_ITINS_P>;
}
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
- X86MemOperand x86memop, bit Is2Addr = 1> {
+ X86MemOperand x86memop, bit Is2Addr = 1,
+ OpndItins itins = DEFAULT_ITINS> {
let isCommutable = 1 in
def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, u32u8imm:$src3),
@@ -6804,7 +6789,7 @@ multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
- [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
+ [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))], itins.rr>,
OpSize;
def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
@@ -6815,7 +6800,7 @@ multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set RC:$dst,
(IntId RC:$src1,
- (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
+ (bitconvert (memop_frag addr:$src2)), imm:$src3))], itins.rm>,
OpSize;
}
@@ -6823,40 +6808,40 @@ let Predicates = [HasAVX] in {
let isCommutable = 0 in {
let ExeDomain = SSEPackedSingle in {
defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
- VR128, memopv4f32, f128mem, 0>, VEX_4V;
+ VR128, loadv4f32, f128mem, 0>, VEX_4V;
defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
- int_x86_avx_blend_ps_256, VR256, memopv8f32,
+ int_x86_avx_blend_ps_256, VR256, loadv8f32,
f256mem, 0>, VEX_4V, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
- VR128, memopv2f64, f128mem, 0>, VEX_4V;
+ VR128, loadv2f64, f128mem, 0>, VEX_4V;
defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
- int_x86_avx_blend_pd_256,VR256, memopv4f64,
+ int_x86_avx_blend_pd_256,VR256, loadv4f64,
f256mem, 0>, VEX_4V, VEX_L;
}
defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
- VR128, memopv2i64, i128mem, 0>, VEX_4V;
+ VR128, loadv2i64, i128mem, 0>, VEX_4V;
defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
- VR128, memopv2i64, i128mem, 0>, VEX_4V;
+ VR128, loadv2i64, i128mem, 0>, VEX_4V;
}
let ExeDomain = SSEPackedSingle in
defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
- VR128, memopv4f32, f128mem, 0>, VEX_4V;
+ VR128, loadv4f32, f128mem, 0>, VEX_4V;
let ExeDomain = SSEPackedDouble in
defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
- VR128, memopv2f64, f128mem, 0>, VEX_4V;
+ VR128, loadv2f64, f128mem, 0>, VEX_4V;
let ExeDomain = SSEPackedSingle in
defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
- VR256, memopv8f32, i256mem, 0>, VEX_4V, VEX_L;
+ VR256, loadv8f32, i256mem, 0>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2] in {
let isCommutable = 0 in {
defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
- VR256, memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ VR256, loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
- VR256, memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ VR256, loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
}
}
@@ -6864,21 +6849,27 @@ let Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
let ExeDomain = SSEPackedSingle in
defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
- VR128, memopv4f32, f128mem>;
+ VR128, memopv4f32, f128mem,
+ 1, SSE_INTALU_ITINS_P>;
let ExeDomain = SSEPackedDouble in
defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
- VR128, memopv2f64, f128mem>;
+ VR128, memopv2f64, f128mem,
+ 1, SSE_INTALU_ITINS_P>;
defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
- VR128, memopv2i64, i128mem>;
+ VR128, memopv2i64, i128mem,
+ 1, SSE_INTALU_ITINS_P>;
defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
- VR128, memopv2i64, i128mem>;
+ VR128, memopv2i64, i128mem,
+ 1, SSE_INTMUL_ITINS_P>;
}
let ExeDomain = SSEPackedSingle in
defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
- VR128, memopv4f32, f128mem>;
+ VR128, memopv4f32, f128mem, 1,
+ SSE_DPPS_ITINS>;
let ExeDomain = SSEPackedDouble in
defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
- VR128, memopv2f64, f128mem>;
+ VR128, memopv2f64, f128mem, 1,
+ SSE_DPPD_ITINS>;
}
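For the dpps/dppd forms above: the high four immediate bits select which lanes are multiplied and summed, the low four select which result lanes receive the sum. A short C++ sketch with the SSE4.1 intrinsic (illustrative only):

// Compile with -msse4.1.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128 a = _mm_setr_ps(1.f, 2.f, 3.f, 4.f);
  __m128 b = _mm_setr_ps(5.f, 6.f, 7.f, 8.f);
  // 0xF1: use all four lanes, write the sum to lane 0 only.
  __m128 d = _mm_dp_ps(a, b, 0xF1);
  std::printf("dot = %g\n", _mm_cvtss_f32(d));   // 1*5 + 2*6 + 3*7 + 4*8 = 70
  return 0;
}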
/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operators
@@ -6905,23 +6896,23 @@ multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedDouble in {
defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
- memopv2f64, int_x86_sse41_blendvpd>;
+ loadv2f64, int_x86_sse41_blendvpd>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
- memopv4f64, int_x86_avx_blendv_pd_256>, VEX_L;
+ loadv4f64, int_x86_avx_blendv_pd_256>, VEX_L;
} // ExeDomain = SSEPackedDouble
let ExeDomain = SSEPackedSingle in {
defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
- memopv4f32, int_x86_sse41_blendvps>;
+ loadv4f32, int_x86_sse41_blendvps>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
- memopv8f32, int_x86_avx_blendv_ps_256>, VEX_L;
+ loadv8f32, int_x86_avx_blendv_ps_256>, VEX_L;
} // ExeDomain = SSEPackedSingle
defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
- memopv2i64, int_x86_sse41_pblendvb>;
+ loadv2i64, int_x86_sse41_pblendvb>;
}
let Predicates = [HasAVX2] in {
defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
- memopv4i64, int_x86_avx2_pblendvb>, VEX_L;
+ loadv4i64, int_x86_avx2_pblendvb>, VEX_L;
}
let Predicates = [HasAVX] in {
@@ -6974,7 +6965,7 @@ let Predicates = [HasAVX] in {
let Predicates = [HasAVX2] in {
def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
(v32i8 VR256:$src2))),
- (VPBLENDVBYrr VR256:$src1, VR256:$src2, VR256:$mask)>;
+ (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
def : Pat<(v16i16 (X86Blendi (v16i16 VR256:$src1), (v16i16 VR256:$src2),
(imm:$mask))),
(VPBLENDWYrri VR256:$src1, VR256:$src2, imm:$mask)>;
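The operand swap in the VPBLENDVBYrr pattern above reflects the hardware convention: pblendvb takes each byte from its second source where the mask byte's sign bit is set and from the first source otherwise, whereas the vselect node picks its first operand where the condition is true. A small C++ sketch of that convention (illustrative, not part of this patch):

// _mm_blendv_epi8(a, b, mask): byte i comes from b when mask[i] has its MSB
// set, else from a -- i.e. the "true" operand is the second source.
// Compile with -msse4.1.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128i a    = _mm_set1_epi8(1);
  __m128i b    = _mm_set1_epi8(2);
  __m128i mask = _mm_setr_epi8(-1, 0, -1, 0, -1, 0, -1, 0,
                               -1, 0, -1, 0, -1, 0, -1, 0);
  __m128i r = _mm_blendv_epi8(a, b, mask);
  unsigned char o[16];
  _mm_storeu_si128((__m128i *)o, r);
  std::printf("%d %d %d %d\n", o[0], o[1], o[2], o[3]);   // 2 1 2 1
  return 0;
}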
@@ -6983,13 +6974,14 @@ let Predicates = [HasAVX2] in {
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
- X86MemOperand x86memop, Intrinsic IntId> {
+ X86MemOperand x86memop, Intrinsic IntId,
+ OpndItins itins = DEFAULT_ITINS> {
def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
- OpSize;
+ [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))],
+ itins.rr>, OpSize;
def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, x86memop:$src2),
@@ -6997,7 +6989,8 @@ let Uses = [XMM0], Constraints = "$src1 = $dst" in {
"\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst,
(IntId VR128:$src1,
- (bitconvert (mem_frag addr:$src2)), XMM0))]>, OpSize;
+ (bitconvert (mem_frag addr:$src2)), XMM0))],
+ itins.rm>, OpSize;
}
}
@@ -7011,17 +7004,17 @@ defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
int_x86_sse41_pblendvb>;
// Aliases with the implicit xmm0 argument
-def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
(BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
-def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
(BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
-def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
(BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
-def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
(BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
-def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
(PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
-def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
(PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
let Predicates = [UseSSE41] in {
@@ -7094,11 +7087,11 @@ multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
let Predicates = [HasAVX] in
defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0>, VEX_4V;
let Predicates = [HasAVX2] in
defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in
defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
@@ -7258,70 +7251,100 @@ let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
// crc intrinsic instruction
// This set of instructions are only rm, the only difference is the size
// of r and m.
+class SS42I_crc32r<bits<8> opc, string asm, RegisterClass RCOut,
+ RegisterClass RCIn, SDPatternOperator Int> :
+ SS42FI<opc, MRMSrcReg, (outs RCOut:$dst), (ins RCOut:$src1, RCIn:$src2),
+ !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
+ [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))], IIC_CRC32_REG>;
+
+class SS42I_crc32m<bits<8> opc, string asm, RegisterClass RCOut,
+ X86MemOperand x86memop, SDPatternOperator Int> :
+ SS42FI<opc, MRMSrcMem, (outs RCOut:$dst), (ins RCOut:$src1, x86memop:$src2),
+ !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
+ [(set RCOut:$dst, (Int RCOut:$src1, (load addr:$src2)))],
+ IIC_CRC32_MEM>;
+
let Constraints = "$src1 = $dst" in {
- def CRC32r32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i8mem:$src2),
- "crc32{b} \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_32_8 GR32:$src1,
- (load addr:$src2)))]>;
- def CRC32r32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR8:$src2),
- "crc32{b} \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
- def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i16mem:$src2),
- "crc32{w} \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_32_16 GR32:$src1,
- (load addr:$src2)))]>,
- OpSize;
- def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR16:$src2),
- "crc32{w} \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
- OpSize;
- def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i32mem:$src2),
- "crc32{l} \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_32_32 GR32:$src1,
- (load addr:$src2)))]>;
- def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "crc32{l} \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
- def CRC32r64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
- (ins GR64:$src1, i8mem:$src2),
- "crc32{b} \t{$src2, $src1|$src1, $src2}",
- [(set GR64:$dst,
- (int_x86_sse42_crc32_64_8 GR64:$src1,
- (load addr:$src2)))]>,
- REX_W;
- def CRC32r64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR8:$src2),
- "crc32{b} \t{$src2, $src1|$src1, $src2}",
- [(set GR64:$dst,
- (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
- REX_W;
- def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
- (ins GR64:$src1, i64mem:$src2),
- "crc32{q} \t{$src2, $src1|$src1, $src2}",
- [(set GR64:$dst,
- (int_x86_sse42_crc32_64_64 GR64:$src1,
- (load addr:$src2)))]>,
- REX_W;
- def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "crc32{q} \t{$src2, $src1|$src1, $src2}",
- [(set GR64:$dst,
- (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
- REX_W;
+ def CRC32r32m8 : SS42I_crc32m<0xF0, "crc32{b}", GR32, i8mem,
+ int_x86_sse42_crc32_32_8>;
+ def CRC32r32r8 : SS42I_crc32r<0xF0, "crc32{b}", GR32, GR8,
+ int_x86_sse42_crc32_32_8>;
+ def CRC32r32m16 : SS42I_crc32m<0xF1, "crc32{w}", GR32, i16mem,
+ int_x86_sse42_crc32_32_16>, OpSize;
+ def CRC32r32r16 : SS42I_crc32r<0xF1, "crc32{w}", GR32, GR16,
+ int_x86_sse42_crc32_32_16>, OpSize;
+ def CRC32r32m32 : SS42I_crc32m<0xF1, "crc32{l}", GR32, i32mem,
+ int_x86_sse42_crc32_32_32>;
+ def CRC32r32r32 : SS42I_crc32r<0xF1, "crc32{l}", GR32, GR32,
+ int_x86_sse42_crc32_32_32>;
+ def CRC32r64m64 : SS42I_crc32m<0xF1, "crc32{q}", GR64, i64mem,
+ int_x86_sse42_crc32_64_64>, REX_W;
+ def CRC32r64r64 : SS42I_crc32r<0xF1, "crc32{q}", GR64, GR64,
+ int_x86_sse42_crc32_64_64>, REX_W;
+ let hasSideEffects = 0 in {
+ let mayLoad = 1 in
+ def CRC32r64m8 : SS42I_crc32m<0xF0, "crc32{b}", GR64, i8mem,
+ null_frag>, REX_W;
+ def CRC32r64r8 : SS42I_crc32r<0xF0, "crc32{b}", GR64, GR8,
+ null_frag>, REX_W;
+ }
+}
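The crc32 classes above accumulate a CRC-32C (Castagnoli) checksum one operand at a time; the GR64 byte forms are defined with null_frag and hasSideEffects = 0 since they carry no selection pattern. A short C++ sketch of the accumulation using the SSE4.2 intrinsics (illustrative only):

// Compile with -msse4.2.
#include <immintrin.h>
#include <cstdio>

int main() {
  unsigned crc = 0xFFFFFFFFu;
  const unsigned char data[] = {'a', 'b', 'c'};
  for (unsigned char byte : data)
    crc = _mm_crc32_u8(crc, byte);        // crc32{b}
  crc = _mm_crc32_u32(crc, 0x12345678u);  // crc32{l}
  std::printf("crc32c = 0x%08x\n", crc ^ 0xFFFFFFFFu);
  return 0;
}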
+
+//===----------------------------------------------------------------------===//
+// SHA-NI Instructions
+//===----------------------------------------------------------------------===//
+
+multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
+ bit UsesXMM0 = 0> {
+ def rr : I<Opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ [!if(UsesXMM0,
+ (set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
+ (set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>, T8;
+
+ def rm : I<Opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ [!if(UsesXMM0,
+ (set VR128:$dst, (IntId VR128:$src1,
+ (bc_v4i32 (memopv2i64 addr:$src2)), XMM0)),
+ (set VR128:$dst, (IntId VR128:$src1,
+ (bc_v4i32 (memopv2i64 addr:$src2)))))]>, T8;
+}
+
+let Constraints = "$src1 = $dst", Predicates = [HasSHA] in {
+ def SHA1RNDS4rri : Ii8<0xCC, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(set VR128:$dst,
+ (int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
+ (i8 imm:$src3)))]>, TA;
+ def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(set VR128:$dst,
+ (int_x86_sha1rnds4 VR128:$src1,
+ (bc_v4i32 (memopv2i64 addr:$src2)),
+ (i8 imm:$src3)))]>, TA;
+
+ defm SHA1NEXTE : SHAI_binop<0xC8, "sha1nexte", int_x86_sha1nexte>;
+ defm SHA1MSG1 : SHAI_binop<0xC9, "sha1msg1", int_x86_sha1msg1>;
+ defm SHA1MSG2 : SHAI_binop<0xCA, "sha1msg2", int_x86_sha1msg2>;
+
+ let Uses=[XMM0] in
+ defm SHA256RNDS2 : SHAI_binop<0xCB, "sha256rnds2", int_x86_sha256rnds2, 1>;
+
+ defm SHA256MSG1 : SHAI_binop<0xCC, "sha256msg1", int_x86_sha256msg1>;
+ defm SHA256MSG2 : SHAI_binop<0xCD, "sha256msg2", int_x86_sha256msg2>;
}
+// Aliases with explicit %xmm0
+def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
+ (SHA256RNDS2rr VR128:$dst, VR128:$src2)>;
+def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
+ (SHA256RNDS2rm VR128:$dst, i128mem:$src2)>;
+
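The SHA-NI definitions above map onto the SHA extension intrinsics; sha256rnds2 takes its round constants implicitly in XMM0, which is why the definition is wrapped in Uses = [XMM0] while the C-level intrinsic passes that operand explicitly. A hedged, compile-only C++ sketch of how these instructions are typically driven (a full message schedule is omitted; requires -msha and SHA-NI hardware to execute):

#include <immintrin.h>

// One double-round of the SHA-256 state update; msg_plus_k carries W+K for
// two rounds (the xmm0 operand at the instruction level).
__m128i sha256_two_rounds(__m128i state0, __m128i state1, __m128i msg_plus_k) {
  return _mm_sha256rnds2_epu32(state0, state1, msg_plus_k);
}

// First half of the SHA-1 message-schedule recurrence (sha1msg1).
__m128i sha1_schedule_step(__m128i w0, __m128i w1) {
  return _mm_sha1msg1_epu32(w0, w1);
}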
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//
@@ -7378,7 +7401,7 @@ let Predicates = [HasAVX, HasAES] in {
def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1),
"vaesimc\t{$src1, $dst|$dst, $src1}",
- [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
+ [(set VR128:$dst, (int_x86_aesni_aesimc (loadv2i64 addr:$src1)))]>,
OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
@@ -7405,7 +7428,7 @@ let Predicates = [HasAVX, HasAES] in {
(ins i128mem:$src1, i8imm:$src2),
"vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
+ (int_x86_aesni_aeskeygenassist (loadv2i64 addr:$src1), imm:$src2))]>,
OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
@@ -7436,7 +7459,7 @@ def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
- (memopv2i64 addr:$src2), imm:$src3))]>;
+ (loadv2i64 addr:$src2), imm:$src3))]>;
// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
@@ -7444,13 +7467,15 @@ def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
"pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst,
- (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>;
+ (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))],
+ IIC_SSE_PCLMULQDQ_RR>;
def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
- (memopv2i64 addr:$src2), imm:$src3))]>;
+ (memopv2i64 addr:$src2), imm:$src3))],
+ IIC_SSE_PCLMULQDQ_RM>;
} // Constraints = "$src1 = $dst"
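pclmulqdq, defined above, performs a carry-less (GF(2)) multiply of one 64-bit half from each source; immediate bit 0 selects the half of the first source and bit 4 the half of the second. A small C++ sketch with the PCLMUL intrinsic (illustrative only; 64-bit target assumed for the extract):

// Compile with -mpclmul.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128i a = _mm_set_epi64x(0, 0b101);            // x^2 + 1
  __m128i b = _mm_set_epi64x(0, 0b011);            // x + 1
  __m128i p = _mm_clmulepi64_si128(a, b, 0x00);    // low half of each source
  // (x^2+1)*(x+1) over GF(2) = x^3 + x^2 + x + 1 = 0b1111
  std::printf("product = 0x%llx\n",
              (unsigned long long)_mm_cvtsi128_si64(p));
  return 0;
}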
@@ -7581,62 +7606,62 @@ def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
}
let Predicates = [HasAVX] in {
-def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
+def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
(iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
(iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (memopv4f32 addr:$src2),
+def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
(iPTR imm)),
(VINSERTF128rm VR256:$src1, addr:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (memopv2f64 addr:$src2),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
(iPTR imm)),
(VINSERTF128rm VR256:$src1, addr:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
}
let Predicates = [HasAVX1Only] in {
-def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
+def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
(iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
(iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
(iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
(iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2),
+def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
(iPTR imm)),
(VINSERTF128rm VR256:$src1, addr:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1),
- (bc_v4i32 (memopv2i64 addr:$src2)),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
+ (bc_v4i32 (loadv2i64 addr:$src2)),
(iPTR imm)),
(VINSERTF128rm VR256:$src1, addr:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1),
- (bc_v16i8 (memopv2i64 addr:$src2)),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
+ (bc_v16i8 (loadv2i64 addr:$src2)),
(iPTR imm)),
(VINSERTF128rm VR256:$src1, addr:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1),
- (bc_v8i16 (memopv2i64 addr:$src2)),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
+ (bc_v8i16 (loadv2i64 addr:$src2)),
(iPTR imm)),
(VINSERTF128rm VR256:$src1, addr:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
}
//===----------------------------------------------------------------------===//
@@ -7656,59 +7681,59 @@ def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
// AVX1 patterns
let Predicates = [HasAVX] in {
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
+def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v4f32 (VEXTRACTF128rr
(v8f32 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
+ (EXTRACT_get_vextract128_imm VR128:$ext)))>;
+def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v2f64 (VEXTRACTF128rr
(v4f64 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+ (EXTRACT_get_vextract128_imm VR128:$ext)))>;
-def : Pat<(alignedstore (v4f32 (vextractf128_extract:$ext (v8f32 VR256:$src1),
- (iPTR imm))), addr:$dst),
+def : Pat<(store (v4f32 (vextract128_extract:$ext (v8f32 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
- (EXTRACT_get_vextractf128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v2f64 (vextractf128_extract:$ext (v4f64 VR256:$src1),
- (iPTR imm))), addr:$dst),
+ (EXTRACT_get_vextract128_imm VR128:$ext))>;
+def : Pat<(store (v2f64 (vextract128_extract:$ext (v4f64 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
- (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+ (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
let Predicates = [HasAVX1Only] in {
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
+def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v2i64 (VEXTRACTF128rr
(v4i64 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
+ (EXTRACT_get_vextract128_imm VR128:$ext)))>;
+def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v4i32 (VEXTRACTF128rr
(v8i32 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
+ (EXTRACT_get_vextract128_imm VR128:$ext)))>;
+def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v8i16 (VEXTRACTF128rr
(v16i16 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
+ (EXTRACT_get_vextract128_imm VR128:$ext)))>;
+def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v16i8 (VEXTRACTF128rr
(v32i8 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+ (EXTRACT_get_vextract128_imm VR128:$ext)))>;
-def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1),
+def : Pat<(alignedstore (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
(iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
- (EXTRACT_get_vextractf128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1),
+ (EXTRACT_get_vextract128_imm VR128:$ext))>;
+def : Pat<(alignedstore (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
(iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
- (EXTRACT_get_vextractf128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1),
+ (EXTRACT_get_vextract128_imm VR128:$ext))>;
+def : Pat<(alignedstore (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
(iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
- (EXTRACT_get_vextractf128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1),
+ (EXTRACT_get_vextract128_imm VR128:$ext))>;
+def : Pat<(alignedstore (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
(iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
- (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+ (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
//===----------------------------------------------------------------------===//
@@ -7780,15 +7805,15 @@ multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
let ExeDomain = SSEPackedSingle in {
defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
- memopv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
+ loadv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
- memopv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
+ loadv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
- memopv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
+ loadv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
- memopv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
+ loadv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
}
let Predicates = [HasAVX] in {
@@ -7796,15 +7821,15 @@ def : Pat<(v8i32 (X86VPermilp VR256:$src1, (i8 imm:$imm))),
(VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilp VR256:$src1, (i8 imm:$imm))),
(VPERMILPDYri VR256:$src1, imm:$imm)>;
-def : Pat<(v8i32 (X86VPermilp (bc_v8i32 (memopv4i64 addr:$src1)),
+def : Pat<(v8i32 (X86VPermilp (bc_v8i32 (loadv4i64 addr:$src1)),
(i8 imm:$imm))),
(VPERMILPSYmi addr:$src1, imm:$imm)>;
-def : Pat<(v4i64 (X86VPermilp (memopv4i64 addr:$src1), (i8 imm:$imm))),
+def : Pat<(v4i64 (X86VPermilp (loadv4i64 addr:$src1), (i8 imm:$imm))),
(VPERMILPDYmi addr:$src1, imm:$imm)>;
def : Pat<(v2i64 (X86VPermilp VR128:$src1, (i8 imm:$imm))),
(VPERMILPDri VR128:$src1, imm:$imm)>;
-def : Pat<(v2i64 (X86VPermilp (memopv2i64 addr:$src1), (i8 imm:$imm))),
+def : Pat<(v2i64 (X86VPermilp (loadv2i64 addr:$src1), (i8 imm:$imm))),
(VPERMILPDmi addr:$src1, imm:$imm)>;
}
@@ -7820,7 +7845,7 @@ def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, i8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv8f32 addr:$src2),
+ [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv8f32 addr:$src2),
(i8 imm:$src3)))]>, VEX_4V, VEX_L;
}
@@ -7828,7 +7853,7 @@ let Predicates = [HasAVX] in {
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
- (memopv4f64 addr:$src2), (i8 imm:$imm))),
+ (loadv4f64 addr:$src2), (i8 imm:$imm))),
(VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
@@ -7843,16 +7868,16 @@ def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
- (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
+ (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
(VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
- (memopv4i64 addr:$src2), (i8 imm:$imm))),
+ (loadv4i64 addr:$src2), (i8 imm:$imm))),
(VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
- (bc_v32i8 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
+ (bc_v32i8 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
(VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
- (bc_v16i16 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
+ (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
(VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
@@ -7896,7 +7921,7 @@ multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
TA, OpSize, VEX;
}
-let Predicates = [HasAVX, HasF16C] in {
+let Predicates = [HasF16C] in {
defm VCVTPH2PS : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
defm VCVTPS2PH : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
@@ -7930,9 +7955,9 @@ multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
let isCommutable = 0 in {
defm VPBLENDD : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_128,
- VR128, memopv2i64, i128mem>;
+ VR128, loadv2i64, i128mem>;
defm VPBLENDDY : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_256,
- VR256, memopv4i64, i256mem>, VEX_L;
+ VR256, loadv4i64, i256mem>, VEX_L;
}
def : Pat<(v4i32 (X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2),
@@ -8110,9 +8135,9 @@ multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
VEX_4V, VEX_L;
}
-defm VPERMD : avx2_perm<0x36, "vpermd", memopv4i64, v8i32>;
+defm VPERMD : avx2_perm<0x36, "vpermd", loadv4i64, v8i32>;
let ExeDomain = SSEPackedSingle in
-defm VPERMPS : avx2_perm<0x16, "vpermps", memopv8f32, v8f32>;
+defm VPERMPS : avx2_perm<0x16, "vpermps", loadv8f32, v8f32>;
multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
ValueType OpVT> {
@@ -8132,9 +8157,9 @@ multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
(i8 imm:$src2))))]>, VEX, VEX_L;
}
-defm VPERMQ : avx2_perm_imm<0x00, "vpermq", memopv4i64, v4i64>, VEX_W;
+defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64>, VEX_W;
let ExeDomain = SSEPackedDouble in
-defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", memopv4f64, v4f64>, VEX_W;
+defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64>, VEX_W;
//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Floating-Point Values in 128-bit chunks
@@ -8147,7 +8172,7 @@ def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, i8imm:$src3),
"vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv4i64 addr:$src2),
+ [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4i64 addr:$src2),
(i8 imm:$src3)))]>, VEX_4V, VEX_L;
let Predicates = [HasAVX2] in {
@@ -8158,13 +8183,13 @@ def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
-def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (memopv4i64 addr:$src2)),
+def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (loadv4i64 addr:$src2)),
(i8 imm:$imm))),
(VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
- (bc_v16i16 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
+ (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
(VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
-def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)),
+def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)),
(i8 imm:$imm))),
(VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
@@ -8186,42 +8211,42 @@ def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
}
let Predicates = [HasAVX2] in {
-def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
+def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
(iPTR imm)),
(VINSERTI128rr VR256:$src1, VR128:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
(iPTR imm)),
(VINSERTI128rr VR256:$src1, VR128:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
(iPTR imm)),
(VINSERTI128rr VR256:$src1, VR128:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
(iPTR imm)),
(VINSERTI128rr VR256:$src1, VR128:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2),
+def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
(iPTR imm)),
(VINSERTI128rm VR256:$src1, addr:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1),
- (bc_v4i32 (memopv2i64 addr:$src2)),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
+ (bc_v4i32 (loadv2i64 addr:$src2)),
(iPTR imm)),
(VINSERTI128rm VR256:$src1, addr:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1),
- (bc_v16i8 (memopv2i64 addr:$src2)),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
+ (bc_v16i8 (loadv2i64 addr:$src2)),
(iPTR imm)),
(VINSERTI128rm VR256:$src1, addr:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1),
- (bc_v8i16 (memopv2i64 addr:$src2)),
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
+ (bc_v8i16 (loadv2i64 addr:$src2)),
(iPTR imm)),
(VINSERTI128rm VR256:$src1, addr:$src2,
- (INSERT_get_vinsertf128_imm VR256:$ins))>;
+ (INSERT_get_vinsert128_imm VR256:$ins))>;
}
//===----------------------------------------------------------------------===//
@@ -8240,39 +8265,39 @@ def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
VEX, VEX_L;
let Predicates = [HasAVX2] in {
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
+def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v2i64 (VEXTRACTI128rr
(v4i64 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
+ (EXTRACT_get_vextract128_imm VR128:$ext)))>;
+def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v4i32 (VEXTRACTI128rr
(v8i32 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
+ (EXTRACT_get_vextract128_imm VR128:$ext)))>;
+def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v8i16 (VEXTRACTI128rr
(v16i16 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
+ (EXTRACT_get_vextract128_imm VR128:$ext)))>;
+def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v16i8 (VEXTRACTI128rr
(v32i8 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+ (EXTRACT_get_vextract128_imm VR128:$ext)))>;
-def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1),
- (iPTR imm))), addr:$dst),
+def : Pat<(store (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTI128mr addr:$dst, VR256:$src1,
- (EXTRACT_get_vextractf128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1),
- (iPTR imm))), addr:$dst),
+ (EXTRACT_get_vextract128_imm VR128:$ext))>;
+def : Pat<(store (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTI128mr addr:$dst, VR256:$src1,
- (EXTRACT_get_vextractf128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1),
- (iPTR imm))), addr:$dst),
+ (EXTRACT_get_vextract128_imm VR128:$ext))>;
+def : Pat<(store (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTI128mr addr:$dst, VR256:$src1,
- (EXTRACT_get_vextractf128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1),
- (iPTR imm))), addr:$dst),
+ (EXTRACT_get_vextract128_imm VR128:$ext))>;
+def : Pat<(store (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTI128mr addr:$dst, VR256:$src1,
- (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+ (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
//===----------------------------------------------------------------------===//
@@ -8328,7 +8353,7 @@ multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode VR128:$src1,
- (vt128 (bitconvert (memopv2i64 addr:$src2))))))]>,
+ (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
VEX_4V;
def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2),
@@ -8341,7 +8366,7 @@ multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(vt256 (OpNode VR256:$src1,
- (vt256 (bitconvert (memopv4i64 addr:$src2))))))]>,
+ (vt256 (bitconvert (loadv4i64 addr:$src2))))))]>,
VEX_4V, VEX_L;
}
@@ -8367,7 +8392,9 @@ multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
[]>, VEX_4VOp3, VEX_L;
}
-let mayLoad = 1, Constraints = "$src1 = $dst, $mask = $mask_wb" in {
+let mayLoad = 1, Constraints
+ = "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
+ in {
defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
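The memopv*/loadv* substitutions above swap an alignment-checking load fragment for a plain one: memop* only folds a load that is known to be sufficiently aligned (or when the subtarget tolerates unaligned vector memory), whereas loadv* folds any load. VEX-encoded AVX/AVX2 instructions do not fault on unaligned memory operands, so their patterns can use the unchecked fragments; the alignedstore -> store changes on the VEXTRACT patterns relax the store side for the same reason. A rough sketch of the two fragment styles, in the spirit of X86InstrFragmentsSIMD.td (the _sketch names and the exact predicate wording are illustrative, not the upstream definitions):

// Alignment-checked fragment: folds the load only if it is 16-byte aligned
// or the subtarget allows unaligned vector memory operands.
def memop_sketch : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem()
      || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
def memopv2i64_sketch : PatFrag<(ops node:$ptr), (v2i64 (memop_sketch node:$ptr))>;

// Unchecked fragment: folds any v2i64 load regardless of alignment, which is
// what a VEX-encoded instruction with a memory operand can accept.
def loadv2i64_sketch : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;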
diff --git a/lib/Target/X86/X86InstrSVM.td b/lib/Target/X86/X86InstrSVM.td
index 757dcd0b5dcb..0191c01c349f 100644
--- a/lib/Target/X86/X86InstrSVM.td
+++ b/lib/Target/X86/X86InstrSVM.td
@@ -26,37 +26,37 @@ def CLGI : I<0x01, MRM_DD, (outs), (ins), "clgi", []>, TB;
// 0F 01 DE
let Uses = [EAX] in
-def SKINIT : I<0x01, MRM_DE, (outs), (ins), "skinit\t{%eax|EAX}", []>, TB;
+def SKINIT : I<0x01, MRM_DE, (outs), (ins), "skinit\t{%eax|eax}", []>, TB;
// 0F 01 D8
let Uses = [EAX] in
def VMRUN32 : I<0x01, MRM_D8, (outs), (ins),
- "vmrun\t{%eax|EAX}", []>, TB, Requires<[In32BitMode]>;
+ "vmrun\t{%eax|eax}", []>, TB, Requires<[In32BitMode]>;
let Uses = [RAX] in
def VMRUN64 : I<0x01, MRM_D8, (outs), (ins),
- "vmrun\t{%rax|RAX}", []>, TB, Requires<[In64BitMode]>;
+ "vmrun\t{%rax|rax}", []>, TB, Requires<[In64BitMode]>;
// 0F 01 DA
let Uses = [EAX] in
def VMLOAD32 : I<0x01, MRM_DA, (outs), (ins),
- "vmload\t{%eax|EAX}", []>, TB, Requires<[In32BitMode]>;
+ "vmload\t{%eax|eax}", []>, TB, Requires<[In32BitMode]>;
let Uses = [RAX] in
def VMLOAD64 : I<0x01, MRM_DA, (outs), (ins),
- "vmload\t{%rax|RAX}", []>, TB, Requires<[In64BitMode]>;
+ "vmload\t{%rax|rax}", []>, TB, Requires<[In64BitMode]>;
// 0F 01 DB
let Uses = [EAX] in
def VMSAVE32 : I<0x01, MRM_DB, (outs), (ins),
- "vmsave\t{%eax|EAX}", []>, TB, Requires<[In32BitMode]>;
+ "vmsave\t{%eax|eax}", []>, TB, Requires<[In32BitMode]>;
let Uses = [RAX] in
def VMSAVE64 : I<0x01, MRM_DB, (outs), (ins),
- "vmsave\t{%rax|RAX}", []>, TB, Requires<[In64BitMode]>;
+ "vmsave\t{%rax|rax}", []>, TB, Requires<[In64BitMode]>;
// 0F 01 DF
let Uses = [EAX, ECX] in
def INVLPGA32 : I<0x01, MRM_DF, (outs), (ins),
- "invlpga\t{%ecx, %eax|EAX, ECX}", []>, TB, Requires<[In32BitMode]>;
+ "invlpga\t{%ecx, %eax|eax, ecx}", []>, TB, Requires<[In32BitMode]>;
let Uses = [RAX, ECX] in
def INVLPGA64 : I<0x01, MRM_DF, (outs), (ins),
- "invlpga\t{%ecx, %rax|RAX, ECX}", []>, TB, Requires<[In64BitMode]>;
+ "invlpga\t{%ecx, %rax|rax, ecx}", []>, TB, Requires<[In64BitMode]>;
diff --git a/lib/Target/X86/X86InstrShiftRotate.td b/lib/Target/X86/X86InstrShiftRotate.td
index 89c1a6891396..1937770d55ee 100644
--- a/lib/Target/X86/X86InstrShiftRotate.td
+++ b/lib/Target/X86/X86InstrShiftRotate.td
@@ -18,16 +18,16 @@ let Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL] in {
def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
- "shl{b}\t{%cl, $dst|$dst, CL}",
+ "shl{b}\t{%cl, $dst|$dst, cl}",
[(set GR8:$dst, (shl GR8:$src1, CL))], IIC_SR>;
def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
- "shl{w}\t{%cl, $dst|$dst, CL}",
+ "shl{w}\t{%cl, $dst|$dst, cl}",
[(set GR16:$dst, (shl GR16:$src1, CL))], IIC_SR>, OpSize;
def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
- "shl{l}\t{%cl, $dst|$dst, CL}",
+ "shl{l}\t{%cl, $dst|$dst, cl}",
[(set GR32:$dst, (shl GR32:$src1, CL))], IIC_SR>;
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
- "shl{q}\t{%cl, $dst|$dst, CL}",
+ "shl{q}\t{%cl, $dst|$dst, cl}",
[(set GR64:$dst, (shl GR64:$src1, CL))], IIC_SR>;
} // Uses = [CL]
@@ -70,17 +70,17 @@ let SchedRW = [WriteShiftLd, WriteRMW] in {
// using CL?
let Uses = [CL] in {
def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
- "shl{b}\t{%cl, $dst|$dst, CL}",
+ "shl{b}\t{%cl, $dst|$dst, cl}",
[(store (shl (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>;
def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
- "shl{w}\t{%cl, $dst|$dst, CL}",
+ "shl{w}\t{%cl, $dst|$dst, cl}",
[(store (shl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>,
OpSize;
def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
- "shl{l}\t{%cl, $dst|$dst, CL}",
+ "shl{l}\t{%cl, $dst|$dst, cl}",
[(store (shl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>;
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
- "shl{q}\t{%cl, $dst|$dst, CL}",
+ "shl{q}\t{%cl, $dst|$dst, cl}",
[(store (shl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>;
}
def SHL8mi : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, i8imm:$src),
@@ -124,16 +124,16 @@ def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL] in {
def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
- "shr{b}\t{%cl, $dst|$dst, CL}",
+ "shr{b}\t{%cl, $dst|$dst, cl}",
[(set GR8:$dst, (srl GR8:$src1, CL))], IIC_SR>;
def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
- "shr{w}\t{%cl, $dst|$dst, CL}",
+ "shr{w}\t{%cl, $dst|$dst, cl}",
[(set GR16:$dst, (srl GR16:$src1, CL))], IIC_SR>, OpSize;
def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
- "shr{l}\t{%cl, $dst|$dst, CL}",
+ "shr{l}\t{%cl, $dst|$dst, cl}",
[(set GR32:$dst, (srl GR32:$src1, CL))], IIC_SR>;
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
- "shr{q}\t{%cl, $dst|$dst, CL}",
+ "shr{q}\t{%cl, $dst|$dst, cl}",
[(set GR64:$dst, (srl GR64:$src1, CL))], IIC_SR>;
}
@@ -171,17 +171,17 @@ def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
let SchedRW = [WriteShiftLd, WriteRMW] in {
let Uses = [CL] in {
def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
- "shr{b}\t{%cl, $dst|$dst, CL}",
+ "shr{b}\t{%cl, $dst|$dst, cl}",
[(store (srl (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>;
def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
- "shr{w}\t{%cl, $dst|$dst, CL}",
+ "shr{w}\t{%cl, $dst|$dst, cl}",
[(store (srl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>,
OpSize;
def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
- "shr{l}\t{%cl, $dst|$dst, CL}",
+ "shr{l}\t{%cl, $dst|$dst, cl}",
[(store (srl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>;
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
- "shr{q}\t{%cl, $dst|$dst, CL}",
+ "shr{q}\t{%cl, $dst|$dst, cl}",
[(store (srl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>;
}
def SHR8mi : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src),
@@ -224,19 +224,19 @@ def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL] in {
def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
- "sar{b}\t{%cl, $dst|$dst, CL}",
+ "sar{b}\t{%cl, $dst|$dst, cl}",
[(set GR8:$dst, (sra GR8:$src1, CL))],
IIC_SR>;
def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
- "sar{w}\t{%cl, $dst|$dst, CL}",
+ "sar{w}\t{%cl, $dst|$dst, cl}",
[(set GR16:$dst, (sra GR16:$src1, CL))],
IIC_SR>, OpSize;
def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
- "sar{l}\t{%cl, $dst|$dst, CL}",
+ "sar{l}\t{%cl, $dst|$dst, cl}",
[(set GR32:$dst, (sra GR32:$src1, CL))],
IIC_SR>;
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
- "sar{q}\t{%cl, $dst|$dst, CL}",
+ "sar{q}\t{%cl, $dst|$dst, cl}",
[(set GR64:$dst, (sra GR64:$src1, CL))],
IIC_SR>;
}
@@ -283,19 +283,19 @@ def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
let SchedRW = [WriteShiftLd, WriteRMW] in {
let Uses = [CL] in {
def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
- "sar{b}\t{%cl, $dst|$dst, CL}",
+ "sar{b}\t{%cl, $dst|$dst, cl}",
[(store (sra (loadi8 addr:$dst), CL), addr:$dst)],
IIC_SR>;
def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
- "sar{w}\t{%cl, $dst|$dst, CL}",
+ "sar{w}\t{%cl, $dst|$dst, cl}",
[(store (sra (loadi16 addr:$dst), CL), addr:$dst)],
IIC_SR>, OpSize;
def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
- "sar{l}\t{%cl, $dst|$dst, CL}",
+ "sar{l}\t{%cl, $dst|$dst, cl}",
[(store (sra (loadi32 addr:$dst), CL), addr:$dst)],
IIC_SR>;
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
- "sar{q}\t{%cl, $dst|$dst, CL}",
+ "sar{q}\t{%cl, $dst|$dst, cl}",
[(store (sra (loadi64 addr:$dst), CL), addr:$dst)],
IIC_SR>;
}
@@ -349,7 +349,7 @@ def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
"rcl{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
- "rcl{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcl{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
"rcl{w}\t$dst", [], IIC_SR>, OpSize;
@@ -357,7 +357,7 @@ def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
"rcl{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize;
let Uses = [CL] in
def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
- "rcl{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize;
+ "rcl{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize;
def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
"rcl{l}\t$dst", [], IIC_SR>;
@@ -365,7 +365,7 @@ def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
"rcl{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
- "rcl{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcl{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
@@ -374,7 +374,7 @@ def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$cnt),
"rcl{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
- "rcl{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcl{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
@@ -383,7 +383,7 @@ def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
"rcr{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
- "rcr{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcr{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
"rcr{w}\t$dst", [], IIC_SR>, OpSize;
@@ -391,7 +391,7 @@ def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
"rcr{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize;
let Uses = [CL] in
def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
- "rcr{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize;
+ "rcr{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize;
def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
"rcr{l}\t$dst", [], IIC_SR>;
@@ -399,7 +399,7 @@ def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
"rcr{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
- "rcr{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
"rcr{q}\t$dst", [], IIC_SR>;
@@ -407,7 +407,7 @@ def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$cnt),
"rcr{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
- "rcr{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcr{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
} // Constraints = "$src = $dst"
@@ -448,22 +448,22 @@ def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, i8imm:$cnt),
let Uses = [CL] in {
def RCL8mCL : I<0xD2, MRM2m, (outs), (ins i8mem:$dst),
- "rcl{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcl{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst),
- "rcl{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize;
+ "rcl{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize;
def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst),
- "rcl{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcl{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
- "rcl{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcl{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCR8mCL : I<0xD2, MRM3m, (outs), (ins i8mem:$dst),
- "rcr{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcr{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
- "rcr{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize;
+ "rcr{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize;
def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
- "rcr{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
- "rcr{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
+ "rcr{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
}
} // SchedRW
} // hasSideEffects = 0
@@ -472,16 +472,16 @@ let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
// FIXME: provide shorter instructions when imm8 == 1
let Uses = [CL] in {
def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
- "rol{b}\t{%cl, $dst|$dst, CL}",
+ "rol{b}\t{%cl, $dst|$dst, cl}",
[(set GR8:$dst, (rotl GR8:$src1, CL))], IIC_SR>;
def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
- "rol{w}\t{%cl, $dst|$dst, CL}",
+ "rol{w}\t{%cl, $dst|$dst, cl}",
[(set GR16:$dst, (rotl GR16:$src1, CL))], IIC_SR>, OpSize;
def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
- "rol{l}\t{%cl, $dst|$dst, CL}",
+ "rol{l}\t{%cl, $dst|$dst, cl}",
[(set GR32:$dst, (rotl GR32:$src1, CL))], IIC_SR>;
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
- "rol{q}\t{%cl, $dst|$dst, CL}",
+ "rol{q}\t{%cl, $dst|$dst, cl}",
[(set GR64:$dst, (rotl GR64:$src1, CL))], IIC_SR>;
}
@@ -525,19 +525,19 @@ def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
let SchedRW = [WriteShiftLd, WriteRMW] in {
let Uses = [CL] in {
def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
- "rol{b}\t{%cl, $dst|$dst, CL}",
+ "rol{b}\t{%cl, $dst|$dst, cl}",
[(store (rotl (loadi8 addr:$dst), CL), addr:$dst)],
IIC_SR>;
def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
- "rol{w}\t{%cl, $dst|$dst, CL}",
+ "rol{w}\t{%cl, $dst|$dst, cl}",
[(store (rotl (loadi16 addr:$dst), CL), addr:$dst)],
IIC_SR>, OpSize;
def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
- "rol{l}\t{%cl, $dst|$dst, CL}",
+ "rol{l}\t{%cl, $dst|$dst, cl}",
[(store (rotl (loadi32 addr:$dst), CL), addr:$dst)],
IIC_SR>;
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
- "rol{q}\t{%cl, $dst|$dst, %cl}",
+ "rol{q}\t{%cl, $dst|$dst, cl}",
[(store (rotl (loadi64 addr:$dst), CL), addr:$dst)],
IIC_SR>;
}
@@ -582,16 +582,16 @@ def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL] in {
def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
- "ror{b}\t{%cl, $dst|$dst, CL}",
+ "ror{b}\t{%cl, $dst|$dst, cl}",
[(set GR8:$dst, (rotr GR8:$src1, CL))], IIC_SR>;
def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
- "ror{w}\t{%cl, $dst|$dst, CL}",
+ "ror{w}\t{%cl, $dst|$dst, cl}",
[(set GR16:$dst, (rotr GR16:$src1, CL))], IIC_SR>, OpSize;
def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
- "ror{l}\t{%cl, $dst|$dst, CL}",
+ "ror{l}\t{%cl, $dst|$dst, cl}",
[(set GR32:$dst, (rotr GR32:$src1, CL))], IIC_SR>;
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
- "ror{q}\t{%cl, $dst|$dst, CL}",
+ "ror{q}\t{%cl, $dst|$dst, cl}",
[(set GR64:$dst, (rotr GR64:$src1, CL))], IIC_SR>;
}
@@ -635,19 +635,19 @@ def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
let SchedRW = [WriteShiftLd, WriteRMW] in {
let Uses = [CL] in {
def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
- "ror{b}\t{%cl, $dst|$dst, CL}",
+ "ror{b}\t{%cl, $dst|$dst, cl}",
[(store (rotr (loadi8 addr:$dst), CL), addr:$dst)],
IIC_SR>;
def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
- "ror{w}\t{%cl, $dst|$dst, CL}",
+ "ror{w}\t{%cl, $dst|$dst, cl}",
[(store (rotr (loadi16 addr:$dst), CL), addr:$dst)],
IIC_SR>, OpSize;
def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
- "ror{l}\t{%cl, $dst|$dst, CL}",
+ "ror{l}\t{%cl, $dst|$dst, cl}",
[(store (rotr (loadi32 addr:$dst), CL), addr:$dst)],
IIC_SR>;
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
- "ror{q}\t{%cl, $dst|$dst, CL}",
+ "ror{q}\t{%cl, $dst|$dst, cl}",
[(store (rotr (loadi64 addr:$dst), CL), addr:$dst)],
IIC_SR>;
}
@@ -699,35 +699,35 @@ let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL] in {
def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
- "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))],
IIC_SHD16_REG_CL>,
TB, OpSize;
def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
- "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))],
IIC_SHD16_REG_CL>,
TB, OpSize;
def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
- "shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))],
IIC_SHD32_REG_CL>, TB;
def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
- "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))],
IIC_SHD32_REG_CL>, TB;
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
- "shld{q}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))],
IIC_SHD64_REG_CL>,
TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
- "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))],
IIC_SHD64_REG_CL>,
TB;
@@ -782,29 +782,29 @@ def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
let SchedRW = [WriteShiftLd, WriteRMW] in {
let Uses = [CL] in {
def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL),
addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize;
def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL),
addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize;
def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- "shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL),
addr:$dst)], IIC_SHD32_MEM_CL>, TB;
def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
addr:$dst)], IIC_SHD32_MEM_CL>, TB;
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "shld{q}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
addr:$dst)], IIC_SHD64_MEM_CL>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
addr:$dst)], IIC_SHD64_MEM_CL>, TB;
}
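The CL -> cl edits in this file (and the register-name case changes in the files that follow) touch only the Intel-syntax half of each assembly string. These strings describe both dialects at once: inside {...|...} the text before the bar is printed for AT&T output and the text after it for Intel output, while a lone {b}, {w}, {l} or {q} is a size suffix that only the AT&T variant emits. A minimal, hypothetical def showing the anatomy (SHLexample is not a real record; it simply mirrors SHL8rCL above):

// AT&T prints "shlb %cl, <dst>"; Intel prints "shl <dst>, cl", using the
// lowercase register name this change introduces.
let Uses = [CL], Constraints = "$src1 = $dst" in
def SHLexample : I<0xD2, MRM4r, (outs GR8:$dst), (ins GR8:$src1),
                   "shl{b}\t{%cl, $dst|$dst, cl}",
                   [(set GR8:$dst, (shl GR8:$src1, CL))], IIC_SR>;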
diff --git a/lib/Target/X86/X86InstrSystem.td b/lib/Target/X86/X86InstrSystem.td
index bab3cdd00095..2196dc32e7a2 100644
--- a/lib/Target/X86/X86InstrSystem.td
+++ b/lib/Target/X86/X86InstrSystem.td
@@ -77,43 +77,43 @@ def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", [], IIC_IRET>,
let SchedRW = [WriteSystem] in {
let Defs = [AL], Uses = [DX] in
def IN8rr : I<0xEC, RawFrm, (outs), (ins),
- "in{b}\t{%dx, %al|AL, DX}", [], IIC_IN_RR>;
+ "in{b}\t{%dx, %al|al, dx}", [], IIC_IN_RR>;
let Defs = [AX], Uses = [DX] in
def IN16rr : I<0xED, RawFrm, (outs), (ins),
- "in{w}\t{%dx, %ax|AX, DX}", [], IIC_IN_RR>, OpSize;
+ "in{w}\t{%dx, %ax|ax, dx}", [], IIC_IN_RR>, OpSize;
let Defs = [EAX], Uses = [DX] in
def IN32rr : I<0xED, RawFrm, (outs), (ins),
- "in{l}\t{%dx, %eax|EAX, DX}", [], IIC_IN_RR>;
+ "in{l}\t{%dx, %eax|eax, dx}", [], IIC_IN_RR>;
let Defs = [AL] in
def IN8ri : Ii8<0xE4, RawFrm, (outs), (ins i8imm:$port),
- "in{b}\t{$port, %al|AL, $port}", [], IIC_IN_RI>;
+ "in{b}\t{$port, %al|al, $port}", [], IIC_IN_RI>;
let Defs = [AX] in
def IN16ri : Ii8<0xE5, RawFrm, (outs), (ins i8imm:$port),
- "in{w}\t{$port, %ax|AX, $port}", [], IIC_IN_RI>, OpSize;
+ "in{w}\t{$port, %ax|ax, $port}", [], IIC_IN_RI>, OpSize;
let Defs = [EAX] in
def IN32ri : Ii8<0xE5, RawFrm, (outs), (ins i8imm:$port),
- "in{l}\t{$port, %eax|EAX, $port}", [], IIC_IN_RI>;
+ "in{l}\t{$port, %eax|eax, $port}", [], IIC_IN_RI>;
let Uses = [DX, AL] in
def OUT8rr : I<0xEE, RawFrm, (outs), (ins),
- "out{b}\t{%al, %dx|DX, AL}", [], IIC_OUT_RR>;
+ "out{b}\t{%al, %dx|dx, al}", [], IIC_OUT_RR>;
let Uses = [DX, AX] in
def OUT16rr : I<0xEF, RawFrm, (outs), (ins),
- "out{w}\t{%ax, %dx|DX, AX}", [], IIC_OUT_RR>, OpSize;
+ "out{w}\t{%ax, %dx|dx, ax}", [], IIC_OUT_RR>, OpSize;
let Uses = [DX, EAX] in
def OUT32rr : I<0xEF, RawFrm, (outs), (ins),
- "out{l}\t{%eax, %dx|DX, EAX}", [], IIC_OUT_RR>;
+ "out{l}\t{%eax, %dx|dx, eax}", [], IIC_OUT_RR>;
let Uses = [AL] in
def OUT8ir : Ii8<0xE6, RawFrm, (outs), (ins i8imm:$port),
- "out{b}\t{%al, $port|$port, AL}", [], IIC_OUT_IR>;
+ "out{b}\t{%al, $port|$port, al}", [], IIC_OUT_IR>;
let Uses = [AX] in
def OUT16ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port),
- "out{w}\t{%ax, $port|$port, AX}", [], IIC_OUT_IR>, OpSize;
+ "out{w}\t{%ax, $port|$port, ax}", [], IIC_OUT_IR>, OpSize;
let Uses = [EAX] in
def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port),
- "out{l}\t{%eax, $port|$port, EAX}", [], IIC_OUT_IR>;
+ "out{l}\t{%eax, $port|$port, eax}", [], IIC_OUT_IR>;
def IN8 : I<0x6C, RawFrm, (outs), (ins), "ins{b}", [], IIC_INS>;
def IN16 : I<0x6D, RawFrm, (outs), (ins), "ins{w}", [], IIC_INS>, OpSize;
@@ -248,75 +248,75 @@ def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src),
"ltr{w}\t$src", [], IIC_LTR>, TB;
def PUSHCS16 : I<0x0E, RawFrm, (outs), (ins),
- "push{w}\t{%cs|CS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
+ "push{w}\t{%cs|cs}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
OpSize;
def PUSHCS32 : I<0x0E, RawFrm, (outs), (ins),
- "push{l}\t{%cs|CS}", [], IIC_PUSH_CS>, Requires<[In32BitMode]>;
+ "push{l}\t{%cs|cs}", [], IIC_PUSH_CS>, Requires<[In32BitMode]>;
def PUSHSS16 : I<0x16, RawFrm, (outs), (ins),
- "push{w}\t{%ss|SS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
+ "push{w}\t{%ss|ss}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
OpSize;
def PUSHSS32 : I<0x16, RawFrm, (outs), (ins),
- "push{l}\t{%ss|SS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
+ "push{l}\t{%ss|ss}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
def PUSHDS16 : I<0x1E, RawFrm, (outs), (ins),
- "push{w}\t{%ds|DS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
+ "push{w}\t{%ds|ds}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
OpSize;
def PUSHDS32 : I<0x1E, RawFrm, (outs), (ins),
- "push{l}\t{%ds|DS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
+ "push{l}\t{%ds|ds}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
def PUSHES16 : I<0x06, RawFrm, (outs), (ins),
- "push{w}\t{%es|ES}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
+ "push{w}\t{%es|es}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
OpSize;
def PUSHES32 : I<0x06, RawFrm, (outs), (ins),
- "push{l}\t{%es|ES}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
+ "push{l}\t{%es|es}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
def PUSHFS16 : I<0xa0, RawFrm, (outs), (ins),
- "push{w}\t{%fs|FS}", [], IIC_PUSH_SR>, OpSize, TB;
+ "push{w}\t{%fs|fs}", [], IIC_PUSH_SR>, OpSize, TB;
def PUSHFS32 : I<0xa0, RawFrm, (outs), (ins),
- "push{l}\t{%fs|FS}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>;
+ "push{l}\t{%fs|fs}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>;
def PUSHGS16 : I<0xa8, RawFrm, (outs), (ins),
- "push{w}\t{%gs|GS}", [], IIC_PUSH_SR>, OpSize, TB;
+ "push{w}\t{%gs|gs}", [], IIC_PUSH_SR>, OpSize, TB;
def PUSHGS32 : I<0xa8, RawFrm, (outs), (ins),
- "push{l}\t{%gs|GS}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>;
+ "push{l}\t{%gs|gs}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>;
def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins),
- "push{q}\t{%fs|FS}", [], IIC_PUSH_SR>, TB;
+ "push{q}\t{%fs|fs}", [], IIC_PUSH_SR>, TB;
def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins),
- "push{q}\t{%gs|GS}", [], IIC_PUSH_SR>, TB;
+ "push{q}\t{%gs|gs}", [], IIC_PUSH_SR>, TB;
// No "pop cs" instruction.
def POPSS16 : I<0x17, RawFrm, (outs), (ins),
- "pop{w}\t{%ss|SS}", [], IIC_POP_SR_SS>,
+ "pop{w}\t{%ss|ss}", [], IIC_POP_SR_SS>,
OpSize, Requires<[In32BitMode]>;
def POPSS32 : I<0x17, RawFrm, (outs), (ins),
- "pop{l}\t{%ss|SS}", [], IIC_POP_SR_SS>,
+ "pop{l}\t{%ss|ss}", [], IIC_POP_SR_SS>,
Requires<[In32BitMode]>;
def POPDS16 : I<0x1F, RawFrm, (outs), (ins),
- "pop{w}\t{%ds|DS}", [], IIC_POP_SR>,
+ "pop{w}\t{%ds|ds}", [], IIC_POP_SR>,
OpSize, Requires<[In32BitMode]>;
def POPDS32 : I<0x1F, RawFrm, (outs), (ins),
- "pop{l}\t{%ds|DS}", [], IIC_POP_SR>,
+ "pop{l}\t{%ds|ds}", [], IIC_POP_SR>,
Requires<[In32BitMode]>;
def POPES16 : I<0x07, RawFrm, (outs), (ins),
- "pop{w}\t{%es|ES}", [], IIC_POP_SR>,
+ "pop{w}\t{%es|es}", [], IIC_POP_SR>,
OpSize, Requires<[In32BitMode]>;
def POPES32 : I<0x07, RawFrm, (outs), (ins),
- "pop{l}\t{%es|ES}", [], IIC_POP_SR>,
+ "pop{l}\t{%es|es}", [], IIC_POP_SR>,
Requires<[In32BitMode]>;
def POPFS16 : I<0xa1, RawFrm, (outs), (ins),
- "pop{w}\t{%fs|FS}", [], IIC_POP_SR>, OpSize, TB;
+ "pop{w}\t{%fs|fs}", [], IIC_POP_SR>, OpSize, TB;
def POPFS32 : I<0xa1, RawFrm, (outs), (ins),
- "pop{l}\t{%fs|FS}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>;
+ "pop{l}\t{%fs|fs}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>;
def POPFS64 : I<0xa1, RawFrm, (outs), (ins),
- "pop{q}\t{%fs|FS}", [], IIC_POP_SR>, TB;
+ "pop{q}\t{%fs|fs}", [], IIC_POP_SR>, TB;
def POPGS16 : I<0xa9, RawFrm, (outs), (ins),
- "pop{w}\t{%gs|GS}", [], IIC_POP_SR>, OpSize, TB;
+ "pop{w}\t{%gs|gs}", [], IIC_POP_SR>, OpSize, TB;
def POPGS32 : I<0xa9, RawFrm, (outs), (ins),
- "pop{l}\t{%gs|GS}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>;
+ "pop{l}\t{%gs|gs}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>;
def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
- "pop{q}\t{%gs|GS}", [], IIC_POP_SR>, TB;
+ "pop{q}\t{%gs|gs}", [], IIC_POP_SR>, TB;
def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
diff --git a/lib/Target/X86/X86InstrTSX.td b/lib/Target/X86/X86InstrTSX.td
index 363a190aa854..59a6f1e376f0 100644
--- a/lib/Target/X86/X86InstrTSX.td
+++ b/lib/Target/X86/X86InstrTSX.td
@@ -37,3 +37,10 @@ def XTEST : I<0x01, MRM_D6, (outs), (ins),
def XABORT : Ii8<0xc6, MRM_F8, (outs), (ins i8imm:$imm),
"xabort\t$imm",
[(int_x86_xabort imm:$imm)]>, Requires<[HasRTM]>;
+
+// HLE prefixes
+
+def XACQUIRE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "xacquire", []>, Requires<[HasHLE]>;
+
+def XRELEASE_PREFIX : I<0xF3, RawFrm, (outs), (ins), "xrelease", []>, Requires<[HasHLE]>;
+
diff --git a/lib/Target/X86/X86InstrXOP.td b/lib/Target/X86/X86InstrXOP.td
index 2aa08fad7836..2b6ee5c39ed4 100644
--- a/lib/Target/X86/X86InstrXOP.td
+++ b/lib/Target/X86/X86InstrXOP.td
@@ -20,23 +20,21 @@ multiclass xop2op<bits<8> opc, string OpcodeStr, Intrinsic Int, PatFrag memop> {
[(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX;
}
-let isAsmParserOnly = 1 in {
- defm VPHSUBWD : xop2op<0xE2, "vphsubwd", int_x86_xop_vphsubwd, memopv2i64>;
- defm VPHSUBDQ : xop2op<0xE3, "vphsubdq", int_x86_xop_vphsubdq, memopv2i64>;
- defm VPHSUBBW : xop2op<0xE1, "vphsubbw", int_x86_xop_vphsubbw, memopv2i64>;
- defm VPHADDWQ : xop2op<0xC7, "vphaddwq", int_x86_xop_vphaddwq, memopv2i64>;
- defm VPHADDWD : xop2op<0xC6, "vphaddwd", int_x86_xop_vphaddwd, memopv2i64>;
- defm VPHADDUWQ : xop2op<0xD7, "vphadduwq", int_x86_xop_vphadduwq, memopv2i64>;
- defm VPHADDUWD : xop2op<0xD6, "vphadduwd", int_x86_xop_vphadduwd, memopv2i64>;
- defm VPHADDUDQ : xop2op<0xDB, "vphaddudq", int_x86_xop_vphaddudq, memopv2i64>;
- defm VPHADDUBW : xop2op<0xD1, "vphaddubw", int_x86_xop_vphaddubw, memopv2i64>;
- defm VPHADDUBQ : xop2op<0xD3, "vphaddubq", int_x86_xop_vphaddubq, memopv2i64>;
- defm VPHADDUBD : xop2op<0xD2, "vphaddubd", int_x86_xop_vphaddubd, memopv2i64>;
- defm VPHADDDQ : xop2op<0xCB, "vphadddq", int_x86_xop_vphadddq, memopv2i64>;
- defm VPHADDBW : xop2op<0xC1, "vphaddbw", int_x86_xop_vphaddbw, memopv2i64>;
- defm VPHADDBQ : xop2op<0xC3, "vphaddbq", int_x86_xop_vphaddbq, memopv2i64>;
- defm VPHADDBD : xop2op<0xC2, "vphaddbd", int_x86_xop_vphaddbd, memopv2i64>;
-}
+defm VPHSUBWD : xop2op<0xE2, "vphsubwd", int_x86_xop_vphsubwd, memopv2i64>;
+defm VPHSUBDQ : xop2op<0xE3, "vphsubdq", int_x86_xop_vphsubdq, memopv2i64>;
+defm VPHSUBBW : xop2op<0xE1, "vphsubbw", int_x86_xop_vphsubbw, memopv2i64>;
+defm VPHADDWQ : xop2op<0xC7, "vphaddwq", int_x86_xop_vphaddwq, memopv2i64>;
+defm VPHADDWD : xop2op<0xC6, "vphaddwd", int_x86_xop_vphaddwd, memopv2i64>;
+defm VPHADDUWQ : xop2op<0xD7, "vphadduwq", int_x86_xop_vphadduwq, memopv2i64>;
+defm VPHADDUWD : xop2op<0xD6, "vphadduwd", int_x86_xop_vphadduwd, memopv2i64>;
+defm VPHADDUDQ : xop2op<0xDB, "vphaddudq", int_x86_xop_vphaddudq, memopv2i64>;
+defm VPHADDUBW : xop2op<0xD1, "vphaddubw", int_x86_xop_vphaddubw, memopv2i64>;
+defm VPHADDUBQ : xop2op<0xD3, "vphaddubq", int_x86_xop_vphaddubq, memopv2i64>;
+defm VPHADDUBD : xop2op<0xD2, "vphaddubd", int_x86_xop_vphaddubd, memopv2i64>;
+defm VPHADDDQ : xop2op<0xCB, "vphadddq", int_x86_xop_vphadddq, memopv2i64>;
+defm VPHADDBW : xop2op<0xC1, "vphaddbw", int_x86_xop_vphaddbw, memopv2i64>;
+defm VPHADDBQ : xop2op<0xC3, "vphaddbq", int_x86_xop_vphaddbq, memopv2i64>;
+defm VPHADDBD : xop2op<0xC2, "vphaddbd", int_x86_xop_vphaddbd, memopv2i64>;
// Scalar load 2 addr operand instructions
multiclass xop2opsld<bits<8> opc, string OpcodeStr, Intrinsic Int,
@@ -49,12 +47,10 @@ multiclass xop2opsld<bits<8> opc, string OpcodeStr, Intrinsic Int,
[(set VR128:$dst, (Int (bitconvert mem_cpat:$src)))]>, VEX;
}
-let isAsmParserOnly = 1 in {
- defm VFRCZSS : xop2opsld<0x82, "vfrczss", int_x86_xop_vfrcz_ss,
- ssmem, sse_load_f32>;
- defm VFRCZSD : xop2opsld<0x83, "vfrczsd", int_x86_xop_vfrcz_sd,
- sdmem, sse_load_f64>;
-}
+defm VFRCZSS : xop2opsld<0x82, "vfrczss", int_x86_xop_vfrcz_ss,
+ ssmem, sse_load_f32>;
+defm VFRCZSD : xop2opsld<0x83, "vfrczsd", int_x86_xop_vfrcz_sd,
+ sdmem, sse_load_f64>;
multiclass xop2op128<bits<8> opc, string OpcodeStr, Intrinsic Int,
PatFrag memop> {
@@ -66,10 +62,8 @@ multiclass xop2op128<bits<8> opc, string OpcodeStr, Intrinsic Int,
[(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX;
}
-let isAsmParserOnly = 1 in {
- defm VFRCZPS : xop2op128<0x80, "vfrczps", int_x86_xop_vfrcz_ps, memopv4f32>;
- defm VFRCZPD : xop2op128<0x81, "vfrczpd", int_x86_xop_vfrcz_pd, memopv2f64>;
-}
+defm VFRCZPS : xop2op128<0x80, "vfrczps", int_x86_xop_vfrcz_ps, memopv4f32>;
+defm VFRCZPD : xop2op128<0x81, "vfrczpd", int_x86_xop_vfrcz_pd, memopv2f64>;
multiclass xop2op256<bits<8> opc, string OpcodeStr, Intrinsic Int,
PatFrag memop> {
@@ -81,12 +75,8 @@ multiclass xop2op256<bits<8> opc, string OpcodeStr, Intrinsic Int,
[(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX, VEX_L;
}
-let isAsmParserOnly = 1 in {
- defm VFRCZPS : xop2op256<0x80, "vfrczps", int_x86_xop_vfrcz_ps_256,
- memopv8f32>;
- defm VFRCZPD : xop2op256<0x81, "vfrczpd", int_x86_xop_vfrcz_pd_256,
- memopv4f64>;
-}
+defm VFRCZPS : xop2op256<0x80, "vfrczps", int_x86_xop_vfrcz_ps_256, memopv8f32>;
+defm VFRCZPD : xop2op256<0x81, "vfrczpd", int_x86_xop_vfrcz_pd_256, memopv4f64>;
multiclass xop3op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst),
@@ -107,20 +97,18 @@ multiclass xop3op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
VEX_4VOp3;
}
-let isAsmParserOnly = 1 in {
- defm VPSHLW : xop3op<0x95, "vpshlw", int_x86_xop_vpshlw>;
- defm VPSHLQ : xop3op<0x97, "vpshlq", int_x86_xop_vpshlq>;
- defm VPSHLD : xop3op<0x96, "vpshld", int_x86_xop_vpshld>;
- defm VPSHLB : xop3op<0x94, "vpshlb", int_x86_xop_vpshlb>;
- defm VPSHAW : xop3op<0x99, "vpshaw", int_x86_xop_vpshaw>;
- defm VPSHAQ : xop3op<0x9B, "vpshaq", int_x86_xop_vpshaq>;
- defm VPSHAD : xop3op<0x9A, "vpshad", int_x86_xop_vpshad>;
- defm VPSHAB : xop3op<0x98, "vpshab", int_x86_xop_vpshab>;
- defm VPROTW : xop3op<0x91, "vprotw", int_x86_xop_vprotw>;
- defm VPROTQ : xop3op<0x93, "vprotq", int_x86_xop_vprotq>;
- defm VPROTD : xop3op<0x92, "vprotd", int_x86_xop_vprotd>;
- defm VPROTB : xop3op<0x90, "vprotb", int_x86_xop_vprotb>;
-}
+defm VPSHLW : xop3op<0x95, "vpshlw", int_x86_xop_vpshlw>;
+defm VPSHLQ : xop3op<0x97, "vpshlq", int_x86_xop_vpshlq>;
+defm VPSHLD : xop3op<0x96, "vpshld", int_x86_xop_vpshld>;
+defm VPSHLB : xop3op<0x94, "vpshlb", int_x86_xop_vpshlb>;
+defm VPSHAW : xop3op<0x99, "vpshaw", int_x86_xop_vpshaw>;
+defm VPSHAQ : xop3op<0x9B, "vpshaq", int_x86_xop_vpshaq>;
+defm VPSHAD : xop3op<0x9A, "vpshad", int_x86_xop_vpshad>;
+defm VPSHAB : xop3op<0x98, "vpshab", int_x86_xop_vpshab>;
+defm VPROTW : xop3op<0x91, "vprotw", int_x86_xop_vprotw>;
+defm VPROTQ : xop3op<0x93, "vprotq", int_x86_xop_vprotq>;
+defm VPROTD : xop3op<0x92, "vprotd", int_x86_xop_vprotd>;
+defm VPROTB : xop3op<0x90, "vprotb", int_x86_xop_vprotb>;
multiclass xop3opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
def ri : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
@@ -134,12 +122,10 @@ multiclass xop3opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
(Int (bitconvert (memopv2i64 addr:$src1)), imm:$src2))]>, VEX;
}
-let isAsmParserOnly = 1 in {
- defm VPROTW : xop3opimm<0xC1, "vprotw", int_x86_xop_vprotwi>;
- defm VPROTQ : xop3opimm<0xC3, "vprotq", int_x86_xop_vprotqi>;
- defm VPROTD : xop3opimm<0xC2, "vprotd", int_x86_xop_vprotdi>;
- defm VPROTB : xop3opimm<0xC0, "vprotb", int_x86_xop_vprotbi>;
-}
+defm VPROTW : xop3opimm<0xC1, "vprotw", int_x86_xop_vprotwi>;
+defm VPROTQ : xop3opimm<0xC3, "vprotq", int_x86_xop_vprotqi>;
+defm VPROTD : xop3opimm<0xC2, "vprotd", int_x86_xop_vprotdi>;
+defm VPROTB : xop3opimm<0xC0, "vprotb", int_x86_xop_vprotbi>;
// Instruction where second source can be memory, but third must be register
multiclass xop4opm2<bits<8> opc, string OpcodeStr, Intrinsic Int> {
@@ -158,20 +144,18 @@ multiclass xop4opm2<bits<8> opc, string OpcodeStr, Intrinsic Int> {
VR128:$src3))]>, VEX_4V, VEX_I8IMM;
}
-let isAsmParserOnly = 1 in {
- defm VPMADCSWD : xop4opm2<0xB6, "vpmadcswd", int_x86_xop_vpmadcswd>;
- defm VPMADCSSWD : xop4opm2<0xA6, "vpmadcsswd", int_x86_xop_vpmadcsswd>;
- defm VPMACSWW : xop4opm2<0x95, "vpmacsww", int_x86_xop_vpmacsww>;
- defm VPMACSWD : xop4opm2<0x96, "vpmacswd", int_x86_xop_vpmacswd>;
- defm VPMACSSWW : xop4opm2<0x85, "vpmacssww", int_x86_xop_vpmacssww>;
- defm VPMACSSWD : xop4opm2<0x86, "vpmacsswd", int_x86_xop_vpmacsswd>;
- defm VPMACSSDQL : xop4opm2<0x87, "vpmacssdql", int_x86_xop_vpmacssdql>;
- defm VPMACSSDQH : xop4opm2<0x8F, "vpmacssdqh", int_x86_xop_vpmacssdqh>;
- defm VPMACSSDD : xop4opm2<0x8E, "vpmacssdd", int_x86_xop_vpmacssdd>;
- defm VPMACSDQL : xop4opm2<0x97, "vpmacsdql", int_x86_xop_vpmacsdql>;
- defm VPMACSDQH : xop4opm2<0x9F, "vpmacsdqh", int_x86_xop_vpmacsdqh>;
- defm VPMACSDD : xop4opm2<0x9E, "vpmacsdd", int_x86_xop_vpmacsdd>;
-}
+defm VPMADCSWD : xop4opm2<0xB6, "vpmadcswd", int_x86_xop_vpmadcswd>;
+defm VPMADCSSWD : xop4opm2<0xA6, "vpmadcsswd", int_x86_xop_vpmadcsswd>;
+defm VPMACSWW : xop4opm2<0x95, "vpmacsww", int_x86_xop_vpmacsww>;
+defm VPMACSWD : xop4opm2<0x96, "vpmacswd", int_x86_xop_vpmacswd>;
+defm VPMACSSWW : xop4opm2<0x85, "vpmacssww", int_x86_xop_vpmacssww>;
+defm VPMACSSWD : xop4opm2<0x86, "vpmacsswd", int_x86_xop_vpmacsswd>;
+defm VPMACSSDQL : xop4opm2<0x87, "vpmacssdql", int_x86_xop_vpmacssdql>;
+defm VPMACSSDQH : xop4opm2<0x8F, "vpmacssdqh", int_x86_xop_vpmacssdqh>;
+defm VPMACSSDD : xop4opm2<0x8E, "vpmacssdd", int_x86_xop_vpmacssdd>;
+defm VPMACSDQL : xop4opm2<0x97, "vpmacsdql", int_x86_xop_vpmacsdql>;
+defm VPMACSDQH : xop4opm2<0x9F, "vpmacsdqh", int_x86_xop_vpmacsdqh>;
+defm VPMACSDD : xop4opm2<0x9E, "vpmacsdd", int_x86_xop_vpmacsdd>;
// Instruction where second source can be memory, third must be imm8
multiclass xop4opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
@@ -190,16 +174,14 @@ multiclass xop4opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
imm:$src3))]>, VEX_4V;
}
-let isAsmParserOnly = 1 in {
- defm VPCOMB : xop4opimm<0xCC, "vpcomb", int_x86_xop_vpcomb>;
- defm VPCOMW : xop4opimm<0xCD, "vpcomw", int_x86_xop_vpcomw>;
- defm VPCOMD : xop4opimm<0xCE, "vpcomd", int_x86_xop_vpcomd>;
- defm VPCOMQ : xop4opimm<0xCF, "vpcomq", int_x86_xop_vpcomq>;
- defm VPCOMUB : xop4opimm<0xEC, "vpcomub", int_x86_xop_vpcomub>;
- defm VPCOMUW : xop4opimm<0xED, "vpcomuw", int_x86_xop_vpcomuw>;
- defm VPCOMUD : xop4opimm<0xEE, "vpcomud", int_x86_xop_vpcomud>;
- defm VPCOMUQ : xop4opimm<0xEF, "vpcomuq", int_x86_xop_vpcomuq>;
-}
+defm VPCOMB : xop4opimm<0xCC, "vpcomb", int_x86_xop_vpcomb>;
+defm VPCOMW : xop4opimm<0xCD, "vpcomw", int_x86_xop_vpcomw>;
+defm VPCOMD : xop4opimm<0xCE, "vpcomd", int_x86_xop_vpcomd>;
+defm VPCOMQ : xop4opimm<0xCF, "vpcomq", int_x86_xop_vpcomq>;
+defm VPCOMUB : xop4opimm<0xEC, "vpcomub", int_x86_xop_vpcomub>;
+defm VPCOMUW : xop4opimm<0xED, "vpcomuw", int_x86_xop_vpcomuw>;
+defm VPCOMUD : xop4opimm<0xEE, "vpcomud", int_x86_xop_vpcomud>;
+defm VPCOMUQ : xop4opimm<0xEF, "vpcomuq", int_x86_xop_vpcomuq>;
// Instruction where either second or third source can be memory
multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
@@ -227,10 +209,8 @@ multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
VEX_4V, VEX_I8IMM;
}
-let isAsmParserOnly = 1 in {
- defm VPPERM : xop4op<0xA3, "vpperm", int_x86_xop_vpperm>;
- defm VPCMOV : xop4op<0xA2, "vpcmov", int_x86_xop_vpcmov>;
-}
+defm VPPERM : xop4op<0xA3, "vpperm", int_x86_xop_vpperm>;
+defm VPCMOV : xop4op<0xA2, "vpcmov", int_x86_xop_vpcmov>;
multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
def rrY : IXOPi8<opc, MRMSrcReg, (outs VR256:$dst),
@@ -257,9 +237,7 @@ multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
VEX_4V, VEX_I8IMM, VEX_L;
}
-let isAsmParserOnly = 1 in {
- defm VPCMOV : xop4op256<0xA2, "vpcmov", int_x86_xop_vpcmov_256>;
-}
+defm VPCMOV : xop4op256<0xA2, "vpcmov", int_x86_xop_vpcmov_256>;
multiclass xop5op<bits<8> opc, string OpcodeStr, Intrinsic Int128,
Intrinsic Int256, PatFrag ld_128, PatFrag ld_256> {
diff --git a/lib/Target/X86/X86JITInfo.cpp b/lib/Target/X86/X86JITInfo.cpp
index 44d8cce05413..e99f2d9567f2 100644
--- a/lib/Target/X86/X86JITInfo.cpp
+++ b/lib/Target/X86/X86JITInfo.cpp
@@ -127,7 +127,7 @@ extern "C" {
"movaps %xmm6, 96(%rsp)\n"
"movaps %xmm7, 112(%rsp)\n"
// JIT callee
-#ifdef _WIN64
+#if defined(_WIN64) || defined(__CYGWIN__)
"subq $32, %rsp\n"
"movq %rbp, %rcx\n" // Pass prev frame and return address
"movq 8(%rbp), %rdx\n"
@@ -339,6 +339,7 @@ extern "C" {
/// must locate the start of the stub or call site and pass it into the JIT
/// compiler function.
extern "C" {
+LLVM_ATTRIBUTE_USED // Referenced from inline asm.
LLVM_LIBRARY_VISIBILITY void LLVMX86CompilationCallback2(intptr_t *StackPtr,
intptr_t RetAddr) {
intptr_t *RetAddrLoc = &StackPtr[1];
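
The LLVM_ATTRIBUTE_USED annotation added above matters because LLVMX86CompilationCallback2 is only reached from the hand-written assembly stub: with no C++ caller visible to the optimizer, the symbol can look dead. A minimal stand-alone sketch of the same idea, with hypothetical names and not taken from this patch, looks like this:

    // Hypothetical example: a helper referenced only from inline assembly.
    // Without the "used" attribute, interprocedural optimization or LTO may
    // discard what it sees as an unreferenced function.
    extern "C" __attribute__((used)) void helper_from_asm(long value) {
      (void)value; // ... act on the value handed over by the stub ...
    }

    // File-scope (GNU-style) inline assembly that calls the helper by name;
    // the compiler cannot see this reference, hence the attribute above.
    asm(".text\n"
        "asm_stub:\n"
        "  call helper_from_asm\n"
        "  ret\n");
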
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index a8a9fd8accde..6649c825b6c9 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -17,6 +17,7 @@
#include "X86COFFMachineModuleInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -34,14 +35,12 @@ namespace {
/// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst.
class X86MCInstLower {
MCContext &Ctx;
- Mangler *Mang;
const MachineFunction &MF;
const TargetMachine &TM;
const MCAsmInfo &MAI;
X86AsmPrinter &AsmPrinter;
public:
- X86MCInstLower(Mangler *mang, const MachineFunction &MF,
- X86AsmPrinter &asmprinter);
+ X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
@@ -50,13 +49,16 @@ public:
private:
MachineModuleInfoMachO &getMachOMMI() const;
+ Mangler *getMang() const {
+ return AsmPrinter.Mang;
+ }
};
} // end anonymous namespace
-X86MCInstLower::X86MCInstLower(Mangler *mang, const MachineFunction &mf,
+X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
X86AsmPrinter &asmprinter)
-: Ctx(mf.getContext()), Mang(mang), MF(mf), TM(mf.getTarget()),
+: Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()),
MAI(*TM.getMCAsmInfo()), AsmPrinter(asmprinter) {}
MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
@@ -81,7 +83,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
isImplicitlyPrivate = true;
- Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
+ getMang()->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
} else if (MO.isSymbol()) {
Name += MAI.getGlobalPrefix();
Name += MO.getSymbolName();
@@ -110,7 +112,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
assert(MO.isGlobal() && "Extern symbol not handled yet");
StubSym =
MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(MO.getGlobal()),
+ StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
!MO.getGlobal()->hasInternalLinkage());
}
return Sym;
@@ -124,7 +126,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
assert(MO.isGlobal() && "Extern symbol not handled yet");
StubSym =
MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(MO.getGlobal()),
+ StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
!MO.getGlobal()->hasInternalLinkage());
}
return Sym;
@@ -140,7 +142,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
if (MO.isGlobal()) {
StubSym =
MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(MO.getGlobal()),
+ StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
!MO.getGlobal()->hasInternalLinkage());
} else {
Name.erase(Name.end()-5, Name.end());
@@ -225,32 +227,6 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
}
-
-static void lower_subreg32(MCInst *MI, unsigned OpNo) {
- // Convert registers in the addr mode according to subreg32.
- unsigned Reg = MI->getOperand(OpNo).getReg();
- if (Reg != 0)
- MI->getOperand(OpNo).setReg(getX86SubSuperRegister(Reg, MVT::i32));
-}
-
-static void lower_lea64_32mem(MCInst *MI, unsigned OpNo) {
- // Convert registers in the addr mode according to subreg64.
- for (unsigned i = 0; i != 4; ++i) {
- if (!MI->getOperand(OpNo+i).isReg()) continue;
-
- unsigned Reg = MI->getOperand(OpNo+i).getReg();
- // LEAs can use RIP-relative addressing, and RIP has no sub/super register.
- if (Reg == 0 || Reg == X86::RIP) continue;
-
- MI->getOperand(OpNo+i).setReg(getX86SubSuperRegister(Reg, MVT::i64));
- }
-}
-
-/// LowerSubReg32_Op0 - Things like MOVZX16rr8 -> MOVZX32rr8.
-static void LowerSubReg32_Op0(MCInst &OutMI, unsigned NewOpc) {
- OutMI.setOpcode(NewOpc);
- lower_subreg32(&OutMI, 0);
-}
/// LowerUnaryToTwoAddr - R = setb -> R = sbb R, R
static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) {
OutMI.setOpcode(NewOpc);
@@ -280,6 +256,34 @@ static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
Inst.addOperand(Saved);
}
+/// \brief If a movsx instruction has a shorter encoding for the used register
+/// simplify the instruction to use it instead.
+static void SimplifyMOVSX(MCInst &Inst) {
+ unsigned NewOpcode = 0;
+ unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg();
+ switch (Inst.getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected instruction!");
+ case X86::MOVSX16rr8: // movsbw %al, %ax --> cbtw
+ if (Op0 == X86::AX && Op1 == X86::AL)
+ NewOpcode = X86::CBW;
+ break;
+ case X86::MOVSX32rr16: // movswl %ax, %eax --> cwtl
+ if (Op0 == X86::EAX && Op1 == X86::AX)
+ NewOpcode = X86::CWDE;
+ break;
+ case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq
+ if (Op0 == X86::RAX && Op1 == X86::EAX)
+ NewOpcode = X86::CDQE;
+ break;
+ }
+
+ if (NewOpcode != 0) {
+ Inst = MCInst();
+ Inst.setOpcode(NewOpcode);
+ }
+}
+
/// \brief Simplify things like MOV32rm to MOV32o32a.
static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
unsigned Opcode) {
@@ -376,9 +380,7 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
// Handle a few special cases to eliminate operand modifiers.
ReSimplify:
switch (OutMI.getOpcode()) {
- case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
- lower_lea64_32mem(&OutMI, 1);
- // FALL THROUGH.
+ case X86::LEA64_32r:
case X86::LEA64r:
case X86::LEA16r:
case X86::LEA32r:
@@ -388,23 +390,10 @@ ReSimplify:
assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
"LEA has segment specified!");
break;
- case X86::MOVZX64rr32: LowerSubReg32_Op0(OutMI, X86::MOV32rr); break;
- case X86::MOVZX64rm32: LowerSubReg32_Op0(OutMI, X86::MOV32rm); break;
- case X86::MOV64ri64i32: LowerSubReg32_Op0(OutMI, X86::MOV32ri); break;
- case X86::MOVZX64rr8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
- case X86::MOVZX64rm8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
- case X86::MOVZX64rr16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr16); break;
- case X86::MOVZX64rm16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm16); break;
- case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;
- case X86::MOV16r0:
- LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV16r0 -> MOV32r0
- LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
- break;
- case X86::MOV64r0:
- LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV64r0 -> MOV32r0
- LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
+ case X86::MOV32ri64:
+ OutMI.setOpcode(X86::MOV32ri);
break;
// Commute operands to get a smaller encoding by using VEX.R instead of VEX.B
@@ -598,16 +587,11 @@ ReSimplify:
case X86::XOR32ri: SimplifyShortImmForm(OutMI, X86::XOR32i32); break;
case X86::XOR64ri32: SimplifyShortImmForm(OutMI, X86::XOR64i32); break;
- case X86::MORESTACK_RET:
- OutMI.setOpcode(X86::RET);
- break;
-
- case X86::MORESTACK_RET_RESTORE_R10:
- OutMI.setOpcode(X86::MOV64rr);
- OutMI.addOperand(MCOperand::CreateReg(X86::R10));
- OutMI.addOperand(MCOperand::CreateReg(X86::RAX));
-
- AsmPrinter.OutStreamer.EmitInstruction(MCInstBuilder(X86::RET));
+ // Try to shrink some forms of movsx.
+ case X86::MOVSX16rr8:
+ case X86::MOVSX32rr16:
+ case X86::MOVSX64rr32:
+ SimplifyMOVSX(OutMI);
break;
}
}
@@ -691,17 +675,143 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
.addExpr(tlsRef));
}
+static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
+parseMemoryOperand(StackMaps::Location::LocationType LocTy, unsigned Size,
+ MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE) {
+
+ typedef StackMaps::Location Location;
+
+ assert(std::distance(MOI, MOE) >= 5 && "Too few operands to encode mem op.");
+
+ const MachineOperand &Base = *MOI;
+ const MachineOperand &Scale = *(++MOI);
+ const MachineOperand &Index = *(++MOI);
+ const MachineOperand &Disp = *(++MOI);
+ const MachineOperand &ZeroReg = *(++MOI);
+
+ // Sanity check for supported operand format.
+ assert(Base.isReg() &&
+ Scale.isImm() && Scale.getImm() == 1 &&
+ Index.isReg() && Index.getReg() == 0 &&
+ Disp.isImm() && ZeroReg.isReg() && (ZeroReg.getReg() == 0) &&
+ "Unsupported x86 memory operand sequence.");
+ (void)Scale;
+ (void)Index;
+ (void)ZeroReg;
+
+ return std::make_pair(
+ Location(LocTy, Size, Base.getReg(), Disp.getImm()), ++MOI);
+}
+
+std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
+X86AsmPrinter::stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE,
+ const TargetMachine &TM) {
+
+ typedef StackMaps::Location Location;
+
+ const MachineOperand &MOP = *MOI;
+ assert(!MOP.isRegMask() && (!MOP.isReg() || !MOP.isImplicit()) &&
+ "Register mask and implicit operands should not be processed.");
+
+ if (MOP.isImm()) {
+ // Verify anyregcc
+ // [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
+
+ switch (MOP.getImm()) {
+ default: llvm_unreachable("Unrecognized operand type.");
+ case StackMaps::DirectMemRefOp: {
+ unsigned Size = TM.getDataLayout()->getPointerSizeInBits();
+ assert((Size % 8) == 0 && "Need pointer size in bytes.");
+ Size /= 8;
+ return parseMemoryOperand(StackMaps::Location::Direct, Size,
+ llvm::next(MOI), MOE);
+ }
+ case StackMaps::IndirectMemRefOp: {
+ ++MOI;
+ int64_t Size = MOI->getImm();
+ assert(Size > 0 && "Need a valid size for indirect memory locations.");
+ return parseMemoryOperand(StackMaps::Location::Indirect, Size,
+ llvm::next(MOI), MOE);
+ }
+ case StackMaps::ConstantOp: {
+ ++MOI;
+ assert(MOI->isImm() && "Expected constant operand.");
+ int64_t Imm = MOI->getImm();
+ return std::make_pair(
+ Location(Location::Constant, sizeof(int64_t), 0, Imm), ++MOI);
+ }
+ }
+ }
+
+ // Otherwise this is a reg operand. The physical register number will
+ // ultimately be encoded as a DWARF regno. The stack map also records the size
+ // of a spill slot that can hold the register content. (The runtime can
+ // track the actual size of the data type if it needs to.)
+ assert(MOP.isReg() && "Expected register operand here.");
+ assert(TargetRegisterInfo::isPhysicalRegister(MOP.getReg()) &&
+ "Virtreg operands should have been rewritten before now.");
+ const TargetRegisterClass *RC =
+ TM.getRegisterInfo()->getMinimalPhysRegClass(MOP.getReg());
+ assert(!MOP.getSubReg() && "Physical subreg still around.");
+ return std::make_pair(
+ Location(Location::Register, RC->getSize(), MOP.getReg(), 0), ++MOI);
+}
+
+// Lower a stackmap of the form:
+// <id>, <shadowBytes>, ...
+static void LowerSTACKMAP(MCStreamer &OutStreamer,
+ StackMaps &SM,
+ const MachineInstr &MI)
+{
+ unsigned NumNOPBytes = MI.getOperand(1).getImm();
+ SM.recordStackMap(MI);
+ // Emit padding.
+ // FIXME: These nops ensure that the stackmap's shadow is covered by
+ // instructions from the same basic block, but the nops should not be
+ // necessary if instructions from the same block follow the stackmap.
+ for (unsigned i = 0; i < NumNOPBytes; ++i)
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
+}
+
+// Lower a patchpoint of the form:
+// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
+static void LowerPATCHPOINT(MCStreamer &OutStreamer,
+ StackMaps &SM,
+ const MachineInstr &MI) {
+ SM.recordPatchPoint(MI);
+
+ PatchPointOpers opers(&MI);
+ unsigned ScratchIdx = opers.getNextScratchIdx();
+ unsigned EncodedBytes = 0;
+ int64_t CallTarget = opers.getMetaOper(PatchPointOpers::TargetPos).getImm();
+ if (CallTarget) {
+ // Emit MOV to materialize the target address and the CALL to target.
+ // This is encoded with 12-13 bytes, depending on which register is used.
+ // We conservatively assume that it is 12 bytes and emit in worst case one
+ // extra NOP byte.
+ EncodedBytes = 12;
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::MOV64ri)
+ .addReg(MI.getOperand(ScratchIdx).getReg())
+ .addImm(CallTarget));
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::CALL64r)
+ .addReg(MI.getOperand(ScratchIdx).getReg()));
+ }
+ // Emit padding.
+ unsigned NumBytes = opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
+ assert(NumBytes >= EncodedBytes &&
+ "Patchpoint can't request size less than the length of a call.");
+
+ for (unsigned i = EncodedBytes; i < NumBytes; ++i)
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
+}
+
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
- X86MCInstLower MCInstLowering(Mang, *MF, *this);
+ X86MCInstLower MCInstLowering(*MF, *this);
switch (MI->getOpcode()) {
case TargetOpcode::DBG_VALUE:
- if (isVerbose() && OutStreamer.hasRawTextSupport()) {
- std::string TmpStr;
- raw_string_ostream OS(TmpStr);
- PrintDebugValueComment(MI, OS);
- OutStreamer.EmitRawText(StringRef(OS.str()));
- }
- return;
+ llvm_unreachable("Should be handled target independently");
// Emit nothing here but a comment if we can.
case X86::Int_MemBarrier:
@@ -786,6 +896,24 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addExpr(DotExpr));
return;
}
+
+ case TargetOpcode::STACKMAP:
+ return LowerSTACKMAP(OutStreamer, SM, *MI);
+
+ case TargetOpcode::PATCHPOINT:
+ return LowerPATCHPOINT(OutStreamer, SM, *MI);
+
+ case X86::MORESTACK_RET:
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::RET));
+ return;
+
+ case X86::MORESTACK_RET_RESTORE_R10:
+ // Return, then restore R10.
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::RET));
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::MOV64rr)
+ .addReg(X86::R10)
+ .addReg(X86::RAX));
+ return;
}
MCInst TmpInst;
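
LowerPATCHPOINT above reserves the requested number of bytes for later patching: if a call target is present it is materialized with a movabsq plus an indirect call (conservatively counted as 12 bytes), and whatever remains of the requested size is filled with single-byte NOPs. Reduced to a stand-alone sketch with assumed names, the padding rule is:

    // Hypothetical sketch of the patchpoint padding arithmetic (not the real
    // emitter): the encoded call sequence is assumed to occupy at least
    // kWorstCaseCallBytes, and the remainder is one-byte NOPs (0x90).
    #include <cassert>
    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> patchpointPadding(bool hasCallTarget,
                                           unsigned requestedBytes) {
      const unsigned kWorstCaseCallBytes = 12; // movabsq imm64, reg + call reg
      unsigned encoded = hasCallTarget ? kWorstCaseCallBytes : 0;
      assert(requestedBytes >= encoded &&
             "patchpoint can't be smaller than the call sequence");
      // Return only the trailing padding; the call itself is emitted elsewhere.
      return std::vector<uint8_t>(requestedBytes - encoded, 0x90);
    }
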
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 16886e432d19..dbda556b1b5e 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -54,15 +54,14 @@ static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
cl::desc("Enable use of a base pointer for complex stack frames"));
-X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
- const TargetInstrInfo &tii)
+X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm)
: X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
? X86::RIP : X86::EIP),
X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
(tm.getSubtarget<X86Subtarget>().is64Bit()
? X86::RIP : X86::EIP)),
- TM(tm), TII(tii) {
+ TM(tm) {
X86_MC::InitLLVM2SEHRegisterMapping(this);
// Cache some information.
@@ -102,8 +101,8 @@ int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
- // Only enable when post-RA scheduling is enabled and this is needed.
- return TM.getSubtargetImpl()->postRAScheduler();
+ // ExeDepsFixer and PostRAScheduler require liveness.
+ return true;
}
int
@@ -240,8 +239,18 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
case CallingConv::HiPE:
return CSR_NoRegs_SaveList;
+ case CallingConv::WebKit_JS:
+ return CSR_64_SaveList;
+ case CallingConv::AnyReg:
+ return CSR_MostRegs_64_SaveList;
+
case CallingConv::Intel_OCL_BI: {
bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
+ bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();
+ if (HasAVX512 && IsWin64)
+ return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
+ if (HasAVX512 && Is64Bit)
+ return CSR_64_Intel_OCL_BI_AVX512_SaveList;
if (HasAVX && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
if (HasAVX && Is64Bit)
@@ -276,8 +285,13 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
+ bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();
if (CC == CallingConv::Intel_OCL_BI) {
+ if (IsWin64 && HasAVX512)
+ return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
+ if (Is64Bit && HasAVX512)
+ return CSR_64_Intel_OCL_BI_AVX512_RegMask;
if (IsWin64 && HasAVX)
return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
if (Is64Bit && HasAVX)
@@ -287,6 +301,8 @@ X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
}
if (CC == CallingConv::GHC || CC == CallingConv::HiPE)
return CSR_NoRegs_RegMask;
+ if (CC == CallingConv::WebKit_JS || CC == CallingConv::AnyReg)
+ return CSR_MostRegs_64_RegMask;
if (!Is64Bit)
return CSR_32_RegMask;
if (CC == CallingConv::Cold)
@@ -306,19 +322,19 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
// Set the stack-pointer register and its aliases as reserved.
- Reserved.set(X86::RSP);
- for (MCSubRegIterator I(X86::RSP, this); I.isValid(); ++I)
+ for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
+ ++I)
Reserved.set(*I);
// Set the instruction pointer register and its aliases as reserved.
- Reserved.set(X86::RIP);
- for (MCSubRegIterator I(X86::RIP, this); I.isValid(); ++I)
+ for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
+ ++I)
Reserved.set(*I);
// Set the frame-pointer register and its aliases as reserved if needed.
if (TFI->hasFP(MF)) {
- Reserved.set(X86::RBP);
- for (MCSubRegIterator I(X86::RBP, this); I.isValid(); ++I)
+ for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
+ ++I)
Reserved.set(*I);
}
@@ -331,8 +347,8 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
"Stack realignment in presence of dynamic allocas is not supported with"
"this calling convention.");
- Reserved.set(getBaseRegister());
- for (MCSubRegIterator I(getBaseRegister(), this); I.isValid(); ++I)
+ for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true);
+ I.isValid(); ++I)
Reserved.set(*I);
}
@@ -345,14 +361,8 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(X86::GS);
// Mark the floating point stack registers as reserved.
- Reserved.set(X86::ST0);
- Reserved.set(X86::ST1);
- Reserved.set(X86::ST2);
- Reserved.set(X86::ST3);
- Reserved.set(X86::ST4);
- Reserved.set(X86::ST5);
- Reserved.set(X86::ST6);
- Reserved.set(X86::ST7);
+ for (unsigned n = 0; n != 8; ++n)
+ Reserved.set(X86::ST0 + n);
// Reserve the registers that only exist in 64-bit mode.
if (!Is64Bit) {
@@ -365,19 +375,20 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
for (unsigned n = 0; n != 8; ++n) {
// R8, R9, ...
- static const uint16_t GPR64[] = {
- X86::R8, X86::R9, X86::R10, X86::R11,
- X86::R12, X86::R13, X86::R14, X86::R15
- };
- for (MCRegAliasIterator AI(GPR64[n], this, true); AI.isValid(); ++AI)
+ for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
// XMM8, XMM9, ...
- assert(X86::XMM15 == X86::XMM8+7);
for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
}
}
+ if (!Is64Bit || !TM.getSubtarget<X86Subtarget>().hasAVX512()) {
+ for (unsigned n = 16; n != 32; ++n) {
+ for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
+ Reserved.set(*AI);
+ }
+ }
return Reserved;
}
@@ -407,10 +418,11 @@ bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
}
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
+ if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
+ return false;
+
const MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
- if (!MF.getTarget().Options.RealignStack)
- return false;
// Stack realignment requires a frame pointer. If we already started
// register allocation with frame pointer elimination, it is too late now.
@@ -507,14 +519,6 @@ unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
-unsigned X86RegisterInfo::getEHExceptionRegister() const {
- llvm_unreachable("What is the exception register");
-}
-
-unsigned X86RegisterInfo::getEHHandlerRegister() const {
- llvm_unreachable("What is the exception handler register");
-}
-
namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
bool High) {
@@ -688,4 +692,15 @@ unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
}
}
}
+
+unsigned get512BitSuperRegister(unsigned Reg) {
+ if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
+ return X86::ZMM0 + (Reg - X86::XMM0);
+ if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
+ return X86::ZMM0 + (Reg - X86::YMM0);
+ if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
+ return Reg;
+ llvm_unreachable("Unexpected SIMD register");
+}
+
}
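
The get512BitSuperRegister helper added above depends only on the generated register enum keeping XMM0..XMM31, YMM0..YMM31 and ZMM0..ZMM31 each contiguous, so the lane index survives a subtraction and re-addition. The same mapping, restated as a self-contained toy with invented enum values rather than the real tablegen output:

    // Illustrative only: the *_BASE values are made up; in LLVM they come from
    // the generated X86 register enum. The lane index is preserved while the
    // register bank is switched to ZMM, mirroring get512BitSuperRegister.
    enum : unsigned { XMM_BASE = 100, YMM_BASE = 200, ZMM_BASE = 300, LANES = 32 };

    unsigned to512BitSuper(unsigned reg) {
      if (reg >= XMM_BASE && reg < XMM_BASE + LANES)
        return ZMM_BASE + (reg - XMM_BASE);
      if (reg >= YMM_BASE && reg < YMM_BASE + LANES)
        return ZMM_BASE + (reg - YMM_BASE);
      return reg; // assume it is already a ZMM register in this toy version
    }
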
diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h
index b9d7b8cf8b9a..22251b23d532 100644
--- a/lib/Target/X86/X86RegisterInfo.h
+++ b/lib/Target/X86/X86RegisterInfo.h
@@ -27,7 +27,6 @@ namespace llvm {
class X86RegisterInfo : public X86GenRegisterInfo {
public:
X86TargetMachine &TM;
- const TargetInstrInfo &TII;
private:
/// Is64Bit - Is the target 64-bits.
@@ -56,7 +55,7 @@ private:
unsigned BasePtr;
public:
- X86RegisterInfo(X86TargetMachine &tm, const TargetInstrInfo &tii);
+ X86RegisterInfo(X86TargetMachine &tm);
// FIXME: This should be tablegen'd like getDwarfRegNum is
int getSEHRegNum(unsigned i) const;
@@ -127,10 +126,6 @@ public:
unsigned getBaseRegister() const { return BasePtr; }
// FIXME: Move to FrameInfo
unsigned getSlotSize() const { return SlotSize; }
-
- // Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
};
// getX86SubSuperRegister - X86 utility function. It returns the sub or super
@@ -138,6 +133,9 @@ public:
// e.g. getX86SubSuperRegister(X86::EAX, MVT::i16) returns X86::AX
unsigned getX86SubSuperRegister(unsigned, MVT::SimpleValueType, bool High=false);
+// get512BitSuperRegister - X86 utility - returns the 512-bit super register
+unsigned get512BitSuperRegister(unsigned Reg);
+
} // End llvm namespace
#endif
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index be6282a643bd..b8027283cc1f 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -21,11 +21,12 @@ class X86Reg<string n, bits<16> Enc, list<Register> subregs = []> : Register<n>
// Subregister indices.
let Namespace = "X86" in {
- def sub_8bit : SubRegIndex;
- def sub_8bit_hi : SubRegIndex;
- def sub_16bit : SubRegIndex;
- def sub_32bit : SubRegIndex;
- def sub_xmm : SubRegIndex;
+ def sub_8bit : SubRegIndex<8>;
+ def sub_8bit_hi : SubRegIndex<8, 8>;
+ def sub_16bit : SubRegIndex<16>;
+ def sub_32bit : SubRegIndex<32>;
+ def sub_xmm : SubRegIndex<128>;
+ def sub_ymm : SubRegIndex<256>;
}
//===----------------------------------------------------------------------===//
@@ -186,28 +187,53 @@ def XMM12: X86Reg<"xmm12", 12>, DwarfRegNum<[29, -2, -2]>;
def XMM13: X86Reg<"xmm13", 13>, DwarfRegNum<[30, -2, -2]>;
def XMM14: X86Reg<"xmm14", 14>, DwarfRegNum<[31, -2, -2]>;
def XMM15: X86Reg<"xmm15", 15>, DwarfRegNum<[32, -2, -2]>;
+
+def XMM16: X86Reg<"xmm16", 16>, DwarfRegNum<[60, -2, -2]>;
+def XMM17: X86Reg<"xmm17", 17>, DwarfRegNum<[61, -2, -2]>;
+def XMM18: X86Reg<"xmm18", 18>, DwarfRegNum<[62, -2, -2]>;
+def XMM19: X86Reg<"xmm19", 19>, DwarfRegNum<[63, -2, -2]>;
+def XMM20: X86Reg<"xmm20", 20>, DwarfRegNum<[64, -2, -2]>;
+def XMM21: X86Reg<"xmm21", 21>, DwarfRegNum<[65, -2, -2]>;
+def XMM22: X86Reg<"xmm22", 22>, DwarfRegNum<[66, -2, -2]>;
+def XMM23: X86Reg<"xmm23", 23>, DwarfRegNum<[67, -2, -2]>;
+def XMM24: X86Reg<"xmm24", 24>, DwarfRegNum<[68, -2, -2]>;
+def XMM25: X86Reg<"xmm25", 25>, DwarfRegNum<[69, -2, -2]>;
+def XMM26: X86Reg<"xmm26", 26>, DwarfRegNum<[70, -2, -2]>;
+def XMM27: X86Reg<"xmm27", 27>, DwarfRegNum<[71, -2, -2]>;
+def XMM28: X86Reg<"xmm28", 28>, DwarfRegNum<[72, -2, -2]>;
+def XMM29: X86Reg<"xmm29", 29>, DwarfRegNum<[73, -2, -2]>;
+def XMM30: X86Reg<"xmm30", 30>, DwarfRegNum<[74, -2, -2]>;
+def XMM31: X86Reg<"xmm31", 31>, DwarfRegNum<[75, -2, -2]>;
+
} // CostPerUse
-// YMM Registers, used by AVX instructions
+// YMM0-15 registers, used by AVX instructions and
+// YMM16-31 registers, used by AVX-512 instructions.
let SubRegIndices = [sub_xmm] in {
-def YMM0: X86Reg<"ymm0", 0, [XMM0]>, DwarfRegAlias<XMM0>;
-def YMM1: X86Reg<"ymm1", 1, [XMM1]>, DwarfRegAlias<XMM1>;
-def YMM2: X86Reg<"ymm2", 2, [XMM2]>, DwarfRegAlias<XMM2>;
-def YMM3: X86Reg<"ymm3", 3, [XMM3]>, DwarfRegAlias<XMM3>;
-def YMM4: X86Reg<"ymm4", 4, [XMM4]>, DwarfRegAlias<XMM4>;
-def YMM5: X86Reg<"ymm5", 5, [XMM5]>, DwarfRegAlias<XMM5>;
-def YMM6: X86Reg<"ymm6", 6, [XMM6]>, DwarfRegAlias<XMM6>;
-def YMM7: X86Reg<"ymm7", 7, [XMM7]>, DwarfRegAlias<XMM7>;
-def YMM8: X86Reg<"ymm8", 8, [XMM8]>, DwarfRegAlias<XMM8>;
-def YMM9: X86Reg<"ymm9", 9, [XMM9]>, DwarfRegAlias<XMM9>;
-def YMM10: X86Reg<"ymm10", 10, [XMM10]>, DwarfRegAlias<XMM10>;
-def YMM11: X86Reg<"ymm11", 11, [XMM11]>, DwarfRegAlias<XMM11>;
-def YMM12: X86Reg<"ymm12", 12, [XMM12]>, DwarfRegAlias<XMM12>;
-def YMM13: X86Reg<"ymm13", 13, [XMM13]>, DwarfRegAlias<XMM13>;
-def YMM14: X86Reg<"ymm14", 14, [XMM14]>, DwarfRegAlias<XMM14>;
-def YMM15: X86Reg<"ymm15", 15, [XMM15]>, DwarfRegAlias<XMM15>;
+ foreach Index = 0-31 in {
+ def YMM#Index : X86Reg<"ymm"#Index, Index, [!cast<X86Reg>("XMM"#Index)]>,
+ DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>;
+ }
+}
+
+// ZMM Registers, used by AVX-512 instructions.
+let SubRegIndices = [sub_ymm] in {
+ foreach Index = 0-31 in {
+ def ZMM#Index : X86Reg<"zmm"#Index, Index, [!cast<X86Reg>("YMM"#Index)]>,
+ DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>;
+ }
}
+ // Mask Registers, used by AVX-512 instructions.
+ def K0 : X86Reg<"k0", 0>, DwarfRegNum<[118, -2, -2]>;
+ def K1 : X86Reg<"k1", 1>, DwarfRegNum<[119, -2, -2]>;
+ def K2 : X86Reg<"k2", 2>, DwarfRegNum<[120, -2, -2]>;
+ def K3 : X86Reg<"k3", 3>, DwarfRegNum<[121, -2, -2]>;
+ def K4 : X86Reg<"k4", 4>, DwarfRegNum<[122, -2, -2]>;
+ def K5 : X86Reg<"k5", 5>, DwarfRegNum<[123, -2, -2]>;
+ def K6 : X86Reg<"k6", 6>, DwarfRegNum<[124, -2, -2]>;
+ def K7 : X86Reg<"k7", 7>, DwarfRegNum<[125, -2, -2]>;
+
class STRegister<string n, bits<16> Enc, list<Register> A> : X86Reg<n, Enc> {
let Aliases = A;
}
@@ -421,3 +447,25 @@ def FPCCR : RegisterClass<"X86", [i16], 16, (add FPSW)> {
let CopyCost = -1; // Don't allow copying of status registers.
let isAllocatable = 0;
}
+
+// AVX-512 vector/mask registers.
+def VR512 : RegisterClass<"X86", [v16f32, v8f64, v16i32, v8i64], 512,
+ (sequence "ZMM%u", 0, 31)>;
+
+// Scalar AVX-512 floating point registers.
+def FR32X : RegisterClass<"X86", [f32], 32, (sequence "XMM%u", 0, 31)>;
+
+def FR64X : RegisterClass<"X86", [f64], 64, (add FR32X)>;
+
+// Extended VR128 and VR256 for AVX-512 instructions
+def VR128X : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
+ 128, (add FR32X)>;
+def VR256X : RegisterClass<"X86", [v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+ 256, (sequence "YMM%u", 0, 31)>;
+
+def VK8 : RegisterClass<"X86", [v8i1], 8, (sequence "K%u", 0, 7)>;
+def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)>;
+
+def VK8WM : RegisterClass<"X86", [v8i1], 8, (sub VK8, K0)>;
+def VK16WM : RegisterClass<"X86", [v16i1], 16, (add VK8WM)>;
+
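
The XMM16-XMM31 definitions added above give the upper registers DWARF numbers 60 through 75 in the 64-bit flavour (the -2 entries mark the number as invalid for the other flavours), i.e. a fixed offset from the register index. A quick illustration of that pattern, with the constant taken from the table above:

    // Tiny illustration only: xmm16..xmm31 carry DWARF numbers 60..75 per the
    // DwarfRegNum entries above, a constant offset of 44 from the XMM index.
    #include <cassert>

    int dwarfNumForUpperXMM(unsigned xmmIndex) { // expects 16 <= xmmIndex <= 31
      assert(xmmIndex >= 16 && xmmIndex <= 31 && "only defined for xmm16-xmm31");
      return static_cast<int>(xmmIndex) + 44;    // 16 -> 60, ..., 31 -> 75
    }
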
diff --git a/lib/Target/X86/X86SchedHaswell.td b/lib/Target/X86/X86SchedHaswell.td
index 84c9203c3787..9748261262d3 100644
--- a/lib/Target/X86/X86SchedHaswell.td
+++ b/lib/Target/X86/X86SchedHaswell.td
@@ -16,10 +16,13 @@ def HaswellModel : SchedMachineModel {
// All x86 instructions are modeled as a single micro-op, and HW can decode 4
// instructions per cycle.
let IssueWidth = 4;
- let MinLatency = 0; // 0 = Out-of-order execution.
+ let MicroOpBufferSize = 192; // Based on the reorder buffer.
let LoadLatency = 4;
- let ILPWindow = 30;
let MispredictPenalty = 16;
+
+ // FIXME: SSE4 and AVX are unimplemented. This flag is set to allow
+ // the scheduler to assign a default model to unrecognized opcodes.
+ let CompleteModel = 0;
}
let SchedModel = HaswellModel in {
@@ -50,6 +53,12 @@ def HWPort15 : ProcResGroup<[HWPort1, HWPort5]>;
def HWPort015 : ProcResGroup<[HWPort0, HWPort1, HWPort5]>;
def HWPort0156: ProcResGroup<[HWPort0, HWPort1, HWPort5, HWPort6]>;
+// 60 Entry Unified Scheduler
+def HWPortAny : ProcResGroup<[HWPort0, HWPort1, HWPort2, HWPort3, HWPort4,
+ HWPort5, HWPort6, HWPort7]> {
+ let BufferSize=60;
+}
+
// Integer division issued on port 0.
def HWDivider : ProcResource<1>;
@@ -86,6 +95,7 @@ def : WriteRes<WriteZero, []>;
defm : HWWriteResPair<WriteALU, HWPort0156, 1>;
defm : HWWriteResPair<WriteIMul, HWPort1, 3>;
+def : WriteRes<WriteIMulH, []> { let Latency = 3; }
defm : HWWriteResPair<WriteShift, HWPort056, 1>;
defm : HWWriteResPair<WriteJump, HWPort5, 1>;
diff --git a/lib/Target/X86/X86SchedSandyBridge.td b/lib/Target/X86/X86SchedSandyBridge.td
index b36b3ad947e0..3011c6d9d643 100644
--- a/lib/Target/X86/X86SchedSandyBridge.td
+++ b/lib/Target/X86/X86SchedSandyBridge.td
@@ -17,10 +17,13 @@ def SandyBridgeModel : SchedMachineModel {
// instructions per cycle.
// FIXME: Identify instructions that aren't a single fused micro-op.
let IssueWidth = 4;
- let MinLatency = 0; // 0 = Out-of-order execution.
+ let MicroOpBufferSize = 168; // Based on the reorder buffer.
let LoadLatency = 4;
- let ILPWindow = 20;
let MispredictPenalty = 16;
+
+ // FIXME: SSE4 and AVX are unimplemented. This flag is set to allow
+ // the scheduler to assign a default model to unrecognized opcodes.
+ let CompleteModel = 0;
}
let SchedModel = SandyBridgeModel in {
@@ -46,6 +49,11 @@ def SBPort05 : ProcResGroup<[SBPort0, SBPort5]>;
def SBPort15 : ProcResGroup<[SBPort1, SBPort5]>;
def SBPort015 : ProcResGroup<[SBPort0, SBPort1, SBPort5]>;
+// 54 Entry Unified Scheduler
+def SBPortAny : ProcResGroup<[SBPort0, SBPort1, SBPort23, SBPort4, SBPort5]> {
+ let BufferSize=54;
+}
+
// Integer division issued on port 0.
def SBDivider : ProcResource<1>;
@@ -82,6 +90,7 @@ def : WriteRes<WriteZero, []>;
defm : SBWriteResPair<WriteALU, SBPort015, 1>;
defm : SBWriteResPair<WriteIMul, SBPort1, 3>;
+def : WriteRes<WriteIMulH, []> { let Latency = 3; }
defm : SBWriteResPair<WriteShift, SBPort05, 1>;
defm : SBWriteResPair<WriteJump, SBPort5, 1>;
diff --git a/lib/Target/X86/X86Schedule.td b/lib/Target/X86/X86Schedule.td
index 9fbde88b7100..0556437b839b 100644
--- a/lib/Target/X86/X86Schedule.td
+++ b/lib/Target/X86/X86Schedule.td
@@ -42,6 +42,7 @@ multiclass X86SchedWritePair {
// Arithmetic.
defm WriteALU : X86SchedWritePair; // Simple integer ALU op.
defm WriteIMul : X86SchedWritePair; // Integer multiplication.
+def WriteIMulH : SchedWrite; // Integer multiplication, high part.
defm WriteIDiv : X86SchedWritePair; // Integer division.
def WriteLEA : SchedWrite; // LEA instructions can't fold loads.
@@ -140,9 +141,12 @@ def IIC_IDIV64 : InstrItinClass;
// neg/not/inc/dec
def IIC_UNARY_REG : InstrItinClass;
def IIC_UNARY_MEM : InstrItinClass;
-// add/sub/and/or/xor/adc/sbc/cmp/test
+// add/sub/and/or/xor/cmp/test
def IIC_BIN_MEM : InstrItinClass;
def IIC_BIN_NONMEM : InstrItinClass;
+// adc/sbc
+def IIC_BIN_CARRY_MEM : InstrItinClass;
+def IIC_BIN_CARRY_NONMEM : InstrItinClass;
// shift/rotate
def IIC_SR : InstrItinClass;
// shift double
@@ -249,11 +253,11 @@ def IIC_SSE_INTSH_P_RR : InstrItinClass;
def IIC_SSE_INTSH_P_RM : InstrItinClass;
def IIC_SSE_INTSH_P_RI : InstrItinClass;
-def IIC_SSE_CMPP_RR : InstrItinClass;
-def IIC_SSE_CMPP_RM : InstrItinClass;
+def IIC_SSE_INTSHDQ_P_RI : InstrItinClass;
def IIC_SSE_SHUFP : InstrItinClass;
-def IIC_SSE_PSHUF : InstrItinClass;
+def IIC_SSE_PSHUF_RI : InstrItinClass;
+def IIC_SSE_PSHUF_MI : InstrItinClass;
def IIC_SSE_UNPCK : InstrItinClass;
@@ -266,10 +270,14 @@ def IIC_SSE_PINSRW : InstrItinClass;
def IIC_SSE_PABS_RR : InstrItinClass;
def IIC_SSE_PABS_RM : InstrItinClass;
-def IIC_SSE_SQRTP_RR : InstrItinClass;
-def IIC_SSE_SQRTP_RM : InstrItinClass;
-def IIC_SSE_SQRTS_RR : InstrItinClass;
-def IIC_SSE_SQRTS_RM : InstrItinClass;
+def IIC_SSE_SQRTPS_RR : InstrItinClass;
+def IIC_SSE_SQRTPS_RM : InstrItinClass;
+def IIC_SSE_SQRTSS_RR : InstrItinClass;
+def IIC_SSE_SQRTSS_RM : InstrItinClass;
+def IIC_SSE_SQRTPD_RR : InstrItinClass;
+def IIC_SSE_SQRTPD_RM : InstrItinClass;
+def IIC_SSE_SQRTSD_RR : InstrItinClass;
+def IIC_SSE_SQRTSD_RM : InstrItinClass;
def IIC_SSE_RCPP_RR : InstrItinClass;
def IIC_SSE_RCPP_RM : InstrItinClass;
@@ -311,7 +319,8 @@ def IIC_SSE_PSIGN_RM : InstrItinClass;
def IIC_SSE_PMADD : InstrItinClass;
def IIC_SSE_PMULHRSW : InstrItinClass;
-def IIC_SSE_PALIGNR : InstrItinClass;
+def IIC_SSE_PALIGNRR : InstrItinClass;
+def IIC_SSE_PALIGNRM : InstrItinClass;
def IIC_SSE_MWAIT : InstrItinClass;
def IIC_SSE_MONITOR : InstrItinClass;
@@ -487,8 +496,8 @@ def IIC_PUSH_REG : InstrItinClass;
def IIC_PUSH_F : InstrItinClass;
def IIC_PUSH_A : InstrItinClass;
def IIC_BSWAP : InstrItinClass;
-def IIC_BSF : InstrItinClass;
-def IIC_BSR : InstrItinClass;
+def IIC_BIT_SCAN_MEM : InstrItinClass;
+def IIC_BIT_SCAN_REG : InstrItinClass;
def IIC_MOVS : InstrItinClass;
def IIC_STOS : InstrItinClass;
def IIC_SCAS : InstrItinClass;
@@ -535,6 +544,33 @@ def IIC_BOUND : InstrItinClass;
def IIC_ARPL_REG : InstrItinClass;
def IIC_ARPL_MEM : InstrItinClass;
def IIC_MOVBE : InstrItinClass;
+def IIC_AES : InstrItinClass;
+def IIC_BLEND_MEM : InstrItinClass;
+def IIC_BLEND_NOMEM : InstrItinClass;
+def IIC_CBW : InstrItinClass;
+def IIC_CRC32_REG : InstrItinClass;
+def IIC_CRC32_MEM : InstrItinClass;
+def IIC_SSE_DPPD_RR : InstrItinClass;
+def IIC_SSE_DPPD_RM : InstrItinClass;
+def IIC_SSE_DPPS_RR : InstrItinClass;
+def IIC_SSE_DPPS_RM : InstrItinClass;
+def IIC_MMX_EMMS : InstrItinClass;
+def IIC_SSE_EXTRACTPS_RR : InstrItinClass;
+def IIC_SSE_EXTRACTPS_RM : InstrItinClass;
+def IIC_SSE_INSERTPS_RR : InstrItinClass;
+def IIC_SSE_INSERTPS_RM : InstrItinClass;
+def IIC_SSE_MPSADBW_RR : InstrItinClass;
+def IIC_SSE_MPSADBW_RM : InstrItinClass;
+def IIC_SSE_PMULLD_RR : InstrItinClass;
+def IIC_SSE_PMULLD_RM : InstrItinClass;
+def IIC_SSE_ROUNDPS_REG : InstrItinClass;
+def IIC_SSE_ROUNDPS_MEM : InstrItinClass;
+def IIC_SSE_ROUNDPD_REG : InstrItinClass;
+def IIC_SSE_ROUNDPD_MEM : InstrItinClass;
+def IIC_SSE_POPCNT_RR : InstrItinClass;
+def IIC_SSE_POPCNT_RM : InstrItinClass;
+def IIC_SSE_PCLMULQDQ_RR : InstrItinClass;
+def IIC_SSE_PCLMULQDQ_RM : InstrItinClass;
def IIC_NOP : InstrItinClass;
@@ -546,8 +582,9 @@ def IIC_NOP : InstrItinClass;
// Resources beyond the decoder operate on micro-ops and are buffered
// so adjacent micro-ops don't directly compete.
//
-// MinLatency=0 indicates that RAW dependencies can be decoded in the
-// same cycle.
+// MicroOpBufferSize > 1 indicates that RAW dependencies can be
+// decoded in the same cycle. The value 32 is a reasonable, if arbitrary,
+// number of in-flight instructions.
//
// HighLatency=10 is optimistic. X86InstrInfo::isHighLatencyDef
// indicates high latency opcodes. Alternatively, InstrItinData
@@ -555,19 +592,15 @@ def IIC_NOP : InstrItinClass;
// latencies. Since these latencies are not used for pipeline hazards,
// they do not need to be exact.
//
-// ILPWindow=10 is an arbitrary threshold that approximates cycles of
-// latency hidden by instruction buffers. The actual value is not very
-// important but should be zero for inorder and nonzero for OOO processors.
-//
-// The GenericModel contains no instruciton itineraries.
+// The GenericModel contains no instruction itineraries.
def GenericModel : SchedMachineModel {
let IssueWidth = 4;
- let MinLatency = 0;
+ let MicroOpBufferSize = 32;
let LoadLatency = 4;
let HighLatency = 10;
- let ILPWindow = 10;
}
include "X86ScheduleAtom.td"
include "X86SchedSandyBridge.td"
include "X86SchedHaswell.td"
+include "X86ScheduleSLM.td"
diff --git a/lib/Target/X86/X86ScheduleAtom.td b/lib/Target/X86/X86ScheduleAtom.td
index cce8f1b11436..ba72f29910fe 100644
--- a/lib/Target/X86/X86ScheduleAtom.td
+++ b/lib/Target/X86/X86ScheduleAtom.td
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the itinerary class data for the Intel Atom (Bonnell)
-// processors.
+// This file defines the itinerary class data for the Intel Atom
+// in-order (Saltwell-32nm/Bonnell-45nm) processors.
//
//===----------------------------------------------------------------------===//
@@ -79,9 +79,12 @@ def AtomItineraries : ProcessorItineraries<
// neg/not/inc/dec
InstrItinData<IIC_UNARY_REG, [InstrStage<1, [Port0, Port1]>] >,
InstrItinData<IIC_UNARY_MEM, [InstrStage<1, [Port0]>] >,
- // add/sub/and/or/xor/adc/sbc/cmp/test
+ // add/sub/and/or/xor/cmp/test
InstrItinData<IIC_BIN_NONMEM, [InstrStage<1, [Port0, Port1]>] >,
InstrItinData<IIC_BIN_MEM, [InstrStage<1, [Port0]>] >,
+ // adc/sbc
+ InstrItinData<IIC_BIN_CARRY_NONMEM, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_BIN_CARRY_MEM, [InstrStage<1, [Port0]>] >,
// shift/rotate
InstrItinData<IIC_SR, [InstrStage<1, [Port0]>] >,
// shift double
@@ -203,18 +206,23 @@ def AtomItineraries : ProcessorItineraries<
InstrItinData<IIC_SSE_INTSH_P_RM, [InstrStage<3, [Port0, Port1]>] >,
InstrItinData<IIC_SSE_INTSH_P_RI, [InstrStage<1, [Port0, Port1]>] >,
- InstrItinData<IIC_SSE_CMPP_RR, [InstrStage<6, [Port0, Port1]>] >,
- InstrItinData<IIC_SSE_CMPP_RM, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_INTSHDQ_P_RI, [InstrStage<1, [Port0, Port1]>] >,
InstrItinData<IIC_SSE_SHUFP, [InstrStage<1, [Port0]>] >,
- InstrItinData<IIC_SSE_PSHUF, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_PSHUF_RI, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_PSHUF_MI, [InstrStage<1, [Port0]>] >,
InstrItinData<IIC_SSE_UNPCK, [InstrStage<1, [Port0]>] >,
- InstrItinData<IIC_SSE_SQRTP_RR, [InstrStage<13, [Port0, Port1]>] >,
- InstrItinData<IIC_SSE_SQRTP_RM, [InstrStage<14, [Port0, Port1]>] >,
- InstrItinData<IIC_SSE_SQRTS_RR, [InstrStage<11, [Port0, Port1]>] >,
- InstrItinData<IIC_SSE_SQRTS_RM, [InstrStage<12, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_SQRTPS_RR, [InstrStage<70, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_SQRTPS_RM, [InstrStage<70, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_SQRTSS_RR, [InstrStage<34, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_SQRTSS_RM, [InstrStage<34, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_SQRTPD_RR, [InstrStage<125, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_SQRTPD_RM, [InstrStage<125, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_SQRTSD_RR, [InstrStage<62, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_SQRTSD_RM, [InstrStage<62, [Port0, Port1]>] >,
InstrItinData<IIC_SSE_RCPP_RR, [InstrStage<9, [Port0, Port1]>] >,
InstrItinData<IIC_SSE_RCPP_RM, [InstrStage<10, [Port0, Port1]>] >,
@@ -273,7 +281,8 @@ def AtomItineraries : ProcessorItineraries<
InstrItinData<IIC_SSE_PMADD, [InstrStage<5, [Port0]>] >,
InstrItinData<IIC_SSE_PMULHRSW, [InstrStage<5, [Port0]>] >,
- InstrItinData<IIC_SSE_PALIGNR, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_PALIGNRR, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_PALIGNRM, [InstrStage<1, [Port0]>] >,
InstrItinData<IIC_SSE_MWAIT, [InstrStage<46, [Port0, Port1]>] >,
InstrItinData<IIC_SSE_MONITOR, [InstrStage<45, [Port0, Port1]>] >,
@@ -465,8 +474,8 @@ def AtomItineraries : ProcessorItineraries<
InstrItinData<IIC_PUSH_A, [InstrStage<8, [Port0, Port1]>] >,
InstrItinData<IIC_BSWAP, [InstrStage<1, [Port0]>] >,
- InstrItinData<IIC_BSF, [InstrStage<16, [Port0, Port1]>] >,
- InstrItinData<IIC_BSR, [InstrStage<16, [Port0, Port1]>] >,
+ InstrItinData<IIC_BIT_SCAN_MEM, [InstrStage<16, [Port0, Port1]>] >,
+ InstrItinData<IIC_BIT_SCAN_REG, [InstrStage<16, [Port0, Port1]>] >,
InstrItinData<IIC_MOVS, [InstrStage<3, [Port0, Port1]>] >,
InstrItinData<IIC_STOS, [InstrStage<1, [Port0, Port1]>] >,
InstrItinData<IIC_SCAS, [InstrStage<2, [Port0, Port1]>] >,
@@ -513,6 +522,8 @@ def AtomItineraries : ProcessorItineraries<
InstrItinData<IIC_ARPL_REG, [InstrStage<24, [Port0, Port1]>] >,
InstrItinData<IIC_ARPL_MEM, [InstrStage<23, [Port0, Port1]>] >,
InstrItinData<IIC_MOVBE, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_CBW, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_EMMS, [InstrStage<5, [Port0, Port1]>] >,
InstrItinData<IIC_NOP, [InstrStage<1, [Port0, Port1]>] >
]>;
@@ -520,11 +531,9 @@ def AtomItineraries : ProcessorItineraries<
// Atom machine model.
def AtomModel : SchedMachineModel {
let IssueWidth = 2; // Allows 2 instructions per scheduling group.
- let MinLatency = 1; // InstrStage cycles overrides MinLatency.
- // OperandCycles may be used for expected latency.
+ let MicroOpBufferSize = 0; // In-order execution, always hide latency.
let LoadLatency = 3; // Expected cycles, may be overridden by OperandCycles.
let HighLatency = 30; // Expected, may be overridden by OperandCycles.
- let ILPWindow = 0; // Always try to hide expected latency.
let Itineraries = AtomItineraries;
}
diff --git a/lib/Target/X86/X86ScheduleSLM.td b/lib/Target/X86/X86ScheduleSLM.td
new file mode 100644
index 000000000000..6c2a30402400
--- /dev/null
+++ b/lib/Target/X86/X86ScheduleSLM.td
@@ -0,0 +1,668 @@
+//===- X86ScheduleSLM.td - X86 Atom Scheduling Definitions -*- tablegen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the itinerary class data for the Intel Atom
+// (Silvermont) processor.
+//
+//===----------------------------------------------------------------------===//
+
+def IEC_RSV0 : FuncUnit;
+def IEC_RSV1 : FuncUnit;
+def FPC_RSV0 : FuncUnit;
+def FPC_RSV1 : FuncUnit;
+def MEC_RSV : FuncUnit;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+def SLMItineraries : ProcessorItineraries<
+ [ IEC_RSV0, IEC_RSV1, FPC_RSV0, FPC_RSV1, MEC_RSV ],
+ [], [
+ // [InstrStage<N, [FPC_RSV0, FPC_RSV1]>]
+ // [InstrStage<N, [FPC_RSV0, FPC_RSV1], 0>, InstrStage<N, [MEC_RSV]>]
+ // [InstrStage<N, [IEC_RSV0, IEC_RSV1]>]
+ // [InstrStage<N, [IEC_RSV0, IEC_RSV1], 0>,InstrStage<N,[MEC_RSV]>]
+ //
+ // Default is 1 cycle, IEC_RSV0 or IEC_RSV1
+ //InstrItinData<IIC_DEFAULT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_ALU_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_ALU_NONMEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LEA, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LEA_16, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // mul
+ InstrItinData<IIC_MUL8, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MUL16_MEM, [InstrStage<4, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_MUL16_REG, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MUL32_MEM, [InstrStage<3, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<3, [MEC_RSV]>] >,
+ InstrItinData<IIC_MUL32_REG, [InstrStage<3, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MUL64, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
+ // imul by al, ax, eax, rax
+ InstrItinData<IIC_IMUL8, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IMUL16_MEM, [InstrStage<6, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<6, [MEC_RSV]>] >,
+ InstrItinData<IIC_IMUL16_REG, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IMUL32_MEM, [InstrStage<6, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<6, [MEC_RSV]>] >,
+ InstrItinData<IIC_IMUL32_REG, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IMUL64, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
+ // imul reg by reg|mem
+ InstrItinData<IIC_IMUL16_RM, [InstrStage<4, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_IMUL16_RR, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IMUL32_RM, [InstrStage<3, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<3, [MEC_RSV]>] >,
+ InstrItinData<IIC_IMUL32_RR, [InstrStage<3, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IMUL64_RM, [InstrStage<4, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_IMUL64_RR, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
+ // imul reg = reg/mem * imm
+ InstrItinData<IIC_IMUL16_RRI, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IMUL32_RRI, [InstrStage<3, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IMUL64_RRI, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IMUL16_RMI, [InstrStage<4, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_IMUL32_RMI, [InstrStage<3, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<3, [MEC_RSV]>] >,
+ InstrItinData<IIC_IMUL64_RMI, [InstrStage<4, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ // idiv - min latency
+ InstrItinData<IIC_IDIV8, [InstrStage<34, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IDIV16, [InstrStage<35, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IDIV32, [InstrStage<35, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IDIV64, [InstrStage<49, [IEC_RSV0, IEC_RSV1]>] >,
+ // div - min latency
+ InstrItinData<IIC_DIV8_REG, [InstrStage<25, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_DIV8_MEM, [InstrStage<25, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<25, [MEC_RSV]>] >,
+ InstrItinData<IIC_DIV16, [InstrStage<26, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_DIV32, [InstrStage<26, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_DIV64, [InstrStage<38, [IEC_RSV0, IEC_RSV1]>] >,
+ // neg/not/inc/dec
+ InstrItinData<IIC_UNARY_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_UNARY_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ // add/sub/and/or/xor/cmp/test
+ InstrItinData<IIC_BIN_NONMEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_BIN_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ // adc/sbb
+ InstrItinData<IIC_BIN_CARRY_NONMEM, [InstrStage<2, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_BIN_CARRY_MEM, [InstrStage<2, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<2, [MEC_RSV]>] >,
+ // shift/rotate
+ InstrItinData<IIC_SR, [InstrStage<1, [IEC_RSV0], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ // shift double
+ InstrItinData<IIC_SHD16_REG_IM, [InstrStage<2, [IEC_RSV0]>] >,
+ InstrItinData<IIC_SHD16_REG_CL, [InstrStage<4, [IEC_RSV0]>] >,
+ InstrItinData<IIC_SHD16_MEM_IM, [InstrStage<2, [IEC_RSV0], 0>,
+ InstrStage<2, [MEC_RSV]>] >,
+ InstrItinData<IIC_SHD16_MEM_CL, [InstrStage<4, [IEC_RSV0], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_SHD32_REG_IM, [InstrStage<2, [IEC_RSV0]>] >,
+ InstrItinData<IIC_SHD32_REG_CL, [InstrStage<4, [IEC_RSV0]>] >,
+ InstrItinData<IIC_SHD32_MEM_IM, [InstrStage<2, [IEC_RSV0], 0>,
+ InstrStage<2, [MEC_RSV]>] >,
+ InstrItinData<IIC_SHD32_MEM_CL, [InstrStage<4, [IEC_RSV0], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_SHD64_REG_IM, [InstrStage<2, [IEC_RSV0]>] >,
+ InstrItinData<IIC_SHD64_REG_CL, [InstrStage<4, [IEC_RSV0]>] >,
+ InstrItinData<IIC_SHD64_MEM_IM, [InstrStage<2, [IEC_RSV0], 0>,
+ InstrStage<2, [MEC_RSV]>] >,
+ InstrItinData<IIC_SHD64_MEM_CL, [InstrStage<4, [IEC_RSV0], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ // cmov
+ InstrItinData<IIC_CMOV16_RM, [InstrStage<2, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<2, [MEC_RSV]>] >,
+ InstrItinData<IIC_CMOV16_RR, [InstrStage<2, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CMOV32_RM, [InstrStage<2, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<2, [MEC_RSV]>] >,
+ InstrItinData<IIC_CMOV32_RR, [InstrStage<2, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CMOV64_RM, [InstrStage<2, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<2, [MEC_RSV]>] >,
+ InstrItinData<IIC_CMOV64_RR, [InstrStage<2, [IEC_RSV0, IEC_RSV1]>] >,
+ // set
+ InstrItinData<IIC_SET_M, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_SET_R, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // jcc
+ InstrItinData<IIC_Jcc, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // jcxz/jecxz/jrcxz
+ InstrItinData<IIC_JCXZ, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // jmp rel
+ InstrItinData<IIC_JMP_REL, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // jmp indirect
+ InstrItinData<IIC_JMP_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_JMP_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ // jmp far
+ InstrItinData<IIC_JMP_FAR_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_JMP_FAR_PTR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // loop/loope/loopne
+ InstrItinData<IIC_LOOP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LOOPE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LOOPNE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // call - all but reg/imm
+ InstrItinData<IIC_CALL_RI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CALL_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_CALL_FAR_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_CALL_FAR_PTR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ //ret
+ InstrItinData<IIC_RET, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_RET_IMM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ //sign extension movs
+ InstrItinData<IIC_MOVSX, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOVSX_R16_R8, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOVSX_R16_M8, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_MOVSX_R16_R16, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOVSX_R32_R32, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ //zero extension movs
+ InstrItinData<IIC_MOVZX, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOVZX_R16_R8, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOVZX_R16_M8, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_REP_MOVS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_REP_STOS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ // SSE binary operations
+ // arithmetic fp scalar
+ InstrItinData<IIC_SSE_ALU_F32S_RR, [InstrStage<3, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_ALU_F32S_RM, [InstrStage<3, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<3, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_ALU_F64S_RR, [InstrStage<3, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_ALU_F64S_RM, [InstrStage<3, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<3, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_MUL_F32S_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MUL_F32S_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_MUL_F64S_RR, [InstrStage<2, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MUL_F64S_RM, [InstrStage<2, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<2, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_DIV_F32S_RR, [InstrStage<13, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_DIV_F32S_RM, [InstrStage<13, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<13, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_DIV_F64S_RR, [InstrStage<13, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_DIV_F64S_RM, [InstrStage<13, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<13, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_SSE_COMIS_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_COMIS_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+
+ InstrItinData<IIC_SSE_HADDSUB_RR, [InstrStage<6, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_HADDSUB_RM, [InstrStage<6, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<6, [MEC_RSV]>] >,
+
+ // arithmetic fp parallel
+ InstrItinData<IIC_SSE_ALU_F32P_RR, [InstrStage<3, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_ALU_F32P_RM, [InstrStage<3, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<3, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_ALU_F64P_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_ALU_F64P_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_MUL_F32P_RR, [InstrStage<2, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MUL_F32P_RM, [InstrStage<2, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<2, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_MUL_F64P_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MUL_F64P_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_DIV_F32P_RR, [InstrStage<27, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_DIV_F32P_RM, [InstrStage<27, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<27, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_DIV_F64P_RR, [InstrStage<27, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_DIV_F64P_RM, [InstrStage<27, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<27, [MEC_RSV]>] >,
+
+ // bitwise parallel
+ InstrItinData<IIC_SSE_BIT_P_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_BIT_P_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+
+ // arithmetic int parallel
+ InstrItinData<IIC_SSE_INTALU_P_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_INTALU_P_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_INTALUQ_P_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_INTALUQ_P_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+
+ // multiply int parallel
+ InstrItinData<IIC_SSE_INTMUL_P_RR, [InstrStage<5, [FPC_RSV0]>] >,
+ InstrItinData<IIC_SSE_INTMUL_P_RM, [InstrStage<5, [FPC_RSV0], 0>,
+ InstrStage<5, [MEC_RSV]>] >,
+
+ // shift parallel
+ InstrItinData<IIC_SSE_INTSH_P_RR, [InstrStage<2, [FPC_RSV0]>] >,
+ InstrItinData<IIC_SSE_INTSH_P_RM, [InstrStage<2, [FPC_RSV0], 0>,
+ InstrStage<2, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_INTSH_P_RI, [InstrStage<1, [FPC_RSV0]>] >,
+
+ InstrItinData<IIC_SSE_INTSHDQ_P_RI, [InstrStage<1, [FPC_RSV0]>] >,
+
+ InstrItinData<IIC_SSE_SHUFP, [InstrStage<1, [FPC_RSV0]>] >,
+ InstrItinData<IIC_SSE_PSHUF_RI, [InstrStage<1, [FPC_RSV0]>] >,
+ InstrItinData<IIC_SSE_PSHUF_MI, [InstrStage<1, [FPC_RSV0], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_SSE_UNPCK, [InstrStage<1, [FPC_RSV0]>] >,
+
+ InstrItinData<IIC_SSE_SQRTPS_RR, [InstrStage<26, [FPC_RSV0]>] >,
+ InstrItinData<IIC_SSE_SQRTPS_RM, [InstrStage<26, [FPC_RSV0], 0>,
+ InstrStage<26, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_SQRTSS_RR, [InstrStage<13, [FPC_RSV0]>] >,
+ InstrItinData<IIC_SSE_SQRTSS_RM, [InstrStage<13, [FPC_RSV0], 0>,
+ InstrStage<13, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_SQRTPD_RR, [InstrStage<26, [FPC_RSV0]>] >,
+ InstrItinData<IIC_SSE_SQRTPD_RM, [InstrStage<26, [FPC_RSV0], 0>,
+ InstrStage<26, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_SQRTSD_RR, [InstrStage<13, [FPC_RSV0]>] >,
+ InstrItinData<IIC_SSE_SQRTSD_RM, [InstrStage<13, [FPC_RSV0], 0>,
+ InstrStage<13, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_SSE_RCPP_RR, [InstrStage<9, [FPC_RSV0]>] >,
+ InstrItinData<IIC_SSE_RCPP_RM, [InstrStage<9, [FPC_RSV0], 0>,
+ InstrStage<9, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_RCPS_RR, [InstrStage<4, [FPC_RSV0]>] >,
+ InstrItinData<IIC_SSE_RCPS_RM, [InstrStage<4, [FPC_RSV0], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_SSE_MOVMSK, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MASKMOV, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
+
+ InstrItinData<IIC_SSE_PEXTRW, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PINSRW, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+
+ InstrItinData<IIC_SSE_PABS_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PABS_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_SSE_MOV_S_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MOV_S_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_MOV_S_MR, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_SSE_MOVA_P_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MOVA_P_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_MOVA_P_MR, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_SSE_MOVU_P_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MOVU_P_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_MOVU_P_MR, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_SSE_MOV_LH, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+
+ InstrItinData<IIC_SSE_LDDQU, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+
+ InstrItinData<IIC_SSE_MOVDQ, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MOVD_ToGP, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MOVQ_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+
+ InstrItinData<IIC_SSE_MOVNT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_SSE_PREFETCH, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PAUSE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SSE_LFENCE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MFENCE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SSE_SFENCE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SSE_LDMXCSR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SSE_STMXCSR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_SSE_PHADDSUBD_RR, [InstrStage<6, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PHADDSUBD_RM, [InstrStage<6, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<6, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_PHADDSUBSW_RR, [InstrStage<9, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PHADDSUBSW_RM, [InstrStage<9, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<9, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_PHADDSUBW_RR, [InstrStage<9, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<9, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_PHADDSUBW_RM, [InstrStage<9, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<9, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_PSHUFB_RR, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PSHUFB_RM, [InstrStage<5, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<5, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_PSIGN_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PSIGN_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_SSE_PMADD, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PMULHRSW, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PALIGNRR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PALIGNRM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_MWAIT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MONITOR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ // conversions
+ // to/from PD ...
+ InstrItinData<IIC_SSE_CVT_PD_RR, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_CVT_PD_RM, [InstrStage<5, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<5, [MEC_RSV]>] >,
+ // to/from PS except to/from PD and PS2PI
+ InstrItinData<IIC_SSE_CVT_PS_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_CVT_PS_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_CVT_Scalar_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_CVT_Scalar_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_CVT_SS2SI32_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_CVT_SS2SI32_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_CVT_SS2SI64_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_CVT_SS2SI64_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_CVT_SD2SI_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_CVT_SD2SI_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+
+ // MMX MOVs
+ InstrItinData<IIC_MMX_MOV_MM_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_MOV_REG_MM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_MOVQ_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_MOVQ_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // other MMX
+ InstrItinData<IIC_MMX_ALU_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_ALU_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_ALUQ_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_ALUQ_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PHADDSUBW_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PHADDSUBW_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PHADDSUBD_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PHADDSUBD_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PMUL, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_MISC_FUNC_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_MISC_FUNC_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PSADBW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_SHIFT_RI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_SHIFT_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_SHIFT_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_UNPCK_H_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_UNPCK_H_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_UNPCK_L, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PCK_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PCK_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PSHUF, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PEXTR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_PINSRW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_MASKMOV, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // conversions
+ // from/to PD
+ InstrItinData<IIC_MMX_CVT_PD_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_CVT_PD_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // from/to PI
+ InstrItinData<IIC_MMX_CVT_PS_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MMX_CVT_PS_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_CMPX_LOCK, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CMPX_LOCK_8, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CMPX_LOCK_8B, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CMPX_LOCK_16B, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_XADD_LOCK_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_XADD_LOCK_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_FILD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FLD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FLD80, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_FST, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FST80, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FIST, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_FLDZ, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FUCOM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FUCOMI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FCOMI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FNSTSW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FNSTCW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FLDCW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FNINIT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FFREE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FNCLEX, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_WAIT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FXAM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FNOP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FLDL, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_F2XM1, [InstrStage<88, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_FYL2X, [InstrStage<296, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_FPTAN, [InstrStage<281, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_FPATAN, [InstrStage<296, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_FXTRACT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FPREM1, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FPSTP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FPREM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FYL2XP1, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FSINCOS, [InstrStage<281, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_FRNDINT, [InstrStage<25, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_FSCALE, [InstrStage<74, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_FCOMPP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FXSAVE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FXRSTOR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_FXCH, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+
+ // System instructions
+ InstrItinData<IIC_CPUID, [InstrStage<60, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_INT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_INT3, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_INVD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_INVLPG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IRET, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_HLT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LXS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LTR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_RDTSC, [InstrStage<30, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_RSM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SIDT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SGDT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SLDT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_STR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SWAPGS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SYSCALL, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SYS_ENTER_EXIT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_IN_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_IN_RI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_OUT_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_OUT_IR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_INS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_MOV_REG_DR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOV_DR_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // worst case for mov REG_CRx
+ InstrItinData<IIC_MOV_REG_CR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOV_CR_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_MOV_REG_SR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOV_MEM_SR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOV_SR_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOV_SR_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // LAR
+ InstrItinData<IIC_LAR_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LAR_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // LSL
+ InstrItinData<IIC_LSL_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LSL_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_LGDT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LIDT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LLDT_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LLDT_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // push control register, segment registers
+ InstrItinData<IIC_PUSH_CS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_PUSH_SR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // pop control register, segment registers
+ InstrItinData<IIC_POP_SR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_POP_SR_SS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // VERR, VERW
+ InstrItinData<IIC_VERR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_VERW_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_VERW_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // WRMSR, RDMSR
+ InstrItinData<IIC_WRMSR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_RDMSR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_RDPMC, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ // SMSW, LMSW
+ InstrItinData<IIC_SMSW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LMSW_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LMSW_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_ENTER, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LEAVE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_POP_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_POP_REG16, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_POP_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_POP_F, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_POP_FD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_POP_A, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_PUSH_IMM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_PUSH_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_PUSH_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_PUSH_F, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_PUSH_A, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+
+ InstrItinData<IIC_BSWAP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_BIT_SCAN_MEM, [InstrStage<10, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<10, [MEC_RSV]>] >,
+ InstrItinData<IIC_BIT_SCAN_REG, [InstrStage<10, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOVS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_STOS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_SCAS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CMPS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOV, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_MOV_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_AHF, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_BT_MI, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_BT_MR, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_BT_RI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_BT_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_BTX_MI, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_BTX_MR, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_BTX_RI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_BTX_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_XCHG_REG, [InstrStage<5, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_XCHG_MEM, [InstrStage<5, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<5, [MEC_RSV]>] >,
+ InstrItinData<IIC_XADD_REG, [InstrStage<5, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_XADD_MEM, [InstrStage<5, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<5, [MEC_RSV]>] >,
+ InstrItinData<IIC_CMPXCHG_MEM, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CMPXCHG_REG, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CMPXCHG_MEM8, [InstrStage<6, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<6, [MEC_RSV]>] >,
+ InstrItinData<IIC_CMPXCHG_REG8, [InstrStage<6, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<6, [MEC_RSV]>] >,
+ InstrItinData<IIC_CMPXCHG_8B, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CMPXCHG_16B, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_LODS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_OUTS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CLC, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CLD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CLI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CMC, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CLTS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_STC, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_STI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_STD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_XLAT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_AAA, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_AAD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_AAM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_AAS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_DAA, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_DAS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_BOUND, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_ARPL_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_ARPL_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_MOVBE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_AES, [InstrStage<8, [FPC_RSV0]>] >,
+ InstrItinData<IIC_BLEND_NOMEM, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_BLEND_MEM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_BIT_SCAN_MEM, [InstrStage<10, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<10, [MEC_RSV]>] >,
+ InstrItinData<IIC_BIT_SCAN_REG, [InstrStage<10, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CBW, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CRC32_REG, [InstrStage<3, [IEC_RSV0, IEC_RSV1]>] >,
+ InstrItinData<IIC_CRC32_MEM, [InstrStage<3, [IEC_RSV0, IEC_RSV1], 0>,
+ InstrStage<3, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_DPPD_RR, [InstrStage<12, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_DPPD_RM, [InstrStage<12, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<12, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_DPPS_RR, [InstrStage<15, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_DPPS_RM, [InstrStage<15, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<15, [MEC_RSV]>] >,
+ InstrItinData<IIC_MMX_EMMS, [InstrStage<10, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_EXTRACTPS_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_EXTRACTPS_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_INSERTPS_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_INSERTPS_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_MPSADBW_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_MPSADBW_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<1, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_PMULLD_RR, [InstrStage<11, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PMULLD_RM, [InstrStage<11, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<11, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_ROUNDPS_REG, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_ROUNDPS_MEM, [InstrStage<5, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<5, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_ROUNDPD_REG, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
+ InstrItinData<IIC_SSE_ROUNDPD_MEM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_POPCNT_RR, [InstrStage<4, [IEC_RSV1]>] >,
+ InstrItinData<IIC_SSE_POPCNT_RM, [InstrStage<4, [IEC_RSV1], 0>,
+ InstrStage<4, [MEC_RSV]>] >,
+ InstrItinData<IIC_SSE_PCLMULQDQ_RR, [InstrStage<10, [IEC_RSV1]>] >,
+ InstrItinData<IIC_SSE_PCLMULQDQ_RM, [InstrStage<10, [IEC_RSV1], 0>,
+ InstrStage<10, [MEC_RSV]>] >,
+
+ InstrItinData<IIC_NOP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >
+ ]>;
+
+// Silvermont machine model.
+def SLMModel : SchedMachineModel {
+ let IssueWidth = 2; // Allows 2 instructions per scheduling group.
+ let MinLatency = 1; // InstrStage cycles overrides MinLatency.
+ // OperandCycles may be used for expected latency.
+ let LoadLatency = 3; // Expected cycles, may be overridden by OperandCycles.
+ let HighLatency = 30; // Expected, may be overridden by OperandCycles.
+
+ let Itineraries = SLMItineraries;
+}
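
The itinerary entries above feed the InstrItineraryData tables that TableGen emits for the SLM processor, and SLMModel ties those tables to the scheduling machine model. A minimal sketch of how a consumer might read one of those entries back; the helper function is illustrative only and not part of this patch:

    #include "llvm/MC/MCInstrItineraries.h"

    // Rough latency query over generated itineraries.  For an entry such as
    // IIC_SSE_DIV_F64P_RM above -- two 27-cycle stages, where the first stage
    // advances the start cycle by 0 -- getStageLatency() reports 27, the
    // latest completion cycle across the stages, not their sum.
    static unsigned approxLatency(const llvm::InstrItineraryData &Itins,
                                  unsigned ItinClass) {
      if (Itins.isEmpty())
        return 1; // no itinerary information for this CPU
      return Itins.getStageLatency(ItinClass);
    }
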
diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp
index f934fdd85914..b9c620fddc44 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -27,7 +27,7 @@ X86SelectionDAGInfo::~X86SelectionDAGInfo() {
}
SDValue
-X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
+X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
@@ -46,8 +46,6 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
!ConstantSize ||
ConstantSize->getZExtValue() >
Subtarget->getMaxInlineSizeThreshold()) {
- SDValue InFlag(0, 0);
-
// Check to see if there is a specialized entry-point for memory zeroing.
ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
@@ -175,7 +173,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
}
SDValue
-X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
+X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
diff --git a/lib/Target/X86/X86SelectionDAGInfo.h b/lib/Target/X86/X86SelectionDAGInfo.h
index d1d66fe76e94..d728af5aa62b 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.h
+++ b/lib/Target/X86/X86SelectionDAGInfo.h
@@ -34,7 +34,7 @@ public:
~X86SelectionDAGInfo();
virtual
- SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
@@ -42,7 +42,7 @@ public:
MachinePointerInfo DstPtrInfo) const;
virtual
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 74da2a929ce4..01353b2acb07 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -276,20 +276,29 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
(Family == 6 && Model == 0x2F) || // Westmere: Westmere-EX
(Family == 6 && Model == 0x2A) || // SandyBridge
(Family == 6 && Model == 0x2D) || // SandyBridge: SandyBridge-E*
- (Family == 6 && Model == 0x3A))) {// IvyBridge
+ (Family == 6 && Model == 0x3A) || // IvyBridge
+ (Family == 6 && Model == 0x3E) || // IvyBridge EP
+ (Family == 6 && Model == 0x3C) || // Haswell
+ (Family == 6 && Model == 0x3F) || // ...
+ (Family == 6 && Model == 0x45) || // ...
+ (Family == 6 && Model == 0x46))) { // ...
IsUAMemFast = true;
ToggleFeature(X86::FeatureFastUAMem);
}
- // Set processor type. Currently only Atom is detected.
+ // Set processor type. Currently only Atom or Silvermont (SLM) is detected.
if (Family == 6 &&
- (Model == 28 || Model == 38 || Model == 39
- || Model == 53 || Model == 54)) {
+ (Model == 28 || Model == 38 || Model == 39 ||
+ Model == 53 || Model == 54)) {
X86ProcFamily = IntelAtom;
UseLeaForSP = true;
ToggleFeature(X86::FeatureLeaForSP);
}
+ else if (Family == 6 &&
+ (Model == 55 || Model == 74 || Model == 77)) {
+ X86ProcFamily = IntelSLM;
+ }
unsigned MaxExtLevel;
X86_MC::GetCpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX);
@@ -351,14 +360,38 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
HasRTM = true;
ToggleFeature(X86::FeatureRTM);
}
- if (IsIntel && ((EBX >> 19) & 0x1)) {
- HasADX = true;
- ToggleFeature(X86::FeatureADX);
+ if (IsIntel && ((EBX >> 16) & 0x1)) {
+ X86SSELevel = AVX512F;
+ ToggleFeature(X86::FeatureAVX512);
}
if (IsIntel && ((EBX >> 18) & 0x1)) {
HasRDSEED = true;
ToggleFeature(X86::FeatureRDSEED);
}
+ if (IsIntel && ((EBX >> 19) & 0x1)) {
+ HasADX = true;
+ ToggleFeature(X86::FeatureADX);
+ }
+ if (IsIntel && ((EBX >> 26) & 0x1)) {
+ HasPFI = true;
+ ToggleFeature(X86::FeaturePFI);
+ }
+ if (IsIntel && ((EBX >> 27) & 0x1)) {
+ HasERI = true;
+ ToggleFeature(X86::FeatureERI);
+ }
+ if (IsIntel && ((EBX >> 28) & 0x1)) {
+ HasCDI = true;
+ ToggleFeature(X86::FeatureCDI);
+ }
+ if (IsIntel && ((EBX >> 29) & 0x1)) {
+ HasSHA = true;
+ ToggleFeature(X86::FeatureSHA);
+ }
+ }
+ if (IsAMD && ((ECX >> 21) & 0x1)) {
+ HasTBM = true;
+ ToggleFeature(X86::FeatureTBM);
}
}
}
@@ -416,8 +449,8 @@ void X86Subtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
// Make sure 64-bit features are available in 64-bit mode.
if (In64BitMode) {
- HasX86_64 = true; ToggleFeature(X86::Feature64Bit);
- HasCMov = true; ToggleFeature(X86::FeatureCMOV);
+ if (!HasX86_64) { HasX86_64 = true; ToggleFeature(X86::Feature64Bit); }
+ if (!HasCMov) { HasCMov = true; ToggleFeature(X86::FeatureCMOV); }
if (X86SSELevel < SSE2) {
X86SSELevel = SSE2;
@@ -429,9 +462,9 @@ void X86Subtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
// CPUName may have been set by the CPU detection code. Make sure the
// new MCSchedModel is used.
- InitMCProcessorInfo(CPUName, FS);
+ InitCPUSchedModel(CPUName);
- if (X86ProcFamily == IntelAtom)
+ if (X86ProcFamily == IntelAtom || X86ProcFamily == IntelSLM)
PostRAScheduler = true;
InstrItins = getInstrItineraryForCPU(CPUName);
@@ -468,6 +501,7 @@ void X86Subtarget::initializeEnvironment() {
HasFMA = false;
HasFMA4 = false;
HasXOP = false;
+ HasTBM = false;
HasMOVBE = false;
HasRDRAND = false;
HasF16C = false;
@@ -477,7 +511,11 @@ void X86Subtarget::initializeEnvironment() {
HasBMI2 = false;
HasRTM = false;
HasHLE = false;
+ HasERI = false;
+ HasCDI = false;
+ HasPFI = false;
HasADX = false;
+ HasSHA = false;
HasPRFCHW = false;
HasRDSEED = false;
IsBTMemSlow = false;
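
The new leaf-7 EBX checks added above correspond directly to the CPUID feature bits the code tests (bit 16 AVX-512F, 18 RDSEED, 19 ADX, 26/27/28 AVX-512 PF/ER/CD, 29 SHA). A standalone sketch of the same probe outside of LLVM, assuming a GCC/Clang-style <cpuid.h>; the program itself is illustrative, not part of the patch:

    #include <cpuid.h>
    #include <cstdio>

    int main() {
      unsigned eax, ebx, ecx, edx;
      // CPUID leaf 7, sub-leaf 0: structured extended feature flags.
      __cpuid_count(7, 0, eax, ebx, ecx, edx);
      std::printf("AVX512F=%u RDSEED=%u ADX=%u PFI=%u ERI=%u CDI=%u SHA=%u\n",
                  (ebx >> 16) & 1u,   // AVX-512 Foundation
                  (ebx >> 18) & 1u,   // RDSEED
                  (ebx >> 19) & 1u,   // ADX
                  (ebx >> 26) & 1u,   // AVX-512 Prefetch
                  (ebx >> 27) & 1u,   // AVX-512 Exponential/Reciprocal
                  (ebx >> 28) & 1u,   // AVX-512 Conflict Detection
                  (ebx >> 29) & 1u);  // SHA extensions
      return 0;
    }
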
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 66832b989be1..dd8c0811ce51 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -42,7 +42,7 @@ enum Style {
class X86Subtarget : public X86GenSubtargetInfo {
protected:
enum X86SSEEnum {
- NoMMXSSE, MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2
+ NoMMXSSE, MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F
};
enum X863DNowEnum {
@@ -50,7 +50,7 @@ protected:
};
enum X86ProcFamilyEnum {
- Others, IntelAtom
+ Others, IntelAtom, IntelSLM
};
/// X86ProcFamily - X86 processor family: Intel Atom, and others
@@ -97,6 +97,9 @@ protected:
/// HasXOP - Target has XOP instructions
bool HasXOP;
+ /// HasTBM - Target has TBM instructions.
+ bool HasTBM;
+
/// HasMOVBE - True if the processor has the MOVBE instruction.
bool HasMOVBE;
@@ -127,6 +130,9 @@ protected:
/// HasADX - Processor has ADX instructions.
bool HasADX;
+ /// HasSHA - Processor has SHA instructions.
+ bool HasSHA;
+
/// HasPRFCHW - Processor has PRFCHW instructions.
bool HasPRFCHW;
@@ -169,6 +175,15 @@ protected:
/// address generation (AG) time.
bool LEAUsesAG;
+ /// Processor has AVX-512 PreFetch Instructions
+ bool HasPFI;
+
+ /// Processor has AVX-512 Exponential and Reciprocal Instructions
+ bool HasERI;
+
+ /// Processor has AVX-512 Conflict Detection Instructions
+ bool HasCDI;
+
/// stackAlignment - The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment;
@@ -249,6 +264,7 @@ public:
bool hasSSE42() const { return X86SSELevel >= SSE42; }
bool hasAVX() const { return X86SSELevel >= AVX; }
bool hasAVX2() const { return X86SSELevel >= AVX2; }
+ bool hasAVX512() const { return X86SSELevel >= AVX512F; }
bool hasFp256() const { return hasAVX(); }
bool hasInt256() const { return hasAVX2(); }
bool hasSSE4A() const { return HasSSE4A; }
@@ -261,6 +277,7 @@ public:
// FIXME: Favor FMA when both are enabled. Is this the right thing to do?
bool hasFMA4() const { return HasFMA4 && !HasFMA; }
bool hasXOP() const { return HasXOP; }
+ bool hasTBM() const { return HasTBM; }
bool hasMOVBE() const { return HasMOVBE; }
bool hasRDRAND() const { return HasRDRAND; }
bool hasF16C() const { return HasF16C; }
@@ -271,6 +288,7 @@ public:
bool hasRTM() const { return HasRTM; }
bool hasHLE() const { return HasHLE; }
bool hasADX() const { return HasADX; }
+ bool hasSHA() const { return HasSHA; }
bool hasPRFCHW() const { return HasPRFCHW; }
bool hasRDSEED() const { return HasRDSEED; }
bool isBTMemSlow() const { return IsBTMemSlow; }
@@ -282,6 +300,9 @@ public:
bool padShortFunctions() const { return PadShortFunctions; }
bool callRegIndirect() const { return CallRegIndirect; }
bool LEAusesAG() const { return LEAUsesAG; }
+ bool hasCDI() const { return HasCDI; }
+ bool hasPFI() const { return HasPFI; }
+ bool hasERI() const { return HasERI; }
bool isAtom() const { return X86ProcFamily == IntelAtom; }
@@ -298,10 +319,8 @@ public:
return (TargetTriple.getEnvironment() == Triple::ELF ||
TargetTriple.isOSBinFormatELF());
}
- bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
- bool isTargetNaCl() const {
- return TargetTriple.getOS() == Triple::NaCl;
- }
+ bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
+ bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
bool isTargetWindows() const { return TargetTriple.getOS() == Triple::Win32; }
@@ -314,15 +333,14 @@ public:
}
bool isTargetEnvMacho() const { return TargetTriple.isEnvironmentMachO(); }
+ bool isOSWindows() const { return TargetTriple.isOSWindows(); }
+
bool isTargetWin64() const {
- // FIXME: x86_64-cygwin has not been released yet.
return In64BitMode && TargetTriple.isOSWindows();
}
bool isTargetWin32() const {
- // FIXME: Cygwin is included for isTargetWin64 -- should it be included
- // here too?
- return !In64BitMode && (isTargetMingw() || isTargetWindows());
+ return !In64BitMode && (isTargetCygMing() || isTargetWindows());
}
bool isPICStyleSet() const { return PICStyle != PICStyles::None; }
@@ -338,7 +356,13 @@ public:
}
bool isPICStyleStubAny() const {
return PICStyle == PICStyles::StubDynamicNoPIC ||
- PICStyle == PICStyles::StubPIC; }
+ PICStyle == PICStyles::StubPIC;
+ }
+
+ bool isCallingConvWin64(CallingConv::ID CC) const {
+ return (isTargetWin64() && CC != CallingConv::X86_64_SysV) ||
+ CC == CallingConv::X86_64_Win64;
+ }
/// ClassifyGlobalReference - Classify a global variable reference for the
/// current subtarget according to how we should reference it in a non-pcrel
@@ -361,11 +385,14 @@ public:
/// memset with zero passed as the second argument. Otherwise it
/// returns null.
const char *getBZeroEntry() const;
-
+
/// This function returns true if the target has sincos() routine in its
/// compiler runtime or math libraries.
bool hasSinCos() const;
+ /// Enable the MachineScheduler pass for all X86 subtargets.
+ bool enableMachineScheduler() const LLVM_OVERRIDE { return true; }
+
/// enablePostRAScheduler - run for Atom optimization.
bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
TargetSubtargetInfo::AntiDepBreakMode& Mode,
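
The isCallingConvWin64() helper added in this header encodes how the target's default ABI interacts with the two explicit x86-64 calling conventions. A small self-contained restatement of that logic using the same CallingConv IDs; the free function and the asserts are a sketch for illustration, not code from the patch:

    #include "llvm/IR/CallingConv.h"
    #include <cassert>

    // Mirrors the predicate's logic outside the subtarget class.
    static bool callingConvWin64(bool TargetIsWin64, llvm::CallingConv::ID CC) {
      using namespace llvm;
      return (TargetIsWin64 && CC != CallingConv::X86_64_SysV) ||
             CC == CallingConv::X86_64_Win64;
    }

    int main() {
      using namespace llvm;
      // On a Win64 target, everything but an explicit SysV CC uses Win64 rules.
      assert(callingConvWin64(true, CallingConv::C));
      assert(!callingConvWin64(true, CallingConv::X86_64_SysV));
      // On a non-Win64 target, only an explicit Win64 CC opts in.
      assert(callingConvWin64(false, CallingConv::X86_64_Win64));
      assert(!callingConvWin64(false, CallingConv::C));
      return 0;
    }
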
diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp
index 00fa47f80b44..ddf580f73145 100644
--- a/lib/Target/X86/X86TargetMachine.cpp
+++ b/lib/Target/X86/X86TargetMachine.cpp
@@ -49,6 +49,7 @@ X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT,
TLInfo(*this),
TSInfo(*this),
JITInfo(*this) {
+ initAsmInfo();
}
void X86_64TargetMachine::anchor() { }
@@ -69,6 +70,7 @@ X86_64TargetMachine::X86_64TargetMachine(const Target &T, StringRef TT,
TLInfo(*this),
TSInfo(*this),
JITInfo(*this) {
+ initAsmInfo();
}
/// X86TargetMachine ctor - Create an X86 target.
@@ -90,7 +92,7 @@ X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT,
} else if (Subtarget.is64Bit()) {
// PIC in 64 bit mode is always rip-rel.
Subtarget.setPICStyle(PICStyles::RIPRel);
- } else if (Subtarget.isTargetCygMing()) {
+ } else if (Subtarget.isTargetCOFF()) {
Subtarget.setPICStyle(PICStyles::None);
} else if (Subtarget.isTargetDarwin()) {
if (getRelocationModel() == Reloc::PIC_)
@@ -112,14 +114,14 @@ X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT,
// Command line options for x86
//===----------------------------------------------------------------------===//
static cl::opt<bool>
-UseVZeroUpper("x86-use-vzeroupper",
+UseVZeroUpper("x86-use-vzeroupper", cl::Hidden,
cl::desc("Minimize AVX to SSE transition penalty"),
cl::init(true));
// Temporary option to control early if-conversion for x86 while adding machine
// models.
static cl::opt<bool>
-X86EarlyIfConv("x86-early-ifcvt",
+X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
cl::desc("Enable early if-conversion on X86"));
//===----------------------------------------------------------------------===//
@@ -130,7 +132,7 @@ void X86TargetMachine::addAnalysisPasses(PassManagerBase &PM) {
// Add first the target-independent BasicTTI pass, then our X86 pass. This
// allows the X86 pass to delegate to the target independent layer when
// appropriate.
- PM.add(createBasicTargetTransformInfoPass(getTargetLowering()));
+ PM.add(createBasicTargetTransformInfoPass(this));
PM.add(createX86TargetTransformInfoPass(this));
}
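
The two command-line options touched above (x86-use-vzeroupper and x86-early-ifcvt) gain cl::Hidden, which keeps them parseable but drops them from plain -help output; they remain visible under -help-hidden. A minimal sketch of the pattern with a hypothetical option name:

    #include "llvm/Support/CommandLine.h"

    // Hypothetical hidden tuning flag: registered at static-init time,
    // parsed by cl::ParseCommandLineOptions, but omitted from -help.
    static llvm::cl::opt<bool>
    ExampleHiddenFlag("x86-example-hidden-flag", llvm::cl::Hidden,
                      llvm::cl::desc("Illustrative hidden option"),
                      llvm::cl::init(false));
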
diff --git a/lib/Target/X86/X86TargetObjectFile.cpp b/lib/Target/X86/X86TargetObjectFile.cpp
index 871dacd6a1c1..086cd4de534b 100644
--- a/lib/Target/X86/X86TargetObjectFile.cpp
+++ b/lib/Target/X86/X86TargetObjectFile.cpp
@@ -25,7 +25,7 @@ getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
// On Darwin/X86-64, we can reference dwarf symbols with foo@GOTPCREL+4, which
// is an indirect pc-relative reference.
if (Encoding & (DW_EH_PE_indirect | DW_EH_PE_pcrel)) {
- const MCSymbol *Sym = Mang->getSymbol(GV);
+ const MCSymbol *Sym = getSymbol(*Mang, GV);
const MCExpr *Res =
MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTPCREL, getContext());
const MCExpr *Four = MCConstantExpr::Create(4, getContext());
@@ -39,7 +39,7 @@ getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
MCSymbol *X86_64MachoTargetObjectFile::
getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
MachineModuleInfo *MMI) const {
- return Mang->getSymbol(GV);
+ return getSymbol(*Mang, GV);
}
void
@@ -47,3 +47,9 @@ X86LinuxTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM) {
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
InitializeELF(TM.Options.UseInitArray);
}
+
+const MCExpr *
+X86LinuxTargetObjectFile::getDebugThreadLocalSymbol(
+ const MCSymbol *Sym) const {
+ return MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_DTPOFF, getContext());
+}
diff --git a/lib/Target/X86/X86TargetObjectFile.h b/lib/Target/X86/X86TargetObjectFile.h
index 9d26d389d4de..79c861d2e12e 100644
--- a/lib/Target/X86/X86TargetObjectFile.h
+++ b/lib/Target/X86/X86TargetObjectFile.h
@@ -36,6 +36,9 @@ namespace llvm {
/// and x86-64.
class X86LinuxTargetObjectFile : public TargetLoweringObjectFileELF {
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+
+ /// \brief Describe a TLS variable address within debug info.
+ virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;
};
} // end namespace llvm
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index eba9d782d5c3..f88a666092bc 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -33,7 +33,6 @@ void initializeX86TTIPass(PassRegistry &);
namespace {
class X86TTI : public ImmutablePass, public TargetTransformInfo {
- const X86TargetMachine *TM;
const X86Subtarget *ST;
const X86TargetLowering *TLI;
@@ -42,12 +41,12 @@ class X86TTI : public ImmutablePass, public TargetTransformInfo {
unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
public:
- X86TTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
+ X86TTI() : ImmutablePass(ID), ST(0), TLI(0) {
llvm_unreachable("This pass cannot be directly constructed");
}
X86TTI(const X86TargetMachine *TM)
- : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
+ : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
TLI(TM->getTargetLowering()) {
initializeX86TTIPass(*PassRegistry::getPassRegistry());
}
@@ -101,6 +100,11 @@ public:
unsigned Alignment,
unsigned AddressSpace) const;
+ virtual unsigned getAddressComputationCost(Type *PtrTy, bool IsComplex) const;
+
+ virtual unsigned getReductionCost(unsigned Opcode, Type *Ty,
+ bool IsPairwiseForm) const;
+
/// @}
};
@@ -126,8 +130,8 @@ X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
// TODO: Currently the __builtin_popcount() implementation using SSE3
// instructions is inefficient. Once the problem is fixed, we should
- // call ST->hasSSE3() instead of ST->hasSSE4().
- return ST->hasSSE41() ? PSK_FastHardware : PSK_Software;
+ // call ST->hasSSE3() instead of ST->hasPOPCNT().
+ return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
}
unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
@@ -173,7 +177,7 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
- static const CostTblEntry<MVT> AVX2CostTable[] = {
+ static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
// Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to
// customize them to detect the cases where shift amount is a scalar one.
{ ISD::SHL, MVT::v4i32, 1 },
@@ -196,17 +200,27 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
{ ISD::SRA, MVT::v32i8, 32*10 }, // Scalarized.
{ ISD::SRA, MVT::v16i16, 16*10 }, // Scalarized.
{ ISD::SRA, MVT::v4i64, 4*10 }, // Scalarized.
+
+ // Vectorizing division is a bad idea. See the SSE2 table for more comments.
+ { ISD::SDIV, MVT::v32i8, 32*20 },
+ { ISD::SDIV, MVT::v16i16, 16*20 },
+ { ISD::SDIV, MVT::v8i32, 8*20 },
+ { ISD::SDIV, MVT::v4i64, 4*20 },
+ { ISD::UDIV, MVT::v32i8, 32*20 },
+ { ISD::UDIV, MVT::v16i16, 16*20 },
+ { ISD::UDIV, MVT::v8i32, 8*20 },
+ { ISD::UDIV, MVT::v4i64, 4*20 },
};
// Look for AVX2 lowering tricks.
if (ST->hasAVX2()) {
- int Idx = CostTableLookup<MVT>(AVX2CostTable, array_lengthof(AVX2CostTable),
- ISD, LT.second);
+ int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
if (Idx != -1)
return LT.first * AVX2CostTable[Idx].Cost;
}
- static const CostTblEntry<MVT> SSE2UniformConstCostTable[] = {
+ static const CostTblEntry<MVT::SimpleValueType>
+ SSE2UniformConstCostTable[] = {
// We don't correctly identify costs of casts because they are marked as
// custom.
// Constant splats are cheaper for the following instructions.
@@ -227,15 +241,13 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
ST->hasSSE2()) {
- int Idx = CostTableLookup<MVT>(SSE2UniformConstCostTable,
- array_lengthof(SSE2UniformConstCostTable),
- ISD, LT.second);
+ int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
if (Idx != -1)
return LT.first * SSE2UniformConstCostTable[Idx].Cost;
}
- static const CostTblEntry<MVT> SSE2CostTable[] = {
+ static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
// We don't correctly identify costs of casts because they are marked as
// custom.
// For some cases, where the shift amount is a scalar we would be able
@@ -258,16 +270,30 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
{ ISD::SRA, MVT::v8i16, 8*10 }, // Scalarized.
{ ISD::SRA, MVT::v4i32, 4*10 }, // Scalarized.
{ ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized.
+
+ // It is not a good idea to vectorize division. We have to scalarize it and
+ // in the process we will often end up having to spill regular
+ // registers. The overhead of division is going to dominate most kernels
+ // anyways so try hard to prevent vectorization of division - it is
+ // generally a bad idea. Assume somewhat arbitrarily that we have to be able
+ // to hide "20 cycles" for each lane.
+ { ISD::SDIV, MVT::v16i8, 16*20 },
+ { ISD::SDIV, MVT::v8i16, 8*20 },
+ { ISD::SDIV, MVT::v4i32, 4*20 },
+ { ISD::SDIV, MVT::v2i64, 2*20 },
+ { ISD::UDIV, MVT::v16i8, 16*20 },
+ { ISD::UDIV, MVT::v8i16, 8*20 },
+ { ISD::UDIV, MVT::v4i32, 4*20 },
+ { ISD::UDIV, MVT::v2i64, 2*20 },
};
if (ST->hasSSE2()) {
- int Idx = CostTableLookup<MVT>(SSE2CostTable, array_lengthof(SSE2CostTable),
- ISD, LT.second);
+ int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
if (Idx != -1)
return LT.first * SSE2CostTable[Idx].Cost;
}
- static const CostTblEntry<MVT> AVX1CostTable[] = {
+ static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
// We don't have to scalarize unsupported ops. We can issue two half-sized
// operations and we only need to extract the upper YMM half.
// Two ops + 1 extract + 1 insert = 4.
@@ -286,21 +312,19 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
// Look for AVX1 lowering tricks.
if (ST->hasAVX() && !ST->hasAVX2()) {
- int Idx = CostTableLookup<MVT>(AVX1CostTable, array_lengthof(AVX1CostTable),
- ISD, LT.second);
+ int Idx = CostTableLookup(AVX1CostTable, ISD, LT.second);
if (Idx != -1)
return LT.first * AVX1CostTable[Idx].Cost;
}
// Custom lowering of vectors.
- static const CostTblEntry<MVT> CustomLowered[] = {
+ static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
// A v2i64/v4i64 and multiply is custom lowered as a series of long
// multiplies(3), shifts(4) and adds(2).
{ ISD::MUL, MVT::v2i64, 9 },
{ ISD::MUL, MVT::v4i64, 9 },
};
- int Idx = CostTableLookup<MVT>(CustomLowered, array_lengthof(CustomLowered),
- ISD, LT.second);
+ int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
if (Idx != -1)
return LT.first * CustomLowered[Idx].Cost;
@@ -337,7 +361,8 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);
- static const TypeConversionCostTblEntry<MVT> SSE2ConvTbl[] = {
+ static const TypeConversionCostTblEntry<MVT::SimpleValueType>
+ SSE2ConvTbl[] = {
// These are somewhat magic numbers justified by looking at the output of
// Intel's IACA, running some kernels and making sure when we take
// legalization into account the throughput will be overestimated.
@@ -361,9 +386,8 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
};
if (ST->hasSSE2() && !ST->hasAVX()) {
- int Idx = ConvertCostTableLookup<MVT>(SSE2ConvTbl,
- array_lengthof(SSE2ConvTbl),
- ISD, LTDest.second, LTSrc.second);
+ int Idx =
+ ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
if (Idx != -1)
return LTSrc.first * SSE2ConvTbl[Idx].Cost;
}
@@ -375,13 +399,17 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
if (!SrcTy.isSimple() || !DstTy.isSimple())
return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
- static const TypeConversionCostTblEntry<MVT> AVXConversionTbl[] = {
+ static const TypeConversionCostTblEntry<MVT::SimpleValueType>
+ AVXConversionTbl[] = {
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
{ ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
{ ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
{ ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1 },
+ { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
{ ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
{ ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
@@ -420,9 +448,8 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
};
if (ST->hasAVX()) {
- int Idx = ConvertCostTableLookup<MVT>(AVXConversionTbl,
- array_lengthof(AVXConversionTbl),
- ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT());
+ int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
+ SrcTy.getSimpleVT());
if (Idx != -1)
return AVXConversionTbl[Idx].Cost;
}
@@ -440,7 +467,7 @@ unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
- static const CostTblEntry<MVT> SSE42CostTbl[] = {
+ static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
{ ISD::SETCC, MVT::v2f64, 1 },
{ ISD::SETCC, MVT::v4f32, 1 },
{ ISD::SETCC, MVT::v2i64, 1 },
@@ -449,7 +476,7 @@ unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
{ ISD::SETCC, MVT::v16i8, 1 },
};
- static const CostTblEntry<MVT> AVX1CostTbl[] = {
+ static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
{ ISD::SETCC, MVT::v4f64, 1 },
{ ISD::SETCC, MVT::v8f32, 1 },
// AVX1 does not support 8-wide integer compare.
@@ -459,7 +486,7 @@ unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
{ ISD::SETCC, MVT::v32i8, 4 },
};
- static const CostTblEntry<MVT> AVX2CostTbl[] = {
+ static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
{ ISD::SETCC, MVT::v4i64, 1 },
{ ISD::SETCC, MVT::v8i32, 1 },
{ ISD::SETCC, MVT::v16i16, 1 },
@@ -467,19 +494,19 @@ unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
};
if (ST->hasAVX2()) {
- int Idx = CostTableLookup<MVT>(AVX2CostTbl, array_lengthof(AVX2CostTbl), ISD, MTy);
+ int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
if (Idx != -1)
return LT.first * AVX2CostTbl[Idx].Cost;
}
if (ST->hasAVX()) {
- int Idx = CostTableLookup<MVT>(AVX1CostTbl, array_lengthof(AVX1CostTbl), ISD, MTy);
+ int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
if (Idx != -1)
return LT.first * AVX1CostTbl[Idx].Cost;
}
if (ST->hasSSE42()) {
- int Idx = CostTableLookup<MVT>(SSE42CostTbl, array_lengthof(SSE42CostTbl), ISD, MTy);
+ int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
if (Idx != -1)
return LT.first * SSE42CostTbl[Idx].Cost;
}
@@ -511,8 +538,51 @@ unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}
+unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
+ bool Extract) const {
+ assert (Ty->isVectorTy() && "Can only scalarize vectors");
+ unsigned Cost = 0;
+
+ for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+ if (Insert)
+ Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
+ if (Extract)
+ Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+ }
+
+ return Cost;
+}
+
unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) const {
+ // Handle non power of two vectors such as <3 x float>
+ if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
+ unsigned NumElem = VTy->getVectorNumElements();
+
+ // Handle a few common cases:
+ // <3 x float>
+ if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
+ // Cost = 64 bit store + extract + 32 bit store.
+ return 3;
+
+ // <3 x double>
+ if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
+ // Cost = 128 bit store + unpack + 64 bit store.
+ return 3;
+
+ // Assume that all other non power-of-two numbers are scalarized.
+ if (!isPowerOf2_32(NumElem)) {
+ unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
+ VTy->getScalarType(),
+ Alignment,
+ AddressSpace);
+ unsigned SplitCost = getScalarizationOverhead(Src,
+ Opcode == Instruction::Load,
+ Opcode==Instruction::Store);
+ return NumElem * Cost + SplitCost;
+ }
+ }
+
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
@@ -528,3 +598,97 @@ unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
return Cost;
}
+
+unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
+ // Address computations in vectorized code with non-consecutive addresses will
+ // likely result in more instructions compared to scalar code where the
+ // computation can more often be merged into the index mode. The resulting
+ // extra micro-ops can significantly decrease throughput.
+ unsigned NumVectorInstToHideOverhead = 10;
+
+ if (Ty->isVectorTy() && IsComplex)
+ return NumVectorInstToHideOverhead;
+
+ return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
+}
+
+unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
+ bool IsPairwise) const {
+
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
+
+ MVT MTy = LT.second;
+
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ // We use the Intel Architecture Code Analyzer (IACA) to measure the throughput
+ // and use that throughput as the cost.
+
+ static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
+ { ISD::FADD, MVT::v2f64, 2 },
+ { ISD::FADD, MVT::v4f32, 4 },
+ { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
+ { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
+ { ISD::ADD, MVT::v8i16, 5 },
+ };
+
+ static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
+ { ISD::FADD, MVT::v4f32, 4 },
+ { ISD::FADD, MVT::v4f64, 5 },
+ { ISD::FADD, MVT::v8f32, 7 },
+ { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
+ { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
+ { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
+ { ISD::ADD, MVT::v8i16, 5 },
+ { ISD::ADD, MVT::v8i32, 5 },
+ };
+
+ static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
+ { ISD::FADD, MVT::v2f64, 2 },
+ { ISD::FADD, MVT::v4f32, 4 },
+ { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
+ { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
+ { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
+ };
+
+ static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
+ { ISD::FADD, MVT::v4f32, 3 },
+ { ISD::FADD, MVT::v4f64, 3 },
+ { ISD::FADD, MVT::v8f32, 4 },
+ { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
+ { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
+ { ISD::ADD, MVT::v4i64, 3 },
+ { ISD::ADD, MVT::v8i16, 4 },
+ { ISD::ADD, MVT::v8i32, 5 },
+ };
+
+ if (IsPairwise) {
+ if (ST->hasAVX()) {
+ int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
+ if (Idx != -1)
+ return LT.first * AVX1CostTblPairWise[Idx].Cost;
+ }
+
+ if (ST->hasSSE42()) {
+ int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
+ if (Idx != -1)
+ return LT.first * SSE42CostTblPairWise[Idx].Cost;
+ }
+ } else {
+ if (ST->hasAVX()) {
+ int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
+ if (Idx != -1)
+ return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
+ }
+
+ if (ST->hasSSE42()) {
+ int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
+ if (Idx != -1)
+ return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
+ }
+ }
+
+ return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
+}
+
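
The new non-power-of-two path in getMemoryOpCost() above scalarizes the access and charges one insert/extract per lane on top of the scalar memory operations. A back-of-the-envelope check of that formula under assumed unit scalar costs; the constants and the program are illustrative, not the real TTI values:

    #include <cstdio>

    int main() {
      // Model a load of <5 x float>: 5 is not a power of two, so the patch
      // charges NumElem scalar loads plus one insertelement per loaded lane.
      const unsigned NumElem = 5;
      const unsigned ScalarLoadCost = 1;  // assumed cost of one 32-bit load
      const unsigned InsertCost = 1;      // assumed cost of one insertelement
      unsigned SplitCost = NumElem * InsertCost;          // scalarization overhead
      unsigned Total = NumElem * ScalarLoadCost + SplitCost;
      std::printf("modelled <5 x float> load cost: %u\n", Total); // prints 10
      return 0;
    }
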
diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp
index 0f77948c0eff..66ae9c2d7f9d 100644
--- a/lib/Target/X86/X86VZeroUpper.cpp
+++ b/lib/Target/X86/X86VZeroUpper.cpp
@@ -105,23 +105,28 @@ FunctionPass *llvm::createX86IssueVZeroUpperPass() {
}
static bool isYmmReg(unsigned Reg) {
- if (Reg >= X86::YMM0 && Reg <= X86::YMM15)
- return true;
+ return (Reg >= X86::YMM0 && Reg <= X86::YMM31);
+}
- return false;
+static bool isZmmReg(unsigned Reg) {
+ return (Reg >= X86::ZMM0 && Reg <= X86::ZMM31);
}
static bool checkFnHasLiveInYmm(MachineRegisterInfo &MRI) {
for (MachineRegisterInfo::livein_iterator I = MRI.livein_begin(),
E = MRI.livein_end(); I != E; ++I)
- if (isYmmReg(I->first))
+ if (isYmmReg(I->first) || isZmmReg(I->first))
return true;
return false;
}
static bool clobbersAllYmmRegs(const MachineOperand &MO) {
- for (unsigned reg = X86::YMM0; reg < X86::YMM15; ++reg) {
+ for (unsigned reg = X86::YMM0; reg <= X86::YMM31; ++reg) {
+ if (!MO.clobbersPhysReg(reg))
+ return false;
+ }
+ for (unsigned reg = X86::ZMM0; reg <= X86::ZMM31; ++reg) {
if (!MO.clobbersPhysReg(reg))
return false;
}
@@ -143,6 +148,25 @@ static bool hasYmmReg(MachineInstr *MI) {
return false;
}
+/// clobbersAnyYmmReg() - Check if any YMM register will be clobbered by this
+/// instruction.
+static bool clobbersAnyYmmReg(MachineInstr *MI) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isRegMask())
+ continue;
+ for (unsigned reg = X86::YMM0; reg <= X86::YMM31; ++reg) {
+ if (MO.clobbersPhysReg(reg))
+ return true;
+ }
+ for (unsigned reg = X86::ZMM0; reg <= X86::ZMM31; ++reg) {
+ if (MO.clobbersPhysReg(reg))
+ return true;
+ }
+ }
+ return false;
+}
+
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// vzero upper instructions before function calls.
bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
@@ -226,8 +250,9 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
bool BBHasCall = false;
for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
- MachineInstr *MI = I;
DebugLoc dl = I->getDebugLoc();
+ MachineInstr *MI = I;
+
bool isControlFlow = MI->isCall() || MI->isReturn();
// Shortcut: don't need to check regular instructions in dirty state.
@@ -246,6 +271,14 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
if (!isControlFlow)
continue;
+ // If the call won't clobber any YMM register, skip it as well. It usually
+ // happens on helper function calls (such as '_chkstk', '_ftol2') where
+ // standard calling convention is not used (RegMask is not used to mark
+ // registers clobbered, and register usage (def/imp-def/use) is well-defined
+ // and explicitly specified).
+ if (MI->isCall() && !clobbersAnyYmmReg(MI))
+ continue;
+
BBHasCall = true;
// The VZEROUPPER instruction resets the upper 128 bits of all Intel AVX
diff --git a/lib/Target/XCore/CMakeLists.txt b/lib/Target/XCore/CMakeLists.txt
index d5bfddc23e0f..3fa3b34245d7 100644
--- a/lib/Target/XCore/CMakeLists.txt
+++ b/lib/Target/XCore/CMakeLists.txt
@@ -22,10 +22,11 @@ add_llvm_target(XCoreCodeGen
XCoreSubtarget.cpp
XCoreTargetMachine.cpp
XCoreTargetObjectFile.cpp
+ XCoreTargetTransformInfo.cpp
XCoreSelectionDAGInfo.cpp
)
-add_dependencies(LLVMXCoreCodeGen intrinsics_gen)
+add_dependencies(LLVMXCoreCodeGen XCoreCommonTableGen intrinsics_gen)
add_subdirectory(Disassembler)
add_subdirectory(InstPrinter)
diff --git a/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp b/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
index a2ae40c58a6e..9c20abd4c6c7 100644
--- a/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
+++ b/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
@@ -29,7 +29,7 @@ namespace {
/// \brief A disassembler class for XCore.
class XCoreDisassembler : public MCDisassembler {
- const MCRegisterInfo *RegInfo;
+ OwningPtr<const MCRegisterInfo> RegInfo;
public:
XCoreDisassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info) :
MCDisassembler(STI), RegInfo(Info) {}
@@ -42,7 +42,7 @@ public:
raw_ostream &vStream,
raw_ostream &cStream) const;
- const MCRegisterInfo *getRegInfo() const { return RegInfo; }
+ const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); }
};
}
@@ -53,7 +53,7 @@ static bool readInstruction16(const MemoryObject &region,
uint8_t Bytes[4];
// We want to read exactly 2 Bytes of data.
- if (region.readBytes(address, 2, Bytes, NULL) == -1) {
+ if (region.readBytes(address, 2, Bytes) == -1) {
size = 0;
return false;
}
@@ -69,7 +69,7 @@ static bool readInstruction32(const MemoryObject &region,
uint8_t Bytes[4];
// We want to read exactly 4 Bytes of data.
- if (region.readBytes(address, 4, Bytes, NULL) == -1) {
+ if (region.readBytes(address, 4, Bytes) == -1) {
size = 0;
return false;
}
diff --git a/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp b/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
index 1cfdbda003b5..3d1c4745d6ac 100644
--- a/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
+++ b/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
@@ -13,7 +13,7 @@ using namespace llvm;
void XCoreMCAsmInfo::anchor() { }
-XCoreMCAsmInfo::XCoreMCAsmInfo(const Target &T, StringRef TT) {
+XCoreMCAsmInfo::XCoreMCAsmInfo(StringRef TT) {
SupportsDebugInformation = true;
Data16bitsDirective = "\t.short\t";
Data32bitsDirective = "\t.long\t";
@@ -23,10 +23,14 @@ XCoreMCAsmInfo::XCoreMCAsmInfo(const Target &T, StringRef TT) {
PrivateGlobalPrefix = ".L";
AscizDirective = ".asciiz";
- WeakDefDirective = "\t.weak\t";
- WeakRefDirective = "\t.weak\t";
+
+ HiddenVisibilityAttr = MCSA_Invalid;
+ HiddenDeclarationVisibilityAttr = MCSA_Invalid;
+ ProtectedVisibilityAttr = MCSA_Invalid;
// Debug
HasLEB128 = true;
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+ DwarfRegNumForCFI = true;
}
diff --git a/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h b/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
index 076777541e33..e53c96b8f745 100644
--- a/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
+++ b/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
@@ -14,16 +14,16 @@
#ifndef XCORETARGETASMINFO_H
#define XCORETARGETASMINFO_H
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
class StringRef;
class Target;
- class XCoreMCAsmInfo : public MCAsmInfo {
+ class XCoreMCAsmInfo : public MCAsmInfoELF {
virtual void anchor();
public:
- explicit XCoreMCAsmInfo(const Target &T, StringRef TT);
+ explicit XCoreMCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp b/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
index c1773653f5fb..10bb6dfa928a 100644
--- a/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
+++ b/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
@@ -51,13 +51,13 @@ static MCSubtargetInfo *createXCoreMCSubtargetInfo(StringRef TT, StringRef CPU,
return X;
}
-static MCAsmInfo *createXCoreMCAsmInfo(const Target &T, StringRef TT) {
- MCAsmInfo *MAI = new XCoreMCAsmInfo(T, TT);
+static MCAsmInfo *createXCoreMCAsmInfo(const MCRegisterInfo &MRI,
+ StringRef TT) {
+ MCAsmInfo *MAI = new XCoreMCAsmInfo(TT);
// Initial state of the frame pointer is SP.
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(XCore::SP, 0);
- MAI->addInitialFrameState(0, Dst, Src);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, XCore::SP, 0);
+ MAI->addInitialFrameState(Inst);
return MAI;
}
diff --git a/lib/Target/XCore/README.txt b/lib/Target/XCore/README.txt
index b69205b49b6c..28d551a1024c 100644
--- a/lib/Target/XCore/README.txt
+++ b/lib/Target/XCore/README.txt
@@ -5,3 +5,4 @@ To-do
* Tailcalls
* Investigate loop alignment
* Add builtins
+
diff --git a/lib/Target/XCore/XCore.h b/lib/Target/XCore/XCore.h
index 2f375fc952ca..73c310be0344 100644
--- a/lib/Target/XCore/XCore.h
+++ b/lib/Target/XCore/XCore.h
@@ -31,6 +31,8 @@ namespace llvm {
CodeGenOpt::Level OptLevel);
ModulePass *createXCoreLowerThreadLocalPass();
+ ImmutablePass *createXCoreTargetTransformInfoPass(const XCoreTargetMachine *TM);
+
} // end namespace llvm;
#endif
diff --git a/lib/Target/XCore/XCoreAsmPrinter.cpp b/lib/Target/XCore/XCoreAsmPrinter.cpp
index e177ad300cfe..c03dfe61504e 100644
--- a/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -36,6 +36,7 @@
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
@@ -49,7 +50,6 @@ namespace {
class XCoreAsmPrinter : public AsmPrinter {
const XCoreSubtarget &Subtarget;
XCoreMCInstLower MCInstLowering;
- void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
public:
explicit XCoreAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
: AsmPrinter(TM, Streamer), Subtarget(TM.getSubtarget<XCoreSubtarget>()),
@@ -76,7 +76,6 @@ namespace {
void EmitInstruction(const MachineInstr *MI);
void EmitFunctionBodyStart();
void EmitFunctionBodyEnd();
- virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
};
} // end of anonymous namespace
@@ -85,16 +84,17 @@ void XCoreAsmPrinter::emitArrayBound(MCSymbol *Sym, const GlobalVariable *GV) {
GV->hasWeakLinkage()) ||
GV->hasLinkOnceLinkage()) && "Unexpected linkage");
if (ArrayType *ATy = dyn_cast<ArrayType>(
- cast<PointerType>(GV->getType())->getElementType())) {
- OutStreamer.EmitSymbolAttribute(Sym, MCSA_Global);
- // FIXME: MCStreamerize.
- OutStreamer.EmitRawText(StringRef(".globound"));
- OutStreamer.EmitRawText("\t.set\t" + Twine(Sym->getName()));
- OutStreamer.EmitRawText(".globound," + Twine(ATy->getNumElements()));
+ cast<PointerType>(GV->getType())->getElementType())) {
+
+ MCSymbol *SymGlob = OutContext.GetOrCreateSymbol(
+ Twine(Sym->getName() + StringRef(".globound")));
+ OutStreamer.EmitSymbolAttribute(SymGlob, MCSA_Global);
+ OutStreamer.EmitAssignment(SymGlob,
+ MCConstantExpr::Create(ATy->getNumElements(),
+ OutContext));
if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage()) {
// TODO Use COMDAT groups for LinkOnceLinkage
- OutStreamer.EmitRawText(MAI->getWeakDefDirective() +Twine(Sym->getName())+
- ".globound");
+ OutStreamer.EmitSymbolAttribute(SymGlob, MCSA_Weak);
}
}
}
@@ -109,7 +109,7 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(GV, Mang,TM));
- MCSymbol *GVSym = Mang->getSymbol(GV);
+ MCSymbol *GVSym = getSymbol(GV);
const Constant *C = GV->getInitializer();
unsigned Align = (unsigned)TD->getPreferredTypeAlignmentShift(C->getType());
@@ -216,7 +216,7 @@ void XCoreAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
O << *MO.getMBB()->getSymbol();
break;
case MachineOperand::MO_GlobalAddress:
- O << *Mang->getSymbol(MO.getGlobal());
+ O << *getSymbol(MO.getGlobal());
break;
case MachineOperand::MO_ExternalSymbol:
O << MO.getSymbolName();
@@ -242,45 +242,14 @@ void XCoreAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
bool XCoreAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant,const char *ExtraCode,
raw_ostream &O) {
- // Does this asm operand have a single letter operand modifier?
- if (ExtraCode && ExtraCode[0])
- if (ExtraCode[1] != 0) return true; // Unknown modifier.
-
- switch (ExtraCode[0]) {
- default:
- // See if this is a generic print operand
- return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
- }
-
- printOperand(MI, OpNo, O);
- return false;
-}
-
-void XCoreAsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
- raw_ostream &OS) {
- unsigned NOps = MI->getNumOperands();
- assert(NOps == 4);
- OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
- // cast away const; DIetc do not take const operands for some reason.
- DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata()));
- OS << V.getName();
- OS << " <- ";
- // Frame address. Currently handles register +- offset only.
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
- OS << '['; printOperand(MI, 0, OS); OS << '+'; printOperand(MI, 1, OS);
- OS << ']';
- OS << "+";
- printOperand(MI, NOps-2, OS);
-}
+ // Print the operand if there is no operand modifier.
+ if (!ExtraCode || !ExtraCode[0]) {
+ printOperand(MI, OpNo, O);
+ return false;
+ }
-MachineLocation XCoreAsmPrinter::
-getDebugValueLocation(const MachineInstr *MI) const {
- // Handles frame addresses emitted in XCoreInstrInfo::emitFrameIndexDebugValue.
- assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm() &&
- "Unexpected MachineOperand types");
- return MachineLocation(MI->getOperand(0).getReg(),
- MI->getOperand(1).getImm());
+ // Otherwise fall back on the default implementation.
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
}
void XCoreAsmPrinter::EmitInstruction(const MachineInstr *MI) {
@@ -288,15 +257,8 @@ void XCoreAsmPrinter::EmitInstruction(const MachineInstr *MI) {
raw_svector_ostream O(Str);
switch (MI->getOpcode()) {
- case XCore::DBG_VALUE: {
- if (isVerbose() && OutStreamer.hasRawTextSupport()) {
- SmallString<128> TmpStr;
- raw_svector_ostream OS(TmpStr);
- PrintDebugValueComment(MI, OS);
- OutStreamer.EmitRawText(StringRef(OS.str()));
- }
- return;
- }
+ case XCore::DBG_VALUE:
+ llvm_unreachable("Should be handled target independently");
case XCore::ADD_2rus:
if (MI->getOperand(2).getImm() == 0) {
O << "\tmov "
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index beeb07f831c6..c34b35c140f9 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -30,10 +30,6 @@
using namespace llvm;
// helper functions. FIXME: Eliminate.
-static inline bool isImmUs(unsigned val) {
- return val <= 11;
-}
-
static inline bool isImmU6(unsigned val) {
return val < (1 << 6);
}
@@ -92,11 +88,16 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo *MMI = &MF.getMMI();
+ const MCRegisterInfo *MRI = MMI->getContext().getRegisterInfo();
const XCoreInstrInfo &TII =
*static_cast<const XCoreInstrInfo*>(MF.getTarget().getInstrInfo());
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+ if (MFI->getMaxAlignment() > getStackAlignment())
+ report_fatal_error("emitPrologue unsupported alignment: "
+ + Twine(MFI->getMaxAlignment()));
+
bool FP = hasFP(MF);
const AttributeSet &PAL = MF.getFunction()->getAttributes();
@@ -116,9 +117,9 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
}
bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(MF);
+ bool saveLR = XFI->getUsesLR();
// Do we need to allocate space on the stack?
if (FrameSize) {
- bool saveLR = XFI->getUsesLR();
bool LRSavedOnEntry = false;
int Opcode;
if (saveLR && (MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0)) {
@@ -132,34 +133,28 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
if (emitFrameMoves) {
- std::vector<MachineMove> &Moves = MMI->getFrameMoves();
-
// Show update of SP.
MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(FrameLabel);
-
- MachineLocation SPDst(MachineLocation::VirtualFP);
- MachineLocation SPSrc(MachineLocation::VirtualFP, -FrameSize * 4);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
-
+ MMI->addFrameInst(MCCFIInstruction::createDefCfaOffset(FrameLabel,
+ -FrameSize*4));
if (LRSavedOnEntry) {
- MachineLocation CSDst(MachineLocation::VirtualFP, 0);
- MachineLocation CSSrc(XCore::LR);
- Moves.push_back(MachineMove(FrameLabel, CSDst, CSSrc));
+ unsigned Reg = MRI->getDwarfRegNum(XCore::LR, true);
+ MMI->addFrameInst(MCCFIInstruction::createOffset(FrameLabel, Reg, 0));
}
}
- if (saveLR) {
- int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
- storeToStack(MBB, MBBI, XCore::LR, LRSpillOffset + FrameSize*4, dl, TII);
- MBB.addLiveIn(XCore::LR);
+ }
+ if (saveLR) {
+ int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
+ storeToStack(MBB, MBBI, XCore::LR, LRSpillOffset + FrameSize*4, dl, TII);
+ MBB.addLiveIn(XCore::LR);
- if (emitFrameMoves) {
- MCSymbol *SaveLRLabel = MMI->getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveLRLabel);
- MachineLocation CSDst(MachineLocation::VirtualFP, LRSpillOffset);
- MachineLocation CSSrc(XCore::LR);
- MMI->getFrameMoves().push_back(MachineMove(SaveLRLabel, CSDst, CSSrc));
- }
+ if (emitFrameMoves) {
+ MCSymbol *SaveLRLabel = MMI->getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveLRLabel);
+ unsigned Reg = MRI->getDwarfRegNum(XCore::LR, true);
+ MMI->addFrameInst(MCCFIInstruction::createOffset(SaveLRLabel, Reg,
+ LRSpillOffset));
}
}
@@ -172,37 +167,34 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
if (emitFrameMoves) {
MCSymbol *SaveR10Label = MMI->getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveR10Label);
- MachineLocation CSDst(MachineLocation::VirtualFP, FPSpillOffset);
- MachineLocation CSSrc(XCore::R10);
- MMI->getFrameMoves().push_back(MachineMove(SaveR10Label, CSDst, CSSrc));
+ unsigned Reg = MRI->getDwarfRegNum(XCore::R10, true);
+ MMI->addFrameInst(MCCFIInstruction::createOffset(SaveR10Label, Reg,
+ FPSpillOffset));
}
// Set the FP from the SP.
unsigned FramePtr = XCore::R10;
- BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr)
- .addImm(0);
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr).addImm(0);
if (emitFrameMoves) {
// Show FP is now valid.
MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(FrameLabel);
- MachineLocation SPDst(FramePtr);
- MachineLocation SPSrc(MachineLocation::VirtualFP);
- MMI->getFrameMoves().push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+ unsigned Reg = MRI->getDwarfRegNum(FramePtr, true);
+ MMI->addFrameInst(MCCFIInstruction::createDefCfaRegister(FrameLabel,
+ Reg));
}
}
if (emitFrameMoves) {
// Frame moves for callee saved.
- std::vector<MachineMove> &Moves = MMI->getFrameMoves();
std::vector<std::pair<MCSymbol*, CalleeSavedInfo> >&SpillLabels =
XFI->getSpillLabels();
for (unsigned I = 0, E = SpillLabels.size(); I != E; ++I) {
MCSymbol *SpillLabel = SpillLabels[I].first;
CalleeSavedInfo &CSI = SpillLabels[I].second;
int Offset = MFI->getObjectOffset(CSI.getFrameIdx());
- unsigned Reg = CSI.getReg();
- MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
- MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(SpillLabel, CSDst, CSSrc));
+ unsigned Reg = MRI->getDwarfRegNum(CSI.getReg(), true);
+ MMI->addFrameInst(MCCFIInstruction::createOffset(SpillLabel, Reg,
+ Offset));
}
}
}
@@ -213,6 +205,7 @@ void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
const XCoreInstrInfo &TII =
*static_cast<const XCoreInstrInfo*>(MF.getTarget().getInstrInfo());
+ XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
DebugLoc dl = MBBI->getDebugLoc();
bool FP = hasFP(MF);
@@ -237,28 +230,32 @@ void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
report_fatal_error("emitEpilogue Frame size too big: " + Twine(FrameSize));
}
- if (FrameSize) {
- XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
+ if (FP) {
+ // Restore R10
+ int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot());
+ FPSpillOffset += FrameSize*4;
+ loadFromStack(MBB, MBBI, XCore::R10, FPSpillOffset, dl, TII);
+ }
- if (FP) {
- // Restore R10
- int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot());
- FPSpillOffset += FrameSize*4;
- loadFromStack(MBB, MBBI, XCore::R10, FPSpillOffset, dl, TII);
- }
- bool restoreLR = XFI->getUsesLR();
- if (restoreLR && MFI->getObjectOffset(XFI->getLRSpillSlot()) != 0) {
- int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
- LRSpillOffset += FrameSize*4;
- loadFromStack(MBB, MBBI, XCore::LR, LRSpillOffset, dl, TII);
- restoreLR = false;
- }
+ bool restoreLR = XFI->getUsesLR();
+ if (restoreLR &&
+ (FrameSize == 0 || MFI->getObjectOffset(XFI->getLRSpillSlot()) != 0)) {
+ int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
+ LRSpillOffset += FrameSize*4;
+ loadFromStack(MBB, MBBI, XCore::LR, LRSpillOffset, dl, TII);
+ restoreLR = false;
+ }
+
+ if (FrameSize) {
if (restoreLR) {
// Fold prologue into return instruction
+ assert(MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0);
assert(MBBI->getOpcode() == XCore::RETSP_u6
|| MBBI->getOpcode() == XCore::RETSP_lu6);
int Opcode = (isU6) ? XCore::RETSP_u6 : XCore::RETSP_lu6;
- BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
+ for (unsigned i = 3, e = MBBI->getNumOperands(); i < e; ++i)
+ MIB->addOperand(MBBI->getOperand(i)); // copy any variadic operands
MBB.erase(MBBI);
} else {
int Opcode = (isU6) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
diff --git a/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index eb29b5001080..e28f84fec2a9 100644
--- a/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -37,13 +37,11 @@ using namespace llvm;
///
namespace {
class XCoreDAGToDAGISel : public SelectionDAGISel {
- const XCoreTargetLowering &Lowering;
const XCoreSubtarget &Subtarget;
public:
XCoreDAGToDAGISel(XCoreTargetMachine &TM, CodeGenOpt::Level OptLevel)
: SelectionDAGISel(TM, OptLevel),
- Lowering(*TM.getTargetLowering()),
Subtarget(*TM.getSubtargetImpl()) { }
SDNode *Select(SDNode *N);
@@ -61,7 +59,7 @@ namespace {
if (!isMask_32(value)) {
return false;
}
- int msksize = 32 - CountLeadingZeros_32(value);
+ int msksize = 32 - countLeadingZeros(value);
return (msksize >= 1 && msksize <= 8) ||
msksize == 16 || msksize == 24 || msksize == 32;
}
@@ -109,7 +107,7 @@ bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Addr, SDValue &Base,
}
SDNode *XCoreDAGToDAGISel::Select(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
switch (N->getOpcode()) {
default: break;
case ISD::Constant: {
@@ -117,7 +115,7 @@ SDNode *XCoreDAGToDAGISel::Select(SDNode *N) {
if (immMskBitp(N)) {
// Transformation function: get the size of a mask
// Look for the first non-zero bit
- SDValue MskSize = getI32Imm(32 - CountLeadingZeros_32(Val));
+ SDValue MskSize = getI32Imm(32 - countLeadingZeros((uint32_t)Val));
return CurDAG->getMachineNode(XCore::MKMSK_rus, dl,
MVT::i32, MskSize);
}
@@ -125,7 +123,7 @@ SDNode *XCoreDAGToDAGISel::Select(SDNode *N) {
SDValue CPIdx =
CurDAG->getTargetConstantPool(ConstantInt::get(
Type::getInt32Ty(*CurDAG->getContext()), Val),
- TLI.getPointerTy());
+ getTargetLowering()->getPointerTy());
SDNode *node = CurDAG->getMachineNode(XCore::LDWCP_lru6, dl, MVT::i32,
MVT::Other, CPIdx,
CurDAG->getEntryNode());
@@ -204,12 +202,12 @@ replaceInChain(SelectionDAG *CurDAG, SDValue Chain, SDValue Old, SDValue New)
}
if (!found)
return SDValue();
- return CurDAG->getNode(ISD::TokenFactor, Chain->getDebugLoc(), MVT::Other,
+ return CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
&Ops[0], Ops.size());
}
SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
// (brind (int_xcore_checkevent (addr)))
SDValue Chain = N->getOperand(0);
SDValue Addr = N->getOperand(1);
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 2d27f1a0f6e3..89ad27daec13 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -59,6 +59,7 @@ getTargetNodeName(unsigned Opcode) const
case XCoreISD::CRC8 : return "XCoreISD::CRC8";
case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
+ case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
default : return NULL;
}
}
@@ -79,7 +80,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setStackPointerRegisterToSaveRestore(XCore::SP);
- setSchedulingPreference(Sched::RegPressure);
+ setSchedulingPreference(Sched::Source);
// Use i32 for setcc operations results (slt, sgt, ...).
setBooleanContents(ZeroOrOneBooleanContent);
@@ -148,6 +149,13 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
+ // Exception handling
+ setExceptionPointerRegister(XCore::R0);
+ setExceptionSelectorRegister(XCore::R1);
+
+ // Atomic operations
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+
// TRAMPOLINE is custom lowered.
setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
@@ -166,6 +174,24 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setMinFunctionAlignment(1);
}
+bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+ if (Val.getOpcode() != ISD::LOAD)
+ return false;
+
+ EVT VT1 = Val.getValueType();
+ if (!VT1.isSimple() || !VT1.isInteger() ||
+ !VT2.isSimple() || !VT2.isInteger())
+ return false;
+
+ switch (VT1.getSimpleVT().SimpleTy) {
+ default: break;
+ case MVT::i8:
+ return true;
+ }
+
+ return false;
+}
+
SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode())
@@ -188,6 +214,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
default:
llvm_unreachable("unimplemented operand");
}
@@ -215,7 +242,7 @@ void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
Op.getOperand(3), Op.getOperand(4));
return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
@@ -227,7 +254,7 @@ getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
SelectionDAG &DAG) const
{
// FIXME there is no actual debug info here
- DebugLoc dl = GA.getDebugLoc();
+ SDLoc dl(GA);
const GlobalValue *UnderlyingGV = GV;
// If GV is an alias then use the aliasee to determine the wrapper type
if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
@@ -243,7 +270,7 @@ getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GN->getGlobal();
int64_t Offset = GN->getOffset();
@@ -259,15 +286,10 @@ LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
return GA;
}
-static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
- DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
-}
-
SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());
@@ -280,7 +302,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
// FIXME there isn't really debug info here
- DebugLoc dl = CP->getDebugLoc();
+ SDLoc dl(CP);
EVT PtrVT = Op.getValueType();
SDValue Res;
if (CP->isMachineConstantPoolEntry()) {
@@ -303,7 +325,7 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
SDValue Chain = Op.getOperand(0);
SDValue Table = Op.getOperand(1);
SDValue Index = Op.getOperand(2);
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
unsigned JTI = JT->getIndex();
MachineFunction &MF = DAG.getMachineFunction();
@@ -322,7 +344,7 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
}
SDValue XCoreTargetLowering::
-lowerLoadWordFromAlignedBasePlusOffset(DebugLoc DL, SDValue Chain, SDValue Base,
+lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
int64_t Offset, SelectionDAG &DAG) const
{
if ((Offset & 0x3) == 0) {
@@ -388,7 +410,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
if (!LD->isVolatile()) {
const GlobalValue *GV;
@@ -469,7 +491,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
SDValue Chain = ST->getChain();
SDValue BasePtr = ST->getBasePtr();
SDValue Value = ST->getValue();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
if (ST->getAlignment() == 2) {
SDValue Low = Value;
@@ -516,7 +538,7 @@ LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
"Unexpected operand to lower!");
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
SDValue Zero = DAG.getConstant(0, MVT::i32);
@@ -533,7 +555,7 @@ LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
"Unexpected operand to lower!");
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
SDValue Zero = DAG.getConstant(0, MVT::i32);
@@ -618,7 +640,7 @@ TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
} else {
return SDValue();
}
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
SDValue LL, RL, AddendL, AddendH;
LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
@@ -677,7 +699,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
return Result;
}
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
// Extract components
SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
@@ -707,31 +729,33 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
- llvm_unreachable("unimplemented");
- // FIXME Arguments passed by reference need a extra dereference.
+ // Whilst LLVM does not support aggregate varargs we can ignore
+ // the possibility of the ValueType being an implicit byVal vararg.
SDNode *Node = Op.getNode();
- DebugLoc dl = Node->getDebugLoc();
- const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
- EVT VT = Node->getValueType(0);
- SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
- Node->getOperand(1), MachinePointerInfo(V),
+ EVT VT = Node->getValueType(0); // not an aggregate
+ SDValue InChain = Node->getOperand(0);
+ SDValue VAListPtr = Node->getOperand(1);
+ EVT PtrVT = VAListPtr.getValueType();
+ const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
+ SDLoc dl(Node);
+ SDValue VAList = DAG.getLoad(PtrVT, dl, InChain,
+ VAListPtr, MachinePointerInfo(SV),
false, false, false, 0);
// Increment the pointer, VAList, to the next vararg
- SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
- DAG.getConstant(VT.getSizeInBits(),
- getPointerTy()));
+ SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
+ DAG.getIntPtrConstant(VT.getSizeInBits() / 8));
// Store the incremented VAList to the legalized pointer
- Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
- MachinePointerInfo(V), false, false, 0);
+ InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
+ MachinePointerInfo(SV), false, false, 0);
// Load the actual argument out of the pointer VAList
- return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
+ return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
false, false, false, 0);
}
SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// vastart stores the address of the VarArgsFrameIndex slot into the
// memory location argument
MachineFunction &MF = DAG.getMachineFunction();
@@ -743,7 +767,7 @@ LowerVASTART(SDValue Op, SelectionDAG &DAG) const
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
// Depths > 0 not supported yet!
if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
return SDValue();
@@ -783,7 +807,7 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
SDValue Addr = Trmp;
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
Addr, MachinePointerInfo(TrmpAddr), false, false,
0);
@@ -817,7 +841,7 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (IntNo) {
case Intrinsic::xcore_crc8:
@@ -832,6 +856,12 @@ LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
+SDValue XCoreTargetLowering::
+LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
+}
+
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -847,10 +877,10 @@ SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
@@ -883,7 +913,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// Analyze operands of the call, assigning locations to each operand.
@@ -901,7 +931,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
unsigned NumBytes = CCInfo.getNextStackOffset();
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes,
- getPointerTy(), true));
+ getPointerTy(), true), dl);
SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
SmallVector<SDValue, 12> MemOpChains;
@@ -991,7 +1021,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Chain = DAG.getCALLSEQ_END(Chain,
DAG.getConstant(NumBytes, getPointerTy(), true),
DAG.getConstant(0, getPointerTy(), true),
- InFlag);
+ InFlag, dl);
InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we
@@ -1006,7 +1036,7 @@ SDValue
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
@@ -1031,13 +1061,17 @@ XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//
+namespace {
+ struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
+}
+
/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl,
+ SDLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
@@ -1062,7 +1096,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
- DebugLoc dl,
+ SDLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
@@ -1080,9 +1114,22 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
unsigned LRSaveSize = StackSlotSize;
+ // All getCopyFromReg ops must precede any getMemcpys to prevent the
+ // scheduler clobbering a register before it has been copied.
+ // The stages are:
+ // 1. CopyFromReg (and load) arg & vararg registers.
+ // 2. Chain CopyFromReg nodes into a TokenFactor.
+ // 3. Memcpy 'byVal' args & push final InVals.
+ // 4. Chain mem ops nodes into a TokenFactor.
+ SmallVector<SDValue, 4> CFRegNode;
+ SmallVector<ArgDataPair, 4> ArgData;
+ SmallVector<SDValue, 4> MemOps;
+
+ // 1a. CopyFromReg (and load) arg registers.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
+ SDValue ArgIn;
if (VA.isRegLoc()) {
// Arguments passed in registers
@@ -1099,7 +1146,8 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
case MVT::i32:
unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
- InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
+ ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
+ CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
}
} else {
// sanity check
@@ -1119,14 +1167,17 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// Create the SelectionDAG nodes corresponding to a load
//from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
- InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(FI),
- false, false, false, 0));
+ ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, false, 0);
}
+ const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
+ ArgData.push_back(ADP);
}
+ // 1b. CopyFromReg vararg registers.
if (isVarArg) {
- /* Argument registers */
+ // Argument registers
static const uint16_t ArgRegs[] = {
XCore::R0, XCore::R1, XCore::R2, XCore::R3
};
@@ -1134,7 +1185,6 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
array_lengthof(ArgRegs));
if (FirstVAReg < array_lengthof(ArgRegs)) {
- SmallVector<SDValue, 4> MemOps;
int offset = 0;
// Save remaining registers, storing higher register numbers at a higher
// address
@@ -1150,14 +1200,12 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
RegInfo.addLiveIn(ArgRegs[i], VReg);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
+ CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
// Move argument from virt reg -> stack
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
MachinePointerInfo(), false, false, 0);
MemOps.push_back(Store);
}
- if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOps[0], MemOps.size());
} else {
// This will point to the next argument passed via stack.
XFI->setVarArgsFrameIndex(
@@ -1166,6 +1214,42 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
}
}
+ // 2. Chain CopyFromReg nodes into a TokenFactor.
+ if (!CFRegNode.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &CFRegNode[0],
+ CFRegNode.size());
+
+ // 3. Memcpy 'byVal' args & push final InVals.
+ // Aggregates passed "byVal" need to be copied by the callee.
+ // The callee will use a pointer to this copy, rather than the original
+ // pointer.
+ for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
+ ArgDE = ArgData.end();
+ ArgDI != ArgDE; ++ArgDI) {
+ if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
+ unsigned Size = ArgDI->Flags.getByValSize();
+ unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
+ // Create a new object on the stack and copy the pointee into it.
+ int FI = MFI->CreateStackObject(Size, Align, false, false);
+ SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+ InVals.push_back(FIN);
+ MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
+ DAG.getConstant(Size, MVT::i32),
+ Align, false, false,
+ MachinePointerInfo(),
+ MachinePointerInfo()));
+ } else {
+ InVals.push_back(ArgDI->SDV);
+ }
+ }
+
+ // 4. Chain mem ops nodes into a TokenFactor.
+ if (!MemOps.empty()) {
+ MemOps.push_back(Chain);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0],
+ MemOps.size());
+ }
+
return Chain;
}
@@ -1188,7 +1272,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+ SDLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of
// the return value to a location
@@ -1305,7 +1389,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc dl = N->getDebugLoc();
+ SDLoc dl(N);
switch (N->getOpcode()) {
default: break;
case XCoreISD::LADD: {
@@ -1582,7 +1666,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
default : break;
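For context on the LowerVAARG rewrite in this file: for a scalar argument the new lowering is the standard three-step va_arg expansion (load the current va_list slot, advance the pointer by the argument size in bytes, then load the argument from the old slot). A minimal C-level sketch of the equivalent behaviour, with hypothetical names; the real code builds the matching load/add/store SelectionDAG nodes rather than C:

static int next_vararg_i32(char **va_list_ptr) {
  char *slot = *va_list_ptr;           // VAList = load(VAListPtr)
  *va_list_ptr = slot + sizeof(int);   // store(VAListPtr, VAList + SizeInBits/8)
  return *(int *)slot;                 // load the actual argument from VAList
}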
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index c7dfa2600691..bc08497b12ed 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -70,7 +70,10 @@ namespace llvm {
BR_JT,
// Jumptable branch using long branches for each entry.
- BR_JT32
+ BR_JT32,
+
+ // Memory barrier.
+ MEMBARRIER
};
}
@@ -83,6 +86,10 @@ namespace llvm {
explicit XCoreTargetLowering(XCoreTargetMachine &TM);
+ using TargetLowering::isZExtFree;
+ virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+
+
virtual unsigned getJumpTableEncoding() const;
virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
@@ -115,7 +122,7 @@ namespace llvm {
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
@@ -123,17 +130,17 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
SDValue getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
SelectionDAG &DAG) const;
- SDValue lowerLoadWordFromAlignedBasePlusOffset(DebugLoc DL, SDValue Chain,
+ SDValue lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain,
SDValue Base, int64_t Offset,
SelectionDAG &DAG) const;
@@ -154,11 +161,12 @@ namespace llvm {
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
// Inline asm support
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
+ MVT VT) const;
// Expand specifics
SDValue TryExpandADDWithMul(SDNode *Op, SelectionDAG &DAG) const;
@@ -177,7 +185,7 @@ namespace llvm {
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
@@ -189,7 +197,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
virtual bool
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
diff --git a/lib/Target/XCore/XCoreInstrInfo.cpp b/lib/Target/XCore/XCoreInstrInfo.cpp
index e457e0dbf027..33c7f31be761 100644
--- a/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -22,7 +22,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "XCoreGenInstrInfo.inc"
namespace llvm {
@@ -39,9 +39,13 @@ namespace XCore {
using namespace llvm;
+
+// Pin the vtable to this file.
+void XCoreInstrInfo::anchor() {}
+
XCoreInstrInfo::XCoreInstrInfo()
: XCoreGenInstrInfo(XCore::ADJCALLSTACKDOWN, XCore::ADJCALLSTACKUP),
- RI(*this) {
+ RI() {
}
static bool isZeroImm(const MachineOperand &op) {
@@ -386,15 +390,6 @@ void XCoreInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
.addImm(0);
}
-MachineInstr*
-XCoreInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
- uint64_t Offset, const MDNode *MDPtr,
- DebugLoc DL) const {
- MachineInstrBuilder MIB = BuildMI(MF, DL, get(XCore::DBG_VALUE))
- .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
- return &*MIB;
-}
-
/// ReverseBranchCondition - Return the inverse opcode of the
/// specified Branch instruction.
bool XCoreInstrInfo::
diff --git a/lib/Target/XCore/XCoreInstrInfo.h b/lib/Target/XCore/XCoreInstrInfo.h
index 42eeed8370f4..4429b07e41ed 100644
--- a/lib/Target/XCore/XCoreInstrInfo.h
+++ b/lib/Target/XCore/XCoreInstrInfo.h
@@ -24,6 +24,7 @@ namespace llvm {
class XCoreInstrInfo : public XCoreGenInstrInfo {
const XCoreRegisterInfo RI;
+ virtual void anchor();
public:
XCoreInstrInfo();
@@ -78,12 +79,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx,
- uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const;
-
virtual bool ReverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const;
};
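The anchor() added to XCoreInstrInfo (here and in XCoreInstrInfo.cpp above) is the usual key-function idiom: a class whose other virtuals are inline gets one deliberately out-of-line virtual member, so the compiler emits its vtable and type info once, in that .cpp file, instead of weakly in every translation unit that includes the header. A generic sketch of the pattern, with hypothetical names:

// Widget.h -- every other virtual is inline, so without an anchor the
// vtable would be emitted in every user of this header.
struct Widget {
  virtual void anchor();                        // deliberately not defined inline
  virtual unsigned size() const { return 0; }
};

// Widget.cpp -- the single out-of-line definition pins the vtable here.
void Widget::anchor() {}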
diff --git a/lib/Target/XCore/XCoreInstrInfo.td b/lib/Target/XCore/XCoreInstrInfo.td
index 587166c1ce1d..934a707e785b 100644
--- a/lib/Target/XCore/XCoreInstrInfo.td
+++ b/lib/Target/XCore/XCoreInstrInfo.td
@@ -70,6 +70,11 @@ def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_XCoreCallSeqStart,
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_XCoreCallSeqEnd,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def SDT_XCoreMEMBARRIER : SDTypeProfile<0, 0, []>;
+
+def XCoreMemBarrier : SDNode<"XCoreISD::MEMBARRIER", SDT_XCoreMEMBARRIER,
+ [SDNPHasChain]>;
+
//===----------------------------------------------------------------------===//
// Instruction Pattern Stuff
//===----------------------------------------------------------------------===//
@@ -84,7 +89,7 @@ def msksize_xform : SDNodeXForm<imm, [{
// Transformation function: get the size of a mask
assert(isMask_32(N->getZExtValue()));
// look for the first non-zero bit
- return getI32Imm(32 - CountLeadingZeros_32(N->getZExtValue()));
+ return getI32Imm(32 - countLeadingZeros((uint32_t)N->getZExtValue()));
}]>;
def neg_xform : SDNodeXForm<imm, [{
@@ -279,12 +284,6 @@ multiclass FRU6_LRU6_backwards_branch<bits<6> opc, string OpcStr> {
!strconcat(OpcStr, " $a, $b"), []>;
}
-multiclass FRU6_LRU6_cp<bits<6> opc, string OpcStr> {
- def _ru6: _FRU6<opc, (outs RRegs:$a), (ins i32imm:$b),
- !strconcat(OpcStr, " $a, cp[$b]"), []>;
- def _lru6: _FLRU6<opc, (outs RRegs:$a), (ins i32imm:$b),
- !strconcat(OpcStr, " $a, cp[$b]"), []>;
-}
// U6
multiclass FU6_LU6<bits<10> opc, string OpcStr, SDNode OpNode> {
@@ -349,6 +348,10 @@ let usesCustomInserter = 1 in {
(select GRRegs:$cond, GRRegs:$T, GRRegs:$F))]>;
}
+let hasSideEffects = 1 in
+def Int_MemBarrier : PseudoInstXCore<(outs), (ins), "#MEMBARRIER",
+ [(XCoreMemBarrier)]>;
+
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
@@ -539,8 +542,13 @@ def STWDP_lru6 : _FLRU6<0b010100, (outs), (ins RRegs:$a, i32imm:$b),
[(store RRegs:$a, (dprelwrapper tglobaladdr:$b))]>;
//let Uses = [CP] in ..
-let mayLoad = 1, isReMaterializable = 1, neverHasSideEffects = 1 in
-defm LDWCP : FRU6_LRU6_cp<0b011011, "ldw">;
+let mayLoad = 1, isReMaterializable = 1, neverHasSideEffects = 1 in {
+def LDWCP_ru6 : _FRU6<0b011011, (outs RRegs:$a), (ins i32imm:$b),
+ "ldw $a, cp[$b]", []>;
+def LDWCP_lru6: _FLRU6<0b011011, (outs RRegs:$a), (ins i32imm:$b),
+ "ldw $a, cp[$b]",
+ [(set RRegs:$a, (load (cprelwrapper tglobaladdr:$b)))]>;
+}
let Uses = [SP] in {
let mayStore=1 in {
diff --git a/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index 2e328b4e3441..afce753dec5c 100644
--- a/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -22,6 +22,9 @@
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/NoFolder.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#define DEBUG_TYPE "xcore-lower-thread-local"
@@ -71,13 +74,104 @@ createLoweredInitializer(ArrayType *NewType, Constant *OriginalInitializer) {
return ConstantArray::get(NewType, Elements);
}
-static bool hasNonInstructionUse(GlobalVariable *GV) {
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;
- ++UI)
- if (!isa<Instruction>(*UI))
- return true;
+static Instruction *
+createReplacementInstr(ConstantExpr *CE, Instruction *Instr) {
+ IRBuilder<true,NoFolder> Builder(Instr);
+ unsigned OpCode = CE->getOpcode();
+ switch (OpCode) {
+ case Instruction::GetElementPtr: {
+ SmallVector<Value *,4> CEOpVec(CE->op_begin(), CE->op_end());
+ ArrayRef<Value *> CEOps(CEOpVec);
+ return dyn_cast<Instruction>(Builder.CreateInBoundsGEP(CEOps[0],
+ CEOps.slice(1)));
+ }
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ return dyn_cast<Instruction>(
+ Builder.CreateBinOp((Instruction::BinaryOps)OpCode,
+ CE->getOperand(0), CE->getOperand(1),
+ CE->getName()));
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast:
+ return dyn_cast<Instruction>(
+ Builder.CreateCast((Instruction::CastOps)OpCode,
+ CE->getOperand(0), CE->getType(),
+ CE->getName()));
+ default:
+ llvm_unreachable("Unhandled constant expression!\n");
+ }
+}
+
+static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
+ do {
+ SmallVector<WeakVH,8> WUsers;
+ for (Value::use_iterator I = CE->use_begin(), E = CE->use_end();
+ I != E; ++I)
+ WUsers.push_back(WeakVH(*I));
+ std::sort(WUsers.begin(), WUsers.end());
+ WUsers.erase(std::unique(WUsers.begin(), WUsers.end()), WUsers.end());
+ while (!WUsers.empty())
+ if (WeakVH WU = WUsers.pop_back_val()) {
+ if (PHINode *PN = dyn_cast<PHINode>(WU)) {
+ for (int I = 0, E = PN->getNumIncomingValues(); I < E; ++I)
+ if (PN->getIncomingValue(I) == CE) {
+ BasicBlock *PredBB = PN->getIncomingBlock(I);
+ if (PredBB->getTerminator()->getNumSuccessors() > 1)
+ PredBB = SplitEdge(PredBB, PN->getParent(), P);
+ Instruction *InsertPos = PredBB->getTerminator();
+ Instruction *NewInst = createReplacementInstr(CE, InsertPos);
+ PN->setOperand(I, NewInst);
+ }
+ } else if (Instruction *Instr = dyn_cast<Instruction>(WU)) {
+ Instruction *NewInst = createReplacementInstr(CE, Instr);
+ Instr->replaceUsesOfWith(CE, NewInst);
+ } else {
+ ConstantExpr *CExpr = dyn_cast<ConstantExpr>(WU);
+ if (!CExpr || !replaceConstantExprOp(CExpr, P))
+ return false;
+ }
+ }
+ } while (CE->hasNUsesOrMore(1)); // We need to check because a recursive
+ // sibling may have used 'CE' when createReplacementInstr was called.
+ CE->destroyConstant();
+ return true;
+}
- return false;
+static bool rewriteNonInstructionUses(GlobalVariable *GV, Pass *P) {
+ SmallVector<WeakVH,8> WUsers;
+ for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I)
+ if (!isa<Instruction>(*I))
+ WUsers.push_back(WeakVH(*I));
+ while (!WUsers.empty())
+ if (WeakVH WU = WUsers.pop_back_val()) {
+ ConstantExpr *CE = dyn_cast<ConstantExpr>(WU);
+ if (!CE || !replaceConstantExprOp(CE, P))
+ return false;
+ }
+ return true;
}
static bool isZeroLengthArray(Type *Ty) {
@@ -92,14 +186,16 @@ bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) {
return false;
// Skip globals that we can't lower and leave it for the backend to error.
- if (hasNonInstructionUse(GV) ||
+ if (!rewriteNonInstructionUses(GV, this) ||
!GV->getType()->isSized() || isZeroLengthArray(GV->getType()))
return false;
// Create replacement global.
ArrayType *NewType = createLoweredType(GV->getType()->getElementType());
- Constant *NewInitializer = createLoweredInitializer(NewType,
- GV->getInitializer());
+ Constant *NewInitializer = 0;
+ if (GV->hasInitializer())
+ NewInitializer = createLoweredInitializer(NewType,
+ GV->getInitializer());
GlobalVariable *NewGV =
new GlobalVariable(*M, NewType, GV->isConstant(), GV->getLinkage(),
NewInitializer, "", 0, GlobalVariable::NotThreadLocal,
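The replaceConstantExprOp/createReplacementInstr machinery added above exists because users of a thread-local global may be ConstantExprs rather than Instructions, and those cannot carry a per-thread index computation. Stripped of the PHI and recursive-user handling, the core step is simply "materialise the constant expression as an instruction next to its user". A simplified sketch of that step, using ConstantExpr::getAsInstruction() instead of the hand-rolled IRBuilder switch in the patch:

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Expand one ConstantExpr operand of UserInst into a real Instruction so that
// later rewrites only ever see Instruction users of the global.
static Instruction *expandConstantExpr(ConstantExpr *CE, Instruction *UserInst) {
  Instruction *NewI = CE->getAsInstruction(); // equivalent GEP/cast/binop instruction
  NewI->insertBefore(UserInst);
  UserInst->replaceUsesOfWith(CE, NewI);
  return NewI;
}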
diff --git a/lib/Target/XCore/XCoreMCInstLower.cpp b/lib/Target/XCore/XCoreMCInstLower.cpp
index f96eda9fcb9f..def2673fa565 100644
--- a/lib/Target/XCore/XCoreMCInstLower.cpp
+++ b/lib/Target/XCore/XCoreMCInstLower.cpp
@@ -43,7 +43,7 @@ MCOperand XCoreMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
Symbol = MO.getMBB()->getSymbol();
break;
case MachineOperand::MO_GlobalAddress:
- Symbol = Mang->getSymbol(MO.getGlobal());
+ Symbol = Printer.getSymbol(MO.getGlobal());
Offset += MO.getOffset();
break;
case MachineOperand::MO_BlockAddress:
diff --git a/lib/Target/XCore/XCoreRegisterInfo.cpp b/lib/Target/XCore/XCoreRegisterInfo.cpp
index 49b563497c0b..dbd2f52a5b1a 100644
--- a/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -37,8 +37,8 @@
using namespace llvm;
-XCoreRegisterInfo::XCoreRegisterInfo(const TargetInstrInfo &tii)
- : XCoreGenRegisterInfo(XCore::LR), TII(tii) {
+XCoreRegisterInfo::XCoreRegisterInfo()
+ : XCoreGenRegisterInfo(XCore::LR) {
}
// helper functions
@@ -112,6 +112,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int FrameIndex = FrameOp.getIndex();
MachineFunction &MF = *MI.getParent()->getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
int StackSize = MF.getFrameInfo()->getStackSize();
@@ -249,6 +250,7 @@ loadConstant(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
report_fatal_error("loadConstant value too big " + Twine(Value));
}
int Opcode = isImmU6(Value) ? XCore::LDC_ru6 : XCore::LDC_lru6;
+ const TargetInstrInfo &TII = *MBB.getParent()->getTarget().getInstrInfo();
BuildMI(MBB, I, dl, TII.get(Opcode), DstReg).addImm(Value);
}
diff --git a/lib/Target/XCore/XCoreRegisterInfo.h b/lib/Target/XCore/XCoreRegisterInfo.h
index 1db32489cf8d..2370c6280f2e 100644
--- a/lib/Target/XCore/XCoreRegisterInfo.h
+++ b/lib/Target/XCore/XCoreRegisterInfo.h
@@ -25,8 +25,6 @@ class TargetInstrInfo;
struct XCoreRegisterInfo : public XCoreGenRegisterInfo {
private:
- const TargetInstrInfo &TII;
-
void loadConstant(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DstReg, int64_t Value, DebugLoc dl) const;
@@ -40,7 +38,7 @@ private:
unsigned DstReg, int Offset, DebugLoc dl) const;
public:
- XCoreRegisterInfo(const TargetInstrInfo &tii);
+ XCoreRegisterInfo();
/// Code Generation virtual methods...
diff --git a/lib/Target/XCore/XCoreTargetMachine.cpp b/lib/Target/XCore/XCoreTargetMachine.cpp
index 07e5fff1413b..9ae0b860dff9 100644
--- a/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -33,6 +33,7 @@ XCoreTargetMachine::XCoreTargetMachine(const Target &T, StringRef TT,
FrameLowering(Subtarget),
TLInfo(*this),
TSInfo(*this) {
+ initAsmInfo();
}
namespace {
@@ -69,3 +70,11 @@ bool XCorePassConfig::addInstSelector() {
extern "C" void LLVMInitializeXCoreTarget() {
RegisterTargetMachine<XCoreTargetMachine> X(TheXCoreTarget);
}
+
+void XCoreTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
+ // Add first the target-independent BasicTTI pass, then our XCore pass. This
+ // allows the XCore pass to delegate to the target independent layer when
+ // appropriate.
+ PM.add(createBasicTargetTransformInfoPass(this));
+ PM.add(createXCoreTargetTransformInfoPass(this));
+}
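For context on the pass stacking above: a consumer simply requests the TargetTransformInfo analysis group; queries that XCoreTTI overrides (getNumberOfRegisters) resolve to the XCore pass added last, while everything else falls through to the BasicTTI layer added first. A hypothetical consumer, sketched against the 3.4-era pass API:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
// Hypothetical pass that queries the stacked TTI analysis group.
struct TTIQueryExample : public FunctionPass {
  static char ID;
  TTIQueryExample() : FunctionPass(ID) {}
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<TargetTransformInfo>();
    AU.setPreservesAll();
  }
  virtual bool runOnFunction(Function &F) {
    const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
    // On XCore this reports 12 scalar registers (from XCoreTTI); vector and
    // cost queries fall back to the generic implementations.
    (void)TTI.getNumberOfRegisters(/*Vector=*/false);
    return false;
  }
};
} // end anonymous namespace
char TTIQueryExample::ID = 0;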
diff --git a/lib/Target/XCore/XCoreTargetMachine.h b/lib/Target/XCore/XCoreTargetMachine.h
index eb9a1aa420eb..a19a67727f2c 100644
--- a/lib/Target/XCore/XCoreTargetMachine.h
+++ b/lib/Target/XCore/XCoreTargetMachine.h
@@ -57,6 +57,8 @@ public:
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+
+ virtual void addAnalysisPasses(PassManagerBase &PM);
};
} // end namespace llvm
diff --git a/lib/Target/XCore/XCoreTargetTransformInfo.cpp b/lib/Target/XCore/XCoreTargetTransformInfo.cpp
new file mode 100644
index 000000000000..cc165f70dc28
--- /dev/null
+++ b/lib/Target/XCore/XCoreTargetTransformInfo.cpp
@@ -0,0 +1,83 @@
+//===-- XCoreTargetTransformInfo.cpp - XCore specific TTI pass -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements a TargetTransformInfo analysis pass specific to the
+/// XCore target machine. It uses the target's detailed information to provide
+/// more precise answers to certain TTI queries, while letting the target
+/// independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "xcoretti"
+#include "XCore.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/CostTable.h"
+using namespace llvm;
+
+// Declare the pass initialization routine locally as target-specific passes
+// don't have a target-wide initialization entry point, and so we rely on the
+// pass constructor initialization.
+namespace llvm {
+void initializeXCoreTTIPass(PassRegistry &);
+}
+
+namespace {
+
+class XCoreTTI : public ImmutablePass, public TargetTransformInfo {
+public:
+ XCoreTTI() : ImmutablePass(ID) {
+ llvm_unreachable("This pass cannot be directly constructed");
+ }
+
+ XCoreTTI(const XCoreTargetMachine *TM)
+ : ImmutablePass(ID) {
+ initializeXCoreTTIPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void initializePass() {
+ pushTTIStack(this);
+ }
+
+ virtual void finalizePass() {
+ popTTIStack();
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ TargetTransformInfo::getAnalysisUsage(AU);
+ }
+
+ static char ID;
+
+ virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ if (ID == &TargetTransformInfo::ID)
+ return (TargetTransformInfo*)this;
+ return this;
+ }
+
+ unsigned getNumberOfRegisters(bool Vector) const {
+ if (Vector) {
+ return 0;
+ }
+ return 12;
+ }
+};
+
+} // end anonymous namespace
+
+INITIALIZE_AG_PASS(XCoreTTI, TargetTransformInfo, "xcoretti",
+ "XCore Target Transform Info", true, true, false)
+char XCoreTTI::ID = 0;
+
+
+ImmutablePass *
+llvm::createXCoreTargetTransformInfoPass(const XCoreTargetMachine *TM) {
+ return new XCoreTTI(TM);
+}