Diffstat (limited to 'llvm/lib/Target')
20 files changed, 82 insertions, 90 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index b2eee2845ba9..5bbf4f97c54a 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -1049,6 +1049,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
   case AArch64::MOVaddrEXT: {
     // Expand into ADRP + ADD.
     Register DstReg = MI.getOperand(0).getReg();
+    assert(DstReg != AArch64::XZR);
     MachineInstrBuilder MIB1 =
         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
             .add(MI.getOperand(1));
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 091a62aa4ada..f29bb83c2d2e 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -6923,6 +6923,8 @@ bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(
 
 bool AArch64InstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                               unsigned &Flags) const {
+  if (!TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags))
+    return false;
   // Check if LR is available through all of the MBB. If it's not, then set
   // a flag.
   assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
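This AArch64 hunk and the RISCVInstrInfo.cpp hunk further down apply the same fix: the target's isMBBSafeToOutlineFrom override now consults the generic TargetInstrInfo check before (or instead of) its own logic, so a block the generic code rejects can never be outlined. A minimal standalone analogue of that delegation pattern, using stand-in classes rather than LLVM's real ones:

    // Stand-in classes, not LLVM's: the override delegates to the generic
    // base-class safety check first, then layers target rules on top.
    #include <iostream>

    struct TargetInstrInfoLike {
      // Generic, target-independent outlining safety check.
      virtual bool isMBBSafeToOutlineFrom(bool HasGenericHazard) const {
        return !HasGenericHazard;
      }
      virtual ~TargetInstrInfoLike() = default;
    };

    struct AArch64Like : TargetInstrInfoLike {
      bool isMBBSafeToOutlineFrom(bool HasGenericHazard) const override {
        // Delegate first, as the AArch64 and RISC-V hunks now do.
        if (!TargetInstrInfoLike::isMBBSafeToOutlineFrom(HasGenericHazard))
          return false;
        // ... target-specific checks (e.g. LR liveness) would follow here ...
        return true;
      }
    };

    int main() {
      AArch64Like TII;
      std::cout << TII.isMBBSafeToOutlineFrom(true) << '\n';  // 0: base veto wins
      std::cout << TII.isMBBSafeToOutlineFrom(false) << '\n'; // 1: safe
    }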
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 12744e4de09b..f3da6bf057c2 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -673,40 +673,40 @@ let isReMaterializable = 1, isCodeGenOnly = 1 in {
 // removed, along with the AArch64Wrapper node.
 let AddedComplexity = 10 in
-def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
-                     [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
+def LOADgot : Pseudo<(outs GPR64common:$dst), (ins i64imm:$addr),
+                     [(set GPR64common:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
               Sched<[WriteLDAdr]>;
 
 // The MOVaddr instruction should match only when the add is not folded
 // into a load or store address.
 def MOVaddr
-    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
-             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
+    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
+             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                               tglobaladdr:$low))]>,
       Sched<[WriteAdrAdr]>;
 def MOVaddrJT
-    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
-             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
+    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
+             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                               tjumptable:$low))]>,
       Sched<[WriteAdrAdr]>;
 def MOVaddrCP
-    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
-             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
+    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
+             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                               tconstpool:$low))]>,
       Sched<[WriteAdrAdr]>;
 def MOVaddrBA
-    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
-             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
+    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
+             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                               tblockaddress:$low))]>,
       Sched<[WriteAdrAdr]>;
 def MOVaddrTLS
-    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
-             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
+    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
+             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                               tglobaltlsaddr:$low))]>,
       Sched<[WriteAdrAdr]>;
 def MOVaddrEXT
-    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
-             [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
+    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
+             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                               texternalsym:$low))]>,
       Sched<[WriteAdrAdr]>;
 
 // Normally AArch64addlow either gets folded into a following ldr/str,
@@ -714,8 +714,8 @@ def MOVaddrEXT
 // might appear without either of them, so allow lowering it into a plain
 // add.
 def ADDlowTLS
-    : Pseudo<(outs GPR64:$dst), (ins GPR64:$src, i64imm:$low),
-             [(set GPR64:$dst, (AArch64addlow GPR64:$src,
+    : Pseudo<(outs GPR64sp:$dst), (ins GPR64sp:$src, i64imm:$low),
+             [(set GPR64sp:$dst, (AArch64addlow GPR64sp:$src,
                                               tglobaltlsaddr:$low))]>,
       Sched<[WriteAdr]>;
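These .td hunks retype LOADgot and the MOVaddr family from GPR64 to GPR64common, which covers X0-X30 but not XZR, while ADDlowTLS moves to GPR64sp, presumably because the underlying ADD also accepts the stack pointer. The assert added to the MOVaddrEXT expansion in the first hunk enforces the same invariant at expansion time. A standalone sketch, with no LLVM dependencies and invented function names, of why the two-instruction expansion needs a destination whose value can be read back:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // ADRP materializes the 4 KiB page of a symbol (PC-relative in real code;
    // modeled here as the absolute page address for simplicity).
    uint64_t adrp(uint64_t Sym) { return Sym & ~UINT64_C(0xfff); }

    // MOVaddr expands to ADRP + ADD, and the ADD reads the register that ADRP
    // wrote, so a write-ignored destination like XZR would break the sequence.
    uint64_t materializeAddress(uint64_t Sym) {
      uint64_t Dst = adrp(Sym);  // ADRP Xd, sym           (writes Xd)
      Dst += Sym & 0xfff;        // ADD  Xd, Xd, :lo12:sym (reads Xd back)
      return Dst;
    }

    int main() {
      const uint64_t Sym = 0x412345;
      assert(materializeAddress(Sym) == Sym);
      std::cout << std::hex << materializeAddress(Sym) << '\n';  // 412345
    }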
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 8c34027f7bb3..94a0ce09afed 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -13,6 +13,8 @@
 #include "AArch64RegisterBankInfo.h"
 #include "AArch64InstrInfo.h"
+#include "AArch64RegisterInfo.h"
+#include "MCTargetDesc/AArch64MCTargetDesc.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
@@ -271,6 +273,7 @@ AArch64RegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
   case AArch64::WSeqPairsClassRegClassID:
   case AArch64::XSeqPairsClassRegClassID:
   case AArch64::MatrixIndexGPR32_12_15RegClassID:
+  case AArch64::GPR64_with_sub_32_in_MatrixIndexGPR32_12_15RegClassID:
     return getRegBank(AArch64::GPRRegBankID);
   case AArch64::CCRRegClassID:
     return getRegBank(AArch64::CCRegBankID);
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 493c1ad87f93..d6dd807afbce 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -2048,7 +2048,7 @@ SILoadStoreOptimizer::collectMergeableInsts(
       // adjacent to each other in the list, which will make it easier to find
      // matches.
      MergeList.sort(
-          [] (const CombineInfo &A, CombineInfo &B) {
+          [] (const CombineInfo &A, const CombineInfo &B) {
            return A.Offset < B.Offset;
          });
      ++I;
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index aff7ec8d2ed6..256a95b94f6c 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -525,7 +525,7 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &
 
   MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
       .addImm(ArgAssigner.StackOffset)
-      .addImm(0)
+      .addImm(-1ULL)
       .add(predOps(ARMCC::AL));
 
   return true;
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 28a076edd6dc..9224c2221f4d 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -2022,7 +2022,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                           TII.get(AdjStackUp))
                   .addImm(NumBytes).addImm(0));
+                  .addImm(NumBytes).addImm(-1ULL));
 
   // Now the return value.
   if (RetVT != MVT::isVoid) {
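The AMDGPU hunk is a const-correctness fix: a comparator handed to std::list::sort must impose a strict weak ordering and has no business mutating its arguments, so both parameters are now taken by const reference. A self-contained illustration of the corrected comparator shape:

    #include <iostream>
    #include <list>

    struct CombineInfo { int Offset; };

    int main() {
      std::list<CombineInfo> MergeList{{8}, {0}, {4}};
      MergeList.sort(
          [](const CombineInfo &A, const CombineInfo &B) {
            return A.Offset < B.Offset;  // strict weak ordering on Offset
          });
      for (const auto &CI : MergeList)
        std::cout << CI.Offset << ' ';   // prints: 0 4 8
      std::cout << '\n';
    }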
+  bool ComplexPatternFuncMutatesDAG() const override { return true; }
+
   bool hasNoVMLxHazardUse(SDNode *N) const;
   bool isShifterOpProfitable(const SDValue &Shift,
                              ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb.td b/llvm/lib/Target/ARM/ARMInstrThumb.td
index ef07b2839bc9..4c9b8b5fbfa9 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -1520,6 +1520,7 @@ def tTBH_JT : tPseudoInst<(outs),
 let isCall = 1, Defs = [R0, R12, LR, CPSR], Uses = [SP] in
 def tTPsoft : tPseudoInst<(outs), (ins), 4, IIC_Br,
               [(set R0, ARMthread_pointer)]>,
+              Requires<[IsThumb, IsReadTPSoft]>,
               Sched<[WriteBr]>;
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb2.td b/llvm/lib/Target/ARM/ARMInstrThumb2.td
index e7eed2a0bbb1..f8b226b84e98 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -4670,6 +4670,9 @@ def t2CDP2 : T2Cop<0b1111, (outs), (ins p_imm:$cop, imm0_15:$opc1,
 }
 
+// Reading thread pointer from coprocessor register
+def : T2Pat<(ARMthread_pointer), (t2MRC 15, 0, 13, 0, 3)>,
+      Requires<[IsThumb2, IsReadTPHard]>;
 
 //===----------------------------------------------------------------------===//
 // ARMv8.1 Privilege Access Never extension
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 7be5fc33a0af..04a835f08855 100644
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -1027,12 +1027,13 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) {
     }
 
     SDNode *Rdhwr =
-        CurDAG->getMachineNode(RdhwrOpc, DL, Node->getValueType(0),
+        CurDAG->getMachineNode(RdhwrOpc, DL, Node->getValueType(0), MVT::Glue,
                                CurDAG->getRegister(Mips::HWR29, MVT::i32),
                                CurDAG->getTargetConstant(0, DL, MVT::i32));
     SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, DestReg,
-                                         SDValue(Rdhwr, 0));
-    SDValue ResNode = CurDAG->getCopyFromReg(Chain, DL, DestReg, PtrVT);
+                                         SDValue(Rdhwr, 0), SDValue(Rdhwr, 1));
+    SDValue ResNode = CurDAG->getCopyFromReg(Chain, DL, DestReg, PtrVT,
+                                             Chain.getValue(1));
     ReplaceNode(Node, ResNode.getNode());
     return true;
   }
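In the Mips hunk, the RDHWR machine node now produces an extra MVT::Glue result, and each register copy consumes the previous node's glue, so instruction scheduling cannot separate the three nodes. A standalone toy model of that adjacency constraint (plain structs, not SelectionDAG types):

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <vector>

    // A node may be glued to a predecessor: a valid schedule must place that
    // predecessor immediately before it, with nothing in between.
    struct Node {
      std::string Name;
      const Node *GlueInput; // node that must come directly before this one
    };

    bool isValidSchedule(const std::vector<const Node *> &Order) {
      for (std::size_t i = 0; i < Order.size(); ++i)
        if (const Node *Pred = Order[i]->GlueInput)
          if (i == 0 || Order[i - 1] != Pred)
            return false;
      return true;
    }

    int main() {
      Node Rdhwr{"RDHWR", nullptr};
      Node CopyTo{"CopyToReg", &Rdhwr};      // consumes RDHWR's glue result
      Node CopyFrom{"CopyFromReg", &CopyTo}; // consumes CopyToReg's glue result
      assert(isValidSchedule({&Rdhwr, &CopyTo, &CopyFrom}));
      assert(!isValidSchedule({&CopyTo, &Rdhwr, &CopyFrom})); // glue violated
    }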
+  if (Name == "lqarx" || Name == "ldarx" || Name == "lwarx" ||
+      Name == "lharx" || Name == "lbarx") {
+    if (Operands.size() != 5)
+      return false;
+    PPCOperand &EHOp = (PPCOperand &)*Operands[4];
+    if (EHOp.isU1Imm() && EHOp.getImm() == 0)
+      Operands.pop_back();
+  }
+
   return false;
 }
 
@@ -1745,7 +1755,7 @@ unsigned PPCAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
   }
 
   PPCOperand &Op = static_cast<PPCOperand &>(AsmOp);
-  if (Op.isImm() && Op.getImm() == ImmVal)
+  if (Op.isU3Imm() && Op.getImm() == ImmVal)
     return Match_Success;
 
   return Match_InvalidOperand;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 207101763ac2..7dab7a52ac53 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -999,7 +999,7 @@ bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
 bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                             unsigned &Flags) const {
   // More accurate safety checking is done in getOutliningCandidateInfo.
-  return true;
+  return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
 }
 
 // Enum values indicating how an outlined call should be constructed.
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index 7df7cc93d6eb..53495489cef8 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -2173,7 +2173,7 @@ let hasSideEffects = 1 in {
   def EX   : SideEffectBinaryRX<"ex", 0x44, ADDR64>;
   def EXRL : SideEffectBinaryRILPC<"exrl", 0xC60, ADDR64>;
   let hasNoSchedulingInfo = 1 in
-    def EXRL_Pseudo : Pseudo<(outs), (ins i64imm:$TargetOpc, ADDR64:$lenMinus1,
+    def EXRL_Pseudo : Alias<6, (outs), (ins i64imm:$TargetOpc, ADDR64:$lenMinus1,
                              bdaddr12only:$bdl1, bdaddr12only:$bd2),
                              []>;
 }
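The PPCAsmParser hunk lets the plain larx mnemonics match even when the source carried an explicit EH operand of zero, by dropping that redundant trailing operand before instruction matching. A self-contained sketch of the same canonicalization over string operands (the real parser works on PPCOperand objects, not strings):

    #include <iostream>
    #include <string>
    #include <vector>

    // Operands[0] is the mnemonic; a fully spelled larx has RT, RA, RB, EH,
    // i.e. five entries. An explicit trailing EH of zero is the default, so
    // dropping it lets the base form of the mnemonic match.
    void canonicalize(std::vector<std::string> &Operands) {
      static const char *Larx[] = {"lqarx", "ldarx", "lwarx", "lharx", "lbarx"};
      for (const char *M : Larx)
        if (Operands.size() == 5 && Operands[0] == M && Operands.back() == "0")
          Operands.pop_back();
    }

    int main() {
      std::vector<std::string> Ops{"lwarx", "r4", "r0", "r5", "0"};
      canonicalize(Ops);
      std::cout << Ops.size() << '\n';  // 4: the redundant EH bit was removed
    }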
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index 4add8d30e010..65ffe6621545 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -657,35 +657,24 @@ void X86ExpandPseudo::ExpandVastartSaveXmmRegs(
                  EntryBlk->end());
   TailBlk->transferSuccessorsAndUpdatePHIs(EntryBlk);
 
-  int64_t FrameIndex = VAStartPseudoInstr->getOperand(1).getImm();
-  Register BaseReg;
-  uint64_t FrameOffset =
-      X86FL->getFrameIndexReference(*Func, FrameIndex, BaseReg).getFixed();
-  uint64_t VarArgsRegsOffset = VAStartPseudoInstr->getOperand(2).getImm();
+  uint64_t FrameOffset = VAStartPseudoInstr->getOperand(4).getImm();
+  uint64_t VarArgsRegsOffset = VAStartPseudoInstr->getOperand(6).getImm();
 
   // TODO: add support for YMM and ZMM here.
   unsigned MOVOpc = STI->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
 
   // In the XMM save block, save all the XMM argument registers.
-  for (int64_t OpndIdx = 3, RegIdx = 0;
+  for (int64_t OpndIdx = 7, RegIdx = 0;
        OpndIdx < VAStartPseudoInstr->getNumOperands() - 1;
        OpndIdx++, RegIdx++) {
-
-    int64_t Offset = FrameOffset + VarArgsRegsOffset + RegIdx * 16;
-
-    MachineMemOperand *MMO = Func->getMachineMemOperand(
-        MachinePointerInfo::getFixedStack(*Func, FrameIndex, Offset),
-        MachineMemOperand::MOStore,
-        /*Size=*/16, Align(16));
-
-    BuildMI(GuardedRegsBlk, DL, TII->get(MOVOpc))
-        .addReg(BaseReg)
-        .addImm(/*Scale=*/1)
-        .addReg(/*IndexReg=*/0)
-        .addImm(/*Disp=*/Offset)
-        .addReg(/*Segment=*/0)
-        .addReg(VAStartPseudoInstr->getOperand(OpndIdx).getReg())
-        .addMemOperand(MMO);
+    auto NewMI = BuildMI(GuardedRegsBlk, DL, TII->get(MOVOpc));
+    for (int i = 0; i < X86::AddrNumOperands; ++i) {
+      if (i == X86::AddrDisp)
+        NewMI.addImm(FrameOffset + VarArgsRegsOffset + RegIdx * 16);
+      else
+        NewMI.add(VAStartPseudoInstr->getOperand(i + 1));
+    }
+    NewMI.addReg(VAStartPseudoInstr->getOperand(OpndIdx).getReg());
     assert(Register::isPhysicalRegister(
         VAStartPseudoInstr->getOperand(OpndIdx).getReg()));
   }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 032db2a80a77..4b13b5b540b6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3533,13 +3533,19 @@ void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
       SmallVector<SDValue, 12> SaveXMMOps;
       SaveXMMOps.push_back(Chain);
       SaveXMMOps.push_back(ALVal);
-      SaveXMMOps.push_back(
-          DAG.getTargetConstant(FuncInfo->getRegSaveFrameIndex(), DL, MVT::i32));
+      SaveXMMOps.push_back(RSFIN);
       SaveXMMOps.push_back(
           DAG.getTargetConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32));
       llvm::append_range(SaveXMMOps, LiveXMMRegs);
-      MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, DL,
-                                   MVT::Other, SaveXMMOps));
+      MachineMemOperand *StoreMMO =
+          DAG.getMachineFunction().getMachineMemOperand(
+              MachinePointerInfo::getFixedStack(
+                  DAG.getMachineFunction(), FuncInfo->getRegSaveFrameIndex(),
+                  Offset),
+              MachineMemOperand::MOStore, 128, Align(16));
+      MemOps.push_back(DAG.getMemIntrinsicNode(X86ISD::VASTART_SAVE_XMM_REGS,
+                                               DL, DAG.getVTList(MVT::Other),
+                                               SaveXMMOps, MVT::i8, StoreMMO));
     }
 
     if (!MemOps.empty())
@@ -44070,32 +44076,9 @@ static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
          "Unexpected horizontal add/sub opcode");
 
   if (!shouldUseHorizontalOp(true, DAG, Subtarget)) {
-    // For slow-hop targets, if we have a hop with a single op, see if we already
-    // have another user that we can reuse and shuffle the result.
     MVT VT = N->getSimpleValueType(0);
     SDValue LHS = N->getOperand(0);
     SDValue RHS = N->getOperand(1);
-    if (VT.is128BitVector() && LHS == RHS) {
-      for (SDNode *User : LHS->uses()) {
-        if (User != N && User->getOpcode() == N->getOpcode()) {
-          MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
-          if (User->getOperand(0) == LHS && !User->getOperand(1).isUndef()) {
-            return DAG.getBitcast(
-                VT,
-                DAG.getVectorShuffle(ShufVT, SDLoc(N),
-                                     DAG.getBitcast(ShufVT, SDValue(User, 0)),
-                                     DAG.getUNDEF(ShufVT), {0, 1, 0, 1}));
-          }
-          if (User->getOperand(1) == LHS && !User->getOperand(0).isUndef()) {
-            return DAG.getBitcast(
-                VT,
-                DAG.getVectorShuffle(ShufVT, SDLoc(N),
-                                     DAG.getBitcast(ShufVT, SDValue(User, 0)),
-                                     DAG.getUNDEF(ShufVT), {2, 3, 2, 3}));
-          }
-        }
-      }
-    }
 
     // HOP(HOP'(X,X),HOP'(Y,Y)) -> HOP(PERMUTE(HOP'(X,Y)),PERMUTE(HOP'(X,Y)).
     if (LHS != RHS && LHS.getOpcode() == N->getOpcode() &&
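The X86 va_start changes thread a full five-part x86 memory reference (base, scale, index, displacement, segment; X86::AddrNumOperands in LLVM) through the VASTART_SAVE_XMM_REGS pseudo, so the rewritten expansion copies those operands verbatim and patches only the displacement for each 16-byte XMM slot. A standalone model of that per-register displacement bump:

    #include <cstdint>
    #include <iostream>

    // The five operands of an x86 memory reference.
    struct X86MemRef {
      int Base, Scale, Index;
      int64_t Disp;
      int Segment;
    };

    // One store per XMM register: same address operands, displacement bumped
    // by 16 bytes (one XMM slot) per register index, mirroring RegIdx * 16.
    X86MemRef slotFor(X86MemRef RegSaveArea, int64_t RegIdx) {
      X86MemRef Slot = RegSaveArea;  // copy all five operands
      Slot.Disp += RegIdx * 16;      // only the displacement changes
      return Slot;
    }

    int main() {
      X86MemRef Area{/*Base=*/5, /*Scale=*/1, /*Index=*/0, /*Disp=*/-64,
                     /*Segment=*/0};
      for (int64_t R = 0; R < 3; ++R)
        std::cout << slotFor(Area, R).Disp << ' ';  // -64 -48 -32
      std::cout << '\n';
    }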
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 869857bcc0d6..8b18b5981e86 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -627,10 +627,6 @@ namespace llvm {
     // packed single precision.
     DPBF16PS,
 
-    // Save xmm argument registers to the stack, according to %al. An operator
-    // is needed so that this can be expanded with control flow.
-    VASTART_SAVE_XMM_REGS,
-
     // Windows's _chkstk call to do stack probing.
     WIN_ALLOCA,
 
@@ -848,6 +844,10 @@ namespace llvm {
     AESENCWIDE256KL,
     AESDECWIDE256KL,
 
+    // Save xmm argument registers to the stack, according to %al. An operator
+    // is needed so that this can be expanded with control flow.
+    VASTART_SAVE_XMM_REGS,
+
     // WARNING: Do not add anything in the end unless you want the node to
     // have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
     // opcodes will be thought as target memory ops!
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 202d320cd731..aa14c8016a83 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -69,16 +69,12 @@ def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
 
 let SchedRW = [WriteSystem] in {
 
 // x86-64 va_start lowering magic.
-let hasSideEffects = 1, Defs = [EFLAGS] in {
+let hasSideEffects = 1, mayStore = 1, Defs = [EFLAGS] in {
 def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                               (outs),
-                              (ins GR8:$al,
-                                   i32imm:$regsavefi, i32imm:$offset,
-                                   variable_ops),
-                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
-                              [(X86vastart_save_xmm_regs GR8:$al,
-                                                         timm:$regsavefi,
-                                                         timm:$offset),
+                              (ins GR8:$al, i8mem:$regsavefi, variable_ops),
+                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi",
+                              [(X86vastart_save_xmm_regs GR8:$al, addr:$regsavefi),
                                (implicit EFLAGS)]>;
 }
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 34afedb5bad2..489ea7fb127a 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -91,8 +91,7 @@ def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
 def SDT_X86NtBrind : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
 
 def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
-                                                         SDTCisVT<1, iPTR>,
-                                                         SDTCisVT<2, iPTR>]>;
+                                                         SDTCisPtrTy<1>]>;
 
 def SDT_X86VAARG : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
                                          SDTCisPtrTy<1>,
@@ -184,7 +183,7 @@ def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
 
 def X86vastart_save_xmm_regs :
                  SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
                         SDT_X86VASTART_SAVE_XMM_REGS,
-                        [SDNPHasChain, SDNPVariadic]>;
+                        [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPVariadic]>;
 
 def X86vaarg64 :
                  SDNode<"X86ISD::VAARG_64", SDT_X86VAARG,
                         [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
diff --git a/llvm/lib/Target/X86/X86InstrSystem.td b/llvm/lib/Target/X86/X86InstrSystem.td
index 48c27051a872..355ddf26e3bb 100644
--- a/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/llvm/lib/Target/X86/X86InstrSystem.td
@@ -583,7 +583,7 @@ def XRSTORS64 : RI<0xC7, MRM3m, (outs), (ins opaquemem:$dst),
 //===----------------------------------------------------------------------===//
 // VIA PadLock crypto instructions
 let Defs = [RAX, RDI], Uses = [RDX, RDI], SchedRW = [WriteSystem] in
-  def XSTORE : I<0xa7, MRM_C0, (outs), (ins), "xstore", []>, TB, REP;
+  def XSTORE : I<0xa7, MRM_C0, (outs), (ins), "xstore", []>, TB;
 def : InstAlias<"xstorerng", (XSTORE)>;
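The X86ISelLowering.h hunk above moves VASTART_SAVE_XMM_REGS past the boundary the WARNING comment describes: opcodes numbered from FIRST_TARGET_MEMORY_OPCODE onward are automatically treated as memory-touching nodes, which this pseudo must now be, since it carries a MachineMemOperand. A standalone sketch of that sentinel-based classification (illustrative enum values, not LLVM's real ones):

    #include <iostream>

    enum Opcode {
      ADD_LIKE,                     // ordinary node, no memory operand
      FIRST_MEMORY_OPCODE,          // sentinel: memory opcodes start here
      LOAD_LIKE = FIRST_MEMORY_OPCODE,
      VASTART_SAVE_XMM_REGS,        // now classified as a memory op
    };

    // Everything at or beyond the sentinel counts as a memory operation.
    bool isTargetMemoryOpcode(Opcode Op) { return Op >= FIRST_MEMORY_OPCODE; }

    int main() {
      std::cout << isTargetMemoryOpcode(ADD_LIKE) << '\n';              // 0
      std::cout << isTargetMemoryOpcode(VASTART_SAVE_XMM_REGS) << '\n'; // 1
    }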