Diffstat (limited to 'contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp')
-rw-r--r-- | contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp | 300
1 file changed, 170 insertions, 130 deletions
diff --git a/contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp b/contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp
index a777142a9e70..da2a4d862e7d 100644
--- a/contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp
+++ b/contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp
@@ -9,19 +9,17 @@
 //
 /// \file
 /// \brief R600 Machine Scheduler interface
-// TODO: Scheduling is optimised for VLIW4 arch, modify it to support TRANS slot
 //
 //===----------------------------------------------------------------------===//
 
 #define DEBUG_TYPE "misched"
 
 #include "R600MachineScheduler.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/Pass.h"
 #include "llvm/PassManager.h"
 #include "llvm/Support/raw_ostream.h"
-#include <set>
 
 using namespace llvm;
 
@@ -30,54 +28,80 @@ void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
   DAG = dag;
   TII = static_cast<const R600InstrInfo*>(DAG->TII);
   TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
+  VLIW5 = !DAG->MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
   MRI = &DAG->MRI;
-  Available[IDAlu]->clear();
-  Available[IDFetch]->clear();
-  Available[IDOther]->clear();
   CurInstKind = IDOther;
   CurEmitted = 0;
-  OccupedSlotsMask = 15;
+  OccupedSlotsMask = 31;
   InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
-
+  InstKindLimit[IDOther] = 32;
 
   const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
-  if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD5XXX) {
-    InstKindLimit[IDFetch] = 7; // 8 minus 1 for security
-  } else {
-    InstKindLimit[IDFetch] = 15; // 16 minus 1 for security
-  }
+  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
+  AluInstCount = 0;
+  FetchInstCount = 0;
 }
 
-void R600SchedStrategy::MoveUnits(ReadyQueue *QSrc, ReadyQueue *QDst)
+void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
+                                  std::vector<SUnit *> &QDst)
 {
-  if (QSrc->empty())
-    return;
-  for (ReadyQueue::iterator I = QSrc->begin(),
-       E = QSrc->end(); I != E; ++I) {
-    (*I)->NodeQueueId &= ~QSrc->getID();
-    QDst->push(*I);
-  }
-  QSrc->clear();
+  QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
+  QSrc.clear();
+}
+
+static
+unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
+  assert (GPRCount && "GPRCount cannot be 0");
+  return 248 / GPRCount;
 }
 
 SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
   SUnit *SU = 0;
-  IsTopNode = true;
   NextInstKind = IDOther;
 
+  IsTopNode = false;
+
   // check if we might want to switch current clause type
-  bool AllowSwitchToAlu = (CurInstKind == IDOther) ||
-      (CurEmitted > InstKindLimit[CurInstKind]) ||
-      (Available[CurInstKind]->empty());
-  bool AllowSwitchFromAlu = (CurEmitted > InstKindLimit[CurInstKind]) &&
-      (!Available[IDFetch]->empty() || !Available[IDOther]->empty());
-
-  if ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
-      (!AllowSwitchFromAlu && CurInstKind == IDAlu)) {
+  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
+      (Available[CurInstKind].empty());
+  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
+      (!Available[IDFetch].empty() || !Available[IDOther].empty());
+
+  if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
+    // We use the heuristic provided by AMD Accelerated Parallel Processing
+    // OpenCL Programming Guide :
+    // The approx. number of WF that allows TEX inst to hide ALU inst is :
+    // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
+    float ALUFetchRationEstimate =
+        (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
+        (FetchInstCount + Available[IDFetch].size());
+    unsigned NeededWF = 62.5f / ALUFetchRationEstimate;
+    DEBUG( dbgs() << NeededWF << " approx. Wavefronts Required\n" );
+    // We assume the local GPR requirements to be "dominated" by the requirement
+    // of the TEX clause (which consumes 128 bits regs) ; ALU inst before and
+    // after TEX are indeed likely to consume or generate values from/for the
+    // TEX clause.
+    // Available[IDFetch].size() * 2 : GPRs required in the Fetch clause
+    // We assume that fetch instructions are either TnXYZW = TEX TnXYZW (need
+    // one GPR) or TmXYZW = TnXYZW (need 2 GPR).
+    // (TODO : use RegisterPressure)
+    // If we are going too use too many GPR, we flush Fetch instruction to lower
+    // register pressure on 128 bits regs.
+    unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
+    if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
+      AllowSwitchFromAlu = true;
+  }
+
+  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
+      (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
     // try to pick ALU
     SU = pickAlu();
+    if (!SU && !PhysicalRegCopy.empty()) {
+      SU = PhysicalRegCopy.front();
+      PhysicalRegCopy.erase(PhysicalRegCopy.begin());
+    }
     if (SU) {
-      if (CurEmitted > InstKindLimit[IDAlu])
+      if (CurEmitted >= InstKindLimit[IDAlu])
         CurEmitted = 0;
       NextInstKind = IDAlu;
     }
@@ -99,14 +123,10 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
 
   DEBUG(
       if (SU) {
-        dbgs() << "picked node: ";
+        dbgs() << " ** Pick node **\n";
         SU->dump(DAG);
       } else {
-        dbgs() << "NO NODE ";
-        for (int i = 0; i < IDLast; ++i) {
-          Available[i]->dump();
-          Pending[i]->dump();
-        }
+        dbgs() << "NO NODE \n";
         for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
           const SUnit &S = DAG->SUnits[i];
           if (!S.isScheduled)
@@ -119,19 +139,16 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
 }
 
 void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
-
-  DEBUG(dbgs() << "scheduled: ");
-  DEBUG(SU->dump(DAG));
-
   if (NextInstKind != CurInstKind) {
     DEBUG(dbgs() << "Instruction Type Switch\n");
     if (NextInstKind != IDAlu)
-      OccupedSlotsMask = 15;
+      OccupedSlotsMask |= 31;
     CurEmitted = 0;
     CurInstKind = NextInstKind;
   }
 
   if (CurInstKind == IDAlu) {
+    AluInstCount ++;
     switch (getAluKind(SU)) {
     case AluT_XYZW:
       CurEmitted += 4;
@@ -157,20 +174,37 @@ void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
 
   if (CurInstKind != IDFetch) {
     MoveUnits(Pending[IDFetch], Available[IDFetch]);
-  }
-  MoveUnits(Pending[IDOther], Available[IDOther]);
+  } else
+    FetchInstCount++;
 }
 
-void R600SchedStrategy::releaseTopNode(SUnit *SU) {
-  int IK = getInstKind(SU);
+static bool
+isPhysicalRegCopy(MachineInstr *MI) {
+  if (MI->getOpcode() != AMDGPU::COPY)
+    return false;
 
-  DEBUG(dbgs() << IK << " <= ");
-  DEBUG(SU->dump(DAG));
+  return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
+}
 
-  Pending[IK]->push(SU);
+void R600SchedStrategy::releaseTopNode(SUnit *SU) {
+  DEBUG(dbgs() << "Top Releasing ";SU->dump(DAG););
 }
 
 void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
+  DEBUG(dbgs() << "Bottom Releasing ";SU->dump(DAG););
+  if (isPhysicalRegCopy(SU->getInstr())) {
+    PhysicalRegCopy.push_back(SU);
+    return;
+  }
+
+  int IK = getInstKind(SU);
+
+  // There is no export clause, we can schedule one as soon as its ready
+  if (IK == IDOther)
+    Available[IDOther].push_back(SU);
+  else
+    Pending[IK].push_back(SU);
+
 }
 
 bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
@@ -185,18 +219,19 @@ bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
 
 R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
   MachineInstr *MI = SU->getInstr();
 
+  if (TII->isTransOnly(MI))
+    return AluTrans;
+
   switch (MI->getOpcode()) {
+  case AMDGPU::PRED_X:
+    return AluPredX;
   case AMDGPU::INTERP_PAIR_XY:
   case AMDGPU::INTERP_PAIR_ZW:
   case AMDGPU::INTERP_VEC_LOAD:
+  case AMDGPU::DOT_4:
     return AluT_XYZW;
   case AMDGPU::COPY:
-    if (TargetRegisterInfo::isPhysicalRegister(MI->getOperand(1).getReg())) {
-      // %vregX = COPY Tn_X is likely to be discarded in favor of an
-      // assignement of Tn_X to %vregX, don't considers it in scheduling
-      return AluDiscarded;
-    }
-    else if (MI->getOperand(1).isUndef()) {
+    if (MI->getOperand(1).isUndef()) {
       // MI will become a KILL, don't considers it in scheduling
      return AluDiscarded;
    }
@@ -205,10 +240,18 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
   }
 
   // Does the instruction take a whole IG ?
+  // XXX: Is it possible to add a helper function in R600InstrInfo that can
+  // be used here and in R600PacketizerList::isSoloInstruction() ?
   if(TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
-      TII->isReductionOp(MI->getOpcode()))
+      TII->isReductionOp(MI->getOpcode()) ||
+      MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
     return AluT_XYZW;
+  }
+
+  if (TII->isLDSInstr(MI->getOpcode())) {
+    return AluT_X;
+  }
 
   // Is the result already assigned to a channel ?
   unsigned DestSubReg = MI->getOperand(0).getSubReg();
@@ -239,6 +282,10 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
   if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
     return AluT_XYZW;
 
+  // LDS src registers cannot be used in the Trans slot.
+  if (TII->readsLDSSrcReg(MI))
+    return AluT_XYZW;
+
   return AluAny;
 
 }
@@ -246,57 +293,39 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
 int R600SchedStrategy::getInstKind(SUnit* SU) {
   int Opcode = SU->getInstr()->getOpcode();
 
+  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
+    return IDFetch;
+
   if (TII->isALUInstr(Opcode)) {
     return IDAlu;
   }
 
   switch (Opcode) {
+  case AMDGPU::PRED_X:
   case AMDGPU::COPY:
   case AMDGPU::CONST_COPY:
   case AMDGPU::INTERP_PAIR_XY:
   case AMDGPU::INTERP_PAIR_ZW:
   case AMDGPU::INTERP_VEC_LOAD:
-  case AMDGPU::DOT4_eg_pseudo:
-  case AMDGPU::DOT4_r600_pseudo:
+  case AMDGPU::DOT_4:
     return IDAlu;
-  case AMDGPU::TEX_VTX_CONSTBUF:
-  case AMDGPU::TEX_VTX_TEXBUF:
-  case AMDGPU::TEX_LD:
-  case AMDGPU::TEX_GET_TEXTURE_RESINFO:
-  case AMDGPU::TEX_GET_GRADIENTS_H:
-  case AMDGPU::TEX_GET_GRADIENTS_V:
-  case AMDGPU::TEX_SET_GRADIENTS_H:
-  case AMDGPU::TEX_SET_GRADIENTS_V:
-  case AMDGPU::TEX_SAMPLE:
-  case AMDGPU::TEX_SAMPLE_C:
-  case AMDGPU::TEX_SAMPLE_L:
-  case AMDGPU::TEX_SAMPLE_C_L:
-  case AMDGPU::TEX_SAMPLE_LB:
-  case AMDGPU::TEX_SAMPLE_C_LB:
-  case AMDGPU::TEX_SAMPLE_G:
-  case AMDGPU::TEX_SAMPLE_C_G:
-  case AMDGPU::TXD:
-  case AMDGPU::TXD_SHADOW:
-    return IDFetch;
   default:
-    DEBUG(
-        dbgs() << "other inst: ";
-        SU->dump(DAG);
-    );
     return IDOther;
   }
 }
 
-SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) {
+SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
   if (Q.empty())
     return NULL;
-  for (std::set<SUnit *, CompareSUnit>::iterator It = Q.begin(), E = Q.end();
+  for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
       It != E; ++It) {
     SUnit *SU = *It;
     InstructionsGroupCandidate.push_back(SU->getInstr());
-    if (TII->canBundle(InstructionsGroupCandidate)) {
+    if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)
+        && (!AnyALU || !TII->isVectorOnly(SU->getInstr()))
+    ) {
       InstructionsGroupCandidate.pop_back();
-      Q.erase(It);
+      Q.erase((It + 1).base());
       return SU;
     } else {
       InstructionsGroupCandidate.pop_back();
@@ -306,33 +335,37 @@ SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) {
 }
 
 void R600SchedStrategy::LoadAlu() {
-  ReadyQueue *QSrc = Pending[IDAlu];
-  for (ReadyQueue::iterator I = QSrc->begin(),
-       E = QSrc->end(); I != E; ++I) {
-    (*I)->NodeQueueId &= ~QSrc->getID();
-    AluKind AK = getAluKind(*I);
-    AvailableAlus[AK].insert(*I);
-  }
-  QSrc->clear();
+  std::vector<SUnit *> &QSrc = Pending[IDAlu];
+  for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
+    AluKind AK = getAluKind(QSrc[i]);
+    AvailableAlus[AK].push_back(QSrc[i]);
+  }
+  QSrc.clear();
 }
 
 void R600SchedStrategy::PrepareNextSlot() {
   DEBUG(dbgs() << "New Slot\n");
   assert (OccupedSlotsMask && "Slot wasn't filled");
   OccupedSlotsMask = 0;
+//  if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
+//    OccupedSlotsMask |= 16;
   InstructionsGroupCandidate.clear();
   LoadAlu();
 }
 
 void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
-  unsigned DestReg = MI->getOperand(0).getReg();
+  int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+  if (DstIndex == -1) {
+    return;
+  }
+  unsigned DestReg = MI->getOperand(DstIndex).getReg();
   // PressureRegister crashes if an operand is def and used in the same inst
   // and we try to constraint its regclass
   for (MachineInstr::mop_iterator It = MI->operands_begin(),
       E = MI->operands_end(); It != E; ++It) {
     MachineOperand &MO = *It;
     if (MO.isReg() && !MO.isDef() &&
-        MO.getReg() == MI->getOperand(0).getReg())
+        MO.getReg() == DestReg)
      return;
  }
   // Constrains the regclass of DestReg to assign it to Slot
@@ -352,53 +385,60 @@ void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
   }
 }
 
-SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) {
+SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot, bool AnyAlu) {
   static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
-  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]);
-  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
-  if (!UnslotedSU) {
+  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]], AnyAlu);
+  if (SlotedSU)
     return SlotedSU;
-  } else if (!SlotedSU) {
+  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny], AnyAlu);
+  if (UnslotedSU)
     AssignSlot(UnslotedSU->getInstr(), Slot);
-    return UnslotedSU;
-  } else {
-    //Determine which one to pick (the lesser one)
-    if (CompareSUnit()(SlotedSU, UnslotedSU)) {
-      AvailableAlus[AluAny].insert(UnslotedSU);
-      return SlotedSU;
-    } else {
-      AvailableAlus[IndexToID[Slot]].insert(SlotedSU);
-      AssignSlot(UnslotedSU->getInstr(), Slot);
-      return UnslotedSU;
-    }
-  }
+  return UnslotedSU;
 }
 
-bool R600SchedStrategy::isAvailablesAluEmpty() const {
-  return Pending[IDAlu]->empty() && AvailableAlus[AluAny].empty() &&
-      AvailableAlus[AluT_XYZW].empty() && AvailableAlus[AluT_X].empty() &&
-      AvailableAlus[AluT_Y].empty() && AvailableAlus[AluT_Z].empty() &&
-      AvailableAlus[AluT_W].empty() && AvailableAlus[AluDiscarded].empty();
+unsigned R600SchedStrategy::AvailablesAluCount() const {
+  return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
+      AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
+      AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
+      AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
+      AvailableAlus[AluPredX].size();
 }
 
 SUnit* R600SchedStrategy::pickAlu() {
-  while (!isAvailablesAluEmpty()) {
+  while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
     if (!OccupedSlotsMask) {
+      // Bottom up scheduling : predX must comes first
+      if (!AvailableAlus[AluPredX].empty()) {
+        OccupedSlotsMask |= 31;
+        return PopInst(AvailableAlus[AluPredX], false);
+      }
       // Flush physical reg copies (RA will discard them)
       if (!AvailableAlus[AluDiscarded].empty()) {
-        OccupedSlotsMask = 15;
-        return PopInst(AvailableAlus[AluDiscarded]);
+        OccupedSlotsMask |= 31;
+        return PopInst(AvailableAlus[AluDiscarded], false);
      }
       // If there is a T_XYZW alu available, use it
       if (!AvailableAlus[AluT_XYZW].empty()) {
-        OccupedSlotsMask = 15;
-        return PopInst(AvailableAlus[AluT_XYZW]);
+        OccupedSlotsMask |= 15;
+        return PopInst(AvailableAlus[AluT_XYZW], false);
+      }
+    }
+    bool TransSlotOccuped = OccupedSlotsMask & 16;
+    if (!TransSlotOccuped && VLIW5) {
+      if (!AvailableAlus[AluTrans].empty()) {
+        OccupedSlotsMask |= 16;
+        return PopInst(AvailableAlus[AluTrans], false);
+      }
+      SUnit *SU = AttemptFillSlot(3, true);
+      if (SU) {
+        OccupedSlotsMask |= 16;
        return SU;
       }
     }
-    for (unsigned Chan = 0; Chan < 4; ++Chan) {
+    for (int Chan = 3; Chan > -1; --Chan) {
       bool isOccupied = OccupedSlotsMask & (1 << Chan);
       if (!isOccupied) {
-        SUnit *SU = AttemptFillSlot(Chan);
+        SUnit *SU = AttemptFillSlot(Chan, false);
         if (SU) {
           OccupedSlotsMask |= (1 << Chan);
           InstructionsGroupCandidate.push_back(SU->getInstr());
@@ -413,14 +453,14 @@ SUnit* R600SchedStrategy::pickAlu() {
 
 SUnit* R600SchedStrategy::pickOther(int QID) {
   SUnit *SU = 0;
-  ReadyQueue *AQ = Available[QID];
+  std::vector<SUnit *> &AQ = Available[QID];
 
-  if (AQ->empty()) {
+  if (AQ.empty()) {
     MoveUnits(Pending[QID], AQ);
   }
-  if (!AQ->empty()) {
-    SU = *AQ->begin();
-    AQ->remove(AQ->begin());
+  if (!AQ.empty()) {
    SU = AQ.back();
    AQ.resize(AQ.size() - 1);
   }
   return SU;
 }
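A note on the clause-switch heuristic this patch introduces in pickNode(): a long ALU clause is only worth extending if enough wavefronts are in flight to hide texture latency, and the wavefront count is capped by GPR usage. The patch estimates the wavefronts needed as 62.5 / ALUFetchRatio (from 500 TEX cycles / (ratio * 8 ALU cycles)) and the wavefronts available as 248 / GPRCount, flushing the fetch clause when need exceeds supply. Below is a minimal standalone sketch of that arithmetic; it is not the LLVM code, the helper names (wfCountLimitedByGPR, shouldSwitchFromAlu) are invented for illustration, and the real pickNode() derives its instruction counts from the scheduler's queues rather than from two plain integers.

// Standalone model of the ALU/fetch clause-switch heuristic (illustrative only).
#include <cassert>
#include <cstdio>

// Wavefronts that fit when each one needs GPRCount registers; 248 usable
// GPRs, mirroring the patch's getWFCountLimitedByGPR().
static unsigned wfCountLimitedByGPR(unsigned GPRCount) {
  assert(GPRCount && "GPRCount cannot be 0");
  return 248 / GPRCount;
}

// True when the wavefronts needed to hide TEX latency behind ALU work
// (500 TEX cycles / (ratio * 8 ALU cycles) = 62.5 / ratio) exceed what the
// 128-bit fetch registers allow (each pending fetch assumed to need ~2 GPRs).
static bool shouldSwitchFromAlu(unsigned AluInsts, unsigned FetchInsts) {
  assert(FetchInsts && "caller checks that fetches are pending");
  float ALUFetchRatio = float(AluInsts) / float(FetchInsts);
  unsigned NeededWF = unsigned(62.5f / ALUFetchRatio);
  unsigned NearRegisterRequirement = 2 * FetchInsts;
  return NeededWF > wfCountLimitedByGPR(NearRegisterRequirement);
}

int main() {
  // ALU-heavy: ratio 10, NeededWF = 6, budget allows 248/8 = 31 -> keep the ALU clause.
  std::printf("40 ALU / 4 fetch  -> switch? %d\n", shouldSwitchFromAlu(40, 4));
  // Fetch-heavy: ratio 0.5, NeededWF = 125, budget allows 248/40 = 6 -> flush fetches.
  std::printf("10 ALU / 20 fetch -> switch? %d\n", shouldSwitchFromAlu(10, 20));
  return 0;
}

Flushing early trades clause length for occupancy: retiring the fetch clause frees 128-bit registers, which raises the number of wavefronts the hardware can keep in flight to hide texture latency.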