author     Navdeep Parhar <np@FreeBSD.org>    2017-12-22 19:10:19 +0000
committer  Navdeep Parhar <np@FreeBSD.org>    2017-12-22 19:10:19 +0000
commit     f549e3521d7824a65d23bf4f34e68c6d3392531f (patch)
tree       f66823ad4fc5b5b5c6b6ebd383806d55c59dca29
parent     d2064cf0303dbd5585de1a36ae54504a13349e65 (diff)
cxgbe(4): Do not forward interrupts to queues with freelists.

This leaves the firmware event queue (fwq) as the only queue that can take
interrupts for others.  This simplifies cfg_itype_and_nqueues and queue
allocation in the driver at the cost of a little (never?) used configuration.
It also allows service_iq to be split into two specialized variants in the
future.

MFC after:	2 months
Sponsored by:	Chelsio Communications
Notes:
    svn path=/head/; revision=327093
-rw-r--r--  sys/dev/cxgbe/adapter.h    12
-rw-r--r--  sys/dev/cxgbe/t4_main.c   279
-rw-r--r--  sys/dev/cxgbe/t4_netmap.c  14
-rw-r--r--  sys/dev/cxgbe/t4_sge.c    215
-rw-r--r--  sys/dev/cxgbe/t4_vf.c       5
5 files changed, 204 insertions(+), 321 deletions(-)
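
The heart of the change is the new forwarding_intr_to_fwq() helper in
adapter.h: instead of tracking per-VI INTR_RXQ/INTR_OFLD_RXQ flags and a
per-iq IQ_INTR flag, the driver now makes a single adapter-wide decision.
The snippet below is a condensed, stand-alone sketch of that decision;
forwarding_intr_to_fwq() is copied from the patch, while struct adapter is
reduced to the one field used here and rxq_intr_idx() is a hypothetical
caller for illustration, not driver code.

struct adapter {			/* stand-in: the real struct is much larger */
	int intr_count;			/* number of interrupt vectors allocated */
};

/* Copied from the patch: forward iff the whole adapter has a single vector. */
static inline int
forwarding_intr_to_fwq(struct adapter *sc)
{

	return (sc->intr_count == 1);
}

/*
 * Hypothetical caller: the value handed to alloc_rxq()/alloc_ofld_rxq().
 * -1 tells alloc_iq_fl() to forward this queue's interrupts to the fwq;
 * anything else is a direct interrupt vector index.
 */
static int
rxq_intr_idx(struct adapter *sc, int vec)
{

	return (forwarding_intr_to_fwq(sc) ? -1 : vec);
}
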
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index 3b1d14b70a21..05d22253c181 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -169,9 +169,6 @@ enum {
DOOMED = (1 << 0),
VI_INIT_DONE = (1 << 1),
VI_SYSCTL_CTX = (1 << 2),
- INTR_RXQ = (1 << 4), /* All NIC rxq's take interrupts */
- INTR_OFLD_RXQ = (1 << 5), /* All TOE rxq's take interrupts */
- INTR_ALL = (INTR_RXQ | INTR_OFLD_RXQ),
/* adapter debug_flags */
DF_DUMP_MBOX = (1 << 0), /* Log all mbox cmd/rpl. */
@@ -349,7 +346,7 @@ enum {
/* iq flags */
IQ_ALLOCATED = (1 << 0), /* firmware resources allocated */
IQ_HAS_FL = (1 << 1), /* iq associated with a freelist */
- IQ_INTR = (1 << 2), /* iq takes direct interrupt */
+ /* 1 << 2 Used to be IQ_INTR */
IQ_LRO_ENABLED = (1 << 3), /* iq is an eth rxq with LRO enabled */
IQ_ADJ_CREDIT = (1 << 4), /* hw is off by 1 credit for this iq */
@@ -956,6 +953,13 @@ struct adapter {
/* One for firmware events */
#define T4VF_EXTRA_INTR 1
+static inline int
+forwarding_intr_to_fwq(struct adapter *sc)
+{
+
+ return (sc->intr_count == 1);
+}
+
static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 467421a4afa3..cf9531c6d7c9 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -470,7 +470,6 @@ struct intrs_and_queues {
uint16_t intr_type; /* INTx, MSI, or MSI-X */
uint16_t num_vis; /* number of VIs for each port */
uint16_t nirq; /* Total # of vectors */
- uint16_t intr_flags; /* Interrupt flags for each port */
uint16_t ntxq; /* # of NIC txq's for each port */
uint16_t nrxq; /* # of NIC rxq's for each port */
uint16_t nofldtxq; /* # of TOE txq's for each port */
@@ -1118,7 +1117,6 @@ t4_attach(device_t dev)
vi->first_txq = tqidx;
vi->tmr_idx = t4_tmr_idx;
vi->pktc_idx = t4_pktc_idx;
- vi->flags |= iaq.intr_flags & INTR_RXQ;
vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;
@@ -1135,7 +1133,6 @@ t4_attach(device_t dev)
vi->ofld_pktc_idx = t4_pktc_idx_ofld;
vi->first_ofld_rxq = ofld_rqidx;
vi->first_ofld_txq = ofld_tqidx;
- vi->flags |= iaq.intr_flags & INTR_OFLD_RXQ;
vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;
vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
@@ -2648,26 +2645,43 @@ fixup_devlog_params(struct adapter *sc)
return (rc);
}
-static int
-cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
+static void
+update_nirq(struct intrs_and_queues *iaq, int nports)
{
- int rc, itype, navail, nrxq, nports, n;
- int nofldrxq = 0;
+ int extra = T4_EXTRA_INTR;
+
+ iaq->nirq = extra;
+ iaq->nirq += nports * (iaq->nrxq + iaq->nofldrxq);
+ iaq->nirq += nports * (iaq->num_vis - 1) *
+ max(iaq->nrxq_vi, iaq->nnmrxq_vi);
+ iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
+}
+
+/*
+ * Adjust requirements to fit the number of interrupts available.
+ */
+static void
+calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
+ int navail)
+{
+ int old_nirq;
+ const int nports = sc->params.nports;
- nports = sc->params.nports;
MPASS(nports > 0);
+ MPASS(navail > 0);
bzero(iaq, sizeof(*iaq));
+ iaq->intr_type = itype;
iaq->num_vis = t4_num_vis;
iaq->ntxq = t4_ntxq;
iaq->ntxq_vi = t4_ntxq_vi;
- iaq->nrxq = nrxq = t4_nrxq;
+ iaq->nrxq = t4_nrxq;
iaq->nrxq_vi = t4_nrxq_vi;
#ifdef TCP_OFFLOAD
if (is_offload(sc)) {
iaq->nofldtxq = t4_nofldtxq;
iaq->nofldtxq_vi = t4_nofldtxq_vi;
- iaq->nofldrxq = nofldrxq = t4_nofldrxq;
+ iaq->nofldrxq = t4_nofldrxq;
iaq->nofldrxq_vi = t4_nofldrxq_vi;
}
#endif
@@ -2676,6 +2690,105 @@ cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
iaq->nnmrxq_vi = t4_nnmrxq_vi;
#endif
+ update_nirq(iaq, nports);
+ if (iaq->nirq <= navail &&
+ (itype != INTR_MSI || powerof2(iaq->nirq))) {
+ /*
+ * This is the normal case -- there are enough interrupts for
+ * everything.
+ */
+ goto done;
+ }
+
+ /*
+ * If extra VIs have been configured try reducing their count and see if
+ * that works.
+ */
+ while (iaq->num_vis > 1) {
+ iaq->num_vis--;
+ update_nirq(iaq, nports);
+ if (iaq->nirq <= navail &&
+ (itype != INTR_MSI || powerof2(iaq->nirq))) {
+ device_printf(sc->dev, "virtual interfaces per port "
+ "reduced to %d from %d. nrxq=%u, nofldrxq=%u, "
+ "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. "
+ "itype %d, navail %u, nirq %d.\n",
+ iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
+ iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
+ itype, navail, iaq->nirq);
+ goto done;
+ }
+ }
+
+ /*
+ * Extra VIs will not be created. Log a message if they were requested.
+ */
+ MPASS(iaq->num_vis == 1);
+ iaq->ntxq_vi = iaq->nrxq_vi = 0;
+ iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
+ iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
+ if (iaq->num_vis != t4_num_vis) {
+ device_printf(sc->dev, "extra virtual interfaces disabled. "
+ "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
+ "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n",
+ iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
+ iaq->nnmrxq_vi, itype, navail, iaq->nirq);
+ }
+
+ /*
+ * Keep reducing the number of NIC rx queues to the next lower power of
+ * 2 (for even RSS distribution) and halving the TOE rx queues and see
+ * if that works.
+ */
+ do {
+ if (iaq->nrxq > 1) {
+ do {
+ iaq->nrxq--;
+ } while (!powerof2(iaq->nrxq));
+ }
+ if (iaq->nofldrxq > 1)
+ iaq->nofldrxq >>= 1;
+
+ old_nirq = iaq->nirq;
+ update_nirq(iaq, nports);
+ if (iaq->nirq <= navail &&
+ (itype != INTR_MSI || powerof2(iaq->nirq))) {
+ device_printf(sc->dev, "running with reduced number of "
+ "rx queues because of shortage of interrupts. "
+ "nrxq=%u, nofldrxq=%u. "
+ "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
+ iaq->nofldrxq, itype, navail, iaq->nirq);
+ goto done;
+ }
+ } while (old_nirq != iaq->nirq);
+
+ /* One interrupt for everything. Ugh. */
+ device_printf(sc->dev, "running with minimal number of queues. "
+ "itype %d, navail %u.\n", itype, navail);
+ iaq->nirq = 1;
+ MPASS(iaq->nrxq == 1);
+ iaq->ntxq = 1;
+ if (iaq->nofldrxq > 1)
+ iaq->nofldtxq = 1;
+done:
+ MPASS(iaq->num_vis > 0);
+ if (iaq->num_vis > 1) {
+ MPASS(iaq->nrxq_vi > 0);
+ MPASS(iaq->ntxq_vi > 0);
+ }
+ MPASS(iaq->nirq > 0);
+ MPASS(iaq->nrxq > 0);
+ MPASS(iaq->ntxq > 0);
+ if (itype == INTR_MSI) {
+ MPASS(powerof2(iaq->nirq));
+ }
+}
+
+static int
+cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
+{
+ int rc, itype, navail, nalloc;
+
for (itype = INTR_MSIX; itype; itype >>= 1) {
if ((itype & t4_intr_types) == 0)
@@ -2691,126 +2804,33 @@ restart:
if (navail == 0)
continue;
- iaq->intr_type = itype;
- iaq->intr_flags = 0;
-
- /*
- * Best option: an interrupt vector for errors, one for the
- * firmware event queue, and one for every rxq (NIC and TOE) of
- * every VI. The VIs that support netmap use the same
- * interrupts for the NIC rx queues and the netmap rx queues
- * because only one set of queues is active at a time.
- */
- iaq->nirq = T4_EXTRA_INTR;
- iaq->nirq += nports * (nrxq + nofldrxq);
- iaq->nirq += nports * (iaq->num_vis - 1) *
- max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */
- iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
- if (iaq->nirq <= navail &&
- (itype != INTR_MSI || powerof2(iaq->nirq))) {
- iaq->intr_flags = INTR_ALL;
- goto allocate;
- }
-
- /* Disable the VIs (and netmap) if there aren't enough intrs */
- if (iaq->num_vis > 1) {
- device_printf(sc->dev, "virtual interfaces disabled "
- "because num_vis=%u with current settings "
- "(nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
- "nnmrxq_vi=%u) would need %u interrupts but "
- "only %u are available.\n", iaq->num_vis, nrxq,
- nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
- iaq->nnmrxq_vi, iaq->nirq, navail);
- iaq->num_vis = 1;
- iaq->ntxq_vi = iaq->nrxq_vi = 0;
- iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
- iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
- goto restart;
- }
-
- /*
- * Second best option: a vector for errors, one for the firmware
- * event queue, and vectors for either all the NIC rx queues or
- * all the TOE rx queues. The queues that don't get vectors
- * will forward their interrupts to those that do.
- */
- iaq->nirq = T4_EXTRA_INTR;
- if (nrxq >= nofldrxq) {
- iaq->intr_flags = INTR_RXQ;
- iaq->nirq += nports * nrxq;
- } else {
- iaq->intr_flags = INTR_OFLD_RXQ;
- iaq->nirq += nports * nofldrxq;
- }
- if (iaq->nirq <= navail &&
- (itype != INTR_MSI || powerof2(iaq->nirq)))
- goto allocate;
-
- /*
- * Next best option: an interrupt vector for errors, one for the
- * firmware event queue, and at least one per main-VI. At this
- * point we know we'll have to downsize nrxq and/or nofldrxq to
- * fit what's available to us.
- */
- iaq->nirq = T4_EXTRA_INTR;
- iaq->nirq += nports;
- if (iaq->nirq <= navail) {
- int leftover = navail - iaq->nirq;
- int target = max(nrxq, nofldrxq);
-
- iaq->intr_flags = nrxq >= nofldrxq ?
- INTR_RXQ : INTR_OFLD_RXQ;
-
- n = 1;
- while (n < target && leftover >= nports) {
- leftover -= nports;
- iaq->nirq += nports;
- n++;
- }
- iaq->nrxq = min(n, nrxq);
-#ifdef TCP_OFFLOAD
- iaq->nofldrxq = min(n, nofldrxq);
-#endif
-
- if (itype != INTR_MSI || powerof2(iaq->nirq))
- goto allocate;
- }
-
- /*
- * Least desirable option: one interrupt vector for everything.
- */
- iaq->nirq = iaq->nrxq = 1;
- iaq->intr_flags = 0;
-#ifdef TCP_OFFLOAD
- if (is_offload(sc))
- iaq->nofldrxq = 1;
-#endif
-allocate:
- navail = iaq->nirq;
+ calculate_iaq(sc, iaq, itype, navail);
+ nalloc = iaq->nirq;
rc = 0;
if (itype == INTR_MSIX)
- rc = pci_alloc_msix(sc->dev, &navail);
+ rc = pci_alloc_msix(sc->dev, &nalloc);
else if (itype == INTR_MSI)
- rc = pci_alloc_msi(sc->dev, &navail);
+ rc = pci_alloc_msi(sc->dev, &nalloc);
- if (rc == 0) {
- if (navail == iaq->nirq)
+ if (rc == 0 && nalloc > 0) {
+ if (nalloc == iaq->nirq)
return (0);
/*
* Didn't get the number requested. Use whatever number
- * the kernel is willing to allocate (it's in navail).
+ * the kernel is willing to allocate.
*/
device_printf(sc->dev, "fewer vectors than requested, "
"type=%d, req=%d, rcvd=%d; will downshift req.\n",
- itype, iaq->nirq, navail);
+ itype, iaq->nirq, nalloc);
pci_release_msi(sc->dev);
+ navail = nalloc;
goto restart;
}
device_printf(sc->dev,
"failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
- itype, rc, iaq->nirq, navail);
+ itype, rc, iaq->nirq, nalloc);
}
device_printf(sc->dev,
@@ -4352,7 +4372,7 @@ t4_setup_intr_handlers(struct adapter *sc)
*/
irq = &sc->irq[0];
rid = sc->intr_type == INTR_INTX ? 0 : 1;
- if (sc->intr_count == 1)
+ if (forwarding_intr_to_fwq(sc))
return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
/* Multiple interrupts. */
@@ -4387,8 +4407,6 @@ t4_setup_intr_handlers(struct adapter *sc)
if (vi->nnmrxq > 0) {
int n = max(vi->nrxq, vi->nnmrxq);
- MPASS(vi->flags & INTR_RXQ);
-
rxq = &sge->rxq[vi->first_rxq];
#ifdef DEV_NETMAP
nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
@@ -4406,11 +4424,17 @@ t4_setup_intr_handlers(struct adapter *sc)
t4_vi_intr, irq, s);
if (rc != 0)
return (rc);
+#ifdef RSS
+ if (q < vi->nrxq) {
+ bus_bind_intr(sc->dev, irq->res,
+ rss_getcpu(q % nbuckets));
+ }
+#endif
irq++;
rid++;
vi->nintr++;
}
- } else if (vi->flags & INTR_RXQ) {
+ } else {
for_each_rxq(vi, q, rxq) {
snprintf(s, sizeof(s), "%x%c%x", p,
'a' + v, q);
@@ -4428,18 +4452,15 @@ t4_setup_intr_handlers(struct adapter *sc)
}
}
#ifdef TCP_OFFLOAD
- if (vi->flags & INTR_OFLD_RXQ) {
- for_each_ofld_rxq(vi, q, ofld_rxq) {
- snprintf(s, sizeof(s), "%x%c%x", p,
- 'A' + v, q);
- rc = t4_alloc_irq(sc, irq, rid,
- t4_intr, ofld_rxq, s);
- if (rc != 0)
- return (rc);
- irq++;
- rid++;
- vi->nintr++;
- }
+ for_each_ofld_rxq(vi, q, ofld_rxq) {
+ snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
+ rc = t4_alloc_irq(sc, irq, rid, t4_intr,
+ ofld_rxq, s);
+ if (rc != 0)
+ return (rc);
+ irq++;
+ rid++;
+ vi->nintr++;
}
#endif
}
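
Most of the t4_main.c churn is the extraction of calculate_iaq(), which fits
the queue counts to the available vectors and is re-run whenever the kernel
grants fewer vectors than requested (navail = nalloc; goto restart).  Its
queue-reduction step is easy to miss in the hunk above, so it is restated here
as a stand-alone sketch: NIC rx queues drop to the next lower power of 2 (to
keep RSS distribution even) and TOE rx queues are halved.  powerof2() is
normally the sys/param.h macro; a local equivalent is used so the snippet
compiles on its own.

static int
powerof2_local(int x)			/* same test as the sys/param.h macro */
{

	return (((x - 1) & x) == 0);
}

/* One pass of the reduction loop in calculate_iaq(). */
static void
reduce_rxq_counts(int *nrxq, int *nofldrxq)
{

	if (*nrxq > 1) {
		do {
			(*nrxq)--;		/* next lower power of 2 for RSS */
		} while (!powerof2_local(*nrxq));
	}
	if (*nofldrxq > 1)
		*nofldrxq >>= 1;		/* halve the TOE rx queues */
}
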
diff --git a/sys/dev/cxgbe/t4_netmap.c b/sys/dev/cxgbe/t4_netmap.c
index 774be6b1fd00..fa3bbb9fc8f6 100644
--- a/sys/dev/cxgbe/t4_netmap.c
+++ b/sys/dev/cxgbe/t4_netmap.c
@@ -108,16 +108,10 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
V_FW_IQ_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
FW_LEN16(c));
- if (vi->flags & INTR_RXQ) {
- KASSERT(nm_rxq->intr_idx < sc->intr_count,
- ("%s: invalid direct intr_idx %d", __func__,
- nm_rxq->intr_idx));
- v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
- } else {
- CXGBE_UNIMPLEMENTED(__func__); /* XXXNM: needs review */
- v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
- F_FW_IQ_CMD_IQANDST;
- }
+ MPASS(!forwarding_intr_to_fwq(sc));
+ KASSERT(nm_rxq->intr_idx < sc->intr_count,
+ ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx));
+ v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
c.type_to_iqandstindex = htobe32(v |
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
V_FW_IQ_CMD_VIID(vi->viid) |
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 0abc2ad678f2..51a53a85d3fd 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -953,70 +953,6 @@ t4_teardown_adapter_queues(struct adapter *sc)
return (0);
}
-static inline int
-first_vector(struct vi_info *vi)
-{
- struct adapter *sc = vi->pi->adapter;
-
- if (sc->intr_count == 1)
- return (0);
-
- return (vi->first_intr);
-}
-
-/*
- * Given an arbitrary "index," come up with an iq that can be used by other
- * queues (of this VI) for interrupt forwarding, SGE egress updates, etc.
- * The iq returned is guaranteed to be something that takes direct interrupts.
- */
-static struct sge_iq *
-vi_intr_iq(struct vi_info *vi, int idx)
-{
- struct adapter *sc = vi->pi->adapter;
- struct sge *s = &sc->sge;
- struct sge_iq *iq = NULL;
- int nintr, i;
-
- if (sc->intr_count == 1)
- return (&sc->sge.fwq);
-
- nintr = vi->nintr;
-#ifdef DEV_NETMAP
- /* Do not consider any netmap-only interrupts */
- if (vi->flags & INTR_RXQ && vi->nnmrxq > vi->nrxq)
- nintr -= vi->nnmrxq - vi->nrxq;
-#endif
- KASSERT(nintr != 0,
- ("%s: vi %p has no exclusive interrupts, total interrupts = %d",
- __func__, vi, sc->intr_count));
- i = idx % nintr;
-
- if (vi->flags & INTR_RXQ) {
- if (i < vi->nrxq) {
- iq = &s->rxq[vi->first_rxq + i].iq;
- goto done;
- }
- i -= vi->nrxq;
- }
-#ifdef TCP_OFFLOAD
- if (vi->flags & INTR_OFLD_RXQ) {
- if (i < vi->nofldrxq) {
- iq = &s->ofld_rxq[vi->first_ofld_rxq + i].iq;
- goto done;
- }
- i -= vi->nofldrxq;
- }
-#endif
- panic("%s: vi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__,
- vi, vi->flags & INTR_ALL, idx, nintr);
-done:
- MPASS(iq != NULL);
- KASSERT(iq->flags & IQ_INTR,
- ("%s: iq %p (vi %p, intr_flags 0x%lx, idx %d)", __func__, iq, vi,
- vi->flags & INTR_ALL, idx));
- return (iq);
-}
-
/* Maximum payload that can be delivered with a single iq descriptor */
static inline int
mtu_to_max_payload(struct adapter *sc, int mtu, const int toe)
@@ -1042,7 +978,7 @@ mtu_to_max_payload(struct adapter *sc, int mtu, const int toe)
int
t4_setup_vi_queues(struct vi_info *vi)
{
- int rc = 0, i, j, intr_idx, iqid;
+ int rc = 0, i, intr_idx, iqidx;
struct sge_rxq *rxq;
struct sge_txq *txq;
struct sge_wrq *ctrlq;
@@ -1064,14 +1000,14 @@ t4_setup_vi_queues(struct vi_info *vi)
int maxp, mtu = ifp->if_mtu;
/* Interrupt vector to start from (when using multiple vectors) */
- intr_idx = first_vector(vi);
+ intr_idx = vi->first_intr;
#ifdef DEV_NETMAP
saved_idx = intr_idx;
if (ifp->if_capabilities & IFCAP_NETMAP) {
/* netmap is supported with direct interrupts only. */
- MPASS(vi->flags & INTR_RXQ);
+ MPASS(!forwarding_intr_to_fwq(sc));
/*
* We don't have buffers to back the netmap rx queues
@@ -1090,8 +1026,8 @@ t4_setup_vi_queues(struct vi_info *vi)
oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
CTLFLAG_RD, NULL, "tx queues");
for_each_nm_txq(vi, i, nm_txq) {
- iqid = vi->first_nm_rxq + (i % vi->nnmrxq);
- rc = alloc_nm_txq(vi, nm_txq, iqid, i, oid);
+ iqidx = vi->first_nm_rxq + (i % vi->nnmrxq);
+ rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid);
if (rc != 0)
goto done;
}
@@ -1102,15 +1038,12 @@ t4_setup_vi_queues(struct vi_info *vi)
#endif
/*
- * First pass over all NIC and TOE rx queues:
- * a) initialize iq and fl
- * b) allocate queue iff it will take direct interrupts.
+ * Allocate rx queues first because a default iqid is required when
+ * creating a tx queue.
*/
maxp = mtu_to_max_payload(sc, mtu, 0);
- if (vi->flags & INTR_RXQ) {
- oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
- CTLFLAG_RD, NULL, "rx queues");
- }
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
+ CTLFLAG_RD, NULL, "rx queues");
for_each_rxq(vi, i, rxq) {
init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq);
@@ -1119,13 +1052,11 @@ t4_setup_vi_queues(struct vi_info *vi)
device_get_nameunit(vi->dev), i);
init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);
- if (vi->flags & INTR_RXQ) {
- rxq->iq.flags |= IQ_INTR;
- rc = alloc_rxq(vi, rxq, intr_idx, i, oid);
- if (rc != 0)
- goto done;
- intr_idx++;
- }
+ rc = alloc_rxq(vi, rxq,
+ forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
+ if (rc != 0)
+ goto done;
+ intr_idx++;
}
#ifdef DEV_NETMAP
if (ifp->if_capabilities & IFCAP_NETMAP)
@@ -1133,11 +1064,8 @@ t4_setup_vi_queues(struct vi_info *vi)
#endif
#ifdef TCP_OFFLOAD
maxp = mtu_to_max_payload(sc, mtu, 1);
- if (vi->flags & INTR_OFLD_RXQ) {
- oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
- CTLFLAG_RD, NULL,
- "rx queues for offloaded TCP connections");
- }
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
+ CTLFLAG_RD, NULL, "rx queues for offloaded TCP connections");
for_each_ofld_rxq(vi, i, ofld_rxq) {
init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx,
@@ -1147,70 +1075,29 @@ t4_setup_vi_queues(struct vi_info *vi)
device_get_nameunit(vi->dev), i);
init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);
- if (vi->flags & INTR_OFLD_RXQ) {
- ofld_rxq->iq.flags |= IQ_INTR;
- rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid);
- if (rc != 0)
- goto done;
- intr_idx++;
- }
- }
-#endif
-
- /*
- * Second pass over all NIC and TOE rx queues. The queues forwarding
- * their interrupts are allocated now.
- */
- j = 0;
- if (!(vi->flags & INTR_RXQ)) {
- oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
- CTLFLAG_RD, NULL, "rx queues");
- for_each_rxq(vi, i, rxq) {
- MPASS(!(rxq->iq.flags & IQ_INTR));
-
- intr_idx = vi_intr_iq(vi, j)->abs_id;
-
- rc = alloc_rxq(vi, rxq, intr_idx, i, oid);
- if (rc != 0)
- goto done;
- j++;
- }
- }
-#ifdef TCP_OFFLOAD
- if (vi->nofldrxq != 0 && !(vi->flags & INTR_OFLD_RXQ)) {
- oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
- CTLFLAG_RD, NULL,
- "rx queues for offloaded TCP connections");
- for_each_ofld_rxq(vi, i, ofld_rxq) {
- MPASS(!(ofld_rxq->iq.flags & IQ_INTR));
-
- intr_idx = vi_intr_iq(vi, j)->abs_id;
-
- rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid);
- if (rc != 0)
- goto done;
- j++;
- }
+ rc = alloc_ofld_rxq(vi, ofld_rxq,
+ forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
+ if (rc != 0)
+ goto done;
+ intr_idx++;
}
#endif
/*
- * Now the tx queues. Only one pass needed.
+ * Now the tx queues.
*/
oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
NULL, "tx queues");
- j = 0;
for_each_txq(vi, i, txq) {
- iqid = vi_intr_iq(vi, j)->cntxt_id;
+ iqidx = vi->first_rxq + (i % vi->nrxq);
snprintf(name, sizeof(name), "%s txq%d",
device_get_nameunit(vi->dev), i);
- init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, iqid,
- name);
+ init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan,
+ sc->sge.rxq[iqidx].iq.cntxt_id, name);
rc = alloc_txq(vi, txq, i, oid);
if (rc != 0)
goto done;
- j++;
}
#ifdef TCP_OFFLOAD
oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
@@ -1218,11 +1105,11 @@ t4_setup_vi_queues(struct vi_info *vi)
for_each_ofld_txq(vi, i, ofld_txq) {
struct sysctl_oid *oid2;
- iqid = vi_intr_iq(vi, j)->cntxt_id;
+ iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq);
snprintf(name, sizeof(name), "%s ofld_txq%d",
device_get_nameunit(vi->dev), i);
init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan,
- iqid, name);
+ sc->sge.ofld_rxq[iqidx].iq.cntxt_id, name);
snprintf(name, sizeof(name), "%d", i);
oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
@@ -1231,7 +1118,6 @@ t4_setup_vi_queues(struct vi_info *vi)
rc = alloc_wrq(sc, vi, ofld_txq, oid2);
if (rc != 0)
goto done;
- j++;
}
#endif
@@ -1243,10 +1129,9 @@ t4_setup_vi_queues(struct vi_info *vi)
oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
NULL, "ctrl queue");
ctrlq = &sc->sge.ctrlq[pi->port_id];
- iqid = vi_intr_iq(vi, 0)->cntxt_id;
snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev));
- init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid,
- name);
+ init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan,
+ sc->sge.rxq[vi->first_rxq].iq.cntxt_id, name);
rc = alloc_wrq(sc, vi, ctrlq, oid);
done:
@@ -1312,33 +1197,15 @@ t4_teardown_vi_queues(struct vi_info *vi)
#endif
/*
- * Then take down the rx queues that forward their interrupts, as they
- * reference other rx queues.
+ * Then take down the rx queues.
*/
for_each_rxq(vi, i, rxq) {
- if ((rxq->iq.flags & IQ_INTR) == 0)
- free_rxq(vi, rxq);
+ free_rxq(vi, rxq);
}
#ifdef TCP_OFFLOAD
for_each_ofld_rxq(vi, i, ofld_rxq) {
- if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
- free_ofld_rxq(vi, ofld_rxq);
- }
-#endif
-
- /*
- * Then take down the rx queues that take direct interrupts.
- */
-
- for_each_rxq(vi, i, rxq) {
- if (rxq->iq.flags & IQ_INTR)
- free_rxq(vi, rxq);
- }
-#ifdef TCP_OFFLOAD
- for_each_ofld_rxq(vi, i, ofld_rxq) {
- if (ofld_rxq->iq.flags & IQ_INTR)
- free_ofld_rxq(vi, ofld_rxq);
+ free_ofld_rxq(vi, ofld_rxq);
}
#endif
@@ -2715,9 +2582,9 @@ free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
* Returns errno on failure. Resources allocated up to that point may still be
* allocated. Caller is responsible for cleanup in case this function fails.
*
- * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then
- * the intr_idx specifies the vector, starting from 0. Otherwise it specifies
- * the abs_id of the ingress queue to which its interrupts should be forwarded.
+ * If the ingress queue will take interrupts directly then the intr_idx
+ * specifies the vector, starting from 0. -1 means the interrupts for this
+ * queue should be forwarded to the fwq.
*/
static int
alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
@@ -2749,12 +2616,15 @@ alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
if (iq == &sc->sge.fwq)
v |= F_FW_IQ_CMD_IQASYNCH;
- if (iq->flags & IQ_INTR) {
+ if (intr_idx < 0) {
+ /* Forwarded interrupts, all headed to fwq */
+ v |= F_FW_IQ_CMD_IQANDST;
+ v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id);
+ } else {
KASSERT(intr_idx < sc->intr_count,
("%s: invalid direct intr_idx %d", __func__, intr_idx));
- } else
- v |= F_FW_IQ_CMD_IQANDST;
- v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
+ v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
+ }
c.type_to_iqandstindex = htobe32(v |
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
@@ -3004,7 +2874,6 @@ alloc_fwq(struct adapter *sc)
struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE);
- fwq->flags |= IQ_INTR; /* always */
if (sc->flags & IS_VF)
intr_idx = 0;
else {
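
In t4_sge.c the per-iq IQ_INTR flag and the vi_intr_iq()/first_vector()
machinery go away; alloc_iq_fl() now keys off the sign of intr_idx alone.
The fragment below is a minimal, self-contained restatement of that rule and
is not driver code: in the driver the forwarded case sets F_FW_IQ_CMD_IQANDST
and points IQANDSTINDEX at sc->sge.fwq.cntxt_id, while the direct case puts
the vector number in IQANDSTINDEX.

struct iq_route {
	int forwarded;		/* nonzero: interrupts forwarded to the fwq */
	int index;		/* vector number (direct) or fwq context id (forwarded) */
};

static struct iq_route
route_iq_interrupt(int intr_idx, int fwq_cntxt_id)
{
	struct iq_route r;

	if (intr_idx < 0) {
		/* Forwarded interrupts: all of them are headed to the fwq. */
		r.forwarded = 1;
		r.index = fwq_cntxt_id;
	} else {
		/* Direct interrupt on vector intr_idx. */
		r.forwarded = 0;
		r.index = intr_idx;
	}
	return (r);
}
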
diff --git a/sys/dev/cxgbe/t4_vf.c b/sys/dev/cxgbe/t4_vf.c
index e18bcf04d092..8eb664dcae0c 100644
--- a/sys/dev/cxgbe/t4_vf.c
+++ b/sys/dev/cxgbe/t4_vf.c
@@ -62,7 +62,6 @@ __FBSDID("$FreeBSD$");
struct intrs_and_queues {
uint16_t intr_type; /* MSI, or MSI-X */
uint16_t nirq; /* Total # of vectors */
- uint16_t intr_flags; /* Interrupt flags for each port */
uint16_t ntxq; /* # of NIC txq's for each port */
uint16_t nrxq; /* # of NIC rxq's for each port */
};
@@ -330,7 +329,6 @@ cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
continue;
iaq->intr_type = itype;
- iaq->intr_flags = 0;
/*
* XXX: The Linux driver reserves an Ingress Queue for
@@ -438,7 +436,6 @@ cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
return (rc);
}
if (navail == iaq->nirq) {
- iaq->intr_flags = INTR_RXQ;
return (0);
}
pci_release_msi(sc->dev);
@@ -455,7 +452,6 @@ cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
device_printf(sc->dev,
"failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
itype, rc, iaq->nirq, navail);
- iaq->intr_flags = 0;
return (rc);
}
@@ -702,7 +698,6 @@ t4vf_attach(device_t dev)
vi->first_txq = tqidx;
vi->tmr_idx = t4_tmr_idx;
vi->pktc_idx = t4_pktc_idx;
- vi->flags |= iaq.intr_flags & INTR_RXQ;
vi->nrxq = j == 0 ? iaq.nrxq: 1;
vi->ntxq = j == 0 ? iaq.ntxq: 1;