-rw-r--r--  sys/dev/bnxt/bnxt_txrx.c  |   89
-rw-r--r--  sys/dev/bnxt/if_bnxt.c    |    7
-rw-r--r--  sys/dev/e1000/em_txrx.c   |  456
-rw-r--r--  sys/dev/e1000/if_em.c     | 1371
-rw-r--r--  sys/dev/e1000/if_em.h     |   17
-rw-r--r--  sys/dev/e1000/igb_txrx.c  |  442
-rw-r--r--  sys/net/ifdi_if.m         |   11
-rw-r--r--  sys/net/iflib.c           | 1133
-rw-r--r--  sys/net/iflib.h           |  114
9 files changed, 2047 insertions(+), 1593 deletions(-)
diff --git a/sys/dev/bnxt/bnxt_txrx.c b/sys/dev/bnxt/bnxt_txrx.c
index 1729ba3a93ef..94673fc5b877 100644
--- a/sys/dev/bnxt/bnxt_txrx.c
+++ b/sys/dev/bnxt/bnxt_txrx.c
@@ -48,17 +48,19 @@ __FBSDID("$FreeBSD$");
*/
static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
-static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, uint32_t pidx);
-static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, uint32_t cidx,
- bool clear);
+static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
+static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);
-static void bnxt_isc_rxd_refill(void *sc, uint16_t rxqid, uint8_t flid,
+static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);
+
+/* uint16_t rxqid, uint8_t flid,
uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs, uint16_t count,
uint16_t buf_size);
+*/
static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
- uint32_t pidx);
-static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, uint32_t idx,
- int budget);
+ qidx_t pidx);
+static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
+ qidx_t budget);
static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);
static int bnxt_intr(void *sc);
@@ -172,7 +174,7 @@ bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
}
static void
-bnxt_isc_txd_flush(void *sc, uint16_t txqid, uint32_t pidx)
+bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
{
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];
@@ -185,7 +187,7 @@ bnxt_isc_txd_flush(void *sc, uint16_t txqid, uint32_t pidx)
}
static int
-bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, uint32_t idx, bool clear)
+bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
{
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
@@ -249,15 +251,27 @@ done:
}
static void
-bnxt_isc_rxd_refill(void *sc, uint16_t rxqid, uint8_t flid,
- uint32_t pidx, uint64_t *paddrs,
- caddr_t *vaddrs, uint16_t count, uint16_t len)
+bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
{
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
struct bnxt_ring *rx_ring;
struct rx_prod_pkt_bd *rxbd;
uint16_t type;
uint16_t i;
+ uint16_t rxqid;
+ uint16_t count, len;
+ uint32_t pidx;
+ uint8_t flid;
+ uint64_t *paddrs;
+ caddr_t *vaddrs;
+
+ rxqid = iru->iru_qsidx;
+ count = iru->iru_count;
+ len = iru->iru_buf_size;
+ pidx = iru->iru_pidx;
+ flid = iru->iru_flidx;
+ vaddrs = iru->iru_vaddrs;
+ paddrs = iru->iru_paddrs;
if (flid == 0) {
rx_ring = &softc->rx_rings[rxqid];
@@ -284,7 +298,7 @@ bnxt_isc_rxd_refill(void *sc, uint16_t rxqid, uint8_t flid,
static void
bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
- uint32_t pidx)
+ qidx_t pidx)
{
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
struct bnxt_ring *rx_ring;
@@ -310,7 +324,7 @@ bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
}
static int
-bnxt_isc_rxd_available(void *sc, uint16_t rxqid, uint32_t idx, int budget)
+bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
@@ -412,37 +426,6 @@ cmpl_invalid:
return avail;
}
-static void
-bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type)
-{
- uint8_t rss_profile_id;
-
- rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type);
- switch (rss_profile_id) {
- case BNXT_RSS_HASH_TYPE_TCPV4:
- ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
- break;
- case BNXT_RSS_HASH_TYPE_UDPV4:
- ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4;
- break;
- case BNXT_RSS_HASH_TYPE_IPV4:
- ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
- break;
- case BNXT_RSS_HASH_TYPE_TCPV6:
- ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
- break;
- case BNXT_RSS_HASH_TYPE_UDPV6:
- ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6;
- break;
- case BNXT_RSS_HASH_TYPE_IPV6:
- ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
- break;
- default:
- ri->iri_rsstype = M_HASHTYPE_OPAQUE;
- break;
- }
-}
-
static int
bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
struct bnxt_cp_ring *cpr, uint16_t flags_type)
@@ -460,7 +443,13 @@ bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
/* Extract from the first 16-byte BD */
if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
ri->iri_flowid = le32toh(rcp->rss_hash);
- bnxt_set_rsstype(ri, rcp->rss_hash_type);
+ /*
+ * TODO: Extract something useful from rcp->rss_hash_type
+ * (undocumented). It may be documented in the "LSI ES";
+ * also check the firmware code.
+ */
+ ri->iri_rsstype = M_HASHTYPE_OPAQUE;
}
else {
ri->iri_rsstype = M_HASHTYPE_NONE;
@@ -540,7 +529,13 @@ bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
/* Extract from the first 16-byte BD */
if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
ri->iri_flowid = le32toh(tpas->low.rss_hash);
- bnxt_set_rsstype(ri, tpas->low.rss_hash_type);
+ /*
+ * TODO: Extract something useful from tpas->low.rss_hash_type
+ * (undocumented). It may be documented in the "LSI ES";
+ * also check the firmware code.
+ */
+ ri->iri_rsstype = M_HASHTYPE_OPAQUE;
}
else {
ri->iri_rsstype = M_HASHTYPE_NONE;
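
[Annotation] The rxd_refill change above is the core of this commit's driver-facing API churn: iflib now hands drivers a single if_rxd_update_t instead of seven scalar arguments. A minimal sketch of the new callback shape for a hypothetical xx(4) driver -- only the iru_* field names are taken from this diff; the softc and ring types are placeholders:

static void
xx_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct xx_softc *sc = arg;		/* hypothetical driver softc */
	struct xx_rx_ring *rxr;
	uint32_t i, next;

	/* Every per-call parameter now travels in *iru. */
	rxr = &sc->rx_rings[iru->iru_qsidx];	/* iru_flidx would select a free list */
	next = iru->iru_pidx;
	for (i = 0; i < iru->iru_count; i++) {
		rxr->desc[next].addr = htole64(iru->iru_paddrs[i]);
		if (++next == sc->num_rx_desc)	/* wrap the ring */
			next = 0;
	}
}
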
diff --git a/sys/dev/bnxt/if_bnxt.c b/sys/dev/bnxt/if_bnxt.c
index fcd7fee6b450..d10d76625889 100644
--- a/sys/dev/bnxt/if_bnxt.c
+++ b/sys/dev/bnxt/if_bnxt.c
@@ -253,7 +253,8 @@ static device_method_t bnxt_iflib_methods[] = {
DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
- DEVMETHOD(ifdi_queue_intr_enable, bnxt_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_queue_intr_enable),
+ DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_queue_intr_enable),
DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),
@@ -711,6 +712,8 @@ bnxt_attach_pre(if_ctx_t ctx)
scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
scctx->isc_vectors = softc->func.max_cp_rings;
+ scctx->isc_txrx = &bnxt_txrx;
+
if (scctx->isc_nrxd[0] <
((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
device_printf(softc->dev,
@@ -1479,7 +1482,7 @@ bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
rc = iflib_irq_alloc_generic(ctx, &softc->rx_cp_rings[i].irq,
- softc->rx_cp_rings[i].ring.id + 1, IFLIB_INTR_RX,
+ softc->rx_cp_rings[i].ring.id + 1, IFLIB_INTR_RXTX,
bnxt_handle_rx_cp, &softc->rx_cp_rings[i], i, "rx_cp");
if (rc) {
device_printf(iflib_get_dev(ctx),
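
[Annotation] The commit also splits ifdi_queue_intr_enable into per-direction methods and introduces IFLIB_INTR_RXTX for vectors that service both directions, which is why bnxt registers the same handler for both methods above. A hedged sketch of the split for hardware with separate per-queue mask bits; the XX_WRITE_REG/XX_IMS names are invented for illustration:

static int
xx_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct xx_softc *sc = iflib_get_softc(ctx);

	/* Unmask only this RX queue's interrupt cause. */
	XX_WRITE_REG(sc, XX_IMS, XX_IMS_RXQ(rxqid));
	return (0);
}

static int
xx_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct xx_softc *sc = iflib_get_softc(ctx);

	XX_WRITE_REG(sc, XX_IMS, XX_IMS_TXQ(txqid));
	return (0);
}

A driver whose completion rings carry both TX and RX events (as bnxt's do) simply points both DEVMETHODs at one function.
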
diff --git a/sys/dev/e1000/em_txrx.c b/sys/dev/e1000/em_txrx.c
index 47ca3ed3efff..22e983b370a0 100644
--- a/sys/dev/e1000/em_txrx.c
+++ b/sys/dev/e1000/em_txrx.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
+ * Copyright (c) 2016-2017 Matt Macy <mmacy@nextbsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -27,7 +27,7 @@
/* $FreeBSD$ */
#include "if_em.h"
-#ifdef RSS
+#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif
@@ -41,23 +41,24 @@
/*********************************************************************
* Local Function prototypes
*********************************************************************/
-static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower);
-static int em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower);
+static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper,
+ u32 *txd_lower);
+static int em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi,
+ u32 *txd_upper, u32 *txd_lower);
static int em_isc_txd_encap(void *arg, if_pkt_info_t pi);
-static void em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx);
-static int em_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear);
-static void em_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
- uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused);
-static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx);
-static int em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
- int budget);
+static void em_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
+static int em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
+static void em_isc_rxd_refill(void *arg, if_rxd_update_t iru);
+static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
+ qidx_t pidx);
+static int em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
+ qidx_t budget);
static int em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
-static void lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
- uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused);
+static void lem_isc_rxd_refill(void *arg, if_rxd_update_t iru);
-static int lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
- int budget);
+static int lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
+ qidx_t budget);
static int lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
static void lem_receive_checksum(int status, int errors, if_rxd_info_t ri);
@@ -65,7 +66,7 @@ static void em_receive_checksum(uint32_t status, if_rxd_info_t ri);
static int em_determine_rsstype(u32 pkt_info);
extern int em_intr(void *arg);
-struct if_txrx em_txrx = {
+struct if_txrx em_txrx = {
em_isc_txd_encap,
em_isc_txd_flush,
em_isc_txd_credits_update,
@@ -76,7 +77,7 @@ struct if_txrx em_txrx = {
em_intr
};
-struct if_txrx lem_txrx = {
+struct if_txrx lem_txrx = {
em_isc_txd_encap,
em_isc_txd_flush,
em_isc_txd_credits_update,
@@ -87,7 +88,42 @@ struct if_txrx lem_txrx = {
em_intr
};
-extern if_shared_ctx_t em_sctx;
+extern if_shared_ctx_t em_sctx;
+
+void
+em_dump_rs(struct adapter *adapter)
+{
+ if_softc_ctx_t scctx = adapter->shared;
+ struct em_tx_queue *que;
+ struct tx_ring *txr;
+ qidx_t i, ntxd, qid, cur;
+ int16_t rs_cidx;
+ uint8_t status;
+
+ printf("\n");
+ ntxd = scctx->isc_ntxd[0];
+ for (qid = 0; qid < adapter->tx_num_queues; qid++) {
+ que = &adapter->tx_queues[qid];
+ txr = &que->txr;
+ rs_cidx = txr->tx_rs_cidx;
+ if (rs_cidx != txr->tx_rs_pidx) {
+ cur = txr->tx_rsq[rs_cidx];
+ status = txr->tx_base[cur].upper.fields.status;
+ if (!(status & E1000_TXD_STAT_DD))
+ printf("qid[%d]->tx_rsq[%d]: %d clear ", qid, rs_cidx, cur);
+ } else {
+ rs_cidx = (rs_cidx-1)&(ntxd-1);
+ cur = txr->tx_rsq[rs_cidx];
+ printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ", qid, rs_cidx, cur);
+ }
+ printf("cidx_prev=%d rs_pidx=%d ",txr->tx_cidx_processed, txr->tx_rs_pidx);
+ for (i = 0; i < ntxd; i++) {
+ if (txr->tx_base[i].upper.fields.status & E1000_TXD_STAT_DD)
+ printf("%d set ", i);
+ }
+ printf("\n");
+ }
+}
/**********************************************************************
*
@@ -99,14 +135,13 @@ static int
em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower)
{
if_softc_ctx_t scctx = adapter->shared;
- struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
- struct tx_ring *txr = &que->txr;
+ struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
+ struct tx_ring *txr = &que->txr;
struct e1000_context_desc *TXD;
- struct em_txbuffer *tx_buffer;
- int cur, hdr_len;
+ int cur, hdr_len;
hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
- *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
+ *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
E1000_TXD_DTYP_D | /* Data descr type */
E1000_TXD_CMD_TSE); /* Do TSE on this packet */
@@ -114,10 +149,9 @@ em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd
*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
cur = pi->ipi_pidx;
- TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
- tx_buffer = &txr->tx_buffers[cur];
-
- /*
+ TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
+
+ /*
* Start offset for header checksum calculation.
* End offset for header checksum calculation.
* Offset of place put the checksum.
@@ -127,7 +161,7 @@ em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd
htole16(pi->ipi_ehdrlen + pi->ipi_ip_hlen - 1);
TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum);
- /*
+ /*
* Start offset for payload checksum calculation.
* End offset for payload checksum calculation.
* Offset of place to put the checksum.
@@ -136,8 +170,8 @@ em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd
TXD->upper_setup.tcp_fields.tucse = 0;
TXD->upper_setup.tcp_fields.tucso =
pi->ipi_ehdrlen + pi->ipi_ip_hlen + offsetof(struct tcphdr, th_sum);
-
- /*
+
+ /*
* Payload size per packet w/o any headers.
* Length of all headers up to payload.
*/
@@ -150,7 +184,6 @@ em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd
E1000_TXD_CMD_IP | /* Do IP csum */
E1000_TXD_CMD_TCP | /* Do TCP checksum */
(pi->ipi_len - hdr_len)); /* Total len */
- tx_buffer->eop = -1;
txr->tx_tso = TRUE;
if (++cur == scctx->isc_ntxd[0]) {
@@ -178,21 +211,20 @@ em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd
* until the previous request completes. This means setting up
* a new context effectively disables pipelined Tx data DMA which
* in turn greatly slow down performance to send small sized
- * frames.
+ * frames.
**********************************************************************/
static int
em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower)
{
- struct e1000_context_desc *TXD = NULL;
- if_softc_ctx_t scctx = adapter->shared;
- struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
- struct tx_ring *txr = &que->txr;
- struct em_txbuffer *tx_buffer;
- int csum_flags = pi->ipi_csum_flags;
- int cur, hdr_len;
- u32 cmd;
-
+ struct e1000_context_desc *TXD = NULL;
+ if_softc_ctx_t scctx = adapter->shared;
+ struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
+ struct tx_ring *txr = &que->txr;
+ int csum_flags = pi->ipi_csum_flags;
+ int cur, hdr_len;
+ u32 cmd;
+
cur = pi->ipi_pidx;
hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
cmd = adapter->txd_cmd;
@@ -220,7 +252,7 @@ em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_u
TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
if (csum_flags & CSUM_IP) {
- *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+ *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
/*
* Start offset for header checksum calculation.
* End offset for header checksum calculation.
@@ -235,7 +267,7 @@ em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_u
if (csum_flags & (CSUM_TCP|CSUM_UDP)) {
uint8_t tucso;
- *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
if (csum_flags & CSUM_TCP) {
@@ -243,9 +275,9 @@ em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_u
cmd |= E1000_TXD_CMD_TCP;
} else
tucso = hdr_len + offsetof(struct udphdr, uh_sum);
- TXD->upper_setup.tcp_fields.tucss = hdr_len;
- TXD->upper_setup.tcp_fields.tucse = htole16(0);
- TXD->upper_setup.tcp_fields.tucso = tucso;
+ TXD->upper_setup.tcp_fields.tucss = hdr_len;
+ TXD->upper_setup.tcp_fields.tucse = htole16(0);
+ TXD->upper_setup.tcp_fields.tucso = tucso;
}
txr->csum_lhlen = pi->ipi_ehdrlen;
@@ -258,9 +290,6 @@ em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_u
TXD->cmd_and_length =
htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);
- tx_buffer = &txr->tx_buffers[cur];
- tx_buffer->eop = -1;
-
if (++cur == scctx->isc_ntxd[0]) {
cur = 0;
}
@@ -272,24 +301,26 @@ em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_u
static int
em_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
- struct adapter *sc = arg;
- if_softc_ctx_t scctx = sc->shared;
- struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
- struct tx_ring *txr = &que->txr;
- bus_dma_segment_t *segs = pi->ipi_segs;
- int nsegs = pi->ipi_nsegs;
- int csum_flags = pi->ipi_csum_flags;
- int i, j, first, pidx_last;
- u32 txd_upper = 0, txd_lower = 0;
-
- struct em_txbuffer *tx_buffer;
+ struct adapter *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
+ struct tx_ring *txr = &que->txr;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ int nsegs = pi->ipi_nsegs;
+ int csum_flags = pi->ipi_csum_flags;
+ int i, j, first, pidx_last;
+ u32 txd_flags, txd_upper = 0, txd_lower = 0;
+
struct e1000_tx_desc *ctxd = NULL;
- bool do_tso, tso_desc;
-
- i = first = pi->ipi_pidx;
+ bool do_tso, tso_desc;
+ qidx_t ntxd;
+
+ txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_TXD_CMD_RS : 0;
+ i = first = pi->ipi_pidx;
do_tso = (csum_flags & CSUM_TSO);
tso_desc = FALSE;
- /*
+ ntxd = scctx->isc_ntxd[0];
+ /*
* TSO Hardware workaround, if this packet is not
* TSO, and is only a single descriptor long, and
* it follows a TSO burst, then we need to add a
@@ -310,10 +341,10 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
}
if (pi->ipi_mflags & M_VLANTAG) {
- /* Set the vlan id. */
+ /* Set the vlan id. */
txd_upper |= htole16(pi->ipi_vtag) << 16;
- /* Tell hardware to add tag */
- txd_lower |= htole32(E1000_TXD_CMD_VLE);
+ /* Tell hardware to add tag */
+ txd_lower |= htole32(E1000_TXD_CMD_VLE);
}
DPRINTF(iflib_get_dev(sc->ctx), "encap: set up tx: nsegs=%d first=%d i=%d\n", nsegs, first, i);
@@ -326,28 +357,26 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
uint32_t cmd;
ctxd = &txr->tx_base[i];
- tx_buffer = &txr->tx_buffers[i];
seg_addr = segs[j].ds_addr;
seg_len = segs[j].ds_len;
cmd = E1000_TXD_CMD_IFCS | sc->txd_cmd;
/*
- ** TSO Workaround:
- ** If this is the last descriptor, we want to
- ** split it so we have a small final sentinel
- */
+ * TSO Workaround:
+ * If this is the last descriptor, we want to
+ * split it so we have a small final sentinel
+ */
if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) {
seg_len -= TSO_WORKAROUND;
ctxd->buffer_addr = htole64(seg_addr);
ctxd->lower.data = htole32(cmd | txd_lower | seg_len);
ctxd->upper.data = htole32(txd_upper);
- if (++i == scctx->isc_ntxd[0])
+ if (++i == scctx->isc_ntxd[0])
i = 0;
/* Now make the sentinel */
ctxd = &txr->tx_base[i];
- tx_buffer = &txr->tx_buffers[i];
ctxd->buffer_addr = htole64(seg_addr + seg_len);
ctxd->lower.data = htole32(cmd | txd_lower | TSO_WORKAROUND);
ctxd->upper.data = htole32(txd_upper);
@@ -364,27 +393,28 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
i = 0;
DPRINTF(iflib_get_dev(sc->ctx), "pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]);
}
- tx_buffer->eop = -1;
}
/*
- * Last Descriptor of Packet
+ * Last Descriptor of Packet
* needs End Of Packet (EOP)
* and Report Status (RS)
- */
- ctxd->lower.data |=
- htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
-
- tx_buffer = &txr->tx_buffers[first];
- tx_buffer->eop = pidx_last;
+ */
+ if (txd_flags) {
+ txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
+ DPRINTF(iflib_get_dev(sc->ctx), "setting to RS on %d rs_pidx %d first: %d\n", pidx_last, txr->tx_rs_pidx, first);
+ txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & (ntxd-1);
+ MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
+ }
+ ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | txd_flags);
DPRINTF(iflib_get_dev(sc->ctx), "tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n", first, pidx_last, i);
pi->ipi_new_pidx = i;
- return (0);
+ return (0);
}
static void
-em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx)
+em_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
struct adapter *adapter = arg;
struct em_tx_queue *que = &adapter->tx_queues[txqid];
@@ -394,86 +424,72 @@ em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx)
}
static int
-em_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear)
+em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
struct adapter *adapter = arg;
if_softc_ctx_t scctx = adapter->shared;
struct em_tx_queue *que = &adapter->tx_queues[txqid];
struct tx_ring *txr = &que->txr;
- u32 cidx, processed = 0;
- int last, done;
- struct em_txbuffer *buf;
- struct e1000_tx_desc *tx_desc, *eop_desc;
-
- cidx = cidx_init;
- buf = &txr->tx_buffers[cidx];
- tx_desc = &txr->tx_base[cidx];
- last = buf->eop;
- if (last == -1)
- return (processed);
- eop_desc = &txr->tx_base[last];
-
- DPRINTF(iflib_get_dev(adapter->ctx),
- "credits_update: cidx_init=%d clear=%d last=%d\n",
- cidx_init, clear, last);
- /*
- * What this does is get the index of the
- * first descriptor AFTER the EOP of the
- * first packet, that way we can do the
- * simple comparison on the inner while loop.
- */
- if (++last == scctx->isc_ntxd[0])
- last = 0;
- done = last;
-
-
- while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
- /* We clean the range of the packet */
- while (cidx != done) {
- if (clear) {
- tx_desc->upper.data = 0;
- tx_desc->lower.data = 0;
- tx_desc->buffer_addr = 0;
- buf->eop = -1;
- }
- tx_desc++;
- buf++;
- processed++;
-
- /* wrap the ring ? */
- if (++cidx == scctx->isc_ntxd[0]) {
- cidx = 0;
- }
- buf = &txr->tx_buffers[cidx];
- tx_desc = &txr->tx_base[cidx];
- }
- /* See if we can continue to the next packet */
- last = buf->eop;
- if (last == -1)
+ qidx_t processed = 0;
+ int updated;
+ qidx_t cur, prev, ntxd, rs_cidx;
+ int32_t delta;
+ uint8_t status;
+
+ rs_cidx = txr->tx_rs_cidx;
+ if (rs_cidx == txr->tx_rs_pidx)
+ return (0);
+ cur = txr->tx_rsq[rs_cidx];
+ MPASS(cur != QIDX_INVALID);
+ status = txr->tx_base[cur].upper.fields.status;
+ updated = !!(status & E1000_TXD_STAT_DD);
+
+ if (clear == false || updated == 0)
+ return (updated);
+
+ prev = txr->tx_cidx_processed;
+ ntxd = scctx->isc_ntxd[0];
+ do {
+ delta = (int32_t)cur - (int32_t)prev;
+ MPASS(prev == 0 || delta != 0);
+ if (delta < 0)
+ delta += ntxd;
+ DPRINTF(iflib_get_dev(adapter->ctx),
+ "%s: cidx_processed=%u cur=%u clear=%d delta=%d\n",
+ __FUNCTION__, prev, cur, clear, delta);
+
+ processed += delta;
+ prev = cur;
+ rs_cidx = (rs_cidx + 1) & (ntxd-1);
+ if (rs_cidx == txr->tx_rs_pidx)
break;
- eop_desc = &txr->tx_base[last];
- /* Get new done point */
- if (++last == scctx->isc_ntxd[0])
- last = 0;
- done = last;
- }
+ cur = txr->tx_rsq[rs_cidx];
+ MPASS(cur != QIDX_INVALID);
+ status = txr->tx_base[cur].upper.fields.status;
+ } while ((status & E1000_TXD_STAT_DD));
- DPRINTF(iflib_get_dev(adapter->ctx), "Processed %d credits update\n", processed);
+ txr->tx_rs_cidx = rs_cidx;
+ txr->tx_cidx_processed = prev;
return(processed);
}
static void
-lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
- uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused)
+lem_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
- struct em_rx_queue *que = &sc->rx_queues[rxqid];
+ struct em_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
struct rx_ring *rxr = &que->rxr;
struct e1000_rx_desc *rxd;
+ uint64_t *paddrs;
+ uint32_t next_pidx, pidx;
+ uint16_t count;
int i;
- uint32_t next_pidx;
+
+ paddrs = iru->iru_paddrs;
+ pidx = iru->iru_pidx;
+ count = iru->iru_count;
for (i = 0, next_pidx = pidx; i < count; i++) {
rxd = (struct e1000_rx_desc *)&rxr->rx_base[next_pidx];
@@ -487,48 +503,60 @@ lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
}
static void
-em_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
- uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused)
+em_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
+ uint16_t rxqid = iru->iru_qsidx;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
struct rx_ring *rxr = &que->rxr;
union e1000_rx_desc_extended *rxd;
+ uint64_t *paddrs;
+ uint32_t next_pidx, pidx;
+ uint16_t count;
int i;
- uint32_t next_pidx;
+
+ paddrs = iru->iru_paddrs;
+ pidx = iru->iru_pidx;
+ count = iru->iru_count;
for (i = 0, next_pidx = pidx; i < count; i++) {
rxd = &rxr->rx_base[next_pidx];
rxd->read.buffer_addr = htole64(paddrs[i]);
/* DD bits must be cleared */
- rxd->wb.upper.status_error = 0;
-
+ rxd->wb.upper.status_error = 0;
+
if (++next_pidx == scctx->isc_nrxd[0])
next_pidx = 0;
}
}
static void
-em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx)
+em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
- struct adapter *sc = arg;
- struct em_rx_queue *que = &sc->rx_queues[rxqid];
- struct rx_ring *rxr = &que->rxr;
+ struct adapter *sc = arg;
+ struct em_rx_queue *que = &sc->rx_queues[rxqid];
+ struct rx_ring *rxr = &que->rxr;
- E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
+ E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}
static int
-lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
+lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
- struct adapter *sc = arg;
+ struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
- struct em_rx_queue *que = &sc->rx_queues[rxqid];
- struct rx_ring *rxr = &que->rxr;
+ struct em_rx_queue *que = &sc->rx_queues[rxqid];
+ struct rx_ring *rxr = &que->rxr;
struct e1000_rx_desc *rxd;
- u32 staterr = 0;
- int cnt, i;
+ u32 staterr = 0;
+ int cnt, i;
+
+ if (budget == 1) {
+ rxd = (struct e1000_rx_desc *)&rxr->rx_base[idx];
+ staterr = rxd->status;
+ return (staterr & E1000_RXD_STAT_DD);
+ }
for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
rxd = (struct e1000_rx_desc *)&rxr->rx_base[i];
@@ -547,15 +575,21 @@ lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
}
static int
-em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
+em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
- struct adapter *sc = arg;
+ struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
- struct em_rx_queue *que = &sc->rx_queues[rxqid];
- struct rx_ring *rxr = &que->rxr;
+ struct em_rx_queue *que = &sc->rx_queues[rxqid];
+ struct rx_ring *rxr = &que->rxr;
union e1000_rx_desc_extended *rxd;
- u32 staterr = 0;
- int cnt, i;
+ u32 staterr = 0;
+ int cnt, i;
+
+ if (budget == 1) {
+ rxd = &rxr->rx_base[idx];
+ staterr = le32toh(rxd->wb.upper.status_error);
+ return (staterr & E1000_RXD_STAT_DD);
+ }
for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
rxd = &rxr->rx_base[i];
@@ -578,15 +612,15 @@ em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
static int
lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
- struct adapter *adapter = arg;
- if_softc_ctx_t scctx = adapter->shared;
- struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
- struct rx_ring *rxr = &que->rxr;
+ struct adapter *adapter = arg;
+ if_softc_ctx_t scctx = adapter->shared;
+ struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
+ struct rx_ring *rxr = &que->rxr;
struct e1000_rx_desc *rxd;
- u16 len;
- u32 status, errors;
- bool eop;
- int i, cidx;
+ u16 len;
+ u32 status, errors;
+ bool eop;
+ int i, cidx;
status = errors = i = 0;
cidx = ri->iri_cidx;
@@ -639,31 +673,31 @@ lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
static int
em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
- struct adapter *adapter = arg;
- if_softc_ctx_t scctx = adapter->shared;
- struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
- struct rx_ring *rxr = &que->rxr;
+ struct adapter *adapter = arg;
+ if_softc_ctx_t scctx = adapter->shared;
+ struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
+ struct rx_ring *rxr = &que->rxr;
union e1000_rx_desc_extended *rxd;
- u16 len;
- u32 pkt_info;
- u32 staterr = 0;
- bool eop;
- int i, cidx, vtag;
+ u16 len;
+ u32 pkt_info;
+ u32 staterr = 0;
+ bool eop;
+ int i, cidx, vtag;
i = vtag = 0;
cidx = ri->iri_cidx;
do {
rxd = &rxr->rx_base[cidx];
- staterr = le32toh(rxd->wb.upper.status_error);
+ staterr = le32toh(rxd->wb.upper.status_error);
pkt_info = le32toh(rxd->wb.lower.mrq);
-
+
/* Error Checking then decrement count */
MPASS ((staterr & E1000_RXD_STAT_DD) != 0);
len = le16toh(rxd->wb.upper.length);
- ri->iri_len += len;
+ ri->iri_len += len;
eop = (staterr & E1000_RXD_STAT_EOP) != 0;
@@ -690,15 +724,14 @@ em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
if (staterr & E1000_RXD_STAT_VP) {
vtag = le16toh(rxd->wb.upper.vlan);
- }
-
+ }
+
ri->iri_vtag = vtag;
if (vtag)
ri->iri_flags |= M_VLANTAG;
-
- ri->iri_flowid =
- le32toh(rxd->wb.lower.hi_dword.rss);
- ri->iri_rsstype = em_determine_rsstype(pkt_info);
+
+ ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
+ ri->iri_rsstype = em_determine_rsstype(pkt_info);
ri->iri_nfrags = i;
return (0);
@@ -736,23 +769,24 @@ lem_receive_checksum(int status, int errors, if_rxd_info_t ri)
static int
em_determine_rsstype(u32 pkt_info)
{
- switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
- case E1000_RXDADV_RSSTYPE_IPV4_TCP:
- return M_HASHTYPE_RSS_TCP_IPV4;
- case E1000_RXDADV_RSSTYPE_IPV4:
- return M_HASHTYPE_RSS_IPV4;
- case E1000_RXDADV_RSSTYPE_IPV6_TCP:
- return M_HASHTYPE_RSS_TCP_IPV6;
- case E1000_RXDADV_RSSTYPE_IPV6_EX:
- return M_HASHTYPE_RSS_IPV6_EX;
- case E1000_RXDADV_RSSTYPE_IPV6:
- return M_HASHTYPE_RSS_IPV6;
- case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
- return M_HASHTYPE_RSS_TCP_IPV6_EX;
- default:
- return M_HASHTYPE_OPAQUE;
- }
+ switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
+ case E1000_RXDADV_RSSTYPE_IPV4_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV4;
+ case E1000_RXDADV_RSSTYPE_IPV4:
+ return M_HASHTYPE_RSS_IPV4;
+ case E1000_RXDADV_RSSTYPE_IPV6_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV6;
+ case E1000_RXDADV_RSSTYPE_IPV6_EX:
+ return M_HASHTYPE_RSS_IPV6_EX;
+ case E1000_RXDADV_RSSTYPE_IPV6:
+ return M_HASHTYPE_RSS_IPV6;
+ case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
+ return M_HASHTYPE_RSS_TCP_IPV6_EX;
+ default:
+ return M_HASHTYPE_OPAQUE;
+ }
}
+
static void
em_receive_checksum(uint32_t status, if_rxd_info_t ri)
{
@@ -764,7 +798,7 @@ em_receive_checksum(uint32_t status, if_rxd_info_t ri)
/* If the IP checksum exists and there is no IP Checksum error */
if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
- E1000_RXD_STAT_IPCS) {
+ E1000_RXD_STAT_IPCS) {
ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
}
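
[Annotation] The em_txrx.c rewrite above drops the per-buffer eop field in favor of a small index ring, tx_rsq: encap records each descriptor written with Report Status (RS), and the credits update walks only those descriptors' DD bits. A condensed restatement of the consumer side, under the same power-of-two ring assumption the diff's masking makes (xx_ is a placeholder prefix; when clear is false the real function just reports whether the head entry completed):

static int
xx_txd_credits_update(struct tx_ring *txr, qidx_t ntxd, bool clear)
{
	qidx_t rs_cidx = txr->tx_rs_cidx;
	qidx_t cur, prev = txr->tx_cidx_processed;
	int32_t delta;
	int processed = 0;

	while (rs_cidx != txr->tx_rs_pidx) {
		cur = txr->tx_rsq[rs_cidx];
		/* Stop at the first RS descriptor the NIC has not completed. */
		if (!(txr->tx_base[cur].upper.fields.status & E1000_TXD_STAT_DD))
			break;
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;		/* index wrapped past the ring end */
		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
	}
	if (clear && processed) {
		txr->tx_rs_cidx = rs_cidx;	/* consume the completed entries */
		txr->tx_cidx_processed = prev;
	}
	return (processed);
}
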
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 621f15cbe843..76c551be1407 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -50,113 +50,113 @@ char em_driver_version[] = "7.6.1-k";
static pci_vendor_info_t em_vendor_info_array[] =
{
/* Intel(R) PRO/1000 Network Connection - Legacy em*/
- PVID(0x8086, E1000_DEV_ID_82540EM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82540EM_LOM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82540EP, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82540EP_LOM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82540EP_LP, "Intel(R) PRO/1000 Network Connection"),
-
- PVID(0x8086, E1000_DEV_ID_82541EI, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82541ER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82541ER_LOM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82541EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82541GI, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82541GI_LF, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82541GI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
-
- PVID(0x8086, E1000_DEV_ID_82542, "Intel(R) PRO/1000 Network Connection"),
-
- PVID(0x8086, E1000_DEV_ID_82543GC_FIBER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82543GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
-
- PVID(0x8086, E1000_DEV_ID_82544EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82544EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82544GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82544GC_LOM, "Intel(R) PRO/1000 Network Connection"),
-
- PVID(0x8086, E1000_DEV_ID_82545EM_COPPER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82545EM_FIBER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82545GM_COPPER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82545GM_FIBER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82545GM_SERDES, "Intel(R) PRO/1000 Network Connection"),
-
- PVID(0x8086, E1000_DEV_ID_82546EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82546EB_FIBER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82546GB_COPPER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82546GB_FIBER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82546GB_SERDES, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82546GB_PCIE, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3, "Intel(R) PRO/1000 Network Connection"),
-
- PVID(0x8086, E1000_DEV_ID_82547EI, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82547EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82547GI, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82540EM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82540EM_LOM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82540EP, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82540EP_LOM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82540EP_LP, "Intel(R) PRO/1000 Network Connection"),
+
+ PVID(0x8086, E1000_DEV_ID_82541EI, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82541ER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82541ER_LOM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82541EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82541GI, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82541GI_LF, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82541GI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
+
+ PVID(0x8086, E1000_DEV_ID_82542, "Intel(R) PRO/1000 Network Connection"),
+
+ PVID(0x8086, E1000_DEV_ID_82543GC_FIBER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82543GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
+
+ PVID(0x8086, E1000_DEV_ID_82544EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82544EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82544GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82544GC_LOM, "Intel(R) PRO/1000 Network Connection"),
+
+ PVID(0x8086, E1000_DEV_ID_82545EM_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82545EM_FIBER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82545GM_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82545GM_FIBER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82545GM_SERDES, "Intel(R) PRO/1000 Network Connection"),
+
+ PVID(0x8086, E1000_DEV_ID_82546EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82546EB_FIBER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_FIBER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_SERDES, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_PCIE, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3, "Intel(R) PRO/1000 Network Connection"),
+
+ PVID(0x8086, E1000_DEV_ID_82547EI, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82547EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82547GI, "Intel(R) PRO/1000 Network Connection"),
/* Intel(R) PRO/1000 Network Connection - em */
- PVID(0x8086, E1000_DEV_ID_82571EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_FIBER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82571EB_SERDES, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_SERDES, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82572EI, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82572EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82572EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82572EI_SERDES, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82573E, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82573E_IAMT, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82573L, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82583V, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82572EI, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82572EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82572EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82572EI_SERDES, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82573E, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82573E_IAMT, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82573L, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82583V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH8_IGP_AMT, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH8_IGP_C, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH8_IFE, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH8_IFE_GT, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH8_IFE_G, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH8_82567V_3, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH9_IGP_AMT, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH9_IGP_C, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_V, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH9_IFE, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH9_IFE_GT, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH9_IFE_G, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH9_BM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82574L, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_82574LA, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LF, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_V, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LF, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_V, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LC, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DC, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH2_LV_LM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH2_LV_V, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_V, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH8_IGP_AMT, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH8_IGP_C, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH8_IFE, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH8_IFE_GT, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH8_IFE_G, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH8_82567V_3, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_IGP_AMT, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_IGP_C, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_V, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_IFE, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_IFE_GT, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_IFE_G, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_BM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82574L, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_82574LA, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LF, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_V, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LF, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_V, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LC, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DC, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH2_LV_LM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH2_LV_V, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_V, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_I218_LM2, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_I218_V2, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_I218_LM3, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_I218_V3, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_I218_LM2, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_I218_V2, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_I218_LM3, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_I218_V3, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM, "Intel(R) PRO/1000 Network Connection"),
- PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V, "Intel(R) PRO/1000 Network Connection"),
+ PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3, "Intel(R) PRO/1000 Network Connection"),
@@ -170,7 +170,7 @@ static pci_vendor_info_t em_vendor_info_array[] =
static pci_vendor_info_t igb_vendor_info_array[] =
{
- /* Intel(R) PRO/1000 Network Connection - em */
+ /* Intel(R) PRO/1000 Network Connection - igb */
PVID(0x8086, E1000_DEV_ID_82575EB_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
@@ -217,51 +217,54 @@ static pci_vendor_info_t igb_vendor_info_array[] =
/*********************************************************************
* Function prototypes
*********************************************************************/
-static void *em_register(device_t dev);
-static void *igb_register(device_t dev);
+static void *em_register(device_t dev);
+static void *igb_register(device_t dev);
static int em_if_attach_pre(if_ctx_t ctx);
static int em_if_attach_post(if_ctx_t ctx);
static int em_if_detach(if_ctx_t ctx);
static int em_if_shutdown(if_ctx_t ctx);
static int em_if_suspend(if_ctx_t ctx);
-static int em_if_resume(if_ctx_t ctx);
+static int em_if_resume(if_ctx_t ctx);
-static int em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
-static int em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets);
-static void em_if_queues_free(if_ctx_t ctx);
+static int em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
+static int em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets);
+static void em_if_queues_free(if_ctx_t ctx);
static uint64_t em_if_get_counter(if_ctx_t, ift_counter);
static void em_if_init(if_ctx_t ctx);
static void em_if_stop(if_ctx_t ctx);
static void em_if_media_status(if_ctx_t, struct ifmediareq *);
static int em_if_media_change(if_ctx_t ctx);
-static int em_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
-static void em_if_timer(if_ctx_t ctx, uint16_t qid);
-static void em_if_vlan_register(if_ctx_t ctx, u16 vtag);
-static void em_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
+static int em_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
+static void em_if_timer(if_ctx_t ctx, uint16_t qid);
+static void em_if_vlan_register(if_ctx_t ctx, u16 vtag);
+static void em_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static void em_identify_hardware(if_ctx_t ctx);
-static int em_allocate_pci_resources(if_ctx_t ctx);
-static void em_free_pci_resources(if_ctx_t ctx);
+static int em_allocate_pci_resources(if_ctx_t ctx);
+static void em_free_pci_resources(if_ctx_t ctx);
static void em_reset(if_ctx_t ctx);
static int em_setup_interface(if_ctx_t ctx);
-static int em_setup_msix(if_ctx_t ctx);
+static int em_setup_msix(if_ctx_t ctx);
static void em_initialize_transmit_unit(if_ctx_t ctx);
static void em_initialize_receive_unit(if_ctx_t ctx);
-static void em_if_enable_intr(if_ctx_t ctx);
+static void em_if_enable_intr(if_ctx_t ctx);
static void em_if_disable_intr(if_ctx_t ctx);
-static int em_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
-static void em_if_multi_set(if_ctx_t ctx);
-static void em_if_update_admin_status(if_ctx_t ctx);
+static int em_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
+static int em_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
+static void em_if_multi_set(if_ctx_t ctx);
+static void em_if_update_admin_status(if_ctx_t ctx);
+static void em_if_debug(if_ctx_t ctx);
static void em_update_stats_counters(struct adapter *);
static void em_add_hw_stats(struct adapter *adapter);
-static int em_if_set_promisc(if_ctx_t ctx, int flags);
+static int em_if_set_promisc(if_ctx_t ctx, int flags);
static void em_setup_vlan_hw_support(struct adapter *);
static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void em_print_nvm_info(struct adapter *);
static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
+static int em_get_rs(SYSCTL_HANDLER_ARGS);
static void em_print_debug_info(struct adapter *);
static int em_is_valid_ether_addr(u8 *);
static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
@@ -270,18 +273,18 @@ static void em_add_int_delay_sysctl(struct adapter *, const char *,
/* Management and WOL Support */
static void em_init_manageability(struct adapter *);
static void em_release_manageability(struct adapter *);
-static void em_get_hw_control(struct adapter *);
-static void em_release_hw_control(struct adapter *);
+static void em_get_hw_control(struct adapter *);
+static void em_release_hw_control(struct adapter *);
static void em_get_wakeup(if_ctx_t ctx);
-static void em_enable_wakeup(if_ctx_t ctx);
+static void em_enable_wakeup(if_ctx_t ctx);
static int em_enable_phy_wakeup(struct adapter *);
static void em_disable_aspm(struct adapter *);
-int em_intr(void *arg);
-static void em_disable_promisc(if_ctx_t ctx);
+int em_intr(void *arg);
+static void em_disable_promisc(if_ctx_t ctx);
/* MSIX handlers */
-static int em_if_msix_intr_assign(if_ctx_t, int);
+static int em_if_msix_intr_assign(if_ctx_t, int);
static int em_msix_link(void *);
static void em_handle_link(void *context);
@@ -291,10 +294,9 @@ static void em_set_sysctl_value(struct adapter *, const char *,
const char *, int *, int);
static int em_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int em_sysctl_eee(SYSCTL_HANDLER_ARGS);
-static void em_if_led_func(if_ctx_t ctx, int onoff);
+static void em_if_led_func(if_ctx_t ctx, int onoff);
-static void em_init_tx_ring(struct em_tx_queue *que);
-static int em_get_regs(SYSCTL_HANDLER_ARGS);
+static int em_get_regs(SYSCTL_HANDLER_ARGS);
static void lem_smartspeed(struct adapter *adapter);
static void igb_configure_queues(struct adapter *adapter);
@@ -305,26 +307,26 @@ static void igb_configure_queues(struct adapter *adapter);
*********************************************************************/
static device_method_t em_methods[] = {
/* Device interface */
- DEVMETHOD(device_register, em_register),
- DEVMETHOD(device_probe, iflib_device_probe),
- DEVMETHOD(device_attach, iflib_device_attach),
- DEVMETHOD(device_detach, iflib_device_detach),
- DEVMETHOD(device_shutdown, iflib_device_shutdown),
- DEVMETHOD(device_suspend, iflib_device_suspend),
- DEVMETHOD(device_resume, iflib_device_resume),
- DEVMETHOD_END
+ DEVMETHOD(device_register, em_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_shutdown),
+ DEVMETHOD(device_suspend, iflib_device_suspend),
+ DEVMETHOD(device_resume, iflib_device_resume),
+ DEVMETHOD_END
};
static device_method_t igb_methods[] = {
/* Device interface */
- DEVMETHOD(device_register, igb_register),
- DEVMETHOD(device_probe, iflib_device_probe),
- DEVMETHOD(device_attach, iflib_device_attach),
- DEVMETHOD(device_detach, iflib_device_detach),
- DEVMETHOD(device_shutdown, iflib_device_shutdown),
- DEVMETHOD(device_suspend, iflib_device_suspend),
- DEVMETHOD(device_resume, iflib_device_resume),
- DEVMETHOD_END
+ DEVMETHOD(device_register, igb_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_shutdown),
+ DEVMETHOD(device_suspend, iflib_device_suspend),
+ DEVMETHOD(device_resume, iflib_device_resume),
+ DEVMETHOD_END
};
@@ -352,8 +354,8 @@ MODULE_DEPEND(igb, iflib, 1, 1, 1);
static device_method_t em_if_methods[] = {
- DEVMETHOD(ifdi_attach_pre, em_if_attach_pre),
- DEVMETHOD(ifdi_attach_post, em_if_attach_post),
+ DEVMETHOD(ifdi_attach_pre, em_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, em_if_attach_post),
DEVMETHOD(ifdi_detach, em_if_detach),
DEVMETHOD(ifdi_shutdown, em_if_shutdown),
DEVMETHOD(ifdi_suspend, em_if_suspend),
@@ -366,7 +368,7 @@ static device_method_t em_if_methods[] = {
DEVMETHOD(ifdi_tx_queues_alloc, em_if_tx_queues_alloc),
DEVMETHOD(ifdi_rx_queues_alloc, em_if_rx_queues_alloc),
DEVMETHOD(ifdi_queues_free, em_if_queues_free),
- DEVMETHOD(ifdi_update_admin_status, em_if_update_admin_status),
+ DEVMETHOD(ifdi_update_admin_status, em_if_update_admin_status),
DEVMETHOD(ifdi_multi_set, em_if_multi_set),
DEVMETHOD(ifdi_media_status, em_if_media_status),
DEVMETHOD(ifdi_media_change, em_if_media_change),
@@ -377,16 +379,18 @@ static device_method_t em_if_methods[] = {
DEVMETHOD(ifdi_vlan_unregister, em_if_vlan_unregister),
DEVMETHOD(ifdi_get_counter, em_if_get_counter),
DEVMETHOD(ifdi_led_func, em_if_led_func),
- DEVMETHOD(ifdi_queue_intr_enable, em_if_queue_intr_enable),
+ DEVMETHOD(ifdi_rx_queue_intr_enable, em_if_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queue_intr_enable, em_if_tx_queue_intr_enable),
+ DEVMETHOD(ifdi_debug, em_if_debug),
DEVMETHOD_END
};
- /*
+/*
* note that if (adapter->msix_mem) is replaced by:
* if (adapter->intr_type == IFLIB_INTR_MSIX)
*/
static driver_t em_if_driver = {
- "em_if", em_if_methods, sizeof(struct adapter)
+ "em_if", em_if_methods, sizeof(struct adapter)
};
/*********************************************************************
@@ -467,7 +471,7 @@ extern struct if_txrx em_txrx;
extern struct if_txrx lem_txrx;
static struct if_shared_ctx em_sctx_init = {
- .isc_magic = IFLIB_MAGIC,
+ .isc_magic = IFLIB_MAGIC,
.isc_q_align = PAGE_SIZE,
.isc_tx_maxsize = EM_TSO_SIZE,
.isc_tx_maxsegsize = PAGE_SIZE,
@@ -490,12 +494,12 @@ static struct if_shared_ctx em_sctx_init = {
.isc_nrxd_default = {EM_DEFAULT_RXD},
.isc_ntxd_default = {EM_DEFAULT_TXD},
};
-
+
if_shared_ctx_t em_sctx = &em_sctx_init;
static struct if_shared_ctx igb_sctx_init = {
- .isc_magic = IFLIB_MAGIC,
+ .isc_magic = IFLIB_MAGIC,
.isc_q_align = PAGE_SIZE,
.isc_tx_maxsize = EM_TSO_SIZE,
.isc_tx_maxsegsize = PAGE_SIZE,
@@ -518,7 +522,7 @@ static struct if_shared_ctx igb_sctx_init = {
.isc_nrxd_default = {EM_DEFAULT_RXD},
.isc_ntxd_default = {EM_DEFAULT_TXD},
};
-
+
if_shared_ctx_t igb_sctx = &igb_sctx_init;
/*****************************************************************
@@ -542,7 +546,7 @@ static int em_get_regs(SYSCTL_HANDLER_ARGS)
rc = sysctl_wire_old_buffer(req, 0);
MPASS(rc == 0);
if (rc != 0)
- return (rc);
+ return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
MPASS(sb != NULL);
@@ -572,36 +576,36 @@ static int em_get_regs(SYSCTL_HANDLER_ARGS)
regs_buff[19] = E1000_READ_REG(hw, E1000_TDFT);
regs_buff[20] = E1000_READ_REG(hw, E1000_TDFHS);
regs_buff[21] = E1000_READ_REG(hw, E1000_TDFPC);
-
+
sbuf_printf(sb, "General Registers\n");
- sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
+ sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
sbuf_printf(sb, "\tCTRL_EXIT\t %08x\n\n", regs_buff[2]);
sbuf_printf(sb, "Interrupt Registers\n");
- sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);
-
+ sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);
+
sbuf_printf(sb, "RX Registers\n");
- sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
+ sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
- sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
+ sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);
sbuf_printf(sb, "TX Registers\n");
- sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
+ sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
- sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
+ sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]);
- sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
+ sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]);
sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]);
- sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]);
+ sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]);
#ifdef DUMP_DESCS
{
@@ -619,7 +623,6 @@ static int em_get_regs(SYSCTL_HANDLER_ARGS)
}
for (j = 0; j < min(ntxd, 256); j++) {
- struct em_txbuffer *buf = &txr->tx_buffers[j];
unsigned int *ptr = (unsigned int *)&txr->tx_base[j];
sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n",
@@ -628,37 +631,23 @@ static int em_get_regs(SYSCTL_HANDLER_ARGS)
}
}
-#endif
-
- rc = sbuf_finish(sb);
+#endif
+
+ rc = sbuf_finish(sb);
sbuf_delete(sb);
- return(rc);
+ return(rc);
}
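
em_get_regs streams its register dump through an sbuf that the sysctl layer wires back to userland. A rough userland analog of the same accumulate-then-emit pattern, with snprintf standing in for sbuf_printf and made-up sample register values (illustrative only):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Stand-ins for values read with E1000_READ_REG(). */
	uint32_t regs_buff[4] = { 0x58100248, 0x80080783, 0x001505f1, 0 };
	const char *names[4] = { "CTRL", "STATUS", "CTRL_EXT", "ICR" };
	char sb[512];
	size_t off = 0;

	off += snprintf(sb + off, sizeof(sb) - off, "General Registers\n");
	for (int i = 0; i < 4; i++)
		off += snprintf(sb + off, sizeof(sb) - off,
		    "\t%s\t %08x\n", names[i], regs_buff[i]);
	fputs(sb, stdout);	/* sbuf_finish() analog: hand the buffer off */
	return (0);
}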
static void *
em_register(device_t dev)
{
- return (em_sctx);
+ return (em_sctx);
}
static void *
igb_register(device_t dev)
{
- return (igb_sctx);
-}
-
-static void
-em_init_tx_ring(struct em_tx_queue *que)
-{
- struct adapter *sc = que->adapter;
- if_softc_ctx_t scctx = sc->shared;
- struct tx_ring *txr = &que->txr;
- struct em_txbuffer *tx_buffer;
-
- tx_buffer = txr->tx_buffers;
- for (int i = 0; i < scctx->isc_ntxd[0]; i++, tx_buffer++) {
- tx_buffer->eop = -1;
- }
+ return (igb_sctx);
}
static int
@@ -669,35 +658,35 @@ em_set_num_queues(if_ctx_t ctx)
/* Sanity check based on HW */
switch (adapter->hw.mac.type) {
- case e1000_82576:
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- maxqueues = 8;
- break;
- case e1000_i210:
- case e1000_82575:
- maxqueues = 4;
- break;
- case e1000_i211:
- case e1000_82574:
- maxqueues = 2;
- break;
- default:
- maxqueues = 1;
- break;
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ maxqueues = 8;
+ break;
+ case e1000_i210:
+ case e1000_82575:
+ maxqueues = 4;
+ break;
+ case e1000_i211:
+ case e1000_82574:
+ maxqueues = 2;
+ break;
+ default:
+ maxqueues = 1;
+ break;
}
return (maxqueues);
}
-#define EM_CAPS \
+#define EM_CAPS \
IFCAP_TSO4 | IFCAP_TXCSUM | IFCAP_LRO | IFCAP_RXCSUM | IFCAP_VLAN_HWFILTER | IFCAP_WOL_MAGIC | \
IFCAP_WOL_MCAST | IFCAP_WOL | IFCAP_VLAN_HWTSO | IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | \
IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU;
-#define IGB_CAPS \
+#define IGB_CAPS \
IFCAP_TSO4 | IFCAP_TXCSUM | IFCAP_LRO | IFCAP_RXCSUM | IFCAP_VLAN_HWFILTER | IFCAP_WOL_MAGIC | \
IFCAP_WOL_MCAST | IFCAP_WOL | IFCAP_VLAN_HWTSO | IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \
IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU | IFCAP_TXCSUM_IPV6 | IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU;
@@ -713,16 +702,16 @@ em_set_num_queues(if_ctx_t ctx)
*********************************************************************/
static int
-em_if_attach_pre(if_ctx_t ctx)
+em_if_attach_pre(if_ctx_t ctx)
{
- struct adapter *adapter;
+ struct adapter *adapter;
if_softc_ctx_t scctx;
- device_t dev;
- struct e1000_hw *hw;
- int error = 0;
+ device_t dev;
+ struct e1000_hw *hw;
+ int error = 0;
INIT_DEBUGOUT("em_if_attach_pre begin");
- dev = iflib_get_dev(ctx);
+ dev = iflib_get_dev(ctx);
adapter = iflib_get_softc(ctx);
if (resource_disabled("em", device_get_unit(dev))) {
@@ -734,7 +723,7 @@ em_if_attach_pre(if_ctx_t ctx)
adapter->dev = adapter->osdep.dev = dev;
scctx = adapter->shared = iflib_get_softc_ctx(ctx);
adapter->media = iflib_get_media(ctx);
- hw = &adapter->hw;
+ hw = &adapter->hw;
adapter->tx_process_limit = scctx->isc_ntxd[0];
@@ -757,12 +746,17 @@ em_if_attach_pre(if_ctx_t ctx)
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "reg_dump", CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
- em_get_regs, "A", "Dump Registers");
+ em_get_regs, "A", "Dump Registers");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "rs_dump", CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
+ em_get_rs, "I", "Dump RS indexes");
/* Determine hardware and mac info */
em_identify_hardware(ctx);
- /* Set isc_msix_bar */
+ /* Set isc_msix_bar */
scctx->isc_msix_bar = PCIR_BAR(EM_MSIX_BAR);
scctx->isc_tx_nsegments = EM_MAX_SCATTER;
scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
@@ -779,6 +773,8 @@ em_if_attach_pre(if_ctx_t ctx)
scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union e1000_adv_tx_desc), EM_DBA_ALIGN);
scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_adv_rx_desc), EM_DBA_ALIGN);
+ scctx->isc_txd_size[0] = sizeof(union e1000_adv_tx_desc);
+ scctx->isc_rxd_size[0] = sizeof(union e1000_adv_rx_desc);
scctx->isc_txrx = &igb_txrx;
scctx->isc_capenable = IGB_CAPS;
scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_IP6_TCP \
@@ -798,12 +794,16 @@ em_if_attach_pre(if_ctx_t ctx)
} else if (adapter->hw.mac.type >= em_mac_min) {
scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]* sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
+ scctx->isc_txd_size[0] = sizeof(struct e1000_tx_desc);
+ scctx->isc_rxd_size[0] = sizeof(union e1000_rx_desc_extended);
scctx->isc_txrx = &em_txrx;
scctx->isc_capenable = EM_CAPS;
scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO;
} else {
scctx->isc_txqsizes[0] = roundup2((scctx->isc_ntxd[0] + 1) * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
scctx->isc_rxqsizes[0] = roundup2((scctx->isc_nrxd[0] + 1) * sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
+ scctx->isc_txd_size[0] = sizeof(struct e1000_tx_desc);
+ scctx->isc_rxd_size[0] = sizeof(struct e1000_rx_desc);
scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO;
scctx->isc_txrx = &lem_txrx;
scctx->isc_capenable = EM_CAPS;
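
The isc_txqsizes/isc_rxqsizes assignments above are simply the descriptor count times the per-descriptor size, rounded up to the DMA alignment. A standalone check of that arithmetic, assuming the 1024-descriptor default, 16-byte legacy TX descriptors, and EM_DBA_ALIGN of 128:

#include <stdio.h>
#include <stdint.h>

/* Power-of-two round-up, as in sys/param.h. */
#define roundup2(x, y)	(((x) + ((y) - 1)) & (~((y) - 1)))

int
main(void)
{
	uint32_t ntxd = 1024;	/* EM_DEFAULT_TXD */
	uint32_t desc_sz = 16;	/* sizeof(struct e1000_tx_desc) */
	uint32_t align = 128;	/* EM_DBA_ALIGN */

	printf("isc_txqsizes[0] = %u\n", roundup2(ntxd * desc_sz, align));
	/* 1024 * 16 = 16384, already 128-aligned, so 16384. */
	return (0);
}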
@@ -823,7 +823,7 @@ em_if_attach_pre(if_ctx_t ctx)
/*
** For ICH8 and family we need to
** map the flash memory, and this
- ** must happen after the MAC is
+ ** must happen after the MAC is
** identified
*/
if ((hw->mac.type == e1000_ich8lan) ||
@@ -900,7 +900,7 @@ em_if_attach_pre(if_ctx_t ctx)
em_set_sysctl_value(adapter, "rx_processing_limit",
"max number of rx packets to process", &adapter->rx_process_limit,
em_rx_process_limit);
-
+
hw->mac.autoneg = DO_AUTO_NEG;
hw->phy.autoneg_wait_to_complete = FALSE;
hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
@@ -941,7 +941,7 @@ em_if_attach_pre(if_ctx_t ctx)
/* Check SOL/IDER usage */
if (e1000_check_reset_block(hw))
device_printf(dev, "PHY reset is blocked"
- " due to SOL/IDER session.\n");
+ " due to SOL/IDER session.\n");
/* Sysctl for setting Energy Efficient Ethernet */
hw->dev_spec.ich8lan.eee_disable = eee_setting;
@@ -976,7 +976,7 @@ em_if_attach_pre(if_ctx_t ctx)
/* Copy the permanent MAC address out of the EEPROM */
if (e1000_read_mac_addr(hw) < 0) {
device_printf(dev, "EEPROM read error while reading MAC"
- " address\n");
+ " address\n");
error = EIO;
goto err_late;
}
@@ -990,7 +990,7 @@ em_if_attach_pre(if_ctx_t ctx)
/* Disable ULP support */
e1000_disable_ulp_lpt_lp(hw, TRUE);
- /*
+ /*
* Get Wake-on-Lan and Management info for later use
*/
em_get_wakeup(ctx);
@@ -1011,9 +1011,9 @@ err_pci:
static int
em_if_attach_post(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
struct e1000_hw *hw = &adapter->hw;
- int error = 0;
+ int error = 0;
/* Setup OS specific network interface */
error = em_setup_interface(ctx);
@@ -1092,7 +1092,7 @@ em_if_suspend(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
- em_release_manageability(adapter);
+ em_release_manageability(adapter);
em_release_hw_control(adapter);
em_enable_wakeup(ctx);
return (0);
@@ -1108,49 +1108,54 @@ em_if_resume(if_ctx_t ctx)
em_if_init(ctx);
em_init_manageability(adapter);
- return(0);
+ return(0);
}
static int
em_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
- int max_frame_size;
- struct adapter *adapter = iflib_get_softc(ctx);
- struct ifnet *ifp = iflib_get_ifp(ctx);
- if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
-
- IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
-
- switch (adapter->hw.mac.type) {
- case e1000_82571:
- case e1000_82572:
- case e1000_ich9lan:
- case e1000_ich10lan:
- case e1000_pch2lan:
- case e1000_pch_lpt:
- case e1000_pch_spt:
- case e1000_82574:
- case e1000_82583:
- case e1000_80003es2lan: /* 9K Jumbo Frame size */
- max_frame_size = 9234;
- break;
- case e1000_pchlan:
- max_frame_size = 4096;
- break;
- /* Adapters that do not support jumbo frames */
- case e1000_ich8lan:
- max_frame_size = ETHER_MAX_LEN;
- break;
- default:
- max_frame_size = MAX_JUMBO_FRAME_SIZE;
- }
- if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
- return (EINVAL);
- }
-
- scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
- if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
- return (0);
+ int max_frame_size;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
+
+ IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_82574:
+ case e1000_82583:
+ case e1000_80003es2lan:
+ /* 9K Jumbo Frame size */
+ max_frame_size = 9234;
+ break;
+ case e1000_pchlan:
+ max_frame_size = 4096;
+ break;
+ case e1000_82542:
+ case e1000_ich8lan:
+ /* Adapters that do not support jumbo frames */
+ max_frame_size = ETHER_MAX_LEN;
+ break;
+ default:
+ if (adapter->hw.mac.type >= igb_mac_min)
+ max_frame_size = 9234;
+ else /* lem */
+ max_frame_size = MAX_JUMBO_FRAME_SIZE;
+ }
+ if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
+ return (EINVAL);
+ }
+
+ scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
+ if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ return (0);
}
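
The reworked em_if_mtu_set derives the acceptable MTU from each MAC's frame limit by subtracting the Ethernet header and CRC. The arithmetic, spelled out for the three limits used above:

#include <stdio.h>

#define ETHER_HDR_LEN	14
#define ETHER_CRC_LEN	4

int
main(void)
{
	/* 9K jumbo parts, pchlan, and ETHER_MAX_LEN respectively. */
	int limits[] = { 9234, 4096, 1518 };

	for (int i = 0; i < 3; i++)
		printf("max_frame_size %d -> largest MTU %d\n",
		    limits[i], limits[i] - ETHER_HDR_LEN - ETHER_CRC_LEN);
	/* Prints 9216, 4078 and 1500. */
	return (0);
}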
/*********************************************************************
@@ -1167,14 +1172,15 @@ em_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
static void
em_if_init(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
- struct ifnet *ifp = iflib_get_ifp(ctx);
-
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct em_tx_queue *tx_que;
+ int i;
INIT_DEBUGOUT("em_if_init: begin");
/* Get the latest mac address, User can use a LAA */
- bcopy(if_getlladdr(ifp), adapter->hw.mac.addr,
- ETHER_ADDR_LEN);
+ bcopy(if_getlladdr(ifp), adapter->hw.mac.addr,
+ ETHER_ADDR_LEN);
/* Put the address into the Receive Address Array */
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
@@ -1191,10 +1197,17 @@ em_if_init(if_ctx_t ctx)
E1000_RAR_ENTRIES - 1);
}
+
/* Initialize the hardware */
em_reset(ctx);
em_if_update_admin_status(ctx);
+ for (i = 0, tx_que = adapter->tx_queues; i < adapter->tx_num_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+
+ txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
+ }
+
/* Setup VLAN support, basic and offload if available */
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
@@ -1212,16 +1225,20 @@ em_if_init(if_ctx_t ctx)
em_if_multi_set(ctx);
/*
- ** Figure out the desired mbuf
- ** pool for doing jumbos
- */
+ * Figure out the desired mbuf
+ * pool for doing jumbos
+ */
if (adapter->hw.mac.max_frame_size <= 2048)
adapter->rx_mbuf_sz = MCLBYTES;
+#ifndef CONTIGMALLOC_WORKS
+ else
+ adapter->rx_mbuf_sz = MJUMPAGESIZE;
+#else
else if (adapter->hw.mac.max_frame_size <= 4096)
adapter->rx_mbuf_sz = MJUMPAGESIZE;
else
adapter->rx_mbuf_sz = MJUM9BYTES;
-
+#endif
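
Without CONTIGMALLOC_WORKS the new branch above caps jumbo receive buffers at page-sized clusters, avoiding multi-page contiguous allocations; with it, 9K clusters remain possible. A sketch of the selection logic with the usual FreeBSD cluster sizes plugged in (a 4K page is assumed):

#include <stdio.h>

#define MCLBYTES	2048
#define MJUMPAGESIZE	4096	/* one page on amd64 */
#define MJUM9BYTES	(9 * 1024)

static int
rx_mbuf_sz(int max_frame_size, int contigmalloc_works)
{
	if (max_frame_size <= 2048)
		return (MCLBYTES);
	if (!contigmalloc_works)
		return (MJUMPAGESIZE);	/* never hand out multi-page clusters */
	if (max_frame_size <= 4096)
		return (MJUMPAGESIZE);
	return (MJUM9BYTES);
}

int
main(void)
{
	printf("9018-byte frames, no contigmalloc: %d\n", rx_mbuf_sz(9018, 0));
	printf("9018-byte frames, contigmalloc ok: %d\n", rx_mbuf_sz(9018, 1));
	return (0);
}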
em_initialize_receive_unit(ctx);
/* Use real VLAN Filter support? */
@@ -1272,25 +1289,25 @@ em_if_init(if_ctx_t ctx)
/*********************************************************************
*
- * Fast Legacy/MSI Combined Interrupt Service routine
+ * Fast Legacy/MSI Combined Interrupt Service routine
*
*********************************************************************/
int
em_intr(void *arg)
{
- struct adapter *adapter = arg;
+ struct adapter *adapter = arg;
if_ctx_t ctx = adapter->ctx;
- u32 reg_icr;
+ u32 reg_icr;
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
if (adapter->intr_type != IFLIB_INTR_LEGACY)
goto skip_stray;
- /* Hot eject? */
+ /* Hot eject? */
if (reg_icr == 0xffffffff)
return FILTER_STRAY;
- /* Definitely not our interrupt. */
+ /* Definitely not our interrupt. */
if (reg_icr == 0x0)
return FILTER_STRAY;
@@ -1302,7 +1319,7 @@ em_intr(void *arg)
(reg_icr & E1000_ICR_INT_ASSERTED) == 0)
return FILTER_STRAY;
-skip_stray:
+skip_stray:
/* Link status change */
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
adapter->hw.mac.get_link_status = 1;
@@ -1312,31 +1329,56 @@ skip_stray:
if (reg_icr & E1000_ICR_RXO)
adapter->rx_overruns++;
- return (FILTER_SCHEDULE_THREAD);
+ return (FILTER_SCHEDULE_THREAD);
}
static void
-igb_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq)
+igb_rx_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq)
{
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxq->eims);
}
static void
-em_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq)
+em_rx_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq)
{
E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxq->eims);
}
+static void
+igb_tx_enable_queue(struct adapter *adapter, struct em_tx_queue *txq)
+{
+ E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txq->eims);
+}
+
+static void
+em_tx_enable_queue(struct adapter *adapter, struct em_tx_queue *txq)
+{
+ E1000_WRITE_REG(&adapter->hw, E1000_IMS, txq->eims);
+}
+
static int
-em_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
+em_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
struct em_rx_queue *rxq = &adapter->rx_queues[rxqid];
-
+
+ if (adapter->hw.mac.type >= igb_mac_min)
+ igb_rx_enable_queue(adapter, rxq);
+ else
+ em_rx_enable_queue(adapter, rxq);
+ return (0);
+}
+
+static int
+em_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct em_tx_queue *txq = &adapter->tx_queues[txqid];
+
if (adapter->hw.mac.type >= igb_mac_min)
- igb_enable_queue(adapter, rxq);
+ igb_tx_enable_queue(adapter, txq);
else
- em_enable_queue(adapter, rxq);
+ em_tx_enable_queue(adapter, txq);
return (0);
}
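
The two new enable paths differ only in the register written: igb-class MACs take the queue's bit in EIMS, older em-class parts in IMS, with the right bit cached in the queue's eims field either way. The 82574 bit layout that the interrupt-assignment hunk below relies on (RX queues own IMS bits 20/21, TX queues 22/23, while igb parts use one EIMS bit per vector) works out as follows:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* 82574: RX queues own IMS bits 20/21, TX queues bits 22/23. */
	for (int i = 0; i < 2; i++)
		printf("rxq%d ims bit = 0x%08x\n", i, (uint32_t)1 << (20 + i));
	for (int i = 0; i < 2; i++)
		printf("txq%d ims bit = 0x%08x\n", i, (uint32_t)1 << (22 + i));
	/* igb-class parts instead take one EIMS bit per MSI-X vector. */
	for (int v = 0; v < 2; v++)
		printf("vector %d eims = 0x%08x\n", v, (uint32_t)1 << v);
	return (0);
}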
@@ -1349,10 +1391,10 @@ static int
em_msix_que(void *arg)
{
struct em_rx_queue *que = arg;
-
+
++que->irqs;
-
- return (FILTER_SCHEDULE_THREAD);
+
+ return (FILTER_SCHEDULE_THREAD);
}
/*********************************************************************
@@ -1363,8 +1405,8 @@ em_msix_que(void *arg)
static int
em_msix_link(void *arg)
{
- struct adapter *adapter = arg;
- u32 reg_icr;
+ struct adapter *adapter = arg;
+ u32 reg_icr;
++adapter->link_irq;
MPASS(adapter->hw.back != NULL);
@@ -1380,28 +1422,27 @@ em_msix_link(void *arg)
EM_MSIX_LINK | E1000_IMS_LSC);
if (adapter->hw.mac.type >= igb_mac_min)
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
-
}
-
+
/*
- ** Because we must read the ICR for this interrupt
- ** it may clear other causes using autoclear, for
- ** this reason we simply create a soft interrupt
- ** for all these vectors.
- */
+ * Because we must read the ICR for this interrupt
+	 * it may clear other causes using autoclear; for
+ * this reason we simply create a soft interrupt
+ * for all these vectors.
+ */
if (reg_icr && adapter->hw.mac.type < igb_mac_min) {
E1000_WRITE_REG(&adapter->hw,
E1000_ICS, adapter->ims);
}
- return (FILTER_HANDLED);
+ return (FILTER_HANDLED);
}
static void
em_handle_link(void *context)
{
- if_ctx_t ctx = context;
- struct adapter *adapter = iflib_get_softc(ctx);
+ if_ctx_t ctx = context;
+ struct adapter *adapter = iflib_get_softc(ctx);
adapter->hw.mac.get_link_status = 1;
iflib_admin_intr_deferred(ctx);
@@ -1419,12 +1460,12 @@ em_handle_link(void *context)
static void
em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
- struct adapter *adapter = iflib_get_softc(ctx);
- u_char fiber_type = IFM_1000_SX;
-
- INIT_DEBUGOUT("em_if_media_status: begin");
+ struct adapter *adapter = iflib_get_softc(ctx);
+ u_char fiber_type = IFM_1000_SX;
- iflib_admin_intr_deferred(ctx);
+ INIT_DEBUGOUT("em_if_media_status: begin");
+
+ iflib_admin_intr_deferred(ctx);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
@@ -1471,7 +1512,7 @@ static int
em_if_media_change(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
- struct ifmedia *ifm = iflib_get_media(ctx);
+ struct ifmedia *ifm = iflib_get_media(ctx);
INIT_DEBUGOUT("em_if_media_change: begin");
@@ -1517,10 +1558,10 @@ em_if_media_change(if_ctx_t ctx)
static int
em_if_set_promisc(if_ctx_t ctx, int flags)
{
- struct adapter *adapter = iflib_get_softc(ctx);
- u32 reg_rctl;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ u32 reg_rctl;
- em_disable_promisc(ctx);
+ em_disable_promisc(ctx);
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
@@ -1535,19 +1576,19 @@ em_if_set_promisc(if_ctx_t ctx, int flags)
reg_rctl &= ~E1000_RCTL_UPE;
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
}
- return (0);
+ return (0);
}
static void
em_disable_promisc(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
- struct ifnet *ifp = iflib_get_ifp(ctx);
- u32 reg_rctl;
- int mcnt = 0;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ u32 reg_rctl;
+ int mcnt = 0;
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
- reg_rctl &= (~E1000_RCTL_UPE);
+ reg_rctl &= (~E1000_RCTL_UPE);
if (if_getflags(ifp) & IFF_ALLMULTI)
mcnt = MAX_NUM_MULTICAST_ADDRESSES;
else
@@ -1570,8 +1611,8 @@ em_disable_promisc(if_ctx_t ctx)
static void
em_if_multi_set(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
- struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
u32 reg_rctl = 0;
u8 *mta; /* Multicast array memory */
int mcnt = 0;
@@ -1581,7 +1622,7 @@ em_if_multi_set(if_ctx_t ctx)
mta = adapter->mta;
bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
- if (adapter->hw.mac.type == e1000_82542 &&
+ if (adapter->hw.mac.type == e1000_82542 &&
adapter->hw.revision_id == E1000_REVISION_2) {
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
@@ -1600,7 +1641,7 @@ em_if_multi_set(if_ctx_t ctx)
} else
e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
- if (adapter->hw.mac.type == e1000_82542 &&
+ if (adapter->hw.mac.type == e1000_82542 &&
adapter->hw.revision_id == E1000_REVISION_2) {
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
reg_rctl &= ~E1000_RCTL_RST;
@@ -1622,17 +1663,15 @@ em_if_multi_set(if_ctx_t ctx)
static void
em_if_timer(if_ctx_t ctx, uint16_t qid)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
struct em_rx_queue *que;
int i;
- int trigger = 0;
+ int trigger = 0;
- if (qid != 0) {
- /* XXX all this stuff is per-adapter */
+ if (qid != 0)
return;
- }
- em_if_update_admin_status(ctx);
+ em_if_update_admin_status(ctx);
em_update_stats_counters(adapter);
/* Reset LAA into RAR[0] on 82571 */
@@ -1656,10 +1695,10 @@ em_if_timer(if_ctx_t ctx, uint16_t qid)
static void
em_if_update_admin_status(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
struct e1000_hw *hw = &adapter->hw;
- struct ifnet *ifp = iflib_get_ifp(ctx);
- device_t dev = iflib_get_dev(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ device_t dev = iflib_get_dev(ctx);
u32 link_check = 0;
/* Get the cached link value or read phy for real */
@@ -1680,7 +1719,7 @@ em_if_update_admin_status(if_ctx_t ctx)
case e1000_media_type_fiber:
e1000_check_for_link(hw);
link_check = (E1000_READ_REG(hw, E1000_STATUS) &
- E1000_STATUS_LU);
+ E1000_STATUS_LU);
break;
case e1000_media_type_internal_serdes:
e1000_check_for_link(hw);
@@ -1740,10 +1779,10 @@ em_if_update_admin_status(if_ctx_t ctx)
static void
em_if_stop(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
INIT_DEBUGOUT("em_stop: begin");
-
+
e1000_reset_hw(&adapter->hw);
if (adapter->hw.mac.type >= e1000_82544)
E1000_WRITE_REG(&adapter->hw, E1000_WUFC, 0);
@@ -1761,9 +1800,9 @@ em_if_stop(if_ctx_t ctx)
static void
em_identify_hardware(if_ctx_t ctx)
{
- device_t dev = iflib_get_dev(ctx);
- struct adapter *adapter = iflib_get_softc(ctx);
-
+ device_t dev = iflib_get_dev(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
+
/* Make sure our PCI config space has the necessary stuff set */
adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
@@ -1786,9 +1825,9 @@ em_identify_hardware(if_ctx_t ctx)
static int
em_allocate_pci_resources(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- int rid, val;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ int rid, val;
rid = PCIR_BAR(0);
adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
@@ -1797,14 +1836,13 @@ em_allocate_pci_resources(if_ctx_t ctx)
device_printf(dev, "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
- adapter->osdep.mem_bus_space_tag =
- rman_get_bustag(adapter->memory);
+ adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory);
adapter->osdep.mem_bus_space_handle =
rman_get_bushandle(adapter->memory);
adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
/* Only older adapters use IO mapping */
- if (adapter->hw.mac.type < em_mac_min &&
+ if (adapter->hw.mac.type < em_mac_min &&
adapter->hw.mac.type > e1000_82543) {
		/* Figure out where our IO BAR is? */
for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
@@ -1847,33 +1885,33 @@ em_allocate_pci_resources(if_ctx_t ctx)
*
**********************************************************************/
static int
-em_if_msix_intr_assign(if_ctx_t ctx, int msix)
+em_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
struct em_rx_queue *rx_que = adapter->rx_queues;
struct em_tx_queue *tx_que = adapter->tx_queues;
- int error, rid, i, vector = 0;
+ int error, rid, i, vector = 0, rx_vectors;
char buf[16];
/* First set up ring resources */
for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) {
- rid = vector +1;
- snprintf(buf, sizeof(buf), "rxq%d", i);
- error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RX, em_msix_que, rx_que, rx_que->me, buf);
- if (error) {
- device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
+ rid = vector + 1;
+ snprintf(buf, sizeof(buf), "rxq%d", i);
+ error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, em_msix_que, rx_que, rx_que->me, buf);
+ if (error) {
+ device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
adapter->rx_num_queues = i + 1;
goto fail;
}
- rx_que->msix = vector;
-
+ rx_que->msix = vector;
+
/*
- ** Set the bit to enable interrupt
- ** in E1000_IMS -- bits 20 and 21
- ** are for RX0 and RX1, note this has
- ** NOTHING to do with the MSIX vector
- */
+ * Set the bit to enable interrupt
+ * in E1000_IMS -- bits 20 and 21
+ * are for RX0 and RX1, note this has
+ * NOTHING to do with the MSIX vector
+ */
if (adapter->hw.mac.type == e1000_82574) {
rx_que->eims = 1 << (20 + i);
adapter->ims |= rx_que->eims;
@@ -1883,22 +1921,24 @@ em_if_msix_intr_assign(if_ctx_t ctx, int msix)
else
rx_que->eims = 1 << vector;
}
+ rx_vectors = vector;
- for (i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
+ vector = 0;
+ for (i = 0; i < adapter->tx_num_queues; i++, tx_que++, vector++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "txq%d", i);
tx_que = &adapter->tx_queues[i];
iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, tx_que, tx_que->me, buf);
- tx_que->msix = vector;
+ tx_que->msix = (vector % adapter->tx_num_queues);
- /*
- ** Set the bit to enable interrupt
- ** in E1000_IMS -- bits 22 and 23
- ** are for TX0 and TX1, note this has
- ** NOTHING to do with the MSIX vector
- */
- if (adapter->hw.mac.type < igb_mac_min) {
+ /*
+ * Set the bit to enable interrupt
+ * in E1000_IMS -- bits 22 and 23
+ * are for TX0 and TX1, note this has
+ * NOTHING to do with the MSIX vector
+ */
+ if (adapter->hw.mac.type == e1000_82574) {
tx_que->eims = 1 << (22 + i);
adapter->ims |= tx_que->eims;
adapter->ivars |= (8 | tx_que->msix) << (8 + (i * 4));
@@ -1907,22 +1947,22 @@ em_if_msix_intr_assign(if_ctx_t ctx, int msix)
else
tx_que->eims = 1 << (i % adapter->tx_num_queues);
}
-
+
/* Link interrupt */
- rid = vector + 1;
- error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, em_msix_link, adapter, 0, "aq");
+ rid = rx_vectors + 1;
+ error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, em_msix_link, adapter, 0, "aq");
if (error) {
device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
goto fail;
}
- adapter->linkvec = vector;
+ adapter->linkvec = rx_vectors;
if (adapter->hw.mac.type < igb_mac_min) {
- adapter->ivars |= (8 | vector) << 16;
+ adapter->ivars |= (8 | rx_vectors) << 16;
adapter->ivars |= 0x80000000;
}
return (0);
- fail:
+fail:
iflib_irq_free(ctx, &adapter->irq);
rx_que = adapter->rx_queues;
for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++)
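
The reworked assignment gives RX queues the low MSI-X vectors, registers TX handlers as soft interrupts that share those vectors (tx_que->msix = vector % tx_num_queues), and places the link/admin vector at rx_vectors. A toy walk-through of the resulting layout for two RX and two TX queues (rids are vector + 1; a sketch of the scheme, not the driver verbatim):

#include <stdio.h>

int
main(void)
{
	int rx_num_queues = 2, tx_num_queues = 2;
	int vector = 0, rx_vectors;

	for (int i = 0; i < rx_num_queues; i++, vector++)
		printf("rxq%d: vector %d, rid %d\n", i, vector, vector + 1);
	rx_vectors = vector;

	/* TX queues piggyback on the RX vectors as soft interrupts. */
	for (int i = 0; i < tx_num_queues; i++)
		printf("txq%d: shares vector %d\n", i, i % tx_num_queues);

	printf("link/admin: vector %d, rid %d\n", rx_vectors, rx_vectors + 1);
	return (0);
}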
@@ -1933,10 +1973,10 @@ em_if_msix_intr_assign(if_ctx_t ctx, int msix)
static void
igb_configure_queues(struct adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
- struct em_rx_queue *rx_que;
- struct em_tx_queue *tx_que;
- u32 tmp, ivar = 0, newitr = 0;
+ struct e1000_hw *hw = &adapter->hw;
+ struct em_rx_queue *rx_que;
+ struct em_tx_queue *tx_que;
+ u32 tmp, ivar = 0, newitr = 0;
/* First turn on RSS capability */
if (adapter->hw.mac.type != e1000_82575)
@@ -2027,13 +2067,13 @@ igb_configure_queues(struct adapter *adapter)
break;
case e1000_82575:
- /* enable MSI-X support*/
+ /* enable MSI-X support*/
tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
- tmp |= E1000_CTRL_EXT_PBA_CLR;
- /* Auto-Mask interrupts upon ICR read. */
- tmp |= E1000_CTRL_EXT_EIAME;
- tmp |= E1000_CTRL_EXT_IRCA;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
+ tmp |= E1000_CTRL_EXT_PBA_CLR;
+ /* Auto-Mask interrupts upon ICR read. */
+ tmp |= E1000_CTRL_EXT_EIAME;
+ tmp |= E1000_CTRL_EXT_IRCA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
/* Queues */
for (int i = 0; i < adapter->rx_num_queues; i++) {
@@ -2058,10 +2098,10 @@ igb_configure_queues(struct adapter *adapter)
if (em_max_interrupt_rate > 0)
newitr = (4000000 / em_max_interrupt_rate) & 0x7FFC;
- if (hw->mac.type == e1000_82575)
- newitr |= newitr << 16;
- else
- newitr |= E1000_EITR_CNT_IGNR;
+ if (hw->mac.type == e1000_82575)
+ newitr |= newitr << 16;
+ else
+ newitr |= E1000_EITR_CNT_IGNR;
for (int i = 0; i < adapter->rx_num_queues; i++) {
rx_que = &adapter->rx_queues[i];
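
newitr converts the requested interrupt rate into the EITR interval field: the 4000000 constant implies units of roughly 250 ns, and the 0x7FFC mask keeps the value inside the field. A quick check, assuming the common 8000 interrupts/s default for em_max_interrupt_rate:

#include <stdio.h>
#include <stdint.h>

#define E1000_EITR_CNT_IGNR	0x80000000	/* "ignore counter" bit */

int
main(void)
{
	uint32_t rate = 8000;	/* assumed em_max_interrupt_rate default */
	uint32_t newitr = (4000000 / rate) & 0x7FFC;

	printf("interval field: 0x%04x\n", newitr);		/* 0x01f4 */
	printf("82575 layout:   0x%08x\n", newitr | (newitr << 16));
	printf("later MACs:     0x%08x\n", newitr | E1000_EITR_CNT_IGNR);
	return (0);
}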
@@ -2074,9 +2114,9 @@ igb_configure_queues(struct adapter *adapter)
static void
em_free_pci_resources(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
- struct em_rx_queue *que = adapter->rx_queues;
- device_t dev = iflib_get_dev(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct em_rx_queue *que = adapter->rx_queues;
+ device_t dev = iflib_get_dev(ctx);
/* Release all msix queue resources */
if (adapter->intr_type == IFLIB_INTR_MSIX)
@@ -2086,7 +2126,6 @@ em_free_pci_resources(if_ctx_t ctx)
iflib_irq_free(ctx, &que->que_irq);
}
-
/* First release all the interrupt resources */
if (adapter->memory != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -2182,12 +2221,12 @@ lem_smartspeed(struct adapter *adapter)
static void
em_reset(if_ctx_t ctx)
{
- device_t dev = iflib_get_dev(ctx);
- struct adapter *adapter = iflib_get_softc(ctx);
- struct ifnet *ifp = iflib_get_ifp(ctx);
- struct e1000_hw *hw = &adapter->hw;
- u16 rx_buffer_size;
- u32 pba;
+ device_t dev = iflib_get_dev(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 rx_buffer_size;
+ u32 pba;
INIT_DEBUGOUT("em_reset: begin");
@@ -2239,7 +2278,7 @@ em_reset(if_ctx_t ctx)
pba = E1000_PBA_26K;
break;
case e1000_82575:
- pba = E1000_PBA_32K;
+ pba = E1000_PBA_32K;
break;
case e1000_82576:
case e1000_vfadapt:
@@ -2264,35 +2303,35 @@ em_reset(if_ctx_t ctx)
pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
}
- /* Special needs in case of Jumbo frames */
- if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
- u32 tx_space, min_tx, min_rx;
- pba = E1000_READ_REG(hw, E1000_PBA);
- tx_space = pba >> 16;
- pba &= 0xffff;
- min_tx = (adapter->hw.mac.max_frame_size +
- sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
- min_tx = roundup2(min_tx, 1024);
- min_tx >>= 10;
- min_rx = adapter->hw.mac.max_frame_size;
- min_rx = roundup2(min_rx, 1024);
- min_rx >>= 10;
- if (tx_space < min_tx &&
- ((min_tx - tx_space) < pba)) {
- pba = pba - (min_tx - tx_space);
- /*
- * if short on rx space, rx wins
- * and must trump tx adjustment
- */
- if (pba < min_rx)
- pba = min_rx;
- }
- E1000_WRITE_REG(hw, E1000_PBA, pba);
- }
+ /* Special needs in case of Jumbo frames */
+ if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
+ u32 tx_space, min_tx, min_rx;
+ pba = E1000_READ_REG(hw, E1000_PBA);
+ tx_space = pba >> 16;
+ pba &= 0xffff;
+ min_tx = (adapter->hw.mac.max_frame_size +
+ sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
+ min_tx = roundup2(min_tx, 1024);
+ min_tx >>= 10;
+ min_rx = adapter->hw.mac.max_frame_size;
+ min_rx = roundup2(min_rx, 1024);
+ min_rx >>= 10;
+ if (tx_space < min_tx &&
+ ((min_tx - tx_space) < pba)) {
+ pba = pba - (min_tx - tx_space);
+ /*
+ * if short on rx space, rx wins
+ * and must trump tx adjustment
+ */
+ if (pba < min_rx)
+ pba = min_rx;
+ }
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
+ }
if (hw->mac.type < igb_mac_min)
E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
-
+
INIT_DEBUGOUT1("em_reset: pba=%dK",pba);
/*
@@ -2330,7 +2369,7 @@ em_reset(if_ctx_t ctx)
switch (hw->mac.type) {
case e1000_pchlan:
/* Workaround: no TX flow ctrl for PCH */
- hw->fc.requested_mode = e1000_fc_rx_pause;
+ hw->fc.requested_mode = e1000_fc_rx_pause;
hw->fc.pause_time = 0xFFFF; /* override */
if (if_getmtu(ifp) > ETHERMTU) {
hw->fc.high_water = 0x3500;
@@ -2375,7 +2414,7 @@ em_reset(if_ctx_t ctx)
hw->fc.high_water = 0x2800;
hw->fc.low_water = hw->fc.high_water - 8;
break;
- }
+ }
/* else fall thru */
default:
if (hw->mac.type == e1000_80003es2lan)
@@ -2440,7 +2479,7 @@ em_initialize_rss_mapping(struct adapter *adapter)
E1000_MRQC_RSS_FIELD_IPV6);
}
-
+
static void
igb_initialize_rss_mapping(struct adapter *adapter)
{
@@ -2470,7 +2509,7 @@ igb_initialize_rss_mapping(struct adapter *adapter)
/* Warning FM follows */
reta = 0;
for (i = 0; i < 128; i++) {
-#ifdef RSS
+#ifdef RSS
queue_id = rss_get_indirection_to_bucket(i);
/*
* If we have more queues than buckets, we'll
@@ -2512,7 +2551,7 @@ igb_initialize_rss_mapping(struct adapter *adapter)
*/
mrqc = E1000_MRQC_ENABLE_RSS_8Q;
-#ifdef RSS
+#ifdef RSS
/* XXX ew typecasting */
rss_getkey((uint8_t *) &rss_key);
#else
@@ -2545,11 +2584,11 @@ igb_initialize_rss_mapping(struct adapter *adapter)
static int
em_setup_interface(if_ctx_t ctx)
{
- struct ifnet *ifp = iflib_get_ifp(ctx);
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
uint64_t cap = 0;
-
+
INIT_DEBUGOUT("em_setup_interface: begin");
/* TSO parameters */
@@ -2559,12 +2598,12 @@ em_setup_interface(if_ctx_t ctx)
if_sethwtsomaxsegsize(ifp, EM_TSO_SEG_SIZE);
/* Single Queue */
- if (adapter->tx_num_queues == 1) {
- if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);
- if_setsendqready(ifp);
+ if (adapter->tx_num_queues == 1) {
+ if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);
+ if_setsendqready(ifp);
}
- cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4;
+ cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4;
cap |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU;
/*
@@ -2575,13 +2614,13 @@ em_setup_interface(if_ctx_t ctx)
if_setcapabilitiesbit(ifp, cap, 0);
/*
- ** Don't turn this on by default, if vlans are
- ** created on another pseudo device (eg. lagg)
- ** then vlan events are not passed thru, breaking
- ** operation, but with HW FILTER off it works. If
- ** using vlans directly on the em driver you can
- ** enable this and get full hardware tag filtering.
- */
+	 * Don't turn this on by default: if vlans are
+ * created on another pseudo device (eg. lagg)
+ * then vlan events are not passed thru, breaking
+ * operation, but with HW FILTER off it works. If
+ * using vlans directly on the em driver you can
+ * enable this and get full hardware tag filtering.
+ */
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER,0);
/* Enable only WOL MAGIC by default */
@@ -2591,8 +2630,8 @@ em_setup_interface(if_ctx_t ctx)
} else {
if_setcapenablebit(ifp, 0, IFCAP_WOL_MAGIC |
IFCAP_WOL_MCAST| IFCAP_WOL_UCAST);
- }
-
+ }
+
/*
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
@@ -2626,8 +2665,8 @@ em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs
struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
int error = E1000_SUCCESS;
- struct em_tx_queue *que;
- int i;
+ struct em_tx_queue *que;
+ int i, j;
MPASS(adapter->tx_num_queues > 0);
MPASS(adapter->tx_num_queues == ntxqsets);
@@ -2641,39 +2680,39 @@ em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs
}
for (i = 0, que = adapter->tx_queues; i < adapter->tx_num_queues; i++, que++) {
- /* Set up some basics */
- struct tx_ring *txr = &que->txr;
- txr->adapter = que->adapter = adapter;
- txr->que = que;
- que->me = txr->me = i;
-
- /* Allocate transmit buffer memory */
- if (!(txr->tx_buffers = (struct em_txbuffer *) malloc(sizeof(struct em_txbuffer) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(iflib_get_dev(ctx), "failed to allocate tx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* get the virtual and physical address of the hardware queues */
- txr->tx_base = (struct e1000_tx_desc *)vaddrs[i*ntxqs];
- txr->tx_paddr = paddrs[i*ntxqs];
-
+ /* Set up some basics */
+
+ struct tx_ring *txr = &que->txr;
+ txr->adapter = que->adapter = adapter;
+ que->me = txr->me = i;
+
+ /* Allocate report status array */
+ if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ for (j = 0; j < scctx->isc_ntxd[0]; j++)
+ txr->tx_rsq[j] = QIDX_INVALID;
+ /* get the virtual and physical address of the hardware queues */
+ txr->tx_base = (struct e1000_tx_desc *)vaddrs[i*ntxqs];
+ txr->tx_paddr = paddrs[i*ntxqs];
}
device_printf(iflib_get_dev(ctx), "allocated for %d tx_queues\n", adapter->tx_num_queues);
return (0);
- fail:
- em_if_queues_free(ctx);
+fail:
+ em_if_queues_free(ctx);
return (error);
}
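
This hunk is the heart of the change: the per-descriptor tx_buffers array is gone, replaced by tx_rsq, a circular list of the descriptor indexes where the driver set report status, so completion handling only has to poll the DD bit at those indexes. A compact userland model of the scheme (a sketch of the idea, not the driver's exact bookkeeping):

#include <stdio.h>
#include <stdint.h>

#define NTXD		8		/* tiny ring for the demo */
#define QIDX_INVALID	0xffff

typedef uint16_t qidx_t;

int
main(void)
{
	qidx_t tx_rsq[NTXD];
	qidx_t rs_pidx = 0, rs_cidx = 0;

	for (int i = 0; i < NTXD; i++)
		tx_rsq[i] = QIDX_INVALID;

	/* Encap: packets ended at descriptors 2 and 5 with RS set. */
	tx_rsq[rs_pidx] = 2; rs_pidx = (rs_pidx + 1) % NTXD;
	tx_rsq[rs_pidx] = 5; rs_pidx = (rs_pidx + 1) % NTXD;

	/* Credits update: consume entries whose descriptor shows DD. */
	while (rs_cidx != rs_pidx) {
		printf("descriptor %u done, reclaim up to it\n",
		    tx_rsq[rs_cidx]);
		tx_rsq[rs_cidx] = QIDX_INVALID;
		rs_cidx = (rs_cidx + 1) % NTXD;
	}
	return (0);
}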
static int
em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
int error = E1000_SUCCESS;
- struct em_rx_queue *que;
- int i;
+ struct em_rx_queue *que;
+ int i;
MPASS(adapter->rx_num_queues > 0);
MPASS(adapter->rx_num_queues == nrxqsets);
@@ -2703,33 +2742,33 @@ em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs
return (0);
fail:
- em_if_queues_free(ctx);
+ em_if_queues_free(ctx);
return (error);
}
static void
em_if_queues_free(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
- struct em_tx_queue *tx_que = adapter->tx_queues;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct em_tx_queue *tx_que = adapter->tx_queues;
struct em_rx_queue *rx_que = adapter->rx_queues;
if (tx_que != NULL) {
- for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
- struct tx_ring *txr = &tx_que->txr;
- if (txr->tx_buffers == NULL)
- break;
-
- free(txr->tx_buffers, M_DEVBUF);
- txr->tx_buffers = NULL;
- }
- free(adapter->tx_queues, M_DEVBUF);
- adapter->tx_queues = NULL;
+ for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+ if (txr->tx_rsq == NULL)
+ break;
+
+ free(txr->tx_rsq, M_DEVBUF);
+ txr->tx_rsq = NULL;
+ }
+ free(adapter->tx_queues, M_DEVBUF);
+ adapter->tx_queues = NULL;
}
if (rx_que != NULL) {
- free(adapter->rx_queues, M_DEVBUF);
- adapter->rx_queues = NULL;
+ free(adapter->rx_queues, M_DEVBUF);
+ adapter->rx_queues = NULL;
}
em_release_hw_control(adapter);
@@ -2747,26 +2786,23 @@ em_if_queues_free(if_ctx_t ctx)
static void
em_initialize_transmit_unit(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
- struct em_tx_queue *que;
+ struct em_tx_queue *que;
struct tx_ring *txr;
struct e1000_hw *hw = &adapter->hw;
u32 tctl, txdctl = 0, tarc, tipg = 0;
- INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
+ INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
u64 bus_addr;
caddr_t offp, endp;
- que = &adapter->tx_queues[i];
+ que = &adapter->tx_queues[i];
txr = &que->txr;
bus_addr = txr->tx_paddr;
- /*Enable all queues */
- em_init_tx_ring(que);
-
/* Clear checksum offload context. */
offp = (caddr_t)&txr->csum_flags;
endp = (caddr_t)(txr + 1);
@@ -2776,9 +2812,9 @@ em_initialize_transmit_unit(if_ctx_t ctx)
E1000_WRITE_REG(hw, E1000_TDLEN(i),
scctx->isc_ntxd[0] * sizeof(struct e1000_tx_desc));
E1000_WRITE_REG(hw, E1000_TDBAH(i),
- (u32)(bus_addr >> 32));
+ (u32)(bus_addr >> 32));
E1000_WRITE_REG(hw, E1000_TDBAL(i),
- (u32)bus_addr);
+ (u32)bus_addr);
/* Init the HEAD/TAIL indices */
E1000_WRITE_REG(hw, E1000_TDT(i), 0);
E1000_WRITE_REG(hw, E1000_TDH(i), 0);
@@ -2788,14 +2824,14 @@ em_initialize_transmit_unit(if_ctx_t ctx)
E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
txdctl = 0; /* clear txdctl */
- txdctl |= 0x1f; /* PTHRESH */
- txdctl |= 1 << 8; /* HTHRESH */
- txdctl |= 1 << 16;/* WTHRESH */
+ txdctl |= 0x1f; /* PTHRESH */
+ txdctl |= 1 << 8; /* HTHRESH */
+ txdctl |= 1 << 16;/* WTHRESH */
txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */
txdctl |= E1000_TXDCTL_GRAN;
- txdctl |= 1 << 25; /* LWTHRESH */
+ txdctl |= 1 << 25; /* LWTHRESH */
- E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
}
/* Set the default values for the Tx Inter Packet Gap timer */
@@ -2887,13 +2923,13 @@ em_initialize_transmit_unit(if_ctx_t ctx)
static void
em_initialize_receive_unit(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
- struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
struct e1000_hw *hw = &adapter->hw;
struct em_rx_queue *que;
- int i;
- u32 rctl, rxcsum, rfctl;
+ int i;
+ u32 rctl, rxcsum, rfctl;
INIT_DEBUGOUT("em_initialize_receive_units: begin");
@@ -2921,13 +2957,13 @@ em_initialize_receive_unit(if_ctx_t ctx)
else
rctl &= ~E1000_RCTL_LPE;
- /* Strip the CRC */
- if (!em_disable_crc_stripping)
+ /* Strip the CRC */
+ if (!em_disable_crc_stripping)
rctl |= E1000_RCTL_SECRC;
if (adapter->hw.mac.type >= e1000_82540) {
E1000_WRITE_REG(&adapter->hw, E1000_RADV,
- adapter->rx_abs_int_delay.value);
+ adapter->rx_abs_int_delay.value);
/*
* Set the interrupt throttling rate. Value is calculated
@@ -2942,9 +2978,9 @@ em_initialize_receive_unit(if_ctx_t ctx)
rfctl = E1000_READ_REG(hw, E1000_RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
/*
- ** When using MSIX interrupts we need to throttle
- ** using the EITR register (82574 only)
- */
+ * When using MSIX interrupts we need to throttle
+ * using the EITR register (82574 only)
+ */
if (hw->mac.type == e1000_82574) {
for (int i = 0; i < 4; i++)
E1000_WRITE_REG(hw, E1000_EITR_82574(i),
@@ -2959,7 +2995,7 @@ em_initialize_receive_unit(if_ctx_t ctx)
adapter->hw.mac.type >= e1000_82543) {
if (adapter->tx_num_queues > 1) {
if (adapter->hw.mac.type >= igb_mac_min) {
- rxcsum |= E1000_RXCSUM_PCSD;
+ rxcsum |= E1000_RXCSUM_PCSD;
if (hw->mac.type != e1000_82575)
rxcsum |= E1000_RXCSUM_CRCOFL;
} else
@@ -2967,7 +3003,7 @@ em_initialize_receive_unit(if_ctx_t ctx)
E1000_RXCSUM_IPOFL |
E1000_RXCSUM_PCSD;
} else {
- if (adapter->hw.mac.type >= igb_mac_min)
+ if (adapter->hw.mac.type >= igb_mac_min)
rxcsum |= E1000_RXCSUM_IPPCSE;
else
rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL;
@@ -2987,17 +3023,17 @@ em_initialize_receive_unit(if_ctx_t ctx)
}
/*
- ** XXX TEMPORARY WORKAROUND: on some systems with 82573
- ** long latencies are observed, like Lenovo X60. This
- ** change eliminates the problem, but since having positive
- ** values in RDTR is a known source of problems on other
- ** platforms another solution is being sought.
- */
+ * XXX TEMPORARY WORKAROUND: on some systems with 82573
+ * long latencies are observed, like Lenovo X60. This
+ * change eliminates the problem, but since having positive
+ * values in RDTR is a known source of problems on other
+ * platforms another solution is being sought.
+ */
if (hw->mac.type == e1000_82573)
E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) {
- struct rx_ring *rxr = &que->rxr;
+ struct rx_ring *rxr = &que->rxr;
/* Setup the Base and Length of the Rx Descriptor Ring */
u64 bus_addr = rxr->rx_paddr;
#if 0
@@ -3041,15 +3077,14 @@ em_initialize_receive_unit(if_ctx_t ctx)
if (if_getmtu(ifp) > ETHERMTU) {
/* Set maximum packet len */
- psize = scctx->isc_max_frame_size;
- if (psize <= 4096) {
+ if (adapter->rx_mbuf_sz <= 4096) {
srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
- } else if (psize > 4096) {
+ } else if (adapter->rx_mbuf_sz > 4096) {
srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
}
-
+ psize = scctx->isc_max_frame_size;
/* are we on a vlan? */
if (ifp->if_vlantrunk != NULL)
psize += VLAN_TAG_SIZE;
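
SRRCTL's BSIZEPKT field counts 1 KB units, hence the right shift by E1000_SRRCTL_BSIZEPKT_SHIFT (10) before OR-ing the value in; the change also keys the choice off the actual cluster size rather than the frame size. The encoding, spelled out:

#include <stdio.h>
#include <stdint.h>

#define E1000_SRRCTL_BSIZEPKT_SHIFT	10	/* field counts 1 KB units */

int
main(void)
{
	uint32_t sizes[] = { 4096, 8192 };

	for (int i = 0; i < 2; i++)
		printf("%u-byte buffer -> BSIZEPKT %u\n",
		    sizes[i], sizes[i] >> E1000_SRRCTL_BSIZEPKT_SHIFT);
	/* 4096 -> 4, 8192 -> 8 */
	return (0);
}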
@@ -3082,7 +3117,6 @@ em_initialize_receive_unit(if_ctx_t ctx)
#else
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
#endif
-
E1000_WRITE_REG(hw, E1000_RDLEN(i),
scctx->isc_nrxd[0] * sizeof(struct e1000_rx_desc));
@@ -3107,8 +3141,8 @@ em_initialize_receive_unit(if_ctx_t ctx)
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
}
- /* Make sure VLAN Filters are off */
- rctl &= ~E1000_RCTL_VFE;
+ /* Make sure VLAN Filters are off */
+ rctl &= ~E1000_RCTL_VFE;
if (adapter->hw.mac.type < igb_mac_min) {
if (adapter->rx_mbuf_sz == MCLBYTES)
@@ -3131,8 +3165,8 @@ em_initialize_receive_unit(if_ctx_t ctx)
static void
em_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
- struct adapter *adapter = iflib_get_softc(ctx);
- u32 index, bit;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ u32 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
@@ -3143,8 +3177,8 @@ em_if_vlan_register(if_ctx_t ctx, u16 vtag)
static void
em_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
- struct adapter *adapter = iflib_get_softc(ctx);
- u32 index, bit;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ u32 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
@@ -3156,25 +3190,25 @@ static void
em_setup_vlan_hw_support(struct adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 reg;
+ u32 reg;
/*
- ** We get here thru init_locked, meaning
- ** a soft reset, this has already cleared
- ** the VFTA and other state, so if there
- ** have been no vlan's registered do nothing.
- */
+ * We get here thru init_locked, meaning
+	 * a soft reset; this has already cleared
+	 * the VFTA and other state, so if there
+	 * have been no VLANs registered, do nothing.
+ */
if (adapter->num_vlans == 0)
- return;
+ return;
/*
- ** A soft reset zero's out the VFTA, so
- ** we need to repopulate it now.
- */
+	 * A soft reset zeroes out the VFTA, so
+ * we need to repopulate it now.
+ */
for (int i = 0; i < EM_VFTA_SIZE; i++)
- if (adapter->shadow_vfta[i] != 0)
+ if (adapter->shadow_vfta[i] != 0)
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
- i, adapter->shadow_vfta[i]);
+ i, adapter->shadow_vfta[i]);
reg = E1000_READ_REG(hw, E1000_CTRL);
reg |= E1000_CTRL_VME;
@@ -3190,7 +3224,7 @@ em_setup_vlan_hw_support(struct adapter *adapter)
static void
em_if_enable_intr(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
struct e1000_hw *hw = &adapter->hw;
u32 ims_mask = IMS_ENABLE_MASK;
@@ -3212,14 +3246,14 @@ em_if_enable_intr(if_ctx_t ctx)
static void
em_if_disable_intr(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
struct e1000_hw *hw = &adapter->hw;
if (adapter->intr_type == IFLIB_INTR_MSIX) {
if (hw->mac.type >= igb_mac_min)
E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0);
E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0);
- }
+ }
E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
}
@@ -3240,7 +3274,7 @@ em_init_manageability(struct adapter *adapter)
/* disable hardware interception of ARP */
manc &= ~(E1000_MANC_ARP_EN);
- /* enable receiving management packets to the host */
+ /* enable receiving management packets to the host */
manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
@@ -3340,9 +3374,9 @@ em_is_valid_ether_addr(u8 *addr)
static void
em_get_wakeup(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
- u16 eeprom_data = 0, device_id, apme_mask;
+ u16 eeprom_data = 0, device_id, apme_mask;
adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
apme_mask = EM_EEPROM_APME;
@@ -3369,7 +3403,7 @@ em_get_wakeup(if_ctx_t ctx)
case e1000_82573:
case e1000_82583:
adapter->has_amt = TRUE;
- /* Falls thru */
+ /* FALLTHROUGH */
case e1000_82571:
case e1000_82572:
case e1000_80003es2lan:
@@ -3409,12 +3443,12 @@ em_get_wakeup(if_ctx_t ctx)
if (eeprom_data & apme_mask)
adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
/*
- * We have the eeprom settings, now apply the special cases
- * where the eeprom may be wrong or the board won't support
- * wake on lan on a particular port
+	 * We have the eeprom settings; now apply the special cases
+ * where the eeprom may be wrong or the board won't support
+ * wake on lan on a particular port
*/
device_id = pci_get_device(dev);
- switch (device_id) {
+ switch (device_id) {
case E1000_DEV_ID_82546GB_PCIE:
adapter->wol = 0;
break;
@@ -3427,13 +3461,13 @@ em_get_wakeup(if_ctx_t ctx)
adapter->wol = 0;
break;
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
- /* if quad port adapter, disable WoL on all but port A */
+ /* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->wol = 0;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
- break;
+ break;
case E1000_DEV_ID_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
@@ -3444,13 +3478,13 @@ em_get_wakeup(if_ctx_t ctx)
case E1000_DEV_ID_82571EB_QUAD_COPPER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
- /* if quad port adapter, disable WoL on all but port A */
+ /* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->wol = 0;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
- break;
+ break;
}
return;
}
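
The global_quad_port_a counter works because attach runs once per port in bus order: port A sees zero and keeps WoL, ports B through D see nonzero and lose it, and the wrap at four resets the state for the next quad-port adapter. Simulated for eight ports (two adapters):

#include <stdio.h>

static int global_quad_port_a = 0;

int
main(void)
{
	for (int port = 0; port < 8; port++) {
		int wol = (global_quad_port_a == 0);	/* only port A keeps WoL */

		printf("port %d: wol %s\n", port, wol ? "kept" : "disabled");
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
	}
	return (0);
}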
@@ -3462,11 +3496,11 @@ em_get_wakeup(if_ctx_t ctx)
static void
em_enable_wakeup(if_ctx_t ctx)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
if_t ifp = iflib_get_ifp(ctx);
- u32 pmc, ctrl, ctrl_ext, rctl, wuc;
- u16 status;
+ u32 pmc, ctrl, ctrl_ext, rctl, wuc;
+ u16 status;
if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
return;
@@ -3494,9 +3528,9 @@ em_enable_wakeup(if_ctx_t ctx)
}
/*
- ** Determine type of Wakeup: note that wol
- ** is set with all bits on by default.
- */
+ * Determine type of Wakeup: note that wol
+ * is set with all bits on by default.
+ */
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
adapter->wol &= ~E1000_WUFC_MAG;
@@ -3522,20 +3556,20 @@ em_enable_wakeup(if_ctx_t ctx)
if (adapter->hw.phy.type == e1000_phy_igp_3)
e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
- /* Request PME */
- status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
+ /* Request PME */
+ status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if (if_getcapenable(ifp) & IFCAP_WOL)
status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
+ pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
return;
}
/*
-** WOL in the newer chipset interfaces (pchlan)
-** require thing to be copied into the phy
-*/
+ * WOL in the newer chipset interfaces (pchlan)
+ * requires the settings to be copied into the PHY
+ */
static int
em_enable_phy_wakeup(struct adapter *adapter)
{
@@ -3609,7 +3643,7 @@ out:
static void
em_if_led_func(if_ctx_t ctx, int onoff)
{
- struct adapter *adapter = iflib_get_softc(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
if (onoff) {
e1000_setup_led(&adapter->hw);
@@ -3621,22 +3655,22 @@ em_if_led_func(if_ctx_t ctx, int onoff)
}
/*
-** Disable the L0S and L1 LINK states
-*/
+ * Disable the L0S and L1 LINK states
+ */
static void
em_disable_aspm(struct adapter *adapter)
{
- int base, reg;
- u16 link_cap,link_ctrl;
- device_t dev = adapter->dev;
+ int base, reg;
+ u16 link_cap,link_ctrl;
+ device_t dev = adapter->dev;
switch (adapter->hw.mac.type) {
- case e1000_82573:
- case e1000_82574:
- case e1000_82583:
- break;
- default:
- return;
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ break;
+ default:
+ return;
}
if (pci_find_cap(dev, PCIY_EXPRESS, &base) != 0)
return;
@@ -3732,17 +3766,17 @@ em_update_stats_counters(struct adapter *adapter)
adapter->stats.icrxoc += E1000_READ_REG(&adapter->hw, E1000_ICRXOC);
if (adapter->hw.mac.type >= e1000_82543) {
- adapter->stats.algnerrc +=
+ adapter->stats.algnerrc +=
E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
- adapter->stats.rxerrc +=
+ adapter->stats.rxerrc +=
E1000_READ_REG(&adapter->hw, E1000_RXERRC);
- adapter->stats.tncrs +=
+ adapter->stats.tncrs +=
E1000_READ_REG(&adapter->hw, E1000_TNCRS);
- adapter->stats.cexterr +=
+ adapter->stats.cexterr +=
E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
- adapter->stats.tsctc +=
+ adapter->stats.tsctc +=
E1000_READ_REG(&adapter->hw, E1000_TSCTC);
- adapter->stats.tsctfc +=
+ adapter->stats.tsctfc +=
E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
}
}
@@ -3787,10 +3821,10 @@ em_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
static void
em_add_hw_stats(struct adapter *adapter)
{
- device_t dev = iflib_get_dev(adapter->ctx);
+ device_t dev = iflib_get_dev(adapter->ctx);
struct em_tx_queue *tx_que = adapter->tx_queues;
- struct em_rx_queue *rx_que = adapter->rx_queues;
-
+ struct em_rx_queue *rx_que = adapter->rx_queues;
+
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
@@ -3801,18 +3835,18 @@ em_add_hw_stats(struct adapter *adapter)
#define QUEUE_NAME_LEN 32
char namebuf[QUEUE_NAME_LEN];
-
+
/* Driver Statistics */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
CTLFLAG_RD, &adapter->dropped_pkts,
"Driver dropped packets");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
CTLFLAG_RD, &adapter->link_irq,
"Link MSIX IRQ Handled");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail",
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail",
CTLFLAG_RD, &adapter->mbuf_defrag_failed,
"Defragmenting mbuf chain failed");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
CTLFLAG_RD, &adapter->no_tx_dma_setup,
"Driver tx dma failure in xmit");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
@@ -3821,7 +3855,6 @@ em_add_hw_stats(struct adapter *adapter)
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
CTLFLAG_RD, &adapter->watchdog_events,
"Watchdog timeouts");
-
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
em_sysctl_reg_handler, "IU",
@@ -3833,48 +3866,48 @@ em_add_hw_stats(struct adapter *adapter)
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
"Flow Control High Watermark");
- SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
"Flow Control Low Watermark");
for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
- struct tx_ring *txr = &tx_que->txr;
+ struct tx_ring *txr = &tx_que->txr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "TX Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
CTLTYPE_UINT | CTLFLAG_RD, adapter,
E1000_TDH(txr->me),
em_sysctl_reg_handler, "IU",
- "Transmit Descriptor Head");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
+ "Transmit Descriptor Head");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
CTLTYPE_UINT | CTLFLAG_RD, adapter,
E1000_TDT(txr->me),
em_sysctl_reg_handler, "IU",
- "Transmit Descriptor Tail");
+ "Transmit Descriptor Tail");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
CTLFLAG_RD, &txr->tx_irq,
"Queue MSI-X Transmit Interrupts");
- SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail",
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &txr->no_desc_avail,
"Queue No Descriptor Available");
}
for (int j = 0; j < adapter->rx_num_queues; j++, rx_que++) {
- struct rx_ring *rxr = &rx_que->rxr;
+ struct rx_ring *rxr = &rx_que->rxr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "RX Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
CTLTYPE_UINT | CTLFLAG_RD, adapter,
E1000_RDH(rxr->me),
em_sysctl_reg_handler, "IU",
"Receive Descriptor Head");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
CTLTYPE_UINT | CTLFLAG_RD, adapter,
E1000_RDT(rxr->me),
em_sysctl_reg_handler, "IU",
@@ -3886,7 +3919,7 @@ em_add_hw_stats(struct adapter *adapter)
/* MAC stats get their own sub node */
- stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
+ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
CTLFLAG_RD, NULL, "Statistics");
stat_list = SYSCTL_CHILDREN(stat_node);
@@ -3989,14 +4022,14 @@ em_add_hw_stats(struct adapter *adapter)
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
CTLFLAG_RD, &adapter->stats.prc1522,
"1023-1522 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
- CTLFLAG_RD, &adapter->stats.gorc,
- "Good Octets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
+ CTLFLAG_RD, &adapter->stats.gorc,
+ "Good Octets Received");
/* Packet Transmission Stats */
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
- CTLFLAG_RD, &adapter->stats.gotc,
- "Good Octets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+ CTLFLAG_RD, &adapter->stats.gotc,
+ "Good Octets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
CTLFLAG_RD, &adapter->stats.tpt,
"Total Packets Transmitted");
@@ -4112,8 +4145,8 @@ em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
static void
em_print_nvm_info(struct adapter *adapter)
{
- u16 eeprom_data;
- int i, j, row = 0;
+ u16 eeprom_data;
+ int i, j, row = 0;
 /* It's a bit crude, but it gets the job done */
printf("\nInterface EEPROM Dump:\n");
@@ -4137,7 +4170,7 @@ em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
u32 regval;
int error, usecs, ticks;
- info = (struct em_int_delay_info *)arg1;
+ info = (struct em_int_delay_info *) arg1;
usecs = info->value;
error = sysctl_handle_int(oidp, &usecs, 0, req);
if (error != 0 || req->newptr == NULL)
@@ -4150,7 +4183,7 @@ em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
ticks *= 4;
adapter = info->adapter;
-
+
regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
regval = (regval & ~0xffff) | (ticks & 0xffff);
/* Handle a few special cases. */
@@ -4196,65 +4229,65 @@ em_set_sysctl_value(struct adapter *adapter, const char *name,
/*
-** Set flow control using sysctl:
-** Flow control values:
-** 0 - off
-** 1 - rx pause
-** 2 - tx pause
-** 3 - full
-*/
+ * Set flow control using sysctl:
+ * Flow control values:
+ * 0 - off
+ * 1 - rx pause
+ * 2 - tx pause
+ * 3 - full
+ */
static int
em_set_flowcntl(SYSCTL_HANDLER_ARGS)
-{
- int error;
- static int input = 3; /* default is full */
- struct adapter *adapter = (struct adapter *) arg1;
-
- error = sysctl_handle_int(oidp, &input, 0, req);
-
- if ((error) || (req->newptr == NULL))
- return (error);
-
+{
+ int error;
+ static int input = 3; /* default is full */
+ struct adapter *adapter = (struct adapter *) arg1;
+
+ error = sysctl_handle_int(oidp, &input, 0, req);
+
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
if (input == adapter->fc) /* no change? */
return (error);
- switch (input) {
- case e1000_fc_rx_pause:
- case e1000_fc_tx_pause:
- case e1000_fc_full:
- case e1000_fc_none:
- adapter->hw.fc.requested_mode = input;
- adapter->fc = input;
- break;
- default:
- /* Do nothing */
- return (error);
- }
+ switch (input) {
+ case e1000_fc_rx_pause:
+ case e1000_fc_tx_pause:
+ case e1000_fc_full:
+ case e1000_fc_none:
+ adapter->hw.fc.requested_mode = input;
+ adapter->fc = input;
+ break;
+ default:
+ /* Do nothing */
+ return (error);
+ }
- adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
- e1000_force_mac_fc(&adapter->hw);
- return (error);
+ adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
+ e1000_force_mac_fc(&adapter->hw);
+ return (error);
}
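
The values accepted by this handler line up with the e1000_fc_* enum (0 off, 1 rx pause, 2 tx pause, 3 full), so -- assuming the driver registers it under a sysctl node named "fc", as the pre-iflib em(4) did -- full flow control can be requested at runtime with:

	sysctl dev.em.0.fc=3

Out-of-range values fall through the default case and leave the currently requested mode untouched.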
/*
-** Manage Energy Efficient Ethernet:
-** Control values:
-** 0/1 - enabled/disabled
-*/
+ * Manage Energy Efficient Ethernet:
+ * Control values:
+ * 0/1 - enabled/disabled
+ */
static int
em_sysctl_eee(SYSCTL_HANDLER_ARGS)
{
- struct adapter *adapter = (struct adapter *) arg1;
- int error, value;
+ struct adapter *adapter = (struct adapter *) arg1;
+ int error, value;
- value = adapter->hw.dev_spec.ich8lan.eee_disable;
- error = sysctl_handle_int(oidp, &value, 0, req);
- if (error || req->newptr == NULL)
- return (error);
- adapter->hw.dev_spec.ich8lan.eee_disable = (value != 0);
- em_if_init(adapter->ctx);
+ value = adapter->hw.dev_spec.ich8lan.eee_disable;
+ error = sysctl_handle_int(oidp, &value, 0, req);
+ if (error || req->newptr == NULL)
+ return (error);
+ adapter->hw.dev_spec.ich8lan.eee_disable = (value != 0);
+ em_if_init(adapter->ctx);
- return (0);
+ return (0);
}
static int
@@ -4271,17 +4304,40 @@ em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
return (error);
if (result == 1) {
- adapter = (struct adapter *)arg1;
+ adapter = (struct adapter *) arg1;
em_print_debug_info(adapter);
}
return (error);
}
+static int
+em_get_rs(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *adapter = (struct adapter *) arg1;
+ int error;
+ int result;
+
+ result = 0;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr || result != 1)
+ return (error);
+ em_dump_rs(adapter);
+
+ return (error);
+}
+
+static void
+em_if_debug(if_ctx_t ctx)
+{
+ em_dump_rs(iflib_get_softc(ctx));
+}
+
/*
-** This routine is meant to be fluid, add whatever is
-** needed for debugging a problem. -jfv
-*/
+ * This routine is meant to be fluid, add whatever is
+ * needed for debugging a problem. -jfv
+ */
static void
em_print_debug_info(struct adapter *adapter)
{
@@ -4302,19 +4358,18 @@ em_print_debug_info(struct adapter *adapter)
for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
device_printf(dev, "TX Queue %d ------\n", i);
device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
- E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
- E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
+ E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
+ E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
}
for (int j=0; j < adapter->rx_num_queues; j++, rxr++) {
device_printf(dev, "RX Queue %d ------\n", j);
device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
- E1000_READ_REG(&adapter->hw, E1000_RDH(j)),
- E1000_READ_REG(&adapter->hw, E1000_RDT(j)));
+ E1000_READ_REG(&adapter->hw, E1000_RDH(j)),
+ E1000_READ_REG(&adapter->hw, E1000_RDT(j)));
}
}
-
/*
* 82574 only:
* Write a new value to the EEPROM increasing the number of MSIX
@@ -4345,7 +4400,7 @@ em_enable_vectors_82574(if_ctx_t ctx)
#ifdef DDB
DB_COMMAND(em_reset_dev, em_ddb_reset_dev)
{
- devclass_t dc;
+ devclass_t dc;
int max_em;
dc = devclass_find("em");
@@ -4362,7 +4417,7 @@ DB_COMMAND(em_reset_dev, em_ddb_reset_dev)
}
DB_COMMAND(em_dump_queue, em_ddb_dump_queue)
{
- devclass_t dc;
+ devclass_t dc;
int max_em;
dc = devclass_find("em");
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index cf9d13cbf3e0..d11cb0af6a11 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -356,14 +356,14 @@ struct em_int_delay_info {
*/
struct tx_ring {
struct adapter *adapter;
- struct em_tx_queue *que;
- u32 me;
- int busy;
struct e1000_tx_desc *tx_base;
uint64_t tx_paddr;
- struct em_txbuffer *tx_buffers;
- u32 tx_tso; /* last tx was tso */
-
+ qidx_t *tx_rsq;
+ bool tx_tso; /* last tx was tso */
+ uint8_t me;
+ qidx_t tx_rs_cidx;
+ qidx_t tx_rs_pidx;
+ qidx_t tx_cidx_processed;
/* Interrupt resources */
void *tag;
struct resource *res;
@@ -532,10 +532,7 @@ typedef struct _em_vendor_info_t {
unsigned int index;
} em_vendor_info_t;
-struct em_txbuffer {
- int eop;
-};
-
+void em_dump_rs(struct adapter *);
#define EM_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "EM Core Lock", MTX_DEF)
diff --git a/sys/dev/e1000/igb_txrx.c b/sys/dev/e1000/igb_txrx.c
index 039269caad95..80010369d290 100644
--- a/sys/dev/e1000/igb_txrx.c
+++ b/sys/dev/e1000/igb_txrx.c
@@ -27,7 +27,7 @@
/* $FreeBSD$ */
#include "if_em.h"
-#ifdef RSS
+#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif
@@ -42,14 +42,14 @@
* Local Function prototypes
*********************************************************************/
static int igb_isc_txd_encap(void *arg, if_pkt_info_t pi);
-static void igb_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx);
-static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx, bool clear);
-
-static void igb_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
- uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buf_len __unused);
-static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx);
-static int igb_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
- int budget);
+static void igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
+static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
+
+static void igb_isc_rxd_refill(void *arg, if_rxd_update_t iru);
+
+static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx);
+static int igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget);
+
static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
static int igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status);
@@ -61,7 +61,7 @@ static int igb_determine_rsstype(u16 pkt_info);
extern void igb_if_enable_intr(if_ctx_t ctx);
extern int em_intr(void *arg);
-struct if_txrx igb_txrx = {
+struct if_txrx igb_txrx = {
igb_isc_txd_encap,
igb_isc_txd_flush,
igb_isc_txd_credits_update,
@@ -84,34 +84,34 @@ static int
igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status)
{
struct e1000_adv_tx_context_desc *TXD;
- struct adapter *adapter = txr->adapter;
- u32 type_tucmd_mlhl = 0, vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
- u32 paylen;
-
- switch(pi->ipi_etype) {
- case ETHERTYPE_IPV6:
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
- break;
- case ETHERTYPE_IP:
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
- /* Tell transmit desc to also do IPv4 checksum. */
- *olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
- break;
- default:
- panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
- __func__, ntohs(pi->ipi_etype));
- break;
- }
-
- TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
-
- /* This is used in the transmit desc in encap */
- paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
-
- /* VLAN MACLEN IPLEN */
+ struct adapter *adapter = txr->adapter;
+ u32 type_tucmd_mlhl = 0, vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 paylen;
+
+ switch(pi->ipi_etype) {
+ case ETHERTYPE_IPV6:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
+ break;
+ case ETHERTYPE_IP:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
+ /* Tell transmit desc to also do IPv4 checksum. */
+ *olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+ break;
+ default:
+ panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
+ __func__, ntohs(pi->ipi_etype));
+ break;
+ }
+
+ TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
+
+ /* This is used in the transmit desc in encap */
+ paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
+
+ /* VLAN MACLEN IPLEN */
if (pi->ipi_mflags & M_VLANTAG) {
- vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
+ vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
}
vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
@@ -132,11 +132,11 @@ igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *oli
TXD->mss_l4len_idx = htole32(mss_l4len_idx);
TXD->seqnum_seed = htole32(0);
- *cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
+ *cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
*olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;
-
- return (1);
+
+ return (1);
}
/*********************************************************************
@@ -147,29 +147,29 @@ igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *oli
static int
igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status)
{
- struct e1000_adv_tx_context_desc *TXD;
+ struct e1000_adv_tx_context_desc *TXD;
struct adapter *adapter = txr->adapter;
- u32 vlan_macip_lens, type_tucmd_mlhl;
+ u32 vlan_macip_lens, type_tucmd_mlhl;
u32 mss_l4len_idx;
mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
int offload = TRUE;
- /* First check if TSO is to be used */
+ /* First check if TSO is to be used */
if (pi->ipi_csum_flags & CSUM_TSO)
return (igb_tso_setup(txr, pi, cmd_type_len, olinfo_status));
- /* Indicate the whole packet as payload when not doing TSO */
- *olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;
+ /* Indicate the whole packet as payload when not doing TSO */
+ *olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;
/* Now ready a context descriptor */
TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
- /*
+ /*
** In advanced descriptors the vlan tag must
** be placed into the context descriptor. Hence
** we need to make one even if not doing offloads.
*/
- if (pi->ipi_mflags & M_VLANTAG) {
+ if (pi->ipi_mflags & M_VLANTAG) {
vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
} else if ((pi->ipi_csum_flags & IGB_CSUM_OFFLOAD) == 0) {
return (0);
@@ -179,108 +179,92 @@ igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *
vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
switch(pi->ipi_etype) {
- case ETHERTYPE_IP:
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
- break;
- case ETHERTYPE_IPV6:
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
- break;
- default:
- offload = FALSE;
- break;
+ case ETHERTYPE_IP:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
+ break;
+ case ETHERTYPE_IPV6:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
+ break;
+ default:
+ offload = FALSE;
+ break;
}
-
- vlan_macip_lens |= pi->ipi_ip_hlen;
+
+ vlan_macip_lens |= pi->ipi_ip_hlen;
type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
switch (pi->ipi_ipproto) {
- case IPPROTO_TCP:
-#if __FreeBSD_version >= 1000000
- if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
-#else
- if (pi->ipi_csum_flags & CSUM_TCP)
-#endif
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
- break;
- case IPPROTO_UDP:
-#if __FreeBSD_version >= 1000000
- if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
-#else
- if (pi->ipi_csum_flags & CSUM_UDP)
-#endif
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
- break;
-
-#if __FreeBSD_version >= 800000
- case IPPROTO_SCTP:
-#if __FreeBSD_version >= 1000000
- if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
-#else
- if (pi->ipi_csum_flags & CSUM_SCTP)
-#endif
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
- break;
-#endif
- default:
- offload = FALSE;
- break;
+ case IPPROTO_TCP:
+ if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case IPPROTO_UDP:
+ if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
+ break;
+ case IPPROTO_SCTP:
+ if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
+ break;
+ default:
+ offload = FALSE;
+ break;
}
if (offload) /* For the TX descriptor setup */
- *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
/* 82575 needs the queue index added */
if (adapter->hw.mac.type == e1000_82575)
mss_l4len_idx = txr->me << 4;
-
+
/* Now copy bits into descriptor */
TXD->vlan_macip_lens = htole32(vlan_macip_lens);
TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
TXD->seqnum_seed = htole32(0);
TXD->mss_l4len_idx = htole32(mss_l4len_idx);
-
+
return (1);
}
static int
igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
- struct adapter *sc = arg;
- if_softc_ctx_t scctx = sc->shared;
- struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
- struct tx_ring *txr = &que->txr;
- int nsegs = pi->ipi_nsegs;
- bus_dma_segment_t *segs = pi->ipi_segs;
- struct em_txbuffer *txbuf;
- union e1000_adv_tx_desc *txd = NULL;
-
- int i, j, first, pidx_last;
- u32 olinfo_status, cmd_type_len;
+ struct adapter *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
+ struct tx_ring *txr = &que->txr;
+ int nsegs = pi->ipi_nsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ union e1000_adv_tx_desc *txd = NULL;
+ int i, j, first, pidx_last;
+ u32 olinfo_status, cmd_type_len, txd_flags;
+ qidx_t ntxd;
pidx_last = olinfo_status = 0;
/* Basic descriptor defines */
cmd_type_len = (E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);
-
+ E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);
+
if (pi->ipi_mflags & M_VLANTAG)
cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
first = i = pi->ipi_pidx;
-
+ ntxd = scctx->isc_ntxd[0];
+ txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_ADVTXD_DCMD_RS : 0;
/* Consume the first descriptor */
- i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
- if (i == scctx->isc_ntxd[0])
+ i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
+ if (i == scctx->isc_ntxd[0])
i = 0;
-
+
/* 82575 needs the queue index added */
if (sc->hw.mac.type == e1000_82575)
olinfo_status |= txr->me << 4;
-
+
for (j = 0; j < nsegs; j++) {
bus_size_t seglen;
bus_addr_t segaddr;
- txbuf = &txr->tx_buffers[i];
txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
seglen = segs[j].ds_len;
segaddr = htole64(segs[j].ds_addr);
@@ -294,108 +278,91 @@ igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
i = 0;
}
}
-
- txd->read.cmd_type_len |=
- htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
-
- /* Set the EOP descriptor that will be marked done */
- txbuf = &txr->tx_buffers[first];
- txbuf->eop = pidx_last;
+ if (txd_flags) {
+ txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
+ txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & (ntxd-1);
+ MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
+ }
+ txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags);
pi->ipi_new_pidx = i;
-
+
return (0);
}
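
The encap path above replaces the old per-packet em_txbuffer EOP tracking with a small report-status queue: only descriptors that iflib flagged with IPI_TX_INTR get the RS bit, and the ring index of each such descriptor is remembered in tx_rsq. A minimal model of that bookkeeping, with hypothetical types, assuming (as the MPASS implies) a power-of-two ring size:

	struct rs_ring {
		uint16_t *rsq;	/* indices of RS-flagged descriptors */
		uint16_t pidx;	/* producer: advanced by encap */
		uint16_t cidx;	/* consumer: advanced by credits_update */
		uint16_t mask;	/* ntxd - 1, ntxd a power of two */
	};

	static inline void
	rs_record(struct rs_ring *r, uint16_t pidx_last)
	{
		/* Remember the last descriptor of a packet that set RS. */
		r->rsq[r->pidx] = pidx_last;
		r->pidx = (r->pidx + 1) & r->mask;
	}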
static void
-igb_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx)
+igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
- struct adapter *adapter = arg;
- struct em_tx_queue *que = &adapter->tx_queues[txqid];
- struct tx_ring *txr = &que->txr;
-
- E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx);
+ struct adapter *adapter = arg;
+ struct em_tx_queue *que = &adapter->tx_queues[txqid];
+ struct tx_ring *txr = &que->txr;
+
+ E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx);
}
static int
-igb_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear)
+igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
- struct adapter *adapter = arg;
- if_softc_ctx_t scctx = adapter->shared;
+ struct adapter *adapter = arg;
+ if_softc_ctx_t scctx = adapter->shared;
struct em_tx_queue *que = &adapter->tx_queues[txqid];
- struct tx_ring *txr = &que->txr;
+ struct tx_ring *txr = &que->txr;
- u32 cidx, ntxd, processed = 0;
+ qidx_t processed = 0;
+ int updated;
+ qidx_t cur, prev, ntxd, rs_cidx;
+ int32_t delta;
+ uint8_t status;
- struct em_txbuffer *buf;
- union e1000_adv_tx_desc *txd, *eop;
- int limit;
-
- cidx = cidx_init;
+ rs_cidx = txr->tx_rs_cidx;
+ if (rs_cidx == txr->tx_rs_pidx)
+ return (0);
+ cur = txr->tx_rsq[rs_cidx];
+ status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
+ updated = !!(status & E1000_TXD_STAT_DD);
- buf = &txr->tx_buffers[cidx];
- txd = (union e1000_adv_tx_desc *)&txr->tx_base[cidx];
- ntxd = scctx->isc_ntxd[0];
- limit = adapter->tx_process_limit;
+ if (!clear || !updated)
+ return (updated);
+ prev = txr->tx_cidx_processed;
+ ntxd = scctx->isc_ntxd[0];
do {
- if (buf->eop == -1) /* No work */
+ delta = (int32_t)cur - (int32_t)prev;
+ MPASS(prev == 0 || delta != 0);
+ if (delta < 0)
+ delta += ntxd;
+
+ processed += delta;
+ prev = cur;
+ rs_cidx = (rs_cidx + 1) & (ntxd-1);
+ if (rs_cidx == txr->tx_rs_pidx)
break;
+ cur = txr->tx_rsq[rs_cidx];
+ status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
+ } while ((status & E1000_TXD_STAT_DD));
- eop = (union e1000_adv_tx_desc *)&txr->tx_base[buf->eop];
- if ((eop->wb.status & E1000_TXD_STAT_DD) == 0)
- break; /* I/O not complete */
-
- if (clear)
- buf->eop = -1; /* clear indicate processed */
-
- /* We clean the range if multi segment */
- while (txd != eop) {
- ++txd;
- ++buf;
- /* wrap the ring? */
- if (++cidx == scctx->isc_ntxd[0]) {
- cidx = 0;
- buf = txr->tx_buffers;
- txd = (union e1000_adv_tx_desc *)txr->tx_base;
- }
-
- buf = &txr->tx_buffers[cidx];
- if (clear)
- buf->eop = -1;
- processed++;
- }
- processed++;
-
- /* Try the next packet */
- txd++;
- buf++;
-
- /* reset with a wrap */
- if (++cidx == scctx->isc_ntxd[0]) {
- cidx = 0;
- buf = txr->tx_buffers;
- txd = (union e1000_adv_tx_desc *)txr->tx_base;
- }
- prefetch(txd);
- prefetch(txd+1);
- } while (__predict_true(--limit) && cidx != cidx_init);
-
+ txr->tx_rs_cidx = rs_cidx;
+ txr->tx_cidx_processed = prev;
return (processed);
}
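
The consumer side then only has to inspect descriptors it knows carry RS, instead of walking every descriptor the way the old loop did. Continuing the sketch above, with the clear/error handling folded away; the prevp argument plays the role of tx_cidx_processed, which persists across calls:

	static int
	rs_credits(struct rs_ring *r, const uint8_t *dd, uint16_t *prevp)
	{
		int credits = 0;

		while (r->cidx != r->pidx && dd[r->rsq[r->cidx]]) {
			uint16_t cur = r->rsq[r->cidx];
			int32_t delta = (int32_t)cur - (int32_t)*prevp;

			if (delta < 0)		/* index wrapped past ring end */
				delta += r->mask + 1;
			credits += delta;
			*prevp = cur;		/* i.e. tx_cidx_processed */
			r->cidx = (r->cidx + 1) & r->mask;
		}
		return (credits);
	}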
static void
-igb_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
- uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused,
- uint16_t count, uint16_t buf_len __unused)
+igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
- struct adapter *sc = arg;
- if_softc_ctx_t scctx = sc->shared;
- struct em_rx_queue *que = &sc->rx_queues[rxqid];
+ struct adapter *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ uint16_t rxqid = iru->iru_qsidx;
+ struct em_rx_queue *que = &sc->rx_queues[rxqid];
union e1000_adv_rx_desc *rxd;
- struct rx_ring *rxr = &que->rxr;
- int i;
- uint32_t next_pidx;
+ struct rx_ring *rxr = &que->rxr;
+ uint64_t *paddrs;
+ uint32_t next_pidx, pidx;
+ uint16_t count;
+ int i;
+
+ paddrs = iru->iru_paddrs;
+ pidx = iru->iru_pidx;
+ count = iru->iru_count;
for (i = 0, next_pidx = pidx; i < count; i++) {
rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[next_pidx];
@@ -407,33 +374,39 @@ igb_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
}
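
isc_rxd_refill now takes a single if_rxd_update batch instead of one call per buffer. For reference, this is how the iflib side (see the _iflib_fl_refill hunk further down) populates the structure before calling into the driver -- up to IFLIB_MAX_RX_REFRESH buffers per call, described by parallel address/index arrays:

	struct if_rxd_update iru;

	iru.iru_paddrs   = fl->ifl_bus_addrs;	/* bus addresses, one per buffer */
	iru.iru_vaddrs   = fl->ifl_vm_addrs;	/* KVA, for drivers that need it */
	iru.iru_idxs     = fl->ifl_rxd_idxs;	/* descriptor slots to fill */
	iru.iru_qsidx    = fl->ifl_rxq->ifr_id;
	iru.iru_flidx    = fl->ifl_id;
	iru.iru_buf_size = fl->ifl_buf_size;
	iru.iru_pidx     = pidx;		/* first slot of this batch */
	iru.iru_count    = n;			/* batch length */
	ctx->isc_rxd_refill(ctx->ifc_softc, &iru);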
static void
-igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx)
+igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
- struct adapter *sc = arg;
- struct em_rx_queue *que = &sc->rx_queues[rxqid];
- struct rx_ring *rxr = &que->rxr;
+ struct adapter *sc = arg;
+ struct em_rx_queue *que = &sc->rx_queues[rxqid];
+ struct rx_ring *rxr = &que->rxr;
E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}
static int
-igb_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
+igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
- struct adapter *sc = arg;
- if_softc_ctx_t scctx = sc->shared;
- struct em_rx_queue *que = &sc->rx_queues[rxqid];
- struct rx_ring *rxr = &que->rxr;
+ struct adapter *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct em_rx_queue *que = &sc->rx_queues[rxqid];
+ struct rx_ring *rxr = &que->rxr;
union e1000_adv_rx_desc *rxd;
- u32 staterr = 0;
- int cnt, i, iter;
+ u32 staterr = 0;
+ int cnt, i, iter;
+
+ if (budget == 1) {
+ rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[idx];
+ staterr = le32toh(rxd->wb.upper.status_error);
+ return (staterr & E1000_RXD_STAT_DD);
+ }
for (iter = cnt = 0, i = idx; iter < scctx->isc_nrxd[0] && iter <= budget;) {
rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[i];
- staterr = le32toh(rxd->wb.upper.status_error);
-
+ staterr = le32toh(rxd->wb.upper.status_error);
+
if ((staterr & E1000_RXD_STAT_DD) == 0)
break;
-
+
if (++i == scctx->isc_nrxd[0]) {
i = 0;
}
@@ -442,13 +415,6 @@ igb_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
cnt++;
iter++;
}
- {
- struct e1000_hw *hw = &sc->hw;
- int rdt, rdh;
- rdt = E1000_READ_REG(hw, E1000_RDT(rxr->me));
- rdh = E1000_READ_REG(hw, E1000_RDH(rxr->me));
- DPRINTF(iflib_get_dev(sc->ctx), "sidx:%d eidx:%d iter=%d pktcnt=%d RDT=%d RDH=%d\n", idx, i, iter, cnt, rdt, rdh);
- }
return (cnt);
}
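
The budget == 1 early return added above serves the new iflib_fast_intr_rxtx() handler (see the iflib.c hunk further down): from interrupt context it only needs a yes/no answer about a single descriptor to choose between scheduling the rx task and re-arming the queue interrupt:

	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);	/* work pending: run the rx task */
	else
		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);	/* idle: rearm */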
@@ -462,39 +428,39 @@ igb_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
static int
igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
- struct adapter *adapter = arg;
- if_softc_ctx_t scctx = adapter->shared;
- struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
- struct rx_ring *rxr = &que->rxr;
- struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
- union e1000_adv_rx_desc *rxd;
-
- u16 pkt_info, len;
- u16 vtag = 0;
- u32 ptype;
- u32 staterr = 0;
- bool eop;
- int i = 0;
- int cidx = ri->iri_cidx;
+ struct adapter *adapter = arg;
+ if_softc_ctx_t scctx = adapter->shared;
+ struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
+ struct rx_ring *rxr = &que->rxr;
+ struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
+ union e1000_adv_rx_desc *rxd;
+
+ u16 pkt_info, len;
+ u16 vtag = 0;
+ u32 ptype;
+ u32 staterr = 0;
+ bool eop;
+ int i = 0;
+ int cidx = ri->iri_cidx;
do {
rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[cidx];
staterr = le32toh(rxd->wb.upper.status_error);
pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);
-
+
MPASS ((staterr & E1000_RXD_STAT_DD) != 0);
len = le16toh(rxd->wb.upper.length);
ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
ri->iri_len += len;
- rxr->rx_bytes += ri->iri_len;
+ rxr->rx_bytes += ri->iri_len;
rxd->wb.upper.status_error = 0;
eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);
if (((adapter->hw.mac.type == e1000_i350) ||
- (adapter->hw.mac.type == e1000_i354)) &&
+ (adapter->hw.mac.type == e1000_i354)) &&
(staterr & E1000_RXDEXT_STATERR_LB))
vtag = be16toh(rxd->wb.upper.vlan);
else
@@ -509,25 +475,25 @@ igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
ri->iri_frags[i].irf_flid = 0;
ri->iri_frags[i].irf_idx = cidx;
ri->iri_frags[i].irf_len = len;
-
+
if (++cidx == scctx->isc_nrxd[0])
cidx = 0;
-#ifdef notyet
+#ifdef notyet
if (rxr->hdr_split == TRUE) {
ri->iri_frags[i].irf_flid = 1;
- ri->iri_frags[i].irf_idx = cidx;
+ ri->iri_frags[i].irf_idx = cidx;
if (++cidx == scctx->isc_nrxd[0])
cidx = 0;
}
-#endif
+#endif
i++;
} while (!eop);
-
+
rxr->rx_packets++;
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
igb_rx_checksum(staterr, ri, ptype);
-
+
if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
(staterr & E1000_RXD_STAT_VP) != 0) {
ri->iri_vtag = vtag;
@@ -538,7 +504,7 @@ igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
ri->iri_rsstype = igb_determine_rsstype(pkt_info);
ri->iri_nfrags = i;
- return (0);
+ return (0);
}
/*********************************************************************
@@ -552,8 +518,8 @@ static void
igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
{
u16 status = (u16)staterr;
- u8 errors = (u8) (staterr >> 24);
- bool sctp = FALSE;
+ u8 errors = (u8) (staterr >> 24);
+ bool sctp = FALSE;
/* Ignore Checksum bit is set */
if (status & E1000_RXD_STAT_IXSM) {
@@ -579,10 +545,8 @@ igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
-#if __FreeBSD_version >= 800000
if (sctp) /* reassign */
type = CSUM_SCTP_VALID;
-#endif
/* Did it pass? */
if (!(errors & E1000_RXD_ERR_TCPE)) {
ri->iri_csum_flags |= type;
@@ -598,10 +562,10 @@ igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
* Parse the packet type to determine the appropriate hash
*
******************************************************************/
-static int
-igb_determine_rsstype(u16 pkt_info)
+static int
+igb_determine_rsstype(u16 pkt_info)
{
- switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
+ switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
case E1000_RXDADV_RSSTYPE_IPV4_TCP:
return M_HASHTYPE_RSS_TCP_IPV4;
case E1000_RXDADV_RSSTYPE_IPV4:
diff --git a/sys/net/ifdi_if.m b/sys/net/ifdi_if.m
index c2ff904039f3..81c9ba1fe443 100644
--- a/sys/net/ifdi_if.m
+++ b/sys/net/ifdi_if.m
@@ -195,7 +195,12 @@ METHOD void intr_disable {
if_ctx_t _ctx;
};
-METHOD int queue_intr_enable {
+METHOD int rx_queue_intr_enable {
+ if_ctx_t _ctx;
+ uint16_t _qid;
+} DEFAULT null_queue_intr_enable;
+
+METHOD int tx_queue_intr_enable {
if_ctx_t _ctx;
uint16_t _qid;
} DEFAULT null_queue_intr_enable;
@@ -333,4 +338,6 @@ METHOD int sysctl_int_delay {
if_int_delay_info_t _iidi;
} DEFAULT null_sysctl_int_delay;
-
+METHOD void debug {
+ if_ctx_t _ctx;
+} DEFAULT null_void_op;
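Drivers pick the new methods up through their ifdi method table; a sketch of the em(4) side (em_if_debug is added in the if_em.c hunk above; the two queue-interrupt handler names here are assumptions for illustration, not part of this patch):

	static device_method_t em_if_methods[] = {
		/* ... existing ifdi methods ... */
		DEVMETHOD(ifdi_rx_queue_intr_enable, em_if_rx_queue_intr_enable),
		DEVMETHOD(ifdi_tx_queue_intr_enable, em_if_tx_queue_intr_enable),
		DEVMETHOD(ifdi_debug, em_if_debug),
		DEVMETHOD_END
	};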
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index 7c3fb3f3f7d3..87ad4a309834 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -94,7 +94,8 @@ __FBSDID("$FreeBSD$");
#endif
/*
- * enable accounting of every mbuf as it comes in to and goes out of iflib's software descriptor references
+ * enable accounting of every mbuf as it comes in to and goes out of
+ * iflib's software descriptor references
*/
#define MEMORY_LOGGING 0
/*
@@ -139,7 +140,7 @@ typedef struct iflib_filter_info {
driver_filter_t *ifi_filter;
void *ifi_filter_arg;
struct grouptask *ifi_task;
- struct iflib_ctx *ifi_ctx;
+ void *ifi_ctx;
} *iflib_filter_info_t;
struct iflib_ctx {
@@ -185,8 +186,8 @@ struct iflib_ctx {
uint16_t ifc_sysctl_nrxqs;
uint16_t ifc_sysctl_qs_eq_override;
- uint16_t ifc_sysctl_ntxds[8];
- uint16_t ifc_sysctl_nrxds[8];
+ qidx_t ifc_sysctl_ntxds[8];
+ qidx_t ifc_sysctl_nrxds[8];
struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
@@ -254,7 +255,9 @@ iflib_get_sctx(if_ctx_t ctx)
return (ctx->ifc_sctx);
}
+#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
+#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))
#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
@@ -275,47 +278,56 @@ typedef struct iflib_sw_tx_desc_array {
bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
struct mbuf **ifsd_m; /* pkthdr mbufs */
uint8_t *ifsd_flags;
-} iflib_txsd_array_t;
+} if_txsd_vec_t;
/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS 128
#define IFLIB_MAX_RX_SEGS 32
-#define IFLIB_RX_COPY_THRESH 63
+#define IFLIB_RX_COPY_THRESH 128
#define IFLIB_MAX_RX_REFRESH 32
+/* The minimum number of descriptors per second before we start coalescing */
+#define IFLIB_MIN_DESC_SEC 16384
+#define IFLIB_DEFAULT_TX_UPDATE_FREQ 16
#define IFLIB_QUEUE_IDLE 0
#define IFLIB_QUEUE_HUNG 1
#define IFLIB_QUEUE_WORKING 2
+/* maximum number of txqs that can share an rx interrupt */
+#define IFLIB_MAX_TX_SHARED_INTR 4
-/* this should really scale with ring size - 32 is a fairly arbitrary value for this */
-#define TX_BATCH_SIZE 16
+/* this should really scale with ring size - this is a fairly arbitrary value */
+#define TX_BATCH_SIZE 32
#define IFLIB_RESTART_BUDGET 8
-#define IFC_LEGACY 0x01
-#define IFC_QFLUSH 0x02
-#define IFC_MULTISEG 0x04
-#define IFC_DMAR 0x08
-#define IFC_SC_ALLOCATED 0x10
-#define IFC_INIT_DONE 0x20
-
+#define IFC_LEGACY 0x001
+#define IFC_QFLUSH 0x002
+#define IFC_MULTISEG 0x004
+#define IFC_DMAR 0x008
+#define IFC_SC_ALLOCATED 0x010
+#define IFC_INIT_DONE 0x020
+#define IFC_PREFETCH 0x040
+#define IFC_DO_RESET 0x080
+#define IFC_CHECK_HUNG 0x100
#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
struct iflib_txq {
- uint16_t ift_in_use;
- uint16_t ift_cidx;
- uint16_t ift_cidx_processed;
- uint16_t ift_pidx;
+ qidx_t ift_in_use;
+ qidx_t ift_cidx;
+ qidx_t ift_cidx_processed;
+ qidx_t ift_pidx;
uint8_t ift_gen;
- uint8_t ift_db_pending;
- uint8_t ift_db_pending_queued;
- uint8_t ift_npending;
uint8_t ift_br_offset;
+ uint16_t ift_npending;
+ uint16_t ift_db_pending;
+ uint16_t ift_rs_pending;
/* implicit pad */
+ uint8_t ift_txd_size[8];
uint64_t ift_processed;
uint64_t ift_cleaned;
+ uint64_t ift_cleaned_prev;
#if MEMORY_LOGGING
uint64_t ift_enqueued;
uint64_t ift_dequeued;
@@ -333,19 +345,16 @@ struct iflib_txq {
/* constant values */
if_ctx_t ift_ctx;
- struct ifmp_ring **ift_br;
+ struct ifmp_ring *ift_br;
struct grouptask ift_task;
- uint16_t ift_size;
+ qidx_t ift_size;
uint16_t ift_id;
struct callout ift_timer;
- struct callout ift_db_check;
-
- iflib_txsd_array_t ift_sds;
- uint8_t ift_nbr;
- uint8_t ift_qstatus;
- uint8_t ift_active;
- uint8_t ift_closed;
- int ift_watchdog_time;
+
+ if_txsd_vec_t ift_sds;
+ uint8_t ift_qstatus;
+ uint8_t ift_closed;
+ uint8_t ift_update_freq;
struct iflib_filter_info ift_filter_info;
bus_dma_tag_t ift_desc_tag;
bus_dma_tag_t ift_tso_desc_tag;
@@ -360,10 +369,11 @@ struct iflib_txq {
} __aligned(CACHE_LINE_SIZE);
struct iflib_fl {
- uint16_t ifl_cidx;
- uint16_t ifl_pidx;
- uint16_t ifl_credits;
+ qidx_t ifl_cidx;
+ qidx_t ifl_pidx;
+ qidx_t ifl_credits;
uint8_t ifl_gen;
+ uint8_t ifl_rxd_size;
#if MEMORY_LOGGING
uint64_t ifl_m_enqueued;
uint64_t ifl_m_dequeued;
@@ -373,7 +383,7 @@ struct iflib_fl {
/* implicit pad */
/* constant */
- uint16_t ifl_size;
+ qidx_t ifl_size;
uint16_t ifl_buf_size;
uint16_t ifl_cltype;
uma_zone_t ifl_zone;
@@ -384,12 +394,13 @@ struct iflib_fl {
iflib_dma_info_t ifl_ifdi;
uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
+ qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);
-static inline int
-get_inuse(int size, int cidx, int pidx, int gen)
+static inline qidx_t
+get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
- int used;
+ qidx_t used;
if (pidx > cidx)
used = pidx - cidx;
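
A worked example of the occupancy math, since the narrowing to qidx_t makes the wrap cases easier to get wrong: with size = 1024, cidx = 1000 and pidx = 8 the producer has wrapped, so used = size - cidx + pidx = 1024 - 1000 + 8 = 32. When pidx == cidx the gen flag is what distinguishes a completely full ring (gen set, used = size) from a completely empty one (gen clear, used = 0).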
@@ -415,9 +426,9 @@ struct iflib_rxq {
* these are the cq cidx and pidx. Otherwise
* these are unused.
*/
- uint16_t ifr_size;
- uint16_t ifr_cq_cidx;
- uint16_t ifr_cq_pidx;
+ qidx_t ifr_size;
+ qidx_t ifr_cq_cidx;
+ qidx_t ifr_cq_pidx;
uint8_t ifr_cq_gen;
uint8_t ifr_fl_offset;
@@ -427,10 +438,13 @@ struct iflib_rxq {
uint16_t ifr_id;
uint8_t ifr_lro_enabled;
uint8_t ifr_nfl;
+ uint8_t ifr_ntxqirq;
+ uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
struct lro_ctrl ifr_lc;
struct grouptask ifr_task;
struct iflib_filter_info ifr_filter_info;
iflib_dma_info_t ifr_ifdi;
+
/* dynamically allocate if any drivers need a value substantially larger than this */
struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
@@ -438,6 +452,69 @@ struct iflib_rxq {
#endif
} __aligned(CACHE_LINE_SIZE);
+typedef struct if_rxsd {
+ caddr_t *ifsd_cl;
+ struct mbuf **ifsd_m;
+ iflib_fl_t ifsd_fl;
+ qidx_t ifsd_cidx;
+} *if_rxsd_t;
+
+/* multiple of word size */
+#ifdef __LP64__
+#define PKT_INFO_SIZE 6
+#define RXD_INFO_SIZE 5
+#define PKT_TYPE uint64_t
+#else
+#define PKT_INFO_SIZE 11
+#define RXD_INFO_SIZE 8
+#define PKT_TYPE uint32_t
+#endif
+#define PKT_LOOP_BOUND ((PKT_INFO_SIZE/3)*3)
+#define RXD_LOOP_BOUND ((RXD_INFO_SIZE/4)*4)
+
+typedef struct if_pkt_info_pad {
+ PKT_TYPE pkt_val[PKT_INFO_SIZE];
+} *if_pkt_info_pad_t;
+typedef struct if_rxd_info_pad {
+ PKT_TYPE rxd_val[RXD_INFO_SIZE];
+} *if_rxd_info_pad_t;
+
+CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
+CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
+
+
+static inline void
+pkt_info_zero(if_pkt_info_t pi)
+{
+ if_pkt_info_pad_t pi_pad;
+
+ pi_pad = (if_pkt_info_pad_t)pi;
+ pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
+ pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
+#ifndef __LP64__
+ pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
+ pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
+#endif
+}
+
+static inline void
+rxd_info_zero(if_rxd_info_t ri)
+{
+ if_rxd_info_pad_t ri_pad;
+ int i;
+
+ ri_pad = (if_rxd_info_pad_t)ri;
+ for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
+ ri_pad->rxd_val[i] = 0;
+ ri_pad->rxd_val[i+1] = 0;
+ ri_pad->rxd_val[i+2] = 0;
+ ri_pad->rxd_val[i+3] = 0;
+ }
+#ifdef __LP64__
+ ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
+#endif
+}
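+
+Semantically each helper is just a cheaper spelling of bzero() on the corresponding struct: the pad mirror lets the compiler emit a fixed run of word stores, and the CTASSERTs above turn any future field added without growing PKT_INFO_SIZE/RXD_INFO_SIZE into a compile-time error rather than a silently uninitialized field.
+
+	/* pkt_info_zero(pi) behaves like: */
+	bzero(pi, sizeof(struct if_pkt_info));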
+
/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
*/
@@ -455,12 +532,6 @@ static int enable_msix = 1;
#define CTX_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_mtx)
-#define TXDB_LOCK_INIT(txq) mtx_init(&(txq)->ift_db_mtx, (txq)->ift_db_mtx_name, NULL, MTX_DEF)
-#define TXDB_TRYLOCK(txq) mtx_trylock(&(txq)->ift_db_mtx)
-#define TXDB_LOCK(txq) mtx_lock(&(txq)->ift_db_mtx)
-#define TXDB_UNLOCK(txq) mtx_unlock(&(txq)->ift_db_mtx)
-#define TXDB_LOCK_DESTROY(txq) mtx_destroy(&(txq)->ift_db_mtx)
-
#define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx)
@@ -480,6 +551,7 @@ MODULE_VERSION(iflib, 1);
MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);
+TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
#ifndef IFLIB_DEBUG_COUNTERS
@@ -497,9 +569,11 @@ static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
* XXX need to ensure that this can't accidentally cause the head to be moved backwards
*/
static int iflib_min_tx_latency = 0;
-
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
&iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
+static int iflib_no_tx_batch = 0;
+SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
+ &iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput");
#if IFLIB_DEBUG_COUNTERS
@@ -621,7 +695,7 @@ static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
-static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx, int budget);
+static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str);
@@ -633,6 +707,11 @@ static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
+static void iflib_stop(if_ctx_t ctx);
+static void iflib_if_init_locked(if_ctx_t ctx);
+#ifndef __NO_STRICT_ALIGNMENT
+static struct mbuf * iflib_fixup_rx(struct mbuf *m);
+#endif
#ifdef DEV_NETMAP
#include <sys/selinfo.h>
@@ -676,6 +755,7 @@ iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
struct ifnet *ifp = na->ifp;
if_ctx_t ctx = ifp->if_softc;
+ int status;
CTX_LOCK(ctx);
IFDI_INTR_DISABLE(ctx);
@@ -692,10 +772,14 @@ iflib_netmap_register(struct netmap_adapter *na, int onoff)
} else {
nm_clear_native_flags(na);
}
- IFDI_INIT(ctx);
+ iflib_stop(ctx);
+ iflib_init_locked(ctx);
IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
+ status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
+ if (status)
+ nm_clear_native_flags(na);
CTX_UNLOCK(ctx);
- return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
+ return (status);
}
/*
@@ -734,12 +818,9 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
if_ctx_t ctx = ifp->if_softc;
iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
- pi.ipi_segs = txq->ift_segs;
- pi.ipi_qsidx = kring->ring_id;
- pi.ipi_ndescs = 0;
-
- bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ if (txq->ift_sds.ifsd_map)
+ bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/*
@@ -765,23 +846,32 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
*/
nm_i = kring->nr_hwcur;
+ pkt_info_zero(&pi);
+ pi.ipi_segs = txq->ift_segs;
+ pi.ipi_qsidx = kring->ring_id;
if (nm_i != head) { /* we have new packets to send */
nic_i = netmap_idx_k2n(kring, nm_i);
__builtin_prefetch(&ring->slot[nm_i]);
__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
- __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
+ if (txq->ift_sds.ifsd_map)
+ __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
for (n = 0; nm_i != head; n++) {
struct netmap_slot *slot = &ring->slot[nm_i];
u_int len = slot->len;
- uint64_t paddr;
+ vm_paddr_t paddr;
void *addr = PNMB(na, slot, &paddr);
int flags = (slot->flags & NS_REPORT ||
nic_i == 0 || nic_i == report_frequency) ?
IPI_TX_INTR : 0;
/* device-specific */
+ pi.ipi_len = len;
+ pi.ipi_segs[0].ds_addr = paddr;
+ pi.ipi_segs[0].ds_len = len;
+ pi.ipi_nsegs = 1;
+ pi.ipi_ndescs = 0;
pi.ipi_pidx = nic_i;
pi.ipi_flags = flags;
@@ -791,27 +881,28 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
/* prefetch for next round */
__builtin_prefetch(&ring->slot[nm_i + 1]);
__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
- __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
+ if (txq->ift_sds.ifsd_map) {
+ __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
- NM_CHECK_ADDR_LEN(na, addr, len);
+ NM_CHECK_ADDR_LEN(na, addr, len);
- if (slot->flags & NS_BUF_CHANGED) {
- /* buffer has changed, reload map */
- netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
+ if (slot->flags & NS_BUF_CHANGED) {
+ /* buffer has changed, reload map */
+ netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
+ }
+ /* make sure changes to the buffer are synced */
+ bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
+ BUS_DMASYNC_PREWRITE);
}
slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
-
- /* make sure changes to the buffer are synced */
- bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
- BUS_DMASYNC_PREWRITE);
-
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}
kring->nr_hwcur = head;
/* synchronize the NIC ring */
- bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
+ if (txq->ift_sds.ifsd_map)
+ bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* (re)start the tx unit up to slot nic_i (excluded) */
@@ -846,30 +937,30 @@ static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
struct netmap_adapter *na = kring->na;
- struct ifnet *ifp = na->ifp;
struct netmap_ring *ring = kring->ring;
- u_int nm_i; /* index into the netmap ring */
- u_int nic_i; /* index into the NIC ring */
+ uint32_t nm_i; /* index into the netmap ring */
+ uint32_t nic_i, nic_i_start; /* index into the NIC ring */
u_int i, n;
u_int const lim = kring->nkr_num_slots - 1;
u_int const head = kring->rhead;
int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
struct if_rxd_info ri;
- /* device-specific */
+ struct if_rxd_update iru;
+
+ struct ifnet *ifp = na->ifp;
if_ctx_t ctx = ifp->if_softc;
iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
iflib_fl_t fl = rxq->ifr_fl;
if (head > lim)
return netmap_ring_reinit(kring);
- bzero(&ri, sizeof(ri));
- ri.iri_qsidx = kring->ring_id;
- ri.iri_ifp = ctx->ifc_ifp;
/* XXX check sync modes */
- for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++)
+ for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
+ if (fl->ifl_sds.ifsd_map == NULL)
+ continue;
bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
+ }
/*
* First part: import newly received packets.
*
@@ -893,16 +984,20 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
for (fl = rxq->ifr_fl, i = 0; i < rxq->ifr_nfl; i++, fl++) {
nic_i = fl->ifl_cidx;
nm_i = netmap_idx_n2k(kring, nic_i);
- avail = ctx->isc_rxd_available(ctx->ifc_softc, kring->ring_id, nic_i, INT_MAX);
+ avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
for (n = 0; avail > 0; n++, avail--) {
+ rxd_info_zero(&ri);
+ ri.iri_frags = rxq->ifr_frags;
+ ri.iri_qsidx = kring->ring_id;
+ ri.iri_ifp = ctx->ifc_ifp;
+ ri.iri_cidx = nic_i;
+
error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
- if (error)
- ring->slot[nm_i].len = 0;
- else
- ring->slot[nm_i].len = ri.iri_len - crclen;
+ ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
ring->slot[nm_i].flags = slot_flags;
- bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
- fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
+ if (fl->ifl_sds.ifsd_map)
+ bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
+ fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}
@@ -928,50 +1023,84 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
*/
/* XXX not sure how this will work with multiple free lists */
nm_i = kring->nr_hwcur;
- if (nm_i != head) {
- nic_i = netmap_idx_k2n(kring, nm_i);
- for (n = 0; nm_i != head; n++) {
- struct netmap_slot *slot = &ring->slot[nm_i];
- uint64_t paddr;
- caddr_t vaddr;
- void *addr = PNMB(na, slot, &paddr);
+ if (nm_i == head)
+ return (0);
- if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
- goto ring_reset;
+ iru.iru_paddrs = fl->ifl_bus_addrs;
+ iru.iru_vaddrs = &fl->ifl_vm_addrs[0];
+ iru.iru_idxs = fl->ifl_rxd_idxs;
+ iru.iru_qsidx = rxq->ifr_id;
+ iru.iru_buf_size = fl->ifl_buf_size;
+ iru.iru_flidx = fl->ifl_id;
+ nic_i_start = nic_i = netmap_idx_k2n(kring, nm_i);
+ for (i = 0; nm_i != head; i++) {
+ struct netmap_slot *slot = &ring->slot[nm_i];
+ void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[i]);
+
+ if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
+ goto ring_reset;
+
+ fl->ifl_vm_addrs[i] = addr;
+ if (fl->ifl_sds.ifsd_map && (slot->flags & NS_BUF_CHANGED)) {
+ /* buffer has changed, reload map */
+ netmap_reload_map(na, fl->ifl_ifdi->idi_tag, fl->ifl_sds.ifsd_map[nic_i], addr);
+ }
+ slot->flags &= ~NS_BUF_CHANGED;
- vaddr = addr;
- if (slot->flags & NS_BUF_CHANGED) {
- /* buffer has changed, reload map */
- netmap_reload_map(na, fl->ifl_ifdi->idi_tag, fl->ifl_sds.ifsd_map[nic_i], addr);
- slot->flags &= ~NS_BUF_CHANGED;
- }
- /*
- * XXX we should be batching this operation - TODO
- */
- ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i, &paddr, &vaddr, 1, fl->ifl_buf_size);
+ nm_i = nm_next(nm_i, lim);
+ fl->ifl_rxd_idxs[i] = nic_i = nm_next(nic_i, lim);
+ if (nm_i != head && i < IFLIB_MAX_RX_REFRESH)
+ continue;
+
+ iru.iru_pidx = nic_i_start;
+ iru.iru_count = i;
+ i = 0;
+ ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
+ if (fl->ifl_sds.ifsd_map == NULL) {
+ nic_i_start = nic_i;
+ continue;
+ }
+ nic_i = nic_i_start;
+ for (n = 0; n < iru.iru_count; n++) {
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds.ifsd_map[nic_i],
- BUS_DMASYNC_PREREAD);
- nm_i = nm_next(nm_i, lim);
+ BUS_DMASYNC_PREREAD);
nic_i = nm_next(nic_i, lim);
}
- kring->nr_hwcur = head;
-
- bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /*
- * IMPORTANT: we must leave one free slot in the ring,
- * so move nic_i back by one unit
- */
- nic_i = nm_prev(nic_i, lim);
- ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
+ nic_i_start = nic_i;
}
+ kring->nr_hwcur = head;
+ if (fl->ifl_sds.ifsd_map)
+ bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ /*
+ * IMPORTANT: we must leave one free slot in the ring,
+ * so move nic_i back by one unit
+ */
+ nic_i = nm_prev(nic_i, lim);
+ ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
return 0;
ring_reset:
return netmap_ring_reinit(kring);
}
+static void
+iflib_netmap_intr(struct netmap_adapter *na, int onoff)
+{
+ struct ifnet *ifp = na->ifp;
+ if_ctx_t ctx = ifp->if_softc;
+
+ CTX_LOCK(ctx);
+ if (onoff) {
+ IFDI_INTR_ENABLE(ctx);
+ } else {
+ IFDI_INTR_DISABLE(ctx);
+ }
+ CTX_UNLOCK(ctx);
+}
+
+
static int
iflib_netmap_attach(if_ctx_t ctx)
{
@@ -990,6 +1119,7 @@ iflib_netmap_attach(if_ctx_t ctx)
na.nm_txsync = iflib_netmap_txsync;
na.nm_rxsync = iflib_netmap_rxsync;
na.nm_register = iflib_netmap_register;
+ na.nm_intr = iflib_netmap_intr;
na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
return (netmap_attach(&na));
@@ -1004,6 +1134,8 @@ iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
if (slot == NULL)
return;
+ if (txq->ift_sds.ifsd_map == NULL)
+ return;
for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
@@ -1023,24 +1155,46 @@ iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
struct netmap_adapter *na = NA(ctx->ifc_ifp);
struct netmap_slot *slot;
+ struct if_rxd_update iru;
+ iflib_fl_t fl;
bus_dmamap_t *map;
int nrxd;
+ uint32_t i, j, pidx_start;
slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
if (slot == NULL)
return;
- map = rxq->ifr_fl[0].ifl_sds.ifsd_map;
+ fl = &rxq->ifr_fl[0];
+ map = fl->ifl_sds.ifsd_map;
nrxd = ctx->ifc_softc_ctx.isc_nrxd[0];
- for (int i = 0; i < nrxd; i++, map++) {
- int sj = netmap_idx_n2k(&na->rx_rings[rxq->ifr_id], i);
- uint64_t paddr;
- void *addr;
- caddr_t vaddr;
-
- vaddr = addr = PNMB(na, slot + sj, &paddr);
+ iru.iru_paddrs = fl->ifl_bus_addrs;
+ iru.iru_vaddrs = &fl->ifl_vm_addrs[0];
+ iru.iru_idxs = fl->ifl_rxd_idxs;
+ iru.iru_qsidx = rxq->ifr_id;
+ iru.iru_buf_size = rxq->ifr_fl[0].ifl_buf_size;
+ iru.iru_flidx = 0;
+
+ for (pidx_start = i = j = 0; i < nrxd; i++, j++) {
+ int sj = netmap_idx_n2k(&na->rx_rings[rxq->ifr_id], i);
+ void *addr;
+
+ fl->ifl_rxd_idxs[j] = i;
+ addr = fl->ifl_vm_addrs[j] = PNMB(na, slot + sj, &fl->ifl_bus_addrs[j]);
+ if (map) {
netmap_load_map(na, rxq->ifr_fl[0].ifl_ifdi->idi_tag, *map, addr);
- /* Update descriptor and the cached value */
- ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, i, &paddr, &vaddr, 1, rxq->ifr_fl[0].ifl_buf_size);
+ map++;
+ }
+
+ if (j < IFLIB_MAX_RX_REFRESH && i < nrxd - 1)
+ continue;
+
+ iru.iru_pidx = pidx_start;
+ pidx_start = i;
+ iru.iru_count = j;
+ j = 0;
+ MPASS(pidx_start + j <= nrxd);
+ /* Update descriptors and the cached value */
+ ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
}
/* preserve queue */
if (ctx->ifc_ifp->if_capenable & IFCAP_NETMAP) {
@@ -1060,6 +1214,7 @@ iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
+#define netmap_tx_irq(ifp, qid) do {} while (0)
#endif
@@ -1214,6 +1369,61 @@ iflib_fast_intr(void *arg)
{
iflib_filter_info_t info = arg;
struct grouptask *gtask = info->ifi_task;
+ if (!iflib_started)
+ return (FILTER_HANDLED);
+
+ DBG_COUNTER_INC(fast_intrs);
+ if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
+ return (FILTER_HANDLED);
+
+ GROUPTASK_ENQUEUE(gtask);
+ return (FILTER_HANDLED);
+}
+
+static int
+iflib_fast_intr_rxtx(void *arg)
+{
+ iflib_filter_info_t info = arg;
+ struct grouptask *gtask = info->ifi_task;
+ iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
+ if_ctx_t ctx;
+ int i, cidx;
+
+ if (!iflib_started)
+ return (FILTER_HANDLED);
+
+ DBG_COUNTER_INC(fast_intrs);
+ if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
+ return (FILTER_HANDLED);
+
+ for (i = 0; i < rxq->ifr_ntxqirq; i++) {
+ qidx_t txqid = rxq->ifr_txqid[i];
+
+ ctx = rxq->ifr_ctx;
+
+ if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
+ IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
+ continue;
+ }
+ GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
+ }
+ if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
+ cidx = rxq->ifr_cq_cidx;
+ else
+ cidx = rxq->ifr_fl[0].ifl_cidx;
+ if (iflib_rxd_avail(ctx, rxq, cidx, 1))
+ GROUPTASK_ENQUEUE(gtask);
+ else
+ IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
+ return (FILTER_HANDLED);
+}
+
+
+static int
+iflib_fast_intr_ctx(void *arg)
+{
+ iflib_filter_info_t info = arg;
+ struct grouptask *gtask = info->ifi_task;
if (!iflib_started)
return (FILTER_HANDLED);
@@ -1337,7 +1547,7 @@ iflib_txsd_alloc(iflib_txq_t txq)
}
/* Create the descriptor buffer dma maps */
-#if defined(ACPI_DMAR) || (!(defined(__i386__) && !defined(__amd64__)))
+#if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
if ((ctx->ifc_flags & IFC_DMAR) == 0)
return (0);
@@ -1438,9 +1648,12 @@ iflib_txq_setup(iflib_txq_t txq)
/* Set number of descriptors available */
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
+ /* XXX make configurable */
+ txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
/* Reset indices */
- txq->ift_cidx_processed = txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
+ txq->ift_cidx_processed = 0;
+ txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
@@ -1517,7 +1730,7 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
}
/* Create the descriptor buffer dma maps */
-#if defined(ACPI_DMAR) || (!(defined(__i386__) && !defined(__amd64__)))
+#if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
if ((ctx->ifc_flags & IFC_DMAR) == 0)
continue;
@@ -1531,7 +1744,7 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
if (err != 0) {
- device_printf(dev, "Unable to create TX DMA map\n");
+ device_printf(dev, "Unable to create RX buffer DMA map\n");
goto fail;
}
}
@@ -1589,6 +1802,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
caddr_t cl, *sd_cl;
struct mbuf **sd_m;
uint8_t *sd_flags;
+ struct if_rxd_update iru;
bus_dmamap_t *sd_map;
int n, i = 0;
uint64_t bus_addr;
@@ -1614,7 +1828,12 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
DBG_COUNTER_INC(fl_refills);
if (n > 8)
DBG_COUNTER_INC(fl_refills_large);
-
+ iru.iru_paddrs = fl->ifl_bus_addrs;
+ iru.iru_vaddrs = &fl->ifl_vm_addrs[0];
+ iru.iru_idxs = fl->ifl_rxd_idxs;
+ iru.iru_qsidx = fl->ifl_rxq->ifr_id;
+ iru.iru_buf_size = fl->ifl_buf_size;
+ iru.iru_flidx = fl->ifl_id;
while (n--) {
/*
* We allocate an uninitialized mbuf + cluster, mbuf is
@@ -1637,19 +1856,6 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
#endif
DBG_COUNTER_INC(rx_allocs);
-#ifdef notyet
- if ((sd_flags[pidx] & RX_SW_DESC_MAP_CREATED) == 0) {
- int err;
-
- if ((err = bus_dmamap_create(fl->ifl_ifdi->idi_tag, 0, &sd_map[idx]))) {
- log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
- uma_zfree(fl->ifl_zone, cl);
- n = 0;
- goto done;
- }
- sd_flags[idx] |= RX_SW_DESC_MAP_CREATED;
- }
-#endif
#if defined(__i386__) || defined(__amd64__)
if (!IS_DMAR(ctx)) {
bus_addr = pmap_kextract((vm_offset_t)cl);
@@ -1661,8 +1867,11 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
cb_arg.error = 0;
q = fl->ifl_rxq;
+ MPASS(sd_map != NULL);
+ MPASS(sd_map[idx] != NULL);
err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[idx],
cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0);
+ bus_dmamap_sync(fl->ifl_desc_tag, sd_map[idx], BUS_DMASYNC_PREREAD);
if (err != 0 || cb_arg.error) {
/*
@@ -1681,6 +1890,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
MPASS(sd_m[idx] == NULL);
sd_cl[idx] = cl;
sd_m[idx] = m;
+ fl->ifl_rxd_idxs[i] = idx;
fl->ifl_bus_addrs[i] = bus_addr;
fl->ifl_vm_addrs[i] = cl;
fl->ifl_credits++;
@@ -1691,8 +1901,9 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
idx = 0;
}
if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
- ctx->isc_rxd_refill(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx,
- fl->ifl_bus_addrs, fl->ifl_vm_addrs, i, fl->ifl_buf_size);
+ iru.iru_pidx = pidx;
+ iru.iru_count = i;
+ ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
i = 0;
pidx = idx;
}
@@ -1705,6 +1916,10 @@ done:
pidx = fl->ifl_size - 1;
else
pidx = fl->ifl_pidx - 1;
+
+ if (sd_map)
+ bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
}
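
Note the index handed to isc_rxd_flush() above: the hardware tail is written to point at the last descriptor actually populated, so the producer index is stepped back by one, wrapping to the end of the ring when pidx is 0 -- the usual "never let the tail catch the head" idiom, equivalent to:

	pidx = (fl->ifl_pidx == 0) ? fl->ifl_size - 1 : fl->ifl_pidx - 1;
	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);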
@@ -1759,6 +1974,13 @@ iflib_fl_bufs_free(iflib_fl_t fl)
*sd_cl = NULL;
*sd_m = NULL;
}
+#ifdef INVARIANTS
+ for (i = 0; i < fl->ifl_size; i++) {
+ MPASS(fl->ifl_sds.ifsd_flags[i] == 0);
+ MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
+ MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
+ }
+#endif
/*
* Reset free list values
*/
@@ -1790,12 +2012,17 @@ iflib_fl_setup(iflib_fl_t fl)
*/
if (sctx->isc_max_frame_size <= 2048)
fl->ifl_buf_size = MCLBYTES;
+#ifndef CONTIGMALLOC_WORKS
+ else
+ fl->ifl_buf_size = MJUMPAGESIZE;
+#else
else if (sctx->isc_max_frame_size <= 4096)
fl->ifl_buf_size = MJUMPAGESIZE;
else if (sctx->isc_max_frame_size <= 9216)
fl->ifl_buf_size = MJUM9BYTES;
else
fl->ifl_buf_size = MJUM16BYTES;
+#endif
if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
@@ -1860,7 +2087,6 @@ iflib_timer(void *arg)
{
iflib_txq_t txq = arg;
if_ctx_t ctx = txq->ift_ctx;
- if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
return;
@@ -1871,11 +2097,15 @@ iflib_timer(void *arg)
*/
IFDI_TIMER(ctx, txq->ift_id);
if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
- (ctx->ifc_pause_frames == 0))
+ ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
+ (ctx->ifc_pause_frames == 0)))
goto hung;
- if (TXQ_AVAIL(txq) <= 2*scctx->isc_tx_nsegments ||
- ifmp_ring_is_stalled(txq->ift_br[0]))
+ if (ifmp_ring_is_stalled(txq->ift_br))
+ txq->ift_qstatus = IFLIB_QUEUE_HUNG;
+ txq->ift_cleaned_prev = txq->ift_cleaned;
+ /* handle any laggards */
+ if (txq->ift_db_pending)
GROUPTASK_ENQUEUE(&txq->ift_task);
ctx->ifc_pause_frames = 0;
@@ -1927,11 +2157,11 @@ iflib_init_locked(if_ctx_t ctx)
for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
CALLOUT_LOCK(txq);
callout_stop(&txq->ift_timer);
- callout_stop(&txq->ift_db_check);
CALLOUT_UNLOCK(txq);
iflib_netmap_txq_init(ctx, txq);
}
for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
+ MPASS(rxq->ifr_id == i);
iflib_netmap_rxq_init(ctx, rxq);
}
#ifdef INVARIANTS
@@ -1940,6 +2170,9 @@ iflib_init_locked(if_ctx_t ctx)
IFDI_INIT(ctx);
MPASS(if_getdrvflags(ifp) == i);
for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
+ /* XXX this should really be done on a per-queue basis */
+ if (if_getcapenable(ifp) & IFCAP_NETMAP)
+ continue;
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
if (iflib_fl_setup(fl)) {
device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n");
@@ -1994,9 +2227,9 @@ iflib_stop(if_ctx_t ctx)
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
IFDI_INTR_DISABLE(ctx);
- DELAY(100000);
+ DELAY(1000);
IFDI_STOP(ctx);
- DELAY(100000);
+ DELAY(1000);
iflib_debug_reset();
/* Wait for current tx queue users to exit to disarm watchdog timer. */
@@ -2014,7 +2247,7 @@ iflib_stop(if_ctx_t ctx)
txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
txq->ift_pullups = 0;
- ifmp_ring_reset_stats(txq->ift_br[0]);
+ ifmp_ring_reset_stats(txq->ift_br);
for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++)
bzero((void *)di->idi_vaddr, di->idi_size);
}
@@ -2029,15 +2262,38 @@ iflib_stop(if_ctx_t ctx)
}
}
+static inline caddr_t
+calc_next_rxd(iflib_fl_t fl, int cidx)
+{
+ qidx_t size;
+ int nrxd;
+ caddr_t start, end, cur, next;
+
+ nrxd = fl->ifl_size;
+ size = fl->ifl_rxd_size;
+ start = fl->ifl_ifdi->idi_vaddr;
+
+ if (__predict_false(size == 0))
+ return (start);
+ cur = start + size*cidx;
+ end = start + size*nrxd;
+ next = CACHE_PTR_NEXT(cur);
+ return (next < end ? next : start);
+}
+
static inline void
prefetch_pkts(iflib_fl_t fl, int cidx)
{
int nextptr;
int nrxd = fl->ifl_size;
+ caddr_t next_rxd;
+
nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
+ next_rxd = calc_next_rxd(fl, cidx);
+ prefetch(next_rxd);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
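calc_next_rxd above returns the address of the receive descriptor one cache-line step ahead of the current consumer index (CACHE_PTR_NEXT is presumably a cache-line rounding macro defined elsewhere in iflib.c), wrapping to the ring base when that address would fall past the end of the descriptor area; prefetch_pkts uses it so the next hardware descriptor is already warm in cache before iflib_rxd_pkt_get reads it. The size == 0 guard covers drivers that never filled in isc_rxd_size.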
@@ -2049,7 +2305,7 @@ prefetch_pkts(iflib_fl_t fl, int cidx)
}
static void
-rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int *cltype, int unload, iflib_fl_t *pfl, int *pcidx)
+rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
{
int flid, cidx;
bus_dmamap_t map;
@@ -2057,16 +2313,20 @@ rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int *cltype, int unload, ifli
iflib_dma_info_t di;
int next;
+ map = NULL;
flid = irf->irf_flid;
cidx = irf->irf_idx;
fl = &rxq->ifr_fl[flid];
+ sd->ifsd_fl = fl;
+ sd->ifsd_cidx = cidx;
+ sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];
+ sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
fl->ifl_credits--;
#if MEMORY_LOGGING
fl->ifl_m_dequeued++;
- if (cltype)
- fl->ifl_cl_dequeued++;
#endif
- prefetch_pkts(fl, cidx);
+ if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
+ prefetch_pkts(fl, cidx);
if (fl->ifl_sds.ifsd_map != NULL) {
next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
prefetch(&fl->ifl_sds.ifsd_map[next]);
@@ -2082,45 +2342,39 @@ rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int *cltype, int unload, ifli
if (unload)
bus_dmamap_unload(fl->ifl_desc_tag, map);
}
- if (__predict_false(++fl->ifl_cidx == fl->ifl_size)) {
- fl->ifl_cidx = 0;
+ fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
+ if (__predict_false(fl->ifl_cidx == 0))
fl->ifl_gen = 0;
- }
- /* YES ick */
- if (cltype)
- *cltype = fl->ifl_cltype;
- *pfl = fl;
- *pcidx = cidx;
+ if (map != NULL)
+ bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static struct mbuf *
-assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri)
+assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
{
- int i, padlen , flags, cltype;
- struct mbuf *m, *mh, *mt, *sd_m;
- iflib_fl_t fl;
- int cidx;
- caddr_t cl, sd_cl;
+	int i, padlen, flags;
+ struct mbuf *m, *mh, *mt;
+ caddr_t cl;
i = 0;
mh = NULL;
do {
- rxd_frag_to_sd(rxq, &ri->iri_frags[i], &cltype, TRUE, &fl, &cidx);
- sd_m = fl->ifl_sds.ifsd_m[cidx];
- sd_cl = fl->ifl_sds.ifsd_cl[cidx];
+ rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd);
- MPASS(sd_cl != NULL);
- MPASS(sd_m != NULL);
+ MPASS(*sd->ifsd_cl != NULL);
+ MPASS(*sd->ifsd_m != NULL);
/* Don't include zero-length frags */
if (ri->iri_frags[i].irf_len == 0) {
/* XXX we can save the cluster here, but not the mbuf */
- m_init(sd_m, M_NOWAIT, MT_DATA, 0);
- m_free(sd_m);
- fl->ifl_sds.ifsd_m[cidx] = NULL;
+ m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
+ m_free(*sd->ifsd_m);
+ *sd->ifsd_m = NULL;
continue;
}
- m = sd_m;
+ m = *sd->ifsd_m;
+ *sd->ifsd_m = NULL;
if (mh == NULL) {
flags = M_PKTHDR|M_EXT;
mh = mt = m;
@@ -2132,13 +2386,12 @@ assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri)
/* assuming padding is only on the first fragment */
padlen = 0;
}
- fl->ifl_sds.ifsd_m[cidx] = NULL;
- cl = fl->ifl_sds.ifsd_cl[cidx];
- fl->ifl_sds.ifsd_cl[cidx] = NULL;
+ cl = *sd->ifsd_cl;
+ *sd->ifsd_cl = NULL;
/* Can these two be made one ? */
m_init(m, M_NOWAIT, MT_DATA, flags);
- m_cljset(m, cl, cltype);
+ m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
/*
* These must follow m_init and m_cljset
*/
@@ -2156,23 +2409,24 @@ assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri)
static struct mbuf *
iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
{
+ struct if_rxsd sd;
struct mbuf *m;
- iflib_fl_t fl;
- caddr_t sd_cl;
- int cidx;
/* should I merge this back in now that the two paths are basically duplicated? */
if (ri->iri_nfrags == 1 &&
ri->iri_frags[0].irf_len <= IFLIB_RX_COPY_THRESH) {
- rxd_frag_to_sd(rxq, &ri->iri_frags[0], NULL, FALSE, &fl, &cidx);
- m = fl->ifl_sds.ifsd_m[cidx];
- fl->ifl_sds.ifsd_m[cidx] = NULL;
- sd_cl = fl->ifl_sds.ifsd_cl[cidx];
+ rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd);
+ m = *sd.ifsd_m;
+ *sd.ifsd_m = NULL;
m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
- memcpy(m->m_data, sd_cl, ri->iri_len);
+#ifndef __NO_STRICT_ALIGNMENT
+ if (!IP_ALIGNED(m))
+ m->m_data += 2;
+#endif
+ memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
m->m_len = ri->iri_frags[0].irf_len;
} else {
- m = assemble_segments(rxq, ri);
+ m = assemble_segments(rxq, ri, &sd);
}
m->m_pkthdr.len = ri->iri_len;
m->m_pkthdr.rcvif = ri->iri_ifp;
@@ -2186,27 +2440,33 @@ iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
}
static bool
-iflib_rxeof(iflib_rxq_t rxq, int budget)
+iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
{
if_ctx_t ctx = rxq->ifr_ctx;
if_shared_ctx_t sctx = ctx->ifc_sctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
int avail, i;
- uint16_t *cidxp;
+ qidx_t *cidxp;
struct if_rxd_info ri;
int err, budget_left, rx_bytes, rx_pkts;
iflib_fl_t fl;
struct ifnet *ifp;
int lro_enabled;
+
/*
* XXX early demux data packets so that if_input processing only handles
* acks in interrupt context
*/
struct mbuf *m, *mh, *mt;
- if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &budget)) {
- return (FALSE);
+ ifp = ctx->ifc_ifp;
+#ifdef DEV_NETMAP
+ if (ifp->if_capenable & IFCAP_NETMAP) {
+ u_int work = 0;
+ if (netmap_rx_irq(ifp, rxq->ifr_id, &work))
+ return (FALSE);
}
+#endif
mh = mt = NULL;
MPASS(budget > 0);
@@ -2230,18 +2490,19 @@ iflib_rxeof(iflib_rxq_t rxq, int budget)
/*
* Reset client set fields to their default values
*/
- bzero(&ri, sizeof(ri));
+ rxd_info_zero(&ri);
ri.iri_qsidx = rxq->ifr_id;
ri.iri_cidx = *cidxp;
- ri.iri_ifp = ctx->ifc_ifp;
+ ri.iri_ifp = ifp;
ri.iri_frags = rxq->ifr_frags;
err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
- /* in lieu of handling correctly - make sure it isn't being unhandled */
- MPASS(err == 0);
+ if (err)
+ goto err;
if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
*cidxp = ri.iri_cidx;
/* Update our consumer index */
+ /* XXX NB: shurd - check if this is still safe */
while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
rxq->ifr_cq_gen = 0;
@@ -2274,12 +2535,15 @@ iflib_rxeof(iflib_rxq_t rxq, int budget)
for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
__iflib_fl_refill_lt(ctx, fl, budget + 8);
- ifp = ctx->ifc_ifp;
lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
while (mh != NULL) {
m = mh;
mh = mh->m_nextpkt;
m->m_nextpkt = NULL;
+#ifndef __NO_STRICT_ALIGNMENT
+ if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
+ continue;
+#endif
rx_bytes += m->m_pkthdr.len;
rx_pkts++;
#if defined(INET6) || defined(INET)
@@ -2302,43 +2566,76 @@ iflib_rxeof(iflib_rxq_t rxq, int budget)
if (avail)
return true;
return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
+err:
+ CTX_LOCK(ctx);
+ ctx->ifc_flags |= IFC_DO_RESET;
+ iflib_admin_intr_deferred(ctx);
+ CTX_UNLOCK(ctx);
+ return (false);
+}
+
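The err: label above replaces the old MPASS(err == 0): a failure from isc_rxd_pkt_get now marks the context with IFC_DO_RESET and schedules the admin task; per the _task_fn_admin hunk further down, that task clears the flag and calls iflib_if_init_locked to reinitialize the interface instead of asserting.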
+#define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
+static inline qidx_t
+txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
+{
+ qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
+ qidx_t minthresh = txq->ift_size / 8;
+ if (in_use > 4*minthresh)
+ return (notify_count);
+ if (in_use > 2*minthresh)
+ return (notify_count >> 1);
+ if (in_use > minthresh)
+ return (notify_count >> 3);
+ return (0);
+}
+
+static inline qidx_t
+txq_max_rs_deferred(iflib_txq_t txq)
+{
+ qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
+ qidx_t minthresh = txq->ift_size / 8;
+ if (txq->ift_in_use > 4*minthresh)
+ return (notify_count);
+ if (txq->ift_in_use > 2*minthresh)
+ return (notify_count >> 1);
+ if (txq->ift_in_use > minthresh)
+ return (notify_count >> 2);
+ return (notify_count >> 4);
}
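A worked example of the two deferral helpers above, using hypothetical values since ift_update_freq is not shown in this diff:

    /*
     * Hypothetical numbers: ift_size = 1024, ift_update_freq = 16.
     * TXD_NOTIFY_COUNT = 1024/16 - 1 = 63; minthresh = 1024/8 = 128.
     *
     * txq_max_db_deferred: in_use > 512 allows 63 descriptors to
     * accumulate before a doorbell write, in_use > 256 allows 31,
     * in_use > 128 allows 7, and a lightly loaded queue gets 0,
     * i.e. the doorbell rings immediately when latency matters most.
     *
     * txq_max_rs_deferred: 63 / 31 / 15 across the same occupancy
     * tiers, with a floor of 63 >> 4 = 3, so a report-status
     * writeback is still requested every few packets on an idle
     * queue and completions are never deferred indefinitely.
     */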
#define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
#define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
-#define TXQ_MAX_DB_DEFERRED(size) (size >> 5)
+
+#define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
+#define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
#define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
-static __inline void
-iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring)
-{
- uint32_t dbval;
+/* forward compatibility for cxgb */
+#define FIRST_QSET(ctx) 0
+#define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
+#define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
+#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
+#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
+
+/* XXX we should be setting this to something other than zero */
+#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
+#define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max)
- if (ring || txq->ift_db_pending >=
- TXQ_MAX_DB_DEFERRED(txq->ift_size)) {
+static inline bool
+iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
+{
+ qidx_t dbval, max;
+ bool rang;
- /* the lock will only ever be contended in the !min_latency case */
- if (!TXDB_TRYLOCK(txq))
- return;
+ rang = false;
+ max = TXQ_MAX_DB_DEFERRED(txq, in_use);
+ if (ring || txq->ift_db_pending >= max) {
dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
txq->ift_db_pending = txq->ift_npending = 0;
- TXDB_UNLOCK(txq);
+ rang = true;
}
-}
-
-static void
-iflib_txd_deferred_db_check(void * arg)
-{
- iflib_txq_t txq = arg;
-
- /* simple non-zero boolean so use bitwise OR */
- if ((txq->ift_db_pending | txq->ift_npending) &&
- txq->ift_db_pending >= txq->ift_db_pending_queued)
- iflib_txd_db_check(txq->ift_ctx, txq, TRUE);
- txq->ift_db_pending_queued = 0;
- if (ifmp_ring_is_stalled(txq->ift_br[0]))
- iflib_txq_check_drain(txq, 4);
+ return (rang);
}
#ifdef PKT_DEBUG
@@ -2675,6 +2972,25 @@ err:
return (EFBIG);
}
+static inline caddr_t
+calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
+{
+ qidx_t size;
+ int ntxd;
+ caddr_t start, end, cur, next;
+
+ ntxd = txq->ift_size;
+ size = txq->ift_txd_size[qid];
+ start = txq->ift_ifdi[qid].idi_vaddr;
+
+ if (__predict_false(size == 0))
+ return (start);
+ cur = start + size*cidx;
+ end = start + size*ntxd;
+ next = CACHE_PTR_NEXT(cur);
+ return (next < end ? next : start);
+}
+
static int
iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
{
@@ -2683,6 +2999,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
if_softc_ctx_t scctx;
bus_dma_segment_t *segs;
struct mbuf *m_head;
+ void *next_txd;
bus_dmamap_t map;
struct if_pkt_info pi;
int remap = 0;
@@ -2703,17 +3020,22 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
*/
cidx = txq->ift_cidx;
pidx = txq->ift_pidx;
- next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
+ if (ctx->ifc_flags & IFC_PREFETCH) {
+ next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
+ if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
+ next_txd = calc_next_txd(txq, cidx, 0);
+ prefetch(next_txd);
+ }
- /* prefetch the next cache line of mbuf pointers and flags */
- prefetch(&txq->ift_sds.ifsd_m[next]);
- if (txq->ift_sds.ifsd_map != NULL) {
- prefetch(&txq->ift_sds.ifsd_map[next]);
+ /* prefetch the next cache line of mbuf pointers and flags */
+ prefetch(&txq->ift_sds.ifsd_m[next]);
+ if (txq->ift_sds.ifsd_map != NULL) {
+ prefetch(&txq->ift_sds.ifsd_map[next]);
+ next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
+ prefetch(&txq->ift_sds.ifsd_flags[next]);
+ }
+ } else if (txq->ift_sds.ifsd_map != NULL)
map = txq->ift_sds.ifsd_map[pidx];
- next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
- prefetch(&txq->ift_sds.ifsd_flags[next]);
- }
-
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
desc_tag = txq->ift_tso_desc_tag;
@@ -2723,7 +3045,8 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
max_segs = scctx->isc_tx_nsegments;
}
m_head = *m_headp;
- bzero(&pi, sizeof(pi));
+
+ pkt_info_zero(&pi);
pi.ipi_len = m_head->m_pkthdr.len;
pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
@@ -2785,6 +3108,19 @@ defrag:
GROUPTASK_ENQUEUE(&txq->ift_task);
return (ENOBUFS);
}
+ /*
+ * On Intel cards we can greatly reduce the number of TX interrupts
+ * we see by only setting report status on every Nth descriptor.
+ * However, this also means that the driver will need to keep track
+ * of the descriptors that RS was set on to check them for the DD bit.
+ */
+ txq->ift_rs_pending += nsegs + 1;
+ if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
+ iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs - 1) <= MAX_TX_DESC(ctx)) {
+ pi.ipi_flags |= IPI_TX_INTR;
+ txq->ift_rs_pending = 0;
+ }
+
pi.ipi_segs = segs;
pi.ipi_nsegs = nsegs;
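For context on the IPI_TX_INTR logic above, a minimal driver-side sketch of honoring the flag; the descriptor layout, bit values, and names here are hypothetical illustrations, not code from this commit:

    /* Hypothetical descriptor: RS asks the NIC for a completion
     * writeback, DD is set by the NIC once the send has finished. */
    struct hypo_txd {
        uint64_t    addr;
        uint32_t    cmd;    /* HYPO_TXD_RS requested here */
        uint32_t    status; /* HYPO_TXD_DD reported here */
    };
    #define HYPO_TXD_RS  0x08000000
    #define HYPO_TXD_DD  0x00000001
    #define HYPO_NRS     32  /* depth of the RS bookkeeping ring */

    /* In the driver's isc_txd_encap: request a writeback only when
     * iflib sets IPI_TX_INTR, and record where it was requested so
     * isc_txd_credits_update can later poll that slot's DD bit. */
    if (pi->ipi_flags & IPI_TX_INTR) {
        last->cmd |= htole32(HYPO_TXD_RS);
        sc->rs_idxs[sc->rs_pidx++ & (HYPO_NRS - 1)] = pi->ipi_new_pidx;
    }

Batching RS this way is exactly what the comment describes: fewer writebacks means fewer TX completion interrupts, at the cost of the extra bookkeeping in credits_update.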
@@ -2792,13 +3128,14 @@ defrag:
#ifdef PKT_DEBUG
print_pkt(&pi);
#endif
+ if (map != NULL)
+ bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE);
if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
- bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
+ if (map != NULL)
+ bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
DBG_COUNTER_INC(tx_encap);
- MPASS(pi.ipi_new_pidx >= 0 &&
- pi.ipi_new_pidx < txq->ift_size);
+ MPASS(pi.ipi_new_pidx < txq->ift_size);
ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
if (pi.ipi_new_pidx < pi.ipi_pidx) {
@@ -2813,6 +3150,7 @@ defrag:
MPASS(pi.ipi_new_pidx != pidx);
MPASS(ndesc > 0);
txq->ift_in_use += ndesc;
+
/*
* We update the last software descriptor again here because there may
* be a sentinel and/or there may be more mbufs than segments
@@ -2837,36 +3175,6 @@ defrag_failed:
return (ENOMEM);
}
-/* forward compatibility for cxgb */
-#define FIRST_QSET(ctx) 0
-
-#define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
-#define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
-#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
-#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
-#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
-#define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max)
-
-
-
-/* if there are more than TXQ_MIN_OCCUPANCY packets pending we consider deferring
- * doorbell writes
- *
- * ORing with 2 assures that min occupancy is never less than 2 without any conditional logic
- */
-#define TXQ_MIN_OCCUPANCY(size) ((size >> 6)| 0x2)
-
-static inline int
-iflib_txq_min_occupancy(iflib_txq_t txq)
-{
- if_ctx_t ctx;
-
- ctx = txq->ift_ctx;
- return (get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx,
- txq->ift_gen) < TXQ_MIN_OCCUPANCY(txq->ift_size) +
- MAX_TX_DESC(ctx));
-}
-
static void
iflib_tx_desc_free(iflib_txq_t txq, int n)
{
@@ -2875,6 +3183,7 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
struct mbuf *m, **ifsd_m;
uint8_t *ifsd_flags;
bus_dmamap_t *ifsd_map;
+ bool do_prefetch;
cidx = txq->ift_cidx;
gen = txq->ift_gen;
@@ -2884,11 +3193,13 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
ifsd_flags = txq->ift_sds.ifsd_flags;
ifsd_m = txq->ift_sds.ifsd_m;
ifsd_map = txq->ift_sds.ifsd_map;
+ do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
while (n--) {
- prefetch(ifsd_m[(cidx + 3) & mask]);
- prefetch(ifsd_m[(cidx + 4) & mask]);
-
+ if (do_prefetch) {
+ prefetch(ifsd_m[(cidx + 3) & mask]);
+ prefetch(ifsd_m[(cidx + 4) & mask]);
+ }
if (ifsd_m[cidx] != NULL) {
prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]);
@@ -2951,24 +3262,34 @@ iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
txq->ift_cleaned += reclaim;
txq->ift_in_use -= reclaim;
- if (txq->ift_active == FALSE)
- txq->ift_active = TRUE;
-
return (reclaim);
}
static struct mbuf **
-_ring_peek_one(struct ifmp_ring *r, int cidx, int offset)
+_ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
{
+ int next, size;
+ struct mbuf **items;
- return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (r->size-1)]));
+ size = r->size;
+ next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
+ items = __DEVOLATILE(struct mbuf **, &r->items[0]);
+
+ prefetch(items[(cidx + offset) & (size-1)]);
+ if (remaining > 1) {
+ prefetch(&items[next]);
+ prefetch(items[(cidx + offset + 1) & (size-1)]);
+ prefetch(items[(cidx + offset + 2) & (size-1)]);
+ prefetch(items[(cidx + offset + 3) & (size-1)]);
+ }
+ return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
}
static void
iflib_txq_check_drain(iflib_txq_t txq, int budget)
{
- ifmp_ring_check_drainage(txq->ift_br[0], budget);
+ ifmp_ring_check_drainage(txq->ift_br, budget);
}
static uint32_t
@@ -2978,7 +3299,7 @@ iflib_txq_can_drain(struct ifmp_ring *r)
if_ctx_t ctx = txq->ift_ctx;
return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) ||
- ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, txq->ift_cidx_processed, false));
+ ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false));
}
static uint32_t
@@ -2986,16 +3307,19 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
{
iflib_txq_t txq = r->cookie;
if_ctx_t ctx = txq->ift_ctx;
- if_t ifp = ctx->ifc_ifp;
+ struct ifnet *ifp = ctx->ifc_ifp;
struct mbuf **mp, *m;
- int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail, err, in_use_prev, desc_used;
+ int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail;
+ int reclaimed, err, in_use_prev, desc_used;
+ bool do_prefetch, ring, rang;
if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
!LINK_ACTIVE(ctx))) {
DBG_COUNTER_INC(txq_drain_notready);
return (0);
}
-
+ reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
avail = IDXDIFF(pidx, cidx, r->size);
if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
DBG_COUNTER_INC(txq_drain_flushing);
@@ -3005,16 +3329,17 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
}
return (avail);
}
- iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+
if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
CALLOUT_LOCK(txq);
callout_stop(&txq->ift_timer);
- callout_stop(&txq->ift_db_check);
CALLOUT_UNLOCK(txq);
DBG_COUNTER_INC(txq_drain_oactive);
return (0);
}
+ if (reclaimed)
+ txq->ift_qstatus = IFLIB_QUEUE_IDLE;
consumed = mcast_sent = bytes_sent = pkt_sent = 0;
count = MIN(avail, TX_BATCH_SIZE);
#ifdef INVARIANTS
@@ -3022,48 +3347,50 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
avail, ctx->ifc_flags, TXQ_AVAIL(txq));
#endif
+ do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
+ avail = TXQ_AVAIL(txq);
+ for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
+ int pidx_prev, rem = do_prefetch ? count - i : 0;
- for (desc_used = i = 0; i < count && TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2; i++) {
- mp = _ring_peek_one(r, cidx, i);
+ mp = _ring_peek_one(r, cidx, i, rem);
MPASS(mp != NULL && *mp != NULL);
+ if (__predict_false(*mp == (struct mbuf *)txq)) {
+ consumed++;
+ reclaimed++;
+ continue;
+ }
in_use_prev = txq->ift_in_use;
- if ((err = iflib_encap(txq, mp)) == ENOBUFS) {
+ pidx_prev = txq->ift_pidx;
+ err = iflib_encap(txq, mp);
+ if (__predict_false(err)) {
DBG_COUNTER_INC(txq_drain_encapfail);
/* no room - bail out */
- break;
- }
- consumed++;
- if (err) {
+ if (err == ENOBUFS)
+ break;
+ consumed++;
DBG_COUNTER_INC(txq_drain_encapfail);
/* we can't send this packet - skip it */
continue;
}
+ consumed++;
pkt_sent++;
m = *mp;
DBG_COUNTER_INC(tx_sent);
bytes_sent += m->m_pkthdr.len;
- if (m->m_flags & M_MCAST)
- mcast_sent++;
+ mcast_sent += !!(m->m_flags & M_MCAST);
+ avail = TXQ_AVAIL(txq);
txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
desc_used += (txq->ift_in_use - in_use_prev);
- iflib_txd_db_check(ctx, txq, FALSE);
ETHER_BPF_MTAP(ifp, m);
- if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
- break;
-
- if (desc_used >= TXQ_MAX_DB_CONSUMED(txq->ift_size))
+ if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
break;
+ rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
}
- if ((iflib_min_tx_latency || iflib_txq_min_occupancy(txq)) && txq->ift_db_pending)
- iflib_txd_db_check(ctx, txq, TRUE);
- else if ((txq->ift_db_pending || TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2) &&
- (callout_pending(&txq->ift_db_check) == 0)) {
- txq->ift_db_pending_queued = txq->ift_db_pending;
- callout_reset_on(&txq->ift_db_check, 1, iflib_txd_deferred_db_check,
- txq, txq->ift_db_check.c_cpu);
- }
+ /* deliberate use of bitwise or to avoid gratuitous short-circuit */
+ ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
+ iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
if (mcast_sent)
@@ -3093,12 +3420,13 @@ iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
CALLOUT_LOCK(txq);
callout_stop(&txq->ift_timer);
- callout_stop(&txq->ift_db_check);
CALLOUT_UNLOCK(txq);
avail = IDXDIFF(pidx, cidx, r->size);
for (i = 0; i < avail; i++) {
- mp = _ring_peek_one(r, cidx, i);
+ mp = _ring_peek_one(r, cidx, i, avail - i);
+ if (__predict_false(*mp == (struct mbuf *)txq))
+ continue;
m_freem(*mp);
}
MPASS(ifmp_ring_is_stalled(r) == 0);
@@ -3110,7 +3438,7 @@ iflib_ifmp_purge(iflib_txq_t txq)
{
struct ifmp_ring *r;
- r = txq->ift_br[0];
+ r = txq->ift_br;
r->drain = iflib_txq_drain_free;
r->can_drain = iflib_txq_drain_always;
@@ -3125,13 +3453,30 @@ _task_fn_tx(void *context)
{
iflib_txq_t txq = context;
if_ctx_t ctx = txq->ift_ctx;
+ struct ifnet *ifp = ctx->ifc_ifp;
+ int rc;
#ifdef IFLIB_DIAGNOSTICS
txq->ift_cpu_exec_count[curcpu]++;
#endif
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
return;
- ifmp_ring_check_drainage(txq->ift_br[0], TX_BATCH_SIZE);
+ if ((ifp->if_capenable & IFCAP_NETMAP)) {
+ if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
+ netmap_tx_irq(ifp, txq->ift_id);
+ IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
+ return;
+ }
+ if (txq->ift_db_pending)
+ ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE);
+ else
+ ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
+ if (ctx->ifc_flags & IFC_LEGACY)
+ IFDI_INTR_ENABLE(ctx);
+ else {
+ rc = IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
+		KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but it is not implemented in the driver"));
+ }
}
static void
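Two behaviors are worth noting in the reworked _task_fn_tx above. In netmap mode the task only harvests credits and kicks netmap_tx_irq. Otherwise, when doorbell writes are still pending, the task enqueues the txq pointer itself into the mp_ring as a sentinel; iflib_txq_drain and iflib_txq_drain_free recognize it with the *mp == (struct mbuf *)txq checks seen earlier and skip it without freeing, so the drain path (and therefore iflib_txd_db_check) is guaranteed to run even when no real packets are waiting.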
@@ -3148,13 +3493,12 @@ _task_fn_rx(void *context)
DBG_COUNTER_INC(task_fn_rxs);
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
return;
-
if ((more = iflib_rxeof(rxq, 16 /* XXX */)) == false) {
if (ctx->ifc_flags & IFC_LEGACY)
IFDI_INTR_ENABLE(ctx);
else {
DBG_COUNTER_INC(rx_intr_enables);
- rc = IFDI_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
+ rc = IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
}
}
@@ -3185,6 +3529,10 @@ _task_fn_admin(void *context)
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
IFDI_LINK_INTR_ENABLE(ctx);
+ if (ctx->ifc_flags & IFC_DO_RESET) {
+ ctx->ifc_flags &= ~IFC_DO_RESET;
+ iflib_if_init_locked(ctx);
+ }
CTX_UNLOCK(ctx);
if (LINK_ACTIVE(ctx) == 0)
@@ -3306,7 +3654,7 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
}
#endif
DBG_COUNTER_INC(tx_seen);
- err = ifmp_ring_enqueue(txq->ift_br[0], (void **)&m, 1, TX_BATCH_SIZE);
+ err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE);
if (err) {
GROUPTASK_ENQUEUE(&txq->ift_task);
@@ -3314,7 +3662,7 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
#ifdef DRIVER_BACKPRESSURE
txq->ift_closed = TRUE;
#endif
- ifmp_ring_check_drainage(txq->ift_br[0], TX_BATCH_SIZE);
+ ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
m_freem(m);
} else if (TXQ_AVAIL(txq) < (txq->ift_size >> 1)) {
GROUPTASK_ENQUEUE(&txq->ift_task);
@@ -3334,7 +3682,7 @@ iflib_if_qflush(if_t ifp)
ctx->ifc_flags |= IFC_QFLUSH;
CTX_UNLOCK(ctx);
for (i = 0; i < NTXQSETS(ctx); i++, txq++)
- while (!(ifmp_ring_is_idle(txq->ift_br[0]) || ifmp_ring_is_stalled(txq->ift_br[0])))
+ while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
iflib_txq_check_drain(txq, 0);
CTX_LOCK(ctx);
ctx->ifc_flags &= ~IFC_QFLUSH;
@@ -3729,19 +4077,14 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
#ifdef ACPI_DMAR
if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
ctx->ifc_flags |= IFC_DMAR;
+#elif !(defined(__i386__) || defined(__amd64__))
+ /* set unconditionally for !x86 */
+ ctx->ifc_flags |= IFC_DMAR;
#endif
msix_bar = scctx->isc_msix_bar;
-
- if(sctx->isc_flags & IFLIB_HAS_TXCQ)
- main_txq = 1;
- else
- main_txq = 0;
-
- if(sctx->isc_flags & IFLIB_HAS_RXCQ)
- main_rxq = 1;
- else
- main_rxq = 0;
+ main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
+ main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
/* XXX change for per-queue sizes */
device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
@@ -3921,10 +4264,9 @@ iflib_device_deregister(if_ctx_t ctx)
if (ctx->ifc_led_dev != NULL)
led_destroy(ctx->ifc_led_dev);
/* XXX drain any dependent tasks */
- tqg = qgroup_softirq;
+ tqg = qgroup_if_io_tqg;
for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
callout_drain(&txq->ift_timer);
- callout_drain(&txq->ift_db_check);
if (txq->ift_task.gt_uniq != NULL)
taskqgroup_detach(tqg, &txq->ift_task);
}
@@ -4191,7 +4533,6 @@ iflib_queues_alloc(if_ctx_t ctx)
caddr_t *vaddrs;
uint64_t *paddrs;
struct ifmp_ring **brscp;
- int nbuf_rings = 1; /* XXX determine dynamically */
KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
@@ -4217,11 +4558,6 @@ iflib_queues_alloc(if_ctx_t ctx)
err = ENOMEM;
goto rx_fail;
}
- if (!(brscp = malloc(sizeof(void *) * nbuf_rings * nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to buf_ring_sc * memory\n");
- err = ENOMEM;
- goto rx_fail;
- }
ctx->ifc_txqs = txq;
ctx->ifc_rxqs = rxq;
@@ -4244,6 +4580,7 @@ iflib_queues_alloc(if_ctx_t ctx)
err = ENOMEM;
goto err_tx_desc;
}
+ txq->ift_txd_size[j] = scctx->isc_txd_size[j];
bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
}
txq->ift_ctx = ctx;
@@ -4255,8 +4592,6 @@ iflib_queues_alloc(if_ctx_t ctx)
}
/* XXX fix this */
txq->ift_timer.c_cpu = cpu;
- txq->ift_db_check.c_cpu = cpu;
- txq->ift_nbr = nbuf_rings;
if (iflib_txsd_alloc(txq)) {
device_printf(dev, "Critical Failure setting up TX buffers\n");
@@ -4269,21 +4604,16 @@ iflib_queues_alloc(if_ctx_t ctx)
device_get_nameunit(dev), txq->ift_id);
mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
- callout_init_mtx(&txq->ift_db_check, &txq->ift_mtx, 0);
snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
device_get_nameunit(dev), txq->ift_id);
- TXDB_LOCK_INIT(txq);
-
- txq->ift_br = brscp + i*nbuf_rings;
- for (j = 0; j < nbuf_rings; j++) {
- err = ifmp_ring_alloc(&txq->ift_br[j], 2048, txq, iflib_txq_drain,
- iflib_txq_can_drain, M_IFLIB, M_WAITOK);
- if (err) {
- /* XXX free any allocated rings */
- device_printf(dev, "Unable to allocate buf_ring\n");
- goto err_tx_desc;
- }
+
+ err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
+ iflib_txq_can_drain, M_IFLIB, M_WAITOK);
+ if (err) {
+ /* XXX free any allocated rings */
+ device_printf(dev, "Unable to allocate buf_ring\n");
+ goto err_tx_desc;
}
}
@@ -4297,6 +4627,9 @@ iflib_queues_alloc(if_ctx_t ctx)
}
rxq->ifr_ifdi = ifdip;
+ /* XXX this needs to be changed if #rx queues != #tx queues */
+ rxq->ifr_ntxqirq = 1;
+ rxq->ifr_txqid[0] = i;
for (j = 0; j < nrxqs; j++, ifdip++) {
if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
device_printf(dev, "Unable to allocate Descriptor memory\n");
@@ -4321,10 +4654,10 @@ iflib_queues_alloc(if_ctx_t ctx)
}
rxq->ifr_fl = fl;
for (j = 0; j < nfree_lists; j++) {
- rxq->ifr_fl[j].ifl_rxq = rxq;
- rxq->ifr_fl[j].ifl_id = j;
- rxq->ifr_fl[j].ifl_ifdi =
- &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
+ fl[j].ifl_rxq = rxq;
+ fl[j].ifl_id = j;
+ fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
+ fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
}
/* Allocate receive buffers for the ring*/
if (iflib_rxsd_alloc(rxq)) {
@@ -4540,6 +4873,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
cpuset_t cpus;
gtask_fn_t *fn;
int tqrid, err, cpuid;
+ driver_filter_t *intr_fast;
void *q;
info = &ctx->ifc_filter_info;
@@ -4551,16 +4885,27 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
q = &ctx->ifc_txqs[qid];
info = &ctx->ifc_txqs[qid].ift_filter_info;
gtask = &ctx->ifc_txqs[qid].ift_task;
- tqg = qgroup_softirq;
+ tqg = qgroup_if_io_tqg;
fn = _task_fn_tx;
+ intr_fast = iflib_fast_intr;
GROUPTASK_INIT(gtask, 0, fn, q);
break;
case IFLIB_INTR_RX:
q = &ctx->ifc_rxqs[qid];
info = &ctx->ifc_rxqs[qid].ifr_filter_info;
gtask = &ctx->ifc_rxqs[qid].ifr_task;
- tqg = qgroup_softirq;
+ tqg = qgroup_if_io_tqg;
fn = _task_fn_rx;
+ intr_fast = iflib_fast_intr;
+ GROUPTASK_INIT(gtask, 0, fn, q);
+ break;
+ case IFLIB_INTR_RXTX:
+ q = &ctx->ifc_rxqs[qid];
+ info = &ctx->ifc_rxqs[qid].ifr_filter_info;
+ gtask = &ctx->ifc_rxqs[qid].ifr_task;
+ tqg = qgroup_if_io_tqg;
+ fn = _task_fn_rx;
+ intr_fast = iflib_fast_intr_rxtx;
GROUPTASK_INIT(gtask, 0, fn, q);
break;
case IFLIB_INTR_ADMIN:
@@ -4570,6 +4915,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
gtask = &ctx->ifc_admin_task;
tqg = qgroup_if_config_tqg;
fn = _task_fn_admin;
+ intr_fast = iflib_fast_intr_ctx;
break;
default:
panic("unknown net intr type");
@@ -4578,9 +4924,9 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
info->ifi_filter = filter;
info->ifi_filter_arg = filter_arg;
info->ifi_task = gtask;
- info->ifi_ctx = ctx;
+ info->ifi_ctx = q;
- err = _iflib_irq_alloc(ctx, irq, rid, iflib_fast_intr, NULL, info, name);
+ err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
if (err != 0) {
device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
return (err);
@@ -4610,13 +4956,13 @@ iflib_softirq_alloc_generic(if_ctx_t ctx, int rid, iflib_intr_type_t type, void
case IFLIB_INTR_TX:
q = &ctx->ifc_txqs[qid];
gtask = &ctx->ifc_txqs[qid].ift_task;
- tqg = qgroup_softirq;
+ tqg = qgroup_if_io_tqg;
fn = _task_fn_tx;
break;
case IFLIB_INTR_RX:
q = &ctx->ifc_rxqs[qid];
gtask = &ctx->ifc_rxqs[qid].ifr_task;
- tqg = qgroup_softirq;
+ tqg = qgroup_if_io_tqg;
fn = _task_fn_rx;
break;
case IFLIB_INTR_IOV:
@@ -4660,7 +5006,7 @@ iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *
q = &ctx->ifc_rxqs[0];
info = &rxq[0].ifr_filter_info;
gtask = &rxq[0].ifr_task;
- tqg = qgroup_softirq;
+ tqg = qgroup_if_io_tqg;
tqrid = irq->ii_rid = *rid;
fn = _task_fn_rx;
@@ -4671,13 +5017,13 @@ iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *
info->ifi_ctx = ctx;
/* We allocate a single interrupt resource */
- if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr, NULL, info, name)) != 0)
+ if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
return (err);
GROUPTASK_INIT(gtask, 0, fn, q);
taskqgroup_attach(tqg, gtask, q, tqrid, name);
GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
- taskqgroup_attach(qgroup_softirq, &txq->ift_task, txq, tqrid, "tx");
+ taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, tqrid, "tx");
return (0);
}
@@ -4686,7 +5032,7 @@ iflib_led_create(if_ctx_t ctx)
{
ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
- device_get_nameunit(ctx->ifc_dev));
+ device_get_nameunit(ctx->ifc_dev));
}
void
@@ -4727,7 +5073,7 @@ void
iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
{
- taskqgroup_attach_cpu(qgroup_softirq, gt, uniq, cpu, -1, name);
+ taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
}
void
@@ -4753,6 +5099,8 @@ iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
iflib_txq_t txq = ctx->ifc_txqs;
if_setbaudrate(ifp, baudrate);
+ if (baudrate >= IF_Gbps(10))
+ ctx->ifc_flags |= IFC_PREFETCH;
/* If link down, disable watchdog */
if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
@@ -4769,12 +5117,12 @@ iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
int credits;
#ifdef INVARIANTS
int credits_pre = txq->ift_cidx_processed;
-#endif
+#endif
if (ctx->isc_txd_credits_update == NULL)
return (0);
- if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, txq->ift_cidx_processed, true)) == 0)
+ if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
return (0);
txq->ift_processed += credits;
@@ -4787,7 +5135,7 @@ iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
}
static int
-iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx, int budget)
+iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{
return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
@@ -5004,7 +5352,7 @@ mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
if_ctx_t ctx = (void *)arg1;
enum iflib_ndesc_handler type = arg2;
char buf[256] = {0};
- uint16_t *ndesc;
+ qidx_t *ndesc;
char *p, *next;
int nqs, rc, i;
@@ -5166,25 +5514,25 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
CTLFLAG_RD,
&txq->ift_cleaned, "total cleaned");
SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
- CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br[0]->state),
+ CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
0, mp_ring_state_handler, "A", "soft ring state");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
- CTLFLAG_RD, &txq->ift_br[0]->enqueues,
+ CTLFLAG_RD, &txq->ift_br->enqueues,
"# of enqueues to the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
- CTLFLAG_RD, &txq->ift_br[0]->drops,
+ CTLFLAG_RD, &txq->ift_br->drops,
"# of drops in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
- CTLFLAG_RD, &txq->ift_br[0]->starts,
+ CTLFLAG_RD, &txq->ift_br->starts,
"# of normal consumer starts in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
- CTLFLAG_RD, &txq->ift_br[0]->stalls,
+ CTLFLAG_RD, &txq->ift_br->stalls,
"# of consumer stalls in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
- CTLFLAG_RD, &txq->ift_br[0]->restarts,
+ CTLFLAG_RD, &txq->ift_br->restarts,
"# of consumer restarts in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
- CTLFLAG_RD, &txq->ift_br[0]->abdications,
+ CTLFLAG_RD, &txq->ift_br->abdications,
"# of consumer abdications in the mp_ring for this queue");
}
@@ -5241,3 +5589,30 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
}
}
+
+#ifndef __NO_STRICT_ALIGNMENT
+static struct mbuf *
+iflib_fixup_rx(struct mbuf *m)
+{
+ struct mbuf *n;
+
+ if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
+ bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
+ m->m_data += ETHER_HDR_LEN;
+ n = m;
+ } else {
+ MGETHDR(n, M_NOWAIT, MT_DATA);
+ if (n == NULL) {
+ m_freem(m);
+ return (NULL);
+ }
+ bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
+ m->m_data += ETHER_HDR_LEN;
+ m->m_len -= ETHER_HDR_LEN;
+ n->m_len = ETHER_HDR_LEN;
+ M_MOVE_PKTHDR(n, m);
+ n->m_next = m;
+ }
+ return (n);
+}
+#endif
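iflib_fixup_rx above serves strict-alignment platforms (and is presumably paired with the new IFLIB_DO_RX_FIXUP flag in the iflib.h hunk below): with the 14-byte Ethernet header at the start of the cluster, the IP header lands on a 2-byte boundary, which faults on CPUs requiring natural alignment. Frames that fit are slid forward by ETHER_HDR_LEN in place, moving the IP header onto a 4-byte boundary assuming the cluster itself is at least 4-byte aligned; larger frames get the Ethernet header copied into a fresh header mbuf that is prepended to the chain.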
diff --git a/sys/net/iflib.h b/sys/net/iflib.h
index 9c8a8c95e13e..9af87a2c09b7 100644
--- a/sys/net/iflib.h
+++ b/sys/net/iflib.h
@@ -37,7 +37,13 @@
#include <sys/nv.h>
#include <sys/gtaskqueue.h>
-
+/*
+ * The value type for indexing limits the maximum number of
+ * descriptors to 65535; it can be conditionally redefined to
+ * uint32_t in the future if the need arises.
+ */
+typedef uint16_t qidx_t;
+#define QIDX_INVALID 0xFFFF
/*
* Most cards can handle much larger TSO requests
* but the FreeBSD TCP stack will break on larger
@@ -63,7 +69,7 @@ typedef struct if_int_delay_info *if_int_delay_info_t;
typedef struct if_rxd_frag {
uint8_t irf_flid;
- uint16_t irf_idx;
+ qidx_t irf_idx;
uint16_t irf_len;
} *if_rxd_frag_t;
@@ -73,47 +79,61 @@ typedef struct if_rxd_info {
uint16_t iri_vtag; /* vlan tag - if flag set */
/* XXX redundant with the new irf_len field */
uint16_t iri_len; /* packet length */
- uint16_t iri_cidx; /* consumer index of cq */
+ qidx_t iri_cidx; /* consumer index of cq */
struct ifnet *iri_ifp; /* some drivers >1 interface per softc */
/* updated by driver */
- uint16_t iri_flags; /* mbuf flags for packet */
+ if_rxd_frag_t iri_frags;
uint32_t iri_flowid; /* RSS hash for packet */
uint32_t iri_csum_flags; /* m_pkthdr csum flags */
+
uint32_t iri_csum_data; /* m_pkthdr csum data */
+ uint8_t iri_flags; /* mbuf flags for packet */
uint8_t iri_nfrags; /* number of fragments in packet */
uint8_t iri_rsstype; /* RSS hash type */
uint8_t iri_pad; /* any padding in the received data */
- if_rxd_frag_t iri_frags;
} *if_rxd_info_t;
+typedef struct if_rxd_update {
+ uint64_t *iru_paddrs;
+ caddr_t *iru_vaddrs;
+ qidx_t *iru_idxs;
+ qidx_t iru_pidx;
+ uint16_t iru_qsidx;
+ uint16_t iru_count;
+ uint16_t iru_buf_size;
+ uint8_t iru_flidx;
+} *if_rxd_update_t;
+
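A minimal sketch of a driver-side isc_rxd_refill consuming this batched update; the softc, ring, and descriptor types are hypothetical stand-ins, not taken from any driver in this commit:

    static void
    hypo_rxd_refill(void *arg, if_rxd_update_t iru)
    {
        struct hypo_softc *sc = arg;
        struct hypo_rx_ring *ring = &sc->rx_rings[iru->iru_qsidx];
        qidx_t i, idx;

        /* Post iru_count buffers starting at iru_pidx, wrapping at
         * the end of the ring; only the physical addresses and the
         * advertised buffer size matter to this simple device. */
        for (i = 0, idx = iru->iru_pidx; i < iru->iru_count; i++) {
            ring->desc[idx].addr = htole64(iru->iru_paddrs[i]);
            ring->desc[idx].len = htole16(iru->iru_buf_size);
            if (++idx == ring->size)
                idx = 0;
        }
    }

Drivers whose completion ring hands back out-of-order slots would consult iru_idxs (filled from ifl_rxd_idxs in the refill hunk above) instead of assuming consecutive descriptors.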
#define IPI_TX_INTR 0x1 /* send an interrupt when this packet is sent */
#define IPI_TX_IPV4 0x2 /* ethertype IPv4 */
#define IPI_TX_IPV6 0x4 /* ethertype IPv6 */
typedef struct if_pkt_info {
- uint32_t ipi_len; /* packet length */
- bus_dma_segment_t *ipi_segs; /* physical addresses */
- uint16_t ipi_qsidx; /* queue set index */
- uint16_t ipi_nsegs; /* number of segments */
- uint16_t ipi_ndescs; /* number of descriptors used by encap */
- uint16_t ipi_flags; /* iflib per-packet flags */
- uint32_t ipi_pidx; /* start pidx for encap */
- uint32_t ipi_new_pidx; /* next available pidx post-encap */
+ bus_dma_segment_t *ipi_segs; /* physical addresses */
+ uint32_t ipi_len; /* packet length */
+ uint16_t ipi_qsidx; /* queue set index */
+ qidx_t ipi_nsegs; /* number of segments */
+
+ qidx_t ipi_ndescs; /* number of descriptors used by encap */
+ uint16_t ipi_flags; /* iflib per-packet flags */
+ qidx_t ipi_pidx; /* start pidx for encap */
+ qidx_t ipi_new_pidx; /* next available pidx post-encap */
/* offload handling */
- uint64_t ipi_csum_flags; /* packet checksum flags */
- uint16_t ipi_tso_segsz; /* tso segment size */
- uint16_t ipi_mflags; /* packet mbuf flags */
- uint16_t ipi_vtag; /* VLAN tag */
- uint16_t ipi_etype; /* ether header type */
- uint8_t ipi_ehdrlen; /* ether header length */
- uint8_t ipi_ip_hlen; /* ip header length */
- uint8_t ipi_tcp_hlen; /* tcp header length */
- uint8_t ipi_tcp_hflags; /* tcp header flags */
- uint8_t ipi_ipproto; /* ip protocol */
- /* implied padding */
- uint32_t ipi_tcp_seq; /* tcp seqno */
- uint32_t ipi_tcp_sum; /* tcp csum */
+ uint8_t ipi_ehdrlen; /* ether header length */
+ uint8_t ipi_ip_hlen; /* ip header length */
+ uint8_t ipi_tcp_hlen; /* tcp header length */
+ uint8_t ipi_ipproto; /* ip protocol */
+
+ uint32_t ipi_csum_flags; /* packet checksum flags */
+ uint16_t ipi_tso_segsz; /* tso segment size */
+ uint16_t ipi_vtag; /* VLAN tag */
+ uint16_t ipi_etype; /* ether header type */
+ uint8_t ipi_tcp_hflags; /* tcp header flags */
+ uint8_t ipi_mflags; /* packet mbuf flags */
+
+ uint32_t ipi_tcp_seq; /* tcp seqno */
+ uint32_t ipi_tcp_sum; /* tcp csum */
} *if_pkt_info_t;
typedef struct if_irq {
@@ -156,15 +176,13 @@ typedef struct pci_vendor_info {
typedef struct if_txrx {
int (*ift_txd_encap) (void *, if_pkt_info_t);
- void (*ift_txd_flush) (void *, uint16_t, uint32_t);
- int (*ift_txd_credits_update) (void *, uint16_t, uint32_t, bool);
+ void (*ift_txd_flush) (void *, uint16_t, qidx_t pidx);
+ int (*ift_txd_credits_update) (void *, uint16_t qsidx, bool clear);
- int (*ift_rxd_available) (void *, uint16_t qsidx, uint32_t pidx,
- int budget);
+ int (*ift_rxd_available) (void *, uint16_t qsidx, qidx_t pidx, qidx_t budget);
int (*ift_rxd_pkt_get) (void *, if_rxd_info_t ri);
- void (*ift_rxd_refill) (void * , uint16_t qsidx, uint8_t flidx, uint32_t pidx,
- uint64_t *paddrs, caddr_t *vaddrs, uint16_t count, uint16_t buf_size);
- void (*ift_rxd_flush) (void *, uint16_t qsidx, uint8_t flidx, uint32_t pidx);
+ void (*ift_rxd_refill) (void * , if_rxd_update_t iru);
+ void (*ift_rxd_flush) (void *, uint16_t qsidx, uint8_t flidx, qidx_t pidx);
int (*ift_legacy_intr) (void *);
} *if_txrx_t;
@@ -179,6 +197,10 @@ typedef struct if_softc_ctx {
uint32_t isc_txqsizes[8];
uint32_t isc_rxqsizes[8];
+	/* is there such a thing as a descriptor that is more than 248 bytes? */
+ uint8_t isc_txd_size[8];
+ uint8_t isc_rxd_size[8];
+
int isc_max_txqsets;
int isc_max_rxqsets;
int isc_tx_tso_segments_max;
@@ -203,22 +225,14 @@ typedef struct if_softc_ctx {
struct if_shared_ctx {
int isc_magic;
driver_t *isc_driver;
- int isc_nfl;
- int isc_flags;
bus_size_t isc_q_align;
bus_size_t isc_tx_maxsize;
bus_size_t isc_tx_maxsegsize;
bus_size_t isc_rx_maxsize;
bus_size_t isc_rx_maxsegsize;
int isc_rx_nsegments;
- int isc_rx_process_limit;
- int isc_ntxqs; /* # of tx queues per tx qset - usually 1 */
- int isc_nrxqs; /* # of rx queues per rx qset - intel 1, chelsio 2, broadcom 3 */
int isc_admin_intrcnt; /* # of admin/link interrupts */
-
- int isc_tx_reclaim_thresh;
-
/* fields necessary for probe */
pci_vendor_info_t *isc_vendor_info;
char *isc_driver_version;
@@ -231,6 +245,14 @@ struct if_shared_ctx {
int isc_ntxd_min[8];
int isc_ntxd_default[8];
int isc_ntxd_max[8];
+
+ /* actively used during operation */
+ int isc_nfl __aligned(CACHE_LINE_SIZE);
+ int isc_ntxqs; /* # of tx queues per tx qset - usually 1 */
+ int isc_nrxqs; /* # of rx queues per rx qset - intel 1, chelsio 2, broadcom 3 */
+ int isc_rx_process_limit;
+ int isc_tx_reclaim_thresh;
+ int isc_flags;
};
typedef struct iflib_dma_info {
@@ -244,8 +266,9 @@ typedef struct iflib_dma_info {
#define IFLIB_MAGIC 0xCAFEF00D
typedef enum {
- IFLIB_INTR_TX,
IFLIB_INTR_RX,
+ IFLIB_INTR_TX,
+ IFLIB_INTR_RXTX,
IFLIB_INTR_ADMIN,
IFLIB_INTR_IOV,
} iflib_intr_type_t;
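IFLIB_INTR_RXTX is new in this change: a driver whose hardware shares one MSI-X vector between an rx/tx queue pair registers the rx queue with this type, and the iflib_irq_alloc_generic hunk above wires it to iflib_fast_intr_rxtx so both directions are serviced from the single interrupt. A hypothetical registration, with the function's usual signature assumed:

    err = iflib_irq_alloc_generic(ctx, &sc->queue_irqs[i], rid,
        IFLIB_INTR_RXTX, hypo_msix_filter, &sc->queues[i], i, "rxtx");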
@@ -279,6 +302,10 @@ typedef enum {
* Interface doesn't expect in_pseudo for th_sum
*/
#define IFLIB_TSO_INIT_IP 0x20
+/*
+ * Interface doesn't align IP header
+ */
+#define IFLIB_DO_RX_FIXUP 0x40
@@ -298,9 +325,6 @@ if_shared_ctx_t iflib_get_sctx(if_ctx_t ctx);
void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]);
-
-
-
/*
* If the driver can plug cleanly in to newbus use these
*/