Diffstat (limited to 'sys/dev/netmap')
-rw-r--r--   sys/dev/netmap/if_em_netmap.h    25
-rw-r--r--   sys/dev/netmap/if_igb_netmap.h   12
-rw-r--r--   sys/dev/netmap/if_lem_netmap.h   22
-rw-r--r--   sys/dev/netmap/if_re_netmap.h    35
-rw-r--r--   sys/dev/netmap/ixgbe_netmap.h    13
-rw-r--r--   sys/dev/netmap/netmap_kern.h     52
6 files changed, 84 insertions, 75 deletions
diff --git a/sys/dev/netmap/if_em_netmap.h b/sys/dev/netmap/if_em_netmap.h
index 2009f87a1bc5..a10bef040419 100644
--- a/sys/dev/netmap/if_em_netmap.h
+++ b/sys/dev/netmap/if_em_netmap.h
@@ -27,7 +27,7 @@
  * $FreeBSD$
  * $Id: if_em_netmap.h 9802 2011-12-02 18:42:37Z luigi $
  *
- * netmap support for if_em.
+ * netmap support for if_em.c
  *
  * For structure and details on the individual functions please see
  * ixgbe_netmap.h
@@ -66,9 +66,6 @@ em_netmap_attach(struct adapter *adapter)
 }


-/*
- * wrapper to export locks to the generic code
- */
 static void
 em_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
 {
@@ -214,9 +211,7 @@ em_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 */
 	j = kring->nr_hwcur;
 	if (j != k) {	/* we have packets to send */
-		l = j - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_tidx_k2n(na, ring_nr, j);
 		while (j != k) {
 			struct netmap_slot *slot = &ring->slot[j];
 			struct e1000_tx_desc *curr = &txr->tx_base[l];
@@ -322,12 +317,7 @@ em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 * j == (l + kring->nkr_hwofs) % ring_size
 	 */
 	l = rxr->next_to_check;
-	j = l + kring->nkr_hwofs;
-	/* XXX here nkr_hwofs can be negative so must check for j < 0 */
-	if (j < 0)
-		j += lim + 1;
-	else if (j > lim)
-		j -= lim + 1;
+	j = netmap_ridx_n2k(na, ring_nr, l);

 	for (n = 0; ; n++) {
 		struct e1000_rx_desc *curr = &rxr->rx_base[l];
@@ -347,15 +337,10 @@ em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	}

 	/* skip past packets that userspace has already processed */
-	j = kring->nr_hwcur;
+	j = kring->nr_hwcur; /* netmap ring index */
 	if (j != k) { /* userspace has read some packets. */
 		n = 0;
-		l = j - kring->nkr_hwofs; /* NIC ring index */
-		/* here nkr_hwofs can be negative so check for l > lim */
-		if (l < 0)
-			l += lim + 1;
-		else if (l > lim)
-			l -= lim + 1;
+		l = netmap_ridx_k2n(na, ring_nr, j); /* NIC ring index */
 		while (j != k) {
 			struct netmap_slot *slot = &ring->slot[j];
 			struct e1000_rx_desc *curr = &rxr->rx_base[l];
diff --git a/sys/dev/netmap/if_igb_netmap.h b/sys/dev/netmap/if_igb_netmap.h
index 86dd331455b6..7bea4b146a79 100644
--- a/sys/dev/netmap/if_igb_netmap.h
+++ b/sys/dev/netmap/if_igb_netmap.h
@@ -169,9 +169,7 @@ igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 		u32 olinfo_status =
 		    (adapter->hw.mac.type == e1000_82575) ? (txr->me << 4) : 0;

-		l = j - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_tidx_k2n(na, ring_nr, j);
 		while (j != k) {
 			struct netmap_slot *slot = &ring->slot[j];
 			union e1000_adv_tx_desc *curr =
@@ -287,9 +285,7 @@ igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 * j == (l + kring->nkr_hwofs) % ring_size
 	 */
 	l = rxr->next_to_check;
-	j = l + kring->nkr_hwofs;
-	if (j > lim)
-		j -= lim + 1;
+	j = netmap_ridx_n2k(na, ring_nr, l);
 	for (n = 0; ; n++) {
 		union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
 		uint32_t staterr = le32toh(curr->wb.upper.status_error);
@@ -311,9 +307,7 @@ igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	j = kring->nr_hwcur;
 	if (j != k) { /* userspace has read some packets. */
 		n = 0;
-		l = j - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_ridx_k2n(na, ring_nr, j);
 		while (j != k) {
 			struct netmap_slot *slot = ring->slot + j;
 			union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
diff --git a/sys/dev/netmap/if_lem_netmap.h b/sys/dev/netmap/if_lem_netmap.h
index 785d10290ccf..0f5c5a0acfe6 100644
--- a/sys/dev/netmap/if_lem_netmap.h
+++ b/sys/dev/netmap/if_lem_netmap.h
@@ -45,8 +45,6 @@ static int	lem_netmap_rxsync(struct ifnet *, u_int, int);
 static void	lem_netmap_lock_wrapper(struct ifnet *, int, u_int);


-SYSCTL_NODE(_dev, OID_AUTO, lem, CTLFLAG_RW, 0, "lem card");
-
 static void
 lem_netmap_attach(struct adapter *adapter)
 {
@@ -153,7 +151,7 @@ lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 {
 	struct adapter *adapter = ifp->if_softc;
 	struct netmap_adapter *na = NA(adapter->ifp);
-	struct netmap_kring *kring = &na->tx_rings[0];
+	struct netmap_kring *kring = &na->tx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
 	int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;

@@ -176,9 +174,7 @@ lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 */
 	j = kring->nr_hwcur;
 	if (j != k) {	/* we have packets to send */
-		l = j - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_tidx_k2n(na, ring_nr, j);
 		while (j != k) {
 			struct netmap_slot *slot = &ring->slot[j];
 			struct e1000_tx_desc *curr = &adapter->tx_desc_base[l];
@@ -260,7 +256,7 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 {
 	struct adapter *adapter = ifp->if_softc;
 	struct netmap_adapter *na = NA(adapter->ifp);
-	struct netmap_kring *kring = &na->rx_rings[0];
+	struct netmap_kring *kring = &na->rx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
 	int j, k, l, n, lim = kring->nkr_num_slots - 1;

@@ -283,9 +279,7 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 * j == (l + kring->nkr_hwofs) % ring_size
 	 */
 	l = adapter->next_rx_desc_to_check;
-	j = l + kring->nkr_hwofs;
-	if (j > lim)
-		j -= lim + 1;
+	j = netmap_ridx_n2k(na, ring_nr, l);
 	for (n = 0; ; n++) {
 		struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
 		int len;
@@ -310,12 +304,10 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	}

 	/* skip past packets that userspace has already processed */
-	j = kring->nr_hwcur; /* netmap ring index */
+	j = kring->nr_hwcur;	/* netmap ring index */
 	if (j != k) { /* userspace has read some packets. */
 		n = 0;
-		l = j - kring->nkr_hwofs; /* NIC ring index */
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_ridx_k2n(na, ring_nr, j); /* NIC ring index */
 		while (j != k) {
 			struct netmap_slot *slot = &ring->slot[j];
 			struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
@@ -332,7 +324,7 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 			curr->status = 0;
 			if (slot->flags & NS_BUF_CHANGED) {
 				curr->buffer_addr = htole64(paddr);
-				/* buffer has changed, and reload map */
+				/* buffer has changed, reload map */
 				netmap_reload_map(adapter->rxtag, rxbuf->map, addr);
 				slot->flags &= ~NS_BUF_CHANGED;
 			}
diff --git a/sys/dev/netmap/if_re_netmap.h b/sys/dev/netmap/if_re_netmap.h
index af8e21a486ba..d0b89193fe6c 100644
--- a/sys/dev/netmap/if_re_netmap.h
+++ b/sys/dev/netmap/if_re_netmap.h
@@ -263,7 +263,7 @@ re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 * is to limit the amount of data reported up to 'lim'
 	 */
 	l = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
-	j = l + kring->nkr_hwofs;
+	j = netmap_ridx_n2k(na, ring_nr, l); /* the kring index */
 	for (n = kring->nr_hwavail; n < lim ; n++) {
 		struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[l];
 		uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
@@ -296,9 +296,7 @@ re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	j = kring->nr_hwcur;
 	if (j != k) {	/* userspace has read some packets. */
 		n = 0;
-		l = kring->nr_hwcur - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_ridx_k2n(na, ring_nr, j); /* the NIC index */
 		while (j != k) {
 			struct netmap_slot *slot = ring->slot + j;
 			struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[l];
@@ -370,11 +368,7 @@ re_netmap_tx_init(struct rl_softc *sc)
 	for (i = 0; i < n; i++) {
 		void *addr;
 		uint64_t paddr;
-		struct netmap_kring *kring = &na->tx_rings[0];
-		int l = i + kring->nkr_hwofs;
-
-		if (l >= n)
-			l -= n;
+		int l = netmap_tidx_n2k(na, 0, i);

 		addr = PNMB(slot + l, &paddr);
 		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
@@ -391,19 +385,21 @@ re_netmap_rx_init(struct rl_softc *sc)
 	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
 	struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
 	uint32_t cmdstat;
-	int i, n;
+	int i, n, max_avail;

 	if (!slot)
 		return;

 	n = sc->rl_ldata.rl_rx_desc_cnt;
+	/*
+	 * Userspace owned hwavail packets before the reset,
+	 * so the NIC that last hwavail descriptors of the ring
+	 * are still owned by the driver (and keep one empty).
+	 */
+	max_avail = n - 1 - na->rx_rings[0].nr_hwavail;
 	for (i = 0; i < n; i++) {
 		void *addr;
 		uint64_t paddr;
-		struct netmap_kring *kring = &na->rx_rings[0];
-		int l = i + kring->nkr_hwofs;
-
-		if (l >= n)
-			l -= n;
+		int l = netmap_ridx_n2k(na, 0, i);

 		addr = PNMB(slot + l, &paddr);
@@ -414,14 +410,9 @@ re_netmap_rx_init(struct rl_softc *sc)
 		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
 		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
 		cmdstat = na->buff_size;
-		if (i == n - 1)
+		if (i == n - 1) /* mark the end of ring */
 			cmdstat |= RL_RDESC_CMD_EOR;
-		/*
-		 * userspace knows that hwavail packets were ready before the
-		 * reset, so we need to tell the NIC that last hwavail
-		 * descriptors of the ring are still owned by the driver.
-		 */
-		if (i < n - 1 - kring->nr_hwavail) // XXX + 1 ?
+		if (i < max_avail)
 			cmdstat |= RL_RDESC_CMD_OWN;
 		desc[i].rl_cmdstat = htole32(cmdstat);
 	}
diff --git a/sys/dev/netmap/ixgbe_netmap.h b/sys/dev/netmap/ixgbe_netmap.h
index bdcaf3875e44..5f5a240b4921 100644
--- a/sys/dev/netmap/ixgbe_netmap.h
+++ b/sys/dev/netmap/ixgbe_netmap.h
@@ -210,6 +210,7 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 		IXGBE_TX_LOCK(txr);
 	/* take a copy of ring->cur now, and never read it again */
 	k = ring->cur;
+	/* do a sanity check on cur - hwcur XXX verify */
 	l = k - kring->nr_hwcur;
 	if (l < 0)
 		l += lim + 1;
@@ -240,9 +241,7 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 */
 	j = kring->nr_hwcur;
 	if (j != k) {	/* we have new packets to send */
-		l = j - kring->nkr_hwofs;
-		if (l < 0)	/* wraparound */
-			l += lim + 1;
+		l = netmap_tidx_k2n(na, ring_nr, j); /* NIC index */

 		while (j != k) {
 			/*
@@ -459,9 +458,7 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 * rxr->next_to_check is set to 0 on a ring reinit
 	 */
 	l = rxr->next_to_check;
-	j = rxr->next_to_check + kring->nkr_hwofs;
-	if (j > lim)
-		j -= lim + 1;
+	j = netmap_ridx_n2k(na, ring_nr, l);

 	if (netmap_no_pendintr || force_update) {
 		for (n = 0; ; n++) {
@@ -493,9 +490,7 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	j = kring->nr_hwcur;
 	if (j != k) {	/* userspace has read some packets. */
 		n = 0;
-		l = kring->nr_hwcur - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_ridx_k2n(na, ring_nr, j);
 		while (j != k) {
 			/* collect per-slot info, with similar validations
 			 * and flag handling as in the txsync code.
diff --git a/sys/dev/netmap/netmap_kern.h b/sys/dev/netmap/netmap_kern.h
index 4fb59980444f..6e0357047773 100644
--- a/sys/dev/netmap/netmap_kern.h
+++ b/sys/dev/netmap/netmap_kern.h
@@ -250,6 +250,58 @@ netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
 	}
 }

+/*
+ * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
+ */
+static inline int
+netmap_ridx_n2k(struct netmap_adapter *na, int ring, int nic_idx)
+{
+	int kring_idx = nic_idx + na->rx_rings[ring].nkr_hwofs;
+	if (kring_idx < 0)
+		return kring_idx + na->num_rx_desc;
+	else if (kring_idx < na->num_rx_desc)
+		return kring_idx;
+	else
+		return kring_idx - na->num_rx_desc;
+}
+
+static inline int
+netmap_tidx_n2k(struct netmap_adapter *na, int ring, int nic_idx)
+{
+	int kring_idx = nic_idx + na->tx_rings[ring].nkr_hwofs;
+	if (kring_idx < 0)
+		return kring_idx + na->num_tx_desc;
+	else if (kring_idx < na->num_tx_desc)
+		return kring_idx;
+	else
+		return kring_idx - na->num_tx_desc;
+}
+
+
+static inline int
+netmap_ridx_k2n(struct netmap_adapter *na, int ring, int kring_idx)
+{
+	int nic_idx = kring_idx - na->rx_rings[ring].nkr_hwofs;
+	if (nic_idx < 0)
+		return nic_idx + na->num_rx_desc;
+	else if (nic_idx < na->num_rx_desc)
+		return nic_idx;
+	else
+		return nic_idx - na->num_rx_desc;
+}
+
+
+static inline int
+netmap_tidx_k2n(struct netmap_adapter *na, int ring, int kring_idx)
+{
+	int nic_idx = kring_idx - na->tx_rings[ring].nkr_hwofs;
+	if (nic_idx < 0)
+		return nic_idx + na->num_tx_desc;
+	else if (nic_idx < na->num_tx_desc)
+		return nic_idx;
+	else
+		return nic_idx - na->num_tx_desc;
+}

 /*
  * NMB return the virtual address of a buffer (buffer 0 on bad index)
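The arithmetic behind the new netmap_ridx_*/netmap_tidx_* helpers can be checked outside the kernel. The sketch below is a minimal user-space model, not part of the commit: struct model_adapter, struct model_kring and the model_ridx_* functions are stand-in names, and only the fields the helpers touch (nkr_hwofs and the descriptor count) are modeled. It demonstrates that the n2k and k2n mappings are inverses modulo the ring size, including for negative nkr_hwofs, which is the property the drivers above now rely on.

/*
 * User-space model of the index helpers added to netmap_kern.h.
 * All names here are hypothetical stand-ins for the kernel types.
 */
#include <assert.h>
#include <stdio.h>

struct model_kring {
	int nkr_hwofs;		/* offset between NIC and netmap ring */
};

struct model_adapter {
	int num_rx_desc;	/* RX ring size */
	struct model_kring rx_rings[1];
};

/* same arithmetic as netmap_ridx_n2k(): NIC index -> kring index */
static inline int
model_ridx_n2k(struct model_adapter *na, int ring, int nic_idx)
{
	int kring_idx = nic_idx + na->rx_rings[ring].nkr_hwofs;
	if (kring_idx < 0)
		return kring_idx + na->num_rx_desc;
	else if (kring_idx < na->num_rx_desc)
		return kring_idx;
	else
		return kring_idx - na->num_rx_desc;
}

/* same arithmetic as netmap_ridx_k2n(): kring index -> NIC index */
static inline int
model_ridx_k2n(struct model_adapter *na, int ring, int kring_idx)
{
	int nic_idx = kring_idx - na->rx_rings[ring].nkr_hwofs;
	if (nic_idx < 0)
		return nic_idx + na->num_rx_desc;
	else if (nic_idx < na->num_rx_desc)
		return nic_idx;
	else
		return nic_idx - na->num_rx_desc;
}

int
main(void)
{
	struct model_adapter na = { .num_rx_desc = 256 };
	int ofs, i;

	/* nkr_hwofs may be negative; try offsets on both sides of 0 */
	for (ofs = -255; ofs <= 255; ofs += 51) {
		na.rx_rings[0].nkr_hwofs = ofs;
		for (i = 0; i < na.num_rx_desc; i++) {
			int k = model_ridx_n2k(&na, 0, i);

			assert(k >= 0 && k < na.num_rx_desc);
			assert(model_ridx_k2n(&na, 0, k) == i);
		}
	}
	printf("n2k/k2n round-trip OK\n");
	return 0;
}

Note that, as in the kernel helpers, a single add-or-subtract of the ring size is enough only while the magnitude of nkr_hwofs stays below the ring size; the open-coded versions removed by this commit made the same assumption.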