aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorHans Petter Selasky <hselasky@FreeBSD.org>2017-08-08 11:35:02 +0000
committerHans Petter Selasky <hselasky@FreeBSD.org>2017-08-08 11:35:02 +0000
commit1a59bf5f7a36b56e2015c9ad09cc403ee90c3c17 (patch)
tree63533e95c62a85232abe711107d70cdb1160cdf3 /sys
parent984d43cca516265123e2db5a07f2de2fbf8b7a74 (diff)
downloadsrc-1a59bf5f7a36b56e2015c9ad09cc403ee90c3c17.tar.gz
src-1a59bf5f7a36b56e2015c9ad09cc403ee90c3c17.zip
Fix for mlx4en(4) to properly call m_defrag().
The m_defrag() function can only defrag mbuf chains which have a valid mbuf packet header. In r291699 when the mlx4en(4) driver was converted into using BUSDMA(9), the call to m_defrag() was moved after the part of the transmit routine which strips the header from the mbuf chain. This effectively disabled the mbuf defrag mechanism and such packets simply got dropped. This patch removes the stripping of mbufs from a chain and loads all mbufs using busdma. If busdma finds there are no segments, unload the DMA map and free the mbuf right away, because that means all data in the mbuf has been inlined in the TX ring. Else proceed as usual. Add a per-ring counter for the number of defrag attempts and make sure the oversized_packets counter gets zeroed while at it. The counters are per-ring to avoid excessive cache misses in the TX path. Submitted by: mjoras@ Differential Revision: https://reviews.freebsd.org/D11683 MFC after: 1 week Sponsored by: Mellanox Technologies
Notes
Notes: svn path=/head/; revision=322248
Diffstat (limited to 'sys')
-rw-r--r--sys/dev/mlx4/mlx4_en/en.h2
-rw-r--r--sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c6
-rw-r--r--sys/dev/mlx4/mlx4_en/mlx4_en_port.c5
-rw-r--r--sys/dev/mlx4/mlx4_en/mlx4_en_tx.c28
-rw-r--r--sys/dev/mlx4/stats.h1
5 files changed, 27 insertions, 15 deletions
diff --git a/sys/dev/mlx4/mlx4_en/en.h b/sys/dev/mlx4/mlx4_en/en.h
index afc0c4cc22f7..9e0349137cd9 100644
--- a/sys/dev/mlx4/mlx4_en/en.h
+++ b/sys/dev/mlx4/mlx4_en/en.h
@@ -278,6 +278,8 @@ struct mlx4_en_tx_ring {
unsigned long queue_stopped;
unsigned long oversized_packets;
unsigned long wake_queue;
+ unsigned long tso_packets;
+ unsigned long defrag_attempts;
struct mlx4_bf bf;
bool bf_enabled;
int hwtstamp_tx_type;
diff --git a/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c b/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
index e1ce46666e8c..9c9ca3b7feb1 100644
--- a/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
+++ b/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
@@ -2681,6 +2681,8 @@ static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_chksum_offload",
CTLFLAG_RD, &priv->port_stats.tx_chksum_offload,
"TX checksum offloads");
+ SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "defrag_attempts", CTLFLAG_RD,
+ &priv->port_stats.defrag_attempts, "Oversized chains defragged");
/* Could strdup the names and add in a loop. This is simpler. */
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
@@ -2774,6 +2776,10 @@ static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
CTLFLAG_RD, &tx_ring->packets, "TX packets");
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
CTLFLAG_RD, &tx_ring->bytes, "TX bytes");
+ SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "tso_packets",
+ CTLFLAG_RD, &tx_ring->tso_packets, "TSO packets");
+ SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "defrag_attempts",
+ CTLFLAG_RD, &tx_ring->defrag_attempts, "Oversized chains defragged");
}
for (i = 0; i < priv->rx_ring_num; i++) {
diff --git a/sys/dev/mlx4/mlx4_en/mlx4_en_port.c b/sys/dev/mlx4/mlx4_en/mlx4_en_port.c
index 75e929892b18..eb6cbefbf467 100644
--- a/sys/dev/mlx4/mlx4_en/mlx4_en_port.c
+++ b/sys/dev/mlx4/mlx4_en/mlx4_en_port.c
@@ -191,11 +191,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
+ priv->port_stats.oversized_packets = 0;
+ priv->port_stats.tso_packets = 0;
+ priv->port_stats.defrag_attempts = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
priv->port_stats.queue_stopped += priv->tx_ring[i]->queue_stopped;
priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
priv->port_stats.oversized_packets += priv->tx_ring[i]->oversized_packets;
+ priv->port_stats.tso_packets += priv->tx_ring[i]->tso_packets;
+ priv->port_stats.defrag_attempts += priv->tx_ring[i]->defrag_attempts;
}
/* RX Statistics */
priv->pkstats.rx_packets = be64_to_cpu(mlx4_en_stats->RTOT_prio_0) +
diff --git a/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c b/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c
index 55c419a9f1f9..be08f9a42cdb 100644
--- a/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c
+++ b/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c
@@ -793,7 +793,7 @@ static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp
num_pkts = DIV_ROUND_UP(payload_len, mss);
ring->bytes += payload_len + (num_pkts * ihs);
ring->packets += num_pkts;
- priv->port_stats.tso_packets++;
+ ring->tso_packets++;
/* store pointer to inline header */
dseg_inline = dseg;
/* copy data inline */
@@ -814,20 +814,11 @@ static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp
}
m_adj(mb, ihs);
- /* trim off empty mbufs */
- while (mb->m_len == 0) {
- mb = m_free(mb);
- /* check if all data has been inlined */
- if (mb == NULL) {
- nr_segs = 0;
- goto skip_dma;
- }
- }
-
err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
mb, segs, &nr_segs, BUS_DMA_NOWAIT);
if (unlikely(err == EFBIG)) {
/* Too many mbuf fragments */
+ ring->defrag_attempts++;
m = m_defrag(mb, M_NOWAIT);
if (m == NULL) {
ring->oversized_packets++;
@@ -843,11 +834,18 @@ static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp
ring->oversized_packets++;
goto tx_drop;
}
- /* make sure all mbuf data is written to RAM */
- bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
- BUS_DMASYNC_PREWRITE);
+ /* If there were no errors and we didn't load anything, don't sync. */
+ if (nr_segs != 0) {
+ /* make sure all mbuf data is written to RAM */
+ bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
+ BUS_DMASYNC_PREWRITE);
+ } else {
+ /* All data was inlined, free the mbuf. */
+ bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);
+ m_freem(mb);
+ mb = NULL;
+ }
-skip_dma:
/* compute number of DS needed */
ds_cnt = (dseg - ((volatile struct mlx4_wqe_data_seg *)tx_desc)) + nr_segs;
diff --git a/sys/dev/mlx4/stats.h b/sys/dev/mlx4/stats.h
index f70a83358839..3b86ea18cbd0 100644
--- a/sys/dev/mlx4/stats.h
+++ b/sys/dev/mlx4/stats.h
@@ -126,6 +126,7 @@ struct mlx4_en_port_stats {
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
unsigned long tx_chksum_offload;
+ unsigned long defrag_attempts;
};
struct mlx4_en_perf_stats {