From eeb76a1889b36f16bded9cfc8140b4cabcf6e792 Mon Sep 17 00:00:00 2001
From: Sam Leffler
Date: Thu, 17 Jan 2008 21:25:09 +0000
Subject: promote ath_defrag to m_collapse (and retire private+unused
 m_collapse from cxgb)

Reviewed by:	pyun, jhb, kmacy
MFC after:	2 weeks
---
 sys/dev/ath/if_ath.c    | 86 +------------------------------------------------
 sys/dev/cxgb/sys/mvec.h | 14 --------
 sys/kern/uipc_mbuf.c    | 86 +++++++++++++++++++++++++++++++++++++++++++++++++
 sys/sys/mbuf.h          |  1 +
 4 files changed, 88 insertions(+), 99 deletions(-)

diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 3f74add351b4..1e3047472eac 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -3891,90 +3891,6 @@ ath_tx_cleanup(struct ath_softc *sc)
 	ATH_TXQ_LOCK_DESTROY(&sc->sc_mcastq);
 }
 
-/*
- * Defragment an mbuf chain, returning at most maxfrags separate
- * mbufs+clusters.  If this is not possible NULL is returned and
- * the original mbuf chain is left in it's present (potentially
- * modified) state.  We use two techniques: collapsing consecutive
- * mbufs and replacing consecutive mbufs by a cluster.
- */
-static struct mbuf *
-ath_defrag(struct mbuf *m0, int how, int maxfrags)
-{
-	struct mbuf *m, *n, *n2, **prev;
-	u_int curfrags;
-
-	/*
-	 * Calculate the current number of frags.
-	 */
-	curfrags = 0;
-	for (m = m0; m != NULL; m = m->m_next)
-		curfrags++;
-	/*
-	 * First, try to collapse mbufs.  Note that we always collapse
-	 * towards the front so we don't need to deal with moving the
-	 * pkthdr.  This may be suboptimal if the first mbuf has much
-	 * less data than the following.
-	 */
-	m = m0;
-again:
-	for (;;) {
-		n = m->m_next;
-		if (n == NULL)
-			break;
-		if ((m->m_flags & M_RDONLY) == 0 &&
-		    n->m_len < M_TRAILINGSPACE(m)) {
-			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
-				n->m_len);
-			m->m_len += n->m_len;
-			m->m_next = n->m_next;
-			m_free(n);
-			if (--curfrags <= maxfrags)
-				return m0;
-		} else
-			m = n;
-	}
-	KASSERT(maxfrags > 1,
-		("maxfrags %u, but normal collapse failed", maxfrags));
-	/*
-	 * Collapse consecutive mbufs to a cluster.
-	 */
-	prev = &m0->m_next;		/* NB: not the first mbuf */
-	while ((n = *prev) != NULL) {
-		if ((n2 = n->m_next) != NULL &&
-		    n->m_len + n2->m_len < MCLBYTES) {
-			m = m_getcl(how, MT_DATA, 0);
-			if (m == NULL)
-				goto bad;
-			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
-			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
-				n2->m_len);
-			m->m_len = n->m_len + n2->m_len;
-			m->m_next = n2->m_next;
-			*prev = m;
-			m_free(n);
-			m_free(n2);
-			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
-				return m0;
-			/*
-			 * Still not there, try the normal collapse
-			 * again before we allocate another cluster.
-			 */
-			goto again;
-		}
-		prev = &n->m_next;
-	}
-	/*
-	 * No place where we can collapse to a cluster; punt.
-	 * This can occur if, for example, you request 2 frags
-	 * but the packet requires that both be clusters (we
-	 * never reallocate the first mbuf to avoid moving the
-	 * packet header).
-	 */
-bad:
-	return NULL;
-}
-
 /*
  * Return h/w rate index for an IEEE rate (w/o basic rate bit).
  */
@@ -4033,7 +3949,7 @@ ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
 	 */
 	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
 		sc->sc_stats.ast_tx_linear++;
-		m = ath_defrag(m0, M_DONTWAIT, ATH_TXDESC);
+		m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
 		if (m == NULL) {
 			ath_freetx(m0);
 			sc->sc_stats.ast_tx_nombuf++;
diff --git a/sys/dev/cxgb/sys/mvec.h b/sys/dev/cxgb/sys/mvec.h
index b2423de61fb7..f314de7d3aca 100644
--- a/sys/dev/cxgb/sys/mvec.h
+++ b/sys/dev/cxgb/sys/mvec.h
@@ -114,7 +114,6 @@
 void mi_init(void);
 void mi_deinit(void);
 int _m_explode(struct mbuf *);
-int _m_collapse(struct mbuf *, int maxbufs, struct mbuf **);
 void mb_free_vec(struct mbuf *m);
 
 static __inline void
@@ -185,19 +184,6 @@ struct mbuf *mi_collapse_mbuf(struct mbuf_iovec *mi, struct mbuf *m);
 struct mbuf *mi_collapse_sge(struct mbuf_iovec *mi, bus_dma_segment_t *seg);
 void *mcl_alloc(int seg_count, int *type);
 
-static __inline int
-m_collapse(struct mbuf *m, int maxbufs, struct mbuf **mnew)
-{
-#if (!defined(__sparc64__) && !defined(__sun4v__))
-	if (m->m_next == NULL)
-#endif
-	{
-		*mnew = m;
-		return (0);
-	}
-	return _m_collapse(m, maxbufs, mnew);
-}
-
 void mb_free_ext_fast(struct mbuf_iovec *mi, int type, int idx);
 
 static __inline void
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 3fefecc17b83..a7fa18c83bbd 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1539,6 +1539,92 @@ nospace:
 	return (NULL);
 }
 
+/*
+ * Defragment an mbuf chain, returning at most maxfrags separate
+ * mbufs+clusters.  If this is not possible NULL is returned and
+ * the original mbuf chain is left in it's present (potentially
+ * modified) state.  We use two techniques: collapsing consecutive
+ * mbufs and replacing consecutive mbufs by a cluster.
+ *
+ * NB: this should really be named m_defrag but that name is taken
+ */
+struct mbuf *
+m_collapse(struct mbuf *m0, int how, int maxfrags)
+{
+	struct mbuf *m, *n, *n2, **prev;
+	u_int curfrags;
+
+	/*
+	 * Calculate the current number of frags.
+	 */
+	curfrags = 0;
+	for (m = m0; m != NULL; m = m->m_next)
+		curfrags++;
+	/*
+	 * First, try to collapse mbufs.  Note that we always collapse
+	 * towards the front so we don't need to deal with moving the
+	 * pkthdr.  This may be suboptimal if the first mbuf has much
+	 * less data than the following.
+	 */
+	m = m0;
+again:
+	for (;;) {
+		n = m->m_next;
+		if (n == NULL)
+			break;
+		if ((m->m_flags & M_RDONLY) == 0 &&
+		    n->m_len < M_TRAILINGSPACE(m)) {
+			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
+				n->m_len);
+			m->m_len += n->m_len;
+			m->m_next = n->m_next;
+			m_free(n);
+			if (--curfrags <= maxfrags)
+				return m0;
+		} else
+			m = n;
+	}
+	KASSERT(maxfrags > 1,
+		("maxfrags %u, but normal collapse failed", maxfrags));
+	/*
+	 * Collapse consecutive mbufs to a cluster.
+	 */
+	prev = &m0->m_next;		/* NB: not the first mbuf */
+	while ((n = *prev) != NULL) {
+		if ((n2 = n->m_next) != NULL &&
+		    n->m_len + n2->m_len < MCLBYTES) {
+			m = m_getcl(how, MT_DATA, 0);
+			if (m == NULL)
+				goto bad;
+			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
+			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
+				n2->m_len);
+			m->m_len = n->m_len + n2->m_len;
+			m->m_next = n2->m_next;
+			*prev = m;
+			m_free(n);
+			m_free(n2);
+			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
+				return m0;
+			/*
+			 * Still not there, try the normal collapse
+			 * again before we allocate another cluster.
+			 */
+			goto again;
+		}
+		prev = &n->m_next;
+	}
+	/*
+	 * No place where we can collapse to a cluster; punt.
+	 * This can occur if, for example, you request 2 frags
+	 * but the packet requires that both be clusters (we
+	 * never reallocate the first mbuf to avoid moving the
+	 * packet header).
+	 */
+bad:
+	return NULL;
+}
+
 #ifdef MBUF_STRESS_TEST
 
 /*
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index e1e0dfaca525..5a499e5c0ee0 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -746,6 +746,7 @@ int m_append(struct mbuf *, int, c_caddr_t);
 void		 m_cat(struct mbuf *, struct mbuf *);
 void		 m_extadd(struct mbuf *, caddr_t, u_int,
 		    void (*)(void *, void *), void *, int, int);
+struct mbuf	*m_collapse(struct mbuf *, int, int);
 void		 m_copyback(struct mbuf *, int, int, c_caddr_t);
 void		 m_copydata(const struct mbuf *, int, int, caddr_t);
 struct mbuf	*m_copym(struct mbuf *, int, int, int);
-- 
cgit v1.2.3
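
Usage note: other drivers can adopt the new KPI the same way the if_ath hunk
above does: when DMA mapping yields more segments than the hardware can chain,
linearize with m_collapse() and retry, freeing the chain yourself on failure
since m_collapse() never frees it. The sketch below illustrates only that
calling convention and is not part of the commit; the driver name, the
MYDRV_TXDESC limit, and the mydrv_linearize() helper are hypothetical.

#include <sys/param.h>
#include <sys/mbuf.h>

#define	MYDRV_TXDESC	8	/* assumed h/w limit on DMA segments */

/*
 * Hypothetical helper modeled on ath_tx_dmasetup(): squeeze a chain
 * into at most MYDRV_TXDESC mbufs+clusters before remapping it.
 */
static struct mbuf *
mydrv_linearize(struct mbuf *m0)
{
	struct mbuf *m;

	m = m_collapse(m0, M_DONTWAIT, MYDRV_TXDESC);
	if (m == NULL) {
		/*
		 * m_collapse() leaves the (possibly partially
		 * collapsed) chain intact on failure; the caller
		 * must free it, as if_ath does with ath_freetx().
		 */
		m_freem(m0);
		return (NULL);
	}
	return (m);	/* same head mbuf, now <= MYDRV_TXDESC frags */
}

The in-place approach is what distinguishes this from the existing m_defrag(9),
which builds a fresh chain: m_collapse() compacts the chain it is given and
never reallocates the leading pkthdr mbuf, so the head pointer stays stable
whether it succeeds or fails.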