author     Zhenlei Huang <zlei@FreeBSD.org>    2023-03-29 16:25:16 +0000
committer  Zhenlei Huang <zlei@FreeBSD.org>    2023-03-29 16:29:51 +0000
commit     d4a80d21b3d32a2de02d1820cc1f38dba1f127cb (patch)
tree       3e5febf28287abbe5afe273e5a59e997a2daf375 /sys
parent     dbe86dd5de18fdf61e1300f6575e0f50785bf6b3 (diff)
download   src-d4a80d21b3d32a2de02d1820cc1f38dba1f127cb.tar.gz
           src-d4a80d21b3d32a2de02d1820cc1f38dba1f127cb.zip
lagg(4): Do not enter net epoch recursively
This saves a little in resources.
No functional change intended.
Reviewed by: kp
Fixes: b8a6e03fac92 Widen NET_EPOCH coverage
MFC after: 1 week
Differential Revision: https://reviews.freebsd.org/D39267
Diffstat (limited to 'sys')
-rw-r--r--   sys/net/if_lagg.c   23
1 file changed, 5 insertions, 18 deletions
diff --git a/sys/net/if_lagg.c b/sys/net/if_lagg.c
index 7acfa524670c..063782b49ad7 100644
--- a/sys/net/if_lagg.c
+++ b/sys/net/if_lagg.c
@@ -2107,18 +2107,15 @@ lagg_setflags(struct lagg_port *lp, int status)
 static int
 lagg_transmit_ethernet(struct ifnet *ifp, struct mbuf *m)
 {
-	struct epoch_tracker et;
 	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
-	int error;
 
+	NET_EPOCH_ASSERT();
 #if defined(KERN_TLS) || defined(RATELIMIT)
 	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
 		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
 #endif
-	NET_EPOCH_ENTER(et);
 	/* We need a Tx algorithm and at least one port */
 	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
-		NET_EPOCH_EXIT(et);
 		m_freem(m);
 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 		return (ENXIO);
@@ -2126,26 +2123,21 @@ lagg_transmit_ethernet(struct ifnet *ifp, struct mbuf *m)
 
 	ETHER_BPF_MTAP(ifp, m);
 
-	error = lagg_proto_start(sc, m);
-	NET_EPOCH_EXIT(et);
-	return (error);
+	return (lagg_proto_start(sc, m));
 }
 
 static int
 lagg_transmit_infiniband(struct ifnet *ifp, struct mbuf *m)
 {
-	struct epoch_tracker et;
 	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
-	int error;
 
+	NET_EPOCH_ASSERT();
#if defined(KERN_TLS) || defined(RATELIMIT)
 	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
 		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
 #endif
-	NET_EPOCH_ENTER(et);
 	/* We need a Tx algorithm and at least one port */
 	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
-		NET_EPOCH_EXIT(et);
 		m_freem(m);
 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 		return (ENXIO);
@@ -2153,9 +2145,7 @@ lagg_transmit_infiniband(struct ifnet *ifp, struct mbuf *m)
 
 	infiniband_bpf_mtap(ifp, m);
 
-	error = lagg_proto_start(sc, m);
-	NET_EPOCH_EXIT(et);
-	return (error);
+	return (lagg_proto_start(sc, m));
 }
 
 /*
@@ -2169,16 +2159,14 @@ lagg_qflush(struct ifnet *ifp __unused)
 static struct mbuf *
 lagg_input_ethernet(struct ifnet *ifp, struct mbuf *m)
 {
-	struct epoch_tracker et;
 	struct lagg_port *lp = ifp->if_lagg;
 	struct lagg_softc *sc = lp->lp_softc;
 	struct ifnet *scifp = sc->sc_ifp;
 
-	NET_EPOCH_ENTER(et);
+	NET_EPOCH_ASSERT();
 	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
 	    lp->lp_detaching != 0 ||
 	    sc->sc_proto == LAGG_PROTO_NONE) {
-		NET_EPOCH_EXIT(et);
 		m_freem(m);
 		return (NULL);
 	}
@@ -2198,7 +2186,6 @@ lagg_input_ethernet(struct ifnet *ifp, struct mbuf *m)
 	}
 #endif /* DEV_NETMAP */
 
-	NET_EPOCH_EXIT(et);
 	return (m);
 }
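For readers unfamiliar with the pattern: the net epoch is FreeBSD's read-side synchronization section (NET_EPOCH_ENTER()/NET_EPOCH_EXIT()), and since b8a6e03fac92 the network stack already holds it when it calls an interface's transmit and input methods. Re-entering the epoch inside those methods is therefore recursive and only adds overhead, so the commit replaces the enter/exit pairs with NET_EPOCH_ASSERT(), which merely checks (on kernels built with INVARIANTS) that the caller is inside an epoch section. A minimal before/after sketch of that transformation, using a hypothetical foo_transmit() handler and foo_start() helper rather than the lagg code itself:

/*
 * Illustrative sketch only; foo_transmit()/foo_start() are hypothetical
 * stand-ins for lagg_transmit_ethernet()/lagg_proto_start().
 */

/* Before: the handler entered the net epoch itself, even though the
 * network stack calling it is already inside one. */
static int
foo_transmit_before(struct ifnet *ifp, struct mbuf *m)
{
	struct epoch_tracker et;
	int error;

	NET_EPOCH_ENTER(et);	/* recursive enter: caller already holds it */
	error = foo_start(ifp, m);
	NET_EPOCH_EXIT(et);
	return (error);
}

/* After: document the requirement instead of re-entering the epoch. */
static int
foo_transmit_after(struct ifnet *ifp, struct mbuf *m)
{
	NET_EPOCH_ASSERT();	/* caller must be in the net epoch */
	return (foo_start(ifp, m));
}

The epoch is still held across foo_start(); the only change is which layer enters it, which is why the commit can claim no functional change.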