commit b2b7ca49dca771f65f281f4b0aa0e3c39eb42837 (patch)
Author:    Alexander V. Chernikov <melifaro@FreeBSD.org>  2018-03-20 22:57:06 +0000
Committer: Alexander V. Chernikov <melifaro@FreeBSD.org>  2018-03-20 22:57:06 +0000
Tree:      6c1e0d75b76e34bb3944f606751eaf895208eca4
Parent:    b7440a86c8dab00377163bf300ae79183c3f5b84 (diff)
Use counter(9) api for the bpf(4) statistics.
Currently each bpf descriptor uses u64 variables to maintain its
counters. On interfaces with high packet rate this leads to unnecessary
contention and inaccurate reporting.

PR:			kern/205320
Reported by:		elofu17 at hotmail.com
MFC after:		2 weeks
Differential Revision:	https://reviews.freebsd.org/D14726
Notes:
    svn path=/head/; revision=331275
-rw-r--r--	sys/net/bpf.c	95
-rw-r--r--	sys/net/bpfdesc.h	15
2 files changed, 64 insertions(+), 46 deletions(-)
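
For readers unfamiliar with the counter(9) KPI: it backs each counter with
a per-CPU array, so the hot-path increment neither bounces a cache line
between CPUs nor loses updates the way an unlocked `var++` can. Below is a
minimal sketch of the allocate/add/fetch/free lifecycle this patch applies,
with hypothetical `foo_*` names standing in for `struct bpf_d` and its
`bd_*` fields; only the counter(9) calls themselves are real KPI.

	/*
	 * Illustrative sketch of the counter(9) pattern, not the
	 * actual bpf(4) code.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/counter.h>
	#include <sys/malloc.h>

	struct foo_softc {
		counter_u64_t	fs_rcount;	/* packets seen, per-CPU backed */
	};

	static void
	foo_attach(struct foo_softc *sc)
	{
		/* May sleep (M_WAITOK), so call from a sleepable context. */
		sc->fs_rcount = counter_u64_alloc(M_WAITOK);
	}

	static void
	foo_input(struct foo_softc *sc)
	{
		/*
		 * Fast path: bumps the current CPU's slot only, so
		 * concurrent updaters never contend on one cache line.
		 */
		counter_u64_add(sc->fs_rcount, 1);
	}

	static uint64_t
	foo_get_stats(struct foo_softc *sc)
	{
		/* Slow path: fold all per-CPU slots into one value. */
		return (counter_u64_fetch(sc->fs_rcount));
	}

	static void
	foo_detach(struct foo_softc *sc)
	{
		counter_u64_free(sc->fs_rcount);
	}

counter_u64_zero() resets the per-CPU slots in place; that is the call
reset_d() and bpf_zero_counters() switch to in the diff below.
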
diff --git a/sys/net/bpf.c b/sys/net/bpf.c
index 0c8dcfe1b9ae..8092965a0d25 100644
--- a/sys/net/bpf.c
+++ b/sys/net/bpf.c
@@ -280,7 +280,7 @@ bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
return (bpf_buffer_append_bytes(d, buf, offset, src, len));
case BPF_BUFMODE_ZBUF:
- d->bd_zcopy++;
+ counter_u64_add(d->bd_zcopy, 1);
return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
default:
@@ -300,7 +300,7 @@ bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
case BPF_BUFMODE_ZBUF:
- d->bd_zcopy++;
+ counter_u64_add(d->bd_zcopy, 1);
return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
default:
@@ -886,6 +886,15 @@ bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
return (error);
}
+ /* Setup counters */
+ d->bd_rcount = counter_u64_alloc(M_WAITOK);
+ d->bd_dcount = counter_u64_alloc(M_WAITOK);
+ d->bd_fcount = counter_u64_alloc(M_WAITOK);
+ d->bd_wcount = counter_u64_alloc(M_WAITOK);
+ d->bd_wfcount = counter_u64_alloc(M_WAITOK);
+ d->bd_wdcount = counter_u64_alloc(M_WAITOK);
+ d->bd_zcopy = counter_u64_alloc(M_WAITOK);
+
/*
* For historical reasons, perform a one-time initialization call to
* the buffer routines, even though we're not yet committed to a
@@ -1111,22 +1120,22 @@ bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
return (error);
BPF_PID_REFRESH_CUR(d);
- d->bd_wcount++;
+ counter_u64_add(d->bd_wcount, 1);
/* XXX: locking required */
if (d->bd_bif == NULL) {
- d->bd_wdcount++;
+ counter_u64_add(d->bd_wdcount, 1);
return (ENXIO);
}
ifp = d->bd_bif->bif_ifp;
if ((ifp->if_flags & IFF_UP) == 0) {
- d->bd_wdcount++;
+ counter_u64_add(d->bd_wdcount, 1);
return (ENETDOWN);
}
if (uio->uio_resid == 0) {
- d->bd_wdcount++;
+ counter_u64_add(d->bd_wdcount, 1);
return (0);
}
@@ -1137,10 +1146,10 @@ bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
&m, &dst, &hlen, d);
if (error) {
- d->bd_wdcount++;
+ counter_u64_add(d->bd_wdcount, 1);
return (error);
}
- d->bd_wfcount++;
+ counter_u64_add(d->bd_wfcount, 1);
if (d->bd_hdrcmplt)
dst.sa_family = pseudo_AF_HDRCMPLT;
@@ -1176,7 +1185,7 @@ bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
error = (*ifp->if_output)(ifp, m, &dst, &ro);
if (error)
- d->bd_wdcount++;
+ counter_u64_add(d->bd_wdcount, 1);
if (mc != NULL) {
if (error == 0)
@@ -1215,13 +1224,13 @@ reset_d(struct bpf_d *d)
}
if (bpf_canwritebuf(d))
d->bd_slen = 0;
- d->bd_rcount = 0;
- d->bd_dcount = 0;
- d->bd_fcount = 0;
- d->bd_wcount = 0;
- d->bd_wfcount = 0;
- d->bd_wdcount = 0;
- d->bd_zcopy = 0;
+ counter_u64_zero(d->bd_rcount);
+ counter_u64_zero(d->bd_dcount);
+ counter_u64_zero(d->bd_fcount);
+ counter_u64_zero(d->bd_wcount);
+ counter_u64_zero(d->bd_wfcount);
+ counter_u64_zero(d->bd_wdcount);
+ counter_u64_zero(d->bd_zcopy);
}
/*
@@ -1592,8 +1601,8 @@ bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
struct bpf_stat *bs = (struct bpf_stat *)addr;
/* XXXCSJP overflow */
- bs->bs_recv = d->bd_rcount;
- bs->bs_drop = d->bd_dcount;
+ bs->bs_recv = (u_int)counter_u64_fetch(d->bd_rcount);
+ bs->bs_drop = (u_int)counter_u64_fetch(d->bd_dcount);
break;
}
@@ -2146,8 +2155,7 @@ bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
* write lock, too
*/
- /* XXX: Do not protect counter for the sake of performance. */
- ++d->bd_rcount;
+ counter_u64_add(d->bd_rcount, 1);
/*
* NB: We don't call BPF_CHECK_DIRECTION() here since there is no
* way for the caller to indicate to us whether this packet
@@ -2167,7 +2175,7 @@ bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
*/
BPFD_LOCK(d);
- d->bd_fcount++;
+ counter_u64_add(d->bd_fcount, 1);
if (gottime < bpf_ts_quality(d->bd_tstamp))
gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
#ifdef MAC
@@ -2214,7 +2222,7 @@ bpf_mtap(struct bpf_if *bp, struct mbuf *m)
LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
continue;
- ++d->bd_rcount;
+ counter_u64_add(d->bd_rcount, 1);
#ifdef BPF_JITTER
bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
/* XXX We cannot handle multiple mbufs. */
@@ -2226,7 +2234,7 @@ bpf_mtap(struct bpf_if *bp, struct mbuf *m)
if (slen != 0) {
BPFD_LOCK(d);
- d->bd_fcount++;
+ counter_u64_add(d->bd_fcount, 1);
if (gottime < bpf_ts_quality(d->bd_tstamp))
gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
@@ -2277,12 +2285,12 @@ bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
continue;
- ++d->bd_rcount;
+ counter_u64_add(d->bd_rcount, 1);
slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
if (slen != 0) {
BPFD_LOCK(d);
- d->bd_fcount++;
+ counter_u64_add(d->bd_fcount, 1);
if (gottime < bpf_ts_quality(d->bd_tstamp))
gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
@@ -2435,7 +2443,7 @@ catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
* buffer model.
*/
bpf_buffull(d);
- ++d->bd_dcount;
+ counter_u64_add(d->bd_dcount, 1);
return;
}
KASSERT(!d->bd_hbuf_in_use, ("hold buffer is in use"));
@@ -2535,6 +2543,15 @@ bpf_freed(struct bpf_d *d)
if (d->bd_wfilter != NULL)
free((caddr_t)d->bd_wfilter, M_BPF);
mtx_destroy(&d->bd_lock);
+
+ counter_u64_free(d->bd_rcount);
+ counter_u64_free(d->bd_dcount);
+ counter_u64_free(d->bd_fcount);
+ counter_u64_free(d->bd_wcount);
+ counter_u64_free(d->bd_wfcount);
+ counter_u64_free(d->bd_wdcount);
+ counter_u64_free(d->bd_zcopy);
+
}
/*
@@ -2835,12 +2852,12 @@ bpf_zero_counters(void)
BPFIF_RLOCK(bp);
LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
BPFD_LOCK(bd);
- bd->bd_rcount = 0;
- bd->bd_dcount = 0;
- bd->bd_fcount = 0;
- bd->bd_wcount = 0;
- bd->bd_wfcount = 0;
- bd->bd_zcopy = 0;
+ counter_u64_zero(bd->bd_rcount);
+ counter_u64_zero(bd->bd_dcount);
+ counter_u64_zero(bd->bd_fcount);
+ counter_u64_zero(bd->bd_wcount);
+ counter_u64_zero(bd->bd_wfcount);
+ counter_u64_zero(bd->bd_zcopy);
BPFD_UNLOCK(bd);
}
BPFIF_RUNLOCK(bp);
@@ -2865,9 +2882,9 @@ bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
d->bd_direction = bd->bd_direction;
d->bd_feedback = bd->bd_feedback;
d->bd_async = bd->bd_async;
- d->bd_rcount = bd->bd_rcount;
- d->bd_dcount = bd->bd_dcount;
- d->bd_fcount = bd->bd_fcount;
+ d->bd_rcount = counter_u64_fetch(bd->bd_rcount);
+ d->bd_dcount = counter_u64_fetch(bd->bd_dcount);
+ d->bd_fcount = counter_u64_fetch(bd->bd_fcount);
d->bd_sig = bd->bd_sig;
d->bd_slen = bd->bd_slen;
d->bd_hlen = bd->bd_hlen;
@@ -2876,10 +2893,10 @@ bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
strlcpy(d->bd_ifname,
bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
d->bd_locked = bd->bd_locked;
- d->bd_wcount = bd->bd_wcount;
- d->bd_wdcount = bd->bd_wdcount;
- d->bd_wfcount = bd->bd_wfcount;
- d->bd_zcopy = bd->bd_zcopy;
+ d->bd_wcount = counter_u64_fetch(bd->bd_wcount);
+ d->bd_wdcount = counter_u64_fetch(bd->bd_wdcount);
+ d->bd_wfcount = counter_u64_fetch(bd->bd_wfcount);
+ d->bd_zcopy = counter_u64_fetch(bd->bd_zcopy);
d->bd_bufmode = bd->bd_bufmode;
}
diff --git a/sys/net/bpfdesc.h b/sys/net/bpfdesc.h
index 1d91ffe1ed3f..f72a579f0042 100644
--- a/sys/net/bpfdesc.h
+++ b/sys/net/bpfdesc.h
@@ -45,6 +45,7 @@
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/conf.h>
+#include <sys/counter.h>
#include <net/if.h>
/*
@@ -76,8 +77,8 @@ struct bpf_d {
struct bpf_insn *bd_rfilter; /* read filter code */
struct bpf_insn *bd_wfilter; /* write filter code */
void *bd_bfilter; /* binary filter code */
- u_int64_t bd_rcount; /* number of packets received */
- u_int64_t bd_dcount; /* number of packets dropped */
+ counter_u64_t bd_rcount; /* number of packets received */
+ counter_u64_t bd_dcount; /* number of packets dropped */
u_char bd_promisc; /* true if listening promiscuously */
u_char bd_state; /* idle, waiting, or timed out */
@@ -94,14 +95,14 @@ struct bpf_d {
struct mtx bd_lock; /* per-descriptor lock */
struct callout bd_callout; /* for BPF timeouts with select */
struct label *bd_label; /* MAC label for descriptor */
- u_int64_t bd_fcount; /* number of packets which matched filter */
+ counter_u64_t bd_fcount; /* number of packets which matched filter */
pid_t bd_pid; /* PID which created descriptor */
int bd_locked; /* true if descriptor is locked */
u_int bd_bufmode; /* Current buffer mode. */
- u_int64_t bd_wcount; /* number of packets written */
- u_int64_t bd_wfcount; /* number of packets that matched write filter */
- u_int64_t bd_wdcount; /* number of packets dropped during a write */
- u_int64_t bd_zcopy; /* number of zero copy operations */
+ counter_u64_t bd_wcount; /* number of packets written */
+ counter_u64_t bd_wfcount; /* number of packets that matched write filter */
+ counter_u64_t bd_wdcount; /* number of packets dropped during a write */
+ counter_u64_t bd_zcopy; /* number of zero copy operations */
u_char bd_compat32; /* 32-bit stream on LP64 system */
};