aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorGleb Smirnoff <glebius@FreeBSD.org>2014-11-12 09:57:15 +0000
committerGleb Smirnoff <glebius@FreeBSD.org>2014-11-12 09:57:15 +0000
commitcfa6009e3646cc9efc24f3b3fb2d15b4f6a6c2b5 (patch)
tree376a152fabadae344479bb291540f2dc2d1edf0d /sys
parentb10cc05cf5d5fc7a0827c50a1a58265752d3edc8 (diff)
downloadsrc-cfa6009e3646cc9efc24f3b3fb2d15b4f6a6c2b5.tar.gz
src-cfa6009e3646cc9efc24f3b3fb2d15b4f6a6c2b5.zip
In preparation of merging projects/sendfile, transform bare access to
sb_cc member of struct sockbuf to a couple of inline functions: sbavail() and sbused(). Right now they are equal, but once the notion of "not ready socket buffer data" is checked in, they are going to be different. Sponsored by: Netflix Sponsored by: Nginx, Inc.
Notes
Notes: svn path=/head/; revision=274421
Diffstat (limited to 'sys')
-rw-r--r--sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_cm.c4
-rw-r--r--sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c14
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/cm.c12
-rw-r--r--sys/dev/cxgbe/tom/t4_cpl_io.c24
-rw-r--r--sys/dev/cxgbe/tom/t4_ddp.c44
-rw-r--r--sys/dev/iscsi/icl.c2
-rw-r--r--sys/kern/sys_socket.c25
-rw-r--r--sys/kern/uipc_socket.c42
-rw-r--r--sys/netgraph/bluetooth/socket/ng_btsocket_l2cap.c5
-rw-r--r--sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c2
-rw-r--r--sys/netgraph/bluetooth/socket/ng_btsocket_sco.c4
-rw-r--r--sys/netinet/accf_dns.c6
-rw-r--r--sys/netinet/accf_http.c11
-rw-r--r--sys/netinet/siftr.c4
-rw-r--r--sys/netinet/tcp_input.c14
-rw-r--r--sys/netinet/tcp_output.c31
-rw-r--r--sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c26
-rw-r--r--sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c2
-rw-r--r--sys/rpc/clnt_vc.c4
-rw-r--r--sys/rpc/svc_vc.c2
-rw-r--r--sys/sys/sockbuf.h28
-rw-r--r--sys/sys/socketvar.h2
22 files changed, 173 insertions, 135 deletions
diff --git a/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_cm.c b/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_cm.c
index fea86eba709d..eba1ab72b0f0 100644
--- a/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_cm.c
+++ b/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_cm.c
@@ -1507,11 +1507,11 @@ process_data(struct iwch_ep *ep)
process_mpa_request(ep);
break;
default:
- if (ep->com.so->so_rcv.sb_cc)
+ if (sbavail(&ep->com.so->so_rcv))
printf("%s Unexpected streaming data."
" ep %p state %d so %p so_state %x so_rcv.sb_cc %u so_rcv.sb_mb %p\n",
__FUNCTION__, ep, state_read(&ep->com), ep->com.so, ep->com.so->so_state,
- ep->com.so->so_rcv.sb_cc, ep->com.so->so_rcv.sb_mb);
+ sbavail(&ep->com.so->so_rcv), ep->com.so->so_rcv.sb_mb);
break;
}
return;
diff --git a/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c b/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c
index a86bf7207796..81a446a64a4f 100644
--- a/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c
+++ b/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c
@@ -445,8 +445,8 @@ t3_push_frames(struct socket *so, int req_completion)
* Autosize the send buffer.
*/
if (snd->sb_flags & SB_AUTOSIZE && VNET(tcp_do_autosndbuf)) {
- if (snd->sb_cc >= (snd->sb_hiwat / 8 * 7) &&
- snd->sb_cc < VNET(tcp_autosndbuf_max)) {
+ if (sbused(snd) >= (snd->sb_hiwat / 8 * 7) &&
+ sbused(snd) < VNET(tcp_autosndbuf_max)) {
if (!sbreserve_locked(snd, min(snd->sb_hiwat +
VNET(tcp_autosndbuf_inc), VNET(tcp_autosndbuf_max)),
so, curthread))
@@ -597,10 +597,10 @@ t3_rcvd(struct toedev *tod, struct tcpcb *tp)
INP_WLOCK_ASSERT(inp);
SOCKBUF_LOCK(so_rcv);
- KASSERT(toep->tp_enqueued >= so_rcv->sb_cc,
- ("%s: so_rcv->sb_cc > enqueued", __func__));
- toep->tp_rx_credits += toep->tp_enqueued - so_rcv->sb_cc;
- toep->tp_enqueued = so_rcv->sb_cc;
+ KASSERT(toep->tp_enqueued >= sbused(so_rcv),
+ ("%s: sbused(so_rcv) > enqueued", __func__));
+ toep->tp_rx_credits += toep->tp_enqueued - sbused(so_rcv);
+ toep->tp_enqueued = sbused(so_rcv);
SOCKBUF_UNLOCK(so_rcv);
must_send = toep->tp_rx_credits + 16384 >= tp->rcv_wnd;
@@ -1768,7 +1768,7 @@ wr_ack(struct toepcb *toep, struct mbuf *m)
so_sowwakeup_locked(so);
}
- if (snd->sb_sndptroff < snd->sb_cc)
+ if (snd->sb_sndptroff < sbused(snd))
t3_push_frames(so, 0);
out_free:
diff --git a/sys/dev/cxgbe/iw_cxgbe/cm.c b/sys/dev/cxgbe/iw_cxgbe/cm.c
index d9009bdf6b88..4702e19a5866 100644
--- a/sys/dev/cxgbe/iw_cxgbe/cm.c
+++ b/sys/dev/cxgbe/iw_cxgbe/cm.c
@@ -584,8 +584,8 @@ process_data(struct c4iw_ep *ep)
{
struct sockaddr_in *local, *remote;
- CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sb_cc %d", __func__,
- ep->com.so, ep, states[ep->com.state], ep->com.so->so_rcv.sb_cc);
+ CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
+ ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));
switch (state_read(&ep->com)) {
case MPA_REQ_SENT:
@@ -601,11 +601,11 @@ process_data(struct c4iw_ep *ep)
process_mpa_request(ep);
break;
default:
- if (ep->com.so->so_rcv.sb_cc)
- log(LOG_ERR, "%s: Unexpected streaming data. "
- "ep %p, state %d, so %p, so_state 0x%x, sb_cc %u\n",
+ if (sbused(&ep->com.so->so_rcv))
+ log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
+ "state %d, so %p, so_state 0x%x, sbused %u\n",
__func__, ep, state_read(&ep->com), ep->com.so,
- ep->com.so->so_state, ep->com.so->so_rcv.sb_cc);
+ ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
break;
}
}
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index 9af2248dede4..29e5fa243be5 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -365,15 +365,15 @@ t4_rcvd(struct toedev *tod, struct tcpcb *tp)
INP_WLOCK_ASSERT(inp);
SOCKBUF_LOCK(sb);
- KASSERT(toep->sb_cc >= sb->sb_cc,
+ KASSERT(toep->sb_cc >= sbused(sb),
("%s: sb %p has more data (%d) than last time (%d).",
- __func__, sb, sb->sb_cc, toep->sb_cc));
+ __func__, sb, sbused(sb), toep->sb_cc));
if (toep->ulp_mode == ULP_MODE_ISCSI) {
toep->rx_credits += toep->sb_cc;
toep->sb_cc = 0;
} else {
- toep->rx_credits += toep->sb_cc - sb->sb_cc;
- toep->sb_cc = sb->sb_cc;
+ toep->rx_credits += toep->sb_cc - sbused(sb);
+ toep->sb_cc = sbused(sb);
}
credits = toep->rx_credits;
SOCKBUF_UNLOCK(sb);
@@ -1079,15 +1079,15 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
tp->rcv_nxt = be32toh(cpl->rcv_nxt);
toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);
- KASSERT(toep->sb_cc >= sb->sb_cc,
+ KASSERT(toep->sb_cc >= sbused(sb),
("%s: sb %p has more data (%d) than last time (%d).",
- __func__, sb, sb->sb_cc, toep->sb_cc));
- toep->rx_credits += toep->sb_cc - sb->sb_cc;
+ __func__, sb, sbused(sb), toep->sb_cc));
+ toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
toep->rx_credits -= m->m_len; /* adjust for F_RX_FC_DDP */
#endif
sbappendstream_locked(sb, m);
- toep->sb_cc = sb->sb_cc;
+ toep->sb_cc = sbused(sb);
}
socantrcvmore_locked(so); /* unlocks the sockbuf */
@@ -1582,12 +1582,12 @@ do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
}
}
- KASSERT(toep->sb_cc >= sb->sb_cc,
+ KASSERT(toep->sb_cc >= sbused(sb),
("%s: sb %p has more data (%d) than last time (%d).",
- __func__, sb, sb->sb_cc, toep->sb_cc));
- toep->rx_credits += toep->sb_cc - sb->sb_cc;
+ __func__, sb, sbused(sb), toep->sb_cc));
+ toep->rx_credits += toep->sb_cc - sbused(sb);
sbappendstream_locked(sb, m);
- toep->sb_cc = sb->sb_cc;
+ toep->sb_cc = sbused(sb);
sorwakeup_locked(so);
SOCKBUF_UNLOCK_ASSERT(sb);
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index 3691a3b516dc..89585cf83039 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -224,15 +224,15 @@ insert_ddp_data(struct toepcb *toep, uint32_t n)
tp->rcv_wnd -= n;
#endif
- KASSERT(toep->sb_cc >= sb->sb_cc,
+ KASSERT(toep->sb_cc >= sbused(sb),
("%s: sb %p has more data (%d) than last time (%d).",
- __func__, sb, sb->sb_cc, toep->sb_cc));
- toep->rx_credits += toep->sb_cc - sb->sb_cc;
+ __func__, sb, sbused(sb), toep->sb_cc));
+ toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
toep->rx_credits -= n; /* adjust for F_RX_FC_DDP */
#endif
sbappendstream_locked(sb, m);
- toep->sb_cc = sb->sb_cc;
+ toep->sb_cc = sbused(sb);
}
/* SET_TCB_FIELD sent as a ULP command looks like this */
@@ -459,15 +459,15 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
else
discourage_ddp(toep);
- KASSERT(toep->sb_cc >= sb->sb_cc,
+ KASSERT(toep->sb_cc >= sbused(sb),
("%s: sb %p has more data (%d) than last time (%d).",
- __func__, sb, sb->sb_cc, toep->sb_cc));
- toep->rx_credits += toep->sb_cc - sb->sb_cc;
+ __func__, sb, sbused(sb), toep->sb_cc));
+ toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
toep->rx_credits -= len; /* adjust for F_RX_FC_DDP */
#endif
sbappendstream_locked(sb, m);
- toep->sb_cc = sb->sb_cc;
+ toep->sb_cc = sbused(sb);
wakeup:
KASSERT(toep->ddp_flags & db_flag,
("%s: DDP buffer not active. toep %p, ddp_flags 0x%x, report 0x%x",
@@ -908,7 +908,7 @@ handle_ddp(struct socket *so, struct uio *uio, int flags, int error)
#endif
/* XXX: too eager to disable DDP, could handle NBIO better than this. */
- if (sb->sb_cc >= uio->uio_resid || uio->uio_resid < sc->tt.ddp_thres ||
+ if (sbused(sb) >= uio->uio_resid || uio->uio_resid < sc->tt.ddp_thres ||
uio->uio_resid > MAX_DDP_BUFFER_SIZE || uio->uio_iovcnt > 1 ||
so->so_state & SS_NBIO || flags & (MSG_DONTWAIT | MSG_NBIO) ||
error || so->so_error || sb->sb_state & SBS_CANTRCVMORE)
@@ -946,7 +946,7 @@ handle_ddp(struct socket *so, struct uio *uio, int flags, int error)
* payload.
*/
ddp_flags = select_ddp_flags(so, flags, db_idx);
- wr = mk_update_tcb_for_ddp(sc, toep, db_idx, sb->sb_cc, ddp_flags);
+ wr = mk_update_tcb_for_ddp(sc, toep, db_idx, sbused(sb), ddp_flags);
if (wr == NULL) {
/*
* Just unhold the pages. The DDP buffer's software state is
@@ -1134,8 +1134,8 @@ restart:
/* uio should be just as it was at entry */
KASSERT(oresid == uio->uio_resid,
- ("%s: oresid = %d, uio_resid = %zd, sb_cc = %d",
- __func__, oresid, uio->uio_resid, sb->sb_cc));
+ ("%s: oresid = %d, uio_resid = %zd, sbused = %d",
+ __func__, oresid, uio->uio_resid, sbused(sb)));
error = handle_ddp(so, uio, flags, 0);
ddp_handled = 1;
@@ -1145,7 +1145,7 @@ restart:
/* Abort if socket has reported problems. */
if (so->so_error) {
- if (sb->sb_cc > 0)
+ if (sbused(sb))
goto deliver;
if (oresid > uio->uio_resid)
goto out;
@@ -1157,32 +1157,32 @@ restart:
/* Door is closed. Deliver what is left, if any. */
if (sb->sb_state & SBS_CANTRCVMORE) {
- if (sb->sb_cc > 0)
+ if (sbused(sb))
goto deliver;
else
goto out;
}
/* Socket buffer is empty and we shall not block. */
- if (sb->sb_cc == 0 &&
+ if (sbused(sb) == 0 &&
((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
error = EAGAIN;
goto out;
}
/* Socket buffer got some data that we shall deliver now. */
- if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
+ if (sbused(sb) && !(flags & MSG_WAITALL) &&
((sb->sb_flags & SS_NBIO) ||
(flags & (MSG_DONTWAIT|MSG_NBIO)) ||
- sb->sb_cc >= sb->sb_lowat ||
- sb->sb_cc >= uio->uio_resid ||
- sb->sb_cc >= sb->sb_hiwat) ) {
+ sbused(sb) >= sb->sb_lowat ||
+ sbused(sb) >= uio->uio_resid ||
+ sbused(sb) >= sb->sb_hiwat) ) {
goto deliver;
}
/* On MSG_WAITALL we must wait until all data or error arrives. */
if ((flags & MSG_WAITALL) &&
- (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_lowat))
+ (sbused(sb) >= uio->uio_resid || sbused(sb) >= sb->sb_lowat))
goto deliver;
/*
@@ -1201,7 +1201,7 @@ restart:
deliver:
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
- KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
+ KASSERT(sbused(sb) > 0, ("%s: sockbuf empty", __func__));
KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
if (sb->sb_flags & SB_DDP_INDICATE && !ddp_handled)
@@ -1212,7 +1212,7 @@ deliver:
uio->uio_td->td_ru.ru_msgrcv++;
/* Fill uio until full or current end of socket buffer is reached. */
- len = min(uio->uio_resid, sb->sb_cc);
+ len = min(uio->uio_resid, sbused(sb));
if (mp0 != NULL) {
/* Dequeue as many mbufs as possible. */
if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
diff --git a/sys/dev/iscsi/icl.c b/sys/dev/iscsi/icl.c
index f56e494fea7e..6bce18020425 100644
--- a/sys/dev/iscsi/icl.c
+++ b/sys/dev/iscsi/icl.c
@@ -758,7 +758,7 @@ icl_receive_thread(void *arg)
* is enough data received to read the PDU.
*/
SOCKBUF_LOCK(&so->so_rcv);
- available = so->so_rcv.sb_cc;
+ available = sbavail(&so->so_rcv);
if (available < ic->ic_receive_len) {
so->so_rcv.sb_lowat = ic->ic_receive_len;
cv_wait(&ic->ic_receive_cv, &so->so_rcv.sb_mtx);
diff --git a/sys/kern/sys_socket.c b/sys/kern/sys_socket.c
index 47cedfeab4b8..dd831ae81a66 100644
--- a/sys/kern/sys_socket.c
+++ b/sys/kern/sys_socket.c
@@ -175,16 +175,17 @@ soo_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
case FIONREAD:
/* Unlocked read. */
- *(int *)data = so->so_rcv.sb_cc;
+ *(int *)data = sbavail(&so->so_rcv);
break;
case FIONWRITE:
/* Unlocked read. */
- *(int *)data = so->so_snd.sb_cc;
+ *(int *)data = sbavail(&so->so_snd);
break;
case FIONSPACE:
- if ((so->so_snd.sb_hiwat < so->so_snd.sb_cc) ||
+ /* Unlocked read. */
+ if ((so->so_snd.sb_hiwat < sbused(&so->so_snd)) ||
(so->so_snd.sb_mbmax < so->so_snd.sb_mbcnt))
*(int *)data = 0;
else
@@ -254,6 +255,7 @@ soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
struct thread *td)
{
struct socket *so = fp->f_data;
+ struct sockbuf *sb;
#ifdef MAC
int error;
#endif
@@ -269,15 +271,18 @@ soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
* If SBS_CANTRCVMORE is set, but there's still data left in the
* receive buffer, the socket is still readable.
*/
- SOCKBUF_LOCK(&so->so_rcv);
- if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 ||
- so->so_rcv.sb_cc != 0)
+ sb = &so->so_rcv;
+ SOCKBUF_LOCK(sb);
+ if ((sb->sb_state & SBS_CANTRCVMORE) == 0 || sbavail(sb))
ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
- ub->st_size = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
- SOCKBUF_UNLOCK(&so->so_rcv);
- /* Unlocked read. */
- if ((so->so_snd.sb_state & SBS_CANTSENDMORE) == 0)
+ ub->st_size = sbavail(sb) - sb->sb_ctl;
+ SOCKBUF_UNLOCK(sb);
+
+ sb = &so->so_snd;
+ SOCKBUF_LOCK(sb);
+ if ((sb->sb_state & SBS_CANTSENDMORE) == 0)
ub->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
+ SOCKBUF_UNLOCK(sb);
ub->st_uid = so->so_cred->cr_uid;
ub->st_gid = so->so_cred->cr_gid;
return (*so->so_proto->pr_usrreqs->pru_sense)(so, ub);
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index 706632716b1f..e2fd1f3a6a58 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -1522,12 +1522,12 @@ restart:
* 2. MSG_DONTWAIT is not set
*/
if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
- so->so_rcv.sb_cc < uio->uio_resid) &&
- so->so_rcv.sb_cc < so->so_rcv.sb_lowat &&
+ sbavail(&so->so_rcv) < uio->uio_resid) &&
+ sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
- KASSERT(m != NULL || !so->so_rcv.sb_cc,
- ("receive: m == %p so->so_rcv.sb_cc == %u",
- m, so->so_rcv.sb_cc));
+ KASSERT(m != NULL || !sbavail(&so->so_rcv),
+ ("receive: m == %p sbavail == %u",
+ m, sbavail(&so->so_rcv)));
if (so->so_error) {
if (m != NULL)
goto dontblock;
@@ -1976,7 +1976,7 @@ restart:
/* Abort if socket has reported problems. */
if (so->so_error) {
- if (sb->sb_cc > 0)
+ if (sbavail(sb) > 0)
goto deliver;
if (oresid > uio->uio_resid)
goto out;
@@ -1988,32 +1988,32 @@ restart:
/* Door is closed. Deliver what is left, if any. */
if (sb->sb_state & SBS_CANTRCVMORE) {
- if (sb->sb_cc > 0)
+ if (sbavail(sb) > 0)
goto deliver;
else
goto out;
}
/* Socket buffer is empty and we shall not block. */
- if (sb->sb_cc == 0 &&
+ if (sbavail(sb) == 0 &&
((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
error = EAGAIN;
goto out;
}
/* Socket buffer got some data that we shall deliver now. */
- if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
+ if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
((sb->sb_flags & SS_NBIO) ||
(flags & (MSG_DONTWAIT|MSG_NBIO)) ||
- sb->sb_cc >= sb->sb_lowat ||
- sb->sb_cc >= uio->uio_resid ||
- sb->sb_cc >= sb->sb_hiwat) ) {
+ sbavail(sb) >= sb->sb_lowat ||
+ sbavail(sb) >= uio->uio_resid ||
+ sbavail(sb) >= sb->sb_hiwat) ) {
goto deliver;
}
/* On MSG_WAITALL we must wait until all data or error arrives. */
if ((flags & MSG_WAITALL) &&
- (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_hiwat))
+ (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
goto deliver;
/*
@@ -2027,7 +2027,7 @@ restart:
deliver:
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
- KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
+ KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
/* Statistics. */
@@ -2035,7 +2035,7 @@ deliver:
uio->uio_td->td_ru.ru_msgrcv++;
/* Fill uio until full or current end of socket buffer is reached. */
- len = min(uio->uio_resid, sb->sb_cc);
+ len = min(uio->uio_resid, sbavail(sb));
if (mp0 != NULL) {
/* Dequeue as many mbufs as possible. */
if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
@@ -2170,9 +2170,9 @@ soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
*/
SOCKBUF_LOCK(&so->so_rcv);
while ((m = so->so_rcv.sb_mb) == NULL) {
- KASSERT(so->so_rcv.sb_cc == 0,
- ("soreceive_dgram: sb_mb NULL but sb_cc %u",
- so->so_rcv.sb_cc));
+ KASSERT(sbavail(&so->so_rcv) == 0,
+ ("soreceive_dgram: sb_mb NULL but sbavail %u",
+ sbavail(&so->so_rcv)));
if (so->so_error) {
error = so->so_error;
so->so_error = 0;
@@ -3248,7 +3248,7 @@ filt_soread(struct knote *kn, long hint)
so = kn->kn_fp->f_data;
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
- kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
+ kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
kn->kn_flags |= EV_EOF;
kn->kn_fflags = so->so_error;
@@ -3260,7 +3260,7 @@ filt_soread(struct knote *kn, long hint)
if (kn->kn_data >= kn->kn_sdata)
return 1;
} else {
- if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat)
+ if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat)
return 1;
}
@@ -3451,7 +3451,7 @@ soisdisconnected(struct socket *so)
sorwakeup_locked(so);
SOCKBUF_LOCK(&so->so_snd);
so->so_snd.sb_state |= SBS_CANTSENDMORE;
- sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
+ sbdrop_locked(&so->so_snd, sbused(&so->so_snd));
sowwakeup_locked(so);
wakeup(&so->so_timeo);
}
diff --git a/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap.c b/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap.c
index bab8bbbbdd85..d2e04879a9f9 100644
--- a/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap.c
+++ b/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap.c
@@ -1127,9 +1127,8 @@ ng_btsocket_l2cap_process_l2ca_write_rsp(struct ng_mesg *msg,
/*
* Check if we have more data to send
*/
-
sbdroprecord(&pcb->so->so_snd);
- if (pcb->so->so_snd.sb_cc > 0) {
+ if (sbavail(&pcb->so->so_snd) > 0) {
if (ng_btsocket_l2cap_send2(pcb) == 0)
ng_btsocket_l2cap_timeout(pcb);
else
@@ -2513,7 +2512,7 @@ ng_btsocket_l2cap_send2(ng_btsocket_l2cap_pcb_p pcb)
mtx_assert(&pcb->pcb_mtx, MA_OWNED);
- if (pcb->so->so_snd.sb_cc == 0)
+ if (sbavail(&pcb->so->so_snd) == 0)
return (EINVAL); /* XXX */
m = m_dup(pcb->so->so_snd.sb_mb, M_NOWAIT);
diff --git a/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c b/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c
index cb3753d4bf04..a2190c78a618 100644
--- a/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c
+++ b/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c
@@ -3279,7 +3279,7 @@ ng_btsocket_rfcomm_pcb_send(ng_btsocket_rfcomm_pcb_p pcb, int limit)
}
for (error = 0, sent = 0; sent < limit; sent ++) {
- length = min(pcb->mtu, pcb->so->so_snd.sb_cc);
+ length = min(pcb->mtu, sbavail(&pcb->so->so_snd));
if (length == 0)
break;
diff --git a/sys/netgraph/bluetooth/socket/ng_btsocket_sco.c b/sys/netgraph/bluetooth/socket/ng_btsocket_sco.c
index f0d87b3940b7..9ff0cebabb88 100644
--- a/sys/netgraph/bluetooth/socket/ng_btsocket_sco.c
+++ b/sys/netgraph/bluetooth/socket/ng_btsocket_sco.c
@@ -906,7 +906,7 @@ ng_btsocket_sco_default_msg_input(struct ng_mesg *msg, hook_p hook)
sbdroprecord(&pcb->so->so_snd);
/* Send more if we have any */
- if (pcb->so->so_snd.sb_cc > 0)
+ if (sbavail(&pcb->so->so_snd) > 0)
if (ng_btsocket_sco_send2(pcb) == 0)
ng_btsocket_sco_timeout(pcb);
@@ -1748,7 +1748,7 @@ ng_btsocket_sco_send2(ng_btsocket_sco_pcb_p pcb)
mtx_assert(&pcb->pcb_mtx, MA_OWNED);
while (pcb->rt->pending < pcb->rt->num_pkts &&
- pcb->so->so_snd.sb_cc > 0) {
+ sbavail(&pcb->so->so_snd) > 0) {
/* Get a copy of the first packet on send queue */
m = m_dup(pcb->so->so_snd.sb_mb, M_NOWAIT);
if (m == NULL) {
diff --git a/sys/netinet/accf_dns.c b/sys/netinet/accf_dns.c
index ec2b4cfb804f..85214d6d93ca 100644
--- a/sys/netinet/accf_dns.c
+++ b/sys/netinet/accf_dns.c
@@ -75,7 +75,7 @@ sohasdns(struct socket *so, void *arg, int waitflag)
struct sockbuf *sb = &so->so_rcv;
/* If the socket is full, we're ready. */
- if (sb->sb_cc >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax)
+ if (sbused(sb) >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax)
goto ready;
/* Check to see if we have a request. */
@@ -115,14 +115,14 @@ skippacket(struct sockbuf *sb) {
unsigned long packlen;
struct packet q, *p = &q;
- if (sb->sb_cc < 2)
+ if (sbavail(sb) < 2)
return DNS_WAIT;
q.m = sb->sb_mb;
q.n = q.m->m_nextpkt;
q.moff = 0;
q.offset = 0;
- q.len = sb->sb_cc;
+ q.len = sbavail(sb);
GET16(p, packlen);
if (packlen + 2 > q.len)
diff --git a/sys/netinet/accf_http.c b/sys/netinet/accf_http.c
index 41e442c4fa7f..33734c717a62 100644
--- a/sys/netinet/accf_http.c
+++ b/sys/netinet/accf_http.c
@@ -92,7 +92,7 @@ sbfull(struct sockbuf *sb)
"mbcnt(%ld) >= mbmax(%ld): %d",
sb->sb_cc, sb->sb_hiwat, sb->sb_cc >= sb->sb_hiwat,
sb->sb_mbcnt, sb->sb_mbmax, sb->sb_mbcnt >= sb->sb_mbmax);
- return (sb->sb_cc >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax);
+ return (sbused(sb) >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax);
}
/*
@@ -162,13 +162,14 @@ static int
sohashttpget(struct socket *so, void *arg, int waitflag)
{
- if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 && !sbfull(&so->so_rcv)) {
+ if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 &&
+ !sbfull(&so->so_rcv)) {
struct mbuf *m;
char *cmp;
int cmplen, cc;
m = so->so_rcv.sb_mb;
- cc = so->so_rcv.sb_cc - 1;
+ cc = sbavail(&so->so_rcv) - 1;
if (cc < 1)
return (SU_OK);
switch (*mtod(m, char *)) {
@@ -215,7 +216,7 @@ soparsehttpvers(struct socket *so, void *arg, int waitflag)
goto fallout;
m = so->so_rcv.sb_mb;
- cc = so->so_rcv.sb_cc;
+ cc = sbavail(&so->so_rcv);
inspaces = spaces = 0;
for (m = so->so_rcv.sb_mb; m; m = n) {
n = m->m_nextpkt;
@@ -304,7 +305,7 @@ soishttpconnected(struct socket *so, void *arg, int waitflag)
* have NCHRS left
*/
copied = 0;
- ccleft = so->so_rcv.sb_cc;
+ ccleft = sbavail(&so->so_rcv);
if (ccleft < NCHRS)
goto readmore;
a = b = c = '\0';
diff --git a/sys/netinet/siftr.c b/sys/netinet/siftr.c
index 9d2ca50232fe..d65564f990fa 100644
--- a/sys/netinet/siftr.c
+++ b/sys/netinet/siftr.c
@@ -782,9 +782,9 @@ siftr_siftdata(struct pkt_node *pn, struct inpcb *inp, struct tcpcb *tp,
pn->flags = tp->t_flags;
pn->rxt_length = tp->t_rxtcur;
pn->snd_buf_hiwater = inp->inp_socket->so_snd.sb_hiwat;
- pn->snd_buf_cc = inp->inp_socket->so_snd.sb_cc;
+ pn->snd_buf_cc = sbused(&inp->inp_socket->so_snd);
pn->rcv_buf_hiwater = inp->inp_socket->so_rcv.sb_hiwat;
- pn->rcv_buf_cc = inp->inp_socket->so_rcv.sb_cc;
+ pn->rcv_buf_cc = sbused(&inp->inp_socket->so_rcv);
pn->sent_inflight_bytes = tp->snd_max - tp->snd_una;
pn->t_segqlen = tp->t_segqlen;
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index 625992afaef7..468f14210565 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -1745,7 +1745,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tcp_timer_activate(tp, TT_REXMT,
tp->t_rxtcur);
sowwakeup(so);
- if (so->so_snd.sb_cc)
+ if (sbavail(&so->so_snd))
(void) tcp_output(tp);
goto check_delack;
}
@@ -2526,7 +2526,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* Otherwise we would send pure ACKs.
*/
SOCKBUF_LOCK(&so->so_snd);
- avail = so->so_snd.sb_cc -
+ avail = sbavail(&so->so_snd) -
(tp->snd_nxt - tp->snd_una);
SOCKBUF_UNLOCK(&so->so_snd);
if (avail > 0)
@@ -2661,10 +2661,10 @@ process_ACK:
cc_ack_received(tp, th, CC_ACK);
SOCKBUF_LOCK(&so->so_snd);
- if (acked > so->so_snd.sb_cc) {
- tp->snd_wnd -= so->so_snd.sb_cc;
+ if (acked > sbavail(&so->so_snd)) {
+ tp->snd_wnd -= sbavail(&so->so_snd);
mfree = sbcut_locked(&so->so_snd,
- (int)so->so_snd.sb_cc);
+ (int)sbavail(&so->so_snd));
ourfinisacked = 1;
} else {
mfree = sbcut_locked(&so->so_snd, acked);
@@ -2790,7 +2790,7 @@ step6:
* actually wanting to send this much urgent data.
*/
SOCKBUF_LOCK(&so->so_rcv);
- if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
+ if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
th->th_urp = 0; /* XXX */
thflags &= ~TH_URG; /* XXX */
SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
@@ -2812,7 +2812,7 @@ step6:
*/
if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
tp->rcv_up = th->th_seq + th->th_urp;
- so->so_oobmark = so->so_rcv.sb_cc +
+ so->so_oobmark = sbavail(&so->so_rcv) +
(tp->rcv_up - tp->rcv_nxt) - 1;
if (so->so_oobmark == 0)
so->so_rcv.sb_state |= SBS_RCVATMARK;
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index 7919e2b72e01..160fadaea5cd 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -322,7 +322,7 @@ after_sack_rexmit:
* to send then the probe will be the FIN
* itself.
*/
- if (off < so->so_snd.sb_cc)
+ if (off < sbused(&so->so_snd))
flags &= ~TH_FIN;
sendwin = 1;
} else {
@@ -348,7 +348,8 @@ after_sack_rexmit:
*/
if (sack_rxmit == 0) {
if (sack_bytes_rxmt == 0)
- len = ((long)ulmin(so->so_snd.sb_cc, sendwin) - off);
+ len = ((long)ulmin(sbavail(&so->so_snd), sendwin) -
+ off);
else {
long cwin;
@@ -357,8 +358,8 @@ after_sack_rexmit:
* sending new data, having retransmitted all the
* data possible in the scoreboard.
*/
- len = ((long)ulmin(so->so_snd.sb_cc, tp->snd_wnd)
- - off);
+ len = ((long)ulmin(sbavail(&so->so_snd), tp->snd_wnd) -
+ off);
/*
* Don't remove this (len > 0) check !
* We explicitly check for len > 0 here (although it
@@ -457,12 +458,15 @@ after_sack_rexmit:
* TODO: Shrink send buffer during idle periods together
* with congestion window. Requires another timer. Has to
* wait for upcoming tcp timer rewrite.
+ *
+ * XXXGL: should there be used sbused() or sbavail()?
*/
if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
- so->so_snd.sb_cc >= (so->so_snd.sb_hiwat / 8 * 7) &&
- so->so_snd.sb_cc < V_tcp_autosndbuf_max &&
- sendwin >= (so->so_snd.sb_cc - (tp->snd_nxt - tp->snd_una))) {
+ sbused(&so->so_snd) >= (so->so_snd.sb_hiwat / 8 * 7) &&
+ sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
+ sendwin >= (sbused(&so->so_snd) -
+ (tp->snd_nxt - tp->snd_una))) {
if (!sbreserve_locked(&so->so_snd,
min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
V_tcp_autosndbuf_max), so, curthread))
@@ -499,10 +503,11 @@ after_sack_rexmit:
tso = 1;
if (sack_rxmit) {
- if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc))
+ if (SEQ_LT(p->rxmit + len, tp->snd_una + sbused(&so->so_snd)))
flags &= ~TH_FIN;
} else {
- if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
+ if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
+ sbused(&so->so_snd)))
flags &= ~TH_FIN;
}
@@ -532,7 +537,7 @@ after_sack_rexmit:
*/
if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
(idle || (tp->t_flags & TF_NODELAY)) &&
- len + off >= so->so_snd.sb_cc &&
+ len + off >= sbavail(&so->so_snd) &&
(tp->t_flags & TF_NOPUSH) == 0) {
goto send;
}
@@ -660,7 +665,7 @@ dontupdate:
* if window is nonzero, transmit what we can,
* otherwise force out a byte.
*/
- if (so->so_snd.sb_cc && !tcp_timer_active(tp, TT_REXMT) &&
+ if (sbavail(&so->so_snd) && !tcp_timer_active(tp, TT_REXMT) &&
!tcp_timer_active(tp, TT_PERSIST)) {
tp->t_rxtshift = 0;
tcp_setpersist(tp);
@@ -863,7 +868,7 @@ send:
* emptied:
*/
max_len = (tp->t_maxopd - optlen);
- if ((off + len) < so->so_snd.sb_cc) {
+ if ((off + len) < sbavail(&so->so_snd)) {
moff = len % max_len;
if (moff != 0) {
len -= moff;
@@ -979,7 +984,7 @@ send:
* give data to the user when a buffer fills or
* a PUSH comes in.)
*/
- if (off + len == so->so_snd.sb_cc)
+ if (off + len == sbused(&so->so_snd))
flags |= TH_PUSH;
SOCKBUF_UNLOCK(&so->so_snd);
} else {
diff --git a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
index 910424dd768d..a6eba64e1622 100644
--- a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -747,7 +747,7 @@ sdp_start_disconnect(struct sdp_sock *ssk)
("sdp_start_disconnect: sdp_drop() returned NULL"));
} else {
soisdisconnecting(so);
- unread = so->so_rcv.sb_cc;
+ unread = sbused(&so->so_rcv);
sbflush(&so->so_rcv);
sdp_usrclosed(ssk);
if (!(ssk->flags & SDP_DROPPED)) {
@@ -1259,7 +1259,7 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
/* We will never ever get anything unless we are connected. */
if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
/* When disconnecting there may be still some data left. */
- if (sb->sb_cc > 0)
+ if (sbavail(sb))
goto deliver;
if (!(so->so_state & SS_ISDISCONNECTED))
error = ENOTCONN;
@@ -1267,7 +1267,7 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
}
/* Socket buffer is empty and we shall not block. */
- if (sb->sb_cc == 0 &&
+ if (sbavail(sb) == 0 &&
((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
error = EAGAIN;
goto out;
@@ -1278,7 +1278,7 @@ restart:
/* Abort if socket has reported problems. */
if (so->so_error) {
- if (sb->sb_cc > 0)
+ if (sbavail(sb))
goto deliver;
if (oresid > uio->uio_resid)
goto out;
@@ -1290,25 +1290,25 @@ restart:
/* Door is closed. Deliver what is left, if any. */
if (sb->sb_state & SBS_CANTRCVMORE) {
- if (sb->sb_cc > 0)
+ if (sbavail(sb))
goto deliver;
else
goto out;
}
/* Socket buffer got some data that we shall deliver now. */
- if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
+ if (sbavail(sb) && !(flags & MSG_WAITALL) &&
((so->so_state & SS_NBIO) ||
(flags & (MSG_DONTWAIT|MSG_NBIO)) ||
- sb->sb_cc >= sb->sb_lowat ||
- sb->sb_cc >= uio->uio_resid ||
- sb->sb_cc >= sb->sb_hiwat) ) {
+ sbavail(sb) >= sb->sb_lowat ||
+ sbavail(sb) >= uio->uio_resid ||
+ sbavail(sb) >= sb->sb_hiwat) ) {
goto deliver;
}
/* On MSG_WAITALL we must wait until all data or error arrives. */
if ((flags & MSG_WAITALL) &&
- (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_lowat))
+ (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_lowat))
goto deliver;
/*
@@ -1322,7 +1322,7 @@ restart:
deliver:
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
- KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
+ KASSERT(sbavail(sb), ("%s: sockbuf empty", __func__));
KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
/* Statistics. */
@@ -1330,7 +1330,7 @@ deliver:
uio->uio_td->td_ru.ru_msgrcv++;
/* Fill uio until full or current end of socket buffer is reached. */
- len = min(uio->uio_resid, sb->sb_cc);
+ len = min(uio->uio_resid, sbavail(sb));
if (mp0 != NULL) {
/* Dequeue as many mbufs as possible. */
if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
@@ -1510,7 +1510,7 @@ sdp_urg(struct sdp_sock *ssk, struct mbuf *mb)
if (so == NULL)
return;
- so->so_oobmark = so->so_rcv.sb_cc + mb->m_pkthdr.len - 1;
+ so->so_oobmark = sbused(&so->so_rcv) + mb->m_pkthdr.len - 1;
sohasoutofband(so);
ssk->oobflags &= ~(SDP_HAVEOOB | SDP_HADOOB);
if (!(so->so_options & SO_OOBINLINE)) {
diff --git a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c
index f8d6181c0ede..1fe5cb060fa9 100644
--- a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c
+++ b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c
@@ -183,7 +183,7 @@ sdp_post_recvs_needed(struct sdp_sock *ssk)
* Compute bytes in the receive queue and socket buffer.
*/
bytes_in_process = (posted - SDP_MIN_TX_CREDITS) * buffer_size;
- bytes_in_process += ssk->socket->so_rcv.sb_cc;
+ bytes_in_process += sbused(&ssk->socket->so_rcv);
return bytes_in_process < max_bytes;
}
diff --git a/sys/rpc/clnt_vc.c b/sys/rpc/clnt_vc.c
index 67ad58f5cd1b..3899511990a6 100644
--- a/sys/rpc/clnt_vc.c
+++ b/sys/rpc/clnt_vc.c
@@ -860,7 +860,7 @@ clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
* error condition
*/
do_read = FALSE;
- if (so->so_rcv.sb_cc >= sizeof(uint32_t)
+ if (sbavail(&so->so_rcv) >= sizeof(uint32_t)
|| (so->so_rcv.sb_state & SBS_CANTRCVMORE)
|| so->so_error)
do_read = TRUE;
@@ -913,7 +913,7 @@ clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
* buffered.
*/
do_read = FALSE;
- if (so->so_rcv.sb_cc >= ct->ct_record_resid
+ if (sbavail(&so->so_rcv) >= ct->ct_record_resid
|| (so->so_rcv.sb_state & SBS_CANTRCVMORE)
|| so->so_error)
do_read = TRUE;
diff --git a/sys/rpc/svc_vc.c b/sys/rpc/svc_vc.c
index df1d86e045cd..0190a0ce3834 100644
--- a/sys/rpc/svc_vc.c
+++ b/sys/rpc/svc_vc.c
@@ -546,7 +546,7 @@ svc_vc_ack(SVCXPRT *xprt, uint32_t *ack)
{
*ack = atomic_load_acq_32(&xprt->xp_snt_cnt);
- *ack -= xprt->xp_socket->so_snd.sb_cc;
+ *ack -= sbused(&xprt->xp_socket->so_snd);
return (TRUE);
}
diff --git a/sys/sys/sockbuf.h b/sys/sys/sockbuf.h
index ef80e9c14571..f9e8da4cde0b 100644
--- a/sys/sys/sockbuf.h
+++ b/sys/sys/sockbuf.h
@@ -166,6 +166,34 @@ int sblock(struct sockbuf *sb, int flags);
void sbunlock(struct sockbuf *sb);
/*
+ * Return how much data is available to be taken out of socket
+ * buffer right now.
+ */
+static inline u_int
+sbavail(struct sockbuf *sb)
+{
+
+#if 0
+ SOCKBUF_LOCK_ASSERT(sb);
+#endif
+ return (sb->sb_cc);
+}
+
+/*
+ * Return how much data sits there in the socket buffer.
+ * It might be that some data is not yet ready to be read.
+ */
+static inline u_int
+sbused(struct sockbuf *sb)
+{
+
+#if 0
+ SOCKBUF_LOCK_ASSERT(sb);
+#endif
+ return (sb->sb_cc);
+}
+
+/*
* How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
* This is problematical if the fields are unsigned, as the space might
* still be negative (cc > hiwat or mbcnt > mbmax). Should detect
diff --git a/sys/sys/socketvar.h b/sys/sys/socketvar.h
index bfdae0d07163..dfeeede33bb2 100644
--- a/sys/sys/socketvar.h
+++ b/sys/sys/socketvar.h
@@ -208,7 +208,7 @@ struct xsocket {
/* can we read something from so? */
#define soreadabledata(so) \
- ((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \
+ (sbavail(&(so)->so_rcv) >= (so)->so_rcv.sb_lowat || \
!TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)
#define soreadable(so) \
(soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE))