author		Randall Stewart <rrs@FreeBSD.org>	2018-08-20 12:43:18 +0000
committer	Randall Stewart <rrs@FreeBSD.org>	2018-08-20 12:43:18 +0000
commit		c28440db2909b1a4d76a7a78bef22093556b8f61 (patch)
tree		f4c64f901dc39e7492aafeef039f783b49d74946 /sys/netinet/tcp_var.h
parent		a800b45c184c7471a43766908aa0009877595b86 (diff)
This change represents a substantial restructure of the way we reassemble inbound TCP segments.

The old algorithm just blindly dropped in segments without coalescing, which meant that every segment could take up greater and greater room on the linked list of segments. That list is now subject to a tighter limit (100 segments), which in a high-BDP situation makes us a lot less efficient, since any segments received beyond the 100-entry limit are dropped.

What this restructure does is make the reassembly buffer coalesce segments, putting an emphasis on the two common cases (which avoid walking the list of segments): adding to the back of the queue and adding to the front. The reassembly buffer also supports a couple of debug options (black-box logging as well as counters for code coverage). These are compiled out by default but can be enabled by uncommenting the defines.

Sponsored by:	Netflix Inc.
Differential Revision:	https://reviews.freebsd.org/D16626
Notes:
	svn path=/head/; revision=338102
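As a rough illustration of the two common cases described in the commit message, the sketch below models the coalescing fast paths in userspace with the TAILQ macros from <sys/queue.h>. The seg/seghead/seg_insert names are illustrative only, overlap handling and the ordered slow path are omitted, and this is not the kernel's tcp_reass() code.

/*
 * Userspace model of the two coalescing fast paths; not kernel code.
 */
#include <sys/queue.h>
#include <stdint.h>
#include <stdlib.h>

struct seg {
	TAILQ_ENTRY(seg) q;
	uint32_t start;			/* first sequence number covered */
	uint32_t len;			/* bytes covered */
};
TAILQ_HEAD(seghead, seg);

static int
seg_insert(struct seghead *head, uint32_t start, uint32_t len)
{
	struct seg *last = TAILQ_LAST(head, seghead);
	struct seg *first = TAILQ_FIRST(head);
	struct seg *s;

	/* Common case 1: the new data lands right after the tail entry. */
	if (last != NULL && start == last->start + last->len) {
		last->len += len;
		return (0);
	}
	/* Common case 2: the new data lands right before the head entry. */
	if (first != NULL && start + len == first->start) {
		first->start = start;
		first->len += len;
		return (0);
	}
	/* Slow path: allocate an entry; the ordered walk is omitted here. */
	if ((s = malloc(sizeof(*s))) == NULL)
		return (-1);
	s->start = start;
	s->len = len;
	TAILQ_INSERT_TAIL(head, s, q);
	return (0);
}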
Diffstat (limited to 'sys/netinet/tcp_var.h')
-rw-r--r--	sys/netinet/tcp_var.h	16
1 file changed, 11 insertions, 5 deletions
diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h
index 5f8c0ade6700..2fbe07ade5dc 100644
--- a/sys/netinet/tcp_var.h
+++ b/sys/netinet/tcp_var.h
@@ -46,12 +46,15 @@
#if defined(_KERNEL) || defined(_WANT_TCPCB)
/* TCP segment queue entry */
struct tseg_qent {
- LIST_ENTRY(tseg_qent) tqe_q;
+ TAILQ_ENTRY(tseg_qent) tqe_q;
+ struct mbuf *tqe_m; /* mbuf contains packet */
+ struct mbuf *tqe_last; /* last mbuf in chain */
+ tcp_seq tqe_start; /* TCP Sequence number start */
int tqe_len; /* TCP segment data length */
- struct tcphdr *tqe_th; /* a pointer to tcp header */
- struct mbuf *tqe_m; /* mbuf contains packet */
+ uint32_t tqe_flags; /* The flags from the th->th_flags */
+ uint32_t tqe_mbuf_cnt; /* Count of mbuf overhead */
};
-LIST_HEAD(tsegqe_head, tseg_qent);
+TAILQ_HEAD(tsegqe_head, tseg_qent);
struct sackblk {
tcp_seq start; /* start seq no. of sack block */
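The new tqe_m/tqe_last pair suggests that each entry caches the tail of its mbuf chain, so appending further data is constant time rather than a walk from the head, with tqe_len and tqe_mbuf_cnt updated alongside. The sketch below models that idea with a hypothetical buf chain standing in for mbufs; the names and bookkeeping are assumptions, not the kernel's.

/* Simplified model: a cached chain tail makes appends constant time. */
#include <stddef.h>
#include <stdint.h>

struct buf {				/* stand-in for struct mbuf */
	struct buf *next;
	uint32_t    len;
};

struct entry {				/* stand-in for struct tseg_qent */
	struct buf *m;			/* head of chain (cf. tqe_m) */
	struct buf *last;		/* cached chain tail (cf. tqe_last) */
	uint32_t    len;		/* payload bytes (cf. tqe_len) */
	uint32_t    overhead;		/* storage accounting (cf. tqe_mbuf_cnt) */
};

static void
entry_append(struct entry *e, struct buf *chain, struct buf *chain_last,
    uint32_t paylen, uint32_t overhead)
{
	if (e->m == NULL)
		e->m = chain;		/* first data for this entry */
	else
		e->last->next = chain;	/* no walk needed thanks to 'last' */
	e->last = chain_last;
	e->len += paylen;
	e->overhead += overhead;
}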
@@ -79,6 +82,8 @@ struct sackhint {
uint64_t _pad[1]; /* TBD */
};
+#define SEGQ_EMPTY(tp) TAILQ_EMPTY(&(tp)->t_segq)
+
STAILQ_HEAD(tcp_log_stailq, tcp_log_mem);
/*
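A minimal model of the new SEGQ_EMPTY() convenience macro: callers can test whether the reassembly queue is empty without referring to the underlying queue type. The conn and qent structures below are stand-ins, not the kernel's tcpcb and tseg_qent.

#include <sys/queue.h>
#include <stdbool.h>

struct qent {				/* stand-in for struct tseg_qent */
	TAILQ_ENTRY(qent) tqe_q;
};
TAILQ_HEAD(qhead, qent);

struct conn {				/* stand-in for struct tcpcb */
	struct qhead t_segq;
};

#define	SEGQ_EMPTY(tp)	TAILQ_EMPTY(&(tp)->t_segq)

static bool
nothing_queued(const struct conn *tp)
{
	return (SEGQ_EMPTY(tp));
}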
@@ -131,6 +136,7 @@ struct tcpcb {
/* Cache line 3 */
tcp_seq rcv_up; /* receive urgent pointer */
int t_segqlen; /* segment reassembly queue length */
+ uint32_t t_segqmbuflen; /* Count of bytes mbufs on all entries */
struct tsegqe_head t_segq; /* segment reassembly queue */
struct mbuf *t_in_pkt;
struct mbuf *t_tail_pkt;
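The new t_segqmbuflen field appears to count mbuf storage across all queued entries, next to the existing t_segqlen entry count. The sketch below shows one plausible accounting scheme; the limit name, value, and policy are assumptions rather than the kernel's actual sysctls or behavior.

#include <stdbool.h>
#include <stdint.h>

#define	REASS_MBUF_LIMIT	(1024 * 1024)	/* hypothetical per-connection cap */

struct conn_acct {
	int	 segqlen;		/* cf. t_segqlen */
	uint32_t segqmbuflen;		/* cf. t_segqmbuflen */
};

/* Would queueing another entry's worth of mbuf storage bust the budget? */
static bool
reass_would_overflow(const struct conn_acct *tp, uint32_t mbuf_bytes)
{
	return (tp->segqmbuflen + mbuf_bytes > REASS_MBUF_LIMIT);
}

static void
reass_account(struct conn_acct *tp, uint32_t mbuf_bytes)
{
	tp->segqlen++;
	tp->segqmbuflen += mbuf_bytes;
}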
@@ -837,7 +843,7 @@ char *tcp_log_addrs(struct in_conninfo *, struct tcphdr *, void *,
const void *);
char *tcp_log_vain(struct in_conninfo *, struct tcphdr *, void *,
const void *);
-int tcp_reass(struct tcpcb *, struct tcphdr *, int *, struct mbuf *);
+int tcp_reass(struct tcpcb *, struct tcphdr *, tcp_seq *, int *, struct mbuf *);
void tcp_reass_global_init(void);
void tcp_reass_flush(struct tcpcb *);
void tcp_dooptions(struct tcpopt *, u_char *, int, int);
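With per-connection counters hanging off the reassembly queue, a flush along the lines of tcp_reass_flush() has to free every queued entry and zero both counters. The userspace sketch below models only that invariant; the names are illustrative and it is not the kernel implementation.

#include <sys/queue.h>
#include <stdint.h>
#include <stdlib.h>

struct fseg {
	TAILQ_ENTRY(fseg) q;
};
TAILQ_HEAD(fseghead, fseg);

static void
fseg_flush(struct fseghead *head, int *segqlen, uint32_t *segqmbuflen)
{
	struct fseg *s;

	while ((s = TAILQ_FIRST(head)) != NULL) {
		TAILQ_REMOVE(head, s, q);
		free(s);
	}
	*segqlen = 0;			/* cf. t_segqlen */
	*segqmbuflen = 0;		/* cf. t_segqmbuflen */
}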