author     John Baldwin <jhb@FreeBSD.org>    2019-11-21 19:30:31 +0000
committer  John Baldwin <jhb@FreeBSD.org>    2019-11-21 19:30:31 +0000
commit     bddf73433e07e70192b5333caade3b2c37628c8f (patch)
tree       70403f05baf274e65a206a9bab00fd4a045d3042 /sys/dev/cxgbe/t4_l2t.c
parent     e3c42ad8097161a15da0d08118ec5d525b763dc1 (diff)
NIC KTLS for Chelsio T6 adapters.
This adds support for ifnet (NIC) KTLS using Chelsio T6 adapters.
Unlike the TOE-based KTLS in r353328, NIC TLS works with non-TOE
connections.

NIC KTLS on T6 is not able to use the normal TSO (LSO) path to segment
the encrypted TLS frames output by the crypto engine.  Instead, the TOE
is placed into a special setup to permit "dummy" connections to be
associated with regular sockets using KTLS.  This permits using the TOE
to segment the encrypted TLS records.  However, this approach does have
some limitations:

1) Regular TOE sockets cannot be used when the TOE is in this special
   mode.  One can use either TOE and TOE-based KTLS or NIC KTLS, but
   not both at the same time.

2) In NIC KTLS mode, the TOE is only able to accept a per-connection
   timestamp offset that varies in the upper 4 bits.  Put another way,
   only connections whose timestamp offset has the 28 lower bits
   cleared can use NIC KTLS and generate correct timestamps.  The
   driver will refuse to enable NIC KTLS on connections with a
   timestamp offset with any of the lower 28 bits set.  To use NIC
   KTLS, users can either disable TCP timestamps by setting the
   net.inet.tcp.rfc1323 sysctl to 0, or apply a local patch to the
   tcp_new_ts_offset() function to clear the lower 28 bits of the
   generated offset.

3) Because the TCP segmentation relies on fields mirrored in a TCB in
   the TOE, not all fields in a TCP packet can be sent in the TCP
   segments generated from a TLS record.  Specifically, for packets
   containing TCP options other than timestamps, the driver will
   inject an "empty" TCP packet holding the requested options (e.g. a
   SACK scoreboard) along with the segments from the TLS record.
   These empty TCP packets are counted by the
   dev.cc.N.txq.M.kern_tls_options sysctls.

Unlike TOE TLS which is able to buffer encrypted TLS records in
on-card memory to handle retransmits, NIC KTLS must re-encrypt TLS
records for retransmit requests as well as non-retransmit requests
that do not include the start of a TLS record but do include the
trailer.  The T6 NIC KTLS code tries to optimize some of the cases for
requests to transmit partial TLS records.  In particular it attempts
to minimize sending "waste" bytes that have to be given as input to
the crypto engine but are not needed on the wire to satisfy mbufs sent
from the TCP stack down to the driver.

TCP packets for TLS requests are broken down into the following
classes (with associated counters):

- Mbufs that send an entire TLS record in full do not have any waste
  bytes (dev.cc.N.txq.M.kern_tls_full).

- Mbufs that send a short TLS record that ends before the end of the
  trailer (dev.cc.N.txq.M.kern_tls_short).  For sockets using AES-CBC,
  the encryption must always start at the beginning, so if the mbuf
  starts at an offset into the TLS record, the offset bytes will be
  "waste" bytes.  For sockets using AES-GCM, the encryption can start
  at the 16 byte block before the starting offset capping the waste at
  15 bytes.

- Mbufs that send a partial TLS record that has a non-zero starting
  offset but ends at the end of the trailer
  (dev.cc.N.txq.M.kern_tls_partial).  In order to compute the
  authentication hash stored in the trailer, the entire TLS record
  must be sent as input to the crypto engine, so the bytes before the
  offset are always "waste" bytes.

In addition, other per-txq sysctls are provided:

- dev.cc.N.txq.M.kern_tls_cbc: Count of sockets sent via this txq
  using AES-CBC.

- dev.cc.N.txq.M.kern_tls_gcm: Count of sockets sent via this txq
  using AES-GCM.

- dev.cc.N.txq.M.kern_tls_fin: Count of empty FIN-only packets sent to
  compensate for the TOE engine not being able to set FIN on the last
  segment of a TLS record if the TLS record mbuf had FIN set.

- dev.cc.N.txq.M.kern_tls_records: Count of TLS records sent via this
  txq including full, short, and partial records.

- dev.cc.N.txq.M.kern_tls_octets: Count of non-waste bytes (TLS header
  and payload) sent for TLS record requests.

- dev.cc.N.txq.M.kern_tls_waste: Count of waste bytes sent for TLS
  record requests.

To enable NIC KTLS with T6, set the following tunables prior to
loading the cxgbe(4) driver:

hw.cxgbe.config_file=kern_tls
hw.cxgbe.kern_tls=1

Reviewed by:	np
Sponsored by:	Chelsio Communications
Differential Revision:	https://reviews.freebsd.org/D21962
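[Editor's note] The timestamp restriction in limitation 2 above amounts to
a simple mask test on the per-connection offset.  The following is a
minimal C sketch of that test based only on the commit message; the
function name is hypothetical and does not appear in the driver:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch: a connection can use T6 NIC KTLS only if its TCP timestamp
 * offset varies solely in the upper 4 bits, i.e. the lower 28 bits
 * are all zero.  Hypothetical helper, not part of cxgbe(4).
 */
static bool
nic_ktls_ts_offset_usable(uint32_t ts_offset)
{

	return ((ts_offset & 0x0fffffffU) == 0);
}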
Notes:
    svn path=/head/; revision=354974
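[Editor's note] The "waste" accounting described above for short TLS
records can likewise be summarized in a few lines of C.  This is a rough
sketch derived only from the commit message (AES-CBC always encrypts
from the start of the record; AES-GCM may start at the enclosing
16-byte block); the helper name is hypothetical:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch: bytes that must still be fed to the crypto engine, even
 * though they are not needed on the wire, for a short TLS record
 * request that starts 'offset' bytes into the record.  Hypothetical
 * helper, not the driver's actual code.
 */
static uint32_t
nic_ktls_short_record_waste(uint32_t offset, bool is_gcm)
{

	if (is_gcm)
		return (offset % 16);	/* at most 15 waste bytes */
	return (offset);		/* CBC restarts at the record head */
}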
Diffstat (limited to 'sys/dev/cxgbe/t4_l2t.c')
-rw-r--r--  sys/dev/cxgbe/t4_l2t.c  114
1 files changed, 104 insertions, 10 deletions
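[Editor's note] For context on the new t4_l2t_alloc_tls() function added
in the diff below, here is a hedged caller sketch illustrating its
contract: it is called with the txq lock held, 'dst' points at
descriptor space in that txq, and '*ndesc' reports whether a
CPL_L2T_WRITE_REQ was written there (1) or an existing shared entry was
reused (0).  The wrapper name and surrounding code are hypothetical and
are not part of the commit; the sketch assumes the cxgbe driver's
internal headers (adapter.h, t4_l2t.h) are in scope.

/* Hypothetical wrapper, assuming cxgbe's adapter.h and t4_l2t.h. */
static struct l2t_entry *
tls_reserve_l2e(struct adapter *sc, struct sge_txq *txq, void *wr_dst,
    uint16_t vlan, uint8_t port, uint8_t *dmac, int *ndescp)
{
	struct l2t_entry *e;
	int ndesc;

	TXQ_LOCK_ASSERT_OWNED(txq);

	e = t4_l2t_alloc_tls(sc, txq, wr_dst, &ndesc, vlan, port, dmac);
	if (e == NULL) {
		/* L2T table is full; the caller must fail or fall back. */
		*ndescp = 0;
		return (NULL);
	}

	/*
	 * If ndesc != 0, a CPL_L2T_WRITE_REQ was written at 'wr_dst'
	 * and the caller must account for that descriptor before
	 * queueing the TLS work requests that depend on the entry.
	 */
	*ndescp = ndesc;
	return (e);
}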
diff --git a/sys/dev/cxgbe/t4_l2t.c b/sys/dev/cxgbe/t4_l2t.c
index 5e96579f1717..47c995932a21 100644
--- a/sys/dev/cxgbe/t4_l2t.c
+++ b/sys/dev/cxgbe/t4_l2t.c
@@ -145,6 +145,23 @@ find_or_alloc_l2e(struct l2t_data *d, uint16_t vlan, uint8_t port, uint8_t *dmac
return (e);
}
+static void
+mk_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync, int reply,
+ void *dst)
+{
+ struct cpl_l2t_write_req *req;
+ int idx;
+
+ req = dst;
+ idx = e->idx + sc->vres.l2t.start;
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
+ V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
+ req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!reply));
+ req->l2t_idx = htons(idx);
+ req->vlan = htons(e->vlan);
+ memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+}
/*
* Write an L2T entry. Must be called with the entry locked.
@@ -157,7 +174,6 @@ t4_write_l2e(struct l2t_entry *e, int sync)
struct adapter *sc;
struct wrq_cookie cookie;
struct cpl_l2t_write_req *req;
- int idx;
mtx_assert(&e->lock, MA_OWNED);
MPASS(e->wrq != NULL);
@@ -169,14 +185,7 @@ t4_write_l2e(struct l2t_entry *e, int sync)
if (req == NULL)
return (ENOMEM);
- idx = e->idx + sc->vres.l2t.start;
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
- V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
- req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
- req->l2t_idx = htons(idx);
- req->vlan = htons(e->vlan);
- memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+ mk_write_l2e(sc, e, sync, sync, req);
commit_wrq_wr(wrq, req, &cookie);
@@ -187,6 +196,90 @@ t4_write_l2e(struct l2t_entry *e, int sync)
}
/*
+ * Allocate an L2T entry for use by a TLS connection. These entries are
+ * associated with a specific VLAN and destination MAC that never changes.
+ * However, multiple TLS connections might share a single entry.
+ *
+ * If a new L2T entry is allocated, a work request to initialize it is
+ * written to 'txq' and 'ndesc' will be set to 1. Otherwise, 'ndesc'
+ * will be set to 0.
+ *
+ * To avoid races, separate L2T entries are reserved for individual
+ * queues since the L2T entry update is written to a txq just prior to
+ * TLS work requests that will depend on it being written.
+ */
+struct l2t_entry *
+t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
+ int *ndesc, uint16_t vlan, uint8_t port, uint8_t *eth_addr)
+{
+ struct l2t_data *d;
+ struct l2t_entry *e;
+ int i;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+
+ d = sc->l2t;
+ *ndesc = 0;
+
+ rw_rlock(&d->lock);
+
+ /* First, try to find an existing entry. */
+ for (i = 0; i < d->l2t_size; i++) {
+ e = &d->l2tab[i];
+ if (e->state != L2T_STATE_TLS)
+ continue;
+ if (e->vlan == vlan && e->lport == port &&
+ e->wrq == (struct sge_wrq *)txq &&
+ memcmp(e->dmac, eth_addr, ETHER_ADDR_LEN) == 0) {
+ if (atomic_fetchadd_int(&e->refcnt, 1) == 0) {
+ /*
+ * This entry wasn't held but is still
+ * valid, so decrement nfree.
+ */
+ atomic_subtract_int(&d->nfree, 1);
+ }
+ KASSERT(e->refcnt > 0,
+ ("%s: refcount overflow", __func__));
+ rw_runlock(&d->lock);
+ return (e);
+ }
+ }
+
+ /*
+ * Don't bother rechecking if the upgrade fails since the txq is
+ * already locked.
+ */
+ if (!rw_try_upgrade(&d->lock)) {
+ rw_runlock(&d->lock);
+ rw_wlock(&d->lock);
+ }
+
+ /* Match not found, allocate a new entry. */
+ e = t4_alloc_l2e(d);
+ if (e == NULL) {
+ rw_wunlock(&d->lock);
+ return (e);
+ }
+
+ /* Initialize the entry. */
+ e->state = L2T_STATE_TLS;
+ e->vlan = vlan;
+ e->lport = port;
+ e->iqid = sc->sge.fwq.abs_id;
+ e->wrq = (struct sge_wrq *)txq;
+ memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
+ atomic_store_rel_int(&e->refcnt, 1);
+ rw_wunlock(&d->lock);
+
+ /* Write out the work request. */
+ *ndesc = howmany(sizeof(struct cpl_l2t_write_req), EQ_ESIZE);
+ MPASS(*ndesc == 1);
+ mk_write_l2e(sc, e, 1, 0, dst);
+
+ return (e);
+}
+
+/*
* Allocate an L2T entry for use by a switching rule. Such need to be
* explicitly freed and while busy they are not on any hash chain, so normal
* address resolution updates do not see them.
@@ -307,6 +400,7 @@ l2e_state(const struct l2t_entry *e)
case L2T_STATE_SYNC_WRITE: return 'W';
case L2T_STATE_RESOLVING: return STAILQ_EMPTY(&e->wr_list) ? 'R' : 'A';
case L2T_STATE_SWITCHING: return 'X';
+ case L2T_STATE_TLS: return 'T';
default: return 'U';
}
}
@@ -343,7 +437,7 @@ sysctl_l2t(SYSCTL_HANDLER_ARGS)
"Ethernet address VLAN/P LP State Users Port");
header = 1;
}
- if (e->state == L2T_STATE_SWITCHING)
+ if (e->state >= L2T_STATE_SWITCHING)
ip[0] = 0;
else {
inet_ntop(e->ipv6 ? AF_INET6 : AF_INET, &e->addr[0],