path: root/sys/dev/cxgbe/t4_l2t.c
author    Navdeep Parhar <np@FreeBSD.org>    2012-06-19 07:34:13 +0000
committer Navdeep Parhar <np@FreeBSD.org>    2012-06-19 07:34:13 +0000
commit    09fe63205c597be4f762c7f3017e2854c121d6d1 (patch)
tree      9255a545bbd49a0458ed8850371b4fe6ed2cd01f /sys/dev/cxgbe/t4_l2t.c
parent    41b8cbda7d45229258c6205b697b5fbf80888493 (diff)
- Updated TOE support in the kernel.

- Stateful TCP offload drivers for Terminator 3 and 4 (T3 and T4) ASICs.
  These are available as t3_tom and t4_tom modules that augment cxgb(4)
  and cxgbe(4) respectively.  The cxgb/cxgbe drivers continue to work as
  usual with or without these extra features.

- iWARP driver for Terminator 3 ASIC (kernel verbs).  T4 iWARP in the
  works and will follow soon.

Build-tested with make universe.

30s overview
============
What interfaces support TCP offload?  Look for TOE4 and/or TOE6 in the
capabilities of an interface:
# ifconfig -m | grep TOE

Enable/disable TCP offload on an interface (just like any other ifnet
capability):
# ifconfig cxgbe0 toe
# ifconfig cxgbe0 -toe

Which connections are offloaded?  Look for toe4 and/or toe6 in the
output of netstat and sockstat:
# netstat -np tcp | grep toe
# sockstat -46c | grep toe

Reviewed by:	bz, gnn
Sponsored by:	Chelsio communications.
MFC after:	~3 months (after 9.1, and after ensuring MFC is feasible)
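[Editor's note: the overview above uses ifconfig(8) to inspect the TOE
capability bits.  A minimal userland sketch of the same check via the
SIOCGIFCAP ioctl that "ifconfig -m" relies on; the interface name cxgbe0
is assumed, and this is illustrative, not part of the commit.]

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		return (1);

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "cxgbe0", sizeof(ifr.ifr_name)); /* assumed name */

	/* SIOCGIFCAP reports supported caps in ifr_reqcap, enabled in ifr_curcap. */
	if (ioctl(s, SIOCGIFCAP, &ifr) == 0) {
		printf("TOE supported: %s, enabled: %s\n",
		    (ifr.ifr_reqcap & IFCAP_TOE) ? "yes" : "no",
		    (ifr.ifr_curcap & IFCAP_TOE) ? "yes" : "no");
	}
	close(s);
	return (0);
}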
Notes:
    svn path=/head/; revision=237263
Diffstat (limited to 'sys/dev/cxgbe/t4_l2t.c')
-rw-r--r--    sys/dev/cxgbe/t4_l2t.c    563
1 file changed, 44 insertions(+), 519 deletions(-)
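[Editor's note: reading aid for the diff that follows.  The main change
inside t4_l2t.c is that CPL messages are no longer built in hand-rolled
mbufs but in work-request entries (wrqe) taken from the adapter's
management queue.  A condensed sketch of the new pattern, assembled only
from calls that appear in the hunks below; the body is elided, so this
is illustrative rather than a drop-in function.]

static int
l2t_write_sketch(struct adapter *sc, struct l2t_entry *e)
{
	struct wrqe *wr;
	struct cpl_l2t_write_req *req;

	wr = alloc_wrqe(sizeof(*req), &sc->sge.mgmtq);	/* replaces m_gethdr() */
	if (wr == NULL)
		return (ENOMEM);
	req = wrtod(wr);				/* replaces mtod() */

	/* ... fill in *req exactly as t4_write_l2e() does in the diff ... */

	t4_wrq_tx(sc, wr);				/* replaces t4_mgmt_tx() */
	return (0);
}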
diff --git a/sys/dev/cxgbe/t4_l2t.c b/sys/dev/cxgbe/t4_l2t.c
index 55491cda3043..8373c32e1f7a 100644
--- a/sys/dev/cxgbe/t4_l2t.c
+++ b/sys/dev/cxgbe/t4_l2t.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2011 Chelsio Communications, Inc.
+ * Copyright (c) 2012 Chelsio Communications, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,16 +38,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
-#include <net/if.h>
-#include <net/if_types.h>
-#include <net/ethernet.h>
-#include <net/if_vlan_var.h>
-#include <net/if_dl.h>
-#include <net/if_llatbl.h>
-#include <net/route.h>
#include <netinet/in.h>
-#include <netinet/in_var.h>
-#include <netinet/if_ether.h>
#include "common/common.h"
#include "common/jhash.h"
@@ -72,42 +63,11 @@ __FBSDID("$FreeBSD$");
* lifetime of an L2T entry is fully contained in the lifetime of the TOE.
*/
-/* identifies sync vs async L2T_WRITE_REQs */
-#define S_SYNC_WR 12
-#define V_SYNC_WR(x) ((x) << S_SYNC_WR)
-#define F_SYNC_WR V_SYNC_WR(1)
-
-enum {
- L2T_STATE_VALID, /* entry is up to date */
- L2T_STATE_STALE, /* entry may be used but needs revalidation */
- L2T_STATE_RESOLVING, /* entry needs address resolution */
- L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
-
- /* when state is one of the below the entry is not hashed */
- L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
- L2T_STATE_UNUSED /* entry not in use */
-};
-
-struct l2t_data {
- struct rwlock lock;
- volatile int nfree; /* number of free entries */
- struct l2t_entry *rover;/* starting point for next allocation */
- struct l2t_entry l2tab[L2T_SIZE];
-};
-
-static int do_l2t_write_rpl(struct sge_iq *, const struct rss_header *,
- struct mbuf *);
-
-#define VLAN_NONE 0xfff
-#define SA(x) ((struct sockaddr *)(x))
-#define SIN(x) ((struct sockaddr_in *)(x))
-#define SINADDR(x) (SIN(x)->sin_addr.s_addr)
-
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
-static struct l2t_entry *
-alloc_l2e(struct l2t_data *d)
+struct l2t_entry *
+t4_alloc_l2e(struct l2t_data *d)
{
struct l2t_entry *end, *e, **p;
@@ -121,7 +81,8 @@ alloc_l2e(struct l2t_data *d)
if (atomic_load_acq_int(&e->refcnt) == 0)
goto found;
- for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e) ;
+ for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e)
+ continue;
found:
d->rover = e + 1;
atomic_subtract_int(&d->nfree, 1);
@@ -148,19 +109,18 @@ found:
* Write an L2T entry. Must be called with the entry locked.
* The write may be synchronous or asynchronous.
*/
-static int
-write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
+int
+t4_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
{
- struct mbuf *m;
+ struct wrqe *wr;
struct cpl_l2t_write_req *req;
mtx_assert(&e->lock, MA_OWNED);
- if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
+ wr = alloc_wrqe(sizeof(*req), &sc->sge.mgmtq);
+ if (wr == NULL)
return (ENOMEM);
-
- req = mtod(m, struct cpl_l2t_write_req *);
- m->m_pkthdr.len = m->m_len = sizeof(*req);
+ req = wrtod(wr);
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx |
@@ -170,7 +130,7 @@ write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
req->vlan = htons(e->vlan);
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
- t4_mgmt_tx(sc, m);
+ t4_wrq_tx(sc, wr);
if (sync && e->state != L2T_STATE_SWITCHING)
e->state = L2T_STATE_SYNC_WRITE;
@@ -189,7 +149,7 @@ t4_l2t_alloc_switching(struct l2t_data *d)
struct l2t_entry *e;
rw_rlock(&d->lock);
- e = alloc_l2e(d);
+ e = t4_alloc_l2e(d);
if (e) {
mtx_lock(&e->lock); /* avoid race with t4_l2t_free */
e->state = L2T_STATE_SWITCHING;
@@ -214,7 +174,7 @@ t4_l2t_set_switching(struct adapter *sc, struct l2t_entry *e, uint16_t vlan,
e->lport = port;
memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
mtx_lock(&e->lock);
- rc = write_l2e(sc, e, 0);
+ rc = t4_write_l2e(sc, e, 0);
mtx_unlock(&e->lock);
return (rc);
}
@@ -234,10 +194,13 @@ t4_init_l2t(struct adapter *sc, int flags)
rw_init(&d->lock, "L2T");
for (i = 0; i < L2T_SIZE; i++) {
- d->l2tab[i].idx = i;
- d->l2tab[i].state = L2T_STATE_UNUSED;
- mtx_init(&d->l2tab[i].lock, "L2T_E", NULL, MTX_DEF);
- atomic_store_rel_int(&d->l2tab[i].refcnt, 0);
+ struct l2t_entry *e = &d->l2tab[i];
+
+ e->idx = i;
+ e->state = L2T_STATE_UNUSED;
+ mtx_init(&e->lock, "L2T_E", NULL, MTX_DEF);
+ STAILQ_INIT(&e->wr_list);
+ atomic_store_rel_int(&e->refcnt, 0);
}
sc->l2t = d;
@@ -259,6 +222,24 @@ t4_free_l2t(struct l2t_data *d)
return (0);
}
+int
+do_l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss,
+ struct mbuf *m)
+{
+ const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
+ unsigned int tid = GET_TID(rpl);
+ unsigned int idx = tid & (L2T_SIZE - 1);
+
+ if (__predict_false(rpl->status != CPL_ERR_NONE)) {
+ log(LOG_ERR,
+ "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ rpl->status, idx);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
#ifdef SBUF_DRAIN
static inline unsigned int
vlan_prio(const struct l2t_entry *e)
@@ -273,7 +254,7 @@ l2e_state(const struct l2t_entry *e)
case L2T_STATE_VALID: return 'V'; /* valid, fast-path entry */
case L2T_STATE_STALE: return 'S'; /* needs revalidation, but usable */
case L2T_STATE_SYNC_WRITE: return 'W';
- case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
+ case L2T_STATE_RESOLVING: return STAILQ_EMPTY(&e->wr_list) ? 'R' : 'A';
case L2T_STATE_SWITCHING: return 'X';
default: return 'U';
}
@@ -311,20 +292,20 @@ sysctl_l2t(SYSCTL_HANDLER_ARGS)
"Ethernet address VLAN/P LP State Users Port");
header = 1;
}
- if (e->state == L2T_STATE_SWITCHING || e->v6)
+ if (e->state == L2T_STATE_SWITCHING)
ip[0] = 0;
else
snprintf(ip, sizeof(ip), "%s",
- inet_ntoa(*(struct in_addr *)&e->addr[0]));
+ inet_ntoa(*(struct in_addr *)&e->addr));
- /* XXX: accessing lle probably not safe? */
+ /* XXX: e->ifp may not be around */
sbuf_printf(sb, "\n%4u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d"
" %u %2u %c %5u %s",
e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
e->dmac[3], e->dmac[4], e->dmac[5],
e->vlan & 0xfff, vlan_prio(e), e->lport,
l2e_state(e), atomic_load_acq_int(&e->refcnt),
- e->lle ? e->lle->lle_tbl->llt_ifp->if_xname : "");
+ e->ifp->if_xname);
skip:
mtx_unlock(&e->lock);
}
@@ -335,459 +316,3 @@ skip:
return (rc);
}
#endif
-
-#ifndef TCP_OFFLOAD_DISABLE
-static inline void
-l2t_hold(struct l2t_data *d, struct l2t_entry *e)
-{
- if (atomic_fetchadd_int(&e->refcnt, 1) == 0) /* 0 -> 1 transition */
- atomic_subtract_int(&d->nfree, 1);
-}
-
-/*
- * To avoid having to check address families we do not allow v4 and v6
- * neighbors to be on the same hash chain. We keep v4 entries in the first
- * half of available hash buckets and v6 in the second.
- */
-enum {
- L2T_SZ_HALF = L2T_SIZE / 2,
- L2T_HASH_MASK = L2T_SZ_HALF - 1
-};
-
-static inline unsigned int
-arp_hash(const uint32_t *key, int ifindex)
-{
- return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
-}
-
-static inline unsigned int
-ipv6_hash(const uint32_t *key, int ifindex)
-{
- uint32_t xor = key[0] ^ key[1] ^ key[2] ^ key[3];
-
- return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
-}
-
-static inline unsigned int
-addr_hash(const uint32_t *addr, int addr_len, int ifindex)
-{
- return addr_len == 4 ? arp_hash(addr, ifindex) :
- ipv6_hash(addr, ifindex);
-}
-
-/*
- * Checks if an L2T entry is for the given IP/IPv6 address. It does not check
- * whether the L2T entry and the address are of the same address family.
- * Callers ensure an address is only checked against L2T entries of the same
- * family, something made trivial by the separation of IP and IPv6 hash chains
- * mentioned above. Returns 0 if there's a match.
- */
-static inline int
-addreq(const struct l2t_entry *e, const uint32_t *addr)
-{
- if (e->v6)
- return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
- (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
- return e->addr[0] ^ addr[0];
-}
-
-/*
- * Add a packet to an L2T entry's queue of packets awaiting resolution.
- * Must be called with the entry's lock held.
- */
-static inline void
-arpq_enqueue(struct l2t_entry *e, struct mbuf *m)
-{
- mtx_assert(&e->lock, MA_OWNED);
-
- KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt not NULL", __func__));
- if (e->arpq_head)
- e->arpq_tail->m_nextpkt = m;
- else
- e->arpq_head = m;
- e->arpq_tail = m;
-}
-
-static inline void
-send_pending(struct adapter *sc, struct l2t_entry *e)
-{
- struct mbuf *m, *next;
-
- mtx_assert(&e->lock, MA_OWNED);
-
- for (m = e->arpq_head; m; m = next) {
- next = m->m_nextpkt;
- m->m_nextpkt = NULL;
- t4_wrq_tx(sc, MBUF_EQ(m), m);
- }
- e->arpq_head = e->arpq_tail = NULL;
-}
-
-#ifdef INET
-/*
- * Looks up and fills up an l2t_entry's lle. We grab all the locks that we need
- * ourself, and update e->state at the end if e->lle was successfully filled.
- *
- * The lle passed in comes from arpresolve and is ignored as it does not appear
- * to be of much use.
- */
-static int
-l2t_fill_lle(struct adapter *sc, struct l2t_entry *e, struct llentry *unused)
-{
- int rc = 0;
- struct sockaddr_in sin;
- struct ifnet *ifp = e->ifp;
- struct llentry *lle;
-
- bzero(&sin, sizeof(struct sockaddr_in));
- if (e->v6)
- panic("%s: IPv6 L2 resolution not supported yet.", __func__);
-
- sin.sin_family = AF_INET;
- sin.sin_len = sizeof(struct sockaddr_in);
- memcpy(&sin.sin_addr, e->addr, sizeof(struct sockaddr_in));
-
- mtx_assert(&e->lock, MA_NOTOWNED);
- KASSERT(e->addr && ifp, ("%s: bad prep before call", __func__));
-
- IF_AFDATA_LOCK(ifp);
- lle = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, SA(&sin));
- IF_AFDATA_UNLOCK(ifp);
- if (!LLE_IS_VALID(lle))
- return (ENOMEM);
- if (!(lle->la_flags & LLE_VALID)) {
- rc = EINVAL;
- goto done;
- }
-
- LLE_ADDREF(lle);
-
- mtx_lock(&e->lock);
- if (e->state == L2T_STATE_RESOLVING) {
- KASSERT(e->lle == NULL, ("%s: lle already valid", __func__));
- e->lle = lle;
- memcpy(e->dmac, &lle->ll_addr, ETHER_ADDR_LEN);
- write_l2e(sc, e, 1);
- } else {
- KASSERT(e->lle == lle, ("%s: lle changed", __func__));
- LLE_REMREF(lle);
- }
- mtx_unlock(&e->lock);
-done:
- LLE_WUNLOCK(lle);
- return (rc);
-}
-#endif
-
-int
-t4_l2t_send(struct adapter *sc, struct mbuf *m, struct l2t_entry *e)
-{
-#ifndef INET
- return (EINVAL);
-#else
- struct llentry *lle = NULL;
- struct sockaddr_in sin;
- struct ifnet *ifp = e->ifp;
-
- if (e->v6)
- panic("%s: IPv6 L2 resolution not supported yet.", __func__);
-
- bzero(&sin, sizeof(struct sockaddr_in));
- sin.sin_family = AF_INET;
- sin.sin_len = sizeof(struct sockaddr_in);
- memcpy(&sin.sin_addr, e->addr, sizeof(struct sockaddr_in));
-
-again:
- switch (e->state) {
- case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
- if (arpresolve(ifp, NULL, NULL, SA(&sin), e->dmac, &lle) == 0)
- l2t_fill_lle(sc, e, lle);
-
- /* Fall through */
-
- case L2T_STATE_VALID: /* fast-path, send the packet on */
- return t4_wrq_tx(sc, MBUF_EQ(m), m);
-
- case L2T_STATE_RESOLVING:
- case L2T_STATE_SYNC_WRITE:
- mtx_lock(&e->lock);
- if (e->state != L2T_STATE_SYNC_WRITE &&
- e->state != L2T_STATE_RESOLVING) {
- /* state changed by the time we got here */
- mtx_unlock(&e->lock);
- goto again;
- }
- arpq_enqueue(e, m);
- mtx_unlock(&e->lock);
-
- if (e->state == L2T_STATE_RESOLVING &&
- arpresolve(ifp, NULL, NULL, SA(&sin), e->dmac, &lle) == 0)
- l2t_fill_lle(sc, e, lle);
- }
-
- return (0);
-#endif
-}
-
-/*
- * Called when an L2T entry has no more users. The entry is left in the hash
- * table since it is likely to be reused but we also bump nfree to indicate
- * that the entry can be reallocated for a different neighbor. We also drop
- * the existing neighbor reference in case the neighbor is going away and is
- * waiting on our reference.
- *
- * Because entries can be reallocated to other neighbors once their ref count
- * drops to 0 we need to take the entry's lock to avoid races with a new
- * incarnation.
- */
-static void
-t4_l2e_free(struct l2t_entry *e)
-{
- struct llentry *lle = NULL;
- struct l2t_data *d;
-
- mtx_lock(&e->lock);
- if (atomic_load_acq_int(&e->refcnt) == 0) { /* hasn't been recycled */
- lle = e->lle;
- e->lle = NULL;
- /*
- * Don't need to worry about the arpq, an L2T entry can't be
- * released if any packets are waiting for resolution as we
- * need to be able to communicate with the device to close a
- * connection.
- */
- }
- mtx_unlock(&e->lock);
-
- d = container_of(e, struct l2t_data, l2tab[e->idx]);
- atomic_add_int(&d->nfree, 1);
-
- if (lle)
- LLE_FREE(lle);
-}
-
-void
-t4_l2t_release(struct l2t_entry *e)
-{
- if (atomic_fetchadd_int(&e->refcnt, -1) == 1)
- t4_l2e_free(e);
-}
-
-static int
-do_l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss,
- struct mbuf *m)
-{
- struct adapter *sc = iq->adapter;
- const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
- unsigned int tid = GET_TID(rpl);
- unsigned int idx = tid & (L2T_SIZE - 1);
-
- if (__predict_false(rpl->status != CPL_ERR_NONE)) {
- log(LOG_ERR,
- "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
- rpl->status, idx);
- return (EINVAL);
- }
-
- if (tid & F_SYNC_WR) {
- struct l2t_entry *e = &sc->l2t->l2tab[idx];
-
- mtx_lock(&e->lock);
- if (e->state != L2T_STATE_SWITCHING) {
- send_pending(sc, e);
- e->state = L2T_STATE_VALID;
- }
- mtx_unlock(&e->lock);
- }
-
- return (0);
-}
-
-/*
- * Reuse an L2T entry that was previously used for the same next hop.
- */
-static void
-reuse_entry(struct l2t_entry *e)
-{
- struct llentry *lle;
-
- mtx_lock(&e->lock); /* avoid race with t4_l2t_free */
- lle = e->lle;
- if (lle) {
- KASSERT(lle->la_flags & LLE_VALID,
- ("%s: invalid lle stored in l2t_entry", __func__));
-
- if (lle->la_expire >= time_uptime)
- e->state = L2T_STATE_STALE;
- else
- e->state = L2T_STATE_VALID;
- } else
- e->state = L2T_STATE_RESOLVING;
- mtx_unlock(&e->lock);
-}
-
-/*
- * The TOE wants an L2 table entry that it can use to reach the next hop over
- * the specified port. Produce such an entry - create one if needed.
- *
- * Note that the ifnet could be a pseudo-device like if_vlan, if_lagg, etc. on
- * top of the real cxgbe interface.
- */
-struct l2t_entry *
-t4_l2t_get(struct port_info *pi, struct ifnet *ifp, struct sockaddr *sa)
-{
- struct l2t_entry *e;
- struct l2t_data *d = pi->adapter->l2t;
- int addr_len;
- uint32_t *addr;
- int hash;
- struct sockaddr_in6 *sin6;
- unsigned int smt_idx = pi->port_id;
-
- if (sa->sa_family == AF_INET) {
- addr = (uint32_t *)&SINADDR(sa);
- addr_len = sizeof(SINADDR(sa));
- } else if (sa->sa_family == AF_INET6) {
- sin6 = (struct sockaddr_in6 *)sa;
- addr = (uint32_t *)&sin6->sin6_addr.s6_addr;
- addr_len = sizeof(sin6->sin6_addr.s6_addr);
- } else
- return (NULL);
-
-#ifndef VLAN_TAG
- if (ifp->if_type == IFT_L2VLAN)
- return (NULL);
-#endif
-
- hash = addr_hash(addr, addr_len, ifp->if_index);
-
- rw_wlock(&d->lock);
- for (e = d->l2tab[hash].first; e; e = e->next) {
- if (!addreq(e, addr) && e->ifp == ifp && e->smt_idx == smt_idx){
- l2t_hold(d, e);
- if (atomic_load_acq_int(&e->refcnt) == 1)
- reuse_entry(e);
- goto done;
- }
- }
-
- /* Need to allocate a new entry */
- e = alloc_l2e(d);
- if (e) {
- mtx_lock(&e->lock); /* avoid race with t4_l2t_free */
- e->state = L2T_STATE_RESOLVING;
- memcpy(e->addr, addr, addr_len);
- e->ifindex = ifp->if_index;
- e->smt_idx = smt_idx;
- e->ifp = ifp;
- e->hash = hash;
- e->lport = pi->lport;
- e->v6 = (addr_len == 16);
- e->lle = NULL;
- atomic_store_rel_int(&e->refcnt, 1);
-#ifdef VLAN_TAG
- if (ifp->if_type == IFT_L2VLAN)
- VLAN_TAG(ifp, &e->vlan);
- else
- e->vlan = VLAN_NONE;
-#endif
- e->next = d->l2tab[hash].first;
- d->l2tab[hash].first = e;
- mtx_unlock(&e->lock);
- }
-done:
- rw_wunlock(&d->lock);
- return e;
-}
-
-/*
- * Called when the host's neighbor layer makes a change to some entry that is
- * loaded into the HW L2 table.
- */
-void
-t4_l2t_update(struct adapter *sc, struct llentry *lle)
-{
- struct l2t_entry *e;
- struct l2t_data *d = sc->l2t;
- struct sockaddr *sa = L3_ADDR(lle);
- struct llentry *old_lle = NULL;
- uint32_t *addr = (uint32_t *)&SINADDR(sa);
- struct ifnet *ifp = lle->lle_tbl->llt_ifp;
- int hash = addr_hash(addr, sizeof(*addr), ifp->if_index);
-
- KASSERT(d != NULL, ("%s: no L2 table", __func__));
- LLE_WLOCK_ASSERT(lle);
- KASSERT(lle->la_flags & LLE_VALID || lle->la_flags & LLE_DELETED,
- ("%s: entry neither valid nor deleted.", __func__));
-
- rw_rlock(&d->lock);
- for (e = d->l2tab[hash].first; e; e = e->next) {
- if (!addreq(e, addr) && e->ifp == ifp) {
- mtx_lock(&e->lock);
- if (atomic_load_acq_int(&e->refcnt))
- goto found;
- e->state = L2T_STATE_STALE;
- mtx_unlock(&e->lock);
- break;
- }
- }
- rw_runlock(&d->lock);
-
- /* The TOE has no interest in this LLE */
- return;
-
- found:
- rw_runlock(&d->lock);
-
- if (atomic_load_acq_int(&e->refcnt)) {
-
- /* Entry is referenced by at least 1 offloaded connection. */
-
- /* Handle deletes first */
- if (lle->la_flags & LLE_DELETED) {
- if (lle == e->lle) {
- e->lle = NULL;
- e->state = L2T_STATE_RESOLVING;
- LLE_REMREF(lle);
- }
- goto done;
- }
-
- if (lle != e->lle) {
- old_lle = e->lle;
- LLE_ADDREF(lle);
- e->lle = lle;
- }
-
- if (e->state == L2T_STATE_RESOLVING ||
- memcmp(e->dmac, &lle->ll_addr, ETHER_ADDR_LEN)) {
-
- /* unresolved -> resolved; or dmac changed */
-
- memcpy(e->dmac, &lle->ll_addr, ETHER_ADDR_LEN);
- write_l2e(sc, e, 1);
- } else {
-
- /* +ve reinforcement of a valid or stale entry */
-
- }
-
- e->state = L2T_STATE_VALID;
-
- } else {
- /*
- * Entry was used previously but is unreferenced right now.
- * e->lle has been released and NULL'd out by t4_l2t_free, or
- * l2t_release is about to call t4_l2t_free and do that.
- *
- * Either way this is of no interest to us.
- */
- }
-
-done:
- mtx_unlock(&e->lock);
- if (old_lle)
- LLE_FREE(old_lle);
-}
-
-#endif
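
[Editor's note: for context, a hedged sketch of how the API removed
above fits together from a consumer's point of view; this commit
relocates that logic out of t4_l2t.c and into the TOE side.  The
function names come from the removed code, but the flow and the
immediate release are illustrative assumptions, not how an offloaded
connection would actually manage its reference.]

static int
l2t_usage_sketch(struct adapter *sc, struct port_info *pi,
    struct ifnet *ifp, struct sockaddr *nexthop, struct mbuf *m)
{
	struct l2t_entry *e;
	int rc;

	/* Look up or create an L2T entry for the next hop. */
	e = t4_l2t_get(pi, ifp, nexthop);
	if (e == NULL)
		return (ENOMEM);

	/*
	 * Transmit.  If the entry was still L2T_STATE_RESOLVING, the
	 * packet was queued on the entry's arpq and goes out once the
	 * L2T_WRITE_RPL marks the entry valid.
	 */
	rc = t4_l2t_send(sc, m, e);

	/* Drop our reference; the last release frees the entry. */
	t4_l2t_release(e);
	return (rc);
}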