author     Roger Pau Monné <royger@FreeBSD.org>	2017-05-19 08:19:51 +0000
committer  Roger Pau Monné <royger@FreeBSD.org>	2017-05-19 08:19:51 +0000
commit     bf319173f298f79632aaff2013c09f4bb50914b0
tree       44c7a968832d0c7ec90bee495313ce6d1c8e80d5
parent     fcb93d74937ba64f0a5b0b054d08b948bdb9adf7
xen/netfront: don't drop the ring RX lock with inconsistent ring state
Make sure the RX ring lock is only released when the state of the ring
is consistent, or else concurrent calls to xn_rxeof might get an
inconsistent ring state and thus some packets might be processed twice.

Note that this is not very common, and could only happen when an
interrupt is delivered while in xn_ifinit.

Reported by:	cperciva
Tested by:	cperciva
MFC after:	1 week
Sponsored by:	Citrix Systems R&D
Notes:
    svn path=/head/; revision=318523
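The change is easiest to read as a locking pattern: every update to shared ring state (consuming responses, advancing the consumer index, refilling buffers) stays under the RX lock, and the lock is dropped exactly once, only after the ring is consistent, before the received packets are handed to the network stack. The sketch below illustrates that shape in plain C; it is not the driver code, and the names (struct rx_ring, struct pkt, pktq_*, ring_*, refill_rx_buffers, deliver_pkt) are hypothetical stand-ins for the real netfront types and helpers. Only the mtx_lock()/mtx_unlock() calls are the real FreeBSD mutex(9) API.

/*
 * Sketch only: hypothetical ring/packet types and helpers standing in
 * for the netfront ones, to show the locking discipline this commit
 * adopts.
 */
static void
rx_process(struct rx_ring *r)
{
	struct pkt *p;
	struct pktq q;
	int more;

	mtx_lock(&r->lock);
	pktq_init(&q);
	do {
		/*
		 * Consume every pending response and advance the
		 * consumer index while still holding the lock, so a
		 * concurrent caller can never observe a half-consumed
		 * ring.
		 */
		while ((p = ring_next_response(r)) != NULL)
			pktq_enqueue(&q, p);
		refill_rx_buffers(r);
		more = ring_check_for_responses(r);
	} while (more);
	/* The ring state is consistent here, so unlocking is safe. */
	mtx_unlock(&r->lock);

	/* Deliver to the stack without holding the ring lock. */
	while ((p = pktq_dequeue(&q)) != NULL)
		deliver_pkt(p);
}

By contrast, the code removed below dropped and re-took the lock around each call into the stack, inside the response loop and before rsp_cons was finally advanced; an interrupt delivered in that window (for instance while in xn_ifinit) could re-enter xn_rxeof and consume the same responses a second time.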
Diffstat (limited to 'sys/dev/xen/netfront')
-rw-r--r--	sys/dev/xen/netfront/netfront.c	| 82
1 file changed, 39 insertions(+), 43 deletions(-)
diff --git a/sys/dev/xen/netfront/netfront.c b/sys/dev/xen/netfront/netfront.c
index 99ccdaaf5000..482b9e948fde 100644
--- a/sys/dev/xen/netfront/netfront.c
+++ b/sys/dev/xen/netfront/netfront.c
@@ -1161,17 +1161,18 @@ xn_rxeof(struct netfront_rxq *rxq)
 	struct mbufq mbufq_rxq, mbufq_errq;
 	int err, work_to_do;
 
-	do {
-		XN_RX_LOCK_ASSERT(rxq);
-		if (!netfront_carrier_ok(np))
-			return;
+	XN_RX_LOCK_ASSERT(rxq);
+
+	if (!netfront_carrier_ok(np))
+		return;
 
-		/* XXX: there should be some sane limit. */
-		mbufq_init(&mbufq_errq, INT_MAX);
-		mbufq_init(&mbufq_rxq, INT_MAX);
+	/* XXX: there should be some sane limit. */
+	mbufq_init(&mbufq_errq, INT_MAX);
+	mbufq_init(&mbufq_rxq, INT_MAX);
 
-		ifp = np->xn_ifp;
+	ifp = np->xn_ifp;
 
+	do {
 		rp = rxq->ring.sring->rsp_prod;
 		rmb();	/* Ensure we see queued responses up to 'rp'. */
 
@@ -1191,7 +1192,7 @@ xn_rxeof(struct netfront_rxq *rxq)
 			}
 
 			m->m_pkthdr.rcvif = ifp;
-			if ( rx->flags & NETRXF_data_validated ) {
+			if (rx->flags & NETRXF_data_validated) {
 				/*
 				 * According to mbuf(9) the correct way to tell
 				 * the stack that the checksum of an inbound
@@ -1214,50 +1215,45 @@ xn_rxeof(struct netfront_rxq *rxq)
 			}
 
 			(void )mbufq_enqueue(&mbufq_rxq, m);
-			rxq->ring.rsp_cons = i;
 		}
 
-		mbufq_drain(&mbufq_errq);
+		rxq->ring.rsp_cons = i;
 
-		/*
-		 * Process all the mbufs after the remapping is complete.
-		 * Break the mbuf chain first though.
-		 */
-		while ((m = mbufq_dequeue(&mbufq_rxq)) != NULL) {
-			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+		xn_alloc_rx_buffers(rxq);
 
-			/* XXX: Do we really need to drop the rx lock? */
-			XN_RX_UNLOCK(rxq);
+		RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do);
+	} while (work_to_do);
+
+	XN_RX_UNLOCK(rxq);
+	mbufq_drain(&mbufq_errq);
+	/*
+	 * Process all the mbufs after the remapping is complete.
+	 * Break the mbuf chain first though.
+	 */
+	while ((m = mbufq_dequeue(&mbufq_rxq)) != NULL) {
+		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 #if (defined(INET) || defined(INET6))
-			/* Use LRO if possible */
-			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
-			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
-				/*
-				 * If LRO fails, pass up to the stack
-				 * directly.
-				 */
-				(*ifp->if_input)(ifp, m);
-			}
-#else
+		/* Use LRO if possible */
+		if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
+		    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
+			/*
+			 * If LRO fails, pass up to the stack
+			 * directly.
+			 */
 			(*ifp->if_input)(ifp, m);
-#endif
-
-			XN_RX_LOCK(rxq);
 		}
-
-		rxq->ring.rsp_cons = i;
+#else
+		(*ifp->if_input)(ifp, m);
+#endif
+	}
 
 #if (defined(INET) || defined(INET6))
-		/*
-		 * Flush any outstanding LRO work
-		 */
-		tcp_lro_flush_all(lro);
+	/*
+	 * Flush any outstanding LRO work
	 */
+	tcp_lro_flush_all(lro);
 #endif
-
-		xn_alloc_rx_buffers(rxq);
-
-		RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do);
-	} while (work_to_do);
+	XN_RX_LOCK(rxq);
 }
 
 static void