path: root/sys/dev/ath/if_ath.c
author    Adrian Chadd <adrian@FreeBSD.org>    2013-03-24 00:03:12 +0000
committer Adrian Chadd <adrian@FreeBSD.org>    2013-03-24 00:03:12 +0000
commit  b837332d0a4090781a3d7b01a092e164dd2a24ca (patch)
tree    893a15cd3bd710b84e95de5c9446cd5febdbb313 /sys/dev/ath/if_ath.c
parent  49ddabc4bdac14b4860dbc72a1651aff94c75593 (diff)
Overhaul the TXQ locking (again!) as part of some beacon/cabq timing
related issues.

Moving the TX locking under one lock made things easier to progress on,
but it had one important side-effect: it increased the latency when
handling CABQ setup when sending beacons.

This commit introduces a bunch of new changes and a few unrelated changes
that are just easier to lump in here.

The aim is to have the CABQ locking separate from other locking. The CABQ
transmit path in the beacon process thus doesn't have to grab the general
TX lock, reducing lock contention/latency and making it more likely that
we'll make the beacon TX timing.

The second half of this commit is the CABQ-related setup changes needed
for sane-looking EDMA CABQ support. Right now the EDMA TX code naively
assumes that only one frame (MPDU or A-MPDU) is being pushed into each
FIFO slot. For the CABQ this isn't true - a whole list of frames is being
pushed in - and thus CABQ handling breaks very quickly.

The aim here is to set up the CABQ list and then push _that list_ to the
hardware for transmission. I can then extend the EDMA TX code to stamp
that list as being "one" FIFO entry (likely by tagging the last buffer in
that list as "FIFO END") so the EDMA TX completion code correctly tracks
things.

Major:

* Migrate the per-TXQ add/removal locking back to per-TXQ, rather than
  a single lock.

* Leave the software queue side of things under the ATH_TX_LOCK lock,
  continuing to serialise things as they are.

* Add a new function which is called whenever there's a beacon miss, to
  print out some debugging information. This is primarily designed to
  help me figure out if the beacon miss events are due to a noisy
  environment, issues with the PHY/MAC, or other.

* Move the CABQ setup/enable to occur _after_ all the VAPs have been
  looked at. This means that for multiple VAPs in bursted mode, the CABQ
  gets primed once all VAPs are checked, rather than being primed on the
  first VAP and then having frames appended after this.

Minor:

* Add a (disabled) twiddle to let me enable/disable cabq traffic. It's
  primarily there to let me easily debug what's going on with beacon and
  CABQ setup/traffic; there are some DMA engine hangs which I'm finally
  trying to trace down.

* Clear bf_next when flushing frames; it should quieten some warnings
  that show up when a node goes away.

Tested:

* AR9280, STA/hostap, up to 4 VAPs (staggered)
* AR5416, STA/hostap, up to 4 VAPs (staggered)

TODO:

* (Lots) more AR9380 and later testing, as I may have missed something
  here.

* Leverage this to fix CABQ handling for AR9380 and later chips.

* Force bursted beaconing on the chips that default to staggered beacons,
  and ensure the CABQ stuff is all sane (e.g., the MORE bits that aren't
  being correctly set when chaining descriptors).
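
As context for the diff below, which uses ATH_TXQ_LOCK_INIT / ATH_TXQ_LOCK /
ATH_TXQ_UNLOCK / ATH_TXQ_LOCK_DESTROY without showing their definitions, here
is a minimal sketch of how such per-queue locking macros can be built on
FreeBSD's mtx(9). The struct layout is a simplified stand-in and the macro
bodies are an assumption for illustration; the real definitions in the driver
headers differ in detail.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    /* Simplified stand-in; the real struct ath_txq carries much more state. */
    struct txq_sketch {
            struct mtx      axq_lock;       /* protects hardware queue (axq_q) manipulation */
    };

    /* The softc argument is likely used for the lock name in the real driver;
     * it is ignored in this sketch. */
    #define ATH_TXQ_LOCK_INIT(_sc, _tq)     \
            mtx_init(&(_tq)->axq_lock, "ath_txq", NULL, MTX_DEF)
    #define ATH_TXQ_LOCK_DESTROY(_tq)       mtx_destroy(&(_tq)->axq_lock)
    #define ATH_TXQ_LOCK(_tq)               mtx_lock(&(_tq)->axq_lock)
    #define ATH_TXQ_UNLOCK(_tq)             mtx_unlock(&(_tq)->axq_lock)
    #define ATH_TXQ_LOCK_ASSERT(_tq)        mtx_assert(&(_tq)->axq_lock, MA_OWNED)

The EDMA CABQ plan above ("tag the last buffer in the list as FIFO END") is
only described in prose; a hypothetical sketch of that tagging step might look
like the following. The struct, flag name, and flag value are placeholders,
not the committed implementation.

    #include <sys/types.h>
    #include <sys/queue.h>

    /* Placeholder buffer type; the real struct ath_buf is much larger. */
    struct buf_sketch {
            TAILQ_ENTRY(buf_sketch) bf_list;
            uint32_t                bf_flags;
    };
    TAILQ_HEAD(bufhead_sketch, buf_sketch);

    #define BUF_FIFOEND     0x00000010      /* assumed flag bit */

    /*
     * Mark the last buffer in a CABQ frame list so that EDMA TX completion
     * can account the whole chain as a single FIFO slot.
     */
    static void
    cabq_mark_fifo_end(struct bufhead_sketch *bfq)
    {
            struct buf_sketch *bf_last;

            bf_last = TAILQ_LAST(bfq, bufhead_sketch);
            if (bf_last != NULL)
                    bf_last->bf_flags |= BUF_FIFOEND;
    }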
Notes:
svn path=/head/; revision=248671
Diffstat (limited to 'sys/dev/ath/if_ath.c')
-rw-r--r--  sys/dev/ath/if_ath.c  31
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 2fe8de3d3f2d..022e80d54c8d 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -694,6 +694,9 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
*/
sc->sc_txq_mcastq_maxdepth = ath_txbuf;
+ /* Enable CABQ by default */
+ sc->sc_cabq_enable = 1;
+
/*
* Allow the TX and RX chainmasks to be overridden by
* environment variables and/or device.hints.
@@ -1899,7 +1902,7 @@ ath_bmiss_vap(struct ieee80211vap *vap)
ATH_VAP(vap)->av_bmiss(vap);
}
-static int
+int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
uint32_t rsize;
@@ -2364,14 +2367,17 @@ ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
/* Restart TX completion and pending TX */
if (reset_type == ATH_RESET_NOLOSS) {
- ATH_TX_LOCK(sc);
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
+ ATH_TXQ_LOCK(&sc->sc_txq[i]);
ath_txq_restart_dma(sc, &sc->sc_txq[i]);
+ ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
+
+ ATH_TX_LOCK(sc);
ath_txq_sched(sc, &sc->sc_txq[i]);
+ ATH_TX_UNLOCK(sc);
}
}
- ATH_TX_UNLOCK(sc);
}
/*
@@ -2922,6 +2928,9 @@ void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{
+ ATH_TXQ_LOCK_ASSERT(src);
+ ATH_TXQ_LOCK_ASSERT(dst);
+
TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
dst->axq_link = src->axq_link;
src->axq_link = NULL;
@@ -3401,6 +3410,7 @@ ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
txq->axq_softc = sc;
TAILQ_INIT(&txq->axq_q);
TAILQ_INIT(&txq->axq_tidq);
+ ATH_TXQ_LOCK_INIT(sc, txq);
}
/*
@@ -3585,6 +3595,7 @@ ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
+ ATH_TXQ_LOCK_DESTROY(txq);
}
/*
@@ -3837,11 +3848,11 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
nacked = 0;
for (;;) {
- ATH_TX_LOCK(sc);
+ ATH_TXQ_LOCK(txq);
txq->axq_intrcnt = 0; /* reset periodic desc intr count */
bf = TAILQ_FIRST(&txq->axq_q);
if (bf == NULL) {
- ATH_TX_UNLOCK(sc);
+ ATH_TXQ_UNLOCK(txq);
break;
}
ds = bf->bf_lastds; /* XXX must be setup correctly! */
@@ -3869,7 +3880,7 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
"ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
txq->axq_qnum, bf, ds);
- ATH_TX_UNLOCK(sc);
+ ATH_TXQ_UNLOCK(txq);
break;
}
ATH_TXQ_REMOVE(txq, bf, bf_list);
@@ -3906,7 +3917,7 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
ts->ts_rssi);
}
- ATH_TX_UNLOCK(sc);
+ ATH_TXQ_UNLOCK(txq);
/*
* Update statistics and call completion
@@ -4286,7 +4297,7 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
* we do not need to block ath_tx_proc
*/
for (ix = 0;; ix++) {
- ATH_TX_LOCK(sc);
+ ATH_TXQ_LOCK(txq);
bf = TAILQ_FIRST(&txq->axq_q);
if (bf == NULL) {
txq->axq_link = NULL;
@@ -4301,7 +4312,7 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
* very fruity very quickly.
*/
txq->axq_fifo_depth = 0;
- ATH_TX_UNLOCK(sc);
+ ATH_TXQ_UNLOCK(txq);
break;
}
ATH_TXQ_REMOVE(txq, bf, bf_list);
@@ -4337,7 +4348,7 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
* Clear ATH_BUF_BUSY; the completion handler
* will free the buffer.
*/
- ATH_TX_UNLOCK(sc);
+ ATH_TXQ_UNLOCK(txq);
bf->bf_flags &= ~ATH_BUF_BUSY;
if (bf->bf_comp)
bf->bf_comp(sc, bf, 1);