Index: if_ath_tx_edma.c
===================================================================
--- if_ath_tx_edma.c	(revision 243648)
+++ if_ath_tx_edma.c	(working copy)
@@ -142,7 +142,7 @@
 	struct ath_buf *bf;
 	int i = 0;
 
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called\n", __func__);
@@ -181,7 +181,7 @@
 	    txq, txq->axq_qnum);
 
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 	ath_edma_tx_fifo_fill(sc, txq);
 }
@@ -204,7 +204,7 @@
 {
 	struct ath_hal *ah = sc->sc_ah;
 
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
 	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
@@ -249,7 +249,7 @@
     struct ath_buf *bf)
 {
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
 	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
@@ -300,7 +300,7 @@
     struct ath_buf *bf)
 {
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	DPRINTF(sc, ATH_DEBUG_XMIT_DESC,
 	    "%s: called; bf=%p, txq=%p, qnum=%d\n",
@@ -523,7 +523,7 @@
 		txq = &sc->sc_txq[ts.ts_queue_id];
 
-		ATH_TXQ_LOCK(txq);
+		ATH_TX_LOCK(sc);
 		bf = TAILQ_FIRST(&txq->axq_q);
 
 		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: qcuid=%d, bf=%p\n",
@@ -551,7 +551,7 @@
 			txq->axq_aggr_depth--;
 		txq->axq_fifo_depth --;	/* XXX assert FIFO depth >= 0 */
-		ATH_TXQ_UNLOCK(txq);
+		ATH_TX_UNLOCK(sc);
 
 		/*
 		 * First we need to make sure ts_rate is valid.
@@ -633,11 +633,11 @@
 		 * to begin validating that things are somewhat
 		 * working.
 		 */
-		ATH_TXQ_LOCK(txq);
+		ATH_TX_LOCK(sc);
 		if (dosched && txq->axq_fifo_depth == 0) {
 			ath_edma_tx_fifo_fill(sc, txq);
 		}
-		ATH_TXQ_UNLOCK(txq);
+		ATH_TX_UNLOCK(sc);
 	}
 
 	sc->sc_wd_timer = 0;
Index: if_ath_beacon.c
===================================================================
--- if_ath_beacon.c	(revision 243648)
+++ if_ath_beacon.c	(working copy)
@@ -564,8 +564,7 @@
 		struct ath_hal *ah = sc->sc_ah;
 
 		/* NB: only at DTIM */
-		ATH_TXQ_LOCK(cabq);
-		ATH_TXQ_LOCK(&avp->av_mcastq);
+		ATH_TX_LOCK(sc);
 		if (nmcastq) {
 			struct ath_buf *bfm;
@@ -586,8 +585,7 @@
 		/* NB: gated by beacon so safe to start here */
 		if (! TAILQ_EMPTY(&(cabq->axq_q)))
 			ath_hal_txstart(ah, cabq->axq_qnum);
-		ATH_TXQ_UNLOCK(&avp->av_mcastq);
-		ATH_TXQ_UNLOCK(cabq);
+		ATH_TX_UNLOCK(sc);
 	}
 	return bf;
 }
Index: if_athvar.h
===================================================================
--- if_athvar.h	(revision 243648)
+++ if_athvar.h	(working copy)
@@ -105,17 +105,15 @@
  */
 struct ath_tid {
 	TAILQ_HEAD(,ath_buf)	tid_q;		/* pending buffers */
-	u_int			axq_depth;	/* SW queue depth */
-	char			axq_name[48];	/* lock name */
 	struct ath_node		*an;		/* pointer to parent */
 	int			tid;		/* tid */
 	int			ac;		/* which AC gets this trafic */
 	int			hwq_depth;	/* how many buffers are on HW */
+	u_int			axq_depth;	/* SW queue depth */
 
 	struct {
 		TAILQ_HEAD(,ath_buf)	tid_q;		/* filtered queue */
 		u_int			axq_depth;	/* SW queue depth */
-		char			axq_name[48];	/* lock name */
 	} filtq;
 
 	/*
@@ -332,7 +330,6 @@
 	u_int			axq_intrcnt;	/* interrupt count */
 	u_int32_t		*axq_link;	/* link ptr in last TX desc */
 	TAILQ_HEAD(axq_q_s, ath_buf)	axq_q;		/* transmit queue */
-	struct mtx		axq_lock;	/* lock on q and link */
 	char			axq_name[12];	/* e.g. "ath0_txq4" */
 
 	/* Per-TID traffic queue for software -> hardware TX */
@@ -345,25 +342,6 @@
 #define	ATH_NODE_UNLOCK_ASSERT(_an)	mtx_assert(&(_an)->an_mtx,	\
 		MA_NOTOWNED)
 
-#define	ATH_TXQ_LOCK_INIT(_sc, _tq) do { \
-	snprintf((_tq)->axq_name, sizeof((_tq)->axq_name), "%s_txq%u", \
-	    device_get_nameunit((_sc)->sc_dev), (_tq)->axq_qnum); \
-	mtx_init(&(_tq)->axq_lock, (_tq)->axq_name, NULL, MTX_DEF); \
-} while (0)
-#define	ATH_TXQ_LOCK_DESTROY(_tq)	mtx_destroy(&(_tq)->axq_lock)
-#define	ATH_TXQ_LOCK(_tq)		mtx_lock(&(_tq)->axq_lock)
-#define	ATH_TXQ_UNLOCK(_tq)		mtx_unlock(&(_tq)->axq_lock)
-#define	ATH_TXQ_LOCK_ASSERT(_tq)	\
-		mtx_assert(&(_tq)->axq_lock, MA_OWNED)
-#define	ATH_TXQ_UNLOCK_ASSERT(_tq)	\
-		mtx_assert(&(_tq)->axq_lock, MA_NOTOWNED)
-#define	ATH_TXQ_IS_LOCKED(_tq)		mtx_owned(&(_tq)->axq_lock)
-
-#define	ATH_TID_LOCK_ASSERT(_sc, _tid)	\
-	    ATH_TXQ_LOCK_ASSERT((_sc)->sc_ac2q[(_tid)->ac])
-#define	ATH_TID_UNLOCK_ASSERT(_sc, _tid)	\
-	    ATH_TXQ_UNLOCK_ASSERT((_sc)->sc_ac2q[(_tid)->ac])
-
 /*
  * These are for the hardware queue.
 */
Index: if_ath_tx_ht.c
===================================================================
--- if_ath_tx_ht.c	(revision 243648)
+++ if_ath_tx_ht.c	(working copy)
@@ -658,7 +658,7 @@
 	int prev_frames = 0;	/* XXX for AR5416 burst, not done here */
 	int prev_al = 0;	/* XXX also for AR5416 burst */
 
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	tap = ath_tx_get_tx_tid(an, tid->tid);
 	if (tap == NULL) {
Index: if_ath_tx.c
===================================================================
--- if_ath_tx.c	(revision 243648)
+++ if_ath_tx.c	(working copy)
@@ -708,7 +708,8 @@
 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
     struct ath_buf *bf)
 {
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
+
 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
 	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
 	if (txq->axq_link != NULL) {
@@ -745,7 +746,7 @@
 	 * the SWBA handler since frames only go out on DTIM and
 	 * to avoid possible races.
 	 */
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
 	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
 	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
@@ -924,7 +925,7 @@
 	struct ath_hal *ah = sc->sc_ah;
 	struct ath_buf *bf, *bf_last;
 
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	/* This is always going to be cleared, empty or not */
 	txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
@@ -950,7 +951,7 @@
 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
     struct ath_buf *bf)
 {
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 #ifdef	ATH_DEBUG_ALQ
 	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
@@ -1395,7 +1396,7 @@
     struct ath_buf *bf)
 {
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	if (tid->clrdmask == 1) {
 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
@@ -1421,7 +1422,7 @@
 	struct ath_node *an = ATH_NODE(bf->bf_node);
 	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
 
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	/*
 	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
@@ -1492,7 +1493,7 @@
 	 * re-ordered frames to have out of order CCMP PN's, resulting
 	 * in many, many frame drops.
 	 */
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	wh = mtod(m0, struct ieee80211_frame *);
 	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
@@ -1788,6 +1789,8 @@
 	ieee80211_seq seqno;
 	uint8_t type, subtype;
 
+	ATH_TX_LOCK_ASSERT(sc);
+
 	/*
 	 * Determine the target hardware queue.
 	 *
@@ -1817,14 +1820,14 @@
 	 * XXX duplicated in ath_raw_xmit().
 	 */
 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
-		ATH_TXQ_LOCK(sc->sc_cabq);
+		ATH_TX_LOCK(sc);
 
 		if (sc->sc_cabq->axq_depth > sc->sc_txq_mcastq_maxdepth) {
 			sc->sc_stats.ast_tx_mcastq_overflow++;
 			r = ENOBUFS;
 		}
 
-		ATH_TXQ_UNLOCK(sc->sc_cabq);
+		ATH_TX_UNLOCK(sc);
 
 		if (r != 0) {
 			m_freem(m0);
@@ -1867,16 +1870,6 @@
 	/* XXX should just bzero the bf_state? */
 	bf->bf_state.bfs_dobaw = 0;
 
-	/*
-	 * Acquire the TXQ lock early, so both the encap and seqno
-	 * are allocated together.
-	 *
-	 * XXX should TXQ for CABQ traffic be the multicast queue,
-	 * or the TXQ the given PRI would allocate from? (eg for
-	 * sequence number allocation locking.)
-	 */
-	ATH_TXQ_LOCK(txq);
-
 	/* A-MPDU TX? Manually set sequence number */
 	/*
 	 * Don't do it whilst pending; the net80211 layer still
@@ -1964,8 +1957,6 @@
 	ath_tx_xmit_normal(sc, txq, bf);
 #endif
 done:
-	ATH_TXQ_UNLOCK(txq);
-
 	return 0;
 }
@@ -1990,6 +1981,8 @@
 	int o_tid = -1;
 	int do_override;
 
+	ATH_TX_LOCK_ASSERT(sc);
+
 	wh = mtod(m0, struct ieee80211_frame *);
 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
 	hdrlen = ieee80211_anyhdrsize(wh);
@@ -2024,8 +2017,6 @@
 		pri = TID_TO_WME_AC(o_tid);
 	}
 
-	ATH_TXQ_LOCK(sc->sc_ac2q[pri]);
-
 	/* Handle encryption twiddling if needed */
 	if (! ath_tx_tag_crypto(sc, ni, m0,
 	    params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
@@ -2188,8 +2179,6 @@
 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
 	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
 #endif
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[pri]);
-
 	return 0;
 }
@@ -2237,15 +2226,11 @@
 	 * XXX duplicated in ath_tx_start().
 	 */
 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
-		ATH_TXQ_LOCK(sc->sc_cabq);
-
 		if (sc->sc_cabq->axq_depth > sc->sc_txq_mcastq_maxdepth) {
 			sc->sc_stats.ast_tx_mcastq_overflow++;
 			error = ENOBUFS;
 		}
-		ATH_TXQ_UNLOCK(sc->sc_cabq);
-
 		if (error != 0) {
 			m_freem(m);
 			goto bad;
 		}
@@ -2435,8 +2420,7 @@
 	int index, cindex;
 	struct ieee80211_tx_ampdu *tap;
 
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	if (bf->bf_state.bfs_isretried)
 		return;
@@ -2531,8 +2515,7 @@
 	struct ieee80211_tx_ampdu *tap;
 	int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
 
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	tap = ath_tx_get_tx_tid(an, tid->tid);
 	index = ATH_BA_INDEX(tap->txa_start, seqno);
@@ -2580,8 +2563,7 @@
 	struct ieee80211_tx_ampdu *tap;
 	int seqno = SEQNO(bf->bf_state.bfs_seqno);
 
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	tap = ath_tx_get_tx_tid(an, tid->tid);
 	index = ATH_BA_INDEX(tap->txa_start, seqno);
@@ -2637,7 +2619,7 @@
 {
 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
 
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	if (tid->paused)
 		return;		/* paused, can't schedule yet */
@@ -2661,7 +2643,7 @@
 {
 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
 
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	if (tid->sched == 0)
 		return;
@@ -2697,7 +2679,7 @@
 	if (! IEEE80211_QOS_HAS_SEQ(wh))
 		return -1;
 
-	ATH_TID_LOCK_ASSERT(sc, &(ATH_NODE(ni)->an_tid[tid]));
+	ATH_TX_LOCK_ASSERT(sc);
 
 	/*
 	 * Is it a QOS NULL Data frame? Give it a sequence number from
@@ -2746,8 +2728,7 @@
 		    bf->bf_state.bfs_txq->axq_qnum);
 	}
 
-	ATH_TXQ_LOCK_ASSERT(txq);
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	tap = ath_tx_get_tx_tid(an, tid->tid);
@@ -2832,7 +2813,7 @@
 	int pri, tid;
 	struct mbuf *m0 = bf->bf_m;
 
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
 	wh = mtod(m0, struct ieee80211_frame *);
@@ -2840,8 +2821,6 @@
 	tid = ath_tx_gettid(sc, m0);
 	atid = &an->an_tid[tid];
 
-	ATH_TID_LOCK_ASSERT(sc, atid);
-
 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
 	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
@@ -2970,7 +2949,7 @@
 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
 {
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+	ATH_TX_LOCK_ASSERT(sc);
 	tid->paused++;
 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n",
 	    __func__, tid->paused);
@@ -2982,8 +2961,9 @@
 static void
 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
 {
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+	ATH_TX_LOCK_ASSERT(sc);
+
 	tid->paused--;
 
 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
 	    "%s: unpaused = %d\n",
@@ -3022,7 +3002,8 @@
     struct ath_buf *bf)
 {
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
+
 	if (! tid->isfiltered)
 		device_printf(sc->sc_dev, "%s: not filtered?!\n", __func__);
@@ -3045,7 +3026,7 @@
     struct ath_buf *bf)
 {
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	if (! tid->isfiltered) {
 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n",
@@ -3070,7 +3051,7 @@
 {
 	struct ath_buf *bf;
 
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	if (tid->hwq_depth != 0)
 		return;
@@ -3104,7 +3085,7 @@
 	struct ath_buf *nbf;
 	int retval;
 
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	/*
 	 * Don't allow a filtered frame to live forever.
@@ -3152,7 +3133,7 @@
 {
 	struct ath_buf *bf, *bf_next, *nbf;
 
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	bf = bf_first;
 	while (bf) {
@@ -3207,8 +3188,9 @@
 static void
 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
 {
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+	ATH_TX_LOCK_ASSERT(sc);
+
 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
 	    "%s: tid=%p, bar_wait=%d, bar_tx=%d, called\n",
 	    __func__,
@@ -3240,8 +3222,9 @@
 static void
 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
 {
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+	ATH_TX_LOCK_ASSERT(sc);
+
 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
 	    "%s: tid=%p, called\n",
 	    __func__,
@@ -3265,7 +3248,7 @@
 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
 {
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	if (tid->bar_wait == 0 || tid->hwq_depth > 0)
 		return (0);
@@ -3293,7 +3276,7 @@
 {
 	struct ieee80211_tx_ampdu *tap;
 
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
 	    "%s: tid=%p, called\n",
@@ -3349,15 +3332,15 @@
 	/* Try sending the BAR frame */
 	/* We can't hold the lock here! */
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);
+	ATH_TX_UNLOCK(sc);
 	if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
 		/* Success? Now we wait for notification that it's done */
-		ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
+		ATH_TX_LOCK(sc);
 		return;
 	}
 
 	/* Failure? For now, warn loudly and continue */
-	ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
+	ATH_TX_LOCK(sc);
 	device_printf(sc->sc_dev,
 	    "%s: tid=%p, failed to TX BAR, continue!\n", __func__, tid);
 	ath_tx_tid_bar_unsuspend(sc, tid);
@@ -3368,7 +3351,7 @@
     struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
 {
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	/*
 	 * If the current TID is running AMPDU, update
@@ -3471,7 +3454,7 @@
 	tap = ath_tx_get_tx_tid(an, tid->tid);
 
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	/* Walk the queue, free frames */
 	t = 0;
@@ -3559,17 +3542,16 @@
 	ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
 	    &an->an_node);
 
+	ATH_TX_LOCK(sc);
 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
 		struct ath_tid *atid = &an->an_tid[tid];
-		struct ath_txq *txq = sc->sc_ac2q[atid->ac];
 
-		ATH_TXQ_LOCK(txq);
 		/* Free packets */
 		ath_tx_tid_drain(sc, an, atid, &bf_cq);
 		/* Remove this tid from the list of active tids */
 		ath_tx_tid_unsched(sc, atid);
-		ATH_TXQ_UNLOCK(txq);
 	}
+	ATH_TX_UNLOCK(sc);
 
 	/* Handle completed frames */
 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
@@ -3589,7 +3571,7 @@
 	struct ath_buf *bf;
 
 	TAILQ_INIT(&bf_cq);
-	ATH_TXQ_LOCK(txq);
+	ATH_TX_LOCK(sc);
 
 	/*
 	 * Iterate over all active tids for the given txq,
@@ -3601,7 +3583,7 @@
 		ath_tx_tid_unsched(sc, tid);
 	}
 
-	ATH_TXQ_UNLOCK(txq);
+	ATH_TX_UNLOCK(sc);
 
 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
@@ -3635,7 +3617,7 @@
 	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
 
 	/* The TID state is protected behind the TXQ lock */
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 
 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
 	    __func__, bf, fail, atid->hwq_depth - 1);
@@ -3676,7 +3658,7 @@
 	 */
 	if (atid->isfiltered)
 		ath_tx_tid_filt_comp_complete(sc, atid);
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	/*
 	 * punt to rate control if we're not being cleaned up
@@ -3708,7 +3690,7 @@
 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
 	    __func__, tid, atid->incomp);
 
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 	atid->incomp--;
 	if (atid->incomp == 0) {
 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
@@ -3717,7 +3699,7 @@
 		atid->cleanup_inprogress = 0;
 		ath_tx_tid_resume(sc, atid);
 	}
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	ath_tx_default_comp(sc, bf, 0);
 }
@@ -3745,7 +3727,7 @@
 	    "%s: TID %d: called\n", __func__, tid);
 
 	TAILQ_INIT(&bf_cq);
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 
 	/*
 	 * Move the filtered frames to the TX queue, before
@@ -3827,7 +3809,7 @@
 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
 	    "%s: TID %d: cleanup needed: %d packets\n",
 	    __func__, tid, atid->incomp);
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	/* Handle completing frames and fail them */
 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
@@ -3907,7 +3889,7 @@
 	struct ath_tid *atid = &an->an_tid[tid];
 	struct ieee80211_tx_ampdu *tap;
 
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 
 	tap = ath_tx_get_tx_tid(an, tid);
@@ -3953,7 +3935,7 @@
 	if (ath_tx_tid_bar_tx_ready(sc, atid))
 		ath_tx_tid_bar_tx(sc, atid);
 
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	/* Free buffer, bf is free after this call */
 	ath_tx_default_comp(sc, bf, 0);
@@ -3978,7 +3960,7 @@
 	if (ath_tx_tid_bar_tx_ready(sc, atid))
 		ath_tx_tid_bar_tx(sc, atid);
 
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 }
 
 /*
@@ -3997,7 +3979,7 @@
 	int tid = bf->bf_state.bfs_tid;
 	struct ath_tid *atid = &an->an_tid[tid];
 
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	/* XXX clr11naggr should be done for all subframes */
 	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
@@ -4080,7 +4062,7 @@
 	    bf_first->bf_state.bfs_pktlen,
 	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
 
-	ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
+	ATH_TX_LOCK(sc);
 
 	tap = ath_tx_get_tx_tid(an, tid->tid);
 	sc->sc_stats.ast_tx_aggr_failall++;
@@ -4127,7 +4109,7 @@
 	if (ath_tx_tid_bar_tx_ready(sc, tid))
 		ath_tx_tid_bar_tx(sc, tid);
 
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	/* Complete frames which errored out */
 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
@@ -4153,7 +4135,7 @@
 	bf = bf_first;
 
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 
 	/* update incomp */
 	while (bf) {
@@ -4174,7 +4156,7 @@
 	if (ath_tx_tid_bar_tx_ready(sc, atid))
 		ath_tx_tid_bar_tx(sc, atid);
 
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	/* Handle frame completion */
 	while (bf) {
@@ -4230,7 +4212,7 @@
 	TAILQ_INIT(&bf_cq);
 
 	/* The TID state is kept behind the TXQ lock */
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 
 	atid->hwq_depth--;
 	if (atid->hwq_depth < 0)
@@ -4255,7 +4237,7 @@
 		device_printf(sc->sc_dev,
 		    "%s: isfiltered=1, normal_comp?\n", __func__);
-		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+		ATH_TX_UNLOCK(sc);
 		ath_tx_comp_cleanup_aggr(sc, bf_first);
 		return;
 	}
@@ -4319,7 +4301,7 @@
 	if (ts.ts_status & HAL_TXERR_XRETRY) {
 #endif
 	if (ts.ts_status != 0) {
-		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+		ATH_TX_UNLOCK(sc);
 		ath_tx_comp_aggr_error(sc, bf_first, atid);
 		return;
 	}
@@ -4438,7 +4420,7 @@
 	 * TXed.
 	 */
 	txseq = tap->txa_start;
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	if (nframes != nf)
 		device_printf(sc->sc_dev,
@@ -4458,15 +4440,15 @@
 	 */
 	if (drops) {
 		/* Suspend the TX queue and get ready to send the BAR */
-		ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+		ATH_TX_LOCK(sc);
 		ath_tx_tid_bar_suspend(sc, atid);
-		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+		ATH_TX_UNLOCK(sc);
 	}
 
 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
 	    "%s: txa_start now %d\n", __func__, tap->txa_start);
 
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 
 	/* Prepend all frames to the beginning of the queue */
 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
@@ -4500,7 +4482,7 @@
 	if (ath_tx_tid_bar_tx_ready(sc, atid))
 		ath_tx_tid_bar_tx(sc, atid);
 
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	/* Do deferred completion */
 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
@@ -4549,7 +4531,7 @@
 	 * This unfortunately means that it's released and regrabbed
 	 * during retry and cleanup. That's rather inefficient.
 	 */
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 
 	if (tid == IEEE80211_NONQOS_TID)
 		device_printf(sc->sc_dev, "%s: TID=16!\n", __func__);
@@ -4583,7 +4565,7 @@
 		device_printf(sc->sc_dev,
 		    "%s: isfiltered=1, normal_comp?\n", __func__);
-		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+		ATH_TX_UNLOCK(sc);
 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", __func__);
 		ath_tx_comp_cleanup_unaggr(sc, bf);
@@ -4639,7 +4621,7 @@
 	if (ath_tx_tid_bar_tx_ready(sc, atid))
 		ath_tx_tid_bar_tx(sc, atid);
 
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 	/*
 	 * If freeframe is set, then the frame couldn't be
 	 * cloned and bf is still valid. Just complete/free it.
@@ -4658,7 +4640,7 @@
 	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
 #endif
 	if (fail == 0 && ts.ts_status != 0) {
-		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+		ATH_TX_UNLOCK(sc);
 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", __func__);
 		ath_tx_aggr_retry_unaggr(sc, bf);
@@ -4696,7 +4678,7 @@
 	if (ath_tx_tid_bar_tx_ready(sc, atid))
 		ath_tx_tid_bar_tx(sc, atid);
 
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	ath_tx_default_comp(sc, bf, fail);
 	/* bf is freed at this point */
@@ -4727,7 +4709,7 @@
 	ath_bufhead bf_q;
 
 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	tap = ath_tx_get_tx_tid(an, tid->tid);
@@ -4933,7 +4915,7 @@
 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
 	    __func__, an, tid->tid);
 
-	ATH_TID_LOCK_ASSERT(sc, tid);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	/* Check - is AMPDU pending or running? then print out something */
 	if (ath_tx_ampdu_pending(sc, an, tid->tid))
@@ -5012,7 +4994,7 @@
 {
 	struct ath_tid *tid, *next, *last;
 
-	ATH_TXQ_LOCK_ASSERT(txq);
+	ATH_TX_LOCK_ASSERT(sc);
 
 	/*
 	 * Don't schedule if the hardware queue is busy.
@@ -5162,7 +5144,7 @@
 	 * it'll be "after" the left edge of the BAW and thus it'll
 	 * fall within it.
 	 */
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 	/*
 	 * This is a bit annoying. Until net80211 HT code inherits some
 	 * (any) locking, we may have this called in parallel BUT only
@@ -5172,7 +5154,7 @@
 		ath_tx_tid_pause(sc, atid);
 		atid->addba_tx_pending = 1;
 	}
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
 	    "%s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
@@ -5231,7 +5213,7 @@
 	 */
 	r = sc->sc_addba_response(ni, tap, status, code, batimeout);
 
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 	atid->addba_tx_pending = 0;
 	/*
 	 * XXX dirty!
@@ -5240,7 +5222,7 @@
 	 */
 	tap->txa_start = ni->ni_txseqs[tid];
 	ath_tx_tid_resume(sc, atid);
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 	return r;
 }
@@ -5265,7 +5247,7 @@
 	 * Pause TID traffic early, so there aren't any races
 	 * Unblock the pending BAR held traffic, if it's currently paused.
 	 */
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 	ath_tx_tid_pause(sc, atid);
 	if (atid->bar_wait) {
 		/*
@@ -5276,7 +5258,7 @@
 		atid->bar_tx = 1;
 		ath_tx_tid_bar_unsuspend(sc, atid);
 	}
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	/* There's no need to hold the TXQ lock here */
 	sc->sc_addba_stop(ni, tap);
@@ -5332,7 +5314,7 @@
 	 * has beaten us to the punch? (XXX figure out what?)
 	 */
 	if (status == 0 || attempts == 50) {
-		ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+		ATH_TX_LOCK(sc);
 		if (atid->bar_tx == 0 || atid->bar_wait == 0)
 			device_printf(sc->sc_dev,
 			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
@@ -5340,7 +5322,7 @@
 			    atid->bar_tx, atid->bar_wait);
 		else
 			ath_tx_tid_bar_unsuspend(sc, atid);
-		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+		ATH_TX_UNLOCK(sc);
 	}
 }
@@ -5360,17 +5342,17 @@
 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: called; resuming\n", __func__);
 
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 	atid->addba_tx_pending = 0;
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 
 	/* Note: This updates the aggregate state to (again) pending */
 	sc->sc_addba_response_timeout(ni, tap);
 
 	/* Unpause the TID; which reschedules it */
-	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_LOCK(sc);
 	ath_tx_tid_resume(sc, atid);
-	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+	ATH_TX_UNLOCK(sc);
 }
 
 /*
@@ -5438,14 +5420,14 @@
 	 */
 
 	/* Suspend all traffic on the node */
+	ATH_TX_LOCK(sc);
 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
 		atid = &an->an_tid[tid];
 		txq = sc->sc_ac2q[atid->ac];
-		ATH_TXQ_LOCK(txq);
 		ath_tx_tid_pause(sc, atid);
-		ATH_TXQ_UNLOCK(txq);
 	}
+	ATH_TX_UNLOCK(sc);
 
 	ATH_NODE_LOCK(an);
@@ -5455,14 +5437,14 @@
 		device_printf(sc->sc_dev,
 		    "%s: an=%p: node was already asleep\n", __func__, an);
+		ATH_TX_LOCK(sc);
 		for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
 			atid = &an->an_tid[tid];
 			txq = sc->sc_ac2q[atid->ac];
-			ATH_TXQ_LOCK(txq);
 			ath_tx_tid_resume(sc, atid);
-			ATH_TXQ_UNLOCK(txq);
 		}
+		ATH_TX_UNLOCK(sc);
 		return;
 	}
@@ -5500,14 +5482,14 @@
 	ATH_NODE_UNLOCK(an);
 
+	ATH_TX_LOCK(sc);
 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
 		atid = &an->an_tid[tid];
 		txq = sc->sc_ac2q[atid->ac];
-		ATH_TXQ_LOCK(txq);
 		ath_tx_tid_resume(sc, atid);
-		ATH_TXQ_UNLOCK(txq);
 	}
+	ATH_TX_UNLOCK(sc);
 }
 
 static int
Index: if_ath.c
===================================================================
--- if_ath.c	(revision 243648)
+++ if_ath.c	(working copy)
@@ -1369,7 +1369,6 @@
 		 * Reclaim any pending mcast frames for the vap.
 		 */
 		ath_tx_draintxq(sc, &avp->av_mcastq);
-		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
 	}
 	/*
 	 * Update bookkeeping.
@@ -2271,16 +2270,16 @@
 	/* Restart TX/RX as needed */
 	ath_txrx_start(sc);
 
-	/* XXX Restart TX completion and pending TX */
+	/* Restart TX completion and pending TX */
 	if (reset_type == ATH_RESET_NOLOSS) {
+		ATH_TX_LOCK(sc);
 		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
 			if (ATH_TXQ_SETUP(sc, i)) {
-				ATH_TXQ_LOCK(&sc->sc_txq[i]);
 				ath_txq_restart_dma(sc, &sc->sc_txq[i]);
 				ath_txq_sched(sc, &sc->sc_txq[i]);
-				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
 			}
 		}
+		ATH_TX_UNLOCK(sc);
 	}
 
 	/*
@@ -2513,7 +2512,9 @@
 	sc->sc_txstart_cnt++;
 	ATH_PCU_UNLOCK(sc);
 
+	ATH_TX_LOCK(sc);
 	ath_start(sc->sc_ifp);
+	ATH_TX_UNLOCK(sc);
 
 	ATH_PCU_LOCK(sc);
 	sc->sc_txstart_cnt--;
@@ -2534,6 +2535,8 @@
 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
 		return;
+	ATH_TX_LOCK_ASSERT(sc);
+
 	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start: called");
 
 	for (;;) {
@@ -2605,6 +2608,10 @@
 			ath_returnbuf_head(sc, bf);
 			ath_txfrag_cleanup(sc, &frags, ni);
 			ATH_TXBUF_UNLOCK(sc);
+			/*
+			 * XXX todo, free the node outside of
+			 * the TX lock context!
+			 */
 			if (ni != NULL)
				ieee80211_free_node(ni);
 			continue;
@@ -2816,9 +2823,6 @@
 ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
 {
-	ATH_TXQ_LOCK_ASSERT(dst);
-	ATH_TXQ_LOCK_ASSERT(src);
-
 	TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
 	dst->axq_link = src->axq_link;
 	src->axq_link = NULL;
@@ -3298,7 +3302,6 @@
 	txq->axq_softc = sc;
 	TAILQ_INIT(&txq->axq_q);
 	TAILQ_INIT(&txq->axq_tidq);
-	ATH_TXQ_LOCK_INIT(sc, txq);
 }
 
 /*
@@ -3482,7 +3485,6 @@
 {
 	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
-	ATH_TXQ_LOCK_DESTROY(txq);
 	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
 }
@@ -3691,7 +3693,7 @@
 	struct ieee80211_node *ni = bf->bf_node;
 	struct ath_node *an = NULL;
 
-	ATH_TXQ_UNLOCK_ASSERT(txq);
+	ATH_TX_UNLOCK_ASSERT(sc);
 
 	/* If unicast frame, update general statistics */
 	if (ni != NULL) {
@@ -3760,11 +3762,11 @@
 	nacked = 0;
 	for (;;) {
-		ATH_TXQ_LOCK(txq);
+		ATH_TX_LOCK(sc);
 		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
 		bf = TAILQ_FIRST(&txq->axq_q);
 		if (bf == NULL) {
-			ATH_TXQ_UNLOCK(txq);
+			ATH_TX_UNLOCK(sc);
 			break;
 		}
 		ds = bf->bf_lastds;	/* XXX must be setup correctly! */
@@ -3792,7 +3794,7 @@
 			ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
 			    "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
 			    txq->axq_qnum, bf, ds);
-			ATH_TXQ_UNLOCK(txq);
+			ATH_TX_UNLOCK(sc);
 			break;
 		}
 		ATH_TXQ_REMOVE(txq, bf, bf_list);
@@ -3833,7 +3835,7 @@
 				ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
 				    ts->ts_rssi);
 		}
-		ATH_TXQ_UNLOCK(txq);
+		ATH_TX_UNLOCK(sc);
 
 		/*
 		 * Update statistics and call completion
@@ -3852,9 +3854,9 @@
 	/* Kick the TXQ scheduler */
 	if (dosched) {
-		ATH_TXQ_LOCK(txq);
+		ATH_TX_LOCK(sc);
 		ath_txq_sched(sc, txq);
-		ATH_TXQ_UNLOCK(txq);
+		ATH_TX_UNLOCK(sc);
 	}
 
 	ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
@@ -4027,13 +4029,13 @@
 	sc->sc_txproc_cnt++;
 	ATH_PCU_UNLOCK(sc);
 
+	ATH_TX_LOCK(sc);
 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
 		if (ATH_TXQ_SETUP(sc, i)) {
-			ATH_TXQ_LOCK(&sc->sc_txq[i]);
 			ath_txq_sched(sc, &sc->sc_txq[i]);
-			ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
 		}
 	}
+	ATH_TX_UNLOCK(sc);
 
 	ATH_PCU_LOCK(sc);
 	sc->sc_txproc_cnt--;
@@ -4166,7 +4168,7 @@
 	ATH_TXBUF_UNLOCK(sc);
 
 	for (ix = 0;; ix++) {
-		ATH_TXQ_LOCK(txq);
+		ATH_TX_LOCK(sc);
 		bf = TAILQ_FIRST(&txq->axq_q);
 		if (bf == NULL) {
 			txq->axq_link = NULL;
@@ -4181,7 +4183,7 @@
 			 * very fruity very quickly.
 			 */
 			txq->axq_fifo_depth = 0;
-			ATH_TXQ_UNLOCK(txq);
+			ATH_TX_UNLOCK(sc);
 			break;
 		}
 		ATH_TXQ_REMOVE(txq, bf, bf_list);
@@ -4217,7 +4219,7 @@
 		 * Clear ATH_BUF_BUSY; the completion handler
 		 * will free the buffer.
 		 */
-		ATH_TXQ_UNLOCK(txq);
+		ATH_TX_UNLOCK(sc);
 		bf->bf_flags &= ~ATH_BUF_BUSY;
 		if (bf->bf_comp)
 			bf->bf_comp(sc, bf, 1);