Index: if_ath_misc.h =================================================================== --- if_ath_misc.h (revision 246707) +++ if_ath_misc.h (working copy) @@ -127,7 +127,9 @@ ath_tx_kick(struct ath_softc *sc) { - taskqueue_enqueue(sc->sc_tx_tq, &sc->sc_txpkttask); + ATH_TX_LOCK(sc); + ath_start(sc->sc_ifp); + ATH_TX_UNLOCK(sc); } /* Index: if_athvar.h =================================================================== --- if_athvar.h (revision 246707) +++ if_athvar.h (working copy) @@ -279,8 +279,6 @@ int32_t bfs_keyix; /* crypto key index */ int32_t bfs_txantenna; /* TX antenna config */ - uint16_t bfs_nextpktlen; /* length of next frag pkt */ - /* Make this an 8 bit value? */ enum ieee80211_protmode bfs_protmode; @@ -525,7 +523,6 @@ struct mtx sc_tx_ic_mtx; /* TX queue mutex */ char sc_tx_ic_mtx_name[32]; struct taskqueue *sc_tq; /* private task queue */ - struct taskqueue *sc_tx_tq; /* private TX task queue */ struct ath_hal *sc_ah; /* Atheros HAL */ struct ath_ratectrl *sc_rc; /* tx rate control support */ struct ath_tx99 *sc_tx99; /* tx99 adjunct state */ @@ -815,6 +812,8 @@ MA_OWNED) #define ATH_TX_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_tx_mtx, \ MA_NOTOWNED) +#define ATH_TX_TRYLOCK(_sc) (mtx_owned(&(_sc)->sc_tx_mtx) != 0 && \ + mtx_trylock(&(_sc)->sc_tx_mtx)) /* * The IC TX lock is non-reentrant and serialises packet queuing from Index: if_ath_tx.c =================================================================== --- if_ath_tx.c (revision 246707) +++ if_ath_tx.c (working copy) @@ -1124,11 +1124,7 @@ dur = rt->info[rix].lpAckDuration; if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { dur += dur; /* additional SIFS+ACK */ - if (bf->bf_state.bfs_nextpktlen == 0) { - device_printf(sc->sc_dev, - "%s: next txfrag len=0?\n", - __func__); - } + KASSERT(bf->bf_m->m_nextpkt != NULL, ("no fragment")); /* * Include the size of next fragment so NAV is * updated properly. The last fragment uses only @@ -1139,7 +1135,7 @@ * first fragment! 
*/ dur += ath_hal_computetxtime(ah, rt, - bf->bf_state.bfs_nextpktlen, + bf->bf_m->m_nextpkt->m_pkthdr.len, rix, shortPreamble); } if (isfrag) { Index: if_ath.c =================================================================== --- if_ath.c (revision 246707) +++ if_ath.c (working copy) @@ -152,6 +152,7 @@ static void ath_stop_locked(struct ifnet *); static void ath_stop(struct ifnet *); static int ath_reset_vap(struct ieee80211vap *, u_long); +static void ath_start_queue(struct ifnet *ifp); static int ath_media_change(struct ifnet *); static void ath_watchdog(void *); static int ath_ioctl(struct ifnet *, u_long, caddr_t); @@ -212,14 +213,6 @@ static void ath_node_powersave(struct ieee80211_node *, int); static int ath_node_set_tim(struct ieee80211_node *, int); -static int ath_transmit(struct ifnet *ifp, struct mbuf *m); -static void ath_qflush(struct ifnet *ifp); - -static void ath_txq_qinit(struct ifnet *ifp); -static void ath_txq_qflush(struct ifnet *ifp); -static int ath_txq_qadd(struct ifnet *ifp, struct mbuf *m0); -static void ath_txq_qrun(struct ifnet *ifp); - #ifdef IEEE80211_SUPPORT_TDMA #include #endif @@ -436,21 +429,6 @@ taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", ifp->if_xname); - /* - * This taskqueue doesn't get any higher priority - * than the ath(4) taskqueue (PI_NET) so TX won't - * pre-empt RX and other task priorities. - * - * This may not be optimal - the previous behaviour - * was to direct-dispatch frames via the sending - * task context, rather than (always) software - * queuing. 
- */ - sc->sc_tx_tq = taskqueue_create("ath_tx_taskq", M_NOWAIT, - taskqueue_thread_enqueue, &sc->sc_tx_tq); - taskqueue_start_threads(&sc->sc_tx_tq, 1, PI_NET, - "%s TX taskq", ifp->if_xname); - TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); @@ -579,18 +557,13 @@ ifp->if_softc = sc; ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; - /* XXX net80211 uses if_start to re-start ifnet processing */ - ifp->if_start = ath_start; - ifp->if_transmit = ath_transmit; - ifp->if_qflush = ath_qflush; + ifp->if_start = ath_start_queue; ifp->if_ioctl = ath_ioctl; ifp->if_init = ath_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); - ath_txq_qinit(ifp); - ic->ic_ifp = ifp; /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; @@ -996,7 +969,6 @@ ath_stop(ifp); ieee80211_ifdetach(ifp->if_l2com); taskqueue_free(sc->sc_tq); - taskqueue_free(sc->sc_tx_tq); #ifdef ATH_TX99_DIAG if (sc->sc_tx99 != NULL) sc->sc_tx99->detach(sc->sc_tx99); @@ -1005,7 +977,6 @@ #ifdef ATH_DEBUG_ALQ if_ath_alq_tidyup(&sc->sc_alq); #endif - ath_txq_qflush(ifp); ath_spectral_detach(sc); ath_dfs_detach(sc); ath_desc_free(sc); @@ -2145,7 +2116,6 @@ { taskqueue_unblock(sc->sc_tq); - taskqueue_unblock(sc->sc_tx_tq); } /* @@ -2246,7 +2216,6 @@ /* Try to (stop any further TX/RX from occuring */ taskqueue_block(sc->sc_tq); - taskqueue_block(sc->sc_tx_tq); ATH_PCU_LOCK(sc); ath_hal_intrset(ah, 0); /* disable interrupts */ @@ -2546,70 +2515,13 @@ } static void -ath_qflush(struct ifnet *ifp) +ath_start_queue(struct ifnet *ifp) { + struct ath_softc *sc = ifp->if_softc; - /* XXX complete/suspend TX */ - ath_txq_qflush(ifp); - - /* Unsuspend TX? */ -} - -/* - * Transmit a frame from net80211. 
- */ -static int -ath_transmit(struct ifnet *ifp, struct mbuf *m) -{ - struct ieee80211_node *ni; - struct ath_softc *sc = (struct ath_softc *) ifp->if_softc; - - ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; - - if (ath_txq_qadd(ifp, m) < 0) { - /* - * If queuing fails, the if_transmit() API makes the - * callee responsible for freeing the mbuf (rather than - * the caller, who just assumes the mbuf has been dealt - * with somehow). - * - * BUT, net80211 will free node references if if_transmit() - * fails _on encapsulated buffers_. Since drivers - * only get fully encapsulated frames from net80211 (via - * raw or otherwise APIs), we must be absolutely careful - * to not free the node ref or things will get loopy - * down the track. - * - * For tx fragments, the TX code must free whatever - * new references it created, but NOT the original - * TX node ref that was passed in. - */ - ath_freetx(m); - return (ENOBUFS); - } - - /* - * Unconditionally kick the taskqueue. - * - * Now, there's a subtle race condition possible here if we - * went down the path of only kicking the taskqueue if it - * wasn't running. If we're not absolutely, positively - * careful, we could have a small race window between - * finishing the taskqueue and clearing the TX flag, which - * would be interpreted in _this_ context as "we don't need - * to kick the TX taskqueue, as said taskqueue is already - * running." - * - * It's a problem in some of the 1GE/10GE NIC drivers. - * So until a _correct_ method for implementing this is - * drafted up and written, which avoids (potentially) - * large amounts of locking contention per-frame, let's - * just do the inefficient "kick taskqueue each time" - * method. 
- */ + ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_queue: start"); ath_tx_kick(sc); - - return (0); + ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_queue: finished"); } void @@ -2636,7 +2548,9 @@ sc->sc_txstart_cnt++; ATH_PCU_UNLOCK(sc); - ath_txq_qrun(ifp); + ATH_TX_LOCK(sc); + ath_start(sc->sc_ifp); + ATH_TX_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; @@ -2644,292 +2558,91 @@ ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: finished"); } -/* - * Pending TX buffer chain management routines. - */ - - -/* - * Initialise the TX queue! - */ -static void -ath_txq_qinit(struct ifnet *ifp) +void +ath_start(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; - - TAILQ_INIT(&sc->sc_txbuf_list); -} - -/* - * Add this mbuf to the TX buffer chain. - * - * This allocates an ath_buf, links the mbuf into it, and - * appends it to the end of the TX buffer chain. - * It doesn't fill out the ath_buf in any way besides - * that. - * - * Since the mbuf may be a list of mbufs representing - * 802.11 fragments, handle allocating ath_bufs for each - * of the mbuf fragments. - * - * If we queued it, 0 is returned. Else, < 0 is returned. - * - * If <0 is returned, the sender is responsible for - * freeing the mbuf if appropriate. - */ -static int -ath_txq_qadd(struct ifnet *ifp, struct mbuf *m0) -{ - struct ath_softc *sc = ifp->if_softc; + struct ieee80211_node *ni; struct ath_buf *bf; + struct mbuf *m, *next; ath_bufhead frags; - struct ieee80211_node *ni; - struct mbuf *m; + int npkts = 0; - /* XXX recursive TX completion -> TX? */ - ATH_TX_IC_UNLOCK_ASSERT(sc); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) + return; - /* - * We grab the node pointer, but we don't deref - * the node. The caller must be responsible for - * freeing the node reference if it decides to - * free the mbuf. - */ - ni = (struct ieee80211_node *) m0->m_pkthdr.rcvif; + ATH_TX_LOCK_ASSERT(sc); - ATH_TXBUF_LOCK(sc); - if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) { - /* XXX increment counter? 
*/ - ATH_TXBUF_UNLOCK(sc); - IF_LOCK(&ifp->if_snd); - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - IF_UNLOCK(&ifp->if_snd); - return (-1); - } - ATH_TXBUF_UNLOCK(sc); + ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start: called"); - /* - * Grab a TX buffer and associated resources. - */ - bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); - if (bf == NULL) { - device_printf(sc->sc_dev, - "%s: couldn't allocate a buffer\n", - __func__); - return (-1); - } - - /* Setup the initial buffer node contents */ - bf->bf_m = m0; - bf->bf_node = ni; - - /* - * Check for fragmentation. If this frame - * has been broken up verify we have enough - * buffers to send all the fragments so all - * go out or none... - */ - TAILQ_INIT(&frags); - if (m0->m_flags & M_FRAG) - DPRINTF(sc, ATH_DEBUG_XMIT, "%s: txfrag\n", __func__); - if ((m0->m_flags & M_FRAG) && - !ath_txfrag_setup(sc, &frags, m0, ni)) { - DPRINTF(sc, ATH_DEBUG_XMIT, - "%s: out of txfrag buffers\n", __func__); - sc->sc_stats.ast_tx_nofrag++; - ifp->if_oerrors++; - goto bad; - } - - /* - * Don't stuff the non-fragment frame onto the fragment - * queue. ath_txfrag_cleanup() should only be called on fragments - - * ie, the _extra_ ieee80211_node references - and not the single - * node reference already done as part of the net08211 TX call - * into the driver. - */ - - ATH_TX_IC_LOCK(sc); - - /* - * Throw the single frame onto the queue. - */ - TAILQ_INSERT_TAIL(&sc->sc_txbuf_list, bf, bf_list); - - /* - * Update next packet duration length if it's a fragment. - * It's needed for accurate NAV calculations (which for - * fragments include the length of the NEXT fragment.) - */ - if (m0->m_nextpkt != NULL) - bf->bf_state.bfs_nextpktlen = - m0->m_nextpkt->m_pkthdr.len; - - /* - * Append the fragments. We have to populate bf and node - * references here as although the txfrag setup code does - * create buffers and increment the node ref, it doesn't - * populate the fields for us. 
- */ - m = m0->m_nextpkt; - while ( (bf = TAILQ_FIRST(&frags)) != NULL) { - bf->bf_m = m; - bf->bf_node = ni; - device_printf(sc->sc_dev, "%s: adding bf=%p, m=%p, ni=%p\n", - __func__, - bf, - bf->bf_m, - bf->bf_node); - TAILQ_REMOVE(&frags, bf, bf_list); - TAILQ_INSERT_TAIL(&sc->sc_txbuf_list, bf, bf_list); - - /* - * For duration (NAV) calculations, we need - * to know the next fragment size. - * - * XXX This isn't entirely accurate as it doesn't - * take pad bytes and such into account, but it'll do - * for fragment length / NAV calculations. - */ - if (m->m_nextpkt != NULL) - bf->bf_state.bfs_nextpktlen = - m->m_nextpkt->m_pkthdr.len; - - m = m->m_nextpkt; - } - ATH_TX_IC_UNLOCK(sc); - - return (0); -bad: - device_printf(sc->sc_dev, "%s: bad?!\n", __func__); - bf->bf_m = NULL; - bf->bf_node = NULL; - ATH_TXBUF_LOCK(sc); - ath_returnbuf_head(sc, bf); - ath_txfrag_cleanup(sc, &frags, ni); - ATH_TXBUF_UNLOCK(sc); - return (-1); -} - -/* - * Flush the pending TX buffer chain. - */ -static void -ath_txq_qflush(struct ifnet *ifp) -{ - struct ath_softc *sc = ifp->if_softc; - ath_bufhead txlist; - struct ath_buf *bf; - - device_printf(sc->sc_dev, "%s: called\n", __func__); - TAILQ_INIT(&txlist); - - /* Grab lock */ - ATH_TX_IC_LOCK(sc); - - /* Copy everything out of sc_txbuf_list into txlist */ - TAILQ_CONCAT(&txlist, &sc->sc_txbuf_list, bf_list); - - /* Unlock */ - ATH_TX_IC_UNLOCK(sc); - - /* Now, walk the list, freeing things */ - while ((bf = TAILQ_FIRST(&txlist)) != NULL) { - TAILQ_REMOVE(&txlist, bf, bf_list); - - if (bf->bf_node) - ieee80211_free_node(bf->bf_node); - - m_free(bf->bf_m); - - /* XXX paranoia! */ - bf->bf_m = NULL; - bf->bf_node = NULL; - - /* - * XXX Perhaps do a second pass with the TXBUF lock - * held and free them all at once? - */ + for (;;) { ATH_TXBUF_LOCK(sc); - ath_returnbuf_head(sc, bf); + if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) { + /* XXX increment counter? 
*/ + ATH_TXBUF_UNLOCK(sc); + IF_LOCK(&ifp->if_snd); + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + IF_UNLOCK(&ifp->if_snd); + break; + } ATH_TXBUF_UNLOCK(sc); - } -} - -/* - * Walk the TX buffer queue and call ath_tx_start() on each - * of them. - */ -static void -ath_txq_qrun(struct ifnet *ifp) -{ - struct ath_softc *sc = ifp->if_softc; - ath_bufhead txlist; - struct ath_buf *bf, *bf_next; - struct ieee80211_node *ni; - struct mbuf *m; - - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) - return; - - TAILQ_INIT(&txlist); - - /* - * Grab the frames to transmit from the tx queue - */ - - /* Copy everything out of sc_txbuf_list into txlist */ - ATH_TX_IC_LOCK(sc); - TAILQ_CONCAT(&txlist, &sc->sc_txbuf_list, bf_list); - ATH_TX_IC_UNLOCK(sc); - - /* - * Attempt to transmit each frame. - * - * In the old code path - if a TX fragment fails, subsequent - * fragments in that group would be aborted. - * - * It would be nice to chain together TX fragments in this - * way so they can be aborted together. - */ - ATH_TX_LOCK(sc); - TAILQ_FOREACH_SAFE(bf, &txlist, bf_list, bf_next) { + /* - * Clear, because we're going to reuse this - * as a real ath_buf now + * Grab a TX buffer and associated resources. */ - ni = bf->bf_node; - m = bf->bf_m; + bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); + if (bf == NULL) + break; - bf->bf_node = NULL; - bf->bf_m = NULL; - + IFQ_DEQUEUE(&ifp->if_snd, m); + if (m == NULL) { + ATH_TXBUF_LOCK(sc); + ath_returnbuf_head(sc, bf); + ATH_TXBUF_UNLOCK(sc); + break; + } + ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; + npkts ++; /* - * Remove it from the list. + * Check for fragmentation. If this frame + * has been broken up verify we have enough + * buffers to send all the fragments so all + * go out or none... 
*/ - TAILQ_REMOVE(&txlist, bf, bf_list); - + TAILQ_INIT(&frags); + if ((m->m_flags & M_FRAG) && + !ath_txfrag_setup(sc, &frags, m, ni)) { + DPRINTF(sc, ATH_DEBUG_XMIT, + "%s: out of txfrag buffers\n", __func__); + sc->sc_stats.ast_tx_nofrag++; + ifp->if_oerrors++; + ath_freetx(m); + goto bad; + } + ifp->if_opackets++; + nextfrag: /* - * If we fail, free this buffer and go to the next one; - * ath_tx_start() frees the mbuf but not the node - * reference. + * Pass the frame to the h/w for transmission. + * Fragmented frames have each frag chained together + * with m_nextpkt. We know there are sufficient ath_buf's + * to send all the frags because of work done by + * ath_txfrag_setup. We leave m_nextpkt set while + * calling ath_tx_start so it can use it to extend the + * tx duration to cover the subsequent frag and + * so it can reclaim all the mbufs in case of an error; + * ath_tx_start clears m_nextpkt once it commits to + * handing the frame to the hardware. */ + next = m->m_nextpkt; if (ath_tx_start(sc, ni, bf, m)) { - /* - * XXX m is freed by ath_tx_start(); node reference - * is not! - */ - DPRINTF(sc, ATH_DEBUG_XMIT, - "%s: failed; bf=%p, ni=%p, m=%p\n", - __func__, - bf, - ni, - m); + bad: ifp->if_oerrors++; + reclaim: bf->bf_m = NULL; bf->bf_node = NULL; ATH_TXBUF_LOCK(sc); ath_returnbuf_head(sc, bf); + ath_txfrag_cleanup(sc, &frags, ni); ATH_TXBUF_UNLOCK(sc); /* * XXX todo, free the node outside of @@ -2937,86 +2650,38 @@ */ if (ni != NULL) ieee80211_free_node(ni); - } else { - /* - * Check here if the node is in power save state. - * XXX we should hold a node ref here, and release - * it after the TX has completed. - */ - ath_tx_update_tim(sc, ni, 1); - ifp->if_opackets++; + continue; } /* - * XXX should check for state change and flip out - * if needed. + * Check here if the node is in power save state. 
*/ - } - ATH_TX_UNLOCK(sc); + ath_tx_update_tim(sc, ni, 1); - /* - * If we break out early (eg a state change) we should prepend these - * frames onto the TX queue. - */ -} - -/* - * This is now primarily used by the net80211 layer to kick-start - * queue processing. - */ -void -ath_start(struct ifnet *ifp) -{ - struct mbuf *m; - struct ath_softc *sc = ifp->if_softc; - struct ieee80211_node *ni; - int npkts = 0; - - ATH_TX_UNLOCK_ASSERT(sc); - - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) - return; - - /* - * If we're below the free buffer limit, don't dequeue anything. - * The original code would not dequeue anything from the queue - * if allocating an ath_buf failed. - * - * For if_transmit, we have to either queue or drop the frame. - * So we have to try and queue it _somewhere_. - */ - for (;;) { - IFQ_DEQUEUE(&ifp->if_snd, m); - if (m == NULL) { - break; + if (next != NULL) { + /* + * Beware of state changing between frags. + * XXX check sta power-save state? + */ + if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { + DPRINTF(sc, ATH_DEBUG_XMIT, + "%s: flush fragmented packet, state %s\n", + __func__, + ieee80211_state_name[ni->ni_vap->iv_state]); + ath_freetx(next); + goto reclaim; + } + m = next; + bf = TAILQ_FIRST(&frags); + KASSERT(bf != NULL, ("no buf for txfrag")); + TAILQ_REMOVE(&frags, bf, bf_list); + goto nextfrag; } - /* - * If we do fail here, just break out for now - * and wait until we've transmitted something - * before we attempt again? - */ - if (ath_txq_qadd(ifp, m) < 0) { - DPRINTF(sc, ATH_DEBUG_XMIT, - "%s: ath_txq_qadd failed\n", - __func__); - ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; - if (ni != NULL) - ieee80211_free_node(ni); - ath_freetx(m); - break; - } - npkts++; + sc->sc_wd_timer = 5; } - - /* - * Kick the taskqueue into activity, but only if we - * queued something. 
- */ - if (npkts > 0) - ath_tx_kick(sc); + ATH_KTR(sc, ATH_KTR_TX, 1, "ath_start: finished; npkts=%d", npkts); } - static int ath_media_change(struct ifnet *ifp) { @@ -3039,7 +2704,6 @@ DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); taskqueue_block(sc->sc_tq); - taskqueue_block(sc->sc_tx_tq); IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ } @@ -3052,7 +2716,6 @@ DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); IF_UNLOCK(&ifp->if_snd); taskqueue_unblock(sc->sc_tq); - taskqueue_unblock(sc->sc_tx_tq); } static void @@ -4273,11 +3936,11 @@ if (sc->sc_softled) ath_led_event(sc, sc->sc_txrix); - ath_txq_qrun(ifp); - ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); + + ath_tx_kick(sc); } /* @@ -4326,11 +3989,11 @@ if (sc->sc_softled) ath_led_event(sc, sc->sc_txrix); - ath_txq_qrun(ifp); - ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); + + ath_tx_kick(sc); } /* @@ -4371,11 +4034,11 @@ if (sc->sc_softled) ath_led_event(sc, sc->sc_txrix); - ath_txq_qrun(ifp); - ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); + + ath_tx_kick(sc); } #undef TXQACTIVE @@ -4726,7 +4389,6 @@ /* (Try to) stop TX/RX from occuring */ taskqueue_block(sc->sc_tq); - taskqueue_block(sc->sc_tx_tq); ATH_PCU_LOCK(sc); ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */ @@ -5116,7 +4778,6 @@ sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); sc->sc_beacons = 0; taskqueue_unblock(sc->sc_tq); - taskqueue_unblock(sc->sc_tx_tq); } ni = ieee80211_ref_node(vap->iv_bss); @@ -5282,7 +4943,6 @@ "%s: calibration disabled\n", __func__); } taskqueue_unblock(sc->sc_tq); - taskqueue_unblock(sc->sc_tx_tq); } else if (nstate == IEEE80211_S_INIT) { /* * If there are no vaps left in RUN state then @@ -5296,7 +4956,6 @@ /* disable interrupts */ ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); taskqueue_block(sc->sc_tq); - taskqueue_block(sc->sc_tx_tq); sc->sc_beacons = 0; } #ifdef IEEE80211_SUPPORT_TDMA