Index: if_ath_debug.h
===================================================================
--- if_ath_debug.h	(revision 239916)
+++ if_ath_debug.h	(working copy)
@@ -65,6 +65,7 @@
 	ATH_DEBUG_FATAL		= 0x080000000ULL,	/* fatal errors */
 	ATH_DEBUG_SW_TX_BAR	= 0x100000000ULL,	/* BAR TX */
 	ATH_DEBUG_EDMA_RX	= 0x200000000ULL,	/* RX EDMA state */
+	ATH_DEBUG_SW_TX_FILT	= 0x400000000ULL,	/* SW TX FF */
 	ATH_DEBUG_ANY		= 0xffffffffffffffffULL
 };
 
Index: if_ath_rx.c
===================================================================
--- if_ath_rx.c	(revision 239916)
+++ if_ath_rx.c	(working copy)
@@ -253,7 +253,6 @@
 		    ("multi-segment packet; nseg %u", bf->bf_nseg));
 		bf->bf_m = m;
 	}
-	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);
 
 	/*
 	 * Setup descriptors.  For receive we always terminate
@@ -276,7 +275,16 @@
 	 * buffer (or rx fifo).  This can incorrectly acknowledge packets
 	 * to a sender if last desc is self-linked.
 	 */
+	ds = bf->bf_desc;
+
+	/*
+	 * For now, bzero() the descriptor; otherwise stale garbage may be
+	 * misinterpreted if only part of an RX descriptor has been DMAed out.
+	 *
+	 * XXX this is temporary!
+	 */
+	bzero(ds, sizeof(struct ath_desc));
 	if (sc->sc_rxslink)
 		ds->ds_link = bf->bf_daddr;	/* link to self */
 	else
@@ -287,8 +295,26 @@
 		, 0
 	);
 
+	/*
+	 * Sync this descriptor.
+	 */
+	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);
+
+	/*
+	 * This call links the descriptor into the current RX list.
+	 *
+	 * It's hoped that the current RX descriptor isn't anywhere near
+	 * this descriptor, or RX may either hit the self-linked ds_next
+	 * or the NULL ds_next pointer.  Hopefully it won't be in some
+	 * intermediate state.
+	 *
+	 * XXX shouldn't there be a call to sync the descriptor that this
+	 * one points to?
+	 */
 	if (sc->sc_rxlink != NULL)
 		*sc->sc_rxlink = bf->bf_daddr;
+
+	sc->sc_rxlink = &ds->ds_link;
 	return 0;
 }
Index: if_ath_sysctl.c
===================================================================
--- if_ath_sysctl.c	(revision 239916)
+++ if_ath_sysctl.c	(working copy)
@@ -937,6 +937,8 @@
 	    "Number of multicast frames exceeding maximum mcast queue depth");
 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_keymiss", CTLFLAG_RD,
 	    &sc->sc_stats.ast_rx_keymiss, 0, "");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_swfiltered", CTLFLAG_RD,
+	    &sc->sc_stats.ast_tx_swfiltered, 0, "");
 
 	/* Attach the RX phy error array */
 	ath_sysctl_stats_attach_rxphyerr(sc, child);
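[Editorial note, not part of the patch: the if_ath_rx.c hunks above reorder the RX buffer re-arm path so the descriptor is cleared and fully written before it is synced and linked into the live RX list. Below is a condensed, illustrative paraphrase of that ordering; the helper name is hypothetical and the ath_hal_setuprxdesc() argument list is abbreviated, so this is a sketch of the idea rather than the driver code itself.]

/*
 * Illustrative paraphrase of the re-arm ordering after this change;
 * not the driver code.  Error handling and the EDMA path are omitted.
 */
static void
example_rx_rearm_order(struct ath_softc *sc, struct ath_buf *bf,
    struct mbuf *m)
{
	struct ath_desc *ds = bf->bf_desc;

	/* 1. Clear the descriptor so no stale fields can be DMAed out. */
	bzero(ds, sizeof(struct ath_desc));

	/* 2. Fill it in completely (link, buffer address, HAL setup). */
	ds->ds_link = sc->sc_rxslink ? bf->bf_daddr : 0;
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(sc->sc_ah, ds, m->m_len, 0);

	/* 3. Only now flush it for the hardware ... */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/* 4. ... and finally make it reachable from the previous descriptor. */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
}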
Index: if_ath_tx.c
===================================================================
--- if_ath_tx.c	(revision 240180)
+++ if_ath_tx.c	(working copy)
@@ -113,6 +113,9 @@
     struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
 static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
     struct ieee80211_node *ni, struct mbuf *m0, int *tid);
+static struct ath_buf *
+ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
+    struct ath_tid *tid, struct ath_buf *bf);
 
 /*
  * Whether to use the 11n rate scenario functions or not
@@ -1214,6 +1217,11 @@
 	/* Get rid of any previous state */
 	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
 
+	if (bf->bf_node == NULL) {
+		device_printf(sc->sc_dev, "%s: bf=%p; bf->bf_node=NULL!\n",
+		    __func__, bf);
+	}
+
 	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
 	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
 	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);
@@ -2705,7 +2713,12 @@
 
 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
 		atid = &an->an_tid[i];
+
+		/* XXX now with this bzero(), is the explicit field zeroing below still needed? */
+		bzero(atid, sizeof(*atid));
+
 		TAILQ_INIT(&atid->axq_q);
+		TAILQ_INIT(&atid->filtq.axq_q);
 		atid->tid = i;
 		atid->an = an;
 		for (j = 0; j < ATH_TID_MAX_BUFS; j++)
@@ -2715,6 +2728,7 @@
 		atid->sched = 0;
 		atid->hwq_depth = 0;
 		atid->cleanup_inprogress = 0;
+		atid->clrdmask = 1;	/* Always start by setting this bit */
 		if (i == IEEE80211_NONQOS_TID)
 			atid->ac = WME_AC_BE;
 		else
@@ -2756,6 +2770,12 @@
 		return;
 	}
 
+	/* XXX isfiltered shouldn't ever be 1 at this point */
+	if (tid->isfiltered == 1) {
+		device_printf(sc->sc_dev, "%s: filtered?!\n", __func__);
+		return;
+	}
+
 	ath_tx_tid_sched(sc, tid);
 	/* Punt some frames to the hardware if needed */
 	//ath_txq_sched(sc, sc->sc_ac2q[tid->ac]);
@@ -2763,6 +2783,193 @@
 }
 
 /*
+ * Add the given ath_buf to the TID filtered frame list.
+ * This requires the TID be filtered.
+ */
+static void
+ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
+    struct ath_buf *bf)
+{
+
+	ATH_TID_LOCK_ASSERT(sc, tid);
+	if (! tid->isfiltered)
+		device_printf(sc->sc_dev, "%s: not filtered?!\n", __func__);
+
+	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
+
+	/*
+	 * Set the retry counter but don't update the retry bits.
+	 * XXX Should I just set the retry bits here?
+	 */
+	sc->sc_stats.ast_tx_swfiltered++;
+	bf->bf_state.bfs_isretried = 1;
+	bf->bf_state.bfs_retries++;
+	ATH_TXQ_INSERT_TAIL(&tid->filtq, bf, bf_list);
+}
+
+/*
+ * Handle a completed filtered frame from the given TID.
+ * This just enables/pauses the filtered frame state if required
+ * and appends the filtered frame to the filtered queue.
+ */
+static void
+ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
+    struct ath_buf *bf)
+{
+
+	ATH_TID_LOCK_ASSERT(sc, tid);
+
+	if (! tid->isfiltered) {
+		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n",
+		    __func__);
+		tid->isfiltered = 1;
+		ath_tx_tid_pause(sc, tid);
+	}
+
+	if (bf->bf_node == NULL)
+		device_printf(sc->sc_dev,
+		    "%s: bf=%p, bf_node=NULL?!\n",
+		    __func__,
+		    bf);
+
+	/* Add the frame to the filter queue */
+	ath_tx_tid_filt_addbuf(sc, tid, bf);
+}
+
+/*
+ * Complete the filtered frame TX completion.
+ *
+ * If there are no more frames in the hardware queue, unpause/unfilter
+ * the TID if applicable.  Otherwise we will wait for a node PS transition
+ * to unfilter.
+ */
+static void
+ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
+{
+	struct ath_buf *bf;
+
+	ATH_TID_LOCK_ASSERT(sc, tid);
+
+	if (tid->hwq_depth != 0)
+		return;
+
+	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n",
+	    __func__);
+	tid->isfiltered = 0;
+	tid->clrdmask = 1;
+
+	/* XXX this is really quite inefficient */
+	while ((bf = TAILQ_LAST(&tid->filtq.axq_q, ath_bufhead_s)) != NULL) {
+		if (bf->bf_node == NULL)
+			device_printf(sc->sc_dev,
+			    "%s: bf=%p, bf_node=NULL?!\n",
+			    __func__,
+			    bf);
+		ATH_TXQ_REMOVE(&tid->filtq, bf, bf_list);
+		ATH_TXQ_INSERT_HEAD(tid, bf, bf_list);
+	}
+
+	ath_tx_tid_resume(sc, tid);
+}
+
+/*
+ * Called when a single (aggregate or otherwise) frame is completed.
+ *
+ * Returns 0 if the buffer was added to the filtered list (cloned or
+ * otherwise); 1 if the buffer couldn't be added to the filtered list
+ * and the caller should complete/free it instead.
+ */
+static int
+ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
+    struct ath_buf *bf)
+{
+	struct ath_buf *nbf;
+	int retval;
+
+	ATH_TID_LOCK_ASSERT(sc, tid);
+
+	if (bf->bf_node == NULL)
+		device_printf(sc->sc_dev,
+		    "%s: bf=%p, bf_node=NULL?!\n",
+		    __func__,
+		    bf);
+
+	/*
+	 * A busy buffer can't be added to the retry list.
+	 * It needs to be cloned.
+	 */
+	if (bf->bf_flags & ATH_BUF_BUSY) {
+		nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
+		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
+		    "%s: busy buffer clone: %p -> %p\n",
+		    __func__, bf, nbf);
+	} else {
+		nbf = bf;
+	}
+
+	if (nbf == NULL) {
+		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
+		    "%s: busy buffer couldn't be cloned (%p)!\n",
+		    __func__, bf);
+		retval = 1;
+	} else {
+		ath_tx_tid_filt_comp_buf(sc, tid, nbf);
+		retval = 0;
+	}
+	ath_tx_tid_filt_comp_complete(sc, tid);
+
+	return (retval);
+}
+
+static void
+ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
+    struct ath_buf *bf_first, ath_bufhead *bf_q)
+{
+	struct ath_buf *bf, *bf_next, *nbf;
+
+	ATH_TID_LOCK_ASSERT(sc, tid);
+
+	bf = bf_first;
+	while (bf) {
+		bf_next = bf->bf_next;
+		bf->bf_next = NULL;	/* Remove it from the aggr list */
+
+		if (bf->bf_node == NULL)
+			device_printf(sc->sc_dev,
+			    "%s: bf=%p, bf_node=NULL?!\n",
+			    __func__,
+			    bf);
+
+		if (bf->bf_flags & ATH_BUF_BUSY) {
+			nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
+			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
+			    "%s: busy buffer cloned: %p -> %p\n",
+			    __func__, bf, nbf);
+		} else {
+			nbf = bf;
+		}
+
+		/*
+		 * If the buffer couldn't be cloned, add it to bf_q;
+		 * the caller will free the buffer(s) as required.
+		 */
+		if (nbf == NULL) {
+			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
+			    "%s: buffer couldn't be cloned! (%p)\n",
+			    __func__, bf);
+			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
+		} else {
+			ath_tx_tid_filt_comp_buf(sc, tid, nbf);
+		}
+
+		bf = bf_next;
+	}
+
+	ath_tx_tid_filt_comp_complete(sc, tid);
+}
+
+/*
  * Suspend the queue because we need to TX a BAR.
  */
 static void
@@ -2918,7 +3125,81 @@
 	ath_tx_tid_bar_unsuspend(sc, tid);
 }
 
+static void
+ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
+    struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
+{
+
+	ATH_TID_LOCK_ASSERT(sc, tid);
+
+	/*
+	 * If the current TID is running AMPDU, update
+	 * the BAW.
+	 */
+	if (ath_tx_ampdu_running(sc, an, tid->tid) &&
+	    bf->bf_state.bfs_dobaw) {
+		/*
+		 * Only remove the frame from the BAW if it's
+		 * been transmitted at least once; this means
+		 * the frame was in the BAW to begin with.
+		 */
+		if (bf->bf_state.bfs_retries > 0) {
+			ath_tx_update_baw(sc, an, tid, bf);
+			bf->bf_state.bfs_dobaw = 0;
+		}
+		/*
+		 * This has become a non-fatal error now
+		 */
+		if (! bf->bf_state.bfs_addedbaw)
+			device_printf(sc->sc_dev,
+			    "%s: wasn't added: seqno %d\n",
+			    __func__, SEQNO(bf->bf_state.bfs_seqno));
+	}
+	TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
+}
+
+static void
+ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
+    struct ath_tid *tid, struct ath_buf *bf)
+{
+	struct ieee80211_node *ni = &an->an_node;
+	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
+	struct ieee80211_tx_ampdu *tap;
+
+	tap = ath_tx_get_tx_tid(an, tid->tid);
+
+	device_printf(sc->sc_dev,
+	    "%s: node %p: bf=%p: addbaw=%d, dobaw=%d, "
+	    "seqno=%d, retry=%d\n",
+	    __func__, ni, bf,
+	    bf->bf_state.bfs_addedbaw,
+	    bf->bf_state.bfs_dobaw,
+	    SEQNO(bf->bf_state.bfs_seqno),
+	    bf->bf_state.bfs_retries);
+	device_printf(sc->sc_dev,
+	    "%s: node %p: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, isfiltered=%d\n",
+	    __func__, ni, bf,
+	    tid->axq_depth,
+	    tid->hwq_depth,
+	    tid->bar_wait,
+	    tid->isfiltered);
+	device_printf(sc->sc_dev,
+	    "%s: node %p: tid %d: txq_depth=%d, "
+	    "txq_aggr_depth=%d, sched=%d, paused=%d, "
+	    "hwq_depth=%d, incomp=%d, baw_head=%d, "
+	    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
+	    __func__, ni, tid->tid, txq->axq_depth,
+	    txq->axq_aggr_depth, tid->sched, tid->paused,
+	    tid->hwq_depth, tid->incomp, tid->baw_head,
+	    tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
+	    ni->ni_txseqs[tid->tid]);
+
+	/* XXX Dump the frame, see what it is? */
+	ieee80211_dump_pkt(ni->ni_ic,
+	    mtod(bf->bf_m, const uint8_t *),
+	    bf->bf_m->m_len, 0, -1);
+}
+
 /*
  * Free any packets currently pending in the software TX queue.
  *
@@ -2941,14 +3222,14 @@
 	struct ath_buf *bf;
 	struct ieee80211_tx_ampdu *tap;
 	struct ieee80211_node *ni = &an->an_node;
-	int t = 0;
-	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
+	int t;
 
 	tap = ath_tx_get_tx_tid(an, tid->tid);
 
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+	ATH_TID_LOCK_ASSERT(sc, tid);
 
 	/* Walk the queue, free frames */
+	t = 0;
 	for (;;) {
 		bf = TAILQ_FIRST(&tid->axq_q);
 		if (bf == NULL) {
@@ -2956,65 +3237,28 @@
 		}
 
 		if (t == 0) {
-			device_printf(sc->sc_dev,
-			    "%s: node %p: bf=%p: addbaw=%d, dobaw=%d, "
-			    "seqno=%d, retry=%d\n",
-			    __func__, ni, bf,
-			    bf->bf_state.bfs_addedbaw,
-			    bf->bf_state.bfs_dobaw,
-			    SEQNO(bf->bf_state.bfs_seqno),
-			    bf->bf_state.bfs_retries);
-			device_printf(sc->sc_dev,
-			    "%s: node %p: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d\n",
-			    __func__, ni, bf,
-			    tid->axq_depth,
-			    tid->hwq_depth,
-			    tid->bar_wait);
-			device_printf(sc->sc_dev,
-			    "%s: node %p: tid %d: txq_depth=%d, "
-			    "txq_aggr_depth=%d, sched=%d, paused=%d, "
-			    "hwq_depth=%d, incomp=%d, baw_head=%d, "
-			    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
-			    __func__, ni, tid->tid, txq->axq_depth,
-			    txq->axq_aggr_depth, tid->sched, tid->paused,
-			    tid->hwq_depth, tid->incomp, tid->baw_head,
-			    tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
-			    ni->ni_txseqs[tid->tid]);
+			ath_tx_tid_drain_print(sc, an, tid, bf);
+			t = 1;
+		}
 
-			/* XXX Dump the frame, see what it is? */
-			ieee80211_dump_pkt(ni->ni_ic,
-			    mtod(bf->bf_m, const uint8_t *),
-			    bf->bf_m->m_len, 0, -1);
+		ATH_TXQ_REMOVE(tid, bf, bf_list);
+		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
+	}
 
+	/* And now, drain the filtered frame queue */
+	t = 0;
+	for (;;) {
+		bf = TAILQ_FIRST(&tid->filtq.axq_q);
+		if (bf == NULL)
+			break;
+
+		if (t == 0) {
+			ath_tx_tid_drain_print(sc, an, tid, bf);
 			t = 1;
 		}
-
-		/*
-		 * If the current TID is running AMPDU, update
-		 * the BAW.
-		 */
-		if (ath_tx_ampdu_running(sc, an, tid->tid) &&
-		    bf->bf_state.bfs_dobaw) {
-			/*
-			 * Only remove the frame from the BAW if it's
-			 * been transmitted at least once; this means
-			 * the frame was in the BAW to begin with.
-			 */
-			if (bf->bf_state.bfs_retries > 0) {
-				ath_tx_update_baw(sc, an, tid, bf);
-				bf->bf_state.bfs_dobaw = 0;
-			}
-			/*
-			 * This has become a non-fatal error now
-			 */
-			if (! bf->bf_state.bfs_addedbaw)
-				device_printf(sc->sc_dev,
-				    "%s: wasn't added: seqno %d\n",
-				    __func__, SEQNO(bf->bf_state.bfs_seqno));
-		}
-		ATH_TXQ_REMOVE(tid, bf, bf_list);
-		TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
+		ATH_TXQ_REMOVE(&tid->filtq, bf, bf_list);
+		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
 	}
 
 	/*
@@ -3128,9 +3372,29 @@
 	    __func__, bf, fail, atid->hwq_depth - 1);
 
 	atid->hwq_depth--;
+
+	if (atid->isfiltered)
+		device_printf(sc->sc_dev, "%s: isfiltered=1, normal_comp?\n",
+		    __func__);
+
 	if (atid->hwq_depth < 0)
 		device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
 		    __func__, atid->hwq_depth);
+
+	/*
+	 * If the queue is filtered, potentially mark it as complete
+	 * and reschedule it as needed.
+	 *
+	 * This is required as there may be a subsequent TX descriptor
+	 * for this end-node that has CLRDMASK set, so it's quite possible
+	 * that a filtered frame will be followed by a non-filtered
+	 * (complete or otherwise) frame.
+	 *
+	 * XXX should we do this before we complete the frame?
+	 */
+	if (atid->isfiltered)
+		ath_tx_tid_filt_comp_complete(sc, atid);
+
 	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
 
 	/*
@@ -3203,6 +3467,16 @@
 	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
 
 	/*
+	 * Move the filtered frames to the TX queue, before
+	 * we run off and discard/process things.
+	 */
+	/* XXX this is really quite inefficient */
+	while ((bf = TAILQ_LAST(&atid->filtq.axq_q, ath_bufhead_s)) != NULL) {
+		ATH_TXQ_REMOVE(&atid->filtq, bf, bf_list);
+		ATH_TXQ_INSERT_HEAD(atid, bf, bf_list);
+	}
+
+	/*
 	 * Update the frames in the software TX queue:
 	 *
 	 * + Discard retry frames in the queue
@@ -3346,6 +3620,12 @@
 	bf->bf_m = NULL;
 	bf->bf_node = NULL;
 	ath_freebuf(sc, bf);
+
+	if (nbf->bf_node == NULL)
+		device_printf(sc->sc_dev,
+		    "%s: bf=%p, bf_node=NULL?!\n",
+		    __func__,
+		    nbf);
 
 	return nbf;
 }
@@ -3499,6 +3779,11 @@
 	ath_tx_set_retry(sc, bf);
 	bf->bf_next = NULL;		/* Just to make sure */
 
+	/* Clear the aggregate state */
+	bf->bf_state.bfs_aggr = 0;
+	bf->bf_state.bfs_ndelim = 0;	/* ??? needed? */
+	bf->bf_state.bfs_nframes = 1;
+
 	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
 	return 0;
 }
@@ -3669,6 +3954,9 @@
 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
 	    __func__, atid->hwq_depth);
 
+	TAILQ_INIT(&bf_q);
+	TAILQ_INIT(&bf_cq);
+
 	/* The TID state is kept behind the TXQ lock */
 	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
@@ -3681,12 +3969,46 @@
 	 * Punt cleanup to the relevant function, not our problem now
 	 */
 	if (atid->cleanup_inprogress) {
+		if (atid->isfiltered)
+			device_printf(sc->sc_dev,
+			    "%s: isfiltered=1, normal_comp?\n",
+			    __func__);
 		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
 		ath_tx_comp_cleanup_aggr(sc, bf_first);
 		return;
 	}
 
 	/*
+	 * If the frame is filtered, transition to filtered frame
+	 * mode and add this to the filtered frame list.
+	 *
+	 * XXX TODO: figure out how this interoperates with
+	 * BAR, pause and cleanup states.
+ */ + if ((ts.ts_status & HAL_TXERR_FILT) || + (ts.ts_status != 0 && atid->isfiltered)) { + if (fail != 0) + device_printf(sc->sc_dev, + "%s: isfiltered=1, fail=%d\n", __func__, fail); + ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); + + /* Remove from BAW */ + TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { + if (bf->bf_state.bfs_dobaw) { + ath_tx_update_baw(sc, an, atid, bf); + if (! bf->bf_state.bfs_addedbaw) + device_printf(sc->sc_dev, + "%s: wasn't added: seqno %d\n", + __func__, + SEQNO(bf->bf_state.bfs_seqno)); + } + bf->bf_state.bfs_dobaw = 0; + } + + goto finish_send_bar; + } + + /* * Take a copy; this may be needed -after- bf_first * has been completed and freed. */ @@ -3713,8 +4035,6 @@ return; } - TAILQ_INIT(&bf_q); - TAILQ_INIT(&bf_cq); tap = ath_tx_get_tx_tid(an, tid); /* @@ -3871,6 +4191,21 @@ ath_tx_tid_sched(sc, atid); /* + * If the queue is filtered, re-schedule as required. + * + * This is required as there may be a subsequent TX descriptor + * for this end-node that has CLRDMASK set, so it's quite possible + * that a filtered frame will be followed by a non-filtered + * (complete or otherwise) frame. + * + * XXX should we do this before we complete the frame? + */ + if (atid->isfiltered) + ath_tx_tid_filt_comp_complete(sc, atid); + +finish_send_bar: + + /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) @@ -3940,6 +4275,10 @@ * function in net80211, etc. */ if (atid->cleanup_inprogress) { + if (atid->isfiltered) + device_printf(sc->sc_dev, + "%s: isfiltered=1, normal_comp?\n", + __func__); ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", __func__); @@ -3948,6 +4287,57 @@ } /* + * XXX TODO: how does cleanup, BAR and filtered frame handling + * overlap? + * + * If the frame is filtered OR if it's any failure but + * the TID is filtered, the frame must be added to the + * filtered frame list. + * + * However - a busy buffer can't be added to the filtered + * list as it will end up being recycled without having + * been made available for the hardware. + */ + if ((ts->ts_status & HAL_TXERR_FILT) || + (ts->ts_status != 0 && atid->isfiltered)) { + int freeframe; + + if (fail != 0) + device_printf(sc->sc_dev, + "%s: isfiltered=1, fail=%d\n", + __func__, + fail); + freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); + if (freeframe) { + /* Remove from BAW */ + if (bf->bf_state.bfs_dobaw) { + ath_tx_update_baw(sc, an, atid, bf); + if (! bf->bf_state.bfs_addedbaw) + device_printf(sc->sc_dev, + "%s: wasn't added: seqno %d\n", + __func__, SEQNO(bf->bf_state.bfs_seqno)); + } + bf->bf_state.bfs_dobaw = 0; + } + + /* + * Send BAR if required + */ + if (ath_tx_tid_bar_tx_ready(sc, atid)) + ath_tx_tid_bar_tx(sc, atid); + + ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); + /* + * If freeframe is set, then the frame couldn't be + * cloned and bf is still valid. Just complete/free it. + */ + if (freeframe) + ath_tx_default_comp(sc, bf, fail); + + + return; + } + /* * Don't bother with the retry check if all frames * are being failed (eg during queue deletion.) */ @@ -3975,6 +4365,19 @@ } /* + * If the queue is filtered, re-schedule as required. + * + * This is required as there may be a subsequent TX descriptor + * for this end-node that has CLRDMASK set, so it's quite possible + * that a filtered frame will be followed by a non-filtered + * (complete or otherwise) frame. + * + * XXX should we do this before we complete the frame? 
+ */ + if (atid->isfiltered) + ath_tx_tid_filt_comp_complete(sc, atid); + + /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) Index: if_athioctl.h =================================================================== --- if_athioctl.h (revision 239916) +++ if_athioctl.h (working copy) @@ -162,8 +162,9 @@ u_int32_t ast_tx_aggr_fail; /* aggregate TX failed */ u_int32_t ast_tx_mcastq_overflow; /* multicast queue overflow */ u_int32_t ast_rx_keymiss; + u_int32_t ast_tx_swfiltered; - u_int32_t ast_pad[16]; + u_int32_t ast_pad[15]; }; #define SIOCGATHSTATS _IOWR('i', 137, struct ifreq) Index: if_athvar.h =================================================================== --- if_athvar.h (revision 239916) +++ if_athvar.h (working copy) @@ -107,6 +107,12 @@ int ac; /* which AC gets this trafic */ int hwq_depth; /* how many buffers are on HW */ + struct { + TAILQ_HEAD(,ath_buf) axq_q; /* filtered queue */ + u_int axq_depth; /* SW queue depth */ + char axq_name[48]; /* lock name */ + } filtq; + /* * Entry on the ath_txq; when there's traffic * to send @@ -114,9 +120,16 @@ TAILQ_ENTRY(ath_tid) axq_qelem; int sched; int paused; /* >0 if the TID has been paused */ + + /* + * These are flags - perhaps later collapse + * down to a single uint32_t ? + */ int addba_tx_pending; /* TX ADDBA pending */ int bar_wait; /* waiting for BAR */ int bar_tx; /* BAR TXed */ + int isfiltered; /* is this node currently filtered */ + int clrdmask; /* has clrdmask been set */ /* * Is the TID being cleaned up after a transition @@ -336,6 +349,8 @@ #define ATH_TID_LOCK_ASSERT(_sc, _tid) \ ATH_TXQ_LOCK_ASSERT((_sc)->sc_ac2q[(_tid)->ac]) +#define ATH_TID_UNLOCK_ASSERT(_sc, _tid) \ + ATH_TXQ_UNLOCK_ASSERT((_sc)->sc_ac2q[(_tid)->ac]) #define ATH_TXQ_INSERT_HEAD(_tq, _elm, _field) do { \ TAILQ_INSERT_HEAD(&(_tq)->axq_q, (_elm), _field); \