Index: sys/dev/fxp/if_fxp.c
===================================================================
--- sys/dev/fxp/if_fxp.c	(revision 206204)
+++ sys/dev/fxp/if_fxp.c	(working copy)
@@ -768,15 +768,14 @@
 			goto fail;
 		}
 	}
+	/*
+	 * Pre-allocate our RX DMA maps.
+	 */
 	error = bus_dmamap_create(sc->fxp_rxmtag, 0, &sc->spare_map);
 	if (error) {
 		device_printf(dev, "can't create spare DMA map\n");
 		goto fail;
 	}
-
-	/*
-	 * Pre-allocate our receive buffers.
-	 */
 	sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
 	for (i = 0; i < FXP_NRFABUFS; i++) {
 		rxp = &sc->fxp_desc.rx_list[i];
@@ -785,11 +784,6 @@
 			device_printf(dev, "can't create DMA map for RX\n");
 			goto fail;
 		}
-		if (fxp_new_rfabuf(sc, rxp) != 0) {
-			error = ENOMEM;
-			goto fail;
-		}
-		fxp_add_rfabuf(sc, rxp);
 	}
 
 	/*
@@ -957,27 +951,24 @@
 	if (sc->fxp_rxmtag) {
 		for (i = 0; i < FXP_NRFABUFS; i++) {
 			rxp = &sc->fxp_desc.rx_list[i];
-			if (rxp->rx_mbuf != NULL) {
-				bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
-				    BUS_DMASYNC_POSTREAD);
-				bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map);
-				m_freem(rxp->rx_mbuf);
+			if (rxp->rx_map != NULL) {
+				bus_dmamap_destroy(sc->fxp_rxmtag, rxp->rx_map);
+				rxp->rx_map = NULL;
 			}
-			bus_dmamap_destroy(sc->fxp_rxmtag, rxp->rx_map);
 		}
-		bus_dmamap_destroy(sc->fxp_rxmtag, sc->spare_map);
+		if (sc->spare_map != NULL) {
+			bus_dmamap_destroy(sc->fxp_rxmtag, sc->spare_map);
+			sc->spare_map = NULL;
+		}
 		bus_dma_tag_destroy(sc->fxp_rxmtag);
 	}
 	if (sc->fxp_txmtag) {
 		for (i = 0; i < FXP_NTXCB; i++) {
 			txp = &sc->fxp_desc.tx_list[i];
-			if (txp->tx_mbuf != NULL) {
-				bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map,
-				    BUS_DMASYNC_POSTWRITE);
-				bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map);
-				m_freem(txp->tx_mbuf);
+			if (txp->tx_map != NULL) {
+				bus_dmamap_destroy(sc->fxp_txmtag, txp->tx_map);
+				txp->tx_map = NULL;
 			}
-			bus_dmamap_destroy(sc->fxp_txmtag, txp->tx_map);
 		}
 		bus_dma_tag_destroy(sc->fxp_txmtag);
 	}
@@ -1859,13 +1850,13 @@
 	struct mbuf *m;
 	struct fxp_rx *rxp;
 	struct fxp_rfa *rfa;
-	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
-	int rx_npkts;
+	int rnr, rx_npkts, total_len;
 	uint16_t status;
 
 	rx_npkts = 0;
 	FXP_LOCK_ASSERT(sc, MA_OWNED);
 
+	rnr = (statack & (FXP_SCB_STATACK_RNR | FXP_SCB_STATACK_SWI)) ? 1 : 0;
 	if (rnr)
 		sc->rnr++;
 #ifdef DEVICE_POLLING
@@ -1901,7 +1892,8 @@
 	/*
 	 * Just return if nothing happened on the receive side.
 	 */
-	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
+	if (!rnr && (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR |
+	    FXP_SCB_STATACK_SWI)) == 0)
 		return (rx_npkts);
 
 	/*
@@ -1922,7 +1914,7 @@
 		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
 		    RFA_ALIGNMENT_FUDGE);
 		bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
-		    BUS_DMASYNC_POSTREAD);
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 #ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
 		if (count >= 0 && count-- == 0) {
 			if (rnr) {
@@ -1936,6 +1928,8 @@
 #endif /* DEVICE_POLLING */
 
 		status = le16toh(rfa->rfa_status);
+		if ((status & FXP_RFA_STATUS_RNR) != 0)
+			rnr++;
 		if ((status & FXP_RFA_STATUS_C) == 0)
 			break;
 
@@ -1944,33 +1938,35 @@
 		 */
 		sc->fxp_desc.rx_head = rxp->rx_next;
 
+		total_len = le16toh(rfa->actual_size) & 0x3fff;
+		if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
+		    (ifp->if_capenable & IFCAP_RXCSUM) != 0) {
+			/* Adjust for appended checksum bytes. */
+			total_len -= 2;
+		}
 		/*
-		 * Add a new buffer to the receive chain.
-		 * If this fails, the old buffer is recycled
-		 * instead.
+		 * If the controller encounters an RFD that received a
+		 * bad frame, it skips to the next available RFD and
+		 * reuses the original RFD. The driver should likewise
+		 * reuse the RFD; otherwise the receive buffer and
+		 * status fall out of synchronization, which in turn
+		 * results in unexpected behavior.
 		 */
-		if (fxp_new_rfabuf(sc, rxp) == 0) {
-			int total_len;
-
-			/*
-			 * Fetch packet length (the top 2 bits of
-			 * actual_size are flags set by the controller
-			 * upon completion), and drop the packet in case
-			 * of bogus length or CRC errors.
-			 */
-			total_len = le16toh(rfa->actual_size) & 0x3fff;
-			if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
-			    (ifp->if_capenable & IFCAP_RXCSUM) != 0) {
-				/* Adjust for appended checksum bytes. */
-				total_len -= 2;
-			}
-			if (total_len < sizeof(struct ether_header) ||
-			    total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
-			    sc->rfa_size || status & FXP_RFA_STATUS_CRC) {
-				m_freem(m);
-				continue;
-			}
-
+		if ((status & FXP_RFA_STATUS_CRC) != 0) {
+			fxp_add_rfabuf(sc, rxp);
+			continue;
+		}
+		/*
+		 * Fetch packet length (the top 2 bits of
+		 * actual_size are flags set by the controller
+		 * upon completion), and drop the packet in case
+		 * of a bogus length. For sane frames, add a new
+		 * buffer to the receive chain. If this fails,
+		 * the old buffer is recycled instead.
+		 */
+		if (total_len >= sizeof(struct ether_header) &&
+		    total_len <= (MCLBYTES - RFA_ALIGNMENT_FUDGE -
+		    sc->rfa_size) && fxp_new_rfabuf(sc, rxp) == 0) {
 			m->m_pkthdr.len = m->m_len = total_len;
 			m->m_pkthdr.rcvif = ifp;
 
@@ -2134,6 +2130,7 @@
 {
 	struct ifnet *ifp = sc->ifp;
 	struct fxp_tx *txp;
+	struct fxp_rx *rxp;
 	int i;
 
 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
@@ -2174,6 +2171,23 @@
 	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 	sc->tx_queued = 0;
+	/*
+	 * Release RX buffers.
+	 */
+	if (sc->fxp_desc.rx_head != NULL) {
+		for (i = 0; i < FXP_NRFABUFS; i++) {
+			rxp = &sc->fxp_desc.rx_list[i];
+			if (rxp->rx_mbuf != NULL) {
+				bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
+				    BUS_DMASYNC_POSTREAD |
+				    BUS_DMASYNC_POSTWRITE);
+				bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map);
+				m_freem(rxp->rx_mbuf);
+				rxp->rx_mbuf = NULL;
+			}
+		}
+		sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
+	}
 }
 
 /*
@@ -2224,6 +2238,7 @@
 	struct fxp_cb_ias *cb_ias;
 	struct fxp_cb_tx *tcbp;
 	struct fxp_tx *txp;
+	struct fxp_rx *rxp;
 	int i, prm;
 
 	FXP_LOCK_ASSERT(sc, MA_OWNED);
@@ -2360,7 +2375,10 @@
 	cbp->fc_delay_lsb = 0;
 	cbp->fc_delay_msb = 0x40;
 	cbp->pri_fc_thresh = 3;
-	cbp->tx_fc_dis = 0;
+	if (sc->revision == FXP_REV_82557)
+		cbp->tx_fc_dis = 0;
+	else
+		cbp->tx_fc_dis = 1;
 	cbp->rx_fc_restop = 0;
 	cbp->rx_fc_restart = 0;
 	cbp->fc_filter = 0;
@@ -2450,9 +2468,16 @@
 	/*
 	 * Initialize receiver buffer area - RFA.
 	 */
-	fxp_scb_wait(sc);
-	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
-	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
+	sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
+	for (i = 0; i < FXP_NRFABUFS; i++) {
+		rxp = &sc->fxp_desc.rx_list[i];
+		if (fxp_new_rfabuf(sc, rxp) != 0) {
+			device_printf(sc->dev, "no memory for Rx buffers\n");
+			fxp_stop(sc);
+			return;
+		}
+		fxp_add_rfabuf(sc, rxp);
+	}
 
 	/*
 	 * Set current media.
@@ -2475,7 +2500,15 @@
 		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
 	else
 #endif /* DEVICE_POLLING */
-	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
+	/*
+	 * Request a software-generated interrupt that will be used to
+	 * (re)start the RU processing. If we directed the chip to start
+	 * receiving from the head of the queue now, instead of letting
+	 * the interrupt handler drain all received packets first, we
+	 * would risk having it overwrite mbuf clusters that are still
+	 * being processed or have already been returned to the pool.
+	 */
+	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_SWI);
 
 	/*
 	 * Start stats updater.
@@ -2592,7 +2625,7 @@
 	/* Map the RFA into DMA memory. */
 	error = bus_dmamap_load(sc->fxp_rxmtag, sc->spare_map, rfa,
 	    MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
-	    &rxp->rx_addr, 0);
+	    &rxp->rx_addr, BUS_DMA_NOWAIT);
 	if (error) {
 		m_freem(m);
 		return (error);
@@ -2628,10 +2661,12 @@
 		le32enc(&p_rfa->link_addr, rxp->rx_addr);
 		p_rfa->rfa_control = 0;
 		bus_dmamap_sync(sc->fxp_rxmtag, p_rx->rx_map,
-		    BUS_DMASYNC_PREWRITE);
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 	} else {
 		rxp->rx_next = NULL;
 		sc->fxp_desc.rx_head = rxp;
+		bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 	}
 	sc->fxp_desc.rx_tail = rxp;
 }
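Note on the RFD-reuse hunk in fxp_intr_body(): the recycling policy is easier to see outside the driver. The stand-alone C sketch below is a simplified model, not fxp(4) code; struct rx_desc, new_buf(), requeue() and the STATUS_CRC bit value are all made up for illustration, with new_buf() and requeue() standing in for fxp_new_rfabuf() and fxp_add_rfabuf(). On a CRC-errored frame the descriptor keeps its current buffer and is simply handed back, mirroring what the controller does; a fresh buffer is swapped in only for a valid, sane-length frame, and an allocation failure falls back to recycling.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define STATUS_CRC	0x0800	/* stand-in for FXP_RFA_STATUS_CRC */
#define MIN_LEN		14	/* sizeof(struct ether_header) */
#define MAX_LEN		1518	/* stand-in for the cluster-size bound */

struct rx_desc {
	unsigned	status;	/* completion status from the "hardware" */
	int		len;	/* received frame length */
	void		*buf;	/* receive buffer currently owned */
};

/* Swap in a fresh buffer; may fail, like fxp_new_rfabuf(). */
static bool
new_buf(struct rx_desc *d)
{
	void *p = malloc(MAX_LEN);

	if (p == NULL)
		return (false);
	d->buf = p;
	return (true);
}

/* Hand the descriptor back to the "hardware", like fxp_add_rfabuf(). */
static void
requeue(struct rx_desc *d)
{

	d->status = 0;
}

/* Returns a buffer to pass up the stack, or NULL if the RFD was reused. */
static void *
rx_complete(struct rx_desc *d)
{
	void *done = d->buf;

	if (d->status & STATUS_CRC) {
		/* Bad frame: the controller reused the RFD, so must we. */
		requeue(d);
		return (NULL);
	}
	if (d->len < MIN_LEN || d->len > MAX_LEN || !new_buf(d)) {
		/* Bogus length, or no memory: recycle the old buffer. */
		requeue(d);
		return (NULL);
	}
	/* Good frame with a fresh buffer in place: deliver the old one. */
	requeue(d);
	return (done);
}

int
main(void)
{
	struct rx_desc d = { .len = 64 };
	void *m;

	if (!new_buf(&d))
		return (1);
	d.status = STATUS_CRC;		/* simulate a bad frame */
	printf("CRC frame delivered: %s\n",
	    rx_complete(&d) != NULL ? "yes" : "no");
	d.status = 0;			/* simulate a good frame */
	m = rx_complete(&d);
	printf("good frame delivered: %s\n", m != NULL ? "yes" : "no");
	free(m);
	free(d.buf);
	return (0);
}

Compiling and running this prints "no" for the CRC case and "yes" for the good frame, which is exactly the split the patched RX loop makes.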
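Note on the FXP_SCB_INTR_SWI change in fxp_init_body(): the ordering argument in the comment can also be shown with a toy model. This is a hypothetical simulation, not driver code; ru_start() and drain() merely mimic the receive unit and the interrupt handler. Restarting the "RU" at the head of the ring before the handler has drained it clobbers frames that were never delivered; deferring the restart until after the drain, which is what the software interrupt arranges, removes the overlap.

#include <stdio.h>

#define NSLOTS	3

static char ring[NSLOTS][16];

/* "RU": DMA nframes new frames into the ring, starting at slot 0. */
static void
ru_start(const char *tag, int nframes)
{
	int i;

	for (i = 0; i < nframes && i < NSLOTS; i++)
		snprintf(ring[i], sizeof(ring[i]), "%s.%d", tag, i);
}

/* "Interrupt handler": pass every completed frame up the stack. */
static void
drain(const char *when)
{
	int i;

	for (i = 0; i < NSLOTS; i++)
		printf("%s: delivered %s\n", when, ring[i]);
}

int
main(void)
{

	/* Wrong order: the ring is full, but the RU is restarted first. */
	ru_start("old", NSLOTS);
	ru_start("new", 2);		/* overwrites old.0 and old.1 */
	drain("eager restart");

	/* Right order: drain first, then restart; nothing is lost. */
	ru_start("old", NSLOTS);
	drain("deferred restart");
	ru_start("new", 2);
	return (0);
}

In the eager case the handler delivers new.0 and new.1 in place of the undelivered old.0 and old.1; in the deferred case all three old frames come up intact before the new ones arrive.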