Index: if_bce.c =================================================================== --- if_bce.c (revision 204734) +++ if_bce.c (working copy) @@ -278,6 +278,8 @@ static int bce_probe (device_t); static int bce_attach (device_t); static int bce_detach (device_t); +static int bce_suspend (device_t); +static int bce_resume (device_t); static int bce_shutdown (device_t); @@ -410,19 +412,19 @@ static int bce_ioctl (struct ifnet *, u_long, caddr_t); static void bce_watchdog (struct bce_softc *); static int bce_ifmedia_upd (struct ifnet *); -static void bce_ifmedia_upd_locked (struct ifnet *); +static int bce_ifmedia_upd_locked (struct ifnet *); static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *); static void bce_init_locked (struct bce_softc *); static void bce_init (void *); static void bce_mgmt_init_locked (struct bce_softc *sc); -static void bce_init_ctx (struct bce_softc *); +static int bce_init_ctx (struct bce_softc *); static void bce_get_mac_addr (struct bce_softc *); static void bce_set_mac_addr (struct bce_softc *); -static void bce_phy_intr (struct bce_softc *); +static void bce_phy_intr (struct bce_softc *, u32, u32); static inline u16 bce_get_hw_rx_cons(struct bce_softc *); -static void bce_rx_intr (struct bce_softc *); -static void bce_tx_intr (struct bce_softc *); +static void bce_rx_intr (struct bce_softc *, u16, int); +static void bce_tx_intr (struct bce_softc *, u16); static void bce_disable_intr (struct bce_softc *); static void bce_enable_intr (struct bce_softc *, int); @@ -432,6 +434,8 @@ static void bce_tick (void *); static void bce_pulse (void *); static void bce_add_sysctls (struct bce_softc *); +static void bce_setlinkspeed (struct bce_softc *); +static void bce_setwol (struct bce_softc *); /****************************************************************************/ @@ -445,8 +449,8 @@ DEVMETHOD(device_shutdown, bce_shutdown), /* Supported by device interface but not used here. 
*/ /* DEVMETHOD(device_identify, bce_identify), */ -/* DEVMETHOD(device_suspend, bce_suspend), */ -/* DEVMETHOD(device_resume, bce_resume), */ + DEVMETHOD(device_suspend, bce_suspend), + DEVMETHOD(device_resume, bce_resume), /* DEVMETHOD(device_quiesce, bce_quiesce), */ /* Bus interface (bus_if.h) */ @@ -655,20 +659,24 @@ /* Check if PCI-X capability is enabled. */ if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { - if (reg != 0) + if (reg != 0) { sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG; + sc->bce_pcixcap = reg; + } } /* Check if PCIe capability is enabled. */ if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { if (reg != 0) { - u16 link_status = pci_read_config(dev, reg + 0x12, 2); + u16 link_status = pci_read_config(dev, + reg + PCIR_EXPRESS_LINK_STA, 2); DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = 0x%08X\n", link_status); sc->link_speed = link_status & 0xf; sc->link_width = (link_status >> 4) & 0x3f; sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG; sc->bce_flags |= BCE_PCIE_FLAG; + sc->bce_expcap = reg; } } @@ -704,7 +712,7 @@ struct bce_softc *sc; struct ifnet *ifp; u32 val; - int error, rid, rc = 0; + int cap, error, rid, rc = 0; sc = device_get_softc(dev); sc->bce_dev = dev; @@ -783,8 +791,8 @@ if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG; + sc->bce_intr = bce_intr; sc->bce_irq_rid = 1; - sc->bce_intr = bce_intr; } } @@ -887,6 +895,9 @@ /* Check if any management firwmare is running. */ val = bce_shmem_rd(sc, BCE_PORT_FEATURE); + if ((val & BCE_PORT_FEATURE_WOL_ENABLED) && + pci_find_extcap(sc->bce_dev, PCIY_PMG, &cap) == 0) + sc->bce_flags |= BCE_WOL_FLAG; if (val & BCE_PORT_FEATURE_ASF_ENABLED) { sc->bce_flags |= BCE_MFW_ENABLE_FLAG; @@ -1007,20 +1018,35 @@ sc->bce_rx_quick_cons_trip = 1; sc->bce_rx_ticks_int = 0; sc->bce_rx_ticks = 0; + + sc->bce_comp_prod_trip_int = 0; + sc->bce_comp_prod_trip = 0; #else /* Improve throughput at the expense of increased latency. 
*/ - sc->bce_tx_quick_cons_trip_int = 20; - sc->bce_tx_quick_cons_trip = 20; - sc->bce_tx_ticks_int = 80; - sc->bce_tx_ticks = 80; + sc->bce_tx_quick_cons_trip_int = 64; + sc->bce_tx_quick_cons_trip = 64; + sc->bce_tx_ticks_int = 1000; /* 1ms */ + sc->bce_tx_ticks = 1000; /* 1ms */ - sc->bce_rx_quick_cons_trip_int = 6; - sc->bce_rx_quick_cons_trip = 6; - sc->bce_rx_ticks_int = 18; - sc->bce_rx_ticks = 18; + sc->bce_rx_quick_cons_trip_int = 32; + sc->bce_rx_quick_cons_trip = 32; + sc->bce_rx_ticks_int = 100; /* 100us */ + sc->bce_rx_ticks = 100; /* 100us */ + + sc->bce_comp_prod_trip_int = 0; + sc->bce_comp_prod_trip = 0; #endif + /* Not required for L2 only driver. */ + sc->bce_com_ticks_int = 0; + sc->bce_com_ticks = 0; + sc->bce_cmd_ticks_int = 0; + sc->bce_cmd_ticks = 0; - /* Update statistics once every second. */ + /* + * Update statistics once every second. + * It seems the resoultion of timer is 256us so lower + * 8bit should always be 0. + */ sc->bce_stats_ticks = 1000000 & 0xffff00; /* Find the media type for the adapter. */ @@ -1064,7 +1090,9 @@ ifp->if_hwassist = BCE_IF_HWASSIST; ifp->if_capabilities = BCE_IF_CAPABILITIES; } - + if ((sc->bce_flags & BCE_WOL_FLAG) != 0 && + (sc->bce_flags & BCE_NO_WOL_FLAG) == 0) + ifp->if_capabilities |= IFCAP_WOL_MAGIC; ifp->if_capenable = ifp->if_capabilities; /* @@ -1230,26 +1258,194 @@ static int bce_shutdown(device_t dev) { - struct bce_softc *sc = device_get_softc(dev); - u32 msg; - DBENTER(BCE_VERBOSE); + return (bce_suspend(dev)); +} + +static int +bce_resume(device_t dev) +{ + struct bce_softc *sc; + struct ifnet *ifp; + u32 val; + u16 pmstat; + int cap; + + sc = device_get_softc(dev); BCE_LOCK(sc); + if (pci_find_extcap(sc->bce_dev, PCIY_PMG, &cap) == 0) { + /* Disable PME and clear PME status. 
*/ + pmstat = pci_read_config(sc->bce_dev, + cap + PCIR_POWER_STATUS, 2); + if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { + pmstat &= ~PCIM_PSTAT_PMEENABLE; + pci_write_config(sc->bce_dev, + cap + PCIR_POWER_STATUS, pmstat, 2); + } + } + /* Disable WOL. */ + val = REG_RD(sc, BCE_EMAC_MODE); + val |= BCE_EMAC_MODE_MPKT_RCVD | BCE_EMAC_MODE_ACPI_RCVD; + val &= ~BCE_EMAC_MODE_MPKT; + REG_WR(sc, BCE_EMAC_MODE, val); + val = REG_RD(sc, BCE_RPM_CONFIG); + val &= ~BCE_RPM_CONFIG_ACPI_ENA; + REG_WR(sc, BCE_RPM_CONFIG, val); + ifp = sc->bce_ifp; + if ((ifp->if_flags & IFF_UP) != 0) { + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + bce_init_locked(sc); + } + BCE_UNLOCK(sc); + return (0); +} + + +static int +bce_suspend(device_t dev) +{ + struct bce_softc *sc; + struct ifnet *ifp; + u32 msg; + + sc = device_get_softc(dev); + BCE_LOCK(sc); bce_stop(sc); + ifp = sc->bce_ifp; if (sc->bce_flags & BCE_NO_WOL_FLAG) msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; + else if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) + msg = BCE_DRV_MSG_CODE_SUSPEND_WOL; else - msg = BCE_DRV_MSG_CODE_UNLOAD; + msg = BCE_DRV_MSG_CODE_SUSPEND_NO_WOL; bce_reset(sc, msg); + bce_setwol(sc); BCE_UNLOCK(sc); + return (0); +} - DBEXIT(BCE_VERBOSE); - return (0); +static void +bce_setlinkspeed(struct bce_softc *sc) +{ + struct mii_data *mii; + int aneg, i; + + if ((sc->bce_flags & BCE_NO_WOL_FLAG) != 0 || + (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) != 0) + return; + mii = device_get_softc(sc->bce_miibus); + mii_pollstat(mii); + aneg = 0; + if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == + (IFM_ACTIVE | IFM_AVALID)) { + switch IFM_SUBTYPE(mii->mii_media_active) { + case IFM_10_T: + case IFM_100_TX: + return; + case IFM_1000_T: + aneg++; + break; + default: + break; + } + } + bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, MII_100T2CR, 0); + bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, + MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); + bce_miibus_write_reg(sc->bce_dev, 
sc->bce_phy_addr, + MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); + DELAY(1000); + if (aneg != 0) { + /* + * Poll link state until bce(4) get a 10/100Mbps link. + */ + for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { + mii_pollstat(mii); + if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) + == (IFM_ACTIVE | IFM_AVALID)) { + switch (IFM_SUBTYPE(mii->mii_media_active)) { + case IFM_10_T: + case IFM_100_TX: + bce_miibus_statchg(sc->bce_dev); + return; + default: + break; + } + } + BCE_UNLOCK(sc); + pause("bcelnk", hz); + BCE_LOCK(sc); + } + if (i == MII_ANEGTICKS_GIGE) + device_printf(sc->bce_dev, + "establishing a link failed, WOL may not work!"); + } + /* + * No link, force EMAC to have 100Mbps, full-duplex link. + * This is the last resort and may/may not work. + */ + mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; + mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; + bce_miibus_statchg(sc->bce_dev); } +static void +bce_setwol(struct bce_softc *sc) +{ + struct ifnet *ifp; + u32 msg, val; + u16 pmstat; + int cap, i; + + ifp = sc->bce_ifp; + if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { + /* Establish 10/100Mbps link. */ + bce_setlinkspeed(sc); + bce_set_mac_addr(sc); + val = REG_RD(sc, BCE_EMAC_MODE); + val |= BCE_EMAC_MODE_MPKT_RCVD | BCE_EMAC_MODE_ACPI_RCVD | + BCE_EMAC_MODE_MPKT; + REG_WR(sc, BCE_EMAC_MODE, val); + /* Receive all multicast. */ + for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) + REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), + 0xFFFFFFFF); + REG_WR(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_SORT_MODE); + REG_BARRIER(sc, BCE_EMAC_RX_MODE); + val = 1 | BCE_RPM_SORT_USER0_BC_EN | BCE_RPM_SORT_USER0_MC_EN; + REG_WR(sc, BCE_RPM_SORT_USER0, 0x0); + REG_BARRIER(sc, BCE_RPM_SORT_USER0); + REG_WR(sc, BCE_RPM_SORT_USER0, val); + REG_BARRIER(sc, BCE_RPM_SORT_USER0); + REG_WR(sc, BCE_RPM_SORT_USER0, val | BCE_RPM_SORT_USER0_ENA); + REG_BARRIER(sc, BCE_RPM_SORT_USER0); + /* Enable EMAC and RPM for WOL. 
*/ + REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, + BCE_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | + BCE_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | + BCE_MISC_ENABLE_SET_BITS_EMAC_ENABLE); + val = REG_RD(sc, BCE_RPM_CONFIG); + val &= ~BCE_RPM_CONFIG_ACPI_ENA; + REG_WR(sc, BCE_RPM_CONFIG, val); + msg = BCE_DRV_MSG_CODE_SUSPEND_WOL; + } else + msg = BCE_DRV_MSG_CODE_SUSPEND_NO_WOL; + if ((sc->bce_flags & BCE_NO_WOL_FLAG) == 0) + bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT3 | msg); + if (pci_find_extcap(sc->bce_dev, PCIY_PMG, &cap) != 0) + return; + /* Request PME. */ + pmstat = pci_read_config(sc->bce_dev, cap + PCIR_POWER_STATUS, 2); + pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); + if ((ifp->if_capenable & IFCAP_WOL) != 0) + pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; + pci_write_config(sc->bce_dev, cap + PCIR_POWER_STATUS, pmstat, 2); +} + + #ifdef BCE_DEBUG /****************************************************************************/ /* Register read. */ @@ -2178,7 +2374,7 @@ sc->bce_flash_info = NULL; BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n", __FILE__, __LINE__); - rc = ENODEV; + return (ENODEV); } bce_init_nvram_get_flash_size: @@ -2682,6 +2878,13 @@ (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; + if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708 && + BCE_CHIP_ID(sc) != BCE_CHIP_ID_5708_B1) || + (REG_RD(sc, BCE_PCI_CONFIG_3) & BCE_PCI_CONFIG_3_VAUX_PRESET) == 0) { + sc->bce_flags |= BCE_NO_WOL_FLAG; + sc->bce_flags &= ~BCE_WOL_FLAG; + } + bce_get_media_exit: DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY), "Using PHY address %d.\n", sc->bce_phy_addr); @@ -2951,6 +3154,8 @@ { bus_addr_t *busaddr = arg; + KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!", + __FUNCTION__, nsegs)); /* Simulate a mapping failure. */ DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control), error = ENOMEM); @@ -3005,16 +3210,16 @@ /* * Allocate the parent bus DMA tag appropriate for PCI. 
*/ - if (bus_dma_tag_create(NULL, + if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, - MAXBSIZE, - BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE_32BIT, 0, + BUS_SPACE_MAXSIZE_32BIT, + 0, NULL, NULL, &sc->parent_tag)) { BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n", @@ -3048,7 +3253,7 @@ if(bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block, - BUS_DMA_NOWAIT, + BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->status_map)) { BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n", __FILE__, __LINE__); @@ -3056,8 +3261,6 @@ goto bce_dma_alloc_exit; } - bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ); - error = bus_dmamap_load(sc->status_tag, sc->status_map, sc->status_block, @@ -3101,7 +3304,7 @@ if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block, - BUS_DMA_NOWAIT, + BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) { BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n", __FILE__, __LINE__); @@ -3109,8 +3312,6 @@ goto bce_dma_alloc_exit; } - bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ); - error = bus_dmamap_load(sc->stats_tag, sc->stats_map, sc->stats_block, @@ -3168,7 +3369,7 @@ if(bus_dmamem_alloc(sc->ctx_tag, (void **)&sc->ctx_block[i], - BUS_DMA_NOWAIT, + BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ctx_map[i])) { BCE_PRINTF("%s(%d): Could not allocate CTX " "DMA memory!\n", __FILE__, __LINE__); @@ -3176,8 +3377,6 @@ goto bce_dma_alloc_exit; } - bzero((char *)sc->ctx_block[i], BCM_PAGE_SIZE); - error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], sc->ctx_block[i], @@ -3225,7 +3424,7 @@ if(bus_dmamem_alloc(sc->tx_bd_chain_tag, (void **)&sc->tx_bd_chain[i], - BUS_DMA_NOWAIT, + BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->tx_bd_chain_map[i])) { BCE_PRINTF("%s(%d): Could not allocate TX descriptor " "chain DMA memory!\n", __FILE__, __LINE__); @@ -3320,7 +3519,7 @@ if 
(bus_dmamem_alloc(sc->rx_bd_chain_tag, (void **)&sc->rx_bd_chain[i], - BUS_DMA_NOWAIT, + BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->rx_bd_chain_map[i])) { BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain " "DMA memory!\n", __FILE__, __LINE__); @@ -3328,8 +3527,6 @@ goto bce_dma_alloc_exit; } - bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ); - error = bus_dmamap_load(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], sc->rx_bd_chain[i], @@ -3353,6 +3550,7 @@ * Create a DMA tag for RX mbufs. */ #ifdef BCE_JUMBO_HDRSPLIT + /* XXX rx_bd_mbuf_alloc_size was not initialized before bce_dma_alloc() */ max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ? MCLBYTES : sc->rx_bd_mbuf_alloc_size); #else @@ -3365,7 +3563,7 @@ (uintmax_t) max_size, max_segments, (uintmax_t) max_seg_size); if (bus_dma_tag_create(sc->parent_tag, - 1, + BCE_RX_BUF_ALIGN, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, @@ -3421,7 +3619,7 @@ if (bus_dmamem_alloc(sc->pg_bd_chain_tag, (void **)&sc->pg_bd_chain[i], - BUS_DMA_NOWAIT, + BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->pg_bd_chain_map[i])) { BCE_PRINTF("%s(%d): Could not allocate page descriptor chain " "DMA memory!\n", __FILE__, __LINE__); @@ -3429,8 +3627,6 @@ goto bce_dma_alloc_exit; } - bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ); - error = bus_dmamap_load(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], sc->pg_bd_chain[i], @@ -4361,15 +4557,18 @@ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ -static void +static int bce_init_ctx(struct bce_softc *sc) { + int i, rc; + + rc = 0; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { - int i, retry_cnt = CTX_INIT_RETRY_COUNT; + int retry_cnt = CTX_INIT_RETRY_COUNT; u32 val; DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n"); @@ -4390,11 +4589,12 @@ break; DELAY(2); } - - /* ToDo: Consider returning an error here. */ - DBRUNIF((val & BCE_CTX_COMMAND_MEM_INIT), + if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) { BCE_PRINTF("%s(): Context memory initialization failed!\n", - __FUNCTION__)); + __FUNCTION__); + rc = EBUSY; + goto init_ctx_fail; + } for (i = 0; i < sc->ctx_pages; i++) { int j; @@ -4416,10 +4616,12 @@ DELAY(5); } - /* ToDo: Consider returning an error here. */ - DBRUNIF((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ), + if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) { BCE_PRINTF("%s(): Failed to initialize context page %d!\n", - __FUNCTION__, i)); + __FUNCTION__, i); + rc = EBUSY; + goto init_ctx_fail; + } } } else { u32 vcid_addr, offset; @@ -4450,6 +4652,9 @@ } DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); + +init_ctx_fail: + return (rc); } @@ -4534,9 +4739,6 @@ bce_stop(struct bce_softc *sc) { struct ifnet *ifp; - struct ifmedia_entry *ifm; - struct mii_data *mii = NULL; - int mtmp, itmp; DBENTER(BCE_VERBOSE_RESET); @@ -4544,8 +4746,6 @@ ifp = sc->bce_ifp; - mii = device_get_softc(sc->bce_miibus); - callout_stop(&sc->bce_tick_callout); /* Disable the transmit/receive blocks. */ @@ -4564,25 +4764,6 @@ /* Free TX buffers. */ bce_free_tx_chain(sc); - /* - * Isolate/power down the PHY, but leave the media selection - * unchanged so that things will be put back to normal when - * we bring the interface back up. - */ - - itmp = ifp->if_flags; - ifp->if_flags |= IFF_UP; - - /* If we are called from bce_detach(), mii is already NULL. 
*/ - if (mii != NULL) { - ifm = mii->mii_media.ifm_cur; - mtmp = ifm->ifm_media; - ifm->ifm_media = IFM_ETHER | IFM_NONE; - mii_mediachg(mii); - ifm->ifm_media = mtmp; - } - - ifp->if_flags = itmp; sc->watchdog_timer = 0; sc->bce_link = 0; @@ -4809,7 +4990,7 @@ bce_blockinit(struct bce_softc *sc) { u32 reg, val; - int rc = 0; + int i, rc = 0; DBENTER(BCE_VERBOSE_RESET); @@ -4823,6 +5004,9 @@ REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); sc->last_status_idx = 0; + /* Clear status block. */ + bzero(sc->status_block, BCE_STATUS_BLK_SZ); + bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_PREREAD); sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; /* Set up link change interrupt generation. */ @@ -4842,26 +5026,36 @@ /* Program various host coalescing parameters. */ REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, - (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip); + ((sc->bce_tx_quick_cons_trip_int & 0xFF) << 16) | + (sc->bce_tx_quick_cons_trip & 0xFF)); REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, - (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip); + ((sc->bce_rx_quick_cons_trip_int & 0xFF) << 16) | + (sc->bce_rx_quick_cons_trip & 0xFF)); REG_WR(sc, BCE_HC_COMP_PROD_TRIP, - (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); + ((sc->bce_comp_prod_trip_int & 0xFF) << 16) | + (sc->bce_comp_prod_trip & 0xFF)); REG_WR(sc, BCE_HC_TX_TICKS, - (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); + ((sc->bce_tx_ticks_int & 0x3FF) << 16) | + (sc->bce_tx_ticks & 0x3FF)); REG_WR(sc, BCE_HC_RX_TICKS, - (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); + ((sc->bce_rx_ticks_int & 0x3FF) << 16) | + (sc->bce_rx_ticks & 0x3FF)); REG_WR(sc, BCE_HC_COM_TICKS, - (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); + ((sc->bce_com_ticks_int & 0x3FF) << 16) | + (sc->bce_com_ticks & 0x3FF)); REG_WR(sc, BCE_HC_CMD_TICKS, - (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); + ((sc->bce_cmd_ticks_int & 0x3FF) << 16) | + (sc->bce_cmd_ticks & 0x3FF)); + /* + * XXX 
Missing broken statistics handling on PCIX. + */ REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00)); REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ /* Configure the Host Coalescing block. */ val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE | - BCE_HC_CONFIG_COLLECT_STATS; + BCE_HC_CONFIG_COM_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS; #if 0 /* ToDo: Add MSI-X support. */ @@ -4953,6 +5147,22 @@ REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); DELAY(20); +#if 1 + /* Disable firmware "COAL_NOW" behavior on first packet. */ + if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || + (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { + REG_WR_IND(sc, BCE_RBUF_FW_BUF_ALLOC, 0x80000000); + for (i = 0; i < 10; i++) { + DELAY(1); + val = REG_RD_IND(sc, BCE_RBUF_FW_BUF_ALLOC); + if ((val & 0x80000000) != 0) + break; + } + if (i == 10) + BCE_PRINTF("%s(%d): Diabling COAL_NOW timedout!.\n", + __FILE__, __LINE__); + } +#endif /* Save the current host coalescing block settings. */ sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); @@ -4974,7 +5184,7 @@ u16 *chain_prod, u32 *prod_bseq) { bus_dmamap_t map; - bus_dma_segment_t segs[BCE_MAX_SEGMENTS]; + bus_dma_segment_t segs[1]; struct mbuf *m_new = NULL; struct rx_bd *rxbd; int nsegs, error, rc = 0; @@ -5045,9 +5255,10 @@ /* Handle any mapping errors. */ if (error) { +#ifdef BCE_DEBUG BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain (%d)!\n", __FILE__, __LINE__, error); - +#endif sc->dma_map_addr_rx_failed_count++; m_freem(m_new); @@ -5061,8 +5272,6 @@ KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!", __FUNCTION__, nsegs)); - /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */ - /* Setup the rx_bd for the segment. */ rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; @@ -5160,9 +5369,10 @@ /* Handle any mapping errors. 
*/ if (error) { +#ifdef BCE_DEBUG BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n", __FILE__, __LINE__); - +#endif m_freem(m_new); DBRUN(sc->debug_pg_mbuf_alloc--); @@ -5268,6 +5478,8 @@ DBRUN(sc->tx_hi_watermark = USABLE_TX_BD); DBRUN(sc->tx_full_count = 0); + for (i = 0; i < TX_PAGES; i++) + bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ); /* * The NetXtreme II supports a linked-list structre called * a Buffer Descriptor Chain (or BD chain). A BD chain @@ -5293,6 +5505,9 @@ txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j])); txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j])); } + for (i = 0; i < TX_PAGES; i++) + bus_dmamap_sync(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i], + BUS_DMASYNC_PREWRITE); bce_init_tx_context(sc); @@ -5329,8 +5544,11 @@ } /* Clear each TX chain page. */ - for (i = 0; i < TX_PAGES; i++) + for (i = 0; i < TX_PAGES; i++) { bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ); + bus_dmamap_sync(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i], + BUS_DMASYNC_PREWRITE); + } sc->used_tx_bd = 0; @@ -5450,11 +5668,6 @@ /* Fill up the RX chain. */ bce_fill_rx_chain(sc); - for (i = 0; i < RX_PAGES; i++) { - bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - } - bce_init_rx_context(sc); DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD)); @@ -5477,6 +5690,7 @@ { u16 prod, prod_idx; u32 prod_bseq; + int i; DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); @@ -5495,6 +5709,11 @@ prod = NEXT_RX_BD(prod); } + /* Prepare the page chain pages to be accessed by the NIC. */ + for (i = 0; i < RX_PAGES; i++) + bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + /* Save the RX chain producer indices. 
*/ sc->rx_prod = prod; sc->rx_prod_bseq = prod_bseq; @@ -5597,6 +5816,9 @@ pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j])); } + /* Fill up the page chain. */ + bce_fill_pg_chain(sc); + /* Setup the MQ BIN mapping for host_pg_bidx. */ if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) @@ -5618,14 +5840,6 @@ val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val); - /* Fill up the page chain. */ - bce_fill_pg_chain(sc); - - for (i = 0; i < PG_PAGES; i++) { - bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - } - DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD)); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); @@ -5644,6 +5858,7 @@ bce_fill_pg_chain(struct bce_softc *sc) { u16 prod, prod_idx; + int i; DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); @@ -5661,6 +5876,10 @@ prod = NEXT_PG_BD(prod); } + for (i = 0; i < PG_PAGES; i++) + bus_dmamap_sync(sc->pg_bd_chain_tag, + sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE); + /* Save the page chain producer index. */ sc->pg_prod = prod; @@ -5731,15 +5950,16 @@ bce_ifmedia_upd(struct ifnet *ifp) { struct bce_softc *sc = ifp->if_softc; + int error; DBENTER(BCE_VERBOSE); BCE_LOCK(sc); - bce_ifmedia_upd_locked(ifp); + error = bce_ifmedia_upd_locked(ifp); BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE); - return (0); + return (error); } @@ -5749,14 +5969,16 @@ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ -static void +static int bce_ifmedia_upd_locked(struct ifnet *ifp) { struct bce_softc *sc = ifp->if_softc; struct mii_data *mii; + int error; DBENTER(BCE_VERBOSE); + error = 0; BCE_LOCK_ASSERT(sc); mii = device_get_softc(sc->bce_miibus); @@ -5770,10 +5992,11 @@ LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); } - mii_mediachg(mii); + error = mii_mediachg(mii); } DBEXIT(BCE_VERBOSE); + return (error); } @@ -5812,17 +6035,11 @@ /* Nothing. */ /****************************************************************************/ static void -bce_phy_intr(struct bce_softc *sc) +bce_phy_intr(struct bce_softc *sc, u32 new_link_state, u32 old_link_state) { - u32 new_link_state, old_link_state; DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR); - new_link_state = sc->status_block->status_attn_bits & - STATUS_ATTN_BITS_LINK_STATE; - old_link_state = sc->status_block->status_attn_bits_ack & - STATUS_ATTN_BITS_LINK_STATE; - /* Handle any changes if the link state has changed. */ if (new_link_state != old_link_state) { @@ -5869,7 +6086,6 @@ { u16 hw_cons; - rmb(); hw_cons = sc->status_block->status_rx_quick_consumer_index0; if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) hw_cons++; @@ -5884,18 +6100,20 @@ /* Nothing. 
*/ /****************************************************************************/ static void -bce_rx_intr(struct bce_softc *sc) +bce_rx_intr(struct bce_softc *sc, u16 hw_rx_cons, int count) { struct ifnet *ifp = sc->bce_ifp; struct l2_fhdr *l2fhdr; + struct mbuf *m0; struct ether_vlan_header *vh; unsigned int pkt_len; - u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons; + u16 sw_rx_cons, sw_rx_cons_idx; u32 status; #ifdef BCE_JUMBO_HDRSPLIT unsigned int rem_len; u16 sw_pg_cons, sw_pg_cons_idx; #endif + int i; DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); DBRUN(sc->rx_interrupts++); @@ -5904,20 +6122,17 @@ __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); /* Prepare the RX chain pages to be accessed by the host CPU. */ - for (int i = 0; i < RX_PAGES; i++) + for (i = 0; i < RX_PAGES; i++) bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD); #ifdef BCE_JUMBO_HDRSPLIT /* Prepare the page chain pages to be accessed by the host CPU. */ - for (int i = 0; i < PG_PAGES; i++) + for (i = 0; i < PG_PAGES; i++) bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD); #endif - /* Get the hardware's view of the RX consumer index. */ - hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); - /* Get working copies of the driver's view of the consumer indices. */ sw_rx_cons = sc->rx_cons; #ifdef BCE_JUMBO_HDRSPLIT @@ -5930,11 +6145,7 @@ DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++); /* Scan through the receive chain as long as there is work to do */ - /* ToDo: Consider setting a limit on the number of packets processed. */ - rmb(); - while (sw_rx_cons != hw_rx_cons) { - struct mbuf *m0; - + while (sw_rx_cons != hw_rx_cons && count != 0) { /* Convert the producer/consumer indices to an actual rx_bd index. 
*/ sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons); @@ -6201,10 +6412,9 @@ sw_pg_cons = sc->pg_cons; #endif } - - /* Refresh hw_cons to see if there's new work */ - if (sw_rx_cons == hw_rx_cons) - hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); + count--; + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + break; } /* No new packets to process. Refill the RX and page chains and exit. */ @@ -6216,17 +6426,6 @@ sc->rx_cons = sw_rx_cons; bce_fill_rx_chain(sc); - /* Prepare the page chain pages to be accessed by the NIC. */ - for (int i = 0; i < RX_PAGES; i++) - bus_dmamap_sync(sc->rx_bd_chain_tag, - sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE); - -#ifdef BCE_JUMBO_HDRSPLIT - for (int i = 0; i < PG_PAGES; i++) - bus_dmamap_sync(sc->pg_bd_chain_tag, - sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE); -#endif - DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, " "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); @@ -6246,7 +6445,6 @@ { u16 hw_cons; - mb(); hw_cons = sc->status_block->status_tx_quick_consumer_index0; if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) hw_cons++; @@ -6262,10 +6460,11 @@ /* Nothing. */ /****************************************************************************/ static void -bce_tx_intr(struct bce_softc *sc) +bce_tx_intr(struct bce_softc *sc, u16 hw_tx_cons) { struct ifnet *ifp = sc->bce_ifp; - u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; + u16 sw_tx_cons, sw_tx_chain_cons; + int i; DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR); DBRUN(sc->tx_interrupts++); @@ -6275,14 +6474,10 @@ BCE_LOCK_ASSERT(sc); - /* Get the hardware's view of the TX consumer index. */ - hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc); + for (i = 0; i < TX_PAGES; i++) + bus_dmamap_sync(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i], + BUS_DMASYNC_POSTWRITE); sw_tx_cons = sc->tx_cons; - - /* Prevent speculative reads from getting ahead of the status block. 
*/ - bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, - BUS_SPACE_BARRIER_READ); - /* Cycle through any completed TX chain page entries. */ while (sw_tx_cons != hw_tx_cons) { #ifdef BCE_DEBUG @@ -6329,6 +6524,9 @@ BCE_PRINTF("%s(): Unloading map/freeing mbuf " "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons)); + bus_dmamap_sync(sc->tx_mbuf_tag, + sc->tx_mbuf_map[sw_tx_chain_cons], + BUS_DMASYNC_POSTWRITE); /* Unmap the mbuf. */ bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[sw_tx_chain_cons]); @@ -6343,17 +6541,12 @@ sc->used_tx_bd--; sw_tx_cons = NEXT_TX_BD(sw_tx_cons); - - /* Refresh hw_cons to see if there's new work. */ - hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc); - - /* Prevent speculative reads from getting ahead of the status block. */ - bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, - BUS_SPACE_BARRIER_READ); } + sc->tx_cons = sw_tx_cons; /* Clear the TX timeout timer. */ - sc->watchdog_timer = 0; + if (sc->used_tx_bd == 0) + sc->watchdog_timer = 0; /* Clear the tx hardware queue full flag. */ if (sc->used_tx_bd < sc->max_tx_bd) { @@ -6364,8 +6557,6 @@ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } - sc->tx_cons = sw_tx_cons; - DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, " "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq); @@ -6403,10 +6594,6 @@ DBENTER(BCE_VERBOSE_INTR); REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, - BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | - BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); - - REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); /* Force an immediate interrupt (whether there is new data or not). */ @@ -6485,7 +6672,7 @@ /* Program the mtu, enabling jumbo frame support if necessary. 
*/ if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)) REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, - min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) | + min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU_VLAN) | BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA); else REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu); @@ -6513,6 +6700,10 @@ /* Init TX buffer descriptor chain. */ bce_init_tx_chain(sc); + /* Ack all interrupts. */ + REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, + BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | + BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); /* Enable host interrupts. */ bce_enable_intr(sc, 1); @@ -6707,7 +6898,7 @@ bus_dmamap_t map; struct tx_bd *txbd = NULL; struct mbuf *m0; - u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0; + u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0, si; u32 prod_bseq; #ifdef BCE_DEBUG @@ -6746,7 +6937,7 @@ /* Map the mbuf into DMAable memory. */ prod = sc->tx_prod; - chain_prod = TX_CHAIN_IDX(prod); + si = chain_prod = TX_CHAIN_IDX(prod); map = sc->tx_mbuf_map[chain_prod]; /* Map the mbuf into our DMA address space. */ @@ -6782,9 +6973,11 @@ goto bce_tx_encap_exit; } else if (error != 0) { /* Still can't map the mbuf, release it and return an error. */ +#ifdef BCE_DEBUG BCE_PRINTF( "%s(%d): Unknown error mapping mbuf into TX chain!\n", __FILE__, __LINE__); +#endif m_freem(m0); *m_head = NULL; sc->dma_map_addr_tx_failed_count++; @@ -6810,6 +7003,7 @@ rc = ENOBUFS; goto bce_tx_encap_exit; } + bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE); /* prod points to an empty tx_bd at this point. */ prod_bseq = sc->tx_prod_bseq; @@ -6865,6 +7059,9 @@ * have been freed. */ sc->tx_mbuf_ptr[chain_prod] = m0; + /* Swap DMA maps. 
*/ + sc->tx_mbuf_map[si] = sc->tx_mbuf_map[chain_prod]; + sc->tx_mbuf_map[chain_prod] = map; sc->used_tx_bd += nsegs; /* Update some debug statistic counters */ @@ -6902,7 +7099,7 @@ { struct bce_softc *sc = ifp->if_softc; struct mbuf *m_head = NULL; - int count = 0; + int count = 0, i; u16 tx_prod, tx_chain_prod; DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); @@ -6919,8 +7116,10 @@ __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq); /* If there's no link or the transmit queue is empty then just exit. */ - if (!sc->bce_link) { - DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n", + if (sc->bce_link == 0 || + (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != + IFF_DRV_RUNNING) { + DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or TX busy.\n", __FUNCTION__); goto bce_start_locked_exit; } @@ -6972,7 +7171,9 @@ __FUNCTION__); goto bce_start_locked_exit; } - + for (i = 0; i < TX_PAGES; i++) + bus_dmamap_sync(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i], + BUS_DMASYNC_PREWRITE); DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into send queue.\n", __FUNCTION__, count); @@ -7271,118 +7472,103 @@ { struct bce_softc *sc; struct ifnet *ifp; - u32 status_attn_bits; - u16 hw_rx_cons, hw_tx_cons; + u32 status_attn_bits, status_attn_bits_ack; + u32 new_link_state, old_link_state; + u16 hw_rx_cons, hw_tx_cons, status_idx; sc = xsc; ifp = sc->bce_ifp; DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); - DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc)); BCE_LOCK(sc); DBRUN(sc->interrupts_generated++); - - /* Synchnorize before we read from interface's status block */ - bus_dmamap_sync(sc->status_tag, sc->status_map, - BUS_DMASYNC_POSTREAD); - /* - * If the hardware status block index - * matches the last value read by the - * driver and we haven't asserted our - * interrupt then there's nothing to do. + * For INTx case, disable interrupts to ensure status + * block updates but don't ack interrupts. 
For MSI case, + * it has no harm to do it here as driver also should + * disable interrupts unless BCE_HC_CONFIG_ONE_SHOT is + * used. */ - if ((sc->status_block->status_idx == sc->last_status_idx) && - (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE)) { - DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n", - __FUNCTION__); - goto bce_intr_exit; - } - - /* Ack the interrupt and stop others from occuring. */ - REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, - BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | - BCE_PCICFG_INT_ACK_CMD_MASK_INT); - - /* Check if the hardware has finished any work. */ - hw_rx_cons = bce_get_hw_rx_cons(sc); - hw_tx_cons = bce_get_hw_tx_cons(sc); - - /* Keep processing data as long as there is work to do. */ - for (;;) { - + REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT); + while ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { + /* Synchronize before we read from interface's status block */ + bus_dmamap_sync(sc->status_tag, sc->status_map, + BUS_DMASYNC_POSTREAD); + DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc)); + status_idx = sc->status_block->status_idx; status_attn_bits = sc->status_block->status_attn_bits; + status_attn_bits_ack = sc->status_block->status_attn_bits_ack; + hw_rx_cons = bce_get_hw_rx_cons(sc); + hw_tx_cons = bce_get_hw_tx_cons(sc); + bus_dmamap_sync(sc->status_tag, sc->status_map, + BUS_DMASYNC_PREREAD); + /* + * If the hardware status block index + * matches the last value read by the + * driver and we haven't asserted our + * interrupt then there's nothing to do. 
+ */ + if (status_idx == sc->last_status_idx && + (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & + BCE_PCICFG_MISC_STATUS_INTA_VALUE)) { + DBPRINT(sc, BCE_VERBOSE_INTR, + "%s(): Spurious interrupt.\n", __FUNCTION__); + break; + } + sc->last_status_idx = status_idx; - DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control), - BCE_PRINTF("Simulating unexpected status attention bit set."); - sc->unexpected_attention_sim_count++; - status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR); + DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control), + BCE_PRINTF( + "Simulating unexpected status attention bit set."); + sc->unexpected_attention_sim_count++; + status_attn_bits = status_attn_bits | + STATUS_ATTN_BITS_PARITY_ERROR); + new_link_state = status_attn_bits & STATUS_ATTN_BITS_LINK_STATE; + old_link_state = status_attn_bits_ack & + STATUS_ATTN_BITS_LINK_STATE; /* Was it a link change interrupt? */ - if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != - (sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { - bce_phy_intr(sc); - - /* Clear any transient status updates during link state change. */ - REG_WR(sc, BCE_HC_COMMAND, - sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); + if (new_link_state != old_link_state) { + bce_phy_intr(sc, new_link_state, old_link_state); + /* + * Clear any transient status updates during link + * state change. + */ + REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | + BCE_HC_COMMAND_COAL_NOW_WO_INT); REG_RD(sc, BCE_HC_COMMAND); } /* If any other attention is asserted then the chip is toast. 
*/ if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != - (sc->status_block->status_attn_bits_ack & - ~STATUS_ATTN_BITS_LINK_STATE))) { - - sc->unexpected_attention_count++; - + (status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE))) { BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n", - __FILE__, __LINE__, sc->status_block->status_attn_bits); - + __FILE__, __LINE__, + sc->status_block->status_attn_bits); DBRUNMSG(BCE_FATAL, - if (unexpected_attention_sim_control == 0) - bce_breakpoint(sc)); - + if (unexpected_attention_sim_control == 0) + bce_breakpoint(sc)); + sc->unexpected_attention_count++; bce_init_locked(sc); goto bce_intr_exit; } + if (hw_rx_cons != sc->rx_cons) + bce_rx_intr(sc, hw_rx_cons, -1); + if (hw_tx_cons != sc->tx_cons) + bce_tx_intr(sc, hw_tx_cons); + break; + } - /* Check for any completed RX frames. */ - if (hw_rx_cons != sc->hw_rx_cons) - bce_rx_intr(sc); - - /* Check for any completed TX frames. */ - if (hw_tx_cons != sc->hw_tx_cons) - bce_tx_intr(sc); - - /* Save the status block index value for use during the next interrupt. */ - sc->last_status_idx = sc->status_block->status_idx; - - /* Prevent speculative reads from getting ahead of the status block. */ - bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, - BUS_SPACE_BARRIER_READ); - - /* If there's no work left then exit the interrupt service routine. */ - hw_rx_cons = bce_get_hw_rx_cons(sc); - hw_tx_cons = bce_get_hw_tx_cons(sc); - - if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons)) - break; - + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) + bce_start_locked(ifp); } - - bus_dmamap_sync(sc->status_tag, sc->status_map, - BUS_DMASYNC_PREREAD); - - /* Re-enable interrupts. */ - bce_enable_intr(sc, 0); - - /* Handle any frames that arrived while handling the interrupt. 
*/ - if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) - bce_start_locked(ifp); - + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + /* Re-enable interrupts. */ + bce_enable_intr(sc, 0); + } bce_intr_exit: BCE_UNLOCK(sc); @@ -7469,12 +7655,16 @@ sc->rx_mode = rx_mode; REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode); + REG_BARRIER(sc, BCE_EMAC_RX_MODE); } /* Disable and clear the exisitng sort before enabling a new sort. */ REG_WR(sc, BCE_RPM_SORT_USER0, 0x0); + REG_BARRIER(sc, BCE_RPM_SORT_USER0); REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode); + REG_BARRIER(sc, BCE_RPM_SORT_USER0); REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA); + REG_BARRIER(sc, BCE_RPM_SORT_USER0); DBEXIT(BCE_VERBOSE_MISC); } @@ -8145,6 +8335,64 @@ ctx = device_get_sysctl_ctx(sc->bce_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev)); + SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_quick_cons_trip_int", + CTLFLAG_RW, &sc->bce_tx_quick_cons_trip_int, 0, + "TX Quick BD chain entries before status block update during " + "an interrupt"); + resource_int_value(device_get_name(sc->bce_dev), + device_get_unit(sc->bce_dev), "tx_quick_cons_trip_int", + &sc->bce_tx_quick_cons_trip_int); + SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_quick_cons_trip", + CTLFLAG_RW, &sc->bce_tx_quick_cons_trip, 0, + "TX Quick BD chain entries before status block update"); + resource_int_value(device_get_name(sc->bce_dev), + device_get_unit(sc->bce_dev), "tx_quick_cons_trip", + &sc->bce_tx_quick_cons_trip); + + SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_ticks_int", + CTLFLAG_RW, &sc->bce_tx_ticks_int, 0, + "Number of 1us ticks that should be counted during an interrupt " + "before status block update caused by TX activity"); + resource_int_value(device_get_name(sc->bce_dev), + device_get_unit(sc->bce_dev), "tx_ticks_int", + &sc->bce_tx_ticks_int); + SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_ticks", + CTLFLAG_RW, &sc->bce_tx_ticks, 0, + "Number of 1us ticks that should be counted 
" + "before status block update caused by TX activity"); + resource_int_value(device_get_name(sc->bce_dev), + device_get_unit(sc->bce_dev), "tx_ticks", + &sc->bce_tx_ticks); + + SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_quick_cons_trip_int", + CTLFLAG_RW, &sc->bce_rx_quick_cons_trip_int, 0, + "RX Quick BD chain entries before status block update during " + "an interrupt"); + resource_int_value(device_get_name(sc->bce_dev), + device_get_unit(sc->bce_dev), "rx_quick_cons_trip_int", + &sc->bce_rx_quick_cons_trip_int); + SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_quick_cons_trip", + CTLFLAG_RW, &sc->bce_rx_quick_cons_trip, 0, + "RX Quick BD chain entries before status block update"); + resource_int_value(device_get_name(sc->bce_dev), + device_get_unit(sc->bce_dev), "rx_quick_cons_trip", + &sc->bce_rx_quick_cons_trip); + + SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_ticks_int", + CTLFLAG_RW, &sc->bce_rx_ticks_int, 0, + "Number of 1us ticks that should be counted during an interrupt " + "before status block update caused by RX activity"); + resource_int_value(device_get_name(sc->bce_dev), + device_get_unit(sc->bce_dev), "rx_ticks_int", + &sc->bce_rx_ticks_int); + SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_ticks", + CTLFLAG_RW, &sc->bce_rx_ticks, 0, + "Number of 1us ticks that should be counted " + "before status block update caused by RX activity"); + resource_int_value(device_get_name(sc->bce_dev), + device_get_unit(sc->bce_dev), "rx_ticks", + &sc->bce_rx_ticks); + #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "l2fhdr_error_sim_control", Index: if_bcereg.h =================================================================== --- if_bcereg.h (revision 204734) +++ if_bcereg.h (working copy) @@ -1093,10 +1093,12 @@ #define REG_WR(sc, offset, val) bce_reg_wr(sc, offset, val) #define REG_WR16(sc, offset, val) bce_reg_wr16(sc, offset, val) #define REG_RD(sc, offset) bce_reg_rd(sc, offset) +#define REG_BARRIER(sc, offset) bus_space_barrier(sc->bce_btag, 
sc->bce_bhandle, offset, 4, BUS_SPACE_BARRIER_WRITE) #else #define REG_WR(sc, offset, val) bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val) #define REG_WR16(sc, offset, val) bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val) #define REG_RD(sc, offset) bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset) +#define REG_BARRIER(sc, offset) bus_space_barrier(sc->bce_btag, sc->bce_bhandle, offset, 4, BUS_SPACE_BARRIER_WRITE) #endif #define REG_RD_IND(sc, offset) bce_reg_rd_ind(sc, offset) @@ -6346,11 +6348,12 @@ #define BCE_TX_TIMEOUT 5 #define BCE_MAX_SEGMENTS 32 -#define BCE_TSO_MAX_SIZE 65536 +#define BCE_TSO_MAX_SIZE (65535 + sizeof(struct ether_vlan_header)) #define BCE_TSO_MAX_SEG_SIZE 4096 #define BCE_DMA_ALIGN 8 #define BCE_DMA_BOUNDARY 0 +#define BCE_RX_BUF_ALIGN 16 /* The BCM5708 has a problem with addresses greater that 40bits. */ /* Handle the sizing issue in an architecture agnostic fashion. */ @@ -6437,6 +6440,7 @@ #define BCE_ONE_SHOT_MSI_FLAG 0x00000080 #define BCE_USING_MSIX_FLAG 0x00000100 #define BCE_PCIE_FLAG 0x00000200 +#define BCE_WOL_FLAG 0x00000400 /* Controller capability flags. */ u32 bce_cap_flags; @@ -6444,6 +6448,8 @@ #define BCE_MSIX_CAPABLE_FLAG 0x00000002 #define BCE_PCIE_CAPABLE_FLAG 0x00000004 #define BCE_PCIX_CAPABLE_FLAG 0x00000008 + int bce_pcixcap; + int bce_expcap; /* PHY specific flags. */ u32 bce_phy_flags; @@ -6491,21 +6497,21 @@ /* These setting are used by the host coalescing (HC) block to */ /* to control how often the status block, statistics block and */ /* interrupts are generated. 
*/ - u16 bce_tx_quick_cons_trip_int; - u16 bce_tx_quick_cons_trip; - u16 bce_rx_quick_cons_trip_int; - u16 bce_rx_quick_cons_trip; - u16 bce_comp_prod_trip_int; - u16 bce_comp_prod_trip; - u16 bce_tx_ticks_int; - u16 bce_tx_ticks; - u16 bce_rx_ticks_int; - u16 bce_rx_ticks; - u16 bce_com_ticks_int; - u16 bce_com_ticks; - u16 bce_cmd_ticks_int; - u16 bce_cmd_ticks; - u32 bce_stats_ticks; + int bce_tx_quick_cons_trip_int; + int bce_tx_quick_cons_trip; + int bce_rx_quick_cons_trip_int; + int bce_rx_quick_cons_trip; + int bce_comp_prod_trip_int; + int bce_comp_prod_trip; + int bce_tx_ticks_int; + int bce_tx_ticks; + int bce_rx_ticks_int; + int bce_rx_ticks; + int bce_com_ticks_int; + int bce_com_ticks; + int bce_cmd_ticks_int; + int bce_cmd_ticks; + int bce_stats_ticks; /* The address of the integrated PHY on the MII bus. */ int bce_phy_addr;