diff -u -r old/if_nfe.c nfe/if_nfe.c --- old/if_nfe.c Thu Aug 10 02:22:52 2006 +++ nfe/if_nfe.c Thu Aug 10 20:17:49 2006 @@ -1,4 +1,4 @@ -/* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */ +/* $OpenBSD: if_nfe.c,v 1.54 2006/08/11 12:38:12 jsg Exp $ */ /*- * Copyright (c) 2006 Shigeaki Tagashira @@ -26,10 +26,11 @@ /* Uncomment the following line to enable polling. */ /* #define DEVICE_POLLING */ -#define NFE_NO_JUMBO +#define NFE_JUMBO #define NFE_CSUM -#define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) -#define NVLAN 0 +#define NVLAN +//#define NFE_FLOW_CONTROL +//#define NFE_WOL #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" @@ -67,36 +68,23 @@ #include #include -#include -#include +#include <./if_nfereg.h> +#include <./if_nfevar.h> MODULE_DEPEND(nfe, pci, 1, 1, 1); MODULE_DEPEND(nfe, ether, 1, 1, 1); -MODULE_DEPEND(nfe, miibus, 1, 1, 1); -#include "miibus_if.h" static int nfe_probe (device_t); static int nfe_attach (device_t); static int nfe_detach (device_t); static void nfe_shutdown(device_t); -static int nfe_miibus_readreg (device_t, int, int); -static int nfe_miibus_writereg (device_t, int, int, int); -static void nfe_miibus_statchg (device_t); +static int nfe_mii_readreg (device_t, int, int); +static int nfe_mii_writereg (device_t, int, int, int); static int nfe_ioctl(struct ifnet *, u_long, caddr_t); static void nfe_intr(void *); -static void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); -static void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); -static void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int); -static void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int); -static void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); -static void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); static void nfe_rxeof(struct nfe_softc *); static void nfe_txeof(struct nfe_softc *); static int nfe_encap(struct nfe_softc *, struct mbuf *); -static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *); -static void nfe_jfree(void *, void *); -static int nfe_jpool_alloc(struct nfe_softc *); -static void nfe_jpool_free(struct nfe_softc *); static void nfe_setmulti(struct nfe_softc *); static void nfe_start(struct ifnet *); static void nfe_start_locked(struct ifnet *); @@ -110,11 +98,6 @@ static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); -static int nfe_ifmedia_upd(struct ifnet *); -static int nfe_ifmedia_upd_locked(struct ifnet *); -static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); -static void nfe_tick(void *); -static void nfe_tick_locked(struct nfe_softc *); static void nfe_get_macaddr(struct nfe_softc *, u_char *); static void nfe_set_macaddr(struct nfe_softc *, u_char *); static void nfe_dma_map_segs (void *, bus_dma_segment_t *, int, int); @@ -145,16 +128,6 @@ DEVMETHOD(device_attach, nfe_attach), DEVMETHOD(device_detach, nfe_detach), DEVMETHOD(device_shutdown, nfe_shutdown), - - /* bus interface */ - DEVMETHOD(bus_print_child, bus_generic_print_child), - DEVMETHOD(bus_driver_added, bus_generic_driver_added), - - /* MII interface */ - DEVMETHOD(miibus_readreg, nfe_miibus_readreg), - DEVMETHOD(miibus_writereg, nfe_miibus_writereg), - DEVMETHOD(miibus_statchg, nfe_miibus_statchg), - { 0, 0 } }; @@ -167,7 +140,6 @@ static devclass_t nfe_devclass; DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 
0); -DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); static struct nfe_type nfe_devs[] = { {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, @@ -200,6 +172,22 @@ "NVIDIA nForce MCP55 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, "NVIDIA nForce MCP55 Networking Adapter"}, + {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, + "NVIDIA nForce MCP61 Networking Adapter"}, + {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, + "NVIDIA nForce MCP61 Networking Adapter"}, + {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, + "NVIDIA nForce MCP61 Networking Adapter"}, + {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, + "NVIDIA nForce MCP61 Networking Adapter"}, + {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, + "NVIDIA nForce MCP65 Networking Adapter"}, + {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, + "NVIDIA nForce MCP65 Networking Adapter"}, + {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, + "NVIDIA nForce MCP65 Networking Adapter"}, + {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, + "NVIDIA nForce MCP65 Networking Adapter"}, {0, 0, NULL} }; @@ -224,12 +212,417 @@ return (ENXIO); } +static void +nv_update_pause(struct nfe_softc *sc, u_int32_t pause_flags) +{ + + u_int32_t pff,regmisc; + + if (sc->nfe_flags & NFE_PAUSEFRAME_RX) { + pff = NFE_READ(sc,NFE_RXFILTER) & ~NFE_PFF_PAUSE_RX; + + if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { + NFE_WRITE(sc,NFE_RXFILTER,pff|NFE_PFF_PAUSE_RX); + + } else { + NFE_WRITE(sc,NFE_RXFILTER,pff); + } + } + if (sc->nfe_flags & NFE_PAUSEFRAME_TX) { + regmisc = NFE_READ(sc, NFE_MISC1) & ~NFE_MISC1_PAUSE_TX; + + if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { + NFE_WRITE(sc,NFE_TxPauseFrame,NFE_TX_PAUSEFRAME_ENABLE); + NFE_WRITE(sc,NFE_MISC1,regmisc|NFE_MISC1_PAUSE_TX); + } else { + NFE_WRITE(sc,NFE_TxPauseFrame,NFE_TX_PAUSEFRAME_DISABLE); + NFE_WRITE(sc,NFE_MISC1,regmisc); + } + } +} + +static void +nfe_check_for_link(struct nfe_softc *sc) +{ + u_int32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; + u_int32_t mii_status,lpa=0,adv_lpa=0,adv=0,control_1000,status_1000; + u_int32_t adv_pause,lpa_pause; + u_int32_t pause_flags=0; + device_t dev; + + dev = sc->nfe_dev; + /* BMSR_LSTATUS is latched, read it twice: + * we want the current value. + */ + nfe_mii_readreg(dev, sc->mii_phyaddr, MII_BMSR); + mii_status = nfe_mii_readreg(dev, sc->mii_phyaddr, MII_BMSR); + + if (!(mii_status & BMSR_LINK)) { + DPRINTFN(2,("nfe%d: no link detected by phy - falling back to 10HD.\n",sc->nfe_unit)); + sc->link_speed = 10; + sc->link_duplex = 0; + sc->nfe_link=0; + goto set_speed; + } + sc->nfe_link=1; + /* check auto negotiation is complete */ + if (!(mii_status & BMSR_ACOMP)) { + /* still in autonegotiation - configure nic for 10 MBit HD and wait.
*/ + sc->link_speed = 10; + sc->link_duplex = 0; + DPRINTFN(2,("nfe%d: autoneg not completed - falling back to 10HD\n",sc->nfe_unit)); + goto set_speed; + } + + adv = nfe_mii_readreg(dev, sc->mii_phyaddr, MII_ANAR); + lpa = nfe_mii_readreg(dev, sc->mii_phyaddr, MII_ANLPAR); + + /* FIXME: skip this if the PHY is not gigabit capable */ + control_1000 = nfe_mii_readreg(dev, sc->mii_phyaddr, MII_100T2CR); + status_1000 = nfe_mii_readreg(dev, sc->mii_phyaddr, MII_100T2SR); + + if ((control_1000 & GTCR_ADV_1000TFDX) && + (status_1000 & GTSR_LP_1000TFDX)) { + DPRINTFN(2,("nfe%d: nfe_check_for_link: GBit ethernet detected.\n", + sc->nfe_unit)); + sc->link_speed = 1000; + sc->link_duplex = 1; + + goto set_speed; + } + + adv_lpa = lpa & adv; + if (adv_lpa & ANLPAR_TX_FD) { + sc->link_speed = 100; + sc->link_duplex = 1; + } else if (adv_lpa & ANLPAR_TX) { + sc->link_speed = 100; + sc->link_duplex = 0; + } else if (adv_lpa & ANLPAR_10_FD) { + sc->link_speed = 10; + sc->link_duplex = 1; + } else if (adv_lpa & ANLPAR_10) { + sc->link_speed = 10; + sc->link_duplex = 0; + } else { + sc->link_speed = 10; + sc->link_duplex = 0; + DPRINTFN(2,("nfe%d: bad ability %04x - falling back to 10HD.\n", sc->nfe_unit, adv_lpa)); + } + +set_speed: + + phy = NFE_READ(sc, NFE_PHY_IFACE); + phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); + + seed = NFE_READ(sc, NFE_RNDSEED); + seed &= ~NFE_SEED_MASK; + + if (sc->link_duplex == 0) { + phy |= NFE_PHY_HDX; /* half-duplex */ + misc |= NFE_MISC1_HDX; + } + + switch (sc->link_speed) { + case 1000: + link |= NFE_MEDIA_1000T; + seed |= NFE_SEED_1000T; + phy |= NFE_PHY_1000T; + break; + case 100: + link |= NFE_MEDIA_100TX; + seed |= NFE_SEED_100TX; + phy |= NFE_PHY_100TX; + break; + case 10: + link |= NFE_MEDIA_10T; + seed |= NFE_SEED_10T; + break; + } + + NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ + + NFE_WRITE(sc, NFE_PHY_IFACE, phy); + NFE_WRITE(sc, NFE_MISC1, misc); + NFE_WRITE(sc, NFE_LINKSPEED, link); + + if(sc->nfe_flags & (NFE_PAUSEFRAME_RX|NFE_PAUSEFRAME_TX)){ + if(sc->link_duplex==1){ + + adv_pause = adv & (ANAR_FC| ADVERTISE_PAUSE_ASYM); + lpa_pause = lpa & (ANLPAR_FC| LPA_PAUSE_ASYM); + + switch (adv_pause) { + case (ANAR_FC): + if (lpa_pause & ANLPAR_FC) { + pause_flags = NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE; + } + break; + case (ADVERTISE_PAUSE_ASYM): + if (lpa_pause == (ANLPAR_FC| LPA_PAUSE_ASYM)) + { + pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + } + break; + case (ANAR_FC| ADVERTISE_PAUSE_ASYM): + if (lpa_pause & ANLPAR_FC) + { + pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + } + if (lpa_pause == LPA_PAUSE_ASYM) + { + pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + } + break; + } + }else + pause_flags = 0; + + nv_update_pause(sc,pause_flags); + } +} + +static int +nfe_media_change (struct ifnet *ifp) +{ + struct nfe_softc *sc = ifp->if_softc; + struct ifmedia *ifm; + u_int32_t bmcr,adv; + int autoneg=0,speed=0,duplex=0; + + ifm = &sc->nfe_media; + + if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) + return(EINVAL); + + bmcr = nfe_mii_readreg(sc->nfe_dev, sc->mii_phyaddr, MII_BMCR); + adv = nfe_mii_readreg(sc->nfe_dev, sc->mii_phyaddr, MII_ANAR); + adv &= ~(ANAR_TX_FD|ANAR_TX|ANAR_10_FD|ANAR_10); + + switch (IFM_SUBTYPE(ifm->ifm_media)){ + case IFM_AUTO: + adv = adv|ANAR_TX_FD|ANAR_TX|ANAR_10_FD|ANAR_10; + autoneg = 1; + break; + case IFM_1000_T: + /* Note: forcing 1000baseT with autonegotiation disabled is intentionally not supported - no one should need that.
*/ + return(EINVAL); + break; + case IFM_100_TX: + speed = 100; + if(ifm->ifm_media & IFM_FDX){ + adv |= ANAR_TX_FD; + duplex = 1; + }else{ + adv |= ANAR_TX; + } + + break; + case IFM_10_T: + speed = 10; + if(ifm->ifm_media & IFM_FDX){ + adv |= ANAR_10_FD; + duplex = 1; + }else{ + adv |= ANAR_10; + } + break; + default: + printf("nfe%d: Unsupported media type\n", sc->nfe_unit); + return (EINVAL); + } + + nfe_mii_writereg(sc->nfe_dev, sc->mii_phyaddr, MII_ANAR, adv); + + if(autoneg){ + bmcr |= (BMCR_AUTOEN | BMCR_STARTNEG); + }else{ + bmcr &= ~(BMCR_AUTOEN |BMCR_S100|BMCR_S1000|BMCR_FDX); + if(speed==100){ + bmcr |= BMCR_S100; + } + if(duplex==1) + bmcr |= BMCR_FDX; + + } + + nfe_mii_writereg(sc->nfe_dev, sc->mii_phyaddr,MII_BMCR,bmcr); + return 0; +} + +static void +nfe_media_status (struct ifnet *ifp, struct ifmediareq *ifmr) +{ + struct nfe_softc *sc = ifp->if_softc; + + nfe_check_for_link(sc); + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_active = IFM_ETHER; + + if (!sc->nfe_link) + return; + + ifmr->ifm_status |= IFM_ACTIVE; + + switch (sc->link_speed) { + case 10: + ifmr->ifm_active |= IFM_10_T; + break; + case 100: + ifmr->ifm_active |= IFM_100_TX; + break; + case 1000: + ifmr->ifm_active |= IFM_1000_T; + break; + } + if (sc->link_duplex == 1) + ifmr->ifm_active |= IFM_FDX; + else + ifmr->ifm_active |= IFM_HDX; +} + + +static int +nfe_mii_init(device_t dev) +{ + u_int32_t reg,phy_reserved,phyinterface,mii_status; + u_int32_t val=0; + struct nfe_softc *sc = device_get_softc(dev); + + /* phy errata for E3016 phy */ + if (sc->phy_model == PHY_MODEL_MARVELL_E3016) { + reg = nfe_mii_readreg(dev,sc->mii_phyaddr,MII_NCONFIG); + reg &= ~PHY_MARVELL_E3016_INITMASK; + if(nfe_mii_writereg(dev,sc->mii_phyaddr,MII_NCONFIG,reg)){ + printf("nfe%d: phy write to errata reg failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + } + + val = ANAR_TX_FD|ANAR_TX|ANAR_10_FD|ANAR_10|ANAR_CSMA; + if(sc->nfe_flags & (NFE_PAUSEFRAME_RX|NFE_PAUSEFRAME_TX)) + val |= ANAR_FC|ADVERTISE_PAUSE_ASYM; + + nfe_mii_writereg(sc->nfe_dev,sc->mii_phyaddr,MII_ANAR,val); + + /* get phy interface type */ + phyinterface = NFE_READ(sc,NFE_PHY_IFACE); + mii_status = nfe_mii_readreg(dev, sc->mii_phyaddr, MII_BMSR); + if (mii_status & PHY_GIGABIT) { + sc->gigabit = PHY_GIGABIT; + nfe_mii_writereg(sc->nfe_dev,sc->mii_phyaddr,MII_100T2CR,GTCR_ADV_1000TFDX|GTCR_ADV_1000THDX); + }else + sc->gigabit = 0; + + nfe_mii_writereg(sc->nfe_dev,sc->mii_phyaddr,MII_BMCR,mii_status|BMCR_RESET); + + /* phy vendor specific configuration */ + if ((sc->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { + phy_reserved = nfe_mii_readreg(dev, sc->mii_phyaddr , MII_RESV1); + phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); + phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); + if(nfe_mii_writereg(dev,sc->mii_phyaddr,MII_RESV1,phy_reserved)){ + printf("nfe%d: phy init failed.\n",sc->nfe_unit); + return PHY_ERROR; + } + phy_reserved = nfe_mii_readreg(dev,sc->mii_phyaddr,MII_NCONFIG); + phy_reserved |= PHY_CICADA_INIT5; + if (nfe_mii_writereg(dev,sc->mii_phyaddr, MII_NCONFIG, phy_reserved)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + } + if (sc->phy_oui == PHY_OUI_CICADA) { + phy_reserved = nfe_mii_readreg(dev, sc->mii_phyaddr, MII_SREVISION); + phy_reserved |= PHY_CICADA_INIT6; + if (nfe_mii_writereg(dev, sc->mii_phyaddr, MII_SREVISION, phy_reserved)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + } + if (sc->phy_oui == PHY_OUI_VITESSE) { + if
(nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) { + printf("%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + phy_reserved = nfe_mii_readreg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG4); + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + phy_reserved = nfe_mii_readreg(dev,sc->mii_phyaddr,PHY_VITESSE_INIT_REG3); + phy_reserved &= ~PHY_VITESSE_INIT_MSK1; + phy_reserved |= PHY_VITESSE_INIT3; + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + phy_reserved = nfe_mii_readreg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG4); + phy_reserved &= ~PHY_VITESSE_INIT_MSK1; + phy_reserved |= PHY_VITESSE_INIT3; + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + phy_reserved = nfe_mii_readreg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG3); + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + phy_reserved = nfe_mii_readreg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG4); + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + phy_reserved = nfe_mii_readreg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG3); + phy_reserved &= ~PHY_VITESSE_INIT_MSK2; + phy_reserved |= PHY_VITESSE_INIT8; + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + if (nfe_mii_writereg(dev,sc->mii_phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) { + printf("nfe%d: phy init failed.\n", sc->nfe_unit); + return PHY_ERROR; + } + } + + /* restart auto negotiation */ + //mii_status = nfe_mii_readreg(dev, sc->mii_phyaddr, MII_BMSR); + nfe_mii_writereg(sc->nfe_dev,sc->mii_phyaddr,MII_BMCR,BMCR_AUTOEN|BMCR_STARTNEG); +} + static int nfe_attach(device_t dev) { struct nfe_softc *sc; struct ifnet *ifp; - int unit, error = 0, rid; + int unit, error = 0, rid,i; sc = device_get_softc(dev); unit = device_get_unit(dev); @@ -240,7 +633,6 @@ MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0); - pci_enable_busmaster(dev); rid = NV_RID; @@ -274,32 +666,40 @@ switch 
(pci_get_device(dev)) { case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: + sc->nfe_flags |= NFE_HW_CSUM; + break; case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; break; - case PCI_PRODUCT_NVIDIA_MCP51_LAN1: - case PCI_PRODUCT_NVIDIA_MCP51_LAN2: - sc->nfe_flags |= NFE_40BIT_ADDR; - break; case PCI_PRODUCT_NVIDIA_CK804_LAN1: case PCI_PRODUCT_NVIDIA_CK804_LAN2: case PCI_PRODUCT_NVIDIA_MCP04_LAN1: case PCI_PRODUCT_NVIDIA_MCP04_LAN2: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM; break; + case PCI_PRODUCT_NVIDIA_MCP51_LAN1: + case PCI_PRODUCT_NVIDIA_MCP51_LAN2: + sc->nfe_flags |= NFE_40BIT_ADDR; + break; case PCI_PRODUCT_NVIDIA_MCP55_LAN1: case PCI_PRODUCT_NVIDIA_MCP55_LAN2: - sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_HW_VLAN; + sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_HW_VLAN|NFE_PAUSEFRAME_TX; + break; + case PCI_PRODUCT_NVIDIA_MCP61_LAN1: + case PCI_PRODUCT_NVIDIA_MCP61_LAN2: + case PCI_PRODUCT_NVIDIA_MCP61_LAN3: + case PCI_PRODUCT_NVIDIA_MCP61_LAN4: + sc->nfe_flags |= NFE_40BIT_ADDR|NFE_PAUSEFRAME_TX; + break; + case PCI_PRODUCT_NVIDIA_MCP65_LAN1: + case PCI_PRODUCT_NVIDIA_MCP65_LAN2: + case PCI_PRODUCT_NVIDIA_MCP65_LAN3: + case PCI_PRODUCT_NVIDIA_MCP65_LAN4: + sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM|NFE_PAUSEFRAME_TX; break; } -#ifndef NFE_NO_JUMBO - /* enable jumbo frames for adapters that support it */ - if (sc->nfe_flags & NFE_JUMBO_SUP) - sc->nfe_flags |= NFE_USE_JUMBO; -#endif - /* * Allocate the parent bus DMA tag appropriate for PCI. */ @@ -316,7 +716,16 @@ &sc->nfe_parent_tag); if (error) goto fail; - + + ifp = sc->nfe_ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) { + printf("nfe%d: can not if_alloc()\n", unit); + error = ENOSPC; + goto fail; + } + + sc->nfe_mtu = ifp->if_mtu = ETHERMTU; + /* * Allocate Tx and Rx rings. 
*/ @@ -333,48 +742,51 @@ goto fail; } - ifp = sc->nfe_ifp = if_alloc(IFT_ETHER); - if (ifp == NULL) { - printf("nfe%d: can not if_alloc()\n", unit); - error = ENOSPC; - goto fail; - } - ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); - ifp->if_mtu = ETHERMTU; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = nfe_ioctl; ifp->if_start = nfe_start; - /* ifp->if_hwassist = NFE_CSUM_FEATURES; */ + ifp->if_watchdog = nfe_watchdog; ifp->if_init = nfe_init; ifp->if_baudrate = IF_Gbps(1); ifp->if_snd.ifq_maxlen = NFE_IFQ_MAXLEN; ifp->if_capabilities = IFCAP_VLAN_MTU; -#if NVLAN > 0 + +#ifndef NFE_JUMBO + sc->nfe_flags &= ~NFE_JUMBO_SUP; +#endif + +#ifdef NVLAN if (sc->nfe_flags & NFE_HW_VLAN) ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; +#else + sc->nfe_flags &= ~NFE_HW_VLAN; #endif + #ifdef NFE_CSUM if (sc->nfe_flags & NFE_HW_CSUM) { ifp->if_capabilities |= IFCAP_HWCSUM; + ifp->if_capenable |= IFCAP_HWCSUM; + ifp->if_hwassist = NFE_CSUM_FEATURES; } +#else + sc->nfe_flags &= ~NFE_HW_CSUM; #endif + +#ifndef NFE_FLOW_CONTROL + sc->nfe_flags &= ~(NFE_PAUSEFRAME_RX|NFE_PAUSEFRAME_TX); +#endif + ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif - /* Do MII setup */ - if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd, nfe_ifmedia_sts)) { - printf("nfe%d: MII without any phy!\n", unit); - error = ENXIO; - goto fail; - } - ether_ifattach(ifp, sc->eaddr); error = bus_setup_intr(dev, sc->nfe_irq, INTR_TYPE_NET|INTR_MPSAFE, @@ -386,6 +798,47 @@ goto fail; } + /* find a suitable phy */ + for (i = 1; i <= 32; i++) { + int id1, id2; + int phyaddr = i & 0x1F; + + id1 = nfe_mii_readreg(sc->nfe_dev, phyaddr, MII_PHYIDR1); + if (id1 < 0 || id1 == 0xffff) + continue; + id2 = nfe_mii_readreg(sc->nfe_dev, phyaddr, MII_PHYIDR2); + if (id2 < 0 || id2 == 0xffff) + continue; + sc->phy_model = id2 & PHYID2_MODEL_MASK; + id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; + id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; + sc->mii_phyaddr = phyaddr; + sc->phy_oui = id1 | id2; + break; + } + if (i == 33) { + printf("nfe%d: open: Could not find a valid PHY.\n", + sc->nfe_unit); + } + + /* Do MII interface setup */ + ifmedia_init(&sc->nfe_media, IFM_IMASK, nfe_media_change, + nfe_media_status); + ifmedia_add(&sc->nfe_media, IFM_ETHER | IFM_10_T, 0, NULL); + ifmedia_add(&sc->nfe_media, IFM_ETHER | IFM_10_T | IFM_FDX, + 0, NULL); + ifmedia_add(&sc->nfe_media, IFM_ETHER | IFM_100_TX, + 0, NULL); + ifmedia_add(&sc->nfe_media, IFM_ETHER | IFM_100_TX | IFM_FDX, + 0, NULL); + ifmedia_add(&sc->nfe_media, IFM_ETHER | IFM_1000_T | IFM_FDX, + 0, NULL); + ifmedia_add(&sc->nfe_media, IFM_ETHER | IFM_1000_T, 0, NULL); + + ifmedia_add(&sc->nfe_media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(&sc->nfe_media, IFM_ETHER | IFM_AUTO); + + nfe_mii_init(dev); fail: if (error) nfe_detach(dev); @@ -393,6 +846,21 @@ return (error); } +#ifdef NFE_WOL +static void +nfe_start_rx(struct nfe_softc *sc) +{ + + /* Already running? Stop it. 
*/ + if(NFE_READ(sc,NFE_RX_CTL)==NFE_RX_START){ + NFE_WRITE(sc,NFE_RX_CTL,0); + } + NFE_WRITE(sc, NFE_LINKSPEED, sc->link_speed); + + NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); + +} +#endif static int nfe_detach(device_t dev) @@ -401,7 +869,7 @@ struct ifnet *ifp; u_char eaddr[ETHER_ADDR_LEN]; int i; - + sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized")); ifp = sc->nfe_ifp; @@ -417,7 +885,9 @@ nfe_set_macaddr(sc, eaddr); if (device_is_attached(dev)) { + NFE_LOCK(sc); nfe_stop(ifp, 1); + NFE_UNLOCK(sc); ifp->if_flags &= ~IFF_UP; callout_drain(&sc->nfe_stat_ch); ether_ifdetach(ifp); @@ -425,76 +895,29 @@ if (ifp) if_free(ifp); - if (sc->nfe_miibus) - device_delete_child(dev, sc->nfe_miibus); bus_generic_detach(dev); +#ifdef NFE_WOL + nfe_start_rx(sc); +#endif + if (sc->nfe_intrhand) bus_teardown_intr(dev, sc->nfe_irq, sc->nfe_intrhand); if (sc->nfe_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq); if (sc->nfe_res) bus_release_resource(dev, SYS_RES_MEMORY, NV_RID, sc->nfe_res); - nfe_free_tx_ring(sc, &sc->txq); nfe_free_rx_ring(sc, &sc->rxq); - if (sc->nfe_parent_tag) bus_dma_tag_destroy(sc->nfe_parent_tag); - mtx_destroy(&sc->nfe_mtx); return (0); } - -static void -nfe_miibus_statchg(device_t dev) -{ - struct nfe_softc *sc; - struct mii_data *mii; - u_int32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; - - sc = device_get_softc(dev); - mii = device_get_softc(sc->nfe_miibus); - - phy = NFE_READ(sc, NFE_PHY_IFACE); - phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); - - seed = NFE_READ(sc, NFE_RNDSEED); - seed &= ~NFE_SEED_MASK; - - if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { - phy |= NFE_PHY_HDX; /* half-duplex */ - misc |= NFE_MISC1_HDX; - } - - switch (IFM_SUBTYPE(mii->mii_media_active)) { - case IFM_1000_T: /* full-duplex only */ - link |= NFE_MEDIA_1000T; - seed |= NFE_SEED_1000T; - phy |= NFE_PHY_1000T; - break; - case IFM_100_TX: - link |= NFE_MEDIA_100TX; - seed |= NFE_SEED_100TX; - phy |= NFE_PHY_100TX; - break; - case IFM_10_T: - link |= NFE_MEDIA_10T; - seed |= NFE_SEED_10T; - break; - } - - NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? 
*/ - - NFE_WRITE(sc, NFE_PHY_IFACE, phy); - NFE_WRITE(sc, NFE_MISC1, misc); - NFE_WRITE(sc, NFE_LINKSPEED, link); -} - static int -nfe_miibus_readreg(device_t dev, int phy, int reg) +nfe_mii_readreg(device_t dev, int phy, int reg) { struct nfe_softc *sc = device_get_softc(dev); u_int32_t val; @@ -514,27 +937,27 @@ if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) break; } + if (ntries == 1000) { DPRINTFN(2, ("nfe%d: timeout waiting for PHY\n", sc->nfe_unit)); - return 0; + return -1; } if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { DPRINTFN(2, ("nfe%d: could not read PHY\n", sc->nfe_unit)); - return 0; + return -1; } val = NFE_READ(sc, NFE_PHY_DATA); - if (val != 0xffffffff && val != 0) - sc->mii_phyaddr = phy; DPRINTFN(2, ("nfe%d: mii read phy %d reg 0x%x ret 0x%x\n", sc->nfe_unit, phy, reg, val)); return val; } + static int -nfe_miibus_writereg(device_t dev, int phy, int reg, int val) +nfe_mii_writereg(device_t dev, int phy, int reg, int val) { struct nfe_softc *sc = device_get_softc(dev); u_int32_t ctl; @@ -563,13 +986,13 @@ return 0; } + static int nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; - struct nfe_jbuf *jbuf; void **desc; bus_addr_t physaddr; int i, error, descsize; @@ -583,7 +1006,7 @@ } ring->cur = ring->next = 0; - ring->bufsz = MCLBYTES; + ring->bufsz = sc->nfe_mtu + NFE_RX_HEADERS; error = bus_dma_tag_create(sc->nfe_parent_tag, PAGE_SIZE, 0, /* alignment, boundary */ @@ -619,14 +1042,6 @@ ring->rx_desc_addr = ring->rx_desc_segs.ds_addr; ring->physaddr = ring->rx_desc_addr; - if (sc->nfe_flags & NFE_USE_JUMBO) { - ring->bufsz = NFE_JBYTES; - if ((error = nfe_jpool_alloc(sc)) != 0) { - printf("nfe%d: could not allocate jumbo frames\n", sc->nfe_unit); - goto fail; - } - } - /* * Pre-allocate Rx buffers and populate Rx ring. 
*/ @@ -640,56 +1055,44 @@ goto fail; } - if (sc->nfe_flags & NFE_USE_JUMBO) { - if ((jbuf = nfe_jalloc(sc)) == NULL) { - printf("nfe%d: could not allocate jumbo buffer\n", sc->nfe_unit); - goto fail; - } - data->m->m_data = (void *)jbuf->buf; - data->m->m_len = data->m->m_pkthdr.len = NFE_JBYTES; - MEXTADD(data->m, jbuf->buf, NFE_JBYTES, nfe_jfree, (struct nfe_softc *)sc, 0, EXT_NET_DRV); - /* m_adj(data->m, ETHER_ALIGN); */ - physaddr = jbuf->physaddr; - } else { - error = bus_dma_tag_create(sc->nfe_parent_tag, - PAGE_SIZE, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MCLBYTES, 1, /* maxsize, nsegments */ - MCLBYTES, /* maxsegsize */ - BUS_DMA_ALLOCNOW, /* flags */ - NULL, NULL, /* lockfunc, lockarg */ - &data->rx_data_tag); - if (error != 0) { - printf("nfe%d: could not create DMA map\n", sc->nfe_unit); - goto fail; - } - - error = bus_dmamap_create(data->rx_data_tag, 0, &data->rx_data_map); - if (error != 0) { - printf("nfe%d: could not allocate mbuf cluster\n", sc->nfe_unit); - goto fail; - } - - MCLGET(data->m, M_DONTWAIT); - if (!(data->m->m_flags & M_EXT)) { - error = ENOMEM; - goto fail; - } - - error = bus_dmamap_load(data->rx_data_tag, data->rx_data_map, mtod(data->m, void *), - MCLBYTES, nfe_dma_map_segs, &data->rx_data_segs, BUS_DMA_NOWAIT); - if (error != 0) { - printf("nfe%d: could not load rx buf DMA map", sc->nfe_unit); - goto fail; - } + error = bus_dma_tag_create(sc->nfe_parent_tag, + PAGE_SIZE, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + ring->bufsz, 1, /* maxsize, nsegments */ + ring->bufsz, /* maxsegsize */ + BUS_DMA_ALLOCNOW, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &data->rx_data_tag); + if (error != 0) { + printf("nfe%d: could not create DMA map\n", sc->nfe_unit); + goto fail; + } + + error = bus_dmamap_create(data->rx_data_tag, 0, &data->rx_data_map); + if (error != 0) { + printf("nfe%d: could not allocate mbuf cluster\n", sc->nfe_unit); + goto fail; + } - data->rx_data_addr = data->rx_data_segs.ds_addr; - physaddr = data->rx_data_addr; + MCLGET(data->m, M_DONTWAIT); + if (!(data->m->m_flags & M_EXT)) { + error = ENOMEM; + goto fail; + } + error = bus_dmamap_load(data->rx_data_tag, data->rx_data_map, mtod(data->m, void *), + MCLBYTES, nfe_dma_map_segs, &data->rx_data_segs, BUS_DMA_NOWAIT); + if (error != 0) { + printf("nfe%d: could not load rx buf DMA map", sc->nfe_unit); + goto fail; } + data->rx_data_addr = data->rx_data_segs.ds_addr; + physaddr = data->rx_data_addr; + + if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[i]; #if defined(__LP64__) @@ -700,141 +1103,21 @@ desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->rxq.desc32[i]; - desc32->physaddr = htole32(physaddr); - desc32->length = htole16(sc->rxq.bufsz); - desc32->flags = htole16(NFE_RX_READY); - } - - } - - bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREWRITE); - - return 0; - -fail: nfe_free_rx_ring(sc, ring); - - return error; -} - -static int -nfe_jpool_alloc(struct nfe_softc *sc) -{ - struct nfe_rx_ring *ring = &sc->rxq; - struct nfe_jbuf *jbuf; - bus_addr_t physaddr; - caddr_t buf; - int i, error; - - /* - * Allocate a big chunk of DMA'able memory. 
- */ - error = bus_dma_tag_create(sc->nfe_parent_tag, - PAGE_SIZE, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - NFE_JPOOL_SIZE, 1, /* maxsize, nsegments */ - NFE_JPOOL_SIZE, /* maxsegsize */ - BUS_DMA_ALLOCNOW, /* flags */ - NULL, NULL, /* lockfunc, lockarg */ - &ring->rx_jumbo_tag); - if (error != 0) { - printf("nfe%d: could not create jumbo DMA tag\n", sc->nfe_unit); - goto fail; - } - error = bus_dmamem_alloc(ring->rx_jumbo_tag, (void **)&ring->jpool, BUS_DMA_NOWAIT, &ring->rx_jumbo_map); - if (error != 0) { - printf("nfe%d: could not create jumbo DMA memory\n", sc->nfe_unit); - goto fail; - } - - error = bus_dmamap_load(ring->rx_jumbo_tag, ring->rx_jumbo_map, ring->jpool, - NFE_JPOOL_SIZE, nfe_dma_map_segs, &ring->rx_jumbo_segs, BUS_DMA_NOWAIT); - if (error != 0) { - printf("nfe%d: could not load jumbo DMA map\n", sc->nfe_unit); - goto fail; - } - - /* ..and split it into 9KB chunks */ - SLIST_INIT(&ring->jfreelist); - - buf = ring->jpool; - ring->rx_jumbo_addr = ring->rx_jumbo_segs.ds_addr; - physaddr = ring->rx_jumbo_addr; - - for (i = 0; i < NFE_JPOOL_COUNT; i++) { - jbuf = &ring->jbuf[i]; - - jbuf->buf = buf; - jbuf->physaddr = physaddr; - - SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); - - buf += NFE_JBYTES; - physaddr += NFE_JBYTES; - } - - return 0; - -fail: nfe_jpool_free(sc); - return error; -} - - -static void -nfe_jpool_free(struct nfe_softc *sc) -{ - struct nfe_rx_ring *ring = &sc->rxq; - - if (ring->jpool != NULL) { -#if 0 - bus_dmamem_unmap(ring->rx_jumbo_tag, ring->jpool, NFE_JPOOL_SIZE); -#endif - bus_dmamem_free(ring->rx_jumbo_tag, &ring->rx_jumbo_segs, ring->rx_jumbo_map); - } - if (ring->rx_jumbo_map != NULL) { - bus_dmamap_sync(ring->rx_jumbo_tag, ring->rx_jumbo_map, BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(ring->rx_jumbo_tag, ring->rx_jumbo_map); - bus_dmamap_destroy(ring->rx_jumbo_tag, ring->rx_jumbo_map); - } -} - -static struct nfe_jbuf * -nfe_jalloc(struct nfe_softc *sc) -{ - struct nfe_jbuf *jbuf; - - jbuf = SLIST_FIRST(&sc->rxq.jfreelist); - if (jbuf == NULL) - return NULL; - SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext); - return jbuf; -} - -/* - * This is called automatically by the network stack when the mbuf is freed. - * Caution must be taken that the NIC might be reset by the time the mbuf is - * freed. 
- */ -static void -nfe_jfree(void *buf, void *arg) -{ - struct nfe_softc *sc = arg; - struct nfe_jbuf *jbuf; - int i; + desc32->physaddr = htole32(physaddr); + desc32->length = htole16(sc->rxq.bufsz); + desc32->flags = htole16(NFE_RX_READY); + } - /* find the jbuf from the base pointer */ - i = ((vm_offset_t)buf - (vm_offset_t)sc->rxq.jpool) / NFE_JBYTES; - if (i < 0 || i >= NFE_JPOOL_COUNT) { - printf("nfe%d: request to free a buffer (%p) not managed by us\n", sc->nfe_unit, buf); - return; } - jbuf = &sc->rxq.jbuf[i]; - /* ..and put it back in the free list */ - SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext); -} + bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREWRITE); + + return 0; + +fail: nfe_free_rx_ring(sc, ring); + return error; +} static void nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) @@ -856,7 +1139,6 @@ ring->cur = ring->next = 0; } - static void nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { @@ -880,21 +1162,17 @@ } - if (sc->nfe_flags & NFE_USE_JUMBO) { - nfe_jpool_free(sc); - } else { - for (i = 0; i < NFE_RX_RING_COUNT; i++) { - data = &ring->data[i]; + for (i = 0; i < NFE_RX_RING_COUNT; i++) { + data = &ring->data[i]; - if (data->rx_data_map != NULL) { - bus_dmamap_sync(data->rx_data_tag, data->rx_data_map, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(data->rx_data_tag, data->rx_data_map); - bus_dmamap_destroy(data->rx_data_tag, data->rx_data_map); - bus_dma_tag_destroy(data->rx_data_tag); - } - if (data->m != NULL) - m_freem(data->m); - } + if (data->rx_data_map != NULL) { + bus_dmamap_sync(data->rx_data_tag, data->rx_data_map, BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(data->rx_data_tag, data->rx_data_map); + bus_dmamap_destroy(data->rx_data_tag, data->rx_data_map); + bus_dma_tag_destroy(data->rx_data_tag); + } + if (data->m != NULL) + m_freem(data->m); } } @@ -917,15 +1195,15 @@ ring->cur = ring->next = 0; error = bus_dma_tag_create(sc->nfe_parent_tag, - PAGE_SIZE, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ - NFE_TX_RING_COUNT * descsize, /* maxsegsize */ - BUS_DMA_ALLOCNOW, /* flags */ - NULL, NULL, /* lockfunc, lockarg */ - &ring->tx_desc_tag); + PAGE_SIZE, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ + NFE_TX_RING_COUNT * descsize, /* maxsegsize */ + BUS_DMA_ALLOCNOW, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &ring->tx_desc_tag); if (error != 0) { printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit); goto fail; @@ -938,7 +1216,7 @@ } error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, *desc, - NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->tx_desc_segs, BUS_DMA_NOWAIT); + NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->tx_desc_segs, BUS_DMA_NOWAIT); if (error != 0) { printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit); goto fail; @@ -950,18 +1228,18 @@ ring->physaddr = ring->tx_desc_addr; error = bus_dma_tag_create(sc->nfe_parent_tag, - ETHER_ALIGN, 0, - BUS_SPACE_MAXADDR_32BIT, - BUS_SPACE_MAXADDR, - NULL, NULL, + ETHER_ALIGN, 0, + BUS_SPACE_MAXADDR_32BIT, + BUS_SPACE_MAXADDR, + NULL, NULL, NFE_JBYTES, NFE_MAX_SCATTER, NFE_JBYTES, - BUS_DMA_ALLOCNOW, - NULL, NULL, - &ring->tx_data_tag); + BUS_DMA_ALLOCNOW, + NULL, NULL, + 
&ring->tx_data_tag); if (error != 0) { - printf("nfe%d: could not create DMA tag\n", sc->nfe_unit); - goto fail; + printf("nfe%d: could not create DMA tag\n", sc->nfe_unit); + goto fail; } for (i = 0; i < NFE_TX_RING_COUNT; i++) { @@ -1005,6 +1283,7 @@ ring->queued = 0; ring->cur = ring->next = 0; + } static void @@ -1013,7 +1292,7 @@ struct nfe_tx_data *data; void *desc; int i, descsize; - + if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); @@ -1084,123 +1363,179 @@ nfe_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { - if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) { - return; - } - NFE_WRITE(sc, NFE_IRQ_STATUS, r); - - if (r & NFE_IRQ_LINK) { - NFE_READ(sc, NFE_PHY_STATUS); - NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); - DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit)); - } + if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) { + return; + } + NFE_WRITE(sc, NFE_IRQ_STATUS, r); + + if (r & NFE_IRQ_LINK) { + NFE_READ(sc, NFE_PHY_STATUS); + NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); + DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit)); + } } } #endif /* DEVICE_POLLING */ - static int nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { int error = 0; struct nfe_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; - struct mii_data *mii; switch (cmd) { - case SIOCSIFMTU: - if (ifr->ifr_mtu < ETHERMIN || - ((sc->nfe_flags & NFE_USE_JUMBO) && - ifr->ifr_mtu > ETHERMTU_JUMBO) || - (!(sc->nfe_flags & NFE_USE_JUMBO) && - ifr->ifr_mtu > ETHERMTU)) - error = EINVAL; - else if (ifp->if_mtu != ifr->ifr_mtu) { - ifp->if_mtu = ifr->ifr_mtu; - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - nfe_init(sc); - } - break; - case SIOCSIFFLAGS: - NFE_LOCK(sc); - if (ifp->if_flags & IFF_UP) { - /* - * If only the PROMISC or ALLMULTI flag changes, then - * don't do a full re-init of the chip, just update - * the Rx filter. - */ - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && - ((ifp->if_flags ^ sc->nfe_if_flags) & - (IFF_ALLMULTI | IFF_PROMISC)) != 0) - nfe_setmulti(sc); - else - nfe_init_locked(sc); - } else { - if (ifp->if_drv_flags & IFF_DRV_RUNNING) + case SIOCSIFMTU: + if(ifr->ifr_mtu==ifp->if_mtu){ + error = EINVAL; + break; + } + + if((sc->nfe_flags & NFE_JUMBO_SUP ) && (ifr->ifr_mtu > ETHERMIN || ifr->ifr_mtu < NV_PKTLIMIT_2)) { + sc->nfe_mtu = ifp->if_mtu = ifr->ifr_mtu; nfe_stop(ifp, 1); - } - sc->nfe_if_flags = ifp->if_flags; - NFE_UNLOCK(sc); - error = 0; - break; - case SIOCADDMULTI: - case SIOCDELMULTI: - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + nfe_free_tx_ring(sc, &sc->txq); + nfe_free_rx_ring(sc, &sc->rxq); + + /* + * Reallocate Tx and Rx rings. + */ + if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) { + printf("nfe%d: could not allocate Tx ring\n",sc->nfe_unit); + error = ENXIO; + break; + } + + if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) { + printf("nfe%d: could not allocate Rx ring\n", sc->nfe_unit); + nfe_free_tx_ring(sc, &sc->txq); + error = ENXIO; + break; + } + + nfe_init_locked(sc); + }else + error = EINVAL; + + break; + case SIOCSIFFLAGS: NFE_LOCK(sc); - nfe_setmulti(sc); + if (ifp->if_flags & IFF_UP) { + /* + * If only the PROMISC or ALLMULTI flag changes, then + * don't do a full re-init of the chip, just update + * the Rx filter. 
+ */ + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && + ((ifp->if_flags ^ sc->nfe_if_flags) & + (IFF_ALLMULTI | IFF_PROMISC)) != 0) + nfe_setmulti(sc); + else + nfe_init_locked(sc); + } else { + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + nfe_stop(ifp, 1); + } + sc->nfe_if_flags = ifp->if_flags; NFE_UNLOCK(sc); error = 0; - } - break; - case SIOCSIFMEDIA: - case SIOCGIFMEDIA: - mii = device_get_softc(sc->nfe_miibus); - error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); - break; - case SIOCSIFCAP: - { - int mask = ifr->ifr_reqcap ^ ifp->if_capenable; -#ifdef DEVICE_POLLING - if (mask & IFCAP_POLLING) { - if (ifr->ifr_reqcap & IFCAP_POLLING) { - error = ether_poll_register(nfe_poll, ifp); - if (error) - return(error); - NFE_LOCK(sc); - NFE_WRITE(sc, NFE_IRQ_MASK, 0); - ifp->if_capenable |= IFCAP_POLLING; - NFE_UNLOCK(sc); - } else { - error = ether_poll_deregister(ifp); - /* Enable interrupt even in error case */ + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { NFE_LOCK(sc); - NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); - ifp->if_capenable &= ~IFCAP_POLLING; + nfe_setmulti(sc); NFE_UNLOCK(sc); + error = 0; } - } + break; + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + ifmedia_ioctl(ifp, ifr, &sc->nfe_media, cmd); + break; + case SIOCSIFCAP: + { + int init=0; + int mask =0; + mask = ifr->ifr_reqcap ^ ifp->if_capenable; +#ifdef DEVICE_POLLING + if (mask & IFCAP_POLLING) { + if (ifr->ifr_reqcap & IFCAP_POLLING) { + error = ether_poll_register(nfe_poll, ifp); + if (error) + return(error); + NFE_LOCK(sc); + NFE_WRITE(sc, NFE_IRQ_MASK, 0); + ifp->if_capenable |= IFCAP_POLLING; + NFE_UNLOCK(sc); + } else { + error = ether_poll_deregister(ifp); + /* Enable interrupt even in error case */ + NFE_LOCK(sc); + NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); + ifp->if_capenable &= ~IFCAP_POLLING; + NFE_UNLOCK(sc); + } + } #endif - if (mask & IFCAP_HWCSUM) { - ifp->if_capenable ^= IFCAP_HWCSUM; - if (IFCAP_HWCSUM & ifp->if_capenable && - IFCAP_HWCSUM & ifp->if_capabilities) - ifp->if_hwassist = NFE_CSUM_FEATURES; - else - ifp->if_hwassist = 0; - } - } - break; - default: - error = ether_ioctl(ifp, cmd, data); - break; +#ifdef NFE_CSUM + if (mask & IFCAP_HWCSUM) { + ifp->if_capenable ^= IFCAP_HWCSUM; + if (IFCAP_HWCSUM & ifp->if_capenable && + IFCAP_HWCSUM & ifp->if_capabilities) + ifp->if_hwassist = NFE_CSUM_FEATURES; + else + ifp->if_hwassist = 0; + + sc->nfe_flags ^= NFE_HW_CSUM; + init = 1; + } +#endif + +#ifdef NVLAN + if (mask & IFCAP_VLAN_HWTAGGING) { + ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; + sc->nfe_flags ^= NFE_HW_VLAN ; + init = 1; + } +#endif + + if (init && ifp->if_drv_flags & IFF_DRV_RUNNING) + nfe_init(sc); + + } + break; + + default: + error = ether_ioctl(ifp, cmd, data); + break; } return error; } -static void nfe_intr(void *arg) +static void +nfe_link_change(struct nfe_softc *sc) +{ + nfe_check_for_link(sc); +} + +static void +nfe_link_intr(struct nfe_softc *sc) +{ + u_int32_t miistat; + + miistat = NFE_READ(sc,NFE_PHY_STATUS); + NFE_WRITE(sc,NFE_PHY_STATUS,NFE_MIISTAT_MASK); + if(miistat & NFE_MII_INTERRUPT) + nfe_link_change(sc); + +} + +static void +nfe_intr(void *arg) { struct nfe_softc *sc = arg; struct ifnet *ifp = sc->nfe_ifp; @@ -1216,7 +1551,7 @@ #endif if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) { - NFE_UNLOCK(sc); + NFE_UNLOCK(sc); return; /* not for us */ } NFE_WRITE(sc, NFE_IRQ_STATUS, r); @@ -1228,6 +1563,7 @@ if (r & NFE_IRQ_LINK) { NFE_READ(sc, NFE_PHY_STATUS); NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); + nfe_link_intr(sc); 
DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit)); } @@ -1241,7 +1577,7 @@ NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); if (ifp->if_drv_flags & IFF_DRV_RUNNING && - !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) + !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) nfe_start_locked(ifp); NFE_UNLOCK(sc); @@ -1249,60 +1585,25 @@ return; } -static void -nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) -{ - bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops); -} - -static void -nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) -{ - bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops); -} - -static void -nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops) -{ - bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops); -} - -static void -nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops) -{ - bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops); -} - -static void -nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) -{ - bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops); -} - -static void -nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) -{ - bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops); -} -static void nfe_rxeof(struct nfe_softc *sc) +static void +nfe_rxeof(struct nfe_softc *sc) { struct ifnet *ifp = sc->nfe_ifp; struct nfe_desc32 *desc32=NULL; struct nfe_desc64 *desc64=NULL; struct nfe_rx_data *data; - struct nfe_jbuf *jbuf; struct mbuf *m, *mnew; bus_addr_t physaddr; - u_int16_t flags; - int error, len; -#if NVLAN > 1 + u_int16_t flags=0; + int error=0, len=0; u_int16_t vlan_tag = 0; int have_tag = 0; -#endif NFE_LOCK_ASSERT(sc); + bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, + BUS_DMASYNC_POSTREAD); for (;;) { @@ -1318,21 +1619,16 @@ if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[sc->rxq.cur]; - nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD); - flags = letoh16(desc64->flags); len = letoh16(desc64->length) & 0x3fff; - -#if NVLAN > 1 - if (flags & NFE_TX_VLAN_TAG) { - have_tag = 1; - vlan_tag = desc64->vtag; + if(sc->nfe_flags & NFE_HW_VLAN){ + vlan_tag = htole32(desc64->physaddr[1]); + if (vlan_tag & NFE_VLAN_TAG_PRESENT) { + have_tag = 1; + } } -#endif - } else { desc32 = &sc->rxq.desc32[sc->rxq.cur]; - nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD); flags = letoh16(desc32->flags); len = letoh16(desc32->length) & 0x3fff; @@ -1376,54 +1672,38 @@ goto skip; } - if (sc->nfe_flags & NFE_USE_JUMBO) { - if ((jbuf = nfe_jalloc(sc)) == NULL) { - m_freem(mnew); - ifp->if_ierrors++; - goto skip; - } - mnew->m_data = (void *)jbuf->buf; - mnew->m_len = mnew->m_pkthdr.len = NFE_JBYTES; - MEXTADD(mnew, jbuf->buf, NFE_JBYTES, nfe_jfree, - (struct nfe_softc *)sc, 0 , EXT_NET_DRV); - - bus_dmamap_sync(sc->rxq.rx_jumbo_tag, - sc->rxq.rx_jumbo_map, BUS_DMASYNC_POSTREAD); - physaddr = jbuf->physaddr; - } else { - MCLGET(mnew, M_DONTWAIT); - if (!(mnew->m_flags & M_EXT)) { - m_freem(mnew); - ifp->if_ierrors++; - goto skip; - } + MCLGET(mnew, M_DONTWAIT); + if (!(mnew->m_flags & M_EXT)) { + m_freem(mnew); + ifp->if_ierrors++; + goto skip; + } - bus_dmamap_sync(data->rx_data_tag, data->rx_data_map, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(data->rx_data_tag, data->rx_data_map); + bus_dmamap_sync(data->rx_data_tag, data->rx_data_map, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(data->rx_data_tag, data->rx_data_map); + error = bus_dmamap_load(data->rx_data_tag, + data->rx_data_map, mtod(mnew, void 
*), MCLBYTES, + nfe_dma_map_segs, &data->rx_data_segs, + BUS_DMA_NOWAIT); + if (error != 0) { + m_freem(mnew); + + /* try to reload the old mbuf */ error = bus_dmamap_load(data->rx_data_tag, - data->rx_data_map, mtod(mnew, void *), MCLBYTES, - nfe_dma_map_segs, &data->rx_data_segs, - BUS_DMA_NOWAIT); + data->rx_data_map, mtod(data->m, void *), + MCLBYTES, nfe_dma_map_segs, + &data->rx_data_segs, BUS_DMA_NOWAIT); if (error != 0) { - m_freem(mnew); - - /* try to reload the old mbuf */ - error = bus_dmamap_load(data->rx_data_tag, - data->rx_data_map, mtod(data->m, void *), - MCLBYTES, nfe_dma_map_segs, - &data->rx_data_segs, BUS_DMA_NOWAIT); - if (error != 0) { - /* very unlikely that it will fail.. */ - panic("nfe%d: could not load old rx mbuf", - sc->nfe_unit); - } - ifp->if_ierrors++; - goto skip; + /* very unlikely that it will fail.. */ + panic("nfe%d: could not load old rx mbuf", + sc->nfe_unit); } - data->rx_data_addr = data->rx_data_segs.ds_addr; - physaddr = data->rx_data_addr; + ifp->if_ierrors++; + goto skip; } + data->rx_data_addr = data->rx_data_segs.ds_addr; + physaddr = data->rx_data_addr; /* * New mbuf successfully loaded, update Rx ring and continue @@ -1436,30 +1716,24 @@ m->m_pkthdr.len = m->m_len = len; m->m_pkthdr.rcvif = ifp; - -#if defined(NFE_CSUM) if ((sc->nfe_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (flags & NFE_RX_IP_CSUMOK_V2) { - m->m_pkthdr.csum_flags |= CSUM_IP_VALID; + m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (flags & NFE_RX_UDP_CSUMOK_V2 || - flags & NFE_RX_TCP_CSUMOK_V2) { + flags & NFE_RX_TCP_CSUMOK_V2) { m->m_pkthdr.csum_flags |= - CSUM_DATA_VALID|CSUM_PSEUDO_HDR; + CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } -#endif -#if NVLAN > 1 if (have_tag) { VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag); if (m == NULL) continue; } -#endif - ifp->if_ipackets++; NFE_UNLOCK(sc); @@ -1480,19 +1754,23 @@ desc64->length = htole16(sc->rxq.bufsz); desc64->flags = htole16(NFE_RX_READY); - nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, + BUS_DMASYNC_PREWRITE); } else { desc32->length = htole16(sc->rxq.bufsz); desc32->flags = htole16(NFE_RX_READY); - nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, + BUS_DMASYNC_PREWRITE); } sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT; } + } -static void nfe_txeof(struct nfe_softc *sc) +static void +nfe_txeof(struct nfe_softc *sc) { struct ifnet *ifp = sc->nfe_ifp; struct nfe_desc32 *desc32; @@ -1502,22 +1780,21 @@ NFE_LOCK_ASSERT(sc); + bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, + BUS_DMASYNC_POSTREAD); + while (sc->txq.next != sc->txq.cur) { if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->txq.desc64[sc->txq.next]; - nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD); - flags = letoh16(desc64->flags); } else { desc32 = &sc->txq.desc32[sc->txq.next]; - nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD); flags = letoh16(desc32->flags); } if (flags & NFE_TX_VALID) break; - data = &sc->txq.data[sc->txq.next]; if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { @@ -1525,9 +1802,6 @@ goto skip; if ((flags & NFE_TX_ERROR_V1) != 0) { - printf("nfe%d: tx v1 error 0x%4b\n", - sc->nfe_unit, flags, NFE_V1_TXERR); - ifp->if_oerrors++; } else ifp->if_opackets++; @@ -1536,23 +1810,20 @@ goto skip; if ((flags & NFE_TX_ERROR_V2) != 0) { - printf("nfe%d: tx v1 error 0x%4b\n", - sc->nfe_unit, flags, NFE_V2_TXERR); - 
ifp->if_oerrors++; } else ifp->if_opackets++; } if (data->m == NULL) { /* should not get there */ - printf("nfe%d: last fragment bit w/o associated mbuf!\n", - sc->nfe_unit); + printf("nfe%d: last fragment bit w/o associated mbuf!\n", + sc->nfe_unit); goto skip; } /* last fragment of the mbuf chain transmitted */ bus_dmamap_sync(sc->txq.tx_data_tag, data->active, - BUS_DMASYNC_POSTWRITE); + BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txq.tx_data_tag, data->active); m_freem(data->m); data->m = NULL; @@ -1569,28 +1840,27 @@ } } -static int nfe_encap(struct nfe_softc *sc, struct mbuf *m0) +static int +nfe_encap(struct nfe_softc *sc, struct mbuf *m0) { struct nfe_desc32 *desc32=NULL; struct nfe_desc64 *desc64=NULL; struct nfe_tx_data *data=NULL; bus_dmamap_t map; u_int16_t flags = NFE_TX_VALID; -#if NVLAN > 0 - struct m_tag *vtag; -#endif + struct m_tag *vtag=NULL; bus_dma_segment_t segs[NFE_MAX_SCATTER]; int nsegs; int error, i; - + map = sc->txq.data[sc->txq.cur].tx_data_map; error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, m0, segs, - &nsegs, BUS_DMA_NOWAIT); + &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("nfe%d: could not map mbuf (error %d)\n", sc->nfe_unit, - error); + error); return error; } @@ -1599,20 +1869,18 @@ return ENOBUFS; } - -#if NVLAN > 0 /* setup h/w VLAN tagging */ - vtag = VLAN_OUTPUT_TAG(sc->nfe_ifp, m0); -#endif + if(sc->nfe_flags & NFE_HW_VLAN) + vtag = VLAN_OUTPUT_TAG(sc->nfe_ifp, m0); -#ifdef NFE_CSUM - if (m0->m_pkthdr.csum_flags & CSUM_IP) - flags |= NFE_TX_IP_CSUM; - if (m0->m_pkthdr.csum_flags & CSUM_TCP) - flags |= NFE_TX_TCP_CSUM; - if (m0->m_pkthdr.csum_flags & CSUM_UDP) - flags |= NFE_TX_TCP_CSUM; -#endif + if(sc->nfe_flags & NFE_HW_CSUM){ + if (m0->m_pkthdr.csum_flags & CSUM_IP) + flags |= NFE_TX_IP_CSUM; + if (m0->m_pkthdr.csum_flags & CSUM_TCP) + flags |= NFE_TX_TCP_CSUM; + if (m0->m_pkthdr.csum_flags & CSUM_UDP) + flags |= NFE_TX_TCP_CSUM; + } for (i = 0; i < nsegs; i++) { data = &sc->txq.data[sc->txq.cur]; @@ -1626,10 +1894,13 @@ 0xffffffff); desc64->length = htole16(segs[i].ds_len - 1); desc64->flags = htole16(flags); -#if NVLAN > 0 - desc64->vtag = htole32(NFE_TX_VTAG | - VLAN_TAG_VALUE(vtag)); -#endif + + if(sc->nfe_flags & NFE_HW_VLAN){ + if(vtag != NULL) { + desc64->vtag = htole32(NFE_TX_VTAG | + VLAN_TAG_VALUE(vtag)); + } + } } else { desc32 = &sc->txq.desc32[sc->txq.cur]; @@ -1641,11 +1912,9 @@ /* csum flags and vtag belong to the first fragment only */ if (nsegs > 1) { flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM); -#if NVLAN > 0 - vtag = 0; -#endif + if(sc->nfe_flags & NFE_HW_VLAN) + vtag = 0; } - sc->txq.queued++; sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT; } @@ -1672,7 +1941,8 @@ } -static void nfe_setmulti(struct nfe_softc *sc) +static void +nfe_setmulti(struct nfe_softc *sc) { struct ifnet *ifp = sc->nfe_ifp; struct ifmultiaddr *ifma; @@ -1729,17 +1999,19 @@ NFE_WRITE(sc, NFE_RXFILTER, filter); } -static void nfe_start(struct ifnet *ifp) +static void +nfe_start(struct ifnet *ifp) { struct nfe_softc *sc; - + sc = ifp->if_softc; NFE_LOCK(sc); nfe_start_locked(ifp); NFE_UNLOCK(sc); } -static void nfe_start_locked(struct ifnet *ifp) +static void +nfe_start_locked(struct ifnet *ifp) { struct nfe_softc *sc = ifp->if_softc; int old = sc->txq.cur; @@ -1749,18 +2021,17 @@ return; } - for (;;) { - IFQ_POLL(&ifp->if_snd, m0); + while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { + IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; if (nfe_encap(sc, m0) != 0) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; + IFQ_DRV_PREPEND(&ifp->if_snd, 
m0); break; } - /* packet put in h/w queue, remove from s/w queue */ - IFQ_DEQUEUE(&ifp->if_snd, m0); BPF_MTAP(ifp, m0); } @@ -1769,9 +2040,11 @@ } if (sc->nfe_flags & NFE_40BIT_ADDR) - nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, + BUS_DMASYNC_PREWRITE); else - nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, + BUS_DMASYNC_PREWRITE); /* kick Tx */ NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); @@ -1784,12 +2057,11 @@ return; } -static void nfe_watchdog(struct ifnet *ifp) +static void +nfe_watchdog(struct ifnet *ifp) { struct nfe_softc *sc = ifp->if_softc; - printf("nfe%d: watchdog timeout\n", sc->nfe_unit); - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; nfe_init(sc); @@ -1798,7 +2070,8 @@ return; } -static void nfe_init(void *xsc) +static void +nfe_init(void *xsc) { struct nfe_softc *sc = xsc; @@ -1809,23 +2082,20 @@ return; } -static void nfe_init_locked(void *xsc) +static void +nfe_init_locked(void *xsc) { struct nfe_softc *sc = xsc; struct ifnet *ifp = sc->nfe_ifp; - struct mii_data *mii; u_int32_t tmp; NFE_LOCK_ASSERT(sc); - mii = device_get_softc(sc->nfe_miibus); - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { return; } nfe_stop(ifp, 0); - NFE_WRITE(sc, NFE_TX_UNK, 0); NFE_WRITE(sc, NFE_STATUS, 0); @@ -1834,12 +2104,10 @@ sc->rxtxctl |= NFE_RXTX_V3MAGIC; else if (sc->nfe_flags & NFE_JUMBO_SUP) sc->rxtxctl |= NFE_RXTX_V2MAGIC; -#ifdef NFE_CSUM + if (sc->nfe_flags & NFE_HW_CSUM) sc->rxtxctl |= NFE_RXTX_RXCSUM; -#endif -#if NVLAN > 0 /* * Although the adapter is capable of stripping VLAN tags from received * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on @@ -1847,16 +2115,13 @@ */ if (sc->nfe_flags & NFE_HW_VLAN) sc->rxtxctl |= NFE_RXTX_VTAG_INSERT; -#endif NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); DELAY(10); NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); -#if NVLAN if (sc->nfe_flags & NFE_HW_VLAN) NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); -#endif NFE_WRITE(sc, NFE_SETUP_R6, 0); @@ -1901,8 +2166,10 @@ /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); - NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); + //NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); +#ifdef NFE_WOL NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC); +#endif sc->rxtxctl &= ~NFE_RXTX_BIT2; NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); @@ -1912,10 +2179,8 @@ /* set Rx filter */ nfe_setmulti(sc); - nfe_ifmedia_upd(ifp); - - nfe_tick_locked(sc); - + sc->nfe_link++; + /* enable Rx */ NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); @@ -1934,23 +2199,19 @@ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; - sc->nfe_link = 0; - return; } -static void nfe_stop(struct ifnet *ifp, int disable) +static void +nfe_stop(struct ifnet *ifp, int disable) { struct nfe_softc *sc = ifp->if_softc; - struct mii_data *mii; NFE_LOCK_ASSERT(sc); ifp->if_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); - mii = device_get_softc(sc->nfe_miibus); - callout_stop(&sc->nfe_stat_ch); /* abort Tx */ @@ -1971,101 +2232,8 @@ return; } -static int nfe_ifmedia_upd(struct ifnet *ifp) -{ - struct nfe_softc *sc = ifp->if_softc; - - NFE_LOCK(sc); - nfe_ifmedia_upd_locked(ifp); - NFE_UNLOCK(sc); - return (0); -} - -static int nfe_ifmedia_upd_locked(struct ifnet *ifp) -{ - struct nfe_softc *sc = ifp->if_softc; - struct mii_data *mii; - - NFE_LOCK_ASSERT(sc); - - mii 
= device_get_softc(sc->nfe_miibus); - - if (mii->mii_instance) { - struct mii_softc *miisc; - for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; - miisc = LIST_NEXT(miisc, mii_list)) { - mii_phy_reset(miisc); - } - } - mii_mediachg(mii); - - return (0); -} - -static void nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) -{ - struct nfe_softc *sc; - struct mii_data *mii; - - sc = ifp->if_softc; - - NFE_LOCK(sc); - mii = device_get_softc(sc->nfe_miibus); - mii_pollstat(mii); - NFE_UNLOCK(sc); - - ifmr->ifm_active = mii->mii_media_active; - ifmr->ifm_status = mii->mii_media_status; - - return; -} - static void -nfe_tick(void *xsc) -{ - struct nfe_softc *sc; - - sc = xsc; - - NFE_LOCK(sc); - nfe_tick_locked(sc); - NFE_UNLOCK(sc); -} - - -void nfe_tick_locked(struct nfe_softc *arg) -{ - struct nfe_softc *sc; - struct mii_data *mii; - struct ifnet *ifp; - - sc = arg; - - NFE_LOCK_ASSERT(sc); - - ifp = sc->nfe_ifp; - - mii = device_get_softc(sc->nfe_miibus); - mii_tick(mii); - - if (!sc->nfe_link) { - if (mii->mii_media_status & IFM_ACTIVE && - IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { - sc->nfe_link++; - if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T - && bootverbose) - if_printf(sc->nfe_ifp, "gigabit link up\n"); - if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) - nfe_start_locked(ifp); - } - } - callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); - - return; -} - - -static void nfe_shutdown(device_t dev) +nfe_shutdown(device_t dev) { struct nfe_softc *sc; struct ifnet *ifp; @@ -2082,7 +2250,8 @@ } -static void nfe_get_macaddr(struct nfe_softc *sc, u_char *addr) +static void +nfe_get_macaddr(struct nfe_softc *sc, u_char *addr) { uint32_t tmp; @@ -2097,7 +2266,8 @@ addr[5] = (tmp & 0xff); } -static void nfe_set_macaddr(struct nfe_softc *sc, u_char *addr) +static void +nfe_set_macaddr(struct nfe_softc *sc, u_char *addr) { NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]); @@ -2108,7 +2278,6 @@ /* * Map a single buffer address. 
  */
-
 static void
 nfe_dma_map_segs(arg, segs, nseg, error)
 	void *arg;
diff -u -r old/if_nfereg.h nfe/if_nfereg.h
--- old/if_nfereg.h	Thu Aug 10 02:23:28 2006
+++ nfe/if_nfereg.h	Fri Aug 11 02:09:50 2006
@@ -24,8 +24,16 @@
 #define NFE_TX_RING_COUNT	256

 #define NFE_JBYTES	(ETHER_MAX_LEN_JUMBO + ETHER_ALIGN)
-#define NFE_JPOOL_COUNT	(NFE_RX_RING_COUNT + 64)
-#define NFE_JPOOL_SIZE	(NFE_JPOOL_COUNT * NFE_JBYTES)
+
+/* maximum mtu size */
+#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
+#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */
+
+/* rx/tx mac addr + type + vlan + align + slack*/
+#define RX_NIC_BUFSIZE	(ETH_DATA_LEN + 64)
+/* even more slack */
+#define RX_ALLOC_BUFSIZE	(ETH_DATA_LEN + 128)
+
 #define NFE_MAX_SCATTER	(NFE_TX_RING_COUNT - 2)
@@ -61,6 +69,7 @@
 #define NFE_RXTX_CTL		0x144
 #define NFE_TX_RING_ADDR_HI	0x148
 #define NFE_RX_RING_ADDR_HI	0x14c
+#define NFE_TxPauseFrame	0x170
 #define NFE_PHY_STATUS		0x180
 #define NFE_SETUP_R4		0x184
 #define NFE_STATUS		0x188
@@ -74,6 +83,10 @@
 #define NFE_PWR_STATE		0x26c
 #define NFE_VTAG_CTL		0x300

+#define NFE_MIISTAT_MASK	0x000f
+#define NFE_MIISTAT_LINKCHANGE	0x0008
+#define NFE_MII_INTERRUPT	0x0010
+
 #define NFE_PHY_ERROR		0x00001
 #define NFE_PHY_WRITE		0x00400
 #define NFE_PHY_BUSY		0x08000
@@ -85,7 +98,7 @@
 #define NFE_R2_MAGIC		0x16
 #define NFE_R4_MAGIC		0x08
 #define NFE_R6_MAGIC		0x03
-#define NFE_WOL_MAGIC		0x7770
+#define NFE_WOL_MAGIC		0x1111

 #define NFE_RX_START		0x01
 #define NFE_TX_START		0x01
@@ -113,9 +126,15 @@
 #define NFE_RXTX_RXCSUM		0x0400
 #define NFE_RXTX_V2MAGIC	0x2100
 #define NFE_RXTX_V3MAGIC	0x2200
+#define NFE_PFF_PAUSE_RX	0x08
 #define NFE_RXFILTER_MAGIC	0x007f0008
+#define NFE_MISC1_PAUSE_TX	0x01
 #define NFE_U2M			(1 << 5)
 #define NFE_PROMISC		(1 << 7)
+#define NFE_TX_PAUSEFRAME_DISABLE	0x1ff0080
+#define NFE_TX_PAUSEFRAME_ENABLE	0x0c00030
+
+#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

 /* default interrupt moderation timer of 128us */
 #define NFE_IM_DEFAULT	((128 * 100) / 1024)
@@ -142,6 +161,59 @@
 #define NFE_SEED_100TX	0x00002d00
 #define NFE_SEED_1000T	0x00007400

+#define NV_PAUSEFRAME_RX_ENABLE		0x0004
+#define NV_PAUSEFRAME_TX_ENABLE		0x0008
+#define ADVERTISE_PAUSE_ASYM		0x0800
+#define LPA_PAUSE_ASYM			0x0800
+#define NFE_VLAN_TAG_PRESENT		(1<<16)
+
+/* rx/tx mac addr + type + vlan + align + slack*/
+#define NFE_RX_HEADERS			(64)
+
+/* PHY defines */
+#define PHY_OUI_MARVELL			0x5043
+#define PHY_OUI_CICADA			0x03f1
+#define PHY_OUI_VITESSE			0x01c1
+#define PHYID1_OUI_MASK			0x03ff
+#define PHYID1_OUI_SHFT			6
+#define PHYID2_OUI_MASK			0xfc00
+#define PHYID2_OUI_SHFT			10
+#define PHYID2_MODEL_MASK		0x03f0
+#define PHY_MODEL_MARVELL_E3016		0x220
+#define PHY_MARVELL_E3016_INITMASK	0x0300
+#define PHY_CICADA_INIT1		0x0f000
+#define PHY_CICADA_INIT2		0x0e00
+#define PHY_CICADA_INIT3		0x01000
+#define PHY_CICADA_INIT4		0x0200
+#define PHY_CICADA_INIT5		0x0004
+#define PHY_CICADA_INIT6		0x02000
+#define PHY_VITESSE_INIT_REG1		0x1f
+#define PHY_VITESSE_INIT_REG2		0x10
+#define PHY_VITESSE_INIT_REG3		0x11
+#define PHY_VITESSE_INIT_REG4		0x12
+#define PHY_VITESSE_INIT_MSK1		0xc
+#define PHY_VITESSE_INIT_MSK2		0x0180
+#define PHY_VITESSE_INIT1		0x52b5
+#define PHY_VITESSE_INIT2		0xaf8a
+#define PHY_VITESSE_INIT3		0x8
+#define PHY_VITESSE_INIT4		0x8f8a
+#define PHY_VITESSE_INIT5		0xaf86
+#define PHY_VITESSE_INIT6		0x8f86
+#define PHY_VITESSE_INIT7		0xaf82
+#define PHY_VITESSE_INIT8		0x0100
+#define PHY_VITESSE_INIT9		0x8f82
+#define PHY_VITESSE_INIT10		0x0
+
+#define MII_SREVISION		0x16	/* Silicon revision */
+#define MII_RESV1		0x17
+#define MII_NCONFIG		0x1c	/* Network interface config */
+#define PHY_GIGABIT		0x0100
+#define PHY_RGMII		0x10000000
+
+#define PHY_TIMEOUT		0x1
+#define PHY_ERROR		0x2
+
+
 /* Rx/Tx descriptor */
 struct nfe_desc32 {
 	uint32_t	physaddr;
@@ -218,6 +290,14 @@
 #define PCI_PRODUCT_NVIDIA_NFORCE430_LAN2	0x0269
 #define PCI_PRODUCT_NVIDIA_MCP55_LAN1		0x0372
 #define PCI_PRODUCT_NVIDIA_MCP55_LAN2		0x0373
+#define PCI_PRODUCT_NVIDIA_MCP61_LAN1		0x03e5
+#define PCI_PRODUCT_NVIDIA_MCP61_LAN2		0x03e6
+#define PCI_PRODUCT_NVIDIA_MCP61_LAN3		0x03ee
+#define PCI_PRODUCT_NVIDIA_MCP61_LAN4		0x03ef
+#define PCI_PRODUCT_NVIDIA_MCP65_LAN1		0x0450
+#define PCI_PRODUCT_NVIDIA_MCP65_LAN2		0x0451
+#define PCI_PRODUCT_NVIDIA_MCP65_LAN3		0x0452
+#define PCI_PRODUCT_NVIDIA_MCP65_LAN4		0x0453

 #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN2	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
 #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN3	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
diff -u -r old/if_nfevar.h nfe/if_nfevar.h
--- old/if_nfevar.h	Thu Aug 10 02:23:40 2006
+++ nfe/if_nfevar.h	Fri Aug 11 02:10:26 2006
@@ -43,12 +43,6 @@
 	bus_dma_tag_t		tx_data_tag;
 };

-struct nfe_jbuf {
-	caddr_t			buf;
-	bus_addr_t		physaddr;
-	SLIST_ENTRY(nfe_jbuf)	jnext;
-};
-
 struct nfe_rx_data {
 	bus_dmamap_t	rx_data_map;
 	bus_dma_tag_t	rx_data_tag;
@@ -62,15 +56,6 @@
 	bus_dma_segment_t	rx_desc_segs;
 	bus_dma_tag_t		rx_desc_tag;
 	bus_addr_t		rx_desc_addr;
-#ifndef JMBUF
-	bus_dmamap_t		rx_jumbo_map;
-	bus_dma_segment_t	rx_jumbo_segs;
-	bus_dma_tag_t		rx_jumbo_tag;
-	bus_addr_t		rx_jumbo_addr;
-	caddr_t			jpool;
-	struct nfe_jbuf		jbuf[NFE_JPOOL_COUNT];
-	SLIST_HEAD(, nfe_jbuf)	jfreelist;
-#endif
 	bus_addr_t		physaddr;
 	struct nfe_desc32	*desc32;
 	struct nfe_desc64	*desc64;
@@ -90,7 +75,7 @@
 	struct resource		*nfe_res;
 	struct resource		*nfe_irq;
 	void			*nfe_intrhand;
-	struct mii_data		nfe_mii;
+	struct ifmedia		nfe_media;
 	u_int8_t		nfe_unit;

 	struct callout		nfe_stat_ch;
@@ -105,8 +90,10 @@
 #define NFE_40BIT_ADDR	0x02
 #define NFE_HW_CSUM	0x04
 #define NFE_HW_VLAN	0x08
-#define NFE_USE_JUMBO	0x10
+#define NFE_PAUSEFRAME_RX	0x10
+#define NFE_PAUSEFRAME_TX	0x20
 	u_int32_t		rxtxctl;
+	u_int32_t		nfe_mtu;
 	u_int8_t		mii_phyaddr;
 	u_char			eaddr[ETHER_ADDR_LEN];
 	struct task		nfe_txtask;
@@ -115,6 +102,12 @@
 	struct nfe_tx_ring	txq;
 	struct nfe_rx_ring	rxq;

+	u_int16_t		link_speed;
+	u_int16_t		link_duplex;
+	u_int16_t		gigabit;
+	u_int			phy_model;
+	u_int			phy_oui;
+
#ifdef DEVICE_POLLING
 	int			rxcycles;
 #endif
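
[Reviewer note, not part of the patch: since struct mii_data nfe_mii is replaced by struct ifmedia nfe_media in if_nfevar.h, and the miibus-generated nfe_ifmedia_upd/nfe_ifmedia_sts callbacks are deleted from if_nfe.c, the driver has to register its own media handlers with the stock ifmedia(4) interface. The sketch below only illustrates that call sequence; the names nfe_media_change, nfe_media_status and nfe_media_setup are hypothetical and do not appear in the patch, and the reported media state is a placeholder for whatever the in-driver PHY code records.]

/*
 * Hypothetical sketch -- assumes the softc layout from if_nfevar.h above
 * (struct ifmedia nfe_media) and the standard <net/if_media.h> API.
 */
#include <net/if.h>
#include <net/if_media.h>

static int
nfe_media_change(struct ifnet *ifp)
{
	/* Restart autonegotiation on the PHY here. */
	return (0);
}

static void
nfe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	/* Report whatever the driver's link-state code has recorded. */
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
}

static void
nfe_media_setup(struct nfe_softc *sc)
{
	/* Advertise a single autoselect entry and make it the default. */
	ifmedia_init(&sc->nfe_media, 0, nfe_media_change, nfe_media_status);
	ifmedia_add(&sc->nfe_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->nfe_media, IFM_ETHER | IFM_AUTO);
}

[SIOCGIFMEDIA/SIOCSIFMEDIA requests from nfe_ioctl() would then be forwarded with ifmedia_ioctl(ifp, ifr, &sc->nfe_media, cmd).]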