--- sys/conf/NOTES.orig Wed Jun 28 19:13:31 2006 +++ sys/conf/NOTES Wed Jul 5 19:56:48 2006 @@ -1784,6 +1784,9 @@ # SMC91C90/92/94/95 chips. # ste: Sundance Technologies ST201 PCI fast ethernet controller, includes # the D-Link DFE-550TX. +# stge: Support for gigabit ethernet adapters based on the Sundance/Tamarack +# TC9021 family of controllers, including the Sundance ST2021/ST2023, +# the Sundance/Tamarack TC9021, the D-Link DL-4000 and ASUS NX1101. # ti: Support for PCI gigabit ethernet NICs based on the Alteon Networks # Tigon 1 and Tigon 2 chipsets. This includes the Alteon AceNIC, the # 3Com 3c985, the Netgear GA620 and various others. Note that you will --- sys/conf/files.orig Wed Jun 28 19:13:31 2006 +++ sys/conf/files Mon Jul 10 11:18:43 2006 @@ -708,8 +708,10 @@ dev/mii/e1000phy.c optional miibus | e1000phy # XXX only xl cards? dev/mii/exphy.c optional miibus | exphy +dev/mii/gentbi.c optional miibus | gentbi # XXX only fxp cards? dev/mii/inphy.c optional miibus | inphy +dev/mii/ip1000phy.c optional miibus | ip1000phy dev/mii/lxtphy.c optional miibus | lxtphy dev/mii/mii.c optional miibus | mii dev/mii/mii_physubr.c optional miibus | mii @@ -918,6 +920,7 @@ dev/stg/tmc18c30_pccard.c optional stg pccard dev/stg/tmc18c30_pci.c optional stg pci dev/stg/tmc18c30_subr.c optional stg +dev/stge/if_stge.c optional stge pci dev/streams/streams.c optional streams dev/sym/sym_hipd.c optional sym \ dependency "$S/dev/sym/sym_{conf,defs}.h" @@ -1329,6 +1332,7 @@ kern/sched_core.c optional sched_core kern/sched_ule.c optional sched_ule kern/serdev_if.m optional puc | scc +kern/subr_acl_posix1e.c standard kern/subr_autoconf.c standard kern/subr_blist.c standard kern/subr_bus.c standard --- /dev/null Wed Jul 12 09:22:12 2006 +++ sys/dev/mii/gentbi.c Fri Jun 23 12:36:22 2006 @@ -0,0 +1,314 @@ +/* $NetBSD: gentbi.c,v 1.15 2006/03/29 07:05:24 thorpej Exp $ */ + +/*- + * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, + * NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright (c) 1997 Manuel Bouyer. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Manuel Bouyer. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Driver for generic ten-bit (1000BASE-SX) interfaces, built in to + * many Gigabit Ethernet chips. + * + * All we have to do here is correctly report speed and duplex. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Driver for generic unknown ten-bit interfaces(1000BASE-{LX,SX} + * fiber interfaces). 
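+ *
+ * The probe below matches any PHY whose BMSR advertises no copper
+ * media but whose extended status register advertises 1000BASE-X
+ * ability, so otherwise unknown TBI devices attach here at a
+ * priority just above ukphy(4).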
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include "miidevs.h" + +#include "miibus_if.h" + +static int gentbi_probe(device_t); +static int gentbi_attach(device_t); + +static device_method_t gentbi_methods[] = { + /* device interface */ + DEVMETHOD(device_probe, gentbi_probe), + DEVMETHOD(device_attach, gentbi_attach), + DEVMETHOD(device_detach, mii_phy_detach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + {0, 0} +}; + +static devclass_t gentbi_devclass; + +static driver_t gentbi_driver = { + "gentbi", + gentbi_methods, + sizeof(struct mii_softc) +}; + +DRIVER_MODULE(gentbi, miibus, gentbi_driver, gentbi_devclass, 0, 0); + +static int gentbi_service(struct mii_softc *, struct mii_data *, int); +static void gentbi_status(struct mii_softc *); + +static int +gentbi_probe(device_t dev) +{ + device_t parent; + struct mii_attach_args *ma; + int bmsr, extsr; + + parent = device_get_parent(dev); + ma = device_get_ivars(dev); + + /* + * We match as a generic TBI if: + * + * - There is no media in the BMSR. + * - EXTSR has only 1000X. + */ + bmsr = MIIBUS_READREG(parent, ma->mii_phyno, MII_BMSR); + if ((bmsr & BMSR_EXTSTAT) == 0 || (bmsr & BMSR_MEDIAMASK) != 0) + return (ENXIO); + + extsr = MIIBUS_READREG(parent, ma->mii_phyno, MII_EXTSR); + if (extsr & (EXTSR_1000TFDX|EXTSR_1000THDX)) + return (ENXIO); + + if (extsr & (EXTSR_1000XFDX|EXTSR_1000XHDX)) { + /* + * We think this is a generic TBI. Return a match + * priority higher than ukphy, but lower than what + * specific drivers will return. + */ + device_set_desc(dev, "Generic ten-bit interface"); + return (BUS_PROBE_LOW_PRIORITY); + } + + return (ENXIO); +} + +static int +gentbi_attach(device_t dev) +{ + struct mii_softc *sc; + struct mii_attach_args *ma; + struct mii_data *mii; + + sc = device_get_softc(dev); + ma = device_get_ivars(dev); + sc->mii_dev = device_get_parent(dev); + mii = device_get_softc(sc->mii_dev); + LIST_INSERT_HEAD(&mii->mii_phys, sc, mii_list); + + if (bootverbose) + device_printf(dev, "OUI 0x%06x, model 0x%04x, rev. %d\n", + MII_OUI(ma->mii_id1, ma->mii_id2), + MII_MODEL(ma->mii_id2), MII_REV(ma->mii_id2)); + + sc->mii_inst = mii->mii_instance; + sc->mii_phy = ma->mii_phyno; + sc->mii_service = gentbi_service; + sc->mii_pdata = mii; + sc->mii_flags |= MIIF_NOISOLATE; + + mii->mii_instance++; + + mii_phy_reset(sc); + + /* + * Mask out all media in the BMSR. We only are really interested + * in "auto". + */ + sc->mii_capabilities = + PHY_READ(sc, MII_BMSR) & ma->mii_capmask & ~BMSR_MEDIAMASK; + if (sc->mii_capabilities & BMSR_EXTSTAT) + sc->mii_extcapabilities = PHY_READ(sc, MII_EXTSR); + + device_printf(dev, " "); + if ((sc->mii_capabilities & BMSR_MEDIAMASK) == 0 && + (sc->mii_extcapabilities & EXTSR_MEDIAMASK) == 0) + printf("no media present"); + else + mii_phy_add_media(sc); + printf("\n"); + + return (0); +} + +static int +gentbi_service(struct mii_softc *sc, struct mii_data *mii, int cmd) +{ + struct ifmedia_entry *ife = mii->mii_media.ifm_cur; + int reg; + + switch (cmd) { + case MII_POLLSTAT: + /* + * If we're not polling our PHY instance, just return. + */ + if (IFM_INST(ife->ifm_media) != sc->mii_inst) + return (0); + break; + + case MII_MEDIACHG: + /* + * If the media indicates a different PHY instance, + * isolate ourselves. + */ + if (IFM_INST(ife->ifm_media) != sc->mii_inst) { + reg = PHY_READ(sc, MII_BMCR); + PHY_WRITE(sc, MII_BMCR, reg | BMCR_ISO); + return (0); + } + + /* + * If the interface is not up, don't do anything. 
+ */ + if ((mii->mii_ifp->if_flags & IFF_UP) == 0) + break; + + mii_phy_setmedia(sc); + break; + + case MII_TICK: + /* + * If we're not currently selected, just return. + */ + if (IFM_INST(ife->ifm_media) != sc->mii_inst) + return (0); + + if (mii_phy_tick(sc) == EJUSTRETURN) + return (0); + break; + } + + /* Update the media status. */ + gentbi_status(sc); + + /* Callback if something changed. */ + mii_phy_update(sc, cmd); + return (0); +} + +static void +gentbi_status(struct mii_softc *sc) +{ + struct mii_data *mii = sc->mii_pdata; + struct ifmedia_entry *ife = mii->mii_media.ifm_cur; + int bmsr, bmcr, anlpar; + + mii->mii_media_status = IFM_AVALID; + mii->mii_media_active = IFM_ETHER; + + bmsr = PHY_READ(sc, MII_BMSR) | PHY_READ(sc, MII_BMSR); + + if (bmsr & BMSR_LINK) + mii->mii_media_status |= IFM_ACTIVE; + + bmcr = PHY_READ(sc, MII_BMCR); + if (bmcr & BMCR_ISO) { + mii->mii_media_active |= IFM_NONE; + mii->mii_media_status = 0; + return; + } + + if (bmcr & BMCR_LOOP) + mii->mii_media_active |= IFM_LOOP; + + if (bmcr & BMCR_AUTOEN) { + /* + * The media status bits are only valid of autonegotiation + * has completed (or it's disabled). + */ + if ((bmsr & BMSR_ACOMP) == 0) { + /* Erg, still trying, I guess... */ + mii->mii_media_active |= IFM_NONE; + return; + } + + /* + * The media is always 1000baseSX. Check the ANLPAR to + * see if we're doing full-duplex. + */ + mii->mii_media_active |= IFM_1000_SX; + + anlpar = PHY_READ(sc, MII_ANLPAR); + if ((sc->mii_extcapabilities & EXTSR_1000XFDX) != 0 && + (anlpar & ANLPAR_X_FD) != 0) + mii->mii_media_active |= IFM_FDX; + } else + mii->mii_media_active = ife->ifm_media; +} --- /dev/null Wed Jul 12 09:22:12 2006 +++ sys/dev/mii/ip1000phy.c Mon Jul 3 19:27:59 2006 @@ -0,0 +1,444 @@ +/*- + * Copyright (c) 2006, Pyun YongHyeon + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Driver for the IC Plus IP1000A 10/100/1000 PHY. 
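+ *
+ * This PHY is built into the Sundance/Tamarack TC9021 MACs handled by
+ * stge(4).  Note that link speed and duplex are read back through the
+ * MAC's PhyCtrl register (see ip1000phy_status()) rather than from the
+ * usual MII status bits.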
+ */ + +#include +#include +#include +#include +#include +#include + + +#include +#include + +#include +#include +#include "miidevs.h" + +#include + +#include "miibus_if.h" + +#include +#include + +static int ip1000phy_probe(device_t); +static int ip1000phy_attach(device_t); + +static device_method_t ip1000phy_methods[] = { + /* device interface */ + DEVMETHOD(device_probe, ip1000phy_probe), + DEVMETHOD(device_attach, ip1000phy_attach), + DEVMETHOD(device_detach, mii_phy_detach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + { 0, 0 } +}; + +static devclass_t ip1000phy_devclass; +static driver_t ip1000phy_driver = { + "ip1000phy", + ip1000phy_methods, + sizeof (struct mii_softc) +}; + +DRIVER_MODULE(ip1000phy, miibus, ip1000phy_driver, ip1000phy_devclass, 0, 0); + +static int ip1000phy_service(struct mii_softc *, struct mii_data *, int); +static void ip1000phy_status(struct mii_softc *); +static void ip1000phy_reset(struct mii_softc *); +static int ip1000phy_mii_phy_auto(struct mii_softc *); + +static const struct mii_phydesc ip1000phys[] = { + MII_PHY_DESC(ICPLUS, IP1000A), + MII_PHY_END +}; + +static int +ip1000phy_probe(device_t dev) +{ + struct mii_attach_args *ma; + const struct mii_phydesc *mpd; + + ma = device_get_ivars(dev); + mpd = mii_phy_match(ma, ip1000phys); + if (mpd != NULL) { + device_set_desc(dev, mpd->mpd_name); + return (BUS_PROBE_DEFAULT); + } + + return (ENXIO); +} + +static int +ip1000phy_attach(device_t dev) +{ + struct mii_softc *sc; + struct mii_attach_args *ma; + struct mii_data *mii; + + sc = device_get_softc(dev); + ma = device_get_ivars(dev); + sc->mii_dev = device_get_parent(dev); + mii = device_get_softc(sc->mii_dev); + LIST_INSERT_HEAD(&mii->mii_phys, sc, mii_list); + + sc->mii_inst = mii->mii_instance; + sc->mii_phy = ma->mii_phyno; + sc->mii_service = ip1000phy_service; + sc->mii_pdata = mii; + sc->mii_anegticks = MII_ANEGTICKS_GIGE; + sc->mii_flags |= MIIF_NOISOLATE; + + mii->mii_instance++; + + device_printf(dev, " "); + +#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL) + + ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, sc->mii_inst), + BMCR_ISO); + + ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst), + IP1000PHY_BMCR_10); + printf("10baseT, "); + ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst), + IP1000PHY_BMCR_10 | IP1000PHY_BMCR_FDX); + printf("10baseT-FDX, "); + ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst), + IP1000PHY_BMCR_100); + printf("100baseTX, "); + ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst), + IP1000PHY_BMCR_100 | IP1000PHY_BMCR_FDX); + printf("100baseTX-FDX, "); + /* 1000baseT half-duplex, really supported? */ + ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, 0, sc->mii_inst), + IP1000PHY_BMCR_1000); + printf("1000baseTX, "); + ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, IFM_FDX, sc->mii_inst), + IP1000PHY_BMCR_1000 | IP1000PHY_BMCR_FDX); + printf("1000baseTX-FDX, "); + ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst), 0); + printf("auto\n"); +#undef ADD + + ip1000phy_reset(sc); + + MIIBUS_MEDIAINIT(sc->mii_dev); + return(0); +} + +static int +ip1000phy_service(struct mii_softc *sc, struct mii_data *mii, int cmd) +{ + struct ifmedia_entry *ife = mii->mii_media.ifm_cur; + uint32_t gig, reg, speed; + + switch (cmd) { + case MII_POLLSTAT: + /* + * If we're not polling our PHY instance, just return. + */ + if (IFM_INST(ife->ifm_media) != sc->mii_inst) + return (0); + break; + + case MII_MEDIACHG: + /* + * If the media indicates a different PHY instance, + * isolate ourselves. 
+ */ + if (IFM_INST(ife->ifm_media) != sc->mii_inst) { + reg = PHY_READ(sc, IP1000PHY_MII_BMCR); + PHY_WRITE(sc, IP1000PHY_MII_BMCR, + reg | IP1000PHY_BMCR_ISO); + return (0); + } + + /* + * If the interface is not up, don't do anything. + */ + if ((mii->mii_ifp->if_flags & IFF_UP) == 0) { + break; + } + + ip1000phy_reset(sc); + switch (IFM_SUBTYPE(ife->ifm_media)) { + case IFM_AUTO: + (void)ip1000phy_mii_phy_auto(sc); + goto done; + break; + + case IFM_1000_T: + /* + * XXX + * Manual 1000baseT setting doesn't seem to work. + */ + speed = IP1000PHY_BMCR_1000; + break; + + case IFM_100_TX: + speed = IP1000PHY_BMCR_100; + break; + + case IFM_10_T: + speed = IP1000PHY_BMCR_10; + break; + + default: + return (EINVAL); + } + + if (((ife->ifm_media & IFM_GMASK) & IFM_FDX) != 0) { + speed |= IP1000PHY_BMCR_FDX; + gig = IP1000PHY_1000CR_1000T_FDX; + } else + gig = IP1000PHY_1000CR_1000T; + + PHY_WRITE(sc, IP1000PHY_MII_1000CR, 0); + PHY_WRITE(sc, IP1000PHY_MII_BMCR, speed); + + if (IFM_SUBTYPE(ife->ifm_media) != IFM_1000_T) + break; + + PHY_WRITE(sc, IP1000PHY_MII_1000CR, gig); + PHY_WRITE(sc, IP1000PHY_MII_BMCR, speed); + + /* + * When settning the link manually, one side must + * be the master and the other the slave. However + * ifmedia doesn't give us a good way to specify + * this, so we fake it by using one of the LINK + * flags. If LINK0 is set, we program the PHY to + * be a master, otherwise it's a slave. + */ + if ((mii->mii_ifp->if_flags & IFF_LINK0)) + PHY_WRITE(sc, IP1000PHY_MII_1000CR, gig | + IP1000PHY_1000CR_MASTER | + IP1000PHY_1000CR_MMASTER | + IP1000PHY_1000CR_MANUAL); + else + PHY_WRITE(sc, IP1000PHY_MII_1000CR, gig | + IP1000PHY_1000CR_MASTER | + IP1000PHY_1000CR_MANUAL); + +done: + break; + + case MII_TICK: + /* + * If we're not currently selected, just return. + */ + if (IFM_INST(ife->ifm_media) != sc->mii_inst) + return (0); + /* + * Is the interface even up? + */ + if ((mii->mii_ifp->if_flags & IFF_UP) == 0) + return (0); + + /* + * Only used for autonegotiation. + */ + if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) { + sc->mii_ticks = 0; + break; + } + + /* + * check for link. + */ + reg = PHY_READ(sc, MII_BMSR) | PHY_READ(sc, MII_BMSR); + if (reg & BMSR_LINK) { + sc->mii_ticks = 0; + break; + } + + /* Announce link loss right after it happens */ + if (sc->mii_ticks++ == 0) + break; + + /* + * Only retry autonegotiation every mii_anegticks seconds. + */ + if (sc->mii_ticks <= sc->mii_anegticks) + return (0); + + sc->mii_ticks = 0; + ip1000phy_mii_phy_auto(sc); + break; + } + + /* Update the media status. */ + ip1000phy_status(sc); + + /* Callback if something changed. */ + mii_phy_update(sc, cmd); + return (0); +} + +static void +ip1000phy_status(struct mii_softc *sc) +{ + struct mii_data *mii = sc->mii_pdata; + uint32_t bmsr, bmcr, stat; + uint32_t ar, lpar; + + mii->mii_media_status = IFM_AVALID; + mii->mii_media_active = IFM_ETHER; + + bmsr = PHY_READ(sc, IP1000PHY_MII_BMSR) | + PHY_READ(sc, IP1000PHY_MII_BMSR); + if ((bmsr & IP1000PHY_BMSR_LINK) != 0) + mii->mii_media_status |= IFM_ACTIVE; + + bmcr = PHY_READ(sc, IP1000PHY_MII_BMCR); + if ((bmcr & IP1000PHY_BMCR_LOOP) != 0) + mii->mii_media_active |= IFM_LOOP; + + if ((bmcr & IP1000PHY_BMCR_AUTOEN) != 0) { + if ((bmsr & IP1000PHY_BMSR_ANEGCOMP) == 0) { + /* Erg, still trying, I guess... 
*/ + mii->mii_media_active |= IFM_NONE; + return; + } + } + + stat = PHY_READ(sc, STGE_PhyCtrl); + switch (PC_LinkSpeed(stat)) { + case PC_LinkSpeed_Down: + mii->mii_media_active |= IFM_NONE; + return; + case PC_LinkSpeed_10: + mii->mii_media_active |= IFM_10_T; + break; + case PC_LinkSpeed_100: + mii->mii_media_active |= IFM_100_TX; + break; + case PC_LinkSpeed_1000: + mii->mii_media_active |= IFM_1000_T; + break; + } + if ((stat & PC_PhyDuplexStatus) != 0) + mii->mii_media_active |= IFM_FDX; + else + mii->mii_media_active |= IFM_HDX; + + ar = PHY_READ(sc, IP1000PHY_MII_ANAR); + lpar = PHY_READ(sc, IP1000PHY_MII_ANLPAR); + + /* + * FLAG0 : Rx flow-control + * FLAG1 : Tx flow-control + */ + if ((ar & IP1000PHY_ANAR_PAUSE) && (lpar & IP1000PHY_ANLPAR_PAUSE)) + mii->mii_media_active |= IFM_FLAG0 | IFM_FLAG1; + else if (!(ar & IP1000PHY_ANAR_PAUSE) && (ar & IP1000PHY_ANAR_APAUSE) && + (lpar & IP1000PHY_ANLPAR_PAUSE) && (lpar & IP1000PHY_ANLPAR_APAUSE)) + mii->mii_media_active |= IFM_FLAG1; + else if ((ar & IP1000PHY_ANAR_PAUSE) && (ar & IP1000PHY_ANAR_APAUSE) && + !(lpar & IP1000PHY_ANLPAR_PAUSE) && + (lpar & IP1000PHY_ANLPAR_APAUSE)) { + mii->mii_media_active |= IFM_FLAG0; + } + + /* + * FLAG2 : local PHY resolved to MASTER + */ + if ((mii->mii_media_active & IFM_1000_T) != 0) { + stat = PHY_READ(sc, IP1000PHY_MII_1000SR); + if ((stat & IP1000PHY_1000SR_MASTER) != 0) + mii->mii_media_active |= IFM_FLAG2; + } +} + +static int +ip1000phy_mii_phy_auto(struct mii_softc *mii) +{ + uint32_t reg; + + PHY_WRITE(mii, IP1000PHY_MII_ANAR, + IP1000PHY_ANAR_10T | IP1000PHY_ANAR_10T_FDX | + IP1000PHY_ANAR_100TX | IP1000PHY_ANAR_100TX_FDX | + IP1000PHY_ANAR_PAUSE | IP1000PHY_ANAR_APAUSE); + reg = IP1000PHY_1000CR_1000T | IP1000PHY_1000CR_1000T_FDX; + reg |= IP1000PHY_1000CR_MASTER; + PHY_WRITE(mii, IP1000PHY_MII_1000CR, reg); + PHY_WRITE(mii, IP1000PHY_MII_BMCR, (IP1000PHY_BMCR_FDX | + IP1000PHY_BMCR_AUTOEN | IP1000PHY_BMCR_STARTNEG)); + + return (EJUSTRETURN); +} + +static void +ip1000phy_load_dspcode(struct mii_softc *sc) +{ + + PHY_WRITE(sc, 31, 0x0001); + PHY_WRITE(sc, 27, 0x01e0); + PHY_WRITE(sc, 31, 0x0002); + PHY_WRITE(sc, 27, 0xeb8e); + PHY_WRITE(sc, 31, 0x0000); + PHY_WRITE(sc, 30, 0x005e); + PHY_WRITE(sc, 9, 0x0700); + + DELAY(50); +} + +static void +ip1000phy_reset(struct mii_softc *sc) +{ + struct stge_softc *stge_sc; + struct mii_data *mii; + uint32_t reg; + + mii_phy_reset(sc); + + /* clear autoneg/full-duplex as we don't want it after reset */ + reg = PHY_READ(sc, IP1000PHY_MII_BMCR); + reg &= ~(IP1000PHY_BMCR_AUTOEN | IP1000PHY_BMCR_FDX); + PHY_WRITE(sc, MII_BMCR, reg); + + mii = sc->mii_pdata; + /* + * XXX There should be more general way to pass PHY specific + * data via mii interface. + */ + if (strcmp(mii->mii_ifp->if_dname, "stge") == 0) { + stge_sc = mii->mii_ifp->if_softc; + if (stge_sc->sc_rev >= 0x40 && stge_sc->sc_rev <= 0x4e) + ip1000phy_load_dspcode(sc); + } +} --- /dev/null Wed Jul 12 09:22:12 2006 +++ sys/dev/mii/ip1000phyreg.h Thu Jun 1 14:35:37 2006 @@ -0,0 +1,141 @@ +/*- + * Copyright (c) 2006, Pyun YongHyeon + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _DEV_MII_IP1000PHYREG_H_ +#define _DEV_MII_IP1000PHYREG_H_ + +/* + * Registers for the IC Plus IP1000A internal PHY. + */ + +/* Control register */ +#define IP1000PHY_MII_BMCR 0x00 +#define IP1000PHY_BMCR_FDX 0x0100 +#define IP1000PHY_BMCR_STARTNEG 0x0200 +#define IP1000PHY_BMCR_ISO 0x0400 +#define IP1000PHY_BMCR_PDOWN 0x0800 +#define IP1000PHY_BMCR_AUTOEN 0x1000 +#define IP1000PHY_BMCR_LOOP 0x4000 +#define IP1000PHY_BMCR_RESET 0x8000 + +#define IP1000PHY_BMCR_10 0x0000 +#define IP1000PHY_BMCR_100 0x2000 +#define IP1000PHY_BMCR_1000 0x0040 + +/* Status register */ +#define IP1000PHY_MII_BMSR 0x01 +#define IP1000PHY_BMSR_EXT 0x0001 +#define IP1000PHY_BMSR_LINK 0x0004 +#define IP1000PHY_BMSR_ANEG 0x0008 +#define IP1000PHY_BMSR_RFAULT 0x0010 +#define IP1000PHY_BMSR_ANEGCOMP 0x0020 +#define IP1000PHY_BMSR_EXTSTS 0x0100 + +#define IP1000PHY_MII_ID1 0x02 + +/* Autonegotiation advertisement register */ +#define IP1000PHY_MII_ANAR 0x04 +#define IP1000PHY_ANAR_10T 0x0020 +#define IP1000PHY_ANAR_10T_FDX 0x0040 +#define IP1000PHY_ANAR_100TX 0x0080 +#define IP1000PHY_ANAR_100TX_FDX 0x0100 +#define IP1000PHY_ANAR_100T4 0x0200 +#define IP1000PHY_ANAR_PAUSE 0x0400 +#define IP1000PHY_ANAR_APAUSE 0x0800 +#define IP1000PHY_ANAR_RFAULT 0x2000 +#define IP1000PHY_ANAR_NP 0x8000 + +/* Autonegotiation link parnet ability register */ +#define IP1000PHY_MII_ANLPAR 0x05 +#define IP1000PHY_ANLPAR_10T 0x0020 +#define IP1000PHY_ANLPAR_10T_FDX 0x0040 +#define IP1000PHY_ANLPAR_100TX 0x0080 +#define IP1000PHY_ANLPAR_100TX_FDX 0x0100 +#define IP1000PHY_ANLPAR_100T4 0x0200 +#define IP1000PHY_ANLPAR_PAUSE 0x0400 +#define IP1000PHY_ANLPAR_APAUSE 0x0800 +#define IP1000PHY_ANLPAR_RFAULT 0x2000 +#define IP1000PHY_ANLPAR_ACK 0x4000 +#define IP1000PHY_ANLPAR_NP 0x8000 + +/* Autonegotiation expansion register */ +#define IP1000PHY_MII_ANER 0x06 +#define IP1000PHY_ANER_LPNWAY 0x0001 +#define IP1000PHY_ANER_PRCVD 0x0002 +#define IP1000PHY_ANER_NEXTP 0x0004 +#define IP1000PHY_ANER_LPNEXTP 0x0008 +#define IP1000PHY_ANER_PDF 0x0100 + +/* Autonegotiation next page transmit register */ +#define IP1000PHY_MII_NEXTP 0x07 +#define IP1000PHY_NEXTP_MSGC 0x0001 +#define IP1000PHY_NEXTP_TOGGLE 0x0800 +#define IP1000PHY_NEXTP_ACK2 0x1000 +#define IP1000PHY_NEXTP_MSGP 0x2000 +#define IP1000PHY_NEXTP_NEXTP 0x8000 + +/* Autonegotiation link partner next page register */ +#define IP1000PHY_MII_NEXTPLP 0x08 +#define IP1000PHY_NEXTPLP_MSGC 0x0001 +#define IP1000PHY_NEXTPLP_TOGGLE 0x0800 +#define IP1000PHY_NEXTPLP_ACK2 0x1000 +#define 
IP1000PHY_NEXTPLP_MSGP 0x2000 +#define IP1000PHY_NEXTPLP_ACK 0x4000 +#define IP1000PHY_NEXTPLP_NEXTP 0x8000 + +/* 1000baseT control register */ +#define IP1000PHY_MII_1000CR 0x09 +#define IP1000PHY_1000CR_1000T 0x0100 +#define IP1000PHY_1000CR_1000T_FDX 0x0200 +#define IP1000PHY_1000CR_MASTER 0x0400 +#define IP1000PHY_1000CR_MMASTER 0x0800 +#define IP1000PHY_1000CR_MANUAL 0x1000 +#define IP1000PHY_1000CR_TMNORMAL 0x0000 +#define IP1000PHY_1000CR_TM1 0x2000 +#define IP1000PHY_1000CR_TM2 0x4000 +#define IP1000PHY_1000CR_TM3 0x6000 +#define IP1000PHY_1000CR_TM4 0x8000 + +/* 1000baseT status register */ +#define IP1000PHY_MII_1000SR 0x0A +#define IP1000PHY_1000SR_LP 0x0400 +#define IP1000PHY_1000SR_LP_FDX 0x0800 +#define IP1000PHY_1000SR_RXSTAT 0x1000 +#define IP1000PHY_1000SR_LRXSTAT 0x2000 +#define IP1000PHY_1000SR_MASTER 0x4000 +#define IP1000PHY_1000SR_MASTERF 0x8000 + +/* Extended status register */ +#define IP1000PHY_MII_EXTSTS 0x0F +#define IP1000PHY_EXTSTS_1000T 0x1000 +#define IP1000PHY_EXTSTS_1000T_FDX 0x2000 +#define IP1000PHY_EXTSTS_1000X 0x4000 +#define IP1000PHY_EXTSTS_1000X_FDX 0x8000 + +#endif /* _DEV_MII_IP1000PHYREG_H_ */ --- /dev/null Wed Jul 12 09:22:12 2006 +++ sys/dev/stge/if_stge.c Wed Jul 12 09:20:11 2006 @@ -0,0 +1,2738 @@ +/* $NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $ */ + +/*- + * Copyright (c) 2001 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Device driver for the Sundance Tech. TC9021 10/100/1000 + * Ethernet controller. 
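+ *
+ * The controller needs "device miibus" in the kernel configuration and,
+ * presumably, a matching "device stge" line (the sys/conf/files entry
+ * added above is "optional stge pci"); it should also work as a module
+ * given the MODULE_DEPEND declarations on pci, ether and miibus below.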
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#ifdef HAVE_KERNEL_OPTION_HEADERS +#include "opt_device_polling.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#if 1 +#include +#else +#include "if_stgereg.h" +#endif + +/* + * Redifine VLAN related macros to support both CURRENT and 6.1R. + */ +#ifdef VLAN_INPUT_TAG_NEW +#define STGE_VLAN_INPUT_TAG VLAN_INPUT_TAG_NEW +#else +#define STGE_VLAN_INPUT_TAG VLAN_INPUT_TAG +#endif +#ifndef IFCAP_VLAN_HWCSUM +#define IFCAP_VLAN_HWCSUM 0 +#endif +#ifndef VLAN_CAPABILITIES +#define STGE_VLAN_CAPABILITIES(x) +#else +#define STGE_VLAN_CAPABILITIES(x) VLAN_CAPABILITIES(x) +#endif + +#define STGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) + +MODULE_DEPEND(stge, pci, 1, 1, 1); +MODULE_DEPEND(stge, ether, 1, 1, 1); +MODULE_DEPEND(stge, miibus, 1, 1, 1); + +/* "device miibus" required. See GENERIC if you get errors here. */ +#include "miibus_if.h" + +/* + * Devices supported by this driver. + */ +static struct stge_product { + uint16_t stge_vendorid; + uint16_t stge_deviceid; + const char *stge_name; +} stge_products[] = { + { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST1023, + "Sundance ST-1023 Gigabit Ethernet" }, + + { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST2021, + "Sundance ST-2021 Gigabit Ethernet" }, + + { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021, + "Tamarack TC9021 Gigabit Ethernet" }, + + { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021_ALT, + "Tamarack TC9021 Gigabit Ethernet" }, + + /* + * The Sundance sample boards use the Sundance vendor ID, + * but the Tamarack product ID. 
+ */ + { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021, + "Sundance TC9021 Gigabit Ethernet" }, + + { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021_ALT, + "Sundance TC9021 Gigabit Ethernet" }, + + { VENDOR_DLINK, DEVICEID_DLINK_DL4000, + "D-Link DL-4000 Gigabit Ethernet" }, + + { VENDOR_ANTARES, DEVICEID_ANTARES_TC9021, + "Antares Gigabit Ethernet" } +}; + +static int stge_probe(device_t); +static int stge_attach(device_t); +static int stge_detach(device_t); +static void stge_shutdown(device_t); +static int stge_suspend(device_t); +static int stge_resume(device_t); + +static int stge_encap(struct stge_softc *, struct mbuf **); +static void stge_start(struct ifnet *); +static void stge_start_locked(struct ifnet *); +static void stge_watchdog(struct ifnet *); +static int stge_ioctl(struct ifnet *, u_long, caddr_t); +static void stge_init(void *); +static void stge_init_locked(struct stge_softc *); +static void stge_vlan_setup(struct stge_softc *); +static void stge_stop(struct stge_softc *); +static void stge_start_tx(struct stge_softc *); +static void stge_start_rx(struct stge_softc *); +static void stge_stop_tx(struct stge_softc *); +static void stge_stop_rx(struct stge_softc *); + +static void stge_reset(struct stge_softc *, uint32_t); +static int stge_eeprom_wait(struct stge_softc *); +static void stge_read_eeprom(struct stge_softc *, int, uint16_t *); +static void stge_tick(void *); +static void stge_stats_update(struct stge_softc *); +static void stge_set_filter(struct stge_softc *); +static void stge_set_multi(struct stge_softc *); + +static void stge_link_task(void *, int); +static void stge_intr(void *); +static __inline int stge_tx_error(struct stge_softc *); +static void stge_txeof(struct stge_softc *); +static void stge_rxeof(struct stge_softc *); +static __inline void stge_discard_rxbuf(struct stge_softc *, int); +static int stge_newbuf(struct stge_softc *, int); +#ifndef __NO_STRICT_ALIGNMENT +static __inline mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *); +#endif + +static void stge_mii_sync(struct stge_softc *); +static void stge_mii_send(struct stge_softc *, uint32_t, int); +static int stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *); +static int stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *); +static int stge_miibus_readreg(device_t, int, int); +static int stge_miibus_writereg(device_t, int, int, int); +static void stge_miibus_statchg(device_t); +static int stge_mediachange(struct ifnet *); +static void stge_mediastatus(struct ifnet *, struct ifmediareq *); + +static void stge_dmamap_cb(void *, bus_dma_segment_t *, int, int); +static int stge_dma_alloc(struct stge_softc *); +static void stge_dma_free(struct stge_softc *); +static void stge_dma_wait(struct stge_softc *); +static void stge_init_tx_ring(struct stge_softc *); +static int stge_init_rx_ring(struct stge_softc *); +#ifdef DEVICE_POLLING +static void stge_poll(struct ifnet *, enum poll_cmd, int); +#endif + +static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); +static int sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS); +static int sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS); + +static device_method_t stge_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, stge_probe), + DEVMETHOD(device_attach, stge_attach), + DEVMETHOD(device_detach, stge_detach), + DEVMETHOD(device_shutdown, stge_shutdown), + DEVMETHOD(device_suspend, stge_suspend), + DEVMETHOD(device_resume, stge_resume), + + /* MII interface */ + DEVMETHOD(miibus_readreg, stge_miibus_readreg), + 
DEVMETHOD(miibus_writereg, stge_miibus_writereg), + DEVMETHOD(miibus_statchg, stge_miibus_statchg), + + { 0, 0 } + +}; + +static driver_t stge_driver = { + "stge", + stge_methods, + sizeof(struct stge_softc) +}; + +static devclass_t stge_devclass; + +DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0); +DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0); + +#define MII_SET(x) \ + CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x)) +#define MII_CLR(x) \ + CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x)) + +/* + * Sync the PHYs by setting data bit and strobing the clock 32 times. + */ +static void +stge_mii_sync(struct stge_softc *sc) +{ + int i; + + MII_SET(PC_MgmtDir | PC_MgmtData); + + for (i = 0; i < 32; i++) { + MII_SET(PC_MgmtClk); + DELAY(1); + MII_CLR(PC_MgmtClk); + DELAY(1); + } +} + +/* + * Clock a series of bits through the MII. + */ +static void +stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt) +{ + int i; + + MII_CLR(PC_MgmtClk); + + for (i = (0x1 << (cnt - 1)); i; i >>= 1) { + if (bits & i) + MII_SET(PC_MgmtData); + else + MII_CLR(PC_MgmtData); + DELAY(1); + MII_CLR(PC_MgmtClk); + DELAY(1); + MII_SET(PC_MgmtClk); + } +} + +/* + * Read an PHY register through the MII. + */ +static int +stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame) +{ + int i, ack; + + /* + * Set up frame for RX. + */ + frame->mii_stdelim = STGE_MII_STARTDELIM; + frame->mii_opcode = STGE_MII_READOP; + frame->mii_turnaround = 0; + frame->mii_data = 0; + + CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl); + /* + * Turn on data xmit. + */ + MII_SET(PC_MgmtDir); + + stge_mii_sync(sc); + + /* + * Send command/address info. + */ + stge_mii_send(sc, frame->mii_stdelim, 2); + stge_mii_send(sc, frame->mii_opcode, 2); + stge_mii_send(sc, frame->mii_phyaddr, 5); + stge_mii_send(sc, frame->mii_regaddr, 5); + + /* Turn off xmit. */ + MII_CLR(PC_MgmtDir); + + /* Idle bit */ + MII_CLR((PC_MgmtClk | PC_MgmtData)); + DELAY(1); + MII_SET(PC_MgmtClk); + DELAY(1); + + /* Check for ack */ + MII_CLR(PC_MgmtClk); + DELAY(1); + ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData; + MII_SET(PC_MgmtClk); + DELAY(1); + + /* + * Now try reading data bits. If the ack failed, we still + * need to clock through 16 cycles to keep the PHY(s) in sync. + */ + if (ack) { + for(i = 0; i < 16; i++) { + MII_CLR(PC_MgmtClk); + DELAY(1); + MII_SET(PC_MgmtClk); + DELAY(1); + } + goto fail; + } + + for (i = 0x8000; i; i >>= 1) { + MII_CLR(PC_MgmtClk); + DELAY(1); + if (!ack) { + if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData) + frame->mii_data |= i; + DELAY(1); + } + MII_SET(PC_MgmtClk); + DELAY(1); + } + +fail: + MII_CLR(PC_MgmtClk); + DELAY(1); + MII_SET(PC_MgmtClk); + DELAY(1); + + if (ack) + return(1); + return(0); +} + +/* + * Write to a PHY register through the MII. + */ +static int +stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame) +{ + + /* + * Set up frame for TX. + */ + frame->mii_stdelim = STGE_MII_STARTDELIM; + frame->mii_opcode = STGE_MII_WRITEOP; + frame->mii_turnaround = STGE_MII_TURNAROUND; + + /* + * Turn on data output. + */ + MII_SET(PC_MgmtDir); + + stge_mii_sync(sc); + + stge_mii_send(sc, frame->mii_stdelim, 2); + stge_mii_send(sc, frame->mii_opcode, 2); + stge_mii_send(sc, frame->mii_phyaddr, 5); + stge_mii_send(sc, frame->mii_regaddr, 5); + stge_mii_send(sc, frame->mii_turnaround, 2); + stge_mii_send(sc, frame->mii_data, 16); + + /* Idle bit. 
*/ + MII_SET(PC_MgmtClk); + DELAY(1); + MII_CLR(PC_MgmtClk); + DELAY(1); + + /* + * Turn off xmit. + */ + MII_CLR(PC_MgmtDir); + + return(0); +} + +/* + * sc_miibus_readreg: [mii interface function] + * + * Read a PHY register on the MII of the TC9021. + */ +static int +stge_miibus_readreg(device_t dev, int phy, int reg) +{ + struct stge_softc *sc; + struct stge_mii_frame frame; + int error; + + sc = device_get_softc(dev); + + if (reg == STGE_PhyCtrl) { + /* XXX allow ip1000phy read STGE_PhyCtrl register. */ + STGE_MII_LOCK(sc); + error = CSR_READ_1(sc, STGE_PhyCtrl); + STGE_MII_UNLOCK(sc); + return (error); + } + bzero(&frame, sizeof(frame)); + frame.mii_phyaddr = phy; + frame.mii_regaddr = reg; + + STGE_MII_LOCK(sc); + error = stge_mii_readreg(sc, &frame); + STGE_MII_UNLOCK(sc); + + if (error != 0) { + /* Don't show errors for PHY probe request */ + if (reg != 1) + device_printf(sc->sc_dev, "phy read fail\n"); + return (0); + } + return (frame.mii_data); +} + +/* + * stge_miibus_writereg: [mii interface function] + * + * Write a PHY register on the MII of the TC9021. + */ +static int +stge_miibus_writereg(device_t dev, int phy, int reg, int val) +{ + struct stge_softc *sc; + struct stge_mii_frame frame; + int error; + + sc = device_get_softc(dev); + + bzero(&frame, sizeof(frame)); + frame.mii_phyaddr = phy; + frame.mii_regaddr = reg; + frame.mii_data = val; + + STGE_MII_LOCK(sc); + error = stge_mii_writereg(sc, &frame); + STGE_MII_UNLOCK(sc); + + if (error != 0) + device_printf(sc->sc_dev, "phy write fail\n"); + return (0); +} + +/* + * stge_miibus_statchg: [mii interface function] + * + * Callback from MII layer when media changes. + */ +static void +stge_miibus_statchg(device_t dev) +{ + struct stge_softc *sc; + struct mii_data *mii; + + sc = device_get_softc(dev); + mii = device_get_softc(sc->sc_miibus); + + STGE_MII_LOCK(sc); + if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE) { + STGE_MII_UNLOCK(sc); + return; + } + + sc->sc_MACCtrl = 0; + if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0) + sc->sc_MACCtrl |= MC_DuplexSelect; + if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0) + sc->sc_MACCtrl |= MC_RxFlowControlEnable; + if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0) + sc->sc_MACCtrl |= MC_TxFlowControlEnable; + /* + * We can't access STGE_MACCtrl register in this context due to + * the races between MII layer and driver which accesses this + * register to program MAC. In order to solve the race, we defer + * STGE_MACCtrl programming until we know we are out of MII. + */ + taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task); + STGE_MII_UNLOCK(sc); +} + +/* + * stge_mediastatus: [ifmedia interface function] + * + * Get the current interface media status. + */ +static void +stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + struct stge_softc *sc; + struct mii_data *mii; + + sc = ifp->if_softc; + mii = device_get_softc(sc->sc_miibus); + + mii_pollstat(mii); + ifmr->ifm_status = mii->mii_media_status; + ifmr->ifm_active = mii->mii_media_active; +} + +/* + * stge_mediachange: [ifmedia interface function] + * + * Set hardware to newly-selected media. 
+ */ +static int +stge_mediachange(struct ifnet *ifp) +{ + struct stge_softc *sc; + struct mii_data *mii; + + sc = ifp->if_softc; + mii = device_get_softc(sc->sc_miibus); + mii_mediachg(mii); + + return (0); +} + +static int +stge_eeprom_wait(struct stge_softc *sc) +{ + int i; + + for (i = 0; i < STGE_TIMEOUT; i++) { + DELAY(1000); + if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0) + return (0); + } + return (1); +} + +/* + * stge_read_eeprom: + * + * Read data from the serial EEPROM. + */ +static void +stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data) +{ + + if (stge_eeprom_wait(sc)) + device_printf(sc->sc_dev, "EEPROM failed to come ready\n"); + + CSR_WRITE_2(sc, STGE_EepromCtrl, + EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR)); + if (stge_eeprom_wait(sc)) + device_printf(sc->sc_dev, "EEPROM read timed out\n"); + *data = CSR_READ_2(sc, STGE_EepromData); +} + + +static int +stge_probe(device_t dev) +{ + struct stge_product *sp; + int i; + uint16_t vendor, devid; + + vendor = pci_get_vendor(dev); + devid = pci_get_device(dev); + sp = stge_products; + for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]); + i++, sp++) { + if (vendor == sp->stge_vendorid && + devid == sp->stge_deviceid) { + device_set_desc(dev, sp->stge_name); + return (BUS_PROBE_DEFAULT); + } + } + + return (ENXIO); +} + +static int +stge_attach(device_t dev) +{ + struct stge_softc *sc; + struct ifnet *ifp; + uint8_t enaddr[ETHER_ADDR_LEN]; + int error, i, rid; + uint16_t cmd; + uint32_t val; + + error = 0; + sc = device_get_softc(dev); + sc->sc_dev = dev; + + mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, + MTX_DEF); + mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF); + callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); + TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc); + + /* + * Map the device. + */ + pci_enable_busmaster(dev); + cmd = pci_read_config(dev, PCIR_COMMAND, 2); + val = pci_read_config(dev, PCIR_BAR(1), 4); + if ((val & 0x01) != 0) { + sc->sc_restype = SYS_RES_MEMORY; + sc->sc_rid = PCIR_BAR(1); + } else { + val = pci_read_config(dev, PCIR_BAR(0), 4); + if ((val & 0x01) == 0) { + device_printf(sc->sc_dev, "couldn't locate IO BAR\n"); + error = ENXIO; + goto fail; + } + sc->sc_restype = SYS_RES_IOPORT; + sc->sc_rid = PCIR_BAR(0); + } + sc->sc_res = bus_alloc_resource_any(dev, sc->sc_restype, &sc->sc_rid, + RF_ACTIVE); + if (sc->sc_res == NULL) { + device_printf(sc->sc_dev, "couldn't map %s\n", + sc->sc_restype == SYS_RES_MEMORY ? "memory" : "ports"); + error = ENXIO; + goto fail; + } + sc->sc_rev = pci_get_revid(dev); + sc->sc_st = rman_get_bustag(sc->sc_res); + sc->sc_sh = rman_get_bushandle(sc->sc_res); + + /* Allocate interrupt. */ + rid = 0; + sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + RF_SHAREABLE | RF_ACTIVE); + if (sc->sc_irq == NULL) { + device_printf(sc->sc_dev, "couldn't map interrupt\n"); + error = ENXIO; + goto fail; + } + + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, + "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0, + sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe"); + + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, + "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0, + sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait"); + + /* Pull in device tunables. 
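+ *
+ * These are read from device.hints as hint.stge.<unit>.rxint_nframe
+ * and hint.stge.<unit>.rxint_dmawait; values outside the allowed range
+ * are reset to the defaults.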
*/ + sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT; + error = resource_int_value(device_get_name(dev), device_get_unit(dev), + "rxint_nframe", &sc->sc_rxint_nframe); + if (error == 0) { + if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN || + sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) { + device_printf(dev, "rxint_nframe value out of range; " + "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT); + sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT; + } + } + + sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT; + error = resource_int_value(device_get_name(dev), device_get_unit(dev), + "rxint_dmawait", &sc->sc_rxint_dmawait); + if (error == 0) { + if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN || + sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) { + device_printf(dev, "rxint_dmawait value out of range; " + "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT); + sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT; + } + } + + if ((error = stge_dma_alloc(sc) != 0)) + goto fail; + + /* + * Determine if we're copper or fiber. It affects how we + * reset the card. + */ + if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia) + sc->sc_usefiber = 1; + else + sc->sc_usefiber = 0; + + /* Load LED configuration from EEPROM. */ + stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led); + + /* + * Reset the chip to a known state. + */ + stge_reset(sc, STGE_RESET_FULL); + + /* + * Reading the station address from the EEPROM doesn't seem + * to work, at least on my sample boards. Instead, since + * the reset sequence does AutoInit, read it from the station + * address registers. For Sundance 1023 you can only read it + * from EEPROM. + */ + if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) { + uint16_t v; + + v = CSR_READ_2(sc, STGE_StationAddress0); + enaddr[0] = v & 0xff; + enaddr[1] = v >> 8; + v = CSR_READ_2(sc, STGE_StationAddress1); + enaddr[2] = v & 0xff; + enaddr[3] = v >> 8; + v = CSR_READ_2(sc, STGE_StationAddress2); + enaddr[4] = v & 0xff; + enaddr[5] = v >> 8; + sc->sc_stge1023 = 0; + } else { + uint16_t myaddr[ETHER_ADDR_LEN / 2]; + for (i = 0; i sc_stge1023 = 1; + } + + ifp = sc->sc_ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) { + device_printf(sc->sc_dev, "failed to if_alloc()\n"); + error = ENXIO; + goto fail; + } + + ifp->if_softc = sc; + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_ioctl = stge_ioctl; + ifp->if_start = stge_start; + ifp->if_watchdog = stge_watchdog; + ifp->if_init = stge_init; + ifp->if_mtu = ETHERMTU; + ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1; + IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); + IFQ_SET_READY(&ifp->if_snd); + /* Revision B3 and earlier chips have checksum bug. */ + if (sc->sc_rev >= 0x0c) { + ifp->if_hwassist = STGE_CSUM_FEATURES; + ifp->if_capabilities = IFCAP_HWCSUM; + } else { + ifp->if_hwassist = 0; + ifp->if_capabilities = 0; + } + ifp->if_capenable = ifp->if_capabilities; + + /* + * Read some important bits from the PhyCtrl register. + */ + sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) & + (PC_PhyDuplexPolarity | PC_PhyLnkPolarity); + + /* Set up MII bus. 
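+ *
+ * mii_phy_probe() attaches whichever PHY driver claims the device;
+ * for these chips that would typically be the ip1000phy(4) or
+ * gentbi(4) drivers added by this patch, with ukphy(4) as the
+ * fallback.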
*/ + if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange, + stge_mediastatus)) != 0) { + device_printf(sc->sc_dev, "no PHY found!\n"); + goto fail; + } + + ether_ifattach(ifp, enaddr); + + /* VLAN capability setup */ + if (sc->sc_rev >= 0x0c) + ifp->if_capabilities |= + (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM); + else + ifp->if_capabilities |= + (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING); + ifp->if_capenable = ifp->if_capabilities; +#ifdef DEVICE_POLLING + ifp->if_capabilities |= IFCAP_POLLING; +#endif + /* + * Tell the upper layer(s) we support long frames. + * Must appear after the call to ether_ifattach() because + * ether_ifattach() sets ifi_hdrlen to the default value. + */ + ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); + + /* + * The manual recommends disabling early transmit, so we + * do. It's disabled anyway, if using IP checksumming, + * since the entire packet must be in the FIFO in order + * for the chip to perform the checksum. + */ + sc->sc_txthresh = 0x0fff; + + /* + * Disable MWI if the PCI layer tells us to. + */ + sc->sc_DMACtrl = 0; + if ((cmd & PCIM_CMD_MWRICEN) == 0) + sc->sc_DMACtrl |= DMAC_MWIDisable; + + /* + * Hookup IRQ + */ + error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, + stge_intr, sc, &sc->sc_ih); + if (error != 0) { + ether_ifdetach(ifp); + device_printf(sc->sc_dev, "couldn't set up IRQ\n"); + sc->sc_ifp = NULL; + goto fail; + } + +fail: + if (error != 0) + stge_detach(dev); + + return (error); +} + +static int +stge_detach(device_t dev) +{ + struct stge_softc *sc; + struct ifnet *ifp; + + sc = device_get_softc(dev); + + ifp = sc->sc_ifp; +#ifdef DEVICE_POLLING + if (ifp && ifp->if_capenable & IFCAP_POLLING) + ether_poll_deregister(ifp); +#endif + if (device_is_attached(dev)) { + STGE_LOCK(sc); + /* XXX */ + sc->sc_detach = 1; + stge_stop(sc); + STGE_UNLOCK(sc); + callout_drain(&sc->sc_tick_ch); + taskqueue_drain(taskqueue_swi, &sc->sc_link_task); + ether_ifdetach(ifp); + } + + if (sc->sc_miibus != NULL) { + device_delete_child(dev, sc->sc_miibus); + sc->sc_miibus = NULL; + } + bus_generic_detach(dev); + stge_dma_free(sc); + + if (ifp != NULL) { + if_free(ifp); + sc->sc_ifp = NULL; + } + + if (sc->sc_ih) { + bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); + sc->sc_ih = NULL; + } + if (sc->sc_irq) { + bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); + sc->sc_irq = NULL; + } + if (sc->sc_res) { + bus_release_resource(dev, sc->sc_restype, sc->sc_rid, + sc->sc_res); + sc->sc_res = NULL; + } + mtx_destroy(&sc->sc_mii_mtx); + mtx_destroy(&sc->sc_mtx); + + return (0); +} + +struct stge_dmamap_arg { + bus_addr_t stge_busaddr; +}; + +static void +stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + struct stge_dmamap_arg *ctx; + + if (error != 0) + return; + + ctx = (struct stge_dmamap_arg *)arg; + ctx->stge_busaddr = segs[0].ds_addr; +} + +static int +stge_dma_alloc(struct stge_softc *sc) +{ + struct stge_dmamap_arg ctx; + struct stge_txdesc *txd; + struct stge_rxdesc *rxd; + int error, i; + + /* create parent tag. 
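+ *
+ * All of the ring and buffer tags below are derived from this one;
+ * STGE_DMA_MAXADDR bounds the addresses the controller is asked to
+ * DMA to or from.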
*/ + error = bus_dma_tag_create(NULL, /* parent */ + 1, 0, /* algnmnt, boundary */ + STGE_DMA_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ + 0, /* nsegments */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->sc_cdata.stge_parent_tag); + if (error != 0) { + device_printf(sc->sc_dev, "failed to create parent DMA tag\n"); + goto fail; + } + /* create tag for Tx ring. */ + error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ + STGE_RING_ALIGN, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + STGE_TX_RING_SZ, /* maxsize */ + 1, /* nsegments */ + STGE_TX_RING_SZ, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->sc_cdata.stge_tx_ring_tag); + if (error != 0) { + device_printf(sc->sc_dev, + "failed to allocate Tx ring DMA tag\n"); + goto fail; + } + + /* create tag for Rx ring. */ + error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ + STGE_RING_ALIGN, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + STGE_RX_RING_SZ, /* maxsize */ + 1, /* nsegments */ + STGE_RX_RING_SZ, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->sc_cdata.stge_rx_ring_tag); + if (error != 0) { + device_printf(sc->sc_dev, + "failed to allocate Rx ring DMA tag\n"); + goto fail; + } + + /* create tag for Tx buffers. */ + error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ + 1, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MCLBYTES * STGE_MAXTXSEGS, /* maxsize */ + STGE_MAXTXSEGS, /* nsegments */ + MCLBYTES, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->sc_cdata.stge_tx_tag); + if (error != 0) { + device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n"); + goto fail; + } + + /* create tag for Rx buffers. */ + error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ + 1, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MCLBYTES, /* maxsize */ + 1, /* nsegments */ + MCLBYTES, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->sc_cdata.stge_rx_tag); + if (error != 0) { + device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n"); + goto fail; + } + + /* allocate DMA'able memory and load the DMA map for Tx ring. */ + error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag, + (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, + &sc->sc_cdata.stge_tx_ring_map); + if (error != 0) { + device_printf(sc->sc_dev, + "failed to allocate DMA'able memory for Tx ring\n"); + goto fail; + } + + ctx.stge_busaddr = 0; + error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag, + sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring, + STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); + if (error != 0 || ctx.stge_busaddr == 0) { + device_printf(sc->sc_dev, + "failed to load DMA'able memory for Tx ring\n"); + goto fail; + } + sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr; + + /* allocate DMA'able memory and load the DMA map for Rx ring. 
*/ + error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag, + (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, + &sc->sc_cdata.stge_rx_ring_map); + if (error != 0) { + device_printf(sc->sc_dev, + "failed to allocate DMA'able memory for Rx ring\n"); + goto fail; + } + + ctx.stge_busaddr = 0; + error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag, + sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring, + STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); + if (error != 0 || ctx.stge_busaddr == 0) { + device_printf(sc->sc_dev, + "failed to load DMA'able memory for Rx ring\n"); + goto fail; + } + sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr; + + /* create DMA maps for Tx buffers. */ + for (i = 0; i < STGE_TX_RING_CNT; i++) { + txd = &sc->sc_cdata.stge_txdesc[i]; + txd->tx_m = NULL; + txd->tx_dmamap = 0; + error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0, + &txd->tx_dmamap); + if (error != 0) { + device_printf(sc->sc_dev, + "failed to create Tx dmamap\n"); + goto fail; + } + } + /* create DMA maps for Rx buffers. */ + if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0, + &sc->sc_cdata.stge_rx_sparemap)) != 0) { + device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n"); + goto fail; + } + for (i = 0; i < STGE_RX_RING_CNT; i++) { + rxd = &sc->sc_cdata.stge_rxdesc[i]; + rxd->rx_m = NULL; + rxd->rx_dmamap = 0; + error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0, + &rxd->rx_dmamap); + if (error != 0) { + device_printf(sc->sc_dev, + "failed to create Rx dmamap\n"); + goto fail; + } + } + +fail: + return (error); +} + +static void +stge_dma_free(struct stge_softc *sc) +{ + struct stge_txdesc *txd; + struct stge_rxdesc *rxd; + int i; + + /* Tx ring */ + if (sc->sc_cdata.stge_tx_ring_tag) { + if (sc->sc_cdata.stge_tx_ring_map) + bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag, + sc->sc_cdata.stge_tx_ring_map); + if (sc->sc_cdata.stge_tx_ring_map && + sc->sc_rdata.stge_tx_ring) + bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag, + sc->sc_rdata.stge_tx_ring, + sc->sc_cdata.stge_tx_ring_map); + sc->sc_rdata.stge_tx_ring = NULL; + sc->sc_cdata.stge_tx_ring_map = 0; + bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag); + sc->sc_cdata.stge_tx_ring_tag = NULL; + } + /* Rx ring */ + if (sc->sc_cdata.stge_rx_ring_tag) { + if (sc->sc_cdata.stge_rx_ring_map) + bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag, + sc->sc_cdata.stge_rx_ring_map); + if (sc->sc_cdata.stge_rx_ring_map && + sc->sc_rdata.stge_rx_ring) + bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag, + sc->sc_rdata.stge_rx_ring, + sc->sc_cdata.stge_rx_ring_map); + sc->sc_rdata.stge_rx_ring = NULL; + sc->sc_cdata.stge_rx_ring_map = 0; + bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag); + sc->sc_cdata.stge_rx_ring_tag = NULL; + } + /* Tx buffers */ + if (sc->sc_cdata.stge_tx_tag) { + for (i = 0; i < STGE_TX_RING_CNT; i++) { + txd = &sc->sc_cdata.stge_txdesc[i]; + if (txd->tx_dmamap) { + bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag, + txd->tx_dmamap); + txd->tx_dmamap = 0; + } + } + bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag); + sc->sc_cdata.stge_tx_tag = NULL; + } + /* Rx buffers */ + if (sc->sc_cdata.stge_rx_tag) { + for (i = 0; i < STGE_RX_RING_CNT; i++) { + rxd = &sc->sc_cdata.stge_rxdesc[i]; + if (rxd->rx_dmamap) { + bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag, + rxd->rx_dmamap); + rxd->rx_dmamap = 0; + } + } + if (sc->sc_cdata.stge_rx_sparemap) { + bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag, + sc->sc_cdata.stge_rx_sparemap); + sc->sc_cdata.stge_rx_sparemap = 0; + } + 
bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag); + sc->sc_cdata.stge_rx_tag = NULL; + } + + if (sc->sc_cdata.stge_parent_tag) { + bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag); + sc->sc_cdata.stge_parent_tag = NULL; + } +} + +/* + * stge_shutdown: + * + * Make sure the interface is stopped at reboot time. + */ +static void +stge_shutdown(device_t dev) +{ + struct stge_softc *sc; + + sc = device_get_softc(dev); + + STGE_LOCK(sc); + stge_stop(sc); + STGE_UNLOCK(sc); +} + +static int +stge_suspend(device_t dev) +{ + struct stge_softc *sc; + + sc = device_get_softc(dev); + + STGE_LOCK(sc); + stge_stop(sc); + sc->sc_suspended = 1; + STGE_UNLOCK(sc); + + return (0); +} + +static int +stge_resume(device_t dev) +{ + struct stge_softc *sc; + struct ifnet *ifp; + + sc = device_get_softc(dev); + + STGE_LOCK(sc); + ifp = sc->sc_ifp; + if (ifp->if_flags & IFF_UP) + stge_init_locked(sc); + + sc->sc_suspended = 0; + STGE_UNLOCK(sc); + + return (0); +} + +static void +stge_dma_wait(struct stge_softc *sc) +{ + int i; + + for (i = 0; i < STGE_TIMEOUT; i++) { + DELAY(2); + if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0) + break; + } + + if (i == STGE_TIMEOUT) + device_printf(sc->sc_dev, "DMA wait timed out\n"); +} + +static int +stge_encap(struct stge_softc *sc, struct mbuf **m_head) +{ + struct stge_txdesc *txd; + struct stge_tfd *tfd; + struct mbuf *m, *n; + struct m_tag *mtag; + bus_dma_segment_t txsegs[STGE_MAXTXSEGS]; + int error, i, nsegs, si; + uint64_t csum_flags, tfc; + + STGE_LOCK_ASSERT(sc); + + if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL) + return (ENOBUFS); + + m = *m_head; + error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag, + txd->tx_dmamap, m, txsegs, &nsegs, 0); + if (error == EFBIG) { + n = m_defrag(m, M_DONTWAIT); + if (n == NULL) { + m_freem(m); + m = NULL; + return (ENOMEM); + } + m = n; + error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag, + txd->tx_dmamap, m, txsegs, &nsegs, 0); + if (error != 0) { + m_freem(m); + m = NULL; + return (error); + } + } else if (error != 0) + return (error); + if (nsegs == 0) { + m_freem(m); + m = NULL; + return (EIO); + } + + csum_flags = 0; + if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) { + if (m->m_pkthdr.csum_flags & CSUM_IP) + csum_flags |= TFD_IPChecksumEnable; + if (m->m_pkthdr.csum_flags & CSUM_TCP) + csum_flags |= TFD_TCPChecksumEnable; + else if (m->m_pkthdr.csum_flags & CSUM_UDP) + csum_flags |= TFD_UDPChecksumEnable; + } + + si = sc->sc_cdata.stge_tx_prod; + tfd = &sc->sc_rdata.stge_tx_ring[si]; + for (i = 0; i < nsegs; i++) + tfd->tfd_frags[i].frag_word0 = + htole64(FRAG_ADDR(txsegs[i].ds_addr) | + FRAG_LEN(txsegs[i].ds_len)); + sc->sc_cdata.stge_tx_cnt++; + + tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) | + TFD_FragCount(nsegs) | csum_flags; + if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) + tfc |= TFD_TxDMAIndicate; + + /* Update producer index. */ + sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT; + + /* Check if we have a VLAN tag to insert. */ + mtag = VLAN_OUTPUT_TAG(sc->sc_ifp, m); + if (mtag != NULL) + tfc |= TFD_VLANTagInsert | TFD_VID(VLAN_TAG_VALUE(mtag)); + tfd->tfd_control = htole64(tfc); + + /* Update Tx Queue. */ + STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q); + STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q); + txd->tx_m = m; + + /* Sync descriptors. 
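stge_encap above assembles the 64-bit tfd_control word from the frame id, word-alignment mode, fragment count, checksum-offload flags and, when present, the VLAN id. A stand-alone illustration of that packing, using the TFD_* macros as defined in if_stgereg.h later in this patch and made-up field values:

#include <stdint.h>
#include <stdio.h>

/* Copied from if_stgereg.h in this patch. */
#define TFD_FrameId(x)		((x) << 0)
#define TFD_WordAlign(x)	((x) << 16)
#define TFD_WordAlign_disable	1
#define TFD_TCPChecksumEnable	(1ULL << 18)
#define TFD_IPChecksumEnable	(1ULL << 20)
#define TFD_FragCount(x)	((x) << 24)
#define TFD_VLANTagInsert	(1ULL << 28)
#define TFD_TFDDone		(1ULL << 31)
#define TFD_VID(x)		(((uint64_t)(x)) << 32)

int
main(void)
{
	/* Descriptor 5, 3 fragments, IP+TCP csum offload, VLAN id 100. */
	uint64_t tfc = TFD_FrameId(5) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(3) | TFD_IPChecksumEnable | TFD_TCPChecksumEnable |
	    TFD_VLANTagInsert | TFD_VID(100);

	printf("tfd_control = 0x%016llx, TFDDone = %d\n",
	    (unsigned long long)tfc, (int)((tfc & TFD_TFDDone) != 0));
	return (0);
}

The chip sets TFD_TFDDone in the same word once it has consumed the descriptor, which is what stge_txeof later polls for.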
*/ + bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap, + BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, + sc->sc_cdata.stge_tx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + return (0); +} + +/* + * stge_start: [ifnet interface function] + * + * Start packet transmission on the interface. + */ +static void +stge_start(struct ifnet *ifp) +{ + struct stge_softc *sc; + + sc = ifp->if_softc; + STGE_LOCK(sc); + stge_start_locked(ifp); + STGE_UNLOCK(sc); +} + +static void +stge_start_locked(struct ifnet *ifp) +{ + struct stge_softc *sc; + struct mbuf *m_head; + int enq; + + sc = ifp->if_softc; + + STGE_LOCK_ASSERT(sc); + + if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != + IFF_DRV_RUNNING) + return; + + for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { + if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) { + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + break; + } + + IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); + if (m_head == NULL) + break; + /* + * Pack the data into the transmit ring. If we + * don't have room, set the OACTIVE flag and wait + * for the NIC to drain the ring. + */ + if (stge_encap(sc, &m_head)) { + if (m_head == NULL) + break; + IFQ_DRV_PREPEND(&ifp->if_snd, m_head); + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + break; + } + + enq++; + /* + * If there's a BPF listener, bounce a copy of this frame + * to him. + */ + BPF_MTAP(ifp, m_head); + } + + if (enq > 0) { + /* Transmit */ + CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow); + + /* Set a timeout in case the chip goes out to lunch. */ + ifp->if_timer = 5; + } +} + +/* + * stge_watchdog: [ifnet interface function] + * + * Watchdog timer handler. + */ +static void +stge_watchdog(struct ifnet *ifp) +{ + struct stge_softc *sc; + + sc = ifp->if_softc; + + STGE_LOCK(sc); + if_printf(sc->sc_ifp, "device timeout\n"); + ifp->if_oerrors++; + stge_init_locked(sc); + STGE_UNLOCK(sc); +} + +/* + * stge_ioctl: [ifnet interface function] + * + * Handle control requests from the operator. + */ +static int +stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +{ + struct stge_softc *sc; + struct ifreq *ifr; + struct mii_data *mii; + int error, mask; + + sc = ifp->if_softc; + ifr = (struct ifreq *)data; + error = 0; + switch (cmd) { + case SIOCSIFMTU: + if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > + (STGE_JUMBO_MTU - ((ifp->if_capenable & IFCAP_VLAN_MTU) != 0 ? 
+ ETHER_VLAN_ENCAP_LEN : 0))) + error = EINVAL; + else if (ifp->if_mtu != ifr->ifr_mtu) { + ifp->if_mtu = ifr->ifr_mtu; + STGE_LOCK(sc); + stge_init_locked(sc); + STGE_UNLOCK(sc); + } + break; + case SIOCSIFFLAGS: + STGE_LOCK(sc); + if ((ifp->if_flags & IFF_UP) != 0) { + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { + if (((ifp->if_flags ^ sc->sc_if_flags) + & IFF_PROMISC) != 0) + stge_set_filter(sc); + } else { + if (sc->sc_detach == 0) + stge_init_locked(sc); + } + } else { + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) + stge_stop(sc); + } + sc->sc_if_flags = ifp->if_flags; + STGE_UNLOCK(sc); + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + STGE_LOCK(sc); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) + stge_set_multi(sc); + STGE_UNLOCK(sc); + break; + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + mii = device_get_softc(sc->sc_miibus); + error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); + break; + case SIOCSIFCAP: + mask = ifr->ifr_reqcap ^ ifp->if_capenable; +#ifdef DEVICE_POLLING + if ((mask & IFCAP_POLLING) != 0) { + if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { + error = ether_poll_register(stge_poll, ifp); + if (error != 0) + break; + STGE_LOCK(sc); + CSR_WRITE_2(sc, STGE_IntEnable, 0); + ifp->if_capenable |= IFCAP_POLLING; + STGE_UNLOCK(sc); + } else { + error = ether_poll_deregister(ifp); + if (error != 0) + break; + STGE_LOCK(sc); + CSR_WRITE_2(sc, STGE_IntEnable, + sc->sc_IntEnable); + ifp->if_capenable &= ~IFCAP_POLLING; + STGE_UNLOCK(sc); + } + } +#endif + if ((mask & IFCAP_HWCSUM) != 0) { + ifp->if_capenable ^= IFCAP_HWCSUM; + if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 && + (IFCAP_HWCSUM & ifp->if_capabilities) != 0) + ifp->if_hwassist = STGE_CSUM_FEATURES; + else + ifp->if_hwassist = 0; + } + if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { + ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { + STGE_LOCK(sc); + stge_vlan_setup(sc); + STGE_UNLOCK(sc); + } + } + STGE_VLAN_CAPABILITIES(ifp); + break; + default: + error = ether_ioctl(ifp, cmd, data); + break; + } + + return (error); +} + +static void +stge_link_task(void *arg, int pending) +{ + struct stge_softc *sc; + uint32_t v, ac; + int i; + + sc = (struct stge_softc *)arg; + STGE_LOCK(sc); + /* + * Update STGE_MACCtrl register depending on link status. + * (duplex, flow control etc) + */ + v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable); + v |= sc->sc_MACCtrl; + CSR_WRITE_4(sc, STGE_MACCtrl, v); + if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) { + /* Duplex setting changed, reset Tx/Rx functions. */ + ac = CSR_READ_4(sc, STGE_AsicCtrl); + ac |= AC_TxReset | AC_RxReset; + CSR_WRITE_4(sc, STGE_AsicCtrl, ac); + for (i = 0; i < STGE_TIMEOUT; i++) { + DELAY(100); + if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0) + break; + } + if (i == STGE_TIMEOUT) + device_printf(sc->sc_dev, "reset failed to complete\n"); + } + STGE_UNLOCK(sc); +} + +static __inline int +stge_tx_error(struct stge_softc *sc) +{ + uint32_t txstat; + int error; + + for (error = 0;;) { + txstat = CSR_READ_4(sc, STGE_TxStatus); + if ((txstat & TS_TxComplete) == 0) + break; + /* Tx underrun */ + if ((txstat & TS_TxUnderrun) != 0) { + /* + * XXX + * There should be a more better way to recover + * from Tx underrun instead of a full reset. 
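The XXX above concedes that a full reinitialization is a blunt answer to a Tx underrun. A lighter recovery that other gigabit drivers commonly use, sketched here purely as an illustration and not something this patch implements, is to widen the Tx start threshold so more of the frame is staged in the FIFO before transmission begins, falling back to the reset only once the threshold is maxed out; the function name, step and ceiling below are arbitrary assumptions:

static int
stge_txunderrun_recover(struct stge_softc *sc)
{

	if (sc->sc_txthresh < 0x0fff) {
		/* Stage more of the frame before the MAC starts sending. */
		sc->sc_txthresh += 32;
		CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
		return (0);	/* recovered without a reset */
	}
	return (-1);		/* give up; caller reinitializes the chip */
}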
+ */ + if (sc->sc_nerr++ < STGE_MAXERR) + device_printf(sc->sc_dev, "Tx underrun, " + "resetting...\n"); + if (sc->sc_nerr == STGE_MAXERR) + device_printf(sc->sc_dev, "too many errors; " + "not reporting any more\n"); + error = -1; + break; + } + /* Maximum/Late collisions, Re-enable Tx MAC. */ + if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0) + CSR_WRITE_4(sc, STGE_MACCtrl, + (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) | + MC_TxEnable); + } + + return (error); +} + +/* + * stge_intr: + * + * Interrupt service routine. + */ +static void +stge_intr(void *arg) +{ + struct stge_softc *sc; + struct ifnet *ifp; + int reinit; + uint16_t status; + + sc = (struct stge_softc *)arg; + ifp = sc->sc_ifp; + + STGE_LOCK(sc); + +#ifdef DEVICE_POLLING + if ((ifp->if_capenable & IFCAP_POLLING) != 0) + goto done_locked; +#endif + status = CSR_READ_2(sc, STGE_IntStatus); + if (sc->sc_suspended || (status & IS_InterruptStatus) == 0) + goto done_locked; + + /* Disable interrupts. */ + for (reinit = 0;;) { + status = CSR_READ_2(sc, STGE_IntStatusAck); + status &= sc->sc_IntEnable; + if (status == 0) + break; + /* Host interface errors. */ + if ((status & IS_HostError) != 0) { + device_printf(sc->sc_dev, + "Host interface error, resetting...\n"); + reinit = 1; + goto force_init; + } + + /* Receive interrupts. */ + if ((status & IS_RxDMAComplete) != 0) { + stge_rxeof(sc); + if ((status & IS_RFDListEnd) != 0) + CSR_WRITE_4(sc, STGE_DMACtrl, + DMAC_RxDMAPollNow); + } + + /* Transmit interrupts. */ + if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0) + stge_txeof(sc); + + /* Transmission errors.*/ + if ((status & IS_TxComplete) != 0) { + if ((reinit = stge_tx_error(sc)) != 0) + break; + } + } + +force_init: + if (reinit != 0) + stge_init_locked(sc); + + /* Re-enable interrupts. */ + CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable); + + /* Try to get more packets going. */ + if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) + stge_start_locked(ifp); + +done_locked: + STGE_UNLOCK(sc); +} + +/* + * stge_txeof: + * + * Helper; handle transmit interrupts. + */ +static void +stge_txeof(struct stge_softc *sc) +{ + struct ifnet *ifp; + struct stge_txdesc *txd; + uint64_t control; + int cons; + + STGE_LOCK_ASSERT(sc); + + ifp = sc->sc_ifp; + + txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq); + if (txd == NULL) + return; + bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, + sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD); + + /* + * Go through our Tx list and free mbufs for those + * frames which have been transmitted. 
+ */ + for (cons = sc->sc_cdata.stge_tx_cons;; + cons = (cons + 1) % STGE_TX_RING_CNT) { + if (sc->sc_cdata.stge_tx_cnt <= 0) + break; + control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control); + if ((control & TFD_TFDDone) == 0) + break; + sc->sc_cdata.stge_tx_cnt--; + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + + bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap); + + /* Output counter is updated with statistics register */ + m_freem(txd->tx_m); + txd->tx_m = NULL; + STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q); + STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q); + txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq); + } + sc->sc_cdata.stge_tx_cons = cons; + if (sc->sc_cdata.stge_tx_cnt == 0) + ifp->if_timer = 0; + + bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, + sc->sc_cdata.stge_tx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); +} + +static __inline void +stge_discard_rxbuf(struct stge_softc *sc, int idx) +{ + struct stge_rfd *rfd; + + rfd = &sc->sc_rdata.stge_rx_ring[idx]; + rfd->rfd_status = 0; +} + +#ifndef __NO_STRICT_ALIGNMENT +/* + * It seems that TC9021's DMA engine has alignment restrictions in + * DMA scatter operations. The first DMA segment has no address + * alignment restrictins but the rest should be aligned on 4(?) bytes + * boundary. Otherwise it would corrupt random memory. Since we don't + * know which one is used for the first segment in advance we simply + * don't align at all. + * To avoid copying over an entire frame to align, we allocate a new + * mbuf and copy ethernet header to the new mbuf. The new mbuf is + * prepended into the existing mbuf chain. + */ +static __inline mbuf * +stge_fixup_rx(struct stge_softc *sc, struct mbuf *m) +{ + struct mbuf *n; + + n = NULL; + if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { + bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); + m->m_data += ETHER_HDR_LEN; + n = m; + } else { + MGETHDR(n, M_DONTWAIT, MT_DATA); + if (n != NULL) { + bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); + m->m_data += ETHER_HDR_LEN; + m->m_len -= ETHER_HDR_LEN; + n->m_len = ETHER_HDR_LEN; + M_MOVE_PKTHDR(n, m); + n->m_next = m; + } else + m_freem(m); + } + + return (n); +} +#endif + +/* + * stge_rxeof: + * + * Helper; handle receive interrupts. + */ +static void +stge_rxeof(struct stge_softc *sc) +{ + struct ifnet *ifp; + struct stge_rxdesc *rxd; + struct mbuf *mp, *m; + uint64_t status64; + uint32_t status; + int len, cons, prog; + + STGE_LOCK_ASSERT(sc); + + ifp = sc->sc_ifp; + + bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag, + sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD); + + prog = 0; + for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT; + prog++, cons = (cons + 1) % STGE_RX_RING_CNT) { + status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status); + status = RFD_RxStatus(status64); + if ((status & RFD_RFDDone) == 0) + break; +#ifdef DEVICE_POLLING + if (ifp->if_capenable & IFCAP_POLLING) { + if (sc->sc_cdata.stge_rxcycles <= 0) + break; + sc->sc_cdata.stge_rxcycles--; + } +#endif + prog++; + rxd = &sc->sc_cdata.stge_rxdesc[cons]; + mp = rxd->rx_m; + + /* + * If the packet had an error, drop it. Note we count + * the error later in the periodic stats update. 
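stge_rxeof below drives everything off the 64-bit rfd_status word: the low half holds the done/frame-end/error bits plus the DMA'd length, the high half the received VLAN TCI. A stand-alone decode using the RFD_* macros copied from if_stgereg.h in this patch and a made-up status value:

#include <stdint.h>
#include <stdio.h>

/* Copied from if_stgereg.h in this patch. */
#define RFD_RxDMAFrameLen(x)	((x) & 0xffff)
#define RFD_RxFCSError		0x00080000
#define RFD_FrameEnd		0x40000000
#define RFD_RFDDone		0x80000000
#define RFD_RxStatus(x)		((x) & 0xffffffff)
#define RFD_TCI(x)		((((uint64_t)(x)) >> 32) & 0xffff)

int
main(void)
{
	/* Done + FrameEnd, 1514-byte frame, VLAN tag 0x0064 in the top half. */
	uint64_t status64 = ((uint64_t)0x0064 << 32) |
	    RFD_RFDDone | RFD_FrameEnd | 1514;
	uint32_t status = RFD_RxStatus(status64);

	printf("done=%d end=%d fcserr=%d len=%u tci=0x%04x\n",
	    (status & RFD_RFDDone) != 0, (status & RFD_FrameEnd) != 0,
	    (status & RFD_RxFCSError) != 0,
	    (unsigned)RFD_RxDMAFrameLen(status),
	    (unsigned)RFD_TCI(status64));
	return (0);
}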
+ */ + if ((status & RFD_FrameEnd) != 0 && (status & + (RFD_RxFIFOOverrun | RFD_RxRuntFrame | + RFD_RxAlignmentError | RFD_RxFCSError | + RFD_RxLengthError)) != 0) { + stge_discard_rxbuf(sc, cons); + if (sc->sc_cdata.stge_rxhead != NULL) { + m_freem(sc->sc_cdata.stge_rxhead); + STGE_RXCHAIN_RESET(sc); + } + continue; + } + /* + * Add a new receive buffer to the ring. + */ + if (stge_newbuf(sc, cons) != 0) { + ifp->if_iqdrops++; + stge_discard_rxbuf(sc, cons); + if (sc->sc_cdata.stge_rxhead != NULL) { + m_freem(sc->sc_cdata.stge_rxhead); + STGE_RXCHAIN_RESET(sc); + } + continue; + } + + if ((status & RFD_FrameEnd) != 0) { + len = RFD_RxDMAFrameLen(status) - + sc->sc_cdata.stge_rxlen; + mp->m_len = len; + sc->sc_cdata.stge_rxlen = RFD_RxDMAFrameLen(status); + } else + sc->sc_cdata.stge_rxlen += mp->m_len; + /* Chain mbufs. */ + if (sc->sc_cdata.stge_rxhead == NULL) { + sc->sc_cdata.stge_rxhead = mp; + sc->sc_cdata.stge_rxtail = mp; + } else { + mp->m_flags &= ~M_PKTHDR; + sc->sc_cdata.stge_rxtail->m_next = mp; + sc->sc_cdata.stge_rxtail = mp; + } + + if ((status & RFD_FrameEnd) != 0) { + m = sc->sc_cdata.stge_rxhead; + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.len = sc->sc_cdata.stge_rxlen; + + if (m->m_pkthdr.len > sc->sc_if_framesize) { + m_freem(m); + STGE_RXCHAIN_RESET(sc); + continue; + } + /* + * Set the incoming checksum information for + * the packet. + */ + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { + if ((status & RFD_IPDetected) != 0) { + m->m_pkthdr.csum_flags |= + CSUM_IP_CHECKED; + if ((status & RFD_IPError) == 0) + m->m_pkthdr.csum_flags |= + CSUM_IP_VALID; + } + if (((status & RFD_TCPDetected) != 0 && + (status & RFD_TCPError) == 0) || + ((status & RFD_UDPDetected) != 0 && + (status & RFD_UDPError) == 0)) { + m->m_pkthdr.csum_flags |= + (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); + m->m_pkthdr.csum_data = 0xffff; + } + } + +#ifndef __NO_STRICT_ALIGNMENT + if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) { + if ((m = stge_fixup_rx(sc, m)) == NULL) { + STGE_RXCHAIN_RESET(sc); + continue; + } + } +#endif + /* Check for VLAN tagged packets. */ + if ((status & RFD_VLANDetected) != 0 && + (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) + STGE_VLAN_INPUT_TAG(ifp, m, RFD_TCI(status64)); + + STGE_UNLOCK(sc); + /* Pass it on. */ + (*ifp->if_input)(ifp, m); + STGE_LOCK(sc); + + STGE_RXCHAIN_RESET(sc); + } + } + + if (prog > 0) { + /* Update the consumer index. */ + sc->sc_cdata.stge_rx_cons = cons; + bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag, + sc->sc_cdata.stge_rx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + } +} + +#ifdef DEVICE_POLLING +static void +stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) +{ + struct stge_softc *sc; + uint16_t status; + + sc = ifp->if_softc; + STGE_LOCK(sc); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { + STGE_UNLOCK(sc); + return; + } + + sc->sc_cdata.stge_rxcycles = count; + stge_rxeof(sc); + stge_txeof(sc); + + if (cmd == POLL_AND_CHECK_STATUS) { + status = CSR_READ_2(sc, STGE_IntStatus); + status &= sc->sc_IntEnable; + if (status != 0) { + if ((status & IS_HostError) != 0) { + device_printf(sc->sc_dev, + "Host interface error, resetting...\n"); + stge_init_locked(sc); + } + if ((status & IS_TxComplete) != 0) { + if (stge_tx_error(sc) != 0) + stge_init_locked(sc); + } + } + + } + + if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) + stge_start_locked(ifp); + + STGE_UNLOCK(sc); +} +#endif /* DEVICE_POLLING */ + +/* + * stge_tick: + * + * One second timer, used to tick the MII. 
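The stge_fixup_rx dance above exists because a 14-byte Ethernet header leaves the following IP header only 2-byte aligned, which strict-alignment machines cannot dereference directly; normal-sized frames sidestep the problem because stge_newbuf further down shifts the receive buffer by ETHER_ALIGN instead. A trivial stand-alone check of that offset arithmetic (header and cluster sizes assumed to be the usual FreeBSD values):

#include <stdio.h>

#define ETHER_HDR_LEN	14	/* assumed, from <net/ethernet.h> */
#define ETHER_ALIGN	2	/* assumed */
#define MCLBYTES	2048	/* assumed cluster size */

int
main(void)
{
	printf("IP header offset, unshifted buffer: %d (mod 4 = %d)\n",
	    ETHER_HDR_LEN, ETHER_HDR_LEN % 4);
	printf("IP header offset, ETHER_ALIGN'ed buffer: %d (mod 4 = %d)\n",
	    ETHER_ALIGN + ETHER_HDR_LEN, (ETHER_ALIGN + ETHER_HDR_LEN) % 4);
	printf("header-copy fixup needed once frames exceed %d bytes\n",
	    MCLBYTES - ETHER_ALIGN);
	return (0);
}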
+ */ +static void +stge_tick(void *arg) +{ + struct stge_softc *sc; + struct mii_data *mii; + + sc = (struct stge_softc *)arg; + + STGE_LOCK_ASSERT(sc); + + mii = device_get_softc(sc->sc_miibus); + mii_tick(mii); + + /* Update statistics counters. */ + stge_stats_update(sc); + + /* + * Relcaim any pending Tx descriptors to release mbufs in a + * timely manner as we don't generate Tx completion interrupts + * for every frame. This limits the delay to a maximum of one + * second. + */ + if (sc->sc_cdata.stge_tx_cnt != 0) + stge_txeof(sc); + + callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc); +} + +/* + * stge_stats_update: + * + * Read the TC9021 statistics counters. + */ +static void +stge_stats_update(struct stge_softc *sc) +{ + struct ifnet *ifp; + + STGE_LOCK_ASSERT(sc); + + ifp = sc->sc_ifp; + + CSR_READ_4(sc,STGE_OctetRcvOk); + + ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk); + + ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors); + + CSR_READ_4(sc, STGE_OctetXmtdOk); + + ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk); + + ifp->if_collisions += + CSR_READ_4(sc, STGE_LateCollisions) + + CSR_READ_4(sc, STGE_MultiColFrames) + + CSR_READ_4(sc, STGE_SingleColFrames); + + ifp->if_oerrors += + CSR_READ_2(sc, STGE_FramesAbortXSColls) + + CSR_READ_2(sc, STGE_FramesWEXDeferal); +} + +/* + * stge_reset: + * + * Perform a soft reset on the TC9021. + */ +static void +stge_reset(struct stge_softc *sc, uint32_t how) +{ + uint32_t ac; + uint8_t v; + int i, dv; + + STGE_LOCK_ASSERT(sc); + + dv = 5000; + ac = CSR_READ_4(sc, STGE_AsicCtrl); + switch (how) { + case STGE_RESET_TX: + ac |= AC_TxReset | AC_FIFO; + dv = 100; + break; + case STGE_RESET_RX: + ac |= AC_RxReset | AC_FIFO; + dv = 100; + break; + case STGE_RESET_FULL: + default: + /* + * Only assert RstOut if we're fiber. We need GMII clocks + * to be present in order for the reset to complete on fiber + * cards. + */ + ac |= AC_GlobalReset | AC_RxReset | AC_TxReset | + AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit | + (sc->sc_usefiber ? AC_RstOut : 0); + break; + } + + CSR_WRITE_4(sc, STGE_AsicCtrl, ac); + + /* Account for reset problem at 10Mbps. */ + DELAY(dv); + + for (i = 0; i < STGE_TIMEOUT; i++) { + if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0) + break; + DELAY(dv); + } + + if (i == STGE_TIMEOUT) + device_printf(sc->sc_dev, "reset failed to complete\n"); + + /* Set LED, from Linux IPG driver. */ + ac = CSR_READ_4(sc, STGE_AsicCtrl); + ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1); + if ((sc->sc_led & 0x01) != 0) + ac |= AC_LEDMode; + if ((sc->sc_led & 0x03) != 0) + ac |= AC_LEDModeBit1; + if ((sc->sc_led & 0x08) != 0) + ac |= AC_LEDSpeed; + CSR_WRITE_4(sc, STGE_AsicCtrl, ac); + + /* Set PHY, from Linux IPG driver */ + v = CSR_READ_1(sc, STGE_PhySet); + v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet); + v |= ((sc->sc_led & 0x70) >> 4); + CSR_WRITE_1(sc, STGE_PhySet, v); +} + +/* + * stge_init: [ ifnet interface function ] + * + * Initialize the interface. + */ +static void +stge_init(void *xsc) +{ + struct stge_softc *sc; + + sc = (struct stge_softc *)xsc; + STGE_LOCK(sc); + stge_init_locked(sc); + STGE_UNLOCK(sc); +} + +static void +stge_init_locked(struct stge_softc *sc) +{ + struct ifnet *ifp; + struct mii_data *mii; + uint16_t eaddr[3]; + uint32_t v; + int error; + + STGE_LOCK_ASSERT(sc); + + ifp = sc->sc_ifp; + mii = device_get_softc(sc->sc_miibus); + + /* + * Cancel any pending I/O. + */ + stge_stop(sc); + + /* Init descriptors. 
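Further down, stge_init_locked programs STGE_RxDMAIntCtrl from sc_rxint_nframe and sc_rxint_dmawait; the wait time is handed to the chip in 64 ns ticks, so the driver converts from microseconds. A stand-alone check of that conversion with the defaults from if_stgereg.h:

#include <stdio.h>

/* Copied from if_stgereg.h in this patch. */
#define STGE_RXINT_NFRAME_DEFAULT	8
#define STGE_RXINT_DMAWAIT_DEFAULT	30	/* microseconds */
#define STGE_RXINT_USECS2TICK(x)	(((x) * 1000)/64)

int
main(void)
{
	/*
	 * With the defaults the chip coalesces Rx interrupts: roughly one
	 * per 8 received frames, or 30 us after a frame arrives if fewer
	 * than 8 are pending, i.e. 468 ticks of the 64 ns wait counter.
	 */
	printf("nframe=%d, dmawait=%d us -> RxDMAWaitTime=%d ticks\n",
	    STGE_RXINT_NFRAME_DEFAULT, STGE_RXINT_DMAWAIT_DEFAULT,
	    STGE_RXINT_USECS2TICK(STGE_RXINT_DMAWAIT_DEFAULT));
	return (0);
}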
*/ + error = stge_init_rx_ring(sc); + if (error != 0) { + device_printf(sc->sc_dev, + "initialization failed: no memory for rx buffers\n"); + stge_stop(sc); + goto out; + } + stge_init_tx_ring(sc); + + /* Set the station address. */ + bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); + CSR_WRITE_2(sc, STGE_StationAddress0, eaddr[0]); + CSR_WRITE_2(sc, STGE_StationAddress1, eaddr[1]); + CSR_WRITE_2(sc, STGE_StationAddress2, eaddr[2]); + + /* + * Set the statistics masks. Disable all the RMON stats, + * and disable selected stats in the non-RMON stats registers. + */ + CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff); + CSR_WRITE_4(sc, STGE_StatisticsMask, + (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) | + (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) | + (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) | + (1U << 21)); + + /* Set up the receive filter. */ + stge_set_filter(sc); + /* Program multicast filter. */ + stge_set_multi(sc); + + /* + * Give the transmit and receive ring to the chip. + */ + CSR_WRITE_4(sc, STGE_TFDListPtrHi, + STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0))); + CSR_WRITE_4(sc, STGE_TFDListPtrLo, + STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0))); + + CSR_WRITE_4(sc, STGE_RFDListPtrHi, + STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0))); + CSR_WRITE_4(sc, STGE_RFDListPtrLo, + STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0))); + + /* + * Initialize the Tx auto-poll period. It's OK to make this number + * large (255 is the max, but we use 127) -- we explicitly kick the + * transmit engine when there's actually a packet. + */ + CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127); + + /* ..and the Rx auto-poll period. */ + CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1); + + /* Initialize the Tx start threshold. */ + CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh); + + /* Rx DMA thresholds, from Linux */ + CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30); + CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30); + + /* Rx early threhold, from Linux */ + CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff); + + /* Tx DMA thresholds, from Linux */ + CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30); + CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04); + + /* + * Initialize the Rx DMA interrupt control register. We + * request an interrupt after every incoming packet, but + * defer it for sc_rxint_dmawait us. When the number of + * interrupts pending reaches STGE_RXINT_NFRAME, we stop + * deferring the interrupt, and signal it immediately. + */ + CSR_WRITE_4(sc, STGE_RxDMAIntCtrl, + RDIC_RxFrameCount(sc->sc_rxint_nframe) | + RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait))); + + /* + * Initialize the interrupt mask. + */ + sc->sc_IntEnable = IS_HostError | IS_TxComplete | + IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd; +#ifdef DEVICE_POLLING + /* Disable interrupts if we are polling. */ + if ((ifp->if_capenable & IFCAP_POLLING) != 0) + CSR_WRITE_2(sc, STGE_IntEnable, 0); + else +#endif + CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable); + + /* + * Configure the DMA engine. + * XXX Should auto-tune TxBurstLimit. + */ + CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3)); + + /* + * Send a PAUSE frame when we reach 29,696 bytes in the Rx + * FIFO, and send an un-PAUSE frame when we reach 3056 bytes + * in the Rx FIFO. + */ + CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16); + CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16); + + /* + * Set the maximum frame size. + */ + sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + + ((ifp->if_capenable & IFCAP_VLAN_MTU) != 0 ? 
+ ETHER_VLAN_ENCAP_LEN : 0); + CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize); + + /* + * Initialize MacCtrl -- do it before setting the media, + * as setting the media will actually program the register. + * + * Note: We have to poke the IFS value before poking + * anything else. + */ + /* Tx/Rx MAC should be disabled before programming IFS.*/ + CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit)); + + stge_vlan_setup(sc); + + if (sc->sc_rev >= 6) { /* >= B.2 */ + /* Multi-frag frame bug work-around. */ + CSR_WRITE_2(sc, STGE_DebugCtrl, + CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200); + + /* Tx Poll Now bug work-around. */ + CSR_WRITE_2(sc, STGE_DebugCtrl, + CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010); + /* Tx Poll Now bug work-around. */ + CSR_WRITE_2(sc, STGE_DebugCtrl, + CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020); + } + + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable; + CSR_WRITE_4(sc, STGE_MACCtrl, v); + /* + * It seems that transmitting frames without checking the state of + * Rx/Tx MAC wedge the hardware. + */ + stge_start_tx(sc); + stge_start_rx(sc); + + /* + * Set the current media. + */ + mii_mediachg(mii); + + /* + * Start the one second MII clock. + */ + callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc); + + /* + * ...all done! + */ + ifp->if_drv_flags |= IFF_DRV_RUNNING; + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + + out: + if (error != 0) + device_printf(sc->sc_dev, "interface not running\n"); +} + +static void +stge_vlan_setup(struct stge_softc *sc) +{ + struct ifnet *ifp; + uint32_t v; + + ifp = sc->sc_ifp; + /* + * The NIC always copy a VLAN tag regardless of STGE_MACCtrl + * MC_AutoVLANuntagging bit. + * MC_AutoVLANtagging bit selects which VLAN source to use + * between STGE_VLANTag and TFC. However TFC TFD_VLANTagInsert + * bit has priority over MC_AutoVLANtagging bit. So we always + * use TFC instead of STGE_VLANTag register. + */ + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) + v |= MC_AutoVLANuntagging; + else + v &= ~MC_AutoVLANuntagging; + CSR_WRITE_4(sc, STGE_MACCtrl, v); +} + +/* + * Stop transmission on the interface. + */ +static void +stge_stop(struct stge_softc *sc) +{ + struct ifnet *ifp; + struct stge_txdesc *txd; + struct stge_rxdesc *rxd; + uint32_t v; + int i; + + STGE_LOCK_ASSERT(sc); + /* + * Stop the one second clock. + */ + callout_stop(&sc->sc_tick_ch); + + /* + * Reset the chip to a known state. + */ + stge_reset(sc, STGE_RESET_FULL); + + /* + * Disable interrupts. + */ + CSR_WRITE_2(sc, STGE_IntEnable, 0); + + /* + * Stop receiver, transmitter, and stats update. + */ + stge_stop_rx(sc); + stge_stop_tx(sc); + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + v |= MC_StatisticsDisable; + CSR_WRITE_4(sc, STGE_MACCtrl, v); + + /* + * Stop the transmit and receive DMA. + */ + stge_dma_wait(sc); + CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0); + CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0); + CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0); + CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0); + + /* + * Free RX and TX mbufs still in the queues. 
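A little earlier, stge_init_locked derived sc_if_framesize (written to STGE_MaxFrameSize) from the MTU plus Ethernet header, CRC and, when enabled, the VLAN encapsulation; the SIOCSIFMTU handler bounds the MTU against STGE_JUMBO_MTU with the same terms. A stand-alone check of that arithmetic, with the ETHER_* constants assumed to be the usual FreeBSD values of 14, 4 and 4 bytes:

#include <stdio.h>

#define ETHER_HDR_LEN		14	/* assumed */
#define ETHER_CRC_LEN		4	/* assumed */
#define ETHER_VLAN_ENCAP_LEN	4	/* assumed */
/* Copied from if_stgereg.h in this patch. */
#define STGE_JUMBO_FRAMELEN	9022
#define STGE_JUMBO_MTU	\
	(STGE_JUMBO_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN)

static int
framesize(int mtu, int vlan_mtu)
{
	return (mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    (vlan_mtu ? ETHER_VLAN_ENCAP_LEN : 0));
}

int
main(void)
{
	printf("mtu 1500 with VLAN: on-wire max %d bytes\n",
	    framesize(1500, 1));
	printf("jumbo mtu %d without VLAN: on-wire max %d bytes\n",
	    STGE_JUMBO_MTU, framesize(STGE_JUMBO_MTU, 0));
	return (0);
}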
+ */ + for (i = 0; i < STGE_RX_RING_CNT; i++) { + rxd = &sc->sc_cdata.stge_rxdesc[i]; + if (rxd->rx_m != NULL) { + bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, + rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, + rxd->rx_dmamap); + m_freem(rxd->rx_m); + rxd->rx_m = NULL; + } + } + for (i = 0; i < STGE_TX_RING_CNT; i++) { + txd = &sc->sc_cdata.stge_txdesc[i]; + if (txd->tx_m != NULL) { + bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, + txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, + txd->tx_dmamap); + m_freem(txd->tx_m); + txd->tx_m = NULL; + } + } + + /* + * Mark the interface down and cancel the watchdog timer. + */ + ifp = sc->sc_ifp; + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + ifp->if_timer = 0; +} + +static void +stge_start_tx(struct stge_softc *sc) +{ + uint32_t v; + int i; + + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + if ((v & MC_TxEnabled) != 0) + return; + v |= MC_TxEnable; + CSR_WRITE_4(sc, STGE_MACCtrl, v); + CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127); + for (i = STGE_TIMEOUT; i > 0; i--) { + DELAY(10); + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + if ((v & MC_TxEnabled) != 0) + break; + } + if (i == 0) + device_printf(sc->sc_dev, "Starting Tx MAC timed out\n"); +} + +static void +stge_start_rx(struct stge_softc *sc) +{ + uint32_t v; + int i; + + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + if ((v & MC_RxEnabled) != 0) + return; + v |= MC_RxEnable; + CSR_WRITE_4(sc, STGE_MACCtrl, v); + CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 64); + for (i = STGE_TIMEOUT; i > 0; i--) { + DELAY(10); + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + if ((v & MC_RxEnabled) != 0) + break; + } + if (i == 0) + device_printf(sc->sc_dev, "Starting Rx MAC timed out\n"); +} + +static void +stge_stop_tx(struct stge_softc *sc) +{ + uint32_t v; + int i; + + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + if ((v & MC_TxEnabled) == 0) + return; + v |= MC_TxDisable; + CSR_WRITE_4(sc, STGE_MACCtrl, v); + for (i = STGE_TIMEOUT; i > 0; i--) { + DELAY(10); + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + if ((v & MC_TxEnabled) == 0) + break; + } + if (i == 0) + device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n"); +} + +static void +stge_stop_rx(struct stge_softc *sc) +{ + uint32_t v; + int i; + + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + if ((v & MC_RxEnabled) == 0) + return; + v |= MC_RxDisable; + CSR_WRITE_4(sc, STGE_MACCtrl, v); + for (i = STGE_TIMEOUT; i > 0; i--) { + DELAY(10); + v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; + if ((v & MC_RxEnabled) == 0) + break; + } + if (i == 0) + device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n"); +} + +static void +stge_init_tx_ring(struct stge_softc *sc) +{ + struct stge_ring_data *rd; + struct stge_txdesc *txd; + bus_addr_t addr; + int i; + + STAILQ_INIT(&sc->sc_cdata.stge_txfreeq); + STAILQ_INIT(&sc->sc_cdata.stge_txbusyq); + + sc->sc_cdata.stge_tx_prod = 0; + sc->sc_cdata.stge_tx_cons = 0; + sc->sc_cdata.stge_tx_cnt = 0; + + rd = &sc->sc_rdata; + bzero(rd->stge_tx_ring, STGE_TX_RING_SZ); + for (i = 0; i < STGE_TX_RING_CNT; i++) { + if (i == (STGE_TX_RING_CNT - 1)) + addr = STGE_TX_RING_ADDR(sc, 0); + else + addr = STGE_TX_RING_ADDR(sc, i + 1); + rd->stge_tx_ring[i].tfd_next = htole64(addr); + rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone); + txd = &sc->sc_cdata.stge_txdesc[i]; + STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q); + } + + bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, + sc->sc_cdata.stge_tx_ring_map, + BUS_DMASYNC_PREREAD | 
BUS_DMASYNC_PREWRITE); + +} + +static int +stge_init_rx_ring(struct stge_softc *sc) +{ + struct stge_ring_data *rd; + bus_addr_t addr; + int i; + + sc->sc_cdata.stge_rx_cons = 0; + STGE_RXCHAIN_RESET(sc); + + rd = &sc->sc_rdata; + bzero(rd->stge_rx_ring, STGE_RX_RING_SZ); + for (i = 0; i < STGE_RX_RING_CNT; i++) { + if (stge_newbuf(sc, i) != 0) + return (ENOBUFS); + if (i == (STGE_RX_RING_CNT - 1)) + addr = STGE_RX_RING_ADDR(sc, 0); + else + addr = STGE_RX_RING_ADDR(sc, i + 1); + rd->stge_rx_ring[i].rfd_next = htole64(addr); + rd->stge_rx_ring[i].rfd_status = 0; + } + + bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag, + sc->sc_cdata.stge_rx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + return (0); +} + +/* + * stge_newbuf: + * + * Add a receive buffer to the indicated descriptor. + */ +static int +stge_newbuf(struct stge_softc *sc, int idx) +{ + struct stge_rxdesc *rxd; + struct stge_rfd *rfd; + struct mbuf *m; + bus_dma_segment_t segs[1]; + bus_dmamap_t map; + int nsegs; + + m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); + if (m == NULL) + return (ENOBUFS); + m->m_len = m->m_pkthdr.len = MCLBYTES; + /* + * The hardware requires 4bytes aligned DMA address when JUMBO + * frame is used. + */ + if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN)) + m_adj(m, ETHER_ALIGN); + + if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag, + sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) { + m_freem(m); + return (ENOBUFS); + } + KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); + + rxd = &sc->sc_cdata.stge_rxdesc[idx]; + if (rxd->rx_m != NULL) { + bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap); + } + map = rxd->rx_dmamap; + rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap; + sc->sc_cdata.stge_rx_sparemap = map; + bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap, + BUS_DMASYNC_PREREAD); + rxd->rx_m = m; + + rfd = &sc->sc_rdata.stge_rx_ring[idx]; + rfd->rfd_frag.frag_word0 = + htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len)); + rfd->rfd_status = 0; + + return (0); +} + +/* + * stge_set_filter: + * + * Set up the receive filter. + */ +static void +stge_set_filter(struct stge_softc *sc) +{ + struct ifnet *ifp; + uint16_t mode; + + STGE_LOCK_ASSERT(sc); + + ifp = sc->sc_ifp; + + mode = CSR_READ_2(sc, STGE_ReceiveMode); + mode |= RM_ReceiveUnicast; + if ((ifp->if_flags & IFF_BROADCAST) != 0) + mode |= RM_ReceiveBroadcast; + else + mode &= ~RM_ReceiveBroadcast; + if ((ifp->if_flags & IFF_PROMISC) != 0) + mode |= RM_ReceiveAllFrames; + else + mode &= ~RM_ReceiveAllFrames; + + CSR_WRITE_2(sc, STGE_ReceiveMode, mode); +} + +static void +stge_set_multi(struct stge_softc *sc) +{ + struct ifnet *ifp; + struct ifmultiaddr *ifma; + uint32_t crc; + uint32_t mchash[2]; + uint16_t mode; + int count; + + STGE_LOCK_ASSERT(sc); + + ifp = sc->sc_ifp; + + mode = CSR_READ_2(sc, STGE_ReceiveMode); + if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { + if ((ifp->if_flags & IFF_PROMISC) != 0) + mode |= RM_ReceiveAllFrames; + else if ((ifp->if_flags & IFF_ALLMULTI) != 0) + mode |= RM_ReceiveMulticast; + CSR_WRITE_2(sc, STGE_ReceiveMode, mode); + return; + } + + /* clear existing filters. */ + CSR_WRITE_4(sc, STGE_HashTable0, 0); + CSR_WRITE_4(sc, STGE_HashTable1, 0); + + /* + * Set up the multicast address filter by passing all multicast + * addresses through a CRC generator, and then using the low-order + * 6 bits as an index into the 64 bit multicast hash table. 
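A stand-alone illustration of that hashing for one group address; the ether_crc32_be() here is a userland re-implementation, assumed to match the kernel's, included only so the example compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define ETHER_ADDR_LEN		6
#define ETHER_CRC_POLY_BE	0x04c11db6	/* assumed, big-endian CRC */

static uint32_t
ether_crc32_be(const uint8_t *buf, size_t len)
{
	size_t i;
	uint32_t crc, carry;
	int bit;
	uint8_t data;

	crc = 0xffffffff;
	for (i = 0; i < len; i++) {
		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
			carry = ((crc & 0x80000000U) ? 1 : 0) ^ (data & 0x01);
			crc <<= 1;
			if (carry)
				crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
		}
	}
	return (crc);
}

int
main(void)
{
	/* 224.0.0.1 maps to the multicast MAC 01:00:5e:00:00:01. */
	uint8_t mac[ETHER_ADDR_LEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mchash[2] = { 0, 0 };
	uint32_t crc;

	crc = ether_crc32_be(mac, ETHER_ADDR_LEN);
	crc &= 0x3f;				/* low 6 bits: 64 hash slots */
	mchash[crc >> 5] |= 1U << (crc & 0x1f);	/* word select, bit select */

	printf("slot %u -> HashTable%u bit %u (mchash = %08x %08x)\n",
	    (unsigned)crc, (unsigned)(crc >> 5), (unsigned)(crc & 0x1f),
	    (unsigned)mchash[0], (unsigned)mchash[1]);
	return (0);
}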
The + * high order bits select the register, while the rest of the bits + * select the bit within the register. + */ + + bzero(mchash, sizeof(mchash)); + + count = 0; + IF_ADDR_LOCK(sc->sc_ifp); + TAILQ_FOREACH(ifma, &sc->sc_ifp->if_multiaddrs, ifma_link) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) + ifma->ifma_addr), ETHER_ADDR_LEN); + + /* Just want the 6 least significant bits. */ + crc &= 0x3f; + + /* Set the corresponding bit in the hash table. */ + mchash[crc >> 5] |= 1 << (crc & 0x1f); + count++; + } + IF_ADDR_UNLOCK(ifp); + + mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames); + if (count > 0) + mode |= RM_ReceiveMulticastHash; + else + mode &= ~RM_ReceiveMulticastHash; + + CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]); + CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]); + CSR_WRITE_2(sc, STGE_ReceiveMode, mode); +} + +static int +sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) +{ + int error, value; + + if (!arg1) + return (EINVAL); + value = *(int *)arg1; + error = sysctl_handle_int(oidp, &value, 0, req); + if (error || !req->newptr) + return (error); + if (value < low || value > high) + return (EINVAL); + *(int *)arg1 = value; + + return (0); +} + +static int +sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS) +{ + return (sysctl_int_range(oidp, arg1, arg2, req, + STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX)); +} + +static int +sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS) +{ + return (sysctl_int_range(oidp, arg1, arg2, req, + STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX)); +} --- /dev/null Wed Jul 12 09:22:12 2006 +++ sys/dev/stge/if_stgereg.h Wed Jul 12 09:21:20 2006 @@ -0,0 +1,701 @@ +/* $NetBSD: if_stgereg.h,v 1.3 2003/02/10 21:10:07 christos Exp $ */ + +/*- + * Copyright (c) 2001 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* $FreeBSD$ */ + +/* + * Sundance Technology PCI vendor ID + */ +#define VENDOR_SUNDANCETI 0x13f0 + +/* + * Tamarack Microelectronics PCI vendor ID + */ +#define VENDOR_TAMARACK 0x143d + +/* + * D-Link Systems PCI vendor ID + */ +#define VENDOR_DLINK 0x1186 + +/* + * Antares Microsystems PCI vendor ID + */ +#define VENDOR_ANTARES 0x1754 + +/* + * Sundance Technology device ID + */ +#define DEVICEID_SUNDANCETI_ST1023 0x1023 +#define DEVICEID_SUNDANCETI_ST2021 0x2021 +#define DEVICEID_TAMARACK_TC9021 0x1021 +#define DEVICEID_TAMARACK_TC9021_ALT 0x9021 + +/* + * D-Link Systems device ID + */ +#define DEVICEID_DLINK_DL4000 0x4000 + +/* + * Antares Microsystems device ID + */ +#define DEVICEID_ANTARES_TC9021 0x1021 + +/* + * Register description for the Sundance Tech. TC9021 10/100/1000 + * Ethernet controller. + * + * Note that while DMA addresses are all in 64-bit fields, only + * the lower 40 bits of a DMA address are valid. + */ +#if (BUS_SPACE_MAXADDR < 0xFFFFFFFFFF) +#define STGE_DMA_MAXADDR BUS_SPACE_MAXADDR +#else +#define STGE_DMA_MAXADDR 0xFFFFFFFFFF +#endif + +/* + * Register access macros + */ +#define CSR_WRITE_4(_sc, reg, val) \ + bus_space_write_4((_sc)->sc_st, (_sc)->sc_sh, (reg), (val)) +#define CSR_WRITE_2(_sc, reg, val) \ + bus_space_write_2((_sc)->sc_st, (_sc)->sc_sh, (reg), (val)) +#define CSR_WRITE_1(_sc, reg, val) \ + bus_space_write_1((_sc)->sc_st, (_sc)->sc_sh, (reg), (val)) + +#define CSR_READ_4(_sc, reg) \ + bus_space_read_4((_sc)->sc_st, (_sc)->sc_sh, (reg)) +#define CSR_READ_2(_sc, reg) \ + bus_space_read_2((_sc)->sc_st, (_sc)->sc_sh, (reg)) +#define CSR_READ_1(_sc, reg) \ + bus_space_read_1((_sc)->sc_st, (_sc)->sc_sh, (reg)) + +/* + * TC9021 buffer fragment descriptor. + */ +struct stge_frag { + uint64_t frag_word0; /* address, length */ +}; + +#define FRAG_ADDR(x) (((uint64_t)(x)) << 0) +#define FRAG_ADDR_MASK FRAG_ADDR(0xfffffffffULL) +#define FRAG_LEN(x) (((uint64_t)(x)) << 48) +#define FRAG_LEN_MASK FRAG_LEN(0xffffULL) + +/* + * TC9021 Transmit Frame Descriptor. Note the number of fragments + * here is arbitrary, but we can't have any more than 15. 
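Every descriptor names its buffers with one of these 64-bit fragment words: the bus address lives in the low bits and the byte count in the top 16. A stand-alone round-trip through the FRAG_* macros defined just above, with a made-up segment:

#include <stdint.h>
#include <stdio.h>

/* Copied from if_stgereg.h in this patch. */
#define FRAG_ADDR(x)	(((uint64_t)(x)) << 0)
#define FRAG_ADDR_MASK	FRAG_ADDR(0xfffffffffULL)
#define FRAG_LEN(x)	(((uint64_t)(x)) << 48)
#define FRAG_LEN_MASK	FRAG_LEN(0xffffULL)

int
main(void)
{
	/* A made-up 2048-byte buffer at bus address 0x123456000. */
	uint64_t word0 = FRAG_ADDR(0x123456000ULL) | FRAG_LEN(2048);

	printf("frag_word0 = 0x%016llx\n", (unsigned long long)word0);
	printf("addr = 0x%llx, len = %llu\n",
	    (unsigned long long)(word0 & FRAG_ADDR_MASK),
	    (unsigned long long)((word0 & FRAG_LEN_MASK) >> 48));
	return (0);
}

stge_encap fills one such word per DMA segment of an outgoing mbuf chain, up to the fragment limit noted above; stge_newbuf fills exactly one for each receive cluster.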
+ */ +#define STGE_NTXFRAGS 15 +struct stge_tfd { + uint64_t tfd_next; /* next TFD in list */ + uint64_t tfd_control; /* control bits */ + /* the buffer fragments */ + struct stge_frag tfd_frags[STGE_NTXFRAGS]; +}; + +#define TFD_FrameId(x) ((x) << 0) +#define TFD_FrameId_MAX 0xffff +#define TFD_WordAlign(x) ((x) << 16) +#define TFD_WordAlign_dword 0 /* align to dword in TxFIFO */ +#define TFD_WordAlign_word 2 /* align to word in TxFIFO */ +#define TFD_WordAlign_disable 1 /* disable alignment */ +#define TFD_TCPChecksumEnable (1ULL << 18) +#define TFD_UDPChecksumEnable (1ULL << 19) +#define TFD_IPChecksumEnable (1ULL << 20) +#define TFD_FcsAppendDisable (1ULL << 21) +#define TFD_TxIndicate (1ULL << 22) +#define TFD_TxDMAIndicate (1ULL << 23) +#define TFD_FragCount(x) ((x) << 24) +#define TFD_VLANTagInsert (1ULL << 28) +#define TFD_TFDDone (1ULL << 31) +#define TFD_VID(x) (((uint64_t)(x)) << 32) +#define TFD_CFI (1ULL << 44) +#define TFD_UserPriority(x) (((uint64_t)(x)) << 45) + +/* + * TC9021 Receive Frame Descriptor. Each RFD has a single fragment + * in it, and the chip tells us the beginning and end of the frame. + */ +struct stge_rfd { + uint64_t rfd_next; /* next RFD in list */ + uint64_t rfd_status; /* status bits */ + struct stge_frag rfd_frag; /* the buffer */ +}; + +/* Low word of rfd_status */ +#define RFD_RxStatus(x) ((x) & 0xffffffff) +#define RFD_RxDMAFrameLen(x) ((x) & 0xffff) +#define RFD_RxFIFOOverrun 0x00010000 +#define RFD_RxRuntFrame 0x00020000 +#define RFD_RxAlignmentError 0x00040000 +#define RFD_RxFCSError 0x00080000 +#define RFD_RxOversizedFrame 0x00100000 +#define RFD_RxLengthError 0x00200000 +#define RFD_VLANDetected 0x00400000 +#define RFD_TCPDetected 0x00800000 +#define RFD_TCPError 0x01000000 +#define RFD_UDPDetected 0x02000000 +#define RFD_UDPError 0x04000000 +#define RFD_IPDetected 0x08000000 +#define RFD_IPError 0x10000000 +#define RFD_FrameStart 0x20000000 +#define RFD_FrameEnd 0x40000000 +#define RFD_RFDDone 0x80000000 +/* High word of rfd_status */ +#define RFD_TCI(x) ((((uint64_t)(x)) >> 32) & 0xffff) + +/* + * EEPROM offsets. + */ +#define STGE_EEPROM_ConfigParam 0x00 +#define STGE_EEPROM_AsicCtrl 0x01 +#define STGE_EEPROM_SubSystemVendorId 0x02 +#define STGE_EEPROM_SubSystemId 0x03 +#define STGE_EEPROM_LEDMode 0x06 +#define STGE_EEPROM_StationAddress0 0x10 +#define STGE_EEPROM_StationAddress1 0x11 +#define STGE_EEPROM_StationAddress2 0x12 + +/* + * The TC9021 register space. 
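Among the registers defined below is the EEPROM access window (STGE_EepromCtrl and STGE_EepromData), which the attach path presumably uses to pull the station address out of the serial EEPROM on boards where it is not already latched. The read helper itself is outside this hunk; a hedged sketch of what such a read conventionally looks like, built only from the register bits defined below (the function name and timing are assumptions):

static int
stge_eeprom_read_word(struct stge_softc *sc, int offset, uint16_t *val)
{
	int i;

	/* Issue a "read register" opcode for the requested word. */
	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0) {
			*val = CSR_READ_2(sc, STGE_EepromData);
			return (0);
		}
	}
	return (ETIMEDOUT);
}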
+ */ + +#define STGE_DMACtrl 0x00 +#define DMAC_RxDMAComplete (1U << 3) +#define DMAC_RxDMAPollNow (1U << 4) +#define DMAC_TxDMAComplete (1U << 11) +#define DMAC_TxDMAPollNow (1U << 12) +#define DMAC_TxDMAInProg (1U << 15) +#define DMAC_RxEarlyDisable (1U << 16) +#define DMAC_MWIDisable (1U << 18) +#define DMAC_TxWriteBackDisable (1U << 19) +#define DMAC_TxBurstLimit(x) ((x) << 20) +#define DMAC_TargetAbort (1U << 30) +#define DMAC_MasterAbort (1U << 31) + +#define STGE_RxDMAStatus 0x08 + +#define STGE_TFDListPtrLo 0x10 + +#define STGE_TFDListPtrHi 0x14 + +#define STGE_TxDMABurstThresh 0x18 /* 8-bit */ + +#define STGE_TxDMAUrgentThresh 0x19 /* 8-bit */ + +#define STGE_TxDMAPollPeriod 0x1a /* 8-bit, 320ns increments */ + +#define STGE_RFDListPtrLo 0x1c + +#define STGE_RFDListPtrHi 0x20 + +#define STGE_RxDMABurstThresh 0x24 /* 8-bit */ + +#define STGE_RxDMAUrgentThresh 0x25 /* 8-bit */ + +#define STGE_RxDMAPollPeriod 0x26 /* 8-bit, 320ns increments */ + +#define STGE_RxDMAIntCtrl 0x28 +#define RDIC_RxFrameCount(x) ((x) & 0xff) +#define RDIC_PriorityThresh(x) ((x) << 10) +#define RDIC_RxDMAWaitTime(x) ((x) << 16) +/* + * Number of receive frames transferred via DMA before a Rx interrupt is issued. + */ +#define STGE_RXINT_NFRAME_DEFAULT 8 +#define STGE_RXINT_NFRAME_MIN 1 +#define STGE_RXINT_NFRAME_MAX 255 +/* + * Maximum amount of time (in 64ns increments) to wait before issuing a Rx + * interrupt if number of frames recevied is less than STGE_RXINT_NFRAME + * (STGE_RXINT_NFRAME_MIN <= STGE_RXINT_NFRAME <= STGE_RXINT_NFRAME_MAX) + */ +#define STGE_RXINT_DMAWAIT_DEFAULT 30 /* 30us */ +#define STGE_RXINT_DMAWAIT_MIN 0 +#define STGE_RXINT_DMAWAIT_MAX 4194 +#define STGE_RXINT_USECS2TICK(x) (((x) * 1000)/64) + +#define STGE_DebugCtrl 0x2c /* 16-bit */ +#define DC_GPIO0Ctrl (1U << 0) +#define DC_GPIO1Ctrl (1U << 1) +#define DC_GPIO0 (1U << 2) +#define DC_GPIO1 (1U << 3) + +#define STGE_AsicCtrl 0x30 +#define AC_ExpRomDisable (1U << 0) +#define AC_ExpRomSize (1U << 1) +#define AC_PhySpeed10 (1U << 4) +#define AC_PhySpeed100 (1U << 5) +#define AC_PhySpeed1000 (1U << 6) +#define AC_PhyMedia (1U << 7) +#define AC_ForcedConfig(x) ((x) << 8) +#define AC_ForcedConfig_MASK AC_ForcedConfig(7) +#define AC_D3ResetDisable (1U << 11) +#define AC_SpeedupMode (1U << 13) +#define AC_LEDMode (1U << 14) +#define AC_RstOutPolarity (1U << 15) +#define AC_GlobalReset (1U << 16) +#define AC_RxReset (1U << 17) +#define AC_TxReset (1U << 18) +#define AC_DMA (1U << 19) +#define AC_FIFO (1U << 20) +#define AC_Network (1U << 21) +#define AC_Host (1U << 22) +#define AC_AutoInit (1U << 23) +#define AC_RstOut (1U << 24) +#define AC_InterruptRequest (1U << 25) +#define AC_ResetBusy (1U << 26) +#define AC_LEDSpeed (1U << 27) +#define AC_LEDModeBit1 (1U << 29) + +#define STGE_FIFOCtrl 0x38 /* 16-bit */ +#define FC_RAMTestMode (1U << 0) +#define FC_Transmitting (1U << 14) +#define FC_Receiving (1U << 15) + +#define STGE_RxEarlyThresh 0x3a /* 16-bit */ + +#define STGE_FlowOffThresh 0x3c /* 16-bit */ + +#define STGE_FlowOnTresh 0x3e /* 16-bit */ + +#define STGE_TxStartThresh 0x44 /* 16-bit */ + +#define STGE_EepromData 0x48 /* 16-bit */ + +#define STGE_EepromCtrl 0x4a /* 16-bit */ +#define EC_EepromAddress(x) ((x) & 0xff) +#define EC_EepromOpcode(x) ((x) << 8) +#define EC_OP_WE 0 +#define EC_OP_WR 1 +#define EC_OP_RR 2 +#define EC_OP_ER 3 +#define EC_EepromBusy (1U << 15) + +#define STGE_ExpRomAddr 0x4c + +#define STGE_ExpRomData 0x50 /* 8-bit */ + +#define STGE_WakeEvent 0x51 /* 8-bit */ + +#define STGE_Countdown 0x54 +#define 
CD_Count(x) ((x) & 0xffff) +#define CD_CountdownSpeed (1U << 24) +#define CD_CountdownMode (1U << 25) +#define CD_CountdownIntEnabled (1U << 26) + +#define STGE_IntStatusAck 0x5a /* 16-bit */ + +#define STGE_IntEnable 0x5c /* 16-bit */ + +#define STGE_IntStatus 0x5e /* 16-bit */ + +#define IS_InterruptStatus (1U << 0) +#define IS_HostError (1U << 1) +#define IS_TxComplete (1U << 2) +#define IS_MACControlFrame (1U << 3) +#define IS_RxComplete (1U << 4) +#define IS_RxEarly (1U << 5) +#define IS_InRequested (1U << 6) +#define IS_UpdateStats (1U << 7) +#define IS_LinkEvent (1U << 8) +#define IS_TxDMAComplete (1U << 9) +#define IS_RxDMAComplete (1U << 10) +#define IS_RFDListEnd (1U << 11) +#define IS_RxDMAPriority (1U << 12) + +#define STGE_TxStatus 0x60 +#define TS_TxError (1U << 0) +#define TS_LateCollision (1U << 2) +#define TS_MaxCollisions (1U << 3) +#define TS_TxUnderrun (1U << 4) +#define TS_TxIndicateReqd (1U << 6) +#define TS_TxComplete (1U << 7) +#define TS_TxFrameId_get(x) ((x) >> 16) + +#define STGE_MACCtrl 0x6c +#define MC_IFSSelect(x) ((x) & 3) +#define MC_IFS96bit 0 +#define MC_IFS1024bit 1 +#define MC_IFS1792bit 2 +#define MC_IFS4352bit 3 + +#define MC_DuplexSelect (1U << 5) +#define MC_RcvLargeFrames (1U << 6) +#define MC_TxFlowControlEnable (1U << 7) +#define MC_RxFlowControlEnable (1U << 8) +#define MC_RcvFCS (1U << 9) +#define MC_FIFOLoopback (1U << 10) +#define MC_MACLoopback (1U << 11) +#define MC_AutoVLANtagging (1U << 12) +#define MC_AutoVLANuntagging (1U << 13) +#define MC_CollisionDetect (1U << 16) +#define MC_CarrierSense (1U << 17) +#define MC_StatisticsEnable (1U << 21) +#define MC_StatisticsDisable (1U << 22) +#define MC_StatisticsEnabled (1U << 23) +#define MC_TxEnable (1U << 24) +#define MC_TxDisable (1U << 25) +#define MC_TxEnabled (1U << 26) +#define MC_RxEnable (1U << 27) +#define MC_RxDisable (1U << 28) +#define MC_RxEnabled (1U << 29) +#define MC_Paused (1U << 30) +#define MC_MASK 0x7fe33fa3 + +#define STGE_VLANTag 0x70 + +#define STGE_PhySet 0x75 /* 8-bit */ +#define PS_MemLenb9b (1U << 0) +#define PS_MemLen (1U << 1) +#define PS_NonCompdet (1U << 2) + +#define STGE_PhyCtrl 0x76 /* 8-bit */ +#define PC_MgmtClk (1U << 0) +#define PC_MgmtData (1U << 1) +#define PC_MgmtDir (1U << 2) /* MAC->PHY */ +#define PC_PhyDuplexPolarity (1U << 3) +#define PC_PhyDuplexStatus (1U << 4) +#define PC_PhyLnkPolarity (1U << 5) +#define PC_LinkSpeed(x) (((x) >> 6) & 3) +#define PC_LinkSpeed_Down 0 +#define PC_LinkSpeed_10 1 +#define PC_LinkSpeed_100 2 +#define PC_LinkSpeed_1000 3 + +#define STGE_StationAddress0 0x78 /* 16-bit */ + +#define STGE_StationAddress1 0x7a /* 16-bit */ + +#define STGE_StationAddress2 0x7c /* 16-bit */ + +#define STGE_VLANHashTable 0x7e /* 16-bit */ + +#define STGE_VLANId 0x80 + +#define STGE_MaxFrameSize 0x86 + +#define STGE_ReceiveMode 0x88 /* 16-bit */ +#define RM_ReceiveUnicast (1U << 0) +#define RM_ReceiveMulticast (1U << 1) +#define RM_ReceiveBroadcast (1U << 2) +#define RM_ReceiveAllFrames (1U << 3) +#define RM_ReceiveMulticastHash (1U << 4) +#define RM_ReceiveIPMulticast (1U << 5) +#define RM_ReceiveVLANMatch (1U << 8) +#define RM_ReceiveVLANHash (1U << 9) + +#define STGE_HashTable0 0x8c + +#define STGE_HashTable1 0x90 + +#define STGE_RMONStatisticsMask 0x98 /* set to disable */ + +#define STGE_StatisticsMask 0x9c /* set to disable */ + +#define STGE_RxJumboFrames 0xbc /* 16-bit */ + +#define STGE_TCPCheckSumErrors 0xc0 /* 16-bit */ + +#define STGE_IPCheckSumErrors 0xc2 /* 16-bit */ + +#define STGE_UDPCheckSumErrors 0xc4 /* 16-bit */ + +#define 
STGE_TxJumboFrames 0xf4 /* 16-bit */ + +/* + * TC9021 statistics. Available memory and I/O mapped. + */ + +#define STGE_OctetRcvOk 0xa8 + +#define STGE_McstOctetRcvdOk 0xac + +#define STGE_BcstOctetRcvdOk 0xb0 + +#define STGE_FramesRcvdOk 0xb4 + +#define STGE_McstFramesRcvdOk 0xb8 + +#define STGE_BcstFramesRcvdOk 0xbe /* 16-bit */ + +#define STGE_MacControlFramesRcvd 0xc6 /* 16-bit */ + +#define STGE_FrameTooLongErrors 0xc8 /* 16-bit */ + +#define STGE_InRangeLengthErrors 0xca /* 16-bit */ + +#define STGE_FramesCheckSeqErrors 0xcc /* 16-bit */ + +#define STGE_FramesLostRxErrors 0xce /* 16-bit */ + +#define STGE_OctetXmtdOk 0xd0 + +#define STGE_McstOctetXmtdOk 0xd4 + +#define STGE_BcstOctetXmtdOk 0xd8 + +#define STGE_FramesXmtdOk 0xdc + +#define STGE_McstFramesXmtdOk 0xe0 + +#define STGE_FramesWDeferredXmt 0xe4 + +#define STGE_LateCollisions 0xe8 + +#define STGE_MultiColFrames 0xec + +#define STGE_SingleColFrames 0xf0 + +#define STGE_BcstFramesXmtdOk 0xf6 /* 16-bit */ + +#define STGE_CarrierSenseErrors 0xf8 /* 16-bit */ + +#define STGE_MacControlFramesXmtd 0xfa /* 16-bit */ + +#define STGE_FramesAbortXSColls 0xfc /* 16-bit */ + +#define STGE_FramesWEXDeferal 0xfe /* 16-bit */ + +/* + * RMON-compatible statistics. Only accessible if memory-mapped. + */ + +#define STGE_EtherStatsCollisions 0x100 + +#define STGE_EtherStatsOctetsTransmit 0x104 + +#define STGE_EtherStatsPktsTransmit 0x108 + +#define STGE_EtherStatsPkts64OctetsTransmit 0x10c + +#define STGE_EtherStatsPkts64to127OctetsTransmit 0x110 + +#define STGE_EtherStatsPkts128to255OctetsTransmit 0x114 + +#define STGE_EtherStatsPkts256to511OctetsTransmit 0x118 + +#define STGE_EtherStatsPkts512to1023OctetsTransmit 0x11c + +#define STGE_EtherStatsPkts1024to1518OctetsTransmit 0x120 + +#define STGE_EtherStatsCRCAlignErrors 0x124 + +#define STGE_EtherStatsUndersizePkts 0x128 + +#define STGE_EtherStatsFragments 0x12c + +#define STGE_EtherStatsJabbers 0x130 + +#define STGE_EtherStatsOctets 0x134 + +#define STGE_EtherStatsPkts 0x138 + +#define STGE_EtherStatsPkts64Octets 0x13c + +#define STGE_EtherStatsPkts65to127Octets 0x140 + +#define STGE_EtherStatsPkts128to255Octets 0x144 + +#define STGE_EtherStatsPkts256to511Octets 0x148 + +#define STGE_EtherStatsPkts512to1023Octets 0x14c + +#define STGE_EtherStatsPkts1024to1518Octets 0x150 + +/* + * Transmit descriptor list size. + */ +#define STGE_TX_RING_CNT 256 +#define STGE_TX_LOWAT (STGE_TX_RING_CNT/32) +#define STGE_TX_HIWAT (STGE_TX_RING_CNT - STGE_TX_LOWAT) + +/* + * Receive descriptor list size. 
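The descriptor counts below, combined with the structure layouts above, fix the sizes of the ring allocations made earlier in this file (about 34 KB of Tx descriptors and 6 KB of Rx descriptors) as well as the STGE_TX_HIWAT watermark that stge_encap and stge_start_locked key off. A stand-alone check of those numbers:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the layouts and counts in if_stgereg.h (this patch). */
struct stge_frag { uint64_t frag_word0; };
#define STGE_NTXFRAGS	15
struct stge_tfd {
	uint64_t	tfd_next;
	uint64_t	tfd_control;
	struct stge_frag tfd_frags[STGE_NTXFRAGS];
};
struct stge_rfd {
	uint64_t	rfd_next;
	uint64_t	rfd_status;
	struct stge_frag rfd_frag;
};
#define STGE_TX_RING_CNT	256
#define STGE_RX_RING_CNT	256
#define STGE_TX_LOWAT		(STGE_TX_RING_CNT/32)
#define STGE_TX_HIWAT		(STGE_TX_RING_CNT - STGE_TX_LOWAT)

int
main(void)
{
	printf("TFD %zu bytes, Tx ring %zu bytes\n",
	    sizeof(struct stge_tfd),
	    sizeof(struct stge_tfd) * STGE_TX_RING_CNT);
	printf("RFD %zu bytes, Rx ring %zu bytes\n",
	    sizeof(struct stge_rfd),
	    sizeof(struct stge_rfd) * STGE_RX_RING_CNT);
	printf("Tx low/high watermarks: %d/%d descriptors\n",
	    STGE_TX_LOWAT, STGE_TX_HIWAT);
	return (0);
}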
+ */ +#define STGE_RX_RING_CNT 256 + +#define STGE_MAXTXSEGS STGE_NTXFRAGS + +#define STGE_JUMBO_FRAMELEN 9022 +#define STGE_JUMBO_MTU \ + (STGE_JUMBO_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN) + +struct stge_txdesc { + struct mbuf *tx_m; /* head of our mbuf chain */ + bus_dmamap_t tx_dmamap; /* our DMA map */ + STAILQ_ENTRY(stge_txdesc) tx_q; +}; + +STAILQ_HEAD(stge_txdq, stge_txdesc); + +struct stge_rxdesc { + struct mbuf *rx_m; + bus_dmamap_t rx_dmamap; +}; + +#define STGE_ADDR_LO(x) ((u_int64_t) (x) & 0xffffffff) +#define STGE_ADDR_HI(x) ((u_int64_t) (x) >> 32) + +#define STGE_RING_ALIGN 8 + +struct stge_chain_data{ + bus_dma_tag_t stge_parent_tag; + bus_dma_tag_t stge_tx_tag; + struct stge_txdesc stge_txdesc[STGE_TX_RING_CNT]; + struct stge_txdq stge_txfreeq; + struct stge_txdq stge_txbusyq; + bus_dma_tag_t stge_rx_tag; + struct stge_rxdesc stge_rxdesc[STGE_RX_RING_CNT]; + bus_dma_tag_t stge_tx_ring_tag; + bus_dmamap_t stge_tx_ring_map; + bus_dma_tag_t stge_rx_ring_tag; + bus_dmamap_t stge_rx_ring_map; + bus_dmamap_t stge_rx_sparemap; + + int stge_tx_prod; + int stge_tx_cons; + int stge_tx_cnt; + int stge_rx_cons; +#ifdef DEVICE_POLLING + int stge_rxcycles; +#endif + int stge_rxlen; + struct mbuf *stge_rxhead; + struct mbuf *stge_rxtail; +}; + +struct stge_ring_data { + struct stge_tfd *stge_tx_ring; + bus_addr_t stge_tx_ring_paddr; + struct stge_rfd *stge_rx_ring; + bus_addr_t stge_rx_ring_paddr; +}; + +#define STGE_TX_RING_ADDR(sc, i) \ + ((sc)->sc_rdata.stge_tx_ring_paddr + sizeof(struct stge_tfd) * (i)) +#define STGE_RX_RING_ADDR(sc, i) \ + ((sc)->sc_rdata.stge_rx_ring_paddr + sizeof(struct stge_rfd) * (i)) + +#define STGE_TX_RING_SZ \ + (sizeof(struct stge_tfd) * STGE_TX_RING_CNT) +#define STGE_RX_RING_SZ \ + (sizeof(struct stge_rfd) * STGE_RX_RING_CNT) + +/* + * Software state per device. + */ +struct stge_softc { + struct ifnet *sc_ifp; /* interface info */ + device_t sc_dev; + device_t sc_miibus; + struct resource *sc_res; + struct resource *sc_irq; /* IRQ resource handle */ + int sc_restype; + int sc_rid; + bus_space_tag_t sc_st; /* bus space tag */ + bus_space_handle_t sc_sh; /* bus space handle */ + void *sc_ih; /* interrupt cookie */ + int sc_rev; /* silicon revision */ + + struct callout sc_tick_ch; /* tick callout */ + + struct stge_chain_data sc_cdata; + struct stge_ring_data sc_rdata; + int sc_if_flags; + int sc_if_framesize; + int sc_txthresh; /* Tx threshold */ + uint32_t sc_usefiber:1; /* if we're fiber */ + uint32_t sc_stge1023:1; /* are we a 1023 */ + uint32_t sc_DMACtrl; /* prototype DMACtrl reg. */ + uint32_t sc_MACCtrl; /* prototype MacCtrl reg. */ + uint16_t sc_IntEnable; /* prototype IntEnable reg. */ + uint16_t sc_led; /* LED conf. from EEPROM */ + uint8_t sc_PhyCtrl; /* prototype PhyCtrl reg. 
*/ + int sc_suspended; + int sc_detach; + + int sc_rxint_nframe; + int sc_rxint_dmawait; + int sc_nerr; + + struct task sc_link_task; + struct mtx sc_mii_mtx; /* MII mutex */ + struct mtx sc_mtx; +}; + +#define STGE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) +#define STGE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) +#define STGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED) +#define STGE_MII_LOCK(_sc) mtx_lock(&(_sc)->sc_mii_mtx) +#define STGE_MII_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mii_mtx) + +#define STGE_MAXERR 5 + +#define STGE_RXCHAIN_RESET(_sc) \ +do { \ + (_sc)->sc_cdata.stge_rxhead = NULL; \ + (_sc)->sc_cdata.stge_rxtail = NULL; \ + (_sc)->sc_cdata.stge_rxlen = 0; \ +} while (/*CONSTCOND*/0) + +#define STGE_TIMEOUT 1000 + +struct stge_mii_frame { + uint8_t mii_stdelim; + uint8_t mii_opcode; + uint8_t mii_phyaddr; + uint8_t mii_regaddr; + uint8_t mii_turnaround; + uint16_t mii_data; +}; + +/* + * MII constants + */ +#define STGE_MII_STARTDELIM 0x01 +#define STGE_MII_READOP 0x02 +#define STGE_MII_WRITEOP 0x01 +#define STGE_MII_TURNAROUND 0x02 + +#define STGE_RESET_NONE 0x00 +#define STGE_RESET_TX 0x01 +#define STGE_RESET_RX 0x02 +#define STGE_RESET_FULL 0x04 --- sys/modules/Makefile.orig Tue Jun 27 14:30:26 2006 +++ sys/modules/Makefile Wed Jul 5 20:27:40 2006 @@ -231,6 +231,7 @@ ${_sr} \ ste \ ${_stg} \ + stge \ ${_streams} \ sym \ ${_syscons} \ --- /dev/null Wed Jul 12 09:22:12 2006 +++ sys/modules/stge/Makefile Wed Jul 5 20:27:03 2006 @@ -0,0 +1,9 @@ +# $FreeBSD$ + +.PATH: ${.CURDIR}/../../dev/stge + +KMOD= if_stge +SRCS= if_stge.c device_if.h bus_if.h pci_if.h +SRCS+= miibus_if.h + +.include --- sys/dev/mii/miidevs.orig Tue Apr 11 04:55:23 2006 +++ sys/dev/mii/miidevs Tue May 30 11:52:03 2006 @@ -68,6 +68,7 @@ oui XAQTI 0x00e0ae XaQti Corp. oui MARVELL 0x005043 Marvell Semiconductor oui xxMARVELL 0x000ac2 Marvell Semiconductor +oui ICPLUS 0x0090c3 IC Plus Corp. /* in the 79c873, AMD uses another OUI (which matches Davicom!) */ oui xxAMD 0x00606e Advanced Micro Devices @@ -185,3 +186,6 @@ model MARVELL E1000 0x0000 Marvell 88E1000 Gigabit PHY model MARVELL E1011 0x0002 Marvell 88E1011 Gigabit PHY model xxMARVELL E1000 0x0005 Marvell 88E1000 Gigabit PHY + +/* IC Plus Corp. PHYs */ +model ICPLUS IP1000A 0x0008 IC Plus 10/100/1000 media interface --- sys/modules/mii/Makefile.orig Wed Jul 5 20:08:43 2006 +++ sys/modules/mii/Makefile Wed Jul 5 20:09:15 2006 @@ -7,7 +7,7 @@ SRCS+= miibus_if.h miidevs.h device_if.h miibus_if.c e1000phy.c exphy.c nsphy.c SRCS+= mlphy.c tlphy.c rlphy.c amphy.c inphy.c tdkphy.c SRCS+= bmtphy.c brgphy.c xmphy.c pnaphy.c lxtphy.c qsphy.c acphy.c nsgphy.c -SRCS+= rgephy.c ruephy.c ciphy.c +SRCS+= rgephy.c ruephy.c ciphy.c gentbi.c ip1000phy.c EXPORT_SYMS= mii_mediachg \ mii_tick \