Index: dev/mii/e1000phy.c =================================================================== --- dev/mii/e1000phy.c (revision 201066) +++ dev/mii/e1000phy.c (working copy) @@ -50,7 +50,6 @@ #include #include -#include #include #include @@ -60,6 +59,9 @@ #include "miidevs.h" #include +/* XXX */ +#include +#include #include "miibus_if.h" @@ -69,6 +71,7 @@ struct e1000phy_softc { struct mii_softc mii_sc; int mii_model; + struct msk_mii_data *mmd; }; static device_method_t e1000phy_methods[] = { @@ -107,6 +110,7 @@ MII_PHY_DESC(MARVELL, E1111), MII_PHY_DESC(MARVELL, E1116), MII_PHY_DESC(MARVELL, E1118), + MII_PHY_DESC(MARVELL, E3016), MII_PHY_DESC(xxMARVELL, E1000), MII_PHY_DESC(xxMARVELL, E1011), MII_PHY_DESC(xxMARVELL, E1000_3), @@ -129,7 +133,7 @@ struct mii_softc *sc; struct mii_attach_args *ma; struct mii_data *mii; - int fast_ether; + struct ifnet *ifp; esc = device_get_softc(dev); sc = &esc->mii_sc; @@ -142,66 +146,50 @@ sc->mii_phy = ma->mii_phyno; sc->mii_service = e1000phy_service; sc->mii_pdata = mii; - sc->mii_anegticks = MII_ANEGTICKS_GIGE; mii->mii_instance++; - fast_ether = 0; esc->mii_model = MII_MODEL(ma->mii_id2); + ifp = sc->mii_pdata->mii_ifp; + if (strcmp(ifp->if_dname, "msk") == 0) { + /* XXX */ + esc->mmd = device_get_ivars( + device_get_parent(device_get_parent(dev))); + if (esc->mmd != NULL && + (esc->mmd->mii_flags & MIIF_HAVEFIBER) != 0) + sc->mii_flags |= MIIF_HAVEFIBER; + } + switch (esc->mii_model) { case MII_MODEL_MARVELL_E1011: case MII_MODEL_MARVELL_E1112: if (PHY_READ(sc, E1000_ESSR) & E1000_ESSR_FIBER_LINK) sc->mii_flags |= MIIF_HAVEFIBER; break; - case MII_MODEL_MARVELL_E3082: - /* 88E3082 10/100 Fast Ethernet PHY. */ - sc->mii_anegticks = MII_ANEGTICKS; - fast_ether = 1; + case MII_MODEL_MARVELL_E1149: + /* + * Some 88E1149 PHY's page select is initialized to + * point to other bank instead of copper/fiber bank + * which in turn resulted in wrong registers were + * accessed during PHY operation. It is believed that + * page 0 should be used for copper PHY so reinitialize + * E1000_EADR to select default copper PHY. If parent + * device know the type of PHY(either copper or fiber), + * that information should be used to select default + * type of PHY. + */ + PHY_WRITE(sc, E1000_EADR, 0); break; } e1000phy_reset(sc); + sc->mii_capabilities = PHY_READ(sc, MII_BMSR) & ma->mii_capmask; + if (sc->mii_capabilities & BMSR_EXTSTAT) + sc->mii_extcapabilities = PHY_READ(sc, MII_EXTSR); device_printf(dev, " "); + mii_phy_add_media(sc); + printf("\n"); -#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL) - ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, sc->mii_inst), - E1000_CR_ISOLATE); - if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) { - ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst), - E1000_CR_SPEED_10); - printf("10baseT, "); - ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst), - E1000_CR_SPEED_10 | E1000_CR_FULL_DUPLEX); - printf("10baseT-FDX, "); - ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst), - E1000_CR_SPEED_100); - printf("100baseTX, "); - ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst), - E1000_CR_SPEED_100 | E1000_CR_FULL_DUPLEX); - printf("100baseTX-FDX, "); - if (fast_ether == 0) { - /* - * 1000BT-simplex not supported; driver must ignore - * this entry, but it must be present in order to - * manually set full-duplex. 
- */ - ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, 0, - sc->mii_inst), E1000_CR_SPEED_1000); - ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, IFM_FDX, - sc->mii_inst), - E1000_CR_SPEED_1000 | E1000_CR_FULL_DUPLEX); - printf("1000baseTX-FDX, "); - } - } else { - ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, sc->mii_inst), - E1000_CR_SPEED_1000 | E1000_CR_FULL_DUPLEX); - printf("1000baseSX-FDX, "); - } - ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst), 0); - printf("auto\n"); -#undef ADD - MIIBUS_MEDIAINIT(sc->mii_dev); return (0); } @@ -210,7 +198,7 @@ e1000phy_reset(struct mii_softc *sc) { struct e1000phy_softc *esc; - uint16_t reg; + uint16_t reg, page; esc = (struct e1000phy_softc *)sc; reg = PHY_READ(sc, E1000_SCR); @@ -219,12 +207,20 @@ PHY_WRITE(sc, E1000_SCR, reg); if (esc->mii_model == MII_MODEL_MARVELL_E1112) { /* Select 1000BASE-X only mode. */ + page = PHY_READ(sc, E1000_EADR); PHY_WRITE(sc, E1000_EADR, 2); reg = PHY_READ(sc, E1000_SCR); reg &= ~E1000_SCR_MODE_MASK; reg |= E1000_SCR_MODE_1000BX; PHY_WRITE(sc, E1000_SCR, reg); - PHY_WRITE(sc, E1000_EADR, 1); + if (esc->mmd != NULL && esc->mmd->pmd == 'P') { + /* Set SIGDET polarity low for SFP module. */ + PHY_WRITE(sc, E1000_EADR, 1); + reg = PHY_READ(sc, E1000_SCR); + reg |= E1000_SCR_FIB_SIGDET_POLARITY; + PHY_WRITE(sc, E1000_SCR, reg); + } + PHY_WRITE(sc, E1000_EADR, page); } } else { switch (esc->mii_model) { @@ -238,21 +234,33 @@ reg |= E1000_SCR_AUTO_X_MODE; if (esc->mii_model == MII_MODEL_MARVELL_E1116) reg &= ~E1000_SCR_POWER_DOWN; + reg |= E1000_SCR_ASSERT_CRS_ON_TX; break; case MII_MODEL_MARVELL_E3082: reg |= (E1000_SCR_AUTO_X_MODE >> 1); + reg |= E1000_SCR_ASSERT_CRS_ON_TX; break; + case MII_MODEL_MARVELL_E3016: + reg |= E1000_SCR_AUTO_MDIX; + reg &= ~(E1000_SCR_EN_DETECT | + E1000_SCR_SCRAMBLER_DISABLE); + reg |= E1000_SCR_LPNP; + /* XXX Enable class A driver for Yukon FE+ A0. */ + PHY_WRITE(sc, 0x1C, PHY_READ(sc, 0x1C) | 0x0001); + break; default: reg &= ~E1000_SCR_AUTO_X_MODE; + reg |= E1000_SCR_ASSERT_CRS_ON_TX; break; } - /* Enable CRS on TX. */ - reg |= E1000_SCR_ASSERT_CRS_ON_TX; - /* Auto correction for reversed cable polarity. */ - reg &= ~E1000_SCR_POLARITY_REVERSAL; + if (esc->mii_model != MII_MODEL_MARVELL_E3016) { + /* Auto correction for reversed cable polarity. */ + reg &= ~E1000_SCR_POLARITY_REVERSAL; + } PHY_WRITE(sc, E1000_SCR, reg); - if (esc->mii_model == MII_MODEL_MARVELL_E1116) { + if (esc->mii_model == MII_MODEL_MARVELL_E1116 || + esc->mii_model == MII_MODEL_MARVELL_E1149) { PHY_WRITE(sc, E1000_EADR, 2); reg = PHY_READ(sc, E1000_SCR); reg |= E1000_SCR_RGMII_POWER_UP; @@ -261,13 +269,33 @@ } } - switch (MII_MODEL(esc->mii_model)) { + switch (esc->mii_model) { case MII_MODEL_MARVELL_E3082: case MII_MODEL_MARVELL_E1112: + case MII_MODEL_MARVELL_E1118: + break; case MII_MODEL_MARVELL_E1116: - case MII_MODEL_MARVELL_E1118: case MII_MODEL_MARVELL_E1149: + page = PHY_READ(sc, E1000_EADR); + /* Select page 3, LED control register. */ + PHY_WRITE(sc, E1000_EADR, 3); + PHY_WRITE(sc, E1000_SCR, + E1000_SCR_LED_LOS(1) | /* Link/Act */ + E1000_SCR_LED_INIT(8) | /* 10Mbps */ + E1000_SCR_LED_STAT1(7) | /* 100Mbps */ + E1000_SCR_LED_STAT0(7)); /* 1000Mbps */ + /* Set blink rate. */ + PHY_WRITE(sc, E1000_IER, E1000_PULSE_DUR(E1000_PULSE_170MS) | + E1000_BLINK_RATE(E1000_BLINK_84MS)); + PHY_WRITE(sc, E1000_EADR, page); break; + case MII_MODEL_MARVELL_E3016: + /* LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED. */ + PHY_WRITE(sc, 0x16, 0x0B << 8 | 0x05 << 4 | 0x04); + /* Integrated register calibration workaround. 
*/ + PHY_WRITE(sc, 0x1D, 17); + PHY_WRITE(sc, 0x1E, 0x3F60); + break; default: /* Force TX_CLK to 25MHz clock. */ reg = PHY_READ(sc, E1000_ESCR); @@ -324,12 +352,14 @@ speed = 0; switch (IFM_SUBTYPE(ife->ifm_media)) { case IFM_1000_T: - if (esc->mii_model == MII_MODEL_MARVELL_E3082) + if ((sc->mii_extcapabilities & + (EXTSR_1000TFDX | EXTSR_1000THDX)) == 0) return (EINVAL); speed = E1000_CR_SPEED_1000; break; case IFM_1000_SX: - if (esc->mii_model == MII_MODEL_MARVELL_E3082) + if ((sc->mii_extcapabilities & + (EXTSR_1000XFDX | EXTSR_1000XHDX)) == 0) return (EINVAL); speed = E1000_CR_SPEED_1000; break; @@ -375,7 +405,8 @@ PHY_WRITE(sc, E1000_1GCR, gig | E1000_1GCR_MS_ENABLE); } else { - if (esc->mii_model != MII_MODEL_MARVELL_E3082) + if ((sc->mii_extcapabilities & + (EXTSR_1000TFDX | EXTSR_1000THDX)) != 0) PHY_WRITE(sc, E1000_1GCR, 0); } PHY_WRITE(sc, E1000_AR, E1000_AR_SELECTOR_FIELD); @@ -398,8 +429,10 @@ /* * Only used for autonegotiation. */ - if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) + if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) { + sc->mii_ticks = 0; break; + } /* * check for link. @@ -412,8 +445,10 @@ } /* Announce link loss right after it happens. */ + if (sc->mii_ticks++ == 0) + break; if (sc->mii_ticks <= sc->mii_anegticks) - return (0); + break; sc->mii_ticks = 0; e1000phy_reset(sc); @@ -433,18 +468,14 @@ e1000phy_status(struct mii_softc *sc) { struct mii_data *mii = sc->mii_pdata; - int bmsr, bmcr, esr, gsr, ssr, isr, ar, lpar; + int bmcr, bmsr, gsr, ssr, ar, lpar; mii->mii_media_status = IFM_AVALID; mii->mii_media_active = IFM_ETHER; bmsr = PHY_READ(sc, E1000_SR) | PHY_READ(sc, E1000_SR); - esr = PHY_READ(sc, E1000_ESR); bmcr = PHY_READ(sc, E1000_CR); ssr = PHY_READ(sc, E1000_SSR); - isr = PHY_READ(sc, E1000_ISR); - ar = PHY_READ(sc, E1000_AR); - lpar = PHY_READ(sc, E1000_LPAR); if (bmsr & E1000_SR_LINK_STATUS) mii->mii_media_status |= IFM_ACTIVE; @@ -452,25 +483,34 @@ if (bmcr & E1000_CR_LOOPBACK) mii->mii_media_active |= IFM_LOOP; - if ((((bmcr & E1000_CR_AUTO_NEG_ENABLE) != 0) && - ((bmsr & E1000_SR_AUTO_NEG_COMPLETE) == 0)) || - ((ssr & E1000_SSR_LINK) == 0) || - ((ssr & E1000_SSR_SPD_DPLX_RESOLVED) == 0)) { + if ((bmcr & E1000_CR_AUTO_NEG_ENABLE) != 0 && + (ssr & E1000_SSR_SPD_DPLX_RESOLVED) == 0) { /* Erg, still trying, I guess... */ mii->mii_media_active |= IFM_NONE; return; } if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) { - if (ssr & E1000_SSR_1000MBS) + switch (ssr & E1000_SSR_SPEED) { + case E1000_SSR_1000MBS: mii->mii_media_active |= IFM_1000_T; - else if (ssr & E1000_SSR_100MBS) + break; + case E1000_SSR_100MBS: mii->mii_media_active |= IFM_100_TX; - else + break; + case E1000_SSR_10MBS: mii->mii_media_active |= IFM_10_T; + break; + default: + mii->mii_media_active |= IFM_NONE; + return; + } } else { - if (ssr & E1000_SSR_1000MBS) - mii->mii_media_active |= IFM_1000_SX; + /* + * Some fiber PHY(88E1112) does not seem to set resolved + * speed so always assume we've got IFM_1000_SX. 
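The reset code above brackets each paged access (the 88E1112 fiber SIGDET setup and the 88E1116/88E1149 LED programming) by saving E1000_EADR, selecting the wanted page, and writing the saved value back afterwards. A minimal standalone sketch of that save/select/restore pattern follows; phy_read()/phy_write() and the fake register file are hypothetical stand-ins for PHY_READ()/PHY_WRITE(), not driver code, and E1000_EADR is assumed to be the Marvell page select register (22, 0x16).

#include <stdint.h>
#include <stdio.h>

#define E1000_EADR	0x16	/* assumed page select register (reg 22 on Marvell PHYs) */

/* Fake register file so the sketch runs without hardware. */
static uint16_t fake_page;
static uint16_t fake_regs[8][32];

static uint16_t
phy_read(int reg)
{
	if (reg == E1000_EADR)
		return (fake_page);
	return (fake_regs[fake_page][reg & 31]);
}

static void
phy_write(int reg, uint16_t val)
{
	if (reg == E1000_EADR)
		fake_page = val & 7;
	else
		fake_regs[fake_page][reg & 31] = val;
}

/*
 * Read-modify-write a register on a non-default page and restore the page
 * that was selected on entry, so later unpaged accesses still hit the
 * copper/fiber bank they expect.  This mirrors the save/restore done with
 * E1000_EADR in e1000phy_reset() above.
 */
static void
phy_modify_paged(int page, int reg, uint16_t clear, uint16_t set)
{
	uint16_t saved;

	saved = phy_read(E1000_EADR);
	phy_write(E1000_EADR, page);
	phy_write(reg, (phy_read(reg) & ~clear) | set);
	phy_write(E1000_EADR, saved);
}

int
main(void)
{
	phy_modify_paged(3, 0x10, 0xffff, 0x1234);	/* e.g. an LED register on page 3 */
	printf("page restored to %u\n", (unsigned)fake_page);
	return (0);
}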
+ */ + mii->mii_media_active |= IFM_1000_SX; } if (ssr & E1000_SSR_DUPLEX) @@ -479,6 +519,8 @@ mii->mii_media_active |= IFM_HDX; if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) { + ar = PHY_READ(sc, E1000_AR); + lpar = PHY_READ(sc, E1000_LPAR); /* FLAG0==rx-flow-control FLAG1==tx-flow-control */ if ((ar & E1000_AR_PAUSE) && (lpar & E1000_LPAR_PAUSE)) { mii->mii_media_active |= IFM_FLAG0 | IFM_FLAG1; @@ -505,16 +547,19 @@ e1000phy_mii_phy_auto(struct e1000phy_softc *esc) { struct mii_softc *sc; + uint16_t reg; sc = &esc->mii_sc; - if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) - PHY_WRITE(sc, E1000_AR, E1000_AR_10T | E1000_AR_10T_FD | + if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) { + reg = PHY_READ(sc, E1000_AR); + reg |= E1000_AR_10T | E1000_AR_10T_FD | E1000_AR_100TX | E1000_AR_100TX_FD | - E1000_AR_PAUSE | E1000_AR_ASM_DIR); - else + E1000_AR_PAUSE | E1000_AR_ASM_DIR; + PHY_WRITE(sc, E1000_AR, reg | E1000_AR_SELECTOR_FIELD); + } else PHY_WRITE(sc, E1000_AR, E1000_FA_1000X_FD | E1000_FA_1000X | E1000_FA_SYM_PAUSE | E1000_FA_ASYM_PAUSE); - if (esc->mii_model != MII_MODEL_MARVELL_E3082) + if ((sc->mii_extcapabilities & (EXTSR_1000TFDX | EXTSR_1000THDX)) != 0) PHY_WRITE(sc, E1000_1GCR, E1000_1GCR_1000T_FD | E1000_1GCR_1000T); PHY_WRITE(sc, E1000_CR, Index: dev/mii/e1000phyreg.h =================================================================== --- dev/mii/e1000phyreg.h (revision 201066) +++ dev/mii/e1000phyreg.h (working copy) @@ -236,8 +236,23 @@ #define E1000_SCR_TX_FIFO_DEPTH_10 0x8000 #define E1000_SCR_TX_FIFO_DEPTH_12 0xC000 +/* 88E3016 only */ +#define E1000_SCR_AUTO_MDIX 0x0030 +#define E1000_SCR_SIGDET_POLARITY 0x0040 +#define E1000_SCR_EXT_DISTANCE 0x0080 +#define E1000_SCR_FEFI_DISABLE 0x0100 +#define E1000_SCR_NLP_GEN_DISABLE 0x0800 +#define E1000_SCR_LPNP 0x1000 +#define E1000_SCR_NLP_CHK_DISABLE 0x2000 +#define E1000_SCR_EN_DETECT 0x4000 + #define E1000_SCR_EN_DETECT_MASK 0x0300 +/* 88E1112 page 1 fiber specific control */ +#define E1000_SCR_FIB_TX_DIS 0x0008 +#define E1000_SCR_FIB_SIGDET_POLARITY 0x0200 +#define E1000_SCR_FIB_FORCE_LINK 0x0400 + /* 88E1112 page 2 */ #define E1000_SCR_MODE_MASK 0x0380 #define E1000_SCR_MODE_AUTO 0x0180 @@ -246,9 +261,19 @@ /* 88E1116 page 0 */ #define E1000_SCR_POWER_DOWN 0x0004 -/* 88E1116 page 2 */ +/* 88E1116, 88E1149 page 2 */ #define E1000_SCR_RGMII_POWER_UP 0x0008 +/* 88E1116, 88E1149 page 3 */ +#define E1000_SCR_LED_STAT0_MASK 0x000F +#define E1000_SCR_LED_STAT1_MASK 0x00F0 +#define E1000_SCR_LED_INIT_MASK 0x0F00 +#define E1000_SCR_LED_LOS_MASK 0xF000 +#define E1000_SCR_LED_STAT0(x) ((x) & E1000_SCR_LED_STAT0_MASK) +#define E1000_SCR_LED_STAT1(x) ((x) & E1000_SCR_LED_STAT1_MASK) +#define E1000_SCR_LED_INIT(x) ((x) & E1000_SCR_LED_INIT_MASK) +#define E1000_SCR_LED_LOS(x) ((x) & E1000_SCR_LED_LOS_MASK) + #define E1000_SSR 0x11 /* special status register */ #define E1000_SSR_JABBER 0x0001 #define E1000_SSR_REV_POLARITY 0x0002 @@ -276,6 +301,26 @@ #define E1000_IER_SPEED_CHANGED 0x4000 #define E1000_IER_AUTO_NEG_ERR 0x8000 +/* 88E1116, 88E1149 page 3, LED timer control. 
*/ +#define E1000_PULSE_MASK 0x7000 +#define E1000_PULSE_NO_STR 0 /* no pulse stretching */ +#define E1000_PULSE_21MS 1 /* 21 ms to 42 ms */ +#define E1000_PULSE_42MS 2 /* 42 ms to 84 ms */ +#define E1000_PULSE_84MS 3 /* 84 ms to 170 ms */ +#define E1000_PULSE_170MS 4 /* 170 ms to 340 ms */ +#define E1000_PULSE_340MS 5 /* 340 ms to 670 ms */ +#define E1000_PULSE_670MS 6 /* 670 ms to 1300 ms */ +#define E1000_PULSE_1300MS 7 /* 1300 ms to 2700 ms */ +#define E1000_PULSE_DUR(x) ((x) & E1000_PULSE_MASK) + +#define E1000_BLINK_MASK 0x0700 +#define E1000_BLINK_42MS 0 /* 42 ms */ +#define E1000_BLINK_84MS 1 /* 84 ms */ +#define E1000_BLINK_170MS 2 /* 170 ms */ +#define E1000_BLINK_340MS 3 /* 340 ms */ +#define E1000_BLINK_670MS 4 /* 670 ms */ +#define E1000_BLINK_RATE(x) ((x) & E1000_BLINK_MASK) + #define E1000_ISR 0x13 /* interrupt status reg */ #define E1000_ISR_JABBER 0x0001 #define E1000_ISR_POLARITY_CHANGE 0x0002 Index: dev/msk/if_msk.c =================================================================== --- dev/msk/if_msk.c (revision 201066) +++ dev/msk/if_msk.c (working copy) @@ -2,7 +2,7 @@ * * Name : sky2.c * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x - * Version: $Revision: 1.11 $ + * Version: $Revision: 1.23 $ * Date : $Date: 2005/12/22 09:04:11 $ * Purpose: Main driver source file * @@ -137,7 +137,6 @@ #include #include -#include #include #include @@ -154,6 +153,10 @@ /* Tunables. */ static int msi_disable = 0; TUNABLE_INT("hw.msk.msi_disable", &msi_disable); +static int legacy_intr = 0; +TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr); +static int jumbo_disable = 0; +TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable); #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) @@ -171,6 +174,8 @@ #define MSI_SUPPORT #undef TSO_SUPPORT + + /* * Devices supported by this driver. 
*/ @@ -200,13 +205,21 @@ { VENDORID_MARVELL, DEVICEID_MRVL_8062X, "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8035, - "Marvell Yukon 88E8035 Gigabit Ethernet" }, + "Marvell Yukon 88E8035 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8036, - "Marvell Yukon 88E8036 Gigabit Ethernet" }, + "Marvell Yukon 88E8036 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8038, - "Marvell Yukon 88E8038 Gigabit Ethernet" }, + "Marvell Yukon 88E8038 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8039, - "Marvell Yukon 88E8039 Gigabit Ethernet" }, + "Marvell Yukon 88E8039 Fast Ethernet" }, + { VENDORID_MARVELL, DEVICEID_MRVL_8040, + "Marvell Yukon 88E8040 Fast Ethernet" }, + { VENDORID_MARVELL, DEVICEID_MRVL_8040T, + "Marvell Yukon 88E8040T Fast Ethernet" }, + { VENDORID_MARVELL, DEVICEID_MRVL_8042, + "Marvell Yukon 88E8042 Fast Ethernet" }, + { VENDORID_MARVELL, DEVICEID_MRVL_8048, + "Marvell Yukon 88E8048 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4361, "Marvell Yukon 88E8050 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4360, @@ -217,8 +230,20 @@ "Marvell Yukon 88E8055 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4364, "Marvell Yukon 88E8056 Gigabit Ethernet" }, + { VENDORID_MARVELL, DEVICEID_MRVL_4365, + "Marvell Yukon 88E8070 Gigabit Ethernet" }, + { VENDORID_MARVELL, DEVICEID_MRVL_436A, + "Marvell Yukon 88E8058 Gigabit Ethernet" }, + { VENDORID_MARVELL, DEVICEID_MRVL_436B, + "Marvell Yukon 88E8071 Gigabit Ethernet" }, + { VENDORID_MARVELL, DEVICEID_MRVL_436C, + "Marvell Yukon 88E8072 Gigabit Ethernet" }, + { VENDORID_MARVELL, DEVICEID_MRVL_4380, + "Marvell Yukon 88E8057 Gigabit Ethernet" }, { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX, "D-Link 550SX Gigabit Ethernet" }, + { VENDORID_DLINK, DEVICEID_DLINK_DGE560SX, + "D-Link 560SX Gigabit Ethernet" }, { VENDORID_DLINK, DEVICEID_DLINK_DGE560T, "D-Link 560T Gigabit Ethernet" } }; @@ -226,15 +251,18 @@ static const char *model_name[] = { "Yukon XL", "Yukon EC Ultra", - "Yukon Unknown", + "Yukon EX", "Yukon EC", - "Yukon FE" + "Yukon FE", + "Yukon FE+", + "Yukon Supreme", + "Yukon Ultra 2" }; static int mskc_probe(device_t); static int mskc_attach(device_t); static int mskc_detach(device_t); -static void mskc_shutdown(device_t); +static int mskc_shutdown(device_t); static int mskc_setup_rambuffer(struct msk_softc *); static int mskc_suspend(device_t); static int mskc_resume(device_t); @@ -253,8 +281,11 @@ static int msk_handle_events(struct msk_softc *); static void msk_handle_hwerr(struct msk_if_softc *, uint32_t); static void msk_intr_hwerr(struct msk_softc *); -static void msk_rxeof(struct msk_if_softc *, uint32_t, int); -static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int); +#ifndef __NO_STRICT_ALIGNMENT +static __inline void msk_fixup_rx(struct mbuf *); +#endif +static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int); +static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int); static void msk_txeof(struct msk_if_softc *, int); static struct mbuf *msk_defrag(struct mbuf *, int, int); static int msk_encap(struct msk_if_softc *, struct mbuf **); @@ -263,6 +294,7 @@ static int msk_ioctl(struct ifnet *, u_long, caddr_t); static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t); static void msk_set_rambuffer(struct msk_if_softc *); +static void msk_set_tx_stfwd(struct msk_if_softc *); static void msk_init(void *); static void msk_init_locked(struct msk_if_softc *); static void msk_stop(struct msk_if_softc *); @@ 
-274,9 +306,9 @@ static int msk_status_dma_alloc(struct msk_softc *); static void msk_status_dma_free(struct msk_softc *); static int msk_txrx_dma_alloc(struct msk_if_softc *); +static int msk_rx_dma_jalloc(struct msk_if_softc *); static void msk_txrx_dma_free(struct msk_if_softc *); -static void *msk_jalloc(struct msk_if_softc *); -static void msk_jfree(void *, void *); +static void msk_rx_dma_jfree(struct msk_if_softc *); static int msk_init_rx_ring(struct msk_if_softc *); static int msk_init_jumbo_rx_ring(struct msk_if_softc *); static void msk_init_tx_ring(struct msk_if_softc *); @@ -290,12 +322,14 @@ static int msk_miibus_readreg(device_t, int, int); static int msk_miibus_writereg(device_t, int, int, int); static void msk_miibus_statchg(device_t); -static void msk_link_task(void *, int); -static void msk_setmulti(struct msk_if_softc *); +static void msk_rxfilter(struct msk_if_softc *); static void msk_setvlan(struct msk_if_softc *, struct ifnet *); -static void msk_setpromisc(struct msk_if_softc *); +static void msk_stats_clear(struct msk_if_softc *); +static void msk_stats_update(struct msk_if_softc *); +static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS); +static void msk_sysctl_node(struct msk_if_softc *); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS); @@ -354,11 +388,40 @@ DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0); DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0); +static struct resource_spec msk_res_spec_io[] = { + { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE }, + { -1, 0, 0 } +}; + +static struct resource_spec msk_res_spec_mem[] = { + { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, + { -1, 0, 0 } +}; + +static struct resource_spec msk_irq_spec_legacy[] = { + { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, + { -1, 0, 0 } +}; + +static struct resource_spec msk_irq_spec_msi[] = { + { SYS_RES_IRQ, 1, RF_ACTIVE }, + { -1, 0, 0 } +}; + +static struct resource_spec msk_irq_spec_msi2[] = { + { SYS_RES_IRQ, 1, RF_ACTIVE }, + { SYS_RES_IRQ, 2, RF_ACTIVE }, + { -1, 0, 0 } +}; + static int msk_miibus_readreg(device_t dev, int phy, int reg) { struct msk_if_softc *sc_if; + if (phy != PHY_ADDR_MARV) + return (0); + sc_if = device_get_softc(dev); return (msk_phy_readreg(sc_if, phy, reg)); @@ -397,6 +460,9 @@ { struct msk_if_softc *sc_if; + if (phy != PHY_ADDR_MARV) + return (0); + sc_if = device_get_softc(dev); return (msk_phy_writereg(sc_if, phy, reg, val)); @@ -428,40 +494,44 @@ static void msk_miibus_statchg(device_t dev) { - struct msk_if_softc *sc_if; - - sc_if = device_get_softc(dev); - taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task); -} - -static void -msk_link_task(void *arg, int pending) -{ struct msk_softc *sc; struct msk_if_softc *sc_if; struct mii_data *mii; struct ifnet *ifp; uint32_t gmac; - sc_if = (struct msk_if_softc *)arg; + sc_if = device_get_softc(dev); sc = sc_if->msk_softc; - MSK_IF_LOCK(sc_if); + MSK_IF_LOCK_ASSERT(sc_if); mii = device_get_softc(sc_if->msk_miibus); ifp = sc_if->msk_ifp; - if (mii == NULL || ifp == NULL) { - MSK_IF_UNLOCK(sc_if); + if (mii == NULL || ifp == NULL || + (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; + + sc_if->msk_flags &= ~MSK_FLAG_LINK; + if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) == + (IFM_AVALID | IFM_ACTIVE)) { + switch (IFM_SUBTYPE(mii->mii_media_active)) { + case IFM_10_T: + case IFM_100_TX: + sc_if->msk_flags |= MSK_FLAG_LINK; + break; + case IFM_1000_T: + case IFM_1000_SX: + case IFM_1000_LX: + case IFM_1000_CX: + if 
((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0) + sc_if->msk_flags |= MSK_FLAG_LINK; + break; + default: + break; + } } - if (mii->mii_media_status & IFM_ACTIVE) { - if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) - sc_if->msk_link = 1; - } else - sc_if->msk_link = 0; - - if (sc_if->msk_link != 0) { + if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) { /* Enable Tx FIFO Underrun. */ CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), GM_IS_TX_FF_UR | GM_IS_RX_FF_OR); @@ -508,30 +578,27 @@ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac); /* Enable PHY interrupt for FIFO underrun/overflow. */ - if (sc->msk_marvell_phy) - msk_phy_writereg(sc_if, PHY_ADDR_MARV, - PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR); + msk_phy_writereg(sc_if, PHY_ADDR_MARV, + PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR); } else { /* * Link state changed to down. * Disable PHY interrupts. */ - if (sc->msk_marvell_phy) - msk_phy_writereg(sc_if, PHY_ADDR_MARV, - PHY_MARV_INT_MASK, 0); + msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); /* Disable Rx/Tx MAC. */ gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); - gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); - GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); - /* Read again to ensure writing. */ - GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); + if ((GM_GPCR_RX_ENA | GM_GPCR_TX_ENA) != 0) { + gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); + /* Read again to ensure writing. */ + GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); + } } - - MSK_IF_UNLOCK(sc_if); } static void -msk_setmulti(struct msk_if_softc *sc_if) +msk_rxfilter(struct msk_if_softc *sc_if) { struct msk_softc *sc; struct ifnet *ifp; @@ -548,15 +615,14 @@ bzero(mchash, sizeof(mchash)); mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); - mode |= GM_RXCR_UCF_ENA; - if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { - if ((ifp->if_flags & IFF_PROMISC) != 0) - mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); - else if ((ifp->if_flags & IFF_ALLMULTI) != 0) { - mchash[0] = 0xffff; - mchash[1] = 0xffff; - } + if ((ifp->if_flags & IFF_PROMISC) != 0) + mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + else if ((ifp->if_flags & IFF_ALLMULTI) != 0) { + mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA; + mchash[0] = 0xffff; + mchash[1] = 0xffff; } else { + mode |= GM_RXCR_UCF_ENA; IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) @@ -569,7 +635,8 @@ mchash[crc >> 5] |= 1 << (crc & 0x1f); } IF_ADDR_UNLOCK(ifp); - mode |= GM_RXCR_MCF_ENA; + if (mchash[0] != 0 || mchash[1] != 0) + mode |= GM_RXCR_MCF_ENA; } GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1, @@ -602,26 +669,6 @@ } } -static void -msk_setpromisc(struct msk_if_softc *sc_if) -{ - struct msk_softc *sc; - struct ifnet *ifp; - uint16_t mode; - - MSK_IF_LOCK_ASSERT(sc_if); - - sc = sc_if->msk_softc; - ifp = sc_if->msk_ifp; - - mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); - if (ifp->if_flags & IFF_PROMISC) - mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); - else - mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); - GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); -} - static int msk_init_rx_ring(struct msk_if_softc *sc_if) { @@ -764,7 +811,12 @@ return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; - m_adj(m, ETHER_ALIGN); + if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) + m_adj(m, ETHER_ALIGN); +#ifndef __NO_STRICT_ALIGNMENT + else + m_adj(m, MSK_RX_BUF_ALIGN); +#endif if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag, 
sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs, @@ -803,25 +855,21 @@ bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; - void *buf; - MGETHDR(m, M_DONTWAIT, MT_DATA); + m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); - buf = msk_jalloc(sc_if); - if (buf == NULL) { - m_freem(m); - return (ENOBUFS); - } - /* Attach the buffer to the mbuf. */ - MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0, - EXT_NET_DRV); if ((m->m_flags & M_EXT) == 0) { m_freem(m); return (ENOBUFS); } - m->m_pkthdr.len = m->m_len = MSK_JLEN; - m_adj(m, ETHER_ALIGN); + m->m_len = m->m_pkthdr.len = MJUM9BYTES; + if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) + m_adj(m, ETHER_ALIGN); +#ifndef __NO_STRICT_ALIGNMENT + else + m_adj(m, MSK_RX_BUF_ALIGN); +#endif if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag, sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs, @@ -860,15 +908,16 @@ { struct msk_if_softc *sc_if; struct mii_data *mii; + int error; sc_if = ifp->if_softc; MSK_IF_LOCK(sc_if); mii = device_get_softc(sc_if->msk_miibus); - mii_mediachg(mii); + error = mii_mediachg(mii); MSK_IF_UNLOCK(sc_if); - return (0); + return (error); } /* @@ -882,6 +931,10 @@ sc_if = ifp->if_softc; MSK_IF_LOCK(sc_if); + if ((ifp->if_flags & IFF_UP) == 0) { + MSK_IF_UNLOCK(sc_if); + return; + } mii = device_get_softc(sc_if->msk_miibus); mii_pollstat(mii); @@ -904,38 +957,41 @@ switch(command) { case SIOCSIFMTU: - if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) { + MSK_IF_LOCK(sc_if); + if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) error = EINVAL; - break; + else if (ifp->if_mtu != ifr->ifr_mtu) { + if (ifr->ifr_mtu > ETHERMTU) { + if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) { + error = EINVAL; + MSK_IF_UNLOCK(sc_if); + break; + } + if ((sc_if->msk_flags & + MSK_FLAG_JUMBO_NOCSUM) != 0) { + ifp->if_hwassist &= + ~(MSK_CSUM_FEATURES | CSUM_TSO); + ifp->if_capenable &= + ~(IFCAP_TSO4 | IFCAP_TXCSUM); + VLAN_CAPABILITIES(ifp); + } + } + ifp->if_mtu = ifr->ifr_mtu; + msk_init_locked(sc_if); } - if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U && - ifr->ifr_mtu > MSK_MAX_FRAMELEN) { - error = EINVAL; - break; - } - MSK_IF_LOCK(sc_if); - ifp->if_mtu = ifr->ifr_mtu; - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) - msk_init_locked(sc_if); MSK_IF_UNLOCK(sc_if); break; case SIOCSIFFLAGS: MSK_IF_LOCK(sc_if); if ((ifp->if_flags & IFF_UP) != 0) { - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { - if (((ifp->if_flags ^ sc_if->msk_if_flags) - & IFF_PROMISC) != 0) { - msk_setpromisc(sc_if); - msk_setmulti(sc_if); - } - } else { - if (sc_if->msk_detach == 0) - msk_init_locked(sc_if); - } - } else { - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) - msk_stop(sc_if); - } + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && + ((ifp->if_flags ^ sc_if->msk_if_flags) & + (IFF_PROMISC | IFF_ALLMULTI)) != 0) + msk_rxfilter(sc_if); + else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0) + msk_init_locked(sc_if); + } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) + msk_stop(sc_if); sc_if->msk_if_flags = ifp->if_flags; MSK_IF_UNLOCK(sc_if); break; @@ -943,7 +999,7 @@ case SIOCDELMULTI: MSK_IF_LOCK(sc_if); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) - msk_setmulti(sc_if); + msk_rxfilter(sc_if); MSK_IF_UNLOCK(sc_if); break; case SIOCGIFMEDIA: @@ -954,27 +1010,39 @@ case SIOCSIFCAP: MSK_IF_LOCK(sc_if); mask = ifr->ifr_reqcap ^ ifp->if_capenable; - if ((mask & IFCAP_TXCSUM) != 0) { + if ((mask & IFCAP_TXCSUM) != 0 && + (IFCAP_TXCSUM & 
ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; - if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && - (IFCAP_TXCSUM & ifp->if_capabilities) != 0) + if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) ifp->if_hwassist |= MSK_CSUM_FEATURES; else ifp->if_hwassist &= ~MSK_CSUM_FEATURES; } - if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { + if ((mask & IFCAP_RXCSUM) != 0 && + (IFCAP_RXCSUM & ifp->if_capabilities) != 0) + ifp->if_capenable ^= IFCAP_RXCSUM; + if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && + (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; msk_setvlan(sc_if, ifp); } - - if ((mask & IFCAP_TSO4) != 0) { + if ((mask & IFCAP_VLAN_HWCSUM) != 0 && + (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0) + ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; + if ((mask & IFCAP_TSO4) != 0 && + (IFCAP_TSO4 & ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_TSO4; - if ((IFCAP_TSO4 & ifp->if_capenable) != 0 && - (IFCAP_TSO4 & ifp->if_capabilities) != 0) + if ((IFCAP_TSO4 & ifp->if_capenable) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; } + if (ifp->if_mtu > ETHERMTU && + (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { + ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); + ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); + } + VLAN_CAPABILITIES(ifp); MSK_IF_UNLOCK(sc_if); break; @@ -1010,64 +1078,40 @@ static int mskc_setup_rambuffer(struct msk_softc *sc) { - int totqsize, minqsize; - int avail, next; + int next; int i; - uint8_t val; /* Get adapter SRAM size. */ - val = CSR_READ_1(sc, B2_E_0); - sc->msk_ramsize = (val == 0) ? 128 : val * 4; - if (sc->msk_hw_id == CHIP_ID_YUKON_FE) - sc->msk_ramsize = 4 * 4; + sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4; if (bootverbose) device_printf(sc->msk_dev, "RAM buffer size : %dKB\n", sc->msk_ramsize); + if (sc->msk_ramsize == 0) + return (0); - totqsize = sc->msk_ramsize * sc->msk_num_port; - minqsize = MSK_MIN_RXQ_SIZE + MSK_MIN_TXQ_SIZE; - if (minqsize > sc->msk_ramsize) - minqsize = sc->msk_ramsize; - - if (minqsize * sc->msk_num_port > totqsize) { - device_printf(sc->msk_dev, - "not enough RAM buffer memory : %d/%dKB\n", - minqsize * sc->msk_num_port, totqsize); - return (ENOSPC); - } - - avail = totqsize; - if (sc->msk_num_port > 1) { - /* - * Divide up the memory evenly so that everyone gets a - * fair share for dual port adapters. - */ - avail = sc->msk_ramsize; - } - - /* Take away the minimum memory for active queues. */ - avail -= minqsize; - /* Rx queue gets the minimum + 80% of the rest. */ - sc->msk_rxqsize = - (avail * MSK_RAM_QUOTA_RX) / 100 + MSK_MIN_RXQ_SIZE; - avail -= (sc->msk_rxqsize - MSK_MIN_RXQ_SIZE); - sc->msk_txqsize = avail + MSK_MIN_TXQ_SIZE; - + sc->msk_pflags |= MSK_FLAG_RAMBUF; + /* + * Give receiver 2/3 of memory and round down to the multiple + * of 1024. Tx/Rx RAM buffer size of Yukon II shoud be multiple + * of 1024. 
+ */ + sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024); + sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize; for (i = 0, next = 0; i < sc->msk_num_port; i++) { sc->msk_rxqstart[i] = next; - sc->msk_rxqend[i] = next + (sc->msk_rxqsize * 1024) - 1; + sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1; next = sc->msk_rxqend[i] + 1; sc->msk_txqstart[i] = next; - sc->msk_txqend[i] = next + (sc->msk_txqsize * 1024) - 1; + sc->msk_txqend[i] = next + sc->msk_txqsize - 1; next = sc->msk_txqend[i] + 1; if (bootverbose) { device_printf(sc->msk_dev, "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i, - sc->msk_rxqsize, sc->msk_rxqstart[i], + sc->msk_rxqsize / 1024, sc->msk_rxqstart[i], sc->msk_rxqend[i]); device_printf(sc->msk_dev, "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i, - sc->msk_txqsize, sc->msk_txqstart[i], + sc->msk_txqsize / 1024, sc->msk_txqstart[i], sc->msk_txqend[i]); } } @@ -1078,7 +1122,7 @@ static void msk_phy_power(struct msk_softc *sc, int mode) { - uint32_t val; + uint32_t our, val; int i; switch (mode) { @@ -1104,17 +1148,23 @@ val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); - if (sc->msk_hw_id == CHIP_ID_YUKON_XL && - sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { - /* Deassert Low Power for 1st PHY. */ - val |= PCI_Y2_PHY1_COMA; - if (sc->msk_num_port > 1) - val |= PCI_Y2_PHY2_COMA; - } else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) { - uint32_t our; + if (sc->msk_hw_id == CHIP_ID_YUKON_XL) { + if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { + /* Deassert Low Power for 1st PHY. */ + val |= PCI_Y2_PHY1_COMA; + if (sc->msk_num_port > 1) + val |= PCI_Y2_PHY2_COMA; + } + } + /* Release PHY from PowerDown/COMA mode. */ + pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); + switch (sc->msk_hw_id) { + case CHIP_ID_YUKON_EC_U: + case CHIP_ID_YUKON_EX: + case CHIP_ID_YUKON_FE_P: + case CHIP_ID_YUKON_UL_2: + CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_OFF); - CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON); - /* Enable all clocks. */ pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4); our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4); @@ -1122,11 +1172,22 @@ PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST); /* Set all bits to 0 except bits 15..12. */ pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4); - /* Set to default value. */ - pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4); + our = pci_read_config(sc->msk_dev, PCI_OUR_REG_5, 4); + our &= PCI_CTL_TIM_VMAIN_AV_MSK; + pci_write_config(sc->msk_dev, PCI_OUR_REG_5, our, 4); + pci_write_config(sc->msk_dev, PCI_CFG_REG_1, 0, 4); + /* + * Disable status race, workaround for + * Yukon EC Ultra & Yukon EX. + */ + val = CSR_READ_4(sc, B2_GP_IO); + val |= GLB_GPIO_STAT_RACE_DIS; + CSR_WRITE_4(sc, B2_GP_IO, val); + CSR_READ_4(sc, B2_GP_IO); + break; + default: + break; } - /* Release PHY from PowerDown/COMA mode. */ - pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); for (i = 0; i < sc->msk_num_port; i++) { CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_SET); @@ -1177,10 +1238,18 @@ CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); /* Disable ASF. */ - if (sc->msk_hw_id < CHIP_ID_YUKON_XL) { - CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); - CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE); - } + if (sc->msk_hw_id == CHIP_ID_YUKON_EX) { + status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR); + /* Clear AHB bridge & microcontroller reset. */ + status &= ~(Y2_ASF_HCU_CCSR_AHB_RST | + Y2_ASF_HCU_CCSR_CPU_RST_MODE); + /* Clear ASF microcontroller state. 
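The reworked mskc_setup_rambuffer() above drops the old quota-based split and simply gives the receive queue two thirds of the adapter SRAM, rounded down to a 1 KB multiple, with the transmit queue taking the remainder. A small arithmetic sketch of that rule, with rounddown() as a local helper rather than the kernel macro:

#include <stdio.h>

/* Local stand-in for the kernel's rounddown() macro. */
static unsigned int
rounddown(unsigned int x, unsigned int y)
{
	return (x - (x % y));
}

int
main(void)
{
	unsigned int ramsize, rxqsize, txqsize;

	ramsize = 48 * 1024;	/* e.g. B2_E_0 reads 12 -> 12 * 4 KB of SRAM */
	rxqsize = rounddown(ramsize * 2 / 3, 1024);	/* Rx: 2/3, 1 KB aligned */
	txqsize = ramsize - rxqsize;			/* Tx: the remainder */

	printf("Rx queue %u KB, Tx queue %u KB\n", rxqsize / 1024, txqsize / 1024);
	return (0);
}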
*/ + status &= ~ Y2_ASF_HCU_CCSR_UC_STATE_MSK; + CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status); + } else + CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); + CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE); + /* * Since we disabled ASF, S/W reset is required for Power Management. */ @@ -1232,6 +1301,10 @@ CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET); CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR); CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF); + if (sc->msk_hw_id == CHIP_ID_YUKON_EX) + CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), + GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | + GMC_BYP_RETR_ON); } CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); @@ -1337,7 +1410,8 @@ CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr)); /* Set the status list last index. */ CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1); - if (HW_FEATURE(sc, HWF_WA_DEV_43_418)) { + if (sc->msk_hw_id == CHIP_ID_YUKON_EC && + sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { /* WA for dev. #4.3 */ CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK); /* WA for dev. #4.18 */ @@ -1346,14 +1420,19 @@ } else { CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a); CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10); - CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, - HW_FEATURE(sc, HWF_WA_DEV_4109) ? 0x10 : 0x04); + if (sc->msk_hw_id == CHIP_ID_YUKON_XL && + sc->msk_hw_rev == CHIP_REV_YU_XL_A0) + CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04); + else + CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10); CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190); } /* * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI. */ CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000)); + CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, MSK_USECS(sc, 30)); + CSR_WRITE_4(sc, STAT_LEV_TIMER_INI, MSK_USECS(sc, 50)); /* Enable status unit. */ CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON); @@ -1391,6 +1470,7 @@ struct msk_softc *sc; struct msk_if_softc *sc_if; struct ifnet *ifp; + struct msk_mii_data *mmd; int i, port, error; uint8_t eaddr[6]; @@ -1400,11 +1480,13 @@ error = 0; sc_if = device_get_softc(dev); sc = device_get_softc(device_get_parent(dev)); - port = *(int *)device_get_ivars(dev); + mmd = device_get_ivars(dev); + port = mmd->port; sc_if->msk_if_dev = dev; sc_if->msk_port = port; sc_if->msk_softc = sc; + sc_if->msk_flags = sc->msk_pflags; sc->msk_if[port] = sc_if; /* Setup Tx/Rx queue register offsets. */ if (port == MSK_PORT_A) { @@ -1418,10 +1500,11 @@ } callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0); - TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if); + msk_sysctl_node(sc_if); if ((error = msk_txrx_dma_alloc(sc_if) != 0)) goto fail; + msk_rx_dma_jalloc(sc_if); ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { @@ -1445,13 +1528,15 @@ * compute the checksum? I think there is no reason to spend time to * make Rx checksum offload work on Yukon II hardware. */ - ifp->if_capabilities = IFCAP_TXCSUM; - ifp->if_hwassist = MSK_CSUM_FEATURES; - if (sc->msk_hw_id != CHIP_ID_YUKON_EC_U) { - /* It seems Yukon EC Ultra doesn't support TSO. */ - ifp->if_capabilities |= IFCAP_TSO4; - ifp->if_hwassist |= CSUM_TSO; - } + ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4; + /* + * Enable Rx checksum offloading if controller support new + * descriptor format. 
+ */ + if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 && + (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0) + ifp->if_capabilities |= IFCAP_RXCSUM; + ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO; ifp->if_capenable = ifp->if_capabilities; ifp->if_ioctl = msk_ioctl; ifp->if_start = msk_start; @@ -1485,9 +1570,23 @@ MSK_IF_LOCK(sc_if); /* VLAN capability setup */ - ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; - if (ifp->if_capabilities & IFCAP_HWCSUM) - ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; + ifp->if_capabilities |= IFCAP_VLAN_MTU; + if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) { + /* + * Due to Tx checksum offload hardware bugs, msk(4) manually + * computes checksum for short frames. For VLAN tagged frames + * this workaround does not work so disable checksum offload + * for VLAN interface. + */ + ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; + /* + * Enable Rx checksum offloading for VLAN taggedd frames + * if controller support new descriptor format. + */ + if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 && + (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0) + ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; + } ifp->if_capenable = ifp->if_capabilities; /* @@ -1509,10 +1608,6 @@ error = ENXIO; goto fail; } - /* Check whether PHY Id is MARVELL. */ - if (msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_ID0) - == PHY_MARV_ID0_VAL) - sc->msk_marvell_phy = 1; fail: if (error != 0) { @@ -1532,10 +1627,8 @@ mskc_attach(device_t dev) { struct msk_softc *sc; - int error, *port, reg, rid; -#ifdef MSI_SUPPORT - int i, msic; -#endif + struct msk_mii_data *mmd; + int error, msic, msir, reg; sc = device_get_softc(dev); sc->msk_dev = dev; @@ -1549,27 +1642,21 @@ /* Allocate I/O resource */ #ifdef MSK_USEIOSPACE - sc->msk_res_type = SYS_RES_IOPORT; - sc->msk_res_id = PCIR_BAR(1); + sc->msk_res_spec = msk_res_spec_io; #else - sc->msk_res_type = SYS_RES_MEMORY; - sc->msk_res_id = PCIR_BAR(0); + sc->msk_res_spec = msk_res_spec_mem; #endif - sc->msk_res[0] = bus_alloc_resource_any(dev, sc->msk_res_type, - &sc->msk_res_id, RF_ACTIVE); - if (sc->msk_res[0] == NULL) { - if (sc->msk_res_type == SYS_RES_MEMORY) { - sc->msk_res_type = SYS_RES_IOPORT; - sc->msk_res_id = PCIR_BAR(1); - } else { - sc->msk_res_type = SYS_RES_MEMORY; - sc->msk_res_id = PCIR_BAR(0); - } - sc->msk_res[0] = bus_alloc_resource_any(dev, sc->msk_res_type, - &sc->msk_res_id, RF_ACTIVE); - if (sc->msk_res[0] == NULL) { + sc->msk_irq_spec = msk_irq_spec_legacy; + error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); + if (error) { + if (sc->msk_res_spec == msk_res_spec_mem) + sc->msk_res_spec = msk_res_spec_io; + else + sc->msk_res_spec = msk_res_spec_mem; + error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); + if (error) { device_printf(dev, "couldn't allocate %s resources\n", - sc->msk_res_type == SYS_RES_MEMORY ? "memory" : + sc->msk_res_spec == msk_res_spec_mem ? "memory" : "I/O"); mtx_destroy(&sc->msk_mtx); return (ENXIO); @@ -1581,7 +1668,8 @@ sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f; /* Bail out if chip is not recognized. 
*/ if (sc->msk_hw_id < CHIP_ID_YUKON_XL || - sc->msk_hw_id > CHIP_ID_YUKON_FE) { + sc->msk_hw_id > CHIP_ID_YUKON_UL_2 || + sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n", sc->msk_hw_id, sc->msk_hw_rev); mtx_destroy(&sc->msk_mtx); @@ -1610,10 +1698,6 @@ CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP); - if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S') - sc->msk_coppertype = 0; - else - sc->msk_coppertype = 1; /* Check number of MACs. */ sc->msk_num_port = 1; if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) == @@ -1630,77 +1714,69 @@ else sc->msk_bustype = MSK_PCI_BUS; - /* Get H/W features(bugs). */ switch (sc->msk_hw_id) { case CHIP_ID_YUKON_EC: sc->msk_clock = 125; /* 125 Mhz */ - if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { - sc->msk_hw_feature = - HWF_WA_DEV_42 | HWF_WA_DEV_46 | HWF_WA_DEV_43_418 | - HWF_WA_DEV_420 | HWF_WA_DEV_423 | - HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 | - HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 | - HWF_WA_DEV_4152 | HWF_WA_DEV_4167; - } else { - /* A2/A3 */ - sc->msk_hw_feature = - HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 | - HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 | - HWF_WA_DEV_4152 | HWF_WA_DEV_4167; - } + sc->msk_pflags |= MSK_FLAG_JUMBO; break; case CHIP_ID_YUKON_EC_U: sc->msk_clock = 125; /* 125 Mhz */ - if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { - sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_483 | - HWF_WA_DEV_4109; - } else if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { - uint16_t v; - - sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 | - HWF_WA_DEV_4185; - v = CSR_READ_2(sc, Q_ADDR(Q_XA1, Q_WM)); - if (v == 0) - sc->msk_hw_feature |= HWF_WA_DEV_4185CS | - HWF_WA_DEV_4200; - } + sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM; break; + case CHIP_ID_YUKON_EX: + sc->msk_clock = 125; /* 125 Mhz */ + sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 | + MSK_FLAG_AUTOTX_CSUM; + /* + * Yukon Extreme seems to have silicon bug for + * automatic Tx checksum calculation capability. + */ + if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) + sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM; + /* + * Yukon Extreme A0 could not use store-and-forward + * for jumbo frames, so disable Tx checksum + * offloading for jumbo frames. + */ + if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0) + sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM; + break; case CHIP_ID_YUKON_FE: sc->msk_clock = 100; /* 100 Mhz */ - sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 | - HWF_WA_DEV_4152 | HWF_WA_DEV_4167; + sc->msk_pflags |= MSK_FLAG_FASTETHER; break; + case CHIP_ID_YUKON_FE_P: + sc->msk_clock = 50; /* 50 Mhz */ + sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 | + MSK_FLAG_AUTOTX_CSUM; + if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) { + /* + * XXX + * FE+ A0 has status LE writeback bug so msk(4) + * does not rely on status word of received frame + * in msk_rxeof() which in turn disables all + * hardware assistance bits reported by the status + * word as well as validity of the recevied frame. + * Just pass received frames to upper stack with + * minimal test and let upper stack handle them. 
+ */ + sc->msk_pflags |= MSK_FLAG_NOHWVLAN | + MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM; + } + break; case CHIP_ID_YUKON_XL: sc->msk_clock = 156; /* 156 Mhz */ - switch (sc->msk_hw_rev) { - case CHIP_REV_YU_XL_A0: - sc->msk_hw_feature = - HWF_WA_DEV_427 | HWF_WA_DEV_463 | HWF_WA_DEV_472 | - HWF_WA_DEV_479 | HWF_WA_DEV_483 | HWF_WA_DEV_4115 | - HWF_WA_DEV_4152 | HWF_WA_DEV_4167; - break; - case CHIP_REV_YU_XL_A1: - sc->msk_hw_feature = - HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 | - HWF_WA_DEV_4115 | HWF_WA_DEV_4152 | HWF_WA_DEV_4167; - break; - case CHIP_REV_YU_XL_A2: - sc->msk_hw_feature = - HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 | - HWF_WA_DEV_4115 | HWF_WA_DEV_4167; - break; - case CHIP_REV_YU_XL_A3: - sc->msk_hw_feature = - HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 | - HWF_WA_DEV_4115; - } + sc->msk_pflags |= MSK_FLAG_JUMBO; break; + case CHIP_ID_YUKON_UL_2: + sc->msk_clock = 156; /* 156 Mhz */ + sc->msk_pflags |= MSK_FLAG_JUMBO; + break; default: sc->msk_clock = 156; /* 156 Mhz */ - sc->msk_hw_feature = 0; + break; } -#ifdef MSI_SUPPORT /* Allocate IRQ resources. */ msic = pci_msi_count(dev); if (bootverbose) @@ -1714,45 +1790,36 @@ * port cards with separate MSI messages, so for now I disable MSI * on dual port cards. */ - if (msic == 2 && msi_disable == 0 && sc->msk_num_port == 1 && - pci_alloc_msi(dev, &msic) == 0) { - if (msic == 2) { - sc->msk_msi = 1; - } else - pci_release_msi(dev); - } - - if (sc->msk_msi == 0) { - rid = 0; - sc->msk_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, - RF_SHAREABLE | RF_ACTIVE); - if (sc->msk_irq[0] == NULL) { - device_printf(dev, "couldn't allocate IRQ resources\n"); - error = ENXIO; - goto fail; - } - } else { - for (i = 0, rid = 1; i < 2; i++, rid++) { - sc->msk_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, - &rid, RF_ACTIVE); - if (sc->msk_irq[i] == NULL) { - device_printf(dev, - "couldn't allocate IRQ resources\n"); - error = ENXIO; - goto fail; + if (legacy_intr != 0) + msi_disable = 1; + if (msi_disable == 0) { + switch (msic) { + case 2: + case 1: /* 88E8058 reports 1 MSI message */ + msir = msic; + if (sc->msk_num_port == 1 && + pci_alloc_msi(dev, &msir) == 0) { + if (msic == msir) { + sc->msk_pflags |= MSK_FLAG_MSI; + sc->msk_irq_spec = msic == 2 ? 
+ msk_irq_spec_msi2 : + msk_irq_spec_msi; + } else + pci_release_msi(dev); } + break; + default: + device_printf(dev, + "Unexpected number of MSI messages : %d\n", msic); + break; } } -#else - rid = 0; - sc->msk_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, - RF_SHAREABLE | RF_ACTIVE); - if (sc->msk_irq[0] == NULL) { + + error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq); + if (error) { device_printf(dev, "couldn't allocate IRQ resources\n"); - error = ENXIO; goto fail; } -#endif if ((error = msk_status_dma_alloc(sc)) != 0) goto fail; @@ -1774,15 +1841,18 @@ error = ENXIO; goto fail; } - port = malloc(sizeof(int), M_DEVBUF, M_WAITOK); - if (port == NULL) { + mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO); + if (mmd == NULL) { device_printf(dev, "failed to allocate memory for " "ivars of PORT_A\n"); error = ENXIO; goto fail; } - *port = MSK_PORT_A; - device_set_ivars(sc->msk_devs[MSK_PORT_A], port); + mmd->port = MSK_PORT_A; + mmd->pmd = sc->msk_pmd; + if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S' || sc->msk_pmd == 'P') + mmd->mii_flags |= MIIF_HAVEFIBER; + device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd); if (sc->msk_num_port > 1) { sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1); @@ -1791,15 +1861,18 @@ error = ENXIO; goto fail; } - port = malloc(sizeof(int), M_DEVBUF, M_WAITOK); - if (port == NULL) { + mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO); + if (mmd == NULL) { device_printf(dev, "failed to allocate memory for " "ivars of PORT_B\n"); error = ENXIO; goto fail; } - *port = MSK_PORT_B; - device_set_ivars(sc->msk_devs[MSK_PORT_B], port); + mmd->port = MSK_PORT_B; + mmd->pmd = sc->msk_pmd; + if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S' || sc->msk_pmd == 'P') + mmd->mii_flags |= MIIF_HAVEFIBER; + device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd); } error = bus_generic_attach(dev); @@ -1808,18 +1881,19 @@ goto fail; } + /* Hook interrupt last to avoid having to lock softc. */ TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc); sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->msk_tq); taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->msk_dev)); - /* Hook interrupt last to avoid having to lock softc. */ error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET | INTR_MPSAFE | INTR_FAST, msk_intr, sc, &sc->msk_intrhand[0]); if (error != 0) { device_printf(dev, "couldn't set up interrupt handler\n"); - taskqueue_free(sc->msk_tq); + if (legacy_intr == 0) + taskqueue_free(sc->msk_tq); sc->msk_tq = NULL; goto fail; } @@ -1852,13 +1926,12 @@ ifp = sc_if->msk_ifp; if (device_is_attached(dev)) { /* XXX */ - sc_if->msk_detach = 1; + sc_if->msk_flags |= MSK_FLAG_DETACH; msk_stop(sc_if); /* Can't hold locks while calling detach. 
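mskc_attach() above publishes a struct msk_mii_data (port number, PMD type from B2_PMD_TYP, and MIIF_HAVEFIBER for the 'L', 'S' and 'P' PMD letters) as each port's ivars, and e1000phy_attach() reads it back through two levels of device_get_parent(). A rough model of that data flow, with the newbus calls elided and the flag value chosen only for illustration:

#include <stdio.h>

/*
 * Shape of the structure handed down through device ivars; the fields
 * mirror the ones the patch uses (port, pmd, mii_flags).
 */
struct msk_mii_data {
	int	port;		/* MSK_PORT_A or MSK_PORT_B */
	int	pmd;		/* PMD type character read from B2_PMD_TYP */
	int	mii_flags;	/* e.g. MIIF_HAVEFIBER */
};

#define MIIF_HAVEFIBER	0x0001	/* illustrative value, not the miivar.h one */

int
main(void)
{
	struct msk_mii_data mmd = { 0, 0, 0 };
	int pmd = 'P';		/* 'P' is treated as an SFP module by the patch */

	/* mskc_attach(): publish PMD/fiber information for the child port. */
	mmd.port = 0;
	mmd.pmd = pmd;
	if (pmd == 'L' || pmd == 'S' || pmd == 'P')
		mmd.mii_flags |= MIIF_HAVEFIBER;

	/*
	 * e1000phy_attach(): the PHY driver reaches the same structure via
	 * device_get_ivars(device_get_parent(device_get_parent(dev))), i.e.
	 * the ivars of the msk port device two levels up, and copies
	 * MIIF_HAVEFIBER into its own mii_flags.
	 */
	if (mmd.mii_flags & MIIF_HAVEFIBER)
		printf("fiber media selected, PMD '%c'\n", mmd.pmd);
	return (0);
}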
*/ MSK_IF_UNLOCK(sc_if); callout_drain(&sc_if->msk_tick_ch); taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task); - taskqueue_drain(taskqueue_swi, &sc_if->msk_link_task); ether_ifdetach(ifp); MSK_IF_LOCK(sc_if); } @@ -1874,6 +1947,7 @@ * } */ + msk_rx_dma_jfree(sc_if); msk_txrx_dma_free(sc_if); bus_generic_detach(dev); @@ -1922,7 +1996,7 @@ msk_status_dma_free(sc); - if (sc->msk_tq != NULL) { + if (legacy_intr == 0 && sc->msk_tq != NULL) { taskqueue_drain(sc->msk_tq, &sc->msk_int_task); taskqueue_free(sc->msk_tq); sc->msk_tq = NULL; @@ -1935,33 +2009,10 @@ bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]); sc->msk_intrhand[1] = NULL; } -#ifdef MSI_SUPPORT - if (sc->msk_msi) { - int i, rid; - for (i = 0, rid = 1; i < 2; i++, rid++) { - if (sc->msk_irq[i] != NULL) { - bus_release_resource(dev, SYS_RES_IRQ, rid, - sc->msk_irq[i]); - sc->msk_irq[i] = NULL; - } - } + bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq); + if ((sc->msk_pflags & MSK_FLAG_MSI) != 0) pci_release_msi(dev); - } else { - if (sc->msk_irq[0] != NULL) { - bus_release_resource(dev, SYS_RES_IRQ, 0, - sc->msk_irq[0]); - sc->msk_irq[0] = NULL; - } - } -#else - if (sc->msk_irq[0] != NULL) { - bus_release_resource(dev, SYS_RES_IRQ, 0, sc->msk_irq[0]); - sc->msk_irq[0] = NULL; - } -#endif - if (sc->msk_res[0] != NULL) - bus_release_resource(dev, sc->msk_res_type, sc->msk_res_id, - sc->msk_res[0]); + bus_release_resources(dev, sc->msk_res_spec, sc->msk_res); mtx_destroy(&sc->msk_mtx); return (0); @@ -2057,15 +2108,9 @@ struct msk_dmamap_arg ctx; struct msk_txdesc *txd; struct msk_rxdesc *rxd; - struct msk_rxdesc *jrxd; - struct msk_jpool_entry *entry; - uint8_t *ptr; + bus_size_t rxalign; int error, i; - mtx_init(&sc_if->msk_jlist_mtx, "msk_jlist_mtx", NULL, MTX_DEF); - SLIST_INIT(&sc_if->msk_jfree_listhead); - SLIST_INIT(&sc_if->msk_jinuse_listhead); - /* Create parent DMA tag. */ /* * XXX @@ -2137,42 +2182,6 @@ goto fail; } - /* Create tag for jumbo Rx ring. */ - error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ - MSK_RING_ALIGN, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MSK_JUMBO_RX_RING_SZ, /* maxsize */ - 1, /* nsegments */ - MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ - 0, /* flags */ - NULL, NULL, /* lockfunc, lockarg */ - &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); - if (error != 0) { - device_printf(sc_if->msk_if_dev, - "failed to create jumbo Rx ring DMA tag\n"); - goto fail; - } - - /* Create tag for jumbo buffer blocks. */ - error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ - PAGE_SIZE, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MSK_JMEM, /* maxsize */ - 1, /* nsegments */ - MSK_JMEM, /* maxsegsize */ - 0, /* flags */ - NULL, NULL, /* lockfunc, lockarg */ - &sc_if->msk_cdata.msk_jumbo_tag); - if (error != 0) { - device_printf(sc_if->msk_if_dev, - "failed to create jumbo Rx buffer block DMA tag\n"); - goto fail; - } - /* Create tag for Tx buffers. */ error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 1, 0, /* alignment, boundary */ @@ -2191,9 +2200,16 @@ goto fail; } + rxalign = 1; + /* + * Workaround hardware hang which seems to happen when Rx buffer + * is not aligned on multiple of FIFO word(8 bytes). + */ + if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) + rxalign = MSK_RX_BUF_ALIGN; /* Create tag for Rx buffers. 
*/ error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ - 1, 0, /* alignment, boundary */ + rxalign, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ @@ -2209,24 +2225,6 @@ goto fail; } - /* Create tag for jumbo Rx buffers. */ - error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ - PAGE_SIZE, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ - MSK_MAXRXSEGS, /* nsegments */ - MSK_JLEN, /* maxsegsize */ - 0, /* flags */ - NULL, NULL, /* lockfunc, lockarg */ - &sc_if->msk_cdata.msk_jumbo_rx_tag); - if (error != 0) { - device_printf(sc_if->msk_if_dev, - "failed to create jumbo Rx DMA tag\n"); - goto fail; - } - /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag, (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK | @@ -2269,29 +2267,6 @@ } sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr; - /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ - error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, - (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, - BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, - &sc_if->msk_cdata.msk_jumbo_rx_ring_map); - if (error != 0) { - device_printf(sc_if->msk_if_dev, - "failed to allocate DMA'able memory for jumbo Rx ring\n"); - goto fail; - } - - ctx.msk_busaddr = 0; - error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, - sc_if->msk_cdata.msk_jumbo_rx_ring_map, - sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, - msk_dmamap_cb, &ctx, 0); - if (error != 0) { - device_printf(sc_if->msk_if_dev, - "failed to load DMA'able memory for jumbo Rx ring\n"); - goto fail; - } - sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; - /* Create DMA maps for Tx buffers. */ for (i = 0; i < MSK_TX_RING_CNT; i++) { txd = &sc_if->msk_cdata.msk_txdesc[i]; @@ -2324,12 +2299,97 @@ goto fail; } } + +fail: + return (error); +} + +static int +msk_rx_dma_jalloc(struct msk_if_softc *sc_if) +{ + struct msk_dmamap_arg ctx; + struct msk_rxdesc *jrxd; + bus_size_t rxalign; + int error, i; + + if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) { + sc_if->msk_flags &= ~MSK_FLAG_JUMBO; + device_printf(sc_if->msk_if_dev, + "disabling jumbo frame support\n"); + return (0); + } + /* Create tag for jumbo Rx ring. */ + error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ + MSK_RING_ALIGN, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MSK_JUMBO_RX_RING_SZ, /* maxsize */ + 1, /* nsegments */ + MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); + if (error != 0) { + device_printf(sc_if->msk_if_dev, + "failed to create jumbo Rx ring DMA tag\n"); + goto jumbo_fail; + } + + rxalign = 1; + /* + * Workaround hardware hang which seems to happen when Rx buffer + * is not aligned on multiple of FIFO word(8 bytes). + */ + if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) + rxalign = MSK_RX_BUF_ALIGN; + /* Create tag for jumbo Rx buffers. 
*/ + error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ + rxalign, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MJUM9BYTES, /* maxsize */ + 1, /* nsegments */ + MJUM9BYTES, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc_if->msk_cdata.msk_jumbo_rx_tag); + if (error != 0) { + device_printf(sc_if->msk_if_dev, + "failed to create jumbo Rx DMA tag\n"); + goto jumbo_fail; + } + + /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ + error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, + (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, + BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, + &sc_if->msk_cdata.msk_jumbo_rx_ring_map); + if (error != 0) { + device_printf(sc_if->msk_if_dev, + "failed to allocate DMA'able memory for jumbo Rx ring\n"); + goto jumbo_fail; + } + + ctx.msk_busaddr = 0; + error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, + sc_if->msk_cdata.msk_jumbo_rx_ring_map, + sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, + msk_dmamap_cb, &ctx, 0); + if (error != 0) { + device_printf(sc_if->msk_if_dev, + "failed to load DMA'able memory for jumbo Rx ring\n"); + goto jumbo_fail; + } + sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; + /* Create DMA maps for jumbo Rx buffers. */ if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { device_printf(sc_if->msk_if_dev, "failed to create spare jumbo Rx dmamap\n"); - goto fail; + goto jumbo_fail; } for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; @@ -2340,54 +2400,17 @@ if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create jumbo Rx dmamap\n"); - goto fail; + goto jumbo_fail; } } - /* Allocate DMA'able memory and load the DMA map for jumbo buf. */ - error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag, - (void **)&sc_if->msk_rdata.msk_jumbo_buf, - BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, - &sc_if->msk_cdata.msk_jumbo_map); - if (error != 0) { - device_printf(sc_if->msk_if_dev, - "failed to allocate DMA'able memory for jumbo buf\n"); - goto fail; - } + return (0); - ctx.msk_busaddr = 0; - error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag, - sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf, - MSK_JMEM, msk_dmamap_cb, &ctx, 0); - if (error != 0) { - device_printf(sc_if->msk_if_dev, - "failed to load DMA'able memory for jumbobuf\n"); - goto fail; - } - sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr; - - /* - * Now divide it up into 9K pieces and save the addresses - * in an array. 
- */ - ptr = sc_if->msk_rdata.msk_jumbo_buf; - for (i = 0; i < MSK_JSLOTS; i++) { - sc_if->msk_cdata.msk_jslots[i] = ptr; - ptr += MSK_JLEN; - entry = malloc(sizeof(struct msk_jpool_entry), - M_DEVBUF, M_WAITOK); - if (entry == NULL) { - device_printf(sc_if->msk_if_dev, - "no memory for jumbo buffers!\n"); - error = ENOMEM; - goto fail; - } - entry->slot = i; - SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, - jpool_entries); - } - -fail: +jumbo_fail: + msk_rx_dma_jfree(sc_if); + device_printf(sc_if->msk_if_dev, "disabling jumbo frame support " + "due to resource shortage\n"); + sc_if->msk_flags &= ~MSK_FLAG_JUMBO; return (error); } @@ -2396,39 +2419,8 @@ { struct msk_txdesc *txd; struct msk_rxdesc *rxd; - struct msk_rxdesc *jrxd; - struct msk_jpool_entry *entry; int i; - MSK_JLIST_LOCK(sc_if); - while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) { - device_printf(sc_if->msk_if_dev, - "asked to free buffer that is in use!\n"); - SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); - SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, - jpool_entries); - } - - while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) { - entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); - SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); - free(entry, M_DEVBUF); - } - MSK_JLIST_UNLOCK(sc_if); - - /* Destroy jumbo buffer block. */ - if (sc_if->msk_cdata.msk_jumbo_map) - bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag, - sc_if->msk_cdata.msk_jumbo_map); - - if (sc_if->msk_rdata.msk_jumbo_buf) { - bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag, - sc_if->msk_rdata.msk_jumbo_buf, - sc_if->msk_cdata.msk_jumbo_map); - sc_if->msk_rdata.msk_jumbo_buf = NULL; - sc_if->msk_cdata.msk_jumbo_map = NULL; - } - /* Tx ring. */ if (sc_if->msk_cdata.msk_tx_ring_tag) { if (sc_if->msk_cdata.msk_tx_ring_map) @@ -2459,21 +2451,6 @@ bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag); sc_if->msk_cdata.msk_rx_ring_tag = NULL; } - /* Jumbo Rx ring. */ - if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { - if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) - bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, - sc_if->msk_cdata.msk_jumbo_rx_ring_map); - if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && - sc_if->msk_rdata.msk_jumbo_rx_ring) - bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, - sc_if->msk_rdata.msk_jumbo_rx_ring, - sc_if->msk_cdata.msk_jumbo_rx_ring_map); - sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; - sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; - bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); - sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; - } /* Tx buffers. */ if (sc_if->msk_cdata.msk_tx_tag) { for (i = 0; i < MSK_TX_RING_CNT; i++) { @@ -2505,6 +2482,33 @@ bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); sc_if->msk_cdata.msk_rx_tag = NULL; } + if (sc_if->msk_cdata.msk_parent_tag) { + bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); + sc_if->msk_cdata.msk_parent_tag = NULL; + } +} + +static void +msk_rx_dma_jfree(struct msk_if_softc *sc_if) +{ + struct msk_rxdesc *jrxd; + int i; + + /* Jumbo Rx ring. 
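The new jumbo_fail path turns an allocation failure into a soft degradation rather than a fatal attach error: partially created jumbo state is released through msk_rx_dma_jfree() and MSK_FLAG_JUMBO is cleared so the port keeps running at standard MTU. A condensed userland sketch of that pattern, with made-up names (jumbo_alloc, jumbo_free, FLAG_JUMBO) standing in for the real DMA setup:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_JUMBO      0x0008          /* stands in for MSK_FLAG_JUMBO */

struct softc {
        uint32_t flags;
        void    *jumbo_state;           /* stands in for the jumbo ring/buffer DMA state */
};

static void
jumbo_free(struct softc *sc)
{
        /* Tear down whatever part of the jumbo state was set up. */
        free(sc->jumbo_state);
        sc->jumbo_state = NULL;
}

static int
jumbo_alloc(struct softc *sc)
{
        if ((sc->flags & FLAG_JUMBO) == 0)
                return (0);             /* feature disabled: nothing to do */
        sc->jumbo_state = malloc(4096);
        if (sc->jumbo_state == NULL)
                goto jumbo_fail;
        return (0);

jumbo_fail:
        jumbo_free(sc);
        printf("disabling jumbo frame support due to resource shortage\n");
        sc->flags &= ~FLAG_JUMBO;       /* keep running at standard MTU */
        return (ENOMEM);
}

int
main(void)
{
        struct softc sc = { FLAG_JUMBO, NULL };

        (void)jumbo_alloc(&sc);
        printf("jumbo %s\n", (sc.flags & FLAG_JUMBO) ? "enabled" : "disabled");
        jumbo_free(&sc);
        return (0);
}

The flag is cleared before returning, so any later path that tests MSK_FLAG_JUMBO quietly falls back to standard receive buffers.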
*/ + if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { + if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) + bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, + sc_if->msk_cdata.msk_jumbo_rx_ring_map); + if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && + sc_if->msk_rdata.msk_jumbo_rx_ring) + bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, + sc_if->msk_rdata.msk_jumbo_rx_ring, + sc_if->msk_cdata.msk_jumbo_rx_ring_map); + sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; + sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; + bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); + sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; + } /* Jumbo Rx buffers. */ if (sc_if->msk_cdata.msk_jumbo_rx_tag) { for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { @@ -2524,72 +2528,9 @@ bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; } - - if (sc_if->msk_cdata.msk_parent_tag) { - bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); - sc_if->msk_cdata.msk_parent_tag = NULL; - } - mtx_destroy(&sc_if->msk_jlist_mtx); } /* - * Allocate a jumbo buffer. - */ -static void * -msk_jalloc(struct msk_if_softc *sc_if) -{ - struct msk_jpool_entry *entry; - - MSK_JLIST_LOCK(sc_if); - - entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); - - if (entry == NULL) { - MSK_JLIST_UNLOCK(sc_if); - return (NULL); - } - - SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); - SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries); - - MSK_JLIST_UNLOCK(sc_if); - - return (sc_if->msk_cdata.msk_jslots[entry->slot]); -} - -/* - * Release a jumbo buffer. - */ -static void -msk_jfree(void *buf, void *args) -{ - struct msk_if_softc *sc_if; - struct msk_jpool_entry *entry; - int i; - - /* Extract the softc struct pointer. */ - sc_if = (struct msk_if_softc *)args; - KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); - - MSK_JLIST_LOCK(sc_if); - /* Calculate the slot this buffer belongs to. */ - i = ((vm_offset_t)buf - - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN; - KASSERT(i >= 0 && i < MSK_JSLOTS, - ("%s: asked to free buffer that we don't manage!", __func__)); - - entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead); - KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); - entry->slot = i; - SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); - SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries); - if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead)) - wakeup(sc_if); - - MSK_JLIST_UNLOCK(sc_if); -} - -/* * It's copy of ath_defrag(ath(4)). * * Defragment an mbuf chain, returning at most maxfrags separate @@ -2695,7 +2636,10 @@ tcp_offset = offset = 0; m = *m_head; - if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) { + if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 && + (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) || + ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && + (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) { /* * Since mbuf has no protocol specific structure information * in it we have to inspect protocol information here to @@ -2709,7 +2653,16 @@ struct ip *ip; struct tcphdr *tcp; - /* TODO check for M_WRITABLE(m) */ + if (M_WRITABLE(m) == 0) { + /* Get a writable copy. */ + m = m_dup(*m_head, M_DONTWAIT); + m_freem(*m_head); + if (m == NULL) { + *m_head = NULL; + return (ENOBUFS); + } + *m_head = m; + } offset = sizeof(struct ether_header); m = m_pullup(m, offset); @@ -2745,16 +2698,21 @@ * resort to S/W checksum routine when we encounter short * TCP frames. 
* Short UDP packets appear to be handled correctly by - * Yukon II. + * Yukon II. Also I assume this bug does not happen on + * controllers that use newer descriptor format or + * automatic Tx checksum calaulcation. */ - if (m->m_pkthdr.len < MSK_MIN_FRAMELEN && + if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 && + (m->m_pkthdr.len < MSK_MIN_FRAMELEN) && (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { - uint16_t csum; - - csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset - - (ip->ip_hl << 2), offset); + m = m_pullup(m, offset + sizeof(struct tcphdr)); + if (m == NULL) { + *m_head = NULL; + return (ENOBUFS); + } *(uint16_t *)(m->m_data + offset + - m->m_pkthdr.csum_data) = csum; + m->m_pkthdr.csum_data) = in_cksum_skip(m, + m->m_pkthdr.len, offset); m->m_pkthdr.csum_flags &= ~CSUM_TCP; } if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { @@ -2812,11 +2770,18 @@ /* Check TSO support. */ #ifdef TSO_SUPPORT if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { - tso_mtu = offset + m->m_pkthdr.tso_segsz; + if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) + tso_mtu = m->m_pkthdr.tso_segsz; + else + tso_mtu = offset + m->m_pkthdr.tso_segsz; if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) { tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; tx_le->msk_addr = htole32(tso_mtu); - tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER); + if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) + tx_le->msk_control = htole32(OP_MSS | HW_OWNER); + else + tx_le->msk_control = + htole32(OP_LRGLEN | HW_OWNER); sc_if->msk_cdata.msk_tx_cnt++; MSK_INC(prod, MSK_TX_RING_CNT); sc_if->msk_cdata.msk_tso_mtu = tso_mtu; @@ -2842,15 +2807,21 @@ } /* Check if we have to handle checksum offload. */ if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) { - tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; - tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data) - & 0xffff) | ((uint32_t)tcp_offset << 16)); - tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER)); - control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; - if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) - control |= UDPTCP; - sc_if->msk_cdata.msk_tx_cnt++; - MSK_INC(prod, MSK_TX_RING_CNT); + if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0) + control |= CALSUM; + else { + tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; + tx_le->msk_addr = htole32(((tcp_offset + + m->m_pkthdr.csum_data) & 0xffff) | + ((uint32_t)tcp_offset << 16)); + tx_le->msk_control = htole32(1 << 16 | + (OP_TCPLISW | HW_OWNER)); + control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; + if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) + control |= UDPTCP; + sc_if->msk_cdata.msk_tx_cnt++; + MSK_INC(prod, MSK_TX_RING_CNT); + } } si = prod; @@ -2921,7 +2892,7 @@ MSK_IF_LOCK(sc_if); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != - IFF_DRV_RUNNING || sc_if->msk_link == 0) { + IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0) { MSK_IF_UNLOCK(sc_if); return; } @@ -2978,11 +2949,12 @@ if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer) return; ifp = sc_if->msk_ifp; - if (sc_if->msk_link == 0) { + if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) { if (bootverbose) if_printf(sc_if->msk_ifp, "watchdog timeout " "(missed link)\n"); ifp->if_oerrors++; + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; msk_init_locked(sc_if); return; } @@ -3007,12 +2979,13 @@ if_printf(ifp, "watchdog timeout\n"); ifp->if_oerrors++; + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; msk_init_locked(sc_if); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task); } -static void +static 
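For the short-TCP-frame workaround the driver now pulls the headers into the first mbuf and writes the value returned by in_cksum_skip() straight into the packet at the csum_data offset. The arithmetic behind in_cksum_skip() is the ordinary 16-bit one's-complement Internet checksum; the following is a standalone rendition of that math only (the kernel routine walks mbuf chains in place), useful for sanity-checking the offsets used above:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One's-complement sum over data[skip..len), returned ready to store. */
static uint16_t
cksum_skip(const uint8_t *data, size_t len, size_t skip)
{
        uint32_t sum = 0;
        size_t i;

        for (i = skip; i + 1 < len; i += 2)
                sum += (uint32_t)data[i] << 8 | data[i + 1];
        if ((len - skip) & 1)
                sum += (uint32_t)data[len - 1] << 8;    /* pad the odd trailing byte */
        while (sum > 0xffff)
                sum = (sum & 0xffff) + (sum >> 16);     /* fold carries back in */
        return ((uint16_t)~sum);
}

int
main(void)
{
        uint8_t pkt[12] = {
                0x45, 0x00, 0x00, 0x30, 0x44, 0x22,
                0x40, 0x00, 0x40, 0x06, 0x00, 0x00,     /* checksum field zeroed */
        };
        uint16_t csum = cksum_skip(pkt, sizeof(pkt), 0);

        pkt[10] = csum >> 8;
        pkt[11] = csum & 0xff;
        /* Re-summing over the completed data must yield 0. */
        printf("csum 0x%04x, verify 0x%04x\n", (unsigned)csum,
            (unsigned)cksum_skip(pkt, sizeof(pkt), 0));
        return (0);
}
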
int mskc_shutdown(device_t dev) { struct msk_softc *sc; @@ -3035,6 +3008,7 @@ CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); MSK_UNLOCK(sc); + return (0); } static int @@ -3064,7 +3038,7 @@ /* Put hardware reset. */ CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); - sc->msk_suspended = 1; + sc->msk_pflags |= MSK_FLAG_SUSPEND; MSK_UNLOCK(sc); @@ -3084,18 +3058,39 @@ mskc_reset(sc); for (i = 0; i < sc->msk_num_port; i++) { if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && - ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) + ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) { + sc->msk_if[i]->msk_ifp->if_drv_flags &= + ~IFF_DRV_RUNNING; msk_init_locked(sc->msk_if[i]); + } } - sc->msk_suspended = 0; + sc->msk_pflags &= ~MSK_FLAG_SUSPEND; MSK_UNLOCK(sc); return (0); } +#ifndef __NO_STRICT_ALIGNMENT +static __inline void +msk_fixup_rx(struct mbuf *m) +{ + int i; + uint16_t *src, *dst; + + src = mtod(m, uint16_t *); + dst = src - 3; + + for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) + *dst++ = *src++; + + m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN); +} +#endif + static void -msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) +msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, + int len) { struct mbuf *m; struct ifnet *ifp; @@ -3112,7 +3107,18 @@ if ((status & GMR_FS_VLAN) != 0 && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) rxlen -= ETHER_VLAN_ENCAP_LEN; - if (len > sc_if->msk_framesize || + if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) { + /* + * For controllers that returns bogus status code + * just do minimal check and let upper stack + * handle this frame. + */ + if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) { + ifp->if_ierrors++; + msk_discard_rxbuf(sc_if, cons); + break; + } + } else if (len > sc_if->msk_framesize || ((status & GMR_FS_ANY_ERR) != 0) || ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { /* Don't count flow-control packet as errors. */ @@ -3131,7 +3137,23 @@ } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; +#ifndef __NO_STRICT_ALIGNMENT + if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) + msk_fixup_rx(m); +#endif ifp->if_ipackets++; + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && + (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) { + m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; + if ((control & CSS_IPV4_CSUM_OK) != 0) + m->m_pkthdr.csum_flags |= CSUM_IP_VALID; + if ((control & (CSS_TCP | CSS_UDP)) != 0 && + (control & (CSS_TCPUDP_CSUM_OK)) != 0) { + m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | + CSUM_PSEUDO_HDR; + m->m_pkthdr.csum_data = 0xffff; + } + } /* Check for VLAN tagged packets. 
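msk_fixup_rx() above copies the frame back by MSK_RX_BUF_ALIGN - ETHER_ALIGN (6) bytes: the chip wants buffers that start on an 8-byte boundary, but on strict-alignment machines the 14-byte Ethernet header must leave the IP header on a 4-byte boundary, so the data is slid down after DMA completes. A standalone sketch of that overlapping word copy (mbuf handling simplified away):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSK_RX_BUF_ALIGN        8
#define ETHER_ALIGN             2
#define ETHER_HDR_LEN           14

/* Slide 'len' bytes of frame data back by 6 bytes, word by word, as msk_fixup_rx() does. */
static uint8_t *
fixup_rx(uint8_t *data, size_t len)
{
        uint16_t *src = (uint16_t *)(void *)data;
        uint16_t *dst = src - 3;
        size_t i;

        for (i = 0; i < len / sizeof(uint16_t) + 1; i++)
                *dst++ = *src++;
        return (data - (MSK_RX_BUF_ALIGN - ETHER_ALIGN));
}

int
main(void)
{
        static uint64_t storage[16];            /* guarantees 8-byte alignment */
        uint8_t *buf = (uint8_t *)storage;
        uint8_t *frame = buf + MSK_RX_BUF_ALIGN;        /* frame starts 8-byte aligned */
        size_t len = 60;

        memset(frame, 0xab, len);
        frame = fixup_rx(frame, len);
        /* The IP header (after the 14-byte Ethernet header) is now 4-byte aligned. */
        assert(((uintptr_t)(frame + ETHER_HDR_LEN) % 4) == 0);
        printf("frame %% 8 = %u, IP header %% 4 = %u\n",
            (unsigned)((uintptr_t)frame % 8),
            (unsigned)((uintptr_t)(frame + ETHER_HDR_LEN) % 4));
        return (0);
}

Because dst is always below src, the forward copy is safe even though the regions overlap.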
*/ if ((status & GMR_FS_VLAN) != 0 && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { @@ -3147,7 +3169,8 @@ } static void -msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) +msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, + int len) { struct mbuf *m; struct ifnet *ifp; @@ -3183,7 +3206,23 @@ } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; +#ifndef __NO_STRICT_ALIGNMENT + if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) + msk_fixup_rx(m); +#endif ifp->if_ipackets++; + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && + (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) { + m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; + if ((control & CSS_IPV4_CSUM_OK) != 0) + m->m_pkthdr.csum_flags |= CSUM_IP_VALID; + if ((control & (CSS_TCP | CSS_UDP)) != 0 && + (control & (CSS_TCPUDP_CSUM_OK)) != 0) { + m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | + CSUM_PSEUDO_HDR; + m->m_pkthdr.csum_data = 0xffff; + } + } /* Check for VLAN tagged packets. */ if ((status & GMR_FS_VLAN) != 0 && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { @@ -3263,6 +3302,8 @@ mii = device_get_softc(sc_if->msk_miibus); mii_tick(mii); + if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) + msk_miibus_statchg(sc_if->msk_if_dev); msk_watchdog(sc_if); callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); } @@ -3272,15 +3313,12 @@ { uint16_t status; - if (sc_if->msk_softc->msk_marvell_phy) { - msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); - status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, - PHY_MARV_INT_STAT); - /* Handle FIFO Underrun/Overflow? */ - if ((status & PHY_M_IS_FIFO_ERROR)) - device_printf(sc_if->msk_if_dev, - "PHY FIFO underrun/overflow.\n"); - } + msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); + status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); + /* Handle FIFO Underrun/Overflow? */ + if ((status & PHY_M_IS_FIFO_ERROR)) + device_printf(sc_if->msk_if_dev, + "PHY FIFO underrun/overflow.\n"); } static void @@ -3293,11 +3331,9 @@ status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); /* GMAC Rx FIFO overrun. */ - if ((status & GM_IS_RX_FF_OR) != 0) { + if ((status & GM_IS_RX_FF_OR) != 0) CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); - device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n"); - } /* GMAC Tx FIFO underrun. */ if ((status & GM_IS_TX_FF_UR) != 0) { CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), @@ -3445,7 +3481,7 @@ struct msk_softc *sc; sc = sc_if->msk_softc; - if (sc_if->msk_framesize >(MCLBYTES - ETHER_HDR_LEN)) + if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) bus_dmamap_sync( sc_if->msk_cdata.msk_jumbo_rx_ring_tag, sc_if->msk_cdata.msk_jumbo_rx_ring_map, @@ -3512,10 +3548,11 @@ sc_if->msk_vtag = ntohs(len); break; case OP_RXSTAT: - if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) - msk_jumbo_rxeof(sc_if, status, len); + if (sc_if->msk_framesize > + (MCLBYTES - MSK_RX_BUF_ALIGN)) + msk_jumbo_rxeof(sc_if, status, control, len); else - msk_rxeof(sc_if, status, len); + msk_rxeof(sc_if, status, control, len); rxprog++; /* * Because there is no way to sync single Rx LE @@ -3592,7 +3629,8 @@ /* Get interrupt source. 
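Both receive paths now decode the checksum-status (CSS) bits from the status LE: an unfragmented IPv4 frame gets CSUM_IP_CHECKED, CSS_IPV4_CSUM_OK promotes it to CSUM_IP_VALID, and a TCP/UDP frame with CSS_TCPUDP_CSUM_OK lets the stack skip the L4 checksum entirely (csum_data is set to 0xffff). A small sketch of that mapping using the CSS_* values added to if_mskreg.h below; the CSUM_* numbers here are simplified stand-ins, not the real mbuf flag values:

#include <stdint.h>
#include <stdio.h>

/* Status LE checksum-status bits (descriptor V2), from if_mskreg.h. */
#define CSS_TCPUDP_CSUM_OK      0x00800000
#define CSS_UDP                 0x00400000
#define CSS_TCP                 0x00200000
#define CSS_IPFRAG              0x00100000
#define CSS_IPV4_CSUM_OK        0x00040000
#define CSS_IPV4                0x00020000

/* Simplified stand-ins for the mbuf csum_flags values. */
#define CSUM_IP_CHECKED         0x01
#define CSUM_IP_VALID           0x02
#define CSUM_DATA_VALID         0x04
#define CSUM_PSEUDO_HDR         0x08

static int
decode_css(uint32_t control)
{
        int flags = 0;

        if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
                flags |= CSUM_IP_CHECKED;
                if ((control & CSS_IPV4_CSUM_OK) != 0)
                        flags |= CSUM_IP_VALID;
                if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
                    (control & CSS_TCPUDP_CSUM_OK) != 0)
                        flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
        }
        return (flags);
}

int
main(void)
{
        printf("good tcp: 0x%x\n", (unsigned)decode_css(
            CSS_IPV4 | CSS_IPV4_CSUM_OK | CSS_TCP | CSS_TCPUDP_CSUM_OK));
        printf("fragment: 0x%x\n", (unsigned)decode_css(CSS_IPV4 | CSS_IPFRAG));
        return (0);
}
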
*/ status = CSR_READ_4(sc, B0_ISRC); - if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 || + if (status == 0 || status == 0xffffffff || + (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 || (status & sc->msk_intrmask) == 0) goto done; @@ -3631,6 +3669,11 @@ if ((status & Y2_IS_STAT_BMU) != 0) CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); + if (CSR_READ_1(sc, STAT_TX_TIMER_CTRL) == TIM_START) { + CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_STOP); + CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START); + } + if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); @@ -3651,6 +3694,48 @@ } static void +msk_set_tx_stfwd(struct msk_if_softc *sc_if) +{ + struct msk_softc *sc; + struct ifnet *ifp; + + ifp = sc_if->msk_ifp; + sc = sc_if->msk_softc; + switch (sc->msk_hw_id) { + case CHIP_ID_YUKON_EX: + if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0) + goto yukon_ex_workaround; + if (ifp->if_mtu > ETHERMTU) + CSR_WRITE_4(sc, + MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), + TX_JUMBO_ENA | TX_STFW_ENA); + else + CSR_WRITE_4(sc, + MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), + TX_JUMBO_DIS | TX_STFW_ENA); + break; + default: +yukon_ex_workaround: + if (ifp->if_mtu > ETHERMTU) { + /* Set Tx GMAC FIFO Almost Empty Threshold. */ + CSR_WRITE_4(sc, + MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), + MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); + /* Disable Store & Forward mode for Tx. */ + CSR_WRITE_4(sc, + MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), + TX_JUMBO_ENA | TX_STFW_DIS); + } else { + /* Enable Store & Forward mode for Tx. */ + CSR_WRITE_4(sc, + MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), + TX_JUMBO_DIS | TX_STFW_ENA); + } + break; + } +} + +static void msk_init(void *xsc) { struct msk_if_softc *sc_if = xsc; @@ -3668,6 +3753,7 @@ struct mii_data *mii; uint16_t eaddr[ETHER_ADDR_LEN / 2]; uint16_t gmac; + uint32_t reg; int error, i; MSK_IF_LOCK_ASSERT(sc_if); @@ -3676,38 +3762,44 @@ sc = sc_if->msk_softc; mii = device_get_softc(sc_if->msk_miibus); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) + return; + error = 0; /* Cancel pending I/O and free all Rx/Tx buffers. */ msk_stop(sc_if); - sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + - ETHER_VLAN_ENCAP_LEN; + if (ifp->if_mtu < ETHERMTU) + sc_if->msk_framesize = ETHERMTU; + else + sc_if->msk_framesize = ifp->if_mtu; + sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + if (ifp->if_mtu > ETHERMTU && + (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { + ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); + ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); + } + /* GMAC Control reset. */ + CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET); + CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR); + CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF); + if (sc->msk_hw_id == CHIP_ID_YUKON_EX) + CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), + GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | + GMC_BYP_RETR_ON); + /* - * Initialize GMAC first. - * Without this initialization, Rx MAC did not work as expected - * and Rx MAC garbled status LEs and it resulted in out-of-order - * or duplicated frame delivery which in turn showed very poor - * Rx performance.(I had to write a packet analysis code that - * could be embeded in driver to diagnose this issue.) - * I've spent almost 2 months to fix this issue. If I have had - * datasheet for Yukon II I wouldn't have encountered this. 
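msk_init_locked() also recomputes msk_framesize so that receive buffers are never sized below the standard MTU plus the Ethernet and VLAN headers, which is what the later jumbo/standard buffer decisions key off. The computation is trivial but worth pinning down; a sketch assuming the usual 1500/14/4 byte constants (the function name is illustrative):

#include <stdio.h>

#define ETHERMTU                1500
#define ETHER_HDR_LEN           14
#define ETHER_VLAN_ENCAP_LEN    4

static int
calc_framesize(int mtu)
{
        int framesize;

        /* Never size receive buffers below the standard MTU. */
        framesize = (mtu < ETHERMTU) ? ETHERMTU : mtu;
        return (framesize + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
}

int
main(void)
{
        printf("mtu 1500 -> %d bytes\n", calc_framesize(1500));    /* 1518 */
        printf("mtu 9000 -> %d bytes\n", calc_framesize(9000));    /* 9018 */
        return (0);
}
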
:-( + * Initialize GMAC first such that speed/duplex/flow-control + * parameters are renegotiated when interface is brought up. */ - gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL; - GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); + GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0); /* Dummy read the Interrupt Source Register. */ CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); - /* Set MIB Clear Counter Mode. */ - gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); - GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); - /* Read all MIB Counters with Clear Mode set. */ - for (i = 0; i < GM_MIB_CNT_SIZE; i++) - GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i); - /* Clear MIB Clear Counter Mode. */ - gmac &= ~GM_PAR_MIB_CLR; - GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); + /* Clear MIB stats. */ + msk_stats_clear(sc_if); /* Disable FCS. */ GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); @@ -3726,7 +3818,7 @@ gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); - if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) + if (ifp->if_mtu > ETHERMTU) gmac |= GM_SMOD_JUMBO_ENA; GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); @@ -3747,23 +3839,35 @@ /* Configure Rx MAC FIFO. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR); - CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), - GMF_OPER_ON | GMF_RX_F_FL_ON); + reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P || + sc->msk_hw_id == CHIP_ID_YUKON_EX) + reg |= GMF_RX_OVER_ON; + CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg); - /* Set promiscuous mode. */ - msk_setpromisc(sc_if); + /* Set receive filter. */ + msk_rxfilter(sc_if); - /* Set multicast filter. */ - msk_setmulti(sc_if); + if (sc->msk_hw_id == CHIP_ID_YUKON_XL) { + /* Clear flush mask - HW bug. */ + CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0); + } else { + /* Flush Rx MAC FIFO on any flow control or error. */ + CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), + GMR_FS_ANY_ERR); + } - /* Flush Rx MAC FIFO on any flow control or error. */ - CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), - GMR_FS_ANY_ERR); + /* + * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word + * due to hardware hang on receipt of pause frames. + */ + reg = RX_GMF_FL_THR_DEF + 1; + /* Another magic for Yukon FE+ - From Linux. */ + if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && + sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) + reg = 0x178; + CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg); - /* Set Rx FIFO flush threshold to 64 bytes. */ - CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), - RX_GMF_FL_THR_DEF); - /* Configure Tx MAC FIFO. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR); @@ -3772,30 +3876,24 @@ /* Configure hardware VLAN tag insertion/stripping. */ msk_setvlan(sc_if, ifp); - /* XXX It seems STFW is requried for all cases. */ - CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_STFW_ENA); - - if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) { + if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) { /* Set Rx Pause threshould. 
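Several of the values programmed here are now chip-specific: GMF_RX_OVER_ON is set only on Yukon FE+/Extreme, Yukon XL clears the Rx flush mask to dodge a hardware bug, and the Rx FIFO flush threshold becomes RX_GMF_FL_THR_DEF + 1 except on FE+ A0, which gets a magic value borrowed from Linux per the comment above. A condensed sketch of that last selection, using the chip id/rev constants from if_mskreg.h (the RX_GMF_FL_THR_DEF value below is a placeholder, not the real one):

#include <stdint.h>
#include <stdio.h>

#define CHIP_ID_YUKON_EX        0xb5
#define CHIP_ID_YUKON_FE_P      0xb8
#define CHIP_REV_YU_FE_P_A0     0

/* Placeholder; the real default threshold lives in if_mskreg.h. */
#define RX_GMF_FL_THR_DEF       0x0a

static uint32_t
rx_flush_threshold(uint8_t hw_id, uint8_t hw_rev)
{
        /* 64 bytes + 1 FIFO word by default; FE+ A0 needs the Linux-derived value. */
        if (hw_id == CHIP_ID_YUKON_FE_P && hw_rev == CHIP_REV_YU_FE_P_A0)
                return (0x178);
        return (RX_GMF_FL_THR_DEF + 1);
}

int
main(void)
{
        printf("FE+ A0: 0x%x\n", (unsigned)rx_flush_threshold(CHIP_ID_YUKON_FE_P, 0));
        printf("EX:     0x%x\n", (unsigned)rx_flush_threshold(CHIP_ID_YUKON_EX, 2));
        return (0);
}
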
*/ CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR), MSK_ECU_LLPP); CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR), MSK_ECU_ULPP); - if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) { - /* - * Can't sure the following code is needed as Yukon - * Yukon EC Ultra may not support jumbo frames. - * - * Set Tx GMAC FIFO Almost Empty Threshold. - */ - CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), - MSK_ECU_AE_THR); - /* Disable Store & Forward mode for Tx. */ - CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), - TX_STFW_DIS); - } + /* Configure store-and-forward for Tx. */ + msk_set_tx_stfwd(sc_if); } + if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && + sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) { + /* Disable dynamic watermark - from Linux. */ + reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA)); + reg &= ~0x03; + CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg); + } + /* * Disable Force Sync bit and Alloc bit in Tx RAM interface * arbiter as we don't use Sync Tx queue. @@ -3816,10 +3914,23 @@ CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); - if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && - sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { - /* Fix for Yukon-EC Ultra: set BMU FIFO level */ - CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV); + switch (sc->msk_hw_id) { + case CHIP_ID_YUKON_EC_U: + if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { + /* Fix for Yukon-EC Ultra: set BMU FIFO level */ + CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), + MSK_ECU_TXFF_LEV); + } + break; + case CHIP_ID_YUKON_EX: + /* + * Yukon Extreme seems to have silicon bug for + * automatic Tx checksum calculation capability. + */ + if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) + CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F), + F_TX_CHK_AUTO_OFF); + break; } /* Setup Rx Queue Bus Memory Interface. */ @@ -3840,7 +3951,7 @@ /* Disable Rx checksum offload and RSS hash. */ CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH); - if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) { + if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) { msk_set_prefetch(sc, sc_if->msk_rxq, sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, MSK_JUMBO_RX_RING_CNT - 1); @@ -3871,7 +3982,7 @@ CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); CSR_READ_4(sc, B0_IMSK); - sc_if->msk_link = 0; + sc_if->msk_flags &= ~MSK_FLAG_LINK; mii_mediachg(mii); ifp->if_drv_flags |= IFF_DRV_RUNNING; @@ -3887,6 +3998,8 @@ int ltpp, utpp; sc = sc_if->msk_softc; + if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) + return; /* Setup Rx Queue. */ CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR); @@ -3990,6 +4103,8 @@ GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val); /* Read again to ensure writing. */ GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); + /* Update stats and clear counters. */ + msk_stats_update(sc_if); /* Stop Tx BMU. */ CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP); @@ -3998,7 +4113,7 @@ if ((val & (BMU_STOP | BMU_IDLE)) == 0) { CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP); - CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); + val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); } else break; DELAY(1); @@ -4011,8 +4126,7 @@ /* Disable all GMAC interrupt. */ CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0); /* Disable PHY interrupt. 
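msk_stop() keeps the existing idiom for halting the Tx BMU: write BMU_STOP, then poll the CSR up to MSK_TIMEOUT times, re-issuing the stop and delaying a microsecond each round until BMU_STOP or BMU_IDLE is observed. In isolation the pattern looks roughly like this (register access is stubbed out and the BMU_* bit values are placeholders, not the real definitions):

#include <stdint.h>
#include <stdio.h>

#define BMU_STOP        0x00000001      /* placeholder bit values */
#define BMU_IDLE        0x00000002
#define MSK_TIMEOUT     1000

/* Stand-ins for CSR_READ_4/CSR_WRITE_4/DELAY. */
static uint32_t fake_csr;
static uint32_t csr_read(void) { return (fake_csr); }
static void csr_write(uint32_t v) { fake_csr = v | BMU_IDLE; /* pretend the HW goes idle */ }
static void delay_us(int us) { (void)us; }

/* Returns 0 once the BMU reports stopped/idle, 1 on timeout. */
static int
bmu_stop_poll(void)
{
        uint32_t val;
        int i;

        csr_write(BMU_STOP);
        for (i = 0; i < MSK_TIMEOUT; i++) {
                val = csr_read();
                if ((val & (BMU_STOP | BMU_IDLE)) != 0)
                        return (0);
                /* Not stopped yet: re-issue the stop and wait a bit. */
                csr_write(BMU_STOP);
                delay_us(1);
        }
        return (1);
}

int
main(void)
{
        printf("bmu stop %s\n", bmu_stop_poll() == 0 ? "ok" : "timed out");
        return (0);
}
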
*/ - if (sc->msk_marvell_phy) - msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); + msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); /* Disable the RAM Interface Arbiter. */ CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB); @@ -4103,10 +4217,267 @@ * Mark the interface down. */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); - sc_if->msk_link = 0; + sc_if->msk_flags &= ~MSK_FLAG_LINK; } +/* + * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower + * counter clears high 16 bits of the counter such that accessing + * lower 16 bits should be the last operation. + */ +#define MSK_READ_MIB32(x, y) \ + (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) + \ + (uint32_t)GMAC_READ_2(sc, x, y) +#define MSK_READ_MIB64(x, y) \ + (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) + \ + (uint64_t)MSK_READ_MIB32(x, y) + +static void +msk_stats_clear(struct msk_if_softc *sc_if) +{ + struct msk_softc *sc; + uint32_t reg; + uint16_t gmac; + int i; + + MSK_IF_LOCK_ASSERT(sc_if); + + sc = sc_if->msk_softc; + /* Set MIB Clear Counter Mode. */ + gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); + GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); + /* Read all MIB Counters with Clear Mode set. */ + for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t)) + reg = MSK_READ_MIB32(sc_if->msk_port, i); + /* Clear MIB Clear Counter Mode. */ + gmac &= ~GM_PAR_MIB_CLR; + GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); +} + +static void +msk_stats_update(struct msk_if_softc *sc_if) +{ + struct msk_softc *sc; + struct ifnet *ifp; + struct msk_hw_stats *stats; + uint16_t gmac; + uint32_t reg; + + MSK_IF_LOCK_ASSERT(sc_if); + + ifp = sc_if->msk_ifp; + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + return; + sc = sc_if->msk_softc; + stats = &sc_if->msk_stats; + /* Set MIB Clear Counter Mode. */ + gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); + GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); + + /* Rx stats. 
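The comment above MSK_READ_MIB32/MSK_READ_MIB64 is the important detail: in MIB-clear mode, reading a counter's low 16 bits wipes its upper half, so each value has to be assembled high word first with the low word read last, and the 64-bit octet counters stack two such 32-bit reads the same way. A userland rendition with a faked 16-bit register read, showing what is lost when the order is reversed:

#include <stdint.h>
#include <stdio.h>

/* A fake 32-bit MIB counter exposed as two 16-bit registers (low at 0, high at +4). */
static uint32_t counter = 0x12345678;

static uint16_t
gmac_read_2(int off)
{
        uint16_t val;

        if (off == 0) {
                val = (uint16_t)counter;
                counter &= 0x0000ffff;  /* reading the low word clears the high word */
        } else
                val = (uint16_t)(counter >> 16);
        return (val);
}

/* Same ordering MSK_READ_MIB32() encodes: high word first, low word last. */
static uint32_t
read_mib32(int base)
{
        uint32_t hi, lo;

        hi = gmac_read_2(base + 4);
        lo = gmac_read_2(base);
        return (hi << 16 | lo);
}

int
main(void)
{
        printf("ordered read:     0x%08x\n", (unsigned)read_mib32(0));  /* 0x12345678 */

        counter = 0x12345678;
        (void)gmac_read_2(0);           /* wrong order: low word first... */
        printf("high word is now: 0x%04x\n", (unsigned)gmac_read_2(4)); /* 0x0000 */
        return (0);
}
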
*/ + stats->rx_ucast_frames += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK); + stats->rx_bcast_frames += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK); + stats->rx_pause_frames += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE); + stats->rx_mcast_frames += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK); + stats->rx_crc_errs += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR); + reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1); + stats->rx_good_octets += + MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO); + stats->rx_bad_octets += + MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO); + stats->rx_runts += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT); + stats->rx_runt_errs += + MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG); + stats->rx_pkts_64 += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B); + stats->rx_pkts_65_127 += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B); + stats->rx_pkts_128_255 += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B); + stats->rx_pkts_256_511 += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B); + stats->rx_pkts_512_1023 += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B); + stats->rx_pkts_1024_1518 += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B); + stats->rx_pkts_1519_max += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ); + stats->rx_pkts_too_long += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR); + stats->rx_pkts_jabbers += + MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT); + reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2); + stats->rx_fifo_oflows += + MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV); + reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3); + + /* Tx stats. */ + stats->tx_ucast_frames += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK); + stats->tx_bcast_frames += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK); + stats->tx_pause_frames += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE); + stats->tx_mcast_frames += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK); + stats->tx_octets += + MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO); + stats->tx_pkts_64 += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B); + stats->tx_pkts_65_127 += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B); + stats->tx_pkts_128_255 += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B); + stats->tx_pkts_256_511 += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B); + stats->tx_pkts_512_1023 += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B); + stats->tx_pkts_1024_1518 += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B); + stats->tx_pkts_1519_max += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ); + reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1); + stats->tx_colls += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL); + stats->tx_late_colls += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL); + stats->tx_excess_colls += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL); + stats->tx_multi_colls += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL); + stats->tx_single_colls += + MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL); + stats->tx_underflows += + MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR); + /* Clear MIB Clear Counter Mode. 
*/ + gmac &= ~GM_PAR_MIB_CLR; + GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); +} + static int +msk_sysctl_stat32(SYSCTL_HANDLER_ARGS) +{ + struct msk_softc *sc; + struct msk_if_softc *sc_if; + uint32_t result, *stat; + int off; + + sc_if = (struct msk_if_softc *)arg1; + sc = sc_if->msk_softc; + off = arg2; + stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off); + + MSK_IF_LOCK(sc_if); + result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2); + result += *stat; + MSK_IF_UNLOCK(sc_if); + + return (sysctl_handle_int(oidp, &result, 0, req)); +} + + +#undef MSK_READ_MIB32 + +#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) \ + SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \ + sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32, \ + "IU", d) + +static void +msk_sysctl_node(struct msk_if_softc *sc_if) +{ + struct sysctl_ctx_list *ctx; + struct sysctl_oid_list *child, *schild; + struct sysctl_oid *tree; + + ctx = device_get_sysctl_ctx(sc_if->msk_if_dev); + child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev)); + + tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, + NULL, "MSK Statistics"); + schild = child = SYSCTL_CHILDREN(tree); + tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD, + NULL, "MSK RX Statistics"); + child = SYSCTL_CHILDREN(tree); + MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames", + child, rx_ucast_frames, "Good unicast frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames", + child, rx_bcast_frames, "Good broadcast frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames", + child, rx_pause_frames, "Pause frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames", + child, rx_mcast_frames, "Multicast frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs", + child, rx_crc_errs, "CRC errors"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64", + child, rx_pkts_64, "64 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127", + child, rx_pkts_65_127, "65 to 127 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255", + child, rx_pkts_128_255, "128 to 255 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511", + child, rx_pkts_256_511, "256 to 511 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023", + child, rx_pkts_512_1023, "512 to 1023 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518", + child, rx_pkts_1024_1518, "1024 to 1518 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max", + child, rx_pkts_1519_max, "1519 to max frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long", + child, rx_pkts_too_long, "frames too long"); + MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers", + child, rx_pkts_jabbers, "Jabber errors"); + MSK_SYSCTL_STAT32(sc_if, ctx, "overflows", + child, rx_fifo_oflows, "FIFO overflows"); + + tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD, + NULL, "MSK TX Statistics"); + child = SYSCTL_CHILDREN(tree); + MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames", + child, tx_ucast_frames, "Unicast frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames", + child, tx_bcast_frames, "Broadcast frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames", + child, tx_pause_frames, "Pause frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames", + child, tx_mcast_frames, "Multicast frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64", + child, tx_pkts_64, "64 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127", + child, tx_pkts_65_127, "65 to 127 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255", + child, 
tx_pkts_128_255, "128 to 255 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511", + child, tx_pkts_256_511, "256 to 511 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023", + child, tx_pkts_512_1023, "512 to 1023 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518", + child, tx_pkts_1024_1518, "1024 to 1518 bytes frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max", + child, tx_pkts_1519_max, "1519 to max frames"); + MSK_SYSCTL_STAT32(sc_if, ctx, "colls", + child, tx_colls, "Collisions"); + MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls", + child, tx_late_colls, "Late collisions"); + MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls", + child, tx_excess_colls, "Excessive collisions"); + MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls", + child, tx_multi_colls, "Multiple collisions"); + MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls", + child, tx_single_colls, "Single collisions"); + MSK_SYSCTL_STAT32(sc_if, ctx, "underflows", + child, tx_underflows, "FIFO underflows"); +} + +#undef MSK_SYSCTL_STAT32 + +static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; Index: dev/msk/if_mskreg.h =================================================================== --- dev/msk/if_mskreg.h (revision 201066) +++ dev/msk/if_mskreg.h (working copy) @@ -130,17 +130,27 @@ #define DEVICEID_MRVL_8035 0x4350 #define DEVICEID_MRVL_8036 0x4351 #define DEVICEID_MRVL_8038 0x4352 -#define DEVICEID_MRVL_8039 0X4353 +#define DEVICEID_MRVL_8039 0x4353 +#define DEVICEID_MRVL_8040 0x4354 +#define DEVICEID_MRVL_8040T 0x4355 +#define DEVICEID_MRVL_8042 0x4357 +#define DEVICEID_MRVL_8048 0x435A #define DEVICEID_MRVL_4360 0x4360 #define DEVICEID_MRVL_4361 0x4361 #define DEVICEID_MRVL_4362 0x4362 #define DEVICEID_MRVL_4363 0x4363 #define DEVICEID_MRVL_4364 0x4364 +#define DEVICEID_MRVL_4365 0x4365 +#define DEVICEID_MRVL_436A 0x436A +#define DEVICEID_MRVL_436B 0x436B +#define DEVICEID_MRVL_436C 0x436C +#define DEVICEID_MRVL_4380 0x4380 /* * D-Link gigabit ethernet device ID */ #define DEVICEID_DLINK_DGE550SX 0x4001 +#define DEVICEID_DLINK_DGE560SX 0x4002 #define DEVICEID_DLINK_DGE560T 0x4b00 #define BIT_31 (1 << 31) @@ -220,6 +230,8 @@ #define PCI_OUR_REG_3 0x80 /* 32 bit Our Register 3 */ #define PCI_OUR_REG_4 0x84 /* 32 bit Our Register 4 */ #define PCI_OUR_REG_5 0x88 /* 32 bit Our Register 5 */ +#define PCI_CFG_REG_0 0x90 /* 32 bit Config Register 0 */ +#define PCI_CFG_REG_1 0x94 /* 32 bit Config Register 1 */ /* PCI Express Capability */ #define PEX_CAP_ID 0xe0 /* 8 bit PEX Capability ID */ @@ -320,6 +332,56 @@ #define PCI_CLK_GATE_PEX_UNIT_ENA BIT_1 /* Enable Gate PEX Unit Clock */ #define PCI_CLK_GATE_ROOT_COR_ENA BIT_0 /* Enable Gate Root Core Clock */ +/* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */ + /* Bit 31..27: for A3 & later */ +#define PCI_CTL_DIV_CORE_CLK_ENA BIT_31 /* Divide Core Clock Enable */ +#define PCI_CTL_SRESET_VMAIN_AV BIT_30 /* Soft Reset for Vmain_av De-Glitch */ +#define PCI_CTL_BYPASS_VMAIN_AV BIT_29 /* Bypass En. 
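The statistics sysctl handler added above (msk_sysctl_stat32() with the MSK_SYSCTL_STAT32 macro) identifies each counter by its byte offset into struct msk_hw_stats, and the same offset selects the matching MIB register via GM_MIB_CNT_BASE + off * 2, so a single handler serves every 32-bit statistic. A minimal userland illustration of that offsetof pattern, with an abridged stand-in struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hw_stats {               /* abridged stand-in for struct msk_hw_stats */
        uint32_t rx_ucast_frames;
        uint32_t rx_bcast_frames;
        uint32_t tx_colls;
};

/* Generic accessor: one function serves every 32-bit counter. */
static uint32_t
stat32(const struct hw_stats *st, size_t off)
{
        return (*(const uint32_t *)((const uint8_t *)st + off));
}

#define STAT32(st, field)       stat32((st), offsetof(struct hw_stats, field))

int
main(void)
{
        struct hw_stats st = { 10, 2, 7 };

        printf("rx_ucast %u, tx_colls %u\n",
            (unsigned)STAT32(&st, rx_ucast_frames), (unsigned)STAT32(&st, tx_colls));
        return (0);
}

In the driver the offset doubles as arg2 of the sysctl, and the handler adds the live MIB reading to the accumulated total before returning it.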
for Vmain_av De-Glitch */ +#define PCI_CTL_TIM_VMAIN_AV1 BIT_28 /* Bit 28..27: Timer Vmain_av Mask */ +#define PCI_CTL_TIM_VMAIN_AV0 BIT_27 /* Bit 28..27: Timer Vmain_av Mask */ +#define PCI_CTL_TIM_VMAIN_AV_MSK (BIT_28 | BIT_27) + /* Bit 26..16: Release Clock on Event */ +#define PCI_REL_PCIE_RST_DE_ASS BIT_26 /* PCIe Reset De-Asserted */ +#define PCI_REL_GPHY_REC_PACKET BIT_25 /* GPHY Received Packet */ +#define PCI_REL_INT_FIFO_N_EMPTY BIT_24 /* Internal FIFO Not Empty */ +#define PCI_REL_MAIN_PWR_AVAIL BIT_23 /* Main Power Available */ +#define PCI_REL_CLKRUN_REQ_REL BIT_22 /* CLKRUN Request Release */ +#define PCI_REL_PCIE_RESET_ASS BIT_21 /* PCIe Reset Asserted */ +#define PCI_REL_PME_ASSERTED BIT_20 /* PME Asserted */ +#define PCI_REL_PCIE_EXIT_L1_ST BIT_19 /* PCIe Exit L1 State */ +#define PCI_REL_LOADER_NOT_FIN BIT_18 /* EPROM Loader Not Finished */ +#define PCI_REL_PCIE_RX_EX_IDLE BIT_17 /* PCIe Rx Exit Electrical Idle State */ +#define PCI_REL_GPHY_LINK_UP BIT_16 /* GPHY Link Up */ + /* Bit 10.. 0: Mask for Gate Clock */ +#define PCI_GAT_PCIE_RST_ASSERTED BIT_10 /* PCIe Reset Asserted */ +#define PCI_GAT_GPHY_N_REC_PACKET BIT_9 /* GPHY Not Received Packet */ +#define PCI_GAT_INT_FIFO_EMPTY BIT_8 /* Internal FIFO Empty */ +#define PCI_GAT_MAIN_PWR_N_AVAIL BIT_7 /* Main Power Not Available */ +#define PCI_GAT_CLKRUN_REQ_REL BIT_6 /* CLKRUN Not Requested */ +#define PCI_GAT_PCIE_RESET_ASS BIT_5 /* PCIe Reset Asserted */ +#define PCI_GAT_PME_DE_ASSERTED BIT_4 /* PME De-Asserted */ +#define PCI_GAT_PCIE_ENTER_L1_ST BIT_3 /* PCIe Enter L1 State */ +#define PCI_GAT_LOADER_FINISHED BIT_2 /* EPROM Loader Finished */ +#define PCI_GAT_PCIE_RX_EL_IDLE BIT_1 /* PCIe Rx Electrical Idle State */ +#define PCI_GAT_GPHY_LINK_DOWN BIT_0 /* GPHY Link Down */ + +/* PCI_CFG_REG_1 32 bit Config Register 1 */ +#define PCI_CF1_DIS_REL_EVT_RST BIT_24 /* Dis. Rel. Event during PCIE reset */ + /* Bit 23..21: Release Clock on Event */ +#define PCI_CF1_REL_LDR_NOT_FIN BIT_23 /* EEPROM Loader Not Finished */ +#define PCI_CF1_REL_VMAIN_AVLBL BIT_22 /* Vmain available */ +#define PCI_CF1_REL_PCIE_RESET BIT_21 /* PCI-E reset */ + /* Bit 20..18: Gate Clock on Event */ +#define PCI_CF1_GAT_LDR_NOT_FIN BIT_20 /* EEPROM Loader Finished */ +#define PCI_CF1_GAT_PCIE_RX_IDLE BIT_19 /* PCI-E Rx Electrical idle */ +#define PCI_CF1_GAT_PCIE_RESET BIT_18 /* PCI-E Reset */ +#define PCI_CF1_PRST_PHY_CLKREQ BIT_17 /* Enable PCI-E rst & PM2PHY gen. CLKREQ */ +#define PCI_CF1_PCIE_RST_CLKREQ BIT_16 /* Enable PCI-E rst generate CLKREQ */ + +#define PCI_CF1_ENA_CFG_LDR_DONE BIT_8 /* Enable core level Config loader done */ +#define PCI_CF1_ENA_TXBMU_RD_IDLE BIT_1 /* Enable TX BMU Read IDLE for ASPM */ +#define PCI_CF1_ENA_TXBMU_WR_IDLE BIT_0 /* Enable TX BMU Write IDLE for ASPM */ + /* PEX_DEV_CTRL 16 bit PEX Device Control (Yukon-2) */ #define PEX_DC_MAX_RRS_MSK (7<<12) /* Bit 14..12: Max. 
Read Request Size */ #define PEX_DC_EN_NO_SNOOP BIT_11 /* Enable No Snoop */ @@ -616,6 +678,7 @@ #define B28_Y2_SMB_CSD_REG 0x0e44 /* 32 bit ASF SMB Control/Status/Data */ #define B28_Y2_ASF_IRQ_V_BASE 0x0e60 /* 32 bit ASF IRQ Vector Base */ #define B28_Y2_ASF_STAT_CMD 0x0e68 /* 32 bit ASF Status and Command Reg */ +#define B28_Y2_ASF_HCU_CCSR 0x0e68 /* 32 bit ASF HCU CCSR (Yukon EX) */ #define B28_Y2_ASF_HOST_COM 0x0e6c /* 32 bit ASF Host Communication Reg */ #define B28_Y2_DATA_REG_1 0x0e70 /* 32 bit ASF/Host Data Register 1 */ #define B28_Y2_DATA_REG_2 0x0e74 /* 32 bit ASF/Host Data Register 2 */ @@ -825,8 +888,12 @@ #define CHIP_ID_YUKON_LP 0xb2 /* Chip ID for YUKON-LP */ #define CHIP_ID_YUKON_XL 0xb3 /* Chip ID for YUKON-2 XL */ #define CHIP_ID_YUKON_EC_U 0xb4 /* Chip ID for YUKON-2 EC Ultra */ +#define CHIP_ID_YUKON_EX 0xb5 /* Chip ID for YUKON-2 Extreme */ #define CHIP_ID_YUKON_EC 0xb6 /* Chip ID for YUKON-2 EC */ #define CHIP_ID_YUKON_FE 0xb7 /* Chip ID for YUKON-2 FE */ +#define CHIP_ID_YUKON_FE_P 0xb8 /* Chip ID for YUKON-2 FE+ */ +#define CHIP_ID_YUKON_SUPR 0xb9 /* Chip ID for YUKON-2 Supreme */ +#define CHIP_ID_YUKON_UL_2 0xba /* Chip ID for YUKON-2 Ultra 2 */ #define CHIP_REV_YU_XL_A0 0 /* Chip Rev. for Yukon-2 A0 */ #define CHIP_REV_YU_XL_A1 1 /* Chip Rev. for Yukon-2 A1 */ @@ -837,9 +904,14 @@ #define CHIP_REV_YU_EC_A2 1 /* Chip Rev. for Yukon-EC A2 */ #define CHIP_REV_YU_EC_A3 2 /* Chip Rev. for Yukon-EC A3 */ -#define CHIP_REV_YU_EC_U_A0 0 -#define CHIP_REV_YU_EC_U_A1 1 +#define CHIP_REV_YU_EC_U_A0 1 +#define CHIP_REV_YU_EC_U_A1 2 +#define CHIP_REV_YU_FE_P_A0 0 /* Chip Rev. for Yukon-2 FE+ A0 */ + +#define CHIP_REV_YU_EX_A0 1 /* Chip Rev. for Yukon-2 EX A0 */ +#define CHIP_REV_YU_EX_B0 2 /* Chip Rev. for Yukon-2 EX B0 */ + /* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ #define Y2_STATUS_LNK2_INAC BIT_7 /* Status Link 2 inactiv (0 = activ) */ #define Y2_CLK_GAT_LNK2_DIS BIT_6 /* Disable clock gating Link 2 */ @@ -904,6 +976,18 @@ #define TST_CFG_WRITE_ON BIT_1 /* Enable Config Reg WR */ #define TST_CFG_WRITE_OFF BIT_0 /* Disable Config Reg WR */ +/* B2_GP_IO */ +#define GLB_GPIO_CLK_DEB_ENA BIT_31 /* Clock Debug Enable */ +#define GLB_GPIO_CLK_DBG_MSK 0x3c000000 /* Clock Debug */ + +#define GLB_GPIO_INT_RST_D3_DIS BIT_15 /* Disable Internal Reset After D3 to D0 */ +#define GLB_GPIO_LED_PAD_SPEED_UP BIT_14 /* LED PAD Speed Up */ +#define GLB_GPIO_STAT_RACE_DIS BIT_13 /* Status Race Disable */ +#define GLB_GPIO_TEST_SEL_MSK 0x00001800 /* Testmode Select */ +#define GLB_GPIO_TEST_SEL_BASE BIT_11 +#define GLB_GPIO_RAND_ENA BIT_10 /* Random Enable */ +#define GLB_GPIO_RAND_BIT_1 BIT_9 /* Random Bit 1 */ + /* B2_I2C_CTRL 32 bit I2C HW Control Register */ #define I2C_FLAG BIT_31 /* Start read/write if WR */ #define I2C_ADDR (0x7fff<<16) /* Bit 30..16: Addr to be RD/WR */ @@ -1025,13 +1109,16 @@ /* Bit 10..0: same as for Rx */ /* Q_F 32 bit Flag Register */ -#define F_ALM_FULL BIT_27 /* Rx FIFO: almost full */ -#define F_EMPTY BIT_27 /* Tx FIFO: empty flag */ -#define F_FIFO_EOF BIT_26 /* Tag (EOF Flag) bit in FIFO */ -#define F_WM_REACHED BIT_25 /* Watermark reached */ -#define F_M_RX_RAM_DIS BIT_24 /* MAC Rx RAM Read Port disable */ -#define F_FIFO_LEVEL (0x1f<<16) /* Bit 23..16: # of Qwords in FIFO */ -#define F_WATER_MARK 0x0007ff /* Bit 10.. 
0: Watermark */ +#define F_TX_CHK_AUTO_OFF BIT_31 /* Tx checksum auto-calc Off(Yukon EX)*/ +#define F_TX_CHK_AUTO_ON BIT_30 /* Tx checksum auto-calc On(Yukon EX)*/ +#define F_ALM_FULL BIT_28 /* Rx FIFO: almost full */ +#define F_EMPTY BIT_27 /* Tx FIFO: empty flag */ +#define F_FIFO_EOF BIT_26 /* Tag (EOF Flag) bit in FIFO */ +#define F_WM_REACHED BIT_25 /* Watermark reached */ +#define F_M_RX_RAM_DIS BIT_24 /* MAC Rx RAM Read Port disable */ +#define F_FIFO_LEVEL (0x1f<<16) + /* Bit 23..16: # of Qwords in FIFO */ +#define F_WATER_MARK 0x0007ff/* Bit 10.. 0: Watermark */ /* Queue Prefetch Unit Offsets, use Y2_PREF_Q_ADDR() to address (Yukon-2 only)*/ /* PREF_UNIT_CTRL_REG 32 bit Prefetch Control register */ @@ -1083,8 +1170,9 @@ /* Threshold values for Yukon-EC Ultra */ #define MSK_ECU_ULPP 0x0080 /* Upper Pause Threshold (multiples of 8) */ #define MSK_ECU_LLPP 0x0060 /* Lower Pause Threshold (multiples of 8) */ -#define MSK_ECU_AE_THR 0x0180 /* Almost Empty Threshold */ +#define MSK_ECU_AE_THR 0x0070 /* Almost Empty Threshold */ #define MSK_ECU_TXFF_LEV 0x01a0 /* Tx BMU FIFO Level */ +#define MSK_ECU_JUMBO_WM 0x01 #define MSK_BMU_RX_WM 0x600 /* BMU Rx Watermark */ #define MSK_BMU_TX_WM 0x600 /* BMU Tx Watermark */ @@ -1612,6 +1700,8 @@ (GM_MIB_CNT_BASE + 24) /* Multicast Frames Received OK */ #define GM_RXF_FCS_ERR \ (GM_MIB_CNT_BASE + 32) /* Rx Frame Check Seq. Error */ +#define GM_RXF_SPARE1 \ + (GM_MIB_CNT_BASE + 40) /* Rx spare 1 */ #define GM_RXO_OK_LO \ (GM_MIB_CNT_BASE + 48) /* Octets Received OK Low */ #define GM_RXO_OK_HI \ @@ -1642,8 +1732,12 @@ (GM_MIB_CNT_BASE + 152) /* Rx Frame too Long Error */ #define GM_RXF_JAB_PKT \ (GM_MIB_CNT_BASE + 160) /* Rx Jabber Packet Frame */ +#define GM_RXF_SPARE2 \ + (GM_MIB_CNT_BASE + 168) /* Rx spare 2 */ #define GM_RXE_FIFO_OV \ (GM_MIB_CNT_BASE + 176) /* Rx FIFO overflow Event */ +#define GM_RXF_SPARE3 \ + (GM_MIB_CNT_BASE + 184) /* Rx spare 3 */ #define GM_TXF_UC_OK \ (GM_MIB_CNT_BASE + 192) /* Unicast Frames Xmitted OK */ #define GM_TXF_BC_OK \ @@ -1670,6 +1764,8 @@ (GM_MIB_CNT_BASE + 280) /* 1024-1518 Byte Tx Frame */ #define GM_TXF_MAX_SZ \ (GM_MIB_CNT_BASE + 288) /* 1519-MaxSize Byte Tx Frame */ +#define GM_TXF_SPARE1 \ + (GM_MIB_CNT_BASE + 296) /* Tx spare 1 */ #define GM_TXF_COL \ (GM_MIB_CNT_BASE + 304) /* Tx Collision */ #define GM_TXF_LAT_COL \ @@ -1816,6 +1912,7 @@ GMR_FS_LONG_ERR | \ GMR_FS_MII_ERR | \ GMR_FS_BAD_FC | \ + GMR_FS_GOOD_FC | \ GMR_FS_UN_SIZE | \ GMR_FS_JABBER) @@ -1844,6 +1941,10 @@ #define RX_TRUNC_OFF BIT_26 /* disable packet truncation */ #define RX_VLAN_STRIP_ON BIT_25 /* enable VLAN stripping */ #define RX_VLAN_STRIP_OFF BIT_24 /* disable VLAN stripping */ +#define GMF_RX_OVER_ON BIT_19 /* enable flushing on receive overrun */ +#define GMF_RX_OVER_OFF BIT_18 /* disable flushing on receive overrun */ +#define GMF_ASF_RX_OVER_ON BIT_17 /* enable flushing of ASF when overrun */ +#define GMF_ASF_RX_OVER_OFF BIT_16 /* disable flushing of ASF when overrun */ #define GMF_WP_TST_ON BIT_14 /* Write Pointer Test On */ #define GMF_WP_TST_OFF BIT_13 /* Write Pointer Test Off */ #define GMF_WP_STEP BIT_12 /* Write Pointer Step/Increment */ @@ -1864,6 +1965,8 @@ #define TX_STFW_ENA BIT_30 /* Enable Store & Forward (Yukon-EC Ultra) */ #define TX_VLAN_TAG_ON BIT_25 /* enable VLAN tagging */ #define TX_VLAN_TAG_OFF BIT_24 /* disable VLAN tagging */ +#define TX_JUMBO_ENA BIT_23 /* Enable Jumbo Mode (Yukon-EC Ultra) */ +#define TX_JUMBO_DIS BIT_22 /* Disable Jumbo Mode (Yukon-EC Ultra) */ #define GMF_WSP_TST_ON BIT_18 /* Write 
Shadow Pointer Test On */ #define GMF_WSP_TST_OFF BIT_17 /* Write Shadow Pointer Test Off */ #define GMF_WSP_STEP BIT_16 /* Write Shadow Pointer Step/Increment */ @@ -1903,6 +2006,28 @@ #define Y2_ASF_UC_STATE (3<<2) /* ASF uC State */ #define Y2_ASF_CLK_HALT 0 /* ASF system clock stopped */ +/* B28_Y2_ASF_HCU_CCSR 32bit CPU Control and Status Register (Yukon EX) */ +#define Y2_ASF_HCU_CCSR_SMBALERT_MONITOR BIT_27 /* SMBALERT pin monitor */ +#define Y2_ASF_HCU_CCSR_CPU_SLEEP BIT_26 /* CPU sleep status */ +#define Y2_ASF_HCU_CCSR_CS_TO BIT_25 /* Clock Stretching Timeout */ +#define Y2_ASF_HCU_CCSR_WDOG BIT_24 /* Watchdog Reset */ +#define Y2_ASF_HCU_CCSR_CLR_IRQ_HOST BIT_17 /* Clear IRQ_HOST */ +#define Y2_ASF_HCU_CCSR_SET_IRQ_HCU BIT_16 /* Set IRQ_HCU */ +#define Y2_ASF_HCU_CCSR_AHB_RST BIT_9 /* Reset AHB bridge */ +#define Y2_ASF_HCU_CCSR_CPU_RST_MODE BIT_8 /* CPU Reset Mode */ +#define Y2_ASF_HCU_CCSR_SET_SYNC_CPU BIT_5 +#define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE1 BIT_4 +#define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE0 BIT_3 +#define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK (BIT_4 | BIT_3) /* CPU Clock Divide */ +#define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_BASE BIT_3 +#define Y2_ASF_HCU_CCSR_OS_PRSNT BIT_2 /* ASF OS Present */ + /* Microcontroller State */ +#define Y2_ASF_HCU_CCSR_UC_STATE_MSK 3 +#define Y2_ASF_HCU_CCSR_UC_STATE_BASE BIT_0 +#define Y2_ASF_HCU_CCSR_ASF_RESET 0 +#define Y2_ASF_HCU_CCSR_ASF_HALTED BIT_1 +#define Y2_ASF_HCU_CCSR_ASF_RUNNING BIT_0 + /* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */ /* This register is used by the ASF firmware */ #define Y2_ASF_CLR_ASFI BIT_1 /* Clear host IRQ */ @@ -1916,6 +2041,14 @@ #define SC_STAT_RST_SET BIT_0 /* Set Status Unit Reset */ /* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ +#define GMC_SEC_RST BIT_15 /* MAC SEC RST */ +#define GMC_SEC_RST_OFF BIT_14 /* MAC SEC RST Off */ +#define GMC_BYP_MACSECRX_ON BIT_13 /* Bypass MAC SEC RX */ +#define GMC_BYP_MACSECRX_OFF BIT_12 /* Bypass MAC SEC RX Off */ +#define GMC_BYP_MACSECTX_ON BIT_11 /* Bypass MAC SEC TX */ +#define GMC_BYP_MACSECTX_OFF BIT_10 /* Bypass MAC SEC TX Off */ +#define GMC_BYP_RETR_ON BIT_9 /* Bypass MAC retransmit FIFO On */ +#define GMC_BYP_RETR_OFF BIT_8 /* Bypass MAC retransmit FIFO Off */ #define GMC_H_BURST_ON BIT_7 /* Half Duplex Burst Mode On */ #define GMC_H_BURST_OFF BIT_6 /* Half Duplex Burst Mode Off */ #define GMC_F_LOOPB_ON BIT_5 /* FIFO Loopback On */ @@ -2021,35 +2154,6 @@ /* GPHY address (bits 15..11 of SMI control reg) */ #define PHY_ADDR_MARV 0 -/*-RMV- DWORD 1: Deviations */ -#define HWF_WA_DEV_4200 0x10200000UL /*-RMV- 4.200 (D3 Blue Screen)*/ -#define HWF_WA_DEV_4185CS 0x10100000UL /*-RMV- 4.185 (ECU 100 CS cal)*/ -#define HWF_WA_DEV_4185 0x10080000UL /*-RMV- 4.185 (ECU Tx h check)*/ -#define HWF_WA_DEV_4167 0x10040000UL /*-RMV- 4.167 (Rx OvSize Hang)*/ -#define HWF_WA_DEV_4152 0x10020000UL /*-RMV- 4.152 (RSS issue) */ -#define HWF_WA_DEV_4115 0x10010000UL /*-RMV- 4.115 (Rx MAC FIFO) */ -#define HWF_WA_DEV_4109 0x10008000UL /*-RMV- 4.109 (BIU hang) */ -#define HWF_WA_DEV_483 0x10004000UL /*-RMV- 4.83 (Rx TCP wrong) */ -#define HWF_WA_DEV_479 0x10002000UL /*-RMV- 4.79 (Rx BMU hang II) */ -#define HWF_WA_DEV_472 0x10001000UL /*-RMV- 4.72 (GPHY2 MDC clk) */ -#define HWF_WA_DEV_463 0x10000800UL /*-RMV- 4.63 (Rx BMU hang I) */ -#define HWF_WA_DEV_427 0x10000400UL /*-RMV- 4.27 (Tx Done Rep) */ -#define HWF_WA_DEV_42 0x10000200UL /*-RMV- 4.2 (pref unit burst) */ -#define HWF_WA_DEV_46 0x10000100UL /*-RMV- 4.6 (CPU crash II) */ -#define HWF_WA_DEV_43_418 
0x10000080UL /*-RMV- 4.3 & 4.18 (PCI unexp */ -/*-RMV- compl&Stat BMU deadl) */ -#define HWF_WA_DEV_420 0x10000040UL /*-RMV- 4.20 (Status BMU ov) */ -#define HWF_WA_DEV_423 0x10000020UL /*-RMV- 4.23 (TCP Segm Hang) */ -#define HWF_WA_DEV_424 0x10000010UL /*-RMV- 4.24 (MAC reg overwr) */ -#define HWF_WA_DEV_425 0x10000008UL /*-RMV- 4.25 (Magic packet */ -/*-RMV- with odd offset) */ -#define HWF_WA_DEV_428 0x10000004UL /*-RMV- 4.28 (Poll-U &BigEndi)*/ -#define HWF_WA_FIFO_FLUSH_YLA0 0x10000002UL /*-RMV- dis Rx GMAC FIFO Flush*/ - -#define HW_FEATURE(sc, f) \ - (((((sc)->msk_hw_feature & 0x30000000) >> 28) & ((f) & 0x0fffffff)) != 0) - - #define MSK_ADDR_LO(x) ((uint64_t) (x) & 0xffffffffUL) #define MSK_ADDR_HI(x) ((uint64_t) (x) >> 32) @@ -2138,6 +2242,8 @@ #define OP_ADDR64VLAN (OP_ADDR64 | OP_VLAN) #define OP_LRGLEN 0x24000000 #define OP_LRGLENVLAN (OP_LRGLEN | OP_VLAN) +#define OP_MSS 0x28000000 +#define OP_MSSVLAN (OP_MSS | OP_VLAN) #define OP_BUFFER 0x40000000 #define OP_PACKET 0x41000000 #define OP_LARGESEND 0x43000000 @@ -2156,8 +2262,19 @@ #define OP_PUTIDX 0x70000000 #define STLE_OP_MASK 0xff000000 +#define STLE_CSS_MASK 0x00ff0000 #define STLE_LEN_MASK 0x0000ffff +/* CSS defined in status LE(valid for descriptor V2 format). */ +#define CSS_TCPUDP_CSUM_OK 0x00800000 +#define CSS_UDP 0x00400000 +#define CSS_TCP 0x00200000 +#define CSS_IPFRAG 0x00100000 +#define CSS_IPV6 0x00080000 +#define CSS_IPV4_CSUM_OK 0x00040000 +#define CSS_IPV4 0x00020000 +#define CSS_PORT 0x00010000 + /* Descriptor Bit Definition */ /* TxCtrl Transmit Buffer Control Field */ /* RxCtrl Receive Buffer Control Field */ @@ -2182,12 +2299,12 @@ #define MSK_TX_RING_CNT 256 #define MSK_RX_RING_CNT 256 +#define MSK_RX_BUF_ALIGN 8 #define MSK_JUMBO_RX_RING_CNT MSK_RX_RING_CNT #define MSK_STAT_RING_CNT ((1 + 3) * (MSK_TX_RING_CNT + MSK_RX_RING_CNT)) #define MSK_MAXTXSEGS 32 #define MSK_TSO_MAXSGSIZE 4096 #define MSK_TSO_MAXSIZE (65535 + sizeof(struct ether_vlan_header)) -#define MSK_MAXRXSEGS 32 /* * It seems that the hardware requires extra decriptors(LEs) to offload @@ -2214,21 +2331,7 @@ #define MSK_MAX_FRAMELEN \ (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN) #define MSK_MIN_FRAMELEN (ETHER_MIN_LEN - ETHER_CRC_LEN) -#define MSK_JSLOTS ((MSK_RX_RING_CNT * 3) / 2) -#define MSK_JRAWLEN (MSK_JUMBO_FRAMELEN + ETHER_ALIGN) -#define MSK_JLEN (MSK_JRAWLEN + (sizeof(uint64_t) - \ - (MSK_JRAWLEN % sizeof(uint64_t)))) -#define MSK_JPAGESZ PAGE_SIZE -#define MSK_RESID \ - (MSK_JPAGESZ - (MSK_JLEN * MSK_JSLOTS) % MSK_JPAGESZ) -#define MSK_JMEM ((MSK_JLEN * MSK_JSLOTS) + MSK_RESID) - -struct msk_jpool_entry { - int slot; - SLIST_ENTRY(msk_jpool_entry) jpool_entries; -}; - struct msk_txdesc { struct mbuf *tx_m; bus_dmamap_t tx_dmamap; @@ -2253,10 +2356,6 @@ bus_dmamap_t msk_rx_ring_map; bus_dmamap_t msk_rx_sparemap; bus_dma_tag_t msk_jumbo_rx_tag; - bus_dma_tag_t msk_jumbo_tag; - bus_dmamap_t msk_jumbo_map; - bus_dma_tag_t msk_jumbo_mtag; - caddr_t msk_jslots[MSK_JSLOTS]; struct msk_rxdesc msk_jumbo_rxdesc[MSK_JUMBO_RX_RING_CNT]; bus_dma_tag_t msk_jumbo_rx_ring_tag; bus_dmamap_t msk_jumbo_rx_ring_map; @@ -2278,8 +2377,6 @@ bus_addr_t msk_rx_ring_paddr; struct msk_rx_desc *msk_jumbo_rx_ring; bus_addr_t msk_jumbo_rx_ring_paddr; - void *msk_jumbo_buf; - bus_addr_t msk_jumbo_buf_paddr; }; #define MSK_TX_RING_ADDR(sc, i) \ @@ -2311,15 +2408,67 @@ #define MSK_TX_TIMEOUT 5 #define MSK_PUT_WM 10 +struct msk_mii_data { + int port; + uint32_t pmd; + int mii_flags; +}; + /* Forward decl. 
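With the V2 descriptor format the status LE control word is split three ways: opcode in the top byte (STLE_OP_MASK), checksum status in the next byte (STLE_CSS_MASK), and frame length in the low 16 bits (STLE_LEN_MASK). A tiny decoder over those masks (the 0x60000000 opcode in the example is made up; only the masks and CSS bit positions come from this header):

#include <stdint.h>
#include <stdio.h>

#define STLE_OP_MASK    0xff000000
#define STLE_CSS_MASK   0x00ff0000
#define STLE_LEN_MASK   0x0000ffff

static void
decode_stle(uint32_t control)
{
        uint32_t op  = control & STLE_OP_MASK;
        uint32_t css = (control & STLE_CSS_MASK) >> 16;
        uint32_t len = control & STLE_LEN_MASK;

        printf("op 0x%08x css 0x%02x len %u\n",
            (unsigned)op, (unsigned)css, (unsigned)len);
}

int
main(void)
{
        /* Made-up opcode, CSS_IPV4 | CSS_IPV4_CSUM_OK, 1514-byte frame. */
        decode_stle(0x60000000 | 0x00020000 | 0x00040000 | 1514);
        return (0);
}
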
*/ struct msk_if_softc; +struct msk_hw_stats { + /* Rx stats. */ + uint32_t rx_ucast_frames; + uint32_t rx_bcast_frames; + uint32_t rx_pause_frames; + uint32_t rx_mcast_frames; + uint32_t rx_crc_errs; + uint32_t rx_spare1; + uint64_t rx_good_octets; + uint64_t rx_bad_octets; + uint32_t rx_runts; + uint32_t rx_runt_errs; + uint32_t rx_pkts_64; + uint32_t rx_pkts_65_127; + uint32_t rx_pkts_128_255; + uint32_t rx_pkts_256_511; + uint32_t rx_pkts_512_1023; + uint32_t rx_pkts_1024_1518; + uint32_t rx_pkts_1519_max; + uint32_t rx_pkts_too_long; + uint32_t rx_pkts_jabbers; + uint32_t rx_spare2; + uint32_t rx_fifo_oflows; + uint32_t rx_spare3; + /* Tx stats. */ + uint32_t tx_ucast_frames; + uint32_t tx_bcast_frames; + uint32_t tx_pause_frames; + uint32_t tx_mcast_frames; + uint64_t tx_octets; + uint32_t tx_pkts_64; + uint32_t tx_pkts_65_127; + uint32_t tx_pkts_128_255; + uint32_t tx_pkts_256_511; + uint32_t tx_pkts_512_1023; + uint32_t tx_pkts_1024_1518; + uint32_t tx_pkts_1519_max; + uint32_t tx_spare1; + uint32_t tx_colls; + uint32_t tx_late_colls; + uint32_t tx_excess_colls; + uint32_t tx_multi_colls; + uint32_t tx_single_colls; + uint32_t tx_underflows; +}; + /* Softc for the Marvell Yukon II controller. */ struct msk_softc { struct resource *msk_res[1]; /* I/O resource */ - int msk_res_type; - int msk_res_id; + struct resource_spec *msk_res_spec; struct resource *msk_irq[2]; /* IRQ resources */ + struct resource_spec *msk_irq_spec; void *msk_intrhand[2]; /* irq handler handle */ device_t msk_dev; uint8_t msk_hw_id; @@ -2328,14 +2477,10 @@ uint8_t msk_num_port; int msk_ramsize; /* amount of SRAM on NIC */ uint32_t msk_pmd; /* physical media type */ - uint32_t msk_coppertype; uint32_t msk_intrmask; uint32_t msk_intrhwemask; - int msk_suspended; - int msk_hw_feature; + uint32_t msk_pflags; int msk_clock; - int msk_marvell_phy; - int msk_msi; struct msk_if_softc *msk_if[2]; device_t msk_devs[2]; int msk_txqsize; @@ -2373,7 +2518,20 @@ int msk_framesize; int msk_phytype; int msk_phyaddr; - int msk_link; + uint32_t msk_flags; +#define MSK_FLAG_MSI 0x0001 +#define MSK_FLAG_FASTETHER 0x0004 +#define MSK_FLAG_JUMBO 0x0008 +#define MSK_FLAG_JUMBO_NOCSUM 0x0010 +#define MSK_FLAG_RAMBUF 0x0020 +#define MSK_FLAG_DESCV2 0x0040 +#define MSK_FLAG_AUTOTX_CSUM 0x0080 +#define MSK_FLAG_NOHWVLAN 0x0100 +#define MSK_FLAG_NORXCHK 0x0200 +#define MSK_FLAG_NORX_CSUM 0x0400 +#define MSK_FLAG_SUSPEND 0x2000 +#define MSK_FLAG_DETACH 0x4000 +#define MSK_FLAG_LINK 0x8000 struct callout msk_tick_ch; int msk_watchdog_timer; uint32_t msk_txq; /* Tx. Async Queue offset */ @@ -2382,19 +2540,12 @@ struct msk_chain_data msk_cdata; struct msk_ring_data msk_rdata; struct msk_softc *msk_softc; /* parent controller */ - struct task msk_link_task; + struct msk_hw_stats msk_stats; struct task msk_tx_task; int msk_if_flags; - int msk_detach; uint16_t msk_vtag; /* VLAN tag id. */ - SLIST_HEAD(__msk_jfreehead, msk_jpool_entry) msk_jfree_listhead; - SLIST_HEAD(__msk_jinusehead, msk_jpool_entry) msk_jinuse_listhead; - struct mtx msk_jlist_mtx; }; -#define MSK_JLIST_LOCK(_sc) mtx_lock(&(_sc)->msk_jlist_mtx) -#define MSK_JLIST_UNLOCK(_sc) mtx_unlock(&(_sc)->msk_jlist_mtx) - #define MSK_TIMEOUT 1000 #define MSK_PHY_POWERUP 1 #define MSK_PHY_POWERDOWN 0
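The softc cleanup at the end folds what used to be separate int fields (msk_link, msk_suspended, msk_detach, msk_msi, ...) into the msk_flags and msk_pflags bit masks defined above, tested and updated with the usual read-modify-write idiom, for example:

#include <stdint.h>
#include <stdio.h>

#define MSK_FLAG_MSI            0x0001
#define MSK_FLAG_JUMBO          0x0008
#define MSK_FLAG_SUSPEND        0x2000
#define MSK_FLAG_LINK           0x8000

int
main(void)
{
        uint32_t flags = MSK_FLAG_MSI | MSK_FLAG_JUMBO;

        flags |= MSK_FLAG_LINK;         /* link came up */
        flags &= ~MSK_FLAG_JUMBO;       /* jumbo allocation failed */

        if ((flags & MSK_FLAG_LINK) != 0 && (flags & MSK_FLAG_SUSPEND) == 0)
                printf("link up, not suspended, flags 0x%04x\n", (unsigned)flags);
        return (0);
}
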