Index: if_axe.c
===================================================================
--- if_axe.c	(revision 224468)
+++ if_axe.c	(working copy)
@@ -76,25 +76,31 @@
  * http://www.asix.com.tw/FrootAttach/datasheet/AX88772_datasheet_Rev10.pdf
  */
 
-#include
-#include
 #include
-#include
-#include
 #include
+#include
+#include
+#include
 #include
-#include
+#include
+#include
+#include
 #include
-#include
 #include
-#include
+#include
+#include
 #include
 #include
 
-#include
-#include
-#include
-#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
 #include
 #include
 #include
@@ -122,6 +128,8 @@
  */
 #define	AXE_178_MAX_FRAME_BURST	1
 
+#define	AXE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
+
 #ifdef USB_DEBUG
 static int axe_debug = 0;
 
@@ -186,6 +194,7 @@
 static uether_fn_t axe_setmulti;
 static uether_fn_t axe_setpromisc;
 
+static int	axe_attach_post_sub(struct usb_ether *);
 static int	axe_ifmedia_upd(struct ifnet *);
 static void	axe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
 static int	axe_cmd(struct axe_softc *, int, int, int, void *);
@@ -195,6 +204,11 @@
 static void	axe_ax88772a_init(struct axe_softc *);
 static void	axe_ax88772b_init(struct axe_softc *);
 static int	axe_get_phyno(struct axe_softc *, int);
+static int	axe_ioctl(struct ifnet *, u_long, caddr_t);
+static int	axe_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
+static int	axe_rxeof(struct usb_ether *, struct usb_page_cache *,
+		    unsigned int offset, unsigned int, struct axe_csum_hdr *);
+static void	axe_csum_cfg(struct usb_ether *);
 
 static const struct usb_config axe_config[AXE_N_TRANSFER] = {
 
@@ -266,6 +280,7 @@
 
 static const struct usb_ether_methods axe_ue_methods = {
 	.ue_attach_post = axe_attach_post,
+	.ue_attach_post_sub = axe_attach_post_sub,
 	.ue_start = axe_start,
 	.ue_init = axe_init,
 	.ue_stop = axe_stop,
@@ -304,9 +319,6 @@
 	uint16_t val;
 	int locked;
 
-	if (sc->sc_phyno != phy)
-		return (0);
-
 	locked = mtx_owned(&sc->sc_mtx);
 	if (!locked)
 		AXE_LOCK(sc);
@@ -338,10 +350,6 @@
 	int locked;
 
 	val = htole32(val);
-
-	if (sc->sc_phyno != phy)
-		return (0);
-
 	locked = mtx_owned(&sc->sc_mtx);
 	if (!locked)
 		AXE_LOCK(sc);
@@ -372,7 +380,7 @@
 	if (mii == NULL || ifp == NULL ||
 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 		goto done;
-	
+
 	sc->sc_flags &= ~AXE_FLAG_LINK;
 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
 	    (IFM_ACTIVE | IFM_AVALID)) {
@@ -390,14 +398,23 @@
 			break;
 		}
 	}
-	
+
 	/* Lost link, do nothing. */
 	if ((sc->sc_flags & AXE_FLAG_LINK) == 0)
 		goto done;
-	
+
 	val = 0;
-	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
+	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
 		val |= AXE_MEDIA_FULL_DUPLEX;
+		if (AXE_IS_178_FAMILY(sc)) {
+			if ((IFM_OPTIONS(mii->mii_media_active) &
+			    IFM_ETH_TXPAUSE) != 0)
+				val |= AXE_178_MEDIA_TXFLOW_CONTROL_EN;
+			if ((IFM_OPTIONS(mii->mii_media_active) &
+			    IFM_ETH_RXPAUSE) != 0)
+				val |= AXE_178_MEDIA_RXFLOW_CONTROL_EN;
+		}
+	}
 	if (AXE_IS_178_FAMILY(sc)) {
 		val |= AXE_178_MEDIA_RX_EN | AXE_178_MEDIA_MAGIC;
 		if ((sc->sc_flags & AXE_FLAG_178) != 0)
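Note: the statchg hunk above only sets the TX/RX flow-control bits when the link is full duplex and the PHY actually negotiated pause; IFM_ETH_TXPAUSE/IFM_ETH_RXPAUSE only show up in mii_media_active because the new axe_attach_post_sub() (further down in this patch) passes MIIF_DOPAUSE to mii_attach(). A minimal sketch of the same mapping follows; the helper name axe_medium_from_mii() is hypothetical, everything else comes from if_axereg.h and net/if_media.h.

/*
 * Sketch only: derive the 178-family medium status bits from the
 * negotiated ifmedia word.  axe_medium_from_mii() is not part of
 * the patch.
 */
static uint16_t
axe_medium_from_mii(int media_active)
{
	uint16_t val;

	val = 0;
	if ((IFM_OPTIONS(media_active) & IFM_FDX) != 0) {
		val |= AXE_MEDIA_FULL_DUPLEX;
		/* Pause frames are only meaningful on full-duplex links. */
		if ((IFM_OPTIONS(media_active) & IFM_ETH_TXPAUSE) != 0)
			val |= AXE_178_MEDIA_TXFLOW_CONTROL_EN;
		if ((IFM_OPTIONS(media_active) & IFM_ETH_RXPAUSE) != 0)
			val |= AXE_178_MEDIA_RXFLOW_CONTROL_EN;
	}
	return (val);
}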
@@ -845,6 +862,53 @@
 	axe_cmd(sc, AXE_CMD_READ_IPG012, 0, 0, sc->sc_ipgs);
 }
 
+static int
+axe_attach_post_sub(struct usb_ether *ue)
+{
+	struct axe_softc *sc;
+	struct ifnet *ifp;
+	u_int adv_pause;
+	int error;
+
+	sc = uether_getsc(ue);
+	ifp = ue->ue_ifp;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_start = uether_start;
+	ifp->if_ioctl = axe_ioctl;
+	ifp->if_init = uether_init;
+	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
+	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
+	IFQ_SET_READY(&ifp->if_snd);
+
+	if (AXE_IS_178_FAMILY(sc))
+		ifp->if_capabilities |= IFCAP_VLAN_MTU;
+	if (sc->sc_flags & AXE_FLAG_772B) {
+		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_RXCSUM;
+		ifp->if_hwassist = AXE_CSUM_FEATURES;
+		/*
+		 * Checksum offloading on the AX88772B also works with
+		 * VLAN tagged frames, but there is no way to take
+		 * advantage of that because vlan(4) assumes
+		 * IFCAP_VLAN_HWTAGGING is a prerequisite for checksum
+		 * offloading with VLAN. The VLAN hardware tagging
+		 * support of the AX88772B is very limited, so it is
+		 * not possible to announce IFCAP_VLAN_HWTAGGING.
+		 */
+	}
+	ifp->if_capenable = ifp->if_capabilities;
+	if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B | AXE_FLAG_178))
+		adv_pause = MIIF_DOPAUSE;
+	else
+		adv_pause = 0;
+	mtx_lock(&Giant);
+	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
+	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
+	    BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, adv_pause);
+	mtx_unlock(&Giant);
+
+	return (error);
+}
+
 /*
  * Probe for a AX88172 chip.
  */
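Note: axe_attach_post_sub() advertises IFCAP_TXCSUM/IFCAP_RXCSUM for the AX88772B and seeds if_hwassist with AXE_CSUM_FEATURES, so the stack knows which protocols it may hand to the hardware. The invariant is that if_hwassist mirrors the IFCAP_TXCSUM bit in if_capenable; axe_ioctl() at the end of the if_axe.c changes open-codes exactly this. A sketch, with a hypothetical helper name:

/*
 * Sketch only (axe_sync_hwassist() is not part of the patch): keep
 * if_hwassist consistent with the user-selected IFCAP_TXCSUM bit.
 */
static void
axe_sync_hwassist(struct ifnet *ifp)
{
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
		ifp->if_hwassist |= AXE_CSUM_FEATURES;
	else
		ifp->if_hwassist &= ~AXE_CSUM_FEATURES;
}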
@@ -930,53 +994,16 @@
 {
 	struct axe_softc *sc = usbd_xfer_softc(xfer);
 	struct usb_ether *ue = &sc->sc_ue;
-	struct ifnet *ifp = uether_getifp(ue);
-	struct axe_sframe_hdr hdr;
 	struct usb_page_cache *pc;
-	int err, pos, len;
 	int actlen;
 
 	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 
 	switch (USB_GET_STATE(xfer)) {
 	case USB_ST_TRANSFERRED:
-		pos = 0;
-		len = 0;
-		err = 0;
-
 		pc = usbd_xfer_get_frame(xfer, 0);
-		if (AXE_IS_178_FAMILY(sc)) {
-			while (pos < actlen) {
-				if ((pos + sizeof(hdr)) > actlen) {
-					/* too little data */
-					err = EINVAL;
-					break;
-				}
-				usbd_copy_out(pc, pos, &hdr, sizeof(hdr));
+		axe_rx_frame(ue, pc, actlen);
 
-				if ((hdr.len ^ hdr.ilen) != 0xFFFF) {
-					/* we lost sync */
-					err = EINVAL;
-					break;
-				}
-				pos += sizeof(hdr);
-
-				len = le16toh(hdr.len);
-				if ((pos + len) > actlen) {
-					/* invalid length */
-					err = EINVAL;
-					break;
-				}
-				uether_rxbuf(ue, pc, pos, len);
-
-				pos += len + (len % 2);
-			}
-		} else
-			uether_rxbuf(ue, pc, 0, actlen);
-
-		if (err != 0)
-			ifp->if_ierrors++;
-
 		/* FALLTHROUGH */
 	case USB_ST_SETUP:
 tr_setup:
@@ -998,6 +1025,130 @@
 	}
 }
 
+static int
+axe_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
+{
+	struct axe_softc *sc;
+	struct axe_sframe_hdr hdr;
+	struct axe_csum_hdr csum_hdr;
+	int error, len, pos;
+
+	sc = uether_getsc(ue);
+	pos = 0;
+	len = 0;
+	error = 0;
+	if ((sc->sc_flags & AXE_FLAG_STD_FRAME) != 0) {
+		while (pos < actlen) {
+			if ((pos + sizeof(hdr)) > actlen) {
+				/* too little data */
+				error = EINVAL;
+				break;
+			}
+			usbd_copy_out(pc, pos, &hdr, sizeof(hdr));
+
+			if ((hdr.len ^ hdr.ilen) != 0xFFFF) {
+				/* we lost sync */
+				error = EINVAL;
+				break;
+			}
+			pos += sizeof(hdr);
+			len = le16toh(hdr.len);
+			if (pos + len > actlen) {
+				/* invalid length */
+				error = EINVAL;
+				break;
+			}
+			axe_rxeof(ue, pc, pos, len, NULL);
+			pos += len + (len % 2);
+		}
+	} else if ((sc->sc_flags & AXE_FLAG_CSUM_FRAME) != 0) {
+		while (pos < actlen) {
+			if ((pos + sizeof(csum_hdr)) > actlen) {
+				/* too little data */
+				error = EINVAL;
+				break;
+			}
+			usbd_copy_out(pc, pos, &csum_hdr, sizeof(csum_hdr));
+
+			csum_hdr.len = le16toh(csum_hdr.len);
+			csum_hdr.ilen = le16toh(csum_hdr.ilen);
+			csum_hdr.cstatus = le16toh(csum_hdr.cstatus);
+			if ((AXE_RXBYTES(csum_hdr.len) ^
+			    AXE_RXBYTES(csum_hdr.ilen)) != 0x7FF) {
+				/* we lost sync */
+				error = EINVAL;
+				break;
+			}
+			/*
+			 * Get the total transferred frame length including
+			 * the checksum header. The length should be a
+			 * multiple of 4.
+			 */
+			len = sizeof(csum_hdr) + AXE_RXBYTES(csum_hdr.len);
+			len = (len + 3) & ~3;
+			if (pos + len > actlen) {
+				/* invalid length */
+				error = EINVAL;
+				break;
+			}
+			axe_rxeof(ue, pc, pos + sizeof(csum_hdr),
+			    AXE_RXBYTES(csum_hdr.len), &csum_hdr);
+			pos += len;
+		}
+	} else
+		axe_rxeof(ue, pc, 0, actlen, NULL);
+
+	if (error != 0)
+		ue->ue_ifp->if_ierrors++;
+	return (error);
+}
+
+static int
+axe_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset,
+    unsigned int len, struct axe_csum_hdr *csum_hdr)
+{
+	struct ifnet *ifp = ue->ue_ifp;
+	struct mbuf *m;
+
+	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
+		ifp->if_ierrors++;
+		return (EINVAL);
+	}
+
+	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+	if (m == NULL) {
+		ifp->if_iqdrops++;
+		return (ENOMEM);
+	}
+	m->m_len = m->m_pkthdr.len = MCLBYTES;
+	m_adj(m, ETHER_ALIGN);
+
+	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);
+
+	ifp->if_ipackets++;
+	m->m_pkthdr.rcvif = ifp;
+	m->m_pkthdr.len = m->m_len = len;
+
+	if (csum_hdr != NULL && csum_hdr->cstatus & AXE_CSUM_HDR_L3_TYPE_IPV4) {
+		if ((csum_hdr->cstatus & (AXE_CSUM_HDR_L4_CSUM_ERR |
+		    AXE_CSUM_HDR_L3_CSUM_ERR)) == 0) {
+			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
+			    CSUM_IP_VALID;
+			if ((csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) ==
+			    AXE_CSUM_HDR_L4_TYPE_TCP ||
+			    (csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) ==
+			    AXE_CSUM_HDR_L4_TYPE_UDP) {
+				m->m_pkthdr.csum_flags |=
+				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+				m->m_pkthdr.csum_data = 0xffff;
+			}
+		}
+	}
+
+	_IF_ENQUEUE(&ue->ue_rxq, m);
+	return (0);
+}
+
 #if ((AXE_BULK_BUF_SIZE >= 0x10000) || (AXE_BULK_BUF_SIZE < (MCLBYTES+4)))
 #error "Please update axe_bulk_write_callback()!"
 #endif
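Note: with RX checksum offloading enabled, the AX88772B prefixes every frame in the bulk-in buffer with a 6-byte axe_csum_hdr (assuming AXE_APPEND_PARTIAL_CSUM stays undefined) and pads each header-plus-frame span to a multiple of 4, which is what the (len + 3) & ~3 step above accounts for. A worked sketch of that offset arithmetic; axe_next_frame_off() is hypothetical:

/*
 * Sketch only: offset of the next frame in the transfer buffer.
 * Example: a 60-byte frame gives 6 + 60 = 66, padded up to 68.
 */
static int
axe_next_frame_off(int pos, const struct axe_csum_hdr *hdr)
{
	int len;

	len = sizeof(*hdr) + AXE_RXBYTES(le16toh(hdr->len));
	return (pos + ((len + 3) & ~3));
}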
@@ -1040,6 +1191,21 @@
 			if (AXE_IS_178_FAMILY(sc)) {
 				hdr.len = htole16(m->m_pkthdr.len);
 				hdr.ilen = ~hdr.len;
+				/*
+				 * If the upper stack already computed the
+				 * checksum, tell the controller not to
+				 * insert its own checksum even when
+				 * checksum offloading is enabled.
+				 */
+				if (ifp->if_capabilities & IFCAP_TXCSUM) {
+					if ((m->m_pkthdr.csum_flags &
+					    AXE_CSUM_FEATURES) != 0)
+						hdr.len |= htole16(
+						    AXE_TX_CSUM_PSEUDO_HDR);
+					else
+						hdr.len |= htole16(
+						    AXE_TX_CSUM_DIS);
+				}
 				usbd_copy_in(pc, pos, &hdr, sizeof(hdr));
 				pos += sizeof(hdr);
 				usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
@@ -1131,6 +1297,34 @@
 }
 
 static void
+axe_csum_cfg(struct usb_ether *ue)
+{
+	struct axe_softc *sc;
+	struct ifnet *ifp;
+	uint16_t csum1, csum2;
+
+	sc = uether_getsc(ue);
+	AXE_LOCK_ASSERT(sc, MA_OWNED);
+
+	if ((sc->sc_flags & AXE_FLAG_772B) != 0) {
+		ifp = uether_getifp(ue);
+		csum1 = 0;
+		csum2 = 0;
+		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
+			csum1 |= AXE_TXCSUM_IP | AXE_TXCSUM_TCP |
+			    AXE_TXCSUM_UDP;
+		axe_cmd(sc, AXE_772B_CMD_WRITE_TXCSUM, csum2, csum1, NULL);
+		csum1 = 0;
+		csum2 = 0;
+		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+			csum1 |= AXE_RXCSUM_IP | AXE_RXCSUM_IPVE |
+			    AXE_RXCSUM_TCP | AXE_RXCSUM_UDP | AXE_RXCSUM_ICMP |
+			    AXE_RXCSUM_IGMP;
+		axe_cmd(sc, AXE_772B_CMD_WRITE_RXCSUM, csum2, csum1, NULL);
+	}
+}
+
+static void
 axe_init(struct usb_ether *ue)
 {
 	struct axe_softc *sc = uether_getsc(ue);
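Note: on the 178 family each outgoing frame is preceded by a 4-byte axe_sframe_hdr carrying the length and its complement; the TX hunk above overloads the two high bits of that length word, setting AXE_TX_CSUM_PSEUDO_HDR when the stack requested offloading for the frame (m_pkthdr.csum_flags carries one of AXE_CSUM_FEATURES) and AXE_TX_CSUM_DIS otherwise, and axe_csum_cfg() then programs the per-protocol enable bits through axe_cmd(). A condensed sketch of the header assembly; axe_fill_tx_hdr() is hypothetical and its txcsum argument stands in for the driver's IFCAP_TXCSUM test:

/*
 * Sketch only: build the 178-family TX SOF header with the checksum
 * control bits added by this patch.
 */
static void
axe_fill_tx_hdr(struct axe_sframe_hdr *hdr, struct mbuf *m, int txcsum)
{
	hdr->len = htole16(m->m_pkthdr.len);
	hdr->ilen = ~hdr->len;
	if (txcsum) {
		if ((m->m_pkthdr.csum_flags & AXE_CSUM_FEATURES) != 0)
			hdr->len |= htole16(AXE_TX_CSUM_PSEUDO_HDR);
		else
			hdr->len |= htole16(AXE_TX_CSUM_DIS);
	}
}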
@@ -1147,27 +1341,36 @@
 
 	axe_reset(sc);
 
-	/* Set MAC address. */
-	if (AXE_IS_178_FAMILY(sc))
+	/* Set MAC address and transmitter IPG values. */
+	if (AXE_IS_178_FAMILY(sc)) {
 		axe_cmd(sc, AXE_178_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp));
-	else
-		axe_cmd(sc, AXE_172_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp));
-
-	/* Set transmitter IPG values */
-	if (AXE_IS_178_FAMILY(sc))
 		axe_cmd(sc, AXE_178_CMD_WRITE_IPG012, sc->sc_ipgs[2],
 		    (sc->sc_ipgs[1] << 8) | (sc->sc_ipgs[0]), NULL);
-	else {
+	} else {
+		axe_cmd(sc, AXE_172_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp));
 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG0, 0, sc->sc_ipgs[0], NULL);
 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG1, 0, sc->sc_ipgs[1], NULL);
 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG2, 0, sc->sc_ipgs[2], NULL);
 	}
 
-	/* AX88772B uses different maximum frame burst configuration. */
-	if (sc->sc_flags & AXE_FLAG_772B)
+	if (AXE_IS_178_FAMILY(sc)) {
+		sc->sc_flags &= ~(AXE_FLAG_STD_FRAME | AXE_FLAG_CSUM_FRAME);
+		if ((sc->sc_flags & AXE_FLAG_772B) != 0 &&
+		    (ifp->if_capenable & IFCAP_RXCSUM) != 0)
+			sc->sc_flags |= AXE_FLAG_CSUM_FRAME;
+		else
+			sc->sc_flags |= AXE_FLAG_STD_FRAME;
+	}
+
+	/* Configure TX/RX checksum offloading. */
+	axe_csum_cfg(ue);
+
+	if (sc->sc_flags & AXE_FLAG_772B) {
+		/* AX88772B uses different maximum frame burst configuration. */
 		axe_cmd(sc, AXE_772B_CMD_RXCTL_WRITE_CFG,
 		    ax88772b_mfb_table[AX88772B_MFB_16K].threshold,
 		    ax88772b_mfb_table[AX88772B_MFB_16K].byte_cnt, NULL);
+	}
 
 	/* Enable receiver, set RX mode. */
 	rxmode = (AXE_RXCMD_MULTICAST | AXE_RXCMD_ENABLE);
@@ -1175,11 +1378,17 @@
 	if (sc->sc_flags & AXE_FLAG_772B) {
 		/*
 		 * Select RX header format type 1. Aligning IP
-		 * header on 4 byte boundary is not needed
+		 * header on 4 byte boundary is not needed when
+		 * the checksum offloading feature is not used
 		 * because we always copy the received frame in
-		 * RX handler.
+		 * RX handler. When RX checksum offloading is
+		 * active, aligning the IP header is required to
+		 * reflect the actual frame length including the
+		 * RX header size.
 		 */
 		rxmode |= AXE_772B_RXCMD_HDR_TYPE_1;
+		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+			rxmode |= AXE_772B_RXCMD_IPHDR_ALIGN;
 	} else {
 		/*
 		 * Default Rx buffer size is too small to get
@@ -1208,7 +1417,6 @@
 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
 	/* Switch to selected media. */
 	axe_ifmedia_upd(ifp);
-	axe_start(ue);
 }
 
 static void
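Note: axe_init() now records which RX format the controller was programmed for (AXE_FLAG_CSUM_FRAME only on an AX88772B with IFCAP_RXCSUM enabled, AXE_FLAG_STD_FRAME otherwise) and turns on IP-header alignment only in the former case, so axe_rx_frame() always parses the transfer buffer with the matching header layout. A sketch of the resulting AX88772B RX-mode word; axe_772b_rxmode() is hypothetical and promiscuous/broadcast handling is left out:

/*
 * Sketch only: RX mode bits programmed for an AX88772B by the
 * axe_init() changes above.
 */
static uint16_t
axe_772b_rxmode(struct ifnet *ifp)
{
	uint16_t rxmode;

	rxmode = AXE_RXCMD_MULTICAST | AXE_RXCMD_ENABLE;
	rxmode |= AXE_772B_RXCMD_HDR_TYPE_1;
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		rxmode |= AXE_772B_RXCMD_IPHDR_ALIGN;
	return (rxmode);
}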
@@ -1250,3 +1458,45 @@
 	usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_WR]);
 	usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_RD]);
 }
+
+static int
+axe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+	struct usb_ether *ue = ifp->if_softc;
+	struct axe_softc *sc;
+	struct ifreq *ifr;
+	int error, mask, reinit;
+
+	sc = uether_getsc(ue);
+	ifr = (struct ifreq *)data;
+	error = 0;
+	reinit = 0;
+	if (cmd == SIOCSIFCAP) {
+		AXE_LOCK(sc);
+		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+		if ((mask & IFCAP_TXCSUM) != 0 &&
+		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
+			ifp->if_capenable ^= IFCAP_TXCSUM;
+			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
+				ifp->if_hwassist |= AXE_CSUM_FEATURES;
+			else
+				ifp->if_hwassist &= ~AXE_CSUM_FEATURES;
+			reinit++;
+		}
+		if ((mask & IFCAP_RXCSUM) != 0 &&
+		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
+			ifp->if_capenable ^= IFCAP_RXCSUM;
+			reinit++;
+		}
+		if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
+			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+		else
+			reinit = 0;
+		AXE_UNLOCK(sc);
+		if (reinit > 0)
+			uether_init(ue);
+	} else
+		error = uether_ioctl(ifp, cmd, data);
+
+	return (error);
+}
Index: if_axereg.h
===================================================================
--- if_axereg.h	(revision 224468)
+++ if_axereg.h	(working copy)
@@ -97,6 +97,10 @@
 #define	AXE_CMD_WRITE_VLAN_CTRL		0x4028
 
 #define	AXE_772B_CMD_RXCTL_WRITE_CFG	0x012A
+#define	AXE_772B_CMD_READ_RXCSUM	0x002B
+#define	AXE_772B_CMD_WRITE_RXCSUM	0x012C
+#define	AXE_772B_CMD_READ_TXCSUM	0x002D
+#define	AXE_772B_CMD_WRITE_TXCSUM	0x012E
 
 #define	AXE_SW_RESET_CLEAR		0x00
 #define	AXE_SW_RESET_RR			0x01
@@ -199,6 +203,40 @@
 #define	AXE_VLAN_CTRL_VID1_MASK		0x00000FFF
 #define	AXE_VLAN_CTRL_VID2_MASK		0x0FFF0000
 
+#define	AXE_RXCSUM_IP			0x0001
+#define	AXE_RXCSUM_IPVE			0x0002
+#define	AXE_RXCSUM_IPV6E		0x0004
+#define	AXE_RXCSUM_TCP			0x0008
+#define	AXE_RXCSUM_UDP			0x0010
+#define	AXE_RXCSUM_ICMP			0x0020
+#define	AXE_RXCSUM_IGMP			0x0040
+#define	AXE_RXCSUM_ICMP6		0x0080
+#define	AXE_RXCSUM_TCPV6		0x0100
+#define	AXE_RXCSUM_UDPV6		0x0200
+#define	AXE_RXCSUM_ICMPV6		0x0400
+#define	AXE_RXCSUM_IGMPV6		0x0800
+#define	AXE_RXCSUM_ICMP6V6		0x1000
+#define	AXE_RXCSUM_FOPC			0x8000
+
+#define	AXE_RXCSUM_64TE			0x0100
+#define	AXE_RXCSUM_PPPOE		0x0200
+#define	AXE_RXCSUM_RPCE			0x8000
+
+#define	AXE_TXCSUM_IP			0x0001
+#define	AXE_TXCSUM_TCP			0x0002
+#define	AXE_TXCSUM_UDP			0x0004
+#define	AXE_TXCSUM_ICMP			0x0008
+#define	AXE_TXCSUM_IGMP			0x0010
+#define	AXE_TXCSUM_ICMP6		0x0020
+#define	AXE_TXCSUM_TCPV6		0x0100
+#define	AXE_TXCSUM_UDPV6		0x0200
+#define	AXE_TXCSUM_ICMPV6		0x0400
+#define	AXE_TXCSUM_IGMPV6		0x0800
+#define	AXE_TXCSUM_ICMP6V6		0x1000
+
+#define	AXE_TXCSUM_64TE			0x0001
+#define	AXE_TXCSUM_PPPOE		0x0002
+
 #define	AXE_BULK_BUF_SIZE	16384 /* bytes */
 
 #define	AXE_CTL_READ		0x01
@@ -230,6 +268,58 @@
 	uint16_t ilen;
 } __packed;
 
+#define	AXE_TX_CSUM_PSEUDO_HDR	0x4000
+#define	AXE_TX_CSUM_DIS		0x8000
+
+/*
+ * When RX checksum offloading is enabled, the AX88772B uses a new RX
+ * header format that is not compatible with the previous one. In
+ * addition, the IP header alignment option should be enabled to get
+ * the correct frame size including the RX header. The total transfer
+ * size including the RX header is a multiple of 4; the controller
+ * pads bytes as necessary to meet that condition.
+ * This driver does not enable the partial checksum feature, which
+ * computes a 16-bit checksum from the 14th byte to the end of the
+ * frame. If that feature is enabled, the computed checksum is put
+ * into the header, which in turn means a different header format.
+ */
+struct axe_csum_hdr {
+	uint16_t len;
+#define	AXE_CSUM_HDR_LEN_MASK		0x07FF
+#define	AXE_CSUM_HDR_CRC_ERR		0x1000
+#define	AXE_CSUM_HDR_MII_ERR		0x2000
+#define	AXE_CSUM_HDR_RUNT		0x4000
+#define	AXE_CSUM_HDR_BMCAST		0x8000
+	uint16_t ilen;
+	uint16_t cstatus;
+#define	AXE_CSUM_HDR_VLAN_MASK		0x0007
+#define	AXE_CSUM_HDR_VLAN_STRIP		0x0008
+#define	AXE_CSUM_HDR_VLAN_PRI_MASK	0x0070
+#define	AXE_CSUM_HDR_L4_CSUM_ERR	0x0100
+#define	AXE_CSUM_HDR_L3_CSUM_ERR	0x0200
+#define	AXE_CSUM_HDR_L4_TYPE_UDP	0x0400
+#define	AXE_CSUM_HDR_L4_TYPE_ICMP	0x0800
+#define	AXE_CSUM_HDR_L4_TYPE_IGMP	0x0C00
+#define	AXE_CSUM_HDR_L4_TYPE_TCP	0x1000
+#define	AXE_CSUM_HDR_L4_TYPE_TCPV6	0x1400
+#define	AXE_CSUM_HDR_L4_TYPE_MASK	0x1C00
+#define	AXE_CSUM_HDR_L3_TYPE_IPV4	0x2000
+#define	AXE_CSUM_HDR_L3_TYPE_IPV6	0x4000
+
+#ifdef AXE_APPEND_PARTIAL_CSUM
+	/*
+	 * These members are present only when partial checksum
+	 * offloading is enabled. The checksum value is a simple
+	 * 16-bit sum of the received frame starting at offset 14 of
+	 * the frame to the end of the frame, excluding FCS bytes.
+	 */
+	uint16_t csum_value;
+	uint16_t dummy;
+#endif
+} __packed;
+
+#define	AXE_RXBYTES(x)	((x) & AXE_CSUM_HDR_LEN_MASK)
+
 #define	GET_MII(sc)	uether_getmii(&(sc)->sc_ue)
 
 /* The interrupt endpoint is currently unused by the ASIX part. */
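Note: the cstatus word of struct axe_csum_hdr is what axe_rxeof() translates into mbuf checksum flags: IPv4 frames without L3/L4 errors get CSUM_IP_CHECKED | CSUM_IP_VALID, and TCP or UDP payloads additionally get CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data forced to 0xffff. A compact restatement of that mapping; axe_csum_flags() is hypothetical:

/*
 * Sketch only: mbuf checksum flags derived from a host-order cstatus
 * word, mirroring axe_rxeof() in if_axe.c.
 */
static int
axe_csum_flags(uint16_t cstatus)
{
	int flags, l4;

	if ((cstatus & AXE_CSUM_HDR_L3_TYPE_IPV4) == 0 ||
	    (cstatus & (AXE_CSUM_HDR_L3_CSUM_ERR |
	    AXE_CSUM_HDR_L4_CSUM_ERR)) != 0)
		return (0);
	flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
	l4 = cstatus & AXE_CSUM_HDR_L4_TYPE_MASK;
	if (l4 == AXE_CSUM_HDR_L4_TYPE_TCP || l4 == AXE_CSUM_HDR_L4_TYPE_UDP)
		flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	return (flags);
}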
@@ -247,6 +337,8 @@
 
 	int			sc_flags;
 #define	AXE_FLAG_LINK		0x0001
+#define	AXE_FLAG_STD_FRAME	0x0010
+#define	AXE_FLAG_CSUM_FRAME	0x0020
 #define	AXE_FLAG_772		0x1000	/* AX88772 */
 #define	AXE_FLAG_772A		0x2000	/* AX88772A */
 #define	AXE_FLAG_772B		0x4000	/* AX88772B */
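Note: the two new sc_flags bits are mutually exclusive and are re-evaluated on every axe_init(), which is why axe_ioctl() forces a re-init when IFCAP_RXCSUM is toggled: the controller's RX header format and the driver's parser have to change together. A couple of compile-time sanity checks that follow from the definitions above (sketch only, assuming AXE_APPEND_PARTIAL_CSUM stays undefined and CTASSERT from <sys/systm.h>):

CTASSERT(sizeof(struct axe_csum_hdr) == 6);
CTASSERT(AXE_RXBYTES(0xF83F) == 0x003F);
CTASSERT((AXE_FLAG_STD_FRAME & AXE_FLAG_CSUM_FRAME) == 0);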