/**************************************************************************

Copyright (c) 2001-2005, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* $OpenBSD: if_ixgb.c,v 1.36 2006/12/04 14:35:20 reyk Exp $ */

#include <dev/pci/if_ixgb.h>

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgb_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/

char ixgb_driver_version[] = "6.1.0";

/*********************************************************************
 *  PCI Device ID Table
 *********************************************************************/

const struct pci_matchid ixgb_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_LR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_CX4 },
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
int  ixgb_probe(struct device *, void *, void *);
void ixgb_attach(struct device *, struct device *, void *);
void ixgb_shutdown(void *);
int  ixgb_intr(void *);
void ixgb_power(int, void *);
void ixgb_start(struct ifnet *);
int  ixgb_ioctl(struct ifnet *, u_long, caddr_t);
void ixgb_watchdog(struct ifnet *);
void ixgb_init(void *);
void ixgb_stop(void *);
void ixgb_media_status(struct ifnet *, struct ifmediareq *);
int  ixgb_media_change(struct ifnet *);
void ixgb_identify_hardware(struct ixgb_softc *);
int  ixgb_allocate_pci_resources(struct ixgb_softc *);
void ixgb_free_pci_resources(struct ixgb_softc *);
void ixgb_local_timer(void *);
int  ixgb_hardware_init(struct ixgb_softc *);
void ixgb_setup_interface(struct ixgb_softc *);
int  ixgb_setup_transmit_structures(struct ixgb_softc *);
void ixgb_initialize_transmit_unit(struct ixgb_softc *);
int  ixgb_setup_receive_structures(struct ixgb_softc *);
void ixgb_initialize_receive_unit(struct ixgb_softc *);
void ixgb_enable_intr(struct ixgb_softc *);
void ixgb_disable_intr(struct ixgb_softc *);
void ixgb_free_transmit_structures(struct ixgb_softc *);
void ixgb_free_receive_structures(struct ixgb_softc *);
void ixgb_update_stats_counters(struct ixgb_softc *);
void ixgb_txeof(struct ixgb_softc *);
int  ixgb_allocate_receive_structures(struct ixgb_softc *);
int  ixgb_allocate_transmit_structures(struct ixgb_softc *);
void ixgb_rxeof(struct ixgb_softc *, int);
void
ixgb_receive_checksum(struct ixgb_softc *,
		      struct ixgb_rx_desc * rx_desc,
		      struct mbuf *);
void
ixgb_transmit_checksum_setup(struct ixgb_softc *,
			     struct mbuf *,
			     u_int8_t *);
void ixgb_set_promisc(struct ixgb_softc *);
void ixgb_set_multi(struct ixgb_softc *);
void ixgb_print_hw_stats(struct ixgb_softc *);
void ixgb_update_link_status(struct ixgb_softc *);
int
ixgb_get_buf(struct ixgb_softc *, int i,
	     struct mbuf *);
int  ixgb_encap(struct ixgb_softc *, struct mbuf *);
int
ixgb_dma_malloc(struct ixgb_softc *, bus_size_t,
		struct ixgb_dma_alloc *, int);
void ixgb_dma_free(struct ixgb_softc *, struct ixgb_dma_alloc *);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

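/*
 * Autoconf glue: the cfattach below supplies the softc size and the
 * match/attach entry points, and the cfdriver names the device "ixgb"
 * and classes it as a network interface (DV_IFNET).
 */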
struct cfattach ixgb_ca = {
	sizeof(struct ixgb_softc), ixgb_probe, ixgb_attach
};

struct cfdriver ixgb_cd = {
	0, "ixgb", DV_IFNET
};

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 0	/* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 0	/* chip will only prefetch if tail is
					 * pushed this many descriptors from
					 * head */
#define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */
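
/*
 * These defaults leave the chip's descriptor prefetch/write-back
 * behaviour at its reset values.  A hedged sketch of how they would
 * typically be folded into the RXDCTL register inside
 * ixgb_initialize_receive_unit() (the IXGB_RXDCTL_*_SHIFT names are
 * assumed from ixgb_hw.h and are not shown in this excerpt):
 *
 *	IXGB_WRITE_REG(&sc->hw, RXDCTL,
 *	    RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT |
 *	    RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
 *	    RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT);
 */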


/*********************************************************************
 *  Device identification routine
 *
 *  ixgb_probe determines if the driver should be loaded on an
 *  adapter based on the PCI vendor/device ID of the adapter.
 *
 *  return 0 on no match, positive on match
 *********************************************************************/

int
ixgb_probe(struct device *parent, void *match, void *aux)
{
	INIT_DEBUGOUT("ixgb_probe: begin");

	return (pci_matchbyid((struct pci_attach_args *)aux, ixgb_devices,
	    sizeof(ixgb_devices)/sizeof(ixgb_devices[0])));
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *********************************************************************/

void
ixgb_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct ixgb_softc *sc;
	int             tsize, rsize;

	INIT_DEBUGOUT("ixgb_attach: begin");

	sc = (struct ixgb_softc *)self;
	sc->osdep.ixgb_pa = *pa;

	timeout_set(&sc->timer_handle, ixgb_local_timer, sc);

	/* Determine hardware revision */
	ixgb_identify_hardware(sc);

	/* Parameters (to be read from user) */
	sc->num_tx_desc = IXGB_MAX_TXD;
	sc->num_rx_desc = IXGB_MAX_RXD;
	sc->tx_int_delay = TIDV;
	sc->rx_int_delay = RDTR;
	sc->rx_buffer_len = IXGB_RXBUFFER_2048;

	/*
	 * These parameters control the automatic generation (Tx) of and
	 * response (Rx) to Ethernet PAUSE frames.
	 */
	sc->hw.fc.high_water = FCRTH;
	sc->hw.fc.low_water = FCRTL;
	sc->hw.fc.pause_time = FCPAUSE;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.type = FLOW_CONTROL;

	/* Set the max frame size to the largest jumbo frame the hardware supports */
	sc->hw.max_frame_size = IXGB_MAX_JUMBO_FRAME_SIZE;

	if (ixgb_allocate_pci_resources(sc)) {
		printf("%s: Allocation of PCI resources failed\n",
		       sc->sc_dv.dv_xname);
		goto err_pci;
	}

	tsize = IXGB_ROUNDUP(sc->num_tx_desc * sizeof(struct ixgb_tx_desc),
	    IXGB_MAX_TXD * sizeof(struct ixgb_tx_desc));
	tsize = IXGB_ROUNDUP(tsize, PAGE_SIZE);
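
	/*
	 * num_tx_desc is fixed to IXGB_MAX_TXD above, so the first
	 * roundup is effectively a no-op; the second pads the ring
	 * allocation out to a whole page for the DMA code below.
	 */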

	/* Allocate Transmit Descriptor ring */
	if (ixgb_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate TxDescriptor memory\n",
		       sc->sc_dv.dv_xname);
		goto err_tx_desc;
	}
	sc->tx_desc_base = (struct ixgb_tx_desc *) sc->txdma.dma_vaddr;

	rsize = IXGB_ROUNDUP(sc->num_rx_desc * sizeof(struct ixgb_rx_desc),
	    IXGB_MAX_RXD * sizeof(struct ixgb_rx_desc));
	rsize = IXGB_ROUNDUP(rsize, PAGE_SIZE);

	/* Allocate Receive Descriptor ring */
	if (ixgb_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_rx_desc;
	}
	sc->rx_desc_base = (struct ixgb_rx_desc *) sc->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (ixgb_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		goto err_hw_init;
	}

	/* Setup OS specific network interface */
	ixgb_setup_interface(sc);

	/* Initialize statistics */
	ixgb_clear_hw_cntrs(&sc->hw);
	ixgb_update_stats_counters(sc);
	ixgb_update_link_status(sc);

	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

	INIT_DEBUGOUT("ixgb_attach: end");
	sc->sc_powerhook = powerhook_establish(ixgb_power, sc);
	sc->sc_shutdownhook = shutdownhook_establish(ixgb_shutdown, sc);
	return;

err_hw_init:
	ixgb_dma_free(sc, &sc->rxdma);
err_rx_desc:
	ixgb_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
	ixgb_free_pci_resources(sc);
}

void
ixgb_power(int why, void *arg)
{
	struct ixgb_softc *sc = (struct ixgb_softc *)arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->interface_data.ac_if;
		if (ifp->if_flags & IFF_UP)
			ixgb_init(sc);
	}
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

void
ixgb_shutdown(void *arg)
{
	struct ixgb_softc *sc = arg;

	ixgb_stop(sc);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  ixgb_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is left on the queue.
 **********************************************************************/

void
ixgb_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	struct ixgb_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	if (!sc->link_active)
		return;

	for (;;) {
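		/*
		 * Peek at the packet first; it is only dequeued below
		 * once ixgb_encap() has accepted it, so on failure it
		 * stays at the head of if_snd to be retried later.
		 */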
		IFQ_POLL(&ifp->if_snd, m_head);

		if (m_head == NULL)
			break;

		if (ixgb_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
		/* Send a copy of the frame to the BPF listener */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = IXGB_TX_TIMEOUT;
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgb_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int            s, error = 0;
	struct ifreq   *ifr = (struct ifreq *) data;
	struct ifaddr  *ifa = (struct ifaddr *)data;
	struct ixgb_softc *sc = ifp->if_softc;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->interface_data, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
			       "Addr)");
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			ixgb_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->interface_data, ifa);
#endif /* INET */
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				ixgb_set_promisc(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					ixgb_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ixgb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		error = (command == SIOCADDMULTI)
			? ether_addmulti(ifr, &sc->interface_data)
			: ether_delmulti(ifr, &sc->interface_data);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				ixgb_disable_intr(sc);
				ixgb_set_multi(sc);
				ixgb_enable_intr(sc);
			}
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
		error = ENOTTY;
	}

	splx(s);
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever the hardware quits transmitting.
 *
 **********************************************************************/

void
ixgb_watchdog(struct ifnet * ifp)
{
	struct ixgb_softc *sc = ifp->if_softc;

	/*
	 * If we are in this routine because of pause frames, then don't
	 * reset the hardware.
	 */
	if (IXGB_READ_REG(&sc->hw, STATUS) & IXGB_STATUS_TXOFF) {
		ifp->if_timer = IXGB_TX_TIMEOUT;
		return;
	}

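	/*
	 * Otherwise the transmit path really is wedged: log it,
	 * reinitialize the adapter and count the event.
	 */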
	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);

	ixgb_init(sc);

	sc->watchdog_events++;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as the
 *  init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/

void
ixgb_init(void *arg)
{
	struct ixgb_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	uint32_t temp_reg;
	int s;

	INIT_DEBUGOUT("ixgb_init: begin");

	s = splnet();

	ixgb_stop(sc);

	/* Get the latest MAC address; the user may have set an LAA */
	bcopy(sc->interface_data.ac_enaddr, sc->hw.curr_mac_addr,
	      IXGB_ETH_LENGTH_OF_ADDRESS);

	/* Initialize the hardware */
	if (ixgb_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		splx(s);
		return;
	}

	/* Prepare transmit descriptors and buffers */
	if (ixgb_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		       sc->sc_dv.dv_xname);
		ixgb_stop(sc);
		splx(s);
		return;
	}
	ixgb_initialize_transmit_unit(sc);

	/* Setup Multicast table */
	ixgb_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	if (ixgb_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		       sc->sc_dv.dv_xname);
		ixgb_stop(sc);
		splx(s);
		return;
	}
	ixgb_initialize_receive_unit(sc);

	/* Don't lose promiscuous settings */
	ixgb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Enable jumbo frames */
	IXGB_WRITE_REG(&sc->hw, MFRMS,
	    sc->hw.max_frame_size << IXGB_MFRMS_SHIFT);
	temp_reg = IXGB_READ_REG(&sc->hw, CTRL0);
	temp_reg |= IXGB_CTRL0_JFE;
	IXGB_WRITE_REG(&sc->hw, CTRL0, temp_reg);
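
	/*
	 * MFRMS holds the maximum accepted frame size and CTRL0.JFE
	 * enables jumbo frame reception, so frames up to
	 * sc->hw.max_frame_size will now be accepted.
	 */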

	timeout_add(&sc->timer_handle, hz);
	ixgb_clear_hw_cntrs(&sc->hw);
	ixgb_enable_intr(sc);

	splx(s);
}

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/

int
ixgb_intr(void *arg)
{
	struct ixgb_softc *sc = arg;
	struct ifnet    *ifp;
	u_int32_t       reg_icr;
	boolean_t       rxdmt0 = FALSE;
	int claimed = 0;

	ifp = &sc->interface_data.ac_if;

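	/*
	 * ICR is read-to-clear: each read below returns the pending
	 * interrupt causes and clears them, so keep looping until it
	 * reads back as zero.
	 */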
	for (;;) {
		reg_icr = IXGB_READ_REG(&sc->hw, ICR);
		if (reg_icr == 0)
			break;

		claimed = 1;

		if (reg_icr & IXGB_INT_RXDMT0)
			rxdmt0 = TRUE;

		if (ifp->if_flags & IFF_RUNNING) {
			ixgb_rxeof(sc, -1);
			ixgb_txeof(sc);
		}

		/* Link status change */
		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
			timeout_del(&sc->timer_handle);
			ixgb_check_for_link(&sc->hw);
			ixgb_update_link_status(sc);
			timeout_add(&sc->timer_handle, hz);
		}

		if (rxdmt0 && sc->raidc) {
			IXGB_WRITE_REG(&sc->hw, IMC, IXGB_INT_RXDMT0);
			IXGB_WRITE_REG(&sc->hw, IMS, IXGB_INT_RXDMT0);
		}
	}

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		ixgb_start(ifp);

	return (claimed);
}


/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
void
ixgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ixgb_softc *sc = ifp->if_softc;

	INIT_DEBUGOUT("ixgb_media_status: begin");

	ixgb_check_for_link(&sc->hw);
	ixgb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->hw.link_up) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
	    (sc->hw.phy_type == ixgb_phy_type_txn17401))
		ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
	else
		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
int
ixgb_media_change(struct ifnet * ifp)
{
	struct ixgb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	INIT_DEBUGOUT("ixgb_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgb_encap(struct ixgb_softc *sc, struct mbuf *m_head)
{
	u_int8_t        txd_popts;
	int             i, j, error = 0;
	bus_dmamap_t    map;

	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *current_tx_desc = NULL;

	/*
	 * Force a cleanup if the number of available TX descriptors
	 * hits the threshold
	 */
	if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		ixgb_txeof(sc);
		/* Did the cleanup free up at least a minimal amount? */
		if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
			sc->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	tx_buffer = &sc->tx_buffer_area[sc->next_avail_tx_desc];
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf(sc->txtag, map,
				     m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		sc->no_tx_dma_setup++;
		return (error);
	}
	IXGB_KASSERT(map->dm_nsegs != 0, ("ixgb_encap: empty packet"));

	if (map->dm_nsegs > sc->num_tx_desc_avail)
		goto fail;

#ifdef IXGB_CSUM_OFFLOAD
	ixgb_transmit_checksum_setup(sc, m_head, &txd_popts);
#else
	txd_popts = 0;
#endif
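
	/*
	 * Fill one TX descriptor per DMA segment; the index wraps at
	 * num_tx_desc, so the descriptor ring is treated as circular.
	 */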

	i = sc->next_avail_tx_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		tx_buffer = &sc->tx_buffer_area[i];
		current_tx_desc = &sc->tx_desc_base[i];

		current_tx_desc->buff_addr = htole64(map->dm_segs[j].ds_addr);
		current_tx_desc->cmd_type_len = htole32((sc->txd_cmd | map->dm_segs[j].ds_len));
		current_tx_desc->popts = txd_popts;
		if (++i == sc->num_tx_desc)
			i = 0;

		tx_buffer->m_head = NULL;
	}

	sc->num_tx_desc_avail -= map->dm_nsegs;
	sc->next_avail_tx_desc = i;

	tx_buffer->m_head = m_head;
	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that this frame is available to transmit.
	 */
	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGB_WRITE_REG(&sc->hw, TDT, i);

	return (0);

fail:
	sc->no_tx_desc_avail2++;
	bus_dmamap_unload(sc->txtag, map);
	return (ENOBUFS);
}

void
ixgb_set_promisc(struct ixgb_softc *sc)
{

	u_int32_t       reg_rctl;
	struct ifnet   *ifp = &sc->interface_data.ac_if;

	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGB_RCTL_MPE;
		reg_rctl &= ~IXGB_RCTL_UPE;
	} else {
		reg_rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	}
	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/

void
ixgb_set_multi(struct ixgb_softc *sc)
{
	u_int32_t       reg_rctl = 0;
	u_int8_t        mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
	int             mcnt = 0;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	struct arpcom *ac = &sc->interface_data;
	struct ether_multi *enm;
	struct ether_multistep step;

	IOCTL_DEBUGOUT("ixgb_set_multi: begin");

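	/*
	 * Walk the interface's multicast list.  A range entry
	 * (enm_addrlo != enm_addrhi) cannot be expressed in the
	 * multicast table array, so fall back to receiving all
	 * multicast traffic instead.
	 */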
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			mcnt = MAX_NUM_MULTICAST_ADDRESSES;
		}
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;
		bcopy(enm->enm_addrlo, &mta[mcnt*IXGB_ETH_LENGTH_OF_ADDRESS],
		      IXGB_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
		reg_rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
	} else
		ixgb_mc_addr_list_update(&sc->hw, mta, mcnt, 0);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

void
ixgb_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct ixgb_softc *sc = arg;
	int s;

	ifp = &sc->interface_data.ac_if;

	s = splnet();

	ixgb_check_for_link(&sc->hw);
	ixgb_update_link_status(sc);
	ixgb_update_stats_counters(sc);
	if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING)
		ixgb_print_hw_stats(sc);

	timeout_add(&sc->timer_handle, hz);

	splx(s);
}

void
ixgb_update_link_status(struct ixgb_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;

	if (sc->hw.link_up) {
		if (!sc->link_active) {
			ifp->if_baudrate = 1000000000;
			sc->link_active = 1;
			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			if_link_state_change(ifp);
		}
	} else {
		if (sc->link_active) {
			ifp->if_baudrate = 0;
			sc->link_active = 0;
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

void
ixgb_stop(void *arg)
{
	struct ifnet   *ifp;
	struct ixgb_softc *sc = arg;
	ifp = &sc->interface_data.ac_if;

	INIT_DEBUGOUT("ixgb_stop: begin\n");
	ixgb_disable_intr(sc);
	sc->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&sc->hw);
	timeout_del(&sc->timer_handle);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	ixgb_free_transmit_structures(sc);
	ixgb_free_receive_structures(sc);
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
void
ixgb_identify_hardware(struct ixgb_softc *sc)
{
	u_int32_t       reg;
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;

	/* Make sure our PCI config space has the necessary bits set */
	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
					    PCI_COMMAND_STATUS_REG);

	/* Save off the information about this board */
	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	sc->hw.revision_id = PCI_REVISION(reg);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
	sc->hw.subsystem_id = PCI_PRODUCT(reg);

	/* Set MacType, etc. based on this PCI info */
	switch (sc->hw.device_id) {
	case IXGB_DEVICE_ID_82597EX:
	case IXGB_DEVICE_ID_82597EX_SR:
	case IXGB_DEVICE_ID_82597EX_LR:
	case IXGB_DEVICE_ID_82597EX_CX4:
		sc->hw.mac_type = ixgb_82597;
		break;
	default:
		INIT_DEBUGOUT1("Unknown device id 0x%x", sc->hw.device_id);
		printf("%s: unsupported device id 0x%x\n",
		    sc->sc_dv.dv_xname, sc->hw.device_id);
	}
}

int
ixgb_allocate_pci_resources(struct ixgb_softc *sc)
{
	int val;
	pci_intr_handle_t       ih;
	const char              *intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
	pci_chipset_tag_t       pc = pa->pa_pc;

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, IXGB_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba is not mem space\n");
		return (ENXIO);
	}
	if (pci_mapreg_map(pa, IXGB_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.ixgb_membase, &sc->osdep.ixgb_memsize, 0)) {
		printf(": cannot find mem space\n");
		return (ENXIO);
	}
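
	/*
	 * pci_mapreg_map() reads the BAR, maps the registers and fills
	 * in the bus_space tag/handle in sc->osdep that the
	 * IXGB_READ_REG/IXGB_WRITE_REG macros use for register access.
	 */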
                    946:
                    947:        if (pci_intr_map(pa, &ih)) {
                    948:                printf(": couldn't map interrupt\n");
                    949:                return (ENXIO);
                    950:        }
                    951:
                    952:        sc->hw.back = &sc->osdep;
                    953:
                    954:        intrstr = pci_intr_string(pc, ih);
                    955:        sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, ixgb_intr, sc,
                    956:                                            sc->sc_dv.dv_xname);
                    957:        if (sc->sc_intrhand == NULL) {
                    958:                printf(": couldn't establish interrupt");
                    959:                if (intrstr != NULL)
                    960:                        printf(" at %s", intrstr);
                    961:                printf("\n");
                    962:                return (ENXIO);
                    963:        }
                    964:        printf(": %s", intrstr);
                    965:
                    966:        return (0);
                    967: }
                    968:
                    969: void
                    970: ixgb_free_pci_resources(struct ixgb_softc *sc)
                    971: {
                    972:        struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
                    973:        pci_chipset_tag_t       pc = pa->pa_pc;
                    974:
                    975:        if (sc->sc_intrhand)
                    976:                pci_intr_disestablish(pc, sc->sc_intrhand);
                    977:        sc->sc_intrhand = 0;
                    978:
                    979:        if (sc->osdep.ixgb_membase)
                    980:                bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
                    981:                                sc->osdep.ixgb_memsize);
                    982:        sc->osdep.ixgb_membase = 0;
                    983: }
                    984:
                    985: /*********************************************************************
                    986:  *
                    987:  *  Initialize the hardware to a configuration as specified by the
                    988:  *  adapter structure. The controller is reset, the EEPROM is
                    989:  *  verified, the MAC address is set, then the shared initialization
                    990:  *  routines are called.
                    991:  *
                    992:  **********************************************************************/
                    993: int
                    994: ixgb_hardware_init(struct ixgb_softc *sc)
                    995: {
                    996:        /* Issue a global reset */
                    997:        sc->hw.adapter_stopped = FALSE;
                    998:        ixgb_adapter_stop(&sc->hw);
                    999:
                   1000:        /* Make sure we have a good EEPROM before we read from it */
                   1001:        if (!ixgb_validate_eeprom_checksum(&sc->hw)) {
                   1002:                printf("%s: The EEPROM Checksum Is Not Valid\n",
                   1003:                       sc->sc_dv.dv_xname);
                   1004:                return (EIO);
                   1005:        }
                   1006:        if (!ixgb_init_hw(&sc->hw)) {
                    1007:        printf("%s: Hardware Initialization Failed\n",
                   1008:                       sc->sc_dv.dv_xname);
                   1009:                return (EIO);
                   1010:        }
                   1011:        bcopy(sc->hw.curr_mac_addr, sc->interface_data.ac_enaddr,
                   1012:              IXGB_ETH_LENGTH_OF_ADDRESS);
                   1013:
                   1014:        return (0);
                   1015: }
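                          
                          /*
                           * Usage sketch (illustrative; the exact attach-time error handling and
                           * message are assumptions, not code from this driver): the attach path
                           * is expected to call ixgb_hardware_init() once and bail out on failure:
                           *
                           *	if (ixgb_hardware_init(sc)) {
                           *		printf("%s: unable to initialize the hardware\n",
                           *		    sc->sc_dv.dv_xname);
                           *		return;
                           *	}
                           */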
                   1016:
                   1017: /*********************************************************************
                   1018:  *
                   1019:  *  Setup networking device structure and register an interface.
                   1020:  *
                   1021:  **********************************************************************/
                   1022: void
                   1023: ixgb_setup_interface(struct ixgb_softc *sc)
                   1024: {
                   1025:        struct ifnet   *ifp;
                   1026:        INIT_DEBUGOUT("ixgb_setup_interface: begin");
                   1027:
                   1028:        ifp = &sc->interface_data.ac_if;
                   1029:        strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
                   1030:
                   1031:        ifp->if_softc = sc;
                   1032:        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
                   1033:        ifp->if_ioctl = ixgb_ioctl;
                   1034:        ifp->if_start = ixgb_start;
                   1035:        ifp->if_watchdog = ixgb_watchdog;
                   1036:        ifp->if_hardmtu =
                   1037:                IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN - ETHER_CRC_LEN;
                   1038:        IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
                   1039:        IFQ_SET_READY(&ifp->if_snd);
                   1040:
                   1041:        ifp->if_capabilities = IFCAP_VLAN_MTU;
                   1042:
                   1043: #ifdef IXGB_CSUM_OFFLOAD
                   1044:        ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
                   1045: #endif
                   1046:
                   1047:        /*
                   1048:         * Specify the media types supported by this adapter and register
                   1049:         * callbacks to update media and link information
                   1050:         */
                   1051:        ifmedia_init(&sc->media, IFM_IMASK, ixgb_media_change,
                   1052:                     ixgb_media_status);
                   1053:        if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
                   1054:            (sc->hw.phy_type == ixgb_phy_type_txn17401)) {
                   1055:                ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR |
                   1056:                    IFM_FDX, 0, NULL);
                   1057:        } else {
                   1058:                ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR |
                   1059:                    IFM_FDX, 0, NULL);
                   1060:        }
                   1061:        ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
                   1062:        ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
                   1063:
                   1064:        if_attach(ifp);
                   1065:        ether_ifattach(ifp);
                   1066: }
                   1067:
                   1068: /********************************************************************
                   1069:  * Manage DMA'able memory.
                   1070:  *******************************************************************/
                   1071: int
                   1072: ixgb_dma_malloc(struct ixgb_softc *sc, bus_size_t size,
                   1073:                struct ixgb_dma_alloc * dma, int mapflags)
                   1074: {
                   1075:        int r;
                   1076:
                   1077:        dma->dma_tag = sc->osdep.ixgb_pa.pa_dmat;
                   1078:        r = bus_dmamap_create(dma->dma_tag, size, 1,
                   1079:            size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
                   1080:        if (r != 0) {
                   1081:                printf("%s: ixgb_dma_malloc: bus_dmamap_create failed; "
                    1082:                        "error %d\n", sc->sc_dv.dv_xname, r);
                   1083:                goto fail_0;
                   1084:        }
                   1085:
                   1086:        r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
                   1087:            1, &dma->dma_nseg, BUS_DMA_NOWAIT);
                   1088:        if (r != 0) {
                    1089:                printf("%s: ixgb_dma_malloc: bus_dmamem_alloc failed; "
                   1090:                        "size %lu, error %d\n", sc->sc_dv.dv_xname,
                   1091:                        (unsigned long)size, r);
                   1092:                goto fail_1;
                   1093:        }
                   1094:
                   1095:        r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
                   1096:            &dma->dma_vaddr, BUS_DMA_NOWAIT);
                   1097:        if (r != 0) {
                    1098:                printf("%s: ixgb_dma_malloc: bus_dmamem_map failed; "
                   1099:                        "size %lu, error %d\n", sc->sc_dv.dv_xname,
                   1100:                        (unsigned long)size, r);
                   1101:                goto fail_2;
                   1102:        }
                   1103:
                   1104:        r = bus_dmamap_load(sc->osdep.ixgb_pa.pa_dmat, dma->dma_map,
                   1105:                            dma->dma_vaddr,
                   1106:                            size,
                   1107:                            NULL,
                   1108:                            mapflags | BUS_DMA_NOWAIT);
                   1109:        if (r != 0) {
                   1110:                printf("%s: ixgb_dma_malloc: bus_dmamap_load failed; "
                    1111:                        "error %d\n", sc->sc_dv.dv_xname, r);
                   1112:                goto fail_3;
                   1113:        }
                   1114:
                   1115:        dma->dma_size = size;
                   1116:        return (0);
                   1117:
                   1118: fail_3:
                   1119:        bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
                   1120: fail_2:
                   1121:        bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
                   1122: fail_1:
                   1123:        bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
                   1124: fail_0:
                   1125:        dma->dma_map = NULL;
                   1126:        dma->dma_tag = NULL;
                   1127:
                   1128:        return (r);
                   1129: }
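                          
                          /*
                           * Usage sketch (illustrative only; "tsize" is an assumed local name and
                           * the IXGB_ROUNDUP() macro is assumed to come from the header): a
                           * descriptor ring is typically obtained with ixgb_dma_malloc() and the
                           * kernel virtual address recorded for later use:
                           *
                           *	bus_size_t tsize = IXGB_ROUNDUP(sc->num_tx_desc *
                           *	    sizeof(struct ixgb_tx_desc), PAGE_SIZE);
                           *
                           *	if (ixgb_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
                           *		printf("%s: unable to allocate tx_desc memory\n",
                           *		    sc->sc_dv.dv_xname);
                           *		return;
                           *	}
                           *	sc->tx_desc_base = (struct ixgb_tx_desc *)sc->txdma.dma_vaddr;
                           */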
                   1130:
                   1131: void
                   1132: ixgb_dma_free(struct ixgb_softc *sc, struct ixgb_dma_alloc *dma)
                   1133: {
                   1134:        if (dma->dma_tag == NULL)
                   1135:                return;
                   1136:
                   1137:        if (dma->dma_map != NULL) {
                   1138:                bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
                   1139:                    dma->dma_map->dm_mapsize,
                   1140:                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                   1141:                bus_dmamap_unload(dma->dma_tag, dma->dma_map);
                   1142:                bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
                   1143:                bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
                   1144:                bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
                   1145:        }
                   1146: }
                   1147:
                   1148: /*********************************************************************
                   1149:  *
                   1150:  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
                   1151:  *  the information needed to transmit a packet on the wire.
                   1152:  *
                   1153:  **********************************************************************/
                   1154: int
                   1155: ixgb_allocate_transmit_structures(struct ixgb_softc *sc)
                   1156: {
                   1157:        if (!(sc->tx_buffer_area =
                   1158:              (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
                   1159:                                            sc->num_tx_desc, M_DEVBUF,
                   1160:                                            M_NOWAIT))) {
                   1161:                printf("%s: Unable to allocate tx_buffer memory\n",
                   1162:                       sc->sc_dv.dv_xname);
                   1163:                return (ENOMEM);
                   1164:        }
                   1165:        bzero(sc->tx_buffer_area,
                   1166:              sizeof(struct ixgb_buffer) * sc->num_tx_desc);
                   1167:
                   1168:        return (0);
                   1169: }
                   1170:
                   1171: /*********************************************************************
                   1172:  *
                   1173:  *  Allocate and initialize transmit structures.
                   1174:  *
                   1175:  **********************************************************************/
                   1176: int
                   1177: ixgb_setup_transmit_structures(struct ixgb_softc *sc)
                   1178: {
                   1179:        struct  ixgb_buffer *tx_buffer;
                   1180:        int error, i;
                   1181:
                   1182:        if ((error = ixgb_allocate_transmit_structures(sc)) != 0)
                   1183:                goto fail;
                   1184:
                   1185:        bzero((void *)sc->tx_desc_base,
                   1186:              (sizeof(struct ixgb_tx_desc)) * sc->num_tx_desc);
                   1187:
                   1188:        sc->txtag = sc->osdep.ixgb_pa.pa_dmat;
                   1189:
                   1190:        tx_buffer = sc->tx_buffer_area;
                   1191:        for (i = 0; i < sc->num_tx_desc; i++) {
                   1192:                error = bus_dmamap_create(sc->txtag, IXGB_MAX_JUMBO_FRAME_SIZE,
                   1193:                            IXGB_MAX_SCATTER, IXGB_MAX_JUMBO_FRAME_SIZE, 0,
                   1194:                            BUS_DMA_NOWAIT, &tx_buffer->map);
                   1195:                if (error != 0) {
                   1196:                        printf("%s: Unable to create TX DMA map\n",
                   1197:                            sc->sc_dv.dv_xname);
                   1198:                        goto fail;
                   1199:                }
                   1200:                tx_buffer++;
                   1201:        }
                   1202:
                   1203:        sc->next_avail_tx_desc = 0;
                   1204:        sc->oldest_used_tx_desc = 0;
                   1205:
                   1206:        /* Set number of descriptors available */
                   1207:        sc->num_tx_desc_avail = sc->num_tx_desc;
                   1208:
                   1209:        /* Set checksum context */
                   1210:        sc->active_checksum_context = OFFLOAD_NONE;
                   1211:        bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
                   1212:           sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   1213:
                   1214:        return (0);
                   1215:
                   1216: fail:
                   1217:        ixgb_free_transmit_structures(sc);
                   1218:        return (error);
                   1219: }
                   1220:
                   1221: /*********************************************************************
                   1222:  *
                   1223:  *  Enable transmit unit.
                   1224:  *
                   1225:  **********************************************************************/
                   1226: void
                   1227: ixgb_initialize_transmit_unit(struct ixgb_softc *sc)
                   1228: {
                   1229:        u_int32_t       reg_tctl;
                   1230:        u_int64_t       bus_addr;
                   1231:
                   1232:        /* Setup the Base and Length of the Tx Descriptor Ring */
                   1233:        bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
                   1234:        IXGB_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
                   1235:        IXGB_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
                   1236:        IXGB_WRITE_REG(&sc->hw, TDLEN,
                   1237:                       sc->num_tx_desc *
                   1238:                       sizeof(struct ixgb_tx_desc));
                   1239:
                   1240:        /* Setup the HW Tx Head and Tail descriptor pointers */
                   1241:        IXGB_WRITE_REG(&sc->hw, TDH, 0);
                   1242:        IXGB_WRITE_REG(&sc->hw, TDT, 0);
                   1243:
                   1244:        HW_DEBUGOUT2("Base = %x, Length = %x\n",
                   1245:                     IXGB_READ_REG(&sc->hw, TDBAL),
                   1246:                     IXGB_READ_REG(&sc->hw, TDLEN));
                   1247:
                   1248:        IXGB_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
                   1249:
                   1250:        /* Program the Transmit Control Register */
                    1252:        reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
                   1253:        IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);
                   1254:
                   1255:        /* Setup Transmit Descriptor Settings for this adapter */
                   1256:        sc->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
                   1257:
                   1258:        if (sc->tx_int_delay > 0)
                   1259:                sc->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
                   1260: }
                   1261:
                   1262: /*********************************************************************
                   1263:  *
                   1264:  *  Free all transmit related data structures.
                   1265:  *
                   1266:  **********************************************************************/
                   1267: void
                   1268: ixgb_free_transmit_structures(struct ixgb_softc *sc)
                   1269: {
                   1270:        struct ixgb_buffer *tx_buffer;
                   1271:        int             i;
                   1272:
                   1273:        INIT_DEBUGOUT("free_transmit_structures: begin");
                   1274:
                   1275:        if (sc->tx_buffer_area != NULL) {
                   1276:                tx_buffer = sc->tx_buffer_area;
                   1277:                for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
                   1278:                        if (tx_buffer->map != NULL &&
                   1279:                            tx_buffer->map->dm_nsegs > 0) {
                   1280:                                bus_dmamap_sync(sc->txtag, tx_buffer->map,
                   1281:                                    0, tx_buffer->map->dm_mapsize,
                   1282:                                    BUS_DMASYNC_POSTWRITE);
                   1283:                                bus_dmamap_unload(sc->txtag,
                   1284:                                    tx_buffer->map);
                   1285:                        }
                   1286:
                   1287:                        if (tx_buffer->m_head != NULL) {
                   1288:                                m_freem(tx_buffer->m_head);
                   1289:                                tx_buffer->m_head = NULL;
                   1290:                        }
                   1291:                        if (tx_buffer->map != NULL) {
                   1292:                                bus_dmamap_destroy(sc->txtag,
                   1293:                                    tx_buffer->map);
                   1294:                                tx_buffer->map = NULL;
                   1295:                        }
                   1296:                }
                   1297:        }
                   1298:        if (sc->tx_buffer_area != NULL) {
                   1299:                free(sc->tx_buffer_area, M_DEVBUF);
                   1300:                sc->tx_buffer_area = NULL;
                   1301:        }
                   1302:        if (sc->txtag != NULL) {
                   1303:                sc->txtag = NULL;
                   1304:        }
                   1305: }
                   1306:
                   1307: /*********************************************************************
                   1308:  *
                   1309:  *  The offload context needs to be set when we transfer the first
                   1310:  *  packet of a particular protocol (TCP/UDP). We change the
                   1311:  *  context only if the protocol type changes.
                   1312:  *
                   1313:  **********************************************************************/
                   1314: void
                   1315: ixgb_transmit_checksum_setup(struct ixgb_softc *sc,
                   1316:                             struct mbuf *mp,
                   1317:                             u_int8_t *txd_popts)
                   1318: {
                   1319:        struct ixgb_context_desc *TXD;
                   1320:        struct ixgb_buffer *tx_buffer;
                   1321:        int             curr_txd;
                   1322:
                   1323:        if (mp->m_pkthdr.csum_flags) {
                   1324:
                   1325:                if (mp->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) {
                   1326:                        *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
                   1327:                        if (sc->active_checksum_context == OFFLOAD_TCP_IP)
                   1328:                                return;
                   1329:                        else
                   1330:                                sc->active_checksum_context = OFFLOAD_TCP_IP;
                   1331:
                   1332:                } else if (mp->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) {
                   1333:                        *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
                   1334:                        if (sc->active_checksum_context == OFFLOAD_UDP_IP)
                   1335:                                return;
                   1336:                        else
                   1337:                                sc->active_checksum_context = OFFLOAD_UDP_IP;
                   1338:                } else {
                   1339:                        *txd_popts = 0;
                   1340:                        return;
                   1341:                }
                   1342:        } else {
                   1343:                *txd_popts = 0;
                   1344:                return;
                   1345:        }
                   1346:
                   1347:        /*
                   1348:         * If we reach this point, the checksum offload context needs to be
                   1349:         * reset.
                   1350:         */
                   1351:        curr_txd = sc->next_avail_tx_desc;
                   1352:        tx_buffer = &sc->tx_buffer_area[curr_txd];
                    1353:        TXD = (struct ixgb_context_desc *)&sc->tx_desc_base[curr_txd];
                   1354:
                   1355:        TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
                   1356:        TXD->tucse = 0;
                   1357:
                   1358:        TXD->mss = 0;
                   1359:
                   1360:        if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
                   1361:                TXD->tucso =
                   1362:                        ENET_HEADER_SIZE + sizeof(struct ip) +
                   1363:                        offsetof(struct tcphdr, th_sum);
                   1364:        } else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
                   1365:                TXD->tucso =
                   1366:                        ENET_HEADER_SIZE + sizeof(struct ip) +
                   1367:                        offsetof(struct udphdr, uh_sum);
                   1368:        }
                   1369:        TXD->cmd_type_len = htole32(IXGB_CONTEXT_DESC_CMD_TCP |
                   1370:            IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE);
                   1371:
                   1372:        tx_buffer->m_head = NULL;
                   1373:
                   1374:        if (++curr_txd == sc->num_tx_desc)
                   1375:                curr_txd = 0;
                   1376:
                   1377:        sc->num_tx_desc_avail--;
                   1378:        sc->next_avail_tx_desc = curr_txd;
                   1379: }
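                          
                          /*
                           * Caller sketch (illustrative; "m_head" and "txd_popts" are assumed
                           * local names in the encapsulation path, and the exact capability test
                           * is an assumption): the transmit routine only asks for a checksum
                           * context when offload is enabled, otherwise the popts byte is cleared:
                           *
                           *	u_int8_t txd_popts;
                           *
                           *	if (ifp->if_capabilities & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
                           *		ixgb_transmit_checksum_setup(sc, m_head, &txd_popts);
                           *	else
                           *		txd_popts = 0;
                           */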
                   1380:
                   1381: /**********************************************************************
                   1382:  *
                   1383:  *  Examine each tx_buffer in the used queue. If the hardware is done
                   1384:  *  processing the packet then free associated resources. The
                   1385:  *  tx_buffer is put back on the free queue.
                   1386:  *
                   1387:  **********************************************************************/
                   1388: void
                   1389: ixgb_txeof(struct ixgb_softc *sc)
                   1390: {
                   1391:        int             i, num_avail;
                   1392:        struct ixgb_buffer *tx_buffer;
                   1393:        struct ixgb_tx_desc *tx_desc;
                   1394:        struct ifnet    *ifp = &sc->interface_data.ac_if;
                   1395:
                   1396:        if (sc->num_tx_desc_avail == sc->num_tx_desc)
                   1397:                return;
                   1398:
                   1399:        num_avail = sc->num_tx_desc_avail;
                   1400:        i = sc->oldest_used_tx_desc;
                   1401:
                   1402:        tx_buffer = &sc->tx_buffer_area[i];
                   1403:        tx_desc = &sc->tx_desc_base[i];
                   1404:
                   1405:        bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
                   1406:            sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
                   1407:        while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
                   1408:
                   1409:                tx_desc->status = 0;
                   1410:                num_avail++;
                   1411:
                   1412:                if (tx_buffer->m_head != NULL) {
                   1413:                        ifp->if_opackets++;
                   1414:
                   1415:                        if (tx_buffer->map->dm_nsegs > 0) {
                   1416:                                bus_dmamap_sync(sc->txtag, tx_buffer->map,
                   1417:                                    0, tx_buffer->map->dm_mapsize,
                   1418:                                    BUS_DMASYNC_POSTWRITE);
                   1419:                                bus_dmamap_unload(sc->txtag, tx_buffer->map);
                   1420:                        }
                   1421:
                   1422:                        m_freem(tx_buffer->m_head);
                   1423:                        tx_buffer->m_head = NULL;
                   1424:                }
                   1425:                if (++i == sc->num_tx_desc)
                   1426:                        i = 0;
                   1427:
                   1428:                tx_buffer = &sc->tx_buffer_area[i];
                   1429:                tx_desc = &sc->tx_desc_base[i];
                   1430:        }
                   1431:        bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
                   1432:            sc->txdma.dma_map->dm_mapsize,
                   1433:            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   1434:
                   1435:        sc->oldest_used_tx_desc = i;
                   1436:
                   1437:        /*
                   1438:         * If we have enough room, clear IFF_OACTIVE to tell the stack that
                   1439:         * it is OK to send packets. If there are no pending descriptors,
                   1440:         * clear the timeout. Otherwise, if some descriptors have been freed,
                   1441:         * restart the timeout.
                   1442:         */
                   1443:        if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
                   1444:                ifp->if_flags &= ~IFF_OACTIVE;
                   1445:                /* All clean, turn off the timer */
                   1446:                if (num_avail == sc->num_tx_desc)
                   1447:                        ifp->if_timer = 0;
                   1448:                /* Some cleaned, reset the timer */
                   1449:                else if (num_avail != sc->num_tx_desc_avail)
                   1450:                        ifp->if_timer = IXGB_TX_TIMEOUT;
                   1451:        }
                   1452:        sc->num_tx_desc_avail = num_avail;
                   1453: }
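                          
                          /*
                           * Call-site sketch (illustrative; the exact restart condition is an
                           * assumption): the interrupt handler typically reclaims completed
                           * descriptors and then restarts the transmit queue if packets are
                           * still waiting:
                           *
                           *	ixgb_txeof(sc);
                           *	if ((ifp->if_flags & IFF_RUNNING) && !IFQ_IS_EMPTY(&ifp->if_snd))
                           *		ixgb_start(ifp);
                           */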
                   1454:
                   1455:
                   1456: /*********************************************************************
                   1457:  *
                   1458:  *  Get a buffer from system mbuf buffer pool.
                   1459:  *
                   1460:  **********************************************************************/
                   1461: int
                   1462: ixgb_get_buf(struct ixgb_softc *sc, int i,
                   1463:             struct mbuf *nmp)
                   1464: {
                   1465:        struct mbuf *mp = nmp;
                   1466:        struct ixgb_buffer *rx_buffer;
                   1467:        struct ifnet   *ifp;
                   1468:        int             error;
                   1469:
                   1470:        ifp = &sc->interface_data.ac_if;
                   1471:
                   1472:        if (mp == NULL) {
                   1473:                MGETHDR(mp, M_DONTWAIT, MT_DATA);
                   1474:                if (mp == NULL) {
                   1475:                        sc->mbuf_alloc_failed++;
                   1476:                        return (ENOBUFS);
                   1477:                }
                   1478:                MCLGET(mp, M_DONTWAIT);
                   1479:                if ((mp->m_flags & M_EXT) == 0) {
                   1480:                        m_freem(mp);
                   1481:                        sc->mbuf_cluster_failed++;
                   1482:                        return (ENOBUFS);
                   1483:                }
                   1484:                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
                   1485:        } else {
                   1486:                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
                   1487:                mp->m_data = mp->m_ext.ext_buf;
                   1488:                mp->m_next = NULL;
                   1489:        }
                   1490:
                   1491:        if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
                   1492:                m_adj(mp, ETHER_ALIGN);
                   1493:
                   1494:        rx_buffer = &sc->rx_buffer_area[i];
                   1495:
                   1496:        /*
                   1497:         * Using memory from the mbuf cluster pool, invoke the bus_dma
                   1498:         * machinery to arrange the memory mapping.
                   1499:         */
                   1500:        error = bus_dmamap_load_mbuf(sc->rxtag, rx_buffer->map,
                   1501:            mp, BUS_DMA_NOWAIT);
                   1502:        if (error) {
                   1503:                m_free(mp);
                   1504:                return (error);
                   1505:        }
                   1506:        rx_buffer->m_head = mp;
                   1507:        sc->rx_desc_base[i].buff_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
                   1508:        bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
                   1509:            rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);
                   1510:
                   1511:        return (0);
                   1512: }
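                          
                          /*
                           * Note: ixgb_get_buf() is used in two ways in this file: with nmp ==
                           * NULL at ring-fill time, so a fresh cluster is allocated, and with
                           * the previous mbuf passed back in from ixgb_rxeof() so an errored
                           * descriptor's buffer is simply reloaded and recycled.
                           */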
                   1513:
                   1514: /*********************************************************************
                   1515:  *
                   1516:  *  Allocate memory for rx_buffer structures. Since we use one
                    1517:  *  rx_buffer per received packet, the maximum number of rx_buffers
                   1518:  *  that we'll need is equal to the number of receive descriptors
                   1519:  *  that we've allocated.
                   1520:  *
                   1521:  **********************************************************************/
                   1522: int
                   1523: ixgb_allocate_receive_structures(struct ixgb_softc *sc)
                   1524: {
                   1525:        int             i, error;
                   1526:        struct ixgb_buffer *rx_buffer;
                   1527:
                   1528:        if (!(sc->rx_buffer_area =
                   1529:              (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
                   1530:                                            sc->num_rx_desc, M_DEVBUF,
                   1531:                                            M_NOWAIT))) {
                   1532:                printf("%s: Unable to allocate rx_buffer memory\n",
                   1533:                       sc->sc_dv.dv_xname);
                   1534:                return (ENOMEM);
                   1535:        }
                   1536:
                   1537:        bzero(sc->rx_buffer_area,
                   1538:              sizeof(struct ixgb_buffer) * sc->num_rx_desc);
                   1539:
                   1540:        sc->rxtag = sc->osdep.ixgb_pa.pa_dmat;
                   1541:
                   1542:        rx_buffer = sc->rx_buffer_area;
                   1543:        for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
                   1544:                error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
                   1545:                                          MCLBYTES, 0, BUS_DMA_NOWAIT,
                   1546:                                          &rx_buffer->map);
                   1547:                if (error != 0) {
                   1548:                        printf("%s: ixgb_allocate_receive_structures: "
                   1549:                               "bus_dmamap_create failed; error %u\n",
                   1550:                               sc->sc_dv.dv_xname, error);
                   1551:                        goto fail;
                   1552:                }
                   1553:        }
                   1554:
                   1555:        for (i = 0; i < sc->num_rx_desc; i++) {
                   1556:                error = ixgb_get_buf(sc, i, NULL);
                   1557:                if (error != 0)
                   1558:                        goto fail;
                   1559:        }
                   1560:        bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
                   1561:            sc->rxdma.dma_map->dm_mapsize,
                   1562:            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   1563:
                   1564:        return (0);
                   1565:
                   1566: fail:
                   1567:        ixgb_free_receive_structures(sc);
                   1568:        return (error);
                   1569: }
                   1570:
                   1571: /*********************************************************************
                   1572:  *
                   1573:  *  Allocate and initialize receive structures.
                   1574:  *
                   1575:  **********************************************************************/
                   1576: int
                   1577: ixgb_setup_receive_structures(struct ixgb_softc *sc)
                   1578: {
                   1579:        bzero((void *)sc->rx_desc_base,
                   1580:              (sizeof(struct ixgb_rx_desc)) * sc->num_rx_desc);
                   1581:
                   1582:        if (ixgb_allocate_receive_structures(sc))
                   1583:                return (ENOMEM);
                   1584:
                   1585:        /* Setup our descriptor pointers */
                   1586:        sc->next_rx_desc_to_check = 0;
                   1587:        sc->next_rx_desc_to_use = 0;
                   1588:        return (0);
                   1589: }
                   1590:
                   1591: /*********************************************************************
                   1592:  *
                   1593:  *  Enable receive unit.
                   1594:  *
                   1595:  **********************************************************************/
                   1596: void
                   1597: ixgb_initialize_receive_unit(struct ixgb_softc *sc)
                   1598: {
                   1599:        u_int32_t       reg_rctl;
                   1600:        u_int32_t       reg_rxcsum;
                   1601:        u_int32_t       reg_rxdctl;
                   1602:        struct ifnet   *ifp;
                   1603:        u_int64_t       bus_addr;
                   1604:
                   1605:        ifp = &sc->interface_data.ac_if;
                   1606:
                   1607:        /*
                   1608:         * Make sure receives are disabled while setting up the descriptor
                   1609:         * ring
                   1610:         */
                   1611:        reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
                   1612:        IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
                   1613:
                   1614:        /* Set the Receive Delay Timer Register */
                   1615:        IXGB_WRITE_REG(&sc->hw, RDTR,
                   1616:                       sc->rx_int_delay);
                   1617:
                   1618:        /* Setup the Base and Length of the Rx Descriptor Ring */
                   1619:        bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
                   1620:        IXGB_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
                   1621:        IXGB_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
                   1622:        IXGB_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
                   1623:                       sizeof(struct ixgb_rx_desc));
                   1624:
                   1625:        /* Setup the HW Rx Head and Tail Descriptor Pointers */
                   1626:        IXGB_WRITE_REG(&sc->hw, RDH, 0);
                   1627:
                   1628:        IXGB_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);
                   1629:
                   1630:        reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
                   1631:                | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
                   1632:                | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
                   1633:        IXGB_WRITE_REG(&sc->hw, RXDCTL, reg_rxdctl);
                   1634:
                   1635:        sc->raidc = 1;
                   1636:        if (sc->raidc) {
                   1637:                uint32_t        raidc;
                   1638:                uint8_t         poll_threshold;
                   1639: #define IXGB_RAIDC_POLL_DEFAULT 120
                   1640:
                   1641:                poll_threshold = ((sc->num_rx_desc - 1) >> 3);
                   1642:                poll_threshold >>= 1;
                   1643:                poll_threshold &= 0x3F;
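                                          /*
                                           * Worked example: with 256 receive descriptors this
                                           * computes ((256 - 1) >> 3) >> 1 = 15, comfortably
                                           * within the 6-bit (0x3F) poll threshold field.
                                           */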
                   1644:                raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
                   1645:                        (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
                   1646:                        (sc->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
                   1647:                        poll_threshold;
                   1648:                IXGB_WRITE_REG(&sc->hw, RAIDC, raidc);
                   1649:        }
                   1650:
                    1651:        /* Enable Receive Checksum Offload for TCP and UDP */
                   1652:        reg_rxcsum = IXGB_READ_REG(&sc->hw, RXCSUM);
                   1653:        reg_rxcsum |= IXGB_RXCSUM_TUOFL;
                   1654:        IXGB_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
                   1655:
                   1656:        /* Setup the Receive Control Register */
                   1657:        reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
                   1658:        reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
                   1659:        reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
                   1660:                IXGB_RCTL_CFF |
                   1661:                (sc->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
                   1662:
                   1663:        switch (sc->rx_buffer_len) {
                   1664:        default:
                   1665:        case IXGB_RXBUFFER_2048:
                   1666:                reg_rctl |= IXGB_RCTL_BSIZE_2048;
                   1667:                break;
                   1668:        case IXGB_RXBUFFER_4096:
                   1669:                reg_rctl |= IXGB_RCTL_BSIZE_4096;
                   1670:                break;
                   1671:        case IXGB_RXBUFFER_8192:
                   1672:                reg_rctl |= IXGB_RCTL_BSIZE_8192;
                   1673:                break;
                   1674:        case IXGB_RXBUFFER_16384:
                   1675:                reg_rctl |= IXGB_RCTL_BSIZE_16384;
                   1676:                break;
                   1677:        }
                   1678:
                   1679:        reg_rctl |= IXGB_RCTL_RXEN;
                   1680:
                   1681:        /* Enable Receives */
                   1682:        IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
                   1683: }
                   1684:
                   1685: /*********************************************************************
                   1686:  *
                   1687:  *  Free receive related data structures.
                   1688:  *
                   1689:  **********************************************************************/
                   1690: void
                   1691: ixgb_free_receive_structures(struct ixgb_softc *sc)
                   1692: {
                   1693:        struct ixgb_buffer *rx_buffer;
                   1694:        int             i;
                   1695:
                   1696:        INIT_DEBUGOUT("free_receive_structures: begin");
                   1697:
                   1698:        if (sc->rx_buffer_area != NULL) {
                   1699:                rx_buffer = sc->rx_buffer_area;
                   1700:                for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
                   1701:                        if (rx_buffer->map != NULL &&
                   1702:                            rx_buffer->map->dm_nsegs > 0) {
                   1703:                                bus_dmamap_sync(sc->rxtag, rx_buffer->map,
                   1704:                                    0, rx_buffer->map->dm_mapsize,
                   1705:                                    BUS_DMASYNC_POSTREAD);
                   1706:                                bus_dmamap_unload(sc->rxtag,
                   1707:                                    rx_buffer->map);
                   1708:                        }
                   1709:                        if (rx_buffer->m_head != NULL) {
                   1710:                                m_freem(rx_buffer->m_head);
                   1711:                                rx_buffer->m_head = NULL;
                   1712:                        }
                   1713:                        if (rx_buffer->map != NULL) {
                   1714:                                bus_dmamap_destroy(sc->rxtag,
                   1715:                                    rx_buffer->map);
                   1716:                                rx_buffer->map = NULL;
                   1717:                        }
                   1718:                }
                   1719:        }
                   1720:        if (sc->rx_buffer_area != NULL) {
                   1721:                free(sc->rx_buffer_area, M_DEVBUF);
                   1722:                sc->rx_buffer_area = NULL;
                   1723:        }
                   1724:        if (sc->rxtag != NULL)
                   1725:                sc->rxtag = NULL;
                   1726: }
                   1727:
                   1728: /*********************************************************************
                   1729:  *
                    1730:  *  This routine executes in interrupt context. It replenishes
                    1731:  *  the mbufs in the descriptor ring and passes data that has been
                    1732:  *  DMA'd into host memory up to the network stack.
                   1733:  *
                   1734:  *  We loop at most count times if count is > 0, or until done if
                   1735:  *  count < 0.
                   1736:  *
                   1737:  *********************************************************************/
                   1738: void
                   1739: ixgb_rxeof(struct ixgb_softc *sc, int count)
                   1740: {
                   1741:        struct ifnet   *ifp;
                   1742:        struct mbuf    *mp;
                   1743:        int             eop = 0;
                   1744:        int             len;
                   1745:        u_int8_t        accept_frame = 0;
                   1746:        int             i;
                   1747:        int             next_to_use = 0;
                   1748:        int             eop_desc;
                   1749:
                   1750:        /* Pointer to the receive descriptor being examined. */
                   1751:        struct ixgb_rx_desc *current_desc;
                   1752:
                   1753:        ifp = &sc->interface_data.ac_if;
                   1754:        i = sc->next_rx_desc_to_check;
                   1755:        next_to_use = sc->next_rx_desc_to_use;
                   1756:        eop_desc = sc->next_rx_desc_to_check;
                   1757:        current_desc = &sc->rx_desc_base[i];
                   1758:        bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
                   1759:            sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
                   1760:
                   1761:        if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD))
                   1762:                return;
                   1763:
                   1764:        while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) &&
                   1765:                    (count != 0) &&
                   1766:                    (ifp->if_flags & IFF_RUNNING)) {
                   1767:
                   1768:                mp = sc->rx_buffer_area[i].m_head;
                   1769:                bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
                   1770:                    0, sc->rx_buffer_area[i].map->dm_mapsize,
                   1771:                    BUS_DMASYNC_POSTREAD);
                   1772:                bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);
                   1773:
                   1774:                accept_frame = 1;
                   1775:                if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
                   1776:                        count--;
                   1777:                        eop = 1;
                   1778:                } else {
                   1779:                        eop = 0;
                   1780:                }
                   1781:                len = current_desc->length;
                   1782:
                   1783:                if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
                   1784:                            IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
                   1785:                                            IXGB_RX_DESC_ERRORS_RXE))
                   1786:                        accept_frame = 0;
                   1787:                if (accept_frame) {
                   1788:
                   1789:                        /* Assign correct length to the current fragment */
                   1790:                        mp->m_len = len;
                   1791:
                   1792:                        if (sc->fmp == NULL) {
                   1793:                                mp->m_pkthdr.len = len;
                   1794:                                sc->fmp = mp;   /* Store the first mbuf */
                   1795:                                sc->lmp = mp;
                   1796:                        } else {
                    1797:                                /* Chain mbufs together */
                   1798:                                mp->m_flags &= ~M_PKTHDR;
                   1799:                                sc->lmp->m_next = mp;
                   1800:                                sc->lmp = sc->lmp->m_next;
                   1801:                                sc->fmp->m_pkthdr.len += len;
                   1802:                        }
                   1803:
                   1804:                        if (eop) {
                   1805:                                eop_desc = i;
                   1806:                                sc->fmp->m_pkthdr.rcvif = ifp;
                   1807:                                ifp->if_ipackets++;
                   1808:
                   1809: #if NBPFILTER > 0
                   1810:                                /*
                   1811:                                 * Handle BPF listeners. Let the BPF
                   1812:                                 * user see the packet.
                   1813:                                 */
                   1814:                                if (ifp->if_bpf)
                   1815:                                        bpf_mtap(ifp->if_bpf, sc->fmp,
                   1816:                                            BPF_DIRECTION_IN);
                   1817: #endif
                   1818:
                   1819:                                ixgb_receive_checksum(sc, current_desc,
                   1820:                                                      sc->fmp);
                   1821:                                ether_input_mbuf(ifp, sc->fmp);
                   1822:                                sc->fmp = NULL;
                   1823:                                sc->lmp = NULL;
                   1824:                        }
                   1825:                        sc->rx_buffer_area[i].m_head = NULL;
                   1826:                } else {
                   1827:                        sc->dropped_pkts++;
                   1828:                        if (sc->fmp != NULL)
                   1829:                                m_freem(sc->fmp);
                   1830:                        sc->fmp = NULL;
                   1831:                        sc->lmp = NULL;
                   1832:                }
                   1833:
                    1834:                /* Zero out the receive descriptor's status */
                   1835:                current_desc->status = 0;
                   1836:                bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
                   1837:                    sc->rxdma.dma_map->dm_mapsize,
                   1838:                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   1839:
                   1840:                /* Advance our pointers to the next descriptor */
                   1841:                if (++i == sc->num_rx_desc) {
                   1842:                        i = 0;
                   1843:                        current_desc = sc->rx_desc_base;
                   1844:                } else
                   1845:                        current_desc++;
                   1846:        }
                   1847:        sc->next_rx_desc_to_check = i;
                   1848:
                   1849:        if (--i < 0)
                   1850:                i = (sc->num_rx_desc - 1);
                   1851:
                   1852:        /*
                    1853:         * 82597EX: Workaround for redundant write back in the receive descriptor ring (causes
                   1854:         * memory corruption). Avoid using and re-submitting the most recently received RX
                   1855:         * descriptor back to hardware.
                   1856:         *
                   1857:         * if(Last written back descriptor == EOP bit set descriptor)
                   1858:         *      then avoid re-submitting the most recently received RX descriptor
                   1859:         *      back to hardware.
                   1860:         * if(Last written back descriptor != EOP bit set descriptor)
                   1861:         *      then avoid re-submitting the most recently received RX descriptors
                   1862:         *      till last EOP bit set descriptor.
                   1863:         */
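                                  /*
                                   * Worked example (illustrative numbers): with an 8-entry ring, if
                                   * the most recently written-back descriptor is index 5 but the last
                                   * EOP-marked descriptor was index 3, the loop below replenishes
                                   * slots only up to and including index 3; indices 4 and 5 are left
                                   * untouched until a later pass.
                                   */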
                   1864:        if (eop_desc != i) {
                   1865:                if (++eop_desc == sc->num_rx_desc)
                   1866:                        eop_desc = 0;
                   1867:                i = eop_desc;
                   1868:        }
                    1869:        /* Replenish the descriptors with new mbufs up to the last EOP-marked descriptor */
                   1870:        while (next_to_use != i) {
                   1871:                current_desc = &sc->rx_desc_base[next_to_use];
                   1872:                if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
                   1873:                            IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
                   1874:                                             IXGB_RX_DESC_ERRORS_RXE))) {
                   1875:                        mp = sc->rx_buffer_area[next_to_use].m_head;
                   1876:                        ixgb_get_buf(sc, next_to_use, mp);
                   1877:                } else {
                   1878:                        if (ixgb_get_buf(sc, next_to_use, NULL) == ENOBUFS)
                   1879:                                break;
                   1880:                }
                   1881:                /* Advance our pointers to the next descriptor */
                   1882:                if (++next_to_use == sc->num_rx_desc)
                   1883:                        next_to_use = 0;
                   1884:        }
                   1885:        sc->next_rx_desc_to_use = next_to_use;
                   1886:        if (--next_to_use < 0)
                    1887:                next_to_use = (sc->num_rx_desc - 1);
                    1888:        /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
                    1889:        IXGB_WRITE_REG(&sc->hw, RDT, next_to_use);
                   1890: }
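                          
                          /*
                           * Call-site sketch (illustrative; the surrounding check is an
                           * assumption): the interrupt handler drains the receive ring
                           * completely by passing a negative count:
                           *
                           *	if (ifp->if_flags & IFF_RUNNING)
                           *		ixgb_rxeof(sc, -1);
                           */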
                   1891:
                   1892: /*********************************************************************
                   1893:  *
                   1894:  *  Verify that the hardware indicated that the checksum is valid.
                    1895:  *  Inform the stack about the checksum status so that the stack
                    1896:  *  does not spend time verifying the checksum itself.
                   1897:  *
                   1898:  *********************************************************************/
                   1899: void
                   1900: ixgb_receive_checksum(struct ixgb_softc *sc,
                   1901:                      struct ixgb_rx_desc *rx_desc,
                   1902:                      struct mbuf *mp)
                   1903: {
                   1904:        if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
                   1905:                mp->m_pkthdr.csum_flags = 0;
                   1906:                return;
                   1907:        }
                   1908:
                   1909:        if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
                   1910:                /* Did it pass? */
                   1911:                if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
                   1912:                        /* IP Checksum Good */
                   1913:                        mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
                   1914:
                   1915:                } else {
                   1916:                        mp->m_pkthdr.csum_flags = 0;
                   1917:                }
                   1918:        }
                   1919:        if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
                   1920:                /* Did it pass? */
                   1921:                if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
                   1922:                        mp->m_pkthdr.csum_flags |=
                   1923:                                M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
                   1924:                }
                   1925:        }
                   1926: }
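
/*
 * Illustrative note (a minimal sketch, not taken from this driver):
 * code higher in the stack would typically test the flags set above
 * before falling back to a software checksum, e.g.
 *
 *	if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_IN_OK)
 *		tcp_csum_already_checked = 1;
 *
 * where tcp_csum_already_checked is a hypothetical local flag.
 */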
                   1927:
                   1928: void
                   1929: ixgb_enable_intr(struct ixgb_softc *sc)
                   1930: {
                   1931:        IXGB_WRITE_REG(&sc->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
                   1932:                            IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
                   1933: }
                   1934:
                   1935: void
                   1936: ixgb_disable_intr(struct ixgb_softc *sc)
                   1937: {
                   1938:        IXGB_WRITE_REG(&sc->hw, IMC, ~0);
                   1939: }
                   1940:
                   1941: void
                   1942: ixgb_write_pci_cfg(struct ixgb_hw *hw,
                   1943:                   uint32_t reg,
                   1944:                   uint16_t *value)
                   1945: {
                   1946:        struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
                   1947:        pci_chipset_tag_t pc = pa->pa_pc;
                   1948:        /* Should we do read/mask/write...?  16 vs 32 bit!!! */
                   1949:        pci_conf_write(pc, pa->pa_tag, reg, *value);
                   1950: }
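
/*
 * Illustrative sketch only, prompted by the question in the comment
 * above: a 16-bit-safe variant would read the enclosing 32-bit dword,
 * merge in the new half-word, and write the dword back.  The function
 * name is hypothetical and nothing in this driver calls it.
 */
static void
ixgb_write_pci_cfg_word_sketch(struct ixgb_hw *hw, uint32_t reg,
    uint16_t *value)
{
        struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
        pci_chipset_tag_t pc = pa->pa_pc;
        uint32_t dword_reg = reg & ~0x3;        /* align down to a dword */
        int shift = (reg & 0x2) ? 16 : 0;       /* upper or lower half-word */
        pcireg_t val;

        val = pci_conf_read(pc, pa->pa_tag, dword_reg);
        val &= ~((pcireg_t)0xffff << shift);    /* clear the target half-word */
        val |= (pcireg_t)*value << shift;       /* merge in the new value */
        pci_conf_write(pc, pa->pa_tag, dword_reg, val);
}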
                   1951:
                   1952: /**********************************************************************
                   1953:  *
                   1954:  *  Update the board statistics counters.
                   1955:  *
                   1956:  **********************************************************************/
                   1957: void
                   1958: ixgb_update_stats_counters(struct ixgb_softc *sc)
                   1959: {
                   1960:        struct ifnet   *ifp;
                   1961:
                   1962:        sc->stats.crcerrs += IXGB_READ_REG(&sc->hw, CRCERRS);
                   1963:        sc->stats.gprcl += IXGB_READ_REG(&sc->hw, GPRCL);
                   1964:        sc->stats.gprch += IXGB_READ_REG(&sc->hw, GPRCH);
                   1965:        sc->stats.gorcl += IXGB_READ_REG(&sc->hw, GORCL);
                   1966:        sc->stats.gorch += IXGB_READ_REG(&sc->hw, GORCH);
                   1967:        sc->stats.bprcl += IXGB_READ_REG(&sc->hw, BPRCL);
                   1968:        sc->stats.bprch += IXGB_READ_REG(&sc->hw, BPRCH);
                   1969:        sc->stats.mprcl += IXGB_READ_REG(&sc->hw, MPRCL);
                   1970:        sc->stats.mprch += IXGB_READ_REG(&sc->hw, MPRCH);
                   1971:        sc->stats.roc += IXGB_READ_REG(&sc->hw, ROC);
                   1972:
                   1973:        sc->stats.mpc += IXGB_READ_REG(&sc->hw, MPC);
                   1974:        sc->stats.dc += IXGB_READ_REG(&sc->hw, DC);
                   1975:        sc->stats.rlec += IXGB_READ_REG(&sc->hw, RLEC);
                   1976:        sc->stats.xonrxc += IXGB_READ_REG(&sc->hw, XONRXC);
                   1977:        sc->stats.xontxc += IXGB_READ_REG(&sc->hw, XONTXC);
                   1978:        sc->stats.xoffrxc += IXGB_READ_REG(&sc->hw, XOFFRXC);
                   1979:        sc->stats.xofftxc += IXGB_READ_REG(&sc->hw, XOFFTXC);
                   1980:        sc->stats.gptcl += IXGB_READ_REG(&sc->hw, GPTCL);
                   1981:        sc->stats.gptch += IXGB_READ_REG(&sc->hw, GPTCH);
                   1982:        sc->stats.gotcl += IXGB_READ_REG(&sc->hw, GOTCL);
                   1983:        sc->stats.gotch += IXGB_READ_REG(&sc->hw, GOTCH);
                   1984:        sc->stats.ruc += IXGB_READ_REG(&sc->hw, RUC);
                   1985:        sc->stats.rfc += IXGB_READ_REG(&sc->hw, RFC);
                   1986:        sc->stats.rjc += IXGB_READ_REG(&sc->hw, RJC);
                   1987:        sc->stats.torl += IXGB_READ_REG(&sc->hw, TORL);
                   1988:        sc->stats.torh += IXGB_READ_REG(&sc->hw, TORH);
                   1989:        sc->stats.totl += IXGB_READ_REG(&sc->hw, TOTL);
                   1990:        sc->stats.toth += IXGB_READ_REG(&sc->hw, TOTH);
                   1991:        sc->stats.tprl += IXGB_READ_REG(&sc->hw, TPRL);
                   1992:        sc->stats.tprh += IXGB_READ_REG(&sc->hw, TPRH);
                   1993:        sc->stats.tptl += IXGB_READ_REG(&sc->hw, TPTL);
                   1994:        sc->stats.tpth += IXGB_READ_REG(&sc->hw, TPTH);
                   1995:        sc->stats.plt64c += IXGB_READ_REG(&sc->hw, PLT64C);
                   1996:        sc->stats.mptcl += IXGB_READ_REG(&sc->hw, MPTCL);
                   1997:        sc->stats.mptch += IXGB_READ_REG(&sc->hw, MPTCH);
                   1998:        sc->stats.bptcl += IXGB_READ_REG(&sc->hw, BPTCL);
                   1999:        sc->stats.bptch += IXGB_READ_REG(&sc->hw, BPTCH);
                   2000:
                   2001:        sc->stats.uprcl += IXGB_READ_REG(&sc->hw, UPRCL);
                   2002:        sc->stats.uprch += IXGB_READ_REG(&sc->hw, UPRCH);
                   2003:        sc->stats.vprcl += IXGB_READ_REG(&sc->hw, VPRCL);
                   2004:        sc->stats.vprch += IXGB_READ_REG(&sc->hw, VPRCH);
                   2005:        sc->stats.jprcl += IXGB_READ_REG(&sc->hw, JPRCL);
                   2006:        sc->stats.jprch += IXGB_READ_REG(&sc->hw, JPRCH);
                   2007:        sc->stats.rnbc += IXGB_READ_REG(&sc->hw, RNBC);
                   2008:        sc->stats.icbc += IXGB_READ_REG(&sc->hw, ICBC);
                   2009:        sc->stats.ecbc += IXGB_READ_REG(&sc->hw, ECBC);
                   2010:        sc->stats.uptcl += IXGB_READ_REG(&sc->hw, UPTCL);
                   2011:        sc->stats.uptch += IXGB_READ_REG(&sc->hw, UPTCH);
                   2012:        sc->stats.vptcl += IXGB_READ_REG(&sc->hw, VPTCL);
                   2013:        sc->stats.vptch += IXGB_READ_REG(&sc->hw, VPTCH);
                   2014:        sc->stats.jptcl += IXGB_READ_REG(&sc->hw, JPTCL);
                   2015:        sc->stats.jptch += IXGB_READ_REG(&sc->hw, JPTCH);
                   2016:        sc->stats.tsctc += IXGB_READ_REG(&sc->hw, TSCTC);
                   2017:        sc->stats.tsctfc += IXGB_READ_REG(&sc->hw, TSCTFC);
                   2018:        sc->stats.ibic += IXGB_READ_REG(&sc->hw, IBIC);
                   2019:        sc->stats.lfc += IXGB_READ_REG(&sc->hw, LFC);
                   2020:        sc->stats.pfrc += IXGB_READ_REG(&sc->hw, PFRC);
                   2021:        sc->stats.pftc += IXGB_READ_REG(&sc->hw, PFTC);
                   2022:        sc->stats.mcfrc += IXGB_READ_REG(&sc->hw, MCFRC);
                   2023:
                   2024:        ifp = &sc->interface_data.ac_if;
                   2025:
                   2026:        /* Fill out the OS statistics structure */
                   2027:        ifp->if_collisions = 0;
                   2028:
                   2029:        /* Rx Errors */
                   2030:        ifp->if_ierrors =
                   2031:                sc->dropped_pkts +
                   2032:                sc->stats.crcerrs +
                   2033:                sc->stats.rnbc +
                   2034:                sc->stats.mpc +
                   2035:                sc->stats.rlec;
                   2036:
                   2037:        /* Tx Errors */
                   2038:        ifp->if_oerrors =
                   2039:                sc->watchdog_events;
                   2040: }
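
/*
 * Illustrative sketch only: several counters above are kept as
 * "low"/"high" register pairs (e.g. GORCL/GORCH).  Assuming the low
 * register carries the lower 32 bits and the high register the upper
 * bits, a single 64-bit value could be assembled as below.  The helper
 * name is hypothetical and unused here.
 */
static inline u_int64_t
ixgb_stat64_sketch(u_int32_t lo, u_int32_t hi)
{
        return (((u_int64_t)hi << 32) | lo);
}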
                   2041:
                   2042: /**********************************************************************
                   2043:  *
                   2044:  *  This routine is called only when ixgb_display_debug_stats is enabled.
                   2045:  *  It provides a way to inspect important statistics maintained by
                   2046:  *  the driver and hardware.
                   2047:  *
                   2048:  **********************************************************************/
                   2049: void
                   2050: ixgb_print_hw_stats(struct ixgb_softc *sc)
                   2051: {
                   2052:        char            buf_speed[100], buf_type[100];
                   2053:        ixgb_bus_speed  bus_speed;
                   2054:        ixgb_bus_type   bus_type;
                   2055:        const char * const unit = sc->sc_dv.dv_xname;
                   2056:
                   2057:        bus_speed = sc->hw.bus.speed;
                   2058:        bus_type = sc->hw.bus.type;
                   2059:        snprintf(buf_speed, sizeof(buf_speed),
                   2060:                bus_speed == ixgb_bus_speed_33 ? "33MHz" :
                   2061:                bus_speed == ixgb_bus_speed_66 ? "66MHz" :
                   2062:                bus_speed == ixgb_bus_speed_100 ? "100MHz" :
                   2063:                bus_speed == ixgb_bus_speed_133 ? "133MHz" :
                   2064:                "UNKNOWN");
                   2065:        printf("%s: PCI_Bus_Speed = %s\n", unit,
                   2066:                buf_speed);
                   2067:
                   2068:        snprintf(buf_type, sizeof(buf_type),
                   2069:                bus_type == ixgb_bus_type_pci ? "PCI" :
                   2070:                bus_type == ixgb_bus_type_pcix ? "PCI-X" :
                   2071:                "UNKNOWN");
                   2072:        printf("%s: PCI_Bus_Type = %s\n", unit,
                   2073:                buf_type);
                   2074:
                   2075:        printf("%s: Tx Descriptors not Avail1 = %ld\n", unit,
                   2076:                sc->no_tx_desc_avail1);
                   2077:        printf("%s: Tx Descriptors not Avail2 = %ld\n", unit,
                   2078:                sc->no_tx_desc_avail2);
                   2079:        printf("%s: Std Mbuf Failed = %ld\n", unit,
                   2080:                sc->mbuf_alloc_failed);
                   2081:        printf("%s: Std Cluster Failed = %ld\n", unit,
                   2082:                sc->mbuf_cluster_failed);
                   2083:
                   2084:        printf("%s: Defer count = %lld\n", unit,
                   2085:                (long long)sc->stats.dc);
                   2086:        printf("%s: Missed Packets = %lld\n", unit,
                   2087:                (long long)sc->stats.mpc);
                   2088:        printf("%s: Receive No Buffers = %lld\n", unit,
                   2089:                (long long)sc->stats.rnbc);
                   2090:        printf("%s: Receive length errors = %lld\n", unit,
                   2091:                (long long)sc->stats.rlec);
                   2092:        printf("%s: Crc errors = %lld\n", unit,
                   2093:                (long long)sc->stats.crcerrs);
                   2094:        printf("%s: Driver dropped packets = %ld\n", unit,
                   2095:                sc->dropped_pkts);
                   2096:
                   2097:        printf("%s: XON Rcvd = %lld\n", unit,
                   2098:                (long long)sc->stats.xonrxc);
                   2099:        printf("%s: XON Xmtd = %lld\n", unit,
                   2100:                (long long)sc->stats.xontxc);
                   2101:        printf("%s: XOFF Rcvd = %lld\n", unit,
                   2102:                (long long)sc->stats.xoffrxc);
                   2103:        printf("%s: XOFF Xmtd = %lld\n", unit,
                   2104:                (long long)sc->stats.xofftxc);
                   2105:
                   2106:        printf("%s: Good Packets Rcvd = %lld\n", unit,
                   2107:                (long long)sc->stats.gprcl);
                   2108:        printf("%s: Good Packets Xmtd = %lld\n", unit,
                   2109:                (long long)sc->stats.gptcl);
                   2110:
                   2111:        printf("%s: Jumbo frames recvd = %lld\n", unit,
                   2112:                (long long)sc->stats.jprcl);
                   2113:        printf("%s: Jumbo frames Xmtd = %lld\n", unit,
                   2114:                (long long)sc->stats.jptcl);
                   2115: }
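
/*
 * Illustrative note (a minimal sketch, not taken from this file): the
 * debug dump above would typically be gated on the
 * ixgb_display_debug_stats toggle, e.g.
 *
 *	if (ixgb_display_debug_stats)
 *		ixgb_print_hw_stats(sc);
 */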
