
Annotation of sys/dev/pci/if_em.c, Revision 1.1.1.1

1.1       nbrk        1: /**************************************************************************
                      2:
                      3: Copyright (c) 2001-2003, Intel Corporation
                      4: All rights reserved.
                      5:
                      6: Redistribution and use in source and binary forms, with or without
                      7: modification, are permitted provided that the following conditions are met:
                      8:
                      9:  1. Redistributions of source code must retain the above copyright notice,
                     10:     this list of conditions and the following disclaimer.
                     11:
                     12:  2. Redistributions in binary form must reproduce the above copyright
                     13:     notice, this list of conditions and the following disclaimer in the
                     14:     documentation and/or other materials provided with the distribution.
                     15:
                     16:  3. Neither the name of the Intel Corporation nor the names of its
                     17:     contributors may be used to endorse or promote products derived from
                     18:     this software without specific prior written permission.
                     19:
                     20: THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
                     21: AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     22: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     23: ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
                     24: LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     25: CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     26: SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     27: INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     28: CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     29: ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     30: POSSIBILITY OF SUCH DAMAGE.
                     31:
                     32: ***************************************************************************/
                     33:
                     34: /* $OpenBSD: if_em.c,v 1.172 2007/05/31 01:04:57 henning Exp $ */
                     35: /* $FreeBSD: if_em.c,v 1.46 2004/09/29 18:28:28 mlaier Exp $ */
                     36:
                     37: #include <dev/pci/if_em.h>
                     38:
                     39: /*********************************************************************
                     40:  *  Set this to one to display debug statistics
                     41:  *********************************************************************/
                     42: int             em_display_debug_stats = 0;
                     43:
                     44: /*********************************************************************
                     45:  *  Driver version
                     46:  *********************************************************************/
                     47:
                     48: char em_driver_version[] = "6.2.9";
                     49:
                     50: /*********************************************************************
                     51:  *  PCI Device ID Table
                     52:  *********************************************************************/
                     53: const struct pci_matchid em_devices[] = {
                     54:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_DPT },
                     55:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_DPT },
                     56:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_SPT },
                     57:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_SPT },
                     58:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM },
                     59:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM },
                     60:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP },
                     61:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM },
                     62:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP },
                     63:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI },
                     64:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE },
                     65:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER },
                     66:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM },
                     67:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI },
                     68:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_LF },
                     69:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE },
                     70:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542 },
                     71:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER },
                     72:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER },
                     73:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER },
                     74:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER },
                     75:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER },
                     76:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM },
                     77:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER },
                     78:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER },
                     79:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER },
                     80:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER },
                     81:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES },
                     82:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER },
                     83:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER },
                     84:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD_CPR },
                     85:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER },
                     86:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER },
                     87:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE },
                     88:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR },
                     89:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR_K },
                     90:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES },
                     91:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_2 },
                     92:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI },
                     93:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE },
                     94:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI },
                     95:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AF },
                     96:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AT },
                     97:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER },
                     98:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER },
                     99:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR },
                    100:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR_LP },
                    101:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FBR },
                    102:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES },
                    103:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER },
                    104:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER },
                    105:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES },
                    106:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI },
                    107:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E },
                    108:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT },
                    109:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_PM },
                    110:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L },
                    111:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_1 },
                    112:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_2 },
                    113:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573V_PM },
                    114:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M_AMT },
                    115:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_AMT },
                    116:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_C },
                    117:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE },
                    118:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_G },
                    119:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_GT },
                    120:        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M }
                    121: };
                    122:
                    123: /*********************************************************************
                    124:  *  Function prototypes
                    125:  *********************************************************************/
                    126: int  em_probe(struct device *, void *, void *);
                    127: void em_attach(struct device *, struct device *, void *);
                    128: void em_shutdown(void *);
                    129: int  em_intr(void *);
                    130: void em_power(int, void *);
                    131: void em_start(struct ifnet *);
                    132: int  em_ioctl(struct ifnet *, u_long, caddr_t);
                    133: void em_watchdog(struct ifnet *);
                    134: void em_init(void *);
                    135: void em_stop(void *);
                    136: void em_media_status(struct ifnet *, struct ifmediareq *);
                    137: int  em_media_change(struct ifnet *);
                    138: void em_identify_hardware(struct em_softc *);
                    139: int  em_allocate_pci_resources(struct em_softc *);
                    140: void em_free_pci_resources(struct em_softc *);
                    141: void em_local_timer(void *);
                    142: int  em_hardware_init(struct em_softc *);
                    143: void em_setup_interface(struct em_softc *);
                    144: int  em_setup_transmit_structures(struct em_softc *);
                    145: void em_initialize_transmit_unit(struct em_softc *);
                    146: int  em_setup_receive_structures(struct em_softc *);
                    147: void em_initialize_receive_unit(struct em_softc *);
                    148: void em_enable_intr(struct em_softc *);
                    149: void em_disable_intr(struct em_softc *);
                    150: void em_free_transmit_structures(struct em_softc *);
                    151: void em_free_receive_structures(struct em_softc *);
                    152: void em_update_stats_counters(struct em_softc *);
                    153: void em_txeof(struct em_softc *);
                    154: int  em_allocate_receive_structures(struct em_softc *);
                    155: int  em_allocate_transmit_structures(struct em_softc *);
                    156: void em_rxeof(struct em_softc *, int);
                    157: void em_receive_checksum(struct em_softc *, struct em_rx_desc *,
                    158:                         struct mbuf *);
                    159: #ifdef EM_CSUM_OFFLOAD
                    160: void em_transmit_checksum_setup(struct em_softc *, struct mbuf *,
                    161:                                u_int32_t *, u_int32_t *);
                    162: #endif
                    163: void em_set_promisc(struct em_softc *);
                    164: void em_set_multi(struct em_softc *);
                    165: void em_print_hw_stats(struct em_softc *);
                    166: void em_update_link_status(struct em_softc *);
                    167: int  em_get_buf(struct em_softc *, int);
                    168: int  em_encap(struct em_softc *, struct mbuf *);
                    169: void em_smartspeed(struct em_softc *);
                    170: int  em_82547_fifo_workaround(struct em_softc *, int);
                    171: void em_82547_update_fifo_head(struct em_softc *, int);
                    172: int  em_82547_tx_fifo_reset(struct em_softc *);
                    173: void em_82547_move_tail(void *arg);
                    174: void em_82547_move_tail_locked(struct em_softc *);
                    175: int  em_dma_malloc(struct em_softc *, bus_size_t, struct em_dma_alloc *,
                    176:                   int);
                    177: void em_dma_free(struct em_softc *, struct em_dma_alloc *);
                    178: int  em_is_valid_ether_addr(u_int8_t *);
                    179: u_int32_t em_fill_descriptors(u_int64_t address, u_int32_t length,
                    180:                              PDESC_ARRAY desc_array);
                    181:
                    182: /*********************************************************************
                    183:  *  OpenBSD Device Interface Entry Points
                    184:  *********************************************************************/
                    185:
                    186: struct cfattach em_ca = {
                    187:        sizeof(struct em_softc), em_probe, em_attach
                    188: };
                    189:
                    190: struct cfdriver em_cd = {
                    191:        0, "em", DV_IFNET
                    192: };
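
/*
 * Autoconf glue: em_ca gives autoconf the softc size and the match and
 * attach entry points; em_cd names the driver and marks it as a network
 * interface class device (DV_IFNET).
 */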
                    193:
                    194: static int em_smart_pwr_down = FALSE;
                    195:
                    196: /*********************************************************************
                    197:  *  Device identification routine
                    198:  *
                     199:  *  em_probe determines whether the driver should attach to an
                     200:  *  adapter, based on the adapter's PCI vendor/device ID.
                    201:  *
                    202:  *  return 0 on no match, positive on match
                    203:  *********************************************************************/
                    204:
                    205: int
                    206: em_probe(struct device *parent, void *match, void *aux)
                    207: {
                    208:        INIT_DEBUGOUT("em_probe: begin");
                    209:
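        /*
         * pci_matchbyid() compares the PCI vendor and product IDs in the
         * attach arguments against the em_devices table above; a nonzero
         * result tells autoconf that this driver matches the device.
         */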
                    210:        return (pci_matchbyid((struct pci_attach_args *)aux, em_devices,
                    211:            sizeof(em_devices)/sizeof(em_devices[0])));
                    212: }
                    213:
                    214: /*********************************************************************
                    215:  *  Device initialization routine
                    216:  *
                    217:  *  The attach entry point is called when the driver is being loaded.
                    218:  *  This routine identifies the type of hardware, allocates all resources
                    219:  *  and initializes the hardware.
                    220:  *
                    221:  *********************************************************************/
                    222:
                    223: void
                    224: em_attach(struct device *parent, struct device *self, void *aux)
                    225: {
                    226:        struct pci_attach_args *pa = aux;
                    227:        struct em_softc *sc;
                    228:        int             tsize, rsize;
                    229:
                    230:        INIT_DEBUGOUT("em_attach: begin");
                    231:
                    232:        sc = (struct em_softc *)self;
                    233:        sc->osdep.em_pa = *pa;
                    234:
                    235:        timeout_set(&sc->timer_handle, em_local_timer, sc);
                    236:        timeout_set(&sc->tx_fifo_timer_handle, em_82547_move_tail, sc);
                    237:
                    238:        /* Determine hardware revision */
                    239:        em_identify_hardware(sc);
                    240:
                    241:        /* Parameters (to be read from user) */
                    242:        sc->num_tx_desc = EM_MIN_TXD;
                    243:        sc->num_rx_desc = EM_MIN_RXD;
                    244:        sc->tx_int_delay = EM_TIDV;
                    245:        sc->tx_abs_int_delay = EM_TADV;
                    246:        sc->rx_int_delay = EM_RDTR;
                    247:        sc->rx_abs_int_delay = EM_RADV;
                    248:        sc->hw.autoneg = DO_AUTO_NEG;
                    249:        sc->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
                    250:        sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
                    251:        sc->hw.tbi_compatibility_en = TRUE;
                    252:        sc->rx_buffer_len = EM_RXBUFFER_2048;
                    253:
                    254:        sc->hw.phy_init_script = 1;
                    255:        sc->hw.phy_reset_disable = FALSE;
                    256:
                    257: #ifndef EM_MASTER_SLAVE
                    258:        sc->hw.master_slave = em_ms_hw_default;
                    259: #else
                    260:        sc->hw.master_slave = EM_MASTER_SLAVE;
                    261: #endif
                    262:
                    263:        /*
                    264:         * This controls when hardware reports transmit completion
                    265:         * status.
                    266:         */
                    267:        sc->hw.report_tx_early = 1;
                    268:
                    269:        if (em_allocate_pci_resources(sc)) {
                    270:                printf("%s: Allocation of PCI resources failed\n",
                    271:                    sc->sc_dv.dv_xname);
                    272:                goto err_pci;
                    273:        }
                    274:
                    275:        /* Initialize eeprom parameters */
                    276:        em_init_eeprom_params(&sc->hw);
                    277:
                    278:        /*
                    279:         * Set the max frame size assuming standard Ethernet
                    280:         * sized frames.
                    281:         */
                    282:        switch (sc->hw.mac_type) {
                    283:                case em_82573:
                    284:                {
                    285:                        uint16_t        eeprom_data = 0;
                    286:
                    287:                        /*
                    288:                         * 82573 only supports Jumbo frames
                    289:                         * if ASPM is disabled.
                    290:                         */
                    291:                        em_read_eeprom(&sc->hw, EEPROM_INIT_3GIO_3,
                    292:                            1, &eeprom_data);
                    293:                        if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
                    294:                                sc->hw.max_frame_size = ETHER_MAX_LEN;
                    295:                                break;
                    296:                        }
                    297:                        /* Allow Jumbo frames - FALLTHROUGH */
                    298:                }
                    299:                case em_82571:
                    300:                case em_82572:
                    301:                case em_80003es2lan:    /* Limit Jumbo Frame size */
                    302:                        sc->hw.max_frame_size = 9234;
                    303:                        break;
                    304:                case em_ich8lan:
                    305:                        /* ICH8 does not support jumbo frames */
                    306:                        sc->hw.max_frame_size = ETHER_MAX_LEN;
                    307:                        break;
                    308:                default:
                    309:                        sc->hw.max_frame_size =
                    310:                            MAX_JUMBO_FRAME_SIZE;
                    311:        }
                    312:
                    313:        sc->hw.min_frame_size =
                    314:            ETHER_MIN_LEN + ETHER_CRC_LEN;
                    315:
                    316:        if (sc->hw.mac_type >= em_82544)
                    317:            tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
                    318:                EM_MAX_TXD * sizeof(struct em_tx_desc));
                    319:        else
                    320:            tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
                    321:                EM_MAX_TXD_82543 * sizeof(struct em_tx_desc));
                    322:        tsize = EM_ROUNDUP(tsize, PAGE_SIZE);
                    323:
                    324:        /* Allocate Transmit Descriptor ring */
                    325:        if (em_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
                    326:                printf("%s: Unable to allocate tx_desc memory\n",
                    327:                       sc->sc_dv.dv_xname);
                    328:                goto err_tx_desc;
                    329:        }
                    330:        sc->tx_desc_base = (struct em_tx_desc *)sc->txdma.dma_vaddr;
                    331:
                    332:        rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
                    333:            EM_MAX_RXD * sizeof(struct em_rx_desc));
                    334:        rsize = EM_ROUNDUP(rsize, PAGE_SIZE);
                    335:
                    336:        /* Allocate Receive Descriptor ring */
                    337:        if (em_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
                    338:                printf("%s: Unable to allocate rx_desc memory\n",
                    339:                       sc->sc_dv.dv_xname);
                    340:                goto err_rx_desc;
                    341:        }
                    342:        sc->rx_desc_base = (struct em_rx_desc *) sc->rxdma.dma_vaddr;
                    343:
                    344:        /* Initialize the hardware */
                    345:        if (em_hardware_init(sc)) {
                    346:                printf("%s: Unable to initialize the hardware\n",
                    347:                       sc->sc_dv.dv_xname);
                    348:                goto err_hw_init;
                    349:        }
                    350:
                    351:        /* Copy the permanent MAC address out of the EEPROM */
                    352:        if (em_read_mac_addr(&sc->hw) < 0) {
                    353:                printf("%s: EEPROM read error while reading mac address\n",
                    354:                       sc->sc_dv.dv_xname);
                    355:                goto err_mac_addr;
                    356:        }
                    357:
                    358:        if (!em_is_valid_ether_addr(sc->hw.mac_addr)) {
                    359:                printf("%s: Invalid mac address\n", sc->sc_dv.dv_xname);
                    360:                goto err_mac_addr;
                    361:        }
                    362:
                    363:        bcopy(sc->hw.mac_addr, sc->interface_data.ac_enaddr,
                    364:              ETHER_ADDR_LEN);
                    365:
                    366:        /* Setup OS specific network interface */
                    367:        em_setup_interface(sc);
                    368:
                    369:        /* Initialize statistics */
                    370:        em_clear_hw_cntrs(&sc->hw);
                    371:        em_update_stats_counters(sc);
                    372:        sc->hw.get_link_status = 1;
                    373:        em_update_link_status(sc);
                    374:
                    375:        printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));
                    376:
                    377:        /* Indicate SOL/IDER usage */
                    378:        if (em_check_phy_reset_block(&sc->hw))
                    379:                printf("%s: PHY reset is blocked due to SOL/IDER session.\n",
                    380:                    sc->sc_dv.dv_xname);
                    381:
                    382:        /* Identify 82544 on PCI-X */
                    383:        em_get_bus_info(&sc->hw);
                    384:        if (sc->hw.bus_type == em_bus_type_pcix &&
                    385:            sc->hw.mac_type == em_82544)
                    386:                sc->pcix_82544 = TRUE;
                    387:         else
                    388:                sc->pcix_82544 = FALSE;
                    389:        INIT_DEBUGOUT("em_attach: end");
                    390:        sc->sc_powerhook = powerhook_establish(em_power, sc);
                    391:        sc->sc_shutdownhook = shutdownhook_establish(em_shutdown, sc);
                    392:        return;
                    393:
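/*
 * Error unwind: the labels below fall through so that everything
 * allocated before the failure point is released, first the rx
 * descriptor ring, then the tx ring, then the PCI resources.
 */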
                    394: err_mac_addr:
                    395: err_hw_init:
                    396:        em_dma_free(sc, &sc->rxdma);
                    397: err_rx_desc:
                    398:        em_dma_free(sc, &sc->txdma);
                    399: err_tx_desc:
                    400: err_pci:
                    401:        em_free_pci_resources(sc);
                    402: }
                    403:
                    404: void
                    405: em_power(int why, void *arg)
                    406: {
                    407:        struct em_softc *sc = (struct em_softc *)arg;
                    408:        struct ifnet *ifp;
                    409:
                    410:        if (why == PWR_RESUME) {
                    411:                ifp = &sc->interface_data.ac_if;
                    412:                if (ifp->if_flags & IFF_UP)
                    413:                        em_init(sc);
                    414:        }
                    415: }
                    416:
                    417: /*********************************************************************
                    418:  *
                    419:  *  Shutdown entry point
                    420:  *
                    421:  **********************************************************************/
                    422:
                    423: void
                    424: em_shutdown(void *arg)
                    425: {
                    426:        struct em_softc *sc = arg;
                    427:
                    428:        em_stop(sc);
                    429: }
                    430:
                    431: /*********************************************************************
                    432:  *  Transmit entry point
                    433:  *
                    434:  *  em_start is called by the stack to initiate a transmit.
                    435:  *  The driver will remain in this routine as long as there are
                    436:  *  packets to transmit and transmit resources are available.
                     437:  *  If transmit resources are not available, the stack is notified
                     438:  *  (IFF_OACTIVE is set) and the packet stays on the send queue.
                    439:  **********************************************************************/
                    440:
                    441: void
                    442: em_start(struct ifnet *ifp)
                    443: {
                    444:        struct mbuf    *m_head;
                    445:        struct em_softc *sc = ifp->if_softc;
                    446:
                    447:        if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
                    448:                return;
                    449:
                    450:        if (!sc->link_active)
                    451:                return;
                    452:
                    453:        for (;;) {
                    454:                IFQ_POLL(&ifp->if_snd, m_head);
                    455:
                    456:                if (m_head == NULL)
                    457:                        break;
                    458:
                    459:                if (em_encap(sc, m_head)) {
                    460:                        ifp->if_flags |= IFF_OACTIVE;
                    461:                        break;
                    462:                }
                    463:
                    464:                IFQ_DEQUEUE(&ifp->if_snd, m_head);
                    465:
                    466: #if NBPFILTER > 0
                    467:                /* Send a copy of the frame to the BPF listener */
                    468:                if (ifp->if_bpf)
                    469:                        bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
                    470: #endif
                    471:
                    472:                /* Set timeout in case hardware has problems transmitting */
                    473:                ifp->if_timer = EM_TX_TIMEOUT;
                    474:        }
                    475: }
                    476:
                    477: /*********************************************************************
                    478:  *  Ioctl entry point
                    479:  *
                    480:  *  em_ioctl is called when the user wants to configure the
                    481:  *  interface.
                    482:  *
                    483:  *  return 0 on success, positive on failure
                    484:  **********************************************************************/
                    485:
                    486: int
                    487: em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
                    488: {
                    489:        int             error = 0;
                    490:        struct ifreq   *ifr = (struct ifreq *) data;
                    491:        struct ifaddr  *ifa = (struct ifaddr *)data;
                    492:        struct em_softc *sc = ifp->if_softc;
                    493:        int s;
                    494:
                    495:        s = splnet();
                    496:
                    497:        if ((error = ether_ioctl(ifp, &sc->interface_data, command, data)) > 0) {
                    498:                splx(s);
                    499:                return (error);
                    500:        }
                    501:
                    502:        switch (command) {
                    503:        case SIOCSIFADDR:
                    504:                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
                    505:                               "Addr)");
                    506:                if (!(ifp->if_flags & IFF_UP)) {
                    507:                        ifp->if_flags |= IFF_UP;
                    508:                        em_init(sc);
                    509:                }
                    510: #ifdef INET
                    511:                if (ifa->ifa_addr->sa_family == AF_INET)
                    512:                        arp_ifinit(&sc->interface_data, ifa);
                    513: #endif /* INET */
                    514:                break;
                    515:        case SIOCSIFMTU:
                    516:                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
                    517:                if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
                    518:                        error = EINVAL;
                    519:                else if (ifp->if_mtu != ifr->ifr_mtu)
                    520:                        ifp->if_mtu = ifr->ifr_mtu;
                    521:                break;
                    522:        case SIOCSIFFLAGS:
                    523:                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
                    524:                if (ifp->if_flags & IFF_UP) {
                    525:                        /*
                    526:                         * If only the PROMISC or ALLMULTI flag changes, then
                    527:                         * don't do a full re-init of the chip, just update
                    528:                         * the Rx filter.
                    529:                         */
                    530:                        if ((ifp->if_flags & IFF_RUNNING) &&
                    531:                            ((ifp->if_flags ^ sc->if_flags) &
                    532:                             (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
                    533:                                em_set_promisc(sc);
                    534:                        } else {
                    535:                                if (!(ifp->if_flags & IFF_RUNNING))
                    536:                                        em_init(sc);
                    537:                        }
                    538:                } else {
                    539:                        if (ifp->if_flags & IFF_RUNNING)
                    540:                                em_stop(sc);
                    541:                }
                    542:                sc->if_flags = ifp->if_flags;
                    543:                break;
                    544:        case SIOCADDMULTI:
                    545:        case SIOCDELMULTI:
                    546:                IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
                    547:                error = (command == SIOCADDMULTI)
                    548:                        ? ether_addmulti(ifr, &sc->interface_data)
                    549:                        : ether_delmulti(ifr, &sc->interface_data);
                    550:
                    551:                if (error == ENETRESET) {
                    552:                        if (ifp->if_flags & IFF_RUNNING) {
                    553:                                em_disable_intr(sc);
                    554:                                em_set_multi(sc);
                    555:                                if (sc->hw.mac_type == em_82542_rev2_0)
                    556:                                        em_initialize_receive_unit(sc);
                    557:                                em_enable_intr(sc);
                    558:                        }
                    559:                        error = 0;
                    560:                }
                    561:                break;
                    562:        case SIOCSIFMEDIA:
                    563:                /* Check SOL/IDER usage */
                    564:                if (em_check_phy_reset_block(&sc->hw)) {
                    565:                        printf("%s: Media change is blocked due to SOL/IDER session.\n",
                    566:                            sc->sc_dv.dv_xname);
                    567:                        break;
                    568:                }
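                /* FALLTHROUGH */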
                    569:        case SIOCGIFMEDIA:
                    570:                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
                    571:                error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                    572:                break;
                    573:        default:
                    574:                IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
                    575:                error = ENOTTY;
                    576:        }
                    577:
                    578:        splx(s);
                    579:        return (error);
                    580: }
                    581:
                    582: /*********************************************************************
                    583:  *  Watchdog entry point
                    584:  *
                    585:  *  This routine is called whenever hardware quits transmitting.
                    586:  *
                    587:  **********************************************************************/
                    588:
                    589: void
                    590: em_watchdog(struct ifnet *ifp)
                    591: {
                    592:        struct em_softc *sc = ifp->if_softc;
                    593:
                    594:        /* If we are in this routine because of pause frames, then
                    595:         * don't reset the hardware.
                    596:         */
                    597:        if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_TXOFF) {
                    598:                ifp->if_timer = EM_TX_TIMEOUT;
                    599:                return;
                    600:        }
                    601:
                    602:        printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
                    603:
                    604:        em_init(sc);
                    605:
                    606:        sc->watchdog_events++;
                    607: }
                    608:
                    609: /*********************************************************************
                    610:  *  Init entry point
                    611:  *
                     612:  *  This routine is used in two ways. It is used by the stack as the
                     613:  *  init entry point in the network interface structure. It is also used
                    614:  *  by the driver as a hw/sw initialization routine to get to a
                    615:  *  consistent state.
                    616:  *
                    617:  **********************************************************************/
                    618:
                    619: void
                    620: em_init(void *arg)
                    621: {
                    622:        struct em_softc *sc = arg;
                    623:        struct ifnet   *ifp = &sc->interface_data.ac_if;
                    624:        uint32_t        pba;
                    625:        int s;
                    626:
                    627:        s = splnet();
                    628:
                    629:        INIT_DEBUGOUT("em_init: begin");
                    630:
                    631:        em_stop(sc);
                    632:
                    633:        if (ifp->if_flags & IFF_UP) {
                    634:                if (sc->hw.mac_type >= em_82544)
                    635:                    sc->num_tx_desc = EM_MAX_TXD;
                    636:                else
                    637:                    sc->num_tx_desc = EM_MAX_TXD_82543;
                    638:                sc->num_rx_desc = EM_MAX_RXD;
                    639:        } else {
                    640:                sc->num_tx_desc = EM_MIN_TXD;
                    641:                sc->num_rx_desc = EM_MIN_RXD;
                    642:        }
                    643:        IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
                    644:
                    645:        /*
                    646:         * Packet Buffer Allocation (PBA)
                     647:         * Writing PBA sets the receive portion of the buffer;
                     648:         * the remainder is used for the transmit buffer.
                    649:         *
                    650:         * Devices before the 82547 had a Packet Buffer of 64K.
                    651:         *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
                     652:         * On the 82547 the buffer was reduced to 40K.
                    653:         *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
                    654:         *   Note: default does not leave enough room for Jumbo Frame >10k.
                    655:         */
                    656:        switch (sc->hw.mac_type) {
                    657:        case em_82547:
                    658:        case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
                    659:                if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
                    660:                        pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
                    661:                else
                    662:                        pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
                    663:                sc->tx_fifo_head = 0;
                    664:                sc->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
                    665:                sc->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
                    666:                break;
                    667:        case em_82571:
                    668:        case em_82572: /* Total Packet Buffer on these is 48k */
                    669:        case em_80003es2lan:
                    670:                pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
                    671:                break;
                    672:        case em_82573: /* 82573: Total Packet Buffer is 32K */
                    673:                /* Jumbo frames not supported */
                    674:                pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
                    675:                break;
                    676:        case em_ich8lan:
                    677:                pba = E1000_PBA_8K;
                    678:                break;
                    679:        default:
                    680:                /* Devices before 82547 had a Packet Buffer of 64K.   */
                    681:                if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
                    682:                        pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
                    683:                else
                    684:                        pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
                    685:        }
                    686:        INIT_DEBUGOUT1("em_init: pba=%dK",pba);
                    687:        E1000_WRITE_REG(&sc->hw, PBA, pba);
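        /*
         * Worked example for the 82547 case above, assuming the
         * E1000_PBA_* constants are counts of kilobytes and
         * EM_PBA_BYTES_SHIFT converts kilobytes to bytes: with standard
         * frames pba = E1000_PBA_30K, so 30K of the 40K packet buffer is
         * given to Rx and the remaining 40K - 30K = 10K becomes the
         * driver-managed Tx FIFO (tx_fifo_size).
         */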
                    688:
                     689:        /* Get the latest MAC address; the user may have set an LAA */
                    690:        bcopy(sc->interface_data.ac_enaddr, sc->hw.mac_addr,
                    691:              ETHER_ADDR_LEN);
                    692:
                    693:        /* Initialize the hardware */
                    694:        if (em_hardware_init(sc)) {
                    695:                printf("%s: Unable to initialize the hardware\n",
                    696:                       sc->sc_dv.dv_xname);
                    697:                splx(s);
                    698:                return;
                    699:        }
                    700:        em_update_link_status(sc);
                    701:
                    702:        /* Prepare transmit descriptors and buffers */
                    703:        if (em_setup_transmit_structures(sc)) {
                    704:                printf("%s: Could not setup transmit structures\n",
                    705:                       sc->sc_dv.dv_xname);
                    706:                em_stop(sc);
                    707:                splx(s);
                    708:                return;
                    709:        }
                    710:        em_initialize_transmit_unit(sc);
                    711:
                    712:        /* Setup Multicast table */
                    713:        em_set_multi(sc);
                    714:
                    715:        /* Prepare receive descriptors and buffers */
                    716:        if (em_setup_receive_structures(sc)) {
                    717:                printf("%s: Could not setup receive structures\n",
                    718:                       sc->sc_dv.dv_xname);
                    719:                em_stop(sc);
                    720:                splx(s);
                    721:                return;
                    722:        }
                    723:        em_initialize_receive_unit(sc);
                    724:
                    725:        /* Don't lose promiscuous settings */
                    726:        em_set_promisc(sc);
                    727:
                    728:        ifp->if_flags |= IFF_RUNNING;
                    729:        ifp->if_flags &= ~IFF_OACTIVE;
                    730:
                    731:        timeout_add(&sc->timer_handle, hz);
                    732:        em_clear_hw_cntrs(&sc->hw);
                    733:        em_enable_intr(sc);
                    734:
                    735:        /* Don't reset the phy next time init gets called */
                    736:        sc->hw.phy_reset_disable = TRUE;
                    737:
                    738:        splx(s);
                    739: }
                    740:
                    741: /*********************************************************************
                    742:  *
                    743:  *  Interrupt Service routine
                    744:  *
                    745:  **********************************************************************/
                    746: int
                    747: em_intr(void *arg)
                    748: {
                    749:        struct em_softc  *sc = arg;
                    750:        struct ifnet    *ifp;
                    751:        u_int32_t       reg_icr, test_icr;
                    752:        int claimed = 0;
                    753:
                    754:        ifp = &sc->interface_data.ac_if;
                    755:
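        /*
         * Reading ICR returns and clears the pending interrupt causes.
         * On 82571 and later parts the INT_ASSERTED bit reports whether
         * this device actually raised the interrupt, so it is used below
         * to decide whether to claim a possibly shared interrupt line.
         */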
                    756:        for (;;) {
                    757:                test_icr = reg_icr = E1000_READ_REG(&sc->hw, ICR);
                    758:                if (sc->hw.mac_type >= em_82571)
                    759:                        test_icr = (reg_icr & E1000_ICR_INT_ASSERTED);
                    760:                if (!test_icr)
                    761:                        break;
                    762:
                    763:                claimed = 1;
                    764:
                    765:                if (ifp->if_flags & IFF_RUNNING) {
                    766:                        em_rxeof(sc, -1);
                    767:                        em_txeof(sc);
                    768:                }
                    769:
                    770:                /* Link status change */
                    771:                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                    772:                        timeout_del(&sc->timer_handle);
                    773:                        sc->hw.get_link_status = 1;
                    774:                        em_check_for_link(&sc->hw);
                    775:                        em_update_link_status(sc);
                    776:                        timeout_add(&sc->timer_handle, hz);
                    777:                }
                    778:
                    779:                if (reg_icr & E1000_ICR_RXO)
                    780:                        sc->rx_overruns++;
                    781:        }
                    782:
                    783:        if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
                    784:                em_start(ifp);
                    785:
                    786:        return (claimed);
                    787: }
                    788:
                    789: /*********************************************************************
                    790:  *
                    791:  *  Media Ioctl callback
                    792:  *
                    793:  *  This routine is called whenever the user queries the status of
                    794:  *  the interface using ifconfig.
                    795:  *
                    796:  **********************************************************************/
                    797: void
                    798: em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
                    799: {
                    800:        struct em_softc *sc = ifp->if_softc;
                    801:        u_char fiber_type = IFM_1000_SX;
                    802:        u_int16_t ar, lpar, gsr;
                    803:
                    804:        INIT_DEBUGOUT("em_media_status: begin");
                    805:
                    806:        em_check_for_link(&sc->hw);
                    807:        em_update_link_status(sc);
                    808:
                    809:        ifmr->ifm_status = IFM_AVALID;
                    810:        ifmr->ifm_active = IFM_ETHER;
                    811:
                    812:        if (!sc->link_active) {
                    813:                ifmr->ifm_active |= IFM_NONE;
                    814:                return;
                    815:        }
                    816:
                    817:        ifmr->ifm_status |= IFM_ACTIVE;
                    818:
                    819:        if (sc->hw.media_type == em_media_type_fiber ||
                    820:            sc->hw.media_type == em_media_type_internal_serdes) {
                    821:                if (sc->hw.mac_type == em_82545)
                    822:                        fiber_type = IFM_1000_LX;
                    823:                ifmr->ifm_active |= fiber_type | IFM_FDX;
                    824:        } else {
                    825:                switch (sc->link_speed) {
                    826:                case 10:
                    827:                        ifmr->ifm_active |= IFM_10_T;
                    828:                        break;
                    829:                case 100:
                    830:                        ifmr->ifm_active |= IFM_100_TX;
                    831:                        break;
                    832:                case 1000:
                    833:                        ifmr->ifm_active |= IFM_1000_T;
                    834:                        break;
                    835:                }
                    836:
                    837:                if (sc->link_duplex == FULL_DUPLEX)
                    838:                        ifmr->ifm_active |= IFM_FDX;
                    839:                else
                    840:                        ifmr->ifm_active |= IFM_HDX;
                    841:
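                /*
                 * Resolve flow control from the autonegotiation results
                 * (IEEE 802.3 Annex 28B): symmetric pause when both ends
                 * advertise PAUSE; otherwise the ASM_DIR bits decide
                 * whether only Tx pause or only Rx pause is in effect,
                 * reported below via IFM_ETH_TXPAUSE and IFM_ETH_RXPAUSE.
                 */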
                    842:                if (ifmr->ifm_active & IFM_FDX) {
                    843:                        em_read_phy_reg(&sc->hw, PHY_AUTONEG_ADV, &ar);
                    844:                        em_read_phy_reg(&sc->hw, PHY_LP_ABILITY, &lpar);
                    845:
                    846:                        if ((ar & NWAY_AR_PAUSE) && (lpar & NWAY_LPAR_PAUSE))
                    847:                                ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE |
                    848:                                                    IFM_ETH_RXPAUSE;
                    849:                        else if (!(ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
                    850:                            (lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
                    851:                                ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
                    852:                        else if ((ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
                    853:                            !(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
                    854:                                ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
                    855:                }
                    856:
                    857:                if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_1000_T) {
                    858:                        em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &gsr);
                    859:                        if (gsr & SR_1000T_MS_CONFIG_RES)
                    860:                                ifmr->ifm_active |= IFM_ETH_MASTER;
                    861:                }
                    862:        }
                    863: }
                    864:
                    865: /*********************************************************************
                    866:  *
                    867:  *  Media Ioctl callback
                    868:  *
                    869:  *  This routine is called when the user changes speed/duplex using
                     870:  *  media/mediaopt options with ifconfig.
                    871:  *
                    872:  **********************************************************************/
                    873: int
                    874: em_media_change(struct ifnet *ifp)
                    875: {
                    876:        struct em_softc *sc = ifp->if_softc;
                    877:        struct ifmedia  *ifm = &sc->media;
                    878:
                    879:        INIT_DEBUGOUT("em_media_change: begin");
                    880:
                    881:        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                    882:                return (EINVAL);
                    883:
                    884:        switch (IFM_SUBTYPE(ifm->ifm_media)) {
                    885:        case IFM_AUTO:
                    886:                sc->hw.autoneg = DO_AUTO_NEG;
                    887:                sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
                    888:                break;
                    889:        case IFM_1000_LX:
                    890:        case IFM_1000_SX:
                    891:        case IFM_1000_T:
                    892:                sc->hw.autoneg = DO_AUTO_NEG;
                    893:                sc->hw.autoneg_advertised = ADVERTISE_1000_FULL;
                    894:                break;
                    895:        case IFM_100_TX:
                    896:                sc->hw.autoneg = FALSE;
                    897:                sc->hw.autoneg_advertised = 0;
                    898:                if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
                    899:                        sc->hw.forced_speed_duplex = em_100_full;
                    900:                else
                    901:                        sc->hw.forced_speed_duplex = em_100_half;
                    902:                break;
                    903:        case IFM_10_T:
                    904:                sc->hw.autoneg = FALSE;
                    905:                sc->hw.autoneg_advertised = 0;
                    906:                if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
                    907:                        sc->hw.forced_speed_duplex = em_10_full;
                    908:                else
                    909:                        sc->hw.forced_speed_duplex = em_10_half;
                    910:                break;
                    911:        default:
                    912:                printf("%s: Unsupported media type\n", sc->sc_dv.dv_xname);
                    913:        }
                    914:
                    915:        /*
                    916:         * As the speed/duplex settings may have changed we need to
                    917:         * reset the PHY.
                    918:         */
                    919:        sc->hw.phy_reset_disable = FALSE;
                    920:
                    921:        em_init(sc);
                    922:
                    923:        return (0);
                    924: }
                    925:
                    926: /*********************************************************************
                    927:  *
                    928:  *  This routine maps the mbufs to tx descriptors.
                    929:  *
                    930:  *  return 0 on success, positive on failure
                    931:  **********************************************************************/
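                          /*
                           * Outline of the work done below: reclaim descriptors if we are
                           * running low, DMA-map the mbuf chain, emit one TX descriptor per
                           * DMA segment (splitting segments for the 82544 PCI-X erratum),
                           * mark the last descriptor with EOP|RS, and finally advance the
                           * hardware tail (TDT) to hand the frame to the controller.
                           */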
                    932: int
                    933: em_encap(struct em_softc *sc, struct mbuf *m_head)
                    934: {
                    935:        u_int32_t       txd_upper;
                    936:        u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
                    937:        int             i, j, first, error = 0, last = 0;
                    938:        bus_dmamap_t    map;
                    939:
                    940:        /* For 82544 Workaround */
                    941:        DESC_ARRAY              desc_array;
                    942:        u_int32_t               array_elements;
                    943:        u_int32_t               counter;
                    944:
                    945:        struct em_buffer   *tx_buffer, *tx_buffer_mapped;
                    946:        struct em_tx_desc *current_tx_desc = NULL;
                    947:
                    948:        /*
                    949:         * Force a cleanup if number of TX descriptors
                    950:         * available hits the threshold
                    951:         */
                    952:        if (sc->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                    953:                em_txeof(sc);
                     954:                /* Do we now have at least the minimum? */
                    955:                if (sc->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
                    956:                        sc->no_tx_desc_avail1++;
                    957:                        return (ENOBUFS);
                    958:                }
                    959:        }
                    960:
                    961:        /*
                    962:         * Map the packet for DMA.
                    963:         *
                     964:         * Capture the first descriptor index;
                     965:         * this descriptor will hold the index
                     966:         * of the EOP descriptor, which is the only
                     967:         * one that now gets a DONE bit writeback.
                    968:         */
                    969:        first = sc->next_avail_tx_desc;
                    970:        tx_buffer = &sc->tx_buffer_area[first];
                    971:        tx_buffer_mapped = tx_buffer;
                    972:        map = tx_buffer->map;
                    973:
                    974:        error = bus_dmamap_load_mbuf(sc->txtag, map, m_head, BUS_DMA_NOWAIT);
                    975:        if (error != 0) {
                    976:                sc->no_tx_dma_setup++;
                    977:                return (error);
                    978:        }
                     979:        EM_KASSERT(map->dm_nsegs != 0, ("em_encap: empty packet"));
                    980:
                    981:        if (map->dm_nsegs > sc->num_tx_desc_avail - 2)
                    982:                goto fail;
                    983:
                    984: #ifdef EM_CSUM_OFFLOAD
                    985:        if (sc->hw.mac_type >= em_82543)
                    986:                em_transmit_checksum_setup(sc, m_head, &txd_upper, &txd_lower);
                    987:        else
                    988:                txd_upper = txd_lower = 0;
                    989: #else
                    990:        txd_upper = txd_lower = 0;
                    991: #endif
                    992:
                    993:        i = sc->next_avail_tx_desc;
                    994:        if (sc->pcix_82544)
                    995:                txd_saved = i;
                    996:
                    997:        for (j = 0; j < map->dm_nsegs; j++) {
                    998:                /* If sc is 82544 and on PCI-X bus */
                    999:                if (sc->pcix_82544) {
                   1000:                        /*
                   1001:                         * Check the Address and Length combination and
                   1002:                         * split the data accordingly
                   1003:                         */
                   1004:                        array_elements = em_fill_descriptors(map->dm_segs[j].ds_addr,
                   1005:                                                             map->dm_segs[j].ds_len,
                   1006:                                                             &desc_array);
                   1007:                        for (counter = 0; counter < array_elements; counter++) {
                   1008:                                if (txd_used == sc->num_tx_desc_avail) {
                   1009:                                        sc->next_avail_tx_desc = txd_saved;
                   1010:                                        goto fail;
                   1011:                                }
                   1012:                                tx_buffer = &sc->tx_buffer_area[i];
                   1013:                                current_tx_desc = &sc->tx_desc_base[i];
                   1014:                                current_tx_desc->buffer_addr = htole64(
                   1015:                                        desc_array.descriptor[counter].address);
                   1016:                                current_tx_desc->lower.data = htole32(
                   1017:                                        (sc->txd_cmd | txd_lower |
                   1018:                                         (u_int16_t)desc_array.descriptor[counter].length));
                   1019:                                current_tx_desc->upper.data = htole32((txd_upper));
                   1020:                                last = i;
                   1021:                                if (++i == sc->num_tx_desc)
                   1022:                                        i = 0;
                   1023:
                   1024:                                tx_buffer->m_head = NULL;
                   1025:                                tx_buffer->next_eop = -1;
                   1026:                                txd_used++;
                   1027:                        }
                   1028:                } else {
                   1029:                        tx_buffer = &sc->tx_buffer_area[i];
                   1030:                        current_tx_desc = &sc->tx_desc_base[i];
                   1031:
                   1032:                        current_tx_desc->buffer_addr = htole64(map->dm_segs[j].ds_addr);
                   1033:                        current_tx_desc->lower.data = htole32(
                   1034:                                sc->txd_cmd | txd_lower | map->dm_segs[j].ds_len);
                   1035:                        current_tx_desc->upper.data = htole32(txd_upper);
                   1036:                        last = i;
                   1037:                        if (++i == sc->num_tx_desc)
                   1038:                                i = 0;
                   1039:
                   1040:                        tx_buffer->m_head = NULL;
                   1041:                        tx_buffer->next_eop = -1;
                   1042:                }
                   1043:        }
                   1044:
                   1045:        sc->next_avail_tx_desc = i;
                   1046:        if (sc->pcix_82544)
                   1047:                sc->num_tx_desc_avail -= txd_used;
                   1048:        else
                   1049:                sc->num_tx_desc_avail -= map->dm_nsegs;
                   1050:
                   1051:        tx_buffer->m_head = m_head;
                   1052:        tx_buffer_mapped->map = tx_buffer->map;
                   1053:        tx_buffer->map = map;
                   1054:        bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
                   1055:            BUS_DMASYNC_PREWRITE);
                   1056:
                   1057:        /*
                   1058:         * Last Descriptor of Packet
                   1059:         * needs End Of Packet (EOP)
                   1060:         * and Report Status (RS)
                   1061:         */
                   1062:        current_tx_desc->lower.data |=
                   1063:            htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
                   1064:
                   1065:        /*
                   1066:         * Keep track in the first buffer which
                   1067:         * descriptor will be written back
                   1068:         */
                   1069:        tx_buffer = &sc->tx_buffer_area[first];
                   1070:        tx_buffer->next_eop = last;
                   1071:
                   1072:        /*
                   1073:         * Advance the Transmit Descriptor Tail (Tdt),
                   1074:         * this tells the E1000 that this frame is
                   1075:         * available to transmit.
                   1076:         */
                   1077:        bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
                   1078:            sc->txdma.dma_map->dm_mapsize,
                   1079:            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   1080:        if (sc->hw.mac_type == em_82547 &&
                   1081:            sc->link_duplex == HALF_DUPLEX) {
                   1082:                em_82547_move_tail_locked(sc);
                   1083:        } else {
                   1084:                E1000_WRITE_REG(&sc->hw, TDT, i);
                   1085:                if (sc->hw.mac_type == em_82547)
                   1086:                        em_82547_update_fifo_head(sc, m_head->m_pkthdr.len);
                   1087:        }
                   1088:
                   1089:        return (0);
                   1090:
                   1091: fail:
                   1092:        sc->no_tx_desc_avail2++;
                   1093:        bus_dmamap_unload(sc->txtag, map);
                   1094:        return (ENOBUFS);
                   1095: }
                   1096:
                   1097: /*********************************************************************
                   1098:  *
                    1099:  * 82547 workaround to avoid a controller hang in half-duplex environments.
                    1100:  * The workaround is to avoid queuing a large packet that would span the
                    1101:  * internal Tx FIFO ring boundary.  When such a packet is pending we need
                    1102:  * to reset the FIFO pointers, and we do that only when the FIFO is quiescent.
                   1103:  *
                   1104:  **********************************************************************/
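                          /*
                           * The function below walks the descriptors between the hardware
                           * tail (TDT) and the software tail, accumulating each frame's
                           * length.  At every EOP it asks em_82547_fifo_workaround() whether
                           * the frame would wrap the Tx FIFO; if so the tail update is
                           * deferred to the tx_fifo timer, otherwise TDT is advanced and the
                           * FIFO head estimate is updated.
                           */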
                   1105: void
                   1106: em_82547_move_tail_locked(struct em_softc *sc)
                   1107: {
                   1108:        uint16_t hw_tdt;
                   1109:        uint16_t sw_tdt;
                   1110:        struct em_tx_desc *tx_desc;
                   1111:        uint16_t length = 0;
                   1112:        boolean_t eop = 0;
                   1113:
                   1114:        hw_tdt = E1000_READ_REG(&sc->hw, TDT);
                   1115:        sw_tdt = sc->next_avail_tx_desc;
                   1116:
                   1117:        while (hw_tdt != sw_tdt) {
                   1118:                tx_desc = &sc->tx_desc_base[hw_tdt];
                   1119:                length += tx_desc->lower.flags.length;
                   1120:                eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
                   1121:                if (++hw_tdt == sc->num_tx_desc)
                   1122:                        hw_tdt = 0;
                   1123:
                   1124:                if (eop) {
                   1125:                        if (em_82547_fifo_workaround(sc, length)) {
                   1126:                                sc->tx_fifo_wrk_cnt++;
                   1127:                                timeout_add(&sc->tx_fifo_timer_handle, 1);
                   1128:                                break;
                   1129:                        }
                   1130:                        E1000_WRITE_REG(&sc->hw, TDT, hw_tdt);
                   1131:                        em_82547_update_fifo_head(sc, length);
                   1132:                        length = 0;
                   1133:                }
                   1134:        }
                   1135: }
                   1136:
                   1137: void
                   1138: em_82547_move_tail(void *arg)
                   1139: {
                   1140:        struct em_softc *sc = arg;
                   1141:        int s;
                   1142:
                   1143:        s = splnet();
                   1144:        em_82547_move_tail_locked(sc);
                   1145:        splx(s);
                   1146: }
                   1147:
                   1148: int
                   1149: em_82547_fifo_workaround(struct em_softc *sc, int len)
                   1150: {
                   1151:        int fifo_space, fifo_pkt_len;
                   1152:
                   1153:        fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
                   1154:
                   1155:        if (sc->link_duplex == HALF_DUPLEX) {
                   1156:                fifo_space = sc->tx_fifo_size - sc->tx_fifo_head;
                   1157:
                   1158:                if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
                   1159:                        if (em_82547_tx_fifo_reset(sc))
                   1160:                                return (0);
                   1161:                        else
                   1162:                                return (1);
                   1163:                }
                   1164:        }
                   1165:
                   1166:        return (0);
                   1167: }
                   1168:
                   1169: void
                   1170: em_82547_update_fifo_head(struct em_softc *sc, int len)
                   1171: {
                   1172:        int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
                   1173:
                   1174:        /* tx_fifo_head is always 16 byte aligned */
                   1175:        sc->tx_fifo_head += fifo_pkt_len;
                   1176:        if (sc->tx_fifo_head >= sc->tx_fifo_size)
                   1177:                sc->tx_fifo_head -= sc->tx_fifo_size;
                   1178: }
                   1179:
                   1180: int
                   1181: em_82547_tx_fifo_reset(struct em_softc *sc)
                   1182: {
                   1183:        uint32_t tctl;
                   1184:
                   1185:        if ((E1000_READ_REG(&sc->hw, TDT) ==
                   1186:             E1000_READ_REG(&sc->hw, TDH)) &&
                   1187:            (E1000_READ_REG(&sc->hw, TDFT) ==
                   1188:             E1000_READ_REG(&sc->hw, TDFH)) &&
                   1189:            (E1000_READ_REG(&sc->hw, TDFTS) ==
                   1190:             E1000_READ_REG(&sc->hw, TDFHS)) &&
                   1191:            (E1000_READ_REG(&sc->hw, TDFPC) == 0)) {
                   1192:
                   1193:                /* Disable TX unit */
                   1194:                tctl = E1000_READ_REG(&sc->hw, TCTL);
                   1195:                E1000_WRITE_REG(&sc->hw, TCTL, tctl & ~E1000_TCTL_EN);
                   1196:
                   1197:                /* Reset FIFO pointers */
                   1198:                E1000_WRITE_REG(&sc->hw, TDFT, sc->tx_head_addr);
                   1199:                E1000_WRITE_REG(&sc->hw, TDFH, sc->tx_head_addr);
                   1200:                E1000_WRITE_REG(&sc->hw, TDFTS, sc->tx_head_addr);
                   1201:                E1000_WRITE_REG(&sc->hw, TDFHS, sc->tx_head_addr);
                   1202:
                   1203:                /* Re-enable TX unit */
                   1204:                E1000_WRITE_REG(&sc->hw, TCTL, tctl);
                   1205:                E1000_WRITE_FLUSH(&sc->hw);
                   1206:
                   1207:                sc->tx_fifo_head = 0;
                   1208:                sc->tx_fifo_reset_cnt++;
                   1209:
                   1210:                return (TRUE);
                   1211:        } else
                   1212:                return (FALSE);
                   1213: }
                   1214:
                   1215: void
                   1216: em_set_promisc(struct em_softc *sc)
                   1217: {
                   1218:        u_int32_t       reg_rctl;
                   1219:        struct ifnet   *ifp = &sc->interface_data.ac_if;
                   1220:
                   1221:        reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
                   1222:
                   1223:        if (ifp->if_flags & IFF_PROMISC) {
                   1224:                reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
                   1225:        } else if (ifp->if_flags & IFF_ALLMULTI) {
                   1226:                reg_rctl |= E1000_RCTL_MPE;
                   1227:                reg_rctl &= ~E1000_RCTL_UPE;
                   1228:        } else {
                   1229:                reg_rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
                   1230:        }
                   1231:        E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
                   1232: }
                   1233:
                   1234: /*********************************************************************
                   1235:  *  Multicast Update
                   1236:  *
                    1237:  *  This routine is called whenever the multicast address list is updated.
                   1238:  *
                   1239:  **********************************************************************/
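                          /*
                           * Notes on the code below: the 82542 rev 2.0 requires the receiver
                           * to be held in reset (RCTL_RST), with MWI disabled, while the
                           * multicast table is rewritten.  A multicast range, or more than
                           * MAX_NUM_MULTICAST_ADDRESSES entries, makes us fall back to
                           * accepting all multicast traffic (RCTL_MPE).
                           */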
                   1240:
                   1241: void
                   1242: em_set_multi(struct em_softc *sc)
                   1243: {
                   1244:        u_int32_t reg_rctl = 0;
                   1245:        u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
                   1246:        int mcnt = 0;
                   1247:        struct ifnet *ifp = &sc->interface_data.ac_if;
                   1248:        struct arpcom *ac = &sc->interface_data;
                   1249:        struct ether_multi *enm;
                   1250:        struct ether_multistep step;
                   1251:
                   1252:        IOCTL_DEBUGOUT("em_set_multi: begin");
                   1253:
                   1254:        if (sc->hw.mac_type == em_82542_rev2_0) {
                   1255:                reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
                   1256:                if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
                   1257:                        em_pci_clear_mwi(&sc->hw);
                   1258:                reg_rctl |= E1000_RCTL_RST;
                   1259:                E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
                   1260:                msec_delay(5);
                   1261:        }
                   1262:        ETHER_FIRST_MULTI(step, ac, enm);
                   1263:        while (enm != NULL) {
                   1264:                if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
                   1265:                        ifp->if_flags |= IFF_ALLMULTI;
                   1266:                        mcnt = MAX_NUM_MULTICAST_ADDRESSES;
                   1267:                }
                   1268:                if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
                   1269:                        break;
                   1270:                bcopy(enm->enm_addrlo, &mta[mcnt*ETH_LENGTH_OF_ADDRESS],
                   1271:                      ETH_LENGTH_OF_ADDRESS);
                   1272:                mcnt++;
                   1273:                ETHER_NEXT_MULTI(step, enm);
                   1274:        }
                   1275:
                   1276:        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
                   1277:                reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
                   1278:                reg_rctl |= E1000_RCTL_MPE;
                   1279:                E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
                   1280:        } else
                   1281:                em_mc_addr_list_update(&sc->hw, mta, mcnt, 0, 1);
                   1282:
                   1283:        if (sc->hw.mac_type == em_82542_rev2_0) {
                   1284:                reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
                   1285:                reg_rctl &= ~E1000_RCTL_RST;
                   1286:                E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
                   1287:                msec_delay(5);
                   1288:                if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
                   1289:                        em_pci_set_mwi(&sc->hw);
                   1290:        }
                   1291: }
                   1292:
                   1293: /*********************************************************************
                   1294:  *  Timer routine
                   1295:  *
                   1296:  *  This routine checks for link status and updates statistics.
                   1297:  *
                   1298:  **********************************************************************/
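                          /*
                           * The timer below reschedules itself once per second (hz ticks);
                           * besides refreshing the statistics counters it also drives the
                           * SmartSpeed workaround via em_smartspeed().
                           */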
                   1299:
                   1300: void
                   1301: em_local_timer(void *arg)
                   1302: {
                   1303:        struct ifnet   *ifp;
                   1304:        struct em_softc *sc = arg;
                   1305:        int s;
                   1306:
                   1307:        ifp = &sc->interface_data.ac_if;
                   1308:
                   1309:        s = splnet();
                   1310:
                   1311:        em_check_for_link(&sc->hw);
                   1312:        em_update_link_status(sc);
                   1313:        em_update_stats_counters(sc);
                    1314:        if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
                   1315:                em_print_hw_stats(sc);
                   1316:        em_smartspeed(sc);
                   1317:
                   1318:        timeout_add(&sc->timer_handle, hz);
                   1319:
                   1320:        splx(s);
                   1321: }
                   1322:
                   1323: void
                   1324: em_update_link_status(struct em_softc *sc)
                   1325: {
                   1326:        struct ifnet *ifp = &sc->interface_data.ac_if;
                   1327:
                   1328:        if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
                   1329:                if (sc->link_active == 0) {
                   1330:                        em_get_speed_and_duplex(&sc->hw,
                   1331:                                                &sc->link_speed,
                   1332:                                                &sc->link_duplex);
                   1333:                        /* Check if we may set SPEED_MODE bit on PCI-E */
                   1334:                        if ((sc->link_speed == SPEED_1000) &&
                   1335:                            ((sc->hw.mac_type == em_82571) ||
                   1336:                            (sc->hw.mac_type == em_82572))) {
                   1337:                                int tarc0;
                   1338:
                   1339:                                tarc0 = E1000_READ_REG(&sc->hw, TARC0);
                   1340:                                tarc0 |= SPEED_MODE_BIT;
                   1341:                                E1000_WRITE_REG(&sc->hw, TARC0, tarc0);
                   1342:                        }
                   1343:                        sc->link_active = 1;
                   1344:                        sc->smartspeed = 0;
                   1345:                        ifp->if_baudrate = sc->link_speed * 1000000;
                   1346:                        if (sc->link_duplex == FULL_DUPLEX)
                   1347:                                ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
                   1348:                        else if (sc->link_duplex == HALF_DUPLEX)
                   1349:                                ifp->if_link_state = LINK_STATE_HALF_DUPLEX;
                   1350:                        else
                   1351:                                ifp->if_link_state = LINK_STATE_UP;
                   1352:                        if_link_state_change(ifp);
                   1353:                }
                   1354:        } else {
                   1355:                if (sc->link_active == 1) {
                   1356:                        ifp->if_baudrate = sc->link_speed = 0;
                   1357:                        sc->link_duplex = 0;
                   1358:                        sc->link_active = 0;
                   1359:                        ifp->if_link_state = LINK_STATE_DOWN;
                   1360:                        if_link_state_change(ifp);
                   1361:                }
                   1362:        }
                   1363: }
                   1364:
                   1365: /*********************************************************************
                   1366:  *
                   1367:  *  This routine disables all traffic on the adapter by issuing a
                   1368:  *  global reset on the MAC and deallocates TX/RX buffers.
                   1369:  *
                   1370:  **********************************************************************/
                   1371:
                   1372: void
                   1373: em_stop(void *arg)
                   1374: {
                   1375:        struct ifnet   *ifp;
                   1376:        struct em_softc *sc = arg;
                   1377:        ifp = &sc->interface_data.ac_if;
                   1378:
                   1379:        INIT_DEBUGOUT("em_stop: begin");
                   1380:        em_disable_intr(sc);
                   1381:        em_reset_hw(&sc->hw);
                   1382:        timeout_del(&sc->timer_handle);
                   1383:        timeout_del(&sc->tx_fifo_timer_handle);
                   1384:
                   1385:        /* Tell the stack that the interface is no longer active */
                   1386:        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
                   1387:
                   1388:        em_free_transmit_structures(sc);
                   1389:        em_free_receive_structures(sc);
                   1390: }
                   1391:
                   1392: /*********************************************************************
                   1393:  *
                   1394:  *  Determine hardware revision.
                   1395:  *
                   1396:  **********************************************************************/
                   1397: void
                   1398: em_identify_hardware(struct em_softc *sc)
                   1399: {
                   1400:        u_int32_t reg;
                   1401:        struct pci_attach_args *pa = &sc->osdep.em_pa;
                   1402:
                    1403:        /* Save the PCI command word so we know how the device is configured */
                   1404:        sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
                   1405:                                            PCI_COMMAND_STATUS_REG);
                   1406:
                   1407:        /* Save off the information about this board */
                   1408:        sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
                   1409:        sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
                   1410:
                   1411:        reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
                   1412:        sc->hw.revision_id = PCI_REVISION(reg);
                   1413:
                   1414:        reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
                   1415:        sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
                   1416:        sc->hw.subsystem_id = PCI_PRODUCT(reg);
                   1417:
                   1418:        /* Identify the MAC */
                   1419:        if (em_set_mac_type(&sc->hw))
                   1420:                printf("%s: Unknown MAC Type\n", sc->sc_dv.dv_xname);
                   1421:
                   1422:        if (sc->hw.mac_type == em_82541 ||
                   1423:            sc->hw.mac_type == em_82541_rev_2 ||
                   1424:            sc->hw.mac_type == em_82547 ||
                   1425:            sc->hw.mac_type == em_82547_rev_2)
                   1426:                sc->hw.phy_init_script = TRUE;
                   1427: }
                   1428:
                   1429: int
                   1430: em_allocate_pci_resources(struct em_softc *sc)
                   1431: {
                   1432:        int             val, rid;
                   1433:        pci_intr_handle_t       ih;
                   1434:        const char              *intrstr = NULL;
                   1435:        struct pci_attach_args *pa = &sc->osdep.em_pa;
                   1436:        pci_chipset_tag_t       pc = pa->pa_pc;
                   1437:
                   1438:        val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_MMBA);
                   1439:        if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
                   1440:                printf(": mmba is not mem space\n");
                   1441:                return (ENXIO);
                   1442:        }
                   1443:        if (pci_mapreg_map(pa, EM_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
                   1444:            &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
                   1445:            &sc->osdep.em_membase, &sc->osdep.em_memsize, 0)) {
                   1446:                printf(": cannot find mem space\n");
                   1447:                return (ENXIO);
                   1448:        }
                   1449:
                   1450:        if (sc->hw.mac_type > em_82543) {
                    1451:                /* Figure out where our I/O BAR is */
                   1452:                for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END;) {
                   1453:                        val = pci_conf_read(pa->pa_pc, pa->pa_tag, rid);
                   1454:                        if (PCI_MAPREG_TYPE(val) == PCI_MAPREG_TYPE_IO) {
                   1455:                                sc->io_rid = rid;
                   1456:                                break;
                   1457:                        }
                   1458:                        rid += 4;
                   1459:                        if (PCI_MAPREG_MEM_TYPE(val) ==
                   1460:                            PCI_MAPREG_MEM_TYPE_64BIT)
                   1461:                                rid += 4;       /* skip high bits, too */
                   1462:                }
                   1463:
                   1464:                if (pci_mapreg_map(pa, rid, PCI_MAPREG_TYPE_IO, 0,
                   1465:                    &sc->osdep.io_bus_space_tag, &sc->osdep.io_bus_space_handle,
                   1466:                    &sc->osdep.em_iobase, &sc->osdep.em_iosize, 0)) {
                   1467:                        printf(": cannot find i/o space\n");
                   1468:                        return (ENXIO);
                   1469:                }
                   1470:
                   1471:                sc->hw.io_base = 0;
                   1472:        }
                   1473:
                   1474:        /* for ICH8 we need to find the flash memory */
                   1475:        if (sc->hw.mac_type == em_ich8lan) {
                   1476:                val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_FLASH);
                   1477:                if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
                   1478:                        printf(": flash is not mem space\n");
                   1479:                        return (ENXIO);
                   1480:                }
                   1481:
                   1482:                if (pci_mapreg_map(pa, EM_FLASH, PCI_MAPREG_MEM_TYPE(val), 0,
                   1483:                    &sc->osdep.flash_bus_space_tag, &sc->osdep.flash_bus_space_handle,
                   1484:                    &sc->osdep.em_flashbase, &sc->osdep.em_flashsize, 0)) {
                   1485:                        printf(": cannot find mem space\n");
                   1486:                        return (ENXIO);
                   1487:                }
                    1488:        }
                   1489:
                   1490:        if (pci_intr_map(pa, &ih)) {
                   1491:                printf(": couldn't map interrupt\n");
                   1492:                return (ENXIO);
                   1493:        }
                   1494:
                   1495:        sc->hw.back = &sc->osdep;
                   1496:
                   1497:        intrstr = pci_intr_string(pc, ih);
                   1498:        sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, em_intr, sc,
                   1499:                                              sc->sc_dv.dv_xname);
                   1500:        if (sc->sc_intrhand == NULL) {
                   1501:                printf(": couldn't establish interrupt");
                   1502:                if (intrstr != NULL)
                   1503:                        printf(" at %s", intrstr);
                   1504:                printf("\n");
                   1505:                return (ENXIO);
                   1506:        }
                   1507:        printf(": %s", intrstr);
                   1508:
                   1509:        return (0);
                   1510: }
                   1511:
                   1512: void
                   1513: em_free_pci_resources(struct em_softc *sc)
                   1514: {
                   1515:        struct pci_attach_args *pa = &sc->osdep.em_pa;
                   1516:        pci_chipset_tag_t       pc = pa->pa_pc;
                   1517:
                   1518:        if (sc->sc_intrhand)
                   1519:                pci_intr_disestablish(pc, sc->sc_intrhand);
                   1520:        sc->sc_intrhand = 0;
                   1521:
                   1522:        if (sc->osdep.em_flashbase)
                   1523:                bus_space_unmap(sc->osdep.flash_bus_space_tag, sc->osdep.flash_bus_space_handle,
                   1524:                                sc->osdep.em_flashsize);
                   1525:        sc->osdep.em_flashbase = 0;
                   1526:
                   1527:        if (sc->osdep.em_iobase)
                   1528:                bus_space_unmap(sc->osdep.io_bus_space_tag, sc->osdep.io_bus_space_handle,
                   1529:                                sc->osdep.em_iosize);
                   1530:        sc->osdep.em_iobase = 0;
                   1531:
                   1532:        if (sc->osdep.em_membase)
                   1533:                bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
                   1534:                                sc->osdep.em_memsize);
                   1535:        sc->osdep.em_membase = 0;
                   1536: }
                   1537:
                   1538: /*********************************************************************
                   1539:  *
                   1540:  *  Initialize the hardware to a configuration as specified by the
                   1541:  *  em_softc structure. The controller is reset, the EEPROM is
                   1542:  *  verified, the MAC address is set, then the shared initialization
                   1543:  *  routines are called.
                   1544:  *
                   1545:  **********************************************************************/
                   1546: int
                   1547: em_hardware_init(struct em_softc *sc)
                   1548: {
                   1549:        u_int16_t rx_buffer_size;
                   1550:
                   1551:        INIT_DEBUGOUT("em_hardware_init: begin");
                   1552:        /* Issue a global reset */
                   1553:        em_reset_hw(&sc->hw);
                   1554:
                   1555:        /* When hardware is reset, fifo_head is also reset */
                   1556:        sc->tx_fifo_head = 0;
                   1557:
                   1558:        /* Make sure we have a good EEPROM before we read from it */
                   1559:        if (em_validate_eeprom_checksum(&sc->hw) < 0) {
                   1560:                /*
                    1561:                 * Some PCIe parts fail the first check due to
                    1562:                 * the link being in a sleep state; call it again.
                    1563:                 * If it fails a second time, it's a real issue.
                   1564:                 */
                   1565:                if (em_validate_eeprom_checksum(&sc->hw) < 0) {
                   1566:                        printf("%s: The EEPROM Checksum Is Not Valid\n",
                   1567:                               sc->sc_dv.dv_xname);
                   1568:                        return (EIO);
                   1569:                }
                   1570:        }
                   1571:
                   1572:        if (em_read_part_num(&sc->hw, &(sc->part_num)) < 0) {
                   1573:                printf("%s: EEPROM read error while reading part number\n",
                   1574:                       sc->sc_dv.dv_xname);
                   1575:                return (EIO);
                   1576:        }
                   1577:
                    1578:        /* Disable smart power down by default on newer adapters */
                   1579:        if (!em_smart_pwr_down &&
                   1580:             (sc->hw.mac_type == em_82571 ||
                   1581:              sc->hw.mac_type == em_82572)) {
                   1582:                uint16_t phy_tmp = 0;
                   1583:
                   1584:                /* Speed up time to link by disabling smart power down */
                   1585:                em_read_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
                   1586:                phy_tmp &= ~IGP02E1000_PM_SPD;
                   1587:                em_write_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
                   1588:        }
                   1589:
                   1590:        /*
                   1591:         * These parameters control the automatic generation (Tx) and
                   1592:         * response (Rx) to Ethernet PAUSE frames.
                   1593:         * - High water mark should allow for at least two frames to be
                   1594:         *   received after sending an XOFF.
                   1595:         * - Low water mark works best when it is very near the high water mark.
                   1596:         *   This allows the receiver to restart by sending XON when it has
                    1597:         *   drained a bit.  Here we use an arbitrary value of 1500, which will
                    1598:         *   restart after one full frame is pulled from the buffer.  There
                    1599:         *   could be several smaller frames in the buffer, and if so they will
                    1600:         *   not trigger the XON until together they have drained the buffer
                    1601:         *   by 1500 bytes.
                   1602:         * - The pause time is fairly large at 1000 x 512ns = 512 usec.
                   1603:         */
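                                  /*
                                   * A rough worked example (the numbers are illustrative, not
                                   * from any particular adapter): if the low 16 bits of PBA
                                   * read back as 48, the receive buffer is 48 << 10 = 49152
                                   * bytes.  With a 1518-byte maximum frame,
                                   * EM_ROUNDUP(1518, 1024) = 2048, so fc_high_water = 47104
                                   * and fc_low_water = 47104 - 1500 = 45604.
                                   */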
                   1604:        rx_buffer_size = ((E1000_READ_REG(&sc->hw, PBA) & 0xffff) << 10 );
                   1605:
                   1606:        sc->hw.fc_high_water = rx_buffer_size -
                   1607:            EM_ROUNDUP(sc->hw.max_frame_size, 1024);
                   1608:        sc->hw.fc_low_water = sc->hw.fc_high_water - 1500;
                   1609:        if (sc->hw.mac_type == em_80003es2lan)
                   1610:                sc->hw.fc_pause_time = 0xFFFF;
                   1611:        else
                   1612:                sc->hw.fc_pause_time = 1000;
                   1613:        sc->hw.fc_send_xon = TRUE;
                   1614:        sc->hw.fc = E1000_FC_FULL;
                   1615:
                   1616:        if (em_init_hw(&sc->hw) < 0) {
                    1617:                printf("%s: Hardware Initialization Failed\n",
                   1618:                       sc->sc_dv.dv_xname);
                   1619:                return (EIO);
                   1620:        }
                   1621:
                   1622:        em_check_for_link(&sc->hw);
                   1623:
                   1624:        return (0);
                   1625: }
                   1626:
                   1627: /*********************************************************************
                   1628:  *
                   1629:  *  Setup networking device structure and register an interface.
                   1630:  *
                   1631:  **********************************************************************/
                   1632: void
                   1633: em_setup_interface(struct em_softc *sc)
                   1634: {
                   1635:        struct ifnet   *ifp;
                   1636:        u_char fiber_type = IFM_1000_SX;
                   1637:
                   1638:        INIT_DEBUGOUT("em_setup_interface: begin");
                   1639:
                   1640:        ifp = &sc->interface_data.ac_if;
                   1641:        strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
                   1642:        ifp->if_softc = sc;
                   1643:        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
                   1644:        ifp->if_ioctl = em_ioctl;
                   1645:        ifp->if_start = em_start;
                   1646:        ifp->if_watchdog = em_watchdog;
                   1647:        ifp->if_hardmtu =
                   1648:                sc->hw.max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN;
                   1649:        IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
                   1650:        IFQ_SET_READY(&ifp->if_snd);
                   1651:
                   1652:        ifp->if_capabilities = IFCAP_VLAN_MTU;
                   1653:
                   1654: #ifdef EM_CSUM_OFFLOAD
                   1655:        if (sc->hw.mac_type >= em_82543)
                   1656:                ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
                   1657: #endif
                   1658:
                   1659:        /*
                   1660:         * Specify the media types supported by this adapter and register
                   1661:         * callbacks to update media and link information
                   1662:         */
                   1663:        ifmedia_init(&sc->media, IFM_IMASK, em_media_change,
                   1664:                     em_media_status);
                   1665:        if (sc->hw.media_type == em_media_type_fiber ||
                   1666:            sc->hw.media_type == em_media_type_internal_serdes) {
                   1667:                if (sc->hw.mac_type == em_82545)
                   1668:                        fiber_type = IFM_1000_LX;
                   1669:                ifmedia_add(&sc->media, IFM_ETHER | fiber_type | IFM_FDX,
                   1670:                            0, NULL);
                   1671:                ifmedia_add(&sc->media, IFM_ETHER | fiber_type,
                   1672:                            0, NULL);
                   1673:        } else {
                   1674:                ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
                   1675:                ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
                   1676:                            0, NULL);
                   1677:                ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX,
                   1678:                            0, NULL);
                   1679:                ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
                   1680:                            0, NULL);
                   1681:                if (sc->hw.phy_type != em_phy_ife) {
                   1682:                        ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
                   1683:                                    0, NULL);
                   1684:                        ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
                   1685:                }
                   1686:        }
                   1687:        ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
                   1688:        ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
                   1689:
                   1690:        if_attach(ifp);
                   1691:        ether_ifattach(ifp);
                   1692: }
                   1693:
                   1694:
                   1695: /*********************************************************************
                   1696:  *
                   1697:  *  Workaround for SmartSpeed on 82541 and 82547 controllers
                   1698:  *
                   1699:  **********************************************************************/
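                          /*
                           * Sketch of the state machine below: while there is no link and we
                           * advertise 1000baseT full-duplex, sc->smartspeed counts the
                           * attempts.  On repeated Master/Slave configuration faults, manual
                           * M/S configuration (CR_1000T_MS_ENABLE) is cleared and
                           * autonegotiation restarted; if there is still no link at
                           * EM_SMARTSPEED_DOWNSHIFT it is set again, and the counter wraps
                           * at EM_SMARTSPEED_MAX.
                           */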
                   1700: void
                   1701: em_smartspeed(struct em_softc *sc)
                   1702: {
                   1703:        uint16_t phy_tmp;
                   1704:
                   1705:        if (sc->link_active || (sc->hw.phy_type != em_phy_igp) ||
                   1706:            !sc->hw.autoneg || !(sc->hw.autoneg_advertised & ADVERTISE_1000_FULL))
                   1707:                return;
                   1708:
                   1709:        if (sc->smartspeed == 0) {
                    1710:                /* If the Master/Slave config fault is asserted twice,
                    1711:                 * we assume the faults are back-to-back */
                   1712:                em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
                   1713:                if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
                   1714:                        return;
                   1715:                em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
                   1716:                if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
                   1717:                        em_read_phy_reg(&sc->hw, PHY_1000T_CTRL,
                   1718:                                        &phy_tmp);
                   1719:                        if (phy_tmp & CR_1000T_MS_ENABLE) {
                   1720:                                phy_tmp &= ~CR_1000T_MS_ENABLE;
                   1721:                                em_write_phy_reg(&sc->hw,
                   1722:                                                    PHY_1000T_CTRL, phy_tmp);
                   1723:                                sc->smartspeed++;
                   1724:                                if (sc->hw.autoneg &&
                   1725:                                    !em_phy_setup_autoneg(&sc->hw) &&
                   1726:                                    !em_read_phy_reg(&sc->hw, PHY_CTRL,
                   1727:                                                       &phy_tmp)) {
                   1728:                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                   1729:                                                    MII_CR_RESTART_AUTO_NEG);
                   1730:                                        em_write_phy_reg(&sc->hw,
                   1731:                                                         PHY_CTRL, phy_tmp);
                   1732:                                }
                   1733:                        }
                   1734:                }
                   1735:                return;
                   1736:        } else if (sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
                   1737:                /* If still no link, perhaps using 2/3 pair cable */
                   1738:                em_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
                   1739:                phy_tmp |= CR_1000T_MS_ENABLE;
                   1740:                em_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
                   1741:                if (sc->hw.autoneg &&
                   1742:                    !em_phy_setup_autoneg(&sc->hw) &&
                   1743:                    !em_read_phy_reg(&sc->hw, PHY_CTRL, &phy_tmp)) {
                   1744:                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                   1745:                                    MII_CR_RESTART_AUTO_NEG);
                   1746:                        em_write_phy_reg(&sc->hw, PHY_CTRL, phy_tmp);
                   1747:                }
                   1748:        }
                   1749:        /* Restart process after EM_SMARTSPEED_MAX iterations */
                   1750:        if (sc->smartspeed++ == EM_SMARTSPEED_MAX)
                   1751:                sc->smartspeed = 0;
                   1752: }
                   1753:
                   1754: /*
                   1755:  * Manage DMA'able memory.
                   1756:  */
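                          /*
                           * em_dma_malloc() performs the usual four-step bus_dma(9) sequence:
                           * bus_dmamap_create(), bus_dmamem_alloc(), bus_dmamem_map() and
                           * bus_dmamap_load(), unwinding on failure; em_dma_free() releases
                           * the same resources in reverse order.
                           */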
                   1757: int
                   1758: em_dma_malloc(struct em_softc *sc, bus_size_t size,
                   1759:     struct em_dma_alloc *dma, int mapflags)
                   1760: {
                   1761:        int r;
                   1762:
                   1763:        dma->dma_tag = sc->osdep.em_pa.pa_dmat;
                   1764:        r = bus_dmamap_create(dma->dma_tag, size, 1,
                   1765:            size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
                   1766:        if (r != 0) {
                   1767:                printf("%s: em_dma_malloc: bus_dmamap_create failed; "
                   1768:                        "error %u\n", sc->sc_dv.dv_xname, r);
                   1769:                goto fail_0;
                   1770:        }
                   1771:
                   1772:        r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
                   1773:            1, &dma->dma_nseg, BUS_DMA_NOWAIT);
                   1774:        if (r != 0) {
                    1775:                printf("%s: em_dma_malloc: bus_dmamem_alloc failed; "
                   1776:                        "size %lu, error %d\n", sc->sc_dv.dv_xname,
                   1777:                        (unsigned long)size, r);
                   1778:                goto fail_1;
                   1779:        }
                   1780:
                   1781:        r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
                   1782:            &dma->dma_vaddr, BUS_DMA_NOWAIT);
                   1783:        if (r != 0) {
                    1784:                printf("%s: em_dma_malloc: bus_dmamem_map failed; "
                   1785:                        "size %lu, error %d\n", sc->sc_dv.dv_xname,
                   1786:                        (unsigned long)size, r);
                   1787:                goto fail_2;
                   1788:        }
                   1789:
                   1790:        r = bus_dmamap_load(sc->osdep.em_pa.pa_dmat, dma->dma_map,
                   1791:                            dma->dma_vaddr,
                   1792:                            size,
                   1793:                            NULL,
                   1794:                            mapflags | BUS_DMA_NOWAIT);
                   1795:        if (r != 0) {
                   1796:                printf("%s: em_dma_malloc: bus_dmamap_load failed; "
                   1797:                        "error %u\n", sc->sc_dv.dv_xname, r);
                   1798:                goto fail_3;
                   1799:        }
                   1800:
                   1801:        dma->dma_size = size;
                   1802:        return (0);
                   1803:
                   1804: fail_3:
                   1805:        bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
                   1806: fail_2:
                   1807:        bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
                   1808: fail_1:
                   1809:        bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
                   1810: fail_0:
                   1811:        dma->dma_map = NULL;
                   1812:        dma->dma_tag = NULL;
                   1813:
                   1814:        return (r);
                   1815: }
                   1816:
                   1817: void
                   1818: em_dma_free(struct em_softc *sc, struct em_dma_alloc *dma)
                   1819: {
                   1820:        if (dma->dma_tag == NULL)
                   1821:                return;
                   1822:
                   1823:        if (dma->dma_map != NULL) {
                   1824:                bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
                   1825:                    dma->dma_map->dm_mapsize,
                   1826:                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                   1827:                bus_dmamap_unload(dma->dma_tag, dma->dma_map);
                   1828:                bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
                   1829:                bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
                   1830:                bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
                   1831:        }
                   1832:        dma->dma_tag = NULL;
                   1833: }
                   1834:
                   1835: /*********************************************************************
                   1836:  *
                   1837:  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
                   1838:  *  the information needed to transmit a packet on the wire.
                   1839:  *
                   1840:  **********************************************************************/
                   1841: int
                   1842: em_allocate_transmit_structures(struct em_softc *sc)
                   1843: {
                   1844:        if (!(sc->tx_buffer_area =
                   1845:              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
                   1846:                                             sc->num_tx_desc, M_DEVBUF,
                   1847:                                             M_NOWAIT))) {
                   1848:                printf("%s: Unable to allocate tx_buffer memory\n",
                   1849:                       sc->sc_dv.dv_xname);
                   1850:                return (ENOMEM);
                   1851:        }
                   1852:
                   1853:        bzero(sc->tx_buffer_area,
                   1854:              sizeof(struct em_buffer) * sc->num_tx_desc);
                   1855:
                   1856:        return (0);
                   1857: }
                   1858:
                   1859: /*********************************************************************
                   1860:  *
                   1861:  *  Allocate and initialize transmit structures.
                   1862:  *
                   1863:  **********************************************************************/
                   1864: int
                   1865: em_setup_transmit_structures(struct em_softc *sc)
                   1866: {
                   1867:        struct  em_buffer *tx_buffer;
                   1868:        int error, i;
                   1869:
                   1870:        if ((error = em_allocate_transmit_structures(sc)) != 0)
                   1871:                goto fail;
                   1872:
                   1873:        bzero((void *) sc->tx_desc_base,
                   1874:              (sizeof(struct em_tx_desc)) * sc->num_tx_desc);
                   1875:
                   1876:        sc->txtag = sc->osdep.em_pa.pa_dmat;
                   1877:
                   1878:        tx_buffer = sc->tx_buffer_area;
                   1879:        for (i = 0; i < sc->num_tx_desc; i++) {
                   1880:                error = bus_dmamap_create(sc->txtag, MAX_JUMBO_FRAME_SIZE,
                   1881:                            EM_MAX_SCATTER, MAX_JUMBO_FRAME_SIZE, 0,
                   1882:                            BUS_DMA_NOWAIT, &tx_buffer->map);
                   1883:                if (error != 0) {
                   1884:                        printf("%s: Unable to create TX DMA map\n",
                   1885:                            sc->sc_dv.dv_xname);
                   1886:                        goto fail;
                   1887:                }
                   1888:                tx_buffer++;
                   1889:        }
                   1890:
                   1891:        sc->next_avail_tx_desc = 0;
                   1892:        sc->next_tx_to_clean = 0;
                   1893:
                   1894:        /* Set number of descriptors available */
                   1895:        sc->num_tx_desc_avail = sc->num_tx_desc;
                   1896:
                   1897:        /* Set checksum context */
                   1898:        sc->active_checksum_context = OFFLOAD_NONE;
                   1899:        bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
                   1900:            sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   1901:
                   1902:        return (0);
                   1903:
                   1904: fail:
                   1905:        em_free_transmit_structures(sc);
                   1906:        return (error);
                   1907: }
                   1908:
                   1909: /*********************************************************************
                   1910:  *
                   1911:  *  Enable transmit unit.
                   1912:  *
                   1913:  **********************************************************************/
                   1914: void
                   1915: em_initialize_transmit_unit(struct em_softc *sc)
                   1916: {
                   1917:        u_int32_t       reg_tctl, reg_tipg = 0;
                   1918:        u_int64_t       bus_addr;
                   1919:
                   1920:        INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
                   1921:
                   1922:        /* Setup the Base and Length of the Tx Descriptor Ring */
                   1923:        bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
                   1924:        E1000_WRITE_REG(&sc->hw, TDLEN,
                   1925:                        sc->num_tx_desc *
                   1926:                        sizeof(struct em_tx_desc));
                   1927:        E1000_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
                   1928:        E1000_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
                   1929:
                   1930:        /* Setup the HW Tx Head and Tail descriptor pointers */
                   1931:        E1000_WRITE_REG(&sc->hw, TDT, 0);
                   1932:        E1000_WRITE_REG(&sc->hw, TDH, 0);
                   1933:
                   1934:        HW_DEBUGOUT2("Base = %x, Length = %x\n",
                   1935:                     E1000_READ_REG(&sc->hw, TDBAL),
                   1936:                     E1000_READ_REG(&sc->hw, TDLEN));
                   1937:
                   1938:        /* Set the default values for the Tx Inter Packet Gap timer */
                   1939:        switch (sc->hw.mac_type) {
                   1940:        case em_82542_rev2_0:
                   1941:        case em_82542_rev2_1:
                   1942:                reg_tipg = DEFAULT_82542_TIPG_IPGT;
                   1943:                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                   1944:                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                   1945:                break;
                   1946:        case em_80003es2lan:
                   1947:                reg_tipg = DEFAULT_82543_TIPG_IPGR1;
                   1948:                reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                   1949:                break;
                   1950:        default:
                   1951:                if (sc->hw.media_type == em_media_type_fiber ||
                   1952:                    sc->hw.media_type == em_media_type_internal_serdes)
                   1953:                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                   1954:                else
                   1955:                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                   1956:                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                   1957:                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                   1958:        }
                   1959:
                   1960:        E1000_WRITE_REG(&sc->hw, TIPG, reg_tipg);
                   1961:        E1000_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
                   1962:        if (sc->hw.mac_type >= em_82540)
                   1963:                E1000_WRITE_REG(&sc->hw, TADV, sc->tx_abs_int_delay);
                   1964:
                   1965:        /* Program the Transmit Control Register */
                   1966:        reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
                   1967:                   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
                   1968:        if (sc->hw.mac_type >= em_82571)
                   1969:                reg_tctl |= E1000_TCTL_MULR;
                   1970:        if (sc->link_duplex == FULL_DUPLEX)
                   1971:                reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
                   1972:        else
                   1973:                reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
                   1974:        /* This write will effectively turn on the transmit unit */
                   1975:        E1000_WRITE_REG(&sc->hw, TCTL, reg_tctl);
                   1976:
                   1977:        /* Setup Transmit Descriptor Base Settings */
                   1978:        sc->txd_cmd = E1000_TXD_CMD_IFCS;
                   1979:
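                                 /*
                                  * Request delayed descriptor write-back interrupts (IDE)
                                  * when a transmit interrupt delay is configured.
                                  */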
                   1980:        if (sc->tx_int_delay > 0)
                   1981:                sc->txd_cmd |= E1000_TXD_CMD_IDE;
                   1982: }
                   1983:
                   1984: /*********************************************************************
                   1985:  *
                   1986:  *  Free all transmit related data structures.
                   1987:  *
                   1988:  **********************************************************************/
                   1989: void
                   1990: em_free_transmit_structures(struct em_softc *sc)
                   1991: {
                   1992:        struct em_buffer   *tx_buffer;
                   1993:        int             i;
                   1994:
                   1995:        INIT_DEBUGOUT("free_transmit_structures: begin");
                   1996:
                   1997:        if (sc->tx_buffer_area != NULL) {
                   1998:                tx_buffer = sc->tx_buffer_area;
                   1999:                for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
                   2000:                        if (tx_buffer->map != NULL &&
                   2001:                            tx_buffer->map->dm_nsegs > 0) {
                   2002:                                bus_dmamap_sync(sc->txtag, tx_buffer->map,
                   2003:                                    0, tx_buffer->map->dm_mapsize,
                   2004:                                    BUS_DMASYNC_POSTWRITE);
                   2005:                                bus_dmamap_unload(sc->txtag,
                   2006:                                    tx_buffer->map);
                   2007:                        }
                   2008:                        if (tx_buffer->m_head != NULL) {
                   2009:                                m_freem(tx_buffer->m_head);
                   2010:                                tx_buffer->m_head = NULL;
                   2011:                        }
                   2012:                        if (tx_buffer->map != NULL) {
                   2013:                                bus_dmamap_destroy(sc->txtag,
                   2014:                                    tx_buffer->map);
                   2015:                                tx_buffer->map = NULL;
                   2016:                        }
                   2017:                }
                   2018:        }
                   2019:        if (sc->tx_buffer_area != NULL) {
                   2020:                free(sc->tx_buffer_area, M_DEVBUF);
                   2021:                sc->tx_buffer_area = NULL;
                   2022:        }
                   2023:        if (sc->txtag != NULL)
                   2024:                sc->txtag = NULL;
                   2025: }
                   2026:
                   2027: #ifdef EM_CSUM_OFFLOAD
                   2028: /*********************************************************************
                   2029:  *
                   2030:  *  The offload context needs to be set when we transfer the first
                   2031:  *  packet of a particular protocol (TCP/UDP). We change the
                   2032:  *  context only if the protocol type changes.
                   2033:  *
                   2034:  **********************************************************************/
                   2035: void
                   2036: em_transmit_checksum_setup(struct em_softc *sc, struct mbuf *mp,
                   2037:     u_int32_t *txd_upper, u_int32_t *txd_lower)
                   2038: {
                   2039:        struct em_context_desc *TXD;
                   2040:        struct em_buffer *tx_buffer;
                   2041:        int curr_txd;
                   2042:
                   2043:        if (mp->m_pkthdr.csum_flags) {
                   2044:                if (mp->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) {
                   2045:                        *txd_upper = E1000_TXD_POPTS_TXSM << 8;
                   2046:                        *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                   2047:                        if (sc->active_checksum_context == OFFLOAD_TCP_IP)
                   2048:                                return;
                   2049:                        else
                   2050:                                sc->active_checksum_context = OFFLOAD_TCP_IP;
                   2051:                } else if (mp->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) {
                   2052:                        *txd_upper = E1000_TXD_POPTS_TXSM << 8;
                   2053:                        *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                   2054:                        if (sc->active_checksum_context == OFFLOAD_UDP_IP)
                   2055:                                return;
                   2056:                        else
                   2057:                                sc->active_checksum_context = OFFLOAD_UDP_IP;
                   2058:                } else {
                   2059:                        *txd_upper = 0;
                   2060:                        *txd_lower = 0;
                   2061:                        return;
                   2062:                }
                   2063:        } else {
                   2064:                *txd_upper = 0;
                   2065:                *txd_lower = 0;
                   2066:                return;
                   2067:        }
                   2068:
                   2069:        /* If we reach this point, the checksum offload context
                   2070:         * needs to be reset.
                   2071:         */
                   2072:        curr_txd = sc->next_avail_tx_desc;
                   2073:        tx_buffer = &sc->tx_buffer_area[curr_txd];
                   2074:        TXD = (struct em_context_desc *) &sc->tx_desc_base[curr_txd];
                   2075:
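                                 /*
                                  * ipcss/ipcso/ipcse locate the IP header and its checksum
                                  * field; tucss/tucso/tucse do the same for the TCP or UDP
                                  * checksum (a tucse of 0 means "to the end of the packet").
                                  */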
                   2076:        TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
                   2077:        TXD->lower_setup.ip_fields.ipcso =
                   2078:            ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
                   2079:        TXD->lower_setup.ip_fields.ipcse =
                   2080:            htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
                   2081:
                   2082:        TXD->upper_setup.tcp_fields.tucss =
                   2083:            ETHER_HDR_LEN + sizeof(struct ip);
                   2084:        TXD->upper_setup.tcp_fields.tucse = htole16(0);
                   2085:
                   2086:        if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
                   2087:                TXD->upper_setup.tcp_fields.tucso =
                   2088:                    ETHER_HDR_LEN + sizeof(struct ip) +
                   2089:                    offsetof(struct tcphdr, th_sum);
                   2090:        } else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
                   2091:                TXD->upper_setup.tcp_fields.tucso =
                   2092:                    ETHER_HDR_LEN + sizeof(struct ip) +
                   2093:                    offsetof(struct udphdr, uh_sum);
                   2094:        }
                   2095:
                   2096:        TXD->tcp_seg_setup.data = htole32(0);
                   2097:        TXD->cmd_and_length = htole32(sc->txd_cmd | E1000_TXD_CMD_DEXT);
                   2098:
                   2099:        tx_buffer->m_head = NULL;
                   2100:        tx_buffer->next_eop = -1;
                   2101:
                   2102:        if (++curr_txd == sc->num_tx_desc)
                   2103:                curr_txd = 0;
                   2104:
                   2105:        sc->num_tx_desc_avail--;
                   2106:        sc->next_avail_tx_desc = curr_txd;
                   2107: }
                   2108: #endif /* EM_CSUM_OFFLOAD */
                   2109:
                   2110: /**********************************************************************
                   2111:  *
                   2112:  *  Examine each tx_buffer in the used queue. If the hardware is done
                    2113:  *  processing the packet, then free the associated resources. The
                   2114:  *  tx_buffer is put back on the free queue.
                   2115:  *
                   2116:  **********************************************************************/
                   2117: void
                   2118: em_txeof(struct em_softc *sc)
                   2119: {
                   2120:        int first, last, done, num_avail;
                   2121:        struct em_buffer *tx_buffer;
                   2122:        struct em_tx_desc   *tx_desc, *eop_desc;
                   2123:        struct ifnet   *ifp = &sc->interface_data.ac_if;
                   2124:
                   2125:        if (sc->num_tx_desc_avail == sc->num_tx_desc)
                   2126:                return;
                   2127:
                   2128:        num_avail = sc->num_tx_desc_avail;
                   2129:        first = sc->next_tx_to_clean;
                   2130:        tx_desc = &sc->tx_desc_base[first];
                   2131:        tx_buffer = &sc->tx_buffer_area[first];
                   2132:        last = tx_buffer->next_eop;
                   2133:        eop_desc = &sc->tx_desc_base[last];
                   2134:
                    2135:        /*
                    2136:         * Get the index of the first descriptor AFTER the EOP of
                    2137:         * the first packet, so that the inner while loop below can
                    2138:         * use a simple comparison to find the end of the range to
                    2139:         * clean.
                    2140:         */
                   2141:        if (++last == sc->num_tx_desc)
                   2142:                last = 0;
                   2143:        done = last;
                   2144:
                   2145:        bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
                   2146:            sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
                   2147:        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
                   2148:                /* We clean the range of the packet */
                   2149:                while (first != done) {
                   2150:                        tx_desc->upper.data = 0;
                   2151:                        tx_desc->lower.data = 0;
                   2152:                        num_avail++;
                   2153:
                   2154:                        if (tx_buffer->m_head != NULL) {
                   2155:                                ifp->if_opackets++;
                   2156:                                if (tx_buffer->map->dm_nsegs > 0) {
                   2157:                                        bus_dmamap_sync(sc->txtag,
                   2158:                                            tx_buffer->map, 0,
                   2159:                                            tx_buffer->map->dm_mapsize,
                   2160:                                            BUS_DMASYNC_POSTWRITE);
                   2161:                                        bus_dmamap_unload(sc->txtag,
                   2162:                                            tx_buffer->map);
                   2163:                                }
                   2164:                                m_freem(tx_buffer->m_head);
                   2165:                                tx_buffer->m_head = NULL;
                   2166:                        }
                   2167:                        tx_buffer->next_eop = -1;
                   2168:
                   2169:                        if (++first == sc->num_tx_desc)
                   2170:                                first = 0;
                   2171:
                   2172:                        tx_buffer = &sc->tx_buffer_area[first];
                   2173:                        tx_desc = &sc->tx_desc_base[first];
                   2174:                }
                   2175:                /* See if we can continue to the next packet */
                   2176:                last = tx_buffer->next_eop;
                   2177:                if (last != -1) {
                   2178:                        eop_desc = &sc->tx_desc_base[last];
                   2179:                        /* Get new done point */
                   2180:                        if (++last == sc->num_tx_desc)
                   2181:                                last = 0;
                   2182:                        done = last;
                   2183:                } else
                   2184:                        break;
                   2185:        }
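                                 /*
                                  * Write the cleaned (zeroed) descriptors back before handing
                                  * the ring back to the hardware.
                                  */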
                   2186:        bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
                   2187:            sc->txdma.dma_map->dm_mapsize,
                   2188:            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   2189:
                   2190:        sc->next_tx_to_clean = first;
                   2191:
                   2192:        /*
                   2193:         * If we have enough room, clear IFF_OACTIVE to tell the stack
                   2194:         * that it is OK to send packets.
                   2195:         * If there are no pending descriptors, clear the timeout. Otherwise,
                   2196:         * if some descriptors have been freed, restart the timeout.
                   2197:         */
                   2198:        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                   2199:                ifp->if_flags &= ~IFF_OACTIVE;
                   2200:                /* All clean, turn off the timer */
                   2201:                if (num_avail == sc->num_tx_desc)
                   2202:                        ifp->if_timer = 0;
                   2203:                /* Some cleaned, reset the timer */
                   2204:                else if (num_avail != sc->num_tx_desc_avail)
                   2205:                        ifp->if_timer = EM_TX_TIMEOUT;
                   2206:        }
                   2207:        sc->num_tx_desc_avail = num_avail;
                   2208: }
                   2209:
                   2210: /*********************************************************************
                   2211:  *
                    2212:  *  Get a receive buffer from the system mbuf cluster pool.
                   2213:  *
                   2214:  **********************************************************************/
                   2215: int
                   2216: em_get_buf(struct em_softc *sc, int i)
                   2217: {
                   2218:        struct mbuf    *m;
                   2219:        bus_dmamap_t    map;
                   2220:        struct em_buffer *rx_buffer;
                   2221:        int error;
                   2222:
                   2223:        MGETHDR(m, M_DONTWAIT, MT_DATA);
                   2224:        if (m == NULL) {
                   2225:                sc->mbuf_alloc_failed++;
                   2226:                return (ENOBUFS);
                   2227:        }
                   2228:        MCLGET(m, M_DONTWAIT);
                   2229:        if ((m->m_flags & M_EXT) == 0) {
                   2230:                m_freem(m);
                   2231:                sc->mbuf_cluster_failed++;
                   2232:                return (ENOBUFS);
                   2233:        }
                   2234:        m->m_len = m->m_pkthdr.len = MCLBYTES;
                   2235:
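                                 /*
                                  * If the frame fits in a single cluster, offset the payload
                                  * by ETHER_ALIGN so the IP header is 32-bit aligned.
                                  */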
                   2236:        if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
                   2237:                m_adj(m, ETHER_ALIGN);
                   2238:
                   2239:        /*
                   2240:         * Using memory from the mbuf cluster pool, invoke the
                   2241:         * bus_dma machinery to arrange the memory mapping.
                   2242:         */
                   2243:        error = bus_dmamap_load_mbuf(sc->rxtag, sc->rx_sparemap,
                   2244:            m, BUS_DMA_NOWAIT);
                   2245:        if (error) {
                   2246:                m_free(m);
                   2247:                return (error);
                   2248:        }
                   2249:
                   2250:        rx_buffer = &sc->rx_buffer_area[i];
                   2251:        if (rx_buffer->m_head != NULL)
                   2252:                bus_dmamap_unload(sc->rxtag, rx_buffer->map);
                   2253:
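                                 /*
                                  * Swap maps: the just-loaded spare becomes this slot's map
                                  * and the old map becomes the new spare.
                                  */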
                   2254:        map = rx_buffer->map;
                   2255:        rx_buffer->map = sc->rx_sparemap;
                   2256:        sc->rx_sparemap = map;
                   2257:
                   2258:        bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
                   2259:            rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);
                   2260:
                   2261:        rx_buffer->m_head = m;
                   2262:
                   2263:        sc->rx_desc_base[i].buffer_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
                   2264:
                   2265:        return (0);
                   2266: }
                   2267:
                   2268: /*********************************************************************
                   2269:  *
                   2270:  *  Allocate memory for rx_buffer structures. Since we use one
                    2271:  *  rx_buffer per received packet, the maximum number of rx_buffers
                   2272:  *  that we'll need is equal to the number of receive descriptors
                   2273:  *  that we've allocated.
                   2274:  *
                   2275:  **********************************************************************/
                   2276: int
                   2277: em_allocate_receive_structures(struct em_softc *sc)
                   2278: {
                   2279:        int             i, error;
                   2280:        struct em_buffer *rx_buffer;
                   2281:
                   2282:        if (!(sc->rx_buffer_area =
                   2283:              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
                   2284:                                             sc->num_rx_desc, M_DEVBUF,
                   2285:                                             M_NOWAIT))) {
                   2286:                printf("%s: Unable to allocate rx_buffer memory\n",
                   2287:                       sc->sc_dv.dv_xname);
                   2288:                return (ENOMEM);
                   2289:        }
                   2290:
                   2291:        bzero(sc->rx_buffer_area,
                   2292:              sizeof(struct em_buffer) * sc->num_rx_desc);
                   2293:
                   2294:        sc->rxtag = sc->osdep.em_pa.pa_dmat;
                   2295:
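                                 /*
                                  * A spare map lets em_get_buf() map a new cluster before it
                                  * unloads the map currently in the ring slot.
                                  */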
                   2296:        error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1, MCLBYTES,
                   2297:                    0, BUS_DMA_NOWAIT, &sc->rx_sparemap);
                   2298:        if (error != 0) {
                   2299:                printf("%s: em_allocate_receive_structures: "
                   2300:                    "bus_dmamap_create failed; error %u\n",
                   2301:                    sc->sc_dv.dv_xname, error);
                   2302:                goto fail;
                   2303:        }
                   2304:
                   2305:        rx_buffer = sc->rx_buffer_area;
                   2306:        for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
                   2307:                error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
                   2308:                                        MCLBYTES, 0, BUS_DMA_NOWAIT,
                   2309:                                        &rx_buffer->map);
                   2310:                if (error != 0) {
                   2311:                        printf("%s: em_allocate_receive_structures: "
                   2312:                            "bus_dmamap_create failed; error %u\n",
                   2313:                            sc->sc_dv.dv_xname, error);
                   2314:                        goto fail;
                   2315:                }
                   2316:        }
                   2317:
                   2318:        for (i = 0; i < sc->num_rx_desc; i++) {
                   2319:                error = em_get_buf(sc, i);
                   2320:                if (error != 0)
                   2321:                        goto fail;
                    2322:        }
                   2323:        bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
                   2324:            sc->rxdma.dma_map->dm_mapsize,
                   2325:            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   2326:
                    2327:        return (0);
                   2328:
                   2329: fail:
                   2330:        em_free_receive_structures(sc);
                   2331:        return (error);
                   2332: }
                   2333:
                   2334: /*********************************************************************
                   2335:  *
                   2336:  *  Allocate and initialize receive structures.
                   2337:  *
                   2338:  **********************************************************************/
                   2339: int
                   2340: em_setup_receive_structures(struct em_softc *sc)
                   2341: {
                   2342:        bzero((void *) sc->rx_desc_base,
                   2343:            (sizeof(struct em_rx_desc)) * sc->num_rx_desc);
                   2344:
                   2345:        if (em_allocate_receive_structures(sc))
                   2346:                return (ENOMEM);
                   2347:
                   2348:        /* Setup our descriptor pointers */
                   2349:        sc->next_rx_desc_to_check = 0;
                   2350:        return (0);
                   2351: }
                   2352:
                   2353: /*********************************************************************
                   2354:  *
                   2355:  *  Enable receive unit.
                   2356:  *
                   2357:  **********************************************************************/
                   2358: void
                   2359: em_initialize_receive_unit(struct em_softc *sc)
                   2360: {
                   2361:        u_int32_t       reg_rctl;
                   2362:        u_int32_t       reg_rxcsum;
                   2363:        struct ifnet    *ifp;
                   2364:        u_int64_t       bus_addr;
                   2365:
                   2366:        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
                   2367:        ifp = &sc->interface_data.ac_if;
                   2368:
                   2369:        /* Make sure receives are disabled while setting up the descriptor ring */
                   2370:        E1000_WRITE_REG(&sc->hw, RCTL, 0);
                   2371:
                   2372:        /* Set the Receive Delay Timer Register */
                   2373:        E1000_WRITE_REG(&sc->hw, RDTR,
                   2374:                        sc->rx_int_delay | E1000_RDT_FPDB);
                   2375:
                   2376:        if (sc->hw.mac_type >= em_82540) {
                   2377:                E1000_WRITE_REG(&sc->hw, RADV, sc->rx_abs_int_delay);
                   2378:
                   2379:                /* Set the interrupt throttling rate.  Value is calculated
                   2380:                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
                   2381:                E1000_WRITE_REG(&sc->hw, ITR, DEFAULT_ITR);
                   2382:        }
                   2383:
                   2384:        /* Setup the Base and Length of the Rx Descriptor Ring */
                   2385:        bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
                   2386:        E1000_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
                   2387:                        sizeof(struct em_rx_desc));
                   2388:        E1000_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
                   2389:        E1000_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
                   2390:
                   2391:        /* Setup the Receive Control Register */
                   2392:        reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
                   2393:            E1000_RCTL_RDMTS_HALF |
                   2394:            (sc->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
                   2395:
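                                 /*
                                  * Store bad packets when the TBI compatibility workaround is
                                  * active; em_rxeof() may still accept them via TBI_ACCEPT().
                                  */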
                   2396:        if (sc->hw.tbi_compatibility_on == TRUE)
                   2397:                reg_rctl |= E1000_RCTL_SBP;
                   2398:
                   2399:        switch (sc->rx_buffer_len) {
                   2400:        default:
                   2401:        case EM_RXBUFFER_2048:
                   2402:                reg_rctl |= E1000_RCTL_SZ_2048;
                   2403:                break;
                   2404:        case EM_RXBUFFER_4096:
                   2405:                reg_rctl |= E1000_RCTL_SZ_4096|E1000_RCTL_BSEX|E1000_RCTL_LPE;
                   2406:                break;
                   2407:        case EM_RXBUFFER_8192:
                   2408:                reg_rctl |= E1000_RCTL_SZ_8192|E1000_RCTL_BSEX|E1000_RCTL_LPE;
                   2409:                break;
                   2410:        case EM_RXBUFFER_16384:
                   2411:                reg_rctl |= E1000_RCTL_SZ_16384|E1000_RCTL_BSEX|E1000_RCTL_LPE;
                   2412:                break;
                   2413:        }
                   2414:
                   2415:        if (sc->hw.max_frame_size != ETHER_MAX_LEN)
                   2416:                reg_rctl |= E1000_RCTL_LPE;
                   2417:
                   2418:        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
                   2419:        if (sc->hw.mac_type >= em_82543) {
                   2420:                reg_rxcsum = E1000_READ_REG(&sc->hw, RXCSUM);
                   2421:                reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
                   2422:                E1000_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
                   2423:        }
                   2424:
                   2425:        /* Enable Receives */
                   2426:        E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
                   2427:
                   2428:        /* Setup the HW Rx Head and Tail Descriptor Pointers */
                   2429:        E1000_WRITE_REG(&sc->hw, RDH, 0);
                   2430:        E1000_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);
                   2431: }
                   2432:
                   2433: /*********************************************************************
                   2434:  *
                   2435:  *  Free receive related data structures.
                   2436:  *
                   2437:  **********************************************************************/
                   2438: void
                   2439: em_free_receive_structures(struct em_softc *sc)
                   2440: {
                   2441:        struct em_buffer   *rx_buffer;
                   2442:        int             i;
                   2443:
                   2444:        INIT_DEBUGOUT("free_receive_structures: begin");
                   2445:
                   2446:        if (sc->rx_sparemap) {
                   2447:                bus_dmamap_destroy(sc->rxtag, sc->rx_sparemap);
                   2448:                sc->rx_sparemap = NULL;
                   2449:        }
                   2450:        if (sc->rx_buffer_area != NULL) {
                   2451:                rx_buffer = sc->rx_buffer_area;
                   2452:                for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
                   2453:                        if (rx_buffer->map != NULL &&
                   2454:                            rx_buffer->map->dm_nsegs > 0) {
                   2455:                                bus_dmamap_sync(sc->rxtag, rx_buffer->map,
                   2456:                                    0, rx_buffer->map->dm_mapsize,
                   2457:                                    BUS_DMASYNC_POSTREAD);
                   2458:                                bus_dmamap_unload(sc->rxtag,
                   2459:                                    rx_buffer->map);
                   2460:                        }
                   2461:                        if (rx_buffer->m_head != NULL) {
                   2462:                                m_freem(rx_buffer->m_head);
                   2463:                                rx_buffer->m_head = NULL;
                   2464:                        }
                   2465:                        if (rx_buffer->map != NULL) {
                   2466:                                bus_dmamap_destroy(sc->rxtag,
                   2467:                                    rx_buffer->map);
                   2468:                                rx_buffer->map = NULL;
                   2469:                        }
                   2470:                }
                   2471:        }
                   2472:        if (sc->rx_buffer_area != NULL) {
                   2473:                free(sc->rx_buffer_area, M_DEVBUF);
                   2474:                sc->rx_buffer_area = NULL;
                   2475:        }
                   2476:        if (sc->rxtag != NULL)
                   2477:                sc->rxtag = NULL;
                   2478: }
                   2479:
                    2480: /*********************************************************************
                    2481:  *
                    2482:  *  This routine executes in interrupt context. It replenishes
                    2483:  *  the mbufs in the descriptor ring and passes data that has been
                    2484:  *  DMA'ed into host memory up to the upper layer.
                    2485:  *
                    2486:  *  We loop at most count times if count is > 0, or until done if
                    2487:  *  count < 0.
                    2488:  *
                    2489:  *********************************************************************/
                   2490: void
                   2491: em_rxeof(struct em_softc *sc, int count)
                   2492: {
                   2493:        struct ifnet        *ifp;
                   2494:        struct mbuf         *mp;
                   2495:        u_int8_t            accept_frame = 0;
                   2496:        u_int8_t            eop = 0;
                   2497:        u_int16_t           len, desc_len, prev_len_adj;
                   2498:        int                 i;
                   2499:
                   2500:        /* Pointer to the receive descriptor being examined. */
                   2501:        struct em_rx_desc   *current_desc;
                   2502:        u_int8_t            status;
                   2503:
                   2504:        ifp = &sc->interface_data.ac_if;
                   2505:        i = sc->next_rx_desc_to_check;
                   2506:        current_desc = &sc->rx_desc_base[i];
                   2507:        bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
                   2508:            sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
                   2509:
                   2510:        if (!((current_desc->status) & E1000_RXD_STAT_DD))
                   2511:                return;
                   2512:
                   2513:        while ((current_desc->status & E1000_RXD_STAT_DD) &&
                   2514:            (count != 0) &&
                   2515:            (ifp->if_flags & IFF_RUNNING)) {
                   2516:                struct mbuf *m = NULL;
                   2517:
                   2518:                mp = sc->rx_buffer_area[i].m_head;
                   2519:                /*
                   2520:                 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
                   2521:                 * needs to access the last received byte in the mbuf.
                   2522:                 */
                   2523:                bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
                   2524:                    0, sc->rx_buffer_area[i].map->dm_mapsize,
                   2525:                    BUS_DMASYNC_POSTREAD);
                   2526:
                   2527:                accept_frame = 1;
                   2528:                prev_len_adj = 0;
                   2529:                desc_len = letoh16(current_desc->length);
                   2530:                status = current_desc->status;
                   2531:                if (status & E1000_RXD_STAT_EOP) {
                   2532:                        count--;
                   2533:                        eop = 1;
                   2534:                        if (desc_len < ETHER_CRC_LEN) {
                   2535:                                len = 0;
                   2536:                                prev_len_adj = ETHER_CRC_LEN - desc_len;
                   2537:                        } else
                   2538:                                len = desc_len - ETHER_CRC_LEN;
                   2539:                } else {
                   2540:                        eop = 0;
                   2541:                        len = desc_len;
                   2542:                }
                   2543:
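                                         /*
                                          * With TBI compatibility enabled, a frame flagged with an
                                          * error may still be acceptable; let TBI_ACCEPT() decide
                                          * and adjust the statistics if the frame is kept.
                                          */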
                   2544:                if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
                   2545:                        u_int8_t last_byte;
                   2546:                        u_int32_t pkt_len = desc_len;
                   2547:
                   2548:                        if (sc->fmp != NULL)
                   2549:                                pkt_len += sc->fmp->m_pkthdr.len;
                   2550:
                   2551:                        last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
                   2552:                        if (TBI_ACCEPT(&sc->hw, status, current_desc->errors,
                   2553:                                       pkt_len, last_byte)) {
                   2554:                                em_tbi_adjust_stats(&sc->hw,
                   2555:                                                    &sc->stats,
                   2556:                                                    pkt_len,
                   2557:                                                    sc->hw.mac_addr);
                   2558:                                if (len > 0)
                   2559:                                        len--;
                   2560:                        } else
                   2561:                                accept_frame = 0;
                   2562:                }
                   2563:
                   2564:                if (accept_frame) {
                   2565:                        if (em_get_buf(sc, i) != 0) {
                   2566:                                sc->dropped_pkts++;
                   2567:                                goto discard;
                   2568:                        }
                   2569:
                   2570:                        /* Assign correct length to the current fragment */
                   2571:                        mp->m_len = len;
                   2572:
                   2573: #ifdef __STRICT_ALIGNMENT
                   2574:                        /*
                   2575:                         * The Ethernet payload is not 32-bit aligned when
                   2576:                         * Jumbo packets are enabled, so on architectures with
                   2577:                         * strict alignment we need to shift the entire packet
                   2578:                         * ETHER_ALIGN bytes. Ugh.
                   2579:                         */
                   2580:                        if (sc->hw.max_frame_size > (MCLBYTES - ETHER_ALIGN)) {
                   2581:                                unsigned char tmp_align_buf[ETHER_ALIGN];
                   2582:                                int tmp_align_buf_len = 0;
                   2583:
                   2584:                                if (prev_len_adj > sc->align_buf_len)
                   2585:                                        prev_len_adj -= sc->align_buf_len;
                   2586:                                else
                   2587:                                        prev_len_adj = 0;
                   2588:
                   2589:                                if (mp->m_len > (MCLBYTES - ETHER_ALIGN)) {
                   2590:                                        bcopy(mp->m_data +
                   2591:                                            (MCLBYTES - ETHER_ALIGN),
                   2592:                                            &tmp_align_buf,
                   2593:                                            ETHER_ALIGN);
                   2594:                                        tmp_align_buf_len = mp->m_len -
                   2595:                                            (MCLBYTES - ETHER_ALIGN);
                   2596:                                        mp->m_len -= ETHER_ALIGN;
                   2597:                                }
                   2598:
                   2599:                                if (mp->m_len) {
                   2600:                                        bcopy(mp->m_data,
                   2601:                                            mp->m_data + ETHER_ALIGN,
                   2602:                                            mp->m_len);
                   2603:                                        if (!sc->align_buf_len)
                   2604:                                                mp->m_data += ETHER_ALIGN;
                   2605:                                }
                   2606:
                   2607:                                if (sc->align_buf_len) {
                   2608:                                        mp->m_len += sc->align_buf_len;
                   2609:                                        bcopy(&sc->align_buf,
                   2610:                                            mp->m_data,
                   2611:                                            sc->align_buf_len);
                   2612:                                }
                   2613:
                   2614:                                if (tmp_align_buf_len)
                   2615:                                        bcopy(&tmp_align_buf,
                   2616:                                            &sc->align_buf,
                   2617:                                            tmp_align_buf_len);
                   2618:                                sc->align_buf_len = tmp_align_buf_len;
                   2619:                        }
                   2620: #endif /* __STRICT_ALIGNMENT */
                   2621:
                   2622:                        if (sc->fmp == NULL) {
                   2623:                                mp->m_pkthdr.len = mp->m_len;
                   2624:                                sc->fmp = mp;    /* Store the first mbuf */
                   2625:                                sc->lmp = mp;
                   2626:                        } else {
                   2627:                                /* Chain mbuf's together */
                   2628:                                mp->m_flags &= ~M_PKTHDR;
                    2629:                                /*
                    2630:                                 * Adjust length of previous mbuf in chain if we
                    2631:                                 * received less than 4 bytes in the last descriptor.
                    2632:                                 */
                    2633:                                if (prev_len_adj > 0) {
                    2634:                                        sc->lmp->m_len -= prev_len_adj;
                    2635:                                        sc->fmp->m_pkthdr.len -= prev_len_adj;
                    2636:                                }
                    2637:                                sc->lmp->m_next = mp;
                    2638:                                sc->lmp = sc->lmp->m_next;
                    2639:                                sc->fmp->m_pkthdr.len += mp->m_len;
                   2640:                        }
                   2641:
                   2642:                        if (eop) {
                   2643:                                sc->fmp->m_pkthdr.rcvif = ifp;
                   2644:                                ifp->if_ipackets++;
                   2645:                                em_receive_checksum(sc, current_desc,
                   2646:                                            sc->fmp);
                   2647:                                m = sc->fmp;
                   2648:                                sc->fmp = NULL;
                   2649:                                sc->lmp = NULL;
                   2650:                        }
                   2651:                } else {
                   2652:                        sc->dropped_pkts++;
                   2653: discard:
                   2654:                        /* Reuse loaded DMA map and just update mbuf chain */
                   2655:                        mp = sc->rx_buffer_area[i].m_head;
                   2656:                        mp->m_len = mp->m_pkthdr.len = MCLBYTES;
                   2657:                        mp->m_data = mp->m_ext.ext_buf;
                   2658:                        mp->m_next = NULL;
                   2659:                        if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
                   2660:                                m_adj(mp, ETHER_ALIGN);
                   2661:                        if (sc->fmp != NULL) {
                   2662:                                m_freem(sc->fmp);
                   2663:                                sc->fmp = NULL;
                   2664:                                sc->lmp = NULL;
                   2665:                        }
                   2666:                        m = NULL;
                   2667:                }
                   2668:
                   2669:                /* Zero out the receive descriptors status. */
                   2670:                current_desc->status = 0;
                   2671:                bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
                   2672:                    sc->rxdma.dma_map->dm_mapsize,
                   2673:                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   2674:
                   2675:                /* Advance our pointers to the next descriptor. */
                   2676:                if (++i == sc->num_rx_desc)
                   2677:                        i = 0;
                   2678:                if (m != NULL) {
                   2679:                        sc->next_rx_desc_to_check = i;
                   2680:
                   2681: #if NBPFILTER > 0
                   2682:                        /*
                   2683:                         * Handle BPF listeners. Let the BPF
                   2684:                         * user see the packet.
                   2685:                         */
                   2686:                        if (ifp->if_bpf)
                   2687:                                bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
                   2688: #endif
                   2689:
                   2690:                        ether_input_mbuf(ifp, m);
                   2691:
                   2692:                        i = sc->next_rx_desc_to_check;
                   2693:                }
                   2694:                current_desc = &sc->rx_desc_base[i];
                   2695:        }
                   2696:        sc->next_rx_desc_to_check = i;
                   2697:
                   2698:        /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
                   2699:        if (--i < 0)
                   2700:                i = sc->num_rx_desc - 1;
                   2701:        E1000_WRITE_REG(&sc->hw, RDT, i);
                   2702: }
                   2703:
                   2704: /*********************************************************************
                   2705:  *
                   2706:  *  Verify that the hardware indicated that the checksum is valid.
                    2707:  *  Inform the stack of the checksum status so that the stack
                    2708:  *  does not spend time verifying the checksum again.
                   2709:  *
                   2710:  *********************************************************************/
                   2711: void
                   2712: em_receive_checksum(struct em_softc *sc, struct em_rx_desc *rx_desc,
                   2713:     struct mbuf *mp)
                   2714: {
                   2715:        /* 82543 or newer only */
                   2716:        if ((sc->hw.mac_type < em_82543) ||
                   2717:            /* Ignore Checksum bit is set */
                   2718:            (rx_desc->status & E1000_RXD_STAT_IXSM)) {
                   2719:                mp->m_pkthdr.csum_flags = 0;
                   2720:                return;
                   2721:        }
                   2722:
                   2723:        if (rx_desc->status & E1000_RXD_STAT_IPCS) {
                   2724:                /* Did it pass? */
                   2725:                if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
                   2726:                        /* IP Checksum Good */
                   2727:                        mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
                   2728:
                   2729:                } else
                   2730:                        mp->m_pkthdr.csum_flags = 0;
                   2731:        }
                   2732:
                   2733:        if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
                   2734:                /* Did it pass? */
                   2735:                if (!(rx_desc->errors & E1000_RXD_ERR_TCPE))
                   2736:                        mp->m_pkthdr.csum_flags |=
                   2737:                                M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
                   2738:        }
                   2739: }
                   2740:
                   2741: void
                   2742: em_enable_intr(struct em_softc *sc)
                   2743: {
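                                 /* Unmask the interrupt causes the driver services. */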
                   2744:        E1000_WRITE_REG(&sc->hw, IMS, (IMS_ENABLE_MASK));
                   2745: }
                   2746:
                   2747: void
                   2748: em_disable_intr(struct em_softc *sc)
                   2749: {
                    2750:        /*
                    2751:         * The first revision of the 82542 had an erratum: when link
                    2752:         * was forced, it would stay up even if the cable was
                    2753:         * disconnected.  Sequence errors were used to detect the
                    2754:         * disconnect, after which the driver would unforce the link.
                    2755:         * That code lives in the ISR, so for it to work correctly the
                    2756:         * sequence error interrupt had to be enabled all the time.
                    2757:         */
                   2758:
                   2759:        if (sc->hw.mac_type == em_82542_rev2_0)
                   2760:                E1000_WRITE_REG(&sc->hw, IMC, (0xffffffff & ~E1000_IMC_RXSEQ));
                   2761:        else
                   2762:                E1000_WRITE_REG(&sc->hw, IMC, 0xffffffff);
                   2763: }
                   2764:
                   2765: int
                   2766: em_is_valid_ether_addr(u_int8_t *addr)
                   2767: {
                   2768:        const char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
                   2769:
                   2770:        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
                   2771:                return (FALSE);
                   2772:
                   2773:        return (TRUE);
                   2774: }
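
                         /*
                          * Illustrative note (not part of the original driver): the
                          * (addr[0] & 1) test rejects any address with the group
                          * (multicast/broadcast) bit set, e.g. ff:ff:ff:ff:ff:ff, and the
                          * bcmp() test rejects the all-zero address 00:00:00:00:00:00; an
                          * ordinary unicast address, whose first octet has its least
                          * significant bit clear, passes.
                          */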
                   2775:
                   2776: void
                   2777: em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
                   2778: {
                   2779:        struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
                   2780:        pci_chipset_tag_t pc = pa->pa_pc;
                   2781:        /* Should we do read/mask/write...?  16 vs 32 bit!!! */
                   2782:        pci_conf_write(pc, pa->pa_tag, reg, *value);
                   2783: }
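
                         /*
                          * Minimal sketch (editor's addition, kept under #if 0 so it is not
                          * compiled): the "read/mask/write" question in the comment above
                          * arises because pci_conf_write() operates on aligned 32-bit
                          * dwords.  A hypothetical helper, em_write_pci_cfg_rmw(), could
                          * read the containing dword, splice the 16-bit value into the
                          * proper half, and write the dword back.
                          */
                         #if 0
                         void
                         em_write_pci_cfg_rmw(struct em_hw *hw, uint32_t reg, uint16_t *value)
                         {
                         	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
                         	pci_chipset_tag_t pc = pa->pa_pc;
                         	uint32_t dword, shift;
                         
                         	shift = (reg & 2) ? 16 : 0;	/* which 16-bit half of the dword */
                         	dword = pci_conf_read(pc, pa->pa_tag, reg & ~3);
                         	dword &= ~((uint32_t)0xffff << shift);
                         	dword |= (uint32_t)*value << shift;
                         	pci_conf_write(pc, pa->pa_tag, reg & ~3, dword);
                         }
                         #endif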
                   2784:
                   2785: void
                   2786: em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
                   2787: {
                   2788:        struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
                   2789:        pci_chipset_tag_t pc = pa->pa_pc;
                   2790:        *value = pci_conf_read(pc, pa->pa_tag, reg);
                   2791: }
                   2792:
                   2793: void
                   2794: em_pci_set_mwi(struct em_hw *hw)
                   2795: {
                   2796:        struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
                   2797:        pci_chipset_tag_t pc = pa->pa_pc;
                   2798:        /* Should we do read/mask/write...?  16 vs 32 bit!!! */
                   2799:        pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
                   2800:                (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE));
                   2801: }
                   2802:
                   2803: void
                   2804: em_pci_clear_mwi(struct em_hw *hw)
                   2805: {
                   2806:        struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
                   2807:        pci_chipset_tag_t pc = pa->pa_pc;
                   2808:        /* Should we do read/mask/write...?  16 vs 32 bit!!! */
                   2809:        pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
                   2810:                (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE));
                   2811: }
                   2812:
                   2813: int32_t
                   2814: em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
                   2815: {
                   2816:        struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
                   2817:        int32_t rc;
                   2818:        u_int16_t pectl;
                   2819:
                   2820:        /* find the PCIe link width and set max read request to 4KB */
                   2821:        if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
                   2822:            NULL, NULL) != 0) {
                   2823:                em_read_pci_cfg(hw, reg + 0x12, value);
                   2824:
                   2825:                em_read_pci_cfg(hw, reg + 0x8, &pectl);
                   2826:                pectl = (pectl & ~0x7000) | (5 << 12);
                   2827:                em_write_pci_cfg(hw, reg + 0x8, &pectl);
                   2828:                rc = 0;
                   2829:        } else
                   2830:                rc = -1;
                   2831:
                   2832:        return (rc);
                   2833: }
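
                         /*
                          * Descriptive note (editor's addition): assuming `reg' is the
                          * offset of the PCI Express capability structure, reg + 0x12 is
                          * the Link Status register (negotiated link width in bits 9:4)
                          * and reg + 0x8 is the Device Control register, whose bits 14:12
                          * (the 0x7000 mask above) encode the maximum read request size as
                          * 128 << value.  The value 5 written above therefore selects
                          * 128 << 5 = 4096 bytes, matching the "4KB" comment.
                          */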
                   2834:
                   2835: /*********************************************************************
                   2836: * 82544 coexistence issue workaround.
                   2837: *    There are two issues:
                   2838: *       1. Transmit hang issue.
                   2839: *          To detect this issue, the following equation can be used:
                   2840: *             SIZE[3:0] + ADDR[2:0] = SUM[3:0]
                   2841: *          If SUM[3:0] is between 1 and 4, the hang can occur.
                   2842: *
                   2843: *       2. DAC issue.
                   2844: *          To detect this issue, the same equation can be used:
                   2845: *             SIZE[3:0] + ADDR[2:0] = SUM[3:0]
                   2846: *          If SUM[3:0] is between 9 and 0xc, the issue can occur.
                   2847: *
                   2848: *    WORKAROUND:
                   2849: *          Make sure a buffer's ending address does not end in 1, 2, 3
                   2850: *          or 4 (hang) nor in 9, a, b or c (DAC).  If it would, split
                   2851: *          the last 4 bytes off into a second descriptor.
                   2852: *********************************************************************/
                   2853: u_int32_t
                   2854: em_fill_descriptors(u_int64_t address, u_int32_t length,
                   2855:     PDESC_ARRAY desc_array)
                   2856: {
                   2857:         /* The issue is sensitive to both the length and the address of */
                   2858:         /* the buffer, so examine how the buffer ends. */
                   2859:         u_int32_t safe_terminator;
                   2860:         if (length <= 4) {
                   2861:                 desc_array->descriptor[0].address = address;
                   2862:                 desc_array->descriptor[0].length = length;
                   2863:                 desc_array->elements = 1;
                   2864:                 return desc_array->elements;
                   2865:         }
                   2866:         safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
                   2867:         /* If it is not in 0x1-0x4 or 0x9-0xC, one descriptor is safe. */
                   2868:         if (safe_terminator == 0 ||
                   2869:             (safe_terminator > 4 &&
                   2870:              safe_terminator < 9) ||
                   2871:             (safe_terminator > 0xC &&
                   2872:              safe_terminator <= 0xF)) {
                   2873:                 desc_array->descriptor[0].address = address;
                   2874:                 desc_array->descriptor[0].length = length;
                   2875:                 desc_array->elements = 1;
                   2876:                 return desc_array->elements;
                   2877:         }
                   2878:
                   2879:         desc_array->descriptor[0].address = address;
                   2880:         desc_array->descriptor[0].length = length - 4;
                   2881:         desc_array->descriptor[1].address = address + (length - 4);
                   2882:         desc_array->descriptor[1].length = 4;
                   2883:         desc_array->elements = 2;
                   2884:         return desc_array->elements;
                   2885: }
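
                         /*
                          * Worked example (editor's addition): for a buffer whose address
                          * ends in 0x4 (address & 0x7 == 4) and whose length is 0x5f0
                          * (length & 0xF == 0), safe_terminator = (4 + 0) & 0xF = 4, which
                          * falls in the 1-4 hang range, so the buffer is split into
                          * descriptor[0] = { address, 0x5ec } and
                          * descriptor[1] = { address + 0x5ec, 4 }, and 2 is returned.
                          * A buffer whose sum lands on 0, in 5-8, or in 0xd-0xf is passed
                          * through unchanged as a single descriptor.
                          */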
                   2886:
                   2887: /**********************************************************************
                   2888:  *
                   2889:  *  Update the board statistics counters.
                   2890:  *
                   2891:  **********************************************************************/
                   2892: void
                   2893: em_update_stats_counters(struct em_softc *sc)
                   2894: {
                   2895:        struct ifnet   *ifp;
                   2896:
                   2897:        if (sc->hw.media_type == em_media_type_copper ||
                   2898:            (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU)) {
                   2899:                sc->stats.symerrs += E1000_READ_REG(&sc->hw, SYMERRS);
                   2900:                sc->stats.sec += E1000_READ_REG(&sc->hw, SEC);
                   2901:        }
                   2902:        sc->stats.crcerrs += E1000_READ_REG(&sc->hw, CRCERRS);
                   2903:        sc->stats.mpc += E1000_READ_REG(&sc->hw, MPC);
                   2904:        sc->stats.scc += E1000_READ_REG(&sc->hw, SCC);
                   2905:        sc->stats.ecol += E1000_READ_REG(&sc->hw, ECOL);
                   2906:
                   2907:        sc->stats.mcc += E1000_READ_REG(&sc->hw, MCC);
                   2908:        sc->stats.latecol += E1000_READ_REG(&sc->hw, LATECOL);
                   2909:        sc->stats.colc += E1000_READ_REG(&sc->hw, COLC);
                   2910:        sc->stats.dc += E1000_READ_REG(&sc->hw, DC);
                   2911:        sc->stats.rlec += E1000_READ_REG(&sc->hw, RLEC);
                   2912:        sc->stats.xonrxc += E1000_READ_REG(&sc->hw, XONRXC);
                   2913:        sc->stats.xontxc += E1000_READ_REG(&sc->hw, XONTXC);
                   2914:        sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, XOFFRXC);
                   2915:        sc->stats.xofftxc += E1000_READ_REG(&sc->hw, XOFFTXC);
                   2916:        sc->stats.fcruc += E1000_READ_REG(&sc->hw, FCRUC);
                   2917:        sc->stats.prc64 += E1000_READ_REG(&sc->hw, PRC64);
                   2918:        sc->stats.prc127 += E1000_READ_REG(&sc->hw, PRC127);
                   2919:        sc->stats.prc255 += E1000_READ_REG(&sc->hw, PRC255);
                   2920:        sc->stats.prc511 += E1000_READ_REG(&sc->hw, PRC511);
                   2921:        sc->stats.prc1023 += E1000_READ_REG(&sc->hw, PRC1023);
                   2922:        sc->stats.prc1522 += E1000_READ_REG(&sc->hw, PRC1522);
                   2923:        sc->stats.gprc += E1000_READ_REG(&sc->hw, GPRC);
                   2924:        sc->stats.bprc += E1000_READ_REG(&sc->hw, BPRC);
                   2925:        sc->stats.mprc += E1000_READ_REG(&sc->hw, MPRC);
                   2926:        sc->stats.gptc += E1000_READ_REG(&sc->hw, GPTC);
                   2927:
                   2928:        /* For the 64-bit byte counters, the low dword must be read */
                   2929:        /* first; both registers clear on the read of the high dword. */
                   2930:
                   2931:        sc->stats.gorcl += E1000_READ_REG(&sc->hw, GORCL);
                   2932:        sc->stats.gorch += E1000_READ_REG(&sc->hw, GORCH);
                   2933:        sc->stats.gotcl += E1000_READ_REG(&sc->hw, GOTCL);
                   2934:        sc->stats.gotch += E1000_READ_REG(&sc->hw, GOTCH);
                   2935:
                   2936:        sc->stats.rnbc += E1000_READ_REG(&sc->hw, RNBC);
                   2937:        sc->stats.ruc += E1000_READ_REG(&sc->hw, RUC);
                   2938:        sc->stats.rfc += E1000_READ_REG(&sc->hw, RFC);
                   2939:        sc->stats.roc += E1000_READ_REG(&sc->hw, ROC);
                   2940:        sc->stats.rjc += E1000_READ_REG(&sc->hw, RJC);
                   2941:
                   2942:        sc->stats.torl += E1000_READ_REG(&sc->hw, TORL);
                   2943:        sc->stats.torh += E1000_READ_REG(&sc->hw, TORH);
                   2944:        sc->stats.totl += E1000_READ_REG(&sc->hw, TOTL);
                   2945:        sc->stats.toth += E1000_READ_REG(&sc->hw, TOTH);
                   2946:
                   2947:        sc->stats.tpr += E1000_READ_REG(&sc->hw, TPR);
                   2948:        sc->stats.tpt += E1000_READ_REG(&sc->hw, TPT);
                   2949:        sc->stats.ptc64 += E1000_READ_REG(&sc->hw, PTC64);
                   2950:        sc->stats.ptc127 += E1000_READ_REG(&sc->hw, PTC127);
                   2951:        sc->stats.ptc255 += E1000_READ_REG(&sc->hw, PTC255);
                   2952:        sc->stats.ptc511 += E1000_READ_REG(&sc->hw, PTC511);
                   2953:        sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, PTC1023);
                   2954:        sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, PTC1522);
                   2955:        sc->stats.mptc += E1000_READ_REG(&sc->hw, MPTC);
                   2956:        sc->stats.bptc += E1000_READ_REG(&sc->hw, BPTC);
                   2957:
                   2958:        if (sc->hw.mac_type >= em_82543) {
                   2959:                sc->stats.algnerrc +=
                   2960:                    E1000_READ_REG(&sc->hw, ALGNERRC);
                   2961:                sc->stats.rxerrc +=
                   2962:                    E1000_READ_REG(&sc->hw, RXERRC);
                   2963:                sc->stats.tncrs +=
                   2964:                    E1000_READ_REG(&sc->hw, TNCRS);
                   2965:                sc->stats.cexterr +=
                   2966:                    E1000_READ_REG(&sc->hw, CEXTERR);
                   2967:                sc->stats.tsctc +=
                   2968:                    E1000_READ_REG(&sc->hw, TSCTC);
                   2969:                sc->stats.tsctfc +=
                   2970:                    E1000_READ_REG(&sc->hw, TSCTFC);
                   2971:        }
                   2972:        ifp = &sc->interface_data.ac_if;
                   2973:
                   2974:        /* Fill out the OS statistics structure */
                   2975:        ifp->if_collisions = sc->stats.colc;
                   2976:
                   2977:        /* Rx Errors */
                   2978:        ifp->if_ierrors =
                   2979:            sc->dropped_pkts +
                   2980:            sc->stats.rxerrc +
                   2981:            sc->stats.crcerrs +
                   2982:            sc->stats.algnerrc +
                   2983:            sc->stats.ruc + sc->stats.roc +
                   2984:            sc->stats.mpc + sc->stats.cexterr +
                   2985:            sc->rx_overruns;
                   2986:
                   2987:        /* Tx Errors */
                   2988:        ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol +
                   2989:            sc->watchdog_events;
                   2990: }
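
                         /*
                          * Minimal sketch (editor's addition, kept under #if 0 so it is not
                          * compiled): reading one 64-bit sample of the good-octets-received
                          * pair in the order required above: low dword first, because the
                          * read of the high dword clears both registers.  The helper name
                          * em_read_gorc_sample() is hypothetical.
                          */
                         #if 0
                         static u_int64_t
                         em_read_gorc_sample(struct em_softc *sc)
                         {
                         	u_int32_t lo, hi;
                         
                         	lo = E1000_READ_REG(&sc->hw, GORCL);	/* low dword read first */
                         	hi = E1000_READ_REG(&sc->hw, GORCH);	/* this read clears the pair */
                         	return ((u_int64_t)hi << 32) | lo;
                         }
                         #endif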
                   2991:
                   2992: /**********************************************************************
                   2993:  *
                   2994:  *  This routine is called only when em_display_debug_stats is enabled.
                   2995:  *  This routine provides a way to take a look at important statistics
                   2996:  *  maintained by the driver and hardware.
                   2997:  *
                   2998:  **********************************************************************/
                   2999: void
                   3000: em_print_hw_stats(struct em_softc *sc)
                   3001: {
                   3002:        const char * const unit = sc->sc_dv.dv_xname;
                   3003:
                   3004:        printf("%s: Excessive collisions = %lld\n", unit,
                   3005:                (long long)sc->stats.ecol);
                   3006:        printf("%s: Symbol errors = %lld\n", unit,
                   3007:                (long long)sc->stats.symerrs);
                   3008:        printf("%s: Sequence errors = %lld\n", unit,
                   3009:                (long long)sc->stats.sec);
                   3010:        printf("%s: Defer count = %lld\n", unit,
                   3011:                (long long)sc->stats.dc);
                   3012:
                   3013:        printf("%s: Missed Packets = %lld\n", unit,
                   3014:                (long long)sc->stats.mpc);
                   3015:        printf("%s: Receive No Buffers = %lld\n", unit,
                   3016:                (long long)sc->stats.rnbc);
                   3017:        /* RLEC is inaccurate on some hardware, so calculate our own. */
                   3018:        printf("%s: Receive Length Errors = %lld\n", unit,
                   3019:                ((long long)sc->stats.roc +
                   3020:                (long long)sc->stats.ruc));
                   3021:        printf("%s: Receive errors = %lld\n", unit,
                   3022:                (long long)sc->stats.rxerrc);
                   3023:        printf("%s: Crc errors = %lld\n", unit,
                   3024:                (long long)sc->stats.crcerrs);
                   3025:        printf("%s: Alignment errors = %lld\n", unit,
                   3026:                (long long)sc->stats.algnerrc);
                   3027:        printf("%s: Carrier extension errors = %lld\n", unit,
                   3028:                (long long)sc->stats.cexterr);
                   3029:
                   3030:        printf("%s: RX overruns = %ld\n", unit,
                   3031:                sc->rx_overruns);
                   3032:        printf("%s: watchdog timeouts = %ld\n", unit,
                   3033:                sc->watchdog_events);
                   3034:
                   3035:        printf("%s: XON Rcvd = %lld\n", unit,
                   3036:                (long long)sc->stats.xonrxc);
                   3037:        printf("%s: XON Xmtd = %lld\n", unit,
                   3038:                (long long)sc->stats.xontxc);
                   3039:        printf("%s: XOFF Rcvd = %lld\n", unit,
                   3040:                (long long)sc->stats.xoffrxc);
                   3041:        printf("%s: XOFF Xmtd = %lld\n", unit,
                   3042:                (long long)sc->stats.xofftxc);
                   3043:
                   3044:        printf("%s: Good Packets Rcvd = %lld\n", unit,
                   3045:                (long long)sc->stats.gprc);
                   3046:        printf("%s: Good Packets Xmtd = %lld\n", unit,
                   3047:                (long long)sc->stats.gptc);
                   3048: }
