
Annotation of sys/arch/vax/if/if_qe.c, Revision 1.1.1.1

1.1       nbrk        1: /*     $OpenBSD: if_qe.c,v 1.20 2006/04/16 00:46:32 pascoe Exp $       */
                      2: /*      $NetBSD: if_qe.c,v 1.51 2002/06/08 12:28:37 ragge Exp $ */
                      3: /*
                      4:  * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
                      5:  *
                      6:  * Redistribution and use in source and binary forms, with or without
                      7:  * modification, are permitted provided that the following conditions
                      8:  * are met:
                      9:  * 1. Redistributions of source code must retain the above copyright
                     10:  *    notice, this list of conditions and the following disclaimer.
                     11:  * 2. Redistributions in binary form must reproduce the above copyright
                     12:  *    notice, this list of conditions and the following disclaimer in the
                     13:  *    documentation and/or other materials provided with the distribution.
                     14:  * 3. All advertising materials mentioning features or use of this software
                     15:  *    must display the following acknowledgement:
                     16:  *      This product includes software developed at Ludd, University of
                      17:  *      Luleå, Sweden and its contributors.
                     18:  * 4. The name of the author may not be used to endorse or promote products
                     19:  *    derived from this software without specific prior written permission
                     20:  *
                     21:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     22:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     23:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     24:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                     25:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                     26:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                     27:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                     28:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                     29:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                     30:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                     31:  */
                     32:
                     33: /*
                     34:  * Driver for DEQNA/DELQA ethernet cards.
                      35:  * Things still left to do:
                     36:  *     Handle ubaresets. Does not work at all right now.
                     37:  *     Fix ALLMULTI reception. But someone must tell me how...
                     38:  *     Collect statistics.
                     39:  */
                     40:
                     41: #include "bpfilter.h"
                     42:
                     43: #include <sys/param.h>
                     44: #include <sys/mbuf.h>
                     45: #include <sys/socket.h>
                     46: #include <sys/device.h>
                     47: #include <sys/systm.h>
                     48: #include <sys/sockio.h>
                     49:
                     50: #include <net/if.h>
                     51: #include <net/if_dl.h>
                     52:
                     53: #include <netinet/in.h>
                     54: #include <netinet/if_ether.h>
                     55:
                     56: #if NBPFILTER > 0
                     57: #include <net/bpf.h>
                     58: #include <net/bpfdesc.h>
                     59: #endif
                     60:
                     61: #include <machine/bus.h>
                     62:
                     63: #include <arch/vax/qbus/ubavar.h>
                     64: #include <arch/vax/if/if_qereg.h>
                     65:
                     66: #define RXDESCS        30      /* # of receive descriptors */
                      67: #define TXDESCS        60      /* # of transmit descriptors */
                     68:
                     69: /*
                     70:  * Structure containing the elements that must be in DMA-safe memory.
                     71:  */
                     72: struct qe_cdata {
                     73:        struct qe_ring  qc_recv[RXDESCS+1];     /* Receive descriptors */
                     74:        struct qe_ring  qc_xmit[TXDESCS+1];     /* Transmit descriptors */
                     75:        u_int8_t        qc_setup[128];          /* Setup packet layout */
                     76: };
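                          /*
                           * The rings are one entry longer than RXDESCS/TXDESCS on purpose:
                           * qeattach() initializes the extra descriptor with QE_CHAIN and the
                           * Unibus address of descriptor 0, so the hardware follows it back to
                           * the head of the array and the lists behave as rings.
                           */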
                     77:
                     78: struct qe_softc {
                     79:        struct device   sc_dev;         /* Configuration common part    */
                     80:        struct evcount  sc_intrcnt;     /* Interrupt counting           */
                     81:        int             sc_cvec;
                     82:        struct arpcom   sc_ac;          /* Ethernet common part         */
                     83: #define sc_if  sc_ac.ac_if             /* network-visible interface    */
                     84:        bus_space_tag_t sc_iot;
                      85:        bus_space_handle_t sc_ioh;
                     86:        bus_dma_tag_t   sc_dmat;
                     87:        struct qe_cdata *sc_qedata;     /* Descriptor struct            */
                     88:        struct qe_cdata *sc_pqedata;    /* Unibus address of above      */
                     89:        struct mbuf*    sc_txmbuf[TXDESCS];
                     90:        struct mbuf*    sc_rxmbuf[RXDESCS];
                     91:        bus_dmamap_t    sc_xmtmap[TXDESCS];
                     92:        bus_dmamap_t    sc_rcvmap[RXDESCS];
                     93:        struct ubinfo   sc_ui;
                     94:        int             sc_intvec;      /* Interrupt vector             */
                     95:        int             sc_nexttx;
                     96:        int             sc_inq;
                     97:        int             sc_lastack;
                     98:        int             sc_nextrx;
                     99:        int             sc_setup;       /* Setup packet in queue        */
                    100: };
                    101:
                    102: static int     qematch(struct device *, struct cfdata *, void *);
                    103: static void    qeattach(struct device *, struct device *, void *);
                    104: static void    qeinit(struct qe_softc *);
                    105: static void    qestart(struct ifnet *);
                    106: static void    qeintr(void *);
                    107: static int     qeioctl(struct ifnet *, u_long, caddr_t);
                    108: static int     qe_add_rxbuf(struct qe_softc *, int);
                    109: static void    qe_setup(struct qe_softc *);
                    110: static void    qetimeout(struct ifnet *);
                    111:
                    112: struct cfattach qe_ca = {
                    113:        sizeof(struct qe_softc), (cfmatch_t)qematch, qeattach
                    114: };
                    115:
                    116: struct cfdriver qe_cd = {
                    117:        NULL, "qe", DV_IFNET
                    118: };
                    119:
                    120: #define        QE_WCSR(csr, val) \
                    121:        bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
                    122: #define        QE_RCSR(csr) \
                    123:        bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
                    124:
                    125: #define        LOWORD(x)       ((int)(x) & 0xffff)
                    126: #define        HIWORD(x)       (((int)(x) >> 16) & 0x3f)
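                          /*
                           * Qbus DMA addresses are split into a 16-bit low word and a high word
                           * for the descriptor qe_addr_lo/qe_addr_hi fields; the 0x3f mask keeps
                           * 6 high bits, i.e. a 22-bit address, and the spare bits of the high
                           * word carry flags such as QE_VALID.
                           * E.g. 0x3ffffe -> lo 0xfffe, hi 0x3f.
                           */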
                    127:
                    128: /*
                     129:  * Check for a present DEQNA. Done by sending a fake setup packet
                     130:  * and waiting for an interrupt.
                    131:  */
                    132: int
                    133: qematch(struct device *parent, struct cfdata *cf, void *aux)
                    134: {
                    135:        struct  qe_softc ssc;
                    136:        struct  qe_softc *sc = &ssc;
                    137:        struct  uba_attach_args *ua = aux;
                    138:        struct  uba_softc *ubasc = (struct uba_softc *)parent;
                    139:        struct ubinfo ui;
                    140:
                    141: #define        PROBESIZE       4096
                    142:        struct qe_ring *ring;
                    143:        struct  qe_ring *rp;
                    144:        int error;
                    145:
                    146:        ring = malloc(PROBESIZE, M_TEMP, M_WAITOK);
                    147:        bzero(sc, sizeof(struct qe_softc));
                    148:        bzero(ring, PROBESIZE);
                    149:        sc->sc_iot = ua->ua_iot;
                    150:        sc->sc_ioh = ua->ua_ioh;
                    151:        sc->sc_dmat = ua->ua_dmat;
                    152:
                    153:        ubasc->uh_lastiv -= 4;
                    154:        QE_WCSR(QE_CSR_CSR, QE_RESET);
                    155:        QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);
                    156:
                    157:        /*
                    158:         * Map the ring area. Actually this is done only to be able to
                     159:         * send and receive an internal packet; some junk is looped back
                    160:         * so that the DEQNA has a reason to interrupt.
                    161:         */
                    162:        ui.ui_size = PROBESIZE;
                    163:        ui.ui_vaddr = (caddr_t)&ring[0];
                    164:        if ((error = uballoc((void *)parent, &ui, UBA_CANTWAIT)))
                    165:                return 0;
                    166:
                    167:        /*
                     168:         * Init simple "fake" receive and transmit descriptors that
                     169:         * point to some unused area. Send a fake setup packet.
                    170:         */
                    171:        rp = (void *)ui.ui_baddr;
                    172:        ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
                    173:        ring[0].qe_addr_lo = LOWORD(&rp[4]);
                    174:        ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
                    175:        ring[0].qe_buf_len = -64;
                    176:
                    177:        ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
                    178:        ring[2].qe_addr_lo = LOWORD(&rp[4]);
                    179:        ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
                    180:        ring[2].qe_buf_len = -(1500/2);
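                                  /*
                                   * Buffer lengths in these descriptors are given as negative
                                   * 16-bit word counts, as throughout this driver: -64 covers
                                   * the 128-byte setup frame, -(1500/2) a 1500-byte receive
                                   * buffer.
                                   */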
                    181:
                    182:        QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
                    183:        DELAY(1000);
                    184:
                    185:        /*
                    186:         * Start the interface and wait for the packet.
                    187:         */
                    188:        QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
                    189:        QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
                    190:        QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
                    191:        QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
                    192:        QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
                    193:        DELAY(10000);
                    194:
                    195:        /*
                    196:         * All done with the bus resources.
                    197:         */
                    198:        ubfree((void *)parent, &ui);
                    199:        free(ring, M_TEMP);
                    200:        return 1;
                    201: }
                    202:
                    203: /*
                    204:  * Interface exists: make available by filling in network interface
                    205:  * record.  System will initialize the interface when it is ready
                    206:  * to accept packets.
                    207:  */
                    208: void
                    209: qeattach(struct device *parent, struct device *self, void *aux)
                    210: {
                    211:        struct  uba_attach_args *ua = aux;
                    212:        struct  uba_softc *ubasc = (struct uba_softc *)parent;
                    213:        struct  qe_softc *sc = (struct qe_softc *)self;
                    214:        struct  ifnet *ifp = (struct ifnet *)&sc->sc_if;
                    215:        struct  qe_ring *rp;
                    216:        int i, error;
                    217:
                    218:        sc->sc_iot = ua->ua_iot;
                    219:        sc->sc_ioh = ua->ua_ioh;
                    220:        sc->sc_dmat = ua->ua_dmat;
                    221:
                     222:        /*
                     223:         * Allocate DMA-safe memory for descriptors and setup memory.
                     224:         */
                    225:
                    226:        sc->sc_ui.ui_size = sizeof(struct qe_cdata);
                    227:        if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) {
                    228:                printf(": unable to ubmemalloc(), error = %d\n", error);
                    229:                return;
                    230:        }
                    231:        sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
                    232:        sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;
                    233:
                    234:        /*
                    235:         * Zero the newly allocated memory.
                    236:         */
                    237:        bzero(sc->sc_qedata, sizeof(struct qe_cdata));
                    238:        /*
                    239:         * Create the transmit descriptor DMA maps. We take advantage
                    240:         * of the fact that the Qbus address space is big, and therefore
                     241:         * allocate map registers for all transmit descriptors up front,
                     242:         * so that we can avoid doing this each time we send a packet.
                    243:         */
                    244:        for (i = 0; i < TXDESCS; i++) {
                    245:                if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
                    246:                    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
                    247:                    &sc->sc_xmtmap[i]))) {
                    248:                        printf(": unable to create tx DMA map %d, error = %d\n",
                    249:                            i, error);
                    250:                        goto fail_4;
                    251:                }
                    252:        }
                    253:
                    254:        /*
                    255:         * Create receive buffer DMA maps.
                    256:         */
                    257:        for (i = 0; i < RXDESCS; i++) {
                    258:                if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
                    259:                    MCLBYTES, 0, BUS_DMA_NOWAIT,
                    260:                    &sc->sc_rcvmap[i]))) {
                    261:                        printf(": unable to create rx DMA map %d, error = %d\n",
                    262:                            i, error);
                    263:                        goto fail_5;
                    264:                }
                    265:        }
                    266:        /*
                    267:         * Pre-allocate the receive buffers.
                    268:         */
                    269:        for (i = 0; i < RXDESCS; i++) {
                    270:                if ((error = qe_add_rxbuf(sc, i)) != 0) {
                     271:                        printf(": unable to allocate or map rx buffer %d,"
                     272:                            " error = %d\n", i, error);
                    273:                        goto fail_6;
                    274:                }
                    275:        }
                    276:
                    277:        /*
                    278:         * Create ring loops of the buffer chains.
                    279:         * This is only done once.
                    280:         */
                    281:
                    282:        rp = sc->sc_qedata->qc_recv;
                    283:        rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
                    284:        rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
                    285:            QE_VALID | QE_CHAIN;
                    286:        rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;
                    287:
                    288:        rp = sc->sc_qedata->qc_xmit;
                    289:        rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
                    290:        rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
                    291:            QE_VALID | QE_CHAIN;
                    292:        rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;
                    293:
                    294:        /*
                     295:         * Get the vector that was set at match time, and remember it.
                    296:         */
                    297:        sc->sc_intvec = ubasc->uh_lastiv;
                    298:        QE_WCSR(QE_CSR_CSR, QE_RESET);
                    299:        DELAY(1000);
                    300:        QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
                    301:
                    302:        /*
                     303:         * Read out the ethernet address and report which type of card this is.
                    304:         */
                    305:        for (i = 0; i < 6; i++)
                    306:                sc->sc_ac.ac_enaddr[i] = QE_RCSR(i * 2) & 0xff;
                    307:
                    308:        QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
                    309:        printf(": %s, address %s\n",
                    310:                QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
                    311:                ether_sprintf(sc->sc_ac.ac_enaddr));
                    312:
                    313:        QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */
                    314:
                    315:        uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
                    316:                sc, &sc->sc_intrcnt);
                    317:        sc->sc_cvec = ua->ua_cvec;
                    318:        evcount_attach(&sc->sc_intrcnt, sc->sc_dev.dv_xname,
                    319:            (void *)&sc->sc_cvec, &evcount_intr);
                    320:
                    321:        strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
                    322:        ifp->if_softc = sc;
                    323:        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
                    324:        ifp->if_start = qestart;
                    325:        ifp->if_ioctl = qeioctl;
                    326:        ifp->if_watchdog = qetimeout;
                    327:        IFQ_SET_READY(&ifp->if_snd);
                    328:
                    329:        /*
                    330:         * Attach the interface.
                    331:         */
                    332:        if_attach(ifp);
                    333:        ether_ifattach(ifp);
                    334:
                    335:        return;
                    336:
                    337:        /*
                    338:         * Free any resources we've allocated during the failed attach
                    339:         * attempt.  Do this in reverse order and fall through.
                    340:         */
                    341:  fail_6:
                    342:        for (i = 0; i < RXDESCS; i++) {
                    343:                if (sc->sc_rxmbuf[i] != NULL) {
                     344:                        bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
                    345:                        m_freem(sc->sc_rxmbuf[i]);
                    346:                }
                    347:        }
                    348:  fail_5:
                    349:        for (i = 0; i < RXDESCS; i++) {
                     350:                if (sc->sc_rcvmap[i] != NULL)
                     351:                        bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
                    352:        }
                    353:  fail_4:
                    354:        for (i = 0; i < TXDESCS; i++) {
                     355:                if (sc->sc_xmtmap[i] != NULL)
                     356:                        bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
                    357:        }
                    358: }
                    359:
                    360: /*
                    361:  * Initialization of interface.
                    362:  */
                    363: void
                    364: qeinit(struct qe_softc *sc)
                    365: {
                    366:        struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
                    367:        struct qe_cdata *qc = sc->sc_qedata;
                    368:        int i;
                    369:
                    370:
                    371:        /*
                    372:         * Reset the interface.
                    373:         */
                    374:        QE_WCSR(QE_CSR_CSR, QE_RESET);
                    375:        DELAY(1000);
                    376:        QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
                    377:        QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);
                    378:
                    379:        sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
                    380:        /*
                    381:         * Release and init transmit descriptors.
                    382:         */
                    383:        for (i = 0; i < TXDESCS; i++) {
                    384:                if (sc->sc_txmbuf[i]) {
                    385:                        bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
                    386:                        m_freem(sc->sc_txmbuf[i]);
                    387:                        sc->sc_txmbuf[i] = 0;
                    388:                }
                    389:                qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
                    390:                qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
                    391:        }
                    392:
                    393:
                    394:        /*
                    395:         * Init receive descriptors.
                    396:         */
                    397:        for (i = 0; i < RXDESCS; i++)
                    398:                qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
                    399:        sc->sc_nextrx = 0;
                    400:
                    401:        /*
                    402:         * Write the descriptor addresses to the device.
                    403:         * Receiving packets will be enabled in the interrupt routine.
                    404:         */
                    405:        QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
                    406:        QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
                    407:        QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));
                    408:
                    409:        ifp->if_flags |= IFF_RUNNING;
                    410:        ifp->if_flags &= ~IFF_OACTIVE;
                    411:
                    412:        /*
                    413:         * Send a setup frame.
                    414:         * This will start the transmit machinery as well.
                    415:         */
                    416:        qe_setup(sc);
                    417:
                    418: }
                    419:
                    420: /*
                    421:  * Start output on interface.
                    422:  */
                    423: void
                    424: qestart(struct ifnet *ifp)
                    425: {
                    426:        struct qe_softc *sc = ifp->if_softc;
                    427:        struct qe_cdata *qc = sc->sc_qedata;
                    428:        paddr_t buffer;
                    429:        struct mbuf *m, *m0;
                    430:        int idx, len, s, i, totlen, error;
                    431:        short orword, csr;
                    432:
                    433:        if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
                    434:                return;
                    435:
                    436:        s = splnet();
                    437:        while (sc->sc_inq < (TXDESCS - 1)) {
                    438:
                    439:                if (sc->sc_setup) {
                    440:                        qe_setup(sc);
                    441:                        continue;
                    442:                }
                    443:                idx = sc->sc_nexttx;
                    444:                IFQ_POLL(&ifp->if_snd, m);
                    445:                if (m == 0)
                    446:                        goto out;
                    447:                /*
                     448:                 * Count the number of mbufs in the chain.
                     449:                 * We always DMA directly from the mbufs, which is why the
                     450:                 * transmit ring is so big.
                    451:                 */
                    452:                for (m0 = m, i = 0; m0; m0 = m0->m_next)
                    453:                        if (m0->m_len)
                    454:                                i++;
                    455:                if (i >= TXDESCS)
                    456:                        panic("qestart");
                    457:
                    458:                if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
                    459:                        ifp->if_flags |= IFF_OACTIVE;
                    460:                        goto out;
                    461:                }
                    462:
                    463:                IFQ_DEQUEUE(&ifp->if_snd, m);
                    464:
                    465: #if NBPFILTER > 0
                    466:                if (ifp->if_bpf)
                    467:                        bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
                    468: #endif
                    469:                /*
                     470:                 * m now points to an mbuf chain that can be loaded.
                     471:                 * Loop over the chain and set up a descriptor for each segment.
                    472:                 */
                    473:                totlen = 0;
                    474:                for (m0 = m; m0; m0 = m0->m_next) {
                    475:                        error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
                    476:                            mtod(m0, void *), m0->m_len, 0, 0);
                    477:                        buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
                    478:                        len = m0->m_len;
                    479:                        if (len == 0)
                    480:                                continue;
                    481:
                    482:                        totlen += len;
                    483:                        /* Word alignment calc */
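                                                  /*
                                                   * DMA is word (16-bit) oriented: a segment that
                                                   * starts or ends on an odd byte address gets the
                                                   * QE_ODDBEGIN/QE_ODDEND flag and an extra word of
                                                   * length, QE_EOMSG marks the last segment of the
                                                   * frame, and a short frame is padded up to
                                                   * ETHER_MIN_LEN here.
                                                   */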
                    484:                        orword = 0;
                    485:                        if (totlen == m->m_pkthdr.len) {
                    486:                                if (totlen < ETHER_MIN_LEN)
                    487:                                        len += (ETHER_MIN_LEN - totlen);
                    488:                                orword |= QE_EOMSG;
                    489:                                sc->sc_txmbuf[idx] = m;
                    490:                        }
                    491:                        if ((buffer & 1) || (len & 1))
                    492:                                len += 2;
                    493:                        if (buffer & 1)
                    494:                                orword |= QE_ODDBEGIN;
                    495:                        if ((buffer + len) & 1)
                    496:                                orword |= QE_ODDEND;
                    497:                        qc->qc_xmit[idx].qe_buf_len = -(len/2);
                    498:                        qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
                    499:                        qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
                    500:                        qc->qc_xmit[idx].qe_flag =
                    501:                            qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
                    502:                        qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
                    503:                        if (++idx == TXDESCS)
                    504:                                idx = 0;
                    505:                        sc->sc_inq++;
                    506:                }
                    507: #ifdef DIAGNOSTIC
                    508:                if (totlen != m->m_pkthdr.len)
                    509:                        panic("qestart: len fault");
                    510: #endif
                    511:
                    512:                /*
                    513:                 * Kick off the transmit logic, if it is stopped.
                    514:                 */
                    515:                csr = QE_RCSR(QE_CSR_CSR);
                    516:                if (csr & QE_XL_INVALID) {
                    517:                        QE_WCSR(QE_CSR_XMTL,
                    518:                            LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
                    519:                        QE_WCSR(QE_CSR_XMTH,
                    520:                            HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
                    521:                }
                    522:                sc->sc_nexttx = idx;
                    523:        }
                    524:        if (sc->sc_inq == (TXDESCS - 1))
                    525:                ifp->if_flags |= IFF_OACTIVE;
                    526:
                    527: out:   if (sc->sc_inq)
                    528:                ifp->if_timer = 5; /* If transmit logic dies */
                    529:        splx(s);
                    530: }
                    531:
                    532: static void
                    533: qeintr(void *arg)
                    534: {
                    535:        struct qe_softc *sc = arg;
                    536:        struct qe_cdata *qc = sc->sc_qedata;
                    537:        struct ifnet *ifp = &sc->sc_if;
                    538:        struct ether_header *eh;
                    539:        struct mbuf *m;
                    540:        int csr, status1, status2, len;
                    541:
                    542:        csr = QE_RCSR(QE_CSR_CSR);
                    543:
                    544:        QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
                    545:            QE_RCV_INT | QE_ILOOP);
                    546:
                    547:        if (csr & QE_RCV_INT)
                    548:                while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
                    549:                        status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
                    550:                        status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;
                    551:
                    552:                        m = sc->sc_rxmbuf[sc->sc_nextrx];
                    553:                        len = ((status1 & QE_RBL_HI) |
                    554:                            (status2 & QE_RBL_LO)) + 60;
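                                                  /*
                                                   * The received length above is assembled from bits
                                                   * split across the two status words; the chip
                                                   * apparently reports it less 60 bytes, hence the
                                                   * "+ 60" correction.
                                                   */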
                    555:                        qe_add_rxbuf(sc, sc->sc_nextrx);
                    556:                        m->m_pkthdr.rcvif = ifp;
                    557:                        m->m_pkthdr.len = m->m_len = len;
                    558:                        if (++sc->sc_nextrx == RXDESCS)
                    559:                                sc->sc_nextrx = 0;
                    560:                        eh = mtod(m, struct ether_header *);
                    561: #if NBPFILTER > 0
                    562:                        if (ifp->if_bpf) {
                    563:                                bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
                    564:                                if ((ifp->if_flags & IFF_PROMISC) != 0 &&
                    565:                                    bcmp(sc->sc_ac.ac_enaddr, eh->ether_dhost,
                    566:                                    ETHER_ADDR_LEN) != 0 &&
                    567:                                    ((eh->ether_dhost[0] & 1) == 0)) {
                    568:                                        m_freem(m);
                    569:                                        continue;
                    570:                                }
                    571:                        }
                    572: #endif
                    573:                        /*
                    574:                         * ALLMULTI means PROMISC in this driver.
                    575:                         */
                    576:                        if ((ifp->if_flags & IFF_ALLMULTI) &&
                    577:                            ((eh->ether_dhost[0] & 1) == 0) &&
                    578:                            bcmp(sc->sc_ac.ac_enaddr, eh->ether_dhost,
                    579:                            ETHER_ADDR_LEN)) {
                    580:                                m_freem(m);
                    581:                                continue;
                    582:                        }
                    583:
                    584:                        if ((status1 & QE_ESETUP) == 0)
                    585:                                ether_input_mbuf(ifp, m);
                    586:                        else
                    587:                                m_freem(m);
                    588:                }
                    589:
                    590:        if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
                    591:                while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
                    592:                        int idx = sc->sc_lastack;
                    593:
                    594:                        sc->sc_inq--;
                    595:                        if (++sc->sc_lastack == TXDESCS)
                    596:                                sc->sc_lastack = 0;
                    597:
                    598:                        /* XXX collect statistics */
                    599:                        qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
                    600:                        qc->qc_xmit[idx].qe_status1 =
                    601:                            qc->qc_xmit[idx].qe_flag = QE_NOTYET;
                    602:
                    603:                        if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
                    604:                                continue;
                    605:                        bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
                    606:                        if (sc->sc_txmbuf[idx]) {
                    607:                                m_freem(sc->sc_txmbuf[idx]);
                    608:                                sc->sc_txmbuf[idx] = 0;
                    609:                        }
                    610:                }
                    611:                ifp->if_timer = 0;
                    612:                ifp->if_flags &= ~IFF_OACTIVE;
                     613:                qestart(ifp); /* Put more packets in the queue */
                    614:        }
                    615:        /*
                     616:         * How can the receive list become invalid?
                     617:         * Verified that it happens anyway.
                    618:         */
                    619:        if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
                    620:            (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
                    621:                QE_WCSR(QE_CSR_RCLL,
                    622:                    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
                    623:                QE_WCSR(QE_CSR_RCLH,
                    624:                    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
                    625:        }
                    626: }
                    627:
                    628: /*
                    629:  * Process an ioctl request.
                    630:  */
                    631: int
                    632: qeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
                    633: {
                    634:        struct qe_softc *sc = ifp->if_softc;
                    635:        struct ifreq *ifr = (struct ifreq *)data;
                    636:        struct ifaddr *ifa = (struct ifaddr *)data;
                    637:        int s = splnet(), error = 0;
                    638:
                    639:        switch (cmd) {
                    640:
                    641:        case SIOCSIFADDR:
                    642:                ifp->if_flags |= IFF_UP;
                    643:                switch(ifa->ifa_addr->sa_family) {
                    644: #ifdef INET
                    645:                case AF_INET:
                    646:                        qeinit(sc);
                    647:                        arp_ifinit(&sc->sc_ac, ifa);
                    648:                        break;
                    649: #endif
                    650:                }
                    651:                break;
                    652:
                    653:        case SIOCSIFFLAGS:
                    654:                if ((ifp->if_flags & IFF_UP) == 0 &&
                    655:                    (ifp->if_flags & IFF_RUNNING) != 0) {
                    656:                        /*
                     657:                         * If the interface is marked down and it is running,
                     658:                         * stop it (by disabling the receive mechanism).
                    659:                         */
                    660:                        QE_WCSR(QE_CSR_CSR,
                    661:                            QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
                    662:                        ifp->if_flags &= ~IFF_RUNNING;
                    663:                } else if ((ifp->if_flags & IFF_UP) != 0 &&
                    664:                           (ifp->if_flags & IFF_RUNNING) == 0) {
                    665:                        /*
                     666:                         * If the interface is marked up and it is stopped, then
                    667:                         * start it.
                    668:                         */
                    669:                        qeinit(sc);
                    670:                } else if ((ifp->if_flags & IFF_UP) != 0) {
                    671:                        /*
                    672:                         * Send a new setup packet to match any new changes.
                     673:                         * (like IFF_PROMISC, etc.)
                    674:                         */
                    675:                        qe_setup(sc);
                    676:                }
                    677:                break;
                    678:
                    679:        case SIOCADDMULTI:
                    680:        case SIOCDELMULTI:
                    681:                /*
                    682:                 * Update our multicast list.
                    683:                 */
                    684:                error = (cmd == SIOCADDMULTI) ?
                    685:                        ether_addmulti(ifr, &sc->sc_ac):
                    686:                        ether_delmulti(ifr, &sc->sc_ac);
                    687:
                    688:                if (error == ENETRESET) {
                    689:                        /*
                    690:                         * Multicast list has changed; set the hardware filter
                    691:                         * accordingly.
                    692:                         */
                    693:                        qe_setup(sc);
                    694:                        error = 0;
                    695:                }
                    696:                break;
                    697:
                    698:        default:
                    699:                error = EINVAL;
                    700:
                    701:        }
                    702:        splx(s);
                    703:        return (error);
                    704: }
                    705:
                    706: /*
                    707:  * Add a receive buffer to the indicated descriptor.
                    708:  */
                    709: int
                    710: qe_add_rxbuf(struct qe_softc *sc, int i)
                    711: {
                    712:        struct mbuf *m;
                    713:        struct qe_ring *rp;
                    714:        vaddr_t addr;
                    715:        int error;
                    716:
                    717:        MGETHDR(m, M_DONTWAIT, MT_DATA);
                    718:        if (m == NULL)
                    719:                return (ENOBUFS);
                    720:
                    721:        MCLGET(m, M_DONTWAIT);
                    722:        if ((m->m_flags & M_EXT) == 0) {
                    723:                m_freem(m);
                    724:                return (ENOBUFS);
                    725:        }
                    726:
                    727:        if (sc->sc_rxmbuf[i] != NULL)
                    728:                bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
                    729:
                    730:        error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
                    731:            m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
                    732:        if (error)
                    733:                panic("%s: can't load rx DMA map %d, error = %d",
                    734:                    sc->sc_dev.dv_xname, i, error);
                    735:        sc->sc_rxmbuf[i] = m;
                    736:
                    737:        bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
                    738:            sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
                    739:
                    740:        /*
                    741:         * We know that the mbuf cluster is page aligned. Also, be sure
                    742:         * that the IP header will be longword aligned.
                    743:         */
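                                  /*
                                   * Offsetting the data by 2 pushes the 14-byte ethernet header
                                   * so that the IP header behind it lands on a longword boundary.
                                   */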
                    744:        m->m_data += 2;
                    745:        addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
                    746:        rp = &sc->sc_qedata->qc_recv[i];
                    747:        rp->qe_flag = rp->qe_status1 = QE_NOTYET;
                    748:        rp->qe_addr_lo = LOWORD(addr);
                    749:        rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
                    750:        rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;
                    751:
                    752:        return (0);
                    753: }
                    754:
                    755: /*
                    756:  * Create a setup packet and put in queue for sending.
                    757:  */
                    758: void
                    759: qe_setup(struct qe_softc *sc)
                    760: {
                    761:        struct ether_multi *enm;
                    762:        struct ether_multistep step;
                    763:        struct qe_cdata *qc = sc->sc_qedata;
                    764:        struct ifnet *ifp = &sc->sc_if;
                    765:        u_int8_t *enaddr = sc->sc_ac.ac_enaddr;
                    766:        int i, j, k, idx, s;
                    767:
                    768:        s = splnet();
                    769:        if (sc->sc_inq == (TXDESCS - 1)) {
                    770:                sc->sc_setup = 1;
                    771:                splx(s);
                    772:                return;
                    773:        }
                    774:        sc->sc_setup = 0;
                    775:        /*
                    776:         * Init the setup packet with valid info.
                    777:         */
                    778:        memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
                    779:        for (i = 0; i < ETHER_ADDR_LEN; i++)
                    780:                qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */
                    781:
                    782:        /*
                    783:         * Multicast handling. The DEQNA can handle up to 12 direct
                    784:         * ethernet addresses.
                    785:         */
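                                  /*
                                   * Setup-frame layout as used below: the 128-byte buffer is two
                                   * 64-byte halves, each holding up to seven addresses stored
                                   * column-wise (byte i of slot j at offset i * 8 + j).  Slot 1
                                   * of the first half is our own address; slots left at 0xff by
                                   * the memset above act as the broadcast address.
                                   */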
                    786:        j = 3; k = 0;
                    787:        ifp->if_flags &= ~IFF_ALLMULTI;
                    788:        ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
                    789:        while (enm != NULL) {
                    790:                if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
                    791:                        ifp->if_flags |= IFF_ALLMULTI;
                    792:                        break;
                    793:                }
                    794:                for (i = 0; i < ETHER_ADDR_LEN; i++)
                    795:                        qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
                    796:                j++;
                    797:                if (j == 8) {
                    798:                        j = 1; k += 64;
                    799:                }
                    800:                if (k > 64) {
                    801:                        ifp->if_flags |= IFF_ALLMULTI;
                    802:                        break;
                    803:                }
                    804:                ETHER_NEXT_MULTI(step, enm);
                    805:        }
                    806:        idx = sc->sc_nexttx;
                    807:        qc->qc_xmit[idx].qe_buf_len = -64;
                    808:
                    809:        /*
                     810:         * How is the DEQNA put into ALLMULTI mode?
                     811:         * Until someone tells me, fall back to PROMISC when there are
                     812:         * more than 12 ethernet addresses.
                    813:         */
                    814:        if (ifp->if_flags & IFF_ALLMULTI)
                    815:                ifp->if_flags |= IFF_PROMISC;
                    816:        else if (ifp->if_pcount == 0)
                    817:                ifp->if_flags &= ~IFF_PROMISC;
                    818:        if (ifp->if_flags & IFF_PROMISC)
                    819:                qc->qc_xmit[idx].qe_buf_len = -65;
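                                  /*
                                   * Using a setup-frame length of -65 instead of -64 is how this
                                   * driver asks the DEQNA for promiscuous reception (cf. the
                                   * ALLMULTI fallback described above).
                                   */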
                    820:
                    821:        qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
                    822:        qc->qc_xmit[idx].qe_addr_hi =
                    823:            HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
                    824:        qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
                    825:        qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;
                    826:
                    827:        if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
                    828:                QE_WCSR(QE_CSR_XMTL,
                    829:                    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
                    830:                QE_WCSR(QE_CSR_XMTH,
                    831:                    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
                    832:        }
                    833:
                    834:        sc->sc_inq++;
                    835:        if (++sc->sc_nexttx == TXDESCS)
                    836:                sc->sc_nexttx = 0;
                    837:        splx(s);
                    838: }
                    839:
                    840: /*
                    841:  * Check for dead transmit logic. Not uncommon.
                    842:  */
                    843: void
                    844: qetimeout(struct ifnet *ifp)
                    845: {
                    846:        struct qe_softc *sc = ifp->if_softc;
                    847:
                    848:        if (sc->sc_inq == 0)
                    849:                return;
                    850:
                    851:        printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
                    852:        /*
                     853:         * Do a reset of the interface, to get it going again.
                     854:         * Would it work to just restart the transmit logic?
                    855:         */
                    856:        qeinit(sc);
                    857: }
