Annotation of sys/arch/sgi/dev/if_mec.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: if_mec.c,v 1.12 2007/07/31 19:10:22 deraadt Exp $ */
2: /* $NetBSD: if_mec_mace.c,v 1.5 2004/08/01 06:36:36 tsutsui Exp $ */
3:
4: /*
5: * Copyright (c) 2004 Izumi Tsutsui.
6: * All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. The name of the author may not be used to endorse or promote products
17: * derived from this software without specific prior written permission.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29: */
30:
31: /*
32: * Copyright (c) 2003 Christopher SEKIYA
33: * All rights reserved.
34: *
35: * Redistribution and use in source and binary forms, with or without
36: * modification, are permitted provided that the following conditions
37: * are met:
38: * 1. Redistributions of source code must retain the above copyright
39: * notice, this list of conditions and the following disclaimer.
40: * 2. Redistributions in binary form must reproduce the above copyright
41: * notice, this list of conditions and the following disclaimer in the
42: * documentation and/or other materials provided with the distribution.
43: * 3. All advertising materials mentioning features or use of this software
44: * must display the following acknowledgement:
45: * This product includes software developed for the
46: * NetBSD Project. See http://www.NetBSD.org/ for
47: * information about NetBSD.
48: * 4. The name of the author may not be used to endorse or promote products
49: * derived from this software without specific prior written permission.
50: *
51: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
55: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61: */
62:
63: /*
64: * MACE MAC-110 Ethernet driver
65: */
66:
67: #include "bpfilter.h"
68:
69: #include <sys/param.h>
70: #include <sys/systm.h>
71: #include <sys/device.h>
72: #include <sys/timeout.h>
73: #include <sys/mbuf.h>
74: #include <sys/malloc.h>
75: #include <sys/kernel.h>
76: #include <sys/socket.h>
77: #include <sys/ioctl.h>
78: #include <sys/errno.h>
79:
80: #include <net/if.h>
81: #include <net/if_dl.h>
82: #include <net/if_media.h>
83: #include <net/if_types.h>
84:
85: #ifdef INET
86: #include <netinet/in.h>
87: #include <netinet/in_systm.h>
88: #include <netinet/in_var.h>
89: #include <netinet/ip.h>
90: #endif
91:
92: #if NBPFILTER > 0
93: #include <net/bpf.h>
94: #endif
95:
96: #include <netinet/if_ether.h>
97:
98: #include <machine/bus.h>
99: #include <machine/intr.h>
100: #include <machine/autoconf.h>
101:
102: #include <dev/mii/mii.h>
103: #include <dev/mii/miivar.h>
104:
105: #include <mips64/archtype.h>
106: #include <mips64/arcbios.h>
107: #include <sgi/dev/if_mecreg.h>
108:
109: #ifdef MEC_DEBUG
110: #define MEC_DEBUG_RESET 0x01
111: #define MEC_DEBUG_START 0x02
112: #define MEC_DEBUG_STOP 0x04
113: #define MEC_DEBUG_INTR 0x08
114: #define MEC_DEBUG_RXINTR 0x10
115: #define MEC_DEBUG_TXINTR 0x20
116: uint32_t mec_debug = 0xff;
117: #define DPRINTF(x, y) if (mec_debug & (x)) printf y
118: #else
119: #define DPRINTF(x, y) /* nothing */
120: #endif
121:
122: /*
123: * Transmit descriptor list size
124: */
125: #define MEC_NTXDESC 64
126: #define MEC_NTXDESC_MASK (MEC_NTXDESC - 1)
127: #define MEC_NEXTTX(x) (((x) + 1) & MEC_NTXDESC_MASK)
128:
129: /*
130: * software state for TX
131: */
struct mec_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	uint32_t txs_flags;		/* MEC_TXS_* state bits below */
#define MEC_TXS_BUFLEN_MASK	0x0000007f	/* data len in txd_buf */
#define MEC_TXS_TXDBUF		0x00000080	/* txd_buf is used */
#define MEC_TXS_TXDPTR1		0x00000100	/* txd_ptr[0] is used */
};
140:
141: /*
142: * Transmit buffer descriptor
143: */
144: #define MEC_TXDESCSIZE 128
145: #define MEC_NTXPTR 3
146: #define MEC_TXD_BUFOFFSET \
147: (sizeof(uint64_t) + MEC_NTXPTR * sizeof(uint64_t))
148: #define MEC_TXD_BUFSIZE (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
149: #define MEC_TXD_BUFSTART(len) (MEC_TXD_BUFSIZE - (len))
150: #define MEC_TXD_ALIGN 8
151: #define MEC_TXD_ROUNDUP(addr) \
152: (((addr) + (MEC_TXD_ALIGN - 1)) & ~((uint64_t)MEC_TXD_ALIGN - 1))
153:
struct mec_txdesc {
	/* command word on submit; hardware rewrites it as status (txd_stat) */
	volatile uint64_t txd_cmd;
#define MEC_TXCMD_DATALEN	0x000000000000ffff	/* data length */
#define MEC_TXCMD_BUFSTART	0x00000000007f0000	/* start byte offset */
#define TXCMD_BUFSTART(x)	((x) << 16)
#define MEC_TXCMD_TERMDMA	0x0000000000800000	/* stop DMA on abort */
#define MEC_TXCMD_TXINT		0x0000000001000000	/* INT after TX done */
#define MEC_TXCMD_PTR1		0x0000000002000000	/* valid 1st txd_ptr */
#define MEC_TXCMD_PTR2		0x0000000004000000	/* valid 2nd txd_ptr */
#define MEC_TXCMD_PTR3		0x0000000008000000	/* valid 3rd txd_ptr */
#define MEC_TXCMD_UNUSED	0xfffffffff0000000ULL	/* should be zero */

#define txd_stat	txd_cmd
#define MEC_TXSTAT_LEN		0x000000000000ffff	/* TX length */
#define MEC_TXSTAT_COLCNT	0x00000000000f0000	/* collision count */
#define MEC_TXSTAT_COLCNT_SHIFT	16
#define MEC_TXSTAT_LATE_COL	0x0000000000100000	/* late collision */
#define MEC_TXSTAT_CRCERROR	0x0000000000200000	/* CRC error (?) */
#define MEC_TXSTAT_DEFERRED	0x0000000000400000	/* deferred (?) */
#define MEC_TXSTAT_SUCCESS	0x0000000000800000	/* TX complete */
#define MEC_TXSTAT_TOOBIG	0x0000000001000000	/* packet too big (?) */
#define MEC_TXSTAT_UNDERRUN	0x0000000002000000	/* FIFO underrun (?) */
#define MEC_TXSTAT_COLLISIONS	0x0000000004000000	/* collisions (?) */
#define MEC_TXSTAT_EXDEFERRAL	0x0000000008000000	/* excess deferral (?) */
#define MEC_TXSTAT_COLLIDED	0x0000000010000000	/* collided (?) */
#define MEC_TXSTAT_UNUSED	0x7fffffffe0000000ULL	/* should be zero */
#define MEC_TXSTAT_SENT		0x8000000000000000ULL	/* packet sent */

	/* up to three concatenate pointers for out-of-descriptor DMA data */
	uint64_t txd_ptr[MEC_NTXPTR];
#define MEC_TXPTR_UNUSED2	0x0000000000000007	/* should be zero */
#define MEC_TXPTR_DMAADDR	0x00000000fffffff8	/* TX DMA address */
#define MEC_TXPTR_LEN		0x0000ffff00000000ULL	/* buffer length */
#define TXPTR_LEN(x)		((uint64_t)(x) << 32)
#define MEC_TXPTR_UNUSED1	0xffff000000000000ULL	/* should be zero */

	/* in-descriptor data buffer; short packets are copied in here */
	uint8_t txd_buf[MEC_TXD_BUFSIZE];
};
191:
192: /*
193: * Receive buffer size
194: */
195: #define MEC_NRXDESC 16
196: #define MEC_NRXDESC_MASK (MEC_NRXDESC - 1)
197: #define MEC_NEXTRX(x) (((x) + 1) & MEC_NRXDESC_MASK)
198:
199: /*
200: * Receive buffer description
201: */
202: #define MEC_RXDESCSIZE 4096 /* umm, should be 4kbyte aligned */
203: #define MEC_RXD_NRXPAD 3
204: #define MEC_RXD_DMAOFFSET (1 + MEC_RXD_NRXPAD)
205: #define MEC_RXD_BUFOFFSET (MEC_RXD_DMAOFFSET * sizeof(uint64_t))
206: #define MEC_RXD_BUFSIZE (MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
207:
struct mec_rxdesc {
	/* status word; hardware sets MEC_RXSTAT_RECEIVED when a frame lands */
	volatile uint64_t rxd_stat;
#define MEC_RXSTAT_LEN		0x000000000000ffff	/* data length */
#define MEC_RXSTAT_VIOLATION	0x0000000000010000	/* code violation (?) */
#define MEC_RXSTAT_UNUSED2	0x0000000000020000	/* unknown (?) */
#define MEC_RXSTAT_CRCERROR	0x0000000000040000	/* CRC error */
#define MEC_RXSTAT_MULTICAST	0x0000000000080000	/* multicast packet */
#define MEC_RXSTAT_BROADCAST	0x0000000000100000	/* broadcast packet */
#define MEC_RXSTAT_INVALID	0x0000000000200000	/* invalid preamble */
#define MEC_RXSTAT_LONGEVENT	0x0000000000400000	/* long packet */
#define MEC_RXSTAT_BADPACKET	0x0000000000800000	/* bad packet */
#define MEC_RXSTAT_CAREVENT	0x0000000001000000	/* carrier event */
#define MEC_RXSTAT_MATCHMCAST	0x0000000002000000	/* match multicast */
#define MEC_RXSTAT_MATCHMAC	0x0000000004000000	/* match MAC */
#define MEC_RXSTAT_SEQNUM	0x00000000f8000000	/* sequence number */
#define MEC_RXSTAT_CKSUM	0x0000ffff00000000ULL	/* IP checksum */
#define MEC_RXSTAT_UNUSED1	0x7fff000000000000ULL	/* should be zero */
#define MEC_RXSTAT_RECEIVED	0x8000000000000000ULL	/* set to 1 on RX */
	uint64_t rxd_pad1[MEC_RXD_NRXPAD];	/* pad up to MEC_RXD_BUFOFFSET */
	uint8_t rxd_buf[MEC_RXD_BUFSIZE];	/* received frame data */
};
229:
230: /*
231: * control structures for DMA ops
232: */
/* One contiguous DMA-able region holding both descriptor rings. */
struct mec_control_data {
	/*
	 * TX descriptors and buffers
	 */
	struct mec_txdesc mcd_txdesc[MEC_NTXDESC];

	/*
	 * RX descriptors and buffers
	 */
	struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
};
244:
245: /*
246: * It _seems_ there are some restrictions on descriptor address:
247: *
248: * - Base address of txdescs should be 8kbyte aligned
249: * - Each txdesc should be 128byte aligned
250: * - Each rxdesc should be 4kbyte aligned
251: *
252: * So we should specify 64k align to allocalte txdescs.
253: * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192
254: * so rxdescs are also allocated at 4kbyte aligned.
255: */
256: #define MEC_CONTROL_DATA_ALIGN (8 * 1024)
257:
258: #define MEC_CDOFF(x) offsetof(struct mec_control_data, x)
259: #define MEC_CDTXOFF(x) MEC_CDOFF(mcd_txdesc[(x)])
260: #define MEC_CDRXOFF(x) MEC_CDOFF(mcd_rxdesc[(x)])
261:
262: /*
263: * software state per device
264: */
struct mec_softc {
	struct device sc_dev;		/* generic device structures */
	struct arpcom sc_ac;		/* Ethernet common part */

	bus_space_tag_t sc_st;		/* bus_space tag */
	bus_space_handle_t sc_sh;	/* bus_space handle */
	bus_dma_tag_t sc_dmat;		/* bus_dma tag */
	void *sc_sdhook;		/* shutdown hook */

	struct mii_data sc_mii;		/* MII/media information */
	int sc_phyaddr;			/* MII address */
	struct timeout sc_tick_ch;	/* tick timeout */

	bus_dmamap_t sc_cddmamap;	/* bus_dma map for control data */
	/* shorthand: bus address of the control data region */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* pointer to allocated control data */
	struct mec_control_data *sc_control_data;
	/* shorthands for the rings inside the control data */
#define sc_txdesc	sc_control_data->mcd_txdesc
#define sc_rxdesc	sc_control_data->mcd_rxdesc

	/* software state for TX descs */
	struct mec_txsoft sc_txsoft[MEC_NTXDESC];

	int sc_txpending;		/* number of TX requests pending */
	int sc_txdirty;			/* first dirty TX descriptor */
	int sc_txlast;			/* last used TX descriptor */

	int sc_rxptr;			/* next ready RX buffer */
};
295:
296: #define MEC_CDTXADDR(sc, x) ((sc)->sc_cddma + MEC_CDTXOFF(x))
297: #define MEC_CDRXADDR(sc, x) ((sc)->sc_cddma + MEC_CDRXOFF(x))
298:
299: #define MEC_TXDESCSYNC(sc, x, ops) \
300: bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
301: MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
302: #define MEC_TXCMDSYNC(sc, x, ops) \
303: bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
304: MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
305:
306: #define MEC_RXSTATSYNC(sc, x, ops) \
307: bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
308: MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
309: #define MEC_RXBUFSYNC(sc, x, len, ops) \
310: bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
311: MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET, \
312: ETHER_ALIGN + (len), (ops))
313:
314: /* XXX these values should be moved to <net/if_ether.h> ? */
315: #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
316:
/* autoconf glue: driver name "mec", network interface device class */
struct cfdriver mec_cd = {
	NULL, "mec", DV_IFNET
};
320:
321: int mec_match(struct device *, void *, void *);
322: void mec_attach(struct device *, struct device *, void *);
323:
/* autoconf glue: softc size and match/attach entry points */
struct cfattach mec_ca = {
	sizeof(struct mec_softc), mec_match, mec_attach
};
327:
328: int mec_mii_readreg(struct device *, int, int);
329: void mec_mii_writereg(struct device *, int, int, int);
330: int mec_mii_wait(struct mec_softc *);
331: void mec_statchg(struct device *);
332: void mec_mediastatus(struct ifnet *, struct ifmediareq *);
333: int mec_mediachange(struct ifnet *);
334:
335: int mec_init(struct ifnet * ifp);
336: void mec_start(struct ifnet *);
337: void mec_watchdog(struct ifnet *);
338: void mec_tick(void *);
339: int mec_ioctl(struct ifnet *, u_long, caddr_t);
340: void mec_reset(struct mec_softc *);
341: void mec_setfilter(struct mec_softc *);
342: int mec_intr(void *arg);
343: void mec_stop(struct ifnet *, int);
344: void mec_rxintr(struct mec_softc *, uint32_t);
345: void mec_txintr(struct mec_softc *, uint32_t);
346: void mec_shutdown(void *);
347:
348: int
349: mec_match(struct device *parent, void *match, void *aux)
350: {
351: struct confargs *ca = aux;
352:
353: if (ca->ca_sys != SGI_O2 || strcmp(ca->ca_name, mec_cd.cd_name))
354: return (0);
355:
356: return (1);
357: }
358:
/*
 * Attach routine: map the chip registers, allocate/map/load the DMA
 * control area (both descriptor rings plus their embedded buffers),
 * create one TX DMA map per descriptor, fetch the station address
 * from ARCBIOS, attach the MII/PHY layer, register the network
 * interface, hook up the interrupt and the shutdown hook.
 * On failure, resources are torn down in reverse order via the
 * fail_* labels, falling through.
 */
void
mec_attach(struct device *parent, struct device *self, void *aux)
{
	struct mec_softc *sc = (void *)self;
	struct confargs *ca = aux;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t command;
	struct mii_softc *child;
	bus_dma_segment_t seg;
	int i, err, rseg;

	sc->sc_st = ca->ca_iot;
	if (bus_space_map(sc->sc_st, ca->ca_baseaddr, MEC_NREGS, 0,
	    &sc->sc_sh) != 0) {
		printf(": can't map i/o space\n");
		return;
	}

	/* set up DMA structures */
	sc->sc_dmat = ca->ca_dmat;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 * MEC_CONTROL_DATA_ALIGN (8KB) satisfies the descriptor alignment
	 * restrictions described above the definition.
	 */
	if ((err = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	/*
	 * XXX needs re-think...
	 * control data structures contain whole RX data buffer, so
	 * BUS_DMA_COHERENT (which disables cache) may cause some performance
	 * issue on copying data from the RX buffer to mbuf on normal memory,
	 * though we have to make sure all bus_dmamap_sync(9) ops are called
	 * properly in that case.
	 */
	if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mec_control_data),
	    (caddr_t *)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}
	memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));

	if ((err = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct mec_control_data), 1,
	    sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    err);
		goto fail_2;
	}
	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct mec_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    err);
		goto fail_3;
	}

	/* create TX buffer DMA maps (one single-segment map per descriptor) */
	for (i = 0; i < MEC_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	timeout_set(&sc->sc_tick_ch, mec_tick, sc);

	/* use Ethernet address from ARCBIOS */
	enaddr_aton(bios_enaddr, sc->sc_ac.ac_enaddr);

	/* reset device */
	mec_reset(sc);

	/* read MAC control to report the chip revision */
	command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);

	printf(": MAC-110 rev %d, address %s\n",
	    (command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* Done, now attach everything */

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mec_mii_readreg;
	sc->sc_mii.mii_writereg = mec_mii_writereg;
	sc->sc_mii.mii_statchg = mec_statchg;

	/* Set up PHY properties */
	ifmedia_init(&sc->sc_mii.mii_media, 0, mec_mediachange,
	    mec_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
		/* No PHY attached; fall back to fixed manual media */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
		sc->sc_phyaddr = child->mii_phy;
	}

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mec_ioctl;
	ifp->if_start = mec_start;
	ifp->if_watchdog = mec_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	IFQ_SET_MAXLEN(&ifp->if_snd, MEC_NTXDESC - 1);
	ether_ifattach(ifp);

	/* establish interrupt */
	BUS_INTR_ESTABLISH(ca, NULL, ca->ca_intr, IST_EDGE, IPL_NET,
	    mec_intr, sc, sc->sc_dev.dv_xname);

	/* set shutdown hook to reset interface on powerdown */
	sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall though.
	 */
 fail_4:
	for (i = 0; i < MEC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct mec_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
513:
514: int
515: mec_mii_readreg(struct device *self, int phy, int reg)
516: {
517: struct mec_softc *sc = (void *)self;
518: bus_space_tag_t st = sc->sc_st;
519: bus_space_handle_t sh = sc->sc_sh;
520: uint32_t val;
521: int i;
522:
523: if (mec_mii_wait(sc) != 0)
524: return 0;
525:
526: bus_space_write_4(st, sh, MEC_PHY_ADDRESS,
527: (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
528: bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
529: delay(25);
530:
531: for (i = 0; i < 20; i++) {
532: delay(30);
533:
534: val = bus_space_read_4(st, sh, MEC_PHY_DATA);
535:
536: if ((val & MEC_PHY_DATA_BUSY) == 0)
537: return val & MEC_PHY_DATA_VALUE;
538: }
539: return 0;
540: }
541:
542: void
543: mec_mii_writereg(struct device *self, int phy, int reg, int val)
544: {
545: struct mec_softc *sc = (void *)self;
546: bus_space_tag_t st = sc->sc_st;
547: bus_space_handle_t sh = sc->sc_sh;
548:
549: if (mec_mii_wait(sc) != 0) {
550: printf("timed out writing %x: %x\n", reg, val);
551: return;
552: }
553:
554: bus_space_write_4(st, sh, MEC_PHY_ADDRESS,
555: (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
556:
557: delay(60);
558:
559: bus_space_write_4(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);
560:
561: delay(60);
562:
563: mec_mii_wait(sc);
564: }
565:
566: int
567: mec_mii_wait(struct mec_softc *sc)
568: {
569: uint32_t busy;
570: int i, s;
571:
572: for (i = 0; i < 100; i++) {
573: delay(30);
574:
575: s = splhigh();
576: busy = bus_space_read_4(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
577: splx(s);
578:
579: if ((busy & MEC_PHY_DATA_BUSY) == 0)
580: return 0;
581: if (busy == 0xffff) /* XXX ? */
582: return 0;
583: }
584:
585: printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
586: return 1;
587: }
588:
589: void
590: mec_statchg(struct device *self)
591: {
592: struct mec_softc *sc = (void *)self;
593: bus_space_tag_t st = sc->sc_st;
594: bus_space_handle_t sh = sc->sc_sh;
595: uint32_t control;
596:
597: control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
598: control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
599: MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);
600:
601: /* must also set IPG here for duplex stuff ... */
602: if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
603: control |= MEC_MAC_FULL_DUPLEX;
604: } else {
605: /* set IPG */
606: control |= MEC_MAC_IPG_DEFAULT;
607: }
608:
609: bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
610: }
611:
612: void
613: mec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
614: {
615: struct mec_softc *sc = ifp->if_softc;
616:
617: if ((ifp->if_flags & IFF_UP) == 0)
618: return;
619:
620: mii_pollstat(&sc->sc_mii);
621: ifmr->ifm_status = sc->sc_mii.mii_media_status;
622: ifmr->ifm_active = sc->sc_mii.mii_media_active;
623: }
624:
625: int
626: mec_mediachange(struct ifnet *ifp)
627: {
628: struct mec_softc *sc = ifp->if_softc;
629:
630: if ((ifp->if_flags & IFF_UP) == 0)
631: return 0;
632:
633: return mii_mediachg(&sc->sc_mii);
634: }
635:
/*
 * if_init handler: stop and reset the chip, program the multicast/
 * promiscuous filter, rearm both rings, enable RX DMA and interrupts,
 * mark the interface running and restart transmission.
 * Always returns 0.
 */
int
mec_init(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct mec_rxdesc *rxd;
	int i;

	/* cancel any pending I/O */
	mec_stop(ifp, 0);

	/* reset device */
	mec_reset(sc);

	/* setup filter for multicast or promisc mode */
	mec_setfilter(sc);

	/* set the TX ring pointer to the base address */
	bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));

	/* empty TX ring: last used descriptor is just before the first */
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = MEC_NTXDESC - 1;

	/* put RX buffers into FIFO */
	for (i = 0; i < MEC_NRXDESC; i++) {
		rxd = &sc->sc_rxdesc[i];
		rxd->rxd_stat = 0;
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
	}
	sc->sc_rxptr = 0;

#if 0	/* XXX no info */
	bus_space_write_8(st, sh, MEC_TIMER, 0);
#endif

	/*
	 * MEC_DMA_TX_INT_ENABLE will be set later otherwise it causes
	 * spurious interrupts when TX buffers are empty
	 */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL,
	    (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
	    MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
	    MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);

	/* start the once-per-second MII tick */
	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	mec_start(ifp);

	mii_mediachg(&sc->sc_mii);

	return 0;
}
695:
696: void
697: mec_reset(struct mec_softc *sc)
698: {
699: bus_space_tag_t st = sc->sc_st;
700: bus_space_handle_t sh = sc->sc_sh;
701: uint64_t address, control;
702: int i;
703:
704: /* reset chip */
705: bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
706: delay(1000);
707: bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
708: delay(1000);
709:
710: /* set Ethernet address */
711: address = 0;
712: for (i = 0; i < ETHER_ADDR_LEN; i++) {
713: address = address << 8;
714: address += sc->sc_ac.ac_enaddr[i];
715: }
716: bus_space_write_8(st, sh, MEC_STATION, address);
717:
718: /* Default to 100/half and let auto-negotiation work its magic */
719: control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
720: MEC_MAC_IPG_DEFAULT;
721:
722: bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
723: bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
724:
725: DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
726: bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
727: }
728:
729: void
730: mec_start(struct ifnet *ifp)
731: {
732: struct mec_softc *sc = ifp->if_softc;
733: struct mbuf *m0;
734: struct mec_txdesc *txd;
735: struct mec_txsoft *txs;
736: bus_dmamap_t dmamap;
737: bus_space_tag_t st = sc->sc_st;
738: bus_space_handle_t sh = sc->sc_sh;
739: uint64_t txdaddr;
740: int error, firsttx, nexttx, opending;
741: int len, bufoff, buflen, unaligned, txdlen;
742:
743: if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
744: return;
745:
746: /*
747: * Remember the previous txpending and the first transmit descriptor.
748: */
749: opending = sc->sc_txpending;
750: firsttx = MEC_NEXTTX(sc->sc_txlast);
751:
752: DPRINTF(MEC_DEBUG_START,
753: ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx));
754:
755: for (;;) {
756: /* Grab a packet off the queue. */
757: IFQ_POLL(&ifp->if_snd, m0);
758: if (m0 == NULL)
759: break;
760:
761: if (sc->sc_txpending == MEC_NTXDESC) {
762: break;
763: }
764:
765: /*
766: * Get the next available transmit descriptor.
767: */
768: nexttx = MEC_NEXTTX(sc->sc_txlast);
769: txd = &sc->sc_txdesc[nexttx];
770: txs = &sc->sc_txsoft[nexttx];
771:
772: buflen = 0;
773: bufoff = 0;
774: txdaddr = 0; /* XXX gcc */
775: txdlen = 0; /* XXX gcc */
776:
777: len = m0->m_pkthdr.len;
778:
779: DPRINTF(MEC_DEBUG_START,
780: ("mec_start: len = %d, nexttx = %d\n", len, nexttx));
781:
782: IFQ_DEQUEUE(&ifp->if_snd, m0);
783: if (len < ETHER_PAD_LEN) {
784: /*
785: * I don't know if MEC chip does auto padding,
786: * so if the packet is small enough,
787: * just copy it to the buffer in txdesc.
788: * Maybe this is the simple way.
789: */
790: DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n"));
791:
792: bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
793: m_copydata(m0, 0, m0->m_pkthdr.len,
794: txd->txd_buf + bufoff);
795: memset(txd->txd_buf + bufoff + len, 0,
796: ETHER_PAD_LEN - len);
797: len = buflen = ETHER_PAD_LEN;
798:
799: txs->txs_flags = MEC_TXS_TXDBUF | buflen;
800: } else {
801: /*
802: * If the packet won't fit the buffer in txdesc,
803: * we have to use concatenate pointer to handle it.
804: * While MEC can handle up to three segments to
805: * concatenate, MEC requires that both the second and
806: * third segments have to be 8 byte aligned.
807: * Since it's unlikely for mbuf clusters, we use
808: * only the first concatenate pointer. If the packet
809: * doesn't fit in one DMA segment, allocate new mbuf
810: * and copy the packet to it.
811: *
812: * Besides, if the start address of the first segments
813: * is not 8 byte aligned, such part have to be copied
814: * to the txdesc buffer. (XXX see below comments)
815: */
816: DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n"));
817:
818: dmamap = txs->txs_dmamap;
819: if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
820: BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
821: struct mbuf *m;
822:
823: DPRINTF(MEC_DEBUG_START,
824: ("mec_start: re-allocating mbuf\n"));
825: MGETHDR(m, M_DONTWAIT, MT_DATA);
826: if (m == NULL) {
827: printf("%s: unable to allocate "
828: "TX mbuf\n", sc->sc_dev.dv_xname);
829: break;
830: }
831: if (len > (MHLEN - ETHER_ALIGN)) {
832: MCLGET(m, M_DONTWAIT);
833: if ((m->m_flags & M_EXT) == 0) {
834: printf("%s: unable to allocate "
835: "TX cluster\n",
836: sc->sc_dev.dv_xname);
837: m_freem(m);
838: break;
839: }
840: }
841: /*
842: * Each packet has the Ethernet header, so
843: * in many case the header isn't 4-byte aligned
844: * and data after the header is 4-byte aligned.
845: * Thus adding 2-byte offset before copying to
846: * new mbuf avoids unaligned copy and this may
847: * improve some performance.
848: * As noted above, unaligned part has to be
849: * copied to txdesc buffer so this may cause
850: * extra copy ops, but for now MEC always
851: * requires some data in txdesc buffer,
852: * so we always have to copy some data anyway.
853: */
854: m->m_data += ETHER_ALIGN;
855: m_copydata(m0, 0, len, mtod(m, caddr_t));
856: m->m_pkthdr.len = m->m_len = len;
857: m_freem(m0);
858: m0 = m;
859: error = bus_dmamap_load_mbuf(sc->sc_dmat,
860: dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
861: if (error) {
862: printf("%s: unable to load TX buffer, "
863: "error = %d\n",
864: sc->sc_dev.dv_xname, error);
865: m_freem(m);
866: break;
867: }
868: }
869:
870: /* handle unaligned part */
871: txdaddr = MEC_TXD_ROUNDUP(dmamap->dm_segs[0].ds_addr);
872: txs->txs_flags = MEC_TXS_TXDPTR1;
873: unaligned =
874: dmamap->dm_segs[0].ds_addr & (MEC_TXD_ALIGN - 1);
875: DPRINTF(MEC_DEBUG_START,
876: ("mec_start: ds_addr = 0x%x, unaligned = %d\n",
877: (u_int)dmamap->dm_segs[0].ds_addr, unaligned));
878: if (unaligned != 0) {
879: buflen = MEC_TXD_ALIGN - unaligned;
880: bufoff = MEC_TXD_BUFSTART(buflen);
881: DPRINTF(MEC_DEBUG_START,
882: ("mec_start: unaligned, "
883: "buflen = %d, bufoff = %d\n",
884: buflen, bufoff));
885: memcpy(txd->txd_buf + bufoff,
886: mtod(m0, caddr_t), buflen);
887: txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
888: }
889: #if 1
890: else {
891: /*
892: * XXX needs hardware info XXX
893: * It seems MEC always requires some data
894: * in txd_buf[] even if buffer is
895: * 8-byte aligned otherwise DMA abort error
896: * occurs later...
897: */
898: buflen = MEC_TXD_ALIGN;
899: bufoff = MEC_TXD_BUFSTART(buflen);
900: memcpy(txd->txd_buf + bufoff,
901: mtod(m0, caddr_t), buflen);
902: DPRINTF(MEC_DEBUG_START,
903: ("mec_start: aligned, "
904: "buflen = %d, bufoff = %d\n",
905: buflen, bufoff));
906: txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
907: txdaddr += MEC_TXD_ALIGN;
908: }
909: #endif
910: txdlen = len - buflen;
911: DPRINTF(MEC_DEBUG_START,
912: ("mec_start: txdaddr = 0x%llx, txdlen = %d\n",
913: txdaddr, txdlen));
914:
915: /*
916: * sync the DMA map for TX mbuf
917: *
918: * XXX unaligned part doesn't have to be sync'ed,
919: * but it's harmless...
920: */
921: bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
922: dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
923: }
924:
925: #if NBPFILTER > 0
926: /*
927: * Pass packet to bpf if there is a listener.
928: */
929: if (ifp->if_bpf)
930: bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
931: #endif
932:
933: /*
934: * Setup the transmit descriptor.
935: */
936:
937: /* TXINT bit will be set later on the last packet */
938: txd->txd_cmd = (len - 1);
939: /* but also set TXINT bit on a half of TXDESC */
940: if (sc->sc_txpending == (MEC_NTXDESC / 2))
941: txd->txd_cmd |= MEC_TXCMD_TXINT;
942:
943: if (txs->txs_flags & MEC_TXS_TXDBUF)
944: txd->txd_cmd |= TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen);
945: if (txs->txs_flags & MEC_TXS_TXDPTR1) {
946: txd->txd_cmd |= MEC_TXCMD_PTR1;
947: txd->txd_ptr[0] = TXPTR_LEN(txdlen - 1) | txdaddr;
948: /*
949: * Store a pointer to the packet so we can
950: * free it later.
951: */
952: txs->txs_mbuf = m0;
953: } else {
954: txd->txd_ptr[0] = 0;
955: /*
956: * In this case all data are copied to buffer in txdesc,
957: * we can free TX mbuf here.
958: */
959: m_freem(m0);
960: }
961:
962: DPRINTF(MEC_DEBUG_START,
963: ("mec_start: txd_cmd = 0x%llx, txd_ptr = 0x%llx\n",
964: txd->txd_cmd, txd->txd_ptr[0]));
965: DPRINTF(MEC_DEBUG_START,
966: ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n",
967: len, len, buflen, buflen));
968:
969: /* sync TX descriptor */
970: MEC_TXDESCSYNC(sc, nexttx,
971: BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
972:
973: /* advance the TX pointer. */
974: sc->sc_txpending++;
975: sc->sc_txlast = nexttx;
976: }
977:
978: if (sc->sc_txpending == MEC_NTXDESC) {
979: /* No more slots; notify upper layer. */
980: ifp->if_flags |= IFF_OACTIVE;
981: }
982:
983: if (sc->sc_txpending != opending) {
984: /*
985: * Cause a TX interrupt to happen on the last packet
986: * we enqueued.
987: */
988: sc->sc_txdesc[sc->sc_txlast].txd_cmd |= MEC_TXCMD_TXINT;
989: MEC_TXCMDSYNC(sc, sc->sc_txlast,
990: BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
991:
992: /* start TX */
993: bus_space_write_8(st, sh, MEC_TX_RING_PTR,
994: MEC_NEXTTX(sc->sc_txlast));
995:
996: /*
997: * If the transmitter was idle,
998: * reset the txdirty pointer and re-enable TX interrupt.
999: */
1000: if (opending == 0) {
1001: sc->sc_txdirty = firsttx;
1002: bus_space_write_8(st, sh, MEC_TX_ALIAS,
1003: MEC_TX_ALIAS_INT_ENABLE);
1004: }
1005:
1006: /* Set a watchdog timer in case the chip flakes out. */
1007: ifp->if_timer = 5;
1008: }
1009: }
1010:
1011: void
1012: mec_stop(struct ifnet *ifp, int disable)
1013: {
1014: struct mec_softc *sc = ifp->if_softc;
1015: struct mec_txsoft *txs;
1016: int i;
1017:
1018: DPRINTF(MEC_DEBUG_STOP, ("mec_stop\n"));
1019:
1020: ifp->if_timer = 0;
1021: ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1022:
1023: timeout_del(&sc->sc_tick_ch);
1024: mii_down(&sc->sc_mii);
1025:
1026: /* release any TX buffers */
1027: for (i = 0; i < MEC_NTXDESC; i++) {
1028: txs = &sc->sc_txsoft[i];
1029: if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
1030: bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1031: m_freem(txs->txs_mbuf);
1032: txs->txs_mbuf = NULL;
1033: }
1034: }
1035: }
1036:
/*
 * Handle interface ioctls.  Runs at splnet; ether_ioctl() gets first
 * crack, then the remaining commands are dispatched locally.
 */
int
mec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct mec_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error;

	s = splnet();

	/*
	 * Common ethernet ioctl handling first; a positive return is a
	 * real error, so bail out.  (Zero means "continue below".)
	 */
	if ((error = ether_ioctl(ifp, &sc->sc_ac, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		/* Bring the interface up; for INET also prime ARP. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			mec_init(ifp);
			arp_ifinit(&sc->sc_ac, ifa);
			break;
#endif
		default:
			mec_init(ifp);
			break;
		}
		break;
	case SIOCSIFMTU:
		/* Only standard ethernet MTUs are accepted. */
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP)
			mec_init(ifp);
		else if (ifp->if_flags & IFF_RUNNING)
			mec_stop(ifp, 1);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ac) :
		    ether_delmulti(ifr, &sc->sc_ac);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				mec_init(ifp);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media selection is delegated to the MII layer. */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ENXIO;
		break;
	}

	splx(s);
	return error;
}
1118:
1119: void
1120: mec_watchdog(struct ifnet *ifp)
1121: {
1122: struct mec_softc *sc = ifp->if_softc;
1123:
1124: printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1125: ifp->if_oerrors++;
1126:
1127: mec_init(ifp);
1128: }
1129:
1130: void
1131: mec_tick(void *arg)
1132: {
1133: struct mec_softc *sc = arg;
1134: int s;
1135:
1136: s = splnet();
1137: mii_tick(&sc->sc_mii);
1138: splx(s);
1139:
1140: timeout_add(&sc->sc_tick_ch, hz);
1141: }
1142:
1143: void
1144: mec_setfilter(struct mec_softc *sc)
1145: {
1146: struct arpcom *ec = &sc->sc_ac;
1147: struct ifnet *ifp = &sc->sc_ac.ac_if;
1148: struct ether_multi *enm;
1149: struct ether_multistep step;
1150: bus_space_tag_t st = sc->sc_st;
1151: bus_space_handle_t sh = sc->sc_sh;
1152: uint64_t mchash;
1153: uint32_t control, hash;
1154: int mcnt;
1155:
1156: control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1157: control &= ~MEC_MAC_FILTER_MASK;
1158:
1159: if (ifp->if_flags & IFF_PROMISC) {
1160: control |= MEC_MAC_FILTER_PROMISC;
1161: bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1162: bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1163: return;
1164: }
1165:
1166: mcnt = 0;
1167: mchash = 0;
1168: ETHER_FIRST_MULTI(step, ec, enm);
1169: while (enm != NULL) {
1170: if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1171: /* set allmulti for a range of multicast addresses */
1172: control |= MEC_MAC_FILTER_ALLMULTI;
1173: bus_space_write_8(st, sh, MEC_MULTICAST,
1174: 0xffffffffffffffffULL);
1175: bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1176: return;
1177: }
1178:
1179: #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
1180:
1181: hash = mec_calchash(enm->enm_addrlo);
1182: mchash |= 1 << hash;
1183: mcnt++;
1184: ETHER_NEXT_MULTI(step, enm);
1185: }
1186:
1187: ifp->if_flags &= ~IFF_ALLMULTI;
1188:
1189: if (mcnt > 0)
1190: control |= MEC_MAC_FILTER_MATCHMULTI;
1191:
1192: bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1193: bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1194: }
1195:
/*
 * Interrupt handler.  Loops acking and servicing interrupt causes
 * until the status register reads clear; returns nonzero iff any
 * interrupt was ours.
 */
int
mec_intr(void *arg)
{
	struct mec_softc *sc = arg;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t statreg, statack, dmac;
	int handled, sent;

	DPRINTF(MEC_DEBUG_INTR, ("mec_intr: called\n"));

	handled = sent = 0;

	for (;;) {
		statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);

		DPRINTF(MEC_DEBUG_INTR,
		    ("mec_intr: INT_STAT = 0x%x\n", statreg));

		/* No interesting bits left -> done. */
		statack = statreg & MEC_INT_STATUS_MASK;
		if (statack == 0)
			break;
		/* Ack exactly the causes we are about to service. */
		bus_space_write_8(st, sh, MEC_INT_STATUS, statack);

		handled = 1;

		if (statack &
		    (MEC_INT_RX_THRESHOLD |
		     MEC_INT_RX_FIFO_UNDERFLOW)) {
			mec_rxintr(sc, statreg);
		}

		/* Read DMA control for the debug trace only. */
		dmac = bus_space_read_8(st, sh, MEC_DMA_CONTROL);
		DPRINTF(MEC_DEBUG_INTR,
		    ("mec_intr: DMA_CONT = 0x%x\n", dmac));

		if (statack &
		    (MEC_INT_TX_EMPTY |
		     MEC_INT_TX_PACKET_SENT |
		     MEC_INT_TX_ABORT)) {
			mec_txintr(sc, statreg);
			sent = 1;
		}

		/* Fatal/unusual conditions are only logged here. */
		if (statack &
		    (MEC_INT_TX_LINK_FAIL |
		     MEC_INT_TX_MEM_ERROR |
		     MEC_INT_TX_ABORT |
		     MEC_INT_RX_DMA_UNDERFLOW)) {
			printf("%s: mec_intr: interrupt status = 0x%x\n",
			    sc->sc_dev.dv_xname, statreg);
		}
	}

	if (sent) {
		/* try to get more packets going */
		mec_start(ifp);
	}

	return handled;
}
1258:
/*
 * Receive interrupt service: walk the RX ring from sc_rxptr up to the
 * hardware's FIFO write pointer (taken from the interrupt status word),
 * copy each good frame into a fresh mbuf, hand it to the stack, and
 * return the descriptor to the chip's FIFO.  RX interrupts are masked
 * for the duration and re-enabled at the end.
 */
void
mec_rxintr(struct mec_softc *sc, uint32_t stat)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m;
	struct mec_rxdesc *rxd;
	uint64_t rxstat;
	u_int len;
	int i, last;

	DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: called\n"));

	/* Mask RX interrupts while we drain the ring. */
	bus_space_write_8(st, sh, MEC_RX_ALIAS, 0);

	/* Hardware's current FIFO position, encoded in the status word. */
	last = (stat & MEC_INT_RX_MCL_FIFO_ALIAS) >> 8;
	/* XXX does alias count mod 32 even if 16 descs are set up? */
	last &= MEC_NRXDESC_MASK;

	/* On underflow, back up one slot before processing. */
	if (stat & MEC_INT_RX_FIFO_UNDERFLOW)
		last = (last - 1) & MEC_NRXDESC_MASK;

	DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxptr %d last %d\n",
	    sc->sc_rxptr, last));
	for (i = sc->sc_rxptr; i != last; i = MEC_NEXTRX(i)) {

		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
		rxd = &sc->sc_rxdesc[i];
		rxstat = rxd->rxd_stat;

		DPRINTF(MEC_DEBUG_RXINTR,
		    ("mec_rxintr: rxstat = 0x%llx, rxptr = %d\n",
		    rxstat, i));
		DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxfifo = 0x%x\n",
		    (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));

		if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
			/* Status not received but fifo counted? Drop it! */
			goto dropit;
		}

		len = rxstat & MEC_RXSTAT_LEN;

		if (len < ETHER_MIN_LEN ||
		    len > ETHER_MAX_LEN) {
			/* invalid length packet; drop it. */
			DPRINTF(MEC_DEBUG_RXINTR,
			    ("mec_rxintr: wrong packet\n"));
 dropit:
			/*
			 * Common drop path: count the error, clear the
			 * descriptor status, and hand the buffer back to
			 * the chip's receive FIFO.
			 */
			ifp->if_ierrors++;
			rxd->rxd_stat = 0;
			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
			bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
			    MEC_CDRXADDR(sc, i));
			continue;
		}

		if (rxstat &
		    (MEC_RXSTAT_BADPACKET |
		     MEC_RXSTAT_LONGEVENT |
		     MEC_RXSTAT_INVALID |
		     MEC_RXSTAT_CRCERROR |
		     MEC_RXSTAT_VIOLATION)) {
			printf("%s: mec_rxintr: status = 0x%llx\n",
			    sc->sc_dev.dv_xname, rxstat);
			goto dropit;
		}

		/*
		 * Now allocate an mbuf (and possibly a cluster) to hold
		 * the received packet.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate RX mbuf\n",
			    sc->sc_dev.dv_xname);
			goto dropit;
		}
		if (len > (MHLEN - ETHER_ALIGN)) {
			/* Frame won't fit a header mbuf; need a cluster. */
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate RX cluster\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				m = NULL;
				goto dropit;
			}
		}

		/*
		 * Note MEC chip seems to insert 2 byte padding at the start of
		 * RX buffer, but we copy whole buffer to avoid unaligned copy.
		 */
		MEC_RXBUFSYNC(sc, i, len + ETHER_ALIGN, BUS_DMASYNC_POSTREAD);
		memcpy(mtod(m, caddr_t), rxd->rxd_buf,
		    ETHER_ALIGN + len - ETHER_CRC_LEN);
		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
		/* Skip the 2-byte pad so the IP header ends up aligned. */
		m->m_data += ETHER_ALIGN;

		/* put RX buffer into FIFO again */
		rxd->rxd_stat = 0;
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack it is for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* update RX pointer */
	sc->sc_rxptr = i;

	/* Re-enable RX interrupts with the threshold restored. */
	bus_space_write_8(st, sh, MEC_RX_ALIAS,
	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
	    MEC_DMA_RX_INT_ENABLE);
}
1389:
/*
 * Transmit-complete interrupt service: reclaim descriptors from
 * sc_txdirty up to the hardware's ring pointer (from the status word),
 * freeing DMA maps and mbufs for packets that were sent, and updating
 * error/collision statistics.
 */
void
mec_txintr(struct mec_softc *sc, uint32_t stat)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	uint64_t txstat;
	int i, last;
	u_int col;

	/* Slots are being freed, so output may proceed again. */
	ifp->if_flags &= ~IFF_OACTIVE;

	DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: called\n"));

	/* Mask TX interrupts while reclaiming. */
	bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_TX_ALIAS, 0);
	last = (stat & MEC_INT_TX_RING_BUFFER_ALIAS) >> 16;

	DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: dirty %d last %d\n",
	    sc->sc_txdirty, last));
	for (i = sc->sc_txdirty; i != last && sc->sc_txpending != 0;
	    i = MEC_NEXTTX(i), sc->sc_txpending--) {
		txd = &sc->sc_txdesc[i];

		MEC_TXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = txd->txd_stat;
		DPRINTF(MEC_DEBUG_TXINTR,
		    ("mec_txintr: dirty = %d, txstat = 0x%llx\n",
		    i, txstat));
		if ((txstat & MEC_TXSTAT_SENT) == 0) {
			/* Chip hasn't finished this one; stop reclaiming. */
			MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
			printf("%s: TX error: txstat = 0x%llx\n",
			    sc->sc_dev.dv_xname, txstat);
			ifp->if_oerrors++;
			continue;
		}

		/* Packets mapped via a DMA pointer hold an mbuf to free. */
		txs = &sc->sc_txsoft[i];
		if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
			dmamap = txs->txs_dmamap;
			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
		ifp->if_collisions += col;
		ifp->if_opackets++;
	}

	/* update the dirty TX buffer pointer */
	sc->sc_txdirty = i;
	DPRINTF(MEC_DEBUG_INTR,
	    ("mec_txintr: sc_txdirty = %2d, sc_txpending = %2d\n",
	    sc->sc_txdirty, sc->sc_txpending));

	/* cancel the watchdog timer if there are no pending TX packets */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
	else if (!(stat & MEC_INT_TX_EMPTY))
		bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_TX_ALIAS,
		    MEC_TX_ALIAS_INT_ENABLE);
}
1461:
1462: void
1463: mec_shutdown(void *arg)
1464: {
1465: struct mec_softc *sc = arg;
1466:
1467: mec_stop(&sc->sc_ac.ac_if, 1);
1468: }
CVSweb