Annotation of sys/dev/ic/dc.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: dc.c,v 1.97 2007/05/08 00:04:47 deraadt Exp $ */
2:
3: /*
4: * Copyright (c) 1997, 1998, 1999
5: * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: * 3. All advertising materials mentioning features or use of this software
16: * must display the following acknowledgement:
17: * This product includes software developed by Bill Paul.
18: * 4. Neither the name of the author nor the names of any co-contributors
19: * may be used to endorse or promote products derived from this software
20: * without specific prior written permission.
21: *
22: * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25: * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32: * THE POSSIBILITY OF SUCH DAMAGE.
33: *
34: * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
35: */
36:
37: /*
38: * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
39: * series chips and several workalikes including the following:
40: *
41: * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
42: * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
43: * Lite-On 82c168/82c169 PNIC (www.litecom.com)
44: * ASIX Electronics AX88140A (www.asix.com.tw)
45: * ASIX Electronics AX88141 (www.asix.com.tw)
46: * ADMtek AL981 (www.admtek.com.tw)
47: * ADMtek AN983 (www.admtek.com.tw)
48: * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49: * Accton EN1217, EN2242 (www.accton.com)
50: * Xircom X3201 (www.xircom.com)
51: *
52: * Datasheets for the 21143 are available at developer.intel.com.
53: * Datasheets for the clone parts can be found at their respective sites.
54: * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
55: * The PNIC II is essentially a Macronix 98715A chip; the only difference
56: * worth noting is that its multicast hash table is only 128 bits wide
57: * instead of 512.
58: *
59: * Written by Bill Paul <wpaul@ee.columbia.edu>
60: * Electrical Engineering Department
61: * Columbia University, New York City
62: */
63:
64: /*
65: * The Intel 21143 is the successor to the DEC 21140. It is basically
66: * the same as the 21140 but with a few new features. The 21143 supports
67: * three kinds of media attachments:
68: *
69: * o MII port, for 10Mbps and 100Mbps support and NWAY
70: * autonegotiation provided by an external PHY.
71: * o SYM port, for symbol mode 100Mbps support.
72: * o 10baseT port.
73: * o AUI/BNC port.
74: *
75: * The 100Mbps SYM port and 10baseT port can be used together in
76: * combination with the internal NWAY support to create a 10/100
77: * autosensing configuration.
78: *
79: * Note that not all tulip workalikes are handled in this driver: we only
80: * deal with those which are relatively well behaved. The Winbond is
81: * handled separately due to its different register offsets and the
82: * special handling needed for its various bugs. The PNIC is handled
83: * here, but I'm not thrilled about it.
84: *
85: * All of the workalike chips use some form of MII transceiver support
86: * with the exception of the Macronix chips, which also have a SYM port.
87: * The ASIX AX88140A is also documented to have a SYM port, but all
88: * the cards I've seen use an MII transceiver, probably because the
89: * AX88140A doesn't support internal NWAY.
90: */
91:
92: #include "bpfilter.h"
93:
94: #include <sys/param.h>
95: #include <sys/systm.h>
96: #include <sys/mbuf.h>
97: #include <sys/protosw.h>
98: #include <sys/socket.h>
99: #include <sys/ioctl.h>
100: #include <sys/errno.h>
101: #include <sys/malloc.h>
102: #include <sys/kernel.h>
103: #include <sys/device.h>
104: #include <sys/timeout.h>
105:
106: #include <net/if.h>
107: #include <net/if_dl.h>
108: #include <net/if_types.h>
109:
110: #ifdef INET
111: #include <netinet/in.h>
112: #include <netinet/in_systm.h>
113: #include <netinet/in_var.h>
114: #include <netinet/ip.h>
115: #include <netinet/if_ether.h>
116: #endif
117:
118: #include <net/if_media.h>
119:
120: #if NBPFILTER > 0
121: #include <net/bpf.h>
122: #endif
123:
124: #include <dev/mii/mii.h>
125: #include <dev/mii/miivar.h>
126:
127: #include <machine/bus.h>
128: #include <dev/pci/pcidevs.h>
129:
130: #include <dev/ic/dcreg.h>
131:
/* Interrupt, power and attach-time glue. */
int dc_intr(void *);
void dc_shutdown(void *);
void dc_power(int, void *);
struct dc_type *dc_devtype(void *);
int dc_newbuf(struct dc_softc *, int, struct mbuf *);
int dc_encap(struct dc_softc *, struct mbuf *, u_int32_t *);
int dc_coal(struct dc_softc *, struct mbuf **);

/* RX/TX datapath and ifnet entry points. */
void dc_pnic_rx_bug_war(struct dc_softc *, int);
int dc_rx_resync(struct dc_softc *);
void dc_rxeof(struct dc_softc *);
void dc_txeof(struct dc_softc *);
void dc_tick(void *);
void dc_tx_underrun(struct dc_softc *);
void dc_start(struct ifnet *);
int dc_ioctl(struct ifnet *, u_long, caddr_t);
void dc_init(void *);
void dc_stop(struct dc_softc *);
void dc_watchdog(struct ifnet *);
int dc_ifmedia_upd(struct ifnet *);
void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* Bit-banged EEPROM/SROM access. */
void dc_delay(struct dc_softc *);
void dc_eeprom_width(struct dc_softc *);
void dc_eeprom_idle(struct dc_softc *);
void dc_eeprom_putbyte(struct dc_softc *, int);
void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);

/* Bit-banged MII access and miibus glue. */
void dc_mii_writebit(struct dc_softc *, int);
int dc_mii_readbit(struct dc_softc *);
void dc_mii_sync(struct dc_softc *);
void dc_mii_send(struct dc_softc *, u_int32_t, int);
int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
int dc_miibus_readreg(struct device *, int, int);
void dc_miibus_writereg(struct device *, int, int, int);
void dc_miibus_statchg(struct device *);

/* Media configuration and RX filter programming. */
void dc_setcfg(struct dc_softc *, int);
u_int32_t dc_crc_le(struct dc_softc *, caddr_t);
u_int32_t dc_crc_be(caddr_t);
void dc_setfilt_21143(struct dc_softc *);
void dc_setfilt_asix(struct dc_softc *);
void dc_setfilt_admtek(struct dc_softc *);
void dc_setfilt_xircom(struct dc_softc *);

void dc_setfilt(struct dc_softc *);

void dc_reset(struct dc_softc *);
int dc_list_rx_init(struct dc_softc *);
int dc_list_tx_init(struct dc_softc *);

/* SROM parsing (21143 media info blocks). */
void dc_read_srom(struct dc_softc *, int);
void dc_parse_21143_srom(struct dc_softc *);
void dc_decode_leaf_sia(struct dc_softc *,
    struct dc_eblock_sia *);
void dc_decode_leaf_mii(struct dc_softc *,
    struct dc_eblock_mii *);
void dc_decode_leaf_sym(struct dc_softc *,
    struct dc_eblock_sym *);
void dc_apply_fixup(struct dc_softc *, int);

/* Read-modify-write helpers for 32-bit CSRs. */
#define DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

/* Shorthand for toggling bits in the serial I/O register. */
#define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
205:
206: void
207: dc_delay(sc)
208: struct dc_softc *sc;
209: {
210: int idx;
211:
212: for (idx = (300 / 33) + 1; idx > 0; idx--)
213: CSR_READ_4(sc, DC_BUSCTL);
214: }
215:
/*
 * Detect the width (in bits) of the EEPROM address bus.  A read
 * command is clocked out followed by up to 12 zero address bits;
 * the loop index at which DATAOUT goes low is taken as the ROM
 * address width.  Falls back to 6 bits if the probe result is
 * implausible.
 */
void
dc_eeprom_width(sc)
	struct dc_softc *sc;
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Clock out the 3-bit read command, MSB first.  The constant
	 * 6 (binary 110) matches DC_EECMD_READ >> 6 as used in
	 * dc_eeprom_putbyte().
	 */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Count clocks until the EEPROM drives DATAOUT low. */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Implausible probe result: assume the common 6-bit width. */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
280:
281: void
282: dc_eeprom_idle(sc)
283: struct dc_softc *sc;
284: {
285: int i;
286:
287: CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
288: dc_delay(sc);
289: DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
290: dc_delay(sc);
291: DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
292: dc_delay(sc);
293: DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
294: dc_delay(sc);
295:
296: for (i = 0; i < 25; i++) {
297: DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
298: dc_delay(sc);
299: DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
300: dc_delay(sc);
301: }
302:
303: DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
304: dc_delay(sc);
305: DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
306: dc_delay(sc);
307: CSR_WRITE_4(sc, DC_SIO, 0x00000000);
308: }
309:
310: /*
311: * Send a read command and address to the EEPROM, check for ACK.
312: */
313: void
314: dc_eeprom_putbyte(sc, addr)
315: struct dc_softc *sc;
316: int addr;
317: {
318: int d, i;
319:
320: d = DC_EECMD_READ >> 6;
321:
322: for (i = 3; i--; ) {
323: if (d & (1 << i))
324: DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
325: else
326: DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
327: dc_delay(sc);
328: DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
329: dc_delay(sc);
330: DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
331: dc_delay(sc);
332: }
333:
334: /*
335: * Feed in each bit and strobe the clock.
336: */
337: for (i = sc->dc_romwidth; i--;) {
338: if (addr & (1 << i)) {
339: SIO_SET(DC_SIO_EE_DATAIN);
340: } else {
341: SIO_CLR(DC_SIO_EE_DATAIN);
342: }
343: dc_delay(sc);
344: SIO_SET(DC_SIO_EE_CLK);
345: dc_delay(sc);
346: SIO_CLR(DC_SIO_EE_CLK);
347: dc_delay(sc);
348: }
349: }
350:
351: /*
352: * Read a word of data stored in the EEPROM at address 'addr.'
353: * The PNIC 82c168/82c169 has its own non-standard way to read
354: * the EEPROM.
355: */
356: void
357: dc_eeprom_getword_pnic(sc, addr, dest)
358: struct dc_softc *sc;
359: int addr;
360: u_int16_t *dest;
361: {
362: int i;
363: u_int32_t r;
364:
365: CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);
366:
367: for (i = 0; i < DC_TIMEOUT; i++) {
368: DELAY(1);
369: r = CSR_READ_4(sc, DC_SIO);
370: if (!(r & DC_PN_SIOCTL_BUSY)) {
371: *dest = (u_int16_t)(r & 0xFFFF);
372: return;
373: }
374: }
375: }
376:
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The Xircom X3201 has its own non-standard way to read
 * the EEPROM, too: the ROM is byte-addressed through DC_ROM and
 * each byte is read back through DC_SIO.
 */
void
dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);

	/* Convert word address to byte address; fetch low byte first. */
	addr *= 2;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
	addr += 1;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;

	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
}
396:
397: /*
398: * Read a word of data stored in the EEPROM at address 'addr.'
399: */
400: void
401: dc_eeprom_getword(sc, addr, dest)
402: struct dc_softc *sc;
403: int addr;
404: u_int16_t *dest;
405: {
406: int i;
407: u_int16_t word = 0;
408:
409: /* Force EEPROM to idle state. */
410: dc_eeprom_idle(sc);
411:
412: /* Enter EEPROM access mode. */
413: CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
414: dc_delay(sc);
415: DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
416: dc_delay(sc);
417: DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
418: dc_delay(sc);
419: DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
420: dc_delay(sc);
421:
422: /*
423: * Send address of word we want to read.
424: */
425: dc_eeprom_putbyte(sc, addr);
426:
427: /*
428: * Start reading bits from EEPROM.
429: */
430: for (i = 0x8000; i; i >>= 1) {
431: SIO_SET(DC_SIO_EE_CLK);
432: dc_delay(sc);
433: if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
434: word |= i;
435: dc_delay(sc);
436: SIO_CLR(DC_SIO_EE_CLK);
437: dc_delay(sc);
438: }
439:
440: /* Turn off EEPROM access mode. */
441: dc_eeprom_idle(sc);
442:
443: *dest = word;
444: }
445:
446: /*
447: * Read a sequence of words from the EEPROM.
448: */
449: void dc_read_eeprom(sc, dest, off, cnt, swap)
450: struct dc_softc *sc;
451: caddr_t dest;
452: int off, cnt, swap;
453: {
454: int i;
455: u_int16_t word = 0, *ptr;
456:
457: for (i = 0; i < cnt; i++) {
458: if (DC_IS_PNIC(sc))
459: dc_eeprom_getword_pnic(sc, off + i, &word);
460: else if (DC_IS_XIRCOM(sc))
461: dc_eeprom_getword_xircom(sc, off + i, &word);
462: else
463: dc_eeprom_getword(sc, off + i, &word);
464: ptr = (u_int16_t *)(dest + (i * 2));
465: if (swap)
466: *ptr = betoh16(word);
467: else
468: *ptr = letoh16(word);
469: }
470: }
471:
472: /*
473: * The following two routines are taken from the Macronix 98713
474: * Application Notes pp.19-21.
475: */
476: /*
477: * Write a bit to the MII bus.
478: */
479: void
480: dc_mii_writebit(sc, bit)
481: struct dc_softc *sc;
482: int bit;
483: {
484: if (bit)
485: CSR_WRITE_4(sc, DC_SIO,
486: DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
487: else
488: CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
489:
490: DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
491: DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
492: }
493:
494: /*
495: * Read a bit from the MII bus.
496: */
497: int
498: dc_mii_readbit(sc)
499: struct dc_softc *sc;
500: {
501: CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
502: CSR_READ_4(sc, DC_SIO);
503: DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
504: DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
505: if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
506: return (1);
507: return (0);
508: }
509:
510: /*
511: * Sync the PHYs by setting data bit and strobing the clock 32 times.
512: */
513: void
514: dc_mii_sync(sc)
515: struct dc_softc *sc;
516: {
517: int i;
518:
519: CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
520:
521: for (i = 0; i < 32; i++)
522: dc_mii_writebit(sc, 1);
523: }
524:
525: /*
526: * Clock a series of bits through the MII.
527: */
528: void
529: dc_mii_send(sc, bits, cnt)
530: struct dc_softc *sc;
531: u_int32_t bits;
532: int cnt;
533: {
534: int i;
535:
536: for (i = (0x1 << (cnt - 1)); i; i >>= 1)
537: dc_mii_writebit(sc, bits & i);
538: }
539:
540: /*
541: * Read an PHY register through the MII.
542: */
543: int
544: dc_mii_readreg(sc, frame)
545: struct dc_softc *sc;
546: struct dc_mii_frame *frame;
547: {
548: int i, ack, s;
549:
550: s = splnet();
551:
552: /*
553: * Set up frame for RX.
554: */
555: frame->mii_stdelim = DC_MII_STARTDELIM;
556: frame->mii_opcode = DC_MII_READOP;
557: frame->mii_turnaround = 0;
558: frame->mii_data = 0;
559:
560: /*
561: * Sync the PHYs.
562: */
563: dc_mii_sync(sc);
564:
565: /*
566: * Send command/address info.
567: */
568: dc_mii_send(sc, frame->mii_stdelim, 2);
569: dc_mii_send(sc, frame->mii_opcode, 2);
570: dc_mii_send(sc, frame->mii_phyaddr, 5);
571: dc_mii_send(sc, frame->mii_regaddr, 5);
572:
573: #ifdef notdef
574: /* Idle bit */
575: dc_mii_writebit(sc, 1);
576: dc_mii_writebit(sc, 0);
577: #endif
578:
579: /* Check for ack */
580: ack = dc_mii_readbit(sc);
581:
582: /*
583: * Now try reading data bits. If the ack failed, we still
584: * need to clock through 16 cycles to keep the PHY(s) in sync.
585: */
586: if (ack) {
587: for(i = 0; i < 16; i++) {
588: dc_mii_readbit(sc);
589: }
590: goto fail;
591: }
592:
593: for (i = 0x8000; i; i >>= 1) {
594: if (!ack) {
595: if (dc_mii_readbit(sc))
596: frame->mii_data |= i;
597: }
598: }
599:
600: fail:
601:
602: dc_mii_writebit(sc, 0);
603: dc_mii_writebit(sc, 0);
604:
605: splx(s);
606:
607: if (ack)
608: return (1);
609: return (0);
610: }
611:
612: /*
613: * Write to a PHY register through the MII.
614: */
615: int
616: dc_mii_writereg(sc, frame)
617: struct dc_softc *sc;
618: struct dc_mii_frame *frame;
619: {
620: int s;
621:
622: s = splnet();
623: /*
624: * Set up frame for TX.
625: */
626:
627: frame->mii_stdelim = DC_MII_STARTDELIM;
628: frame->mii_opcode = DC_MII_WRITEOP;
629: frame->mii_turnaround = DC_MII_TURNAROUND;
630:
631: /*
632: * Sync the PHYs.
633: */
634: dc_mii_sync(sc);
635:
636: dc_mii_send(sc, frame->mii_stdelim, 2);
637: dc_mii_send(sc, frame->mii_opcode, 2);
638: dc_mii_send(sc, frame->mii_phyaddr, 5);
639: dc_mii_send(sc, frame->mii_regaddr, 5);
640: dc_mii_send(sc, frame->mii_turnaround, 2);
641: dc_mii_send(sc, frame->mii_data, 16);
642:
643: /* Idle bit. */
644: dc_mii_writebit(sc, 0);
645: dc_mii_writebit(sc, 0);
646:
647: splx(s);
648: return (0);
649: }
650:
651: int
652: dc_miibus_readreg(self, phy, reg)
653: struct device *self;
654: int phy, reg;
655: {
656: struct dc_mii_frame frame;
657: struct dc_softc *sc = (struct dc_softc *)self;
658: int i, rval, phy_reg;
659:
660: /*
661: * Note: both the AL981 and AN983 have internal PHYs,
662: * however the AL981 provides direct access to the PHY
663: * registers while the AN983 uses a serial MII interface.
664: * The AN983's MII interface is also buggy in that you
665: * can read from any MII address (0 to 31), but only address 1
666: * behaves normally. To deal with both cases, we pretend
667: * that the PHY is at MII address 1.
668: */
669: if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
670: return (0);
671:
672: /*
673: * Note: the ukphy probs of the RS7112 report a PHY at
674: * MII address 0 (possibly HomePNA?) and 1 (ethernet)
675: * so we only respond to correct one.
676: */
677: if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
678: return (0);
679:
680: if (sc->dc_pmode != DC_PMODE_MII) {
681: if (phy == (MII_NPHY - 1)) {
682: switch(reg) {
683: case MII_BMSR:
684: /*
685: * Fake something to make the probe
686: * code think there's a PHY here.
687: */
688: return (BMSR_MEDIAMASK);
689: break;
690: case MII_PHYIDR1:
691: if (DC_IS_PNIC(sc))
692: return (PCI_VENDOR_LITEON);
693: return (PCI_VENDOR_DEC);
694: break;
695: case MII_PHYIDR2:
696: if (DC_IS_PNIC(sc))
697: return (PCI_PRODUCT_LITEON_PNIC);
698: return (PCI_PRODUCT_DEC_21142);
699: break;
700: default:
701: return (0);
702: break;
703: }
704: } else
705: return (0);
706: }
707:
708: if (DC_IS_PNIC(sc)) {
709: CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
710: (phy << 23) | (reg << 18));
711: for (i = 0; i < DC_TIMEOUT; i++) {
712: DELAY(1);
713: rval = CSR_READ_4(sc, DC_PN_MII);
714: if (!(rval & DC_PN_MII_BUSY)) {
715: rval &= 0xFFFF;
716: return (rval == 0xFFFF ? 0 : rval);
717: }
718: }
719: return (0);
720: }
721:
722: if (DC_IS_COMET(sc)) {
723: switch(reg) {
724: case MII_BMCR:
725: phy_reg = DC_AL_BMCR;
726: break;
727: case MII_BMSR:
728: phy_reg = DC_AL_BMSR;
729: break;
730: case MII_PHYIDR1:
731: phy_reg = DC_AL_VENID;
732: break;
733: case MII_PHYIDR2:
734: phy_reg = DC_AL_DEVID;
735: break;
736: case MII_ANAR:
737: phy_reg = DC_AL_ANAR;
738: break;
739: case MII_ANLPAR:
740: phy_reg = DC_AL_LPAR;
741: break;
742: case MII_ANER:
743: phy_reg = DC_AL_ANER;
744: break;
745: default:
746: printf("%s: phy_read: bad phy register %x\n",
747: sc->sc_dev.dv_xname, reg);
748: return (0);
749: break;
750: }
751:
752: rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;
753:
754: if (rval == 0xFFFF)
755: return (0);
756: return (rval);
757: }
758:
759: bzero(&frame, sizeof(frame));
760:
761: frame.mii_phyaddr = phy;
762: frame.mii_regaddr = reg;
763: if (sc->dc_type == DC_TYPE_98713) {
764: phy_reg = CSR_READ_4(sc, DC_NETCFG);
765: CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
766: }
767: dc_mii_readreg(sc, &frame);
768: if (sc->dc_type == DC_TYPE_98713)
769: CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
770:
771: return (frame.mii_data);
772: }
773:
774: void
775: dc_miibus_writereg(self, phy, reg, data)
776: struct device *self;
777: int phy, reg, data;
778: {
779: struct dc_softc *sc = (struct dc_softc *)self;
780: struct dc_mii_frame frame;
781: int i, phy_reg;
782:
783: bzero((char *)&frame, sizeof(frame));
784:
785: if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
786: return;
787: if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
788: return;
789:
790: if (DC_IS_PNIC(sc)) {
791: CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
792: (phy << 23) | (reg << 10) | data);
793: for (i = 0; i < DC_TIMEOUT; i++) {
794: if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
795: break;
796: }
797: return;
798: }
799:
800: if (DC_IS_COMET(sc)) {
801: switch(reg) {
802: case MII_BMCR:
803: phy_reg = DC_AL_BMCR;
804: break;
805: case MII_BMSR:
806: phy_reg = DC_AL_BMSR;
807: break;
808: case MII_PHYIDR1:
809: phy_reg = DC_AL_VENID;
810: break;
811: case MII_PHYIDR2:
812: phy_reg = DC_AL_DEVID;
813: break;
814: case MII_ANAR:
815: phy_reg = DC_AL_ANAR;
816: break;
817: case MII_ANLPAR:
818: phy_reg = DC_AL_LPAR;
819: break;
820: case MII_ANER:
821: phy_reg = DC_AL_ANER;
822: break;
823: default:
824: printf("%s: phy_write: bad phy register %x\n",
825: sc->sc_dev.dv_xname, reg);
826: return;
827: break;
828: }
829:
830: CSR_WRITE_4(sc, phy_reg, data);
831: return;
832: }
833:
834: frame.mii_phyaddr = phy;
835: frame.mii_regaddr = reg;
836: frame.mii_data = data;
837:
838: if (sc->dc_type == DC_TYPE_98713) {
839: phy_reg = CSR_READ_4(sc, DC_NETCFG);
840: CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
841: }
842: dc_mii_writereg(sc, &frame);
843: if (sc->dc_type == DC_TYPE_98713)
844: CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
845: }
846:
847: void
848: dc_miibus_statchg(self)
849: struct device *self;
850: {
851: struct dc_softc *sc = (struct dc_softc *)self;
852: struct mii_data *mii;
853: struct ifmedia *ifm;
854:
855: if (DC_IS_ADMTEK(sc))
856: return;
857:
858: mii = &sc->sc_mii;
859: ifm = &mii->mii_media;
860: if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
861: dc_setcfg(sc, ifm->ifm_media);
862: sc->dc_if_media = ifm->ifm_media;
863: } else {
864: dc_setcfg(sc, mii->mii_media_active);
865: sc->dc_if_media = mii->mii_media_active;
866: }
867: }
868:
869: #define DC_BITS_512 9
870: #define DC_BITS_128 7
871: #define DC_BITS_64 6
872:
873: u_int32_t
874: dc_crc_le(sc, addr)
875: struct dc_softc *sc;
876: caddr_t addr;
877: {
878: u_int32_t crc;
879:
880: /* Compute CRC for the address value. */
881: crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
882:
883: /*
884: * The hash table on the PNIC II and the MX98715AEC-C/D/E
885: * chips is only 128 bits wide.
886: */
887: if (sc->dc_flags & DC_128BIT_HASH)
888: return (crc & ((1 << DC_BITS_128) - 1));
889:
890: /* The hash table on the MX98715BEC is only 64 bits wide. */
891: if (sc->dc_flags & DC_64BIT_HASH)
892: return (crc & ((1 << DC_BITS_64) - 1));
893:
894: /* Xircom's hash filtering table is different (read: weird) */
895: /* Xircom uses the LEAST significant bits */
896: if (DC_IS_XIRCOM(sc)) {
897: if ((crc & 0x180) == 0x180)
898: return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4);
899: else
900: return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
901: }
902:
903: return (crc & ((1 << DC_BITS_512) - 1));
904: }
905:
906: /*
907: * Calculate CRC of a multicast group address, return the lower 6 bits.
908: */
909: #define dc_crc_be(addr) ((ether_crc32_be(addr,ETHER_ADDR_LEN) >> 26) \
910: & 0x0000003F)
911:
912: /*
913: * 21143-style RX filter setup routine. Filter programming is done by
914: * downloading a special setup frame into the TX engine. 21143, Macronix,
915: * PNIC, PNIC II and Davicom chips are programmed this way.
916: *
917: * We always program the chip using 'hash perfect' mode, i.e. one perfect
918: * address (our node address) and a 512-bit hash filter for multicast
919: * frames. We also sneak the broadcast address into the hash filter since
920: * we need that too.
921: */
922: void
923: dc_setfilt_21143(sc)
924: struct dc_softc *sc;
925: {
926: struct dc_desc *sframe;
927: u_int32_t h, *sp;
928: struct arpcom *ac = &sc->sc_arpcom;
929: struct ether_multi *enm;
930: struct ether_multistep step;
931: struct ifnet *ifp;
932: int i;
933:
934: ifp = &sc->sc_arpcom.ac_if;
935:
936: i = sc->dc_cdata.dc_tx_prod;
937: DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
938: sc->dc_cdata.dc_tx_cnt++;
939: sframe = &sc->dc_ldata->dc_tx_list[i];
940: sp = &sc->dc_ldata->dc_sbuf[0];
941: bzero((char *)sp, DC_SFRAME_LEN);
942:
943: sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
944: offsetof(struct dc_list_data, dc_sbuf));
945: sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
946: DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);
947:
948: sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
949: (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];
950:
951: /* If we want promiscuous mode, set the allframes bit. */
952: if (ifp->if_flags & IFF_PROMISC)
953: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
954: else
955: DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
956:
957: allmulti:
958: if (ifp->if_flags & IFF_ALLMULTI)
959: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
960: else {
961: DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
962:
963: ETHER_FIRST_MULTI(step, ac, enm);
964: while (enm != NULL) {
965: if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
966: ETHER_ADDR_LEN)) {
967: ifp->if_flags |= IFF_ALLMULTI;
968: goto allmulti;
969: }
970:
971: h = dc_crc_le(sc, enm->enm_addrlo);
972: sp[h >> 4] |= htole32(1 << (h & 0xF));
973: ETHER_NEXT_MULTI(step, enm);
974: }
975: }
976:
977: if (ifp->if_flags & IFF_BROADCAST) {
978: h = dc_crc_le(sc, (caddr_t)ðerbroadcastaddr);
979: sp[h >> 4] |= htole32(1 << (h & 0xF));
980: }
981:
982: /* Set our MAC address */
983: sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
984: sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
985: sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);
986:
987: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
988: offsetof(struct dc_list_data, dc_sbuf[0]),
989: sizeof(struct dc_list_data) -
990: offsetof(struct dc_list_data, dc_sbuf[0]),
991: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
992:
993: sframe->dc_status = htole32(DC_TXSTAT_OWN);
994:
995: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
996: offsetof(struct dc_list_data, dc_tx_list[i]),
997: sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
998:
999: CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
1000:
1001: /*
1002: * The PNIC takes an exceedingly long time to process its
1003: * setup frame; wait 10ms after posting the setup frame
1004: * before proceeding, just so it has time to swallow its
1005: * medicine.
1006: */
1007: DELAY(10000);
1008:
1009: ifp->if_timer = 5;
1010: }
1011:
1012: void
1013: dc_setfilt_admtek(sc)
1014: struct dc_softc *sc;
1015: {
1016: struct ifnet *ifp;
1017: struct arpcom *ac = &sc->sc_arpcom;
1018: struct ether_multi *enm;
1019: struct ether_multistep step;
1020: int h = 0;
1021: u_int32_t hashes[2] = { 0, 0 };
1022:
1023: ifp = &sc->sc_arpcom.ac_if;
1024:
1025: /* Init our MAC address */
1026: CSR_WRITE_4(sc, DC_AL_PAR0, ac->ac_enaddr[3] << 24 |
1027: ac->ac_enaddr[2] << 16 | ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0]);
1028: CSR_WRITE_4(sc, DC_AL_PAR1, ac->ac_enaddr[5] << 8 | ac->ac_enaddr[4]);
1029:
1030: /* If we want promiscuous mode, set the allframes bit. */
1031: if (ifp->if_flags & IFF_PROMISC)
1032: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1033: else
1034: DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1035:
1036: allmulti:
1037: if (ifp->if_flags & IFF_ALLMULTI)
1038: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1039: else
1040: DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1041:
1042: /* first, zot all the existing hash bits */
1043: CSR_WRITE_4(sc, DC_AL_MAR0, 0);
1044: CSR_WRITE_4(sc, DC_AL_MAR1, 0);
1045:
1046: /*
1047: * If we're already in promisc or allmulti mode, we
1048: * don't have to bother programming the multicast filter.
1049: */
1050: if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
1051: return;
1052:
1053: /* now program new ones */
1054: ETHER_FIRST_MULTI(step, ac, enm);
1055: while (enm != NULL) {
1056: if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1057: ifp->if_flags |= IFF_ALLMULTI;
1058: goto allmulti;
1059: }
1060:
1061: if (DC_IS_CENTAUR(sc))
1062: h = dc_crc_le(sc, enm->enm_addrlo);
1063: else
1064: h = dc_crc_be(enm->enm_addrlo);
1065: if (h < 32)
1066: hashes[0] |= (1 << h);
1067: else
1068: hashes[1] |= (1 << (h - 32));
1069: ETHER_NEXT_MULTI(step, enm);
1070: }
1071:
1072: CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
1073: CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
1074: }
1075:
1076: void
1077: dc_setfilt_asix(sc)
1078: struct dc_softc *sc;
1079: {
1080: struct ifnet *ifp;
1081: struct arpcom *ac = &sc->sc_arpcom;
1082: struct ether_multi *enm;
1083: struct ether_multistep step;
1084: int h = 0;
1085: u_int32_t hashes[2] = { 0, 0 };
1086:
1087: ifp = &sc->sc_arpcom.ac_if;
1088:
1089: /* Init our MAC address */
1090: CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
1091: CSR_WRITE_4(sc, DC_AX_FILTDATA,
1092: *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
1093: CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
1094: CSR_WRITE_4(sc, DC_AX_FILTDATA,
1095: *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));
1096:
1097: /* If we want promiscuous mode, set the allframes bit. */
1098: if (ifp->if_flags & IFF_PROMISC)
1099: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1100: else
1101: DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1102:
1103: if (ifp->if_flags & IFF_ALLMULTI)
1104: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1105: else
1106: DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1107:
1108: /*
1109: * The ASIX chip has a special bit to enable reception
1110: * of broadcast frames.
1111: */
1112: if (ifp->if_flags & IFF_BROADCAST)
1113: DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
1114: else
1115: DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
1116:
1117: /* first, zot all the existing hash bits */
1118: CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
1119: CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
1120: CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
1121: CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
1122:
1123: /*
1124: * If we're already in promisc or allmulti mode, we
1125: * don't have to bother programming the multicast filter.
1126: */
1127: if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
1128: return;
1129:
1130: /* now program new ones */
1131: ETHER_FIRST_MULTI(step, ac, enm);
1132: while (enm != NULL) {
1133: h = dc_crc_be(enm->enm_addrlo);
1134: if (h < 32)
1135: hashes[0] |= (1 << h);
1136: else
1137: hashes[1] |= (1 << (h - 32));
1138: ETHER_NEXT_MULTI(step, enm);
1139: }
1140:
1141: CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
1142: CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
1143: CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
1144: CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
1145: }
1146:
1147: void
1148: dc_setfilt_xircom(sc)
1149: struct dc_softc *sc;
1150: {
1151: struct dc_desc *sframe;
1152: struct arpcom *ac = &sc->sc_arpcom;
1153: struct ether_multi *enm;
1154: struct ether_multistep step;
1155: u_int32_t h, *sp;
1156: struct ifnet *ifp;
1157: int i;
1158:
1159: ifp = &sc->sc_arpcom.ac_if;
1160: DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));
1161:
1162: i = sc->dc_cdata.dc_tx_prod;
1163: DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
1164: sc->dc_cdata.dc_tx_cnt++;
1165: sframe = &sc->dc_ldata->dc_tx_list[i];
1166: sp = &sc->dc_ldata->dc_sbuf[0];
1167: bzero((char *)sp, DC_SFRAME_LEN);
1168:
1169: sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1170: offsetof(struct dc_list_data, dc_sbuf));
1171: sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
1172: DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);
1173:
1174: sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
1175: (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];
1176:
1177: /* If we want promiscuous mode, set the allframes bit. */
1178: if (ifp->if_flags & IFF_PROMISC)
1179: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1180: else
1181: DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1182:
1183: if (ifp->if_flags & IFF_ALLMULTI)
1184: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1185: else
1186: DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1187:
1188: /* now program new ones */
1189: ETHER_FIRST_MULTI(step, ac, enm);
1190: while (enm != NULL) {
1191: h = dc_crc_le(sc, enm->enm_addrlo);
1192: sp[h >> 4] |= htole32(1 << (h & 0xF));
1193: ETHER_NEXT_MULTI(step, enm);
1194: }
1195:
1196: if (ifp->if_flags & IFF_BROADCAST) {
1197: h = dc_crc_le(sc, (caddr_t)ðerbroadcastaddr);
1198: sp[h >> 4] |= htole32(1 << (h & 0xF));
1199: }
1200:
1201: /* Set our MAC address */
1202: sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
1203: sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
1204: sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);
1205:
1206: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
1207: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
1208: ifp->if_flags |= IFF_RUNNING;
1209: sframe->dc_status = htole32(DC_TXSTAT_OWN);
1210: CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
1211:
1212: /*
1213: * wait some time...
1214: */
1215: DELAY(1000);
1216:
1217: ifp->if_timer = 5;
1218: }
1219:
/*
 * Program the receive filter using whichever chip-specific method
 * matches the controller being driven.  The DC_IS_* categories are
 * mutually exclusive (keyed off dc_type), so dispatch with a single
 * if/else chain.
 */
void
dc_setfilt(sc)
	struct dc_softc *sc;
{
	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);
	else if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);
	else if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
	else if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);
}
1237:
1238: /*
1239: * In order to fiddle with the
1240: * 'full-duplex' and '100Mbps' bits in the netconfig register, we
1241: * first have to put the transmit and/or receive logic in the idle state.
1242: */
void
dc_setcfg(sc, media)
	struct dc_softc *sc;
	int media;
{
	int i, restart = 0;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/*
	 * If either the transmitter or receiver is running, stop both
	 * and poll until the chip reports TX idle and RX stopped (or
	 * waiting); remember to restart them at the end.
	 */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT)
			printf("%s: failed to force tx and "
			    "rx to idle state\n", sc->sc_dev.dv_xname);
	}

	/* 100baseTX configuration. */
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			/* MII mode: disable the internal PCS/scrambler path. */
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* Non-MII (SYM/serial) mode: use the internal PCS. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	/* 10baseT configuration. */
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * Reprogram the SIA for 10baseT with
				 * autonegotiation disabled; the magic
				 * CSR values select half vs. full duplex.
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	/* Finally, set or clear full-duplex to match the media word. */
	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart TX/RX if we stopped them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
}
1391:
/*
 * Issue a software reset of the chip and wait for it to complete,
 * then put the registers back into a known quiescent state.
 */
void
dc_reset(sc)
	struct dc_softc *sc;
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* Poll for the reset bit to self-clear. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * These chip families don't clear the reset bit on their own:
	 * clear it by hand, and zero i so the timeout warning below
	 * is suppressed.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Mask all interrupts; clear the bus and network configuration. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	if (sc->dc_type == DC_TYPE_21145)
		dc_setcfg(sc, IFM_10_T);
}
1438:
1439: void
1440: dc_apply_fixup(sc, media)
1441: struct dc_softc *sc;
1442: int media;
1443: {
1444: struct dc_mediainfo *m;
1445: u_int8_t *p;
1446: int i;
1447: u_int32_t reg;
1448:
1449: m = sc->dc_mi;
1450:
1451: while (m != NULL) {
1452: if (m->dc_media == media)
1453: break;
1454: m = m->dc_next;
1455: }
1456:
1457: if (m == NULL)
1458: return;
1459:
1460: for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1461: reg = (p[0] | (p[1] << 8)) << 16;
1462: CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1463: }
1464:
1465: for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1466: reg = (p[0] | (p[1] << 8)) << 16;
1467: CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1468: }
1469: }
1470:
1471: void
1472: dc_decode_leaf_sia(sc, l)
1473: struct dc_softc *sc;
1474: struct dc_eblock_sia *l;
1475: {
1476: struct dc_mediainfo *m;
1477:
1478: m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
1479: if (m == NULL)
1480: return;
1481: bzero(m, sizeof(struct dc_mediainfo));
1482: switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
1483: case DC_SIA_CODE_10BT:
1484: m->dc_media = IFM_10_T;
1485: break;
1486: case DC_SIA_CODE_10BT_FDX:
1487: m->dc_media = IFM_10_T|IFM_FDX;
1488: break;
1489: case DC_SIA_CODE_10B2:
1490: m->dc_media = IFM_10_2;
1491: break;
1492: case DC_SIA_CODE_10B5:
1493: m->dc_media = IFM_10_5;
1494: break;
1495: default:
1496: break;
1497: }
1498:
1499: /*
1500: * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1501: * Things apparently already work for cards that do
1502: * supply Media Specific Data.
1503: */
1504: if (l->dc_sia_code & DC_SIA_CODE_EXT) {
1505: m->dc_gp_len = 2;
1506: m->dc_gp_ptr =
1507: (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1508: } else {
1509: m->dc_gp_len = 2;
1510: m->dc_gp_ptr =
1511: (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1512: }
1513:
1514: m->dc_next = sc->dc_mi;
1515: sc->dc_mi = m;
1516:
1517: sc->dc_pmode = DC_PMODE_SIA;
1518: }
1519:
1520: void
1521: dc_decode_leaf_sym(sc, l)
1522: struct dc_softc *sc;
1523: struct dc_eblock_sym *l;
1524: {
1525: struct dc_mediainfo *m;
1526:
1527: m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
1528: if (m == NULL)
1529: return;
1530: bzero(m, sizeof(struct dc_mediainfo));
1531: if (l->dc_sym_code == DC_SYM_CODE_100BT)
1532: m->dc_media = IFM_100_TX;
1533:
1534: if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1535: m->dc_media = IFM_100_TX|IFM_FDX;
1536:
1537: m->dc_gp_len = 2;
1538: m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1539:
1540: m->dc_next = sc->dc_mi;
1541: sc->dc_mi = m;
1542:
1543: sc->dc_pmode = DC_PMODE_SYM;
1544: }
1545:
1546: void
1547: dc_decode_leaf_mii(sc, l)
1548: struct dc_softc *sc;
1549: struct dc_eblock_mii *l;
1550: {
1551: u_int8_t *p;
1552: struct dc_mediainfo *m;
1553:
1554: m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
1555: if (m == NULL)
1556: return;
1557: bzero(m, sizeof(struct dc_mediainfo));
1558: /* We abuse IFM_AUTO to represent MII. */
1559: m->dc_media = IFM_AUTO;
1560: m->dc_gp_len = l->dc_gpr_len;
1561:
1562: p = (u_int8_t *)l;
1563: p += sizeof(struct dc_eblock_mii);
1564: m->dc_gp_ptr = p;
1565: p += 2 * l->dc_gpr_len;
1566: m->dc_reset_len = *p;
1567: p++;
1568: m->dc_reset_ptr = p;
1569:
1570: m->dc_next = sc->dc_mi;
1571: sc->dc_mi = m;
1572: }
1573:
1574: void
1575: dc_read_srom(sc, bits)
1576: struct dc_softc *sc;
1577: int bits;
1578: {
1579: int size;
1580:
1581: size = 2 << bits;
1582: sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
1583: if (sc->dc_srom == NULL)
1584: return;
1585: dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
1586: }
1587:
/*
 * Walk the 21143 SROM info leaf and build the dc_mediainfo list.
 * Two passes over the media blocks: the first only notes whether an
 * MII block is present; the second decodes the blocks, skipping
 * SIA/SYM blocks whenever an MII block was found.
 */
void
dc_parse_21143_srom(sc)
	struct dc_softc *sc;
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	/* Byte 27 of the SROM holds the offset of the info leaf. */
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	/* The media blocks start at the leaf header, minus one pad byte. */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
			have_mii++;

		/* dc_len's high bit is a flag; mask it to get the length. */
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch(hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
				dc_decode_leaf_sia(sc,
				    (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
				dc_decode_leaf_sym(sc,
				    (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}
}
1645:
1646: /*
1647: * Attach the interface. Allocate softc structures, do ifmedia
1648: * setup and ethernet/BPF attach.
1649: */
1650: void
1651: dc_attach(sc)
1652: struct dc_softc *sc;
1653: {
1654: struct ifnet *ifp;
1655: int mac_offset, tmp, i;
1656: u_int32_t reg;
1657:
1658: /*
1659: * Get station address from the EEPROM.
1660: */
1661: if (sc->sc_hasmac)
1662: goto hasmac;
1663:
1664: switch(sc->dc_type) {
1665: case DC_TYPE_98713:
1666: case DC_TYPE_98713A:
1667: case DC_TYPE_987x5:
1668: case DC_TYPE_PNICII:
1669: dc_read_eeprom(sc, (caddr_t)&mac_offset,
1670: (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
1671: dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1672: (mac_offset / 2), 3, 0);
1673: break;
1674: case DC_TYPE_PNIC:
1675: dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1);
1676: break;
1677: case DC_TYPE_DM9102:
1678: case DC_TYPE_21143:
1679: case DC_TYPE_21145:
1680: case DC_TYPE_ASIX:
1681: dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1682: DC_EE_NODEADDR, 3, 0);
1683: break;
1684: case DC_TYPE_AL981:
1685: case DC_TYPE_AN983:
1686: reg = CSR_READ_4(sc, DC_AL_PAR0);
1687: sc->sc_arpcom.ac_enaddr[0] = (reg & 0xff);
1688: sc->sc_arpcom.ac_enaddr[1] = (reg >> 8) & 0xff;
1689: sc->sc_arpcom.ac_enaddr[2] = (reg >> 16) & 0xff;
1690: sc->sc_arpcom.ac_enaddr[3] = (reg >> 24) & 0xff;
1691: reg = CSR_READ_4(sc, DC_AL_PAR1);
1692: sc->sc_arpcom.ac_enaddr[4] = (reg & 0xff);
1693: sc->sc_arpcom.ac_enaddr[5] = (reg >> 8) & 0xff;
1694: break;
1695: case DC_TYPE_CONEXANT:
1696: bcopy(&sc->dc_srom + DC_CONEXANT_EE_NODEADDR,
1697: &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
1698: break;
1699: case DC_TYPE_XIRCOM:
1700: break;
1701: default:
1702: dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1703: DC_EE_NODEADDR, 3, 0);
1704: break;
1705: }
1706: hasmac:
1707:
1708: if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),
1709: PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1710: BUS_DMA_NOWAIT) != 0) {
1711: printf(": can't alloc list mem\n");
1712: goto fail;
1713: }
1714: if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1715: sizeof(struct dc_list_data), &sc->sc_listkva,
1716: BUS_DMA_NOWAIT) != 0) {
1717: printf(": can't map list mem\n");
1718: goto fail;
1719: }
1720: if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,
1721: sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,
1722: &sc->sc_listmap) != 0) {
1723: printf(": can't alloc list map\n");
1724: goto fail;
1725: }
1726: if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1727: sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1728: printf(": can't load list map\n");
1729: goto fail;
1730: }
1731: sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
1732: bzero(sc->dc_ldata, sizeof(struct dc_list_data));
1733:
1734: for (i = 0; i < DC_RX_LIST_CNT; i++) {
1735: if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
1736: 0, BUS_DMA_NOWAIT,
1737: &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) {
1738: printf(": can't create rx map\n");
1739: return;
1740: }
1741: }
1742: if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1743: BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
1744: printf(": can't create rx spare map\n");
1745: return;
1746: }
1747:
1748: for (i = 0; i < DC_TX_LIST_CNT; i++) {
1749: if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1750: DC_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
1751: &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) {
1752: printf(": can't create tx map\n");
1753: return;
1754: }
1755: }
1756: if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, DC_TX_LIST_CNT - 5,
1757: MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1758: printf(": can't create tx spare map\n");
1759: return;
1760: }
1761:
1762: /*
1763: * A 21143 or clone chip was detected. Inform the world.
1764: */
1765: printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1766:
1767: ifp = &sc->sc_arpcom.ac_if;
1768: ifp->if_softc = sc;
1769: ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1770: ifp->if_ioctl = dc_ioctl;
1771: ifp->if_start = dc_start;
1772: ifp->if_watchdog = dc_watchdog;
1773: ifp->if_baudrate = 10000000;
1774: IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
1775: IFQ_SET_READY(&ifp->if_snd);
1776: bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1777:
1778: ifp->if_capabilities = IFCAP_VLAN_MTU;
1779:
1780: /* Do MII setup. If this is a 21143, check for a PHY on the
1781: * MII bus after applying any necessary fixups to twiddle the
1782: * GPIO bits. If we don't end up finding a PHY, restore the
1783: * old selection (SIA only or SIA/SYM) and attach the dcphy
1784: * driver instead.
1785: */
1786: if (DC_IS_INTEL(sc)) {
1787: dc_apply_fixup(sc, IFM_AUTO);
1788: tmp = sc->dc_pmode;
1789: sc->dc_pmode = DC_PMODE_MII;
1790: }
1791:
1792: /*
1793: * Setup General Purpose port mode and data so the tulip can talk
1794: * to the MII. This needs to be done before mii_attach so that
1795: * we can actually see them.
1796: */
1797: if (DC_IS_XIRCOM(sc)) {
1798: CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
1799: DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1800: DELAY(10);
1801: CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
1802: DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1803: DELAY(10);
1804: }
1805:
1806: sc->sc_mii.mii_ifp = ifp;
1807: sc->sc_mii.mii_readreg = dc_miibus_readreg;
1808: sc->sc_mii.mii_writereg = dc_miibus_writereg;
1809: sc->sc_mii.mii_statchg = dc_miibus_statchg;
1810: ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
1811: mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1812: MII_OFFSET_ANY, 0);
1813:
1814: if (DC_IS_INTEL(sc)) {
1815: if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1816: sc->dc_pmode = tmp;
1817: if (sc->dc_pmode != DC_PMODE_SIA)
1818: sc->dc_pmode = DC_PMODE_SYM;
1819: sc->dc_flags |= DC_21143_NWAY;
1820: if (sc->dc_flags & DC_MOMENCO_BOTCH)
1821: sc->dc_pmode = DC_PMODE_MII;
1822: mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
1823: MII_PHY_ANY, MII_OFFSET_ANY, 0);
1824: } else {
1825: /* we have a PHY, so we must clear this bit */
1826: sc->dc_flags &= ~DC_TULIP_LEDS;
1827: }
1828: }
1829:
1830: if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1831: ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1832: ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1833: printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
1834: } else if (sc->dc_type == DC_TYPE_21145) {
1835: ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
1836: } else
1837: ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1838:
1839: if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
1840: ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);
1841:
1842: if (DC_IS_ADMTEK(sc)) {
1843: /*
1844: * Set automatic TX underrun recovery for the ADMtek chips
1845: */
1846: DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
1847: }
1848:
1849: /*
1850: * Call MI attach routines.
1851: */
1852: if_attach(ifp);
1853: ether_ifattach(ifp);
1854:
1855: sc->sc_dhook = shutdownhook_establish(dc_shutdown, sc);
1856: sc->sc_pwrhook = powerhook_establish(dc_power, sc);
1857:
1858: fail:
1859: return;
1860: }
1861:
1862: /*
1863: * Initialize the transmit descriptors.
1864: */
1865: int
1866: dc_list_tx_init(sc)
1867: struct dc_softc *sc;
1868: {
1869: struct dc_chain_data *cd;
1870: struct dc_list_data *ld;
1871: int i;
1872: bus_addr_t next;
1873:
1874: cd = &sc->dc_cdata;
1875: ld = sc->dc_ldata;
1876: for (i = 0; i < DC_TX_LIST_CNT; i++) {
1877: next = sc->sc_listmap->dm_segs[0].ds_addr;
1878: if (i == (DC_TX_LIST_CNT - 1))
1879: next +=
1880: offsetof(struct dc_list_data, dc_tx_list[0]);
1881: else
1882: next +=
1883: offsetof(struct dc_list_data, dc_tx_list[i + 1]);
1884: cd->dc_tx_chain[i].sd_mbuf = NULL;
1885: ld->dc_tx_list[i].dc_data = htole32(0);
1886: ld->dc_tx_list[i].dc_ctl = htole32(0);
1887: ld->dc_tx_list[i].dc_next = htole32(next);
1888: }
1889:
1890: cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
1891:
1892: return (0);
1893: }
1894:
1895:
1896: /*
1897: * Initialize the RX descriptors and allocate mbufs for them. Note that
1898: * we arrange the descriptors in a closed ring, so that the last descriptor
1899: * points back to the first.
1900: */
1901: int
1902: dc_list_rx_init(sc)
1903: struct dc_softc *sc;
1904: {
1905: struct dc_chain_data *cd;
1906: struct dc_list_data *ld;
1907: int i;
1908: bus_addr_t next;
1909:
1910: cd = &sc->dc_cdata;
1911: ld = sc->dc_ldata;
1912:
1913: for (i = 0; i < DC_RX_LIST_CNT; i++) {
1914: if (dc_newbuf(sc, i, NULL) == ENOBUFS)
1915: return (ENOBUFS);
1916: next = sc->sc_listmap->dm_segs[0].ds_addr;
1917: if (i == (DC_RX_LIST_CNT - 1))
1918: next +=
1919: offsetof(struct dc_list_data, dc_rx_list[0]);
1920: else
1921: next +=
1922: offsetof(struct dc_list_data, dc_rx_list[i + 1]);
1923: ld->dc_rx_list[i].dc_next = htole32(next);
1924: }
1925:
1926: cd->dc_rx_prod = 0;
1927:
1928: return (0);
1929: }
1930:
1931: /*
1932: * Initialize an RX descriptor and attach an MBUF cluster.
1933: */
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 *
 * If m is NULL, a fresh cluster mbuf is allocated and DMA-loaded via
 * the spare map, which is then swapped with the ring slot's map.
 * Otherwise the caller's mbuf is recycled in place.  Returns 0 on
 * success or ENOBUFS.
 */
int
dc_newbuf(sc, i, m)
	struct dc_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct dc_desc *c;
	bus_dmamap_t map;

	c = &sc->dc_ldata->dc_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap,
		    m_new, BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		/* Swap the freshly-loaded spare map into this ring slot. */
		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Skip sizeof(u_int64_t) bytes at the front of the buffer; the
	 * descriptor's data pointer below is advanced by the same amount.
	 */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero((char *)mtod(m_new, char *), m_new->m_len);

	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Fill in the descriptor and hand it back to the chip (OWN). */
	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
	c->dc_data = htole32(
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->dc_ctl = htole32(DC_RXCTL_RLINK | ETHER_MAX_DIX_LEN);
	c->dc_status = htole32(DC_RXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_rx_list[i]),
	    sizeof(struct dc_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
2004:
2005: /*
2006: * Grrrrr.
2007: * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
2009: * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
2010: * will happen on slow machines. The bug is that sometimes instead of
2011: * uploading one complete frame during reception, it uploads what looks
2012: * like the entire contents of its FIFO memory. The frame we want is at
2013: * the end of the whole mess, but we never know exactly how much data has
2014: * been uploaded, so salvaging the frame is hard.
2015: *
2016: * There is only one way to do it reliably, and it's disgusting.
2017: * Here's what we know:
2018: *
2019: * - We know there will always be somewhere between one and three extra
2020: * descriptors uploaded.
2021: *
2022: * - We know the desired received frame will always be at the end of the
2023: * total data upload.
2024: *
2025: * - We know the size of the desired received frame because it will be
2026: * provided in the length field of the status word in the last descriptor.
2027: *
2028: * Here's what we do:
2029: *
2030: * - When we allocate buffers for the receive ring, we bzero() them.
2031: * This means that we know that the buffer contents should be all
2032: * zeros, except for data uploaded by the chip.
2033: *
2034: * - We also force the PNIC chip to upload frames that include the
2035: * ethernet CRC at the end.
2036: *
2037: * - We gather all of the bogus frame data into a single buffer.
2038: *
2039: * - We then position a pointer at the end of this buffer and scan
2040: * backwards until we encounter the first non-zero byte of data.
2041: * This is the end of the received frame. We know we will encounter
2042: * some data at the end of the frame because the CRC will always be
2043: * there, so even if the sender transmits a packet of all zeros,
2044: * we won't be fooled.
2045: *
2046: * - We know the size of the actual received frame, so we subtract
2047: * that value from the current pointer location. This brings us
2048: * to the start of the actual received packet.
2049: *
2050: * - We copy this into an mbuf and pass it on, along with the actual
2051: * frame length.
2052: *
2053: * The performance hit is tremendous, but it beats dropping frames all
2054: * the time.
2055: */
2056:
#define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
/*
 * Salvage a frame from the PNIC's bogus multi-descriptor upload (see
 * the long comment above for the gory details).  idx is the descriptor
 * carrying the last fragment; dc_pnic_rx_bug_save marks the descriptor
 * where the first fragment landed.
 */
void
dc_pnic_rx_bug_war(sc, idx)
	struct dc_softc *sc;
	int idx;
{
	struct dc_desc *cur_rx;
	struct dc_desc *c = NULL;
	struct mbuf *m = NULL;
	unsigned char *ptr;
	int i, total_len;
	u_int32_t rxstat = 0;

	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, ETHER_MAX_DIX_LEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		bcopy(mtod(m, char *), ptr, ETHER_MAX_DIX_LEN);
		ptr += ETHER_MAX_DIX_LEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Recycle this descriptor's mbuf and advance. */
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/* Scan backwards until we hit a non-zero byte. */
	while(*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((unsigned long)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, m);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
2114:
2115: /*
2116: * This routine searches the RX ring for dirty descriptors in the
2117: * event that the rxeof routine falls out of sync with the chip's
2118: * current descriptor pointer. This may happen sometimes as a result
2119: * of a "no RX buffer available" condition that happens when the chip
2120: * consumes all of the RX buffers before the driver has a chance to
2121: * process the RX ring. This routine may need to be called more than
2122: * once to bring the driver back in sync with the chip, however we
2123: * should still be getting RX DONE interrupts to drive the search
2124: * for new packets in the RX ring, so we should catch up eventually.
2125: */
2126: int
2127: dc_rx_resync(sc)
2128: struct dc_softc *sc;
2129: {
2130: u_int32_t stat;
2131: int i, pos, offset;
2132:
2133: pos = sc->dc_cdata.dc_rx_prod;
2134:
2135: for (i = 0; i < DC_RX_LIST_CNT; i++) {
2136:
2137: offset = offsetof(struct dc_list_data, dc_rx_list[pos]);
2138: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2139: offset, sizeof(struct dc_desc),
2140: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2141:
2142: stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
2143: if (!(stat & htole32(DC_RXSTAT_OWN)))
2144: break;
2145: DC_INC(pos, DC_RX_LIST_CNT);
2146: }
2147:
2148: /* If the ring really is empty, then just return. */
2149: if (i == DC_RX_LIST_CNT)
2150: return (0);
2151:
2152: /* We've fallen behind the chip: catch it. */
2153: sc->dc_cdata.dc_rx_prod = pos;
2154:
2155: return (EAGAIN);
2156: }
2157:
2158: /*
2159: * A frame has been uploaded: pass the resulting mbuf chain up to
2160: * the higher level protocols.
2161: */
void
dc_rxeof(sc)
	struct dc_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	int i, offset, total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->dc_cdata.dc_rx_prod;

	/* Consume completed descriptors until we hit one the chip owns. */
	for(;;) {
		struct mbuf *m0 = NULL;

		/* Pull the descriptor out of DMA memory before looking at it. */
		offset = offsetof(struct dc_list_data, dc_rx_list[i]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(cur_rx->dc_status);
		/* OWN set: the chip is still filling this one; stop here. */
		if (rxstat & DC_RXSTAT_OWN)
			break;

		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		total_len = DC_RXBYTES(rxstat);

		bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
		    0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/*
		 * PNIC chips can split a frame across descriptors even when
		 * it would fit in one buffer. If this frame is not marked
		 * both first and last fragment, keep scanning until the
		 * last fragment, then let dc_pnic_rx_bug_war() reassemble
		 * it and re-read the (patched) status word.
		 */
		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				rxstat = letoh32(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring. However, don't report long
		 * frames as errors since they could be VLANs.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
			    DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
			    DC_RXSTAT_RUNT | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				/* Recycle the mbuf back into this slot. */
				dc_newbuf(sc, i, m);
				if (rxstat & DC_RXSTAT_CRCERR) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				} else {
					/* Non-CRC errors: reinit the chip. */
					dc_init(sc);
					return;
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		m->m_pkthdr.rcvif = ifp;
		/*
		 * Copy the data into a fresh mbuf chain (including
		 * ETHER_ALIGN bytes of slop so the payload can be
		 * re-aligned below), then recycle the original buffer
		 * back into the ring before advancing.
		 */
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		ifp->if_ipackets++;
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		/* Hand the frame to the network stack. */
		ether_input_mbuf(ifp, m);
	}

	sc->dc_cdata.dc_rx_prod = i;
}
2262:
2263: /*
2264: * A frame was downloaded to the chip. It's safe for us to clean up
2265: * the list buffers.
2266: */
2267:
void
dc_txeof(sc)
	struct dc_softc *sc;
{
	struct dc_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx, offset;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->dc_cdata.dc_tx_cons;
	while(idx != sc->dc_cdata.dc_tx_prod) {
		u_int32_t txstat;

		/* Sync this descriptor before inspecting it. */
		offset = offsetof(struct dc_list_data, dc_tx_list[idx]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = letoh32(cur_tx->dc_status);

		/* OWN set: the chip has not finished with this one yet. */
		if (txstat & DC_TXSTAT_OWN)
			break;

		/*
		 * Intermediate fragments and setup frames carry no mbuf
		 * to free and no meaningful per-packet status; just
		 * account for the descriptor and move on.
		 */
		if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
		    cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
			if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
			}
			sc->dc_cdata.dc_tx_cnt--;
			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			/*
			 * Underruns are handled separately by
			 * dc_tx_underrun(); any other TX error
			 * gets a full reinit.
			 */
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				dc_init(sc);
				return;
			}
		}

		/* Collision count field, shifted down into units. */
		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		/* Tear down the DMA map and free the transmitted mbuf. */
		if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		sc->dc_cdata.dc_tx_cnt--;
		DC_INC(idx, DC_TX_LIST_CNT);
	}

	if (idx != sc->dc_cdata.dc_tx_cons) {
		/* some buffers have been freed */
		sc->dc_cdata.dc_tx_cons = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	/* Arm the watchdog only while transmissions are outstanding. */
	ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ? 0 : 5;
}
2384:
/*
 * Periodic timer: polls the PHY/link state and restarts any queued
 * transmissions once a link is established. Reschedules itself.
 */
void
dc_tick(xsc)
	void *xsc;
{
	struct dc_softc *sc = (struct dc_softc *)xsc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;
	u_int32_t r;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			r = CSR_READ_4(sc, DC_10BTSTAT);
			/*
			 * NOTE(review): a set DC_TSTAT_LS100/LS10 bit is
			 * treated here as "link lost" for that speed
			 * (status bits presumably active-low) -- on a
			 * mismatch, drop dc_link and renegotiate.
			 */
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * Only poll the MII when the receiver is idle
			 * and no transmissions are pending (and never
			 * on ASIX chips).
			 */
			r = CSR_READ_4(sc, DC_ISR);
			if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT &&
			    sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)) {
				mii_tick(mii);
				if (!(mii->mii_media_status & IFM_ACTIVE))
					sc->dc_link = 0;
			}
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->dc_link++;
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
			dc_start(ifp);
	}

	/* Poll faster (10x) while NWAY autonegotiation is unresolved. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		timeout_add(&sc->dc_tick_tmo, hz / 10);
	else
		timeout_add(&sc->dc_tick_tmo, hz);

	splx(s);
}
2460:
2461: /* A transmit underrun has occurred. Back off the transmit threshold,
2462: * or switch to store and forward mode if we have to.
2463: */
2464: void
2465: dc_tx_underrun(sc)
2466: struct dc_softc *sc;
2467: {
2468: u_int32_t isr;
2469: int i;
2470:
2471: if (DC_IS_DAVICOM(sc))
2472: dc_init(sc);
2473:
2474: if (DC_IS_INTEL(sc)) {
2475: /*
2476: * The real 21143 requires that the transmitter be idle
2477: * in order to change the transmit threshold or store
2478: * and forward state.
2479: */
2480: DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2481:
2482: for (i = 0; i < DC_TIMEOUT; i++) {
2483: isr = CSR_READ_4(sc, DC_ISR);
2484: if (isr & DC_ISR_TX_IDLE)
2485: break;
2486: DELAY(10);
2487: }
2488: if (i == DC_TIMEOUT) {
2489: printf("%s: failed to force tx to idle state\n",
2490: sc->sc_dev.dv_xname);
2491: dc_init(sc);
2492: }
2493: }
2494:
2495: sc->dc_txthresh += DC_TXTHRESH_INC;
2496: if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
2497: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2498: } else {
2499: DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2500: DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2501: }
2502:
2503: if (DC_IS_INTEL(sc))
2504: DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2505:
2506: return;
2507: }
2508:
/*
 * Interrupt handler. Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
dc_intr(arg)
	void *arg;
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;
	int claimed = 0;

	sc = arg;

	ifp = &sc->sc_arpcom.ac_if;

	/* Not our interrupt. */
	if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0)
		return (claimed);

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	/*
	 * Service events until the ISR goes quiet. 0xFFFFFFFF means
	 * the hardware has gone away (e.g. card ejected).
	 */
	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
	    status != 0xFFFFFFFF &&
	    (ifp->if_flags & IFF_RUNNING)) {

		claimed = 1;
		/* Ack the conditions we are about to handle. */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			int curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			/*
			 * RX interrupt but no packets consumed: we may
			 * have fallen out of sync with the chip's ring
			 * pointer; resync and retry until caught up.
			 */
			if (curpkts == ifp->if_ipackets) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			/* More frames queued: restart the transmitter. */
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			int curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		/* Fatal bus error: full reset and reinit. */
		if (status & DC_ISR_BUS_ERR) {
			dc_reset(sc);
			dc_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	/* Kick the transmit path if packets are waiting. */
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		dc_start(ifp);

	return (claimed);
}
2591:
2592: /*
2593: * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2594: * pointers to the fragment pointers.
2595: */
int
dc_encap(sc, m_head, txidx)
	struct dc_softc *sc;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct dc_desc *f = NULL;
	int frag, cur, cnt = 0, i;
	bus_dmamap_t map;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	map = sc->sc_tx_sparemap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT) != 0)
		return (ENOBUFS);

	cur = frag = *txidx;

	for (i = 0; i < map->dm_nsegs; i++) {
		/*
		 * ADMtek workaround: don't let a packet's fragment
		 * list wrap past the end of the descriptor ring.
		 */
		if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
			if (*txidx != sc->dc_cdata.dc_tx_prod &&
			    frag == (DC_TX_LIST_CNT - 1)) {
				bus_dmamap_unload(sc->sc_dmat, map);
				return (ENOBUFS);
			}
		}
		/* Keep a few descriptors in reserve; bail if we'd
		 * drop below that headroom. */
		if ((DC_TX_LIST_CNT -
		    (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) {
			bus_dmamap_unload(sc->sc_dmat, map);
			return (ENOBUFS);
		}

		f = &sc->dc_ldata->dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
		if (cnt == 0) {
			/*
			 * First fragment: leave OWN clear for now so the
			 * chip can't start on a half-built chain; it is
			 * set last, below.
			 */
			f->dc_status = htole32(0);
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		DC_INC(frag, DC_TX_LIST_CNT);
		cnt++;
	}

	sc->dc_cdata.dc_tx_cnt += cnt;
	sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m_head;
	/* Swap the loaded map into the chain slot; its old map
	 * becomes the new spare. */
	sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map;
	sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	/* Request a TX-done interrupt per the chip's preferred scheme. */
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	else if ((sc->dc_flags & DC_TX_USE_TX_INTR) &&
	    TBR_IS_ENABLED(&sc->sc_arpcom.ac_if.if_snd))
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Hand the whole chain to the chip by setting OWN on the
	 * first fragment last. */
	sc->dc_ldata->dc_tx_list[*txidx].dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list[*txidx]),
	    sizeof(struct dc_desc) * cnt,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Return the next free descriptor index to the caller. */
	*txidx = frag;

	return (0);
}
2678:
2679: /*
2680: * Coalesce an mbuf chain into a single mbuf cluster buffer.
2681: * Needed for some really badly behaved chips that just can't
2682: * do scatter/gather correctly.
2683: */
2684: int
2685: dc_coal(sc, m_head)
2686: struct dc_softc *sc;
2687: struct mbuf **m_head;
2688: {
2689: struct mbuf *m_new, *m;
2690:
2691: m = *m_head;
2692: MGETHDR(m_new, M_DONTWAIT, MT_DATA);
2693: if (m_new == NULL)
2694: return (ENOBUFS);
2695: if (m->m_pkthdr.len > MHLEN) {
2696: MCLGET(m_new, M_DONTWAIT);
2697: if (!(m_new->m_flags & M_EXT)) {
2698: m_freem(m_new);
2699: return (ENOBUFS);
2700: }
2701: }
2702: m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t));
2703: m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len;
2704: m_freem(m);
2705: *m_head = m_new;
2706:
2707: return (0);
2708: }
2709:
2710: /*
2711: * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2712: * to the mbuf data regions directly in the transmit lists. We also save a
2713: * copy of the pointers since the transmit list fragment pointers are
2714: * physical addresses.
2715: */
2716:
void
dc_start(ifp)
	struct ifnet *ifp;
{
	struct dc_softc *sc;
	struct mbuf *m_head = NULL;
	int idx;

	sc = ifp->if_softc;

	/*
	 * No link yet: hold packets in the send queue (see the long
	 * comment in dc_tick()) unless it is backing up badly.
	 */
	if (!sc->dc_link && ifp->if_snd.ifq_len < 10)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->dc_cdata.dc_tx_prod;

	/* Queue packets until we run out or the ring slot is busy. */
	while(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sc->dc_flags & DC_TX_COALESCE &&
		    (m_head->m_next != NULL ||
			sc->dc_flags & DC_TX_ALIGN)) {
			/* note: dc_coal breaks the poll-and-dequeue rule.
			 * if dc_coal fails, we lose the packet.
			 */
			IFQ_DEQUEUE(&ifp->if_snd, m_head);
			if (dc_coal(sc, &m_head)) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		if (dc_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		if (sc->dc_flags & DC_TX_COALESCE) {
			/* if mbuf is coalesced, it is already dequeued */
		} else
			IFQ_DEQUEUE(&ifp->if_snd, m_head);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/* Some chips can only handle one pending frame at a time. */
		if (sc->dc_flags & DC_TX_ONE) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}
	/* Nothing was queued. */
	if (idx == sc->dc_cdata.dc_tx_prod)
		return;

	/* Transmit */
	sc->dc_cdata.dc_tx_prod = idx;
	if (!(sc->dc_flags & DC_TX_POLL))
		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2790:
/*
 * Stop, reset, and fully reprogram the chip, then mark the interface
 * running. Called at attach/ioctl time and from error recovery paths.
 */
void
dc_init(xsc)
	void *xsc;
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	int s;

	s = splnet();

	mii = &sc->sc_mii;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	/* Program cache alignment from the probed cache line size. */
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/* Select store-and-forward or a cut-through TX threshold. */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	/* Xircom parts need their GPIO pins strobed at init time. */
	if (DC_IS_XIRCOM(sc)) {
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		dc_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Sync down both lists initialized.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_rx_list[0]));
	CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Start the link-poll timer (see dc_tick()). */
	timeout_set(&sc->dc_tick_tmo, dc_tick, sc);

	/* HomePNA has no PHY link indication; assume link is up. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		if (sc->dc_flags & DC_21143_NWAY)
			timeout_add(&sc->dc_tick_tmo, hz / 10);
		else
			timeout_add(&sc->dc_tick_tmo, hz);
	}

#ifdef SRM_MEDIA
	/* Apply the media setting handed down by the SRM console once. */
	if(sc->dc_srm_media) {
		struct ifreq ifr;

		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
}
2977:
2978: /*
2979: * Set media options.
2980: */
2981: int
2982: dc_ifmedia_upd(ifp)
2983: struct ifnet *ifp;
2984: {
2985: struct dc_softc *sc;
2986: struct mii_data *mii;
2987: struct ifmedia *ifm;
2988:
2989: sc = ifp->if_softc;
2990: mii = &sc->sc_mii;
2991: mii_mediachg(mii);
2992:
2993: ifm = &mii->mii_media;
2994:
2995: if (DC_IS_DAVICOM(sc) &&
2996: IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
2997: dc_setcfg(sc, ifm->ifm_media);
2998: else
2999: sc->dc_link = 0;
3000:
3001: return (0);
3002: }
3003:
3004: /*
3005: * Report current media status.
3006: */
3007: void
3008: dc_ifmedia_sts(ifp, ifmr)
3009: struct ifnet *ifp;
3010: struct ifmediareq *ifmr;
3011: {
3012: struct dc_softc *sc;
3013: struct mii_data *mii;
3014: struct ifmedia *ifm;
3015:
3016: sc = ifp->if_softc;
3017: mii = &sc->sc_mii;
3018: mii_pollstat(mii);
3019: ifm = &mii->mii_media;
3020: if (DC_IS_DAVICOM(sc)) {
3021: if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
3022: ifmr->ifm_active = ifm->ifm_media;
3023: ifmr->ifm_status = 0;
3024: return;
3025: }
3026: }
3027: ifmr->ifm_active = mii->mii_media_active;
3028: ifmr->ifm_status = mii->mii_media_status;
3029: }
3030:
/*
 * Interface ioctl handler. Runs at splnet; returns 0 or an errno.
 */
int
dc_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct dc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	/* Let the generic Ethernet layer handle what it can first. */
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			dc_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * A change in IFF_PROMISC while running only
			 * needs a filter reload, not a full reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->dc_if_flags) &
			     IFF_PROMISC) {
				dc_setfilt(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING)) {
					sc->dc_txthresh = 0;
					dc_init(sc);
				}
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dc_stop(sc);
		}
		/* Remember flags so we can detect changes next time. */
		sc->dc_if_flags = ifp->if_flags;
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				dc_setfilt(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->sc_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
#ifdef SRM_MEDIA
		/* An explicit media ioctl overrides the SRM setting. */
		if (sc->dc_srm_media)
			sc->dc_srm_media = 0;
#endif
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return (error);
}
3119:
3120: void
3121: dc_watchdog(ifp)
3122: struct ifnet *ifp;
3123: {
3124: struct dc_softc *sc;
3125:
3126: sc = ifp->if_softc;
3127:
3128: ifp->if_oerrors++;
3129: printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
3130:
3131: dc_stop(sc);
3132: dc_reset(sc);
3133: dc_init(sc);
3134:
3135: if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
3136: dc_start(ifp);
3137: }
3138:
3139: /*
3140: * Stop the adapter and free any mbufs allocated to the
3141: * RX and TX lists.
3142: */
void
dc_stop(sc)
	struct dc_softc *sc;
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_timer = 0;

	/* Cancel the periodic link-poll timer. */
	timeout_del(&sc->dc_tick_tmo);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Stop both DMA engines and mask interrupts. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
	sc->dc_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero((char *)&sc->dc_ldata->dc_rx_list,
	    sizeof(sc->dc_ldata->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
			/*
			 * Setup-frame descriptors don't carry a real
			 * packet mbuf; just clear the pointer.
			 */
			if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
			    htole32(DC_TXCTL_SETUP)) {
				sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
				continue;
			}
			m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero((char *)&sc->dc_ldata->dc_tx_list,
	    sizeof(sc->dc_ldata->dc_tx_list));

	/* Push the cleared descriptor lists back to DMA memory. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
3210:
3211: /*
3212: * Stop all chip I/O so that the kernel's probe routines don't
3213: * get confused by errant DMAs when rebooting.
3214: */
void
dc_shutdown(v)
	void *v;
{
	/* Quiesce the chip so no DMA is in flight across a reboot. */
	dc_stop((struct dc_softc *)v);
}
3223:
3224: void
3225: dc_power(why, arg)
3226: int why;
3227: void *arg;
3228: {
3229: struct dc_softc *sc = arg;
3230: struct ifnet *ifp;
3231: int s;
3232:
3233: s = splnet();
3234: if (why != PWR_RESUME)
3235: dc_stop(sc);
3236: else {
3237: ifp = &sc->sc_arpcom.ac_if;
3238: if (ifp->if_flags & IFF_UP)
3239: dc_init(sc);
3240: }
3241: splx(s);
3242: }
3243:
/* autoconf(9) driver glue: device name "dc", network interface class. */
struct cfdriver dc_cd = {
	0, "dc", DV_IFNET
};
CVSweb