Annotation of sys/arch/sparc/dev/if_ie.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: if_ie.c,v 1.37 2006/12/03 16:35:25 miod Exp $ */
2: /* $NetBSD: if_ie.c,v 1.33 1997/07/29 17:55:38 fair Exp $ */
3:
4: /*-
5: * Copyright (c) 1993, 1994, 1995 Charles Hannum.
6: * Copyright (c) 1992, 1993, University of Vermont and State
7: * Agricultural College.
8: * Copyright (c) 1992, 1993, Garrett A. Wollman.
9: *
10: * Portions:
11: * Copyright (c) 1994, 1995, Rafal K. Boni
12: * Copyright (c) 1990, 1991, William F. Jolitz
13: * Copyright (c) 1990, The Regents of the University of California
14: *
15: * All rights reserved.
16: *
17: * Redistribution and use in source and binary forms, with or without
18: * modification, are permitted provided that the following conditions
19: * are met:
20: * 1. Redistributions of source code must retain the above copyright
21: * notice, this list of conditions and the following disclaimer.
22: * 2. Redistributions in binary form must reproduce the above copyright
23: * notice, this list of conditions and the following disclaimer in the
24: * documentation and/or other materials provided with the distribution.
25: * 3. All advertising materials mentioning features or use of this software
26: * must display the following acknowledgement:
27: * This product includes software developed by Charles Hannum, by the
28: * University of Vermont and State Agricultural College and Garrett A.
29: * Wollman, by William F. Jolitz, and by the University of California,
30: * Berkeley, Lawrence Berkeley Laboratory, and its contributors.
31: * 4. Neither the names of the Universities nor the names of the authors
32: * may be used to endorse or promote products derived from this software
33: * without specific prior written permission.
34: *
35: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38: * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
39: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45: * SUCH DAMAGE.
46: */
47:
48: /*
49: * Intel 82586 Ethernet chip
50: * Register, bit, and structure definitions.
51: *
52: * Original StarLAN driver written by Garrett Wollman with reference to the
53: * Clarkson Packet Driver code for this chip written by Russ Nelson and others.
54: *
55: * BPF support code taken from hpdev/if_le.c, supplied with tcpdump.
56: *
57: * 3C507 support is loosely based on code donated to NetBSD by Rafal Boni.
58: *
59: * Majorly cleaned up and 3C507 code merged by Charles Hannum.
60: *
61: * Converted to SUN ie driver by Charles D. Cranor,
62: * October 1994, January 1995.
63: * This sun version based on i386 version 1.30.
64: */
65:
66: /*
67: * The i82586 is a very painful chip, found in sun3's, sun-4/100's
68: * sun-4/200's, and VME based suns. The byte order is all wrong for a
69: * SUN, making life difficult. Programming this chip is mostly the same,
70: * but certain details differ from system to system. This driver is
 71: * written so that different "ie" interfaces can be controlled by the same
72: * driver.
73: */
74:
75: /*
76: Mode of operation:
77:
78: We run the 82586 in a standard Ethernet mode. We keep NFRAMES
79: received frame descriptors around for the receiver to use, and
80: NRXBUF associated receive buffer descriptors, both in a circular
81: list. Whenever a frame is received, we rotate both lists as
82: necessary. (The 586 treats both lists as a simple queue.) We also
83: keep a transmit command around so that packets can be sent off
84: quickly.
85:
86: We configure the adapter in AL-LOC = 1 mode, which means that the
87: Ethernet/802.3 MAC header is placed at the beginning of the receive
88: buffer rather than being split off into various fields in the RFD.
89: This also means that we must include this header in the transmit
90: buffer as well.
91:
92: By convention, all transmit commands, and only transmit commands,
93: shall have the I (IE_CMD_INTR) bit set in the command. This way,
94: when an interrupt arrives at ieintr(), it is immediately possible
95: to tell what precisely caused it. ANY OTHER command-sending
96: routines should run at splnet(), and should post an acknowledgement
97: to every interrupt they generate.
98:
99: */
100:
101: #include "bpfilter.h"
102:
103: #include <sys/param.h>
104: #include <sys/systm.h>
105: #include <sys/mbuf.h>
106: #include <sys/buf.h>
107: #include <sys/protosw.h>
108: #include <sys/socket.h>
109: #include <sys/ioctl.h>
110: #include <sys/errno.h>
111: #include <sys/syslog.h>
112: #include <sys/device.h>
113:
114: #include <net/if.h>
115: #include <net/if_types.h>
116: #include <net/if_dl.h>
117: #include <net/netisr.h>
118: #include <net/route.h>
119:
120: #if NBPFILTER > 0
121: #include <net/bpf.h>
122: #include <net/bpfdesc.h>
123: #endif
124:
125: #ifdef INET
126: #include <netinet/in.h>
127: #include <netinet/in_systm.h>
128: #include <netinet/in_var.h>
129: #include <netinet/ip.h>
130: #include <netinet/if_ether.h>
131: #endif
132:
133: #include <uvm/uvm_extern.h>
134: #include <uvm/uvm_map.h>
135:
136: /*
137: * ugly byte-order hack for SUNs
138: */
139:
140: #define SWAP(x) ((u_short)(XSWAP((u_short)(x))))
141: #define XSWAP(y) ( ((y) >> 8) | ((y) << 8) )
142:
143: #include <machine/autoconf.h>
144: #include <machine/cpu.h>
145: #include <machine/pmap.h>
146:
147: #include <sparc/dev/if_ie.h>
148: #include <sparc/dev/i82586.h>
149:
150: static struct mbuf *last_not_for_us;
151: struct vm_map *ie_map; /* for obio */
152:
153: #define IED_RINT 0x01
154: #define IED_TINT 0x02
155: #define IED_RNR 0x04
156: #define IED_CNA 0x08
157: #define IED_READFRAME 0x10
158: #define IED_ALL 0x1f
159:
160: #define B_PER_F 3 /* recv buffers per frame */
161: #define MXFRAMES 300 /* max number of recv frames */
162: #define MXRXBUF (MXFRAMES*B_PER_F) /* number of buffers to allocate */
163: #define IE_RBUF_SIZE 256 /* size of each receive buffer;
164: MUST BE POWER OF TWO */
165: #define NTXBUF 2 /* number of transmit commands */
166: #define IE_TBUF_SIZE ETHER_MAX_LEN /* length of transmit buffer */
167:
168:
/* The flavors of ie hardware this driver can drive. */
enum ie_hardware {
	IE_VME,		/* multibus to VME card */
	IE_OBIO,	/* on board */
	IE_VME3E,	/* sun 3e VME card */
	IE_UNKNOWN
};

/* Printable names, indexed by enum ie_hardware — keep in sync with it. */
const char *ie_hardware_names[] = {
	"multibus/vme",
	"onboard",
	"3e/vme",
	"Unknown"
};
182:
183: /*
184: * Ethernet status, per interface.
185: *
186: * hardware addresses/sizes to know (all KVA):
187: * sc_iobase = base of chip's 24 bit address space
188: * sc_maddr = base address of chip RAM as stored in ie_base of iscp
189: * sc_msize = size of chip's RAM
190: * sc_reg = address of card dependent registers
191: *
192: * the chip uses two types of pointers: 16 bit and 24 bit
193: * 16 bit pointers are offsets from sc_maddr/ie_base
194: * KVA(16 bit offset) = offset + sc_maddr
195: * 24 bit pointers are offset from sc_iobase in KVA
196: * KVA(24 bit address) = address + sc_iobase
197: *
198: * on the vme/multibus we have the page map to control where ram appears
199: * in the address space. we choose to have RAM start at 0 in the
200: * 24 bit address space. this means that sc_iobase == sc_maddr!
 201: * to get the physical address of the board's RAM you must take the
202: * top 12 bits of the physical address of the register address
203: * and or in the 4 bits from the status word as bits 17-20 (remember that
204: * the board ignores the chip's top 4 address lines).
205: * For example:
206: * if the register is @ 0xffe88000, then the top 12 bits are 0xffe00000.
207: * to get the 4 bits from the status word just do status & IEVME_HADDR.
208: * suppose the value is "4". Then just shift it left 16 bits to get
209: * it into bits 17-20 (e.g. 0x40000). Then or it to get the
210: * address of RAM (in our example: 0xffe40000). see the attach routine!
211: *
212: * on the onboard ie interface the 24 bit address space is hardwired
213: * to be 0xff000000 -> 0xffffffff of KVA. this means that sc_iobase
 214: * will be 0xff000000. sc_maddr will be wherever we allocate RAM
215: * in KVA. note that since the SCP is at a fixed address it means
216: * that we have to allocate a fixed KVA for the SCP.
217: */
218:
struct ie_softc {
	struct device sc_dev;		/* device structure */
	struct intrhand sc_ih;		/* interrupt info */

	caddr_t sc_iobase;	/* KVA of base of 24 bit addr space */
	caddr_t sc_maddr;	/* KVA of base of chip's RAM (16bit addr sp.)*/
	u_int sc_msize;		/* how much RAM we have/use */
	caddr_t sc_reg;		/* KVA of card's registers */

	struct arpcom sc_arpcom;	/* system arpcom structure */

	/* card-dependent operations vector */
	void (*reset_586)(struct ie_softc *);
				/* card dependent reset function */
	void (*chan_attn)(struct ie_softc *);
				/* card dependent attn function */
	void (*run_586)(struct ie_softc *);
				/* card dependent "go on-line" function */
	void (*memcopy)(const void *, void *, size_t);
				/* card dependent memory copy function */
	void (*memzero)(void *, size_t);
				/* card dependent memory zero function */

	enum ie_hardware hard_type;	/* card type */

	int want_mcsetup;	/* multicast setup pending (see ietint()) */
	int promisc;		/* are we in promisc mode? */

	/*
	 * pointers to the 3 major control structures
	 */
	volatile struct ie_sys_conf_ptr *scp;
	volatile struct ie_int_sys_conf_ptr *iscp;
	volatile struct ie_sys_ctl_block *scb;

	/*
	 * pointer and size of a block of KVA where the buffers
	 * are to be allocated from
	 */
	caddr_t buf_area;
	int buf_area_sz;

	/*
	 * the actual buffers (recv and xmit)
	 */
	volatile struct ie_recv_frame_desc *rframes[MXFRAMES];
	volatile struct ie_recv_buf_desc *rbuffs[MXRXBUF];
	volatile char *cbuffs[MXRXBUF];
	int rfhead, rftail, rbhead, rbtail;	/* circular list indices */

	volatile struct ie_xmit_cmd *xmit_cmds[NTXBUF];
	volatile struct ie_xmit_buf *xmit_buffs[NTXBUF];
	u_char *xmit_cbuffs[NTXBUF];
	int xmit_busy;		/* transmit in progress */
	int xmit_free;		/* number of free transmit buffers */
	int xchead, xctail;	/* transmit queue head/tail indices */

	struct ie_en_addr mcast_addrs[MAXMCAST + 1];
	int mcast_count;	/* number of valid entries in mcast_addrs */

	int nframes;		/* number of frames in use */
	int nrxbuf;		/* number of recv buffs in use */

#ifdef IEDEBUG
	int sc_debug;		/* IED_* debug flags */
#endif
};
289:
290: static void ie_obreset(struct ie_softc *);
291: static void ie_obattend(struct ie_softc *);
292: static void ie_obrun(struct ie_softc *);
293: static void ie_vmereset(struct ie_softc *);
294: static void ie_vmeattend(struct ie_softc *);
295: static void ie_vmerun(struct ie_softc *);
296:
297: void iewatchdog(struct ifnet *);
298: int ieintr(void *);
299: int ieinit(struct ie_softc *);
300: int ieioctl(struct ifnet *, u_long, caddr_t);
301: void iestart(struct ifnet *);
302: void iereset(struct ie_softc *);
303: static void ie_readframe(struct ie_softc *, int);
304: static void ie_drop_packet_buffer(struct ie_softc *);
305: int ie_setupram(struct ie_softc *);
306: static int command_and_wait(struct ie_softc *, int,
307: void volatile *, int);
308: /*static*/ void ierint(struct ie_softc *);
309: /*static*/ void ietint(struct ie_softc *);
310: static int ieget(struct ie_softc *, struct mbuf **,
311: struct ether_header *, int *);
312: static void setup_bufs(struct ie_softc *);
313: static int mc_setup(struct ie_softc *, void *);
314: static void mc_reset(struct ie_softc *);
315: static __inline int ether_equal(u_char *, u_char *);
316: static __inline void ie_ack(struct ie_softc *, u_int);
317: static __inline void ie_setup_config(volatile struct ie_config_cmd *,
318: int, int);
319: static __inline int check_eh(struct ie_softc *, struct ether_header *,
320: int *);
321: static __inline int ie_buflen(struct ie_softc *, int);
322: static __inline int ie_packet_len(struct ie_softc *);
323: static __inline void iexmit(struct ie_softc *);
324: static __inline caddr_t Align(caddr_t);
325:
326: static void chan_attn_timeout(void *);
327: static void run_tdr(struct ie_softc *, struct ie_tdr_cmd *);
328: static void iestop(struct ie_softc *);
329:
330: void wzero(void *, size_t);
331: void wcopy(const void *, void *, size_t);
332:
333: #ifdef IEDEBUG
334: void print_rbd(volatile struct ie_recv_buf_desc *);
335:
336: int in_ierint = 0;
337: int in_ietint = 0;
338: #endif
339:
340: int iematch(struct device *, void *, void *);
341: void ieattach(struct device *, struct device *, void *);
342:
/* autoconfiguration glue */
struct cfattach ie_ca = {
	sizeof(struct ie_softc), iematch, ieattach
};

struct cfdriver ie_cd = {
	NULL, "ie", DV_IFNET
};
350:
351: /*
352: * address generation macros
353: * MK_24 = KVA -> 24 bit address in SUN byte order
354: * MK_16 = KVA -> 16 bit address in INTEL byte order
355: * ST_24 = store a 24 bit address in SUN byte order to INTEL byte order
356: */
357: #define MK_24(base, ptr) ((caddr_t)((u_long)ptr - (u_long)base))
358: #define MK_16(base, ptr) SWAP((u_short)( ((u_long)(ptr)) - ((u_long)(base)) ))
359: #define ST_24(to, from) { \
360: u_long fval = (u_long)(from); \
361: u_char *t = (u_char *)&(to), *f = (u_char *)&fval; \
362: t[0] = f[3]; t[1] = f[2]; t[2] = f[1]; /*t[3] = f[0]
363: ;*/ \
364: }
365: /*
366: * Here are a few useful functions. We could have done these as macros, but
367: * since we have the inline facility, it makes sense to use that instead.
368: */
369: static __inline void
370: ie_setup_config(cmd, promiscuous, manchester)
371: volatile struct ie_config_cmd *cmd;
372: int promiscuous, manchester;
373: {
374:
375: cmd->ie_config_count = 0x0c;
376: cmd->ie_fifo = 8;
377: cmd->ie_save_bad = 0x40;
378: cmd->ie_addr_len = 0x2e;
379: cmd->ie_priority = 0;
380: cmd->ie_ifs = 0x60;
381: cmd->ie_slot_low = 0;
382: cmd->ie_slot_high = 0xf2;
383: cmd->ie_promisc = !!promiscuous | manchester << 2;
384: cmd->ie_crs_cdt = 0;
385: cmd->ie_min_len = 64;
386: cmd->ie_junk = 0xff;
387: }
388:
389: static __inline void
390: ie_ack(sc, mask)
391: struct ie_softc *sc;
392: u_int mask;
393: {
394: volatile struct ie_sys_ctl_block *scb = sc->scb;
395:
396: command_and_wait(sc, scb->ie_status & mask, 0, 0);
397: }
398:
399:
400: int
401: iematch(parent, vcf, aux)
402: struct device *parent;
403: void *vcf;
404: void *aux;
405: {
406: struct cfdata *cf = vcf;
407: struct confargs *ca = aux;
408: struct romaux *ra = &ca->ca_ra;
409:
410: if (strcmp(cf->cf_driver->cd_name, ra->ra_name)) /* correct name? */
411: return (0);
412:
413: switch (ca->ca_bustype) {
414: case BUS_SBUS:
415: default:
416: return (0);
417: case BUS_OBIO:
418: if (probeget(ra->ra_vaddr, 1) != -1)
419: return (1);
420: break;
421: case BUS_VME16:
422: case BUS_VME32:
423: if (probeget(ra->ra_vaddr, 2) != -1)
424: return (1);
425: break;
426: }
427: return (0);
428: }
429:
430: /*
431: * MULTIBUS/VME support
432: */
433: void
434: ie_vmereset(sc)
435: struct ie_softc *sc;
436: {
437: volatile struct ievme *iev = (struct ievme *) sc->sc_reg;
438: iev->status = IEVME_RESET;
439: delay(100); /* XXX could be shorter? */
440: iev->status = 0;
441: }
442:
443: void
444: ie_vmeattend(sc)
445: struct ie_softc *sc;
446: {
447: volatile struct ievme *iev = (struct ievme *) sc->sc_reg;
448:
449: iev->status |= IEVME_ATTEN; /* flag! */
450: iev->status &= ~IEVME_ATTEN; /* down. */
451: }
452:
453: void
454: ie_vmerun(sc)
455: struct ie_softc *sc;
456: {
457: volatile struct ievme *iev = (struct ievme *) sc->sc_reg;
458:
459: iev->status |= (IEVME_ONAIR | IEVME_IENAB | IEVME_PEINT);
460: }
461:
462: /*
463: * onboard ie support
464: */
465: void
466: ie_obreset(sc)
467: struct ie_softc *sc;
468: {
469: volatile struct ieob *ieo = (struct ieob *) sc->sc_reg;
470: ieo->obctrl = 0;
471: delay(100); /* XXX could be shorter? */
472: ieo->obctrl = IEOB_NORSET;
473: }
474: void
475: ie_obattend(sc)
476: struct ie_softc *sc;
477: {
478: volatile struct ieob *ieo = (struct ieob *) sc->sc_reg;
479:
480: ieo->obctrl |= IEOB_ATTEN; /* flag! */
481: ieo->obctrl &= ~IEOB_ATTEN; /* down. */
482: }
483:
484: void
485: ie_obrun(sc)
486: struct ie_softc *sc;
487: {
488: volatile struct ieob *ieo = (struct ieob *) sc->sc_reg;
489:
490: ieo->obctrl |= (IEOB_ONAIR|IEOB_IENAB|IEOB_NORSET);
491: }
492:
/*
 * Autoconfiguration attach routine.
 * Taken almost exactly from Bill's if_is.c, then modified beyond recognition.
 *
 * Sets up the bus-specific glue (OBIO vs. multibus/VME), maps the chip
 * registers and shared RAM, initializes the chip control structures,
 * attaches the network interface, and hooks up the interrupt handler.
 */
void
ieattach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct ie_softc *sc = (void *) self;
	struct confargs *ca = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	extern void myetheraddr(u_char *);	/* should be elsewhere */
	struct bootpath *bp;
	int pri = ca->ca_ra.ra_intr[0].int_pri;

	/*
	 * *note*: we don't detect the difference between a VME3E and
	 * a multibus/vme card. if you want to use a 3E you'll have
	 * to fix this.
	 */

	switch (ca->ca_bustype) {
	case BUS_OBIO:
	    {
		volatile struct ieob *ieo;
		paddr_t pa;

		/* on-board interface: plain bcopy/bzero work on its RAM */
		sc->hard_type = IE_OBIO;
		sc->reset_586 = ie_obreset;
		sc->chan_attn = ie_obattend;
		sc->run_586 = ie_obrun;
		sc->memcopy = bcopy;
		sc->memzero = bzero;
		sc->sc_msize = 65536; /* XXX */
		sc->sc_reg = mapiodev(ca->ca_ra.ra_reg, 0, sizeof(struct ieob));
		ieo = (volatile struct ieob *) sc->sc_reg;

		/*
		 * the rest of the IE_OBIO case needs to be cleaned up
		 * XXX
		 */

		/* carve a dedicated map inside the chip's 24-bit window */
		ie_map = uvm_map_create(pmap_kernel(), (vaddr_t)IEOB_ADBASE,
		    (vaddr_t)IEOB_ADBASE + sc->sc_msize, VM_MAP_INTRSAFE);
		if (ie_map == NULL) panic("ie_map");
		sc->sc_maddr = (caddr_t) uvm_km_alloc(ie_map, sc->sc_msize);
		if (sc->sc_maddr == NULL) panic("ie kmem_alloc");
		/* shared with the chip: must not be cached */
		kvm_uncache(sc->sc_maddr, sc->sc_msize >> PGSHIFT);
		if (((u_long)sc->sc_maddr & ~(NBPG-1)) != (u_long)sc->sc_maddr)
			panic("unaligned dvmamalloc breaks");
		sc->sc_iobase = (caddr_t)IEOB_ADBASE; /* 24 bit base addr */
		(sc->memzero)(sc->sc_maddr, sc->sc_msize);
		sc->iscp = (volatile struct ie_int_sys_conf_ptr *)
		    sc->sc_maddr; /* @ location zero */
		/*
		 * NOTE(review): the cast binds before the `+', so the scb is
		 * placed sizeof(iscp) *scb-sized elements* past sc_maddr,
		 * not sizeof(iscp) bytes.  This is the only place sc->scb is
		 * set, so the layout is at least self-consistent — confirm
		 * against ie_setupram()'s expectations.
		 */
		sc->scb = (volatile struct ie_sys_ctl_block *)
		    sc->sc_maddr + sizeof(struct ie_int_sys_conf_ptr);
		/* scb follows iscp */

		/*
		 * SCP: the scp must appear at KVA IEOB_ADBASE. The
		 * ROM seems to have page up there, but I'm not sure all
		 * ROMs will have it there. Also, I'm not sure if that
		 * page is on some free list somewhere or not. Let's
		 * map the first page of the buffer we just allocated
		 * to IEOB_ADBASE to be safe.
		 */

		if (pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_maddr, &pa) == FALSE)
			panic("ie pmap_extract");
		pmap_enter(pmap_kernel(), trunc_page(IEOB_ADBASE+IE_SCP_ADDR),
		    (paddr_t)pa | PMAP_NC /*| PMAP_IOC*/,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		pmap_update(pmap_kernel());

		sc->scp = (volatile struct ie_sys_conf_ptr *)
		    (IEOB_ADBASE + IE_SCP_ADDR);

		/*
		 * rest of first page is unused (wasted!), rest of ram
		 * for buffers
		 */
		sc->buf_area = sc->sc_maddr + NBPG;
		sc->buf_area_sz = sc->sc_msize - NBPG;
		break;
	    }
	case BUS_VME16:
	    {
		volatile struct ievme *iev;
		u_long rampaddr;
		int lcv;

		/* VME board: needs the word-at-a-time copy/zero routines */
		sc->hard_type = IE_VME;
		sc->reset_586 = ie_vmereset;
		sc->chan_attn = ie_vmeattend;
		sc->run_586 = ie_vmerun;
		sc->memcopy = wcopy;
		sc->memzero = wzero;
		sc->sc_msize = 65536; /* XXX */
		sc->sc_reg = mapiodev(ca->ca_ra.ra_reg, 0,
		    sizeof(struct ievme));
		iev = (volatile struct ievme *) sc->sc_reg;
		/*
		 * Reconstruct the physical address of the board RAM from
		 * the register address (see the layout comment above):
		 * top 12 bits from the register's physical address...
		 */
		rampaddr = (u_long)ca->ca_ra.ra_paddr & 0xfff00000;
		/* ...4 more from the status word, as bits 17-20 */
		rampaddr = rampaddr | ((iev->status & IEVME_HADDR) << 16);
		rampaddr -= (u_long)ca->ca_ra.ra_paddr;
		sc->sc_maddr = mapiodev(ca->ca_ra.ra_reg, rampaddr,
		    sc->sc_msize);
		sc->sc_iobase = sc->sc_maddr;	/* RAM starts at 0 in 24-bit space */
		iev->pectrl = iev->pectrl | IEVME_PARACK; /* clear to start */

		/*
		 * set up mappings, direct map except for last page
		 * which is mapped at zero and at high address (for
		 * scp), zero ram
		 */

		for (lcv = 0; lcv < IEVME_MAPSZ - 1; lcv++)
			iev->pgmap[lcv] = IEVME_SBORDR | IEVME_OBMEM | lcv;
		iev->pgmap[IEVME_MAPSZ - 1] = IEVME_SBORDR | IEVME_OBMEM | 0;
		(sc->memzero)(sc->sc_maddr, sc->sc_msize);

		/*
		 * set up pointers to data structures and buffer area.
		 * scp is in double mapped page... get offset into page
		 * and add to sc_maddr.
		 */
		sc->scp = (volatile struct ie_sys_conf_ptr *)
		    (sc->sc_maddr + (IE_SCP_ADDR & (IEVME_PAGESIZE - 1)));
		sc->iscp = (volatile struct ie_int_sys_conf_ptr *)
		    sc->sc_maddr; /* iscp @ location zero */
		/* same cast-precedence note as the OBIO case above */
		sc->scb = (volatile struct ie_sys_ctl_block *)
		    sc->sc_maddr + sizeof(struct ie_int_sys_conf_ptr);
		/* scb follows iscp */

		/*
		 * rest of first page is unused, rest of ram
		 * for buffers
		 */
		sc->buf_area = sc->sc_maddr + IEVME_PAGESIZE;
		sc->buf_area_sz = sc->sc_msize - IEVME_PAGESIZE;
		break;
	    }
	default:
		printf("unknown\n");
		return;
	}

	myetheraddr(sc->sc_arpcom.ac_enaddr);

	if (ie_setupram(sc) == 0) {
		printf(": RAM CONFIG FAILED!\n");
		/* XXX should reclaim resources? */
		return;
	}

	/* plug ourselves into the generic network interface layer */
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = iestart;
	ifp->if_ioctl = ieioctl;
	ifp->if_watchdog = iewatchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	printf(" pri %d address %s, type %s\n", pri,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr),
	    ie_hardware_names[sc->hard_type]);

	/* hook up the interrupt handler for the bus we attached on */
	switch (ca->ca_bustype) {
#if defined(SUN4)
	case BUS_OBIO:
		sc->sc_ih.ih_fun = ieintr;
		sc->sc_ih.ih_arg = sc;
		intr_establish(pri, &sc->sc_ih, IPL_NET, self->dv_xname);
		break;
	case BUS_VME16:
	case BUS_VME32:
		sc->sc_ih.ih_fun = ieintr;
		sc->sc_ih.ih_arg = sc;
		vmeintr_establish(ca->ca_ra.ra_intr[0].int_vec, pri,
		    &sc->sc_ih, IPL_NET, self->dv_xname);
		break;
#endif /* SUN4 */
	}

	/* record ourselves as the boot device if we match the boot path */
	bp = ca->ca_ra.ra_bp;
	if (bp != NULL && strcmp(bp->name, "ie") == 0 &&
	    sc->sc_dev.dv_unit == bp->val[1])
		bp->dev = &sc->sc_dev;
}
687:
688:
689:
690: /*
691: * Device timeout/watchdog routine. Entered if the device neglects to generate
692: * an interrupt after a transmit has been started on it.
693: */
694: void
695: iewatchdog(ifp)
696: struct ifnet *ifp;
697: {
698: struct ie_softc *sc = ifp->if_softc;
699:
700: log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
701: ++sc->sc_arpcom.ac_if.if_oerrors;
702:
703: iereset(sc);
704: }
705:
/*
 * What to do upon receipt of an interrupt.
 *
 * Acknowledges the pending cause bits first (so events arriving during
 * the handler are not lost), dispatches to the receive/transmit
 * handlers, and loops until the chip reports no further causes.
 * Always returns 1 (interrupt claimed).
 */
int
ieintr(v)
	void *v;
{
	struct ie_softc *sc = v;
	u_short status;

	status = sc->scb->ie_status;

	/*
	 * check for parity error (VME boards latch it in the board
	 * status register; ack it so the board can report the next one)
	 */
	if (sc->hard_type == IE_VME) {
		volatile struct ievme *iev =
		    (volatile struct ievme *)sc->sc_reg;
		if (iev->status & IEVME_PERR) {
			printf("%s: parity error (ctrl 0x%x @ 0x%02x%04x)\n",
			    sc->sc_dev.dv_xname, iev->pectrl,
			    iev->pectrl & IEVME_HADDR, iev->peaddr);
			iev->pectrl = iev->pectrl | IEVME_PARACK;
		}
	}

loop:
	/* Ack interrupts FIRST in case we receive more during the ISR. */
	ie_ack(sc, IE_ST_WHENCE & status);

	/* received a frame, or the receiver ran out of resources */
	if (status & (IE_ST_RECV | IE_ST_RNR)) {
#ifdef IEDEBUG
		in_ierint++;
		if (sc->sc_debug & IED_RINT)
			printf("%s: rint\n", sc->sc_dev.dv_xname);
#endif
		ierint(sc);
#ifdef IEDEBUG
		in_ierint--;
#endif
	}

	/* a command (by convention, a transmit) completed */
	if (status & IE_ST_DONE) {
#ifdef IEDEBUG
		in_ietint++;
		if (sc->sc_debug & IED_TINT)
			printf("%s: tint\n", sc->sc_dev.dv_xname);
#endif
		ietint(sc);
#ifdef IEDEBUG
		in_ietint--;
#endif
	}

	/* receiver not ready: error out and restart via full reset */
	if (status & IE_ST_RNR) {
		printf("%s: receiver not ready\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_ierrors++;
		iereset(sc);
	}

#ifdef IEDEBUG
	if ((status & IE_ST_ALLDONE) && (sc->sc_debug & IED_CNA))
		printf("%s: cna\n", sc->sc_dev.dv_xname);
#endif

	/* re-read status: if new causes arrived meanwhile, go again */
	if ((status = sc->scb->ie_status) & IE_ST_WHENCE)
		goto loop;

	return 1;
}
777:
/*
 * Process a received-frame interrupt.
 *
 * Walks the circular receive-frame list from rfhead, handing each
 * complete+ok frame to ie_readframe(), until an incomplete frame is
 * found.  If the chip stopped for lack of resources, the receive unit
 * is restarted at the head of the lists.
 */
void
ierint(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	int i, status;
	static int timesthru = 1024;	/* harvest error counters every 1024 frames */

	i = sc->rfhead;
	for (;;) {
		status = sc->rframes[i]->ie_fd_status;

		if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
			sc->sc_arpcom.ac_if.if_ipackets++;
			/* fold the chip's error counters into if_ierrors */
			if (!--timesthru) {
				sc->sc_arpcom.ac_if.if_ierrors +=
				    SWAP(scb->ie_err_crc) +
				    SWAP(scb->ie_err_align) +
				    SWAP(scb->ie_err_resource) +
				    SWAP(scb->ie_err_overrun);
				scb->ie_err_crc = scb->ie_err_align =
				    scb->ie_err_resource = scb->ie_err_overrun =
				    0;
				timesthru = 1024;
			}
			ie_readframe(sc, i);
		} else {
			/*
			 * Frame not complete/ok: if the chip hit a
			 * no-resources condition and the receive unit is
			 * idle, point it back at the head of both lists
			 * and restart it.
			 */
			if ((status & IE_FD_RNR) != 0 &&
			    (scb->ie_status & IE_RU_READY) == 0) {
				sc->rframes[0]->ie_fd_buf_desc =
				    MK_16(sc->sc_maddr, sc->rbuffs[0]);
				scb->ie_recv_list =
				    MK_16(sc->sc_maddr, sc->rframes[0]);
				command_and_wait(sc, IE_RU_START, 0, 0);
			}
			break;
		}
		i = (i + 1) % sc->nframes;	/* circular frame list */
	}
}
821:
/*
 * Process a command-complete interrupt.  These are only generated by the
 * transmission of frames.  This routine is deceptively simple, since most of
 * the real work is done by iestart().
 */
void
ietint(sc)
	struct ie_softc *sc;
{
	int status;

	/* a transmit finished: cancel the watchdog, allow new output */
	sc->sc_arpcom.ac_if.if_timer = 0;
	sc->sc_arpcom.ac_if.if_flags &= ~IFF_OACTIVE;

	status = sc->xmit_cmds[sc->xctail]->ie_xmit_status;

	/* should not happen: interrupt with the command still pending */
	if (!(status & IE_STAT_COMPL) || (status & IE_STAT_BUSY))
		printf("ietint: command still busy!\n");

	if (status & IE_STAT_OK) {
		sc->sc_arpcom.ac_if.if_opackets++;
		sc->sc_arpcom.ac_if.if_collisions +=
		    SWAP(status & IE_XS_MAXCOLL);
	} else if (status & IE_STAT_ABORT) {
		printf("%s: send aborted\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_oerrors++;
	} else if (status & IE_XS_NOCARRIER) {
		printf("%s: no carrier\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_oerrors++;
	} else if (status & IE_XS_LOSTCTS) {
		printf("%s: lost CTS\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_oerrors++;
	} else if (status & IE_XS_UNDERRUN) {
		printf("%s: DMA underrun\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_oerrors++;
	} else if (status & IE_XS_EXCMAX) {
		printf("%s: too many collisions\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_collisions += 16;
		sc->sc_arpcom.ac_if.if_oerrors++;
	}

	/*
	 * If multicast addresses were added or deleted while transmitting,
	 * mc_reset() set the want_mcsetup flag indicating that we should do
	 * it.
	 */
	if (sc->want_mcsetup) {
		mc_setup(sc, (caddr_t)sc->xmit_cbuffs[sc->xctail]);
		sc->want_mcsetup = 0;
	}

	/* Done with the buffer. */
	sc->xmit_free++;
	sc->xmit_busy = 0;
	sc->xctail = (sc->xctail + 1) % NTXBUF;

	/* kick the transmitter to push out anything still queued */
	iestart(&sc->sc_arpcom.ac_if);
}
880:
881: /*
882: * Compare two Ether/802 addresses for equality, inlined and unrolled for
883: * speed. I'd love to have an inline assembler version of this...
884: */
885: static __inline int
886: ether_equal(one, two)
887: u_char *one, *two;
888: {
889:
890: if (one[0] != two[0] || one[1] != two[1] || one[2] != two[2] ||
891: one[3] != two[3] || one[4] != two[4] || one[5] != two[5])
892: return 0;
893: return 1;
894: }
895:
/*
 * Check for a valid address.  to_bpf is filled in with one of the following:
 *	0 -> BPF doesn't get this packet
 *	1 -> BPF does get this packet
 *	2 -> BPF does get this packet, but we don't
 * Return value is true if the packet is for us, and false otherwise.
 *
 * This routine is a mess, but it's also critical that it be as fast
 * as possible.  It could be made cleaner if we can assume that the
 * only client which will fiddle with IFF_PROMISC is BPF.  This is
 * probably a good assumption, but we do not make it here.  (Yet.)
 */
static __inline int
check_eh(sc, eh, to_bpf)
	struct ie_softc *sc;
	struct ether_header *eh;
	int *to_bpf;
{
	int i;

	switch(sc->promisc) {
	case IFF_ALLMULTI:
		/*
		 * Receiving all multicasts, but no unicasts except those
		 * destined for us.
		 */
#if NBPFILTER > 0
		/* BPF gets this packet if anybody cares */
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		/* group (multicast/broadcast) bit set -> accept */
		if (eh->ether_dhost[0] & 1)
			return 1;
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr)) return 1;
		return 0;

	case IFF_PROMISC:
		/*
		 * Receiving all packets.  These need to be passed on to BPF.
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0) ||
		    (sc->sc_arpcom.ac_if.if_bridge != NULL);
#else
		*to_bpf = (sc->sc_arpcom.ac_if.if_bridge != NULL);
#endif
		/* If for us, accept and hand up to BPF */
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr)) return 1;

#if NBPFILTER > 0
		/* not bridging: BPF can see it, the stack need not */
		if (*to_bpf && sc->sc_arpcom.ac_if.if_bridge == NULL)
			*to_bpf = 2; /* we don't need to see it */
#endif

		/*
		 * Not a multicast, so BPF wants to see it but we don't.
		 * (to_bpf==2 above tells the caller to drop it after BPF.)
		 */
		if (!(eh->ether_dhost[0] & 1))
			return 1;

		/*
		 * If it's one of our multicast groups, accept it and pass it
		 * up.
		 */
		for (i = 0; i < sc->mcast_count; i++) {
			if (ether_equal(eh->ether_dhost, (u_char *)&sc->mcast_addrs[i])) {
#if NBPFILTER > 0
				if (*to_bpf)
					*to_bpf = 1;
#endif
				return 1;
			}
		}
		/* not one of ours, but in promisc mode everything is kept */
		return 1;

	case IFF_ALLMULTI | IFF_PROMISC:
		/*
		 * Acting as a multicast router, and BPF running at the same
		 * time.  Whew!  (Hope this is a fast machine...)
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0) ||
		    (sc->sc_arpcom.ac_if.if_bridge != NULL);
#else
		*to_bpf = (sc->sc_arpcom.ac_if.if_bridge != 0);
#endif
		/* We want to see multicasts. */
		if (eh->ether_dhost[0] & 1)
			return 1;

		/* We want to see our own packets */
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
			return 1;

		/* Anything else goes to BPF but nothing else. */
#if NBPFILTER > 0
		if (*to_bpf && sc->sc_arpcom.ac_if.if_bridge == NULL)
			*to_bpf = 2;
#endif
		return 1;

	default:
		/*
		 * Only accept unicast packets destined for us, or multicasts
		 * for groups that we belong to.  For now, we assume that the
		 * '586 will only return packets that we asked it for.  This
		 * isn't strictly true (it uses hashing for the multicast
		 * filter), but it will do in this case, and we want to get out
		 * of here as quickly as possible.
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		return 1;
	}
	/* NOTE(review): not reached — every case above returns. */
	return 0;
}
1011:
1012: /*
1013: * We want to isolate the bits that have meaning... This assumes that
1014: * IE_RBUF_SIZE is an even power of two. If somehow the act_len exceeds
1015: * the size of the buffer, then we are screwed anyway.
1016: */
1017: static __inline int
1018: ie_buflen(sc, head)
1019: struct ie_softc *sc;
1020: int head;
1021: {
1022:
1023: return (SWAP(sc->rbuffs[head]->ie_rbd_actual)
1024: & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1)));
1025: }
1026:
1027: static __inline int
1028: ie_packet_len(sc)
1029: struct ie_softc *sc;
1030: {
1031: int i;
1032: int head = sc->rbhead;
1033: int acc = 0;
1034:
1035: do {
1036: if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
1037: #ifdef IEDEBUG
1038: print_rbd(sc->rbuffs[sc->rbhead]);
1039: #endif
1040: log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
1041: sc->sc_dev.dv_xname, sc->rbhead);
1042: iereset(sc);
1043: return -1;
1044: }
1045:
1046: i = sc->rbuffs[head]->ie_rbd_actual & IE_RBD_LAST;
1047:
1048: acc += ie_buflen(sc, head);
1049: head = (head + 1) % sc->nrxbuf;
1050: } while (!i);
1051:
1052: return acc;
1053: }
1054:
1055: /*
1056: * Setup all necessary artifacts for an XMIT command, and then pass the XMIT
1057: * command to the chip to be executed. On the way, if we have a BPF listener
1058: * also give him a copy.
1059: */
static __inline void
iexmit(sc)
	struct ie_softc *sc;
{

#if NBPFILTER > 0
	/*
	 * If BPF is listening on this interface, let it see the packet before
	 * we push it on the wire.
	 * At this point ie_xmit_flags still holds only the SWAPped byte
	 * count stored by iestart(), so SWAP() here recovers the length.
	 */
	if (sc->sc_arpcom.ac_if.if_bpf)
		bpf_tap(sc->sc_arpcom.ac_if.if_bpf,
		    sc->xmit_cbuffs[sc->xctail],
		    SWAP(sc->xmit_buffs[sc->xctail]->ie_xmit_flags),
		    BPF_DIRECTION_OUT);
#endif

	/* Single-buffer frame: mark it last and terminate the TBD list. */
	sc->xmit_buffs[sc->xctail]->ie_xmit_flags |= IE_XMIT_LAST;
	sc->xmit_buffs[sc->xctail]->ie_xmit_next = SWAP(0xffff);
	/* Point the buffer descriptor at the data (24-bit chip address). */
	ST_24(sc->xmit_buffs[sc->xctail]->ie_xmit_buf,
	    MK_24(sc->sc_iobase, sc->xmit_cbuffs[sc->xctail]));

	/* Build a lone XMIT action command; interrupt on completion. */
	sc->xmit_cmds[sc->xctail]->com.ie_cmd_link = SWAP(0xffff);
	sc->xmit_cmds[sc->xctail]->com.ie_cmd_cmd =
	    IE_CMD_XMIT | IE_CMD_INTR | IE_CMD_LAST;

	sc->xmit_cmds[sc->xctail]->ie_xmit_status = SWAP(0);
	sc->xmit_cmds[sc->xctail]->ie_xmit_desc =
	    MK_16(sc->sc_maddr, sc->xmit_buffs[sc->xctail]);

	/* Hand the command to the chip and start the command unit. */
	sc->scb->ie_command_list =
	    MK_16(sc->sc_maddr, sc->xmit_cmds[sc->xctail]);
	command_and_wait(sc, IE_CU_START, 0, 0);

	/* Arm the interface watchdog in case the transmit never completes. */
	sc->xmit_busy = 1;
	sc->sc_arpcom.ac_if.if_timer = 5;
}
1097:
1098: /*
1099: * Read data off the interface, and turn it into an mbuf chain.
1100: *
1101: * This code is DRAMATICALLY different from the previous version; this
1102: * version tries to allocate the entire mbuf chain up front, given the
1103: * length of the data available. This enables us to allocate mbuf
1104: * clusters in many situations where before we would have had a long
1105: * chain of partially-full mbufs. This should help to speed up the
1106: * operation considerably. (Provided that it works, of course.)
1107: */
static inline int
ieget(sc, mp, ehp, to_bpf)
	struct ie_softc *sc;
	struct mbuf **mp;
	struct ether_header *ehp;
	int *to_bpf;
{
	struct mbuf *m, *top, **mymp;
	int i;
	int offset;
	int totlen, resid;
	int thismboff;
	int head;

	/* Total length of the received frame, summed across its RBDs. */
	totlen = ie_packet_len(sc);
	if (totlen <= 0)
		return -1;

	i = sc->rbhead;

	/*
	 * Snarf the Ethernet header.
	 */
	(sc->memcopy)((caddr_t)sc->cbuffs[i], (caddr_t)ehp, sizeof *ehp);

	/*
	 * As quickly as possible, check if this packet is for us.
	 * If not, don't waste a single cycle copying the rest of the
	 * packet in.
	 * This is only a consideration when FILTER is defined; i.e., when
	 * we are either running BPF or doing multicasting.
	 */
	if (!check_eh(sc, ehp, to_bpf)) {
		ie_drop_packet_buffer(sc);
		/* Compensate: ie_readframe() bumps if_ierrors whenever we
		 * return -1, but a filtered packet is not an error. */
		sc->sc_arpcom.ac_if.if_ierrors--;	/* just this case, it's not an error */
		return -1;
	}
	totlen -= (offset = sizeof *ehp);

	MGETHDR(*mp, M_DONTWAIT, MT_DATA);
	if (!*mp) {
		ie_drop_packet_buffer(sc);
		return -1;
	}

	m = *mp;
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;
	m->m_len = MHLEN;
	resid = m->m_pkthdr.len = totlen;
	top = 0;
	mymp = &top;

	/*
	 * This loop goes through and allocates mbufs for all the data we will
	 * be copying in. It does not actually do the copying yet.
	 */
	do {	/* while (resid > 0) */
		/*
		 * Try to allocate an mbuf to hold the data that we have. If
		 * we already allocated one, just get another one and stick it
		 * on the end (eventually). If we don't already have one, try
		 * to allocate an mbuf cluster big enough to hold the whole
		 * packet, if we think it's reasonable, or a single mbuf which
		 * may or may not be big enough.
		 * Got that?
		 */
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (!m) {
				m_freem(top);
				ie_drop_packet_buffer(sc);
				return -1;
			}
			m->m_len = MLEN;
		}

		if (resid >= MINCLSIZE) {
			/* Worth a cluster; silently fall back to the bare
			 * mbuf if MCLGET fails (M_EXT not set). */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = min(resid, MCLBYTES);
		} else {
			if (resid < m->m_len) {
				/* First mbuf: leave room for link headers
				 * to be prepended without another mbuf. */
				if (!top && resid + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = resid;
			}
		}
		resid -= m->m_len;
		*mymp = m;
		mymp = &m->m_next;
	} while (resid > 0);

	resid = totlen;
	m = top;
	thismboff = 0;
	head = sc->rbhead;

	/*
	 * Now we take the mbuf chain (hopefully only one mbuf most of the
	 * time) and stuff the data into it. There are no possible failures at
	 * or after this point.
	 */
	while (resid > 0) {		/* while there's stuff left */
		int thislen = ie_buflen(sc, head) - offset;

		/*
		 * If too much data for the current mbuf, then fill the current
		 * one up, go to the next one, and try again.
		 */
		if (thislen > m->m_len - thismboff) {
			int newlen = m->m_len - thismboff;
			(sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
			    mtod(m, caddr_t) + thismboff, (u_int)newlen);
			m = m->m_next;
			thismboff = 0;		/* new mbuf, so no offset */
			offset += newlen;	/* we are now this far
						   into the packet */
			resid -= newlen;	/* so there is this much
						   left to get */
			continue;
		}

		/*
		 * If there is more than enough space in the mbuf to hold the
		 * contents of this buffer, copy everything in, advance
		 * pointers and so on.
		 */
		if (thislen < m->m_len - thismboff) {
			(sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
			    mtod(m, caddr_t) + thismboff, (u_int)thislen);
			thismboff += thislen;	/* we are this far into the mbuf */
			resid -= thislen;	/* and this much is left */
			goto nextbuf;
		}

		/*
		 * Otherwise, there is exactly enough space to put this
		 * buffer's contents into the current mbuf. Do the combination
		 * of the above actions.
		 */
		(sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
		    mtod(m, caddr_t) + thismboff, (u_int)thislen);
		m = m->m_next;
		thismboff = 0;		/* new mbuf, start at the beginning */
		resid -= thislen;	/* and we are this far through */

		/*
		 * Advance all the pointers. We can get here from either of
		 * the last two cases, but never the first.
		 */
	nextbuf:
		offset = 0;
		/* Recycle the consumed RBD: clear the count, mark it as the
		 * new end of the ring, and advance both ring pointers. */
		sc->rbuffs[head]->ie_rbd_actual = SWAP(0);
		sc->rbuffs[head]->ie_rbd_length |= IE_RBD_LAST;
		sc->rbhead = head = (head + 1) % sc->nrxbuf;
		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
		sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
	}

	/*
	 * Unless something changed strangely while we were doing the copy, we
	 * have now copied everything in from the shared memory.
	 * This means that we are done.
	 */
	return 0;
}
1274:
1275: /*
1276: * Read frame NUM from unit UNIT (pre-cached as IE).
1277: *
1278: * This routine reads the RFD at NUM, and copies in the buffers from the list
1279: * of RBD, then rotates the RBD and RFD lists so that the receiver doesn't
1280: * start complaining. Trailers are DROPPED---there's no point in wasting time
1281: * on confusing code to deal with them. Hopefully, this machine will never ARP
1282: * for trailers anyway.
1283: */
static void
ie_readframe(sc, num)
	struct ie_softc *sc;
	int num;		/* frame number to read */
{
	int status;
	struct mbuf *m = 0;
	struct ether_header eh;
#if NBPFILTER > 0
	int bpf_gets_it = 0;	/* set by check_eh() via ieget():
				   1 = BPF and stack, 2 = BPF only */
#endif

	status = sc->rframes[num]->ie_fd_status;

	/* Immediately advance the RFD list, since we have copied ours now. */
	sc->rframes[num]->ie_fd_status = SWAP(0);
	sc->rframes[num]->ie_fd_last |= IE_FD_LAST;
	sc->rframes[sc->rftail]->ie_fd_last &= ~IE_FD_LAST;
	sc->rftail = (sc->rftail + 1) % sc->nframes;
	sc->rfhead = (sc->rfhead + 1) % sc->nframes;

	/* Only pull the data in if the chip says the frame was received OK. */
	if (status & IE_FD_OK) {
#if NBPFILTER > 0
		if (ieget(sc, &m, &eh, &bpf_gets_it)) {
#else
		if (ieget(sc, &m, &eh, 0)) {
#endif
			sc->sc_arpcom.ac_if.if_ierrors++;
			return;
		}
	}

#ifdef IEDEBUG
	if (sc->sc_debug & IED_READFRAME)
		printf("%s: frame from ether %s type 0x%x\n",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(eh.ether_shost), (u_int)eh.ether_type);
#endif

	/* Nothing copied in (bad frame, filtered, or no mbufs): done. */
	if (!m)
		return;

	/* Free the previous BPF-only packet we were holding on to (set at
	 * the bottom of this function on an earlier call). */
	if (last_not_for_us) {
		m_freem(last_not_for_us);
		last_not_for_us = 0;
	}

#if NBPFILTER > 0
	/*
	 * Check for a BPF filter; if so, hand it up.
	 * Note that we have to stick an extra mbuf up front, because bpf_mtap
	 * expects to have the ether header at the front.
	 * It doesn't matter that this results in an ill-formatted mbuf chain,
	 * since BPF just looks at the data. (It doesn't try to free the mbuf,
	 * tho' it will make a copy for tcpdump.)
	 */
	if (bpf_gets_it) {
		/* Pass it up. */
		bpf_mtap_hdr(sc->sc_arpcom.ac_if.if_bpf, (caddr_t)&eh,
		    sizeof(eh), m, BPF_DIRECTION_IN);
	}
	/*
	 * A signal passed up from the filtering code indicating that the
	 * packet is intended for BPF but not for the protocol machinery.
	 * We can save a few cycles by not handing it off to them.
	 */
	if (bpf_gets_it == 2) {
		last_not_for_us = m;
		return;
	}
#endif /* NBPFILTER > 0 */

	/*
	 * In here there used to be code to check destination addresses upon
	 * receipt of a packet. We have deleted that code, and replaced it
	 * with code to check the address much earlier in the cycle, before
	 * copying the data in; this saves us valuable cycles when operating
	 * as a multicast router or when using BPF.
	 */

	/*
	 * Finally pass this packet up to higher layers.
	 */
	ether_input(&sc->sc_arpcom.ac_if, &eh, m);
}
1369:
1370: static void
1371: ie_drop_packet_buffer(sc)
1372: struct ie_softc *sc;
1373: {
1374: int i;
1375:
1376: do {
1377: /*
1378: * This means we are somehow out of sync. So, we reset the
1379: * adapter.
1380: */
1381: if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
1382: #ifdef IEDEBUG
1383: print_rbd(sc->rbuffs[sc->rbhead]);
1384: #endif
1385: log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
1386: sc->sc_dev.dv_xname, sc->rbhead);
1387: iereset(sc);
1388: return;
1389: }
1390:
1391: i = sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_LAST;
1392:
1393: sc->rbuffs[sc->rbhead]->ie_rbd_length |= IE_RBD_LAST;
1394: sc->rbuffs[sc->rbhead]->ie_rbd_actual = SWAP(0);
1395: sc->rbhead = (sc->rbhead + 1) % sc->nrxbuf;
1396: sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
1397: sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
1398: } while (!i);
1399: }
1400:
1401:
1402: /*
1403: * Start transmission on an interface.
1404: */
void
iestart(ifp)
	struct ifnet *ifp;
{
	struct ie_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	u_char *buffer;
	u_short len;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/* All transmit buffers in use: kick the chip if idle and back off. */
	if (sc->xmit_free == 0) {
		ifp->if_flags |= IFF_OACTIVE;
		if (!sc->xmit_busy)
			iexmit(sc);
		return;
	}

	/* Fill as many free transmit buffers as the send queue allows. */
	do {
		IF_DEQUEUE(&sc->sc_arpcom.ac_if.if_snd, m);
		if (!m)
			break;

		len = 0;
		buffer = sc->xmit_cbuffs[sc->xchead];

		/* Copy the mbuf chain into the card's transmit buffer. */
		for (m0 = m; m && (len +m->m_len) < IE_TBUF_SIZE;
		    m = m->m_next) {
			bcopy(mtod(m, caddr_t), buffer, m->m_len);
			buffer += m->m_len;
			len += m->m_len;
		}
		/*
		 * NOTE(review): the `<' above also rejects a chain whose
		 * last mbuf would make len + m_len exactly IE_TBUF_SIZE,
		 * and the truncated packet is still queued below --
		 * confirm both are intentional.
		 */
		if (m)
			printf("%s: tbuf overflow\n", sc->sc_dev.dv_xname);

		m_freem(m0);

		/* Zero-pad short frames to the minimum Ethernet length. */
		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN) {
			bzero(buffer, ETHER_MIN_LEN - ETHER_CRC_LEN - len);
			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
			buffer += ETHER_MIN_LEN - ETHER_CRC_LEN;
		}
		/* Stash the byte count; iexmit() ORs the flag bits in. */
		sc->xmit_buffs[sc->xchead]->ie_xmit_flags = SWAP(len);

		sc->xmit_free--;
		sc->xchead = (sc->xchead + 1) % NTXBUF;
	} while (sc->xmit_free > 0);

	/* If we stuffed any packets into the card's memory, send now. */
	if ((sc->xmit_free < NTXBUF) && (!sc->xmit_busy))
		iexmit(sc);

	return;
}
1460:
1461: /*
1462: * set up IE's ram space
1463: */
/*
 * Returns 1 on success, 0 when the chip never cleared the ISCP busy flag.
 */
int
ie_setupram(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_conf_ptr *scp;
	volatile struct ie_int_sys_conf_ptr *iscp;
	volatile struct ie_sys_ctl_block *scb;
	int s;

	s = splnet();

	/* Clear the three chip control structures in shared memory. */
	scp = sc->scp;
	(sc->memzero)((char *) scp, sizeof *scp);

	iscp = sc->iscp;
	(sc->memzero)((char *) iscp, sizeof *iscp);

	scb = sc->scb;
	(sc->memzero)((char *) scb, sizeof *scb);

	/* Build the SCP -> ISCP -> SCB linkage with 24-bit chip addresses. */
	scp->ie_bus_use = 0;	/* 16-bit */
	ST_24(scp->ie_iscp_ptr, MK_24(sc->sc_iobase, iscp));

	iscp->ie_busy = 1;	/* ie_busy == char */
	iscp->ie_scb_offset = MK_16(sc->sc_maddr, scb);
	ST_24(iscp->ie_base, MK_24(sc->sc_iobase, sc->sc_maddr));

	/* Reset the chip and get its attention; it clears ie_busy when
	 * initialization is complete. */
	(sc->reset_586) (sc);
	(sc->chan_attn) (sc);

	delay(100);		/* wait a while... */

	/* Chip never responded: report failure. */
	if (iscp->ie_busy) {
		splx(s);
		return 0;
	}
	/*
	 * Acknowledge any interrupts we may have caused...
	 */
	ie_ack(sc, IE_ST_WHENCE);
	splx(s);

	return 1;
}
1508:
/*
 * Hard-reset the interface: force it down, abort and stop the chip's
 * command and receive units, then bring it back up via ieioctl().
 */
void
iereset(sc)
	struct ie_softc *sc;
{
	int s = splnet();

	printf("%s: reset\n", sc->sc_dev.dv_xname);

	/* Clear OACTIVE in case we're called from watchdog (frozen xmit). */
	sc->sc_arpcom.ac_if.if_flags &= ~(IFF_UP | IFF_OACTIVE);
	ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, 0);

	/*
	 * Stop i82586 dead in its tracks.
	 */
	if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
		printf("%s: abort commands timed out\n", sc->sc_dev.dv_xname);

	if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
		printf("%s: disable commands timed out\n", sc->sc_dev.dv_xname);

#ifdef notdef
	if (!check_ie_present(sc, sc->sc_maddr, sc->sc_msize))
		panic("ie disappeared!");
#endif

	/* Mark it up again and reinitialize through the ioctl path. */
	sc->sc_arpcom.ac_if.if_flags |= IFF_UP;
	ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, 0);

	splx(s);
}
1540:
1541: /*
1542: * This is called if we time out.
1543: */
/*
 * Timeout callback for command_and_wait(): flag the spin loop to give up.
 */
static void
chan_attn_timeout(rock)
	void *rock;
{
	int *flag = (int *)rock;

	*flag = 1;
}
1551:
1552: /*
1553: * Send a command to the controller and wait for it to either complete
1554: * or be accepted, depending on the command. If the command pointer
1555: * is null, then pretend that the command is not an action command.
1556: * If the command pointer is not null, and the command is an action
1557: * command, wait for
1558: * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
1559: * to become true.
1560: */
/*
 * Returns nonzero if an action command timed out, 0 otherwise.
 */
static int
command_and_wait(sc, cmd, pcmd, mask)
	struct ie_softc *sc;
	int cmd;
	volatile void *pcmd;
	int mask;
{
	volatile struct ie_cmd_common *cc = pcmd;
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	volatile int timedout = 0;	/* set by chan_attn_timeout() */
	struct timeout chan_tmo;
	extern int hz;

	/* Post the command word in the SCB before waking the chip. */
	scb->ie_command = (u_short)cmd;

	if (IE_ACTION_COMMAND(cmd) && pcmd) {
		(sc->chan_attn)(sc);

		/*
		 * XXX
		 * I don't think this timeout works on suns.
		 * we are at splnet() in the loop, and the timeout
		 * stuff runs at software spl (so it is masked off?).
		 */

		/*
		 * According to the packet driver, the minimum timeout should
		 * be .369 seconds, which we round up to .4.
		 */
		timeout_set(&chan_tmo, chan_attn_timeout, (caddr_t)&timedout);
		timeout_add(&chan_tmo, 2 * hz / 5);

		/*
		 * Now spin-lock waiting for status. This is not a very nice
		 * thing to do, but I haven't figured out how, or indeed if, we
		 * can put the process waiting for action to sleep. (We may
		 * be getting called through some other timeout running in the
		 * kernel.)
		 */
		for (;;)
			if ((cc->ie_cmd_status & mask) || timedout)
				break;

		timeout_del(&chan_tmo);

		return timedout;
	} else {
		/*
		 * Otherwise, just wait for the command to be accepted.
		 * The chip clears ie_command when it has taken the command.
		 */
		(sc->chan_attn)(sc);

		while (scb->ie_command)
			;			/* XXX spin lock */

		return 0;
	}
}
1619:
1620: /*
1621: * Run the time-domain reflectometer.
1622: */
static void
run_tdr(sc, cmd)
	struct ie_softc *sc;
	struct ie_tdr_cmd *cmd;
{
	int result;

	/* Build a lone TDR action command. */
	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = IE_CMD_TDR | IE_CMD_LAST;
	cmd->com.ie_cmd_link = SWAP(0xffff);

	sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
	cmd->ie_tdr_time = SWAP(0);

	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    !(cmd->com.ie_cmd_status & IE_STAT_OK))
		result = 0x10000;	/* XXX out-of-band "command failed" marker */
	else
		/*
		 * NOTE(review): read without SWAP(), unlike the other chip
		 * fields in this file -- confirm the byte order is right.
		 */
		result = cmd->ie_tdr_time;

	ie_ack(sc, IE_ST_WHENCE);

	/* A successful TDR run needs no report. */
	if (result & IE_TDR_SUCCESS)
		return;

	/* Otherwise decode and log the failure. */
	if (result & 0x10000)
		printf("%s: TDR command failed\n", sc->sc_dev.dv_xname);
	else if (result & IE_TDR_XCVR)
		printf("%s: transceiver problem\n", sc->sc_dev.dv_xname);
	else if (result & IE_TDR_OPEN)
		printf("%s: TDR detected an open %d clocks away\n",
		    sc->sc_dev.dv_xname, result & IE_TDR_TIME);
	else if (result & IE_TDR_SHORT)
		printf("%s: TDR detected a short %d clocks away\n",
		    sc->sc_dev.dv_xname, result & IE_TDR_TIME);
	else
		printf("%s: TDR returned unknown status 0x%x\n",
		    sc->sc_dev.dv_xname, result);
}
1662:
1663: #ifdef notdef
1664: /* ALIGN works on 8 byte boundaries.... but 4 byte boundaries are ok for sun */
1665: #define _ALLOC(p, n) (bzero(p, n), p += n, p - n)
1666: #define ALLOC(p, n) _ALLOC(p, ALIGN(n)) /* XXX convert to this? */
1667: #endif
1668:
/*
 * Round a pointer up to the next 4-byte boundary.
 */
static __inline caddr_t
Align(ptr)
	caddr_t ptr;
{
	return (caddr_t)(((u_long)ptr + 3) & ~3L);
}
1678:
1679: /*
1680: * setup_bufs: set up the buffers
1681: *
1682: * we have a block of KVA at sc->buf_area which is of size sc->buf_area_sz.
1683: * this is to be used for the buffers. the chip indexs its control data
1684: * structures with 16 bit offsets, and it indexes actual buffers with
1685: * 24 bit addresses. so we should allocate control buffers first so that
1686: * we don't overflow the 16 bit offset field. The number of transmit
1687: * buffers is fixed at compile time.
1688: *
1689: * note: this function was written to be easy to understand, rather than
1690: * highly efficient (it isn't in the critical path).
1691: */
static void
setup_bufs(sc)
	struct ie_softc *sc;
{
	caddr_t ptr = sc->buf_area;	/* memory pool */
	int n, r;

	/*
	 * step 0: zero memory and figure out how many recv buffers and
	 * frames we can have. XXX CURRENTLY HARDWIRED AT MAX
	 */
	(sc->memzero)(ptr, sc->buf_area_sz);
	ptr = Align(ptr);	/* set alignment and stick with it */

	/* Per-transmit-buffer cost: command + buffer descriptor + data. */
	n = (int)Align((caddr_t) sizeof(struct ie_xmit_cmd)) +
	    (int)Align((caddr_t) sizeof(struct ie_xmit_buf)) + IE_TBUF_SIZE;
	n *= NTXBUF;		/* n = total size of xmit area */

	n = sc->buf_area_sz - n;/* n = free space for recv stuff */

	/* Per-receive-frame cost: frame descriptor + B_PER_F buffers. */
	r = (int)Align((caddr_t) sizeof(struct ie_recv_frame_desc)) +
	    (((int)Align((caddr_t) sizeof(struct ie_recv_buf_desc)) +
	    IE_RBUF_SIZE) * B_PER_F);

	/* r = size of one R frame */

	sc->nframes = n / r;
	if (sc->nframes <= 0)
		panic("ie: bogus buffer calc");
	if (sc->nframes > MXFRAMES)
		sc->nframes = MXFRAMES;

	sc->nrxbuf = sc->nframes * B_PER_F;

#ifdef IEDEBUG
	printf("IEDEBUG: %d frames %d bufs\n", sc->nframes, sc->nrxbuf);
#endif

	/*
	 * step 1a: lay out and zero frame data structures for transmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cmds[n] = (volatile struct ie_xmit_cmd *) ptr;
		ptr = Align(ptr + sizeof(struct ie_xmit_cmd));
	}

	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n] = (volatile struct ie_recv_frame_desc *) ptr;
		ptr = Align(ptr + sizeof(struct ie_recv_frame_desc));
	}

	/*
	 * step 1b: link together the recv frames and set EOL on last one
	 */
	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n]->ie_fd_next =
		    MK_16(sc->sc_maddr, sc->rframes[(n + 1) % sc->nframes]);
	}
	sc->rframes[sc->nframes - 1]->ie_fd_last |= IE_FD_LAST;

	/*
	 * step 2a: lay out and zero frame buffer structures for xmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_buffs[n] = (volatile struct ie_xmit_buf *) ptr;
		ptr = Align(ptr + sizeof(struct ie_xmit_buf));
	}

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n] = (volatile struct ie_recv_buf_desc *) ptr;
		ptr = Align(ptr + sizeof(struct ie_recv_buf_desc));
	}

	/*
	 * step 2b: link together recv bufs and set EOL on last one
	 */
	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n]->ie_rbd_next =
		    MK_16(sc->sc_maddr, sc->rbuffs[(n + 1) % sc->nrxbuf]);
	}
	sc->rbuffs[sc->nrxbuf - 1]->ie_rbd_length |= IE_RBD_LAST;

	/*
	 * step 3: allocate the actual data buffers for xmit and recv
	 * recv buffer gets linked into recv_buf_desc list here
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cbuffs[n] = (u_char *) ptr;
		ptr = Align(ptr + IE_TBUF_SIZE);
	}

	/* Pointers to last packet sent and next available transmit buffer. */
	sc->xchead = sc->xctail = 0;

	/* Clear transmit-busy flag and set number of free transmit buffers. */
	sc->xmit_busy = 0;
	sc->xmit_free = NTXBUF;

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->cbuffs[n] = (char *) ptr;	/* XXX why char vs uchar? */
		sc->rbuffs[n]->ie_rbd_length = SWAP(IE_RBUF_SIZE);
		ST_24(sc->rbuffs[n]->ie_rbd_buffer, MK_24(sc->sc_iobase, ptr));
		ptr = Align(ptr + IE_RBUF_SIZE);
	}

	/*
	 * step 4: set the head and tail pointers on receive to keep track of
	 * the order in which RFDs and RBDs are used. link in recv frames
	 * and buffer into the scb.
	 */

	sc->rfhead = 0;
	sc->rftail = sc->nframes - 1;
	sc->rbhead = 0;
	sc->rbtail = sc->nrxbuf - 1;

	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	sc->rframes[0]->ie_fd_buf_desc = MK_16(sc->sc_maddr, sc->rbuffs[0]);

#ifdef IEDEBUG
	printf("IE_DEBUG: reserved %d bytes\n", ptr - sc->buf_area);
#endif
}
1815:
1816: /*
1817: * Run the multicast setup command.
1818: * Called at splnet().
1819: */
/*
 * Returns 1 on success, 0 when the chip rejected or timed out the
 * MC-SETUP command.
 */
static int
mc_setup(sc, ptr)
	struct ie_softc *sc;
	void *ptr;
{
	volatile struct ie_mcast_cmd *cmd = ptr;

	/* Build a lone MC-SETUP action command. */
	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = IE_CMD_MCAST | IE_CMD_LAST;
	cmd->com.ie_cmd_link = SWAP(0xffff);

	/* Copy the address list gathered by mc_reset() into the command. */
	(sc->memcopy)((caddr_t)sc->mcast_addrs, (caddr_t)cmd->ie_mcast_addrs,
	    sc->mcast_count * sizeof *sc->mcast_addrs);

	/* The chip wants a byte count, not an address count. */
	cmd->ie_mcast_bytes =
	    SWAP(sc->mcast_count * ETHER_ADDR_LEN);	/* grrr... */

	sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
		printf("%s: multicast address setup command failed\n",
		    sc->sc_dev.dv_xname);
		return 0;
	}
	return 1;
}
1846:
1847: /*
1848: * This routine takes the environment generated by check_ie_present() and adds
1849: * to it all the other structures we need to operate the adapter. This
1850: * includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands, starting
1851: * the receiver unit, and clearing interrupts.
1852: *
1853: * THIS ROUTINE MUST BE CALLED AT splnet() OR HIGHER.
1854: */
/*
 * NOTE(review): this function returns 0 on every path, success or
 * failure alike; the callers visible in this file ignore the value.
 */
int
ieinit(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	void *ptr;

	/* Scratch space for the one-shot commands below. */
	ptr = sc->buf_area;

	/*
	 * Send the configure command first.
	 */
	{
		volatile struct ie_config_cmd *cmd = ptr;

		scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = IE_CMD_CONFIG | IE_CMD_LAST;
		cmd->com.ie_cmd_link = SWAP(0xffff);

		ie_setup_config(cmd, sc->promisc, 0);

		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
			printf("%s: configure command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}

	/*
	 * Now send the Individual Address Setup command.
	 */
	{
		volatile struct ie_iasetup_cmd *cmd = ptr;

		scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = IE_CMD_IASETUP | IE_CMD_LAST;
		cmd->com.ie_cmd_link = SWAP(0xffff);

		(sc->memcopy)(sc->sc_arpcom.ac_enaddr,
		    (caddr_t)&cmd->ie_address, sizeof cmd->ie_address);

		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
			printf("%s: individual address setup command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}

	/*
	 * Now run the time-domain reflectometer.
	 */
	run_tdr(sc, ptr);

	/*
	 * Acknowledge any interrupts we have generated thus far.
	 */
	ie_ack(sc, IE_ST_WHENCE);

	/*
	 * Set up the transmit and recv buffers.
	 */
	setup_bufs(sc);

	sc->sc_arpcom.ac_if.if_flags |= IFF_RUNNING;	/* tell higher levels that we are here */

	/* Hand the receive frame list to the chip and start the RU. */
	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	command_and_wait(sc, IE_RU_START, 0, 0);

	ie_ack(sc, IE_ST_WHENCE);

	/* Board-specific "go" hook, if the attachment provides one. */
	if (sc->run_586)
		(sc->run_586)(sc);

	return 0;
}
1934:
static void
iestop(sc)
	struct ie_softc *sc;
{

	/* Disable the receive unit; called when the interface is marked
	 * down (see ieioctl()). */
	command_and_wait(sc, IE_RU_DISABLE, 0, 0);
}
1942:
int
ieioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ie_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch(cmd) {

	case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;

		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ieinit(sc);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			ieinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		/* Remember the promiscuity bits for ie_setup_config(). */
		sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			iestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ieinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			iestop(sc);
			ieinit(sc);
		}
#ifdef IEDEBUG
		if (ifp->if_flags & IFF_DEBUG)
			sc->sc_debug = IED_ALL;
		else
			sc->sc_debug = 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom):
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				mc_reset(sc);
			error = 0;
		}
		break;

	default:
		error = ENOTTY;
	}
	splx(s);
	return error;
}
2030:
static void
mc_reset(sc)
	struct ie_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Step through the list of addresses, collecting them into
	 * sc->mcast_addrs for a later MC-SETUP command.
	 */
	sc->mcast_count = 0;
	ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
	while (enm) {
		/*
		 * Too many groups, or a range of addresses (lo != hi):
		 * fall back to receiving all multicasts via IFF_ALLMULTI
		 * and reinitialize through the ioctl path.
		 */
		if (sc->mcast_count >= MAXMCAST ||
		    bcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			sc->sc_arpcom.ac_if.if_flags |= IFF_ALLMULTI;
			ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, (void *)0);
			goto setflag;
		}

		bcopy(enm->enm_addrlo, &sc->mcast_addrs[sc->mcast_count], 6);
		sc->mcast_count++;
		ETHER_NEXT_MULTI(step, enm);
	}
setflag:
	/* Ask that mc_setup() be run to load the new list into the chip
	 * (its caller checks want_mcsetup -- not visible in this chunk). */
	sc->want_mcsetup = 1;
}
2058:
2059: #ifdef IEDEBUG
void
print_rbd(rbd)
	volatile struct ie_recv_buf_desc *rbd;
{

	/* Debug aid: dump every field of a receive buffer descriptor. */
	printf("RBD at %08lx:\nactual %04x, next %04x, buffer %08x\n"
	    "length %04x, mbz %04x\n", (u_long)rbd, rbd->ie_rbd_actual,
	    rbd->ie_rbd_next, rbd->ie_rbd_buffer, rbd->ie_rbd_length,
	    rbd->mbz);
}
2070: #endif
2071:
/*
 * Zero a region using at most 16-bit wide stores (apparently because
 * the board's shared memory wants halfword accesses -- a plain bzero()
 * may use wider stores).  Odd bytes at either end are handled with
 * single byte stores.
 */
void
wzero(vb, l)
	void *vb;
	size_t l;
{
	u_char *p, *end;
	u_short *wp, *wend;

	if (l == 0)
		return;

	p = vb;
	end = p + l;

	/* Leading odd byte, if any. */
	if ((u_long)p & 1)
		*p++ = 0;

	/* Trailing odd byte, if any. */
	if (p != end && ((u_long)end & 1) != 0)
		*--end = 0;

	/* Halfword-aligned middle. */
	wp = (u_short *)p;
	wend = (u_short *)end;
	while (wp != wend)
		*wp++ = 0;
}
2099:
/*
 * Copy a region using 16-bit wide reads of the source.  The source is
 * first aligned to a halfword boundary; when the destination ends up
 * misaligned, each halfword is stored as two bytes, high byte first
 * (assumes big-endian halfwords, i.e. sparc -- the little-endian case
 * would need the opposite order).
 */
void
wcopy(vb1, vb2, l)
	const void *vb1;
	void *vb2;
	size_t l;
{
	const u_char *src = vb1;
	const u_char *srcend;
	u_char *dst = vb2;
	u_short *wp;
	int odd_dst;

	if (l == 0)
		return;

	/* Align the source on a halfword boundary. */
	if ((u_long)src & 1) {
		*dst++ = *src++;
		l--;
	}

	/* Halfword middle; a trailing odd byte is left for the end. */
	wp = (u_short *)src;
	srcend = src + l;
	if (l & 1)
		srcend--;
	odd_dst = (u_long)dst & 1;

	for (; wp < (u_short *)srcend; wp++, dst += 2) {
		if (odd_dst) {
			dst[1] = *wp & 0xff;
			dst[0] = *wp >> 8;
		} else
			*((short *)dst) = *wp;
	}

	/* Trailing odd byte, if any. */
	if (l & 1)
		*dst = *srcend;
}
CVSweb