Annotation of sys/net/if_bridge.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: if_bridge.c,v 1.164 2007/05/28 17:16:39 henning Exp $ */
2:
3: /*
4: * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
5: * All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: *
16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19: * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24: * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25: * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26: * POSSIBILITY OF SUCH DAMAGE.
27: *
28: * Effort sponsored in part by the Defense Advanced Research Projects
29: * Agency (DARPA) and Air Force Research Laboratory, Air Force
30: * Materiel Command, USAF, under agreement number F30602-01-2-0537.
31: *
32: */
33:
34: #include "bpfilter.h"
35: #include "gif.h"
36: #include "pf.h"
37: #include "carp.h"
38:
39: #include <sys/param.h>
40: #include <sys/proc.h>
41: #include <sys/systm.h>
42: #include <sys/mbuf.h>
43: #include <sys/socket.h>
44: #include <sys/ioctl.h>
45: #include <sys/errno.h>
46: #include <sys/kernel.h>
47: #include <machine/cpu.h>
48:
49: #include <net/if.h>
50: #include <net/if_types.h>
51: #include <net/if_llc.h>
52: #include <net/route.h>
53: #include <net/netisr.h>
54:
55: /* for arc4random() */
56: #include <dev/rndvar.h>
57:
58: #ifdef INET
59: #include <netinet/in.h>
60: #include <netinet/in_systm.h>
61: #include <netinet/in_var.h>
62: #include <netinet/ip.h>
63: #include <netinet/ip_var.h>
64: #include <netinet/if_ether.h>
65: #include <netinet/ip_icmp.h>
66: #endif
67:
68: #ifdef IPSEC
69: #include <netinet/ip_ipsp.h>
70:
71: #include <net/if_enc.h>
72: #endif
73:
74: #ifdef INET6
75: #include <netinet/ip6.h>
76: #include <netinet6/ip6_var.h>
77: #endif
78:
79: #if NPF > 0
80: #include <net/pfvar.h>
81: #define BRIDGE_IN PF_IN
82: #define BRIDGE_OUT PF_OUT
83: #else
84: #define BRIDGE_IN 0
85: #define BRIDGE_OUT 1
86: #endif
87:
88: #if NBPFILTER > 0
89: #include <net/bpf.h>
90: #endif
91:
92: #include <net/if_vlan_var.h>
93:
94: #if NCARP > 0
95: #include <netinet/ip_carp.h>
96: #endif
97:
98: #include <net/if_bridge.h>
99:
100: /*
101: * Maximum number of addresses to cache
102: */
103: #ifndef BRIDGE_RTABLE_MAX
104: #define BRIDGE_RTABLE_MAX 100
105: #endif
106:
107: /*
108: * Timeout (in seconds) for entries learned dynamically
109: */
110: #ifndef BRIDGE_RTABLE_TIMEOUT
111: #define BRIDGE_RTABLE_TIMEOUT 240
112: #endif
113:
114: void bridgeattach(int);
115: int bridge_ioctl(struct ifnet *, u_long, caddr_t);
116: void bridge_start(struct ifnet *);
117: void bridgeintr_frame(struct bridge_softc *, struct mbuf *);
118: void bridge_broadcast(struct bridge_softc *, struct ifnet *,
119: struct ether_header *, struct mbuf *);
120: void bridge_span(struct bridge_softc *, struct ether_header *,
121: struct mbuf *);
122: void bridge_stop(struct bridge_softc *);
123: void bridge_init(struct bridge_softc *);
124: int bridge_bifconf(struct bridge_softc *, struct ifbifconf *);
125:
126: void bridge_timer(void *);
127: int bridge_rtfind(struct bridge_softc *, struct ifbaconf *);
128: void bridge_rtage(struct bridge_softc *);
129: void bridge_rttrim(struct bridge_softc *);
130: int bridge_rtdaddr(struct bridge_softc *, struct ether_addr *);
131: int bridge_rtflush(struct bridge_softc *, int);
132: struct ifnet * bridge_rtupdate(struct bridge_softc *,
133: struct ether_addr *, struct ifnet *ifp, int, u_int8_t);
134: struct ifnet * bridge_rtlookup(struct bridge_softc *,
135: struct ether_addr *);
136: u_int32_t bridge_hash(struct bridge_softc *, struct ether_addr *);
137: int bridge_blocknonip(struct ether_header *, struct mbuf *);
138: int bridge_addrule(struct bridge_iflist *,
139: struct ifbrlreq *, int out);
140: int bridge_flushrule(struct bridge_iflist *);
141: int bridge_brlconf(struct bridge_softc *, struct ifbrlconf *);
142: u_int8_t bridge_filterrule(struct brl_head *, struct ether_header *,
143: struct mbuf *);
144: #if NPF > 0
145: struct mbuf *bridge_filter(struct bridge_softc *, int, struct ifnet *,
146: struct ether_header *, struct mbuf *m);
147: #endif
148: int bridge_ifenqueue(struct bridge_softc *, struct ifnet *, struct mbuf *);
149: void bridge_fragment(struct bridge_softc *, struct ifnet *,
150: struct ether_header *, struct mbuf *);
151: #ifdef INET
152: void bridge_send_icmp_err(struct bridge_softc *, struct ifnet *,
153: struct ether_header *, struct mbuf *, int, struct llc *, int, int, int);
154: #endif
155: #ifdef IPSEC
156: int bridge_ipsec(struct bridge_softc *, struct ifnet *,
157: struct ether_header *, int, struct llc *,
158: int, int, int, struct mbuf *);
159: #define ICMP_DEFLEN MHLEN
160: #endif
161: int bridge_clone_create(struct if_clone *, int);
162: int bridge_clone_destroy(struct ifnet *ifp);
163: int bridge_delete(struct bridge_softc *, struct bridge_iflist *);
164:
165: #define ETHERADDR_IS_IP_MCAST(a) \
166: /* struct etheraddr *a; */ \
167: ((a)->ether_addr_octet[0] == 0x01 && \
168: (a)->ether_addr_octet[1] == 0x00 && \
169: (a)->ether_addr_octet[2] == 0x5e)
170:
171: LIST_HEAD(, bridge_softc) bridge_list;
172:
173: struct if_clone bridge_cloner =
174: IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
175:
176: /* ARGSUSED */
/*
 * bridgeattach() -- one-time pseudo-device attach at boot: initialize
 * the global list of bridge instances, register the "bridge" interface
 * cloner, and hand the unit count to the spanning-tree code.
 */
177: void
178: bridgeattach(int n)
179: {
180: LIST_INIT(&bridge_list);
181: if_clone_attach(&bridge_cloner);
182: bstp_attach(n);
183: }
184:
/*
 * bridge_clone_create() -- create and attach a new bridge(4) instance
 * ("bridgeN").  Allocates the softc, creates the STP state, seeds the
 * route-table hash key with arc4random(), fills in the ifnet callbacks
 * and attaches the interface.  Returns 0 or ENOMEM.
 */
185: int
186: bridge_clone_create(struct if_clone *ifc, int unit)
187: {
188: struct bridge_softc *sc;
189: struct ifnet *ifp;
190: int i, s;
191:
/* M_NOWAIT: may be called from contexts that must not sleep. */
192: sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT)
193: if (!sc)
194: return (ENOMEM);
195: bzero(sc, sizeof(*sc));
196:
197: sc->sc_stp = bstp_create(&sc->sc_if);
198: if (!sc->sc_stp) {
199: free(sc, M_DEVBUF);
200: return (ENOMEM);
201: }
202:
/* Default cache limit and dynamic-entry timeout (see top-of-file defines). */
203: sc->sc_brtmax = BRIDGE_RTABLE_MAX;
204: sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
205: timeout_set(&sc->sc_brtimeout, bridge_timer, sc);
206: LIST_INIT(&sc->sc_iflist);
207: LIST_INIT(&sc->sc_spanlist);
208: for (i = 0; i < BRIDGE_RTABLE_SIZE; i++)
209: LIST_INIT(&sc->sc_rts[i]);
/* Random key makes the address-hash unpredictable to remote senders. */
210: sc->sc_hashkey = arc4random();
211: ifp = &sc->sc_if;
212: snprintf(ifp->if_xname, sizeof ifp->if_xname, "%s%d", ifc->ifc_name,
213: unit);
214: ifp->if_softc = sc;
215: ifp->if_mtu = ETHERMTU;
216: ifp->if_ioctl = bridge_ioctl;
217: ifp->if_output = bridge_output;
218: ifp->if_start = bridge_start;
219: ifp->if_type = IFT_BRIDGE;
220: ifp->if_hdrlen = ETHER_HDR_LEN;
221: IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
222: IFQ_SET_READY(&ifp->if_snd);
223:
224: if_attach(ifp);
225: if_alloc_sadl(ifp);
226:
227: #if NBPFILTER > 0
228: bpfattach(&sc->sc_if.if_bpf, ifp,
229: DLT_EN10MB, ETHER_HDR_LEN);
230: #endif
231:
/* Global list is walked from bridgeintr(); guard insertion at splnet. */
232: s = splnet();
233: LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
234: splx(s);
235:
236: return (0);
237: }
238:
/*
 * bridge_clone_destroy() -- tear down a bridge instance: stop it,
 * flush the entire route table, detach every member and span port,
 * unlink from the global list, destroy STP state and free the softc.
 * Always returns 0.
 */
239: int
240: bridge_clone_destroy(struct ifnet *ifp)
241: {
242: struct bridge_softc *sc = ifp->if_softc;
243: struct bridge_iflist *bif;
244: int s;
245:
246: bridge_stop(sc);
247: bridge_rtflush(sc, IFBF_FLUSHALL);
/* bridge_delete() unlinks and frees each member entry. */
248: while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
249: bridge_delete(sc, bif);
/* Span ports have no promisc/STP state; just unlink and free. */
250: while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) {
251: LIST_REMOVE(bif, next);
252: free(bif, M_DEVBUF);
253: }
254:
255: s = splnet();
256: LIST_REMOVE(sc, sc_list);
257: splx(s);
258:
259: bstp_destroy(sc->sc_stp);
260: if_detach(ifp);
261:
262: free(sc, M_DEVBUF);
263: return (0);
264: }
265:
/*
 * bridge_delete() -- remove member port 'p' from bridge 'sc': drop its
 * STP state if any, clear the back-pointer, turn off promiscuous mode,
 * unlink it, purge its learned addresses and filter rules, and free it.
 * Returns the ifpromisc() result (the only call here that can fail).
 */
266: int
267: bridge_delete(struct bridge_softc *sc, struct bridge_iflist *p)
268: {
269: int error;
270:
271: if (p->bif_flags & IFBIF_STP)
272: bstp_delete(p->bif_stp);
273:
274: p->ifp->if_bridge = NULL;
275: error = ifpromisc(p->ifp, 0);
276:
277: LIST_REMOVE(p, next);
278: bridge_rtdelete(sc, p->ifp, 0);
279: bridge_flushrule(p);
280: free(p, M_DEVBUF);
281:
/* Port is gone regardless; 'error' only reports the promisc toggle. */
282: return (error);
283: }
284:
/*
 * bridge_ioctl() -- ioctl handler for a bridge interface.  The whole
 * switch runs at splnet.  'data' is interpreted per-command via the
 * aliased request pointers below.  Commands that change state require
 * superuser (suser()).  On success (error == 0) the command is also
 * offered to bstp_ioctl(), which implements the STP-parameter
 * get/set commands that this switch only privilege-checks.
 */
285: int
286: bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
287: {
288: struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
289: struct ifbreq *req = (struct ifbreq *)data;
290: struct ifbareq *bareq = (struct ifbareq *)data;
291: struct ifbrparam *bparam = (struct ifbrparam *)data;
292: struct ifbrlreq *brlreq = (struct ifbrlreq *)data;
293: struct ifbropreq *brop = (struct ifbropreq *)data;
294: struct ifnet *ifs;
295: struct bridge_iflist *p;
296: struct bstp_port *bp;
297: struct bstp_state *bs = sc->sc_stp;
298: int error = 0, s;
299:
300: s = splnet();
301: switch (cmd) {
/* Add a member interface to the bridge. */
302: case SIOCBRDGADD:
303: if ((error = suser(curproc, 0)) != 0)
304: break;
305:
306: ifs = ifunit(req->ifbr_ifsname);
307: if (ifs == NULL) { /* no such interface */
308: error = ENOENT;
309: break;
310: }
311: if (ifs->if_bridge == (caddr_t)sc) {
312: error = EEXIST;
313: break;
314: }
315: if (ifs->if_bridge != NULL) {
316: error = EBUSY;
317: break;
318: }
319:
320: /* If it's in the span list, it can't be a member. */
321: LIST_FOREACH(p, &sc->sc_spanlist, next)
322: if (p->ifp == ifs)
323: break;
324:
325: if (p != LIST_END(&sc->sc_spanlist)) {
326: error = EBUSY;
327: break;
328: }
329:
/* Only Ethernet (and gif, if configured) interfaces can be bridged. */
330: if (ifs->if_type == IFT_ETHER) {
331: if ((ifs->if_flags & IFF_UP) == 0) {
332: struct ifreq ifreq;
333:
334: /*
335: * Bring interface up long enough to set
336: * promiscuous flag, then shut it down again.
337: */
338: strlcpy(ifreq.ifr_name, req->ifbr_ifsname,
339: IFNAMSIZ);
340: ifs->if_flags |= IFF_UP;
341: ifreq.ifr_flags = ifs->if_flags;
342: error = (*ifs->if_ioctl)(ifs, SIOCSIFFLAGS,
343: (caddr_t)&ifreq);
344: if (error != 0)
345: break;
346:
347: error = ifpromisc(ifs, 1);
348: if (error != 0)
349: break;
350:
351: strlcpy(ifreq.ifr_name, req->ifbr_ifsname,
352: IFNAMSIZ);
353: ifs->if_flags &= ~IFF_UP;
354: ifreq.ifr_flags = ifs->if_flags;
355: error = (*ifs->if_ioctl)(ifs, SIOCSIFFLAGS,
356: (caddr_t)&ifreq);
357: if (error != 0) {
/* Undo promisc on failure to leave the interface unchanged. */
358: ifpromisc(ifs, 0);
359: break;
360: }
361: } else {
362: error = ifpromisc(ifs, 1);
363: if (error != 0)
364: break;
365: }
366: }
367: #if NGIF > 0
368: else if (ifs->if_type == IFT_GIF) {
369: /* Nothing needed */
370: }
371: #endif /* NGIF */
372: else {
373: error = EINVAL;
374: break;
375: }
376:
377: p = (struct bridge_iflist *)malloc(
378: sizeof(struct bridge_iflist), M_DEVBUF, M_NOWAIT);
379: if (p == NULL) {
380: if (ifs->if_type == IFT_ETHER)
381: ifpromisc(ifs, 0);
382: error = ENOMEM;
383: break;
384: }
385: bzero(p, sizeof(struct bridge_iflist));
386:
/* New members learn addresses and forward unicast by default. */
387: p->ifp = ifs;
388: p->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
389: SIMPLEQ_INIT(&p->bif_brlin);
390: SIMPLEQ_INIT(&p->bif_brlout);
391: ifs->if_bridge = (caddr_t)sc;
392: LIST_INSERT_HEAD(&sc->sc_iflist, p, next);
393: break;
/* Remove a member interface by name. */
394: case SIOCBRDGDEL:
395: if ((error = suser(curproc, 0)) != 0)
396: break;
397:
398: LIST_FOREACH(p, &sc->sc_iflist, next) {
399: if (strncmp(p->ifp->if_xname, req->ifbr_ifsname,
400: sizeof(p->ifp->if_xname)) == 0) {
401: error = bridge_delete(sc, p);
/* NULL marks "found and deleted" for the check below. */
402: p = NULL;
403: break;
404: }
405: }
406: if (p != NULL && p == LIST_END(&sc->sc_iflist)) {
407: error = ENOENT;
408: break;
409: }
410: break;
411: case SIOCBRDGIFS:
412: error = bridge_bifconf(sc, (struct ifbifconf *)data);
413: break;
/* Add a span (monitor) port; it mirrors traffic but is not a member. */
414: case SIOCBRDGADDS:
415: if ((error = suser(curproc, 0)) != 0)
416: break;
417: ifs = ifunit(req->ifbr_ifsname);
418: if (ifs == NULL) { /* no such interface */
419: error = ENOENT;
420: break;
421: }
422: if (ifs->if_bridge == (caddr_t)sc) {
423: error = EEXIST;
424: break;
425: }
426: if (ifs->if_bridge != NULL) {
427: error = EBUSY;
428: break;
429: }
430: LIST_FOREACH(p, &sc->sc_spanlist, next) {
431: if (p->ifp == ifs)
432: break;
433: }
434: if (p != LIST_END(&sc->sc_spanlist)) {
435: error = EBUSY;
436: break;
437: }
438: p = (struct bridge_iflist *)malloc(
439: sizeof(struct bridge_iflist), M_DEVBUF, M_NOWAIT);
440: if (p == NULL) {
441: error = ENOMEM;
442: break;
443: }
444: bzero(p, sizeof(struct bridge_iflist));
445: p->ifp = ifs;
446: p->bif_flags = IFBIF_SPAN;
447: SIMPLEQ_INIT(&p->bif_brlin);
448: SIMPLEQ_INIT(&p->bif_brlout);
449: LIST_INSERT_HEAD(&sc->sc_spanlist, p, next);
450: break;
451: case SIOCBRDGDELS:
452: if ((error = suser(curproc, 0)) != 0)
453: break;
454: LIST_FOREACH(p, &sc->sc_spanlist, next) {
455: if (strncmp(p->ifp->if_xname, req->ifbr_ifsname,
456: sizeof(p->ifp->if_xname)) == 0) {
457: LIST_REMOVE(p, next);
458: free(p, M_DEVBUF);
459: break;
460: }
461: }
462: if (p == LIST_END(&sc->sc_spanlist)) {
463: error = ENOENT;
464: break;
465: }
466: break;
/* Report per-member flags and, if STP is on, the port's STP status. */
467: case SIOCBRDGGIFFLGS:
468: ifs = ifunit(req->ifbr_ifsname);
469: if (ifs == NULL) {
470: error = ENOENT;
471: break;
472: }
473: if ((caddr_t)sc != ifs->if_bridge) {
474: error = ESRCH;
475: break;
476: }
477: LIST_FOREACH(p, &sc->sc_iflist, next) {
478: if (p->ifp == ifs)
479: break;
480: }
481: if (p == LIST_END(&sc->sc_iflist)) {
482: error = ESRCH;
483: break;
484: }
485: req->ifbr_ifsflags = p->bif_flags;
486: req->ifbr_portno = p->ifp->if_index & 0xfff;
487: if (p->bif_flags & IFBIF_STP) {
488: bp = p->bif_stp;
489: req->ifbr_state = bstp_getstate(bs, bp);
490: req->ifbr_priority = bp->bp_priority;
491: req->ifbr_path_cost = bp->bp_path_cost;
492: req->ifbr_proto = bp->bp_protover;
493: req->ifbr_role = bp->bp_role;
494: req->ifbr_stpflags = bp->bp_flags;
495: req->ifbr_fwd_trans = bp->bp_forward_transitions;
496: req->ifbr_desg_bridge = bp->bp_desg_pv.pv_dbridge_id;
497: req->ifbr_desg_port = bp->bp_desg_pv.pv_dport_id;
498: req->ifbr_root_bridge = bp->bp_desg_pv.pv_root_id;
499: req->ifbr_root_cost = bp->bp_desg_pv.pv_cost;
500: req->ifbr_root_port = bp->bp_desg_pv.pv_port_id;
501:
502: /* Copy STP state options as flags */
503: if (bp->bp_operedge)
504: req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
505: if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
506: req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
507: if (bp->bp_ptp_link)
508: req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
509: if (bp->bp_flags & BSTP_PORT_AUTOPTP)
510: req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
511: }
512: break;
/* Set per-member flags; enabling/disabling IFBIF_STP adds/removes the
 * corresponding STP port. */
513: case SIOCBRDGSIFFLGS:
514: if ((error = suser(curproc, 0)) != 0)
515: break;
516: ifs = ifunit(req->ifbr_ifsname);
517: if (ifs == NULL) {
518: error = ENOENT;
519: break;
520: }
521: if ((caddr_t)sc != ifs->if_bridge) {
522: error = ESRCH;
523: break;
524: }
525: LIST_FOREACH(p, &sc->sc_iflist, next) {
526: if (p->ifp == ifs)
527: break;
528: }
529: if (p == LIST_END(&sc->sc_iflist)) {
530: error = ESRCH;
531: break;
532: }
/* Read-only flag bits may not be set from userland. */
533: if (req->ifbr_ifsflags & IFBIF_RO_MASK) {
534: error = EINVAL;
535: break;
536: }
537: if (req->ifbr_ifsflags & IFBIF_STP) {
538: if ((p->bif_flags & IFBIF_STP) == 0) {
539: /* Enable STP */
540: if ((p->bif_stp = bstp_add(sc->sc_stp,
541: p->ifp)) == NULL) {
542: error = ENOMEM;
543: break;
544: }
545: } else {
546: /* Update STP flags */
547: bstp_ifsflags(p->bif_stp, req->ifbr_ifsflags);
548: }
549: } else if (p->bif_flags & IFBIF_STP) {
550: bstp_delete(p->bif_stp);
551: p->bif_stp = NULL;
552: }
553: p->bif_flags = req->ifbr_ifsflags;
554: break;
555: case SIOCBRDGRTS:
556: error = bridge_rtfind(sc, (struct ifbaconf *)data);
557: break;
558: case SIOCBRDGFLUSH:
559: if ((error = suser(curproc, 0)) != 0)
560: break;
561:
562: error = bridge_rtflush(sc, req->ifbr_ifsflags);
563: break;
/* Install a static address entry on a member port. */
564: case SIOCBRDGSADDR:
565: if ((error = suser(curproc, 0)) != 0)
566: break;
567:
568: ifs = ifunit(bareq->ifba_ifsname);
569: if (ifs == NULL) { /* no such interface */
570: error = ENOENT;
571: break;
572: }
573:
574: if (ifs->if_bridge == NULL ||
575: ifs->if_bridge != (caddr_t)sc) {
576: error = ESRCH;
577: break;
578: }
579:
/* setflag=1: mark entry with the caller-supplied flags (static). */
580: ifs = bridge_rtupdate(sc, &bareq->ifba_dst, ifs, 1,
581: bareq->ifba_flags);
582: if (ifs == NULL)
583: error = ENOMEM;
584: break;
585: case SIOCBRDGDADDR:
586: if ((error = suser(curproc, 0)) != 0)
587: break;
588: error = bridge_rtdaddr(sc, &bareq->ifba_dst);
589: break;
590: case SIOCBRDGGCACHE:
591: bparam->ifbrp_csize = sc->sc_brtmax;
592: break;
593: case SIOCBRDGSCACHE:
594: if ((error = suser(curproc, 0)) != 0)
595: break;
596: sc->sc_brtmax = bparam->ifbrp_csize;
597: bridge_rttrim(sc);
598: break;
/* Set cache timeout (seconds); bounded so timeout_add ticks can't
 * overflow.  0 disables the aging timer. */
599: case SIOCBRDGSTO:
600: if ((error = suser(curproc, 0)) != 0)
601: break;
602: if (bparam->ifbrp_ctime < 0 ||
603: bparam->ifbrp_ctime > INT_MAX / hz) {
604: error = EINVAL;
605: break;
606: }
607: sc->sc_brttimeout = bparam->ifbrp_ctime;
608: timeout_del(&sc->sc_brtimeout);
609: if (bparam->ifbrp_ctime != 0)
610: timeout_add(&sc->sc_brtimeout, sc->sc_brttimeout * hz);
611: break;
612: case SIOCBRDGGTO:
613: bparam->ifbrp_ctime = sc->sc_brttimeout;
614: break;
615: case SIOCSIFFLAGS:
616: if ((ifp->if_flags & IFF_UP) == IFF_UP)
617: bridge_init(sc);
618:
619: if ((ifp->if_flags & IFF_UP) == 0)
620: bridge_stop(sc);
621:
622: break;
/* Add a layer-2 filter rule (in and/or out) to a member port. */
623: case SIOCBRDGARL:
624: if ((error = suser(curproc, 0)) != 0)
625: break;
626: ifs = ifunit(brlreq->ifbr_ifsname);
627: if (ifs == NULL) {
628: error = ENOENT;
629: break;
630: }
631: if (ifs->if_bridge == NULL ||
632: ifs->if_bridge != (caddr_t)sc) {
633: error = ESRCH;
634: break;
635: }
636: LIST_FOREACH(p, &sc->sc_iflist, next) {
637: if (p->ifp == ifs)
638: break;
639: }
640: if (p == LIST_END(&sc->sc_iflist)) {
641: error = ESRCH;
642: break;
643: }
644: if ((brlreq->ifbr_action != BRL_ACTION_BLOCK &&
645: brlreq->ifbr_action != BRL_ACTION_PASS) ||
646: (brlreq->ifbr_flags & (BRL_FLAG_IN|BRL_FLAG_OUT)) == 0) {
647: error = EINVAL;
648: break;
649: }
650: if (brlreq->ifbr_flags & BRL_FLAG_IN) {
651: error = bridge_addrule(p, brlreq, 0);
652: if (error)
653: break;
654: }
655: if (brlreq->ifbr_flags & BRL_FLAG_OUT) {
656: error = bridge_addrule(p, brlreq, 1);
657: if (error)
658: break;
659: }
660: break;
661: case SIOCBRDGFRL:
662: if ((error = suser(curproc, 0)) != 0)
663: break;
664: ifs = ifunit(brlreq->ifbr_ifsname);
665: if (ifs == NULL) {
666: error = ENOENT;
667: break;
668: }
669: if (ifs->if_bridge == NULL ||
670: ifs->if_bridge != (caddr_t)sc) {
671: error = ESRCH;
672: break;
673: }
674: LIST_FOREACH(p, &sc->sc_iflist, next) {
675: if (p->ifp == ifs)
676: break;
677: }
678: if (p == LIST_END(&sc->sc_iflist)) {
679: error = ESRCH;
680: break;
681: }
682: error = bridge_flushrule(p);
683: break;
684: case SIOCBRDGGRL:
685: error = bridge_brlconf(sc, (struct ifbrlconf *)data);
686: break;
/* Report bridge-wide STP operational parameters. */
687: case SIOCBRDGGPARAM:
688: if ((bp = bs->bs_root_port) == NULL)
689: brop->ifbop_root_port = 0;
690: else
691: brop->ifbop_root_port = bp->bp_ifp->if_index;
692: brop->ifbop_maxage = bs->bs_bridge_max_age >> 8;
693: brop->ifbop_hellotime = bs->bs_bridge_htime >> 8;
694: brop->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
695: brop->ifbop_holdcount = bs->bs_txholdcount;
696: brop->ifbop_priority = bs->bs_bridge_priority;
697: brop->ifbop_protocol = bs->bs_protover;
698: brop->ifbop_root_bridge = bs->bs_root_pv.pv_root_id;
699: brop->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
700: brop->ifbop_root_port = bs->bs_root_pv.pv_port_id;
701: brop->ifbop_desg_bridge = bs->bs_root_pv.pv_dbridge_id;
702: brop->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
703: brop->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
704: break;
/* STP get commands: handled entirely by bstp_ioctl() below. */
705: case SIOCBRDGGPRI:
706: case SIOCBRDGGMA:
707: case SIOCBRDGGHT:
708: case SIOCBRDGGFD:
709: break;
/* STP set commands: privilege check here, work done in bstp_ioctl(). */
710: case SIOCBRDGSPRI:
711: case SIOCBRDGSFD:
712: case SIOCBRDGSMA:
713: case SIOCBRDGSHT:
714: case SIOCBRDGSTXHC:
715: case SIOCBRDGSPROTO:
716: case SIOCBRDGSIFPRIO:
717: case SIOCBRDGSIFCOST:
718: error = suser(curproc, 0);
719: break;
720: default:
721: error = EINVAL;
722: break;
723: }
724:
725: if (!error)
726: error = bstp_ioctl(ifp, cmd, data);
727:
728: splx(s);
729: return (error);
730: }
731:
732: /* Detach an interface from a bridge. */
/*
 * Called when a member interface is going away underneath us: find its
 * bridge_iflist entry, unlink it, purge its learned addresses and
 * filter rules, free the entry, and clear the interface's back-pointer.
 * No-op (falls off the loop) if ifp is not on the member list.
 */
733: void
734: bridge_ifdetach(struct ifnet *ifp)
735: {
736: struct bridge_softc *sc = (struct bridge_softc *)ifp->if_bridge;
737: struct bridge_iflist *bif;
738:
739: LIST_FOREACH(bif, &sc->sc_iflist, next)
740: if (bif->ifp == ifp) {
741: LIST_REMOVE(bif, next);
742: bridge_rtdelete(sc, ifp, 0);
743: bridge_flushrule(bif);
744: free(bif, M_DEVBUF);
745: ifp->if_bridge = NULL;
746: break;
747: }
748: }
749:
/*
 * bridge_update() -- refresh (or, if 'delete' is nonzero, just remove)
 * the table entry for source address 'ea' as seen on member 'ifp'.
 * Ignores multicast and all-zero addresses, only acts when the port has
 * IFBIF_LEARNING set, and skips ports that STP has put in the
 * discarding state.
 */
750: void
751: bridge_update(struct ifnet *ifp, struct ether_addr *ea, int delete)
752: {
753: struct bridge_softc *sc = (struct bridge_softc *)ifp->if_bridge;
754: struct bridge_iflist *bif;
755: u_int8_t *addr;
756:
757: addr = (u_int8_t *)ea;
758:
759: LIST_FOREACH(bif, &sc->sc_iflist, next)
760: if (bif->ifp == ifp) {
761: /*
762: * Update the bridge interface if it is in
763: * the learning state.
764: */
765: if ((bif->bif_flags & IFBIF_LEARNING) &&
766: (ETHER_IS_MULTICAST(addr) == 0) &&
767: !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0 &&
768: addr[3] == 0 && addr[4] == 0 && addr[5] == 0)) {
769: /* Care must be taken with spanning tree */
770: if ((bif->bif_flags & IFBIF_STP) &&
771: (bif->bif_state == BSTP_IFSTATE_DISCARDING))
772: return;
773:
774: /* Delete the address from the bridge */
775: bridge_rtdaddr(sc, ea);
776:
777: if (!delete) {
778: /* Update the bridge table */
/* setflag=0: re-learn as a dynamic entry. */
779: bridge_rtupdate(sc, ea, ifp, 0,
780: IFBAF_DYNAMIC);
781: }
782: }
783: return;
784: }
785: }
786:
/*
 * bridge_bifconf() -- SIOCBRDGIFS backend: copy one ifbreq per member
 * and span port out to userland.  If the caller passes ifbic_len == 0
 * this is a size probe: only the total count is returned (encoded as
 * len = total * sizeof(ifbreq)).  Copying stops early when the
 * caller's buffer space runs out.
 *
 * NOTE(review): if the scratch malloc fails we jump to done with
 * error == 0 and i == 0, so the caller sees "success, zero entries" --
 * indistinguishable from an empty bridge.
 */
787: int
788: bridge_bifconf(struct bridge_softc *sc, struct ifbifconf *bifc)
789: {
790: struct bridge_iflist *p;
791: struct bstp_port *bp;
792: struct bstp_state *bs = sc->sc_stp;
793: u_int32_t total = 0, i = 0;
794: int error = 0;
795: struct ifbreq *breq = NULL;
796:
797: LIST_FOREACH(p, &sc->sc_iflist, next)
798: total++;
799:
800: LIST_FOREACH(p, &sc->sc_spanlist, next)
801: total++;
802:
803: if (bifc->ifbic_len == 0) {
804: i = total;
805: goto done;
806: }
807:
808: if ((breq = (struct ifbreq *)
809: malloc(sizeof(*breq), M_DEVBUF, M_NOWAIT)) == NULL)
810: goto done;
811:
812: LIST_FOREACH(p, &sc->sc_iflist, next) {
813: bzero(breq, sizeof(*breq));
814: if (bifc->ifbic_len < sizeof(*breq))
815: break;
816: strlcpy(breq->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
817: strlcpy(breq->ifbr_ifsname, p->ifp->if_xname, IFNAMSIZ);
818: breq->ifbr_ifsflags = p->bif_flags;
819: breq->ifbr_portno = p->ifp->if_index & 0xfff;
820: if (p->bif_flags & IFBIF_STP) {
821: bp = p->bif_stp;
822: breq->ifbr_state = bstp_getstate(sc->sc_stp, bp);
823: breq->ifbr_priority = bp->bp_priority;
824: breq->ifbr_path_cost = bp->bp_path_cost;
825: breq->ifbr_proto = bp->bp_protover;
826: breq->ifbr_role = bp->bp_role;
827: breq->ifbr_stpflags = bp->bp_flags;
828: breq->ifbr_fwd_trans = bp->bp_forward_transitions;
829: breq->ifbr_root_bridge = bs->bs_root_pv.pv_root_id;
830: breq->ifbr_root_cost = bs->bs_root_pv.pv_cost;
831: breq->ifbr_root_port = bs->bs_root_pv.pv_port_id;
832: breq->ifbr_desg_bridge = bs->bs_root_pv.pv_dbridge_id;
833: breq->ifbr_desg_port = bs->bs_root_pv.pv_dport_id;
834:
835: /* Copy STP state options as flags */
836: if (bp->bp_operedge)
837: breq->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
838: if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
839: breq->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
840: if (bp->bp_ptp_link)
841: breq->ifbr_ifsflags |= IFBIF_BSTP_PTP;
842: if (bp->bp_flags & BSTP_PORT_AUTOPTP)
843: breq->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
844: }
845: error = copyout((caddr_t)breq,
846: (caddr_t)(bifc->ifbic_req + i), sizeof(*breq));
847: if (error)
848: goto done;
849: i++;
850: bifc->ifbic_len -= sizeof(*breq);
851: }
/* Span ports are reported with IFBIF_SPAN forced on. */
852: LIST_FOREACH(p, &sc->sc_spanlist, next) {
853: bzero(breq, sizeof(*breq));
854: if (bifc->ifbic_len < sizeof(*breq))
855: break;
856: strlcpy(breq->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
857: strlcpy(breq->ifbr_ifsname, p->ifp->if_xname, IFNAMSIZ);
858: breq->ifbr_ifsflags = p->bif_flags | IFBIF_SPAN;
859: breq->ifbr_portno = p->ifp->if_index & 0xfff;
860: error = copyout((caddr_t)breq,
861: (caddr_t)(bifc->ifbic_req + i), sizeof(*breq));
862: if (error)
863: goto done;
864: i++;
865: bifc->ifbic_len -= sizeof(*breq);
866: }
867:
868: done:
869: if (breq != NULL)
870: free(breq, M_DEVBUF);
/* Report back how many entries (or the probe total) were produced. */
871: bifc->ifbic_len = i * sizeof(*breq);
872: return (error);
873: }
874:
/*
 * bridge_brlconf() -- SIOCBRDGGRL backend: copy the layer-2 filter
 * rules (input list first, then output list) of one member port out to
 * userland as ifbrlreq records.  As with bridge_bifconf(),
 * ifbrl_len == 0 is a size probe and the final ifbrl_len encodes how
 * many records were produced.
 */
875: int
876: bridge_brlconf(struct bridge_softc *sc, struct ifbrlconf *bc)
877: {
878: struct ifnet *ifp;
879: struct bridge_iflist *ifl;
880: struct brl_node *n;
881: struct ifbrlreq req;
882: int error = 0;
883: u_int32_t i = 0, total = 0;
884:
885: ifp = ifunit(bc->ifbrl_ifsname);
886: if (ifp == NULL)
887: return (ENOENT);
888: if (ifp->if_bridge == NULL || ifp->if_bridge != (caddr_t)sc)
889: return (ESRCH);
890: LIST_FOREACH(ifl, &sc->sc_iflist, next) {
891: if (ifl->ifp == ifp)
892: break;
893: }
894: if (ifl == LIST_END(&sc->sc_iflist))
895: return (ESRCH);
896:
/* First pass: count rules for the size-probe case. */
897: SIMPLEQ_FOREACH(n, &ifl->bif_brlin, brl_next) {
898: total++;
899: }
900: SIMPLEQ_FOREACH(n, &ifl->bif_brlout, brl_next) {
901: total++;
902: }
903:
904: if (bc->ifbrl_len == 0) {
905: i = total;
906: goto done;
907: }
908:
909: SIMPLEQ_FOREACH(n, &ifl->bif_brlin, brl_next) {
910: bzero(&req, sizeof req);
911: if (bc->ifbrl_len < sizeof(req))
912: goto done;
913: strlcpy(req.ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
914: strlcpy(req.ifbr_ifsname, ifl->ifp->if_xname, IFNAMSIZ);
915: req.ifbr_action = n->brl_action;
916: req.ifbr_flags = n->brl_flags;
917: req.ifbr_src = n->brl_src;
918: req.ifbr_dst = n->brl_dst;
919: #if NPF > 0
/* Translate the numeric pf tag back to its name for userland. */
920: req.ifbr_tagname[0] = '\0';
921: if (n->brl_tag)
922: pf_tag2tagname(n->brl_tag, req.ifbr_tagname);
923: #endif
924: error = copyout((caddr_t)&req,
925: (caddr_t)(bc->ifbrl_buf + (i * sizeof(req))), sizeof(req));
926: if (error)
927: goto done;
928: i++;
929: bc->ifbrl_len -= sizeof(req);
930: }
931:
932: SIMPLEQ_FOREACH(n, &ifl->bif_brlout, brl_next) {
933: bzero(&req, sizeof req);
934: if (bc->ifbrl_len < sizeof(req))
935: goto done;
936: strlcpy(req.ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
937: strlcpy(req.ifbr_ifsname, ifl->ifp->if_xname, IFNAMSIZ);
938: req.ifbr_action = n->brl_action;
939: req.ifbr_flags = n->brl_flags;
940: req.ifbr_src = n->brl_src;
941: req.ifbr_dst = n->brl_dst;
942: #if NPF > 0
943: req.ifbr_tagname[0] = '\0';
944: if (n->brl_tag)
945: pf_tag2tagname(n->brl_tag, req.ifbr_tagname);
946: #endif
947: error = copyout((caddr_t)&req,
948: (caddr_t)(bc->ifbrl_buf + (i * sizeof(req))), sizeof(req));
949: if (error)
950: goto done;
951: i++;
952: bc->ifbrl_len -= sizeof(req);
953: }
954:
955: done:
956: bc->ifbrl_len = i * sizeof(req);
957: return (error);
958: }
959:
/*
 * bridge_init() -- mark the bridge running, (re)start spanning tree,
 * and arm the address-cache aging timer (unless the timeout is 0,
 * meaning aging is disabled).  Idempotent: returns early if already
 * running.
 */
960: void
961: bridge_init(struct bridge_softc *sc)
962: {
963: struct ifnet *ifp = &sc->sc_if;
964:
965: if ((ifp->if_flags & IFF_RUNNING) == IFF_RUNNING)
966: return;
967:
968: ifp->if_flags |= IFF_RUNNING;
969: bstp_initialization(sc->sc_stp);
970:
971: if (sc->sc_brttimeout != 0)
972: timeout_add(&sc->sc_brtimeout, sc->sc_brttimeout * hz);
973: }
974:
975: /*
976: * Stop the bridge: cancel the aging timer, flush the dynamically
977: * learned route entries (static ones survive), and clear IFF_RUNNING.
978: */
979: void
980: bridge_stop(struct bridge_softc *sc)
981: {
982: struct ifnet *ifp = &sc->sc_if;
983:
984: /*
985: * If we're not running, there's nothing to do.
986: */
987: if ((ifp->if_flags & IFF_RUNNING) == 0)
988: return;
989:
990: timeout_del(&sc->sc_brtimeout);
991:
992: bridge_rtflush(sc, IFBF_FLUSHDYN);
993:
994: ifp->if_flags &= ~IFF_RUNNING;
995: }
995:
996: /*
997: * Send output from the bridge. The mbuf has the ethernet header
998: * already attached. We must enqueue or free the mbuf before exiting.
999: */
/*
 * Unicast with a known destination goes out one port; broadcast,
 * multicast, or unknown destinations are flooded to every eligible
 * member (skipping !IFF_RUNNING ports, STP-discarding ports, and
 * non-DISCOVER ports for unicast floods).  If the bridge itself is
 * down the frame falls through to the original interface.
 */
1000: int
1001: bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
1002: struct rtentry *rt)
1003: {
1004: struct ether_header *eh;
1005: struct ifnet *dst_if;
1006: struct ether_addr *src, *dst;
1007: struct bridge_softc *sc;
1008: int s, error, len;
1009: #ifdef IPSEC
1010: struct m_tag *mtag;
1011: #endif /* IPSEC */
1012:
1013: /* ifp must be a member interface of the bridge. */
1014: sc = (struct bridge_softc *)ifp->if_bridge;
1015: if (sc == NULL) {
1016: m_freem(m);
1017: return (EINVAL);
1018: }
1019:
/* Make sure the Ethernet header is contiguous before dereferencing. */
1020: if (m->m_len < sizeof(*eh)) {
1021: m = m_pullup(m, sizeof(*eh));
1022: if (m == NULL)
1023: return (ENOBUFS);
1024: }
1025: eh = mtod(m, struct ether_header *);
1026: dst = (struct ether_addr *)&eh->ether_dhost[0];
1027: src = (struct ether_addr *)&eh->ether_shost[0];
1028:
1029: s = splnet();
1030:
1031: /*
1032: * If bridge is down, but original output interface is up,
1033: * go ahead and send out that interface. Otherwise the packet
1034: * is dropped below.
1035: */
1036: if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1037: dst_if = ifp;
1038: goto sendunicast;
1039: }
1040:
1041: /*
1042: * If the packet is a broadcast or we don't know a better way to
1043: * get there, send to all interfaces.
1044: */
1045: dst_if = bridge_rtlookup(sc, dst);
1046: if (dst_if == NULL || ETHER_IS_MULTICAST(eh->ether_dhost)) {
1047: struct bridge_iflist *p;
1048: struct mbuf *mc;
1049: int used = 0;
1050:
1051: #ifdef IPSEC
1052: /*
1053: * Don't send out the packet if IPsec is needed, and
1054: * notify IPsec to do its own crypto for now.
1055: */
1056: if ((mtag = m_tag_find(m, PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED,
1057: NULL)) != NULL) {
1058: ipsp_skipcrypto_unmark((struct tdb_ident *)(mtag + 1));
1059: m_freem(m);
1060: splx(s);
1061: return (0);
1062: }
1063: #endif /* IPSEC */
1064:
1065: /* Catch packets that need TCP/UDP/IP hardware checksumming */
/* Flooding copies can't rely on any one NIC's csum offload; drop. */
1066: if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT ||
1067: m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT ||
1068: m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) {
1069: m_freem(m);
1070: splx(s);
1071: return (0);
1072: }
1073:
1074: bridge_span(sc, NULL, m);
1075:
1076: LIST_FOREACH(p, &sc->sc_iflist, next) {
1077: dst_if = p->ifp;
1078: if ((dst_if->if_flags & IFF_RUNNING) == 0)
1079: continue;
1080:
1081: /*
1082: * If this is not the original output interface,
1083: * and the interface is participating in spanning
1084: * tree, make sure the port is in a state that
1085: * allows forwarding.
1086: */
1087: if (dst_if != ifp &&
1088: (p->bif_flags & IFBIF_STP) &&
1089: (p->bif_state == BSTP_IFSTATE_DISCARDING))
1090: continue;
1091:
1092: if ((p->bif_flags & IFBIF_DISCOVER) == 0 &&
1093: (m->m_flags & (M_BCAST | M_MCAST)) == 0)
1094: continue;
1095:
1096: #ifdef ALTQ
1097: if (ALTQ_IS_ENABLED(&dst_if->if_snd) == 0)
1098: #endif
1099: if (IF_QFULL(&dst_if->if_snd)) {
1100: IF_DROP(&dst_if->if_snd);
1101: sc->sc_if.if_oerrors++;
1102: continue;
1103: }
/* Last member gets the original mbuf; earlier ones get copies. */
1104: if (LIST_NEXT(p, next) == LIST_END(&sc->sc_iflist)) {
1105: used = 1;
1106: mc = m;
1107: } else {
1108: struct mbuf *m1, *m2, *mx;
1109:
/* Deep-copy header and payload separately, then rejoin, so each
 * port gets a writable copy of the Ethernet header. */
1110: m1 = m_copym2(m, 0, ETHER_HDR_LEN,
1111: M_DONTWAIT);
1112: if (m1 == NULL) {
1113: sc->sc_if.if_oerrors++;
1114: continue;
1115: }
1116: m2 = m_copym2(m, ETHER_HDR_LEN,
1117: M_COPYALL, M_DONTWAIT);
1118: if (m2 == NULL) {
1119: m_freem(m1);
1120: sc->sc_if.if_oerrors++;
1121: continue;
1122: }
1123:
1124: for (mx = m1; mx->m_next != NULL; mx = mx->m_next)
1125: /*EMPTY*/;
1126: mx->m_next = m2;
1127:
1128: if (m1->m_flags & M_PKTHDR) {
1129: len = 0;
1130: for (mx = m1; mx != NULL; mx = mx->m_next)
1131: len += mx->m_len;
1132: m1->m_pkthdr.len = len;
1133: }
1134: mc = m1;
1135: }
1136:
1137: error = bridge_ifenqueue(sc, dst_if, mc);
1138: if (error)
1139: continue;
1140: }
/* If no member consumed the original (e.g. all were skipped), free it. */
1141: if (!used)
1142: m_freem(m);
1143: splx(s);
1144: return (0);
1145: }
1146:
1147: sendunicast:
1148: bridge_span(sc, NULL, m);
1149: if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1150: m_freem(m);
1151: splx(s);
1152: return (ENETDOWN);
1153: }
1154: bridge_ifenqueue(sc, dst_if, m);
1155: splx(s);
1156: return (0);
1157: }
1158:
1159: /*
1160: * Start output on the bridge. This function should never be called.
1161: */
void
bridge_start(struct ifnet *ifp)
{
	/*
	 * Intentionally empty: bridge frames are placed on sc_if.if_snd
	 * directly and drained by bridgeintr(), never via if_start.
	 */
}
1166:
1167: /*
1168: * Loop through each bridge interface and process their input queues.
1169: */
void
bridgeintr(void)
{
	struct bridge_softc *sc;
	struct mbuf *m;
	int s;

	/* Walk every configured bridge and drain its pseudo-device queue. */
	LIST_FOREACH(sc, &bridge_list, sc_list) {
		while (sc->sc_if.if_snd.ifq_head) {
			/*
			 * Dequeue under splnet: the same queue is filled
			 * from bridge_input()/output at network interrupt.
			 */
			s = splnet();
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			splx(s);
			if (m == NULL)
				break;
			bridgeintr_frame(sc, m);
		}
	}
}
1188:
1189: /*
1190: * Process a single frame. Frame must be freed or queued before returning.
1191: */
void
bridgeintr_frame(struct bridge_softc *sc, struct mbuf *m)
{
	int s, len;
	struct ifnet *src_if, *dst_if;
	struct bridge_iflist *ifl;
	struct ether_addr *dst, *src;
	struct ether_header eh;

	/* Drop everything while the bridge is administratively down. */
	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		return;
	}

	src_if = m->m_pkthdr.rcvif;

#if NBPFILTER > 0
	if (sc->sc_if.if_bpf)
		bpf_mtap(sc->sc_if.if_bpf, m, BPF_DIRECTION_IN);
#endif

	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;

	/* Find the member descriptor for the receiving interface. */
	LIST_FOREACH(ifl, &sc->sc_iflist, next)
		if (ifl->ifp == src_if)
			break;

	/* Receiving interface is no longer a bridge member: drop. */
	if (ifl == LIST_END(&sc->sc_iflist)) {
		m_freem(m);
		return;
	}

	/* STP: never process frames received on a discarding port. */
	if ((ifl->bif_flags & IFBIF_STP) &&
	    (ifl->bif_state == BSTP_IFSTATE_DISCARDING)) {
		m_freem(m);
		return;
	}

	/* Frame must contain at least a full Ethernet header. */
	if (m->m_pkthdr.len < sizeof(eh)) {
		m_freem(m);
		return;
	}
	m_copydata(m, 0, ETHER_HDR_LEN, (caddr_t)&eh);
	dst = (struct ether_addr *)&eh.ether_dhost[0];
	src = (struct ether_addr *)&eh.ether_shost[0];

	/*
	 * If interface is learning, and if source address
	 * is not broadcast or multicast (low bit of first octet)
	 * and not all-zero, record its address.
	 */
	if ((ifl->bif_flags & IFBIF_LEARNING) &&
	    (eh.ether_shost[0] & 1) == 0 &&
	    !(eh.ether_shost[0] == 0 && eh.ether_shost[1] == 0 &&
	    eh.ether_shost[2] == 0 && eh.ether_shost[3] == 0 &&
	    eh.ether_shost[4] == 0 && eh.ether_shost[5] == 0))
		bridge_rtupdate(sc, src, src_if, 0, IFBAF_DYNAMIC);

	/* STP learning state: learn (above) but do not forward. */
	if ((ifl->bif_flags & IFBIF_STP) &&
	    (ifl->bif_state == BSTP_IFSTATE_LEARNING)) {
		m_freem(m);
		return;
	}

	/*
	 * At this point, the port either doesn't participate in stp or
	 * it's in the forwarding state
	 */

	/*
	 * If packet is unicast, destined for someone on "this"
	 * side of the bridge, drop it.
	 */
	if ((m->m_flags & (M_BCAST | M_MCAST)) == 0) {
		dst_if = bridge_rtlookup(sc, dst);
		if (dst_if == src_if) {
			m_freem(m);
			return;
		}
	} else
		dst_if = NULL;

	/*
	 * Multicast packets get handled a little differently:
	 * If interface is:
	 *	-link0,-link1	(default) Forward all multicast
	 *			as broadcast.
	 *	-link0,link1	Drop non-IP multicast, forward
	 *			as broadcast IP multicast.
	 *	link0,-link1	Drop IP multicast, forward as
	 *			broadcast non-IP multicast.
	 *	link0,link1	Drop all multicast.
	 */
	if (m->m_flags & M_MCAST) {
		if ((sc->sc_if.if_flags &
		    (IFF_LINK0 | IFF_LINK1)) ==
		    (IFF_LINK0 | IFF_LINK1)) {
			m_freem(m);
			return;
		}
		if (sc->sc_if.if_flags & IFF_LINK0 &&
		    ETHERADDR_IS_IP_MCAST(dst)) {
			m_freem(m);
			return;
		}
		if (sc->sc_if.if_flags & IFF_LINK1 &&
		    !ETHERADDR_IS_IP_MCAST(dst)) {
			m_freem(m);
			return;
		}
	}

	/* Optionally block anything that is not IP/ARP (see bridge_blocknonip). */
	if (ifl->bif_flags & IFBIF_BLOCKNONIP && bridge_blocknonip(&eh, m)) {
		m_freem(m);
		return;
	}

	/* Apply the receiving member's input rule list. */
	if (bridge_filterrule(&ifl->bif_brlin, &eh, m) == BRL_ACTION_BLOCK) {
		m_freem(m);
		return;
	}
#if NPF > 0
	/* pf on the inbound leg; may consume or replace the mbuf. */
	m = bridge_filter(sc, BRIDGE_IN, src_if, &eh, m);
	if (m == NULL)
		return;
#endif
	/*
	 * If the packet is a multicast or broadcast OR if we don't
	 * know any better, forward it to all interfaces.
	 */
	if ((m->m_flags & (M_BCAST | M_MCAST)) || dst_if == NULL) {
		sc->sc_if.if_imcasts++;
		s = splnet();
		bridge_broadcast(sc, src_if, &eh, m);
		splx(s);
		return;
	}

	/*
	 * At this point, we're dealing with a unicast frame going to a
	 * different interface
	 */
	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		return;
	}
	/* Look up the outgoing member for its STP state and rules. */
	LIST_FOREACH(ifl, &sc->sc_iflist, next) {
		if (ifl->ifp == dst_if)
			break;
	}
	if (ifl == LIST_END(&sc->sc_iflist)) {
		m_freem(m);
		return;
	}
	if ((ifl->bif_flags & IFBIF_STP) &&
	    (ifl->bif_state == BSTP_IFSTATE_DISCARDING)) {
		m_freem(m);
		return;
	}
	/* Apply the transmitting member's output rule list. */
	if (bridge_filterrule(&ifl->bif_brlout, &eh, m) == BRL_ACTION_BLOCK) {
		m_freem(m);
		return;
	}
#if NPF > 0
	m = bridge_filter(sc, BRIDGE_OUT, dst_if, &eh, m);
	if (m == NULL)
		return;
#endif

	/* Fragment when the IP payload exceeds the destination MTU. */
	len = m->m_pkthdr.len;
	if ((len - ETHER_HDR_LEN) > dst_if->if_mtu)
		bridge_fragment(sc, dst_if, &eh, m);
	else {
		s = splnet();
		bridge_ifenqueue(sc, dst_if, m);
		splx(s);
	}
}
1370:
1371: /*
1372: * Receive input from an interface. Queue the packet for bridging if its
1373: * not for us, and schedule an interrupt.
1374: */
struct mbuf *
bridge_input(struct ifnet *ifp, struct ether_header *eh, struct mbuf *m)
{
	struct bridge_softc *sc;
	int s;
	struct bridge_iflist *ifl, *srcifl;
	struct arpcom *ac;
	struct mbuf *mc;

	/*
	 * Make sure this interface is a bridge member.
	 */
	if (ifp == NULL || ifp->if_bridge == NULL || m == NULL)
		return (m);

	if ((m->m_flags & M_PKTHDR) == 0)
		panic("bridge_input(): no HDR");

	m->m_flags &= ~M_PROTO1;	/* Loop prevention */

	sc = (struct bridge_softc *)ifp->if_bridge;
	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
		return (m);

	/* Locate the receiving interface's member descriptor. */
	LIST_FOREACH(ifl, &sc->sc_iflist, next) {
		if (ifl->ifp == ifp)
			break;
	}
	if (ifl == LIST_END(&sc->sc_iflist))
		return (m);

	bridge_span(sc, eh, m);

	if (m->m_flags & (M_BCAST | M_MCAST)) {
		/* Tap off 802.1D packets, they do not get forwarded */
		if (bcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
			m = bstp_input(sc->sc_stp, ifl->bif_stp, eh, m);
			if (m == NULL)
				return (NULL);
		}

		/*
		 * No need to queue frames for ifs in the discarding state
		 */
		if ((ifl->bif_flags & IFBIF_STP) &&
		    (ifl->bif_state == BSTP_IFSTATE_DISCARDING))
			return (m);

		/*
		 * make a copy of 'm' with 'eh' tacked on to the
		 * beginning.  Return 'm' for local processing
		 * and enqueue the copy.  Schedule netisr.
		 */
		mc = m_copym2(m, 0, M_COPYALL, M_NOWAIT);
		if (mc == NULL)
			return (m);
		/* M_PREPEND may replace (or NULL out) mc on failure. */
		M_PREPEND(mc, ETHER_HDR_LEN, M_DONTWAIT);
		if (mc == NULL)
			return (m);
		bcopy(eh, mtod(mc, caddr_t), ETHER_HDR_LEN);
		s = splnet();
		if (IF_QFULL(&sc->sc_if.if_snd)) {
			m_freem(mc);
			splx(s);
			return (m);
		}
		IF_ENQUEUE(&sc->sc_if.if_snd, mc);
		splx(s);
		schednetisr(NETISR_BRIDGE);
		/*
		 * NOTE(review): for gif members the frame is re-injected
		 * into the first Ethernet member via ether_input() --
		 * presumably so the local stack still sees it; confirm.
		 */
		if (ifp->if_type == IFT_GIF) {
			LIST_FOREACH(ifl, &sc->sc_iflist, next) {
				if (ifl->ifp->if_type == IFT_ETHER)
					break;
			}
			if (ifl != LIST_END(&sc->sc_iflist)) {
				m->m_flags |= M_PROTO1;
				m->m_pkthdr.rcvif = ifl->ifp;
				ether_input(ifl->ifp, eh, m);
				m = NULL;
			}
		}
		return (m);
	}

	/*
	 * No need to queue frames for ifs in the discarding state
	 */
	if ((ifl->bif_flags & IFBIF_STP) &&
	    (ifl->bif_state == BSTP_IFSTATE_DISCARDING))
		return (m);

	/*
	 * Unicast, make sure it's not for us.
	 */
	srcifl = ifl;
	LIST_FOREACH(ifl, &sc->sc_iflist, next) {
		if (ifl->ifp->if_type != IFT_ETHER)
			continue;
		ac = (struct arpcom *)ifl->ifp;
		/* Destined to one of our members' (or carp) addresses? */
		if (bcmp(ac->ac_enaddr, eh->ether_dhost, ETHER_ADDR_LEN) == 0
#if NCARP > 0
		    || (ifl->ifp->if_carp && carp_ourether(ifl->ifp->if_carp,
			eh, IFT_ETHER, 0) != NULL)
#endif
		    ) {
			if (srcifl->bif_flags & IFBIF_LEARNING)
				bridge_rtupdate(sc,
				    (struct ether_addr *)&eh->ether_shost,
				    ifp, 0, IFBAF_DYNAMIC);
			if (bridge_filterrule(&srcifl->bif_brlin, eh, m) ==
			    BRL_ACTION_BLOCK) {
				m_freem(m);
				return (NULL);
			}
			/* Deliver locally on the member that owns the address. */
			m->m_pkthdr.rcvif = ifl->ifp;
			if (ifp->if_type == IFT_GIF) {
				m->m_flags |= M_PROTO1;
				ether_input(ifl->ifp, eh, m);
				m = NULL;
			}
			return (m);
		}
		/* Frames claiming one of our own source addresses: drop. */
		if (bcmp(ac->ac_enaddr, eh->ether_shost, ETHER_ADDR_LEN) == 0
#if NCARP > 0
		    || (ifl->ifp->if_carp && carp_ourether(ifl->ifp->if_carp,
			eh, IFT_ETHER, 1) != NULL)
#endif
		    ) {
			m_freem(m);
			return (NULL);
		}
	}
	/* Not for us: restore the header and queue for bridgeintr(). */
	M_PREPEND(m, ETHER_HDR_LEN, M_DONTWAIT);
	if (m == NULL)
		return (NULL);
	bcopy(eh, mtod(m, caddr_t), ETHER_HDR_LEN);
	s = splnet();
	if (IF_QFULL(&sc->sc_if.if_snd)) {
		m_freem(m);
		splx(s);
		return (NULL);
	}
	IF_ENQUEUE(&sc->sc_if.if_snd, m);
	splx(s);
	schednetisr(NETISR_BRIDGE);
	return (NULL);
}
1522:
1523: /*
1524: * Send a frame to all interfaces that are members of the bridge
1525: * (except the one it came in on). This code assumes that it is
1526: * running at splnet or higher.
1527: */
void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *m)
{
	struct bridge_iflist *p;
	struct mbuf *mc;
	struct ifnet *dst_if;
	int len = m->m_pkthdr.len, used = 0;

	splassert(IPL_NET);

	LIST_FOREACH(p, &sc->sc_iflist, next) {
		/*
		 * Don't retransmit out of the same interface where
		 * the packet was received from.
		 */
		dst_if = p->ifp;
		if (dst_if->if_index == ifp->if_index)
			continue;

		/* Skip ports blocked by spanning tree. */
		if ((p->bif_flags & IFBIF_STP) &&
		    (p->bif_state == BSTP_IFSTATE_DISCARDING))
			continue;

		/* Unknown-unicast flooding only goes to DISCOVER ports. */
		if ((p->bif_flags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST | M_MCAST)) == 0)
			continue;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			continue;

#ifdef ALTQ
		if (ALTQ_IS_ENABLED(&dst_if->if_snd) == 0)
#endif
		if (IF_QFULL(&dst_if->if_snd)) {
			IF_DROP(&dst_if->if_snd);
			sc->sc_if.if_oerrors++;
			continue;
		}

		/* Drop non-IP frames if the appropriate flag is set. */
		if (p->bif_flags & IFBIF_BLOCKNONIP &&
		    bridge_blocknonip(eh, m))
			continue;

		if (bridge_filterrule(&p->bif_brlout, eh, m) == BRL_ACTION_BLOCK)
			continue;

		/* If last one, reuse the passed-in mbuf */
		if (LIST_NEXT(p, next) == LIST_END(&sc->sc_iflist)) {
			mc = m;
			used = 1;
		} else {
			struct mbuf *m1, *m2, *mx;

			/*
			 * Copy header and payload separately, then splice
			 * them back together.  NOTE(review): m_copym2 is
			 * presumably used to obtain a writable copy --
			 * confirm against its definition.
			 */
			m1 = m_copym2(m, 0, ETHER_HDR_LEN,
			    M_DONTWAIT);
			if (m1 == NULL) {
				sc->sc_if.if_oerrors++;
				continue;
			}
			m2 = m_copym2(m, ETHER_HDR_LEN,
			    M_COPYALL, M_DONTWAIT);
			if (m2 == NULL) {
				m_freem(m1);
				sc->sc_if.if_oerrors++;
				continue;
			}

			for (mx = m1; mx->m_next != NULL; mx = mx->m_next)
				/*EMPTY*/;
			mx->m_next = m2;

			if (m1->m_flags & M_PKTHDR) {
				/* Recompute pkthdr length of the spliced
				 * chain; this 'len' shadows the outer one. */
				int len = 0;

				for (mx = m1; mx != NULL; mx = mx->m_next)
					len += mx->m_len;
				m1->m_pkthdr.len = len;
			}
			mc = m1;
		}

#if NPF > 0
		mc = bridge_filter(sc, BRIDGE_OUT, dst_if, eh, mc);
		if (mc == NULL)
			continue;
#endif

		/* Fragment when larger than the member's MTU. */
		if ((len - ETHER_HDR_LEN) > dst_if->if_mtu)
			bridge_fragment(sc, dst_if, eh, mc);
		else {
			bridge_ifenqueue(sc, dst_if, mc);
		}
	}

	/* If no port consumed the original mbuf, release it. */
	if (!used)
		m_freem(m);
}
1627:
/*
 * Send a copy of the frame to every configured span port.  'eh' may be
 * NULL when the frame already carries its Ethernet header.
 */
void
bridge_span(struct bridge_softc *sc, struct ether_header *eh,
    struct mbuf *morig)
{
	struct bridge_iflist *p;
	struct ifnet *ifp;
	struct mbuf *mc, *m;
	int error;

	/* Nothing to do without span ports. */
	if (LIST_EMPTY(&sc->sc_spanlist))
		return;

	/* Work on a private copy; the original continues through the bridge. */
	m = m_copym2(morig, 0, M_COPYALL, M_NOWAIT);
	if (m == NULL)
		return;
	if (eh != NULL) {
		/* Re-attach the Ethernet header stripped by the caller.
		 * M_PREPEND may NULL out 'm' on allocation failure. */
		M_PREPEND(m, ETHER_HDR_LEN, M_DONTWAIT);
		if (m == NULL)
			return;
		bcopy(eh, mtod(m, caddr_t), ETHER_HDR_LEN);
	}

	LIST_FOREACH(p, &sc->sc_spanlist, next) {
		ifp = p->ifp;

		if ((ifp->if_flags & IFF_RUNNING) == 0)
			continue;

#ifdef ALTQ
		if (ALTQ_IS_ENABLED(&ifp->if_snd) == 0)
#endif
		if (IF_QFULL(&ifp->if_snd)) {
			IF_DROP(&ifp->if_snd);
			sc->sc_if.if_oerrors++;
			continue;
		}

		/* One copy per span port; 'm' itself is freed below. */
		mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
		if (mc == NULL) {
			sc->sc_if.if_oerrors++;
			continue;
		}

		error = bridge_ifenqueue(sc, ifp, mc);
		if (error)
			continue;
	}
	m_freem(m);
}
1677:
1678: struct ifnet *
1679: bridge_rtupdate(struct bridge_softc *sc, struct ether_addr *ea,
1680: struct ifnet *ifp, int setflags, u_int8_t flags)
1681: {
1682: struct bridge_rtnode *p, *q;
1683: u_int32_t h;
1684: int dir;
1685:
1686: h = bridge_hash(sc, ea);
1687: p = LIST_FIRST(&sc->sc_rts[h]);
1688: if (p == LIST_END(&sc->sc_rts[h])) {
1689: if (sc->sc_brtcnt >= sc->sc_brtmax)
1690: goto done;
1691: p = (struct bridge_rtnode *)malloc(
1692: sizeof(struct bridge_rtnode), M_DEVBUF, M_NOWAIT);
1693: if (p == NULL)
1694: goto done;
1695:
1696: bcopy(ea, &p->brt_addr, sizeof(p->brt_addr));
1697: p->brt_if = ifp;
1698: p->brt_age = 1;
1699:
1700: if (setflags)
1701: p->brt_flags = flags;
1702: else
1703: p->brt_flags = IFBAF_DYNAMIC;
1704:
1705: LIST_INSERT_HEAD(&sc->sc_rts[h], p, brt_next);
1706: sc->sc_brtcnt++;
1707: goto want;
1708: }
1709:
1710: do {
1711: q = p;
1712: p = LIST_NEXT(p, brt_next);
1713:
1714: dir = memcmp(ea, &q->brt_addr, sizeof(q->brt_addr));
1715: if (dir == 0) {
1716: if (setflags) {
1717: q->brt_if = ifp;
1718: q->brt_flags = flags;
1719: } else if (!(q->brt_flags & IFBAF_STATIC))
1720: q->brt_if = ifp;
1721:
1722: if (q->brt_if == ifp)
1723: q->brt_age = 1;
1724: ifp = q->brt_if;
1725: goto want;
1726: }
1727:
1728: if (dir > 0) {
1729: if (sc->sc_brtcnt >= sc->sc_brtmax)
1730: goto done;
1731: p = (struct bridge_rtnode *)malloc(
1732: sizeof(struct bridge_rtnode), M_DEVBUF, M_NOWAIT);
1733: if (p == NULL)
1734: goto done;
1735:
1736: bcopy(ea, &p->brt_addr, sizeof(p->brt_addr));
1737: p->brt_if = ifp;
1738: p->brt_age = 1;
1739:
1740: if (setflags)
1741: p->brt_flags = flags;
1742: else
1743: p->brt_flags = IFBAF_DYNAMIC;
1744:
1745: LIST_INSERT_BEFORE(q, p, brt_next);
1746: sc->sc_brtcnt++;
1747: goto want;
1748: }
1749:
1750: if (p == LIST_END(&sc->sc_rts[h])) {
1751: if (sc->sc_brtcnt >= sc->sc_brtmax)
1752: goto done;
1753: p = (struct bridge_rtnode *)malloc(
1754: sizeof(struct bridge_rtnode), M_DEVBUF, M_NOWAIT);
1755: if (p == NULL)
1756: goto done;
1757:
1758: bcopy(ea, &p->brt_addr, sizeof(p->brt_addr));
1759: p->brt_if = ifp;
1760: p->brt_age = 1;
1761:
1762: if (setflags)
1763: p->brt_flags = flags;
1764: else
1765: p->brt_flags = IFBAF_DYNAMIC;
1766: LIST_INSERT_AFTER(q, p, brt_next);
1767: sc->sc_brtcnt++;
1768: goto want;
1769: }
1770: } while (p != LIST_END(&sc->sc_rts[h]));
1771:
1772: done:
1773: ifp = NULL;
1774: want:
1775: return (ifp);
1776: }
1777:
1778: struct ifnet *
1779: bridge_rtlookup(struct bridge_softc *sc, struct ether_addr *ea)
1780: {
1781: struct bridge_rtnode *p;
1782: u_int32_t h;
1783: int dir;
1784:
1785: h = bridge_hash(sc, ea);
1786: LIST_FOREACH(p, &sc->sc_rts[h], brt_next) {
1787: dir = memcmp(ea, &p->brt_addr, sizeof(p->brt_addr));
1788: if (dir == 0)
1789: return (p->brt_if);
1790: if (dir > 0)
1791: goto fail;
1792: }
1793: fail:
1794: return (NULL);
1795: }
1796:
1797: /*
1798: * The following hash function is adapted from 'Hash Functions' by Bob Jenkins
1799: * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
1800: * "You may use this code any way you wish, private, educational, or
1801: * commercial. It's free."
1802: */
1803: #define mix(a,b,c) \
1804: do { \
1805: a -= b; a -= c; a ^= (c >> 13); \
1806: b -= c; b -= a; b ^= (a << 8); \
1807: c -= a; c -= b; c ^= (b >> 13); \
1808: a -= b; a -= c; a ^= (c >> 12); \
1809: b -= c; b -= a; b ^= (a << 16); \
1810: c -= a; c -= b; c ^= (b >> 5); \
1811: a -= b; a -= c; a ^= (c >> 3); \
1812: b -= c; b -= a; b ^= (a << 10); \
1813: c -= a; c -= b; c ^= (b >> 15); \
1814: } while (0)
1815:
1816: u_int32_t
1817: bridge_hash(struct bridge_softc *sc, struct ether_addr *addr)
1818: {
1819: u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_hashkey;
1820:
1821: b += addr->ether_addr_octet[5] << 8;
1822: b += addr->ether_addr_octet[4];
1823: a += addr->ether_addr_octet[3] << 24;
1824: a += addr->ether_addr_octet[2] << 16;
1825: a += addr->ether_addr_octet[1] << 8;
1826: a += addr->ether_addr_octet[0];
1827:
1828: mix(a, b, c);
1829: return (c & BRIDGE_RTABLE_MASK);
1830: }
1831:
1832: /*
1833: * Trim the routing table so that we've got a number of routes
1834: * less than or equal to the maximum.
1835: */
1836: void
1837: bridge_rttrim(struct bridge_softc *sc)
1838: {
1839: struct bridge_rtnode *n, *p;
1840: int i;
1841:
1842: /*
1843: * Make sure we have to trim the address table
1844: */
1845: if (sc->sc_brtcnt <= sc->sc_brtmax)
1846: return;
1847:
1848: /*
1849: * Force an aging cycle, this might trim enough addresses.
1850: */
1851: bridge_rtage(sc);
1852:
1853: if (sc->sc_brtcnt <= sc->sc_brtmax)
1854: return;
1855:
1856: for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
1857: n = LIST_FIRST(&sc->sc_rts[i]);
1858: while (n != LIST_END(&sc->sc_rts[i])) {
1859: p = LIST_NEXT(n, brt_next);
1860: if ((n->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1861: LIST_REMOVE(n, brt_next);
1862: sc->sc_brtcnt--;
1863: free(n, M_DEVBUF);
1864: n = p;
1865: if (sc->sc_brtcnt <= sc->sc_brtmax)
1866: return;
1867: }
1868: }
1869: }
1870: }
1871:
/* Timeout callback: run one aging cycle at splsoftnet. */
void
bridge_timer(void *vsc)
{
	struct bridge_softc *sc = vsc;
	int s;

	s = splsoftnet();
	bridge_rtage(sc);
	splx(s);
}
1882:
1883: /*
1884: * Perform an aging cycle
1885: */
void
bridge_rtage(struct bridge_softc *sc)
{
	struct bridge_rtnode *n, *p;
	int i;

	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
		n = LIST_FIRST(&sc->sc_rts[i]);
		while (n != LIST_END(&sc->sc_rts[i])) {
			if ((n->brt_flags & IFBAF_TYPEMASK) == IFBAF_STATIC) {
				/*
				 * Static entries are never evicted.
				 * NOTE(review): the toggle-then-clear below
				 * always leaves brt_age at 0; looks
				 * redundant -- confirm intent upstream.
				 */
				n->brt_age = !n->brt_age;
				if (n->brt_age)
					n->brt_age = 0;
				n = LIST_NEXT(n, brt_next);
			} else if (n->brt_age) {
				/* Used since the last cycle: clear the mark. */
				n->brt_age = 0;
				n = LIST_NEXT(n, brt_next);
			} else {
				/* Dynamic and unused for a full cycle: evict. */
				p = LIST_NEXT(n, brt_next);
				LIST_REMOVE(n, brt_next);
				sc->sc_brtcnt--;
				free(n, M_DEVBUF);
				n = p;
			}
		}
	}

	/* Re-arm the aging timeout unless aging is disabled (timeout 0). */
	if (sc->sc_brttimeout != 0)
		timeout_add(&sc->sc_brtimeout, sc->sc_brttimeout * hz);
}
1916:
1917: void
1918: bridge_rtagenode(struct ifnet *ifp, int age)
1919: {
1920: struct bridge_softc *sc = (struct bridge_softc *)ifp->if_bridge;
1921: struct bridge_rtnode *n;
1922: int i;
1923:
1924: if (sc == NULL)
1925: return;
1926:
1927: /*
1928: * If the age is zero then flush, otherwise set all the expiry times to
1929: * age for the interface
1930: */
1931: if (age == 0)
1932: bridge_rtdelete(sc, ifp, 1);
1933: else {
1934: for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
1935: n = LIST_FIRST(&sc->sc_rts[i]);
1936: while (n != LIST_END(&sc->sc_rts[i])) {
1937: /* Cap the expiry time to 'age' */
1938: if (n->brt_if == ifp &&
1939: n->brt_age > time_uptime + age &&
1940: (n->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
1941: n->brt_age = time_uptime + age;
1942: }
1943: }
1944: }
1945: }
1946:
1947:
1948:
1949: /*
1950: * Remove all dynamic addresses from the cache
1951: */
1952: int
1953: bridge_rtflush(struct bridge_softc *sc, int full)
1954: {
1955: int i;
1956: struct bridge_rtnode *p, *n;
1957:
1958: for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
1959: n = LIST_FIRST(&sc->sc_rts[i]);
1960: while (n != LIST_END(&sc->sc_rts[i])) {
1961: if (full ||
1962: (n->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1963: p = LIST_NEXT(n, brt_next);
1964: LIST_REMOVE(n, brt_next);
1965: sc->sc_brtcnt--;
1966: free(n, M_DEVBUF);
1967: n = p;
1968: } else
1969: n = LIST_NEXT(n, brt_next);
1970: }
1971: }
1972:
1973: return (0);
1974: }
1975:
1976: /*
1977: * Remove an address from the cache
1978: */
1979: int
1980: bridge_rtdaddr(struct bridge_softc *sc, struct ether_addr *ea)
1981: {
1982: int h;
1983: struct bridge_rtnode *p;
1984:
1985: h = bridge_hash(sc, ea);
1986: LIST_FOREACH(p, &sc->sc_rts[h], brt_next) {
1987: if (bcmp(ea, &p->brt_addr, sizeof(p->brt_addr)) == 0) {
1988: LIST_REMOVE(p, brt_next);
1989: sc->sc_brtcnt--;
1990: free(p, M_DEVBUF);
1991: return (0);
1992: }
1993: }
1994:
1995: return (ENOENT);
1996: }
1997: /*
1998: * Delete routes to a specific interface member.
1999: */
2000: void
2001: bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int dynonly)
2002: {
2003: int i;
2004: struct bridge_rtnode *n, *p;
2005:
2006: /*
2007: * Loop through all of the hash buckets and traverse each
2008: * chain looking for routes to this interface.
2009: */
2010: for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
2011: n = LIST_FIRST(&sc->sc_rts[i]);
2012: while (n != LIST_END(&sc->sc_rts[i])) {
2013: if (n->brt_if != ifp) {
2014: /* Not ours */
2015: n = LIST_NEXT(n, brt_next);
2016: continue;
2017: }
2018: if (dynonly &&
2019: (n->brt_flags & IFBAF_TYPEMASK) != IFBAF_DYNAMIC) {
2020: /* only deleting dynamics */
2021: n = LIST_NEXT(n, brt_next);
2022: continue;
2023: }
2024: p = LIST_NEXT(n, brt_next);
2025: LIST_REMOVE(n, brt_next);
2026: sc->sc_brtcnt--;
2027: free(n, M_DEVBUF);
2028: n = p;
2029: }
2030: }
2031: }
2032:
2033: /*
2034: * Gather all of the routes for this interface.
2035: */
int
bridge_rtfind(struct bridge_softc *sc, struct ifbaconf *baconf)
{
	int i, error = 0, onlycnt = 0;
	u_int32_t cnt = 0;
	struct bridge_rtnode *n;
	struct ifbareq bareq;

	/* A zero-length buffer means the caller only wants the count. */
	if (baconf->ifbac_len == 0)
		onlycnt = 1;

	for (i = 0, cnt = 0; i < BRIDGE_RTABLE_SIZE; i++) {
		LIST_FOREACH(n, &sc->sc_rts[i], brt_next) {
			if (!onlycnt) {
				/* Stop copying once the user buffer is full. */
				if (baconf->ifbac_len < sizeof(struct ifbareq))
					goto done;
				bcopy(sc->sc_if.if_xname, bareq.ifba_name,
				    sizeof(bareq.ifba_name));
				bcopy(n->brt_if->if_xname, bareq.ifba_ifsname,
				    sizeof(bareq.ifba_ifsname));
				bcopy(&n->brt_addr, &bareq.ifba_dst,
				    sizeof(bareq.ifba_dst));
				bareq.ifba_age = n->brt_age;
				bareq.ifba_flags = n->brt_flags;
				error = copyout((caddr_t)&bareq,
				    (caddr_t)(baconf->ifbac_req + cnt), sizeof(bareq));
				if (error)
					goto done;
				baconf->ifbac_len -= sizeof(struct ifbareq);
			}
			cnt++;
		}
	}
done:
	/* Report the number of bytes (whole entries) actually produced. */
	baconf->ifbac_len = cnt * sizeof(struct ifbareq);
	return (error);
}
2073:
2074: /*
2075: * Block non-ip frames:
2076: * Returns 0 if frame is ip, and 1 if it should be dropped.
2077: */
2078: int
2079: bridge_blocknonip(struct ether_header *eh, struct mbuf *m)
2080: {
2081: struct llc llc;
2082: u_int16_t etype;
2083:
2084: if (m->m_pkthdr.len < ETHER_HDR_LEN)
2085: return (1);
2086:
2087: etype = ntohs(eh->ether_type);
2088: switch (etype) {
2089: case ETHERTYPE_ARP:
2090: case ETHERTYPE_REVARP:
2091: case ETHERTYPE_IP:
2092: case ETHERTYPE_IPV6:
2093: return (0);
2094: }
2095:
2096: if (etype > ETHERMTU)
2097: return (1);
2098:
2099: if (m->m_pkthdr.len <
2100: (ETHER_HDR_LEN + LLC_SNAPFRAMELEN))
2101: return (1);
2102:
2103: m_copydata(m, ETHER_HDR_LEN, LLC_SNAPFRAMELEN,
2104: (caddr_t)&llc);
2105:
2106: etype = ntohs(llc.llc_snap.ether_type);
2107: if (llc.llc_dsap == LLC_SNAP_LSAP &&
2108: llc.llc_ssap == LLC_SNAP_LSAP &&
2109: llc.llc_control == LLC_UI &&
2110: llc.llc_snap.org_code[0] == 0 &&
2111: llc.llc_snap.org_code[1] == 0 &&
2112: llc.llc_snap.org_code[2] == 0 &&
2113: (etype == ETHERTYPE_ARP || etype == ETHERTYPE_REVARP ||
2114: etype == ETHERTYPE_IP || etype == ETHERTYPE_IPV6)) {
2115: return (0);
2116: }
2117:
2118: return (1);
2119: }
2120:
2121: u_int8_t
2122: bridge_filterrule(struct brl_head *h, struct ether_header *eh, struct mbuf *m)
2123: {
2124: struct brl_node *n;
2125: u_int8_t flags;
2126:
2127: SIMPLEQ_FOREACH(n, h, brl_next) {
2128: flags = n->brl_flags & (BRL_FLAG_SRCVALID|BRL_FLAG_DSTVALID);
2129: if (flags == 0)
2130: goto return_action;
2131: if (flags == (BRL_FLAG_SRCVALID|BRL_FLAG_DSTVALID)) {
2132: if (bcmp(eh->ether_shost, &n->brl_src, ETHER_ADDR_LEN))
2133: continue;
2134: if (bcmp(eh->ether_dhost, &n->brl_dst, ETHER_ADDR_LEN))
2135: continue;
2136: goto return_action;
2137: }
2138: if (flags == BRL_FLAG_SRCVALID) {
2139: if (bcmp(eh->ether_shost, &n->brl_src, ETHER_ADDR_LEN))
2140: continue;
2141: goto return_action;
2142: }
2143: if (flags == BRL_FLAG_DSTVALID) {
2144: if (bcmp(eh->ether_dhost, &n->brl_dst, ETHER_ADDR_LEN))
2145: continue;
2146: goto return_action;
2147: }
2148: }
2149: return (BRL_ACTION_PASS);
2150:
2151: return_action:
2152: #if NPF > 0
2153: pf_tag_packet(m, n->brl_tag, -1);
2154: #endif
2155: return (n->brl_action);
2156: }
2157:
2158: int
2159: bridge_addrule(struct bridge_iflist *bif, struct ifbrlreq *req, int out)
2160: {
2161: struct brl_node *n;
2162:
2163: n = (struct brl_node *)malloc(sizeof(struct brl_node), M_DEVBUF, M_NOWAIT);
2164: if (n == NULL)
2165: return (ENOMEM);
2166: bcopy(&req->ifbr_src, &n->brl_src, sizeof(struct ether_addr));
2167: bcopy(&req->ifbr_dst, &n->brl_dst, sizeof(struct ether_addr));
2168: n->brl_action = req->ifbr_action;
2169: n->brl_flags = req->ifbr_flags;
2170: #if NPF > 0
2171: if (req->ifbr_tagname[0])
2172: n->brl_tag = pf_tagname2tag(req->ifbr_tagname);
2173: else
2174: n->brl_tag = 0;
2175: #endif
2176: if (out) {
2177: n->brl_flags &= ~BRL_FLAG_IN;
2178: n->brl_flags |= BRL_FLAG_OUT;
2179: SIMPLEQ_INSERT_TAIL(&bif->bif_brlout, n, brl_next);
2180: } else {
2181: n->brl_flags &= ~BRL_FLAG_OUT;
2182: n->brl_flags |= BRL_FLAG_IN;
2183: SIMPLEQ_INSERT_TAIL(&bif->bif_brlin, n, brl_next);
2184: }
2185: return (0);
2186: }
2187:
2188: int
2189: bridge_flushrule(struct bridge_iflist *bif)
2190: {
2191: struct brl_node *p;
2192:
2193: while (!SIMPLEQ_EMPTY(&bif->bif_brlin)) {
2194: p = SIMPLEQ_FIRST(&bif->bif_brlin);
2195: SIMPLEQ_REMOVE_HEAD(&bif->bif_brlin, brl_next);
2196: #if NPF > 0
2197: pf_tag_unref(p->brl_tag);
2198: #endif
2199: free(p, M_DEVBUF);
2200: }
2201: while (!SIMPLEQ_EMPTY(&bif->bif_brlout)) {
2202: p = SIMPLEQ_FIRST(&bif->bif_brlout);
2203: SIMPLEQ_REMOVE_HEAD(&bif->bif_brlout, brl_next);
2204: #if NPF > 0
2205: pf_tag_unref(p->brl_tag);
2206: #endif
2207: free(p, M_DEVBUF);
2208: }
2209: return (0);
2210: }
2211:
2212: #ifdef IPSEC
/*
 * IPsec hook for bridged frames.  For inbound traffic (dir == BRIDGE_IN)
 * carrying ESP/AH/IPCOMP, look up the SA by (SPI, destination, protocol)
 * and hand the packet to the transform's input routine.  For outbound
 * traffic, consult the SPD and apply IPsec processing (or send an ICMP
 * needfrag error when DF is set and the packet exceeds the tdb MTU).
 * Returns 1 when the packet was consumed here, 0 when normal bridge
 * processing should continue.
 */
int
bridge_ipsec(struct bridge_softc *sc, struct ifnet *ifp,
    struct ether_header *eh, int hassnap, struct llc *llc,
    int dir, int af, int hlen, struct mbuf *m)
{
	union sockaddr_union dst;
	struct timeval tv;
	struct tdb *tdb;
	u_int32_t spi;
	u_int16_t cpi;
	int error, off, s;
	u_int8_t proto = 0;
#ifdef INET
	struct ip *ip;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr *ip6;
#endif /* INET6 */

	if (dir == BRIDGE_IN) {
		switch (af) {
#ifdef INET
		case AF_INET:
			/* Need at least an SPI's worth of payload. */
			if (m->m_pkthdr.len - hlen < 2 * sizeof(u_int32_t))
				break;

			ip = mtod(m, struct ip *);
			proto = ip->ip_p;
			off = offsetof(struct ip, ip_p);

			if (proto != IPPROTO_ESP && proto != IPPROTO_AH &&
			    proto != IPPROTO_IPCOMP)
				goto skiplookup;

			/* Build the SA destination from the IP header. */
			bzero(&dst, sizeof(union sockaddr_union));
			dst.sa.sa_family = AF_INET;
			dst.sin.sin_len = sizeof(struct sockaddr_in);
			m_copydata(m, offsetof(struct ip, ip_dst),
			    sizeof(struct in_addr),
			    (caddr_t)&dst.sin.sin_addr);

			/* Extract the SPI (or CPI for IPcomp). */
			if (ip->ip_p == IPPROTO_ESP)
				m_copydata(m, hlen, sizeof(u_int32_t),
				    (caddr_t)&spi);
			else if (ip->ip_p == IPPROTO_AH)
				m_copydata(m, hlen + sizeof(u_int32_t),
				    sizeof(u_int32_t), (caddr_t)&spi);
			else if (ip->ip_p == IPPROTO_IPCOMP) {
				m_copydata(m, hlen + sizeof(u_int16_t),
				    sizeof(u_int16_t), (caddr_t)&cpi);
				spi = ntohl(htons(cpi));
			}
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			if (m->m_pkthdr.len - hlen < 2 * sizeof(u_int32_t))
				break;

			ip6 = mtod(m, struct ip6_hdr *);

			/* XXX We should chase down the header chain */
			proto = ip6->ip6_nxt;
			off = offsetof(struct ip6_hdr, ip6_nxt);

			if (proto != IPPROTO_ESP && proto != IPPROTO_AH &&
			    proto != IPPROTO_IPCOMP)
				goto skiplookup;

			/*
			 * NOTE(review): the copy below reads from
			 * offsetof(ip6_nxt) into sin6_addr -- looks like it
			 * was meant to be ip6_dst; confirm upstream.
			 */
			bzero(&dst, sizeof(union sockaddr_union));
			dst.sa.sa_family = AF_INET6;
			dst.sin6.sin6_len = sizeof(struct sockaddr_in6);
			m_copydata(m, offsetof(struct ip6_hdr, ip6_nxt),
			    sizeof(struct in6_addr),
			    (caddr_t)&dst.sin6.sin6_addr);

			if (proto == IPPROTO_ESP)
				m_copydata(m, hlen, sizeof(u_int32_t),
				    (caddr_t)&spi);
			else if (proto == IPPROTO_AH)
				m_copydata(m, hlen + sizeof(u_int32_t),
				    sizeof(u_int32_t), (caddr_t)&spi);
			else if (proto == IPPROTO_IPCOMP) {
				m_copydata(m, hlen + sizeof(u_int16_t),
				    sizeof(u_int16_t), (caddr_t)&cpi);
				spi = ntohl(htons(cpi));
			}
			break;
#endif /* INET6 */
		default:
			return (0);
		}

		if (proto == 0)
			goto skiplookup;

		s = spltdb();

		tdb = gettdb(spi, &dst, proto);
		if (tdb != NULL && (tdb->tdb_flags & TDBF_INVALID) == 0 &&
		    tdb->tdb_xform != NULL) {
			/* Arm the first-use expiration timers on first hit. */
			if (tdb->tdb_first_use == 0) {
				tdb->tdb_first_use = time_second;

				tv.tv_usec = 0;

				/* Check for wrap-around. */
				if (tdb->tdb_exp_first_use + tdb->tdb_first_use
				    < tdb->tdb_first_use)
					tv.tv_sec = ((unsigned long)-1) / 2;
				else
					tv.tv_sec = tdb->tdb_exp_first_use +
					    tdb->tdb_first_use;

				if (tdb->tdb_flags & TDBF_FIRSTUSE)
					timeout_add(&tdb->tdb_first_tmo,
					    hzto(&tv));

				/* Check for wrap-around. */
				if (tdb->tdb_first_use +
				    tdb->tdb_soft_first_use
				    < tdb->tdb_first_use)
					tv.tv_sec = ((unsigned long)-1) / 2;
				else
					tv.tv_sec = tdb->tdb_first_use +
					    tdb->tdb_soft_first_use;

				if (tdb->tdb_flags & TDBF_SOFT_FIRSTUSE)
					timeout_add(&tdb->tdb_sfirst_tmo,
					    hzto(&tv));
			}

			/* Hand the packet to the transform; it owns it now. */
			(*(tdb->tdb_xform->xf_input))(m, tdb, hlen, off);
			splx(s);
			return (1);
		} else {
			splx(s);
 skiplookup:
			/* XXX do an input policy lookup */
			return (0);
		}
	} else { /* Outgoing from the bridge. */
		tdb = ipsp_spd_lookup(m, af, hlen, &error,
		    IPSP_DIRECTION_OUT, NULL, NULL);
		if (tdb != NULL) {
			/*
			 * We don't need to do loop detection, the
			 * bridge will do that for us.
			 */
#if NPF > 0
			switch (af) {
#ifdef INET
			case AF_INET:
				if (pf_test(dir, &encif[0].sc_if,
				    &m, NULL) != PF_PASS) {
					m_freem(m);
					return (1);
				}
				break;
#endif /* INET */
#ifdef INET6
			case AF_INET6:
				if (pf_test6(dir, &encif[0].sc_if,
				    &m, NULL) != PF_PASS) {
					m_freem(m);
					return (1);
				}
				break;
#endif /* INET6 */
			}
			if (m == NULL)
				return (1);
#endif /* NPF */

			/* PMTU: bounce an ICMP needfrag instead of encrypting. */
			ip = mtod(m, struct ip *);
			if ((af == AF_INET) &&
			    ip_mtudisc && (ip->ip_off & htons(IP_DF)) &&
			    tdb->tdb_mtu && ntohs(ip->ip_len) > tdb->tdb_mtu &&
			    tdb->tdb_mtutimeout > time_second)
				bridge_send_icmp_err(sc, ifp, eh, m,
				    hassnap, llc, tdb->tdb_mtu,
				    ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG);
			else
				error = ipsp_process_packet(m, tdb, af, 0);
			return (1);
		} else
			return (0);
	}

	return (0);
}
2404: #endif /* IPSEC */
2405:
2406: #if NPF > 0
2407: /*
2408: * Filter IP packets by peeking into the ethernet frame. This violates
2409: * the ISO model, but allows us to act as an IP filter at the data link
2410: * layer. As a result, most of this code will look familiar to those
2411: * who've read net/if_ethersubr.c and netinet/ip_input.c
2412: */
/*
 * Returns the frame's mbuf (possibly reallocated by m_pullup/M_PREPEND)
 * if it passed the filter, or NULL if it was dropped or otherwise
 * consumed (e.g. by IPsec processing).  Non-IP frames pass through
 * untouched.
 */
struct mbuf *
bridge_filter(struct bridge_softc *sc, int dir, struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *m)
{
	struct llc llc;
	int hassnap = 0;	/* frame carried an 802.2 LLC/SNAP header */
	struct ip *ip;
	int hlen;		/* IP header length in bytes */
	u_int16_t etype;

	etype = ntohs(eh->ether_type);

	/*
	 * Not a plain Ethernet-II IP/IPv6 frame.  It may still be IP
	 * inside an 802.2 LLC/SNAP encapsulation, in which case etype
	 * is really an 802.3 length field (<= ETHERMTU).  Anything
	 * else is passed through unfiltered.
	 */
	if (etype != ETHERTYPE_IP && etype != ETHERTYPE_IPV6) {
		if (etype > ETHERMTU ||
		    m->m_pkthdr.len < (LLC_SNAPFRAMELEN +
		    ETHER_HDR_LEN))
			return (m);

		m_copydata(m, ETHER_HDR_LEN,
		    LLC_SNAPFRAMELEN, (caddr_t)&llc);

		if (llc.llc_dsap != LLC_SNAP_LSAP ||
		    llc.llc_ssap != LLC_SNAP_LSAP ||
		    llc.llc_control != LLC_UI ||
		    llc.llc_snap.org_code[0] ||
		    llc.llc_snap.org_code[1] ||
		    llc.llc_snap.org_code[2])
			return (m);

		etype = ntohs(llc.llc_snap.ether_type);
		if (etype != ETHERTYPE_IP && etype != ETHERTYPE_IPV6)
			return (m);
		hassnap = 1;
	}

	/*
	 * Strip the link-layer headers so the packet starts at the IP
	 * header; they are restored from eh/llc before returning.
	 */
	m_adj(m, ETHER_HDR_LEN);
	if (hassnap)
		m_adj(m, LLC_SNAPFRAMELEN);

	switch (etype) {

	case ETHERTYPE_IP:
		/* Sanity-check the IPv4 header, mirroring ip_input(). */
		if (m->m_pkthdr.len < sizeof(struct ip))
			goto dropit;

		/* Copy minimal header, and drop invalids */
		if (m->m_len < sizeof(struct ip) &&
		    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
			ipstat.ips_toosmall++;
			return (NULL);
		}
		ip = mtod(m, struct ip *);

		if (ip->ip_v != IPVERSION) {
			ipstat.ips_badvers++;
			goto dropit;
		}

		hlen = ip->ip_hl << 2;	/* get whole header length */
		if (hlen < sizeof(struct ip)) {
			ipstat.ips_badhlen++;
			goto dropit;
		}

		if (hlen > m->m_len) {
			if ((m = m_pullup(m, hlen)) == NULL) {
				ipstat.ips_badhlen++;
				return (NULL);
			}
			ip = mtod(m, struct ip *);
		}

		/*
		 * Verify the header checksum: summing the header with
		 * the checksum field included yields 0 for a valid
		 * header.  As a side effect this zeroes ip_sum, which
		 * is recomputed below once pf has seen the packet.
		 */
		if ((ip->ip_sum = in_cksum(m, hlen)) != 0) {
			ipstat.ips_badsum++;
			goto dropit;
		}

		if (ntohs(ip->ip_len) < hlen)
			goto dropit;

		/* Drop truncated packets; trim trailing link-layer pad. */
		if (m->m_pkthdr.len < ntohs(ip->ip_len))
			goto dropit;
		if (m->m_pkthdr.len > ntohs(ip->ip_len)) {
			if (m->m_len == m->m_pkthdr.len) {
				m->m_len = ntohs(ip->ip_len);
				m->m_pkthdr.len = ntohs(ip->ip_len);
			} else
				/* negative count trims from the tail */
				m_adj(m, ntohs(ip->ip_len) - m->m_pkthdr.len);
		}

#ifdef IPSEC
		/* IFF_LINK2 on the bridge enables IPsec processing. */
		if ((sc->sc_if.if_flags & IFF_LINK2) == IFF_LINK2 &&
		    bridge_ipsec(sc, ifp, eh, hassnap, &llc,
		    dir, AF_INET, hlen, m))
			return (NULL);	/* bridge_ipsec() took the packet */
#endif /* IPSEC */

		/* Finally, we get to filter the packet! */
		m->m_pkthdr.rcvif = ifp;
		if (pf_test(dir, ifp, &m, eh) != PF_PASS)
			goto dropit;
		if (m == NULL)
			goto dropit;

		/* Rebuild the IP header (pf may have rewritten it) */
		if (m->m_len < hlen && ((m = m_pullup(m, hlen)) == NULL))
			return (NULL);
		if (m->m_len < sizeof(struct ip))
			goto dropit;
		ip = mtod(m, struct ip *);
		ip->ip_sum = 0;
		ip->ip_sum = in_cksum(m, hlen);

		break;

#ifdef INET6
	case ETHERTYPE_IPV6: {
		struct ip6_hdr *ip6;

		if (m->m_len < sizeof(struct ip6_hdr)) {
			if ((m = m_pullup(m, sizeof(struct ip6_hdr)))
			    == NULL) {
				ip6stat.ip6s_toosmall++;
				return (NULL);
			}
		}

		ip6 = mtod(m, struct ip6_hdr *);

		if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
			ip6stat.ip6s_badvers++;
			in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
			goto dropit;
		}

#ifdef IPSEC
		hlen = sizeof(struct ip6_hdr);

		/* IFF_LINK2 on the bridge enables IPsec processing. */
		if ((sc->sc_if.if_flags & IFF_LINK2) == IFF_LINK2 &&
		    bridge_ipsec(sc, ifp, eh, hassnap, &llc,
		    dir, AF_INET6, hlen, m))
			return (NULL);	/* bridge_ipsec() took the packet */
#endif /* IPSEC */

		if (pf_test6(dir, ifp, &m, eh) != PF_PASS)
			goto dropit;
		if (m == NULL)
			return (NULL);

		break;
	}
#endif /* INET6 */

	default:
		goto dropit;
		break;
	}

	/* Reattach SNAP header */
	if (hassnap) {
		M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
		if (m == NULL)
			goto dropit;
		bcopy(&llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
	}

	/* Reattach ethernet header */
	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		goto dropit;
	bcopy(eh, mtod(m, caddr_t), sizeof(*eh));

	return (m);

dropit:
	if (m != NULL)
		m_freem(m);
	return (NULL);
}
2592: #endif /* NPF > 0 */
2593:
/*
 * Fragment an IPv4 frame that is too large for the destination
 * member's MTU and queue the resulting fragments on ifp.  Non-IPv4
 * frames are dropped; packets with IP_DF set trigger an ICMP
 * "fragmentation needed" error instead.  Consumes m in all cases.
 */
void
bridge_fragment(struct bridge_softc *sc, struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *m)
{
	struct llc llc;
	struct mbuf *m0;
	int s, len, error = 0;
	int hassnap = 0;
#ifdef INET
	u_int16_t etype;
	struct ip *ip;
#endif

#ifndef INET
	/* Without INET there is no IP fragmentation; just drop. */
	goto dropit;
#else
	etype = ntohs(eh->ether_type);
	/*
	 * A VLAN frame that only exceeds the MTU by the 802.1Q tag
	 * overhead may go out untouched if the interface can handle
	 * oversized VLAN frames (IFCAP_VLAN_MTU).
	 */
	if (etype == ETHERTYPE_VLAN &&
	    (ifp->if_capabilities & IFCAP_VLAN_MTU) &&
	    ((m->m_pkthdr.len - sizeof(struct ether_vlan_header)) <=
	    ifp->if_mtu)) {
		s = splnet();
		bridge_ifenqueue(sc, ifp, m);
		splx(s);
		return;
	}
	/*
	 * Only IPv4 can be fragmented: accept it either as a plain
	 * Ethernet-II frame or inside an 802.2 LLC/SNAP encapsulation.
	 */
	if (etype != ETHERTYPE_IP) {
		if (etype > ETHERMTU ||
		    m->m_pkthdr.len < (LLC_SNAPFRAMELEN +
		    ETHER_HDR_LEN))
			goto dropit;

		m_copydata(m, ETHER_HDR_LEN,
		    LLC_SNAPFRAMELEN, (caddr_t)&llc);

		if (llc.llc_dsap != LLC_SNAP_LSAP ||
		    llc.llc_ssap != LLC_SNAP_LSAP ||
		    llc.llc_control != LLC_UI ||
		    llc.llc_snap.org_code[0] ||
		    llc.llc_snap.org_code[1] ||
		    llc.llc_snap.org_code[2] ||
		    llc.llc_snap.ether_type != htons(ETHERTYPE_IP))
			goto dropit;

		hassnap = 1;
	}

	/* Strip the link-layer headers; re-added per fragment below. */
	m_adj(m, ETHER_HDR_LEN);
	if (hassnap)
		m_adj(m, LLC_SNAPFRAMELEN);

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		goto dropit;
	ip = mtod(m, struct ip *);

	/* Respect IP_DF, return an ICMP_UNREACH_NEEDFRAG. */
	if (ip->ip_off & htons(IP_DF)) {
		/* bridge_send_icmp_err() consumes m */
		bridge_send_icmp_err(sc, ifp, eh, m, hassnap, &llc,
		    ifp->if_mtu, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG);
		return;
	}

	/* ip_fragment() leaves a chain of fragments in m_nextpkt. */
	error = ip_fragment(m, ifp, ifp->if_mtu);
	if (error) {
		/* ip_fragment() already freed the chain on error */
		m = NULL;
		goto dropit;
	}

	/*
	 * Queue each fragment with the link-layer headers restored.
	 * After the first failure, the remaining fragments are only
	 * freed (error stays set for the rest of the walk).
	 */
	for (; m; m = m0) {
		m0 = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (error == 0) {
			if (hassnap) {
				M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
				if (m == NULL) {
					error = ENOBUFS;
					continue;
				}
				bcopy(&llc, mtod(m, caddr_t),
				    LLC_SNAPFRAMELEN);
			}
			M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
			if (m == NULL) {
				error = ENOBUFS;
				continue;
			}
			len = m->m_pkthdr.len;	/* NOTE(review): len is set but unused here */
			bcopy(eh, mtod(m, caddr_t), sizeof(*eh));
			s = splnet();
			error = bridge_ifenqueue(sc, ifp, m);
			if (error) {
				splx(s);
				continue;
			}
			splx(s);
		} else
			m_freem(m);
	}

	if (error == 0)
		ipstat.ips_fragmented++;

	return;
#endif /* INET */
 dropit:
	if (m != NULL)
		m_freem(m);
}
2703:
2704: int
2705: bridge_ifenqueue(struct bridge_softc *sc, struct ifnet *ifp, struct mbuf *m)
2706: {
2707: int error, len;
2708: short mflags;
2709:
2710: #if NGIF > 0
2711: /* Packet needs etherip encapsulation. */
2712: if (ifp->if_type == IFT_GIF)
2713: m->m_flags |= M_PROTO1;
2714: #endif
2715: len = m->m_pkthdr.len;
2716: mflags = m->m_flags;
2717: IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
2718: if (error) {
2719: sc->sc_if.if_oerrors++;
2720: return (error);
2721: }
2722: sc->sc_if.if_opackets++;
2723: sc->sc_if.if_obytes += len;
2724: ifp->if_obytes += len;
2725: if (mflags & M_MCAST)
2726: ifp->if_omcasts++;
2727: if ((ifp->if_flags & IFF_OACTIVE) == 0)
2728: (*ifp->if_start)(ifp);
2729:
2730: return (0);
2731: }
2732:
2733: #ifdef INET
/*
 * Build and transmit an ICMP error (e.g. ICMP_UNREACH_NEEDFRAG for
 * path-MTU discovery) in response to frame n, sent back out via
 * bridge_output().  Consumes n in all cases.
 *
 *	eh	original ethernet header; its source/destination
 *		addresses are swapped in place to address the reply
 *	hassnap/llc	whether (and which) LLC/SNAP header to restore
 *	mtu	value reported in the ICMP message
 *	type/code	ICMP type and code of the error
 *
 * NOTE(review): sc is unused in this function.
 */
void
bridge_send_icmp_err(struct bridge_softc *sc, struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *n, int hassnap, struct llc *llc,
    int mtu, int type, int code)
{
	struct ip *ip;
	struct icmp *icp;
	struct in_addr t;
	struct mbuf *m, *n2;
	int hlen;
	u_int8_t ether_tmp[ETHER_ADDR_LEN];

	/*
	 * icmp_do_error() consumes its argument, so keep a copy of the
	 * offending packet; the copy is freed once the reply is out.
	 */
	n2 = m_copym(n, 0, M_COPYALL, M_DONTWAIT);
	if (!n2) {
		m_freem(n);
		return;
	}
	m = icmp_do_error(n, type, code, 0, mtu);
	if (m == NULL) {
		m_freem(n2);
		return;
	}

	n = n2;

	/* Address the reply back at the original sender. */
	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;
	t = ip->ip_dst;
	ip->ip_dst = ip->ip_src;
	ip->ip_src = t;

	/*
	 * Checksum the ICMP portion only: temporarily advance the mbuf
	 * data pointer past the IP header, then restore it.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;
	icp = mtod(m, struct icmp *);
	icp->icmp_cksum = 0;
	icp->icmp_cksum = in_cksum(m, ntohs(ip->ip_len) - hlen);
	m->m_data -= hlen;
	m->m_len += hlen;

	/* Finish the IP header; ip_off keeps only the DF bit. */
	ip->ip_v = IPVERSION;
	ip->ip_off &= htons(IP_DF);
	ip->ip_id = htons(ip_randomid());
	ip->ip_ttl = MAXTTL;
	ip->ip_sum = 0;
	ip->ip_sum = in_cksum(m, hlen);

	/* Swap ethernet addresses */
	bcopy(&eh->ether_dhost, &ether_tmp, sizeof(ether_tmp));
	bcopy(&eh->ether_shost, &eh->ether_dhost, sizeof(ether_tmp));
	bcopy(&ether_tmp, &eh->ether_shost, sizeof(ether_tmp));

	/* Reattach SNAP header */
	if (hassnap) {
		M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
		if (m == NULL)
			goto dropit;
		bcopy(llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
	}

	/* Reattach ethernet header */
	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		goto dropit;
	bcopy(eh, mtod(m, caddr_t), sizeof(*eh));

	bridge_output(ifp, m, NULL, NULL);
	m_freem(n);
	return;

 dropit:
	m_freem(n);
}
2806: #endif
CVSweb