Annotation of sys/net/route.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: route.c,v 1.84 2007/06/14 18:31:49 reyk Exp $ */
2: /* $NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $ */
3:
4: /*
5: * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6: * All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. Neither the name of the project nor the names of its contributors
17: * may be used to endorse or promote products derived from this software
18: * without specific prior written permission.
19: *
20: * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23: * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30: * SUCH DAMAGE.
31: */
32:
33: /*
34: * Copyright (c) 1980, 1986, 1991, 1993
35: * The Regents of the University of California. All rights reserved.
36: *
37: * Redistribution and use in source and binary forms, with or without
38: * modification, are permitted provided that the following conditions
39: * are met:
40: * 1. Redistributions of source code must retain the above copyright
41: * notice, this list of conditions and the following disclaimer.
42: * 2. Redistributions in binary form must reproduce the above copyright
43: * notice, this list of conditions and the following disclaimer in the
44: * documentation and/or other materials provided with the distribution.
45: * 3. Neither the name of the University nor the names of its contributors
46: * may be used to endorse or promote products derived from this software
47: * without specific prior written permission.
48: *
49: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59: * SUCH DAMAGE.
60: *
61: * @(#)route.c 8.2 (Berkeley) 11/15/93
62: */
63:
64: /*
65: * @(#)COPYRIGHT 1.1 (NRL) 17 January 1995
66: *
67: * NRL grants permission for redistribution and use in source and binary
68: * forms, with or without modification, of the software and documentation
69: * created at NRL provided that the following conditions are met:
70: *
71: * 1. Redistributions of source code must retain the above copyright
72: * notice, this list of conditions and the following disclaimer.
73: * 2. Redistributions in binary form must reproduce the above copyright
74: * notice, this list of conditions and the following disclaimer in the
75: * documentation and/or other materials provided with the distribution.
76: * 3. All advertising materials mentioning features or use of this software
77: * must display the following acknowledgements:
78: * This product includes software developed by the University of
79: * California, Berkeley and its contributors.
80: * This product includes software developed at the Information
81: * Technology Division, US Naval Research Laboratory.
82: * 4. Neither the name of the NRL nor the names of its contributors
83: * may be used to endorse or promote products derived from this software
84: * without specific prior written permission.
85: *
86: * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
87: * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
88: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
89: * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR
90: * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
91: * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
92: * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
93: * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
94: * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
95: * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
96: * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97: *
98: * The views and conclusions contained in the software and documentation
99: * are those of the authors and should not be interpreted as representing
100: * official policies, either expressed or implied, of the US Naval
101: * Research Laboratory (NRL).
102: */
103:
104: #include <sys/param.h>
105: #include <sys/systm.h>
106: #include <sys/proc.h>
107: #include <sys/mbuf.h>
108: #include <sys/socket.h>
109: #include <sys/socketvar.h>
110: #include <sys/domain.h>
111: #include <sys/protosw.h>
112: #include <sys/ioctl.h>
113: #include <sys/kernel.h>
114: #include <sys/queue.h>
115: #include <sys/pool.h>
116:
117: #include <net/if.h>
118: #include <net/route.h>
119: #include <net/raw_cb.h>
120:
121: #include <netinet/in.h>
122: #include <netinet/in_var.h>
123:
124: #ifdef IPSEC
125: #include <netinet/ip_ipsp.h>
126:
127: extern struct ifnet encif;
128: struct ifaddr *encap_findgwifa(struct sockaddr *);
129: #endif
130:
/* Shorthand cast used throughout this file. */
131: #define SA(p) ((struct sockaddr *)(p))
132:
133: struct route_cb route_cb; /* routing-socket listener counts */
134: struct rtstat rtstat; /* routing statistics counters */
135: struct radix_node_head ***rt_tables; /* [table id][AF index] -> radix head */
136: u_int8_t af2rtafidx[AF_MAX+1]; /* maps address family -> slot in a table */
137: u_int8_t rtafidx_max; /* one past the highest used AF slot */
138: u_int rtbl_id_max = 0; /* highest routing-table id allocated so far */
139:
140: int rttrash; /* routes not in table but not freed */
141:
142: struct pool rtentry_pool; /* pool for rtentry structures */
143: struct pool rttimer_pool; /* pool for rttimer structures */
144:
/* Forward declarations for file-local helpers. */
145: int rtable_init(struct radix_node_head ***);
146: int okaytoclone(u_int, int);
147: int rtdeletemsg(struct rtentry *, u_int);
148: int rtflushclone1(struct radix_node *, void *);
149: void rtflushclone(struct radix_node_head *, struct rtentry *);
150: int rt_if_remove_rtdelete(struct radix_node *, void *);
151:
/* Upper bound on dynamically assigned route-label ids. */
152: #define LABELID_MAX 50000
153:
/* A route label: human-readable name, numeric id, and reference count. */
154: struct rt_label {
155: TAILQ_ENTRY(rt_label) rtl_entry;
156: char rtl_name[RTLABEL_LEN];
157: u_int16_t rtl_id;
158: int rtl_ref;
159: };
160:
/* Global list of all route labels currently known. */
161: TAILQ_HEAD(rt_labels, rt_label) rt_labels = TAILQ_HEAD_INITIALIZER(rt_labels);
162:
163: #ifdef IPSEC
/*
 * Return the interface address used for IPsec encapsulation routes.
 * Always the first address of the (single, global) enc interface;
 * the gateway argument is currently unused here.
 */
164: struct ifaddr *
165: encap_findgwifa(struct sockaddr *gw)
166: {
167: return (TAILQ_FIRST(&encif.if_addrlist));
168: }
169: #endif
170:
/*
 * Allocate and attach the per-address-family radix heads for one
 * routing table.  Allocates a NULL-initialized array indexed by AF
 * slot (af2rtafidx) and lets each domain attach its radix head.
 * Returns 0 on success, -1 if the array cannot be allocated.
 */
171: int
172: rtable_init(struct radix_node_head ***table)
173: {
174: void **p;
175: struct domain *dom;
176:
177: if ((p = malloc(sizeof(void *) * (rtafidx_max + 1), M_RTABLE,
178: M_NOWAIT)) == NULL)
179: return (-1);
/* Slot 0 must stay NULL; zero everything before the domains attach. */
180: bzero(p, sizeof(void *) * (rtafidx_max + 1));
181:
182: /* 2nd pass: attach */
183: for (dom = domains; dom != NULL; dom = dom->dom_next)
184: if (dom->dom_rtattach)
185: dom->dom_rtattach(&p[af2rtafidx[dom->dom_family]],
186: dom->dom_rtoffset);
187:
188: *table = (struct radix_node_head **)p;
189: return (0);
190: }
191:
/*
 * One-time initialization of the routing subsystem: sets up the
 * rtentry pool, the radix-trie globals, the AF -> slot index map,
 * and creates routing table 0.  Panics if table 0 cannot be made.
 */
192: void
193: route_init()
194: {
195: struct domain *dom;
196:
197: pool_init(&rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl",
198: NULL);
199: rn_init(); /* initialize all zeroes, all ones, mask table */
200:
201: bzero(af2rtafidx, sizeof(af2rtafidx));
202: rtafidx_max = 1; /* must have NULL at index 0, so start at 1 */
203:
204: /* find out how many tables to allocate */
205: for (dom = domains; dom != NULL; dom = dom->dom_next)
206: if (dom->dom_rtattach)
207: af2rtafidx[dom->dom_family] = rtafidx_max++;
208:
209: if (rtable_add(0) == -1)
210: panic("route_init rtable_add");
211: }
212:
/*
 * Create routing table `id`, growing the rt_tables pointer array if
 * needed.  Returns 0 on success, -1 on invalid id, allocation failure,
 * or if the table already exists.
 */
212: int
213: rtable_add(u_int id) /* must be called at splsoftnet */
214: {
215: void *p;
216:
217: if (id > RT_TABLEID_MAX)
218: return (-1);
219:
/* Grow the array (id == 0 is the bootstrap allocation from route_init). */
220: if (id == 0 || id > rtbl_id_max) {
221: size_t newlen = sizeof(void *) * (id+1);
222:
223: if ((p = malloc(newlen, M_RTABLE, M_NOWAIT)) == NULL)
224: return (-1);
225: bzero(p, newlen);
/* Preserve existing table pointers when growing, then free the old array. */
226: if (id > 0) {
227: bcopy(rt_tables, p, sizeof(void *) * (rtbl_id_max+1));
228: free(rt_tables, M_RTABLE);
229: }
230: rt_tables = p;
231: rtbl_id_max = id;
232: }
233:
234: if (rt_tables[id] != NULL) /* already exists */
235: return (-1);
236:
237: return (rtable_init(&rt_tables[id]));
238: }
240:
241: int
242: rtable_exists(u_int id) /* verify table with that ID exists */
243: {
244: if (id > RT_TABLEID_MAX)
245: return (0);
246:
247: if (id > rtbl_id_max)
248: return (0);
249:
250: if (rt_tables[id] == NULL) /* should not happen */
251: return (0);
252:
253: return (1);
254: }
255:
256: #include "pf.h"
257: #if NPF > 0
258: void
259: rtalloc_noclone(struct route *ro, int howstrict)
260: {
261: if (ro->ro_rt && ro->ro_rt->rt_ifp && (ro->ro_rt->rt_flags & RTF_UP))
262: return; /* XXX */
263: ro->ro_rt = rtalloc2(&ro->ro_dst, 1, howstrict);
264: }
265:
266: int
267: okaytoclone(u_int flags, int howstrict)
268: {
269: if (howstrict == ALL_CLONING)
270: return (1);
271: if (howstrict == ONNET_CLONING && !(flags & RTF_GATEWAY))
272: return (1);
273: return (0);
274: }
275:
/*
 * Route lookup with caller-controlled cloning policy (pf only).
 * Looks up dst in table 0, optionally resolves a cloning route, and
 * returns the referenced rtentry or NULL.  On failure (or XRESOLVE)
 * an RTM_MISS/RTM_RESOLVE message is sent if `report` is set.
 * Runs at splnet; the reference is taken before splx().
 */
276: struct rtentry *
277: rtalloc2(struct sockaddr *dst, int report, int howstrict)
278: {
279: struct radix_node_head *rnh;
280: struct rtentry *rt;
281: struct radix_node *rn;
282: struct rtentry *newrt = 0;
283: struct rt_addrinfo info;
284: int s = splnet(), err = 0, msgtype = RTM_MISS;
285:
286: rnh = rt_gettable(dst->sa_family, 0);
/* RNF_ROOT nodes are trie sentinels, not real routes. */
287: if (rnh && (rn = rnh->rnh_matchaddr((caddr_t)dst, rnh)) &&
288: ((rn->rn_flags & RNF_ROOT) == 0)) {
289: newrt = rt = (struct rtentry *)rn;
290: if (report && (rt->rt_flags & RTF_CLONING) &&
291: okaytoclone(rt->rt_flags, howstrict)) {
292: err = rtrequest(RTM_RESOLVE, dst, SA(0), SA(0), 0,
293: &newrt, 0);
/* Clone failed: fall back to the cloning route itself. */
294: if (err) {
295: newrt = rt;
296: rt->rt_refcnt++;
297: goto miss;
298: }
/* XRESOLVE routes need an external agent; report via RTM_RESOLVE. */
299: if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
300: msgtype = RTM_RESOLVE;
301: goto miss;
302: }
303: } else
304: rt->rt_refcnt++;
305: } else {
306: rtstat.rts_unreach++;
307: miss:
308: if (report) {
309: bzero((caddr_t)&info, sizeof(info));
310: info.rti_info[RTAX_DST] = dst;
311: rt_missmsg(msgtype, &info, 0, NULL, err, 0);
312: }
313: }
314: splx(s);
315: return (newrt);
316: }
317: #endif /* NPF > 0 */
318:
319: /*
320: * Packet routing routines.
321: */
322: void
323: rtalloc(struct route *ro)
324: {
325: if (ro->ro_rt && ro->ro_rt->rt_ifp && (ro->ro_rt->rt_flags & RTF_UP))
326: return; /* XXX */
327: ro->ro_rt = rtalloc1(&ro->ro_dst, 1, 0);
328: }
329:
/*
 * Core route lookup.  Matches dst in the given table; if the match is
 * a cloning route and `report` is set, resolves (clones) it and
 * announces the new route.  Returns a referenced rtentry or NULL.
 * Misses are reported via RTM_MISS except for PF_KEY lookups (see
 * comment below).  Runs at splsoftnet for the duration of the lookup.
 */
330: struct rtentry *
331: rtalloc1(struct sockaddr *dst, int report, u_int tableid)
332: {
333: struct radix_node_head *rnh;
334: struct rtentry *rt;
335: struct radix_node *rn;
336: struct rtentry *newrt = 0;
337: struct rt_addrinfo info;
338: int s = splsoftnet(), err = 0, msgtype = RTM_MISS;
339:
340: rnh = rt_gettable(dst->sa_family, tableid);
/* RNF_ROOT nodes are trie sentinels, not real routes. */
341: if (rnh && (rn = rnh->rnh_matchaddr((caddr_t)dst, rnh)) &&
342: ((rn->rn_flags & RNF_ROOT) == 0)) {
343: newrt = rt = (struct rtentry *)rn;
344: if (report && (rt->rt_flags & RTF_CLONING)) {
345: err = rtrequest(RTM_RESOLVE, dst, SA(NULL),
346: SA(NULL), 0, &newrt, tableid);
/* Clone failed: return the cloning route itself, referenced. */
347: if (err) {
348: newrt = rt;
349: rt->rt_refcnt++;
350: goto miss;
351: }
/* XRESOLVE: an external agent must finish resolution. */
352: if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
353: msgtype = RTM_RESOLVE;
354: goto miss;
355: }
356: /* Inform listeners of the new route */
357: bzero(&info, sizeof(info));
358: info.rti_info[RTAX_DST] = rt_key(rt);
359: info.rti_info[RTAX_NETMASK] = rt_mask(rt);
360: info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
361: if (rt->rt_ifp != NULL) {
362: info.rti_info[RTAX_IFP] =
363: TAILQ_FIRST(&rt->rt_ifp->if_addrlist)->ifa_addr;
364: info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
365: }
366: rt_missmsg(RTM_ADD, &info, rt->rt_flags,
367: rt->rt_ifp, 0, tableid);
368: } else
369: rt->rt_refcnt++;
370: } else {
371: if (dst->sa_family != PF_KEY)
372: rtstat.rts_unreach++;
373: /*
374: * IP encapsulation does lots of lookups where we don't need nor want
375: * the RTM_MISSes that would be generated. It causes RTM_MISS storms
376: * sent upward breaking user-level routing queries.
377: */
378: miss:
379: if (report && dst->sa_family != PF_KEY) {
380: bzero((caddr_t)&info, sizeof(info));
381: info.rti_info[RTAX_DST] = dst;
382: rt_missmsg(msgtype, &info, 0, NULL, err, tableid);
383: }
384: }
385: splx(s);
386: return (newrt);
387: }
388:
/*
 * Drop one reference on a route.  When the last reference goes away
 * and the route is no longer in the table (RTF_UP clear), release the
 * timers, ifaddr reference, label, key storage and the entry itself.
 */
389: void
390: rtfree(struct rtentry *rt)
391: {
392: struct ifaddr *ifa;
393:
394: if (rt == NULL)
395: panic("rtfree");
396:
397: rt->rt_refcnt--;
398:
399: if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
/* A node still linked into the trie must never be freed. */
400: if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
401: panic("rtfree 2");
402: rttrash--;
/* Negative refcount indicates a bookkeeping bug; leak rather than free. */
403: if (rt->rt_refcnt < 0) {
404: printf("rtfree: %p not freed (neg refs)\n", rt);
405: return;
406: }
407: rt_timer_remove_all(rt);
408: ifa = rt->rt_ifa;
409: if (ifa)
410: IFAFREE(ifa);
411: rtlabel_unref(rt->rt_labelid);
412: Free(rt_key(rt));
413: pool_put(&rtentry_pool, rt);
414: }
415: }
416:
417: void
418: ifafree(struct ifaddr *ifa)
419: {
420: if (ifa == NULL)
421: panic("ifafree");
422: if (ifa->ifa_refcnt == 0)
423: free(ifa, M_IFADDR);
424: else
425: ifa->ifa_refcnt--;
426: }
427:
428: /*
429: * Force a routing table entry to the specified
430: * destination to go through the given gateway.
431: * Normally called as a result of a routing redirect
432: * message from the network layer.
433: *
434: * N.B.: must be called at splsoftnet
435: */
436: void
437: rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
438: struct sockaddr *netmask, int flags, struct sockaddr *src,
439: struct rtentry **rtp)
440: {
441: struct rtentry *rt;
442: int error = 0;
443: u_int32_t *stat = NULL;
444: struct rt_addrinfo info;
445: struct ifaddr *ifa;
446: struct ifnet *ifp = NULL;
447:
448: splassert(IPL_SOFTNET);
449:
450: /* verify the gateway is directly reachable */
451: if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
452: error = ENETUNREACH;
453: goto out;
454: }
455: ifp = ifa->ifa_ifp;
456: rt = rtalloc1(dst, 0, 0);
457: /*
458: * If the redirect isn't from our current router for this dst,
459: * it's either old or wrong. If it redirects us to ourselves,
460: * we have a routing loop, perhaps as a result of an interface
461: * going down recently.
462: */
463: #define equal(a1, a2) \
464: ((a1)->sa_len == (a2)->sa_len && \
465: bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
466: if (!(flags & RTF_DONE) && rt &&
467: (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
468: error = EINVAL;
469: else if (ifa_ifwithaddr(gateway) != NULL)
470: error = EHOSTUNREACH;
471: if (error)
472: goto done;
473: /*
474: * Create a new entry if we just got back a wildcard entry
475: * or the lookup failed. This is necessary for hosts
476: * which use routing redirects generated by smart gateways
477: * to dynamically build the routing tables.
478: */
479: if ((rt == NULL) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
480: goto create;
481: /*
482: * Don't listen to the redirect if it's
483: * for a route to an interface.
484: */
485: if (rt->rt_flags & RTF_GATEWAY) {
486: if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
487: /*
488: * Changing from route to net => route to host.
489: * Create new route, rather than smashing route to net.
490: */
491: create:
/* Entering here either from above (rt == NULL) or with an unusable rt. */
492: if (rt)
493: rtfree(rt);
494: flags |= RTF_GATEWAY | RTF_DYNAMIC;
495: bzero(&info, sizeof(info));
496: info.rti_info[RTAX_DST] = dst;
497: info.rti_info[RTAX_GATEWAY] = gateway;
498: info.rti_info[RTAX_NETMASK] = netmask;
499: info.rti_ifa = ifa;
500: info.rti_flags = flags;
501: rt = NULL;
502: error = rtrequest1(RTM_ADD, &info, &rt, 0);
503: if (rt != NULL)
504: flags = rt->rt_flags;
505: stat = &rtstat.rts_dynamic;
506: } else {
507: /*
508: * Smash the current notion of the gateway to
509: * this destination. Should check about netmask!!!
510: */
511: rt->rt_flags |= RTF_MODIFIED;
512: flags |= RTF_MODIFIED;
513: stat = &rtstat.rts_newgateway;
514: rt_setgate(rt, rt_key(rt), gateway, 0);
515: }
516: } else
517: error = EHOSTUNREACH;
518: done:
/* Hand the route to the caller on success, otherwise drop our reference. */
519: if (rt) {
520: if (rtp && !error)
521: *rtp = rt;
522: else
523: rtfree(rt);
524: }
525: out:
526: if (error)
527: rtstat.rts_badredirect++;
528: else if (stat != NULL)
529: (*stat)++;
/* Always notify routing-socket listeners about the redirect outcome. */
530: bzero((caddr_t)&info, sizeof(info));
531: info.rti_info[RTAX_DST] = dst;
532: info.rti_info[RTAX_GATEWAY] = gateway;
533: info.rti_info[RTAX_NETMASK] = netmask;
534: info.rti_info[RTAX_AUTHOR] = src;
535: rt_missmsg(RTM_REDIRECT, &info, flags, ifp, error, 0);
536: }
537:
538: /*
539: * Delete a route and generate a message
540: */
541: int
542: rtdeletemsg(struct rtentry *rt, u_int tableid)
543: {
544: int error;
545: struct rt_addrinfo info;
546: struct ifnet *ifp;
547:
548: /*
549: * Request the new route so that the entry is not actually
550: * deleted. That will allow the information being reported to
551: * be accurate (and consistent with route_output()).
552: */
553: bzero((caddr_t)&info, sizeof(info));
554: info.rti_info[RTAX_DST] = rt_key(rt);
555: info.rti_info[RTAX_NETMASK] = rt_mask(rt);
556: info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
557: info.rti_flags = rt->rt_flags;
558: ifp = rt->rt_ifp;
/* Passing &rt keeps the entry alive so the message below stays accurate. */
559: error = rtrequest1(RTM_DELETE, &info, &rt, tableid);
560:
561: rt_missmsg(RTM_DELETE, &info, info.rti_flags, ifp, error, tableid);
562:
563: /* Adjust the refcount */
/* Bump to 1 so rtfree() can drop the last reference and reclaim. */
564: if (error == 0 && rt->rt_refcnt <= 0) {
565: rt->rt_refcnt++;
566: rtfree(rt);
567: }
568: return (error);
569: }
570:
571: int
572: rtflushclone1(struct radix_node *rn, void *arg)
573: {
574: struct rtentry *rt, *parent;
575:
576: rt = (struct rtentry *)rn;
577: parent = (struct rtentry *)arg;
578: if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
579: rtdeletemsg(rt, 0);
580: return 0;
581: }
582:
583: void
584: rtflushclone(struct radix_node_head *rnh, struct rtentry *parent)
585: {
586:
587: #ifdef DIAGNOSTIC
588: if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
589: panic("rtflushclone: called with a non-cloning route");
590: if (!rnh->rnh_walktree)
591: panic("rtflushclone: no rnh_walktree");
592: #endif
593: rnh->rnh_walktree(rnh, rtflushclone1, (void *)parent);
594: }
595:
/*
 * Routing ioctl entry point: no ioctl commands are handled here;
 * everything returns EOPNOTSUPP.
 */
596: int
597: rtioctl(u_long req, caddr_t data, struct proc *p)
598: {
599: return (EOPNOTSUPP);
600: }
601:
/*
 * Find the interface address to associate with a route to `dst` via
 * `gateway`.  Tries, in order: destination/local address matches for
 * interface routes, point-to-point destination match for gateway
 * routes, a network match, and finally a recursive route lookup on
 * the gateway itself.  Returns NULL when nothing suitable is found.
 */
602: struct ifaddr *
603: ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
604: {
605: struct ifaddr *ifa;
606:
607: #ifdef IPSEC
608: /*
609: * If the destination is a PF_KEY address, we'll look
610: * for the existence of a encap interface number or address
611: * in the options list of the gateway. By default, we'll return
612: * enc0.
613: */
614: if (dst && (dst->sa_family == PF_KEY))
615: return (encap_findgwifa(gateway));
616: #endif
617:
618: if ((flags & RTF_GATEWAY) == 0) {
619: /*
620: * If we are adding a route to an interface,
621: * and the interface is a pt to pt link
622: * we should search for the destination
623: * as our clue to the interface. Otherwise
624: * we can use the local address.
625: */
626: ifa = NULL;
627: if (flags & RTF_HOST)
628: ifa = ifa_ifwithdstaddr(dst);
629: if (ifa == NULL)
630: ifa = ifa_ifwithaddr(gateway);
631: } else {
632: /*
633: * If we are adding a route to a remote net
634: * or host, the gateway may still be on the
635: * other end of a pt to pt link.
636: */
637: ifa = ifa_ifwithdstaddr(gateway);
638: }
639: if (ifa == NULL)
640: ifa = ifa_ifwithnet(gateway);
641: if (ifa == NULL) {
/* Last resort: route to the gateway and use that route's ifa. */
642: struct rtentry *rt = rtalloc1(gateway, 0, 0);
643: if (rt == NULL)
644: return (NULL);
645: rt->rt_refcnt--;
646: /* The gateway must be local if the same address family. */
647: if ((rt->rt_flags & RTF_GATEWAY) &&
648: rt_key(rt)->sa_family == dst->sa_family)
649: return (0);
650: if ((ifa = rt->rt_ifa) == NULL)
651: return (NULL);
652: }
/* Prefer an address of dst's family on the chosen interface, if any. */
653: if (ifa->ifa_addr->sa_family != dst->sa_family) {
654: struct ifaddr *oifa = ifa;
655: ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
656: if (ifa == NULL)
657: ifa = oifa;
658: }
659: return (ifa);
660: }
661:
/* Round a sockaddr length up to a multiple of sizeof(long); 0 yields sizeof(long). */
662: #define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
663:
664: int
665: rtrequest(int req, struct sockaddr *dst, struct sockaddr *gateway,
666: struct sockaddr *netmask, int flags, struct rtentry **ret_nrt,
667: u_int tableid)
668: {
669: struct rt_addrinfo info;
670:
671: bzero(&info, sizeof(info));
672: info.rti_flags = flags;
673: info.rti_info[RTAX_DST] = dst;
674: info.rti_info[RTAX_GATEWAY] = gateway;
675: info.rti_info[RTAX_NETMASK] = netmask;
676: return (rtrequest1(req, &info, ret_nrt, tableid));
677: }
678:
/*
 * Fill in info->rti_ifa (and rti_ifp) for a route request that did
 * not specify them, deriving the interface address from the IFP/IFA
 * sockaddrs, the gateway, or the destination, in that order.
 * Returns 0 on success or ENETUNREACH if no ifa can be found.
 */
679: int
680: rt_getifa(struct rt_addrinfo *info)
681: {
682: struct ifaddr *ifa;
683: int error = 0;
684:
685: /*
686: * ifp may be specified by sockaddr_dl when protocol address
687: * is ambiguous
688: */
689: if (info->rti_ifp == NULL && info->rti_info[RTAX_IFP] != NULL
690: && info->rti_info[RTAX_IFP]->sa_family == AF_LINK &&
691: (ifa = ifa_ifwithnet((struct sockaddr *)info->rti_info[RTAX_IFP]))
692: != NULL)
693: info->rti_ifp = ifa->ifa_ifp;
694:
695: if (info->rti_ifa == NULL && info->rti_info[RTAX_IFA] != NULL)
696: info->rti_ifa = ifa_ifwithaddr(info->rti_info[RTAX_IFA]);
697:
698: if (info->rti_ifa == NULL) {
699: struct sockaddr *sa;
700:
/* Pick the most specific hint available: IFA, then gateway, then dst. */
701: if ((sa = info->rti_info[RTAX_IFA]) == NULL)
702: if ((sa = info->rti_info[RTAX_GATEWAY]) == NULL)
703: sa = info->rti_info[RTAX_DST];
704:
705: if (sa != NULL && info->rti_ifp != NULL)
706: info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
707: else if (info->rti_info[RTAX_DST] != NULL &&
708: info->rti_info[RTAX_GATEWAY] != NULL)
709: info->rti_ifa = ifa_ifwithroute(info->rti_flags,
710: info->rti_info[RTAX_DST],
711: info->rti_info[RTAX_GATEWAY]);
712: else if (sa != NULL)
713: info->rti_ifa = ifa_ifwithroute(info->rti_flags,
714: sa, sa);
715: }
716: if ((ifa = info->rti_ifa) != NULL) {
717: if (info->rti_ifp == NULL)
718: info->rti_ifp = ifa->ifa_ifp;
719: } else
720: error = ENETUNREACH;
721: return (error);
722: }
723:
/*
 * Central routing-table mutation routine: handles RTM_DELETE,
 * RTM_RESOLVE (clone an existing RTF_CLONING route) and RTM_ADD.
 * On success, *ret_nrt (when non-NULL) receives the affected route
 * with a reference held by the caller.  Runs at splsoftnet.
 */
724: int
725: rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt,
726: u_int tableid)
727: {
728: int s = splsoftnet(); int error = 0;
729: struct rtentry *rt, *crt;
730: struct radix_node *rn;
731: struct radix_node_head *rnh;
732: struct ifaddr *ifa;
733: struct sockaddr *ndst;
734: struct sockaddr_rtlabel *sa_rl;
735: #define senderr(x) { error = x ; goto bad; }
736:
737: if ((rnh = rt_gettable(info->rti_info[RTAX_DST]->sa_family, tableid)) ==
738: NULL)
739: senderr(EAFNOSUPPORT);
/* Host routes never carry a netmask. */
740: if (info->rti_flags & RTF_HOST)
741: info->rti_info[RTAX_NETMASK] = NULL;
742: switch (req) {
743: case RTM_DELETE:
744: if ((rn = rnh->rnh_lookup(info->rti_info[RTAX_DST],
745: info->rti_info[RTAX_NETMASK], rnh)) == NULL)
746: senderr(ESRCH);
747: rt = (struct rtentry *)rn;
748: #ifndef SMALL_KERNEL
749: /*
750: * if we got multipath routes, we require users to specify
751: * a matching RTAX_GATEWAY.
752: */
753: if (rn_mpath_capable(rnh)) {
754: rt = rt_mpath_matchgate(rt,
755: info->rti_info[RTAX_GATEWAY]);
756: rn = (struct radix_node *)rt;
757: if (!rt)
758: senderr(ESRCH);
759: }
760: #endif
761: if ((rn = rnh->rnh_deladdr(info->rti_info[RTAX_DST],
762: info->rti_info[RTAX_NETMASK], rnh, rn)) == NULL)
763: senderr(ESRCH);
764: rt = (struct rtentry *)rn;
765:
766: /* clean up any cloned children */
767: if ((rt->rt_flags & RTF_CLONING) != 0)
768: rtflushclone(rnh, rt);
769:
770: if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
771: panic ("rtrequest delete");
772:
/* Drop the cached gateway route, if any. */
773: if (rt->rt_gwroute) {
774: rt = rt->rt_gwroute; RTFREE(rt);
775: (rt = (struct rtentry *)rn)->rt_gwroute = NULL;
776: }
777:
/* Detach from the parent of a cloned route. */
778: if (rt->rt_parent) {
779: rt->rt_parent->rt_refcnt--;
780: rt->rt_parent = NULL;
781: }
782:
783: #ifndef SMALL_KERNEL
/* If only one route remains for this key, it is no longer multipath. */
784: if (rn_mpath_capable(rnh)) {
785: if ((rn = rnh->rnh_lookup(info->rti_info[RTAX_DST],
786: info->rti_info[RTAX_NETMASK], rnh)) != NULL &&
787: rn_mpath_next(rn) == NULL)
788: ((struct rtentry *)rn)->rt_flags &= ~RTF_MPATH;
789: }
790: #endif
791:
792: rt->rt_flags &= ~RTF_UP;
793: if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
794: ifa->ifa_rtrequest(RTM_DELETE, rt, info);
795: rttrash++;
796:
/* Either hand the detached route to the caller or free it now. */
797: if (ret_nrt)
798: *ret_nrt = rt;
799: else if (rt->rt_refcnt <= 0) {
800: rt->rt_refcnt++;
801: rtfree(rt);
802: }
803: break;
804:
805: case RTM_RESOLVE:
806: if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
807: senderr(EINVAL);
808: if ((rt->rt_flags & RTF_CLONING) == 0)
809: senderr(EINVAL);
/* Inherit attributes from the cloning parent for the new child. */
810: ifa = rt->rt_ifa;
811: info->rti_flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
812: info->rti_flags |= RTF_CLONED;
813: info->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
814: if ((info->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
815: info->rti_flags |= RTF_HOST;
816: goto makeroute;
817:
818: case RTM_ADD:
819: if (info->rti_ifa == 0 && (error = rt_getifa(info)))
820: senderr(error);
821: ifa = info->rti_ifa;
822: makeroute:
823: rt = pool_get(&rtentry_pool, PR_NOWAIT);
824: if (rt == NULL)
825: senderr(ENOBUFS);
826: Bzero(rt, sizeof(*rt));
827: rt->rt_flags = RTF_UP | info->rti_flags;
828: LIST_INIT(&rt->rt_timer);
/* rt_setgate allocates the key+gateway storage; failure means no memory. */
829: if (rt_setgate(rt, info->rti_info[RTAX_DST],
830: info->rti_info[RTAX_GATEWAY], tableid)) {
831: pool_put(&rtentry_pool, rt);
832: senderr(ENOBUFS);
833: }
834: ndst = rt_key(rt);
/* Store the masked destination so the trie key is canonical. */
835: if (info->rti_info[RTAX_NETMASK] != NULL) {
836: rt_maskedcopy(info->rti_info[RTAX_DST], ndst,
837: info->rti_info[RTAX_NETMASK]);
838: } else
839: Bcopy(info->rti_info[RTAX_DST], ndst,
840: info->rti_info[RTAX_DST]->sa_len);
841: #ifndef SMALL_KERNEL
842: /* do not permit exactly the same dst/mask/gw pair */
843: if (rn_mpath_capable(rnh) &&
844: rt_mpath_conflict(rnh, rt, info->rti_info[RTAX_NETMASK],
845: info->rti_flags & RTF_MPATH)) {
846: if (rt->rt_gwroute)
847: rtfree(rt->rt_gwroute);
848: Free(rt_key(rt));
849: pool_put(&rtentry_pool, rt);
850: senderr(EEXIST);
851: }
852: #endif
853:
854: if (info->rti_info[RTAX_LABEL] != NULL) {
855: sa_rl = (struct sockaddr_rtlabel *)
856: info->rti_info[RTAX_LABEL];
857: rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label);
858: }
859:
860: ifa->ifa_refcnt++;
861: rt->rt_ifa = ifa;
862: rt->rt_ifp = ifa->ifa_ifp;
863: if (req == RTM_RESOLVE) {
864: /*
865: * Copy both metrics and a back pointer to the cloned
866: * route's parent.
867: */
868: rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
869: rt->rt_parent = *ret_nrt; /* Back ptr. to parent. */
870: rt->rt_parent->rt_refcnt++;
871: }
872: rn = rnh->rnh_addaddr((caddr_t)ndst,
873: (caddr_t)info->rti_info[RTAX_NETMASK], rnh, rt->rt_nodes);
/* Insertion collided with an existing entry; a stale clone may be evicted. */
874: if (rn == NULL && (crt = rtalloc1(ndst, 0, tableid)) != NULL) {
875: /* overwrite cloned route */
876: if ((crt->rt_flags & RTF_CLONED) != 0) {
877: rtdeletemsg(crt, tableid);
878: rn = rnh->rnh_addaddr((caddr_t)ndst,
879: (caddr_t)info->rti_info[RTAX_NETMASK],
880: rnh, rt->rt_nodes);
881: }
882: RTFREE(crt);
883: }
/* Still no node: undo everything done for this entry. */
884: if (rn == 0) {
885: IFAFREE(ifa);
886: if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
887: rtfree(rt->rt_parent);
888: if (rt->rt_gwroute)
889: rtfree(rt->rt_gwroute);
890: Free(rt_key(rt));
891: pool_put(&rtentry_pool, rt);
892: senderr(EEXIST);
893: }
894:
895: #ifndef SMALL_KERNEL
/* Recompute the multipath flag now that the key has another route. */
896: if (rn_mpath_capable(rnh) &&
897: (rn = rnh->rnh_lookup(info->rti_info[RTAX_DST],
898: info->rti_info[RTAX_NETMASK], rnh)) != NULL) {
899: if (rn_mpath_next(rn) == NULL)
900: ((struct rtentry *)rn)->rt_flags &= ~RTF_MPATH;
901: else
902: ((struct rtentry *)rn)->rt_flags |= RTF_MPATH;
903: }
904: #endif
905:
906: if (ifa->ifa_rtrequest)
907: ifa->ifa_rtrequest(req, rt, info);
908: if (ret_nrt) {
909: *ret_nrt = rt;
910: rt->rt_refcnt++;
911: }
912: if ((rt->rt_flags & RTF_CLONING) != 0) {
913: /* clean up any cloned children */
914: rtflushclone(rnh, rt);
915: }
916:
917: if_group_routechange(info->rti_info[RTAX_DST],
918: info->rti_info[RTAX_NETMASK]);
919: break;
920: }
921: bad:
922: splx(s);
923: return (error);
924: }
925:
/*
 * Set (or replace) the gateway of a route.  The destination key and
 * the gateway live in one contiguous allocation (key at offset 0,
 * gateway at offset dlen); the buffer is reallocated only when the
 * new gateway does not fit.  For RTF_GATEWAY routes the cached
 * rt_gwroute is re-resolved.  Returns 0 on success, 1 on allocation
 * failure.
 */
926: int
927: rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
928: u_int tableid)
929: {
930: caddr_t new, old;
931: int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
932: struct rtentry *rt = rt0;
933:
/* Grow the key+gateway buffer if the new gateway is larger. */
934: if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
935: old = (caddr_t)rt_key(rt);
936: R_Malloc(new, caddr_t, dlen + glen);
937: if (new == NULL)
938: return 1;
939: rt->rt_nodes->rn_key = new;
940: } else {
/* Reuse the existing buffer in place; nothing to free afterwards. */
941: new = rt->rt_nodes->rn_key;
942: old = NULL;
943: }
944: Bcopy(gate, (rt->rt_gateway = (struct sockaddr *)(new + dlen)), glen);
945: if (old) {
/* Copy the key into the new buffer only after the gateway is written. */
946: Bcopy(dst, new, dlen);
947: Free(old);
948: }
/* Invalidate any previously cached route to the old gateway. */
949: if (rt->rt_gwroute != NULL) {
950: rt = rt->rt_gwroute;
951: RTFREE(rt);
952: rt = rt0;
953: rt->rt_gwroute = NULL;
954: }
955: if (rt->rt_flags & RTF_GATEWAY) {
956: rt->rt_gwroute = rtalloc1(gate, 1, tableid);
957: /*
958: * If we switched gateways, grab the MTU from the new
959: * gateway route if the current MTU is 0 or greater
960: * than the MTU of gateway.
961: * Note that, if the MTU of gateway is 0, we will reset the
962: * MTU of the route to run PMTUD again from scratch. XXX
963: */
964: if (rt->rt_gwroute && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
965: rt->rt_rmx.rmx_mtu &&
966: rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
967: rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
968: }
969: }
970: return (0);
971: }
972:
/*
 * Copy src to dst, ANDing each byte with the corresponding netmask
 * byte.  The first two bytes (sa_len and sa_family) are copied
 * verbatim; bytes beyond the mask's length (but within src's length)
 * are zeroed.
 */
void
rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
    struct sockaddr *netmask)
{
	u_char *in = (u_char *)src;
	u_char *out = (u_char *)dst;
	u_char *mbyte = (u_char *)netmask;
	u_char *mend = out + *mbyte;	/* limited by mask length */
	u_char *send = out + *in;	/* limited by src length */

	if (mend > send)
		mend = send;
	/* sa_len and sa_family pass through unmasked */
	*out++ = *in++;
	*out++ = *in++;
	mbyte += 2;
	while (out < mend)
		*out++ = *in++ & *mbyte++;
	if (out < send)
		bzero((caddr_t)out, (unsigned)(send - out));
}
992:
993: /*
994: * Set up a routing table entry, normally
995: * for an interface.
996: */
997: int
998: rtinit(struct ifaddr *ifa, int cmd, int flags)
999: {
1000: struct rtentry *rt;
1001: struct sockaddr *dst, *deldst;
1002: struct mbuf *m = NULL;
1003: struct rtentry *nrt = NULL;
1004: int error;
1005: struct rt_addrinfo info;
1006: struct sockaddr_rtlabel sa_rl;
1007: const char *label;
1008:
1009: dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
1010: if (cmd == RTM_DELETE) {
1011: if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
1012: m = m_get(M_DONTWAIT, MT_SONAME);
1013: if (m == NULL)
1014: return (ENOBUFS);
1015: deldst = mtod(m, struct sockaddr *);
1016: rt_maskedcopy(dst, deldst, ifa->ifa_netmask);
1017: dst = deldst;
1018: }
1019: if ((rt = rtalloc1(dst, 0, 0)) != NULL) {
1020: rt->rt_refcnt--;
1021: if (rt->rt_ifa != ifa) {
1022: if (m != NULL)
1023: (void) m_free(m);
1024: return (flags & RTF_HOST ? EHOSTUNREACH
1025: : ENETUNREACH);
1026: }
1027: }
1028: }
1029: bzero(&info, sizeof(info));
1030: info.rti_ifa = ifa;
1031: info.rti_flags = flags | ifa->ifa_flags;
1032: info.rti_info[RTAX_DST] = dst;
1033: info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1034: if (ifa->ifa_ifp->if_rtlabelid) {
1035: label = rtlabel_id2name(ifa->ifa_ifp->if_rtlabelid);
1036: bzero(&sa_rl, sizeof(sa_rl));
1037: sa_rl.sr_len = sizeof(sa_rl);
1038: sa_rl.sr_family = AF_UNSPEC;
1039: strlcpy(sa_rl.sr_label, label, sizeof(sa_rl.sr_label));
1040: info.rti_info[RTAX_LABEL] = (struct sockaddr *)&sa_rl;
1041: }
1042:
1043: /*
1044: * XXX here, it seems that we are assuming that ifa_netmask is NULL
1045: * for RTF_HOST. bsdi4 passes NULL explicitly (via intermediate
1046: * variable) when RTF_HOST is 1. still not sure if i can safely
1047: * change it to meet bsdi4 behavior.
1048: */
1049: info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
1050: error = rtrequest1(cmd, &info, &nrt, 0);
1051: if (cmd == RTM_DELETE && error == 0 && (rt = nrt) != NULL) {
1052: rt_newaddrmsg(cmd, ifa, error, nrt);
1053: if (rt->rt_refcnt <= 0) {
1054: rt->rt_refcnt++;
1055: rtfree(rt);
1056: }
1057: }
1058: if (cmd == RTM_ADD && error == 0 && (rt = nrt) != NULL) {
1059: rt->rt_refcnt--;
1060: if (rt->rt_ifa != ifa) {
1061: printf("rtinit: wrong ifa (%p) was (%p)\n",
1062: ifa, rt->rt_ifa);
1063: if (rt->rt_ifa->ifa_rtrequest)
1064: rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
1065: IFAFREE(rt->rt_ifa);
1066: rt->rt_ifa = ifa;
1067: rt->rt_ifp = ifa->ifa_ifp;
1068: ifa->ifa_refcnt++;
1069: if (ifa->ifa_rtrequest)
1070: ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
1071: }
1072: rt_newaddrmsg(cmd, ifa, error, nrt);
1073: }
1074: return (error);
1075: }
1076:
1077: /*
1078: * Route timer routines. These routes allow functions to be called
1079: * for various routes at any time. This is useful in supporting
1080: * path MTU discovery and redirect route deletion.
1081: *
1082: * This is similar to some BSDI internal functions, but it provides
1083: * for multiple queues for efficiency's sake...
1084: */
1085:
/* Global list of all active route timer queues. */
LIST_HEAD(, rttimer_queue) rttimer_queue_head;
/* Nonzero once rt_timer_init() has run; guards one-time setup. */
static int rt_init_done = 0;

/*
 * Fire the action of rttimer r: invoke its callback if one was
 * registered, otherwise fall back to deleting the route outright.
 * Expands to a braced block — beware when used as an if() body.
 */
#define RTTIMER_CALLOUT(r)	{					\
	if (r->rtt_func != NULL) {					\
		(*r->rtt_func)(r->rtt_rt, r);				\
	} else {							\
		rtrequest((int) RTM_DELETE,				\
		    (struct sockaddr *)rt_key(r->rtt_rt),		\
		    0, 0, 0, 0, 0);					\
	}								\
}
1098:
/*
 * Some subtle order problems with domain initialization mean that
 * we cannot count on this being run from rt_init before various
 * protocol initializations are done.  Therefore, we make sure
 * that this is run when the first queue is added...
 */

/*
 * One-time initialization of the route-timer machinery: the rttimer
 * pool, the global queue list and the once-per-second sweep timeout.
 * Panics if called twice.
 */
void
rt_timer_init()
{
	static struct timeout rt_timer_timeout;

	if (rt_init_done)
		panic("rt_timer_init: already initialized");

	pool_init(&rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl",
	    NULL);

	LIST_INIT(&rttimer_queue_head);
	/* the timeout passes itself as argument so the handler can re-arm */
	timeout_set(&rt_timer_timeout, rt_timer_timer, &rt_timer_timeout);
	timeout_add(&rt_timer_timeout, hz);	/* every second */
	rt_init_done = 1;
}
1122:
1123: struct rttimer_queue *
1124: rt_timer_queue_create(u_int timeout)
1125: {
1126: struct rttimer_queue *rtq;
1127:
1128: if (rt_init_done == 0)
1129: rt_timer_init();
1130:
1131: R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
1132: if (rtq == NULL)
1133: return (NULL);
1134: Bzero(rtq, sizeof *rtq);
1135:
1136: rtq->rtq_timeout = timeout;
1137: rtq->rtq_count = 0;
1138: TAILQ_INIT(&rtq->rtq_head);
1139: LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);
1140:
1141: return (rtq);
1142: }
1143:
/*
 * Change the timeout of timer queue rtq.  Existing entries are
 * re-evaluated against the new value on the next sweep.
 */
void
rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
{
	rtq->rtq_timeout = timeout;
}
1149:
/*
 * Tear down timer queue rtq: unlink every pending rttimer from both
 * its route's list and the queue, firing each timer's action first
 * when "destroy" is set, and remove the queue from the global list.
 * The rttimer_queue structure itself is NOT freed here.
 */
void
rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy)
{
	struct rttimer *r;

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		pool_put(&rttimer_pool, r);
		/* rtq_count should always mirror the list length */
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_destroy: rtq_count reached 0\n");
	}

	LIST_REMOVE(rtq, rtq_link);

	/*
	 * Caller is responsible for freeing the rttimer_queue structure.
	 */
}
1173:
/*
 * Return the number of entries currently pending on timer queue rtq.
 */
unsigned long
rt_timer_count(struct rttimer_queue *rtq)
{
	return (rtq->rtq_count);
}
1179:
1180: void
1181: rt_timer_remove_all(struct rtentry *rt)
1182: {
1183: struct rttimer *r;
1184:
1185: while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
1186: LIST_REMOVE(r, rtt_link);
1187: TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
1188: if (r->rtt_queue->rtq_count > 0)
1189: r->rtt_queue->rtq_count--;
1190: else
1191: printf("rt_timer_remove_all: rtq_count reached 0\n");
1192: pool_put(&rttimer_pool, r);
1193: }
1194: }
1195:
/*
 * Arm (or re-arm) a timer on route rt that will invoke func when
 * "queue"'s timeout expires.  Only one timer per (route, func) pair
 * is kept: any existing one is discarded before the new one is added.
 * Returns 0 on success, ENOBUFS if no rttimer could be allocated.
 */
int
rt_timer_add(struct rtentry *rt, void (*func)(struct rtentry *,
    struct rttimer *), struct rttimer_queue *queue)
{
	struct rttimer *r;
	long current_time;

	current_time = time_uptime;
	/*
	 * NOTE(review): rtt_time below is on the time_uptime scale while
	 * rmx_expire is set from time_second (wall clock) — presumably
	 * deliberate since rmx_expire is externally visible; confirm.
	 */
	rt->rt_rmx.rmx_expire = time_second + queue->rtq_timeout;

	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	for (r = LIST_FIRST(&rt->rt_timer); r != NULL;
	    r = LIST_NEXT(r, rtt_link)) {
		if (r->rtt_func == func) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
			if (r->rtt_queue->rtq_count > 0)
				r->rtt_queue->rtq_count--;
			else
				printf("rt_timer_add: rtq_count reached 0\n");
			pool_put(&rttimer_pool, r);
			break;  /* only one per list, so we can quit... */
		}
	}

	r = pool_get(&rttimer_pool, PR_NOWAIT);
	if (r == NULL)
		return (ENOBUFS);
	Bzero(r, sizeof(*r));

	r->rtt_rt = rt;
	r->rtt_time = current_time;	/* timestamp used by rt_timer_timer() */
	r->rtt_func = func;
	r->rtt_queue = queue;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	/* tail insertion keeps each queue sorted oldest-first */
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	return (0);
}
1239:
/*
 * Return the radix tree head for address family af in routing table id.
 * NOTE(review): af and id are assumed valid — there is no bounds check
 * on either index here; callers must guarantee them.
 */
struct radix_node_head *
rt_gettable(sa_family_t af, u_int id)
{
	return (rt_tables[id][af2rtafidx[af]]);
}
1245:
1246: struct radix_node *
1247: rt_lookup(struct sockaddr *dst, struct sockaddr *mask, int tableid)
1248: {
1249: struct radix_node_head *rnh;
1250:
1251: if ((rnh = rt_gettable(dst->sa_family, tableid)) == NULL)
1252: return (NULL);
1253:
1254: return (rnh->rnh_lookup(dst, mask, rnh));
1255: }
1256:
/* ARGSUSED */
/*
 * Once-per-second timeout handler: sweep every timer queue, fire and
 * free all entries whose timeout has expired, then re-arm ourselves.
 * arg is the struct timeout itself (set up in rt_timer_init()).
 */
void
rt_timer_timer(void *arg)
{
	struct timeout *to = (struct timeout *)arg;
	struct rttimer_queue *rtq;
	struct rttimer *r;
	long current_time;
	int s;

	current_time = time_uptime;

	s = splsoftnet();	/* keep softnet out while lists are walked */
	for (rtq = LIST_FIRST(&rttimer_queue_head); rtq != NULL;
	    rtq = LIST_NEXT(rtq, rtq_link)) {
		/* entries are appended in order, so the head is oldest */
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < current_time) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			RTTIMER_CALLOUT(r);
			pool_put(&rttimer_pool, r);
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	splx(s);

	timeout_add(to, hz);	/* every second */
}
1288:
/*
 * Look up (or create) the numeric id for route label "name" and take
 * a reference on it.  Returns 0 on failure (empty name, id space
 * exhausted, or out of memory); valid ids start at 1.
 */
u_int16_t
rtlabel_name2id(char *name)
{
	struct rt_label *label, *p = NULL;
	u_int16_t new_id = 1;

	if (!name[0])
		return (0);

	/* existing label: just bump the refcount */
	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
		if (strcmp(name, label->rtl_name) == 0) {
			label->rtl_ref++;
			return (label->rtl_id);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/*
	 * The list is kept sorted by id, so the scan below stops at the
	 * first gap; p then points at the entry the new label is
	 * inserted before (or NULL to append).
	 */
	if (!TAILQ_EMPTY(&rt_labels))
		for (p = TAILQ_FIRST(&rt_labels); p != NULL &&
		    p->rtl_id == new_id; p = TAILQ_NEXT(p, rtl_entry))
			new_id = p->rtl_id + 1;

	if (new_id > LABELID_MAX)
		return (0);

	label = (struct rt_label *)malloc(sizeof(struct rt_label),
	    M_TEMP, M_NOWAIT);
	if (label == NULL)
		return (0);
	bzero(label, sizeof(struct rt_label));
	strlcpy(label->rtl_name, name, sizeof(label->rtl_name));
	label->rtl_id = new_id;
	label->rtl_ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, label, rtl_entry);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(&rt_labels, label, rtl_entry);

	return (label->rtl_id);
}
1334:
1335: const char *
1336: rtlabel_id2name(u_int16_t id)
1337: {
1338: struct rt_label *label;
1339:
1340: TAILQ_FOREACH(label, &rt_labels, rtl_entry)
1341: if (label->rtl_id == id)
1342: return (label->rtl_name);
1343:
1344: return (NULL);
1345: }
1346:
1347: void
1348: rtlabel_unref(u_int16_t id)
1349: {
1350: struct rt_label *p, *next;
1351:
1352: if (id == 0)
1353: return;
1354:
1355: for (p = TAILQ_FIRST(&rt_labels); p != NULL; p = next) {
1356: next = TAILQ_NEXT(p, rtl_entry);
1357: if (id == p->rtl_id) {
1358: if (--p->rtl_ref == 0) {
1359: TAILQ_REMOVE(&rt_labels, p, rtl_entry);
1360: free(p, M_TEMP);
1361: }
1362: break;
1363: }
1364: }
1365: }
1366:
/*
 * Delete every route in table 0 that points at interface ifp, for all
 * address families.  The walk is restarted as long as the delete
 * callback aborts it with EAGAIN (see rt_if_remove_rtdelete()).
 */
void
rt_if_remove(struct ifnet *ifp)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 1; i <= AF_MAX; i++)
		if ((rnh = rt_gettable(i, 0)) != NULL)
			while ((*rnh->rnh_walktree)(rnh,
			    rt_if_remove_rtdelete, ifp) == EAGAIN)
				;	/* nothing */
}
1379:
1380: /*
1381: * Note that deleting a RTF_CLONING route can trigger the
1382: * deletion of more entries, so we need to cancel the walk
1383: * and return EAGAIN. The caller should restart the walk
1384: * as long as EAGAIN is returned.
1385: */
1386: int
1387: rt_if_remove_rtdelete(struct radix_node *rn, void *vifp)
1388: {
1389: struct ifnet *ifp = vifp;
1390: struct rtentry *rt = (struct rtentry *)rn;
1391:
1392: if (rt->rt_ifp == ifp) {
1393: int cloning = (rt->rt_flags & RTF_CLONING);
1394:
1395: if (rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
1396: rt_mask(rt), 0, NULL, 0) == 0 && cloning)
1397: return (EAGAIN);
1398: }
1399:
1400: /*
1401: * XXX There should be no need to check for rt_ifa belonging to this
1402: * interface, because then rt_ifp is set, right?
1403: */
1404:
1405: return (0);
1406: }