Annotation of sys/net/pf_table.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: pf_table.c,v 1.70 2007/05/23 11:53:45 markus Exp $ */
2:
3: /*
4: * Copyright (c) 2002 Cedric Berger
5: * All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: *
11: * - Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * - Redistributions in binary form must reproduce the above
14: * copyright notice, this list of conditions and the following
15: * disclaimer in the documentation and/or other materials provided
16: * with the distribution.
17: *
18: * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19: * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20: * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21: * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22: * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24: * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25: * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26: * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28: * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: *
31: */
32:
33: #include <sys/param.h>
34: #include <sys/systm.h>
35: #include <sys/socket.h>
36: #include <sys/mbuf.h>
37: #include <sys/kernel.h>
38:
39: #include <net/if.h>
40: #include <net/route.h>
41: #include <netinet/in.h>
42: #include <netinet/ip_ipsp.h>
43: #include <net/pfvar.h>
44:
/*
 * Helper macros.  Every macro argument is parenthesized in the
 * expansion to avoid operator-precedence surprises when callers pass
 * expressions (the original left 'flags' bare).
 */
#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if (((flags) & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

/*
 * Copy between the caller's buffer and kernel memory.  For a user
 * ioctl (PFR_FLAG_USERIOCTL) the buffer is a userland pointer and
 * copyin/copyout must be used; otherwise it is kernel memory and a
 * plain bcopy (which cannot fail, hence the ", 0") suffices.
 */
#define COPYIN(from, to, size, flags)		\
	(((flags) & PFR_FLAG_USERIOCTL) ?	\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	(((flags) & PFR_FLAG_USERIOCTL) ?	\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

/* Fill in a sockaddr_in / sockaddr_in6 for the given address. */
#define FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = (a1);		\
		(a1) = (a2);			\
		(a2) = tmp;			\
	} while (0)

/* Extract the pf_addr inside a sockaddr_union for address family 'af'. */
#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define AF_BITS(af)		(((af)==AF_INET)?32:128)
/* An address with fewer prefix bits than the af allows is a network. */
#define ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)
96:
/*
 * Argument block handed to pfr_walktree() through rn_walktree();
 * pfrw_op selects what to do at each radix node, pfrw_1 carries the
 * per-operation payload.
 */
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,		/* clear the per-entry mark bit */
		PFRW_SWEEP,		/* enqueue entries still unmarked */
		PFRW_ENQUEUE,		/* enqueue every entry */
		PFRW_GET_ADDRS,		/* copy addresses out to the caller */
		PFRW_GET_ASTATS,	/* copy address statistics out */
		PFRW_POOL_GET,		/* fetch the n-th non-negated entry */
		PFRW_DYNADDR_UPDATE	/* refresh a dynamic address/mask */
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	 pfrw_free;	/* slots left in output buffer / entry count */
	int	 pfrw_flags;	/* PFR_FLAG_* from the originating request */
};
/* Convenience accessors for the union members above. */
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free	/* pfrw_free doubles as a counter */

/* Record an error code and jump to the function's cleanup label. */
#define senderr(e)	do { rv = (e); goto _bad; } while (0)
125:
/* Backing pools and prototype sockaddrs, set up in pfr_initialize(). */
struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;	/* for entries created with intr != 0 */
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;		/* all-ones address */

/* Internal helpers; definitions follow below. */
void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

/* Red-black tree of all tables, keyed by pfr_ktable_compare(). */
RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;		/* all-zero key, used for scratch tables */
int			 pfr_ktable_cnt;
187:
188: void
189: pfr_initialize(void)
190: {
191: pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
192: "pfrktable", &pool_allocator_oldnointr);
193: pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
194: "pfrkentry", &pool_allocator_oldnointr);
195: pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
196: "pfrkentry2", NULL);
197:
198: pfr_sin.sin_len = sizeof(pfr_sin);
199: pfr_sin.sin_family = AF_INET;
200: pfr_sin6.sin6_len = sizeof(pfr_sin6);
201: pfr_sin6.sin6_family = AF_INET6;
202:
203: memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
204: }
205:
/*
 * Remove every address from table 'tbl'; *ndel receives the number of
 * entries removed.  PFR_FLAG_DUMMY counts without modifying anything;
 * PFR_FLAG_ATOMIC brackets the removal with splsoftnet().
 */
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);		/* constant tables cannot be emptied */
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		/* every entry was enqueued above, so the count must be 0 */
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
237:
/*
 * Add the 'size' addresses in 'addr' to table 'tbl'; *nadd receives
 * the number actually added.  A scratch table (tmpkt) detects
 * duplicates within the request itself.  With PFR_FLAG_FEEDBACK a
 * per-address result code is written back into addr[].
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);	/* already in table? */
		q = pfr_lookup_addr(tmpkt, &ad, 1);	/* already in this request? */
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			/* route into tmpkt so later duplicates are caught */
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	/* detach the new entries from the scratch table before inserting */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
317:
/*
 * Delete the 'size' addresses in 'addr' from table 'tbl'; *ndel
 * receives the number actually deleted.  Entries are de-marked first
 * and then marked as they are queued, so duplicates within the
 * request are reported via PFR_FB_DUPLICATE feedback.
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * following code try to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;			/* log = floor(log2(N)) + 2 */
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		/* only queue entries whose negation flag matches exactly */
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
407:
/*
 * Replace the contents of table 'tbl' with the 'size' addresses in
 * 'addr': missing addresses are added, surplus entries deleted, and
 * entries whose negation flag differs are flipped.  With
 * PFR_FLAG_FEEDBACK the deleted addresses are appended to addr[]
 * (buffer capacity *size2); *size2 is set to the total size needed.
 */
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	/* entries still unmarked after the loop below get deleted */
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				/* address appears twice in the request */
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			/* scratch table catches duplicates within the request */
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			/* buffer too small for the deleted list; report size */
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
523:
524: int
525: pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
526: int *nmatch, int flags)
527: {
528: struct pfr_ktable *kt;
529: struct pfr_kentry *p;
530: struct pfr_addr ad;
531: int i, xmatch = 0;
532:
533: ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
534: if (pfr_validate_table(tbl, 0, 0))
535: return (EINVAL);
536: kt = pfr_lookup_table(tbl);
537: if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
538: return (ESRCH);
539:
540: for (i = 0; i < size; i++) {
541: if (COPYIN(addr+i, &ad, sizeof(ad), flags))
542: return (EFAULT);
543: if (pfr_validate_addr(&ad))
544: return (EINVAL);
545: if (ADDR_NETWORK(&ad))
546: return (EINVAL);
547: p = pfr_lookup_addr(kt, &ad, 0);
548: if (flags & PFR_FLAG_REPLACE)
549: pfr_copyout_addr(&ad, p);
550: ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
551: (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
552: if (p != NULL && !p->pfrke_not)
553: xmatch++;
554: if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
555: return (EFAULT);
556: }
557: if (nmatch != NULL)
558: *nmatch = xmatch;
559: return (0);
560: }
561:
/*
 * Copy every address of table 'tbl' out to 'addr'.  If the buffer
 * (*size entries) is too small, *size is set to the required count
 * and 0 is returned without copying anything.
 */
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;	/* output slots available */
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	/* pfrkt_cnt and the trees disagree if any slots are left over */
	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
600:
/*
 * Copy every address of table 'tbl' together with its statistics out
 * to 'addr'.  Like pfr_get_addrs(), a too-small buffer only reports
 * the required size.  PFR_FLAG_CLSTATS would additionally clear the
 * statistics, but that flag is currently not accepted (see below).
 */
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;	/* output slots available */
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	/* dead code while PFR_FLAG_CLSTATS is rejected above */
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
650:
/*
 * Zero the statistics of the given addresses in table 'tbl'; *nzero
 * receives the number of entries actually cleared.  PFR_FLAG_FEEDBACK
 * reports per-address whether the entry was found.
 */
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		/* tzero of 0 resets the entries' creation timestamp */
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
702:
703: int
704: pfr_validate_addr(struct pfr_addr *ad)
705: {
706: int i;
707:
708: switch (ad->pfra_af) {
709: #ifdef INET
710: case AF_INET:
711: if (ad->pfra_net > 32)
712: return (-1);
713: break;
714: #endif /* INET */
715: #ifdef INET6
716: case AF_INET6:
717: if (ad->pfra_net > 128)
718: return (-1);
719: break;
720: #endif /* INET6 */
721: default:
722: return (-1);
723: }
724: if (ad->pfra_net < 128 &&
725: (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
726: return (-1);
727: for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
728: if (((caddr_t)ad)[i])
729: return (-1);
730: if (ad->pfra_not && ad->pfra_not != 1)
731: return (-1);
732: if (ad->pfra_fback)
733: return (-1);
734: return (0);
735: }
736:
/*
 * Collect the table's entries into *workq.  With 'sweep' only entries
 * whose mark bit is clear are collected (pair with pfr_mark_addrs());
 * otherwise every entry is.  *naddr, if non-NULL, receives the count.
 */
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	 w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
756:
/*
 * Clear the mark bit on every entry of the table.
 * NOTE(review): unlike pfr_enqueue_addrs() this does not NULL-check
 * pfrkt_ip4/pfrkt_ip6 -- presumably callers only pass tables whose
 * radix heads were attached; confirm before reusing elsewhere.
 */
void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
769:
770:
/*
 * Find the entry matching 'ad' in table 'kt'.  Network addresses use
 * an exact rn_lookup(); host addresses use rn_match(), and with
 * 'exact' set a network entry that merely covers the host is not
 * accepted.  Returns NULL when nothing (acceptable) matches.
 *
 * Assumes ad->pfra_af is AF_INET or AF_INET6 (callers run
 * pfr_validate_addr() first); otherwise 'head' is used uninitialized.
 */
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if ( ad->pfra_af == AF_INET6 ) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		/* the radix tree's internal root nodes are not entries */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
803:
804: struct pfr_kentry *
805: pfr_create_kentry(struct pfr_addr *ad, int intr)
806: {
807: struct pfr_kentry *ke;
808:
809: if (intr)
810: ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
811: else
812: ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
813: if (ke == NULL)
814: return (NULL);
815: bzero(ke, sizeof(*ke));
816:
817: if (ad->pfra_af == AF_INET)
818: FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
819: else if (ad->pfra_af == AF_INET6)
820: FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
821: ke->pfrke_af = ad->pfra_af;
822: ke->pfrke_net = ad->pfra_net;
823: ke->pfrke_not = ad->pfra_not;
824: ke->pfrke_intrpool = intr;
825: return (ke);
826: }
827:
828: void
829: pfr_destroy_kentries(struct pfr_kentryworkq *workq)
830: {
831: struct pfr_kentry *p, *q;
832:
833: for (p = SLIST_FIRST(workq); p != NULL; p = q) {
834: q = SLIST_NEXT(p, pfrke_workq);
835: pfr_destroy_kentry(p);
836: }
837: }
838:
839: void
840: pfr_destroy_kentry(struct pfr_kentry *ke)
841: {
842: if (ke->pfrke_intrpool)
843: pool_put(&pfr_kentry_pl2, ke);
844: else
845: pool_put(&pfr_kentry_pl, ke);
846: }
847:
/*
 * Route each queued entry into table 'kt' and stamp it with 'tzero'.
 * Stops at the first routing failure; the table count is advanced
 * only by the number of entries actually inserted.
 */
void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}
867:
868: int
869: pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
870: {
871: struct pfr_kentry *p;
872: int rv;
873:
874: p = pfr_lookup_addr(kt, ad, 1);
875: if (p != NULL)
876: return (0);
877: p = pfr_create_kentry(ad, 1);
878: if (p == NULL)
879: return (EINVAL);
880:
881: rv = pfr_route_kentry(kt, p);
882: if (rv)
883: return (rv);
884:
885: p->pfrke_tzero = tzero;
886: kt->pfrkt_cnt++;
887:
888: return (0);
889: }
890:
891: void
892: pfr_remove_kentries(struct pfr_ktable *kt,
893: struct pfr_kentryworkq *workq)
894: {
895: struct pfr_kentry *p;
896: int n = 0;
897:
898: SLIST_FOREACH(p, workq, pfrke_workq) {
899: pfr_unroute_kentry(kt, p);
900: n++;
901: }
902: kt->pfrkt_cnt -= n;
903: pfr_destroy_kentries(workq);
904: }
905:
906: void
907: pfr_clean_node_mask(struct pfr_ktable *kt,
908: struct pfr_kentryworkq *workq)
909: {
910: struct pfr_kentry *p;
911:
912: SLIST_FOREACH(p, workq, pfrke_workq)
913: pfr_unroute_kentry(kt, p);
914: }
915:
/*
 * Zero the packet/byte counters of every queued entry and reset its
 * tzero to 'tzero'.  With 'negchange' the negation flag is inverted
 * as well (pfr_set_addrs() uses this via INVERT_NEG_FLAG).  Counter
 * access is bracketed per-entry with splsoftnet().
 */
void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}
932:
933: void
934: pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
935: {
936: struct pfr_addr ad;
937: int i;
938:
939: for (i = 0; i < size; i++) {
940: if (COPYIN(addr+i, &ad, sizeof(ad), flags))
941: break;
942: ad.pfra_fback = PFR_FB_NONE;
943: if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
944: break;
945: }
946: }
947:
948: void
949: pfr_prepare_network(union sockaddr_union *sa, int af, int net)
950: {
951: int i;
952:
953: bzero(sa, sizeof(*sa));
954: if (af == AF_INET) {
955: sa->sin.sin_len = sizeof(sa->sin);
956: sa->sin.sin_family = AF_INET;
957: sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
958: } else if (af == AF_INET6) {
959: sa->sin6.sin6_len = sizeof(sa->sin6);
960: sa->sin6.sin6_family = AF_INET6;
961: for (i = 0; i < 4; i++) {
962: if (net <= 32) {
963: sa->sin6.sin6_addr.s6_addr32[i] =
964: net ? htonl(-1 << (32-net)) : 0;
965: break;
966: }
967: sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
968: net -= 32;
969: }
970: }
971: }
972:
/*
 * Insert entry 'ke' into the proper radix tree of table 'kt'.
 * Returns 0 on success, -1 when rn_addroute() refuses the entry.
 * Assumes pfrke_af is AF_INET or AF_INET6 (entries are built from
 * validated addresses); 'head' is used uninitialized otherwise.
 */
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();	/* radix code uses globals */
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}
997:
/*
 * Remove entry 'ke' from the proper radix tree of table 'kt'.
 * Returns 0 on success, -1 (with a console warning) when rn_delete()
 * cannot find the entry.  Same af assumption as pfr_route_kentry().
 */
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();	/* radix code uses globals */
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
1025:
1026: void
1027: pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1028: {
1029: bzero(ad, sizeof(*ad));
1030: if (ke == NULL)
1031: return;
1032: ad->pfra_af = ke->pfrke_af;
1033: ad->pfra_net = ke->pfrke_net;
1034: ad->pfra_not = ke->pfrke_not;
1035: if (ad->pfra_af == AF_INET)
1036: ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1037: else if (ad->pfra_af == AF_INET6)
1038: ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1039: }
1040:
/*
 * rn_walktree() callback: dispatch on the operation selected in the
 * pfr_walktree argument block.  Returning non-zero aborts the walk
 * and is propagated to the rn_walktree() caller.
 */
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			/*
			 * NOTE(review): raw copyout() here, while
			 * PFRW_GET_ASTATS below honors PFR_FLAG_USERIOCTL
			 * via COPYOUT() -- confirm this is intentional.
			 */
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			/* counters are spl-protected while being snapshotted */
			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		/* only the first entry of each family feeds the dynaddr */
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6){
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}
1119:
/*
 * DIOCRCLRTABLES: deactivate every ACTIVE table matching the filter's
 * anchor (tables in the reserved anchor are skipped).  The flag change
 * is applied via pfr_setflags_ktables() unless PFR_FLAG_DUMMY is set;
 * *ndel reports the number of tables affected either way.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		/* dropping ACTIVE marks the table for teardown */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1157:
/*
 * DIOCRADDTABLES: create (or re-activate) the 'size' tables described
 * by the userland array 'tbl'.  Newly created tables in a non-root
 * anchor also get a root table created/linked (pfrkt_root).  New tables
 * are staged on 'addq' and flag changes on 'changeq', then committed
 * together unless PFR_FLAG_DUMMY.  *nadd reports how many were added.
 * 'senderr' (defined earlier in this file) jumps to _bad, which undoes
 * the staged creations.
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			/* skip duplicates already staged in this request */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* root may also be pending on addq from this batch */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* table exists but is inactive: re-activate it */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1237:
/*
 * DIOCRDELTABLES: mark the given ACTIVE tables for removal by clearing
 * their ACTIVE flag; the flag commit (and possible destruction) happens
 * in pfr_setflags_ktables() unless PFR_FLAG_DUMMY is set.  *ndel
 * reports the number of tables queued.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* ignore duplicates within this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1277:
/*
 * DIOCRGETTABLES: copy the tables matching 'filter' out to the userland
 * array 'tbl'.  *size is in/out: if the caller's buffer is too small,
 * only the required count is returned (no copyout).  A final non-zero
 * 'n' means the tree changed underneath us and is reported as ENOTTY.
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		/* buffer too small: tell caller how much is needed */
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1310:
/*
 * DIOCRGETTSTATS: copy per-table statistics out to userland.  With
 * PFR_FLAG_ATOMIC the whole walk runs at splsoftnet; otherwise spl is
 * raised only around each individual copyout.  Tables visited are
 * queued so their stats can be cleared afterwards when
 * PFR_FLAG_CLSTATS is set (currently disabled by ACCEPT_FLAGS, see XXX).
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		/* buffer too small: report required size only */
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		/* 's' is valid here on both the atomic and per-entry paths */
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1361:
/*
 * DIOCRCLRTSTATS: zero the statistics of the named tables (and of their
 * addresses too with PFR_FLAG_ADDRSTOO).  *nzero reports how many
 * tables were found; nothing is cleared under PFR_FLAG_DUMMY.
 */
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
1395:
/*
 * DIOCRSETTFLAGS: set and/or clear user-settable flags on the named
 * tables.  setflag/clrflag must be within PFR_TFLAG_USRMASK and must
 * not overlap.  Clearing PERSIST on an unreferenced table effectively
 * deletes it, so such tables count toward *ndel rather than *nchange.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;	/* no effective change */
			/* ignore duplicates within this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1449:
/*
 * DIOCRINABEGIN: open a transaction on the ruleset's inactive table
 * set.  Any leftover INACTIVE tables from a previous, uncommitted
 * transaction are discarded first (counted in *ndel), then a fresh
 * ticket is issued via *ticket.  PFR_FLAG_DUMMY only previews.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		/* drop stale INACTIVE state from an aborted transaction */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1482:
/*
 * DIOCRINADEFINE: define one table (and optionally its addresses) in
 * the currently open transaction identified by 'ticket'.  The addresses
 * are loaded into a detached shadow ktable which is attached to the
 * live table's pfrkt_shadow; pfr_ina_commit() later swaps it in.
 * *nadd counts new tables, *naddr the addresses loaded.  'senderr'
 * jumps to _bad, which tears down everything staged so far.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);	/* no transaction open, or stale ticket */
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	/* detached shadow table collects the addresses for this define */
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;	/* duplicate address, skip */
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace any shadow left by an earlier define of the table */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1582:
/*
 * DIOCRINAROLLBACK: abort the open transaction matching 'ticket' and
 * discard every INACTIVE table staged in it.  A missing/closed
 * transaction or a stale ticket is not an error (returns 0).
 */
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1613:
/*
 * DIOCRINACOMMIT: commit the open transaction matching 'ticket' by
 * swapping every staged INACTIVE table into place via
 * pfr_commit_ktable().  *nadd counts tables that were new, *nchange
 * tables that replaced an ACTIVE one.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
	int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		/*
		 * Fetch the next pointer before committing: the commit
		 * may unlink p from the work queue.
		 */
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
1660:
/*
 * Swap a table's staged shadow into the live table.  Three cases:
 * the shadow holds no address list (stats reset only), the live table
 * is ACTIVE (merge address-by-address so stats of unchanged entries
 * survive), or the live table is empty (cheap radix-head swap).
 * Finally the shadow is destroyed and the new flags committed.
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* address already live; keep q, junk p */
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		/* anything still unmarked in kt was not redefined: delete */
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1719:
1720: int
1721: pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1722: {
1723: int i;
1724:
1725: if (!tbl->pfrt_name[0])
1726: return (-1);
1727: if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1728: return (-1);
1729: if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1730: return (-1);
1731: for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1732: if (tbl->pfrt_name[i])
1733: return (-1);
1734: if (pfr_fix_anchor(tbl->pfrt_anchor))
1735: return (-1);
1736: if (tbl->pfrt_flags & ~allowedflags)
1737: return (-1);
1738: return (0);
1739: }
1740:
1741: /*
1742: * Rewrite anchors referenced by tables to remove slashes
1743: * and check for validity.
1744: */
int
pfr_fix_anchor(char *anchor)
{
	size_t	 siz = MAXPATHLEN;
	int	 i;

	/* strip any leading slashes, shifting the path left in place */
	if (anchor[0] == '/') {
		char	*from = anchor;
		int	 nstrip = 1;

		while (*++from == '/')
			nstrip++;
		bcopy(from, anchor, siz - nstrip);
		memset(anchor + siz - nstrip, 0, nstrip);
	}
	/* the buffer must be NUL-terminated and NUL-padded to its end */
	if (anchor[siz - 1] != '\0')
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i] != '\0')
			return (-1);
	return (0);
}
1769:
1770: int
1771: pfr_table_count(struct pfr_table *filter, int flags)
1772: {
1773: struct pf_ruleset *rs;
1774:
1775: if (flags & PFR_FLAG_ALLRSETS)
1776: return (pfr_ktable_cnt);
1777: if (filter->pfrt_anchor[0]) {
1778: rs = pf_find_ruleset(filter->pfrt_anchor);
1779: return ((rs != NULL) ? rs->tables : -1);
1780: }
1781: return (pf_main_ruleset.tables);
1782: }
1783:
1784: int
1785: pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1786: {
1787: if (flags & PFR_FLAG_ALLRSETS)
1788: return (0);
1789: if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1790: return (1);
1791: return (0);
1792: }
1793:
1794: void
1795: pfr_insert_ktables(struct pfr_ktableworkq *workq)
1796: {
1797: struct pfr_ktable *p;
1798:
1799: SLIST_FOREACH(p, workq, pfrkt_workq)
1800: pfr_insert_ktable(p);
1801: }
1802:
/*
 * Link one table into the global RB tree and bump the global count.
 * The first anchored child referencing a root table also tags the
 * root with PFR_TFLAG_REFDANCHOR (refcount goes 0 -> 1).
 */
void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
1813:
/*
 * Apply the staged pfrkt_nflags of every table on the queue.
 * NOTE: pfr_setflags_ktable() may destroy the table (and thus its
 * queue linkage), so the next pointer must be fetched before the call
 * -- do not convert this to SLIST_FOREACH.
 */
void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}
1824:
/*
 * Commit a new flag word on one table, performing the side effects the
 * transition implies: a table that loses all of PFR_TFLAG_SETMASK is
 * unlinked and destroyed (dropping its root-anchor reference); a table
 * going inactive has its addresses flushed; a table leaving the
 * INACTIVE state drops any staged shadow.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	 addrq;

	/* ACTIVE requires being referenced or persistent */
	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* nothing keeps the table alive: tear it down completely */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
1856:
1857: void
1858: pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1859: {
1860: struct pfr_ktable *p;
1861:
1862: SLIST_FOREACH(p, workq, pfrkt_workq)
1863: pfr_clstats_ktable(p, tzero, recurse);
1864: }
1865:
/*
 * Zero one table's counters (and, with 'recurse', those of all its
 * addresses) and reset its tzero timestamp.  The counter wipe runs at
 * splsoftnet because the forwarding path updates them there.
 */
void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}
1883:
/*
 * Allocate and initialize a kernel table: zeroed pool node, copy of the
 * userland spec, per-AF radix heads, and (with 'attachruleset') a
 * reference on the anchor's ruleset.  Returns NULL on allocation
 * failure; partial state is released via pfr_destroy_ktable().
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* radix keys are the in_addr/in6_addr bits within the sockaddrs */
	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
1917:
/*
 * Destroy every table on the work queue.  The next pointer is fetched
 * before each destroy because the node is freed by the call -- do not
 * convert this to SLIST_FOREACH.
 */
void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}
1928:
/*
 * Free one table: optionally flush its address entries, release both
 * radix heads and any attached shadow (recursively), drop the ruleset
 * reference and return the node to the pool.
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
1951:
1952: int
1953: pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1954: {
1955: int d;
1956:
1957: if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1958: return (d);
1959: return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1960: }
1961:
/*
 * Find a table in the global tree by its userland spec.  The cast is
 * safe because struct pfr_ktable begins with an embedded
 * struct pfr_table, which is all the comparator reads.
 */
struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable start like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}
1969:
/*
 * Fast-path lookup used by the packet filter: does address 'a' match
 * table 'kt'?  Inactive anchored tables defer to their root.  Negated
 * entries count as non-matches.  Updates the table's match/nomatch
 * counters and returns 1 on match, 0 otherwise.
 */
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		/* pfr_sin/pfr_sin6 are file-global scratch sockaddrs */
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
2006:
/*
 * Account one packet against a table: bump the table's (and, when a
 * concrete entry matched, the entry's) packet/byte counters, indexed by
 * direction and rule action.  A mismatch between the lookup result and
 * the 'notrule' expectation is downgraded to PFR_OP_XPASS and reported.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
	u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
2050:
/*
 * Take a rule reference on the named table within ruleset 'rs',
 * creating the table (and, inside an anchor, its root table) on first
 * use.  The first rule reference also sets PFR_TFLAG_REFERENCED.
 * Returns the table or NULL on allocation failure.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			/* same name with an empty anchor = the root table */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
2086:
2087: void
2088: pfr_detach_table(struct pfr_ktable *kt)
2089: {
2090: if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2091: printf("pfr_detach_table: refcount = %d.\n",
2092: kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2093: else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2094: pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2095: }
2096:
2097: int
2098: pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2099: struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2100: {
2101: struct pfr_kentry *ke, *ke2;
2102: struct pf_addr *addr;
2103: union sockaddr_union mask;
2104: int idx = -1, use_counter = 0;
2105:
2106: if (af == AF_INET)
2107: addr = (struct pf_addr *)&pfr_sin.sin_addr;
2108: else if (af == AF_INET6)
2109: addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2110: if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2111: kt = kt->pfrkt_root;
2112: if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2113: return (-1);
2114:
2115: if (pidx != NULL)
2116: idx = *pidx;
2117: if (counter != NULL && idx >= 0)
2118: use_counter = 1;
2119: if (idx < 0)
2120: idx = 0;
2121:
2122: _next_block:
2123: ke = pfr_kentry_byidx(kt, idx, af);
2124: if (ke == NULL)
2125: return (1);
2126: pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2127: *raddr = SUNION2PF(&ke->pfrke_sa, af);
2128: *rmask = SUNION2PF(&pfr_mask, af);
2129:
2130: if (use_counter) {
2131: /* is supplied address within block? */
2132: if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2133: /* no, go to next block in table */
2134: idx++;
2135: use_counter = 0;
2136: goto _next_block;
2137: }
2138: PF_ACPY(addr, counter, af);
2139: } else {
2140: /* use first address of block */
2141: PF_ACPY(addr, *raddr, af);
2142: }
2143:
2144: if (!KENTRY_NETWORK(ke)) {
2145: /* this is a single IP address - no possible nested block */
2146: PF_ACPY(counter, addr, af);
2147: *pidx = idx;
2148: return (0);
2149: }
2150: for (;;) {
2151: /* we don't want to use a nested block */
2152: if (af == AF_INET)
2153: ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2154: kt->pfrkt_ip4);
2155: else if (af == AF_INET6)
2156: ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2157: kt->pfrkt_ip6);
2158: /* no need to check KENTRY_RNF_ROOT() here */
2159: if (ke2 == ke) {
2160: /* lookup return the same block - perfect */
2161: PF_ACPY(counter, addr, af);
2162: *pidx = idx;
2163: return (0);
2164: }
2165:
2166: /* we need to increase the counter past the nested block */
2167: pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2168: PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2169: PF_AINC(addr, af);
2170: if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2171: /* ok, we reached the end of our main block */
2172: /* go to next block in table */
2173: idx++;
2174: use_counter = 0;
2175: goto _next_block;
2176: }
2177: }
2178: }
2179:
/*
 * Return the idx-th non-negated entry of the table's radix tree for the
 * given address family, or NULL.  Implemented as a PFRW_POOL_GET tree
 * walk: pfr_walktree() decrements pfrw_cnt and stops at zero.
 */
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}
2204:
2205: void
2206: pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2207: {
2208: struct pfr_walktree w;
2209: int s;
2210:
2211: bzero(&w, sizeof(w));
2212: w.pfrw_op = PFRW_DYNADDR_UPDATE;
2213: w.pfrw_dyn = dyn;
2214:
2215: s = splsoftnet();
2216: dyn->pfid_acnt4 = 0;
2217: dyn->pfid_acnt6 = 0;
2218: if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2219: rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2220: if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2221: rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2222: splx(s);
2223: }