/*	$NetBSD: pf_table.c,v 1.14 2008/06/18 09:06:27 yamt Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.70 2007/05/23 11:53:45 markus Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_table.c,v 1.14 2008/06/18 09:06:27 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/kernel.h>

#include <net/route.h>
#include <netinet/in.h>
#ifndef __NetBSD__
#include <netinet/ip_ipsp.h>
#endif /* !__NetBSD__ */
#include <net/pfvar.h>
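
/*
 * The entry points below are reached both from userland (via pfioctl)
 * and from within the kernel.  COPYIN/COPYOUT switch between
 * copyin(9)/copyout(9) and a plain bcopy() based on PFR_FLAG_USERIOCTL,
 * since in-kernel callers pass buffers that are already in kernel
 * space; ACCEPT_FLAGS rejects requests carrying flags outside the
 * per-operation whitelist.
 */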
#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))
#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)
#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)
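
/*
 * Context passed to pfr_walktree(): pfrw_op selects the operation to
 * perform on each radix node, and the union carries the per-operation
 * cursor (output buffer, work queue, result entry, or dynaddr state).
 */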
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free
#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;
void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);
RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;
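
/*
 * pfr_initialize() sets up the table/entry pools and the static
 * template sockaddrs used for radix lookups.  The NetBSD and OpenBSD
 * pool_init() signatures differ, hence the conditional compilation
 * below.
 */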
void
pfr_initialize(void)
{
#ifdef __NetBSD__
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_nointr, IPL_NONE);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_nointr, IPL_NONE);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL, IPL_SOFTNET);
#else
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);
#endif /* !__NetBSD__ */

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}
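
/*
 * Common flag semantics for the pfr_*() ioctl backends below:
 * PFR_FLAG_DUMMY computes the result counters without committing
 * anything, PFR_FLAG_ATOMIC wraps the commit in splsoftnet(), and
 * PFR_FLAG_FEEDBACK copies a per-address result code (pfra_fback)
 * back to the caller.
 */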
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
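
/*
 * Deletion clears the pfrke_mark flag on the candidate entries first
 * (either via a full-table walk or per-address lookups, see the
 * heuristic below); the main loop then sets the mark as it queues
 * each entry, which catches duplicates in the request.
 */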
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * the following code tries to decide which one is best;
	 * e.g. with N = 1024 the loop below leaves log = 12, so
	 * deleting more than 1024/12 = 85 addresses takes the
	 * full-table scan.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
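
/*
 * pfr_set_addrs() replaces the table contents: addresses present in
 * the input but not in the table are added, table entries absent from
 * the input are deleted, and entries whose negation flag differs are
 * queued on changeq and have their stats reset.
 */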
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0, xdel = 0,
				 xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0 /* XXX gcc */;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
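
/*
 * Reject malformed addresses: bad address family, prefix length out
 * of range, non-zero host bits below the prefix, or trailing junk in
 * the unused part of the address union.
 */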
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((char *)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((char *)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
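
/*
 * Look up an address in a table.  Network entries use an exact
 * rn_lookup() with the prepared mask; host entries use rn_match(),
 * and with 'exact' set a merely covering network entry is not
 * accepted.
 */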
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = (void *)0xdeadb;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
	else
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}
void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}
void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}
void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}
void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}
void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}
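
/*
 * Build a netmask sockaddr for a given prefix length; e.g. for
 * af=AF_INET and net=19 this yields 255.255.224.0.  For IPv6 the
 * mask is filled 32 bits at a time.
 */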
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}
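
/*
 * Radix tree callback: dispatches on pfrw_op to mark, sweep, enqueue,
 * copy out addresses or stats, pick the idx'th non-negated entry, or
 * update a dynaddr.  Returning non-zero aborts the walk.
 */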
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6,
			    ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0 /* XXX gcc */, n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0 /* XXX gcc */, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 1;
		*ticket = ++rs->tticket;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0 /* XXX gcc */, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
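
/*
 * Swap a shadow table into place at commit time.  Three cases: the
 * shadow defines no addresses (flags-only change), the target is
 * active (merge entry by entry, preserving the stats of unchanged
 * entries), or the target holds no addresses (just swap the radix
 * heads).
 */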
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}
/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}
int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}
void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}
void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;
	void			*h4 = NULL, *h6 = NULL;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead(&h4, offsetof(struct sockaddr_in, sin_addr) * 8))
		goto _bad;
	if (!rn_inithead(&h6, offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		free(h4, M_RTABLE);
		goto _bad;
	}
	kt->pfrkt_ip4 = h4;
	kt->pfrkt_ip6 = h6;
	kt->pfrkt_tzero = tzero;

	return (kt);
_bad:
	pfr_destroy_ktable(kt, 0);
	return (NULL);
}
void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((void *)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((void *)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}
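
/*
 * Fast path used at packet-filtering time: match an address against
 * a table and update the match/nomatch counters.  Inactive tables
 * defer to their root table.
 */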
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
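
/*
 * Account packet/byte counters for a table entry after a rule match;
 * a mismatch between the lookup result and 'notrule' is counted as
 * PFR_OP_XPASS on the table instead of the entry.
 */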
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
int
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (ENOMEM);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (ENOMEM);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (0);
}
void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}
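
/*
 * Round-robin address selection for source-address pools: walk the
 * table's blocks by index, skipping nested sub-blocks, and remember
 * the position in *pidx/counter for the next call.
 */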
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2 = (void *)0xdeadb;
	struct pf_addr		*addr = (void *)0xdeadb;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}