2 /* $OpenBSD: pfctl_optimize.c,v 1.13 2006/10/31 14:17:45 mcbride Exp $ */
5 * Copyright (c) 2004 Mike Frantzen <frantzen@openbsd.org>
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 #include <sys/types.h>
21 #include <sys/ioctl.h>
22 #include <sys/socket.h>
25 #include <net/pfvar.h>
27 #include <netinet/in.h>
28 #include <arpa/inet.h>
39 #include "pfctl_parser.h"
42 /* The size at which a table becomes faster than individual rules */
43 #define TABLE_THRESHOLD 6
46 /* #define OPT_DEBUG 1 */
48 # define DEBUG(str, v...) \
49 printf("%s: " str "\n", __FUNCTION__ , ## v)
51 # define DEBUG(str, v...) ((void)0)
56 * A container that lets us sort a superblock to optimize the skip step jumps
59 int ps_count
; /* number of items */
60 TAILQ_HEAD( , pf_opt_rule
) ps_rules
;
61 TAILQ_ENTRY(pf_skip_step
) ps_entry
;
66 * A superblock is a block of adjacent rules of similar action. If there
67 * are five PASS rules in a row, they all become members of a superblock.
68 * Once we have a superblock, we are free to re-order any rules within it
69 * in order to improve performance; if a packet is passed, it doesn't matter
73 TAILQ_HEAD( , pf_opt_rule
) sb_rules
;
74 TAILQ_ENTRY(superblock
) sb_entry
;
75 struct superblock
*sb_profiled_block
;
76 TAILQ_HEAD(skiplist
, pf_skip_step
) sb_skipsteps
[PF_SKIP_COUNT
];
78 TAILQ_HEAD(superblocks
, superblock
);
82 * Description of the PF rule structure.
85 BARRIER
, /* the presence of the field puts the rule in its own block */
86 BREAK
, /* the field may not differ between rules in a superblock */
87 NOMERGE
, /* the field may not differ between rules when combined */
88 COMBINED
, /* the field may itself be combined with other rules */
89 DC
, /* we just don't care about the field */
90 NEVER
}; /* we should never see this field set?!? */
91 struct pf_rule_field
{
97 #define PF_RULE_FIELD(field, ty) \
100 offsetof(struct pf_rule, field), \
101 sizeof(((struct pf_rule *)0)->field)}
105 * The presence of these fields in a rule puts the rule in its own
106 * superblock. Thus it will not be optimized. It also prevents the
107 * rule from being re-ordered at all.
109 PF_RULE_FIELD(label
, BARRIER
),
110 PF_RULE_FIELD(prob
, BARRIER
),
111 PF_RULE_FIELD(max_states
, BARRIER
),
112 PF_RULE_FIELD(max_src_nodes
, BARRIER
),
113 PF_RULE_FIELD(max_src_states
, BARRIER
),
114 PF_RULE_FIELD(max_src_conn
, BARRIER
),
115 PF_RULE_FIELD(max_src_conn_rate
, BARRIER
),
116 PF_RULE_FIELD(anchor
, BARRIER
), /* for now */
119 * These fields must be the same between all rules in the same superblock.
120 * These rules are allowed to be re-ordered but only among like rules.
121 * For instance we can re-order all 'tag "foo"' rules because they have the
122 * same tag. But we can not re-order between a 'tag "foo"' and a
123 * 'tag "bar"' since that would change the meaning of the ruleset.
125 PF_RULE_FIELD(tagname
, BREAK
),
126 PF_RULE_FIELD(keep_state
, BREAK
),
127 PF_RULE_FIELD(qname
, BREAK
),
128 PF_RULE_FIELD(pqname
, BREAK
),
129 PF_RULE_FIELD(rt
, BREAK
),
130 PF_RULE_FIELD(allow_opts
, BREAK
),
131 PF_RULE_FIELD(rule_flag
, BREAK
),
132 PF_RULE_FIELD(action
, BREAK
),
133 PF_RULE_FIELD(log
, BREAK
),
134 PF_RULE_FIELD(quick
, BREAK
),
135 PF_RULE_FIELD(return_ttl
, BREAK
),
136 PF_RULE_FIELD(overload_tblname
, BREAK
),
137 PF_RULE_FIELD(flush
, BREAK
),
138 PF_RULE_FIELD(rpool
, BREAK
),
139 PF_RULE_FIELD(logif
, BREAK
),
142 * Any fields not listed in this structure act as BREAK fields
147 * These fields must not differ when we merge two rules together but
148 * their difference isn't enough to put the rules in different superblocks.
149 * There are no problems re-ordering any rules with these fields.
151 PF_RULE_FIELD(af
, NOMERGE
),
152 PF_RULE_FIELD(ifnot
, NOMERGE
),
153 PF_RULE_FIELD(ifname
, NOMERGE
), /* hack for IF groups */
154 PF_RULE_FIELD(match_tag_not
, NOMERGE
),
155 PF_RULE_FIELD(match_tagname
, NOMERGE
),
156 PF_RULE_FIELD(os_fingerprint
, NOMERGE
),
157 PF_RULE_FIELD(timeout
, NOMERGE
),
158 PF_RULE_FIELD(return_icmp
, NOMERGE
),
159 PF_RULE_FIELD(return_icmp6
, NOMERGE
),
160 PF_RULE_FIELD(uid
, NOMERGE
),
161 PF_RULE_FIELD(gid
, NOMERGE
),
162 PF_RULE_FIELD(direction
, NOMERGE
),
163 PF_RULE_FIELD(proto
, NOMERGE
),
164 PF_RULE_FIELD(type
, NOMERGE
),
165 PF_RULE_FIELD(code
, NOMERGE
),
166 PF_RULE_FIELD(flags
, NOMERGE
),
167 PF_RULE_FIELD(flagset
, NOMERGE
),
168 PF_RULE_FIELD(tos
, NOMERGE
),
169 PF_RULE_FIELD(src
.port
, NOMERGE
),
170 PF_RULE_FIELD(dst
.port
, NOMERGE
),
171 PF_RULE_FIELD(src
.port_op
, NOMERGE
),
172 PF_RULE_FIELD(dst
.port_op
, NOMERGE
),
173 PF_RULE_FIELD(src
.neg
, NOMERGE
),
174 PF_RULE_FIELD(dst
.neg
, NOMERGE
),
176 /* These fields can be merged */
177 PF_RULE_FIELD(src
.addr
, COMBINED
),
178 PF_RULE_FIELD(dst
.addr
, COMBINED
),
180 /* We just don't care about these fields. They're set by the kernel */
181 PF_RULE_FIELD(skip
, DC
),
182 PF_RULE_FIELD(evaluations
, DC
),
183 PF_RULE_FIELD(packets
, DC
),
184 PF_RULE_FIELD(bytes
, DC
),
185 PF_RULE_FIELD(kif
, DC
),
186 PF_RULE_FIELD(states
, DC
),
187 PF_RULE_FIELD(src_nodes
, DC
),
188 PF_RULE_FIELD(nr
, DC
),
189 PF_RULE_FIELD(entries
, DC
),
190 PF_RULE_FIELD(qid
, DC
),
191 PF_RULE_FIELD(pqid
, DC
),
192 PF_RULE_FIELD(anchor_relative
, DC
),
193 PF_RULE_FIELD(anchor_wildcard
, DC
),
194 PF_RULE_FIELD(tag
, DC
),
195 PF_RULE_FIELD(match_tag
, DC
),
196 PF_RULE_FIELD(overload_tbl
, DC
),
198 /* These fields should never be set in a PASS/BLOCK rule */
199 PF_RULE_FIELD(natpass
, NEVER
),
200 PF_RULE_FIELD(max_mss
, NEVER
),
201 PF_RULE_FIELD(min_ttl
, NEVER
),
206 int add_opt_table(struct pfctl
*, struct pf_opt_tbl
**, sa_family_t
,
207 struct pf_rule_addr
*);
208 int addrs_combineable(struct pf_rule_addr
*, struct pf_rule_addr
*);
209 int addrs_equal(struct pf_rule_addr
*, struct pf_rule_addr
*);
210 int block_feedback(struct pfctl
*, struct superblock
*);
211 int combine_rules(struct pfctl
*, struct superblock
*);
212 void comparable_rule(struct pf_rule
*, const struct pf_rule
*, int);
213 int construct_superblocks(struct pfctl
*, struct pf_opt_queue
*,
214 struct superblocks
*);
215 void exclude_supersets(struct pf_rule
*, struct pf_rule
*);
216 int interface_group(const char *);
217 int load_feedback_profile(struct pfctl
*, struct superblocks
*);
218 int optimize_superblock(struct pfctl
*, struct superblock
*);
219 int pf_opt_create_table(struct pfctl
*, struct pf_opt_tbl
*);
220 void remove_from_skipsteps(struct skiplist
*, struct superblock
*,
221 struct pf_opt_rule
*, struct pf_skip_step
*);
222 int remove_identical_rules(struct pfctl
*, struct superblock
*);
223 int reorder_rules(struct pfctl
*, struct superblock
*, int);
224 int rules_combineable(struct pf_rule
*, struct pf_rule
*);
225 void skip_append(struct superblock
*, int, struct pf_skip_step
*,
226 struct pf_opt_rule
*);
227 int skip_compare(int, struct pf_skip_step
*, struct pf_opt_rule
*);
228 void skip_init(void);
229 int skip_cmp_af(struct pf_rule
*, struct pf_rule
*);
230 int skip_cmp_dir(struct pf_rule
*, struct pf_rule
*);
231 int skip_cmp_dst_addr(struct pf_rule
*, struct pf_rule
*);
232 int skip_cmp_dst_port(struct pf_rule
*, struct pf_rule
*);
233 int skip_cmp_ifp(struct pf_rule
*, struct pf_rule
*);
234 int skip_cmp_proto(struct pf_rule
*, struct pf_rule
*);
235 int skip_cmp_src_addr(struct pf_rule
*, struct pf_rule
*);
236 int skip_cmp_src_port(struct pf_rule
*, struct pf_rule
*);
237 int superblock_inclusive(struct superblock
*, struct pf_opt_rule
*);
238 void superblock_free(struct pfctl
*, struct superblock
*);
241 int (*skip_comparitors
[PF_SKIP_COUNT
])(struct pf_rule
*, struct pf_rule
*);
242 const char *skip_comparitors_names
[PF_SKIP_COUNT
];
243 #define PF_SKIP_COMPARITORS { \
244 { "ifp", PF_SKIP_IFP, skip_cmp_ifp }, \
245 { "dir", PF_SKIP_DIR, skip_cmp_dir }, \
246 { "af", PF_SKIP_AF, skip_cmp_af }, \
247 { "proto", PF_SKIP_PROTO, skip_cmp_proto }, \
248 { "saddr", PF_SKIP_SRC_ADDR, skip_cmp_src_addr }, \
249 { "sport", PF_SKIP_SRC_PORT, skip_cmp_src_port }, \
250 { "daddr", PF_SKIP_DST_ADDR, skip_cmp_dst_addr }, \
251 { "dport", PF_SKIP_DST_PORT, skip_cmp_dst_port } \
254 struct pfr_buffer table_buffer
;
255 int table_identifier
;
259 pfctl_optimize_ruleset(struct pfctl
*pf
, struct pf_ruleset
*rs
)
261 struct superblocks superblocks
;
262 struct pf_opt_queue opt_queue
;
263 struct superblock
*block
;
264 struct pf_opt_rule
*por
;
266 struct pf_rulequeue
*old_rules
;
268 DEBUG("optimizing ruleset");
269 memset(&table_buffer
, 0, sizeof(table_buffer
));
271 TAILQ_INIT(&opt_queue
);
273 old_rules
= rs
->rules
[PF_RULESET_FILTER
].active
.ptr
;
274 rs
->rules
[PF_RULESET_FILTER
].active
.ptr
=
275 rs
->rules
[PF_RULESET_FILTER
].inactive
.ptr
;
276 rs
->rules
[PF_RULESET_FILTER
].inactive
.ptr
= old_rules
;
279 * XXX expanding the pf_opt_rule format throughout pfctl might allow
280 * us to avoid all this copying.
282 while ((r
= TAILQ_FIRST(rs
->rules
[PF_RULESET_FILTER
].inactive
.ptr
))
284 TAILQ_REMOVE(rs
->rules
[PF_RULESET_FILTER
].inactive
.ptr
, r
,
286 if ((por
= calloc(1, sizeof(*por
))) == NULL
)
288 memcpy(&por
->por_rule
, r
, sizeof(*r
));
289 if (TAILQ_FIRST(&r
->rpool
.list
) != NULL
) {
290 TAILQ_INIT(&por
->por_rule
.rpool
.list
);
291 pfctl_move_pool(&r
->rpool
, &por
->por_rule
.rpool
);
293 bzero(&por
->por_rule
.rpool
,
294 sizeof(por
->por_rule
.rpool
));
297 TAILQ_INSERT_TAIL(&opt_queue
, por
, por_entry
);
300 TAILQ_INIT(&superblocks
);
301 if (construct_superblocks(pf
, &opt_queue
, &superblocks
))
304 if (pf
->optimize
& PF_OPTIMIZE_PROFILE
) {
305 if (load_feedback_profile(pf
, &superblocks
))
309 TAILQ_FOREACH(block
, &superblocks
, sb_entry
) {
310 if (optimize_superblock(pf
, block
))
314 rs
->anchor
->refcnt
= 0;
315 while ((block
= TAILQ_FIRST(&superblocks
))) {
316 TAILQ_REMOVE(&superblocks
, block
, sb_entry
);
318 while ((por
= TAILQ_FIRST(&block
->sb_rules
))) {
319 TAILQ_REMOVE(&block
->sb_rules
, por
, por_entry
);
320 por
->por_rule
.nr
= rs
->anchor
->refcnt
++;
321 if ((r
= calloc(1, sizeof(*r
))) == NULL
)
323 memcpy(r
, &por
->por_rule
, sizeof(*r
));
324 TAILQ_INIT(&r
->rpool
.list
);
325 pfctl_move_pool(&por
->por_rule
.rpool
, &r
->rpool
);
327 rs
->rules
[PF_RULESET_FILTER
].active
.ptr
,
337 while ((por
= TAILQ_FIRST(&opt_queue
))) {
338 TAILQ_REMOVE(&opt_queue
, por
, por_entry
);
339 if (por
->por_src_tbl
) {
340 pfr_buf_clear(por
->por_src_tbl
->pt_buf
);
341 free(por
->por_src_tbl
->pt_buf
);
342 free(por
->por_src_tbl
);
344 if (por
->por_dst_tbl
) {
345 pfr_buf_clear(por
->por_dst_tbl
->pt_buf
);
346 free(por
->por_dst_tbl
->pt_buf
);
347 free(por
->por_dst_tbl
);
351 while ((block
= TAILQ_FIRST(&superblocks
))) {
352 TAILQ_REMOVE(&superblocks
, block
, sb_entry
);
353 superblock_free(pf
, block
);
360 * Go ahead and optimize a superblock
363 optimize_superblock(struct pfctl
*pf
, struct superblock
*block
)
366 struct pf_opt_rule
*por
;
367 #endif /* OPT_DEBUG */
369 /* We have a few optimization passes:
370 * 1) remove duplicate rules or rules that are a subset of other
372 * 2) combine otherwise identical rules with different IP addresses
373 * into a single rule and put the addresses in a table.
374 * 3) re-order the rules to improve kernel skip steps
375 * 4) re-order the 'quick' rules based on feedback from the
376 * active ruleset statistics
378 * XXX combine_rules() doesn't combine v4 and v6 rules. would just
379 * have to keep af in the table container, make af 'COMBINE' and
380 * twiddle the af on the merged rule
381 * XXX maybe add a weighting to the metric on skipsteps when doing
382 * reordering. sometimes two sequential tables will be better
383 * that four consecutive interfaces.
384 * XXX need to adjust the skipstep count of everything after PROTO,
385 * since they aren't actually checked on a proto mismatch in
386 * pf_test_{tcp, udp, icmp}()
387 * XXX should i treat proto=0, af=0 or dir=0 special in skipstep
388 * calculation since they are a DC?
389 * XXX keep last skiplist of last superblock to influence this
390 * superblock. '5 inet6 log' should make '3 inet6' come before '4
391 * inet' in the next superblock.
392 * XXX would be useful to add tables for ports
393 * XXX we can also re-order some mutually exclusive superblocks to
394 * try merging superblocks before any of these optimization passes.
395 * for instance a single 'log in' rule in the middle of non-logging
399 /* shortcut. there will be a lot of 1-rule superblocks */
400 if (!TAILQ_NEXT(TAILQ_FIRST(&block
->sb_rules
), por_entry
))
404 printf("--- Superblock ---\n");
405 TAILQ_FOREACH(por
, &block
->sb_rules
, por_entry
) {
407 print_rule(&por
->por_rule
, por
->por_rule
.anchor
?
408 por
->por_rule
.anchor
->name
: "", 1);
410 #endif /* OPT_DEBUG */
413 if (remove_identical_rules(pf
, block
))
415 if (combine_rules(pf
, block
))
417 if ((pf
->optimize
& PF_OPTIMIZE_PROFILE
) &&
418 TAILQ_FIRST(&block
->sb_rules
)->por_rule
.quick
&&
419 block
->sb_profiled_block
) {
420 if (block_feedback(pf
, block
))
422 } else if (reorder_rules(pf
, block
, 0)) {
427 * Don't add any optimization passes below reorder_rules(). It will
428 * have divided superblocks into smaller blocks for further refinement
429 * and doesn't put them back together again. What once was a true
430 * superblock might have been split into multiple superblocks.
434 printf("--- END Superblock ---\n");
435 #endif /* OPT_DEBUG */
441 * Optimization pass #1: remove identical rules
444 remove_identical_rules(struct pfctl
*pf
, struct superblock
*block
)
446 struct pf_opt_rule
*por1
, *por2
, *por_next
, *por2_next
;
447 struct pf_rule a
, a2
, b
, b2
;
449 for (por1
= TAILQ_FIRST(&block
->sb_rules
); por1
; por1
= por_next
) {
450 por_next
= TAILQ_NEXT(por1
, por_entry
);
451 for (por2
= por_next
; por2
; por2
= por2_next
) {
452 por2_next
= TAILQ_NEXT(por2
, por_entry
);
453 comparable_rule(&a
, &por1
->por_rule
, DC
);
454 comparable_rule(&b
, &por2
->por_rule
, DC
);
455 memcpy(&a2
, &a
, sizeof(a2
));
456 memcpy(&b2
, &b
, sizeof(b2
));
458 exclude_supersets(&a
, &b
);
459 exclude_supersets(&b2
, &a2
);
460 if (memcmp(&a
, &b
, sizeof(a
)) == 0) {
461 DEBUG("removing identical rule nr%d = *nr%d*",
462 por1
->por_rule
.nr
, por2
->por_rule
.nr
);
463 TAILQ_REMOVE(&block
->sb_rules
, por2
, por_entry
);
464 if (por_next
== por2
)
465 por_next
= TAILQ_NEXT(por1
, por_entry
);
467 } else if (memcmp(&a2
, &b2
, sizeof(a2
)) == 0) {
468 DEBUG("removing identical rule *nr%d* = nr%d",
469 por1
->por_rule
.nr
, por2
->por_rule
.nr
);
470 TAILQ_REMOVE(&block
->sb_rules
, por1
, por_entry
);
482 * Optimization pass #2: combine similar rules with different addresses
483 * into a single rule and a table
486 combine_rules(struct pfctl
*pf
, struct superblock
*block
)
488 struct pf_opt_rule
*p1
, *p2
, *por_next
;
491 if ((pf
->loadopt
& PFCTL_FLAG_TABLE
) == 0) {
492 warnx("Must enable table loading for optimizations");
496 /* First we make a pass to combine the rules. O(n log n) */
497 TAILQ_FOREACH(p1
, &block
->sb_rules
, por_entry
) {
498 for (p2
= TAILQ_NEXT(p1
, por_entry
); p2
; p2
= por_next
) {
499 por_next
= TAILQ_NEXT(p2
, por_entry
);
501 src_eq
= addrs_equal(&p1
->por_rule
.src
,
503 dst_eq
= addrs_equal(&p1
->por_rule
.dst
,
506 if (src_eq
&& !dst_eq
&& p1
->por_src_tbl
== NULL
&&
507 p2
->por_dst_tbl
== NULL
&&
508 p2
->por_src_tbl
== NULL
&&
509 rules_combineable(&p1
->por_rule
, &p2
->por_rule
) &&
510 addrs_combineable(&p1
->por_rule
.dst
,
511 &p2
->por_rule
.dst
)) {
512 DEBUG("can combine rules nr%d = nr%d",
513 p1
->por_rule
.nr
, p2
->por_rule
.nr
);
514 if (p1
->por_dst_tbl
== NULL
&&
515 add_opt_table(pf
, &p1
->por_dst_tbl
,
516 p1
->por_rule
.af
, &p1
->por_rule
.dst
))
518 if (add_opt_table(pf
, &p1
->por_dst_tbl
,
519 p1
->por_rule
.af
, &p2
->por_rule
.dst
))
521 p2
->por_dst_tbl
= p1
->por_dst_tbl
;
522 if (p1
->por_dst_tbl
->pt_rulecount
>=
524 TAILQ_REMOVE(&block
->sb_rules
, p2
,
528 } else if (!src_eq
&& dst_eq
&& p1
->por_dst_tbl
== NULL
529 && p2
->por_src_tbl
== NULL
&&
530 p2
->por_dst_tbl
== NULL
&&
531 rules_combineable(&p1
->por_rule
, &p2
->por_rule
) &&
532 addrs_combineable(&p1
->por_rule
.src
,
533 &p2
->por_rule
.src
)) {
534 DEBUG("can combine rules nr%d = nr%d",
535 p1
->por_rule
.nr
, p2
->por_rule
.nr
);
536 if (p1
->por_src_tbl
== NULL
&&
537 add_opt_table(pf
, &p1
->por_src_tbl
,
538 p1
->por_rule
.af
, &p1
->por_rule
.src
))
540 if (add_opt_table(pf
, &p1
->por_src_tbl
,
541 p1
->por_rule
.af
, &p2
->por_rule
.src
))
543 p2
->por_src_tbl
= p1
->por_src_tbl
;
544 if (p1
->por_src_tbl
->pt_rulecount
>=
546 TAILQ_REMOVE(&block
->sb_rules
, p2
,
556 * Then we make a final pass to create a valid table name and
557 * insert the name into the rules.
559 for (p1
= TAILQ_FIRST(&block
->sb_rules
); p1
; p1
= por_next
) {
560 por_next
= TAILQ_NEXT(p1
, por_entry
);
561 assert(p1
->por_src_tbl
== NULL
|| p1
->por_dst_tbl
== NULL
);
563 if (p1
->por_src_tbl
&& p1
->por_src_tbl
->pt_rulecount
>=
565 if (p1
->por_src_tbl
->pt_generated
) {
566 /* This rule is included in a table */
567 TAILQ_REMOVE(&block
->sb_rules
, p1
, por_entry
);
571 p1
->por_src_tbl
->pt_generated
= 1;
573 if ((pf
->opts
& PF_OPT_NOACTION
) == 0 &&
574 pf_opt_create_table(pf
, p1
->por_src_tbl
))
579 if (pf
->opts
& PF_OPT_VERBOSE
)
580 print_tabledef(p1
->por_src_tbl
->pt_name
,
582 &p1
->por_src_tbl
->pt_nodes
);
584 memset(&p1
->por_rule
.src
.addr
, 0,
585 sizeof(p1
->por_rule
.src
.addr
));
586 p1
->por_rule
.src
.addr
.type
= PF_ADDR_TABLE
;
587 strlcpy(p1
->por_rule
.src
.addr
.v
.tblname
,
588 p1
->por_src_tbl
->pt_name
,
589 sizeof(p1
->por_rule
.src
.addr
.v
.tblname
));
591 pfr_buf_clear(p1
->por_src_tbl
->pt_buf
);
592 free(p1
->por_src_tbl
->pt_buf
);
593 p1
->por_src_tbl
->pt_buf
= NULL
;
595 if (p1
->por_dst_tbl
&& p1
->por_dst_tbl
->pt_rulecount
>=
597 if (p1
->por_dst_tbl
->pt_generated
) {
598 /* This rule is included in a table */
599 TAILQ_REMOVE(&block
->sb_rules
, p1
, por_entry
);
603 p1
->por_dst_tbl
->pt_generated
= 1;
605 if ((pf
->opts
& PF_OPT_NOACTION
) == 0 &&
606 pf_opt_create_table(pf
, p1
->por_dst_tbl
))
610 if (pf
->opts
& PF_OPT_VERBOSE
)
611 print_tabledef(p1
->por_dst_tbl
->pt_name
,
613 &p1
->por_dst_tbl
->pt_nodes
);
615 memset(&p1
->por_rule
.dst
.addr
, 0,
616 sizeof(p1
->por_rule
.dst
.addr
));
617 p1
->por_rule
.dst
.addr
.type
= PF_ADDR_TABLE
;
618 strlcpy(p1
->por_rule
.dst
.addr
.v
.tblname
,
619 p1
->por_dst_tbl
->pt_name
,
620 sizeof(p1
->por_rule
.dst
.addr
.v
.tblname
));
622 pfr_buf_clear(p1
->por_dst_tbl
->pt_buf
);
623 free(p1
->por_dst_tbl
->pt_buf
);
624 p1
->por_dst_tbl
->pt_buf
= NULL
;
633 * Optimization pass #3: re-order rules to improve skip steps
636 reorder_rules(struct pfctl
*pf
, struct superblock
*block
, int depth
)
638 struct superblock
*newblock
;
639 struct pf_skip_step
*skiplist
;
640 struct pf_opt_rule
*por
;
641 int i
, largest
, largest_list
= -1, rule_count
= 0;
642 TAILQ_HEAD( , pf_opt_rule
) head
;
645 * Calculate the best-case skip steps. We put each rule in a list
646 * of other rules with common fields
648 for (i
= 0; i
< PF_SKIP_COUNT
; i
++) {
649 TAILQ_FOREACH(por
, &block
->sb_rules
, por_entry
) {
650 TAILQ_FOREACH(skiplist
, &block
->sb_skipsteps
[i
],
652 if (skip_compare(i
, skiplist
, por
) == 0)
655 if (skiplist
== NULL
) {
656 if ((skiplist
= calloc(1, sizeof(*skiplist
))) ==
659 TAILQ_INIT(&skiplist
->ps_rules
);
660 TAILQ_INSERT_TAIL(&block
->sb_skipsteps
[i
],
663 skip_append(block
, i
, skiplist
, por
);
667 TAILQ_FOREACH(por
, &block
->sb_rules
, por_entry
)
671 * Now we're going to ignore any fields that are identical between
672 * all of the rules in the superblock and those fields which differ
673 * between every rule in the superblock.
676 for (i
= 0; i
< PF_SKIP_COUNT
; i
++) {
677 skiplist
= TAILQ_FIRST(&block
->sb_skipsteps
[i
]);
678 if (skiplist
->ps_count
== rule_count
) {
679 DEBUG("(%d) original skipstep '%s' is all rules",
680 depth
, skip_comparitors_names
[i
]);
681 skiplist
->ps_count
= 0;
682 } else if (skiplist
->ps_count
== 1) {
683 skiplist
->ps_count
= 0;
685 DEBUG("(%d) original skipstep '%s' largest jump is %d",
686 depth
, skip_comparitors_names
[i
],
688 if (skiplist
->ps_count
> largest
)
689 largest
= skiplist
->ps_count
;
693 /* Ugh. There is NO commonality in the superblock on which
694 * to base the skipsteps optimization.
700 * Now we're going to empty the superblock rule list and re-create
701 * it based on a more optimal skipstep order.
704 while ((por
= TAILQ_FIRST(&block
->sb_rules
))) {
705 TAILQ_REMOVE(&block
->sb_rules
, por
, por_entry
);
706 TAILQ_INSERT_TAIL(&head
, por
, por_entry
);
710 while (!TAILQ_EMPTY(&head
)) {
714 * Find the most useful skip steps remaining
716 for (i
= 0; i
< PF_SKIP_COUNT
; i
++) {
717 skiplist
= TAILQ_FIRST(&block
->sb_skipsteps
[i
]);
718 if (skiplist
->ps_count
> largest
) {
719 largest
= skiplist
->ps_count
;
726 * Nothing useful left. Leave remaining rules in order.
728 DEBUG("(%d) no more commonality for skip steps", depth
);
729 while ((por
= TAILQ_FIRST(&head
))) {
730 TAILQ_REMOVE(&head
, por
, por_entry
);
731 TAILQ_INSERT_TAIL(&block
->sb_rules
, por
,
736 * There is commonality. Extract those common rules
737 * and place them in the ruleset adjacent to each
740 skiplist
= TAILQ_FIRST(&block
->sb_skipsteps
[
742 DEBUG("(%d) skipstep '%s' largest jump is %d @ #%d",
743 depth
, skip_comparitors_names
[largest_list
],
744 largest
, TAILQ_FIRST(&TAILQ_FIRST(&block
->
745 sb_skipsteps
[largest_list
])->ps_rules
)->
747 TAILQ_REMOVE(&block
->sb_skipsteps
[largest_list
],
752 * There may be further commonality inside these
753 * rules. So we'll split them off into their own
754 * superblock and pass it back into the optimizer.
756 if (skiplist
->ps_count
> 2) {
757 if ((newblock
= calloc(1, sizeof(*newblock
)))
762 TAILQ_INIT(&newblock
->sb_rules
);
763 for (i
= 0; i
< PF_SKIP_COUNT
; i
++)
764 TAILQ_INIT(&newblock
->sb_skipsteps
[i
]);
765 TAILQ_INSERT_BEFORE(block
, newblock
, sb_entry
);
766 DEBUG("(%d) splitting off %d rules from superblock @ #%d",
767 depth
, skiplist
->ps_count
,
768 TAILQ_FIRST(&skiplist
->ps_rules
)->
774 while ((por
= TAILQ_FIRST(&skiplist
->ps_rules
))) {
775 TAILQ_REMOVE(&head
, por
, por_entry
);
776 TAILQ_REMOVE(&skiplist
->ps_rules
, por
,
777 por_skip_entry
[largest_list
]);
778 TAILQ_INSERT_TAIL(&newblock
->sb_rules
, por
,
781 /* Remove this rule from all other skiplists */
782 remove_from_skipsteps(&block
->sb_skipsteps
[
783 largest_list
], block
, por
, skiplist
);
786 if (newblock
!= block
)
787 if (reorder_rules(pf
, newblock
, depth
+ 1))
793 for (i
= 0; i
< PF_SKIP_COUNT
; i
++) {
794 while ((skiplist
= TAILQ_FIRST(&block
->sb_skipsteps
[i
]))) {
795 TAILQ_REMOVE(&block
->sb_skipsteps
[i
], skiplist
,
806 * Optimization pass #4: re-order 'quick' rules based on feedback from the
807 * currently running ruleset
810 block_feedback(struct pfctl
*pf
, struct superblock
*block
)
812 TAILQ_HEAD( , pf_opt_rule
) queue
;
813 struct pf_opt_rule
*por1
, *por2
;
814 u_int64_t total_count
= 0;
819 * Walk through all of the profiled superblock's rules and copy
820 * the counters onto our rules.
822 TAILQ_FOREACH(por1
, &block
->sb_profiled_block
->sb_rules
, por_entry
) {
823 comparable_rule(&a
, &por1
->por_rule
, DC
);
824 total_count
+= por1
->por_rule
.packets
[0] +
825 por1
->por_rule
.packets
[1];
826 TAILQ_FOREACH(por2
, &block
->sb_rules
, por_entry
) {
827 if (por2
->por_profile_count
)
829 comparable_rule(&b
, &por2
->por_rule
, DC
);
830 if (memcmp(&a
, &b
, sizeof(a
)) == 0) {
831 por2
->por_profile_count
=
832 por1
->por_rule
.packets
[0] +
833 por1
->por_rule
.packets
[1];
838 superblock_free(pf
, block
->sb_profiled_block
);
839 block
->sb_profiled_block
= NULL
;
842 * Now we pull all of the rules off the superblock and re-insert them
847 while ((por1
= TAILQ_FIRST(&block
->sb_rules
)) != NULL
) {
848 TAILQ_REMOVE(&block
->sb_rules
, por1
, por_entry
);
849 TAILQ_INSERT_TAIL(&queue
, por1
, por_entry
);
852 while ((por1
= TAILQ_FIRST(&queue
)) != NULL
) {
853 TAILQ_REMOVE(&queue
, por1
, por_entry
);
854 /* XXX I should sort all of the unused rules based on skip steps */
855 TAILQ_FOREACH(por2
, &block
->sb_rules
, por_entry
) {
856 if (por1
->por_profile_count
> por2
->por_profile_count
) {
857 TAILQ_INSERT_BEFORE(por2
, por1
, por_entry
);
861 if (por2
== TAILQ_END(&block
->sb_rules
))
862 TAILQ_INSERT_TAIL(&block
->sb_rules
, por1
, por_entry
);
870 * Load the current ruleset from the kernel and try to associate them with
871 * the ruleset we're optimizing.
874 load_feedback_profile(struct pfctl
*pf
, struct superblocks
*superblocks
)
876 struct superblock
*block
, *blockcur
;
877 struct superblocks prof_superblocks
;
878 struct pf_opt_rule
*por
;
879 struct pf_opt_queue queue
;
880 struct pfioc_rule pr
;
885 TAILQ_INIT(&prof_superblocks
);
887 memset(&pr
, 0, sizeof(pr
));
888 pr
.rule
.action
= PF_PASS
;
889 if (ioctl(pf
->dev
, DIOCGETRULES
, &pr
)) {
890 warn("DIOCGETRULES");
895 DEBUG("Loading %d active rules for a feedback profile", mnr
);
896 for (nr
= 0; nr
< mnr
; ++nr
) {
897 struct pf_ruleset
*rs
;
898 if ((por
= calloc(1, sizeof(*por
))) == NULL
) {
903 if (ioctl(pf
->dev
, DIOCGETRULE
, &pr
)) {
904 warn("DIOCGETRULES");
907 memcpy(&por
->por_rule
, &pr
.rule
, sizeof(por
->por_rule
));
908 rs
= pf_find_or_create_ruleset(pr
.anchor_call
);
909 por
->por_rule
.anchor
= rs
->anchor
;
910 if (TAILQ_EMPTY(&por
->por_rule
.rpool
.list
))
911 memset(&por
->por_rule
.rpool
, 0,
912 sizeof(por
->por_rule
.rpool
));
913 TAILQ_INSERT_TAIL(&queue
, por
, por_entry
);
915 /* XXX pfctl_get_pool(pf->dev, &pr.rule.rpool, nr, pr.ticket,
916 * PF_PASS, pf->anchor) ???
917 * ... pfctl_clear_pool(&pr.rule.rpool)
921 if (construct_superblocks(pf
, &queue
, &prof_superblocks
))
926 * Now we try to associate the active ruleset's superblocks with
927 * the superblocks we're compiling.
929 block
= TAILQ_FIRST(superblocks
);
930 blockcur
= TAILQ_FIRST(&prof_superblocks
);
931 while (block
&& blockcur
) {
932 comparable_rule(&a
, &TAILQ_FIRST(&block
->sb_rules
)->por_rule
,
934 comparable_rule(&b
, &TAILQ_FIRST(&blockcur
->sb_rules
)->por_rule
,
936 if (memcmp(&a
, &b
, sizeof(a
)) == 0) {
937 /* The two superblocks lined up */
938 block
->sb_profiled_block
= blockcur
;
940 DEBUG("superblocks don't line up between #%d and #%d",
941 TAILQ_FIRST(&block
->sb_rules
)->por_rule
.nr
,
942 TAILQ_FIRST(&blockcur
->sb_rules
)->por_rule
.nr
);
945 block
= TAILQ_NEXT(block
, sb_entry
);
946 blockcur
= TAILQ_NEXT(blockcur
, sb_entry
);
951 /* Free any superblocks we couldn't link */
953 block
= TAILQ_NEXT(blockcur
, sb_entry
);
954 superblock_free(pf
, blockcur
);
962 * Compare a rule to a skiplist to see if the rule is a member
965 skip_compare(int skipnum
, struct pf_skip_step
*skiplist
,
966 struct pf_opt_rule
*por
)
968 struct pf_rule
*a
, *b
;
969 if (skipnum
>= PF_SKIP_COUNT
|| skipnum
< 0)
970 errx(1, "skip_compare() out of bounds");
972 b
= &TAILQ_FIRST(&skiplist
->ps_rules
)->por_rule
;
974 return ((skip_comparitors
[skipnum
])(a
, b
));
979 * Add a rule to a skiplist
982 skip_append(struct superblock
*superblock
, int skipnum
,
983 struct pf_skip_step
*skiplist
, struct pf_opt_rule
*por
)
985 struct pf_skip_step
*prev
;
987 skiplist
->ps_count
++;
988 TAILQ_INSERT_TAIL(&skiplist
->ps_rules
, por
, por_skip_entry
[skipnum
]);
990 /* Keep the list of skiplists sorted by whichever is larger */
991 while ((prev
= TAILQ_PREV(skiplist
, skiplist
, ps_entry
)) &&
992 prev
->ps_count
< skiplist
->ps_count
) {
993 TAILQ_REMOVE(&superblock
->sb_skipsteps
[skipnum
],
995 TAILQ_INSERT_BEFORE(prev
, skiplist
, ps_entry
);
1001 * Remove a rule from the other skiplist calculations.
1004 remove_from_skipsteps(struct skiplist
*head
, struct superblock
*block
,
1005 struct pf_opt_rule
*por
, struct pf_skip_step
*active_list
)
1007 struct pf_skip_step
*sk
, *next
;
1008 struct pf_opt_rule
*p2
;
1011 for (i
= 0; i
< PF_SKIP_COUNT
; i
++) {
1012 sk
= TAILQ_FIRST(&block
->sb_skipsteps
[i
]);
1013 if (sk
== NULL
|| sk
== active_list
|| sk
->ps_count
<= 1)
1017 TAILQ_FOREACH(p2
, &sk
->ps_rules
, por_skip_entry
[i
])
1019 TAILQ_REMOVE(&sk
->ps_rules
, p2
,
1025 } while (!found
&& (sk
= TAILQ_NEXT(sk
, ps_entry
)));
1027 /* Does this change the sorting order? */
1028 while ((next
= TAILQ_NEXT(sk
, ps_entry
)) &&
1029 next
->ps_count
> sk
->ps_count
) {
1030 TAILQ_REMOVE(head
, sk
, ps_entry
);
1031 TAILQ_INSERT_AFTER(head
, next
, sk
, ps_entry
);
1034 next
= TAILQ_NEXT(sk
, ps_entry
);
1035 assert(next
== NULL
|| next
->ps_count
<= sk
->ps_count
);
1036 #endif /* OPT_DEBUG */
1042 /* Compare two rules AF field for skiplist construction */
1044 skip_cmp_af(struct pf_rule
*a
, struct pf_rule
*b
)
1046 if (a
->af
!= b
->af
|| a
->af
== 0)
1051 /* Compare two rules DIRECTION field for skiplist construction */
1053 skip_cmp_dir(struct pf_rule
*a
, struct pf_rule
*b
)
1055 if (a
->direction
== 0 || a
->direction
!= b
->direction
)
1060 /* Compare two rules DST Address field for skiplist construction */
1062 skip_cmp_dst_addr(struct pf_rule
*a
, struct pf_rule
*b
)
1064 if (a
->dst
.neg
!= b
->dst
.neg
||
1065 a
->dst
.addr
.type
!= b
->dst
.addr
.type
)
1067 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1068 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1069 * a->proto == IPPROTO_ICMP
1072 switch (a
->dst
.addr
.type
) {
1073 case PF_ADDR_ADDRMASK
:
1074 if (memcmp(&a
->dst
.addr
.v
.a
.addr
, &b
->dst
.addr
.v
.a
.addr
,
1075 sizeof(a
->dst
.addr
.v
.a
.addr
)) ||
1076 memcmp(&a
->dst
.addr
.v
.a
.mask
, &b
->dst
.addr
.v
.a
.mask
,
1077 sizeof(a
->dst
.addr
.v
.a
.mask
)) ||
1078 (a
->dst
.addr
.v
.a
.addr
.addr32
[0] == 0 &&
1079 a
->dst
.addr
.v
.a
.addr
.addr32
[1] == 0 &&
1080 a
->dst
.addr
.v
.a
.addr
.addr32
[2] == 0 &&
1081 a
->dst
.addr
.v
.a
.addr
.addr32
[3] == 0))
1084 case PF_ADDR_DYNIFTL
:
1085 if (strcmp(a
->dst
.addr
.v
.ifname
, b
->dst
.addr
.v
.ifname
) != 0 ||
1086 a
->dst
.addr
.iflags
!= a
->dst
.addr
.iflags
||
1087 memcmp(&a
->dst
.addr
.v
.a
.mask
, &b
->dst
.addr
.v
.a
.mask
,
1088 sizeof(a
->dst
.addr
.v
.a
.mask
)))
1091 case PF_ADDR_NOROUTE
:
1092 case PF_ADDR_URPFFAILED
:
1095 return (strcmp(a
->dst
.addr
.v
.tblname
, b
->dst
.addr
.v
.tblname
));
1100 /* Compare two rules DST port field for skiplist construction */
1102 skip_cmp_dst_port(struct pf_rule
*a
, struct pf_rule
*b
)
1104 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1105 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1106 * a->proto == IPPROTO_ICMP
1109 if (a
->dst
.port_op
== PF_OP_NONE
|| a
->dst
.port_op
!= b
->dst
.port_op
||
1110 a
->dst
.port
[0] != b
->dst
.port
[0] ||
1111 a
->dst
.port
[1] != b
->dst
.port
[1])
1116 /* Compare two rules IFP field for skiplist construction */
1118 skip_cmp_ifp(struct pf_rule
*a
, struct pf_rule
*b
)
1120 if (strcmp(a
->ifname
, b
->ifname
) || a
->ifname
[0] == '\0')
1122 return (a
->ifnot
!= b
->ifnot
);
1125 /* Compare two rules PROTO field for skiplist construction */
1127 skip_cmp_proto(struct pf_rule
*a
, struct pf_rule
*b
)
1129 return (a
->proto
!= b
->proto
|| a
->proto
== 0);
1132 /* Compare two rules SRC addr field for skiplist construction */
1134 skip_cmp_src_addr(struct pf_rule
*a
, struct pf_rule
*b
)
1136 if (a
->src
.neg
!= b
->src
.neg
||
1137 a
->src
.addr
.type
!= b
->src
.addr
.type
)
1139 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1140 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1141 * a->proto == IPPROTO_ICMP
1144 switch (a
->src
.addr
.type
) {
1145 case PF_ADDR_ADDRMASK
:
1146 if (memcmp(&a
->src
.addr
.v
.a
.addr
, &b
->src
.addr
.v
.a
.addr
,
1147 sizeof(a
->src
.addr
.v
.a
.addr
)) ||
1148 memcmp(&a
->src
.addr
.v
.a
.mask
, &b
->src
.addr
.v
.a
.mask
,
1149 sizeof(a
->src
.addr
.v
.a
.mask
)) ||
1150 (a
->src
.addr
.v
.a
.addr
.addr32
[0] == 0 &&
1151 a
->src
.addr
.v
.a
.addr
.addr32
[1] == 0 &&
1152 a
->src
.addr
.v
.a
.addr
.addr32
[2] == 0 &&
1153 a
->src
.addr
.v
.a
.addr
.addr32
[3] == 0))
1156 case PF_ADDR_DYNIFTL
:
1157 if (strcmp(a
->src
.addr
.v
.ifname
, b
->src
.addr
.v
.ifname
) != 0 ||
1158 a
->src
.addr
.iflags
!= a
->src
.addr
.iflags
||
1159 memcmp(&a
->src
.addr
.v
.a
.mask
, &b
->src
.addr
.v
.a
.mask
,
1160 sizeof(a
->src
.addr
.v
.a
.mask
)))
1163 case PF_ADDR_NOROUTE
:
1164 case PF_ADDR_URPFFAILED
:
1167 return (strcmp(a
->src
.addr
.v
.tblname
, b
->src
.addr
.v
.tblname
));
1172 /* Compare two rules SRC port field for skiplist construction */
1174 skip_cmp_src_port(struct pf_rule
*a
, struct pf_rule
*b
)
1176 if (a
->src
.port_op
== PF_OP_NONE
|| a
->src
.port_op
!= b
->src
.port_op
||
1177 a
->src
.port
[0] != b
->src
.port
[0] ||
1178 a
->src
.port
[1] != b
->src
.port
[1])
1180 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1181 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1182 * a->proto == IPPROTO_ICMP
/*
 * Wire up the skip-step comparator tables.
 *
 * PF_SKIP_COMPARITORS (presumably defined earlier in this file — the
 * definition is outside this view) maps each skip-step slot to the
 * comparator function for that rule field.  Fill skip_comparitors[]
 * and skip_comparitors_names[] from it, then abort if any slot is
 * still empty (i.e. the kernel grew a new skip step that this table
 * does not yet know about).
 */
void
skip_init(void)
{
    struct {
        char *name;                     /* human-readable field name */
        int skipnum;                    /* PF_SKIP_* slot index */
        int (*func)(struct pf_rule *, struct pf_rule *);
    } comps[] = PF_SKIP_COMPARITORS;
    int skipnum, i;

    for (skipnum = 0; skipnum < PF_SKIP_COUNT; skipnum++) {
        for (i = 0; i < sizeof(comps)/sizeof(*comps); i++)
            if (comps[i].skipnum == skipnum) {
                skip_comparitors[skipnum] = comps[i].func;
                skip_comparitors_names[skipnum] = comps[i].name;
            }
    }
    /* Every slot must have been claimed by an entry above */
    for (skipnum = 0; skipnum < PF_SKIP_COUNT; skipnum++)
        if (skip_comparitors[skipnum] == NULL)
            errx(1, "Need to add skip step comparitor to pfctl?!");
}
/*
 * Add a host/netmask to a table
 *
 * Lazily allocates *tbl on first use, giving it a temporary name that
 * pf_opt_create_table() later replaces with a unique one, and appends
 * addr to the table's address buffer.  Returns 0 on success, 1 if the
 * host could not be appended; allocation failure exits via err().
 */
int
add_opt_table(struct pfctl *pf, struct pf_opt_tbl **tbl, sa_family_t af,
    struct pf_rule_addr *addr)
{
#ifdef OPT_DEBUG
    char buf[128];
#endif /* OPT_DEBUG */
    static int tablenum = 0;    /* suffix for temporary table names */
    struct node_host node_host;

    if (*tbl == NULL) {
        if ((*tbl = calloc(1, sizeof(**tbl))) == NULL ||
            ((*tbl)->pt_buf = calloc(1, sizeof(*(*tbl)->pt_buf))) ==
            NULL)
            err(1, "calloc");
        (*tbl)->pt_buf->pfrb_type = PFRB_ADDRS;
        SIMPLEQ_INIT(&(*tbl)->pt_nodes);

        /* This is just a temporary table name */
        snprintf((*tbl)->pt_name, sizeof((*tbl)->pt_name), "%s%d",
            PF_OPT_TABLE_PREFIX, tablenum++);
        DEBUG("creating table <%s>", (*tbl)->pt_name);
    }

    memset(&node_host, 0, sizeof(node_host));
    node_host.af = af;
    node_host.addr = addr->addr;

#ifdef OPT_DEBUG
    DEBUG("<%s> adding %s/%d", (*tbl)->pt_name, inet_ntop(af,
        &node_host.addr.v.a.addr, buf, sizeof(buf)),
        unmask(&node_host.addr.v.a.mask, af));
#endif /* OPT_DEBUG */

    if (append_addr_host((*tbl)->pt_buf, &node_host, 0, 0)) {
        warn("failed to add host");
        return (1);
    }
    if (pf->opts & PF_OPT_VERBOSE) {
        /* Keep a printable copy of each node for verbose output */
        struct node_tinit *ti;

        if ((ti = calloc(1, sizeof(*ti))) == NULL)
            err(1, "calloc");
        if ((ti->host = malloc(sizeof(*ti->host))) == NULL)
            err(1, "malloc");
        memcpy(ti->host, &node_host, sizeof(*ti->host));
        SIMPLEQ_INSERT_TAIL(&(*tbl)->pt_nodes, ti, entries);
    }

    /* Past TABLE_THRESHOLD entries a table beats individual skip steps */
    (*tbl)->pt_rulecount++;
    if ((*tbl)->pt_rulecount == TABLE_THRESHOLD)
        DEBUG("table <%s> now faster than skip steps", (*tbl)->pt_name);

    return (0);
}
/*
 * Do the dirty work of choosing an unused table name and creating it.
 * (be careful with the table name, it might already be used in another anchor)
 *
 * On first call, snapshots the kernel's existing tables into
 * table_buffer so collisions can be detected locally; picks a random
 * identifier and retries on name collision.  Returns 0 on success,
 * 1 if the table could not be defined in the kernel.
 */
int
pf_opt_create_table(struct pfctl *pf, struct pf_opt_tbl *tbl)
{
    static int tablenum;        /* monotonically increasing name suffix */
    struct pfr_table *t;

    if (table_buffer.pfrb_type == 0) {
        /* Initialize the list of tables */
        table_buffer.pfrb_type = PFRB_TABLES;
        for (;;) {
            /* Grow until the kernel's whole table list fits */
            pfr_buf_grow(&table_buffer, table_buffer.pfrb_size);
            table_buffer.pfrb_size = table_buffer.pfrb_msize;
            if (pfr_get_tables(NULL, table_buffer.pfrb_caddr,
                &table_buffer.pfrb_size, PFR_FLAG_ALLRSETS))
                err(1, "pfr_get_tables");
            if (table_buffer.pfrb_size <= table_buffer.pfrb_msize)
                break;
        }
        table_identifier = arc4random();
    }

    /* XXX would be *really* nice to avoid duplicating identical tables */

    /* Now we have to pick a table name that isn't used */
again:
    DEBUG("translating temporary table <%s> to <%s%x_%d>", tbl->pt_name,
        PF_OPT_TABLE_PREFIX, table_identifier, tablenum);
    snprintf(tbl->pt_name, sizeof(tbl->pt_name), "%s%x_%d",
        PF_OPT_TABLE_PREFIX, table_identifier, tablenum);
    PFRB_FOREACH(t, &table_buffer) {
        if (strcasecmp(t->pfrt_name, tbl->pt_name) == 0) {
            /* Collision. Try again */
            DEBUG("wow, table <%s> in use. trying again",
                tbl->pt_name);
            /* Re-randomize the identifier and rebuild the name */
            table_identifier = arc4random();
            goto again;
        }
    }
    tablenum++;

    if (pfctl_define_table(tbl->pt_name, PFR_TFLAG_CONST, 1,
        pf->anchor->name, tbl->pt_buf, pf->anchor->ruleset.tticket)) {
        warn("failed to create table %s", tbl->pt_name);
        return (1);
    }
    return (0);
}
/*
 * Partition the flat ruleset into a list of distinct superblocks
 *
 * Consumes opt_queue: each rule is moved into the current superblock
 * while superblock_inclusive() accepts it; otherwise a fresh block is
 * started.  Returns 0 on success, 1 on allocation failure (rules
 * already moved stay in their blocks).
 */
int
construct_superblocks(struct pfctl *pf, struct pf_opt_queue *opt_queue,
    struct superblocks *superblocks)
{
    struct superblock *block = NULL;    /* block currently being filled */
    struct pf_opt_rule *por;
    int i;

    while (!TAILQ_EMPTY(opt_queue)) {
        por = TAILQ_FIRST(opt_queue);
        TAILQ_REMOVE(opt_queue, por, por_entry);
        if (block == NULL || !superblock_inclusive(block, por)) {
            /* Rule can't join the current block; open a new one */
            if ((block = calloc(1, sizeof(*block))) == NULL) {
                warn("calloc");
                return (1);
            }
            TAILQ_INIT(&block->sb_rules);
            for (i = 0; i < PF_SKIP_COUNT; i++)
                TAILQ_INIT(&block->sb_skipsteps[i]);
            TAILQ_INSERT_TAIL(superblocks, block, sb_entry);
        }
        TAILQ_INSERT_TAIL(&block->sb_rules, por, por_entry);
    }

    return (0);
}
1356 * Compare two rule addresses
1359 addrs_equal(struct pf_rule_addr
*a
, struct pf_rule_addr
*b
)
1361 if (a
->neg
!= b
->neg
)
1363 return (memcmp(&a
->addr
, &b
->addr
, sizeof(a
->addr
)) == 0);
1368 * The addresses are not equal, but can we combine them into one table?
1371 addrs_combineable(struct pf_rule_addr
*a
, struct pf_rule_addr
*b
)
1373 if (a
->addr
.type
!= PF_ADDR_ADDRMASK
||
1374 b
->addr
.type
!= PF_ADDR_ADDRMASK
)
1376 if (a
->neg
!= b
->neg
|| a
->port_op
!= b
->port_op
||
1377 a
->port
[0] != b
->port
[0] || a
->port
[1] != b
->port
[1])
1384 * Are we allowed to combine these two rules
1387 rules_combineable(struct pf_rule
*p1
, struct pf_rule
*p2
)
1389 struct pf_rule a
, b
;
1391 comparable_rule(&a
, p1
, COMBINED
);
1392 comparable_rule(&b
, p2
, COMBINED
);
1393 return (memcmp(&a
, &b
, sizeof(a
)) == 0);
/*
 * Can a rule be included inside a superblock
 *
 * Returns 1 when por may join block (its NOMERGE-comparable image
 * matches the block's first rule), 0 on any superblock break.
 */
int
superblock_inclusive(struct superblock *block, struct pf_opt_rule *por)
{
    struct pf_rule a, b;
    int i, j;

    /* First check for hard breaks */
    for (i = 0; i < sizeof(pf_rule_desc)/sizeof(*pf_rule_desc); i++) {
        if (pf_rule_desc[i].prf_type == BARRIER) {
            /* Any non-zero byte in a BARRIER field forces a break */
            for (j = 0; j < pf_rule_desc[i].prf_size; j++)
                if (((char *)&por->por_rule)[j +
                    pf_rule_desc[i].prf_offset] != 0)
                    return (0);
        }
    }

    /* per-rule src-track is also a hard break */
    if (por->por_rule.rule_flag & PFRULE_RULESRCTRACK)
        return (0);

    /*
     * Have to handle interface groups seperately. Consider the following
     * rules:
     *	block on EXTIFS to any port 22
     *	pass  on em0 to any port 22
     *	(where EXTIFS is an arbitrary interface group)
     * The optimizer may decide to re-order the pass rule in front of the
     * block rule. But what if EXTIFS includes em0??? Such a reordering
     * would change the meaning of the ruleset.
     * We can't just lookup the EXTIFS group and check if em0 is a member
     * because the user is allowed to add interfaces to a group during
     * runtime.
     * Ergo interface groups become a defacto superblock break :-(
     */
    if (interface_group(por->por_rule.ifname) ||
        interface_group(TAILQ_FIRST(&block->sb_rules)->por_rule.ifname)) {
        if (strcasecmp(por->por_rule.ifname,
            TAILQ_FIRST(&block->sb_rules)->por_rule.ifname) != 0)
            return (0);
    }

    /* Compare against the block's first rule with mergeable fields zeroed */
    comparable_rule(&a, &TAILQ_FIRST(&block->sb_rules)->por_rule, NOMERGE);
    comparable_rule(&b, &por->por_rule, NOMERGE);
    if (memcmp(&a, &b, sizeof(a)) == 0)
        return (1);

#ifdef OPT_DEBUG
    /* Debug aid: report which rule field caused the superblock break */
    for (i = 0; i < sizeof(por->por_rule); i++) {
        int closest = -1;
        if (((u_int8_t *)&a)[i] != ((u_int8_t *)&b)[i]) {
            for (j = 0; j < sizeof(pf_rule_desc) /
                sizeof(*pf_rule_desc); j++) {
                if (i >= pf_rule_desc[j].prf_offset &&
                    i < pf_rule_desc[j].prf_offset +
                    pf_rule_desc[j].prf_size) {
                    DEBUG("superblock break @ %d due to %s",
                        por->por_rule.nr,
                        pf_rule_desc[j].prf_name);
                    return (0);
                }
                /* Track the nearest preceding described field */
                if (i > pf_rule_desc[j].prf_offset) {
                    if (closest == -1 ||
                        i-pf_rule_desc[j].prf_offset <
                        i-pf_rule_desc[closest].prf_offset)
                        closest = j;
                }
            }

            if (closest >= 0)
                DEBUG("superblock break @ %d on %s+%xh",
                    por->por_rule.nr,
                    pf_rule_desc[closest].prf_name,
                    i - pf_rule_desc[closest].prf_offset -
                    pf_rule_desc[closest].prf_size);
            else
                DEBUG("superblock break @ %d on field @ %d",
                    por->por_rule.nr, i);
            return (0);
        }
    }
#endif /* OPT_DEBUG */

    return (0);
}
/*
 * Figure out if an interface name is an actual interface or actually a
 * group of interfaces.
 */
int
interface_group(const char *ifname)
{
    size_t len;

    if (ifname == NULL || ifname[0] == '\0')
        return (0);

    /* Real interfaces must end in a number, interface groups do not */
    len = strlen(ifname);
    return (!isdigit((unsigned char)ifname[len - 1]));
}
/*
 * Make a rule that can directly compared by memcmp()
 *
 * Copies src into dst and zeroes every field whose pf_rule_desc entry
 * is at least 'type' (NOMERGE/COMBINED/...), so two rules that differ
 * only in permitted fields compare equal byte-for-byte.
 */
void
comparable_rule(struct pf_rule *dst, const struct pf_rule *src, int type)
{
    int i;
    /*
     * To simplify the comparison, we just zero out the fields that are
     * allowed to be different and then do a simple memcmp()
     */
    memcpy(dst, src, sizeof(*dst));
    for (i = 0; i < sizeof(pf_rule_desc)/sizeof(*pf_rule_desc); i++)
        if (pf_rule_desc[i].prf_type >= type) {
#ifdef OPT_DEBUG
            /* NEVER fields are expected to already be zero */
            assert(pf_rule_desc[i].prf_type != NEVER ||
                *(((char *)dst) + pf_rule_desc[i].prf_offset) == 0);
#endif /* OPT_DEBUG */
            memset(((char *)dst) + pf_rule_desc[i].prf_offset, 0,
                pf_rule_desc[i].prf_size);
        }
}
/*
 * Remove superset information from two rules so we can directly compare them
 * with each other
 *
 * Wherever 'super' matches a strict superset of what 'sub' matches
 * (wildcard interface, both directions, any protocol, any port, wider
 * address mask), 'sub' is rewritten to use super's value so that a
 * later memcmp-style comparison sees them as equal on that field.
 * Only 'sub' is modified.
 */
void
exclude_supersets(struct pf_rule *super, struct pf_rule *sub)
{
    if (super->ifname[0] == '\0')
        memset(sub->ifname, 0, sizeof(sub->ifname));
    if (super->direction == PF_INOUT)
        sub->direction = PF_INOUT;
    /* A flagless super matching sub's protocol subsumes sub's flag check */
    if ((super->proto == 0 || super->proto == sub->proto) &&
        super->flags == 0 && super->flagset == 0 && (sub->flags ||
        sub->flagset)) {
        sub->flags = super->flags;
        sub->flagset = super->flagset;
    }
    if (super->proto == 0)
        sub->proto = 0;

    if (super->src.port_op == 0) {
        sub->src.port_op = 0;
        sub->src.port[0] = 0;
        sub->src.port[1] = 0;
    }
    if (super->dst.port_op == 0) {
        sub->dst.port_op = 0;
        sub->dst.port[0] = 0;
        sub->dst.port[1] = 0;
    }

    /* An all-zero mask on super's src means "any": wipe sub's src too */
    if (super->src.addr.type == PF_ADDR_ADDRMASK && !super->src.neg &&
        !sub->src.neg && super->src.addr.v.a.mask.addr32[0] == 0 &&
        super->src.addr.v.a.mask.addr32[1] == 0 &&
        super->src.addr.v.a.mask.addr32[2] == 0 &&
        super->src.addr.v.a.mask.addr32[3] == 0)
        memset(&sub->src.addr, 0, sizeof(sub->src.addr));
    else if (super->src.addr.type == PF_ADDR_ADDRMASK &&
        sub->src.addr.type == PF_ADDR_ADDRMASK &&
        super->src.neg == sub->src.neg &&
        super->af == sub->af &&
        unmask(&super->src.addr.v.a.mask, super->af) <
        unmask(&sub->src.addr.v.a.mask, sub->af) &&
        super->src.addr.v.a.addr.addr32[0] ==
        (sub->src.addr.v.a.addr.addr32[0] &
        super->src.addr.v.a.mask.addr32[0]) &&
        super->src.addr.v.a.addr.addr32[1] ==
        (sub->src.addr.v.a.addr.addr32[1] &
        super->src.addr.v.a.mask.addr32[1]) &&
        super->src.addr.v.a.addr.addr32[2] ==
        (sub->src.addr.v.a.addr.addr32[2] &
        super->src.addr.v.a.mask.addr32[2]) &&
        super->src.addr.v.a.addr.addr32[3] ==
        (sub->src.addr.v.a.addr.addr32[3] &
        super->src.addr.v.a.mask.addr32[3])) {
        /* sub->src.addr is a subset of super->src.addr/mask */
        memcpy(&sub->src.addr, &super->src.addr, sizeof(sub->src.addr));
    }

    /* Same superset logic, mirrored for the destination address */
    if (super->dst.addr.type == PF_ADDR_ADDRMASK && !super->dst.neg &&
        !sub->dst.neg && super->dst.addr.v.a.mask.addr32[0] == 0 &&
        super->dst.addr.v.a.mask.addr32[1] == 0 &&
        super->dst.addr.v.a.mask.addr32[2] == 0 &&
        super->dst.addr.v.a.mask.addr32[3] == 0)
        memset(&sub->dst.addr, 0, sizeof(sub->dst.addr));
    else if (super->dst.addr.type == PF_ADDR_ADDRMASK &&
        sub->dst.addr.type == PF_ADDR_ADDRMASK &&
        super->dst.neg == sub->dst.neg &&
        super->af == sub->af &&
        unmask(&super->dst.addr.v.a.mask, super->af) <
        unmask(&sub->dst.addr.v.a.mask, sub->af) &&
        super->dst.addr.v.a.addr.addr32[0] ==
        (sub->dst.addr.v.a.addr.addr32[0] &
        super->dst.addr.v.a.mask.addr32[0]) &&
        super->dst.addr.v.a.addr.addr32[1] ==
        (sub->dst.addr.v.a.addr.addr32[1] &
        super->dst.addr.v.a.mask.addr32[1]) &&
        super->dst.addr.v.a.addr.addr32[2] ==
        (sub->dst.addr.v.a.addr.addr32[2] &
        super->dst.addr.v.a.mask.addr32[2]) &&
        super->dst.addr.v.a.addr.addr32[3] ==
        (sub->dst.addr.v.a.addr.addr32[3] &
        super->dst.addr.v.a.mask.addr32[3])) {
        /* sub->dst.addr is a subset of super->dst.addr/mask */
        memcpy(&sub->dst.addr, &super->dst.addr, sizeof(sub->dst.addr));
    }
}
1621 superblock_free(struct pfctl
*pf
, struct superblock
*block
)
1623 struct pf_opt_rule
*por
;
1624 while ((por
= TAILQ_FIRST(&block
->sb_rules
))) {
1625 TAILQ_REMOVE(&block
->sb_rules
, por
, por_entry
);
1626 if (por
->por_src_tbl
) {
1627 if (por
->por_src_tbl
->pt_buf
) {
1628 pfr_buf_clear(por
->por_src_tbl
->pt_buf
);
1629 free(por
->por_src_tbl
->pt_buf
);
1631 free(por
->por_src_tbl
);
1633 if (por
->por_dst_tbl
) {
1634 if (por
->por_dst_tbl
->pt_buf
) {
1635 pfr_buf_clear(por
->por_dst_tbl
->pt_buf
);
1636 free(por
->por_dst_tbl
->pt_buf
);
1638 free(por
->por_dst_tbl
);
1642 if (block
->sb_profiled_block
)
1643 superblock_free(pf
, block
->sb_profiled_block
);