1 /* $OpenBSD: pfctl_optimize.c,v 1.17 2008/05/06 03:45:21 mpf Exp $ */
4 * Copyright (c) 2004 Mike Frantzen <frantzen@openbsd.org>
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 #include <sys/cdefs.h>
20 __FBSDID("$FreeBSD$");
22 #include <sys/types.h>
23 #include <sys/ioctl.h>
24 #include <sys/socket.h>
27 #include <net/pfvar.h>
29 #include <netinet/in.h>
30 #include <arpa/inet.h>
41 #include "pfctl_parser.h"
44 /* The size at which a table becomes faster than individual rules */
45 #define TABLE_THRESHOLD 6
48 /* #define OPT_DEBUG 1 */
50 # define DEBUG(str, v...) \
51 printf("%s: " str "\n", __FUNCTION__ , ## v)
53 # define DEBUG(str, v...) ((void)0)
58 * A container that lets us sort a superblock to optimize the skip step jumps
61 int ps_count
; /* number of items */
62 TAILQ_HEAD( , pf_opt_rule
) ps_rules
;
63 TAILQ_ENTRY(pf_skip_step
) ps_entry
;
68 * A superblock is a block of adjacent rules of similar action. If there
69 * are five PASS rules in a row, they all become members of a superblock.
70 * Once we have a superblock, we are free to re-order any rules within it
71 * in order to improve performance; if a packet is passed, it doesn't matter
75 TAILQ_HEAD( , pf_opt_rule
) sb_rules
;
76 TAILQ_ENTRY(superblock
) sb_entry
;
77 struct superblock
*sb_profiled_block
;
78 TAILQ_HEAD(skiplist
, pf_skip_step
) sb_skipsteps
[PF_SKIP_COUNT
];
80 TAILQ_HEAD(superblocks
, superblock
);
84 * Description of the PF rule structure.
87 BARRIER
, /* the presence of the field puts the rule in its own block */
88 BREAK
, /* the field may not differ between rules in a superblock */
89 NOMERGE
, /* the field may not differ between rules when combined */
90 COMBINED
, /* the field may itself be combined with other rules */
91 DC
, /* we just don't care about the field */
92 NEVER
}; /* we should never see this field set?!? */
93 struct pf_rule_field
{
99 #define PF_RULE_FIELD(field, ty) \
102 offsetof(struct pf_rule, field), \
103 sizeof(((struct pf_rule *)0)->field)}
107 * The presence of these fields in a rule puts the rule in its own
108 * superblock. Thus it will not be optimized. It also prevents the
109 * rule from being re-ordered at all.
111 PF_RULE_FIELD(label
, BARRIER
),
112 PF_RULE_FIELD(prob
, BARRIER
),
113 PF_RULE_FIELD(max_states
, BARRIER
),
114 PF_RULE_FIELD(max_src_nodes
, BARRIER
),
115 PF_RULE_FIELD(max_src_states
, BARRIER
),
116 PF_RULE_FIELD(max_src_conn
, BARRIER
),
117 PF_RULE_FIELD(max_src_conn_rate
, BARRIER
),
118 PF_RULE_FIELD(anchor
, BARRIER
), /* for now */
121 * These fields must be the same between all rules in the same superblock.
122 * These rules are allowed to be re-ordered but only among like rules.
123 * For instance we can re-order all 'tag "foo"' rules because they have the
124 * same tag. But we can not re-order between a 'tag "foo"' and a
125 * 'tag "bar"' since that would change the meaning of the ruleset.
127 PF_RULE_FIELD(tagname
, BREAK
),
128 PF_RULE_FIELD(keep_state
, BREAK
),
129 PF_RULE_FIELD(qname
, BREAK
),
130 PF_RULE_FIELD(pqname
, BREAK
),
131 PF_RULE_FIELD(rt
, BREAK
),
132 PF_RULE_FIELD(allow_opts
, BREAK
),
133 PF_RULE_FIELD(rule_flag
, BREAK
),
134 PF_RULE_FIELD(action
, BREAK
),
135 PF_RULE_FIELD(log
, BREAK
),
136 PF_RULE_FIELD(quick
, BREAK
),
137 PF_RULE_FIELD(return_ttl
, BREAK
),
138 PF_RULE_FIELD(overload_tblname
, BREAK
),
139 PF_RULE_FIELD(flush
, BREAK
),
140 PF_RULE_FIELD(rpool
, BREAK
),
141 PF_RULE_FIELD(logif
, BREAK
),
144 * Any fields not listed in this structure act as BREAK fields
149 * These fields must not differ when we merge two rules together but
150 * their difference isn't enough to put the rules in different superblocks.
151 * There are no problems re-ordering any rules with these fields.
153 PF_RULE_FIELD(af
, NOMERGE
),
154 PF_RULE_FIELD(ifnot
, NOMERGE
),
155 PF_RULE_FIELD(ifname
, NOMERGE
), /* hack for IF groups */
156 PF_RULE_FIELD(match_tag_not
, NOMERGE
),
157 PF_RULE_FIELD(match_tagname
, NOMERGE
),
158 PF_RULE_FIELD(os_fingerprint
, NOMERGE
),
159 PF_RULE_FIELD(timeout
, NOMERGE
),
160 PF_RULE_FIELD(return_icmp
, NOMERGE
),
161 PF_RULE_FIELD(return_icmp6
, NOMERGE
),
162 PF_RULE_FIELD(uid
, NOMERGE
),
163 PF_RULE_FIELD(gid
, NOMERGE
),
164 PF_RULE_FIELD(direction
, NOMERGE
),
165 PF_RULE_FIELD(proto
, NOMERGE
),
166 PF_RULE_FIELD(type
, NOMERGE
),
167 PF_RULE_FIELD(code
, NOMERGE
),
168 PF_RULE_FIELD(flags
, NOMERGE
),
169 PF_RULE_FIELD(flagset
, NOMERGE
),
170 PF_RULE_FIELD(tos
, NOMERGE
),
171 PF_RULE_FIELD(src
.port
, NOMERGE
),
172 PF_RULE_FIELD(dst
.port
, NOMERGE
),
173 PF_RULE_FIELD(src
.port_op
, NOMERGE
),
174 PF_RULE_FIELD(dst
.port_op
, NOMERGE
),
175 PF_RULE_FIELD(src
.neg
, NOMERGE
),
176 PF_RULE_FIELD(dst
.neg
, NOMERGE
),
178 /* These fields can be merged */
179 PF_RULE_FIELD(src
.addr
, COMBINED
),
180 PF_RULE_FIELD(dst
.addr
, COMBINED
),
182 /* We just don't care about these fields. They're set by the kernel */
183 PF_RULE_FIELD(skip
, DC
),
184 PF_RULE_FIELD(evaluations
, DC
),
185 PF_RULE_FIELD(packets
, DC
),
186 PF_RULE_FIELD(bytes
, DC
),
187 PF_RULE_FIELD(kif
, DC
),
188 PF_RULE_FIELD(states_cur
, DC
),
189 PF_RULE_FIELD(states_tot
, DC
),
190 PF_RULE_FIELD(src_nodes
, DC
),
191 PF_RULE_FIELD(nr
, DC
),
192 PF_RULE_FIELD(entries
, DC
),
193 PF_RULE_FIELD(qid
, DC
),
194 PF_RULE_FIELD(pqid
, DC
),
195 PF_RULE_FIELD(anchor_relative
, DC
),
196 PF_RULE_FIELD(anchor_wildcard
, DC
),
197 PF_RULE_FIELD(tag
, DC
),
198 PF_RULE_FIELD(match_tag
, DC
),
199 PF_RULE_FIELD(overload_tbl
, DC
),
201 /* These fields should never be set in a PASS/BLOCK rule */
202 PF_RULE_FIELD(natpass
, NEVER
),
203 PF_RULE_FIELD(max_mss
, NEVER
),
204 PF_RULE_FIELD(min_ttl
, NEVER
),
205 PF_RULE_FIELD(set_tos
, NEVER
),
210 int add_opt_table(struct pfctl
*, struct pf_opt_tbl
**, sa_family_t
,
211 struct pf_rule_addr
*);
212 int addrs_combineable(struct pf_rule_addr
*, struct pf_rule_addr
*);
213 int addrs_equal(struct pf_rule_addr
*, struct pf_rule_addr
*);
214 int block_feedback(struct pfctl
*, struct superblock
*);
215 int combine_rules(struct pfctl
*, struct superblock
*);
216 void comparable_rule(struct pf_rule
*, const struct pf_rule
*, int);
217 int construct_superblocks(struct pfctl
*, struct pf_opt_queue
*,
218 struct superblocks
*);
219 void exclude_supersets(struct pf_rule
*, struct pf_rule
*);
220 int interface_group(const char *);
221 int load_feedback_profile(struct pfctl
*, struct superblocks
*);
222 int optimize_superblock(struct pfctl
*, struct superblock
*);
223 int pf_opt_create_table(struct pfctl
*, struct pf_opt_tbl
*);
224 void remove_from_skipsteps(struct skiplist
*, struct superblock
*,
225 struct pf_opt_rule
*, struct pf_skip_step
*);
226 int remove_identical_rules(struct pfctl
*, struct superblock
*);
227 int reorder_rules(struct pfctl
*, struct superblock
*, int);
228 int rules_combineable(struct pf_rule
*, struct pf_rule
*);
229 void skip_append(struct superblock
*, int, struct pf_skip_step
*,
230 struct pf_opt_rule
*);
231 int skip_compare(int, struct pf_skip_step
*, struct pf_opt_rule
*);
232 void skip_init(void);
233 int skip_cmp_af(struct pf_rule
*, struct pf_rule
*);
234 int skip_cmp_dir(struct pf_rule
*, struct pf_rule
*);
235 int skip_cmp_dst_addr(struct pf_rule
*, struct pf_rule
*);
236 int skip_cmp_dst_port(struct pf_rule
*, struct pf_rule
*);
237 int skip_cmp_ifp(struct pf_rule
*, struct pf_rule
*);
238 int skip_cmp_proto(struct pf_rule
*, struct pf_rule
*);
239 int skip_cmp_src_addr(struct pf_rule
*, struct pf_rule
*);
240 int skip_cmp_src_port(struct pf_rule
*, struct pf_rule
*);
241 int superblock_inclusive(struct superblock
*, struct pf_opt_rule
*);
242 void superblock_free(struct pfctl
*, struct superblock
*);
245 int (*skip_comparitors
[PF_SKIP_COUNT
])(struct pf_rule
*, struct pf_rule
*);
246 const char *skip_comparitors_names
[PF_SKIP_COUNT
];
247 #define PF_SKIP_COMPARITORS { \
248 { "ifp", PF_SKIP_IFP, skip_cmp_ifp }, \
249 { "dir", PF_SKIP_DIR, skip_cmp_dir }, \
250 { "af", PF_SKIP_AF, skip_cmp_af }, \
251 { "proto", PF_SKIP_PROTO, skip_cmp_proto }, \
252 { "saddr", PF_SKIP_SRC_ADDR, skip_cmp_src_addr }, \
253 { "sport", PF_SKIP_SRC_PORT, skip_cmp_src_port }, \
254 { "daddr", PF_SKIP_DST_ADDR, skip_cmp_dst_addr }, \
255 { "dport", PF_SKIP_DST_PORT, skip_cmp_dst_port } \
258 struct pfr_buffer table_buffer
;
259 int table_identifier
;
263 pfctl_optimize_ruleset(struct pfctl
*pf
, struct pf_ruleset
*rs
)
265 struct superblocks superblocks
;
266 struct pf_opt_queue opt_queue
;
267 struct superblock
*block
;
268 struct pf_opt_rule
*por
;
270 struct pf_rulequeue
*old_rules
;
272 DEBUG("optimizing ruleset");
273 memset(&table_buffer
, 0, sizeof(table_buffer
));
275 TAILQ_INIT(&opt_queue
);
277 old_rules
= rs
->rules
[PF_RULESET_FILTER
].active
.ptr
;
278 rs
->rules
[PF_RULESET_FILTER
].active
.ptr
=
279 rs
->rules
[PF_RULESET_FILTER
].inactive
.ptr
;
280 rs
->rules
[PF_RULESET_FILTER
].inactive
.ptr
= old_rules
;
283 * XXX expanding the pf_opt_rule format throughout pfctl might allow
284 * us to avoid all this copying.
286 while ((r
= TAILQ_FIRST(rs
->rules
[PF_RULESET_FILTER
].inactive
.ptr
))
288 TAILQ_REMOVE(rs
->rules
[PF_RULESET_FILTER
].inactive
.ptr
, r
,
290 if ((por
= calloc(1, sizeof(*por
))) == NULL
)
292 memcpy(&por
->por_rule
, r
, sizeof(*r
));
293 if (TAILQ_FIRST(&r
->rpool
.list
) != NULL
) {
294 TAILQ_INIT(&por
->por_rule
.rpool
.list
);
295 pfctl_move_pool(&r
->rpool
, &por
->por_rule
.rpool
);
297 bzero(&por
->por_rule
.rpool
,
298 sizeof(por
->por_rule
.rpool
));
301 TAILQ_INSERT_TAIL(&opt_queue
, por
, por_entry
);
304 TAILQ_INIT(&superblocks
);
305 if (construct_superblocks(pf
, &opt_queue
, &superblocks
))
308 if (pf
->optimize
& PF_OPTIMIZE_PROFILE
) {
309 if (load_feedback_profile(pf
, &superblocks
))
313 TAILQ_FOREACH(block
, &superblocks
, sb_entry
) {
314 if (optimize_superblock(pf
, block
))
318 rs
->anchor
->refcnt
= 0;
319 while ((block
= TAILQ_FIRST(&superblocks
))) {
320 TAILQ_REMOVE(&superblocks
, block
, sb_entry
);
322 while ((por
= TAILQ_FIRST(&block
->sb_rules
))) {
323 TAILQ_REMOVE(&block
->sb_rules
, por
, por_entry
);
324 por
->por_rule
.nr
= rs
->anchor
->refcnt
++;
325 if ((r
= calloc(1, sizeof(*r
))) == NULL
)
327 memcpy(r
, &por
->por_rule
, sizeof(*r
));
328 TAILQ_INIT(&r
->rpool
.list
);
329 pfctl_move_pool(&por
->por_rule
.rpool
, &r
->rpool
);
331 rs
->rules
[PF_RULESET_FILTER
].active
.ptr
,
341 while ((por
= TAILQ_FIRST(&opt_queue
))) {
342 TAILQ_REMOVE(&opt_queue
, por
, por_entry
);
343 if (por
->por_src_tbl
) {
344 pfr_buf_clear(por
->por_src_tbl
->pt_buf
);
345 free(por
->por_src_tbl
->pt_buf
);
346 free(por
->por_src_tbl
);
348 if (por
->por_dst_tbl
) {
349 pfr_buf_clear(por
->por_dst_tbl
->pt_buf
);
350 free(por
->por_dst_tbl
->pt_buf
);
351 free(por
->por_dst_tbl
);
355 while ((block
= TAILQ_FIRST(&superblocks
))) {
356 TAILQ_REMOVE(&superblocks
, block
, sb_entry
);
357 superblock_free(pf
, block
);
364 * Go ahead and optimize a superblock
367 optimize_superblock(struct pfctl
*pf
, struct superblock
*block
)
370 struct pf_opt_rule
*por
;
371 #endif /* OPT_DEBUG */
373 /* We have a few optimization passes:
374 * 1) remove duplicate rules or rules that are a subset of other
376 * 2) combine otherwise identical rules with different IP addresses
377 * into a single rule and put the addresses in a table.
378 * 3) re-order the rules to improve kernel skip steps
379 * 4) re-order the 'quick' rules based on feedback from the
380 * active ruleset statistics
382 * XXX combine_rules() doesn't combine v4 and v6 rules. would just
383 * have to keep af in the table container, make af 'COMBINE' and
384 * twiddle the af on the merged rule
385 * XXX maybe add a weighting to the metric on skipsteps when doing
386 * reordering. sometimes two sequential tables will be better
387 * than four consecutive interfaces.
388 * XXX need to adjust the skipstep count of everything after PROTO,
389 * since they aren't actually checked on a proto mismatch in
390 * pf_test_{tcp, udp, icmp}()
391 * XXX should i treat proto=0, af=0 or dir=0 special in skepstep
392 * calculation since they are a DC?
393 * XXX keep last skiplist of last superblock to influence this
394 * superblock. '5 inet6 log' should make '3 inet6' come before '4
395 * inet' in the next superblock.
396 * XXX would be useful to add tables for ports
397 * XXX we can also re-order some mutually exclusive superblocks to
398 * try merging superblocks before any of these optimization passes.
399 * for instance a single 'log in' rule in the middle of non-logging
403 /* shortcut. there will be a lot of 1-rule superblocks */
404 if (!TAILQ_NEXT(TAILQ_FIRST(&block
->sb_rules
), por_entry
))
408 printf("--- Superblock ---\n");
409 TAILQ_FOREACH(por
, &block
->sb_rules
, por_entry
) {
411 print_rule(&por
->por_rule
, por
->por_rule
.anchor
?
412 por
->por_rule
.anchor
->name
: "", 1, 0);
414 #endif /* OPT_DEBUG */
417 if (remove_identical_rules(pf
, block
))
419 if (combine_rules(pf
, block
))
421 if ((pf
->optimize
& PF_OPTIMIZE_PROFILE
) &&
422 TAILQ_FIRST(&block
->sb_rules
)->por_rule
.quick
&&
423 block
->sb_profiled_block
) {
424 if (block_feedback(pf
, block
))
426 } else if (reorder_rules(pf
, block
, 0)) {
431 * Don't add any optimization passes below reorder_rules(). It will
432 * have divided superblocks into smaller blocks for further refinement
433 * and doesn't put them back together again. What once was a true
434 * superblock might have been split into multiple superblocks.
438 printf("--- END Superblock ---\n");
439 #endif /* OPT_DEBUG */
445 * Optimization pass #1: remove identical rules
448 remove_identical_rules(struct pfctl
*pf
, struct superblock
*block
)
450 struct pf_opt_rule
*por1
, *por2
, *por_next
, *por2_next
;
451 struct pf_rule a
, a2
, b
, b2
;
453 for (por1
= TAILQ_FIRST(&block
->sb_rules
); por1
; por1
= por_next
) {
454 por_next
= TAILQ_NEXT(por1
, por_entry
);
455 for (por2
= por_next
; por2
; por2
= por2_next
) {
456 por2_next
= TAILQ_NEXT(por2
, por_entry
);
457 comparable_rule(&a
, &por1
->por_rule
, DC
);
458 comparable_rule(&b
, &por2
->por_rule
, DC
);
459 memcpy(&a2
, &a
, sizeof(a2
));
460 memcpy(&b2
, &b
, sizeof(b2
));
462 exclude_supersets(&a
, &b
);
463 exclude_supersets(&b2
, &a2
);
464 if (memcmp(&a
, &b
, sizeof(a
)) == 0) {
465 DEBUG("removing identical rule nr%d = *nr%d*",
466 por1
->por_rule
.nr
, por2
->por_rule
.nr
);
467 TAILQ_REMOVE(&block
->sb_rules
, por2
, por_entry
);
468 if (por_next
== por2
)
469 por_next
= TAILQ_NEXT(por1
, por_entry
);
471 } else if (memcmp(&a2
, &b2
, sizeof(a2
)) == 0) {
472 DEBUG("removing identical rule *nr%d* = nr%d",
473 por1
->por_rule
.nr
, por2
->por_rule
.nr
);
474 TAILQ_REMOVE(&block
->sb_rules
, por1
, por_entry
);
486 * Optimization pass #2: combine similar rules with different addresses
487 * into a single rule and a table
490 combine_rules(struct pfctl
*pf
, struct superblock
*block
)
492 struct pf_opt_rule
*p1
, *p2
, *por_next
;
495 if ((pf
->loadopt
& PFCTL_FLAG_TABLE
) == 0) {
496 warnx("Must enable table loading for optimizations");
500 /* First we make a pass to combine the rules. O(n log n) */
501 TAILQ_FOREACH(p1
, &block
->sb_rules
, por_entry
) {
502 for (p2
= TAILQ_NEXT(p1
, por_entry
); p2
; p2
= por_next
) {
503 por_next
= TAILQ_NEXT(p2
, por_entry
);
505 src_eq
= addrs_equal(&p1
->por_rule
.src
,
507 dst_eq
= addrs_equal(&p1
->por_rule
.dst
,
510 if (src_eq
&& !dst_eq
&& p1
->por_src_tbl
== NULL
&&
511 p2
->por_dst_tbl
== NULL
&&
512 p2
->por_src_tbl
== NULL
&&
513 rules_combineable(&p1
->por_rule
, &p2
->por_rule
) &&
514 addrs_combineable(&p1
->por_rule
.dst
,
515 &p2
->por_rule
.dst
)) {
516 DEBUG("can combine rules nr%d = nr%d",
517 p1
->por_rule
.nr
, p2
->por_rule
.nr
);
518 if (p1
->por_dst_tbl
== NULL
&&
519 add_opt_table(pf
, &p1
->por_dst_tbl
,
520 p1
->por_rule
.af
, &p1
->por_rule
.dst
))
522 if (add_opt_table(pf
, &p1
->por_dst_tbl
,
523 p1
->por_rule
.af
, &p2
->por_rule
.dst
))
525 p2
->por_dst_tbl
= p1
->por_dst_tbl
;
526 if (p1
->por_dst_tbl
->pt_rulecount
>=
528 TAILQ_REMOVE(&block
->sb_rules
, p2
,
532 } else if (!src_eq
&& dst_eq
&& p1
->por_dst_tbl
== NULL
533 && p2
->por_src_tbl
== NULL
&&
534 p2
->por_dst_tbl
== NULL
&&
535 rules_combineable(&p1
->por_rule
, &p2
->por_rule
) &&
536 addrs_combineable(&p1
->por_rule
.src
,
537 &p2
->por_rule
.src
)) {
538 DEBUG("can combine rules nr%d = nr%d",
539 p1
->por_rule
.nr
, p2
->por_rule
.nr
);
540 if (p1
->por_src_tbl
== NULL
&&
541 add_opt_table(pf
, &p1
->por_src_tbl
,
542 p1
->por_rule
.af
, &p1
->por_rule
.src
))
544 if (add_opt_table(pf
, &p1
->por_src_tbl
,
545 p1
->por_rule
.af
, &p2
->por_rule
.src
))
547 p2
->por_src_tbl
= p1
->por_src_tbl
;
548 if (p1
->por_src_tbl
->pt_rulecount
>=
550 TAILQ_REMOVE(&block
->sb_rules
, p2
,
560 * Then we make a final pass to create a valid table name and
561 * insert the name into the rules.
563 for (p1
= TAILQ_FIRST(&block
->sb_rules
); p1
; p1
= por_next
) {
564 por_next
= TAILQ_NEXT(p1
, por_entry
);
565 assert(p1
->por_src_tbl
== NULL
|| p1
->por_dst_tbl
== NULL
);
567 if (p1
->por_src_tbl
&& p1
->por_src_tbl
->pt_rulecount
>=
569 if (p1
->por_src_tbl
->pt_generated
) {
570 /* This rule is included in a table */
571 TAILQ_REMOVE(&block
->sb_rules
, p1
, por_entry
);
575 p1
->por_src_tbl
->pt_generated
= 1;
577 if ((pf
->opts
& PF_OPT_NOACTION
) == 0 &&
578 pf_opt_create_table(pf
, p1
->por_src_tbl
))
583 if (pf
->opts
& PF_OPT_VERBOSE
)
584 print_tabledef(p1
->por_src_tbl
->pt_name
,
586 &p1
->por_src_tbl
->pt_nodes
);
588 memset(&p1
->por_rule
.src
.addr
, 0,
589 sizeof(p1
->por_rule
.src
.addr
));
590 p1
->por_rule
.src
.addr
.type
= PF_ADDR_TABLE
;
591 strlcpy(p1
->por_rule
.src
.addr
.v
.tblname
,
592 p1
->por_src_tbl
->pt_name
,
593 sizeof(p1
->por_rule
.src
.addr
.v
.tblname
));
595 pfr_buf_clear(p1
->por_src_tbl
->pt_buf
);
596 free(p1
->por_src_tbl
->pt_buf
);
597 p1
->por_src_tbl
->pt_buf
= NULL
;
599 if (p1
->por_dst_tbl
&& p1
->por_dst_tbl
->pt_rulecount
>=
601 if (p1
->por_dst_tbl
->pt_generated
) {
602 /* This rule is included in a table */
603 TAILQ_REMOVE(&block
->sb_rules
, p1
, por_entry
);
607 p1
->por_dst_tbl
->pt_generated
= 1;
609 if ((pf
->opts
& PF_OPT_NOACTION
) == 0 &&
610 pf_opt_create_table(pf
, p1
->por_dst_tbl
))
614 if (pf
->opts
& PF_OPT_VERBOSE
)
615 print_tabledef(p1
->por_dst_tbl
->pt_name
,
617 &p1
->por_dst_tbl
->pt_nodes
);
619 memset(&p1
->por_rule
.dst
.addr
, 0,
620 sizeof(p1
->por_rule
.dst
.addr
));
621 p1
->por_rule
.dst
.addr
.type
= PF_ADDR_TABLE
;
622 strlcpy(p1
->por_rule
.dst
.addr
.v
.tblname
,
623 p1
->por_dst_tbl
->pt_name
,
624 sizeof(p1
->por_rule
.dst
.addr
.v
.tblname
));
626 pfr_buf_clear(p1
->por_dst_tbl
->pt_buf
);
627 free(p1
->por_dst_tbl
->pt_buf
);
628 p1
->por_dst_tbl
->pt_buf
= NULL
;
637 * Optimization pass #3: re-order rules to improve skip steps
640 reorder_rules(struct pfctl
*pf
, struct superblock
*block
, int depth
)
642 struct superblock
*newblock
;
643 struct pf_skip_step
*skiplist
;
644 struct pf_opt_rule
*por
;
645 int i
, largest
, largest_list
, rule_count
= 0;
646 TAILQ_HEAD( , pf_opt_rule
) head
;
649 * Calculate the best-case skip steps. We put each rule in a list
650 * of other rules with common fields
652 for (i
= 0; i
< PF_SKIP_COUNT
; i
++) {
653 TAILQ_FOREACH(por
, &block
->sb_rules
, por_entry
) {
654 TAILQ_FOREACH(skiplist
, &block
->sb_skipsteps
[i
],
656 if (skip_compare(i
, skiplist
, por
) == 0)
659 if (skiplist
== NULL
) {
660 if ((skiplist
= calloc(1, sizeof(*skiplist
))) ==
663 TAILQ_INIT(&skiplist
->ps_rules
);
664 TAILQ_INSERT_TAIL(&block
->sb_skipsteps
[i
],
667 skip_append(block
, i
, skiplist
, por
);
671 TAILQ_FOREACH(por
, &block
->sb_rules
, por_entry
)
675 * Now we're going to ignore any fields that are identical between
676 * all of the rules in the superblock and those fields which differ
677 * between every rule in the superblock.
680 for (i
= 0; i
< PF_SKIP_COUNT
; i
++) {
681 skiplist
= TAILQ_FIRST(&block
->sb_skipsteps
[i
]);
682 if (skiplist
->ps_count
== rule_count
) {
683 DEBUG("(%d) original skipstep '%s' is all rules",
684 depth
, skip_comparitors_names
[i
]);
685 skiplist
->ps_count
= 0;
686 } else if (skiplist
->ps_count
== 1) {
687 skiplist
->ps_count
= 0;
689 DEBUG("(%d) original skipstep '%s' largest jump is %d",
690 depth
, skip_comparitors_names
[i
],
692 if (skiplist
->ps_count
> largest
)
693 largest
= skiplist
->ps_count
;
697 /* Ugh. There is NO commonality in the superblock on which
698 * optimize the skipsteps optimization.
704 * Now we're going to empty the superblock rule list and re-create
705 * it based on a more optimal skipstep order.
708 while ((por
= TAILQ_FIRST(&block
->sb_rules
))) {
709 TAILQ_REMOVE(&block
->sb_rules
, por
, por_entry
);
710 TAILQ_INSERT_TAIL(&head
, por
, por_entry
);
714 while (!TAILQ_EMPTY(&head
)) {
718 * Find the most useful skip steps remaining
720 for (i
= 0; i
< PF_SKIP_COUNT
; i
++) {
721 skiplist
= TAILQ_FIRST(&block
->sb_skipsteps
[i
]);
722 if (skiplist
->ps_count
> largest
) {
723 largest
= skiplist
->ps_count
;
730 * Nothing useful left. Leave remaining rules in order.
732 DEBUG("(%d) no more commonality for skip steps", depth
);
733 while ((por
= TAILQ_FIRST(&head
))) {
734 TAILQ_REMOVE(&head
, por
, por_entry
);
735 TAILQ_INSERT_TAIL(&block
->sb_rules
, por
,
740 * There is commonality. Extract those common rules
741 * and place them in the ruleset adjacent to each
744 skiplist
= TAILQ_FIRST(&block
->sb_skipsteps
[
746 DEBUG("(%d) skipstep '%s' largest jump is %d @ #%d",
747 depth
, skip_comparitors_names
[largest_list
],
748 largest
, TAILQ_FIRST(&TAILQ_FIRST(&block
->
749 sb_skipsteps
[largest_list
])->ps_rules
)->
751 TAILQ_REMOVE(&block
->sb_skipsteps
[largest_list
],
756 * There may be further commonality inside these
757 * rules. So we'll split them off into their own
758 * superblock and pass it back into the optimizer.
760 if (skiplist
->ps_count
> 2) {
761 if ((newblock
= calloc(1, sizeof(*newblock
)))
766 TAILQ_INIT(&newblock
->sb_rules
);
767 for (i
= 0; i
< PF_SKIP_COUNT
; i
++)
768 TAILQ_INIT(&newblock
->sb_skipsteps
[i
]);
769 TAILQ_INSERT_BEFORE(block
, newblock
, sb_entry
);
770 DEBUG("(%d) splitting off %d rules from superblock @ #%d",
771 depth
, skiplist
->ps_count
,
772 TAILQ_FIRST(&skiplist
->ps_rules
)->
778 while ((por
= TAILQ_FIRST(&skiplist
->ps_rules
))) {
779 TAILQ_REMOVE(&head
, por
, por_entry
);
780 TAILQ_REMOVE(&skiplist
->ps_rules
, por
,
781 por_skip_entry
[largest_list
]);
782 TAILQ_INSERT_TAIL(&newblock
->sb_rules
, por
,
785 /* Remove this rule from all other skiplists */
786 remove_from_skipsteps(&block
->sb_skipsteps
[
787 largest_list
], block
, por
, skiplist
);
790 if (newblock
!= block
)
791 if (reorder_rules(pf
, newblock
, depth
+ 1))
797 for (i
= 0; i
< PF_SKIP_COUNT
; i
++) {
798 while ((skiplist
= TAILQ_FIRST(&block
->sb_skipsteps
[i
]))) {
799 TAILQ_REMOVE(&block
->sb_skipsteps
[i
], skiplist
,
810 * Optimization pass #4: re-order 'quick' rules based on feedback from the
811 * currently running ruleset
814 block_feedback(struct pfctl
*pf
, struct superblock
*block
)
816 TAILQ_HEAD( , pf_opt_rule
) queue
;
817 struct pf_opt_rule
*por1
, *por2
;
818 u_int64_t total_count
= 0;
823 * Walk through all of the profiled superblock's rules and copy
824 * the counters onto our rules.
826 TAILQ_FOREACH(por1
, &block
->sb_profiled_block
->sb_rules
, por_entry
) {
827 comparable_rule(&a
, &por1
->por_rule
, DC
);
828 total_count
+= por1
->por_rule
.packets
[0] +
829 por1
->por_rule
.packets
[1];
830 TAILQ_FOREACH(por2
, &block
->sb_rules
, por_entry
) {
831 if (por2
->por_profile_count
)
833 comparable_rule(&b
, &por2
->por_rule
, DC
);
834 if (memcmp(&a
, &b
, sizeof(a
)) == 0) {
835 por2
->por_profile_count
=
836 por1
->por_rule
.packets
[0] +
837 por1
->por_rule
.packets
[1];
842 superblock_free(pf
, block
->sb_profiled_block
);
843 block
->sb_profiled_block
= NULL
;
846 * Now we pull all of the rules off the superblock and re-insert them
851 while ((por1
= TAILQ_FIRST(&block
->sb_rules
)) != NULL
) {
852 TAILQ_REMOVE(&block
->sb_rules
, por1
, por_entry
);
853 TAILQ_INSERT_TAIL(&queue
, por1
, por_entry
);
856 while ((por1
= TAILQ_FIRST(&queue
)) != NULL
) {
857 TAILQ_REMOVE(&queue
, por1
, por_entry
);
858 /* XXX I should sort all of the unused rules based on skip steps */
859 TAILQ_FOREACH(por2
, &block
->sb_rules
, por_entry
) {
860 if (por1
->por_profile_count
> por2
->por_profile_count
) {
861 TAILQ_INSERT_BEFORE(por2
, por1
, por_entry
);
868 if (por2
== TAILQ_END(&block
->sb_rules
))
870 TAILQ_INSERT_TAIL(&block
->sb_rules
, por1
, por_entry
);
878 * Load the current ruleset from the kernel and try to associate them with
879 * the ruleset we're optimizing.
882 load_feedback_profile(struct pfctl
*pf
, struct superblocks
*superblocks
)
884 struct superblock
*block
, *blockcur
;
885 struct superblocks prof_superblocks
;
886 struct pf_opt_rule
*por
;
887 struct pf_opt_queue queue
;
888 struct pfioc_rule pr
;
893 TAILQ_INIT(&prof_superblocks
);
895 memset(&pr
, 0, sizeof(pr
));
896 pr
.rule
.action
= PF_PASS
;
897 if (ioctl(pf
->dev
, DIOCGETRULES
, &pr
)) {
898 warn("DIOCGETRULES");
903 DEBUG("Loading %d active rules for a feedback profile", mnr
);
904 for (nr
= 0; nr
< mnr
; ++nr
) {
905 struct pf_ruleset
*rs
;
906 if ((por
= calloc(1, sizeof(*por
))) == NULL
) {
911 if (ioctl(pf
->dev
, DIOCGETRULE
, &pr
)) {
912 warn("DIOCGETRULES");
915 memcpy(&por
->por_rule
, &pr
.rule
, sizeof(por
->por_rule
));
916 rs
= pf_find_or_create_ruleset(pr
.anchor_call
);
917 por
->por_rule
.anchor
= rs
->anchor
;
918 if (TAILQ_EMPTY(&por
->por_rule
.rpool
.list
))
919 memset(&por
->por_rule
.rpool
, 0,
920 sizeof(por
->por_rule
.rpool
));
921 TAILQ_INSERT_TAIL(&queue
, por
, por_entry
);
923 /* XXX pfctl_get_pool(pf->dev, &pr.rule.rpool, nr, pr.ticket,
924 * PF_PASS, pf->anchor) ???
925 * ... pfctl_clear_pool(&pr.rule.rpool)
929 if (construct_superblocks(pf
, &queue
, &prof_superblocks
))
934 * Now we try to associate the active ruleset's superblocks with
935 * the superblocks we're compiling.
937 block
= TAILQ_FIRST(superblocks
);
938 blockcur
= TAILQ_FIRST(&prof_superblocks
);
939 while (block
&& blockcur
) {
940 comparable_rule(&a
, &TAILQ_FIRST(&block
->sb_rules
)->por_rule
,
942 comparable_rule(&b
, &TAILQ_FIRST(&blockcur
->sb_rules
)->por_rule
,
944 if (memcmp(&a
, &b
, sizeof(a
)) == 0) {
945 /* The two superblocks lined up */
946 block
->sb_profiled_block
= blockcur
;
948 DEBUG("superblocks don't line up between #%d and #%d",
949 TAILQ_FIRST(&block
->sb_rules
)->por_rule
.nr
,
950 TAILQ_FIRST(&blockcur
->sb_rules
)->por_rule
.nr
);
953 block
= TAILQ_NEXT(block
, sb_entry
);
954 blockcur
= TAILQ_NEXT(blockcur
, sb_entry
);
959 /* Free any superblocks we couldn't link */
961 block
= TAILQ_NEXT(blockcur
, sb_entry
);
962 superblock_free(pf
, blockcur
);
970 * Compare a rule to a skiplist to see if the rule is a member
973 skip_compare(int skipnum
, struct pf_skip_step
*skiplist
,
974 struct pf_opt_rule
*por
)
976 struct pf_rule
*a
, *b
;
977 if (skipnum
>= PF_SKIP_COUNT
|| skipnum
< 0)
978 errx(1, "skip_compare() out of bounds");
980 b
= &TAILQ_FIRST(&skiplist
->ps_rules
)->por_rule
;
982 return ((skip_comparitors
[skipnum
])(a
, b
));
987 * Add a rule to a skiplist
990 skip_append(struct superblock
*superblock
, int skipnum
,
991 struct pf_skip_step
*skiplist
, struct pf_opt_rule
*por
)
993 struct pf_skip_step
*prev
;
995 skiplist
->ps_count
++;
996 TAILQ_INSERT_TAIL(&skiplist
->ps_rules
, por
, por_skip_entry
[skipnum
]);
998 /* Keep the list of skiplists sorted by whichever is larger */
999 while ((prev
= TAILQ_PREV(skiplist
, skiplist
, ps_entry
)) &&
1000 prev
->ps_count
< skiplist
->ps_count
) {
1001 TAILQ_REMOVE(&superblock
->sb_skipsteps
[skipnum
],
1002 skiplist
, ps_entry
);
1003 TAILQ_INSERT_BEFORE(prev
, skiplist
, ps_entry
);
1009 * Remove a rule from the other skiplist calculations.
1012 remove_from_skipsteps(struct skiplist
*head
, struct superblock
*block
,
1013 struct pf_opt_rule
*por
, struct pf_skip_step
*active_list
)
1015 struct pf_skip_step
*sk
, *next
;
1016 struct pf_opt_rule
*p2
;
1019 for (i
= 0; i
< PF_SKIP_COUNT
; i
++) {
1020 sk
= TAILQ_FIRST(&block
->sb_skipsteps
[i
]);
1021 if (sk
== NULL
|| sk
== active_list
|| sk
->ps_count
<= 1)
1025 TAILQ_FOREACH(p2
, &sk
->ps_rules
, por_skip_entry
[i
])
1027 TAILQ_REMOVE(&sk
->ps_rules
, p2
,
1033 } while (!found
&& (sk
= TAILQ_NEXT(sk
, ps_entry
)));
1035 /* Does this change the sorting order? */
1036 while ((next
= TAILQ_NEXT(sk
, ps_entry
)) &&
1037 next
->ps_count
> sk
->ps_count
) {
1038 TAILQ_REMOVE(head
, sk
, ps_entry
);
1039 TAILQ_INSERT_AFTER(head
, next
, sk
, ps_entry
);
1042 next
= TAILQ_NEXT(sk
, ps_entry
);
1043 assert(next
== NULL
|| next
->ps_count
<= sk
->ps_count
);
1044 #endif /* OPT_DEBUG */
1050 /* Compare two rules AF field for skiplist construction */
1052 skip_cmp_af(struct pf_rule
*a
, struct pf_rule
*b
)
1054 if (a
->af
!= b
->af
|| a
->af
== 0)
1059 /* Compare two rules DIRECTION field for skiplist construction */
1061 skip_cmp_dir(struct pf_rule
*a
, struct pf_rule
*b
)
1063 if (a
->direction
== 0 || a
->direction
!= b
->direction
)
1068 /* Compare two rules DST Address field for skiplist construction */
1070 skip_cmp_dst_addr(struct pf_rule
*a
, struct pf_rule
*b
)
1072 if (a
->dst
.neg
!= b
->dst
.neg
||
1073 a
->dst
.addr
.type
!= b
->dst
.addr
.type
)
1075 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1076 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1077 * a->proto == IPPROTO_ICMP
1080 switch (a
->dst
.addr
.type
) {
1081 case PF_ADDR_ADDRMASK
:
1082 if (memcmp(&a
->dst
.addr
.v
.a
.addr
, &b
->dst
.addr
.v
.a
.addr
,
1083 sizeof(a
->dst
.addr
.v
.a
.addr
)) ||
1084 memcmp(&a
->dst
.addr
.v
.a
.mask
, &b
->dst
.addr
.v
.a
.mask
,
1085 sizeof(a
->dst
.addr
.v
.a
.mask
)) ||
1086 (a
->dst
.addr
.v
.a
.addr
.addr32
[0] == 0 &&
1087 a
->dst
.addr
.v
.a
.addr
.addr32
[1] == 0 &&
1088 a
->dst
.addr
.v
.a
.addr
.addr32
[2] == 0 &&
1089 a
->dst
.addr
.v
.a
.addr
.addr32
[3] == 0))
1092 case PF_ADDR_DYNIFTL
:
1093 if (strcmp(a
->dst
.addr
.v
.ifname
, b
->dst
.addr
.v
.ifname
) != 0 ||
1094 a
->dst
.addr
.iflags
!= a
->dst
.addr
.iflags
||
1095 memcmp(&a
->dst
.addr
.v
.a
.mask
, &b
->dst
.addr
.v
.a
.mask
,
1096 sizeof(a
->dst
.addr
.v
.a
.mask
)))
1099 case PF_ADDR_NOROUTE
:
1100 case PF_ADDR_URPFFAILED
:
1103 return (strcmp(a
->dst
.addr
.v
.tblname
, b
->dst
.addr
.v
.tblname
));
1108 /* Compare two rules DST port field for skiplist construction */
1110 skip_cmp_dst_port(struct pf_rule
*a
, struct pf_rule
*b
)
1112 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1113 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1114 * a->proto == IPPROTO_ICMP
1117 if (a
->dst
.port_op
== PF_OP_NONE
|| a
->dst
.port_op
!= b
->dst
.port_op
||
1118 a
->dst
.port
[0] != b
->dst
.port
[0] ||
1119 a
->dst
.port
[1] != b
->dst
.port
[1])
1124 /* Compare two rules IFP field for skiplist construction */
1126 skip_cmp_ifp(struct pf_rule
*a
, struct pf_rule
*b
)
1128 if (strcmp(a
->ifname
, b
->ifname
) || a
->ifname
[0] == '\0')
1130 return (a
->ifnot
!= b
->ifnot
);
1133 /* Compare two rules PROTO field for skiplist construction */
1135 skip_cmp_proto(struct pf_rule
*a
, struct pf_rule
*b
)
1137 return (a
->proto
!= b
->proto
|| a
->proto
== 0);
1140 /* Compare two rules SRC addr field for skiplist construction */
1142 skip_cmp_src_addr(struct pf_rule
*a
, struct pf_rule
*b
)
1144 if (a
->src
.neg
!= b
->src
.neg
||
1145 a
->src
.addr
.type
!= b
->src
.addr
.type
)
1147 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1148 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1149 * a->proto == IPPROTO_ICMP
1152 switch (a
->src
.addr
.type
) {
1153 case PF_ADDR_ADDRMASK
:
1154 if (memcmp(&a
->src
.addr
.v
.a
.addr
, &b
->src
.addr
.v
.a
.addr
,
1155 sizeof(a
->src
.addr
.v
.a
.addr
)) ||
1156 memcmp(&a
->src
.addr
.v
.a
.mask
, &b
->src
.addr
.v
.a
.mask
,
1157 sizeof(a
->src
.addr
.v
.a
.mask
)) ||
1158 (a
->src
.addr
.v
.a
.addr
.addr32
[0] == 0 &&
1159 a
->src
.addr
.v
.a
.addr
.addr32
[1] == 0 &&
1160 a
->src
.addr
.v
.a
.addr
.addr32
[2] == 0 &&
1161 a
->src
.addr
.v
.a
.addr
.addr32
[3] == 0))
1164 case PF_ADDR_DYNIFTL
:
1165 if (strcmp(a
->src
.addr
.v
.ifname
, b
->src
.addr
.v
.ifname
) != 0 ||
1166 a
->src
.addr
.iflags
!= a
->src
.addr
.iflags
||
1167 memcmp(&a
->src
.addr
.v
.a
.mask
, &b
->src
.addr
.v
.a
.mask
,
1168 sizeof(a
->src
.addr
.v
.a
.mask
)))
1171 case PF_ADDR_NOROUTE
:
1172 case PF_ADDR_URPFFAILED
:
1175 return (strcmp(a
->src
.addr
.v
.tblname
, b
->src
.addr
.v
.tblname
));
1180 /* Compare two rules SRC port field for skiplist construction */
1182 skip_cmp_src_port(struct pf_rule
*a
, struct pf_rule
*b
)
1184 if (a
->src
.port_op
== PF_OP_NONE
|| a
->src
.port_op
!= b
->src
.port_op
||
1185 a
->src
.port
[0] != b
->src
.port
[0] ||
1186 a
->src
.port
[1] != b
->src
.port
[1])
1188 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1189 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1190 * a->proto == IPPROTO_ICMP
1203 int (*func
)(struct pf_rule
*, struct pf_rule
*);
1204 } comps
[] = PF_SKIP_COMPARITORS
;
1207 for (skipnum
= 0; skipnum
< PF_SKIP_COUNT
; skipnum
++) {
1208 for (i
= 0; i
< sizeof(comps
)/sizeof(*comps
); i
++)
1209 if (comps
[i
].skipnum
== skipnum
) {
1210 skip_comparitors
[skipnum
] = comps
[i
].func
;
1211 skip_comparitors_names
[skipnum
] = comps
[i
].name
;
1214 for (skipnum
= 0; skipnum
< PF_SKIP_COUNT
; skipnum
++)
1215 if (skip_comparitors
[skipnum
] == NULL
)
1216 errx(1, "Need to add skip step comparitor to pfctl?!");
1220 * Add a host/netmask to a table
1223 add_opt_table(struct pfctl
*pf
, struct pf_opt_tbl
**tbl
, sa_family_t af
,
1224 struct pf_rule_addr
*addr
)
1228 #endif /* OPT_DEBUG */
1229 static int tablenum
= 0;
1230 struct node_host node_host
;
1233 if ((*tbl
= calloc(1, sizeof(**tbl
))) == NULL
||
1234 ((*tbl
)->pt_buf
= calloc(1, sizeof(*(*tbl
)->pt_buf
))) ==
1237 (*tbl
)->pt_buf
->pfrb_type
= PFRB_ADDRS
;
1238 SIMPLEQ_INIT(&(*tbl
)->pt_nodes
);
1240 /* This is just a temporary table name */
1241 snprintf((*tbl
)->pt_name
, sizeof((*tbl
)->pt_name
), "%s%d",
1242 PF_OPT_TABLE_PREFIX
, tablenum
++);
1243 DEBUG("creating table <%s>", (*tbl
)->pt_name
);
1246 memset(&node_host
, 0, sizeof(node_host
));
1248 node_host
.addr
= addr
->addr
;
1251 DEBUG("<%s> adding %s/%d", (*tbl
)->pt_name
, inet_ntop(af
,
1252 &node_host
.addr
.v
.a
.addr
, buf
, sizeof(buf
)),
1253 unmask(&node_host
.addr
.v
.a
.mask
, af
));
1254 #endif /* OPT_DEBUG */
1256 if (append_addr_host((*tbl
)->pt_buf
, &node_host
, 0, 0)) {
1257 warn("failed to add host");
1260 if (pf
->opts
& PF_OPT_VERBOSE
) {
1261 struct node_tinit
*ti
;
1263 if ((ti
= calloc(1, sizeof(*ti
))) == NULL
)
1265 if ((ti
->host
= malloc(sizeof(*ti
->host
))) == NULL
)
1267 memcpy(ti
->host
, &node_host
, sizeof(*ti
->host
));
1268 SIMPLEQ_INSERT_TAIL(&(*tbl
)->pt_nodes
, ti
, entries
);
1271 (*tbl
)->pt_rulecount
++;
1272 if ((*tbl
)->pt_rulecount
== TABLE_THRESHOLD
)
1273 DEBUG("table <%s> now faster than skip steps", (*tbl
)->pt_name
);
1280 * Do the dirty work of choosing an unused table name and creating it.
1281 * (be careful with the table name, it might already be used in another anchor)
1284 pf_opt_create_table(struct pfctl
*pf
, struct pf_opt_tbl
*tbl
)
1286 static int tablenum
;
1287 struct pfr_table
*t
;
1289 if (table_buffer
.pfrb_type
== 0) {
1290 /* Initialize the list of tables */
1291 table_buffer
.pfrb_type
= PFRB_TABLES
;
1293 pfr_buf_grow(&table_buffer
, table_buffer
.pfrb_size
);
1294 table_buffer
.pfrb_size
= table_buffer
.pfrb_msize
;
1295 if (pfr_get_tables(NULL
, table_buffer
.pfrb_caddr
,
1296 &table_buffer
.pfrb_size
, PFR_FLAG_ALLRSETS
))
1297 err(1, "pfr_get_tables");
1298 if (table_buffer
.pfrb_size
<= table_buffer
.pfrb_msize
)
1301 table_identifier
= arc4random();
1304 /* XXX would be *really* nice to avoid duplicating identical tables */
1306 /* Now we have to pick a table name that isn't used */
1308 DEBUG("translating temporary table <%s> to <%s%x_%d>", tbl
->pt_name
,
1309 PF_OPT_TABLE_PREFIX
, table_identifier
, tablenum
);
1310 snprintf(tbl
->pt_name
, sizeof(tbl
->pt_name
), "%s%x_%d",
1311 PF_OPT_TABLE_PREFIX
, table_identifier
, tablenum
);
1312 PFRB_FOREACH(t
, &table_buffer
) {
1313 if (strcasecmp(t
->pfrt_name
, tbl
->pt_name
) == 0) {
1314 /* Collision. Try again */
1315 DEBUG("wow, table <%s> in use. trying again",
1317 table_identifier
= arc4random();
1324 if (pfctl_define_table(tbl
->pt_name
, PFR_TFLAG_CONST
, 1,
1325 pf
->astack
[0]->name
, tbl
->pt_buf
, pf
->astack
[0]->ruleset
.tticket
)) {
1326 warn("failed to create table %s in %s",
1327 tbl
->pt_name
, pf
->astack
[0]->name
);
1334 * Partition the flat ruleset into a list of distinct superblocks
1337 construct_superblocks(struct pfctl
*pf
, struct pf_opt_queue
*opt_queue
,
1338 struct superblocks
*superblocks
)
1340 struct superblock
*block
= NULL
;
1341 struct pf_opt_rule
*por
;
1344 while (!TAILQ_EMPTY(opt_queue
)) {
1345 por
= TAILQ_FIRST(opt_queue
);
1346 TAILQ_REMOVE(opt_queue
, por
, por_entry
);
1347 if (block
== NULL
|| !superblock_inclusive(block
, por
)) {
1348 if ((block
= calloc(1, sizeof(*block
))) == NULL
) {
1352 TAILQ_INIT(&block
->sb_rules
);
1353 for (i
= 0; i
< PF_SKIP_COUNT
; i
++)
1354 TAILQ_INIT(&block
->sb_skipsteps
[i
]);
1355 TAILQ_INSERT_TAIL(superblocks
, block
, sb_entry
);
1357 TAILQ_INSERT_TAIL(&block
->sb_rules
, por
, por_entry
);
1365 * Compare two rule addresses
1368 addrs_equal(struct pf_rule_addr
*a
, struct pf_rule_addr
*b
)
1370 if (a
->neg
!= b
->neg
)
1372 return (memcmp(&a
->addr
, &b
->addr
, sizeof(a
->addr
)) == 0);
1377 * The addresses are not equal, but can we combine them into one table?
1380 addrs_combineable(struct pf_rule_addr
*a
, struct pf_rule_addr
*b
)
1382 if (a
->addr
.type
!= PF_ADDR_ADDRMASK
||
1383 b
->addr
.type
!= PF_ADDR_ADDRMASK
)
1385 if (a
->neg
!= b
->neg
|| a
->port_op
!= b
->port_op
||
1386 a
->port
[0] != b
->port
[0] || a
->port
[1] != b
->port
[1])
1393 * Are we allowed to combine these two rules
1396 rules_combineable(struct pf_rule
*p1
, struct pf_rule
*p2
)
1398 struct pf_rule a
, b
;
1400 comparable_rule(&a
, p1
, COMBINED
);
1401 comparable_rule(&b
, p2
, COMBINED
);
1402 return (memcmp(&a
, &b
, sizeof(a
)) == 0);
1407 * Can a rule be included inside a superblock
1410 superblock_inclusive(struct superblock
*block
, struct pf_opt_rule
*por
)
1412 struct pf_rule a
, b
;
1415 /* First check for hard breaks */
1416 for (i
= 0; i
< sizeof(pf_rule_desc
)/sizeof(*pf_rule_desc
); i
++) {
1417 if (pf_rule_desc
[i
].prf_type
== BARRIER
) {
1418 for (j
= 0; j
< pf_rule_desc
[i
].prf_size
; j
++)
1419 if (((char *)&por
->por_rule
)[j
+
1420 pf_rule_desc
[i
].prf_offset
] != 0)
1425 /* per-rule src-track is also a hard break */
1426 if (por
->por_rule
.rule_flag
& PFRULE_RULESRCTRACK
)
1430 * Have to handle interface groups separately. Consider the following
1432 * block on EXTIFS to any port 22
1433 * pass on em0 to any port 22
1434 * (where EXTIFS is an arbitrary interface group)
1435 * The optimizer may decide to re-order the pass rule in front of the
1436 * block rule. But what if EXTIFS includes em0??? Such a reordering
1437 * would change the meaning of the ruleset.
1438 * We can't just lookup the EXTIFS group and check if em0 is a member
1439 * because the user is allowed to add interfaces to a group during
1441 * Ergo interface groups become a defacto superblock break :-(
1443 if (interface_group(por
->por_rule
.ifname
) ||
1444 interface_group(TAILQ_FIRST(&block
->sb_rules
)->por_rule
.ifname
)) {
1445 if (strcasecmp(por
->por_rule
.ifname
,
1446 TAILQ_FIRST(&block
->sb_rules
)->por_rule
.ifname
) != 0)
1450 comparable_rule(&a
, &TAILQ_FIRST(&block
->sb_rules
)->por_rule
, NOMERGE
);
1451 comparable_rule(&b
, &por
->por_rule
, NOMERGE
);
1452 if (memcmp(&a
, &b
, sizeof(a
)) == 0)
1456 for (i
= 0; i
< sizeof(por
->por_rule
); i
++) {
1458 if (((u_int8_t
*)&a
)[i
] != ((u_int8_t
*)&b
)[i
]) {
1459 for (j
= 0; j
< sizeof(pf_rule_desc
) /
1460 sizeof(*pf_rule_desc
); j
++) {
1461 if (i
>= pf_rule_desc
[j
].prf_offset
&&
1462 i
< pf_rule_desc
[j
].prf_offset
+
1463 pf_rule_desc
[j
].prf_size
) {
1464 DEBUG("superblock break @ %d due to %s",
1466 pf_rule_desc
[j
].prf_name
);
1469 if (i
> pf_rule_desc
[j
].prf_offset
) {
1470 if (closest
== -1 ||
1471 i
-pf_rule_desc
[j
].prf_offset
<
1472 i
-pf_rule_desc
[closest
].prf_offset
)
1478 DEBUG("superblock break @ %d on %s+%xh",
1480 pf_rule_desc
[closest
].prf_name
,
1481 i
- pf_rule_desc
[closest
].prf_offset
-
1482 pf_rule_desc
[closest
].prf_size
);
1484 DEBUG("superblock break @ %d on field @ %d",
1485 por
->por_rule
.nr
, i
);
1489 #endif /* OPT_DEBUG */
/*
 * Figure out if an interface name is an actual interface or actually a
 * group of interfaces.
 *
 * Returns 1 for a group name, 0 for a real interface, NULL, or "".
 */
static int
interface_group(const char *ifname)
{
	if (ifname == NULL || !ifname[0])
		return (0);

	/*
	 * Real interfaces must end in a number, interface groups do not.
	 * Cast to unsigned char before isdigit(): passing a plain char
	 * whose value is negative is undefined behavior (CERT STR37-C).
	 */
	if (isdigit((unsigned char)ifname[strlen(ifname) - 1]))
		return (0);
	else
		return (1);
}
1514 * Make a rule that can directly compared by memcmp()
1517 comparable_rule(struct pf_rule
*dst
, const struct pf_rule
*src
, int type
)
1521 * To simplify the comparison, we just zero out the fields that are
1522 * allowed to be different and then do a simple memcmp()
1524 memcpy(dst
, src
, sizeof(*dst
));
1525 for (i
= 0; i
< sizeof(pf_rule_desc
)/sizeof(*pf_rule_desc
); i
++)
1526 if (pf_rule_desc
[i
].prf_type
>= type
) {
1528 assert(pf_rule_desc
[i
].prf_type
!= NEVER
||
1529 *(((char *)dst
) + pf_rule_desc
[i
].prf_offset
) == 0);
1530 #endif /* OPT_DEBUG */
1531 memset(((char *)dst
) + pf_rule_desc
[i
].prf_offset
, 0,
1532 pf_rule_desc
[i
].prf_size
);
1538 * Remove superset information from two rules so we can directly compare them
1542 exclude_supersets(struct pf_rule
*super
, struct pf_rule
*sub
)
1544 if (super
->ifname
[0] == '\0')
1545 memset(sub
->ifname
, 0, sizeof(sub
->ifname
));
1546 if (super
->direction
== PF_INOUT
)
1547 sub
->direction
= PF_INOUT
;
1548 if ((super
->proto
== 0 || super
->proto
== sub
->proto
) &&
1549 super
->flags
== 0 && super
->flagset
== 0 && (sub
->flags
||
1551 sub
->flags
= super
->flags
;
1552 sub
->flagset
= super
->flagset
;
1554 if (super
->proto
== 0)
1557 if (super
->src
.port_op
== 0) {
1558 sub
->src
.port_op
= 0;
1559 sub
->src
.port
[0] = 0;
1560 sub
->src
.port
[1] = 0;
1562 if (super
->dst
.port_op
== 0) {
1563 sub
->dst
.port_op
= 0;
1564 sub
->dst
.port
[0] = 0;
1565 sub
->dst
.port
[1] = 0;
1568 if (super
->src
.addr
.type
== PF_ADDR_ADDRMASK
&& !super
->src
.neg
&&
1569 !sub
->src
.neg
&& super
->src
.addr
.v
.a
.mask
.addr32
[0] == 0 &&
1570 super
->src
.addr
.v
.a
.mask
.addr32
[1] == 0 &&
1571 super
->src
.addr
.v
.a
.mask
.addr32
[2] == 0 &&
1572 super
->src
.addr
.v
.a
.mask
.addr32
[3] == 0)
1573 memset(&sub
->src
.addr
, 0, sizeof(sub
->src
.addr
));
1574 else if (super
->src
.addr
.type
== PF_ADDR_ADDRMASK
&&
1575 sub
->src
.addr
.type
== PF_ADDR_ADDRMASK
&&
1576 super
->src
.neg
== sub
->src
.neg
&&
1577 super
->af
== sub
->af
&&
1578 unmask(&super
->src
.addr
.v
.a
.mask
, super
->af
) <
1579 unmask(&sub
->src
.addr
.v
.a
.mask
, sub
->af
) &&
1580 super
->src
.addr
.v
.a
.addr
.addr32
[0] ==
1581 (sub
->src
.addr
.v
.a
.addr
.addr32
[0] &
1582 super
->src
.addr
.v
.a
.mask
.addr32
[0]) &&
1583 super
->src
.addr
.v
.a
.addr
.addr32
[1] ==
1584 (sub
->src
.addr
.v
.a
.addr
.addr32
[1] &
1585 super
->src
.addr
.v
.a
.mask
.addr32
[1]) &&
1586 super
->src
.addr
.v
.a
.addr
.addr32
[2] ==
1587 (sub
->src
.addr
.v
.a
.addr
.addr32
[2] &
1588 super
->src
.addr
.v
.a
.mask
.addr32
[2]) &&
1589 super
->src
.addr
.v
.a
.addr
.addr32
[3] ==
1590 (sub
->src
.addr
.v
.a
.addr
.addr32
[3] &
1591 super
->src
.addr
.v
.a
.mask
.addr32
[3])) {
1592 /* sub->src.addr is a subset of super->src.addr/mask */
1593 memcpy(&sub
->src
.addr
, &super
->src
.addr
, sizeof(sub
->src
.addr
));
1596 if (super
->dst
.addr
.type
== PF_ADDR_ADDRMASK
&& !super
->dst
.neg
&&
1597 !sub
->dst
.neg
&& super
->dst
.addr
.v
.a
.mask
.addr32
[0] == 0 &&
1598 super
->dst
.addr
.v
.a
.mask
.addr32
[1] == 0 &&
1599 super
->dst
.addr
.v
.a
.mask
.addr32
[2] == 0 &&
1600 super
->dst
.addr
.v
.a
.mask
.addr32
[3] == 0)
1601 memset(&sub
->dst
.addr
, 0, sizeof(sub
->dst
.addr
));
1602 else if (super
->dst
.addr
.type
== PF_ADDR_ADDRMASK
&&
1603 sub
->dst
.addr
.type
== PF_ADDR_ADDRMASK
&&
1604 super
->dst
.neg
== sub
->dst
.neg
&&
1605 super
->af
== sub
->af
&&
1606 unmask(&super
->dst
.addr
.v
.a
.mask
, super
->af
) <
1607 unmask(&sub
->dst
.addr
.v
.a
.mask
, sub
->af
) &&
1608 super
->dst
.addr
.v
.a
.addr
.addr32
[0] ==
1609 (sub
->dst
.addr
.v
.a
.addr
.addr32
[0] &
1610 super
->dst
.addr
.v
.a
.mask
.addr32
[0]) &&
1611 super
->dst
.addr
.v
.a
.addr
.addr32
[1] ==
1612 (sub
->dst
.addr
.v
.a
.addr
.addr32
[1] &
1613 super
->dst
.addr
.v
.a
.mask
.addr32
[1]) &&
1614 super
->dst
.addr
.v
.a
.addr
.addr32
[2] ==
1615 (sub
->dst
.addr
.v
.a
.addr
.addr32
[2] &
1616 super
->dst
.addr
.v
.a
.mask
.addr32
[2]) &&
1617 super
->dst
.addr
.v
.a
.addr
.addr32
[3] ==
1618 (sub
->dst
.addr
.v
.a
.addr
.addr32
[3] &
1619 super
->dst
.addr
.v
.a
.mask
.addr32
[3])) {
1620 /* sub->dst.addr is a subset of super->dst.addr/mask */
1621 memcpy(&sub
->dst
.addr
, &super
->dst
.addr
, sizeof(sub
->dst
.addr
));
1630 superblock_free(struct pfctl
*pf
, struct superblock
*block
)
1632 struct pf_opt_rule
*por
;
1633 while ((por
= TAILQ_FIRST(&block
->sb_rules
))) {
1634 TAILQ_REMOVE(&block
->sb_rules
, por
, por_entry
);
1635 if (por
->por_src_tbl
) {
1636 if (por
->por_src_tbl
->pt_buf
) {
1637 pfr_buf_clear(por
->por_src_tbl
->pt_buf
);
1638 free(por
->por_src_tbl
->pt_buf
);
1640 free(por
->por_src_tbl
);
1642 if (por
->por_dst_tbl
) {
1643 if (por
->por_dst_tbl
->pt_buf
) {
1644 pfr_buf_clear(por
->por_dst_tbl
->pt_buf
);
1645 free(por
->por_dst_tbl
->pt_buf
);
1647 free(por
->por_dst_tbl
);
1651 if (block
->sb_profiled_block
)
1652 superblock_free(pf
, block
->sb_profiled_block
);