/*	$NetBSD$	*/

/*
 * Copyright (C) 1993-2003 by Darren Reed.
 *
 * See the IPFILTER.LICENCE file for details on licencing.
 *
 * Copyright 2008 Sun Microsystems, Inc.
 */
#if defined(KERNEL) || defined(_KERNEL)
# undef KERNEL
# undef _KERNEL
# define KERNEL	1
# define _KERNEL	1
#endif
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/file.h>
#ifdef __hpux
# include <sys/timeout.h>
#endif
#if !defined(_KERNEL)
# include <stdio.h>
# include <string.h>
# include <stdlib.h>
# define _KERNEL
# ifdef __OpenBSD__
struct file;
# endif
# include <sys/uio.h>
# undef _KERNEL
#endif
#if defined(_KERNEL) && (__FreeBSD_version >= 220000)
# include <sys/filio.h>
# include <sys/fcntl.h>
#else
# include <sys/ioctl.h>
#endif
#if !defined(linux)
# include <sys/protosw.h>
#endif
#include <sys/socket.h>
#if defined(_KERNEL)
# include <sys/systm.h>
# if !defined(__SVR4) && !defined(__svr4__)
#  include <sys/mbuf.h>
# endif
#endif
#if !defined(__SVR4) && !defined(__svr4__)
# if defined(_KERNEL) && !defined(__sgi) && !defined(AIX)
#  include <sys/kernel.h>
# endif
#else
# include <sys/byteorder.h>
# ifdef _KERNEL
#  include <sys/dditypes.h>
# endif
# include <sys/stream.h>
# include <sys/kmem.h>
#endif
#include <net/if.h>
#ifdef sun
# include <net/af.h>
#endif
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#if !defined(linux)
# include <netinet/ip_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include "netinet/ip_compat.h"
#include <netinet/tcpip.h>
#include "netinet/ip_fil.h"
#include "netinet/ip_nat.h"
#include "netinet/ip_frag.h"
#include "netinet/ip_state.h"
#include "netinet/ip_auth.h"
#include "netinet/ip_proxy.h"
#if (__FreeBSD_version >= 300000)
# include <sys/malloc.h>
# if defined(_KERNEL)
#  ifndef IPFILTER_LKM
#   include <sys/libkern.h>
#   include <sys/systm.h>
#  endif
extern struct callout_handle fr_slowtimer_ch;
# endif
#endif
#if defined(__NetBSD__) && (__NetBSD_Version__ >= 104230000)
# include <sys/callout.h>
extern struct callout fr_slowtimer_ch;
#endif
#if defined(__OpenBSD__)
# include <sys/timeout.h>
extern struct timeout fr_slowtimer_ch;
#endif
/* END OF INCLUDES */

#if !defined(lint)
#if defined(__NetBSD__)
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");
#else
static const char sccsid[] = "@(#)ip_frag.c	1.11 3/24/96 (C) 1993-2000 Darren Reed";
static const char rcsid[] = "@(#)Id: ip_frag.c,v 2.77.2.17 2009/05/13 19:10:57 darrenr Exp";
#endif
#endif
ipfr_t	*ipfr_list = NULL;
ipfr_t	**ipfr_tail = &ipfr_list;

ipfr_t	*ipfr_natlist = NULL;
ipfr_t	**ipfr_nattail = &ipfr_natlist;

ipfr_t	*ipfr_ipidlist = NULL;
ipfr_t	**ipfr_ipidtail = &ipfr_ipidlist;

static ipfr_t	**ipfr_heads;
static ipfr_t	**ipfr_nattab;
static ipfr_t	**ipfr_ipidtab;

static ipfrstat_t ipfr_stats;
static int	ipfr_inuse = 0;
int		ipfr_size = IPFT_SIZE;

int	fr_ipfrttl = 120;	/* 60 seconds */
int	fr_frag_lock = 0;
int	fr_frag_init = 0;
u_long	fr_ticks = 0;
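
/*
 * Three fragment caches are maintained below: the normal filter cache
 * (ipfr_list/ipfr_heads), the NAT cache (ipfr_natlist/ipfr_nattab) and
 * the IP ID cache (ipfr_ipidlist/ipfr_ipidtab).  Each cache is a hash
 * table of ipfr_t entries plus a doubly linked list kept in order of
 * creation, so the expiry code can stop at the first entry whose ttl
 * has not yet been reached.  fr_ticks is advanced by fr_slowtimer().
 */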

static INLINE int	ipfr_index __P((fr_info_t *, ipfr_t *));
static ipfr_t		*ipfr_newfrag __P((fr_info_t *, u_32_t, ipfr_t **));
static ipfr_t		*fr_fraglookup __P((fr_info_t *, ipfr_t **));
static void		fr_fragdelete __P((ipfr_t *, ipfr_t ***));
static void		fr_fragfree __P((ipfr_t *));

/* ------------------------------------------------------------------------ */
/* Function: fr_fraginit */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: Nil */
/* */
/* Initialise the hash tables for the fragment cache lookups. */
/* ------------------------------------------------------------------------ */
int fr_fraginit()
{
	KMALLOCS(ipfr_heads, ipfr_t **, ipfr_size * sizeof(ipfr_t *));
	if (ipfr_heads == NULL)
		return -1;
	bzero((char *)ipfr_heads, ipfr_size * sizeof(ipfr_t *));

	KMALLOCS(ipfr_nattab, ipfr_t **, ipfr_size * sizeof(ipfr_t *));
	if (ipfr_nattab == NULL)
		return -1;
	bzero((char *)ipfr_nattab, ipfr_size * sizeof(ipfr_t *));

	KMALLOCS(ipfr_ipidtab, ipfr_t **, ipfr_size * sizeof(ipfr_t *));
	if (ipfr_ipidtab == NULL)
		return -1;
	bzero((char *)ipfr_ipidtab, ipfr_size * sizeof(ipfr_t *));

	RWLOCK_INIT(&ipf_frag, "ipf fragment rwlock");
	fr_frag_init = 1;

	return 0;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_fragunload */
/* Returns: Nil */
/* Parameters: Nil */
/* */
/* Free all memory allocated whilst running and from initialisation. */
/* ------------------------------------------------------------------------ */
void fr_fragunload()
{
	if (fr_frag_init == 1) {
		fr_fragclear();

		RW_DESTROY(&ipf_frag);
		fr_frag_init = 0;
	}

	if (ipfr_heads != NULL)
		KFREES(ipfr_heads, ipfr_size * sizeof(ipfr_t *));
	ipfr_heads = NULL;

	if (ipfr_nattab != NULL)
		KFREES(ipfr_nattab, ipfr_size * sizeof(ipfr_t *));
	ipfr_nattab = NULL;

	if (ipfr_ipidtab != NULL)
		KFREES(ipfr_ipidtab, ipfr_size * sizeof(ipfr_t *));
	ipfr_ipidtab = NULL;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_fragstats */
/* Returns: ipfrstat_t* - pointer to struct with current frag stats */
/* Parameters: Nil */
/* */
/* Updates ipfr_stats with current information and returns a pointer to it */
/* ------------------------------------------------------------------------ */
ipfrstat_t *fr_fragstats()
{
	ipfr_stats.ifs_table = ipfr_heads;
	ipfr_stats.ifs_nattab = ipfr_nattab;
	ipfr_stats.ifs_inuse = ipfr_inuse;
	return &ipfr_stats;
}

/* ------------------------------------------------------------------------ */
/* Function: ipfr_index */
/* Returns: int - index in fragment table for given packet */
/* Parameters: fin(I) - pointer to packet information */
/* frag(O) - pointer to ipfr_t structure to fill */
/* */
/* Compute the index in the fragment table while filling the per packet */
/* part of the fragment state. */
/* ------------------------------------------------------------------------ */
static INLINE int ipfr_index(fin, frag)
fr_info_t *fin;
ipfr_t *frag;
{
	u_int idx;

	/*
	 * For fragments, we record protocol, packet id, TOS and both IP#'s
	 * (these should all be the same for all fragments of a packet).
	 *
	 * build up a hash value to index the table with.
	 */
#ifdef USE_INET6
	if (fin->fin_v == 6) {
		ip6_t *ip6 = (ip6_t *)fin->fin_ip;

		frag->ipfr_p = fin->fin_fi.fi_p;
		frag->ipfr_id = fin->fin_id;
		frag->ipfr_tos = ip6->ip6_flow & IPV6_FLOWINFO_MASK;
		frag->ipfr_src.in6 = ip6->ip6_src;
		frag->ipfr_dst.in6 = ip6->ip6_dst;
	} else
#endif
	{
		ip_t *ip = fin->fin_ip;

		frag->ipfr_p = ip->ip_p;
		frag->ipfr_id = ip->ip_id;
		frag->ipfr_tos = ip->ip_tos;
		frag->ipfr_src.in4.s_addr = ip->ip_src.s_addr;
		frag->ipfr_src.i6[1] = 0;
		frag->ipfr_src.i6[2] = 0;
		frag->ipfr_src.i6[3] = 0;
		frag->ipfr_dst.in4.s_addr = ip->ip_dst.s_addr;
		frag->ipfr_dst.i6[1] = 0;
		frag->ipfr_dst.i6[2] = 0;
		frag->ipfr_dst.i6[3] = 0;
	}
	frag->ipfr_ifp = fin->fin_ifp;
	frag->ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
	frag->ipfr_secmsk = fin->fin_fi.fi_secmsk;
	frag->ipfr_auth = fin->fin_fi.fi_auth;
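
	/*
	 * Everything filled in above, starting at ipfr_ifp, forms the key
	 * that the lookup code compares with bcmp() over IPFR_CMPSZ bytes.
	 * The additions below fold the protocol, id and addresses into a
	 * hash bucket index for the fragment table.
	 */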
	idx = frag->ipfr_p;
	idx += frag->ipfr_id;
	idx += frag->ipfr_src.i6[0];
	idx += frag->ipfr_src.i6[1];
	idx += frag->ipfr_src.i6[2];
	idx += frag->ipfr_src.i6[3];
	idx += frag->ipfr_dst.i6[0];
	idx += frag->ipfr_dst.i6[1];
	idx += frag->ipfr_dst.i6[2];
	idx += frag->ipfr_dst.i6[3];
	idx *= 127;
	idx %= IPFT_SIZE;

	return idx;
}

/* ------------------------------------------------------------------------ */
/* Function: ipfr_newfrag */
/* Returns: ipfr_t * - pointer to fragment cache state info or NULL */
/* Parameters: fin(I) - pointer to packet information */
/* table(I) - pointer to frag table to add to */
/* */
/* Add a new entry to the fragment cache, registering it as having come */
/* through this box, with the result of the filter operation. */
/* ------------------------------------------------------------------------ */
static ipfr_t *ipfr_newfrag(fin, pass, table)
fr_info_t *fin;
u_32_t pass;
ipfr_t *table[];
{
	ipfr_t *fra, frag;
	u_int idx, off;
	frentry_t *fr;

	if (ipfr_inuse >= ipfr_size)
		return NULL;

	if ((fin->fin_flx & (FI_FRAG|FI_BAD)) != FI_FRAG)
		return NULL;

	if (pass & FR_FRSTRICT)
		if (fin->fin_off != 0)
			return NULL;

	idx = ipfr_index(fin, &frag);

	/*
	 * first, make sure it isn't already there...
	 */
	for (fra = table[idx]; (fra != NULL); fra = fra->ipfr_hnext)
		if (!bcmp((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp,
			  IPFR_CMPSZ)) {
			ipfr_stats.ifs_exists++;
			return NULL;
		}

	/*
	 * allocate some memory, if possible, if not, just record that we
	 * failed to do so.
	 */
	KMALLOC(fra, ipfr_t *);
	if (fra == NULL) {
		ipfr_stats.ifs_nomem++;
		return NULL;
	}

	fr = fin->fin_fr;
	fra->ipfr_rule = fr;
	if (fr != NULL) {
		MUTEX_ENTER(&fr->fr_lock);
		fr->fr_ref++;
		MUTEX_EXIT(&fr->fr_lock);
	}

	/*
	 * Insert the fragment into the fragment table, copy the struct used
	 * in the search using bcopy rather than reassign each field.
	 * Set the ttl to the default.
	 */
	if ((fra->ipfr_hnext = table[idx]) != NULL)
		table[idx]->ipfr_hprev = &fra->ipfr_hnext;
	fra->ipfr_hprev = table + idx;
	fra->ipfr_data = NULL;
	table[idx] = fra;
	bcopy((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp, IPFR_CMPSZ);
	fra->ipfr_ttl = fr_ticks + fr_ipfrttl;
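	/*
	 * fr_ticks advances twice a second (see fr_slowtimer), so the
	 * default fr_ipfrttl of 120 ticks gives a new entry 60 seconds
	 * before fr_fragexpire() removes it.
	 */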

	/*
	 * Compute the offset of the expected start of the next packet.
	 */
	off = fin->fin_off >> 3;
	if (off == 0)
		fra->ipfr_seen0 = 1;
	fra->ipfr_off = off + (fin->fin_dlen >> 3);
	fra->ipfr_pass = pass;
	fra->ipfr_ref = 1;
	ipfr_stats.ifs_new++;
	ipfr_inuse++;
	return fra;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_newfrag */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: fin(I) - pointer to packet information */
/* */
/* Add a new entry to the fragment cache table based on the current packet */
/* ------------------------------------------------------------------------ */
int fr_newfrag(fin, pass)
u_32_t pass;
fr_info_t *fin;
{
	ipfr_t *fra;

	if (fr_frag_lock != 0)
		return -1;

	WRITE_ENTER(&ipf_frag);
	fra = ipfr_newfrag(fin, pass, ipfr_heads);
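	/*
	 * On success, append the new entry to the end of the TTL-ordered
	 * list: ipfr_tail always points at the ipfr_next field of the last
	 * entry (or at ipfr_list itself while the list is empty), so the
	 * append is O(1) and the list stays in order of creation.
	 */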
	if (fra != NULL) {
		*ipfr_tail = fra;
		fra->ipfr_prev = ipfr_tail;
		ipfr_tail = &fra->ipfr_next;
		if (ipfr_list == NULL)
			ipfr_list = fra;
		fra->ipfr_next = NULL;
	}
	RWLOCK_EXIT(&ipf_frag);
	return fra ? 0 : -1;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_nat_newfrag */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: fin(I) - pointer to packet information */
/* nat(I) - pointer to NAT structure */
/* */
/* Create a new NAT fragment cache entry based on the current packet and */
/* the NAT structure for this "session". */
/* ------------------------------------------------------------------------ */
int fr_nat_newfrag(fin, pass, nat)
fr_info_t *fin;
u_32_t pass;
nat_t *nat;
{
	ipfr_t *fra;

	if (fr_frag_lock != 0)
		return 0;

	WRITE_ENTER(&ipf_natfrag);
	fra = ipfr_newfrag(fin, pass, ipfr_nattab);
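	/*
	 * The cache entry and the NAT session cross-link each other through
	 * ipfr_data and nat_data, so whichever one is torn down first can
	 * find the other and clear the stale back pointer (see fr_forgetnat
	 * and fr_fragclear).
	 */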
	if (fra != NULL) {
		fra->ipfr_data = nat;
		nat->nat_data = fra;
		*ipfr_nattail = fra;
		fra->ipfr_prev = ipfr_nattail;
		ipfr_nattail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
	}
	RWLOCK_EXIT(&ipf_natfrag);
	return fra ? 0 : -1;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_ipid_newfrag */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: fin(I) - pointer to packet information */
/* ipid(I) - new IP ID for this fragmented packet */
/* */
/* Create a new fragment cache entry for this packet and store, as a data */
/* pointer, the new IP ID value. */
/* ------------------------------------------------------------------------ */
int fr_ipid_newfrag(fin, ipid)
fr_info_t *fin;
u_32_t ipid;
{
	ipfr_t *fra;

	if (fr_frag_lock)
		return 0;

	WRITE_ENTER(&ipf_ipidfrag);
	fra = ipfr_newfrag(fin, 0, ipfr_ipidtab);
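	/*
	 * The replacement IP ID is small enough to be stored directly in
	 * the ipfr_data pointer (cast via intptr_t) rather than in
	 * separately allocated storage; fr_ipid_knownfrag() casts it back.
	 */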
	if (fra != NULL) {
		fra->ipfr_data = (void *)(intptr_t)ipid;
		*ipfr_ipidtail = fra;
		fra->ipfr_prev = ipfr_ipidtail;
		ipfr_ipidtail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
	}
	RWLOCK_EXIT(&ipf_ipidfrag);
	return fra ? 0 : -1;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_fraglookup */
/* Returns: ipfr_t * - pointer to ipfr_t structure if there's a */
/* matching entry in the frag table, else NULL */
/* Parameters: fin(I) - pointer to packet information */
/* table(I) - pointer to fragment cache table to search */
/* */
/* Check the fragment cache to see if there is already a record of this */
/* packet with its filter result known. */
/* ------------------------------------------------------------------------ */
static ipfr_t *fr_fraglookup(fin, table)
fr_info_t *fin;
ipfr_t *table[];
{
	ipfr_t *f, frag;
	u_int idx;

	if ((fin->fin_flx & (FI_FRAG|FI_BAD)) != FI_FRAG)
		return NULL;

	/*
	 * For fragments, we record protocol, packet id, TOS and both IP#'s
	 * (these should all be the same for all fragments of a packet).
	 *
	 * build up a hash value to index the table with.
	 */
	idx = ipfr_index(fin, &frag);

	frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
	frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
	frag.ipfr_auth = fin->fin_fi.fi_auth;

	/*
	 * check the table, careful to only compare the right amount of data
	 */
	for (f = table[idx]; f; f = f->ipfr_hnext)
		if (!bcmp((char *)&frag.ipfr_ifp, (char *)&f->ipfr_ifp,
			  IPFR_CMPSZ)) {
			u_short off;

			/*
			 * We don't want to let short packets match because
			 * they could be compromising the security of other
			 * rules that want to match on layer 4 fields (and
			 * can't because they have been fragmented off.)
			 * Why do this check here? The counter acts as an
			 * indicator of this kind of attack, whereas if it was
			 * elsewhere, it wouldn't know if other matching
			 * packets had been seen.
			 */
			if (fin->fin_flx & FI_SHORT) {
				ATOMIC_INCL(ipfr_stats.ifs_short);
				continue;
			}

			/*
			 * XXX - We really need to be guarding against the
			 * retransmission of (src,dst,id,offset-range) here
			 * because a fragmented packet is never resent with
			 * the same IP ID# (or shouldn't).
			 */
			off = fin->fin_off >> 3;
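			/*
			 * ipfr_seen0 records whether the offset-0 fragment
			 * has already been accepted; a second offset-0
			 * fragment for the same key is only counted as a
			 * retransmission and is not matched here.
			 */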
			if (f->ipfr_seen0) {
				if (off == 0) {
					ATOMIC_INCL(ipfr_stats.ifs_retrans0);
					continue;
				}
			} else if (off == 0)
				f->ipfr_seen0 = 1;

			if (f != table[idx]) {
				ipfr_t **fp;

				/*
				 * Move fragment info. to the top of the list
				 * to speed up searches.  First, delink...
				 */
				fp = f->ipfr_hprev;
				(*fp) = f->ipfr_hnext;
				if (f->ipfr_hnext != NULL)
					f->ipfr_hnext->ipfr_hprev = fp;
				/*
				 * Then put back at the top of the chain.
				 */
				f->ipfr_hnext = table[idx];
				table[idx]->ipfr_hprev = &f->ipfr_hnext;
				f->ipfr_hprev = table + idx;
				table[idx] = f;
			}

			/*
			 * If we've followed the fragments, and this is the
			 * last (in order), shrink expiration time.
			 */
			if (off == f->ipfr_off) {
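				/*
				 * This fragment starts exactly where the
				 * previous one ended.  If there are no more
				 * fragments to come (MF clear), shorten the
				 * ttl so the entry expires on the next timer
				 * tick; otherwise record where the next
				 * fragment should start.
				 */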
				if (!(fin->fin_flx & FI_MOREFRAG))
					f->ipfr_ttl = fr_ticks + 1;
				f->ipfr_off = (fin->fin_dlen >> 3) + off;
			} else if (f->ipfr_pass & FR_FRSTRICT)
				continue;
			ATOMIC_INCL(ipfr_stats.ifs_hits);
			return f;
		}
	return NULL;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_nat_knownfrag */
/* Returns: nat_t* - pointer to 'parent' NAT structure if frag table */
/* match found, else NULL */
/* Parameters: fin(I) - pointer to packet information */
/* */
/* Functional interface for NAT lookups of the NAT fragment cache */
/* ------------------------------------------------------------------------ */
nat_t *fr_nat_knownfrag(fin)
fr_info_t *fin;
{
	nat_t *nat;
	ipfr_t *ipf;

	if ((fr_frag_lock) || !ipfr_natlist)
		return NULL;
	READ_ENTER(&ipf_natfrag);
	ipf = fr_fraglookup(fin, ipfr_nattab);
	if (ipf != NULL) {
		nat = ipf->ipfr_data;
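		/*
		 * Note: ipfr_ttl was set to fr_ticks + 1 by fr_fraglookup()
		 * when it matched the final (in-order) fragment, which is
		 * what the comparison below detects.
		 */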
		/*
		 * This is the last fragment for this packet.
		 */
		if ((ipf->ipfr_ttl == fr_ticks + 1) && (nat != NULL)) {
			nat->nat_data = NULL;
			ipf->ipfr_data = NULL;
		}
	} else
		nat = NULL;
	RWLOCK_EXIT(&ipf_natfrag);
	return nat;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_ipid_knownfrag */
/* Returns: u_32_t - IPv4 ID for this packet if match found, else */
/* return 0xffffffff to indicate no match. */
/* Parameters: fin(I) - pointer to packet information */
/* */
/* Functional interface for IP ID lookups of the IP ID fragment cache */
/* ------------------------------------------------------------------------ */
u_32_t fr_ipid_knownfrag(fin)
fr_info_t *fin;
{
	ipfr_t *ipf;
	u_32_t id;

	if ((fr_frag_lock) || !ipfr_ipidlist)
		return 0xffffffff;

	READ_ENTER(&ipf_ipidfrag);
	ipf = fr_fraglookup(fin, ipfr_ipidtab);
	if (ipf != NULL)
		id = (u_32_t)(intptr_t)ipf->ipfr_data;
	else
		id = 0xffffffff;
	RWLOCK_EXIT(&ipf_ipidfrag);
	return id;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_knownfrag */
/* Returns: frentry_t* - pointer to filter rule if a match is found in */
/* the frag cache table, else NULL. */
/* Parameters: fin(I) - pointer to packet information */
/* passp(O) - pointer to where to store rule flags returned */
/* */
/* Functional interface for normal lookups of the fragment cache. If a */
/* match is found, return the rule pointer and flags from the rule, except */
/* that if FR_LOGFIRST is set, reset FR_LOG. */
/* ------------------------------------------------------------------------ */
frentry_t *fr_knownfrag(fin, passp)
fr_info_t *fin;
u_32_t *passp;
{
	frentry_t *fr = NULL;
	ipfr_t *fra;
	u_32_t pass;

	if ((fr_frag_lock) || (ipfr_list == NULL))
		return NULL;

	READ_ENTER(&ipf_frag);
	fra = fr_fraglookup(fin, ipfr_heads);
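	/*
	 * On a hit the packet inherits the rule that matched the first
	 * fragment.  When that rule used FR_LOGFIRST, only the first
	 * fragment should be logged, so FR_LOG is stripped from the flags
	 * handed back for the later fragments.
	 */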
	if (fra != NULL) {
		fr = fra->ipfr_rule;
		fin->fin_fr = fr;
		if (fr != NULL) {
			pass = fr->fr_flags;
			if ((pass & FR_LOGFIRST) != 0)
				pass &= ~(FR_LOGFIRST|FR_LOG);
			*passp = pass;
		}
	}
	RWLOCK_EXIT(&ipf_frag);
	return fr;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_forget */
/* Returns: Nil */
/* Parameters: ptr(I) - pointer to data structure */
/* */
/* Search through all of the fragment cache entries and wherever a pointer */
/* is found to match ptr, reset it to NULL. */
/* ------------------------------------------------------------------------ */
void fr_forget(ptr)
void *ptr;
{
	ipfr_t *fr;

	WRITE_ENTER(&ipf_frag);
	for (fr = ipfr_list; fr; fr = fr->ipfr_next)
		if (fr->ipfr_data == ptr)
			fr->ipfr_data = NULL;
	RWLOCK_EXIT(&ipf_frag);
}

/* ------------------------------------------------------------------------ */
/* Function: fr_forgetnat */
/* Returns: Nil */
/* Parameters: ptr(I) - pointer to data structure */
/* */
/* Search through all of the fragment cache entries for NAT and wherever a */
/* pointer is found to match ptr, reset it to NULL. */
/* ------------------------------------------------------------------------ */
void fr_forgetnat(ptr)
void *ptr;
{
	ipfr_t *fr;

	WRITE_ENTER(&ipf_natfrag);
	for (fr = ipfr_natlist; fr; fr = fr->ipfr_next)
		if (fr->ipfr_data == ptr)
			fr->ipfr_data = NULL;
	RWLOCK_EXIT(&ipf_natfrag);
}

/* ------------------------------------------------------------------------ */
/* Function: fr_fragdelete */
/* Returns: Nil */
/* Parameters: fra(I) - pointer to fragment structure to delete */
/* tail(IO) - pointer to the pointer to the tail of the frag */
/* list */
/* */
/* Remove a fragment cache table entry from the table & list. Also free */
/* the filter rule it is associated with if it is no longer used as a */
/* result of decreasing the reference count. */
/* ------------------------------------------------------------------------ */
static void fr_fragdelete(fra, tail)
ipfr_t *fra, ***tail;
{

	if (fra->ipfr_next)
		fra->ipfr_next->ipfr_prev = fra->ipfr_prev;
	*fra->ipfr_prev = fra->ipfr_next;
	if (*tail == &fra->ipfr_next)
		*tail = fra->ipfr_prev;
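
	/*
	 * Delink from the hash chain as well.  ipfr_hprev always points at
	 * whichever pointer currently references this entry (a bucket head
	 * or the previous entry's ipfr_hnext), so no special case is needed
	 * for the head of the chain.
	 */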
	if (fra->ipfr_hnext)
		fra->ipfr_hnext->ipfr_hprev = fra->ipfr_hprev;
	*fra->ipfr_hprev = fra->ipfr_hnext;

	if (fra->ipfr_rule != NULL) {
		(void) fr_derefrule(&fra->ipfr_rule);
	}

	if (fra->ipfr_ref <= 0)
		fr_fragfree(fra);
}

/* ------------------------------------------------------------------------ */
/* Function: fr_fragfree */
/* Returns: Nil */
/* Parameters: fra - pointer to frag structure to free */
/* */
/* Take care of the details associated with deleting an entry from the frag */
/* cache. Currently this just means bumping stats correctly after freeing */
/* ------------------------------------------------------------------------ */
static void fr_fragfree(fra)
ipfr_t *fra;
{
	KFREE(fra);
	ipfr_stats.ifs_expire++;
	ipfr_inuse--;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_fragclear */
/* Returns: Nil */
/* Parameters: Nil */
/* */
/* Free memory in use by fragment state information kept. Do the normal */
/* fragment state stuff first and then the NAT-fragment table. */
/* ------------------------------------------------------------------------ */
void fr_fragclear()
{
	ipfr_t *fra;
	nat_t *nat;

	WRITE_ENTER(&ipf_frag);
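	/*
	 * Each entry's reference count is dropped before fr_fragdelete()
	 * is called, so entries that are not still referenced elsewhere
	 * (for example by an iterator) are freed immediately.
	 */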
	while ((fra = ipfr_list) != NULL) {
		fra->ipfr_ref--;
		fr_fragdelete(fra, &ipfr_tail);
	}
	ipfr_tail = &ipfr_list;
	RWLOCK_EXIT(&ipf_frag);

	WRITE_ENTER(&ipf_nat);
	WRITE_ENTER(&ipf_natfrag);
	while ((fra = ipfr_natlist) != NULL) {
		nat = fra->ipfr_data;
		if (nat != NULL) {
			if (nat->nat_data == fra)
				nat->nat_data = NULL;
		}
		fra->ipfr_ref--;
		fr_fragdelete(fra, &ipfr_nattail);
	}
	ipfr_nattail = &ipfr_natlist;
	RWLOCK_EXIT(&ipf_natfrag);
	RWLOCK_EXIT(&ipf_nat);
}

/* ------------------------------------------------------------------------ */
/* Function: fr_fragexpire */
/* Returns: Nil */
/* Parameters: Nil */
/* */
/* Expire entries in the fragment cache table that have been there too long */
/* ------------------------------------------------------------------------ */
void fr_fragexpire()
{
	ipfr_t **fp, *fra;
	nat_t *nat;
	SPL_INT(s);

	if (fr_frag_lock)
		return;

	SPL_NET(s);
	WRITE_ENTER(&ipf_frag);
	/*
	 * Go through the entire table, looking for entries to expire,
	 * which is indicated by the ttl being less than or equal to fr_ticks.
	 */
	for (fp = &ipfr_list; ((fra = *fp) != NULL); ) {
		if (fra->ipfr_ttl > fr_ticks)
			break;
		fra->ipfr_ref--;
		fr_fragdelete(fra, &ipfr_tail);
	}
	RWLOCK_EXIT(&ipf_frag);

	WRITE_ENTER(&ipf_ipidfrag);
	for (fp = &ipfr_ipidlist; ((fra = *fp) != NULL); ) {
		if (fra->ipfr_ttl > fr_ticks)
			break;
		fra->ipfr_ref--;
		fr_fragdelete(fra, &ipfr_ipidtail);
	}
	RWLOCK_EXIT(&ipf_ipidfrag);

	/*
	 * Same again for the NAT table, except that if the structure also
	 * still points to a NAT structure, and the NAT structure points back
	 * at the one to be free'd, NULL the reference from the NAT struct.
	 * NOTE: We need to grab both mutexes early, and in this order so as
	 * to prevent a deadlock if both try to expire at the same time.
	 * The extra if() statement here is because it locks out all NAT
	 * operations - no need to do that if there are no entries in this
	 * list, right?
	 */
	if (ipfr_natlist != NULL) {
		WRITE_ENTER(&ipf_nat);
		WRITE_ENTER(&ipf_natfrag);
		for (fp = &ipfr_natlist; ((fra = *fp) != NULL); ) {
			if (fra->ipfr_ttl > fr_ticks)
				break;
			nat = fra->ipfr_data;
			if (nat != NULL) {
				if (nat->nat_data == fra)
					nat->nat_data = NULL;
			}
			fra->ipfr_ref--;
			fr_fragdelete(fra, &ipfr_nattail);
		}
		RWLOCK_EXIT(&ipf_natfrag);
		RWLOCK_EXIT(&ipf_nat);
	}
	SPL_X(s);
}

/* ------------------------------------------------------------------------ */
/* Function: fr_slowtimer */
/* Returns: Nil */
/* Parameters: Nil */
/* */
/* Slowly expire held state for fragments. Timeouts are set in */
/* expectation of this being called twice per second. */
/* ------------------------------------------------------------------------ */
#if !defined(_KERNEL) || (!SOLARIS && !defined(__hpux) && !defined(__sgi) && \
			  !defined(__osf__) && !defined(linux))
# if defined(_KERNEL) && ((BSD >= 199103) || defined(__sgi))
void fr_slowtimer __P((void *ptr))
# else
int fr_slowtimer()
# endif
{
	READ_ENTER(&ipf_global);

	ipf_expiretokens();
	fr_fragexpire();
	fr_timeoutstate();
	fr_natexpire();
	fr_authexpire();
	fr_ticks++;
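	/*
	 * fr_ticks is the global half-second tick counter used by the
	 * fragment expiry code above (fr_fragexpire compares entry ttls
	 * against it).
	 */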
	if (fr_running <= 0)
		goto done;
# ifdef _KERNEL
#  if defined(__NetBSD__) && (__NetBSD_Version__ >= 104240000)
	callout_reset(&fr_slowtimer_ch, hz / 2, fr_slowtimer, NULL);
#  else
#   if defined(__OpenBSD__)
	timeout_add(&fr_slowtimer_ch, hz/2);
#   else
#    if (__FreeBSD_version >= 300000)
	fr_slowtimer_ch = timeout(fr_slowtimer, NULL, hz/2);
#    else
#     ifdef linux

#     else
	timeout(fr_slowtimer, NULL, hz/2);
#     endif
#    endif /* FreeBSD */
#   endif /* OpenBSD */
#  endif /* NetBSD */
# endif
done:
	RWLOCK_EXIT(&ipf_global);
# if (BSD < 199103) || !defined(_KERNEL)
	return 0;
# endif
}
#endif /* !SOLARIS && !defined(__hpux) && !defined(__sgi) */

/* ------------------------------------------------------------------------ */
/* Function: fr_nextfrag */
/* Returns: int - 0 == success, else error */
/* Parameters: token(I) - pointer to token information for this caller */
/* itp(I) - pointer to generic iterator from caller */
/* top(I) - top of the fragment list */
/* tail(I) - tail of the fragment list */
/* lock(I) - fragment cache lock */
/* */
/* This function is used to iterate through the list of entries in the */
/* fragment cache. It increases the reference count on the one currently */
/* being returned so that the caller can come back and resume from it later.*/
/* */
/* This function is used for both the NAT fragment cache as well as the ipf */
/* fragment cache - hence the reason for passing in top, tail and lock. */
/* ------------------------------------------------------------------------ */
int fr_nextfrag(token, itp, top, tail
#ifdef USE_MUTEXES
, lock
#endif
)
ipftoken_t *token;
ipfgeniter_t *itp;
ipfr_t **top, ***tail;
#ifdef USE_MUTEXES
ipfrwlock_t *lock;
#endif
{
	ipfr_t *frag, *next, zero;
	int error = 0;

	READ_ENTER(lock);

	/*
	 * Retrieve "previous" entry from token and find the next entry.
	 */
	frag = token->ipt_data;
	if (frag == NULL)
		next = *top;
	else
		next = frag->ipfr_next;

	/*
	 * If we found an entry, add reference to it and update token.
	 * Otherwise, zero out data to be returned and NULL out token.
	 */
	if (next != NULL) {
		ATOMIC_INC(next->ipfr_ref);
		token->ipt_data = next;
	} else {
		bzero(&zero, sizeof(zero));
		next = &zero;
		token->ipt_data = NULL;
	}

	/*
	 * Now that we have the reference, it is safe to give up the lock.
	 */
	RWLOCK_EXIT(lock);

	/*
	 * Copy out data and clean up references and token as needed.
	 */
	error = COPYOUT(next, itp->igi_data, sizeof(*next));
	if (error != 0)
		error = EFAULT;
	if (token->ipt_data != NULL) {
		if (frag != NULL)
#ifdef USE_MUTEXES
			fr_fragderef(&frag, lock);
#else
			fr_fragderef(&frag);
#endif
		if (next->ipfr_next == NULL)
			token->ipt_data = NULL;
	}
	return error;
}

/* ------------------------------------------------------------------------ */
/* Function: fr_fragderef */
/* Returns: Nil */
/* Parameters: frp(IO) - pointer to fragment structure to dereference */
/* lock(I) - lock associated with the fragment */
/* */
/* This function dereferences a fragment structure (ipfr_t). The pointer */
/* passed in will always be reset back to NULL, even if the structure is */
/* not freed, to enforce the notion that the caller is no longer entitled */
/* to use the pointer it is dropping the reference to. */
/* ------------------------------------------------------------------------ */
void fr_fragderef(frp
#ifdef USE_MUTEXES
, lock
#endif
)
ipfr_t **frp;
#ifdef USE_MUTEXES
ipfrwlock_t *lock;
#endif
{
	ipfr_t *fra;

	fra = *frp;
	*frp = NULL;

	WRITE_ENTER(lock);
	fra->ipfr_ref--;
	if (fra->ipfr_ref <= 0)
		fr_fragfree(fra);
	RWLOCK_EXIT(lock);
}