// SPDX-License-Identifier: GPL-2.0-only
/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/siphash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static siphash_aligned_key_t nf_ct_expect_hashrnd;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);
	struct nf_conntrack_net *cnet;

	WARN_ON(!master_help);
	WARN_ON(timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);

	cnet = nf_ct_pernet(net);
	cnet->expect_count--;

	hlist_del_rcu(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

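/* Timer callback: an expectation that was never matched has expired.
 * Unlink it under the expect lock and drop the reference held by the timer.
 */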
static void nf_ct_expectation_timed_out(struct timer_list *t)
{
	struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);

	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_put(exp);
}

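/* Hash the expected destination (address, port, l3/l4 protocol numbers),
 * mixed with the netns, into a bucket index via siphash and
 * reciprocal_scale().
 */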
static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	struct {
		union nf_inet_addr dst_addr;
		u32 net_mix;
		u16 dport;
		u8 l3num;
		u8 protonum;
	} __aligned(SIPHASH_ALIGNMENT) combined;
	u32 hash;

	get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

	memset(&combined, 0, sizeof(combined));

	combined.dst_addr = tuple->dst.u3;
	combined.net_mix = net_hash_mix(n);
	combined.dport = (__force __u16)tuple->dst.u.all;
	combined.l3num = tuple->src.l3num;
	combined.protonum = tuple->dst.protonum;

	hash = siphash(&combined, sizeof(combined), &nf_ct_expect_hashrnd);

	return reciprocal_scale(hash, nf_ct_expect_hsize);
}

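/* True if @tuple matches expectation @i under @i's mask and @i's master
 * conntrack belongs to the same netns and zone.
 */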
static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_expect *i,
		const struct nf_conntrack_zone *zone,
		const struct net *net)
{
	return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
	       net_eq(net, nf_ct_net(i->master)) &&
	       nf_ct_zone_equal_any(i->master, zone);
}

bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expect);

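/* Lockless lookup of an expectation matching @tuple; the caller must hold
 * rcu_read_lock() and does not get a reference on the result.
 */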
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
		    const struct nf_conntrack_zone *zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!cnet->expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
		if (nf_ct_exp_equal(tuple, i, zone, net))
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
		      const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !refcount_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is removed from the
 * global list and returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
		       const struct nf_conntrack_zone *zone,
		       const struct nf_conntrack_tuple *tuple, bool unlink)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!cnet->expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_exp_equal(tuple, i, zone, net)) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If the master is not in the hash table yet (i.e. the packet hasn't
	   left this machine yet), how could the other end know about the
	   expectation?  Hence these are not the droids you are looking for
	   (if the master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid a race with another CPU that is about to invoke ->destroy()
	 * on exp->master, or nf_ct_delete() via timeout or early_drop().
	 *
	 * The refcount_inc_not_zero() check tells: if it fails, we know
	 * that the ct is being destroyed.  If it succeeds, we can be sure
	 * the ct cannot disappear underneath us.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !refcount_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT || !unlink) {
		refcount_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo the exp->master refcount increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}

/* Delete all expectations for this conntrack. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	spin_lock_bh(&nf_conntrack_expect_lock);
	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		nf_ct_remove_expect(exp);
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expectations clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* The part covered by the intersection of the masks must be unequal,
	   otherwise they clash */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

static bool master_matches(const struct nf_conntrack_expect *a,
			   const struct nf_conntrack_expect *b,
			   unsigned int flags)
{
	if (flags & NF_CT_EXP_F_SKIP_MASTER)
		return true;

	return a->master == b->master;
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_remove_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't take a reference on the master conntrack for non-fulfilled
 * expectations: during conntrack destruction, the expectations are always
 * killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	refcount_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

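/* Typical helper usage is alloc -> init -> register -> put.  The sketch
 * below is illustrative only (the class, addresses and port arguments
 * depend on the individual helper, e.g. the FTP helper also sets up NAT):
 *
 *	struct nf_conntrack_expect *exp;
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (!exp)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
 *			  IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related(exp, 0) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);
 */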
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#if IS_ENABLED(CONFIG_NF_NAT)
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

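/* Drop one reference; the expectation is freed via RCU once the last
 * reference is gone.
 */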
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (refcount_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

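/* Link the expectation into its master's list and the global hash, and arm
 * its timeout from the helper's expect policy.  Called with
 * nf_conntrack_expect_lock held.
 */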
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conntrack_net *cnet;
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

	/* two references: one for the hash insert, one for the timer */
	refcount_add(2, &exp->use);

	timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
	cnet = nf_ct_pernet(net);
	cnet->expect_count++;

	NF_CT_STAT_INC(net, expect_create);
}

/* A race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last)
		nf_ct_remove_expect(last);
}

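/* Validate a new expectation against the existing ones: identical entries
 * are replaced, clashes and per-helper or global limits are rejected.
 * Returns 0 if the expectation may be inserted, a negative errno otherwise.
 */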
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect,
				       unsigned int flags)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conntrack_net *cnet;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 0;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(net, &expect->tuple);
	hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
		if (master_matches(i, expect, flags) &&
		    expect_matches(i, expect)) {
			if (i->class != expect->class ||
			    i->master != expect->master)
				return -EALREADY;

			if (nf_ct_remove_expect(i))
				break;
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Would this put us over the per-helper limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	cnet = nf_ct_pernet(net);
	if (cnet->expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report, unsigned int flags)
{
	int ret;

	spin_lock_bh(&nf_conntrack_expect_lock);
	ret = __nf_ct_expect_check(expect, flags);
	if (ret < 0)
		goto out;

	nf_ct_expect_insert(expect);

	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return 0;
out:
	spin_unlock_bh(&nf_conntrack_expect_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

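/* Walk the whole expectation table and unlink every entry for which @iter
 * returns true, e.g. when a helper module is being removed.
 */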
void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
				  void *data)
{
	struct nf_conntrack_expect *exp;
	const struct hlist_node *next;
	unsigned int i;

	spin_lock_bh(&nf_conntrack_expect_lock);

	for (i = 0; i < nf_ct_expect_hsize; i++) {
		hlist_for_each_entry_safe(exp, next,
					  &nf_ct_expect_hash[i],
					  hnode) {
			if (iter(exp, data) && del_timer(&exp->timeout)) {
				nf_ct_unlink_expect(exp);
				nf_ct_expect_put(exp);
			}
		}
	}

	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_destroy);

void nf_ct_expect_iterate_net(struct net *net,
			      bool (*iter)(struct nf_conntrack_expect *e, void *data),
			      void *data,
			      u32 portid, int report)
{
	struct nf_conntrack_expect *exp;
	const struct hlist_node *next;
	unsigned int i;

	spin_lock_bh(&nf_conntrack_expect_lock);

	for (i = 0; i < nf_ct_expect_hsize; i++) {
		hlist_for_each_entry_safe(exp, next,
					  &nf_ct_expect_hash[i],
					  hnode) {

			if (!net_eq(nf_ct_exp_net(exp), net))
				continue;

			if (iter(exp, data) && del_timer(&exp->timeout)) {
				nf_ct_unlink_expect_report(exp, portid, report);
				nf_ct_expect_put(exp);
			}
		}
	}

	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_net);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

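/* Emit one /proc/net/nf_conntrack_expect line: remaining timeout, l3/l4
 * protocol numbers, the expected tuple, flags and the helper/policy name.
 */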
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_puts(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    nf_ct_l4proto_find(expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_puts(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name[0])
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	seq_putc(s, '\n');

	return 0;
}

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;

	proc = proc_create_net("nf_conntrack_expect", 0440, net->proc_net,
			       &exp_seq_ops, sizeof(struct ct_expect_iter_state));
	if (!proc)
		return -ENOMEM;

	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
	return exp_proc_init(net);
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
}

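/* Boot-time setup: if no hash size was set via the expect_hashsize module
 * parameter, derive it from the main conntrack table size, and cap the total
 * number of expectations at four per hash bucket.
 */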
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = KMEM_CACHE(nf_conntrack_expect, 0);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;

	nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (!nf_ct_expect_hash) {
		kmem_cache_destroy(nf_ct_expect_cachep);
		return -ENOMEM;
	}

	return 0;
}

void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
	kvfree(nf_ct_expect_hash);
}