net/ipv6/ip6_flowlabel.c
/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/route.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>
#define FL_MIN_LINGER	6	/* Minimal linger: 6 seconds, as specified
				   in the old IPv6 RFC. Still a reasonable
				   default. */
#define FL_MAX_LINGER	60	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)
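
/*
 * All flow labels live in a single global hash table, keyed on the low
 * eight bits of the label and chained through fl->next; lookups also
 * match on the owning network namespace (see __fl_lookup below).
 */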
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: it protects only against the GC */

static DEFINE_RWLOCK(ip6_fl_lock);

/* Lock for the per-socket flowlabel lists */

static DEFINE_RWLOCK(ip6_sk_fl_lock);
static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for (fl = fl_ht[FL_HASH(label)]; fl; fl = fl->next) {
		if (fl->label == label && fl->fl_net == net)
			return fl;
	}
	return NULL;
}

static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	read_lock_bh(&ip6_fl_lock);
	fl = __fl_lookup(net, label);
	if (fl)
		atomic_inc(&fl->users);
	read_unlock_bh(&ip6_fl_lock);
	return fl;
}
static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		release_net(fl->fl_net);
		kfree(fl->opt);
	}
	kfree(fl);
}

static void fl_release(struct ip6_flowlabel *fl)
{
	write_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	write_unlock_bh(&ip6_fl_lock);
}
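
/*
 * Garbage collector: walk every hash chain, free labels whose user
 * count has dropped to zero and whose deadline (the later of
 * lastuse + linger and expires) has passed, and re-arm the timer for
 * the earliest remaining deadline.
 */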
static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	write_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl, **flp;
		flp = &fl_ht[i];
		while ((fl = *flp) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched)
		mod_timer(&ip6_fl_gc_timer, sched);
	write_unlock(&ip6_fl_lock);
}
static void ip6_fl_purge(struct net *net)
{
	int i;

	write_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl, **flp;
		flp = &fl_ht[i];
		while ((fl = *flp) != NULL) {
			if (fl->fl_net == net && atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	write_unlock(&ip6_fl_lock);
}
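
/*
 * Insert a freshly created label into the hash table. With label == 0 a
 * free random label is chosen. Returns NULL on success; if a concurrent
 * insert of the requested label won the race, that existing entry is
 * returned instead with its user count raised, and the caller must
 * dispose of its own fl.
 */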
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	write_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
		 * We dropped the ip6_fl_lock, so this entry could have
		 * reappeared and we need to recheck for it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - the sock is locked, so a new
		 * entry with the same label can only appear on another sock.
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			write_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	fl_ht[FL_HASH(fl->label)] = fl;
	atomic_inc(&fl_size);
	write_unlock_bh(&ip6_fl_lock);
	return NULL;
}
/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	read_lock_bh(&ip6_sk_fl_lock);
	for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			read_unlock_bh(&ip6_sk_fl_lock);
			return fl;
		}
	}
	read_unlock_bh(&ip6_sk_fl_lock);
	return NULL;
}

EXPORT_SYMBOL_GPL(fl6_sock_lookup);

void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	while ((sfl = np->ipv6_fl_list) != NULL) {
		np->ipv6_fl_list = sfl->next;
		fl_release(sfl->fl);
		kfree(sfl);
	}
}
/* Service routines */


/*
 * This is the only difficult place: a flowlabel enforces equal headers
 * up to and including the routing header, but the user may still supply
 * options that follow the routing header.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (fopt == NULL || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt != NULL) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	return opt_space;
}
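
/*
 * Illustrative note (not from the original source): if a label was
 * created with, say, a hop-by-hop option, and the sender passes a
 * destination option (one that follows the routing header) with
 * sendmsg(), the merged result in opt_space combines the label's
 * hopopt/dst0opt/srcrt (opt_nflen bytes) with the caller's dst1opt
 * (opt_flen bytes), so the headers covered by the label stay fixed.
 */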
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	return 0;
}
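
/*
 * Example of the clamping above (not from the original source): a
 * request with flr_linger == 0 is rounded up to FL_MIN_LINGER (6 s),
 * while an unprivileged request above FL_MAX_LINGER (60 s) makes
 * check_linger() return 0 and fl6_renew() fail with -EPERM. Renewal
 * only ever extends linger and expires, never shortens them.
 */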
static struct ip6_flowlabel *
fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
	  int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi flowi;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		flowi.oif = 0;

		err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	ipv6_addr_copy(&fl->dst, &freq->flr_dst);
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner = current->pid;
		break;
	case IPV6_FL_S_USER:
		fl->owner = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}

	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next)
		count++;

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4)
	     && !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}
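
/*
 * Admission policy in plain terms: with fewer than FL_MAX_PER_SOCK
 * labels allocated globally there is no limit at all. Beyond that,
 * allocation fails for everyone once the table is full, and for
 * unprivileged sockets once they hold FL_MAX_PER_SOCK labels, hold any
 * label while less than half the table is free, or when less than a
 * quarter of the table is free.
 */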
static int ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
{
	if (h1 == h2)
		return 0;
	if (h1 == NULL || h2 == NULL)
		return 1;
	if (h1->hdrlen != h2->hdrlen)
		return 1;
	return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
}

static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
{
	if (o1 == o2)
		return 0;
	if (o1 == NULL || o2 == NULL)
		return 1;
	if (o1->opt_nflen != o2->opt_nflen)
		return 1;
	if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
		return 1;
	if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
		return 1;
	if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
		return 1;
	return 0;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	write_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	np->ipv6_fl_list = sfl;
	write_unlock_bh(&ip6_sk_fl_lock);
}
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl, **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;


	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		write_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list; (sfl = *sflp) != NULL; sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = sfl->next;
				write_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree(sfl);
				return 0;
			}
		}
		write_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		read_lock_bh(&ip6_sk_fl_lock);
		for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				read_unlock_bh(&ip6_sk_fl_lock);
				return err;
			}
		}
		read_unlock_bh(&ip6_sk_fl_lock);

		if (freq.flr_share == IPV6_FL_S_NONE && capable(CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, &freq, optval, optlen, &err);
		if (fl == NULL)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			read_lock_bh(&ip6_sk_fl_lock);
			for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						read_unlock_bh(&ip6_sk_fl_lock);
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			read_unlock_bh(&ip6_sk_fl_lock);

			if (fl1 == NULL)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    fl1->owner != fl->owner)
					goto release;

				err = -EINVAL;
				if (!ipv6_addr_equal(&fl1->dst, &fl->dst) ||
				    ipv6_opt_cmp(fl1->opt, fl->opt))
					goto release;

				err = -ENOMEM;
				if (sfl1 == NULL)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (sfl1 == NULL || (err = mem_check(sk)) != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1 != NULL)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}
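
/*
 * Userspace view (a hedged sketch, not part of the original file): the
 * handler above backs the IPV6_FLOWLABEL_MGR socket option. A request
 * for a fresh, exclusively shared label might look like this; error
 * handling is elided and "sk" / "dst" are hypothetical.
 *
 *	struct in6_flowlabel_req freq;
 *
 *	memset(&freq, 0, sizeof(freq));
 *	freq.flr_action = IPV6_FL_A_GET;
 *	freq.flr_flags  = IPV6_FL_F_CREATE;	// create if not found
 *	freq.flr_share  = IPV6_FL_S_EXCL;	// exclusive to this socket
 *	freq.flr_label  = 0;			// 0: kernel picks a random label
 *	freq.flr_dst    = dst;			// struct in6_addr of the peer
 *	setsockopt(sk, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *	// on success, freq.flr_label has been filled in by the kernel
 */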
#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		fl = fl_ht[state->bucket];

		while (fl && fl->fl_net != net)
			fl = fl->next;
		if (fl)
			break;
	}
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	fl = fl->next;
try_again:
	while (fl && fl->fl_net != net)
		fl = fl->next;

	while (!fl) {
		if (++state->bucket <= FL_HASH_MASK) {
			fl = fl_ht[state->bucket];
			goto try_again;
		} else
			break;
	}
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(ip6_fl_lock)
{
	read_lock_bh(&ip6_fl_lock);
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(ip6_fl_lock)
{
	read_unlock_bh(&ip6_fl_lock);
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
	else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned)ntohl(fl->label),
			   fl->share,
			   (unsigned)fl->owner,
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}
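
/*
 * The table above is exposed as /proc/net/ip6_flowlabel. Illustrative
 * output (values invented for the example, one line per label):
 *
 *	Label S Owner  Users  Linger Expires  Dst                              Opt
 *	A1234 1 0      1      6      58       20010db8000000000000000000000001 0
 */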
static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6fl_seq_ops,
			    sizeof(struct ip6fl_iter_state));
}

static const struct file_operations ip6fl_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	ip6fl_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	seq_release_net,
};

static int ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_net_fops_create(net, "ip6_flowlabel",
				  S_IRUGO, &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void ip6_flowlabel_proc_fini(struct net *net)
{
	proc_net_remove(net, "ip6_flowlabel");
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif
static inline void ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}