/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/route.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>
#define FL_MIN_LINGER	6	/* Minimal linger. It is set to 6sec specified
				   in old IPv6 RFC. Well, it was reasonable value.
				 */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)
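/*
 * The table has FL_HASH_MASK+1 == 256 buckets, indexed by the low
 * eight bits of the 20-bit label in host byte order.
 */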
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Big socket lock */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);
#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))
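/*
 * All three iterators use the _bh flavour of the RCU accessors, so
 * every traversal below runs under rcu_read_lock_bh(); writers
 * serialize on ip6_fl_lock (hash table) or ip6_sk_fl_lock (per-socket
 * lists).
 */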
static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}
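/*
 * The reference is taken with atomic_inc_not_zero(): under RCU the
 * entry may still be visible while the GC is about to free it once
 * ->users has dropped to zero, so a zero count means "do not touch".
 */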
static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		release_net(fl->fl_net);
		kfree(fl->opt);
		kfree_rcu(fl, rcu);
	}
}
static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}
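/*
 * Once the last user is gone the entry is kept until
 * max(lastuse + linger, expires) so the label value is not handed out
 * again immediately; the GC timer is pulled forward if this entry
 * would be the next to die.
 */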
static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	spin_unlock(&ip6_fl_lock);
}
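/*
 * Unlinking uses the "pointer to the next pointer" idiom: *flp is
 * rewritten to skip the dead entry, so no back-pointer is needed and
 * the head of each chain requires no special casing.
 */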
static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock(&ip6_fl_lock);
}
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}
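/*
 * Return convention: NULL means "fl was inserted, keep using it"; a
 * non-NULL result is an existing entry for the same label (with a
 * fresh reference taken) and the caller must recheck its sharing
 * permissions against that entry.
 */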
/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);
void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}
/* Service routines */


/*
 * This is the only difficult place: a flowlabel enforces identical
 * headers up to and including the routing header, but the user may
 * still supply options following the routing header.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (fopt == NULL || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt != NULL) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
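/*
 * In short: everything up to and including the routing header
 * (hopopt/dst0opt/srcrt, counted by opt_nflen) comes from the
 * flowlabel, while dst1opt and the payload options (opt_flen) come
 * from the caller-supplied fopt; opt_space merely aggregates the two.
 */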
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}
static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}
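/*
 * Renewal only ever extends lifetimes: linger and expires are clamped
 * upwards (expires is at least linger) and never shortened, so a renew
 * from one sharer cannot cut another sharer's label short.
 */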
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
					    &junk, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}
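/*
 * The option blob that follows the in6_flowlabel_req in optval is
 * parsed as ancillary data via ip6_datagram_send_ctl(); only options
 * counted by opt_nflen (hop-by-hop, dst0, routing header) may be
 * attached to a label, which is why a non-zero opt_flen is rejected
 * above.
 */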
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}
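/*
 * Admission policy: unprivileged sockets are capped at FL_MAX_PER_SOCK
 * labels each and are squeezed out as the global table approaches
 * FL_MAX_SIZE; even CAP_NET_ADMIN cannot allocate once the table is
 * completely full (room <= 0).
 */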
static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;

			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}
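/*
 * Three sources, in priority order: the label last received from the
 * peer (IPV6_FL_F_REMOTE), the reflected label when repflow is set,
 * and otherwise the managed label currently attached to the socket.
 */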
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference(*sflp)) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label & IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = rcu_dereference(sfl->next);
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;
	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;
	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			struct net *net = sock_net(sk);
			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (fl == NULL)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags & IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (fl1 == NULL)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags & IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -ENOMEM;
				if (sfl1 == NULL)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags & IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (sfl1 == NULL || (err = mem_check(sk)) != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1 != NULL)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}
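/*
 * Illustrative sketch of the userspace side of IPV6_FL_A_GET (not part
 * of this file; error handling trimmed, dst_addr is a placeholder for
 * the destination in6_addr). A zero flr_label asks the kernel to pick
 * a label, which is copied back into the request buffer on success:
 *
 *	struct in6_flowlabel_req freq = {
 *		.flr_dst     = dst_addr,
 *		.flr_label   = 0,		// let the kernel choose
 *		.flr_action  = IPV6_FL_A_GET,
 *		.flr_share   = IPV6_FL_S_EXCL,
 *		.flr_flags   = IPV6_FL_F_CREATE,
 *		.flr_expires = 30,		// seconds
 *		.flr_linger  = 6,		// seconds
 *	};
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
 *		   &freq, sizeof(freq));
 *	// freq.flr_label now holds the allocated label
 */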
#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}
static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}
static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}
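/*
 * The iterator follows the usual seq_file contract: start() takes
 * rcu_read_lock_bh() and stop() drops it, so show() runs entirely
 * under RCU-BH protection.
 */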
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
	else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}
static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct ip6fl_iter_state *state;
	int err;

	err = seq_open_net(inode, file, &ip6fl_seq_ops,
			   sizeof(struct ip6fl_iter_state));

	if (!err) {
		seq = file->private_data;
		state = ip6fl_seq_private(seq);
		rcu_read_lock();
		state->pid_ns = get_pid_ns(task_active_pid_ns(current));
		rcu_read_unlock();
	}
	return err;
}

static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	put_pid_ns(state->pid_ns);
	return seq_release_net(inode, file);
}

static const struct file_operations ip6fl_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	ip6fl_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	ip6fl_seq_release,
};
static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
			 &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif
static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}