/*
 * net/netlink/af_netlink.c
 *
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@redhat.com>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>

#include <net/sock.h>
#include <net/scm.h>

#define Nprintk(a...)

struct netlink_sock {
        /* struct sock has to be the first member of netlink_sock */
        struct sock             sk;
        u32                     pid;
        unsigned int            groups;
        u32                     dst_pid;
        unsigned int            dst_groups;
        unsigned long           state;
        wait_queue_head_t       wait;
        struct netlink_callback *cb;
        spinlock_t              cb_lock;
        void                    (*data_ready)(struct sock *sk, int bytes);
};

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
        return (struct netlink_sock *)sk;
}

struct nl_pid_hash {
        struct hlist_head *table;
        unsigned long rehash_time;

        unsigned int mask;
        unsigned int shift;

        unsigned int entries;
        unsigned int max_shift;

        u32 rnd;
};

struct netlink_table {
        struct nl_pid_hash hash;
        struct hlist_head mc_list;
        unsigned int nl_nonroot;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
        return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

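/*
 * The pid is mixed with the per-table random seed hash->rnd, so bucket
 * placement cannot be predicted from the pid alone; the seed is re-rolled
 * each time the table is rehashed (see nl_pid_hash_rehash() below).
 */
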
static void netlink_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk("Freeing alive netlink socket %p\n", sk);
                return;
        }
        BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
        BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
        BUG_TRAP(!nlk_sk(sk)->cb);
}

/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the cpus. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
        write_lock_bh(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_bh(&nl_table_lock);
                        schedule();
                        write_lock_bh(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

static __inline__ void netlink_table_ungrab(void)
{
        write_unlock_bh(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}

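/*
 * To summarize the scheme: writers take netlink_table_grab(), which holds
 * nl_table_lock for writing and then waits until nl_table_users drops to
 * zero. Readers that may sleep (e.g. while cloning an skb in
 * netlink_broadcast()) use netlink_lock_table(), which only bumps
 * nl_table_users under a momentary read_lock(); non-sleeping readers take
 * read_lock(&nl_table_lock) directly, as netlink_lookup() does below.
 */
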
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[protocol].hash;
        struct hlist_head *head;
        struct sock *sk;
        struct hlist_node *node;

        read_lock(&nl_table_lock);
        head = nl_pid_hashfn(hash, pid);
        sk_for_each(sk, node, head) {
                if (nlk_sk(sk)->pid == pid) {
                        sock_hold(sk);
                        goto found;
                }
        }
        sk = NULL;
found:
        read_unlock(&nl_table_lock);
        return sk;
}

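/*
 * On success netlink_lookup() returns the socket with a reference held
 * (sock_hold() above); every caller in this file drops it again with
 * sock_put() once it is done with the socket.
 */
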
static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
        if (size <= PAGE_SIZE)
                return kmalloc(size, GFP_ATOMIC);
        else
                return (struct hlist_head *)
                        __get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
        if (size <= PAGE_SIZE)
                kfree(table);
        else
                free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
        unsigned int omask, mask, shift;
        size_t osize, size;
        struct hlist_head *otable, *table;
        int i;

        omask = mask = hash->mask;
        osize = size = (mask + 1) * sizeof(*table);
        shift = hash->shift;

        if (grow) {
                if (++shift > hash->max_shift)
                        return 0;
                mask = mask * 2 + 1;
                size *= 2;
        }

        table = nl_pid_hash_alloc(size);
        if (!table)
                return 0;

        memset(table, 0, size);
        otable = hash->table;
        hash->table = table;
        hash->mask = mask;
        hash->shift = shift;
        get_random_bytes(&hash->rnd, sizeof(hash->rnd));

        for (i = 0; i <= omask; i++) {
                struct sock *sk;
                struct hlist_node *node, *tmp;

                sk_for_each_safe(sk, node, tmp, &otable[i])
                        __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
        }

        nl_pid_hash_free(otable, osize);
        hash->rehash_time = jiffies + 10 * 60 * HZ;
        return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
        int avg = hash->entries >> hash->shift;

        if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
                return 1;

        if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
                nl_pid_hash_rehash(hash, 0);
                return 1;
        }

        return 0;
}

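/*
 * avg is entries >> shift, i.e. the mean chain length. With, say, 1024
 * buckets (shift == 10) and 2500 sockets, avg == 2, so the table is grown.
 * If the average load is fine but this particular chain is longer than
 * average (len > avg), the table is rehashed at the same size - the fresh
 * hash->rnd re-spreads the chains - at most once per the ten-minute
 * rehash_time window set above.
 */
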
static struct proto_ops netlink_ops;

static int netlink_insert(struct sock *sk, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        int err = -EADDRINUSE;
        struct sock *osk;
        struct hlist_node *node;
        int len;

        netlink_table_grab();
        head = nl_pid_hashfn(hash, pid);
        len = 0;
        sk_for_each(osk, node, head) {
                if (nlk_sk(osk)->pid == pid)
                        break;
                len++;
        }
        if (node)
                goto err;

        err = -EBUSY;
        if (nlk_sk(sk)->pid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
                goto err;

        if (len && nl_pid_hash_dilute(hash, len))
                head = nl_pid_hashfn(hash, pid);
        hash->entries++;
        nlk_sk(sk)->pid = pid;
        sk_add_node(sk, head);
        err = 0;

err:
        netlink_table_ungrab();
        return err;
}

static void netlink_remove(struct sock *sk)
{
        netlink_table_grab();
        nl_table[sk->sk_protocol].hash.entries--;
        sk_del_node_init(sk);
        if (nlk_sk(sk)->groups)
                __sk_del_bind_node(sk);
        netlink_table_ungrab();
}

static struct proto netlink_proto = {
        .name     = "NETLINK",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct netlink_sock),
};

static int netlink_create(struct socket *sock, int protocol)
{
        struct sock *sk;
        struct netlink_sock *nlk;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        sock->ops = &netlink_ops;

        sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        nlk = nlk_sk(sk);

        spin_lock_init(&nlk->cb_lock);
        init_waitqueue_head(&nlk->wait);
        sk->sk_destruct = netlink_sock_destruct;

        sk->sk_protocol = protocol;
        return 0;
}

static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        nlk = nlk_sk(sk);

        spin_lock(&nlk->cb_lock);
        if (nlk->cb) {
                nlk->cb->done(nlk->cb);
                netlink_destroy_callback(nlk->cb);
                nlk->cb = NULL;
        }
        spin_unlock(&nlk->cb_lock);

        /* OK. Socket is unlinked, and, therefore,
           no new packets will arrive */

        sock_orphan(sk);
        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->pid && !nlk->groups) {
                struct netlink_notify n = {
                        .protocol = sk->sk_protocol,
                        .pid = nlk->pid,
                };
                notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
        }

        sock_put(sk);
        return 0;
}

static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        struct sock *osk;
        struct hlist_node *node;
        s32 pid = current->pid;
        int err;
        static s32 rover = -4097;

retry:
        cond_resched();
        netlink_table_grab();
        head = nl_pid_hashfn(hash, pid);
        sk_for_each(osk, node, head) {
                if (nlk_sk(osk)->pid == pid) {
                        /* Bind collision, search negative pid values. */
                        pid = rover--;
                        if (rover > -4097)
                                rover = -4097;
                        netlink_table_ungrab();
                        goto retry;
                }
        }
        netlink_table_ungrab();

        err = netlink_insert(sk, pid);
        if (err == -EADDRINUSE)
                goto retry;
        return 0;
}

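/*
 * Autobind first tries current->pid, so a process that never calls bind()
 * usually ends up addressable by its own process id. On collision it
 * hands out values from the negative range starting at -4097, retrying
 * until netlink_insert() stops reporting -EADDRINUSE.
 */
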
static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
        return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
               capable(CAP_NET_ADMIN);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err;

        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;

        /* Only the superuser may listen to multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
                return -EPERM;

        if (nlk->pid) {
                if (nladdr->nl_pid != nlk->pid)
                        return -EINVAL;
        } else {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err)
                        return err;
        }

        if (!nladdr->nl_groups && !nlk->groups)
                return 0;

        netlink_table_grab();
        if (nlk->groups && !nladdr->nl_groups)
                __sk_del_bind_node(sk);
        else if (!nlk->groups && nladdr->nl_groups)
                sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
        nlk->groups = nladdr->nl_groups;
        netlink_table_ungrab();

        return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                           int alen, int flags)
{
        int err = 0;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        if (addr->sa_family == AF_UNSPEC) {
                sk->sk_state = NETLINK_UNCONNECTED;
                nlk->dst_pid = 0;
                nlk->dst_groups = 0;
                return 0;
        }
        if (addr->sa_family != AF_NETLINK)
                return -EINVAL;

        /* Only the superuser may send multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
                return -EPERM;

        if (!nlk->pid)
                err = netlink_autobind(sock);

        if (err == 0) {
                sk->sk_state = NETLINK_CONNECTED;
                nlk->dst_pid = nladdr->nl_pid;
                nlk->dst_groups = nladdr->nl_groups;
        }

        return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        nladdr->nl_family = AF_NETLINK;
        nladdr->nl_pad = 0;
        *addr_len = sizeof(*nladdr);

        if (peer) {
                nladdr->nl_pid = nlk->dst_pid;
                nladdr->nl_groups = nlk->dst_groups;
        } else {
                nladdr->nl_pid = nlk->pid;
                nladdr->nl_groups = nlk->groups;
        }
        return 0;
}

static void netlink_overrun(struct sock *sk)
{
        if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
                sk->sk_err = ENOBUFS;
                sk->sk_error_report(sk);
        }
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
        int protocol = ssk->sk_protocol;
        struct sock *sock;
        struct netlink_sock *nlk;

        sock = netlink_lookup(protocol, pid);
        if (!sock)
                return ERR_PTR(-ECONNREFUSED);

        /* Don't bother queuing skb if kernel socket has no input function */
        nlk = nlk_sk(sock);
        if ((nlk->pid == 0 && !nlk->data_ready) ||
            (sock->sk_state == NETLINK_CONNECTED &&
             nlk->dst_pid != nlk_sk(ssk)->pid)) {
                sock_put(sock);
                return ERR_PTR(-ECONNREFUSED);
        }
        return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
        struct inode *inode = filp->f_dentry->d_inode;
        struct sock *sock;

        if (!S_ISSOCK(inode->i_mode))
                return ERR_PTR(-ENOTSOCK);

        sock = SOCKET_I(inode)->sk;
        if (sock->sk_family != AF_NETLINK)
                return ERR_PTR(-EINVAL);

        sock_hold(sock);
        return sock;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
        struct netlink_sock *nlk;

        nlk = nlk_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
            test_bit(0, &nlk->state)) {
                DECLARE_WAITQUEUE(wait, current);
                if (!timeo) {
                        if (!nlk->pid)
                                netlink_overrun(sk);
                        sock_put(sk);
                        kfree_skb(skb);
                        return -EAGAIN;
                }

                __set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&nlk->wait, &wait);

                if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
                     test_bit(0, &nlk->state)) &&
                    !sock_flag(sk, SOCK_DEAD))
                        timeo = schedule_timeout(timeo);

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nlk->wait, &wait);
                sock_put(sk);

                if (signal_pending(current)) {
                        kfree_skb(skb);
                        return sock_intr_errno(timeo);
                }
                return 1;
        }
        skb_set_owner_r(skb, sk);
        return 0;
}

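/*
 * The "return 1" case hands the retry to the caller: the socket reference
 * was dropped while sleeping, so the destination must be looked up again,
 * exactly as the retry loop in netlink_unicast() below does.
 */
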
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
        struct netlink_sock *nlk;
        int len = skb->len;

        nlk = nlk_sk(sk);

        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, len);
        sock_put(sk);
        return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
        sock_put(sk);
}

static inline struct sk_buff *netlink_trim(struct sk_buff *skb, int allocation)
{
        int delta;

        skb_orphan(skb);

        delta = skb->end - skb->tail;
        if (delta * 2 < skb->truesize)
                return skb;

        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, allocation);
                if (!nskb)
                        return skb;
                kfree_skb(skb);
                skb = nskb;
        }

        if (!pskb_expand_head(skb, 0, -delta, allocation))
                skb->truesize -= delta;

        return skb;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
        struct sock *sk;
        int err;
        long timeo;

        skb = netlink_trim(skb, gfp_any());

        timeo = sock_sndtimeo(ssk, nonblock);
retry:
        sk = netlink_getsockbypid(ssk, pid);
        if (IS_ERR(sk)) {
                kfree_skb(skb);
                return PTR_ERR(sk);
        }
        err = netlink_attachskb(sk, skb, nonblock, timeo);
        if (err == 1)
                goto retry;
        if (err)
                return err;

        return netlink_sendskb(sk, skb, ssk->sk_protocol);
}

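/*
 * Usage sketch (illustrative, not part of this file): a kernel-side user
 * that obtained a socket from netlink_kernel_create() could reply to pid
 * 1234 roughly like this; the skb sizing and payload are assumptions made
 * for the example only.
 *
 *      struct sk_buff *skb = alloc_skb(NLMSG_SPACE(len), GFP_KERNEL);
 *      if (skb) {
 *              ... fill in the nlmsghdr and payload via __nlmsg_put() ...
 *              netlink_unicast(nl_sk, skb, 1234, MSG_DONTWAIT);
 *      }
 *
 * Note that netlink_unicast() consumes the skb on every path, success
 * and failure alike.
 */
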
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
            !test_bit(0, &nlk->state)) {
                skb_set_owner_r(skb, sk);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, skb->len);
                return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
        }
        return -1;
}

struct netlink_broadcast_data {
        struct sock *exclude_sk;
        u32 pid;
        u32 group;
        int failure;
        int congested;
        int delivered;
        int allocation;
        struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
                                   struct netlink_broadcast_data *p)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        int val;

        if (p->exclude_sk == sk)
                goto out;

        if (nlk->pid == p->pid || !(nlk->groups & p->group))
                goto out;

        if (p->failure) {
                netlink_overrun(sk);
                goto out;
        }

        sock_hold(sk);
        if (p->skb2 == NULL) {
                if (atomic_read(&p->skb->users) != 1) {
                        p->skb2 = skb_clone(p->skb, p->allocation);
                } else {
                        p->skb2 = p->skb;
                        atomic_inc(&p->skb->users);
                }
        }
        if (p->skb2 == NULL) {
                netlink_overrun(sk);
                /* Clone failed. Notify ALL listeners. */
                p->failure = 1;
        } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
                netlink_overrun(sk);
        } else {
                p->congested |= val;
                p->delivered = 1;
                p->skb2 = NULL;
        }
        sock_put(sk);

out:
        return 0;
}

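/*
 * skb2 is the delivery copy. A clone is made lazily, only once a willing
 * recipient is found (and the original skb is reused when nobody else
 * holds a reference to it). After a successful delivery p->skb2 is reset
 * to NULL so the next recipient gets its own clone, while a failed
 * delivery keeps the clone around for reuse.
 */
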
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
                      u32 group, int allocation)
{
        struct netlink_broadcast_data info;
        struct hlist_node *node;
        struct sock *sk;

        skb = netlink_trim(skb, allocation);

        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
        info.failure = 0;
        info.congested = 0;
        info.delivered = 0;
        info.allocation = allocation;
        info.skb = skb;
        info.skb2 = NULL;

        /* While we sleep in clone, do not allow the socket list to change */

        netlink_lock_table();

        sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
                do_one_broadcast(sk, &info);

        netlink_unlock_table();

        if (info.skb2)
                kfree_skb(info.skb2);
        kfree_skb(skb);

        if (info.delivered) {
                if (info.congested && (allocation & __GFP_WAIT))
                        yield();
                return 0;
        }
        if (info.failure)
                return -ENOBUFS;
        return -ESRCH;
}

struct netlink_set_err_data {
        struct sock *exclude_sk;
        u32 pid;
        u32 group;
        int code;
};

static inline int do_one_set_err(struct sock *sk,
                                 struct netlink_set_err_data *p)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (sk == p->exclude_sk)
                goto out;

        if (nlk->pid == p->pid || !(nlk->groups & p->group))
                goto out;

        sk->sk_err = p->code;
        sk->sk_error_report(sk);
out:
        return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
        struct netlink_set_err_data info;
        struct hlist_node *node;
        struct sock *sk;

        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
        info.code = code;

        read_lock(&nl_table_lock);

        sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
                do_one_set_err(sk, &info);

        read_unlock(&nl_table_lock);
}

static inline void netlink_rcv_wake(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (!skb_queue_len(&sk->sk_receive_queue))
                clear_bit(0, &nlk->state);
        if (!test_bit(0, &nlk->state))
                wake_up_interruptible(&nlk->wait);
}

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *addr = msg->msg_name;
        u32 dst_pid;
        u32 dst_groups;
        struct sk_buff *skb;
        int err;
        struct scm_cookie scm;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (NULL == siocb->scm)
                siocb->scm = &scm;
        err = scm_send(sock, msg, siocb->scm);
        if (err < 0)
                return err;

        if (msg->msg_namelen) {
                if (addr->nl_family != AF_NETLINK)
                        return -EINVAL;
                dst_pid = addr->nl_pid;
                dst_groups = addr->nl_groups;
                if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
                        return -EPERM;
        } else {
                dst_pid = nlk->dst_pid;
                dst_groups = nlk->dst_groups;
        }

        if (!nlk->pid) {
                err = netlink_autobind(sock);
                if (err)
                        goto out;
        }

        err = -EMSGSIZE;
        if (len > sk->sk_sndbuf - 32)
                goto out;
        err = -ENOBUFS;
        skb = alloc_skb(len, GFP_KERNEL);
        if (skb == NULL)
                goto out;

        NETLINK_CB(skb).pid = nlk->pid;
        NETLINK_CB(skb).groups = nlk->groups;
        NETLINK_CB(skb).dst_pid = dst_pid;
        NETLINK_CB(skb).dst_groups = dst_groups;
        NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
        memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

        /* What can I do? Netlink is asynchronous, so that
           we will have to save current capabilities to
           check them, when this message will be delivered
           to corresponding kernel module.   --ANK (980802)
         */

        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                kfree_skb(skb);
                goto out;
        }

        err = security_netlink_send(sk, skb);
        if (err) {
                kfree_skb(skb);
                goto out;
        }

        if (dst_groups) {
                atomic_inc(&skb->users);
                netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
        }
        err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
        return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len,
                           int flags)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        int noblock = flags & MSG_DONTWAIT;
        size_t copied;
        struct sk_buff *skb;
        int err;

        if (flags & MSG_OOB)
                return -EOPNOTSUPP;

        copied = 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (skb == NULL)
                goto out;

        msg->msg_namelen = 0;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb->h.raw = skb->data;
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        if (msg->msg_name) {
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
                addr->nl_family = AF_NETLINK;
                addr->nl_pad = 0;
                addr->nl_pid = NETLINK_CB(skb).pid;
                addr->nl_groups = NETLINK_CB(skb).dst_groups;
                msg->msg_namelen = sizeof(*addr);
        }

        if (NULL == siocb->scm) {
                memset(&scm, 0, sizeof(scm));
                siocb->scm = &scm;
        }
        siocb->scm->creds = *NETLINK_CREDS(skb);
        skb_free_datagram(sk, skb);

        if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
                netlink_dump(sk);

        scm_recv(sock, msg, siocb->scm, flags);

out:
        netlink_rcv_wake(sk);
        return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->data_ready)
                nlk->data_ready(sk, len);
        netlink_rcv_wake(sk);
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
{
        struct socket *sock;
        struct sock *sk;

        if (!nl_table)
                return NULL;

        if (unit < 0 || unit >= MAX_LINKS)
                return NULL;

        if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
                return NULL;

        if (netlink_create(sock, unit) < 0) {
                sock_release(sock);
                return NULL;
        }
        sk = sock->sk;
        sk->sk_data_ready = netlink_data_ready;
        if (input)
                nlk_sk(sk)->data_ready = input;

        if (netlink_insert(sk, 0)) {
                sock_release(sock);
                return NULL;
        }
        return sk;
}

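/*
 * Usage sketch (illustrative only): a subsystem typically creates its
 * kernel socket once at init time. NETLINK_TEST and my_input below are
 * placeholders, not names defined here.
 *
 *      static void my_input(struct sock *sk, int len)
 *      {
 *              struct sk_buff *skb;
 *
 *              while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 *                      ... parse the struct nlmsghdr at skb->data,
 *                          possibly reply with netlink_ack() ...
 *                      kfree_skb(skb);
 *              }
 *      }
 *
 *      nl_sk = netlink_kernel_create(NETLINK_TEST, my_input);
 *
 * The pid 0 slot taken by netlink_insert(sk, 0) above is what makes this
 * socket the kernel-side endpoint for the protocol.
 */
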
void netlink_set_nonroot(int protocol, unsigned int flags)
{
        if ((unsigned int)protocol < MAX_LINKS)
                nl_table[protocol].nl_nonroot = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
        if (cb->skb)
                kfree_skb(cb->skb);
        kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_callback *cb;
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        int len;

        skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        spin_lock(&nlk->cb_lock);

        cb = nlk->cb;
        if (cb == NULL) {
                spin_unlock(&nlk->cb_lock);
                kfree_skb(skb);
                return -EINVAL;
        }

        len = cb->dump(skb, cb);

        if (len > 0) {
                spin_unlock(&nlk->cb_lock);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, len);
                return 0;
        }

        nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
                          NLMSG_DONE, sizeof(int));
        nlh->nlmsg_flags |= NLM_F_MULTI;
        memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, skb->len);

        cb->done(cb);
        nlk->cb = NULL;
        spin_unlock(&nlk->cb_lock);

        netlink_destroy_callback(cb);
        return 0;
}

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                       struct nlmsghdr *nlh,
                       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
                       int (*done)(struct netlink_callback *))
{
        struct netlink_callback *cb;
        struct sock *sk;
        struct netlink_sock *nlk;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (cb == NULL)
                return -ENOBUFS;

        memset(cb, 0, sizeof(*cb));
        cb->dump = dump;
        cb->done = done;
        cb->nlh = nlh;
        atomic_inc(&skb->users);
        cb->skb = skb;

        sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
        if (sk == NULL) {
                netlink_destroy_callback(cb);
                return -ECONNREFUSED;
        }
        nlk = nlk_sk(sk);
        /* A dump is in progress... */
        spin_lock(&nlk->cb_lock);
        if (nlk->cb) {
                spin_unlock(&nlk->cb_lock);
                netlink_destroy_callback(cb);
                sock_put(sk);
                return -EBUSY;
        }
        nlk->cb = cb;
        spin_unlock(&nlk->cb_lock);

        netlink_dump(sk);
        sock_put(sk);
        return 0;
}

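/*
 * The dump protocol, in short: netlink_dump_start() pins the callback on
 * the requesting socket and emits the first NLMSG_GOODSIZE-sized batch;
 * netlink_recvmsg() calls netlink_dump() again whenever the receive queue
 * drains below half of sk_rcvbuf, until cb->dump() returns <= 0 and a
 * NLMSG_DONE message (flagged NLM_F_MULTI) terminates the stream.
 */
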
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
        struct sk_buff *skb;
        struct nlmsghdr *rep;
        struct nlmsgerr *errmsg;
        int size;

        if (err == 0)
                size = NLMSG_SPACE(sizeof(struct nlmsgerr));
        else
                size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb) {
                struct sock *sk;

                sk = netlink_lookup(in_skb->sk->sk_protocol,
                                    NETLINK_CB(in_skb).pid);
                if (sk) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                        sock_put(sk);
                }
                return;
        }

        rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
                          NLMSG_ERROR, sizeof(struct nlmsgerr));
        errmsg = NLMSG_DATA(rep);
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
        netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}

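/*
 * The NLMSG_ERROR payload is a struct nlmsgerr: a 4-byte error code
 * followed by the offending message. On success (err == 0, the "ack"
 * case) only the original nlmsghdr is echoed back; on failure the whole
 * original message is copied, which is why the allocation above sizes
 * the skb as 4 + NLMSG_ALIGN(nlh->nlmsg_len).
 */
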
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
        int link;
        int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
        struct nl_seq_iter *iter = seq->private;
        int i, j;
        struct sock *s;
        struct hlist_node *node;
        loff_t off = 0;

        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                for (j = 0; j <= hash->mask; j++) {
                        sk_for_each(s, node, &hash->table[j]) {
                                if (off == pos) {
                                        iter->link = i;
                                        iter->hash_idx = j;
                                        return s;
                                }
                                ++off;
                        }
                }
        }
        return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock(&nl_table_lock);
        return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock *s;
        struct nl_seq_iter *iter;
        int i, j;

        ++*pos;

        if (v == SEQ_START_TOKEN)
                return netlink_seq_socket_idx(seq, 0);

        s = sk_next(v);
        if (s)
                return s;

        iter = seq->private;
        i = iter->link;
        j = iter->hash_idx + 1;

        do {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                for (; j <= hash->mask; j++) {
                        s = sk_head(&hash->table[j]);
                        if (s) {
                                iter->link = i;
                                iter->hash_idx = j;
                                return s;
                        }
                }

                j = 0;
        } while (++i < MAX_LINKS);

        return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
        read_unlock(&nl_table_lock);
}

static int netlink_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq,
                         "sk       Eth Pid    Groups   "
                         "Rmem     Wmem     Dump     Locks\n");
        else {
                struct sock *s = v;
                struct netlink_sock *nlk = nlk_sk(s);

                seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
                           s,
                           s->sk_protocol,
                           nlk->pid,
                           nlk->groups,
                           atomic_read(&s->sk_rmem_alloc),
                           atomic_read(&s->sk_wmem_alloc),
                           nlk->cb,
                           atomic_read(&s->sk_refcnt)
                        );

        }
        return 0;
}

static struct seq_operations netlink_seq_ops = {
        .start  = netlink_seq_start,
        .next   = netlink_seq_next,
        .stop   = netlink_seq_stop,
        .show   = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct nl_seq_iter *iter;
        int err;

        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        err = seq_open(file, &netlink_seq_ops);
        if (err) {
                kfree(iter);
                return err;
        }

        memset(iter, 0, sizeof(*iter));
        seq = file->private_data;
        seq->private = iter;
        return 0;
}

static struct file_operations netlink_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = netlink_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&netlink_chain, nb);
}

static struct proto_ops netlink_ops = {
        .family =       PF_NETLINK,
        .owner =        THIS_MODULE,
        .release =      netlink_release,
        .bind =         netlink_bind,
        .connect =      netlink_connect,
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      netlink_getname,
        .poll =         datagram_poll,
        .ioctl =        sock_no_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      netlink_sendmsg,
        .recvmsg =      netlink_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
        .family = PF_NETLINK,
        .create = netlink_create,
        .owner  = THIS_MODULE,  /* for consistency 8) */
};

extern void netlink_skb_parms_too_large(void);

static int __init netlink_proto_init(void)
{
        struct sk_buff *dummy_skb;
        int i;
        unsigned long max;
        unsigned int order;
        int err = proto_register(&netlink_proto, 0);

        if (err != 0)
                goto out;

        if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
                netlink_skb_parms_too_large();

        nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
        if (!nl_table) {
enomem:
                printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
                return -ENOMEM;
        }

        memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

        if (num_physpages >= (128 * 1024))
                max = num_physpages >> (21 - PAGE_SHIFT);
        else
                max = num_physpages >> (23 - PAGE_SHIFT);

        order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
        max = (1UL << order) / sizeof(struct hlist_head);
        order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

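        /*
         * Worked example (assuming 4K pages, PAGE_SHIFT == 12): with 256 MB
         * of RAM, num_physpages == 65536 < 128 * 1024, so max becomes
         * 65536 >> 11 == 32 pages of hash memory. The two lines above then
         * turn that byte budget into the largest power-of-two number of
         * hlist_head slots it can hold, and its log2 becomes max_shift,
         * the growth limit enforced by nl_pid_hash_rehash().
         */
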
        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
                if (!hash->table) {
                        while (i-- > 0)
                                nl_pid_hash_free(nl_table[i].hash.table,
                                                 1 * sizeof(*hash->table));
                        kfree(nl_table);
                        goto enomem;
                }
                memset(hash->table, 0, 1 * sizeof(*hash->table));
                hash->max_shift = order;
                hash->shift = 0;
                hash->mask = 0;
                hash->rehash_time = jiffies;
        }

        sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
        proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
        /* The netlink device handler may be needed early. */
        rtnetlink_init();
out:
        return err;
}

static void __exit netlink_proto_exit(void)
{
        sock_unregister(PF_NETLINK);
        proc_net_remove("netlink");
        kfree(nl_table);
        nl_table = NULL;
        proto_unregister(&netlink_proto);
}

core_initcall(netlink_proto_init);
module_exit(netlink_proto_exit);

MODULE_LICENSE("GPL");

MODULE_ALIAS_NETPROTO(PF_NETLINK);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);