/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>

#include <net/net_namespace.h>
#include <net/netlink.h>
#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
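/*
 * Illustrative note, not part of the original file: NLGRPSZ() rounds a group
 * count up to whole unsigned longs and returns the size in bytes, while
 * NLGRPLONGS() returns the same size counted in longs.  For the default 32
 * groups, both 32-bit and 64-bit builds need exactly one long:
 */
#if 0	/* example only */
static void nlgrpsz_example(void)
{
	BUILD_BUG_ON(NLGRPSZ(32) != sizeof(unsigned long));
	BUILD_BUG_ON(NLGRPLONGS(32) != 1);
}
#endif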
/* struct sock has to be the first member of netlink_sock */
	unsigned long		*groups;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	struct mutex		*cb_mutex;
	struct mutex		cb_def_mutex;
	void			(*netlink_rcv)(struct sk_buff *skb);
	void			(*netlink_bind)(int group);
	struct module		*module;

	unsigned long		masks[0];

#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8
static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return container_of(sk, struct netlink_sock, sk);
}

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}
	struct hlist_head	*table;
	unsigned long		rehash_time;

	unsigned int		entries;
	unsigned int		max_shift;

struct netlink_table {
	struct nl_pid_hash	hash;
	struct hlist_head	mc_list;
	struct listeners __rcu	*listeners;
	unsigned int		nl_nonroot;
	struct mutex		*cb_mutex;
	struct module		*module;
	void			(*bind)(int group);
static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}
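/*
 * A minimal sketch (not part of the original file) of how the 1-based group
 * number maps onto the single-bit mask used throughout this file; group 0
 * means "no group" and yields an empty mask:
 */
#if 0	/* example only */
static void netlink_group_mask_example(void)
{
	WARN_ON(netlink_group_mask(0) != 0x0);
	WARN_ON(netlink_group_mask(1) != 0x1);
	WARN_ON(netlink_group_mask(3) != 0x4);
}
#endif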
static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
153 static void netlink_destroy_callback(struct netlink_callback
*cb
)
159 static void netlink_consume_callback(struct netlink_callback
*cb
)
161 consume_skb(cb
->skb
);
165 static void netlink_sock_destruct(struct sock
*sk
)
167 struct netlink_sock
*nlk
= nlk_sk(sk
);
171 nlk
->cb
->done(nlk
->cb
);
172 netlink_destroy_callback(nlk
->cb
);
175 skb_queue_purge(&sk
->sk_receive_queue
);
177 if (!sock_flag(sk
, SOCK_DEAD
)) {
178 printk(KERN_ERR
"Freeing alive netlink socket %p\n", sk
);
182 WARN_ON(atomic_read(&sk
->sk_rmem_alloc
));
183 WARN_ON(atomic_read(&sk
->sk_wmem_alloc
));
184 WARN_ON(nlk_sk(sk
)->groups
);
/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */
193 void netlink_table_grab(void)
194 __acquires(nl_table_lock
)
198 write_lock_irq(&nl_table_lock
);
200 if (atomic_read(&nl_table_users
)) {
201 DECLARE_WAITQUEUE(wait
, current
);
203 add_wait_queue_exclusive(&nl_table_wait
, &wait
);
205 set_current_state(TASK_UNINTERRUPTIBLE
);
206 if (atomic_read(&nl_table_users
) == 0)
208 write_unlock_irq(&nl_table_lock
);
210 write_lock_irq(&nl_table_lock
);
213 __set_current_state(TASK_RUNNING
);
214 remove_wait_queue(&nl_table_wait
, &wait
);
218 void netlink_table_ungrab(void)
219 __releases(nl_table_lock
)
221 write_unlock_irq(&nl_table_lock
);
222 wake_up(&nl_table_wait
);
226 netlink_lock_table(void)
228 /* read_lock() synchronizes us to netlink_table_grab */
230 read_lock(&nl_table_lock
);
231 atomic_inc(&nl_table_users
);
232 read_unlock(&nl_table_lock
);
236 netlink_unlock_table(void)
238 if (atomic_dec_and_test(&nl_table_users
))
239 wake_up(&nl_table_wait
);
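/*
 * Illustrative sketch, not part of the original file: writers that modify
 * nl_table (insert, remove, bind changes) bracket the update with
 * netlink_table_grab()/netlink_table_ungrab(), while lookup paths that may
 * sleep only pin the table with netlink_lock_table()/netlink_unlock_table():
 */
#if 0	/* example only */
static void netlink_table_locking_example(void)
{
	netlink_table_grab();
	/* ... update hash buckets, listener bitmaps, mc_list ... */
	netlink_table_ungrab();

	netlink_lock_table();
	/* ... walk nl_table while letting writers wait for us ... */
	netlink_unlock_table();
}
#endif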
242 static struct sock
*netlink_lookup(struct net
*net
, int protocol
, u32 pid
)
244 struct nl_pid_hash
*hash
= &nl_table
[protocol
].hash
;
245 struct hlist_head
*head
;
247 struct hlist_node
*node
;
249 read_lock(&nl_table_lock
);
250 head
= nl_pid_hashfn(hash
, pid
);
251 sk_for_each(sk
, node
, head
) {
252 if (net_eq(sock_net(sk
), net
) && (nlk_sk(sk
)->pid
== pid
)) {
259 read_unlock(&nl_table_lock
);
263 static struct hlist_head
*nl_pid_hash_zalloc(size_t size
)
265 if (size
<= PAGE_SIZE
)
266 return kzalloc(size
, GFP_ATOMIC
);
268 return (struct hlist_head
*)
269 __get_free_pages(GFP_ATOMIC
| __GFP_ZERO
,
273 static void nl_pid_hash_free(struct hlist_head
*table
, size_t size
)
275 if (size
<= PAGE_SIZE
)
278 free_pages((unsigned long)table
, get_order(size
));
281 static int nl_pid_hash_rehash(struct nl_pid_hash
*hash
, int grow
)
283 unsigned int omask
, mask
, shift
;
285 struct hlist_head
*otable
, *table
;
288 omask
= mask
= hash
->mask
;
289 osize
= size
= (mask
+ 1) * sizeof(*table
);
293 if (++shift
> hash
->max_shift
)
299 table
= nl_pid_hash_zalloc(size
);
303 otable
= hash
->table
;
307 get_random_bytes(&hash
->rnd
, sizeof(hash
->rnd
));
309 for (i
= 0; i
<= omask
; i
++) {
311 struct hlist_node
*node
, *tmp
;
313 sk_for_each_safe(sk
, node
, tmp
, &otable
[i
])
314 __sk_add_node(sk
, nl_pid_hashfn(hash
, nlk_sk(sk
)->pid
));
317 nl_pid_hash_free(otable
, osize
);
318 hash
->rehash_time
= jiffies
+ 10 * 60 * HZ
;
322 static inline int nl_pid_hash_dilute(struct nl_pid_hash
*hash
, int len
)
324 int avg
= hash
->entries
>> hash
->shift
;
326 if (unlikely(avg
> 1) && nl_pid_hash_rehash(hash
, 1))
329 if (unlikely(len
> avg
) && time_after(jiffies
, hash
->rehash_time
)) {
330 nl_pid_hash_rehash(hash
, 0);
static const struct proto_ops netlink_ops;
340 netlink_update_listeners(struct sock
*sk
)
342 struct netlink_table
*tbl
= &nl_table
[sk
->sk_protocol
];
343 struct hlist_node
*node
;
347 for (i
= 0; i
< NLGRPLONGS(tbl
->groups
); i
++) {
349 sk_for_each_bound(sk
, node
, &tbl
->mc_list
) {
350 if (i
< NLGRPLONGS(nlk_sk(sk
)->ngroups
))
351 mask
|= nlk_sk(sk
)->groups
[i
];
353 tbl
->listeners
->masks
[i
] = mask
;
/* this function is only called with the netlink table "grabbed", which
 * makes sure updates are visible before bind or setsockopt return. */
359 static int netlink_insert(struct sock
*sk
, struct net
*net
, u32 pid
)
361 struct nl_pid_hash
*hash
= &nl_table
[sk
->sk_protocol
].hash
;
362 struct hlist_head
*head
;
363 int err
= -EADDRINUSE
;
365 struct hlist_node
*node
;
368 netlink_table_grab();
369 head
= nl_pid_hashfn(hash
, pid
);
371 sk_for_each(osk
, node
, head
) {
372 if (net_eq(sock_net(osk
), net
) && (nlk_sk(osk
)->pid
== pid
))
384 if (BITS_PER_LONG
> 32 && unlikely(hash
->entries
>= UINT_MAX
))
387 if (len
&& nl_pid_hash_dilute(hash
, len
))
388 head
= nl_pid_hashfn(hash
, pid
);
390 nlk_sk(sk
)->pid
= pid
;
391 sk_add_node(sk
, head
);
395 netlink_table_ungrab();
399 static void netlink_remove(struct sock
*sk
)
401 netlink_table_grab();
402 if (sk_del_node_init(sk
))
403 nl_table
[sk
->sk_protocol
].hash
.entries
--;
404 if (nlk_sk(sk
)->subscriptions
)
405 __sk_del_bind_node(sk
);
406 netlink_table_ungrab();
409 static struct proto netlink_proto
= {
411 .owner
= THIS_MODULE
,
412 .obj_size
= sizeof(struct netlink_sock
),
415 static int __netlink_create(struct net
*net
, struct socket
*sock
,
416 struct mutex
*cb_mutex
, int protocol
)
419 struct netlink_sock
*nlk
;
421 sock
->ops
= &netlink_ops
;
423 sk
= sk_alloc(net
, PF_NETLINK
, GFP_KERNEL
, &netlink_proto
);
427 sock_init_data(sock
, sk
);
431 nlk
->cb_mutex
= cb_mutex
;
433 nlk
->cb_mutex
= &nlk
->cb_def_mutex
;
434 mutex_init(nlk
->cb_mutex
);
436 init_waitqueue_head(&nlk
->wait
);
438 sk
->sk_destruct
= netlink_sock_destruct
;
439 sk
->sk_protocol
= protocol
;
443 static int netlink_create(struct net
*net
, struct socket
*sock
, int protocol
,
446 struct module
*module
= NULL
;
447 struct mutex
*cb_mutex
;
448 struct netlink_sock
*nlk
;
449 void (*bind
)(int group
);
452 sock
->state
= SS_UNCONNECTED
;
454 if (sock
->type
!= SOCK_RAW
&& sock
->type
!= SOCK_DGRAM
)
455 return -ESOCKTNOSUPPORT
;
457 if (protocol
< 0 || protocol
>= MAX_LINKS
)
458 return -EPROTONOSUPPORT
;
460 netlink_lock_table();
461 #ifdef CONFIG_MODULES
462 if (!nl_table
[protocol
].registered
) {
463 netlink_unlock_table();
464 request_module("net-pf-%d-proto-%d", PF_NETLINK
, protocol
);
465 netlink_lock_table();
468 if (nl_table
[protocol
].registered
&&
469 try_module_get(nl_table
[protocol
].module
))
470 module
= nl_table
[protocol
].module
;
472 err
= -EPROTONOSUPPORT
;
473 cb_mutex
= nl_table
[protocol
].cb_mutex
;
474 bind
= nl_table
[protocol
].bind
;
475 netlink_unlock_table();
480 err
= __netlink_create(net
, sock
, cb_mutex
, protocol
);
485 sock_prot_inuse_add(net
, &netlink_proto
, 1);
488 nlk
= nlk_sk(sock
->sk
);
489 nlk
->module
= module
;
490 nlk
->netlink_bind
= bind
;
499 static int netlink_release(struct socket
*sock
)
501 struct sock
*sk
= sock
->sk
;
502 struct netlink_sock
*nlk
;
512 * OK. Socket is unlinked, any packets that arrive now
517 wake_up_interruptible_all(&nlk
->wait
);
519 skb_queue_purge(&sk
->sk_write_queue
);
522 struct netlink_notify n
= {
524 .protocol
= sk
->sk_protocol
,
527 atomic_notifier_call_chain(&netlink_chain
,
528 NETLINK_URELEASE
, &n
);
531 module_put(nlk
->module
);
533 netlink_table_grab();
534 if (netlink_is_kernel(sk
)) {
535 BUG_ON(nl_table
[sk
->sk_protocol
].registered
== 0);
536 if (--nl_table
[sk
->sk_protocol
].registered
== 0) {
537 kfree(nl_table
[sk
->sk_protocol
].listeners
);
538 nl_table
[sk
->sk_protocol
].module
= NULL
;
539 nl_table
[sk
->sk_protocol
].registered
= 0;
541 } else if (nlk
->subscriptions
) {
542 netlink_update_listeners(sk
);
544 netlink_table_ungrab();
550 sock_prot_inuse_add(sock_net(sk
), &netlink_proto
, -1);
556 static int netlink_autobind(struct socket
*sock
)
558 struct sock
*sk
= sock
->sk
;
559 struct net
*net
= sock_net(sk
);
560 struct nl_pid_hash
*hash
= &nl_table
[sk
->sk_protocol
].hash
;
561 struct hlist_head
*head
;
563 struct hlist_node
*node
;
564 s32 pid
= task_tgid_vnr(current
);
566 static s32 rover
= -4097;
570 netlink_table_grab();
571 head
= nl_pid_hashfn(hash
, pid
);
572 sk_for_each(osk
, node
, head
) {
573 if (!net_eq(sock_net(osk
), net
))
575 if (nlk_sk(osk
)->pid
== pid
) {
576 /* Bind collision, search negative pid values. */
580 netlink_table_ungrab();
584 netlink_table_ungrab();
586 err
= netlink_insert(sk
, net
, pid
);
587 if (err
== -EADDRINUSE
)
590 /* If 2 threads race to autobind, that is fine. */
597 static inline int netlink_capable(const struct socket
*sock
, unsigned int flag
)
599 return (nl_table
[sock
->sk
->sk_protocol
].nl_nonroot
& flag
) ||
600 capable(CAP_NET_ADMIN
);
604 netlink_update_subscriptions(struct sock
*sk
, unsigned int subscriptions
)
606 struct netlink_sock
*nlk
= nlk_sk(sk
);
608 if (nlk
->subscriptions
&& !subscriptions
)
609 __sk_del_bind_node(sk
);
610 else if (!nlk
->subscriptions
&& subscriptions
)
611 sk_add_bind_node(sk
, &nl_table
[sk
->sk_protocol
].mc_list
);
612 nlk
->subscriptions
= subscriptions
;
615 static int netlink_realloc_groups(struct sock
*sk
)
617 struct netlink_sock
*nlk
= nlk_sk(sk
);
619 unsigned long *new_groups
;
622 netlink_table_grab();
624 groups
= nl_table
[sk
->sk_protocol
].groups
;
625 if (!nl_table
[sk
->sk_protocol
].registered
) {
630 if (nlk
->ngroups
>= groups
)
633 new_groups
= krealloc(nlk
->groups
, NLGRPSZ(groups
), GFP_ATOMIC
);
634 if (new_groups
== NULL
) {
638 memset((char *)new_groups
+ NLGRPSZ(nlk
->ngroups
), 0,
639 NLGRPSZ(groups
) - NLGRPSZ(nlk
->ngroups
));
641 nlk
->groups
= new_groups
;
642 nlk
->ngroups
= groups
;
644 netlink_table_ungrab();
648 static int netlink_bind(struct socket
*sock
, struct sockaddr
*addr
,
651 struct sock
*sk
= sock
->sk
;
652 struct net
*net
= sock_net(sk
);
653 struct netlink_sock
*nlk
= nlk_sk(sk
);
654 struct sockaddr_nl
*nladdr
= (struct sockaddr_nl
*)addr
;
657 if (nladdr
->nl_family
!= AF_NETLINK
)
	/* Only superuser is allowed to listen to multicasts */
661 if (nladdr
->nl_groups
) {
662 if (!netlink_capable(sock
, NL_NONROOT_RECV
))
664 err
= netlink_realloc_groups(sk
);
670 if (nladdr
->nl_pid
!= nlk
->pid
)
673 err
= nladdr
->nl_pid
?
674 netlink_insert(sk
, net
, nladdr
->nl_pid
) :
675 netlink_autobind(sock
);
680 if (!nladdr
->nl_groups
&& (nlk
->groups
== NULL
|| !(u32
)nlk
->groups
[0]))
683 netlink_table_grab();
684 netlink_update_subscriptions(sk
, nlk
->subscriptions
+
685 hweight32(nladdr
->nl_groups
) -
686 hweight32(nlk
->groups
[0]));
687 nlk
->groups
[0] = (nlk
->groups
[0] & ~0xffffffffUL
) | nladdr
->nl_groups
;
688 netlink_update_listeners(sk
);
689 netlink_table_ungrab();
691 if (nlk
->netlink_bind
&& nlk
->groups
[0]) {
694 for (i
=0; i
<nlk
->ngroups
; i
++) {
695 if (test_bit(i
, nlk
->groups
))
696 nlk
->netlink_bind(i
);
703 static int netlink_connect(struct socket
*sock
, struct sockaddr
*addr
,
707 struct sock
*sk
= sock
->sk
;
708 struct netlink_sock
*nlk
= nlk_sk(sk
);
709 struct sockaddr_nl
*nladdr
= (struct sockaddr_nl
*)addr
;
711 if (alen
< sizeof(addr
->sa_family
))
714 if (addr
->sa_family
== AF_UNSPEC
) {
715 sk
->sk_state
= NETLINK_UNCONNECTED
;
720 if (addr
->sa_family
!= AF_NETLINK
)
723 /* Only superuser is allowed to send multicasts */
724 if (nladdr
->nl_groups
&& !netlink_capable(sock
, NL_NONROOT_SEND
))
728 err
= netlink_autobind(sock
);
731 sk
->sk_state
= NETLINK_CONNECTED
;
732 nlk
->dst_pid
= nladdr
->nl_pid
;
733 nlk
->dst_group
= ffs(nladdr
->nl_groups
);
739 static int netlink_getname(struct socket
*sock
, struct sockaddr
*addr
,
740 int *addr_len
, int peer
)
742 struct sock
*sk
= sock
->sk
;
743 struct netlink_sock
*nlk
= nlk_sk(sk
);
744 DECLARE_SOCKADDR(struct sockaddr_nl
*, nladdr
, addr
);
746 nladdr
->nl_family
= AF_NETLINK
;
748 *addr_len
= sizeof(*nladdr
);
751 nladdr
->nl_pid
= nlk
->dst_pid
;
752 nladdr
->nl_groups
= netlink_group_mask(nlk
->dst_group
);
754 nladdr
->nl_pid
= nlk
->pid
;
755 nladdr
->nl_groups
= nlk
->groups
? nlk
->groups
[0] : 0;
760 static void netlink_overrun(struct sock
*sk
)
762 struct netlink_sock
*nlk
= nlk_sk(sk
);
764 if (!(nlk
->flags
& NETLINK_RECV_NO_ENOBUFS
)) {
765 if (!test_and_set_bit(0, &nlk_sk(sk
)->state
)) {
766 sk
->sk_err
= ENOBUFS
;
767 sk
->sk_error_report(sk
);
770 atomic_inc(&sk
->sk_drops
);
773 static struct sock
*netlink_getsockbypid(struct sock
*ssk
, u32 pid
)
776 struct netlink_sock
*nlk
;
778 sock
= netlink_lookup(sock_net(ssk
), ssk
->sk_protocol
, pid
);
780 return ERR_PTR(-ECONNREFUSED
);
782 /* Don't bother queuing skb if kernel socket has no input function */
784 if (sock
->sk_state
== NETLINK_CONNECTED
&&
785 nlk
->dst_pid
!= nlk_sk(ssk
)->pid
) {
787 return ERR_PTR(-ECONNREFUSED
);
792 struct sock
*netlink_getsockbyfilp(struct file
*filp
)
794 struct inode
*inode
= filp
->f_path
.dentry
->d_inode
;
797 if (!S_ISSOCK(inode
->i_mode
))
798 return ERR_PTR(-ENOTSOCK
);
800 sock
= SOCKET_I(inode
)->sk
;
801 if (sock
->sk_family
!= AF_NETLINK
)
802 return ERR_PTR(-EINVAL
);
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * < 0: error. skb freed, reference to sock dropped.
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
818 int netlink_attachskb(struct sock
*sk
, struct sk_buff
*skb
,
819 long *timeo
, struct sock
*ssk
)
821 struct netlink_sock
*nlk
;
825 if (atomic_read(&sk
->sk_rmem_alloc
) > sk
->sk_rcvbuf
||
826 test_bit(0, &nlk
->state
)) {
827 DECLARE_WAITQUEUE(wait
, current
);
829 if (!ssk
|| netlink_is_kernel(ssk
))
836 __set_current_state(TASK_INTERRUPTIBLE
);
837 add_wait_queue(&nlk
->wait
, &wait
);
839 if ((atomic_read(&sk
->sk_rmem_alloc
) > sk
->sk_rcvbuf
||
840 test_bit(0, &nlk
->state
)) &&
841 !sock_flag(sk
, SOCK_DEAD
))
842 *timeo
= schedule_timeout(*timeo
);
844 __set_current_state(TASK_RUNNING
);
845 remove_wait_queue(&nlk
->wait
, &wait
);
848 if (signal_pending(current
)) {
850 return sock_intr_errno(*timeo
);
854 skb_set_owner_r(skb
, sk
);
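/*
 * Illustrative caller sketch, not part of the original file: a return value
 * of 1 from netlink_attachskb() means the socket reference was dropped while
 * waiting for receive-queue space, so the caller must look the socket up
 * again before retrying (the name "dst_pid" is just the example's parameter):
 */
#if 0	/* example only */
static int netlink_attachskb_caller_example(struct sock *ssk, u32 dst_pid,
					    struct sk_buff *skb, long timeo)
{
	struct sock *sk;
	int err;

retry:
	sk = netlink_getsockbypid(ssk, dst_pid);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;		/* reference dropped, look up again */
	if (err)
		return err;		/* skb already freed, sock reference put */

	return netlink_sendskb(sk, skb);
}
#endif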
858 static int __netlink_sendskb(struct sock
*sk
, struct sk_buff
*skb
)
862 skb_queue_tail(&sk
->sk_receive_queue
, skb
);
863 sk
->sk_data_ready(sk
, len
);
867 int netlink_sendskb(struct sock
*sk
, struct sk_buff
*skb
)
869 int len
= __netlink_sendskb(sk
, skb
);
875 void netlink_detachskb(struct sock
*sk
, struct sk_buff
*skb
)
881 static struct sk_buff
*netlink_trim(struct sk_buff
*skb
, gfp_t allocation
)
887 delta
= skb
->end
- skb
->tail
;
888 if (delta
* 2 < skb
->truesize
)
891 if (skb_shared(skb
)) {
892 struct sk_buff
*nskb
= skb_clone(skb
, allocation
);
899 if (!pskb_expand_head(skb
, 0, -delta
, allocation
))
900 skb
->truesize
-= delta
;
905 static void netlink_rcv_wake(struct sock
*sk
)
907 struct netlink_sock
*nlk
= nlk_sk(sk
);
909 if (skb_queue_empty(&sk
->sk_receive_queue
))
910 clear_bit(0, &nlk
->state
);
911 if (!test_bit(0, &nlk
->state
))
912 wake_up_interruptible(&nlk
->wait
);
915 static int netlink_unicast_kernel(struct sock
*sk
, struct sk_buff
*skb
)
918 struct netlink_sock
*nlk
= nlk_sk(sk
);
921 if (nlk
->netlink_rcv
!= NULL
) {
923 skb_set_owner_r(skb
, sk
);
924 nlk
->netlink_rcv(skb
);
933 int netlink_unicast(struct sock
*ssk
, struct sk_buff
*skb
,
934 u32 pid
, int nonblock
)
940 skb
= netlink_trim(skb
, gfp_any());
942 timeo
= sock_sndtimeo(ssk
, nonblock
);
944 sk
= netlink_getsockbypid(ssk
, pid
);
949 if (netlink_is_kernel(sk
))
950 return netlink_unicast_kernel(sk
, skb
);
952 if (sk_filter(sk
, skb
)) {
959 err
= netlink_attachskb(sk
, skb
, &timeo
, ssk
);
965 return netlink_sendskb(sk
, skb
);
967 EXPORT_SYMBOL(netlink_unicast
);
969 int netlink_has_listeners(struct sock
*sk
, unsigned int group
)
972 struct listeners
*listeners
;
974 BUG_ON(!netlink_is_kernel(sk
));
977 listeners
= rcu_dereference(nl_table
[sk
->sk_protocol
].listeners
);
979 if (group
- 1 < nl_table
[sk
->sk_protocol
].groups
)
980 res
= test_bit(group
- 1, listeners
->masks
);
986 EXPORT_SYMBOL_GPL(netlink_has_listeners
);
988 static int netlink_broadcast_deliver(struct sock
*sk
, struct sk_buff
*skb
)
990 struct netlink_sock
*nlk
= nlk_sk(sk
);
992 if (atomic_read(&sk
->sk_rmem_alloc
) <= sk
->sk_rcvbuf
&&
993 !test_bit(0, &nlk
->state
)) {
994 skb_set_owner_r(skb
, sk
);
995 __netlink_sendskb(sk
, skb
);
996 return atomic_read(&sk
->sk_rmem_alloc
) > (sk
->sk_rcvbuf
>> 1);
1001 struct netlink_broadcast_data
{
1002 struct sock
*exclude_sk
;
1007 int delivery_failure
;
1011 struct sk_buff
*skb
, *skb2
;
1012 int (*tx_filter
)(struct sock
*dsk
, struct sk_buff
*skb
, void *data
);
1016 static int do_one_broadcast(struct sock
*sk
,
1017 struct netlink_broadcast_data
*p
)
1019 struct netlink_sock
*nlk
= nlk_sk(sk
);
1022 if (p
->exclude_sk
== sk
)
1025 if (nlk
->pid
== p
->pid
|| p
->group
- 1 >= nlk
->ngroups
||
1026 !test_bit(p
->group
- 1, nlk
->groups
))
1029 if (!net_eq(sock_net(sk
), p
->net
))
1033 netlink_overrun(sk
);
1038 if (p
->skb2
== NULL
) {
1039 if (skb_shared(p
->skb
)) {
1040 p
->skb2
= skb_clone(p
->skb
, p
->allocation
);
1042 p
->skb2
= skb_get(p
->skb
);
1044 * skb ownership may have been set when
1045 * delivered to a previous socket.
1047 skb_orphan(p
->skb2
);
1050 if (p
->skb2
== NULL
) {
1051 netlink_overrun(sk
);
1052 /* Clone failed. Notify ALL listeners. */
1054 if (nlk
->flags
& NETLINK_BROADCAST_SEND_ERROR
)
1055 p
->delivery_failure
= 1;
1056 } else if (p
->tx_filter
&& p
->tx_filter(sk
, p
->skb2
, p
->tx_data
)) {
1059 } else if (sk_filter(sk
, p
->skb2
)) {
1062 } else if ((val
= netlink_broadcast_deliver(sk
, p
->skb2
)) < 0) {
1063 netlink_overrun(sk
);
1064 if (nlk
->flags
& NETLINK_BROADCAST_SEND_ERROR
)
1065 p
->delivery_failure
= 1;
1067 p
->congested
|= val
;
1077 int netlink_broadcast_filtered(struct sock
*ssk
, struct sk_buff
*skb
, u32 pid
,
1078 u32 group
, gfp_t allocation
,
1079 int (*filter
)(struct sock
*dsk
, struct sk_buff
*skb
, void *data
),
1082 struct net
*net
= sock_net(ssk
);
1083 struct netlink_broadcast_data info
;
1084 struct hlist_node
*node
;
1087 skb
= netlink_trim(skb
, allocation
);
1089 info
.exclude_sk
= ssk
;
1094 info
.delivery_failure
= 0;
1097 info
.allocation
= allocation
;
1100 info
.tx_filter
= filter
;
1101 info
.tx_data
= filter_data
;
	/* While we sleep in clone, do not allow the socket list to change */
1105 netlink_lock_table();
1107 sk_for_each_bound(sk
, node
, &nl_table
[ssk
->sk_protocol
].mc_list
)
1108 do_one_broadcast(sk
, &info
);
1112 netlink_unlock_table();
1114 if (info
.delivery_failure
) {
1115 kfree_skb(info
.skb2
);
1118 consume_skb(info
.skb2
);
1120 if (info
.delivered
) {
1121 if (info
.congested
&& (allocation
& __GFP_WAIT
))
1127 EXPORT_SYMBOL(netlink_broadcast_filtered
);
1129 int netlink_broadcast(struct sock
*ssk
, struct sk_buff
*skb
, u32 pid
,
1130 u32 group
, gfp_t allocation
)
1132 return netlink_broadcast_filtered(ssk
, skb
, pid
, group
, allocation
,
1135 EXPORT_SYMBOL(netlink_broadcast
);
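/*
 * Illustrative sketch, not part of the original file: a typical kernel-side
 * notification first asks netlink_has_listeners() whether anyone is bound to
 * the group before building and broadcasting an event skb.  The helper
 * build_event_skb() below is hypothetical:
 */
#if 0	/* example only */
static void netlink_broadcast_example(struct sock *kernel_sk, u32 group)
{
	struct sk_buff *skb;

	if (!netlink_has_listeners(kernel_sk, group))
		return;			/* nobody subscribed, skip the work */

	skb = build_event_skb();	/* hypothetical helper */
	if (!skb)
		return;

	netlink_broadcast(kernel_sk, skb, 0, group, GFP_KERNEL);
}
#endif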
1137 struct netlink_set_err_data
{
1138 struct sock
*exclude_sk
;
1144 static int do_one_set_err(struct sock
*sk
, struct netlink_set_err_data
*p
)
1146 struct netlink_sock
*nlk
= nlk_sk(sk
);
1149 if (sk
== p
->exclude_sk
)
1152 if (!net_eq(sock_net(sk
), sock_net(p
->exclude_sk
)))
1155 if (nlk
->pid
== p
->pid
|| p
->group
- 1 >= nlk
->ngroups
||
1156 !test_bit(p
->group
- 1, nlk
->groups
))
1159 if (p
->code
== ENOBUFS
&& nlk
->flags
& NETLINK_RECV_NO_ENOBUFS
) {
1164 sk
->sk_err
= p
->code
;
1165 sk
->sk_error_report(sk
);
/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @pid: the PID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
1180 int netlink_set_err(struct sock
*ssk
, u32 pid
, u32 group
, int code
)
1182 struct netlink_set_err_data info
;
1183 struct hlist_node
*node
;
1187 info
.exclude_sk
= ssk
;
1190 /* sk->sk_err wants a positive error value */
1193 read_lock(&nl_table_lock
);
1195 sk_for_each_bound(sk
, node
, &nl_table
[ssk
->sk_protocol
].mc_list
)
1196 ret
+= do_one_set_err(sk
, &info
);
1198 read_unlock(&nl_table_lock
);
1201 EXPORT_SYMBOL(netlink_set_err
);
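/*
 * Illustrative only, not part of the original file: a broadcast path that
 * failed to deliver can tell the remaining listeners about it.  The error
 * code is passed negative here, as the kerneldoc above asks for:
 */
#if 0	/* example only */
static void netlink_set_err_example(struct sock *kernel_sk, u32 group)
{
	/* skip no particular pid (0), report ENOBUFS to the whole group */
	netlink_set_err(kernel_sk, 0, group, -ENOBUFS);
}
#endif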
1203 /* must be called with netlink table grabbed */
1204 static void netlink_update_socket_mc(struct netlink_sock
*nlk
,
1208 int old
, new = !!is_new
, subscriptions
;
1210 old
= test_bit(group
- 1, nlk
->groups
);
1211 subscriptions
= nlk
->subscriptions
- old
+ new;
1213 __set_bit(group
- 1, nlk
->groups
);
1215 __clear_bit(group
- 1, nlk
->groups
);
1216 netlink_update_subscriptions(&nlk
->sk
, subscriptions
);
1217 netlink_update_listeners(&nlk
->sk
);
1220 static int netlink_setsockopt(struct socket
*sock
, int level
, int optname
,
1221 char __user
*optval
, unsigned int optlen
)
1223 struct sock
*sk
= sock
->sk
;
1224 struct netlink_sock
*nlk
= nlk_sk(sk
);
1225 unsigned int val
= 0;
1228 if (level
!= SOL_NETLINK
)
1229 return -ENOPROTOOPT
;
1231 if (optlen
>= sizeof(int) &&
1232 get_user(val
, (unsigned int __user
*)optval
))
1236 case NETLINK_PKTINFO
:
1238 nlk
->flags
|= NETLINK_RECV_PKTINFO
;
1240 nlk
->flags
&= ~NETLINK_RECV_PKTINFO
;
1243 case NETLINK_ADD_MEMBERSHIP
:
1244 case NETLINK_DROP_MEMBERSHIP
: {
1245 if (!netlink_capable(sock
, NL_NONROOT_RECV
))
1247 err
= netlink_realloc_groups(sk
);
1250 if (!val
|| val
- 1 >= nlk
->ngroups
)
1252 netlink_table_grab();
1253 netlink_update_socket_mc(nlk
, val
,
1254 optname
== NETLINK_ADD_MEMBERSHIP
);
1255 netlink_table_ungrab();
1257 if (nlk
->netlink_bind
)
1258 nlk
->netlink_bind(val
);
1263 case NETLINK_BROADCAST_ERROR
:
1265 nlk
->flags
|= NETLINK_BROADCAST_SEND_ERROR
;
1267 nlk
->flags
&= ~NETLINK_BROADCAST_SEND_ERROR
;
1270 case NETLINK_NO_ENOBUFS
:
1272 nlk
->flags
|= NETLINK_RECV_NO_ENOBUFS
;
1273 clear_bit(0, &nlk
->state
);
1274 wake_up_interruptible(&nlk
->wait
);
1276 nlk
->flags
&= ~NETLINK_RECV_NO_ENOBUFS
;
1286 static int netlink_getsockopt(struct socket
*sock
, int level
, int optname
,
1287 char __user
*optval
, int __user
*optlen
)
1289 struct sock
*sk
= sock
->sk
;
1290 struct netlink_sock
*nlk
= nlk_sk(sk
);
1293 if (level
!= SOL_NETLINK
)
1294 return -ENOPROTOOPT
;
1296 if (get_user(len
, optlen
))
1302 case NETLINK_PKTINFO
:
1303 if (len
< sizeof(int))
1306 val
= nlk
->flags
& NETLINK_RECV_PKTINFO
? 1 : 0;
1307 if (put_user(len
, optlen
) ||
1308 put_user(val
, optval
))
1312 case NETLINK_BROADCAST_ERROR
:
1313 if (len
< sizeof(int))
1316 val
= nlk
->flags
& NETLINK_BROADCAST_SEND_ERROR
? 1 : 0;
1317 if (put_user(len
, optlen
) ||
1318 put_user(val
, optval
))
1322 case NETLINK_NO_ENOBUFS
:
1323 if (len
< sizeof(int))
1326 val
= nlk
->flags
& NETLINK_RECV_NO_ENOBUFS
? 1 : 0;
1327 if (put_user(len
, optlen
) ||
1328 put_user(val
, optval
))
1338 static void netlink_cmsg_recv_pktinfo(struct msghdr
*msg
, struct sk_buff
*skb
)
1340 struct nl_pktinfo info
;
1342 info
.group
= NETLINK_CB(skb
).dst_group
;
1343 put_cmsg(msg
, SOL_NETLINK
, NETLINK_PKTINFO
, sizeof(info
), &info
);
1346 static int netlink_sendmsg(struct kiocb
*kiocb
, struct socket
*sock
,
1347 struct msghdr
*msg
, size_t len
)
1349 struct sock_iocb
*siocb
= kiocb_to_siocb(kiocb
);
1350 struct sock
*sk
= sock
->sk
;
1351 struct netlink_sock
*nlk
= nlk_sk(sk
);
1352 struct sockaddr_nl
*addr
= msg
->msg_name
;
1355 struct sk_buff
*skb
;
1357 struct scm_cookie scm
;
1359 if (msg
->msg_flags
&MSG_OOB
)
1362 if (NULL
== siocb
->scm
)
1365 err
= scm_send(sock
, msg
, siocb
->scm
, true);
1369 if (msg
->msg_namelen
) {
1371 if (addr
->nl_family
!= AF_NETLINK
)
1373 dst_pid
= addr
->nl_pid
;
1374 dst_group
= ffs(addr
->nl_groups
);
1376 if ((dst_group
|| dst_pid
) &&
1377 !netlink_capable(sock
, NL_NONROOT_SEND
))
1380 dst_pid
= nlk
->dst_pid
;
1381 dst_group
= nlk
->dst_group
;
1385 err
= netlink_autobind(sock
);
1391 if (len
> sk
->sk_sndbuf
- 32)
1394 skb
= alloc_skb(len
, GFP_KERNEL
);
1398 NETLINK_CB(skb
).pid
= nlk
->pid
;
1399 NETLINK_CB(skb
).dst_group
= dst_group
;
1400 memcpy(NETLINK_CREDS(skb
), &siocb
->scm
->creds
, sizeof(struct ucred
));
1403 if (memcpy_fromiovec(skb_put(skb
, len
), msg
->msg_iov
, len
)) {
1408 err
= security_netlink_send(sk
, skb
);
1415 atomic_inc(&skb
->users
);
1416 netlink_broadcast(sk
, skb
, dst_pid
, dst_group
, GFP_KERNEL
);
1418 err
= netlink_unicast(sk
, skb
, dst_pid
, msg
->msg_flags
&MSG_DONTWAIT
);
1421 scm_destroy(siocb
->scm
);
1425 static int netlink_recvmsg(struct kiocb
*kiocb
, struct socket
*sock
,
1426 struct msghdr
*msg
, size_t len
,
1429 struct sock_iocb
*siocb
= kiocb_to_siocb(kiocb
);
1430 struct scm_cookie scm
;
1431 struct sock
*sk
= sock
->sk
;
1432 struct netlink_sock
*nlk
= nlk_sk(sk
);
1433 int noblock
= flags
&MSG_DONTWAIT
;
1435 struct sk_buff
*skb
, *data_skb
;
1443 skb
= skb_recv_datagram(sk
, flags
, noblock
, &err
);
1449 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
1450 if (unlikely(skb_shinfo(skb
)->frag_list
)) {
		/*
		 * If this skb has a frag_list, then here that means that we
		 * will have to use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
1461 if (flags
& MSG_CMSG_COMPAT
)
1462 data_skb
= skb_shinfo(skb
)->frag_list
;
1466 msg
->msg_namelen
= 0;
1468 copied
= data_skb
->len
;
1470 msg
->msg_flags
|= MSG_TRUNC
;
1474 skb_reset_transport_header(data_skb
);
1475 err
= skb_copy_datagram_iovec(data_skb
, 0, msg
->msg_iov
, copied
);
1477 if (msg
->msg_name
) {
1478 struct sockaddr_nl
*addr
= (struct sockaddr_nl
*)msg
->msg_name
;
1479 addr
->nl_family
= AF_NETLINK
;
1481 addr
->nl_pid
= NETLINK_CB(skb
).pid
;
1482 addr
->nl_groups
= netlink_group_mask(NETLINK_CB(skb
).dst_group
);
1483 msg
->msg_namelen
= sizeof(*addr
);
1486 if (nlk
->flags
& NETLINK_RECV_PKTINFO
)
1487 netlink_cmsg_recv_pktinfo(msg
, skb
);
1489 if (NULL
== siocb
->scm
) {
1490 memset(&scm
, 0, sizeof(scm
));
1493 siocb
->scm
->creds
= *NETLINK_CREDS(skb
);
1494 if (flags
& MSG_TRUNC
)
1495 copied
= data_skb
->len
;
1497 skb_free_datagram(sk
, skb
);
1499 if (nlk
->cb
&& atomic_read(&sk
->sk_rmem_alloc
) <= sk
->sk_rcvbuf
/ 2) {
1500 ret
= netlink_dump(sk
);
1503 sk
->sk_error_report(sk
);
1507 scm_recv(sock
, msg
, siocb
->scm
, flags
);
1509 netlink_rcv_wake(sk
);
1510 return err
? : copied
;
1513 static void netlink_data_ready(struct sock
*sk
, int len
)
1519 * We export these functions to other modules. They provide a
1520 * complete set of kernel non-blocking support for message
1525 netlink_kernel_create(struct net
*net
, int unit
,
1526 struct module
*module
,
1527 struct netlink_kernel_cfg
*cfg
)
1529 struct socket
*sock
;
1531 struct netlink_sock
*nlk
;
1532 struct listeners
*listeners
= NULL
;
1533 struct mutex
*cb_mutex
= cfg
? cfg
->cb_mutex
: NULL
;
1534 unsigned int groups
;
1538 if (unit
< 0 || unit
>= MAX_LINKS
)
1541 if (sock_create_lite(PF_NETLINK
, SOCK_DGRAM
, unit
, &sock
))
1545 * We have to just have a reference on the net from sk, but don't
1546 * get_net it. Besides, we cannot get and then put the net here.
1547 * So we create one inside init_net and the move it to net.
1550 if (__netlink_create(&init_net
, sock
, cb_mutex
, unit
) < 0)
1551 goto out_sock_release_nosk
;
1554 sk_change_net(sk
, net
);
1556 if (!cfg
|| cfg
->groups
< 32)
1559 groups
= cfg
->groups
;
1561 listeners
= kzalloc(sizeof(*listeners
) + NLGRPSZ(groups
), GFP_KERNEL
);
1563 goto out_sock_release
;
1565 sk
->sk_data_ready
= netlink_data_ready
;
1566 if (cfg
&& cfg
->input
)
1567 nlk_sk(sk
)->netlink_rcv
= cfg
->input
;
1569 if (netlink_insert(sk
, net
, 0))
1570 goto out_sock_release
;
1573 nlk
->flags
|= NETLINK_KERNEL_SOCKET
;
1575 netlink_table_grab();
1576 if (!nl_table
[unit
].registered
) {
1577 nl_table
[unit
].groups
= groups
;
1578 rcu_assign_pointer(nl_table
[unit
].listeners
, listeners
);
1579 nl_table
[unit
].cb_mutex
= cb_mutex
;
1580 nl_table
[unit
].module
= module
;
1581 nl_table
[unit
].bind
= cfg
? cfg
->bind
: NULL
;
1582 nl_table
[unit
].registered
= 1;
1585 nl_table
[unit
].registered
++;
1587 netlink_table_ungrab();
1592 netlink_kernel_release(sk
);
1595 out_sock_release_nosk
:
1599 EXPORT_SYMBOL(netlink_kernel_create
);
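/*
 * Illustrative sketch, not part of the original file: a subsystem creates its
 * kernel-side netlink socket by filling in a struct netlink_kernel_cfg.  The
 * NETLINK_USERSOCK unit and the receive callback name are assumptions made
 * for the example:
 */
#if 0	/* example only */
static void my_nl_rcv(struct sk_buff *skb)	/* hypothetical input handler */
{
	/* process the netlink messages queued in skb */
}

static struct sock *netlink_kernel_create_example(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,
		.input	= my_nl_rcv,
	};

	return netlink_kernel_create(net, NETLINK_USERSOCK, THIS_MODULE, &cfg);
}
#endif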
1603 netlink_kernel_release(struct sock
*sk
)
1605 sk_release_kernel(sk
);
1607 EXPORT_SYMBOL(netlink_kernel_release
);
1609 int __netlink_change_ngroups(struct sock
*sk
, unsigned int groups
)
1611 struct listeners
*new, *old
;
1612 struct netlink_table
*tbl
= &nl_table
[sk
->sk_protocol
];
1617 if (NLGRPSZ(tbl
->groups
) < NLGRPSZ(groups
)) {
1618 new = kzalloc(sizeof(*new) + NLGRPSZ(groups
), GFP_ATOMIC
);
1621 old
= rcu_dereference_protected(tbl
->listeners
, 1);
1622 memcpy(new->masks
, old
->masks
, NLGRPSZ(tbl
->groups
));
1623 rcu_assign_pointer(tbl
->listeners
, new);
1625 kfree_rcu(old
, rcu
);
1627 tbl
->groups
= groups
;
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
1644 int netlink_change_ngroups(struct sock
*sk
, unsigned int groups
)
1648 netlink_table_grab();
1649 err
= __netlink_change_ngroups(sk
, groups
);
1650 netlink_table_ungrab();
1655 void __netlink_clear_multicast_users(struct sock
*ksk
, unsigned int group
)
1658 struct hlist_node
*node
;
1659 struct netlink_table
*tbl
= &nl_table
[ksk
->sk_protocol
];
1661 sk_for_each_bound(sk
, node
, &tbl
->mc_list
)
1662 netlink_update_socket_mc(nlk_sk(sk
), group
, 0);
/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
1673 void netlink_clear_multicast_users(struct sock
*ksk
, unsigned int group
)
1675 netlink_table_grab();
1676 __netlink_clear_multicast_users(ksk
, group
);
1677 netlink_table_ungrab();
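/*
 * Illustrative only, not part of the original file: a family that retires a
 * multicast group typically kicks the remaining subscribers first and may
 * then resize its group range (compare how generic netlink does this):
 */
#if 0	/* example only */
static int netlink_group_teardown_example(struct sock *kernel_sk,
					  unsigned int group,
					  unsigned int new_ngroups)
{
	netlink_clear_multicast_users(kernel_sk, group);
	return netlink_change_ngroups(kernel_sk, new_ngroups);
}
#endif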
1680 void netlink_set_nonroot(int protocol
, unsigned int flags
)
1682 if ((unsigned int)protocol
< MAX_LINKS
)
1683 nl_table
[protocol
].nl_nonroot
= flags
;
1685 EXPORT_SYMBOL(netlink_set_nonroot
);
1688 __nlmsg_put(struct sk_buff
*skb
, u32 pid
, u32 seq
, int type
, int len
, int flags
)
1690 struct nlmsghdr
*nlh
;
1691 int size
= NLMSG_LENGTH(len
);
1693 nlh
= (struct nlmsghdr
*)skb_put(skb
, NLMSG_ALIGN(size
));
1694 nlh
->nlmsg_type
= type
;
1695 nlh
->nlmsg_len
= size
;
1696 nlh
->nlmsg_flags
= flags
;
1697 nlh
->nlmsg_pid
= pid
;
1698 nlh
->nlmsg_seq
= seq
;
1699 if (!__builtin_constant_p(size
) || NLMSG_ALIGN(size
) - size
!= 0)
1700 memset(NLMSG_DATA(nlh
) + len
, 0, NLMSG_ALIGN(size
) - size
);
1703 EXPORT_SYMBOL(__nlmsg_put
);
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */
1710 static int netlink_dump(struct sock
*sk
)
1712 struct netlink_sock
*nlk
= nlk_sk(sk
);
1713 struct netlink_callback
*cb
;
1714 struct sk_buff
*skb
= NULL
;
1715 struct nlmsghdr
*nlh
;
1716 int len
, err
= -ENOBUFS
;
1719 mutex_lock(nlk
->cb_mutex
);
1727 alloc_size
= max_t(int, cb
->min_dump_alloc
, NLMSG_GOODSIZE
);
1729 skb
= sock_rmalloc(sk
, alloc_size
, 0, GFP_KERNEL
);
1733 len
= cb
->dump(skb
, cb
);
1736 mutex_unlock(nlk
->cb_mutex
);
1738 if (sk_filter(sk
, skb
))
1741 __netlink_sendskb(sk
, skb
);
1745 nlh
= nlmsg_put_answer(skb
, cb
, NLMSG_DONE
, sizeof(len
), NLM_F_MULTI
);
1749 nl_dump_check_consistent(cb
, nlh
);
1751 memcpy(nlmsg_data(nlh
), &len
, sizeof(len
));
1753 if (sk_filter(sk
, skb
))
1756 __netlink_sendskb(sk
, skb
);
1761 mutex_unlock(nlk
->cb_mutex
);
1763 netlink_consume_callback(cb
);
1767 mutex_unlock(nlk
->cb_mutex
);
1772 int netlink_dump_start(struct sock
*ssk
, struct sk_buff
*skb
,
1773 const struct nlmsghdr
*nlh
,
1774 struct netlink_dump_control
*control
)
1776 struct netlink_callback
*cb
;
1778 struct netlink_sock
*nlk
;
1781 cb
= kzalloc(sizeof(*cb
), GFP_KERNEL
);
1785 cb
->dump
= control
->dump
;
1786 cb
->done
= control
->done
;
1788 cb
->data
= control
->data
;
1789 cb
->min_dump_alloc
= control
->min_dump_alloc
;
1790 atomic_inc(&skb
->users
);
1793 sk
= netlink_lookup(sock_net(ssk
), ssk
->sk_protocol
, NETLINK_CB(skb
).pid
);
1795 netlink_destroy_callback(cb
);
1796 return -ECONNREFUSED
;
1799 /* A dump is in progress... */
1800 mutex_lock(nlk
->cb_mutex
);
1802 mutex_unlock(nlk
->cb_mutex
);
1803 netlink_destroy_callback(cb
);
1808 mutex_unlock(nlk
->cb_mutex
);
1810 ret
= netlink_dump(sk
);
	/* We successfully started a dump; by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
1822 EXPORT_SYMBOL(netlink_dump_start
);
1824 void netlink_ack(struct sk_buff
*in_skb
, struct nlmsghdr
*nlh
, int err
)
1826 struct sk_buff
*skb
;
1827 struct nlmsghdr
*rep
;
1828 struct nlmsgerr
*errmsg
;
1829 size_t payload
= sizeof(*errmsg
);
	/* error messages get the original request appended */
1833 payload
+= nlmsg_len(nlh
);
1835 skb
= nlmsg_new(payload
, GFP_KERNEL
);
1839 sk
= netlink_lookup(sock_net(in_skb
->sk
),
1840 in_skb
->sk
->sk_protocol
,
1841 NETLINK_CB(in_skb
).pid
);
1843 sk
->sk_err
= ENOBUFS
;
1844 sk
->sk_error_report(sk
);
1850 rep
= __nlmsg_put(skb
, NETLINK_CB(in_skb
).pid
, nlh
->nlmsg_seq
,
1851 NLMSG_ERROR
, payload
, 0);
1852 errmsg
= nlmsg_data(rep
);
1853 errmsg
->error
= err
;
1854 memcpy(&errmsg
->msg
, nlh
, err
? nlh
->nlmsg_len
: sizeof(*nlh
));
1855 netlink_unicast(in_skb
->sk
, skb
, NETLINK_CB(in_skb
).pid
, MSG_DONTWAIT
);
1857 EXPORT_SYMBOL(netlink_ack
);
1859 int netlink_rcv_skb(struct sk_buff
*skb
, int (*cb
)(struct sk_buff
*,
1862 struct nlmsghdr
*nlh
;
1865 while (skb
->len
>= nlmsg_total_size(0)) {
1868 nlh
= nlmsg_hdr(skb
);
1871 if (nlh
->nlmsg_len
< NLMSG_HDRLEN
|| skb
->len
< nlh
->nlmsg_len
)
1874 /* Only requests are handled by the kernel */
1875 if (!(nlh
->nlmsg_flags
& NLM_F_REQUEST
))
1878 /* Skip control messages */
1879 if (nlh
->nlmsg_type
< NLMSG_MIN_TYPE
)
1887 if (nlh
->nlmsg_flags
& NLM_F_ACK
|| err
)
1888 netlink_ack(skb
, nlh
, err
);
1891 msglen
= NLMSG_ALIGN(nlh
->nlmsg_len
);
1892 if (msglen
> skb
->len
)
1894 skb_pull(skb
, msglen
);
1899 EXPORT_SYMBOL(netlink_rcv_skb
);
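/*
 * Illustrative sketch, not part of the original file: a kernel socket's input
 * callback usually just feeds every queued skb to netlink_rcv_skb() together
 * with a per-message handler (the handler name here is hypothetical):
 */
#if 0	/* example only */
static int my_msg_handler(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* dispatch on nlh->nlmsg_type; return 0 or a negative errno */
	return 0;
}

static void my_netlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &my_msg_handler);
}
#endif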
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
1910 int nlmsg_notify(struct sock
*sk
, struct sk_buff
*skb
, u32 pid
,
1911 unsigned int group
, int report
, gfp_t flags
)
1916 int exclude_pid
= 0;
1919 atomic_inc(&skb
->users
);
	/* errors reported via destination sk->sk_err, but propagate
	 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
1925 err
= nlmsg_multicast(sk
, skb
, exclude_pid
, group
, flags
);
1931 err2
= nlmsg_unicast(sk
, skb
, pid
);
1932 if (!err
|| err
== -ESRCH
)
1938 EXPORT_SYMBOL(nlmsg_notify
);
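/*
 * Illustrative only, not part of the original file: a typical notification
 * path reports to both an explicit requester (pid) and a multicast group;
 * whether to unicast back ("report") is the caller's decision:
 */
#if 0	/* example only */
static int nlmsg_notify_example(struct sock *kernel_sk, struct sk_buff *skb,
				u32 requester_pid, unsigned int group,
				int report)
{
	return nlmsg_notify(kernel_sk, skb, requester_pid, group,
			    report, GFP_KERNEL);
}
#endif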
1940 #ifdef CONFIG_PROC_FS
1941 struct nl_seq_iter
{
1942 struct seq_net_private p
;
1947 static struct sock
*netlink_seq_socket_idx(struct seq_file
*seq
, loff_t pos
)
1949 struct nl_seq_iter
*iter
= seq
->private;
1952 struct hlist_node
*node
;
1955 for (i
= 0; i
< MAX_LINKS
; i
++) {
1956 struct nl_pid_hash
*hash
= &nl_table
[i
].hash
;
1958 for (j
= 0; j
<= hash
->mask
; j
++) {
1959 sk_for_each(s
, node
, &hash
->table
[j
]) {
1960 if (sock_net(s
) != seq_file_net(seq
))
1974 static void *netlink_seq_start(struct seq_file
*seq
, loff_t
*pos
)
1975 __acquires(nl_table_lock
)
1977 read_lock(&nl_table_lock
);
1978 return *pos
? netlink_seq_socket_idx(seq
, *pos
- 1) : SEQ_START_TOKEN
;
1981 static void *netlink_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
1984 struct nl_seq_iter
*iter
;
1989 if (v
== SEQ_START_TOKEN
)
1990 return netlink_seq_socket_idx(seq
, 0);
1992 iter
= seq
->private;
1996 } while (s
&& sock_net(s
) != seq_file_net(seq
));
2001 j
= iter
->hash_idx
+ 1;
2004 struct nl_pid_hash
*hash
= &nl_table
[i
].hash
;
2006 for (; j
<= hash
->mask
; j
++) {
2007 s
= sk_head(&hash
->table
[j
]);
2008 while (s
&& sock_net(s
) != seq_file_net(seq
))
2018 } while (++i
< MAX_LINKS
);
2023 static void netlink_seq_stop(struct seq_file
*seq
, void *v
)
2024 __releases(nl_table_lock
)
2026 read_unlock(&nl_table_lock
);
2030 static int netlink_seq_show(struct seq_file
*seq
, void *v
)
2032 if (v
== SEQ_START_TOKEN
) {
2034 "sk Eth Pid Groups "
2035 "Rmem Wmem Dump Locks Drops Inode\n");
2038 struct netlink_sock
*nlk
= nlk_sk(s
);
2040 seq_printf(seq
, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
2044 nlk
->groups
? (u32
)nlk
->groups
[0] : 0,
2045 sk_rmem_alloc_get(s
),
2046 sk_wmem_alloc_get(s
),
2048 atomic_read(&s
->sk_refcnt
),
2049 atomic_read(&s
->sk_drops
),
2057 static const struct seq_operations netlink_seq_ops
= {
2058 .start
= netlink_seq_start
,
2059 .next
= netlink_seq_next
,
2060 .stop
= netlink_seq_stop
,
2061 .show
= netlink_seq_show
,
2065 static int netlink_seq_open(struct inode
*inode
, struct file
*file
)
2067 return seq_open_net(inode
, file
, &netlink_seq_ops
,
2068 sizeof(struct nl_seq_iter
));
2071 static const struct file_operations netlink_seq_fops
= {
2072 .owner
= THIS_MODULE
,
2073 .open
= netlink_seq_open
,
2075 .llseek
= seq_lseek
,
2076 .release
= seq_release_net
,
2081 int netlink_register_notifier(struct notifier_block
*nb
)
2083 return atomic_notifier_chain_register(&netlink_chain
, nb
);
2085 EXPORT_SYMBOL(netlink_register_notifier
);
2087 int netlink_unregister_notifier(struct notifier_block
*nb
)
2089 return atomic_notifier_chain_unregister(&netlink_chain
, nb
);
2091 EXPORT_SYMBOL(netlink_unregister_notifier
);
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
2120 static int __net_init
netlink_net_init(struct net
*net
)
2122 #ifdef CONFIG_PROC_FS
2123 if (!proc_net_fops_create(net
, "netlink", 0, &netlink_seq_fops
))
2129 static void __net_exit
netlink_net_exit(struct net
*net
)
2131 #ifdef CONFIG_PROC_FS
2132 proc_net_remove(net
, "netlink");
2136 static void __init
netlink_add_usersock_entry(void)
2138 struct listeners
*listeners
;
2141 listeners
= kzalloc(sizeof(*listeners
) + NLGRPSZ(groups
), GFP_KERNEL
);
2143 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
2145 netlink_table_grab();
2147 nl_table
[NETLINK_USERSOCK
].groups
= groups
;
2148 rcu_assign_pointer(nl_table
[NETLINK_USERSOCK
].listeners
, listeners
);
2149 nl_table
[NETLINK_USERSOCK
].module
= THIS_MODULE
;
2150 nl_table
[NETLINK_USERSOCK
].registered
= 1;
2151 nl_table
[NETLINK_USERSOCK
].nl_nonroot
= NL_NONROOT_SEND
;
2153 netlink_table_ungrab();
2156 static struct pernet_operations __net_initdata netlink_net_ops
= {
2157 .init
= netlink_net_init
,
2158 .exit
= netlink_net_exit
,
2161 static int __init
netlink_proto_init(void)
2163 struct sk_buff
*dummy_skb
;
2165 unsigned long limit
;
2167 int err
= proto_register(&netlink_proto
, 0);
2172 BUILD_BUG_ON(sizeof(struct netlink_skb_parms
) > sizeof(dummy_skb
->cb
));
2174 nl_table
= kcalloc(MAX_LINKS
, sizeof(*nl_table
), GFP_KERNEL
);
2178 if (totalram_pages
>= (128 * 1024))
2179 limit
= totalram_pages
>> (21 - PAGE_SHIFT
);
2181 limit
= totalram_pages
>> (23 - PAGE_SHIFT
);
2183 order
= get_bitmask_order(limit
) - 1 + PAGE_SHIFT
;
2184 limit
= (1UL << order
) / sizeof(struct hlist_head
);
2185 order
= get_bitmask_order(min(limit
, (unsigned long)UINT_MAX
)) - 1;
2187 for (i
= 0; i
< MAX_LINKS
; i
++) {
2188 struct nl_pid_hash
*hash
= &nl_table
[i
].hash
;
2190 hash
->table
= nl_pid_hash_zalloc(1 * sizeof(*hash
->table
));
2193 nl_pid_hash_free(nl_table
[i
].hash
.table
,
2194 1 * sizeof(*hash
->table
));
2198 hash
->max_shift
= order
;
2201 hash
->rehash_time
= jiffies
;
2204 netlink_add_usersock_entry();
2206 sock_register(&netlink_family_ops
);
2207 register_pernet_subsys(&netlink_net_ops
);
2208 /* The netlink device handler may be needed early. */
2213 panic("netlink_init: Cannot allocate nl_table\n");
2216 core_initcall(netlink_proto_init
);