/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@redhat.com>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/selinux.h>
#include <linux/mutex.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
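/*
 * Worked example (illustrative, not from the original source): with 64-bit
 * longs, NLGRPSZ(32) = ALIGN(32, 64) / 8 = 8 bytes and NLGRPLONGS(32) = 1,
 * i.e. a 32-group bitmap fits in a single unsigned long; NLGRPSZ(65) rounds
 * up to 16 bytes, so NLGRPLONGS(65) = 2 longs are needed.
 */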
struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	u32			dst_pid;
	u32			dst_group;
	u32			flags;
	u32			subscriptions;
	u32			ngroups;
	unsigned long		*groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	struct mutex		*cb_mutex;
	struct mutex		cb_def_mutex;
	void			(*netlink_rcv)(struct sk_buff *skb);
	struct module		*module;
};

#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return container_of(sk, struct netlink_sock, sk);
}

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct nl_pid_hash {
	struct hlist_head	*table;
	unsigned long		rehash_time;

	unsigned int		mask;
	unsigned int		shift;

	unsigned int		entries;
	unsigned int		max_shift;

	u32			rnd;
};

struct netlink_table {
	struct nl_pid_hash	hash;
	struct hlist_head	mc_list;
	unsigned long		*listeners;
	unsigned int		nl_nonroot;
	unsigned int		groups;
	struct mutex		*cb_mutex;
	struct module		*module;
	int			registered;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}
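/*
 * Illustrative note (not in the original): netlink multicast group numbers
 * are 1-based, so group 1 maps to bit 0 (mask 0x00000001), group 3 to mask
 * 0x00000004, and group 0 means "no group" and yields an empty mask.
 */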
static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
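/*
 * Illustrative note (not in the original): the bucket index is the Jenkins
 * hash of the pid, salted with the per-table random value and masked down to
 * the table size; e.g. with hash->mask == 15 every pid lands in one of 16
 * buckets, and changing hash->rnd on rehash reshuffles the mapping.
 */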
static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}

	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->groups);
}

/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
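/*
 * Illustrative sketch (not part of the original file): writers that modify
 * nl_table pair netlink_table_grab()/netlink_table_ungrab(), while paths
 * that only need the table to stay stable while they may sleep pair
 * netlink_lock_table()/netlink_unlock_table():
 *
 *	netlink_table_grab();
 *	nl_table[protocol].registered = 1;
 *	netlink_table_ungrab();
 *
 *	netlink_lock_table();
 *	... walk mc_list, possibly sleeping in skb_clone() ...
 *	netlink_unlock_table();
 */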
static __inline__ struct sock *netlink_lookup(struct net *net, int protocol,
					      u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if ((sk->sk_net == net) && (nlk_sk(sk)->pid == pid)) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}

static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		tbl->listeners[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if ((osk->sk_net == net) && (nlk_sk(osk)->pid == pid))
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex)
		nlk->cb_mutex = cb_mutex;
	else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	cb_mutex = nl_table[protocol].cb_mutex;
	netlink_unlock_table();

	if ((err = __netlink_create(net, sock, cb_mutex, protocol)) < 0)
		goto out_module;

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->subscriptions) {
		struct netlink_notify n = {
						.net = sk->sk_net,
						.protocol = sk->sk_protocol,
						.pid = nlk->pid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (netlink_is_kernel(sk)) {
		kfree(nl_table[sk->sk_protocol].listeners);
		nl_table[sk->sk_protocol].module = NULL;
		nl_table[sk->sk_protocol].registered = 0;
	} else if (nlk->subscriptions)
		netlink_update_listeners(sk);
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sk->sk_net;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->tgid;
	int err;
	static s32 rover = -4097;

retry:
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if ((osk->sk_net != net))
			continue;
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, net, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
 out_unlock:
	netlink_table_ungrab();
	return err;
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sk->sk_net;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(ssk->sk_net, ssk->sk_protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_pid != nlk_sk(ssk)->pid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		skb_set_owner_r(skb, sk);
		nlk->netlink_rcv(skb);
	}
	kfree_skb(skb);
	sock_put(sk);
	return ret;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb);

	err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	unsigned long *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

static __inline__ int netlink_broadcast_deliver(struct sock *sk,
						struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if ((sk->sk_net != p->net))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	struct net *net = ssk->sk_net;
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (sk->sk_net != p->exclude_sk->sk_net)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		err = 0;
		break;
	}
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
*msg
, struct sk_buff
*skb
)
1173 struct nl_pktinfo info
;
1175 info
.group
= NETLINK_CB(skb
).dst_group
;
1176 put_cmsg(msg
, SOL_NETLINK
, NETLINK_PKTINFO
, sizeof(info
), &info
);
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid	= nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
	selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so that
	   we will have to save current capabilities to
	   check them, when this message will be delivered
	   to corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad	= 0;
		addr->nl_pid	= NETLINK_CB(skb).pid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
	BUG();
}
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
netlink_kernel_create(struct net *net, int unit, unsigned int groups,
		      void (*input)(struct sk_buff *skb),
		      struct mutex *cb_mutex, struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	unsigned long *listeners = NULL;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(net, sock, cb_mutex, unit) < 0)
		goto out_sock_release;

	if (groups < 32)
		groups = 32;

	listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->netlink_rcv = input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		nl_table[unit].listeners = listeners;
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
	}
	netlink_table_ungrab();

	return sk;

out_sock_release:
	kfree(listeners);
	sock_release(sock);
	return NULL;
}
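/*
 * Illustrative sketch (assumed, not from this file): a subsystem would
 * register its kernel-side socket roughly like this, with my_unit and
 * my_nl_rcv() standing in for the real protocol number and handler:
 *
 *	static void my_nl_rcv(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_nl_rcv_msg);
 *	}
 *
 *	my_nl_sk = netlink_kernel_create(&init_net, my_unit, 0,
 *					 my_nl_rcv, NULL, THIS_MODULE);
 *	if (my_nl_sk == NULL)
 *		return -ENOMEM;
 */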
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	unsigned long *listeners, *old = NULL;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	int err = 0;

	if (groups < 32)
		groups = 32;

	netlink_table_grab();
	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
		if (!listeners) {
			err = -ENOMEM;
			goto out_ungrab;
		}
		old = tbl->listeners;
		memcpy(listeners, old, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, listeners);
	}
	tbl->groups = groups;

 out_ungrab:
	netlink_table_ungrab();
	synchronize_rcu();
	kfree(old);
	return err;
}
EXPORT_SYMBOL(netlink_change_ngroups);
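/*
 * Illustrative sketch (assumed, not from this file): a caller such as the
 * generic netlink family code would grow the group space when a new
 * multicast group is registered, e.g.
 *
 *	err = netlink_change_ngroups(genl_sock, new_total_groups);
 *	if (err)
 *		return err;
 *
 * where genl_sock is the kernel socket previously obtained from
 * netlink_kernel_create() and new_total_groups is the new group count.
 */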
/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct hlist_node *node;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	netlink_table_grab();

	sk_for_each_bound(sk, node, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);

	netlink_table_ungrab();
}
EXPORT_SYMBOL(netlink_clear_multicast_users);

void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}
/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		goto errout;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	netlink_destroy_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
errout:
	return err;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb,
				   struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_net, ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	mutex_lock(nlk->cb_mutex);
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	netlink_dump(sk);
	sock_put(sk);

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;
}
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_net,
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_pid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_pid = pid;
		}

		/* errors reported via destination sk->sk_err */
		nlmsg_multicast(sk, skb, exclude_pid, group, flags);
	}

	if (report)
		err = nlmsg_unicast(sk, skb, pid);

	return err;
}
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct net *net;
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (iter->net != s->sk_net)
					continue;
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	iter = seq->private;
	s = v;
	do {
		s = sk_next(s);
	} while (s && (iter->net != s->sk_net));
	if (s)
		return s;

	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			while (s && (iter->net != s->sk_net))
				s = sk_next(s);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);
	}
	return 0;
}

static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct nl_seq_iter *iter;

	iter = __seq_open_private(file, &netlink_seq_ops, sizeof(*iter));
	if (!iter)
		return -ENOMEM;

	iter->net = get_proc_net(inode);
	if (!iter->net) {
		seq_release_private(inode, file);
		return -ENXIO;
	}

	return 0;
}

static int netlink_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct nl_seq_iter *iter = seq->private;
	put_net(iter->net);
	return seq_release_private(inode, file);
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= netlink_seq_release,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}

static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "netlink");
#endif
}

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};

static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long limit;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (num_physpages >= (128 * 1024))
		limit = num_physpages >> (21 - PAGE_SHIFT);
	else
		limit = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
	limit = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_rcv_skb);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);
EXPORT_SYMBOL(nlmsg_notify);