/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@redhat.com>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <net/sock.h>
#include <net/scm.h>
#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
#define NL_EMULATE_DEV
#endif

struct netlink_opt
{
	u32			pid;
	unsigned		groups;
	u32			dst_pid;
	unsigned		dst_groups;
	unsigned long		state;
	int			(*handler)(int unit, struct sk_buff *skb);
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	spinlock_t		cb_lock;
	void			(*data_ready)(struct sock *sk, int bytes);
};

#define nlk_sk(__sk) ((struct netlink_opt *)(__sk)->sk_protinfo)
static struct hlist_head nl_table[MAX_LINKS];
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static unsigned nl_nonroot[MAX_LINKS];

#ifdef NL_EMULATE_DEV
static struct socket *netlink_kernel[MAX_LINKS];
#endif

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

atomic_t netlink_sock_nr;

static rwlock_t nl_table_lock = RW_LOCK_UNLOCKED;
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;
static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->cb);

	kfree(nlk_sk(sk));

	atomic_dec(&netlink_sock_nr);
#ifdef NETLINK_REFCNT_DEBUG
	printk(KERN_DEBUG "NETLINK %p released, %d are still alive\n", sk,
	       atomic_read(&netlink_sock_nr));
#endif
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and the reader wakes them up, all but
 * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
 * solves this, _but_ remember that it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
	write_lock_bh(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_bh(&nl_table_lock);
			schedule();
			write_lock_bh(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}
static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_bh(&nl_table_lock);
	wake_up(&nl_table_wait);
}
static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}
static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
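/*
 * Usage sketch (illustrative, not part of the original file): writers that
 * modify nl_table bracket the update with netlink_table_grab()/ungrab(),
 * which excludes readers and other writers; sleepable readers such as
 * netlink_broadcast() pin the list with the usage count instead:
 *
 *	netlink_table_grab();
 *	sk_add_node(sk, &nl_table[protocol]);
 *	netlink_table_ungrab();
 *
 *	netlink_lock_table();
 *	... walk nl_table[], possibly sleeping in skb_clone() ...
 *	netlink_unlock_table();
 */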
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	sk_for_each(sk, node, &nl_table[protocol]) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static struct proto_ops netlink_ops;
static int netlink_insert(struct sock *sk, u32 pid)
{
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;

	netlink_table_grab();
	sk_for_each(osk, node, &nl_table[sk->sk_protocol]) {
		if (nlk_sk(osk)->pid == pid)
			break;
	}
	if (!node) {
		err = -EBUSY;
		if (nlk_sk(sk)->pid == 0) {
			nlk_sk(sk)->pid = pid;
			sk_add_node(sk, &nl_table[sk->sk_protocol]);
			err = 0;
		}
	}
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	sk_del_node_init(sk);
	netlink_table_ungrab();
}
static int netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct netlink_opt *nlk;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, 1, NULL);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk_set_owner(sk, THIS_MODULE);

	nlk = sk->sk_protinfo = kmalloc(sizeof(*nlk), GFP_KERNEL);
	if (!nlk) {
		sk_free(sk);
		return -ENOMEM;
	}
	memset(nlk, 0, sizeof(*nlk));

	spin_lock_init(&nlk->cb_lock);
	init_waitqueue_head(&nlk->wait);
	sk->sk_destruct = netlink_sock_destruct;
	atomic_inc(&netlink_sock_nr);

	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	nlk = nlk_sk(sk);

	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
		nlk->cb = NULL;
		__sock_put(sk);
	}
	spin_unlock(&nlk->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->groups) {
		struct netlink_notify n = {
						.protocol = sk->sk_protocol,
						.pid = nlk->pid,
					  };
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}

	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->pid;
	int err;
	static s32 rover = -4097;

retry:
	netlink_table_grab();
	sk_for_each(osk, node, &nl_table[sk->sk_protocol]) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;
	nlk_sk(sk)->groups = 0;
	return 0;
}
static inline int netlink_capable(struct socket *sock, unsigned flag)
{
	return (nl_nonroot[sock->sk->sk_protocol] & flag) ||
	       capable(CAP_NET_ADMIN);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
		return -EPERM;

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
		nlk->groups = nladdr->nl_groups;
		return 0;
	}

	if (nladdr->nl_pid == 0) {
		err = netlink_autobind(sock);
		if (err == 0)
			nlk->groups = nladdr->nl_groups;
		return err;
	}

	err = netlink_insert(sk, nladdr->nl_pid);
	if (err == 0)
		nlk->groups = nladdr->nl_groups;
	return err;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_groups = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_groups = nladdr->nl_groups;
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = nlk->dst_groups;
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	/* Bit 0 of nlk->state marks the socket as congested. */
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}
struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_opt *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct socket *socket;
	struct sock *sock;

	if (!inode->i_sock || !(socket = SOCKET_I(inode)))
		return ERR_PTR(-ENOTSOCK);

	sock = socket->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach an skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 *
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue.
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
	struct netlink_opt *nlk;

	nlk = nlk_sk(sk);

#ifdef NL_EMULATE_DEV
	if (nlk->handler)
		return 0;
#endif
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!nlk->pid)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
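/*
 * Illustrative caller (a sketch that mirrors netlink_unicast() below):
 * the "return 1" case has dropped the socket reference, so the lookup
 * must be redone before the attach is retried.
 *
 *	retry:
 *		sk = netlink_getsockbypid(ssk, pid);
 *		if (IS_ERR(sk)) {
 *			kfree_skb(skb);
 *			return PTR_ERR(sk);
 *		}
 *		err = netlink_attachskb(sk, skb, nonblock, timeo);
 *		if (err == 1)
 *			goto retry;
 *		if (err)
 *			return err;
 */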
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	struct netlink_opt *nlk;
	int len = skb->len;

	nlk = nlk_sk(sk);
#ifdef NL_EMULATE_DEV
	if (nlk->handler) {
		skb_orphan(skb);
		len = nlk->handler(protocol, skb);
		sock_put(sk);
		return len;
	}
#endif
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static inline void netlink_trim(struct sk_buff *skb, int allocation)
{
	int delta = skb->end - skb->tail;

	/* If the packet is charged to a socket, the modification
	 * of truesize below is illegal and will corrupt socket
	 * buffer accounting state.
	 */
	BUG_ON(skb->list != NULL);

	if (delta * 2 < skb->truesize)
		return;
	if (pskb_expand_head(skb, 0, -delta, allocation))
		return;
	skb->truesize -= delta;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
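/*
 * Example (hypothetical kernel-side caller, for illustration only):
 * replying to the sender of a request, with the destination pid taken
 * from the request's control block.
 *
 *	static void example_reply(struct sock *nlsk, struct sk_buff *req,
 *				  struct sk_buff *reply)
 *	{
 *		netlink_unicast(nlsk, reply, NETLINK_CB(req).pid,
 *				MSG_DONTWAIT);
 *	}
 */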
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_opt *nlk = nlk_sk(sk);
#ifdef NL_EMULATE_DEV
	if (nlk->handler) {
		skb_orphan(skb);
		nlk->handler(sk->sk_protocol, skb);
		return 0;
	} else
#endif
	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_orphan(skb);
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return 0;
	}
	return -1;
}
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, int allocation)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb2 = NULL;
	int protocol = ssk->sk_protocol;
	int failure = 0, delivered = 0;

	netlink_trim(skb, allocation);

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each(sk, node, &nl_table[protocol]) {
		struct netlink_opt *nlk = nlk_sk(sk);

		if (ssk == sk)
			continue;

		if (nlk->pid == pid || !(nlk->groups & group))
			continue;

		if (failure) {
			netlink_overrun(sk);
			continue;
		}

		sock_hold(sk);
		if (skb2 == NULL) {
			if (atomic_read(&skb->users) != 1) {
				skb2 = skb_clone(skb, allocation);
			} else {
				skb2 = skb;
				atomic_inc(&skb->users);
			}
		}
		if (skb2 == NULL) {
			/* Clone failed. Notify ALL listeners. */
			netlink_overrun(sk);
			failure = 1;
		} else if (netlink_broadcast_deliver(sk, skb2)) {
			netlink_overrun(sk);
		} else {
			delivered = 1;
			skb2 = NULL;
		}
		sock_put(sk);
	}

	netlink_unlock_table();

	if (skb2)
		kfree_skb(skb2);
	kfree_skb(skb);

	if (delivered)
		return 0;
	if (failure)
		return -ENOBUFS;
	return -ESRCH;
}
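/*
 * Example (illustrative): rtnetlink-style event delivery to every member
 * of a multicast group. The pid argument names a socket to skip (none
 * here), and the group bitmask selects the listeners.
 *
 *	netlink_broadcast(nlsk, skb, 0, RTMGRP_LINK, GFP_KERNEL);
 */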
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct sock *sk;
	struct hlist_node *node;
	int protocol = ssk->sk_protocol;

	read_lock(&nl_table_lock);
	sk_for_each(sk, node, &nl_table[protocol]) {
		struct netlink_opt *nlk = nlk_sk(sk);

		if (ssk == sk)
			continue;

		if (nlk->pid == pid || !(nlk->groups & group))
			continue;

		sk->sk_err = code;
		sk->sk_error_report(sk);
	}
	read_unlock(&nl_table_lock);
}
static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_opt *nlk = nlk_sk(sk);

	if (!skb_queue_len(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_groups;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_groups = addr->nl_groups;
		if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_groups = nlk->dst_groups;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid	= nlk->pid;
	NETLINK_CB(skb).groups	= nlk->groups;
	NETLINK_CB(skb).dst_pid = dst_pid;
	NETLINK_CB(skb).dst_groups = dst_groups;
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so we have to save the
	   current capabilities and check them when this message is
	   delivered to the corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_groups) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
	return err;
}
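/*
 * Userspace counterpart (illustrative sketch): a sendmsg() that lands in
 * netlink_sendmsg() above, addressed to the kernel (nl_pid 0), assuming
 * nlh points at a prepared struct nlmsghdr.
 *
 *	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
 *	struct iovec iov = { nlh, nlh->nlmsg_len };
 *	struct msghdr msg = {
 *		.msg_name	= &dst,
 *		.msg_namelen	= sizeof(dst),
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *	};
 *
 *	sendmsg(fd, &msg, 0);
 */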
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb->h.raw = skb->data;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid	= NETLINK_CB(skb).pid;
		addr->nl_groups	= NETLINK_CB(skb).dst_groups;
		msg->msg_namelen = sizeof(*addr);
	}

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);

out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_opt *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
{
	struct socket *sock;
	struct sock *sk;

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (netlink_create(sock, unit) < 0) {
		sock_release(sock);
		return NULL;
	}
	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	netlink_insert(sk, 0);
	return sk;
}
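/*
 * Typical use (illustrative sketch of how a subsystem such as rtnetlink
 * sets up its kernel socket): the input callback drains
 * sk->sk_receive_queue and processes the queued messages.
 *
 *	static void example_input(struct sock *sk, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 *			... parse the nlmsghdr(s), maybe netlink_ack() ...
 *			kfree_skb(skb);
 *		}
 *	}
 *
 *	nlsk = netlink_kernel_create(NETLINK_ROUTE, example_input);
 */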
void netlink_set_nonroot(int protocol, unsigned flags)
{
	if ((unsigned)protocol < MAX_LINKS)
		nl_nonroot[protocol] = flags;
}
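/*
 * Example (illustrative): a protocol that lets unprivileged processes
 * send, while multicast listening stays restricted to CAP_NET_ADMIN:
 *
 *	netlink_set_nonroot(NETLINK_USERSOCK, NL_NONROOT_SEND);
 *
 * After this, netlink_capable() passes NL_NONROOT_SEND checks for any
 * user of that protocol.
 */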
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}
/*
 * It looks a bit ugly; it would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_opt *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	spin_lock(&nlk->cb_lock);

	cb = nlk->cb;
	if (cb == NULL) {
		spin_unlock(&nlk->cb_lock);
		kfree_skb(skb);
		return -EINVAL;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		spin_unlock(&nlk->cb_lock);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
			  NLMSG_DONE, sizeof(int));
	nlh->nlmsg_flags |= NLM_F_MULTI;
	memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	cb->done(cb);
	nlk->cb = NULL;
	spin_unlock(&nlk->cb_lock);

	netlink_destroy_callback(cb);
	__sock_put(sk);
	return 0;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_opt *nlk;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	memset(cb, 0, sizeof(*cb));
	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		spin_unlock(&nlk->cb_lock);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	spin_unlock(&nlk->cb_lock);

	netlink_dump(sk);
	return 0;
}
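/*
 * Illustrative registration (a sketch of how message handlers start a
 * dump; the handler names are hypothetical): cb->dump is called
 * repeatedly and returns the number of bytes filled in while there is
 * more data, then 0 once the dump is complete.
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP)
 *		return netlink_dump_start(nlsk, skb, nlh,
 *					  example_dump, example_done);
 */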
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	int size;

	if (err == 0)
		size = NLMSG_SPACE(sizeof(struct nlmsgerr));
	else
		size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr));
	errmsg = NLMSG_DATA(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
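/*
 * Typical call site (illustrative; example_rcv_msg is hypothetical):
 * a receive loop acks every message that failed or that asked for an
 * acknowledgement. err == 0 yields an ACK; err < 0 echoes the failing
 * message back behind the nlmsgerr header.
 *
 *	err = example_rcv_msg(skb, nlh);
 *	if (err || (nlh->nlmsg_flags & NLM_F_ACK))
 *		netlink_ack(skb, nlh, err);
 */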
#ifdef NL_EMULATE_DEV

static rwlock_t nl_emu_lock = RW_LOCK_UNLOCKED;

/*
 *	Backward compatibility.
 */

int netlink_attach(int unit, int (*function)(int, struct sk_buff *skb))
{
	struct sock *sk = netlink_kernel_create(unit, NULL);
	if (sk == NULL)
		return -ENOBUFS;
	nlk_sk(sk)->handler = function;
	write_lock_bh(&nl_emu_lock);
	netlink_kernel[unit] = sk->sk_socket;
	write_unlock_bh(&nl_emu_lock);
	return 0;
}

void netlink_detach(int unit)
{
	struct socket *sock;

	write_lock_bh(&nl_emu_lock);
	sock = netlink_kernel[unit];
	netlink_kernel[unit] = NULL;
	write_unlock_bh(&nl_emu_lock);

	sock_release(sock);
}

int netlink_post(int unit, struct sk_buff *skb)
{
	struct socket *sock;

	read_lock(&nl_emu_lock);
	sock = netlink_kernel[unit];
	if (sock) {
		struct sock *sk = sock->sk;
		memset(skb->cb, 0, sizeof(skb->cb));
		sock_hold(sk);
		read_unlock(&nl_emu_lock);

		netlink_broadcast(sk, skb, 0, ~0, GFP_ATOMIC);

		sock_put(sk);
		return 0;
	}
	read_unlock(&nl_emu_lock);
	return -EUNATCH;
}

#endif
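/*
 * Compatibility usage (illustrative; example_handler is hypothetical):
 *
 *	netlink_attach(NETLINK_USERSOCK, example_handler);
 *	...
 *	netlink_post(NETLINK_USERSOCK, skb);
 *	netlink_detach(NETLINK_USERSOCK);
 *
 * where example_handler(unit, skb) consumes skbs written by userspace
 * and netlink_post() broadcasts to all groups of that unit.
 */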
#ifdef CONFIG_PROC_FS
static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	long i;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		sk_for_each(s, node, &nl_table[i]) {
			if (off == pos) {
				seq->private = (void *) i;
				return s;
			}
			++off;
		}
	}
	return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (!s) {
		long i = (long)seq->private;

		while (++i < MAX_LINKS) {
			s = sk_head(&nl_table[i]);
			if (s) {
				seq->private = (void *) i;
				break;
			}
		}
	}
	return s;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}


static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_opt *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);

	}
	return 0;
}

static struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};


static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &netlink_seq_ops);
}

static struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#endif
*nb
)
1177 return notifier_chain_register(&netlink_chain
, nb
);
1180 int netlink_unregister_notifier(struct notifier_block
*nb
)
1182 return notifier_chain_unregister(&netlink_chain
, nb
);
static struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;

	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) {
		printk(KERN_CRIT "netlink_init: panic\n");
		return -1;
	}
	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
	return 0;
}
static void __exit netlink_proto_exit(void)
{
	sock_unregister(PF_NETLINK);
	proc_net_remove("netlink");
}
core_initcall(netlink_proto_init);
module_exit(netlink_proto_exit);

MODULE_LICENSE("GPL");

MODULE_ALIAS_NETPROTO(PF_NETLINK);
EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);

#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
EXPORT_SYMBOL(netlink_attach);
EXPORT_SYMBOL(netlink_detach);
EXPORT_SYMBOL(netlink_post);
#endif
);