/*
 * NETLINK	Generic Netlink Family
 *
 *		Authors:	Jamal Hadi Salim
 *				Thomas Graf <tgraf@suug.ch>
 *				Johannes Berg <johannes@sipsolutions.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/rwsem.h>
#include <net/sock.h>
#include <net/genetlink.h>

static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);

atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);

void genl_lock(void)
{
	mutex_lock(&genl_mutex);
}
EXPORT_SYMBOL(genl_lock);

void genl_unlock(void)
{
	mutex_unlock(&genl_mutex);
}
EXPORT_SYMBOL(genl_unlock);

#ifdef CONFIG_LOCKDEP
bool lockdep_genl_is_held(void)
{
	return lockdep_is_held(&genl_mutex);
}
EXPORT_SYMBOL(lockdep_genl_is_held);
#endif

static void genl_lock_all(void)
{
	down_write(&cb_lock);
	genl_lock();
}

static void genl_unlock_all(void)
{
	genl_unlock();
	up_write(&cb_lock);
}

#define GENL_FAM_TAB_SIZE	16
#define GENL_FAM_TAB_MASK	(GENL_FAM_TAB_SIZE - 1)

static struct list_head family_ht[GENL_FAM_TAB_SIZE];
/*
 * Bitmap of multicast groups that are currently in use.
 *
 * To avoid an allocation at boot of just one unsigned long,
 * declare it global instead.
 * Bit 0 is marked as already used since group 0 is invalid.
 * Bit 1 is marked as already used since the drop-monitor code
 * abuses the API and thinks it can statically use group 1.
 * That group will typically conflict with other groups that
 * any proper users use.
 * Bit 16 is marked as used since it's used for generic netlink
 * and the code no longer marks pre-reserved IDs as used.
 * Bit 17 is marked as already used since the VFS quota code
 * also abused this API and relied on family == group ID, we
 * cater to that by giving it a static family and group ID.
 * Bit 18 is marked as already used since the PMCRAID driver
 * did the same thing as the VFS quota code (maybe copied?)
 */
static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
				      BIT(GENL_ID_VFS_DQUOT) |
				      BIT(GENL_ID_PMCRAID);
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1;
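
/*
 * Illustrative note (not part of the original source): with the uapi values
 * GENL_ID_CTRL == 0x10, GENL_ID_VFS_DQUOT == 0x11 and GENL_ID_PMCRAID == 0x12
 * from include/uapi/linux/genetlink.h, the initial bitmap word works out to
 *
 *	0x3 | BIT(16) | BIT(17) | BIT(18) == 0x00070003
 *
 * i.e. groups 0, 1, 16, 17 and 18 start out reserved, so the first group id
 * genl_allocate_reserve_groups() below can hand to an ordinary family is 2.
 */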

static int genl_ctrl_event(int event, struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id);

static inline unsigned int genl_family_hash(unsigned int id)
{
	return id & GENL_FAM_TAB_MASK;
}

static inline struct list_head *genl_family_chain(unsigned int id)
{
	return &family_ht[genl_family_hash(id)];
}

static struct genl_family *genl_family_find_byid(unsigned int id)
{
	struct genl_family *f;

	list_for_each_entry(f, genl_family_chain(id), family_list)
		if (f->id == id)
			return f;

	return NULL;
}

static struct genl_family *genl_family_find_byname(char *name)
{
	struct genl_family *f;
	int i;

	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
		list_for_each_entry(f, genl_family_chain(i), family_list)
			if (strcmp(f->name, name) == 0)
				return f;

	return NULL;
}

static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
{
	int i;

	for (i = 0; i < family->n_ops; i++)
		if (family->ops[i].cmd == cmd)
			return &family->ops[i];

	return NULL;
}

/* Of course we are going to have problems once we hit
 * 2^16 alive types, but that can only happen by year 2K
 */
static u16 genl_generate_id(void)
{
	static u16 id_gen_idx = GENL_MIN_ID;
	int i;

	for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
		if (id_gen_idx != GENL_ID_VFS_DQUOT &&
		    id_gen_idx != GENL_ID_PMCRAID &&
		    !genl_family_find_byid(id_gen_idx))
			return id_gen_idx;
		if (++id_gen_idx > GENL_MAX_ID)
			id_gen_idx = GENL_MIN_ID;
	}

	return 0;
}

static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
	unsigned long *new_groups;
	int start = 0;
	int i;
	int id;
	bool fits;

	do {
		if (start == 0)
			id = find_first_zero_bit(mc_groups,
						 mc_groups_longs *
						 BITS_PER_LONG);
		else
			id = find_next_zero_bit(mc_groups,
						mc_groups_longs * BITS_PER_LONG,
						start);

		fits = true;
		for (i = id;
		     i < min_t(int, id + n_groups,
			       mc_groups_longs * BITS_PER_LONG);
		     i++) {
			if (test_bit(i, mc_groups)) {
				start = i;
				fits = false;
				break;
			}
		}

		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
			unsigned long new_longs = mc_groups_longs +
						  BITS_TO_LONGS(n_groups);
			size_t nlen = new_longs * sizeof(unsigned long);

			if (mc_groups == &mc_group_start) {
				new_groups = kzalloc(nlen, GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				*mc_groups = mc_group_start;
			} else {
				new_groups = krealloc(mc_groups, nlen,
						      GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
					mc_groups[mc_groups_longs + i] = 0;
			}
			mc_groups_longs = new_longs;
		}
	} while (!fits);

	for (i = id; i < id + n_groups; i++)
		set_bit(i, mc_groups);
	*first_id = id;
	return 0;
}

static struct genl_family genl_ctrl;

static int genl_validate_assign_mc_groups(struct genl_family *family)
{
	int first_id;
	int n_groups = family->n_mcgrps;
	int err = 0, i;
	bool groups_allocated = false;

	if (!n_groups)
		return 0;

	for (i = 0; i < n_groups; i++) {
		const struct genl_multicast_group *grp = &family->mcgrps[i];

		if (WARN_ON(grp->name[0] == '\0'))
			return -EINVAL;
		if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
			return -EINVAL;
	}

	/* special-case our own group and hacks */
	if (family == &genl_ctrl) {
		first_id = GENL_ID_CTRL;
		BUG_ON(n_groups != 1);
	} else if (strcmp(family->name, "NET_DM") == 0) {
		first_id = 1;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_VFS_DQUOT) {
		first_id = GENL_ID_VFS_DQUOT;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_PMCRAID) {
		first_id = GENL_ID_PMCRAID;
		BUG_ON(n_groups != 1);
	} else {
		groups_allocated = true;
		err = genl_allocate_reserve_groups(n_groups, &first_id);
		if (err)
			return err;
	}

	family->mcgrp_offset = first_id;

	/* if still initializing, can't and don't need to realloc bitmaps */
	if (!init_net.genl_sock)
		return 0;

	if (family->netnsok) {
		struct net *net;

		netlink_table_grab();
		rcu_read_lock();
		for_each_net_rcu(net) {
			err = __netlink_change_ngroups(net->genl_sock,
					mc_groups_longs * BITS_PER_LONG);
			if (err) {
				/*
				 * No need to roll back, can only fail if
				 * memory allocation fails and then the
				 * number of _possible_ groups has been
				 * increased on some sockets which is ok.
				 */
				break;
			}
		}
		rcu_read_unlock();
		netlink_table_ungrab();
	} else {
		err = netlink_change_ngroups(init_net.genl_sock,
					     mc_groups_longs * BITS_PER_LONG);
	}

	if (groups_allocated && err) {
		for (i = 0; i < family->n_mcgrps; i++)
			clear_bit(family->mcgrp_offset + i, mc_groups);
	}

	return err;
}

static void genl_unregister_mc_groups(struct genl_family *family)
{
	struct net *net;
	int i;

	netlink_table_grab();
	rcu_read_lock();
	for_each_net_rcu(net) {
		for (i = 0; i < family->n_mcgrps; i++)
			__netlink_clear_multicast_users(
				net->genl_sock, family->mcgrp_offset + i);
	}
	rcu_read_unlock();
	netlink_table_ungrab();

	for (i = 0; i < family->n_mcgrps; i++) {
		int grp_id = family->mcgrp_offset + i;

		if (grp_id != 1)
			clear_bit(grp_id, mc_groups);
		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
				&family->mcgrps[i], grp_id);
	}
}

static int genl_validate_ops(const struct genl_family *family)
{
	const struct genl_ops *ops = family->ops;
	unsigned int n_ops = family->n_ops;
	int i, j;

	if (WARN_ON(n_ops && !ops))
		return -EINVAL;

	if (!n_ops)
		return 0;

	for (i = 0; i < n_ops; i++) {
		if (ops[i].dumpit == NULL && ops[i].doit == NULL)
			return -EINVAL;
		for (j = i + 1; j < n_ops; j++)
			if (ops[i].cmd == ops[j].cmd)
				return -EINVAL;
	}

	return 0;
}

/**
 * __genl_register_family - register a generic netlink family
 * @family: generic netlink family
 *
 * Registers the specified family after validating it first. Only one
 * family may be registered with the same family name or identifier.
 * The family id may equal GENL_ID_GENERATE causing a unique id to
 * be automatically generated and assigned.
 *
 * The family's ops array must already be assigned, you can use the
 * genl_register_family_with_ops() helper function.
 *
 * Return 0 on success or a negative error code.
 */
int __genl_register_family(struct genl_family *family)
{
	int err = -EINVAL, i;

	if (family->id && family->id < GENL_MIN_ID)
		goto errout;

	if (family->id > GENL_MAX_ID)
		goto errout;

	err = genl_validate_ops(family);
	if (err)
		return err;

	genl_lock_all();

	if (genl_family_find_byname(family->name)) {
		err = -EEXIST;
		goto errout_locked;
	}

	if (family->id == GENL_ID_GENERATE) {
		u16 newid = genl_generate_id();

		if (!newid) {
			err = -ENOMEM;
			goto errout_locked;
		}

		family->id = newid;
	} else if (genl_family_find_byid(family->id)) {
		err = -EEXIST;
		goto errout_locked;
	}

	if (family->maxattr && !family->parallel_ops) {
		family->attrbuf = kmalloc((family->maxattr+1) *
					  sizeof(struct nlattr *), GFP_KERNEL);
		if (family->attrbuf == NULL) {
			err = -ENOMEM;
			goto errout_locked;
		}
	} else
		family->attrbuf = NULL;

	err = genl_validate_assign_mc_groups(family);
	if (err)
		goto errout_locked;

	list_add_tail(&family->family_list, genl_family_chain(family->id));
	genl_unlock_all();

	/* send all events */
	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
	for (i = 0; i < family->n_mcgrps; i++)
		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
				&family->mcgrps[i], family->mcgrp_offset + i);

	return 0;

errout_locked:
	genl_unlock_all();
errout:
	return err;
}
EXPORT_SYMBOL(__genl_register_family);
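
/*
 * Usage sketch (illustrative only, not part of this file): a minimal,
 * hypothetical family "foo" with one command, registered through the
 * genl_register_family_with_ops() wrapper mentioned in the kernel-doc above.
 * Names such as foo_doit, FOO_CMD_GET, FOO_ATTR_MSG and FOO_ATTR_MAX are
 * made up for the example.
 *
 *	static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
 *		[FOO_ATTR_MSG] = { .type = NLA_NUL_STRING },
 *	};
 *
 *	static int foo_doit(struct sk_buff *skb, struct genl_info *info);
 *
 *	static const struct genl_ops foo_ops[] = {
 *		{
 *			.cmd	= FOO_CMD_GET,
 *			.doit	= foo_doit,
 *			.policy	= foo_policy,
 *		},
 *	};
 *
 *	static struct genl_family foo_family = {
 *		.id		= GENL_ID_GENERATE,
 *		.name		= "foo",
 *		.version	= 1,
 *		.maxattr	= FOO_ATTR_MAX,
 *	};
 *
 *	err = genl_register_family_with_ops(&foo_family, foo_ops);
 */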

/**
 * genl_unregister_family - unregister generic netlink family
 * @family: generic netlink family
 *
 * Unregisters the specified family.
 *
 * Returns 0 on success or a negative error code.
 */
int genl_unregister_family(struct genl_family *family)
{
	struct genl_family *rc;

	genl_lock_all();

	list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
		if (family->id != rc->id || strcmp(rc->name, family->name))
			continue;

		genl_unregister_mc_groups(family);

		list_del(&rc->family_list);
		family->n_ops = 0;
		up_write(&cb_lock);
		wait_event(genl_sk_destructing_waitq,
			   atomic_read(&genl_sk_destructing_cnt) == 0);
		genl_unlock();

		kfree(family->attrbuf);
		genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
		return 0;
	}

	genl_unlock_all();

	return -ENOENT;
}
EXPORT_SYMBOL(genl_unregister_family);
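
/*
 * Usage sketch (illustrative only): the counterpart to the registration
 * example above, typically called from a module's exit path.  foo_family is
 * the hypothetical family from that example.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		genl_unregister_family(&foo_family);
 *	}
 */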

/**
 * genlmsg_new_unicast - Allocate generic netlink message for unicast
 * @payload: size of the message payload
 * @info: information on destination
 * @flags: the type of memory to allocate
 *
 * Allocates a new sk_buff large enough to cover the specified payload
 * plus required Netlink headers. Will check receiving socket for
 * memory mapped i/o capability and use it if enabled. Will fall back
 * to non-mapped skb if message size exceeds the frame size of the ring.
 */
struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
				    gfp_t flags)
{
	size_t len = nlmsg_total_size(genlmsg_total_size(payload));

	return netlink_alloc_skb(info->dst_sk, len, info->snd_portid, flags);
}
EXPORT_SYMBOL_GPL(genlmsg_new_unicast);
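
/*
 * Usage sketch (illustrative only): allocating a unicast reply from inside a
 * hypothetical doit handler.  genlmsg_new_unicast() sizes the skb for the
 * payload plus the netlink/genetlink headers and, on this kernel, may pick a
 * memory-mapped frame for the destination socket described by @info.
 *
 *	static int foo_doit(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		struct sk_buff *msg;
 *
 *		msg = genlmsg_new_unicast(NLMSG_DEFAULT_SIZE, info, GFP_KERNEL);
 *		if (!msg)
 *			return -ENOMEM;
 *
 *		(fill the message, e.g. with genlmsg_put() and nla_put_string(),
 *		 then genlmsg_end(); see the sketch after genlmsg_put() below)
 *
 *		return genlmsg_reply(msg, info);
 *	}
 */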

/**
 * genlmsg_put - Add generic netlink header to netlink message
 * @skb: socket buffer holding the message
 * @portid: netlink portid the message is addressed to
 * @seq: sequence number (usually the one of the sender)
 * @family: generic netlink family
 * @flags: netlink message flags
 * @cmd: generic netlink command
 *
 * Returns pointer to user specific header
 */
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
		  struct genl_family *family, int flags, u8 cmd)
{
	struct nlmsghdr *nlh;
	struct genlmsghdr *hdr;

	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
			family->hdrsize, flags);
	if (nlh == NULL)
		return NULL;

	hdr = nlmsg_data(nlh);
	hdr->cmd = cmd;
	hdr->version = family->version;
	hdr->reserved = 0;

	return (char *) hdr + GENL_HDRLEN;
}
EXPORT_SYMBOL(genlmsg_put);
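
/*
 * Usage sketch (illustrative only): the usual pattern around genlmsg_put()
 * when filling a reply for the hypothetical "foo" family used in the earlier
 * examples (FOO_CMD_GET and FOO_ATTR_MSG are made-up names; msg and info come
 * from the surrounding handler).
 *
 *	void *hdr;
 *
 *	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
 *			  &foo_family, 0, FOO_CMD_GET);
 *	if (!hdr)
 *		goto free_msg;
 *	if (nla_put_string(msg, FOO_ATTR_MSG, "hello"))
 *		goto cancel;
 *	genlmsg_end(msg, hdr);
 *	return genlmsg_reply(msg, info);
 *
 *  cancel:
 *	genlmsg_cancel(msg, hdr);
 *  free_msg:
 *	nlmsg_free(msg);
 *	return -EMSGSIZE;
 */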

static int genl_lock_start(struct netlink_callback *cb)
{
	/* our ops are always const - netlink API doesn't propagate that */
	const struct genl_ops *ops = cb->data;
	int rc = 0;

	if (ops->start) {
		genl_lock();
		rc = ops->start(cb);
		genl_unlock();
	}
	return rc;
}

static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* our ops are always const - netlink API doesn't propagate that */
	const struct genl_ops *ops = cb->data;
	int rc;

	genl_lock();
	rc = ops->dumpit(skb, cb);
	genl_unlock();
	return rc;
}

static int genl_lock_done(struct netlink_callback *cb)
{
	/* our ops are always const - netlink API doesn't propagate that */
	const struct genl_ops *ops = cb->data;
	int rc = 0;

	if (ops->done) {
		genl_lock();
		rc = ops->done(cb);
		genl_unlock();
	}
	return rc;
}

static int genl_family_rcv_msg(struct genl_family *family,
			       struct sk_buff *skb,
			       struct nlmsghdr *nlh)
{
	const struct genl_ops *ops;
	struct net *net = sock_net(skb->sk);
	struct genl_info info;
	struct genlmsghdr *hdr = nlmsg_data(nlh);
	struct nlattr **attrbuf;
	int hdrlen, err;

	/* this family doesn't exist in this netns */
	if (!family->netnsok && !net_eq(net, &init_net))
		return -ENOENT;

	hdrlen = GENL_HDRLEN + family->hdrsize;
	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
		return -EINVAL;

	ops = genl_get_cmd(hdr->cmd, family);
	if (ops == NULL)
		return -EOPNOTSUPP;

	if ((ops->flags & GENL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
		int rc;

		if (ops->dumpit == NULL)
			return -EOPNOTSUPP;

		if (!family->parallel_ops) {
			struct netlink_dump_control c = {
				.module = family->module,
				/* we have const, but the netlink API doesn't */
				.data = (void *)ops,
				.start = genl_lock_start,
				.dump = genl_lock_dumpit,
				.done = genl_lock_done,
			};

			genl_unlock();
			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
			genl_lock();

		} else {
			struct netlink_dump_control c = {
				.module = family->module,
				.start = ops->start,
				.dump = ops->dumpit,
				.done = ops->done,
			};

			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
		}

		return rc;
	}

	if (ops->doit == NULL)
		return -EOPNOTSUPP;

	if (family->maxattr && family->parallel_ops) {
		attrbuf = kmalloc((family->maxattr+1) *
				  sizeof(struct nlattr *), GFP_KERNEL);
		if (attrbuf == NULL)
			return -ENOMEM;
	} else
		attrbuf = family->attrbuf;

	if (attrbuf) {
		err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
				  ops->policy);
		if (err < 0)
			goto out;
	}

	info.snd_seq = nlh->nlmsg_seq;
	info.snd_portid = NETLINK_CB(skb).portid;
	info.nlhdr = nlh;
	info.genlhdr = nlmsg_data(nlh);
	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
	info.attrs = attrbuf;
	info.dst_sk = skb->sk;
	genl_info_net_set(&info, net);
	memset(&info.user_ptr, 0, sizeof(info.user_ptr));

	if (family->pre_doit) {
		err = family->pre_doit(ops, skb, &info);
		if (err)
			goto out;
	}

	err = ops->doit(skb, &info);

	if (family->post_doit)
		family->post_doit(ops, skb, &info);

out:
	if (family->parallel_ops)
		kfree(attrbuf);

	return err;
}

static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct genl_family *family;
	int err;

	family = genl_family_find_byid(nlh->nlmsg_type);
	if (family == NULL)
		return -ENOENT;

	if (!family->parallel_ops)
		genl_lock();

	err = genl_family_rcv_msg(family, skb, nlh);

	if (!family->parallel_ops)
		genl_unlock();

	return err;
}

static void genl_rcv(struct sk_buff *skb)
{
	down_read(&cb_lock);
	netlink_rcv_skb(skb, &genl_rcv_msg);
	up_read(&cb_lock);
}

/**************************************************************************
 * Controller
 **************************************************************************/

static struct genl_family genl_ctrl = {
	.id = GENL_ID_CTRL,
	.name = "nlctrl",
	.version = 0x2,
	.maxattr = CTRL_ATTR_MAX,
	.netnsok = true,
};

static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
			  u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -1;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
		goto nla_put_failure;

	if (family->n_ops) {
		struct nlattr *nla_ops;
		int i;

		nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
		if (nla_ops == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_ops; i++) {
			struct nlattr *nest;
			const struct genl_ops *ops = &family->ops[i];
			u32 op_flags = ops->flags;

			if (ops->dumpit)
				op_flags |= GENL_CMD_CAP_DUMP;
			if (ops->doit)
				op_flags |= GENL_CMD_CAP_DO;
			if (ops->policy)
				op_flags |= GENL_CMD_CAP_HASPOL;

			nest = nla_nest_start(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}

		nla_nest_end(skb, nla_ops);
	}

	if (family->n_mcgrps) {
		struct nlattr *nla_grps;
		int i;

		nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
		if (nla_grps == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_mcgrps; i++) {
			struct nlattr *nest;
			const struct genl_multicast_group *grp;

			grp = &family->mcgrps[i];

			nest = nla_nest_start(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
					family->mcgrp_offset + i) ||
			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
					   grp->name))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}
		nla_nest_end(skb, nla_grps);
	}

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ctrl_fill_mcgrp_info(struct genl_family *family,
				const struct genl_multicast_group *grp,
				int grp_id, u32 portid, u32 seq, u32 flags,
				struct sk_buff *skb, u8 cmd)
{
	void *hdr;
	struct nlattr *nla_grps;
	struct nlattr *nest;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -1;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
		goto nla_put_failure;

	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
	if (nla_grps == NULL)
		goto nla_put_failure;

	nest = nla_nest_start(skb, 1);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
			   grp->name))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	nla_nest_end(skb, nla_grps);

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
{
	int i, n = 0;
	struct genl_family *rt;
	struct net *net = sock_net(skb->sk);
	int chains_to_skip = cb->args[0];
	int fams_to_skip = cb->args[1];

	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
		n = 0;
		list_for_each_entry(rt, genl_family_chain(i), family_list) {
			if (!rt->netnsok && !net_eq(net, &init_net))
				continue;
			if (++n < fams_to_skip)
				continue;
			if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   skb, CTRL_CMD_NEWFAMILY) < 0)
				goto errout;
		}

		fams_to_skip = 0;
	}

errout:
	cb->args[0] = i;
	cb->args[1] = n;

	return skb->len;
}

static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
					     u32 portid, int seq, u8 cmd)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOBUFS);

	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
	if (err < 0) {
		nlmsg_free(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static struct sk_buff *
ctrl_build_mcgrp_msg(struct genl_family *family,
		     const struct genl_multicast_group *grp,
		     int grp_id, u32 portid, int seq, u8 cmd)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOBUFS);

	err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
				   seq, 0, skb, cmd);
	if (err < 0) {
		nlmsg_free(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
				    .len = GENL_NAMSIZ - 1 },
};

static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	struct genl_family *res = NULL;
	int err = -EINVAL;

	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
		res = genl_family_find_byid(id);
		err = -ENOENT;
	}

	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
		char *name;

		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
		res = genl_family_find_byname(name);
#ifdef CONFIG_MODULES
		if (res == NULL) {
			genl_unlock();
			up_read(&cb_lock);
			request_module("net-pf-%d-proto-%d-family-%s",
				       PF_NETLINK, NETLINK_GENERIC, name);
			down_read(&cb_lock);
			genl_lock();
			res = genl_family_find_byname(name);
		}
#endif
		err = -ENOENT;
	}

	if (res == NULL)
		return err;

	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
		/* family doesn't exist here */
		return -ENOENT;
	}

	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
				    CTRL_CMD_NEWFAMILY);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	return genlmsg_reply(msg, info);
}
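
/*
 * Note (not part of the original source): this doit backs the lookup that
 * userspace libraries (e.g. libnl's genl_ctrl_resolve()) perform at startup:
 * they send CTRL_CMD_GETFAMILY to family GENL_ID_CTRL carrying a
 * CTRL_ATTR_FAMILY_NAME attribute and read the dynamically assigned
 * CTRL_ATTR_FAMILY_ID out of the reply built by ctrl_fill_info() above.
 */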

static int genl_ctrl_event(int event, struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id)
{
	struct sk_buff *msg;

	/* genl is still initialising */
	if (!init_net.genl_sock)
		return 0;

	switch (event) {
	case CTRL_CMD_NEWFAMILY:
	case CTRL_CMD_DELFAMILY:
		WARN_ON(grp);
		msg = ctrl_build_family_msg(family, 0, 0, event);
		break;
	case CTRL_CMD_NEWMCAST_GRP:
	case CTRL_CMD_DELMCAST_GRP:
		BUG_ON(!grp);
		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (!family->netnsok) {
		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
					0, GFP_KERNEL);
	} else {
		rcu_read_lock();
		genlmsg_multicast_allns(&genl_ctrl, msg, 0,
					0, GFP_ATOMIC);
		rcu_read_unlock();
	}

	return 0;
}

static struct genl_ops genl_ctrl_ops[] = {
	{
		.cmd		= CTRL_CMD_GETFAMILY,
		.doit		= ctrl_getfamily,
		.dumpit		= ctrl_dumpfamily,
		.policy		= ctrl_policy,
	},
};

static struct genl_multicast_group genl_ctrl_groups[] = {
	{ .name = "notify", },
};

static int genl_bind(struct net *net, int group)
{
	int i, err = -ENOENT;

	down_read(&cb_lock);
	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
		struct genl_family *f;

		list_for_each_entry(f, genl_family_chain(i), family_list) {
			if (group >= f->mcgrp_offset &&
			    group < f->mcgrp_offset + f->n_mcgrps) {
				int fam_grp = group - f->mcgrp_offset;

				if (!f->netnsok && net != &init_net)
					err = -ENOENT;
				else if (f->mcast_bind)
					err = f->mcast_bind(net, fam_grp);
				else
					err = 0;
				break;
			}
		}
	}
	up_read(&cb_lock);

	return err;
}

static void genl_unbind(struct net *net, int group)
{
	int i;

	down_read(&cb_lock);
	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
		struct genl_family *f;

		list_for_each_entry(f, genl_family_chain(i), family_list) {
			if (group >= f->mcgrp_offset &&
			    group < f->mcgrp_offset + f->n_mcgrps) {
				int fam_grp = group - f->mcgrp_offset;

				if (f->mcast_unbind)
					f->mcast_unbind(net, fam_grp);
				break;
			}
		}
	}
	up_read(&cb_lock);
}

static int __net_init genl_pernet_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.input		= genl_rcv,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= genl_bind,
		.unbind		= genl_unbind,
	};

	/* we'll bump the group number right afterwards */
	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);

	if (!net->genl_sock && net_eq(net, &init_net))
		panic("GENL: Cannot initialize generic netlink\n");

	if (!net->genl_sock)
		return -ENOMEM;

	return 0;
}

static void __net_exit genl_pernet_exit(struct net *net)
{
	netlink_kernel_release(net->genl_sock);
	net->genl_sock = NULL;
}

static struct pernet_operations genl_pernet_ops = {
	.init = genl_pernet_init,
	.exit = genl_pernet_exit,
};

static int __init genl_init(void)
{
	int i, err;

	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
		INIT_LIST_HEAD(&family_ht[i]);

	err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
						   genl_ctrl_groups);
	if (err < 0)
		goto problem;

	err = register_pernet_subsys(&genl_pernet_ops);
	if (err)
		goto problem;

	return 0;

problem:
	panic("GENL: Cannot register controller: %d\n", err);
}

subsys_initcall(genl_init);

static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
			 gfp_t flags)
{
	struct sk_buff *tmp;
	struct net *net, *prev = NULL;
	int err;

	for_each_net_rcu(net) {
		if (prev) {
			tmp = skb_clone(skb, flags);
			if (!tmp) {
				err = -ENOMEM;
				goto error;
			}
			err = nlmsg_multicast(prev->genl_sock, tmp,
					      portid, group, flags);
			if (err)
				goto error;
		}

		prev = net;
	}

	return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
 error:
	kfree_skb(skb);
	return err;
}

int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
			    u32 portid, unsigned int group, gfp_t flags)
{
	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return -EINVAL;
	group = family->mcgrp_offset + group;
	return genlmsg_mcast(skb, portid, group, flags);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);

void genl_notify(struct genl_family *family, struct sk_buff *skb,
		 struct genl_info *info, u32 group, gfp_t flags)
{
	struct net *net = genl_info_net(info);
	struct sock *sk = net->genl_sock;
	int report = 0;

	if (info->nlhdr)
		report = nlmsg_report(info->nlhdr);

	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return;
	group = family->mcgrp_offset + group;
	nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
}
EXPORT_SYMBOL(genl_notify);
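
/*
 * Usage sketch (illustrative only): notifying listeners of the hypothetical
 * "foo" family from the earlier examples after a successful change.  The
 * group argument is relative to the family (0 == first entry of its mcgrps
 * array); genl_notify() adds family->mcgrp_offset internally.
 *
 *	genl_notify(&foo_family, msg, info, 0, GFP_KERNEL);
 */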