/*
 * NETLINK	Generic Netlink Family
 *
 *		Authors:	Jamal Hadi Salim
 *				Thomas Graf <tgraf@suug.ch>
 *				Johannes Berg <johannes@sipsolutions.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/rwsem.h>

#include <net/genetlink.h>

static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);

void genl_lock(void)
{
	mutex_lock(&genl_mutex);
}
EXPORT_SYMBOL(genl_lock);

void genl_unlock(void)
{
	mutex_unlock(&genl_mutex);
}
EXPORT_SYMBOL(genl_unlock);

#ifdef CONFIG_LOCKDEP
int lockdep_genl_is_held(void)
{
	return lockdep_is_held(&genl_mutex);
}
EXPORT_SYMBOL(lockdep_genl_is_held);
#endif

static void genl_lock_all(void)
{
	down_write(&cb_lock);
	genl_lock();
}

static void genl_unlock_all(void)
{
	genl_unlock();
	up_write(&cb_lock);
}

#define GENL_FAM_TAB_SIZE	16
#define GENL_FAM_TAB_MASK	(GENL_FAM_TAB_SIZE - 1)

static struct list_head family_ht[GENL_FAM_TAB_SIZE];

/*
 * Bitmap of multicast groups that are currently in use.
 *
 * To avoid an allocation at boot of just one unsigned long,
 * declare it global instead.
 * Bit 0 is marked as already used since group 0 is invalid.
 * Bit 1 is marked as already used since the drop-monitor code
 *   abuses the API and thinks it can statically use group 1.
 *   That group will typically conflict with other groups that
 *   any proper users use.
 * Bit 16 is marked as used since it's used for generic netlink
 *   and the code no longer marks pre-reserved IDs as used.
 * Bit 17 is marked as already used since the VFS quota code
 *   also abused this API and relied on family == group ID, we
 *   cater to that by giving it a static family and group ID.
 * Bit 18 is marked as already used since the PMCRAID driver
 *   did the same thing as the VFS quota code (maybe copied?)
 */
static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
				      BIT(GENL_ID_VFS_DQUOT) |
				      BIT(GENL_ID_PMCRAID);
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1;
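
/*
 * Worked example (annotation, not part of the original source): assuming the
 * uapi values GENL_ID_CTRL == 0x10, GENL_ID_VFS_DQUOT == 0x11 and
 * GENL_ID_PMCRAID == 0x12, the initial bitmap value works out to
 *
 *	0x3 | BIT(16) | BIT(17) | BIT(18) == 0x70003
 *
 * i.e. groups 0, 1, 16, 17 and 18 start out reserved, matching the list of
 * invalid, abused and pre-reserved IDs described in the comment above.
 */
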
static int genl_ctrl_event(int event, struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id);

static inline unsigned int genl_family_hash(unsigned int id)
{
	return id & GENL_FAM_TAB_MASK;
}

static inline struct list_head *genl_family_chain(unsigned int id)
{
	return &family_ht[genl_family_hash(id)];
}

static struct genl_family *genl_family_find_byid(unsigned int id)
{
	struct genl_family *f;

	list_for_each_entry(f, genl_family_chain(id), family_list)
		if (f->id == id)
			return f;

	return NULL;
}

static struct genl_family *genl_family_find_byname(char *name)
{
	struct genl_family *f;
	int i;

	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
		list_for_each_entry(f, genl_family_chain(i), family_list)
			if (strcmp(f->name, name) == 0)
				return f;

	return NULL;
}

static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
{
	int i;

	for (i = 0; i < family->n_ops; i++)
		if (family->ops[i].cmd == cmd)
			return &family->ops[i];

	return NULL;
}

/* Of course we are going to have problems once we hit
 * 2^16 alive types, but that can only happen by year 2K
 */
static u16 genl_generate_id(void)
{
	static u16 id_gen_idx = GENL_MIN_ID;
	int i;

	for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
		if (id_gen_idx != GENL_ID_VFS_DQUOT &&
		    id_gen_idx != GENL_ID_PMCRAID &&
		    !genl_family_find_byid(id_gen_idx))
			return id_gen_idx;

		if (++id_gen_idx > GENL_MAX_ID)
			id_gen_idx = GENL_MIN_ID;
	}

	return 0;
}

static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
	unsigned long *new_groups;
	int start = 0;
	int i;
	int id;
	bool fits;

	do {
		if (start == 0)
			id = find_first_zero_bit(mc_groups,
						 mc_groups_longs *
						 BITS_PER_LONG);
		else
			id = find_next_zero_bit(mc_groups,
						mc_groups_longs * BITS_PER_LONG,
						start);

		fits = true;
		for (i = id;
		     i < min_t(int, id + n_groups,
			       mc_groups_longs * BITS_PER_LONG);
		     i++) {
			if (test_bit(i, mc_groups)) {
				start = i;
				fits = false;
				break;
			}
		}

		if (id >= mc_groups_longs * BITS_PER_LONG) {
			unsigned long new_longs = mc_groups_longs +
						  BITS_TO_LONGS(n_groups);
			size_t nlen = new_longs * sizeof(unsigned long);

			if (mc_groups == &mc_group_start) {
				new_groups = kzalloc(nlen, GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				*mc_groups = mc_group_start;
			} else {
				new_groups = krealloc(mc_groups, nlen,
						      GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
					mc_groups[mc_groups_longs + i] = 0;
			}
			mc_groups_longs = new_longs;
		}
	} while (!fits);

	for (i = id; i < id + n_groups; i++)
		set_bit(i, mc_groups);
	*first_id = id;
	return 0;
}

static struct genl_family genl_ctrl;

static int genl_validate_assign_mc_groups(struct genl_family *family)
{
	int first_id;
	int n_groups = family->n_mcgrps;
	int err = 0, i;
	bool groups_allocated = false;

	if (!n_groups)
		return 0;

	for (i = 0; i < n_groups; i++) {
		const struct genl_multicast_group *grp = &family->mcgrps[i];

		if (WARN_ON(grp->name[0] == '\0'))
			return -EINVAL;
		if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
			return -EINVAL;
	}

	/* special-case our own group and hacks */
	if (family == &genl_ctrl) {
		first_id = GENL_ID_CTRL;
		BUG_ON(n_groups != 1);
	} else if (strcmp(family->name, "NET_DM") == 0) {
		first_id = 1;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_VFS_DQUOT) {
		first_id = GENL_ID_VFS_DQUOT;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_PMCRAID) {
		first_id = GENL_ID_PMCRAID;
		BUG_ON(n_groups != 1);
	} else {
		groups_allocated = true;
		err = genl_allocate_reserve_groups(n_groups, &first_id);
		if (err)
			return err;
	}

	family->mcgrp_offset = first_id;
	/* if still initializing, can't and don't need to realloc bitmaps */
	if (!init_net.genl_sock)
		return 0;

	if (family->netnsok) {
		struct net *net;

		netlink_table_grab();
		rcu_read_lock();
		for_each_net_rcu(net) {
			err = __netlink_change_ngroups(net->genl_sock,
					mc_groups_longs * BITS_PER_LONG);
			if (err) {
				/*
				 * No need to roll back, can only fail if
				 * memory allocation fails and then the
				 * number of _possible_ groups has been
				 * increased on some sockets which is ok.
				 */
				break;
			}
		}
		rcu_read_unlock();
		netlink_table_ungrab();
	} else {
		err = netlink_change_ngroups(init_net.genl_sock,
					     mc_groups_longs * BITS_PER_LONG);
	}

	if (groups_allocated && err) {
		for (i = 0; i < family->n_mcgrps; i++)
			clear_bit(family->mcgrp_offset + i, mc_groups);
	}

	return err;
}

static void genl_unregister_mc_groups(struct genl_family *family)
{
	struct net *net;
	int i;

	netlink_table_grab();
	rcu_read_lock();
	for_each_net_rcu(net) {
		for (i = 0; i < family->n_mcgrps; i++)
			__netlink_clear_multicast_users(
				net->genl_sock, family->mcgrp_offset + i);
	}
	rcu_read_unlock();
	netlink_table_ungrab();

	for (i = 0; i < family->n_mcgrps; i++) {
		int grp_id = family->mcgrp_offset + i;

		if (grp_id != 1)
			clear_bit(grp_id, mc_groups);
		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
				&family->mcgrps[i], grp_id);
	}
}

static int genl_validate_ops(struct genl_family *family)
{
	const struct genl_ops *ops = family->ops;
	unsigned int n_ops = family->n_ops;
	int i, j;

	if (WARN_ON(n_ops && !ops))
		return -EINVAL;

	if (!n_ops)
		return 0;

	for (i = 0; i < n_ops; i++) {
		if (ops[i].dumpit == NULL && ops[i].doit == NULL)
			return -EINVAL;
		for (j = i + 1; j < n_ops; j++)
			if (ops[i].cmd == ops[j].cmd)
				return -EINVAL;
	}

	/* family is not registered yet, so no locking needed */
	family->ops = ops;
	family->n_ops = n_ops;

	return 0;
}

/**
 * __genl_register_family - register a generic netlink family
 * @family: generic netlink family
 *
 * Registers the specified family after validating it first. Only one
 * family may be registered with the same family name or identifier.
 * The family id may equal GENL_ID_GENERATE, causing a unique id to
 * be automatically generated and assigned.
 *
 * The family's ops array must already be assigned; you can use the
 * genl_register_family_with_ops() helper function.
 *
 * Return 0 on success or a negative error code.
 */
int __genl_register_family(struct genl_family *family)
{
	int err = -EINVAL, i;

	if (family->id && family->id < GENL_MIN_ID)
		goto errout;

	if (family->id > GENL_MAX_ID)
		goto errout;

	err = genl_validate_ops(family);
	if (err)
		return err;

	genl_lock_all();

	if (genl_family_find_byname(family->name)) {
		err = -EEXIST;
		goto errout_locked;
	}

	if (family->id == GENL_ID_GENERATE) {
		u16 newid = genl_generate_id();

		if (!newid) {
			err = -ENOMEM;
			goto errout_locked;
		}

		family->id = newid;
	} else if (genl_family_find_byid(family->id)) {
		err = -EEXIST;
		goto errout_locked;
	}

	if (family->maxattr && !family->parallel_ops) {
		family->attrbuf = kmalloc((family->maxattr+1) *
					  sizeof(struct nlattr *), GFP_KERNEL);
		if (family->attrbuf == NULL) {
			err = -ENOMEM;
			goto errout_locked;
		}
	} else
		family->attrbuf = NULL;

	err = genl_validate_assign_mc_groups(family);
	if (err)
		goto errout_locked;

	list_add_tail(&family->family_list, genl_family_chain(family->id));
	genl_unlock_all();

	/* send all events */
	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
	for (i = 0; i < family->n_mcgrps; i++)
		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
				&family->mcgrps[i], family->mcgrp_offset + i);

	return 0;

errout_locked:
	genl_unlock_all();
errout:
	return err;
}
EXPORT_SYMBOL(__genl_register_family);
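
/*
 * Usage sketch (illustrative only, not part of this file): a minimal family
 * registration. The "demo_*" names, command number and attribute count are
 * hypothetical, and demo_doit is a handler of the kind sketched after
 * genlmsg_new_unicast() further below. A real user would normally rely on
 * the genl_register_family_with_ops_groups() helper, as the controller in
 * this file does, rather than calling __genl_register_family() directly.
 *
 *	static struct genl_ops demo_ops[] = {
 *		{
 *			.cmd	= 1,		// hypothetical command number
 *			.doit	= demo_doit,
 *		},
 *	};
 *
 *	static const struct genl_multicast_group demo_mcgrps[] = {
 *		{ .name = "demo_events", },
 *	};
 *
 *	static struct genl_family demo_family = {
 *		.id		= GENL_ID_GENERATE,
 *		.name		= "DEMO_FAMILY",
 *		.version	= 1,
 *		.maxattr	= 1,
 *	};
 *
 *	static int __init demo_init(void)
 *	{
 *		return genl_register_family_with_ops_groups(&demo_family,
 *							    demo_ops,
 *							    demo_mcgrps);
 *	}
 */
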
/**
 * genl_unregister_family - unregister generic netlink family
 * @family: generic netlink family
 *
 * Unregisters the specified family.
 *
 * Returns 0 on success or a negative error code.
 */
int genl_unregister_family(struct genl_family *family)
{
	struct genl_family *rc;

	genl_lock_all();

	genl_unregister_mc_groups(family);

	list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
		if (family->id != rc->id || strcmp(rc->name, family->name))
			continue;

		list_del(&rc->family_list);
		family->n_ops = 0;
		genl_unlock_all();

		kfree(family->attrbuf);
		genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
		return 0;
	}

	genl_unlock_all();

	return -ENOENT;
}
EXPORT_SYMBOL(genl_unregister_family);
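
/*
 * Usage sketch (illustrative only): the counterpart to the registration
 * example above, typically called from a module's exit path. "demo_family"
 * is the hypothetical family from that sketch.
 *
 *	static void __exit demo_exit(void)
 *	{
 *		genl_unregister_family(&demo_family);
 *	}
 */
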
/**
 * genlmsg_new_unicast - Allocate generic netlink message for unicast
 * @payload: size of the message payload
 * @info: information on destination
 * @flags: the type of memory to allocate
 *
 * Allocates a new sk_buff large enough to cover the specified payload
 * plus required Netlink headers. Will check the receiving socket for
 * memory-mapped I/O capability and use it if enabled. Will fall back
 * to a non-mapped skb if the message size exceeds the frame size of
 * the ring.
 */
struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
				    gfp_t flags)
{
	size_t len = nlmsg_total_size(genlmsg_total_size(payload));

	return netlink_alloc_skb(info->dst_sk, len, info->snd_portid, flags);
}
EXPORT_SYMBOL_GPL(genlmsg_new_unicast);
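
/*
 * Usage sketch (illustrative only): allocating and sending a unicast reply
 * from a family's .doit handler. DEMO_CMD_REPLY, DEMO_ATTR_VALUE and
 * demo_family are hypothetical names; the payload here is sized for a
 * single u32 attribute.
 *
 *	static int demo_doit(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		struct sk_buff *msg;
 *		void *hdr;
 *
 *		msg = genlmsg_new_unicast(nla_total_size(sizeof(u32)),
 *					  info, GFP_KERNEL);
 *		if (!msg)
 *			return -ENOMEM;
 *
 *		hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
 *				  &demo_family, 0, DEMO_CMD_REPLY);
 *		if (!hdr || nla_put_u32(msg, DEMO_ATTR_VALUE, 42)) {
 *			nlmsg_free(msg);
 *			return -EMSGSIZE;
 *		}
 *
 *		genlmsg_end(msg, hdr);
 *		return genlmsg_reply(msg, info);
 *	}
 */
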
/**
 * genlmsg_put - Add generic netlink header to netlink message
 * @skb: socket buffer holding the message
 * @portid: netlink portid the message is addressed to
 * @seq: sequence number (usually the one of the sender)
 * @family: generic netlink family
 * @flags: netlink message flags
 * @cmd: generic netlink command
 *
 * Returns a pointer to the user-specific header.
 */
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
		  struct genl_family *family, int flags, u8 cmd)
{
	struct nlmsghdr *nlh;
	struct genlmsghdr *hdr;

	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
			family->hdrsize, flags);
	if (nlh == NULL)
		return NULL;

	hdr = nlmsg_data(nlh);
	hdr->cmd = cmd;
	hdr->version = family->version;

	return (char *) hdr + GENL_HDRLEN;
}
EXPORT_SYMBOL(genlmsg_put);
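
/*
 * Usage sketch (illustrative only): building a notification and sending it
 * to a family's first multicast group. DEMO_CMD_EVENT, DEMO_ATTR_VALUE and
 * demo_family are hypothetical; group index 0 refers to the first entry of
 * the family's mcgrps array.
 *
 *	static int demo_notify(u32 value)
 *	{
 *		struct sk_buff *msg;
 *		void *hdr;
 *
 *		msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *		if (!msg)
 *			return -ENOMEM;
 *
 *		hdr = genlmsg_put(msg, 0, 0, &demo_family, 0, DEMO_CMD_EVENT);
 *		if (!hdr || nla_put_u32(msg, DEMO_ATTR_VALUE, value)) {
 *			nlmsg_free(msg);
 *			return -EMSGSIZE;
 *		}
 *
 *		genlmsg_end(msg, hdr);
 *		return genlmsg_multicast(&demo_family, msg, 0, 0, GFP_KERNEL);
 *	}
 */
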
static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* our ops are always const - netlink API doesn't propagate that */
	const struct genl_ops *ops = cb->data;
	int rc;

	genl_lock();
	rc = ops->dumpit(skb, cb);
	genl_unlock();
	return rc;
}

static int genl_lock_done(struct netlink_callback *cb)
{
	/* our ops are always const - netlink API doesn't propagate that */
	const struct genl_ops *ops = cb->data;
	int rc = 0;

	if (ops->done) {
		genl_lock();
		rc = ops->done(cb);
		genl_unlock();
	}
	return rc;
}

static int genl_family_rcv_msg(struct genl_family *family,
			       struct sk_buff *skb,
			       struct nlmsghdr *nlh)
{
	const struct genl_ops *ops;
	struct net *net = sock_net(skb->sk);
	struct genl_info info;
	struct genlmsghdr *hdr = nlmsg_data(nlh);
	struct nlattr **attrbuf;
	int hdrlen, err;

	/* this family doesn't exist in this netns */
	if (!family->netnsok && !net_eq(net, &init_net))
		return -ENOENT;

	hdrlen = GENL_HDRLEN + family->hdrsize;
	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
		return -EINVAL;

	ops = genl_get_cmd(hdr->cmd, family);
	if (ops == NULL)
		return -EOPNOTSUPP;

	if ((ops->flags & GENL_ADMIN_PERM) &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
		int rc;

		if (ops->dumpit == NULL)
			return -EOPNOTSUPP;

		if (!family->parallel_ops) {
			struct netlink_dump_control c = {
				.module = family->module,
				/* we have const, but the netlink API doesn't */
				.data = (void *)ops,
				.dump = genl_lock_dumpit,
				.done = genl_lock_done,
			};

			genl_unlock();
			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
			genl_lock();
		} else {
			struct netlink_dump_control c = {
				.module = family->module,
				.dump = ops->dumpit,
				.done = ops->done,
			};

			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
		}

		return rc;
	}
	if (ops->doit == NULL)
		return -EOPNOTSUPP;

	if (family->maxattr && family->parallel_ops) {
		attrbuf = kmalloc((family->maxattr+1) *
				  sizeof(struct nlattr *), GFP_KERNEL);
		if (attrbuf == NULL)
			return -ENOMEM;
	} else
		attrbuf = family->attrbuf;

	if (attrbuf) {
		err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
				  ops->policy);
		if (err < 0)
			goto out;
	}

	info.snd_seq = nlh->nlmsg_seq;
	info.snd_portid = NETLINK_CB(skb).portid;
	info.nlhdr = nlh;
	info.genlhdr = nlmsg_data(nlh);
	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
	info.attrs = attrbuf;
	info.dst_sk = skb->sk;
	genl_info_net_set(&info, net);
	memset(&info.user_ptr, 0, sizeof(info.user_ptr));

	if (family->pre_doit) {
		err = family->pre_doit(ops, skb, &info);
		if (err)
			goto out;
	}

	err = ops->doit(skb, &info);

	if (family->post_doit)
		family->post_doit(ops, skb, &info);

out:
	if (family->parallel_ops)
		kfree(attrbuf);

	return err;
}

static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct genl_family *family;
	int err;

	family = genl_family_find_byid(nlh->nlmsg_type);
	if (family == NULL)
		return -ENOENT;

	if (!family->parallel_ops)
		genl_lock();

	err = genl_family_rcv_msg(family, skb, nlh);

	if (!family->parallel_ops)
		genl_unlock();

	return err;
}

static void genl_rcv(struct sk_buff *skb)
{
	down_read(&cb_lock);
	netlink_rcv_skb(skb, &genl_rcv_msg);
	up_read(&cb_lock);
}

/**************************************************************************
 * Controller
 **************************************************************************/

static struct genl_family genl_ctrl = {
	.id = GENL_ID_CTRL,
	.name = "nlctrl",
	.version = 0x2,
	.maxattr = CTRL_ATTR_MAX,
	.netnsok = true,
};

static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
			  u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -1;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
		goto nla_put_failure;
	if (family->n_ops) {
		struct nlattr *nla_ops;
		int i;

		nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
		if (nla_ops == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_ops; i++) {
			struct nlattr *nest;
			const struct genl_ops *ops = &family->ops[i];
			u32 op_flags = ops->flags;

			if (ops->dumpit)
				op_flags |= GENL_CMD_CAP_DUMP;
			if (ops->doit)
				op_flags |= GENL_CMD_CAP_DO;
			if (ops->policy)
				op_flags |= GENL_CMD_CAP_HASPOL;

			nest = nla_nest_start(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}

		nla_nest_end(skb, nla_ops);
	}
	if (family->n_mcgrps) {
		struct nlattr *nla_grps;
		int i;

		nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
		if (nla_grps == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_mcgrps; i++) {
			struct nlattr *nest;
			const struct genl_multicast_group *grp;

			grp = &family->mcgrps[i];

			nest = nla_nest_start(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
					family->mcgrp_offset + i) ||
			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
					   grp->name))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}
		nla_nest_end(skb, nla_grps);
	}
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ctrl_fill_mcgrp_info(struct genl_family *family,
				const struct genl_multicast_group *grp,
				int grp_id, u32 portid, u32 seq, u32 flags,
				struct sk_buff *skb, u8 cmd)
{
	void *hdr;
	struct nlattr *nla_grps;
	struct nlattr *nest;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -1;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
		goto nla_put_failure;

	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
	if (nla_grps == NULL)
		goto nla_put_failure;

	nest = nla_nest_start(skb, 1);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
			   grp->name))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	nla_nest_end(skb, nla_grps);

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
{
	int i, n = 0;
	struct genl_family *rt;
	struct net *net = sock_net(skb->sk);
	int chains_to_skip = cb->args[0];
	int fams_to_skip = cb->args[1];

	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
		n = 0;
		list_for_each_entry(rt, genl_family_chain(i), family_list) {
			if (!rt->netnsok && !net_eq(net, &init_net))
				continue;
			if (++n < fams_to_skip)
				continue;
			if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   skb, CTRL_CMD_NEWFAMILY) < 0)
				goto errout;
		}

		fams_to_skip = 0;
	}

errout:
	cb->args[0] = i;
	cb->args[1] = n;

	return skb->len;
}

static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
					     u32 portid, int seq, u8 cmd)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOBUFS);

	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
	if (err < 0) {
		nlmsg_free(skb);
		return ERR_PTR(err);
	}

	return skb;
}

*
862 ctrl_build_mcgrp_msg(struct genl_family
*family
,
863 const struct genl_multicast_group
*grp
,
864 int grp_id
, u32 portid
, int seq
, u8 cmd
)
869 skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
871 return ERR_PTR(-ENOBUFS
);
873 err
= ctrl_fill_mcgrp_info(family
, grp
, grp_id
, portid
,
883 static const struct nla_policy ctrl_policy
[CTRL_ATTR_MAX
+1] = {
884 [CTRL_ATTR_FAMILY_ID
] = { .type
= NLA_U16
},
885 [CTRL_ATTR_FAMILY_NAME
] = { .type
= NLA_NUL_STRING
,
886 .len
= GENL_NAMSIZ
- 1 },
static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	struct genl_family *res = NULL;
	int err = -EINVAL;

	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
		res = genl_family_find_byid(id);
		err = -ENOENT;
	}

	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
		char *name;

		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
		res = genl_family_find_byname(name);
#ifdef CONFIG_MODULES
		if (res == NULL) {
			genl_unlock();
			up_read(&cb_lock);
			request_module("net-pf-%d-proto-%d-family-%s",
				       PF_NETLINK, NETLINK_GENERIC, name);
			down_read(&cb_lock);
			genl_lock();
			res = genl_family_find_byname(name);
		}
#endif
		err = -ENOENT;
	}

	if (res == NULL)
		return err;

	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
		/* family doesn't exist here */
		return -ENOENT;
	}

	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
				    CTRL_CMD_NEWFAMILY);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	return genlmsg_reply(msg, info);
}

static int genl_ctrl_event(int event, struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id)
{
	struct sk_buff *msg;

	/* genl is still initialising */
	if (!init_net.genl_sock)
		return 0;

	switch (event) {
	case CTRL_CMD_NEWFAMILY:
	case CTRL_CMD_DELFAMILY:
		WARN_ON(grp);
		msg = ctrl_build_family_msg(family, 0, 0, event);
		break;
	case CTRL_CMD_NEWMCAST_GRP:
	case CTRL_CMD_DELMCAST_GRP:
		BUG_ON(!grp);
		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (!family->netnsok) {
		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
					0, GFP_KERNEL);
	} else {
		rcu_read_lock();
		genlmsg_multicast_allns(&genl_ctrl, msg, 0,
					0, GFP_ATOMIC);
		rcu_read_unlock();
	}

	return 0;
}

static struct genl_ops genl_ctrl_ops[] = {
	{
		.cmd		= CTRL_CMD_GETFAMILY,
		.doit		= ctrl_getfamily,
		.dumpit		= ctrl_dumpfamily,
		.policy		= ctrl_policy,
	},
};

static struct genl_multicast_group genl_ctrl_groups[] = {
	{ .name = "notify", },
};

static int __net_init genl_pernet_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.input		= genl_rcv,
		.flags		= NL_CFG_F_NONROOT_RECV,
	};

	/* we'll bump the group number right afterwards */
	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);

	if (!net->genl_sock && net_eq(net, &init_net))
		panic("GENL: Cannot initialize generic netlink\n");

	if (!net->genl_sock)
		return -ENOMEM;

	return 0;
}

static void __net_exit genl_pernet_exit(struct net *net)
{
	netlink_kernel_release(net->genl_sock);
	net->genl_sock = NULL;
}

static struct pernet_operations genl_pernet_ops = {
	.init = genl_pernet_init,
	.exit = genl_pernet_exit,
};

static int __init genl_init(void)
{
	int i, err;

	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
		INIT_LIST_HEAD(&family_ht[i]);

	err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
						   genl_ctrl_groups);
	if (err < 0)
		goto problem;

	err = register_pernet_subsys(&genl_pernet_ops);
	if (err)
		goto problem;

	return 0;

problem:
	panic("GENL: Cannot register controller: %d\n", err);
}

subsys_initcall(genl_init);

);
1044 static int genlmsg_mcast(struct sk_buff
*skb
, u32 portid
, unsigned long group
,
1047 struct sk_buff
*tmp
;
1048 struct net
*net
, *prev
= NULL
;
1051 for_each_net_rcu(net
) {
1053 tmp
= skb_clone(skb
, flags
);
1058 err
= nlmsg_multicast(prev
->genl_sock
, tmp
,
1059 portid
, group
, flags
);
1067 return nlmsg_multicast(prev
->genl_sock
, skb
, portid
, group
, flags
);
int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
			    u32 portid, unsigned int group, gfp_t flags)
{
	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return -EINVAL;
	group = family->mcgrp_offset + group;
	return genlmsg_mcast(skb, portid, group, flags);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);

void genl_notify(struct genl_family *family,
		 struct sk_buff *skb, struct net *net, u32 portid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *sk = net->genl_sock;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return;
	group = family->mcgrp_offset + group;
	nlmsg_notify(sk, skb, portid, group, report, flags);
}
EXPORT_SYMBOL(genl_notify);