Linux 4.3 - net/netfilter/nfnetlink.c
/* Netfilter messages via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
 * (C) 2005,2007 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial netfilter messages via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>

#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);

static char __initdata nfversion[] = "0.30";

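/* Per-subsystem state: the subsys pointer is RCU-protected so the receive
 * path can look it up without taking a lock, while the mutex serializes
 * (un)registration and non-RCU callback invocation.
 */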
static struct {
	struct mutex mutex;
	const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];

static const int nfnl_group2type[NFNLGRP_MAX+1] = {
	[NFNLGRP_CONNTRACK_NEW]		= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_UPDATE]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_DESTROY]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_EXP_NEW]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_UPDATE]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_DESTROY]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_NFTABLES]		= NFNL_SUBSYS_NFTABLES,
	[NFNLGRP_ACCT_QUOTA]		= NFNL_SUBSYS_ACCT,
};

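/* Serialize access to a subsystem's slot in the table above; callers pass
 * the NFNL_SUBSYS_* id they want to lock.
 */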
void nfnl_lock(__u8 subsys_id)
{
	mutex_lock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_lock);

void nfnl_unlock(__u8 subsys_id)
{
	mutex_unlock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_unlock);

#ifdef CONFIG_PROVE_LOCKING
int lockdep_nfnl_is_held(u8 subsys_id)
{
	return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif

int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	if (table[n->subsys_id].subsys) {
		nfnl_unlock(n->subsys_id);
		return -EBUSY;
	}
	rcu_assign_pointer(table[n->subsys_id].subsys, n);
	nfnl_unlock(n->subsys_id);

	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);

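/* Example (sketch, not part of this file): a subsystem fills in a callback
 * table and a struct nfnetlink_subsystem and registers it, roughly like
 * below.  The EXAMPLE_* identifiers are hypothetical placeholders; see
 * <linux/netfilter/nfnetlink.h> for the authoritative struct layout.
 *
 *	static const struct nfnl_callback example_cb[EXAMPLE_MSG_MAX] = {
 *		[EXAMPLE_MSG_GET] = {
 *			.call		= example_get,
 *			.attr_count	= EXAMPLE_ATTR_MAX,
 *			.policy		= example_policy,
 *		},
 *	};
 *
 *	static const struct nfnetlink_subsystem example_subsys = {
 *		.name		= "example",
 *		.subsys_id	= NFNL_SUBSYS_EXAMPLE,
 *		.cb_count	= EXAMPLE_MSG_MAX,
 *		.cb		= example_cb,
 *	};
 *
 *	err = nfnetlink_subsys_register(&example_subsys);
 *	...
 *	nfnetlink_subsys_unregister(&example_subsys);	(on module exit)
 */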
int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	table[n->subsys_id].subsys = NULL;
	nfnl_unlock(n->subsys_id);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);

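/* nfnetlink message types encode the subsystem id in the upper byte and the
 * per-subsystem message type in the lower byte (NFNL_SUBSYS_ID() and
 * NFNL_MSG_TYPE()); the two helpers below resolve them to the registered
 * subsystem and its callback entry.
 */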
static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type)
{
	u_int8_t subsys_id = NFNL_SUBSYS_ID(type);

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return NULL;

	return rcu_dereference(table[subsys_id].subsys);
}

static inline const struct nfnl_callback *
nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss)
{
	u_int8_t cb_id = NFNL_MSG_TYPE(type);

	if (cb_id >= ss->cb_count)
		return NULL;

	return &ss->cb[cb_id];
}

int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
	return netlink_has_listeners(net->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);

struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
				    u32 dst_portid, gfp_t gfp_mask)
{
	return netlink_alloc_skb(net->nfnl, size, dst_portid, gfp_mask);
}
EXPORT_SYMBOL_GPL(nfnetlink_alloc_skb);

int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
		   unsigned int group, int echo, gfp_t flags)
{
	return nlmsg_notify(net->nfnl, skb, portid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);

int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
	return netlink_set_err(net->nfnl, portid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);

int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
		      int flags)
{
	return netlink_unicast(net->nfnl, skb, portid, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);

/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	const struct nfnl_callback *nc;
	const struct nfnetlink_subsystem *ss;
	int type, err;

	/* All the messages must at least contain nfgenmsg */
	if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
		return 0;

	type = nlh->nlmsg_type;
replay:
	rcu_read_lock();
	ss = nfnetlink_get_subsys(type);
	if (!ss) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
		rcu_read_lock();
		ss = nfnetlink_get_subsys(type);
		if (!ss)
#endif
		{
			rcu_read_unlock();
			return -EINVAL;
		}
	}

	nc = nfnetlink_find_client(type, ss);
	if (!nc) {
		rcu_read_unlock();
		return -EINVAL;
	}

	{
		int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
		u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
		struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
		struct nlattr *attr = (void *)nlh + min_len;
		int attrlen = nlh->nlmsg_len - min_len;
		__u8 subsys_id = NFNL_SUBSYS_ID(type);

		err = nla_parse(cda, ss->cb[cb_id].attr_count,
				attr, attrlen, ss->cb[cb_id].policy);
		if (err < 0) {
			rcu_read_unlock();
			return err;
		}

		if (nc->call_rcu) {
			err = nc->call_rcu(net->nfnl, skb, nlh,
					   (const struct nlattr **)cda);
			rcu_read_unlock();
		} else {
			rcu_read_unlock();
			nfnl_lock(subsys_id);
			if (rcu_dereference_protected(table[subsys_id].subsys,
				lockdep_is_held(&table[subsys_id].mutex)) != ss ||
			    nfnetlink_find_client(type, ss) != nc)
				err = -EAGAIN;
			else if (nc->call)
				err = nc->call(net->nfnl, skb, nlh,
					       (const struct nlattr **)cda);
			else
				err = -EINVAL;
			nfnl_unlock(subsys_id);
		}
		if (err == -EAGAIN)
			goto replay;
		return err;
	}
}

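/* Errors found while processing a batch are queued on a list and only
 * delivered to userspace once the whole batch has been walked, so that a
 * replayed batch does not report the same error twice.
 */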
struct nfnl_err {
	struct list_head head;
	struct nlmsghdr *nlh;
	int err;
};

static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
{
	struct nfnl_err *nfnl_err;

	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
	if (nfnl_err == NULL)
		return -ENOMEM;

	nfnl_err->nlh = nlh;
	nfnl_err->err = err;
	list_add_tail(&nfnl_err->head, list);

	return 0;
}

static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
	list_del(&nfnl_err->head);
	kfree(nfnl_err);
}

static void nfnl_err_reset(struct list_head *err_list)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head)
		nfnl_err_del(nfnl_err);
}

static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err);
		nfnl_err_del(nfnl_err);
	}
}

enum {
	NFNL_BATCH_FAILURE	= (1 << 0),
	NFNL_BATCH_DONE		= (1 << 1),
	NFNL_BATCH_REPLAY	= (1 << 2),
};

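/* Process a batch: userspace brackets a run of messages for one subsystem
 * between NFNL_MSG_BATCH_BEGIN and NFNL_MSG_BATCH_END.  The messages are
 * applied against a clone of the original skb; on a clean end the
 * subsystem's ->commit() is called, otherwise ->abort() rolls everything
 * back, and -EAGAIN from a callback replays the whole batch from scratch.
 */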
static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
				u_int16_t subsys_id)
{
	struct sk_buff *oskb = skb;
	struct net *net = sock_net(skb->sk);
	const struct nfnetlink_subsystem *ss;
	const struct nfnl_callback *nc;
	static LIST_HEAD(err_list);
	u32 status;
	int err;

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return netlink_ack(skb, nlh, -EINVAL);
replay:
	status = 0;

	skb = netlink_skb_clone(oskb, GFP_KERNEL);
	if (!skb)
		return netlink_ack(oskb, nlh, -ENOMEM);

	skb->sk = oskb->sk;

	nfnl_lock(subsys_id);
	ss = rcu_dereference_protected(table[subsys_id].subsys,
				       lockdep_is_held(&table[subsys_id].mutex));
	if (!ss) {
#ifdef CONFIG_MODULES
		nfnl_unlock(subsys_id);
		request_module("nfnetlink-subsys-%d", subsys_id);
		nfnl_lock(subsys_id);
		ss = rcu_dereference_protected(table[subsys_id].subsys,
					       lockdep_is_held(&table[subsys_id].mutex));
		if (!ss)
#endif
		{
			nfnl_unlock(subsys_id);
			netlink_ack(skb, nlh, -EOPNOTSUPP);
			return kfree_skb(skb);
		}
	}

	if (!ss->commit || !ss->abort) {
		nfnl_unlock(subsys_id);
		netlink_ack(skb, nlh, -EOPNOTSUPP);
		return kfree_skb(skb);
	}

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen, type;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
		    skb->len < nlh->nlmsg_len) {
			err = -EINVAL;
			goto ack;
		}

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
			err = -EINVAL;
			goto ack;
		}

		type = nlh->nlmsg_type;
		if (type == NFNL_MSG_BATCH_BEGIN) {
			/* Malformed: Batch begin twice */
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		} else if (type == NFNL_MSG_BATCH_END) {
			status |= NFNL_BATCH_DONE;
			goto done;
		} else if (type < NLMSG_MIN_TYPE) {
			err = -EINVAL;
			goto ack;
		}

		/* We only accept a batch with messages for the same
		 * subsystem.
		 */
		if (NFNL_SUBSYS_ID(type) != subsys_id) {
			err = -EINVAL;
			goto ack;
		}

		nc = nfnetlink_find_client(type, ss);
		if (!nc) {
			err = -EINVAL;
			goto ack;
		}

		{
			int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
			u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
			struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
			struct nlattr *attr = (void *)nlh + min_len;
			int attrlen = nlh->nlmsg_len - min_len;

			err = nla_parse(cda, ss->cb[cb_id].attr_count,
					attr, attrlen, ss->cb[cb_id].policy);
			if (err < 0)
				goto ack;

			if (nc->call_batch) {
				err = nc->call_batch(net->nfnl, skb, nlh,
						     (const struct nlattr **)cda);
			}

			/* The lock was released to autoload some module, we
			 * have to abort and start from scratch using the
			 * original skb.
			 */
			if (err == -EAGAIN) {
				status |= NFNL_BATCH_REPLAY;
				goto next;
			}
		}
ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
			/* Errors are delivered once the full batch has been
			 * processed, this avoids that the same error is
			 * reported several times when replaying the batch.
			 */
			if (nfnl_err_add(&err_list, nlh, err) < 0) {
				/* We failed to enqueue an error, reset the
				 * list of errors and send OOM to userspace
				 * pointing to the batch header.
				 */
				nfnl_err_reset(&err_list);
				netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
				status |= NFNL_BATCH_FAILURE;
				goto done;
			}
			/* We don't stop processing the batch on errors, thus,
			 * userspace gets all the errors that the batch
			 * triggers.
			 */
			if (err)
				status |= NFNL_BATCH_FAILURE;
		}
next:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}
done:
	if (status & NFNL_BATCH_REPLAY) {
		ss->abort(oskb);
		nfnl_err_reset(&err_list);
		nfnl_unlock(subsys_id);
		kfree_skb(skb);
		goto replay;
	} else if (status == NFNL_BATCH_DONE) {
		ss->commit(oskb);
	} else {
		ss->abort(oskb);
	}

	nfnl_err_deliver(&err_list, oskb);
	nfnl_unlock(subsys_id);
	kfree_skb(skb);
}

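/* Top-level input callback for the NETLINK_NETFILTER socket: batches are
 * handed to nfnetlink_rcv_batch(), everything else goes through the normal
 * per-message path via netlink_rcv_skb().
 */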
static void nfnetlink_rcv(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);
	u_int16_t res_id;
	int msglen;

	if (nlh->nlmsg_len < NLMSG_HDRLEN ||
	    skb->len < nlh->nlmsg_len)
		return;

	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
		netlink_ack(skb, nlh, -EPERM);
		return;
	}

	if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN) {
		struct nfgenmsg *nfgenmsg;

		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;

		if (nlh->nlmsg_len < NLMSG_HDRLEN ||
		    skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
			return;

		nfgenmsg = nlmsg_data(nlh);
		skb_pull(skb, msglen);
		/* Work around old nft using host byte order */
		if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
			res_id = NFNL_SUBSYS_NFTABLES;
		else
			res_id = ntohs(nfgenmsg->res_id);
		nfnetlink_rcv_batch(skb, nlh, res_id);
	} else {
		netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
	}
}

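/* When userspace subscribes to a multicast group whose subsystem has not
 * been loaded yet, try to autoload the corresponding module so the events
 * actually get generated.
 */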
#ifdef CONFIG_MODULES
static int nfnetlink_bind(struct net *net, int group)
{
	const struct nfnetlink_subsystem *ss;
	int type;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return 0;

	type = nfnl_group2type[group];

	rcu_read_lock();
	ss = nfnetlink_get_subsys(type);
	rcu_read_unlock();
	if (!ss)
		request_module("nfnetlink-subsys-%d", type);
	return 0;
}
#endif

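/* Create one NETLINK_NETFILTER kernel socket per network namespace and
 * publish it through net->nfnl once it is fully set up.
 */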
static int __net_init nfnetlink_net_init(struct net *net)
{
	struct sock *nfnl;
	struct netlink_kernel_cfg cfg = {
		.groups	= NFNLGRP_MAX,
		.input	= nfnetlink_rcv,
#ifdef CONFIG_MODULES
		.bind	= nfnetlink_bind,
#endif
	};

	nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
	if (!nfnl)
		return -ENOMEM;
	net->nfnl_stash = nfnl;
	rcu_assign_pointer(net->nfnl, nfnl);
	return 0;
}

static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		RCU_INIT_POINTER(net->nfnl, NULL);
	synchronize_net();
	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->nfnl_stash);
}

static struct pernet_operations nfnetlink_net_ops = {
	.init		= nfnetlink_net_init,
	.exit_batch	= nfnetlink_net_exit_batch,
};

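/* Module init: sanity-check that every multicast group maps to a subsystem,
 * initialize the per-subsystem mutexes and hook into the pernet machinery.
 */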
static int __init nfnetlink_init(void)
{
	int i;

	for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
		BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);

	for (i=0; i<NFNL_SUBSYS_COUNT; i++)
		mutex_init(&table[i].mutex);

	pr_info("Netfilter messages via NETLINK v%s.\n", nfversion);
	return register_pernet_subsys(&nfnetlink_net_ops);
}

static void __exit nfnetlink_exit(void)
{
	pr_info("Removing netfilter NETLINK layer.\n");
	unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);