/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: FIB frontend.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#ifndef CONFIG_IP_MULTIPLE_TABLES

static int __net_init fib4_rules_init(struct net *net)
{
	struct fib_table *local_table, *main_table;

	local_table = fib_trie_table(RT_TABLE_LOCAL);
	if (local_table == NULL)
		return -ENOMEM;

	main_table = fib_trie_table(RT_TABLE_MAIN);
	if (main_table == NULL)
		goto fail;

	hlist_add_head_rcu(&local_table->tb_hlist,
				&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
	hlist_add_head_rcu(&main_table->tb_hlist,
				&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
	return 0;

fail:
	kfree(local_table);
	return -ENOMEM;
}
#else

struct fib_table *fib_new_table(struct net *net, u32 id)
{
	struct fib_table *tb;
	unsigned int h;

	if (id == 0)
		id = RT_TABLE_MAIN;
	tb = fib_get_table(net, id);
	if (tb)
		return tb;

	tb = fib_trie_table(id);
	if (!tb)
		return NULL;
	h = id & (FIB_TABLE_HASHSZ - 1);
	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
	return tb;
}

struct fib_table *fib_get_table(struct net *net, u32 id)
{
	struct fib_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	unsigned int h;

	if (id == 0)
		id = RT_TABLE_MAIN;
	h = id & (FIB_TABLE_HASHSZ - 1);

	rcu_read_lock();
	head = &net->ipv4.fib_table_hash[h];
	hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
		if (tb->tb_id == id) {
			rcu_read_unlock();
			return tb;
		}
	}
	rcu_read_unlock();
	return NULL;
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
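
/* Flush entries from all FIB tables in this namespace and, if anything
 * was removed, invalidate the routing cache as well.
 */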
static void fib_flush(struct net *net)
{
	int flushed = 0;
	struct fib_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry(tb, node, head, tb_hlist)
			flushed += fib_table_flush(tb);
	}

	if (flushed)
		rt_cache_flush(net, -1);
}
/*
 * Find address type as if only "dev" was present in the system. If
 * dev is NULL then all interfaces are taken into consideration.
 */
static inline unsigned __inet_dev_addr_type(struct net *net,
					    const struct net_device *dev,
					    __be32 addr)
{
	struct flowi4		fl4 = { .daddr = addr };
	struct fib_result	res;
	unsigned ret = RTN_BROADCAST;
	struct fib_table *local_table;

	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
		return RTN_BROADCAST;
	if (ipv4_is_multicast(addr))
		return RTN_MULTICAST;

#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r = NULL;
#endif

	local_table = fib_get_table(net, RT_TABLE_LOCAL);
	if (local_table) {
		ret = RTN_UNICAST;
		rcu_read_lock();
		if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
			if (!dev || dev == res.fi->fib_dev)
				ret = res.type;
		}
		rcu_read_unlock();
	}
	return ret;
}
unsigned int inet_addr_type(struct net *net, __be32 addr)
{
	return __inet_dev_addr_type(net, NULL, addr);
}
EXPORT_SYMBOL(inet_addr_type);

unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
				__be32 addr)
{
	return __inet_dev_addr_type(net, dev, addr);
}
EXPORT_SYMBOL(inet_dev_addr_type);
/* Given (packet source, input interface) and optional (dst, oif, tos):
 * - (main) check that the source is valid, i.e. not a broadcast or one
 *   of our local addresses.
 * - figure out what "logical" interface this packet arrived on
 *   and calculate the "specific destination" address.
 * - check that the packet arrived from the expected physical interface.
 * Called with rcu_read_lock().
 */
int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
			struct net_device *dev, __be32 *spec_dst,
			u32 *itag, u32 mark)
{
	struct in_device *in_dev;
	struct flowi4 fl4;
	struct fib_result res;
	int no_addr, rpf, accept_local;
	bool dev_match;
	int ret;
	struct net *net;

	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = oif;
	fl4.flowi4_mark = mark;
	fl4.daddr = src;
	fl4.saddr = dst;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;

	no_addr = rpf = accept_local = 0;
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		no_addr = in_dev->ifa_list == NULL;
		rpf = IN_DEV_RPFILTER(in_dev);
		accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
		if (mark && !IN_DEV_SRC_VMARK(in_dev))
			fl4.flowi4_mark = 0;
	}

	if (in_dev == NULL)
		goto e_inval;

	net = dev_net(dev);
	if (fib_lookup(net, &fl4, &res))
		goto last_resort;
	if (res.type != RTN_UNICAST) {
		if (res.type != RTN_LOCAL || !accept_local)
			goto e_inval;
	}
	*spec_dst = FIB_RES_PREFSRC(net, res);
	fib_combine_itag(itag, &res);
	dev_match = false;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	for (ret = 0; ret < res.fi->fib_nhs; ret++) {
		struct fib_nh *nh = &res.fi->fib_nh[ret];

		if (nh->nh_dev == dev) {
			dev_match = true;
			break;
		}
	}
#else
	if (FIB_RES_DEV(res) == dev)
		dev_match = true;
#endif
	if (dev_match) {
		ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
		return ret;
	}
	if (no_addr)
		goto last_resort;
	if (rpf == 1)
		goto e_rpf;
	fl4.flowi4_oif = dev->ifindex;

	ret = 0;
	if (fib_lookup(net, &fl4, &res) == 0) {
		if (res.type == RTN_UNICAST) {
			*spec_dst = FIB_RES_PREFSRC(net, res);
			ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
		}
	}
	return ret;

last_resort:
	if (rpf)
		goto e_rpf;
	*spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
	*itag = 0;
	return 0;

e_inval:
	return -EINVAL;
e_rpf:
	return -EXDEV;
}
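
/* Helpers for the legacy rtentry (ioctl) interface below. */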
static inline __be32 sk_extract_addr(struct sockaddr *addr)
{
	return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
}

static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
{
	struct nlattr *nla;

	nla = (struct nlattr *) ((char *) mx + len);
	nla->nla_type = type;
	nla->nla_len = nla_attr_size(4);
	*(u32 *) nla_data(nla) = value;

	return len + nla_total_size(4);
}
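
/* Convert a legacy struct rtentry, as passed in by the SIOCADDRT/SIOCDELRT
 * ioctls, into a struct fib_config understood by the FIB tables.
 */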
static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
				 struct fib_config *cfg)
{
	__be32 addr;
	int plen;

	memset(cfg, 0, sizeof(*cfg));
	cfg->fc_nlinfo.nl_net = net;

	if (rt->rt_dst.sa_family != AF_INET)
		return -EAFNOSUPPORT;

	/*
	 * Check mask for validity:
	 * a) it must be contiguous.
	 * b) destination must have all host bits clear.
	 * c) if application forgot to set correct family (AF_INET),
	 *    reject request unless it is absolutely clear i.e.
	 *    both family and mask are zero.
	 */
	plen = 32;
	addr = sk_extract_addr(&rt->rt_dst);
	if (!(rt->rt_flags & RTF_HOST)) {
		__be32 mask = sk_extract_addr(&rt->rt_genmask);

		if (rt->rt_genmask.sa_family != AF_INET) {
			if (mask || rt->rt_genmask.sa_family)
				return -EAFNOSUPPORT;
		}

		if (bad_mask(mask, addr))
			return -EINVAL;

		plen = inet_mask_len(mask);
	}

	cfg->fc_dst_len = plen;
	cfg->fc_dst = addr;

	if (cmd != SIOCDELRT) {
		cfg->fc_nlflags = NLM_F_CREATE;
		cfg->fc_protocol = RTPROT_BOOT;
	}

	if (rt->rt_metric)
		cfg->fc_priority = rt->rt_metric - 1;

	if (rt->rt_flags & RTF_REJECT) {
		cfg->fc_scope = RT_SCOPE_HOST;
		cfg->fc_type = RTN_UNREACHABLE;
		return 0;
	}

	cfg->fc_scope = RT_SCOPE_NOWHERE;
	cfg->fc_type = RTN_UNICAST;

	if (rt->rt_dev) {
		char *colon;
		struct net_device *dev;
		char devname[IFNAMSIZ];

		if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
			return -EFAULT;

		devname[IFNAMSIZ-1] = 0;
		colon = strchr(devname, ':');
		if (colon)
			*colon = 0;
		dev = __dev_get_by_name(net, devname);
		if (!dev)
			return -ENODEV;
		cfg->fc_oif = dev->ifindex;
		if (colon) {
			struct in_ifaddr *ifa;
			struct in_device *in_dev = __in_dev_get_rtnl(dev);
			if (!in_dev)
				return -ENODEV;
			*colon = ':';
			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
				if (strcmp(ifa->ifa_label, devname) == 0)
					break;
			if (ifa == NULL)
				return -ENODEV;
			cfg->fc_prefsrc = ifa->ifa_local;
		}
	}

	addr = sk_extract_addr(&rt->rt_gateway);
	if (rt->rt_gateway.sa_family == AF_INET && addr) {
		cfg->fc_gw = addr;
		if (rt->rt_flags & RTF_GATEWAY &&
		    inet_addr_type(net, addr) == RTN_UNICAST)
			cfg->fc_scope = RT_SCOPE_UNIVERSE;
	}

	if (cmd == SIOCDELRT)
		return 0;

	if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
		return -EINVAL;

	if (cfg->fc_scope == RT_SCOPE_NOWHERE)
		cfg->fc_scope = RT_SCOPE_LINK;

	if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
		struct nlattr *mx;
		int len = 0;

		mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
		if (mx == NULL)
			return -ENOMEM;

		if (rt->rt_flags & RTF_MTU)
			len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);

		if (rt->rt_flags & RTF_WINDOW)
			len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);

		if (rt->rt_flags & RTF_IRTT)
			len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);

		cfg->fc_mx = mx;
		cfg->fc_mx_len = len;
	}

	return 0;
}
/*
 * Handle IP routing ioctl calls.
 * These are used to manipulate the routing tables.
 */
int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib_config cfg;
	struct rtentry rt;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&rt, arg, sizeof(rt)))
			return -EFAULT;

		rtnl_lock();
		err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
		if (err == 0) {
			struct fib_table *tb;

			if (cmd == SIOCDELRT) {
				tb = fib_get_table(net, cfg.fc_table);
				if (tb)
					err = fib_table_delete(tb, &cfg);
				else
					err = -ESRCH;
			} else {
				tb = fib_new_table(net, cfg.fc_table);
				if (tb)
					err = fib_table_insert(tb, &cfg);
				else
					err = -ENOBUFS;
			}

			/* allocated by rtentry_to_fib_config() */
			kfree(cfg.fc_mx);
		}
		rtnl_unlock();
		return err;
	}
	return -EINVAL;
}
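
/* Netlink attribute policy for IPv4 route attributes; used by
 * nlmsg_validate() in rtm_to_fib_config() below.
 */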
const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
	[RTA_DST]		= { .type = NLA_U32 },
	[RTA_SRC]		= { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_GATEWAY]		= { .type = NLA_U32 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_PREFSRC]		= { .type = NLA_U32 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_FLOW]		= { .type = NLA_U32 },
};
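
/* Convert an RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 * struct fib_config, validating its attributes against rtm_ipv4_policy.
 */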
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
			     struct nlmsghdr *nlh, struct fib_config *cfg)
{
	struct nlattr *attr;
	int err, remaining;
	struct rtmsg *rtm;

	err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	memset(cfg, 0, sizeof(*cfg));

	rtm = nlmsg_data(nlh);
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_tos = rtm->rtm_tos;
	cfg->fc_table = rtm->rtm_table;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_scope = rtm->rtm_scope;
	cfg->fc_type = rtm->rtm_type;
	cfg->fc_flags = rtm->rtm_flags;
	cfg->fc_nlflags = nlh->nlmsg_flags;

	cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = net;

	if (cfg->fc_type > RTN_MAX) {
		err = -EINVAL;
		goto errout;
	}

	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
		switch (nla_type(attr)) {
		case RTA_DST:
			cfg->fc_dst = nla_get_be32(attr);
			break;
		case RTA_OIF:
			cfg->fc_oif = nla_get_u32(attr);
			break;
		case RTA_GATEWAY:
			cfg->fc_gw = nla_get_be32(attr);
			break;
		case RTA_PRIORITY:
			cfg->fc_priority = nla_get_u32(attr);
			break;
		case RTA_PREFSRC:
			cfg->fc_prefsrc = nla_get_be32(attr);
			break;
		case RTA_METRICS:
			cfg->fc_mx = nla_data(attr);
			cfg->fc_mx_len = nla_len(attr);
			break;
		case RTA_MULTIPATH:
			cfg->fc_mp = nla_data(attr);
			cfg->fc_mp_len = nla_len(attr);
			break;
		case RTA_FLOW:
			cfg->fc_flow = nla_get_u32(attr);
			break;
		case RTA_TABLE:
			cfg->fc_table = nla_get_u32(attr);
			break;
		}
	}

	return 0;
errout:
	return err;
}
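
/* rtnetlink handlers for RTM_DELROUTE and RTM_NEWROUTE, registered
 * from ip_fib_init() below.
 */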
static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_config cfg;
	struct fib_table *tb;
	int err;

	err = rtm_to_fib_config(net, skb, nlh, &cfg);
	if (err < 0)
		goto errout;

	tb = fib_get_table(net, cfg.fc_table);
	if (tb == NULL) {
		err = -ESRCH;
		goto errout;
	}

	err = fib_table_delete(tb, &cfg);
errout:
	return err;
}

static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_config cfg;
	struct fib_table *tb;
	int err;

	err = rtm_to_fib_config(net, skb, nlh, &cfg);
	if (err < 0)
		goto errout;

	tb = fib_new_table(net, cfg.fc_table);
	if (tb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	err = fib_table_insert(tb, &cfg);
errout:
	return err;
}
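
/* Dump all FIB tables to userspace for RTM_GETROUTE dump requests,
 * using cb->args[] to resume across multiple netlink messages.
 */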
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct fib_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	int dumped = 0;

	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
	    ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
		return ip_rt_dump(skb, cb);

	s_h = cb->args[0];
	s_e = cb->args[1];

	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
		e = 0;
		head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry(tb, node, head, tb_hlist) {
			if (e < s_e)
				goto next;
			if (dumped)
				memset(&cb->args[2], 0, sizeof(cb->args) -
						 2 * sizeof(cb->args[0]));
			if (fib_table_dump(tb, skb, cb) < 0)
				goto out;
			dumped = 1;
next:
			e++;
		}
	}
out:
	cb->args[1] = e;
	cb->args[0] = h;

	return skb->len;
}
/* Prepare and feed an intra-kernel routing request.
 * Really, it should be a netlink message, but :-( netlink
 * can be left unconfigured, so we feed the request directly
 * to the fib engine. This is legal because all such events
 * occur only when netlink is already locked.
 */
static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
{
	struct net *net = dev_net(ifa->ifa_dev->dev);
	struct fib_table *tb;
	struct fib_config cfg = {
		.fc_protocol = RTPROT_KERNEL,
		.fc_type = type,
		.fc_dst = dst,
		.fc_dst_len = dst_len,
		.fc_prefsrc = ifa->ifa_local,
		.fc_oif = ifa->ifa_dev->dev->ifindex,
		.fc_nlflags = NLM_F_CREATE | NLM_F_APPEND,
		.fc_nlinfo = {
			.nl_net = net,
		},
	};

	if (type == RTN_UNICAST)
		tb = fib_new_table(net, RT_TABLE_MAIN);
	else
		tb = fib_new_table(net, RT_TABLE_LOCAL);

	if (tb == NULL)
		return;

	cfg.fc_table = tb->tb_id;

	if (type != RTN_LOCAL)
		cfg.fc_scope = RT_SCOPE_LINK;
	else
		cfg.fc_scope = RT_SCOPE_HOST;

	if (cmd == RTM_NEWROUTE)
		fib_table_insert(tb, &cfg);
	else
		fib_table_delete(tb, &cfg);
}
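
/* Install the local, broadcast and network routes that correspond to
 * an interface address being added or brought up.
 */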
void fib_add_ifaddr(struct in_ifaddr *ifa)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;
	struct in_ifaddr *prim = ifa;
	__be32 mask = ifa->ifa_mask;
	__be32 addr = ifa->ifa_local;
	__be32 prefix = ifa->ifa_address & mask;

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		prim = inet_ifa_byprefix(in_dev, prefix, mask);
		if (prim == NULL) {
			printk(KERN_WARNING "fib_add_ifaddr: bug: prim == NULL\n");
			return;
		}
	}

	fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim);

	if (!(dev->flags & IFF_UP))
		return;

	/* Add broadcast address, if it is explicitly assigned. */
	if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
		fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);

	if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
	    (prefix != addr || ifa->ifa_prefixlen < 32)) {
		fib_magic(RTM_NEWROUTE,
			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
			  prefix, ifa->ifa_prefixlen, prim);

		/* Add network-specific broadcasts, where they make sense */
		if (ifa->ifa_prefixlen < 31) {
			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
				  32, prim);
		}
	}
}
/* Delete a primary or secondary address.
 * Optionally, on secondary address promotion, consider the addresses
 * from subnet iprim as deleted, even if they are still in the device
 * list. In this case the secondary ifa can still be in the device list.
 */
void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;
	struct in_ifaddr *ifa1;
	struct in_ifaddr *prim = ifa, *prim1 = NULL;
	__be32 brd = ifa->ifa_address | ~ifa->ifa_mask;
	__be32 any = ifa->ifa_address & ifa->ifa_mask;
#define LOCAL_OK	1
#define BRD_OK		2
#define BRD0_OK		4
#define BRD1_OK		8
	unsigned ok = 0;
	int subnet = 0;		/* Primary network */
	int gone = 1;		/* Address is missing */
	int same_prefsrc = 0;	/* Another primary with same IP */

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
		if (prim == NULL) {
			printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n");
			return;
		}
		if (iprim && iprim != prim) {
			printk(KERN_WARNING "fib_del_ifaddr: bug: iprim != prim\n");
			return;
		}
	} else if (!ipv4_is_zeronet(any) &&
		   (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
		fib_magic(RTM_DELROUTE,
			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
			  any, ifa->ifa_prefixlen, prim);
		subnet = 1;
	}

	/* Deletion is more complicated than add.
	 * We have to take care not to delete too much. :-)
	 *
	 * Scan the address list to be sure that addresses are really gone.
	 */

	for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
		if (ifa1 == ifa) {
			/* promotion, keep the IP */
			gone = 0;
			continue;
		}
		/* Ignore IFAs from our subnet */
		if (iprim && ifa1->ifa_mask == iprim->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, iprim))
			continue;

		/* Ignore ifa1 if it uses a different primary IP (prefsrc) */
		if (ifa1->ifa_flags & IFA_F_SECONDARY) {
			/* Another address from our subnet? */
			if (ifa1->ifa_mask == prim->ifa_mask &&
			    inet_ifa_match(ifa1->ifa_address, prim))
				prim1 = prim;
			else {
				/* We reached the secondaries, so
				 * same_prefsrc should be determined.
				 */
				if (!same_prefsrc)
					continue;
				/* Search for a new prim1 if ifa1 is not
				 * using the current prim1
				 */
				if (!prim1 ||
				    ifa1->ifa_mask != prim1->ifa_mask ||
				    !inet_ifa_match(ifa1->ifa_address, prim1))
					prim1 = inet_ifa_byprefix(in_dev,
							ifa1->ifa_address,
							ifa1->ifa_mask);
				if (!prim1)
					continue;
				if (prim1->ifa_local != prim->ifa_local)
					continue;
			}
		} else {
			if (prim->ifa_local != ifa1->ifa_local)
				continue;
			prim1 = ifa1;
			if (prim != prim1)
				same_prefsrc = 1;
		}
		if (ifa->ifa_local == ifa1->ifa_local)
			ok |= LOCAL_OK;
		if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
			ok |= BRD_OK;
		if (brd == ifa1->ifa_broadcast)
			ok |= BRD1_OK;
		if (any == ifa1->ifa_broadcast)
			ok |= BRD0_OK;
		/* the primary has network-specific broadcasts */
		if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) {
			__be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask;
			__be32 any1 = ifa1->ifa_address & ifa1->ifa_mask;

			if (!ipv4_is_zeronet(any1)) {
				if (ifa->ifa_broadcast == brd1 ||
				    ifa->ifa_broadcast == any1)
					ok |= BRD_OK;
				if (brd == brd1 || brd == any1)
					ok |= BRD1_OK;
				if (any == brd1 || any == any1)
					ok |= BRD0_OK;
			}
		}
	}

	if (!(ok & BRD_OK))
		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
	if (subnet && ifa->ifa_prefixlen < 31) {
		if (!(ok & BRD1_OK))
			fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
		if (!(ok & BRD0_OK))
			fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
	}
	if (!(ok & LOCAL_OK)) {
		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);

		/* Check that this local address finally disappeared. */
		if (gone &&
		    inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
			/* And last, but not least, we must flush
			 * stray FIB entries.
			 *
			 * First of all, we scan the fib_info list searching
			 * for stray nexthop entries, then ignite fib_flush.
			 */
			if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
				fib_flush(dev_net(dev));
		}
	}
#undef LOCAL_OK
#undef BRD_OK
#undef BRD0_OK
#undef BRD1_OK
}
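
/* Perform a single FIB lookup on behalf of a NETLINK_FIB_LOOKUP
 * request and store the result back into the request structure.
 */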
static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
{
	struct fib_result	res;
	struct flowi4		fl4 = {
		.flowi4_mark = frn->fl_mark,
		.daddr = frn->fl_addr,
		.flowi4_tos = frn->fl_tos,
		.flowi4_scope = frn->fl_scope,
	};

#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r = NULL;
#endif

	frn->err = -ENOENT;
	if (tb) {
		local_bh_disable();

		frn->tb_id = tb->tb_id;
		rcu_read_lock();
		frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);

		if (!frn->err) {
			frn->prefixlen = res.prefixlen;
			frn->nh_sel = res.nh_sel;
			frn->type = res.type;
			frn->scope = res.scope;
		}
		rcu_read_unlock();
		local_bh_enable();
	}
}
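
/* Receive handler for the NETLINK_FIB_LOOKUP socket: validate the
 * message, perform the lookup and unicast the result back to the sender.
 */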
static void nl_fib_input(struct sk_buff *skb)
{
	struct net *net;
	struct fib_result_nl *frn;
	struct nlmsghdr *nlh;
	struct fib_table *tb;
	u32 pid;

	net = sock_net(skb->sk);
	nlh = nlmsg_hdr(skb);
	if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
	    nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn)))
		return;

	skb = skb_clone(skb, GFP_KERNEL);
	if (skb == NULL)
		return;
	nlh = nlmsg_hdr(skb);

	frn = (struct fib_result_nl *) NLMSG_DATA(nlh);
	tb = fib_get_table(net, frn->tb_id_in);

	nl_fib_lookup(frn, tb);

	pid = NETLINK_CB(skb).pid;	/* pid of sending process */
	NETLINK_CB(skb).pid = 0;	/* from kernel */
	NETLINK_CB(skb).dst_group = 0;	/* unicast */
	netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
}
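
/* Per-namespace setup/teardown of the NETLINK_FIB_LOOKUP kernel socket. */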
static int __net_init nl_fib_lookup_init(struct net *net)
{
	struct sock *sk;
	sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
				   nl_fib_input, NULL, THIS_MODULE);
	if (sk == NULL)
		return -EAFNOSUPPORT;
	net->ipv4.fibnl = sk;
	return 0;
}

static void nl_fib_lookup_exit(struct net *net)
{
	netlink_kernel_release(net->ipv4.fibnl);
	net->ipv4.fibnl = NULL;
}
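
/* Drop all routes that go through this device, then flush the routing
 * cache and the device's ARP entries.
 */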
static void fib_disable_ip(struct net_device *dev, int force, int delay)
{
	if (fib_sync_down_dev(dev, force))
		fib_flush(dev_net(dev));
	rt_cache_flush(dev_net(dev), delay);
	arp_ifdown(dev);
}
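
/* inetaddr notifier: keep the FIB in sync when addresses are added to
 * or removed from an interface.
 */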
static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct net *net = dev_net(dev);

	switch (event) {
	case NETDEV_UP:
		fib_add_ifaddr(ifa);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		fib_sync_up(dev);
#endif
		atomic_inc(&net->ipv4.dev_addr_genid);
		rt_cache_flush(dev_net(dev), -1);
		break;
	case NETDEV_DOWN:
		fib_del_ifaddr(ifa, NULL);
		atomic_inc(&net->ipv4.dev_addr_genid);
		if (ifa->ifa_dev->ifa_list == NULL) {
			/* Last address was deleted from this interface.
			 * Disable IP.
			 */
			fib_disable_ip(dev, 1, 0);
		} else {
			rt_cache_flush(dev_net(dev), -1);
		}
		break;
	}
	return NOTIFY_DONE;
}
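
/* netdevice notifier: react to interface state changes (up, down,
 * unregister, MTU or flag changes) that affect the FIB and routing cache.
 */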
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct in_device *in_dev = __in_dev_get_rtnl(dev);
	struct net *net = dev_net(dev);

	if (event == NETDEV_UNREGISTER) {
		fib_disable_ip(dev, 2, -1);
		return NOTIFY_DONE;
	}

	if (!in_dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		for_ifa(in_dev) {
			fib_add_ifaddr(ifa);
		} endfor_ifa(in_dev);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		fib_sync_up(dev);
#endif
		atomic_inc(&net->ipv4.dev_addr_genid);
		rt_cache_flush(dev_net(dev), -1);
		break;
	case NETDEV_DOWN:
		fib_disable_ip(dev, 0, 0);
		break;
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGE:
		rt_cache_flush(dev_net(dev), 0);
		break;
	case NETDEV_UNREGISTER_BATCH:
		/* The batch unregister is only called on the first
		 * device in the list of devices being unregistered.
		 * Therefore we should not pass dev_net(dev) in here.
		 */
		rt_cache_flush_batch(NULL);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block fib_inetaddr_notifier = {
	.notifier_call = fib_inetaddr_event,
};

static struct notifier_block fib_netdev_notifier = {
	.notifier_call = fib_netdev_event,
};
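
/* Per-namespace FIB init: allocate the table hash and set up the
 * default tables via fib4_rules_init().
 */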
static int __net_init ip_fib_net_init(struct net *net)
{
	int err;
	size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ;

	/* Avoid false sharing: use at least a full cache line */
	size = max_t(size_t, size, L1_CACHE_BYTES);

	net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
	if (net->ipv4.fib_table_hash == NULL)
		return -ENOMEM;

	err = fib4_rules_init(net);
	if (err < 0)
		goto fail;
	return 0;

fail:
	kfree(net->ipv4.fib_table_hash);
	return err;
}

static void ip_fib_net_exit(struct net *net)
{
	unsigned int i;

#ifdef CONFIG_IP_MULTIPLE_TABLES
	fib4_rules_exit(net);
#endif

	rtnl_lock();
	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
		struct fib_table *tb;
		struct hlist_head *head;
		struct hlist_node *node, *tmp;

		head = &net->ipv4.fib_table_hash[i];
		hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
			hlist_del(node);
			fib_table_flush(tb);
			fib_free_table(tb);
		}
	}
	rtnl_unlock();
	kfree(net->ipv4.fib_table_hash);
}
static int __net_init fib_net_init(struct net *net)
{
	int error;

	error = ip_fib_net_init(net);
	if (error < 0)
		goto out;
	error = nl_fib_lookup_init(net);
	if (error < 0)
		goto out_nlfl;
	error = fib_proc_init(net);
	if (error < 0)
		goto out_proc;
out:
	return error;

out_proc:
	nl_fib_lookup_exit(net);
out_nlfl:
	ip_fib_net_exit(net);
	goto out;
}

static void __net_exit fib_net_exit(struct net *net)
{
	fib_proc_exit(net);
	nl_fib_lookup_exit(net);
	ip_fib_net_exit(net);
}

static struct pernet_operations fib_net_ops = {
	.init = fib_net_init,
	.exit = fib_net_exit,
};
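
/* Module init: register the rtnetlink handlers, the per-namespace
 * operations and the notifiers defined above.
 */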
void __init ip_fib_init(void)
{
	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL);
	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL);
	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib);

	register_pernet_subsys(&fib_net_ops);
	register_netdevice_notifier(&fib_netdev_notifier);
	register_inetaddr_notifier(&fib_inetaddr_notifier);

	fib_trie_init();
}