/*
 *	NET3	IP device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		Alexey Kuznetsov:	pa_* fields are replaced with ifaddr
 *		Cyrus Durgin:		updated for kmod
 *		Matthias Andree:	in devinet_ioctl, compare label and
 *					address (4.4BSD alias style support),
 *					fall back to comparing just the label
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_addr.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysctl.h>
#include <linux/kmod.h>
#include <linux/netconf.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/addrconf.h>

#include "fib_lookup.h"
static struct ipv4_devconf ipv4_devconf = {
	.data = {
		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
	},
};
static struct ipv4_devconf ipv4_devconf_dflt = {
	.data = {
		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
		[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
	},
};

#define IPV4_DEVCONF_DFLT(net, attr) \
	IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
	[IFA_LOCAL]     	= { .type = NLA_U32 },
	[IFA_ADDRESS]   	= { .type = NLA_U32 },
	[IFA_BROADCAST] 	= { .type = NLA_U32 },
	[IFA_LABEL]     	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
	[IFA_FLAGS]		= { .type = NLA_U32 },
};
#define IN4_ADDR_HSIZE_SHIFT	8
#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)

static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
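/*
 * Note: IN4_ADDR_HSIZE expands to 1U << 8 == 256, so inet_addr_lst is a
 * 256-bucket hash table of all configured IPv4 addresses, keyed by the
 * local address (see inet_addr_hash() below).
 */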
static u32 inet_addr_hash(const struct net *net, __be32 addr)
{
	u32 val = (__force u32) addr ^ net_hash_mix(net);

	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
}
static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
{
	u32 hash = inet_addr_hash(net, ifa->ifa_local);

	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
}

static void inet_hash_remove(struct in_ifaddr *ifa)
{
	hlist_del_init_rcu(&ifa->hash);
}
/**
 * __ip_dev_find - find the first device with a given source address.
 * @net: the net namespace
 * @addr: the source address
 * @devref: if true, take a reference on the found device
 *
 * If a caller uses devref=false, it should be protected by RCU, or RTNL
 */
struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
	u32 hash = inet_addr_hash(net, addr);
	struct net_device *result = NULL;
	struct in_ifaddr *ifa;

	rcu_read_lock();
	hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
		if (ifa->ifa_local == addr) {
			struct net_device *dev = ifa->ifa_dev->dev;

			if (!net_eq(dev_net(dev), net))
				continue;
			result = dev;
			break;
		}
	}
	if (!result) {
		struct flowi4 fl4 = { .daddr = addr };
		struct fib_result res = { 0 };
		struct fib_table *local;

		/* Fallback to FIB local table so that communication
		 * over loopback subnets work.
		 */
		local = fib_get_table(net, RT_TABLE_LOCAL);
		if (local &&
		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
		    res.type == RTN_LOCAL)
			result = FIB_RES_DEV(res);
	}
	if (result && devref)
		dev_hold(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(__ip_dev_find);
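/*
 * Example use (a minimal sketch; with devref=true the caller gets a held
 * reference and therefore needs no extra RCU protection, but must drop the
 * reference with dev_put() when done):
 *
 *	struct net_device *dev = __ip_dev_find(net, addr, true);
 *
 *	if (dev) {
 *		pr_debug("%pI4 is a local address on %s\n", &addr, dev->name);
 *		dev_put(dev);
 *	}
 */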
static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);

static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			 int destroy);
#ifdef CONFIG_SYSCTL
static int devinet_sysctl_register(struct in_device *idev);
static void devinet_sysctl_unregister(struct in_device *idev);
#else
static int devinet_sysctl_register(struct in_device *idev)
{
	return 0;
}
static void devinet_sysctl_unregister(struct in_device *idev)
{
}
#endif
/* Locks all the inet devices. */

static struct in_ifaddr *inet_alloc_ifa(void)
{
	return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
}

static void inet_rcu_free_ifa(struct rcu_head *head)
{
	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);

	in_dev_put(ifa->ifa_dev);
	kfree(ifa);
}

static void inet_free_ifa(struct in_ifaddr *ifa)
{
	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
}
void in_dev_finish_destroy(struct in_device *idev)
{
	struct net_device *dev = idev->dev;

	WARN_ON(idev->ifa_list);
	WARN_ON(idev->mc_list);
	kfree(rcu_dereference_protected(idev->mc_hash, 1));
#ifdef NET_REFCNT_DEBUG
	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
	dev_put(dev);
	if (!idev->dead)
		pr_err("Freeing alive in_device %p\n", idev);
	else
		kfree(idev);
}
EXPORT_SYMBOL(in_dev_finish_destroy);
static struct in_device *inetdev_init(struct net_device *dev)
{
	struct in_device *in_dev;
	int err = -ENOMEM;

	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
	if (!in_dev)
		goto out;
	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
	       sizeof(in_dev->cnf));
	in_dev->cnf.sysctl = NULL;
	in_dev->dev = dev;
	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
	if (!in_dev->arp_parms)
		goto out_kfree;
	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
		dev_disable_lro(dev);
	/* Reference in_dev->dev */
	dev_hold(dev);
	/* Account for reference dev->ip_ptr (below) */
	in_dev_hold(in_dev);

	err = devinet_sysctl_register(in_dev);
	if (err) {
		in_dev->dead = 1;
		in_dev_put(in_dev);
		in_dev = NULL;
		goto out;
	}
	ip_mc_init_dev(in_dev);
	if (dev->flags & IFF_UP)
		ip_mc_up(in_dev);

	/* we can receive as soon as ip_ptr is set -- do this last */
	rcu_assign_pointer(dev->ip_ptr, in_dev);
out:
	return in_dev ?: ERR_PTR(err);
out_kfree:
	kfree(in_dev);
	in_dev = NULL;
	goto out;
}
static void in_dev_rcu_put(struct rcu_head *head)
{
	struct in_device *idev = container_of(head, struct in_device, rcu_head);

	in_dev_finish_destroy(idev);
}

static void inetdev_destroy(struct in_device *in_dev)
{
	struct in_ifaddr *ifa;
	struct net_device *dev;

	dev = in_dev->dev;

	in_dev->dead = 1;

	ip_mc_destroy_dev(in_dev);

	while ((ifa = in_dev->ifa_list) != NULL) {
		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
		inet_free_ifa(ifa);
	}

	RCU_INIT_POINTER(dev->ip_ptr, NULL);

	devinet_sysctl_unregister(in_dev);
	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
	arp_ifdown(dev);

	call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
}
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
{
	rcu_read_lock();
	for_primary_ifa(in_dev) {
		if (inet_ifa_match(a, ifa)) {
			if (!b || inet_ifa_match(b, ifa)) {
				rcu_read_unlock();
				return 1;
			}
		}
	} endfor_ifa(in_dev);
	rcu_read_unlock();
	return 0;
}
static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			   int destroy, struct nlmsghdr *nlh, u32 portid)
{
	struct in_ifaddr *promote = NULL;
	struct in_ifaddr *ifa, *ifa1 = *ifap;
	struct in_ifaddr *last_prim = in_dev->ifa_list;
	struct in_ifaddr *prev_prom = NULL;
	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);

	/* 1. Deleting primary ifaddr forces deletion all secondaries
	 * unless alias promotion is set
	 */

	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
		struct in_ifaddr **ifap1 = &ifa1->ifa_next;

		while ((ifa = *ifap1) != NULL) {
			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
			    ifa1->ifa_scope <= ifa->ifa_scope)
				last_prim = ifa;

			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
			    ifa1->ifa_mask != ifa->ifa_mask ||
			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
				ifap1 = &ifa->ifa_next;
				prev_prom = ifa;
				continue;
			}

			if (!do_promote) {
				inet_hash_remove(ifa);
				*ifap1 = ifa->ifa_next;

				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
				blocking_notifier_call_chain(&inetaddr_chain,
						NETDEV_DOWN, ifa);
				inet_free_ifa(ifa);
			} else {
				promote = ifa;
				break;
			}
		}
	}

	/* On promotion all secondaries from subnet are changing
	 * the primary IP, we must remove all their routes silently
	 * and later to add them back with new prefsrc. Do this
	 * while all addresses are on the device list.
	 */
	for (ifa = promote; ifa; ifa = ifa->ifa_next) {
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa))
			fib_del_ifaddr(ifa, ifa1);
	}

	*ifap = ifa1->ifa_next;
	inet_hash_remove(ifa1);

	/* 3. Announce address deletion */

	/* Send message first, then call notifier.
	   At first sight, FIB update triggered by notifier
	   will refer to already deleted ifaddr, that could confuse
	   netlink listeners. It is not true: look, gated sees
	   that route deleted and if it still thinks that ifaddr
	   is valid, it will try to restore deleted routes... Grr.
	   So that, this order is correct.
	 */
	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);

	if (promote) {
		struct in_ifaddr *next_sec = promote->ifa_next;

		if (prev_prom) {
			prev_prom->ifa_next = promote->ifa_next;
			promote->ifa_next = last_prim->ifa_next;
			last_prim->ifa_next = promote;
		}

		promote->ifa_flags &= ~IFA_F_SECONDARY;
		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
		blocking_notifier_call_chain(&inetaddr_chain,
				NETDEV_UP, promote);
		for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
			if (ifa1->ifa_mask != ifa->ifa_mask ||
			    !inet_ifa_match(ifa1->ifa_address, ifa))
				continue;
			fib_add_ifaddr(ifa);
		}
	}
	if (destroy)
		inet_free_ifa(ifa1);
}
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			 int destroy)
{
	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
}

static void check_lifetime(struct work_struct *work);

static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
			     u32 portid)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct in_ifaddr *ifa1, **ifap, **last_primary;

	if (!ifa->ifa_local) {
		inet_free_ifa(ifa);
		return 0;
	}

	ifa->ifa_flags &= ~IFA_F_SECONDARY;
	last_primary = &in_dev->ifa_list;

	for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
	     ifap = &ifa1->ifa_next) {
		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
		    ifa->ifa_scope <= ifa1->ifa_scope)
			last_primary = &ifa1->ifa_next;
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa)) {
			if (ifa1->ifa_local == ifa->ifa_local) {
				inet_free_ifa(ifa);
				return -EEXIST;
			}
			if (ifa1->ifa_scope != ifa->ifa_scope) {
				inet_free_ifa(ifa);
				return -EINVAL;
			}
			ifa->ifa_flags |= IFA_F_SECONDARY;
		}
	}

	if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
		prandom_seed((__force u32) ifa->ifa_local);
		ifap = last_primary;
	}

	ifa->ifa_next = *ifap;
	*ifap = ifa;

	inet_hash_insert(dev_net(in_dev->dev), ifa);

	cancel_delayed_work(&check_lifetime_work);
	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);

	/* Send message first, then call notifier.
	   Notifier will trigger FIB update, so that
	   listeners of netlink will know about new ifaddr */
	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);

	return 0;
}

static int inet_insert_ifa(struct in_ifaddr *ifa)
{
	return __inet_insert_ifa(ifa, NULL, 0);
}
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
	struct in_device *in_dev = __in_dev_get_rtnl(dev);

	if (!in_dev) {
		inet_free_ifa(ifa);
		return -ENOBUFS;
	}
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	if (ifa->ifa_dev != in_dev) {
		WARN_ON(ifa->ifa_dev);
		in_dev_hold(in_dev);
		ifa->ifa_dev = in_dev;
	}
	if (ipv4_is_loopback(ifa->ifa_local))
		ifa->ifa_scope = RT_SCOPE_HOST;
	return inet_insert_ifa(ifa);
}
/* Caller must hold RCU or RTNL :
 * We dont take a reference on found in_device
 */
struct in_device *inetdev_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct in_device *in_dev = NULL;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		in_dev = rcu_dereference_rtnl(dev->ip_ptr);
	rcu_read_unlock();
	return in_dev;
}
EXPORT_SYMBOL(inetdev_by_index);
/* Called only from RTNL semaphored context. No locks. */

struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
				    __be32 mask)
{
	for_primary_ifa(in_dev) {
		if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
			return ifa;
	} endfor_ifa(in_dev);
	return NULL;
}
static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
{
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = ifa->ifa_address,
		.imr_ifindex = ifa->ifa_dev->dev->ifindex,
	};
	int ret;

	lock_sock(sk);
	if (join)
		ret = ip_mc_join_group(sk, &mreq);
	else
		ret = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);

	return ret;
}
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX+1];
	struct in_device *in_dev;
	struct ifaddrmsg *ifm;
	struct in_ifaddr *ifa, **ifap;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
	if (err < 0)
		goto errout;

	ifm = nlmsg_data(nlh);
	in_dev = inetdev_by_index(net, ifm->ifa_index);
	if (!in_dev) {
		err = -ENODEV;
		goto errout;
	}

	for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
	     ifap = &ifa->ifa_next) {
		if (tb[IFA_LOCAL] &&
		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
			continue;

		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
			continue;

		if (tb[IFA_ADDRESS] &&
		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
		     !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
			continue;

		if (ipv4_is_multicast(ifa->ifa_address))
			ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
		return 0;
	}

	err = -EADDRNOTAVAIL;
errout:
	return err;
}
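/*
 * A userspace "ip addr del 192.0.2.1/24 dev eth0" (a hypothetical example,
 * not from this file) reaches this handler as an RTM_DELADDR message carrying
 * IFA_LOCAL and the prefix length; the loop above finds the matching ifaddr
 * and __inet_del_ifa() tears it down.
 */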
#define INFINITY_LIFE_TIME	0xFFFFFFFF
624 static void check_lifetime(struct work_struct
*work
)
626 unsigned long now
, next
, next_sec
, next_sched
;
627 struct in_ifaddr
*ifa
;
628 struct hlist_node
*n
;
632 next
= round_jiffies_up(now
+ ADDR_CHECK_FREQUENCY
);
634 for (i
= 0; i
< IN4_ADDR_HSIZE
; i
++) {
635 bool change_needed
= false;
638 hlist_for_each_entry_rcu(ifa
, &inet_addr_lst
[i
], hash
) {
641 if (ifa
->ifa_flags
& IFA_F_PERMANENT
)
644 /* We try to batch several events at once. */
645 age
= (now
- ifa
->ifa_tstamp
+
646 ADDRCONF_TIMER_FUZZ_MINUS
) / HZ
;
648 if (ifa
->ifa_valid_lft
!= INFINITY_LIFE_TIME
&&
649 age
>= ifa
->ifa_valid_lft
) {
650 change_needed
= true;
651 } else if (ifa
->ifa_preferred_lft
==
652 INFINITY_LIFE_TIME
) {
654 } else if (age
>= ifa
->ifa_preferred_lft
) {
655 if (time_before(ifa
->ifa_tstamp
+
656 ifa
->ifa_valid_lft
* HZ
, next
))
657 next
= ifa
->ifa_tstamp
+
658 ifa
->ifa_valid_lft
* HZ
;
660 if (!(ifa
->ifa_flags
& IFA_F_DEPRECATED
))
661 change_needed
= true;
662 } else if (time_before(ifa
->ifa_tstamp
+
663 ifa
->ifa_preferred_lft
* HZ
,
665 next
= ifa
->ifa_tstamp
+
666 ifa
->ifa_preferred_lft
* HZ
;
673 hlist_for_each_entry_safe(ifa
, n
, &inet_addr_lst
[i
], hash
) {
676 if (ifa
->ifa_flags
& IFA_F_PERMANENT
)
679 /* We try to batch several events at once. */
680 age
= (now
- ifa
->ifa_tstamp
+
681 ADDRCONF_TIMER_FUZZ_MINUS
) / HZ
;
683 if (ifa
->ifa_valid_lft
!= INFINITY_LIFE_TIME
&&
684 age
>= ifa
->ifa_valid_lft
) {
685 struct in_ifaddr
**ifap
;
687 for (ifap
= &ifa
->ifa_dev
->ifa_list
;
688 *ifap
!= NULL
; ifap
= &(*ifap
)->ifa_next
) {
690 inet_del_ifa(ifa
->ifa_dev
,
695 } else if (ifa
->ifa_preferred_lft
!=
696 INFINITY_LIFE_TIME
&&
697 age
>= ifa
->ifa_preferred_lft
&&
698 !(ifa
->ifa_flags
& IFA_F_DEPRECATED
)) {
699 ifa
->ifa_flags
|= IFA_F_DEPRECATED
;
700 rtmsg_ifa(RTM_NEWADDR
, ifa
, NULL
, 0);
706 next_sec
= round_jiffies_up(next
);
709 /* If rounded timeout is accurate enough, accept it. */
710 if (time_before(next_sec
, next
+ ADDRCONF_TIMER_FUZZ
))
711 next_sched
= next_sec
;
714 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
715 if (time_before(next_sched
, now
+ ADDRCONF_TIMER_FUZZ_MAX
))
716 next_sched
= now
+ ADDRCONF_TIMER_FUZZ_MAX
;
718 queue_delayed_work(system_power_efficient_wq
, &check_lifetime_work
,
static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
			     __u32 prefered_lft)
{
	unsigned long timeout;

	ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);

	timeout = addrconf_timeout_fixup(valid_lft, HZ);
	if (addrconf_finite_timeout(timeout))
		ifa->ifa_valid_lft = timeout;
	else
		ifa->ifa_flags |= IFA_F_PERMANENT;

	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
	if (addrconf_finite_timeout(timeout)) {
		if (timeout == 0)
			ifa->ifa_flags |= IFA_F_DEPRECATED;
		ifa->ifa_preferred_lft = timeout;
	}
	ifa->ifa_tstamp = jiffies;
	if (!ifa->ifa_cstamp)
		ifa->ifa_cstamp = ifa->ifa_tstamp;
}
746 static struct in_ifaddr
*rtm_to_ifaddr(struct net
*net
, struct nlmsghdr
*nlh
,
747 __u32
*pvalid_lft
, __u32
*pprefered_lft
)
749 struct nlattr
*tb
[IFA_MAX
+1];
750 struct in_ifaddr
*ifa
;
751 struct ifaddrmsg
*ifm
;
752 struct net_device
*dev
;
753 struct in_device
*in_dev
;
756 err
= nlmsg_parse(nlh
, sizeof(*ifm
), tb
, IFA_MAX
, ifa_ipv4_policy
);
760 ifm
= nlmsg_data(nlh
);
762 if (ifm
->ifa_prefixlen
> 32 || !tb
[IFA_LOCAL
])
765 dev
= __dev_get_by_index(net
, ifm
->ifa_index
);
770 in_dev
= __in_dev_get_rtnl(dev
);
775 ifa
= inet_alloc_ifa();
778 * A potential indev allocation can be left alive, it stays
779 * assigned to its device and is destroy with it.
783 ipv4_devconf_setall(in_dev
);
784 neigh_parms_data_state_setall(in_dev
->arp_parms
);
787 if (!tb
[IFA_ADDRESS
])
788 tb
[IFA_ADDRESS
] = tb
[IFA_LOCAL
];
790 INIT_HLIST_NODE(&ifa
->hash
);
791 ifa
->ifa_prefixlen
= ifm
->ifa_prefixlen
;
792 ifa
->ifa_mask
= inet_make_mask(ifm
->ifa_prefixlen
);
793 ifa
->ifa_flags
= tb
[IFA_FLAGS
] ? nla_get_u32(tb
[IFA_FLAGS
]) :
795 ifa
->ifa_scope
= ifm
->ifa_scope
;
796 ifa
->ifa_dev
= in_dev
;
798 ifa
->ifa_local
= nla_get_in_addr(tb
[IFA_LOCAL
]);
799 ifa
->ifa_address
= nla_get_in_addr(tb
[IFA_ADDRESS
]);
801 if (tb
[IFA_BROADCAST
])
802 ifa
->ifa_broadcast
= nla_get_in_addr(tb
[IFA_BROADCAST
]);
805 nla_strlcpy(ifa
->ifa_label
, tb
[IFA_LABEL
], IFNAMSIZ
);
807 memcpy(ifa
->ifa_label
, dev
->name
, IFNAMSIZ
);
809 if (tb
[IFA_CACHEINFO
]) {
810 struct ifa_cacheinfo
*ci
;
812 ci
= nla_data(tb
[IFA_CACHEINFO
]);
813 if (!ci
->ifa_valid
|| ci
->ifa_prefered
> ci
->ifa_valid
) {
817 *pvalid_lft
= ci
->ifa_valid
;
818 *pprefered_lft
= ci
->ifa_prefered
;
static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct in_ifaddr *ifa1, **ifap;

	if (!ifa->ifa_local)
		return NULL;

	for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
	     ifap = &ifa1->ifa_next) {
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa) &&
		    ifa1->ifa_local == ifa->ifa_local)
			return ifa1;
	}
	return NULL;
}
847 static int inet_rtm_newaddr(struct sk_buff
*skb
, struct nlmsghdr
*nlh
)
849 struct net
*net
= sock_net(skb
->sk
);
850 struct in_ifaddr
*ifa
;
851 struct in_ifaddr
*ifa_existing
;
852 __u32 valid_lft
= INFINITY_LIFE_TIME
;
853 __u32 prefered_lft
= INFINITY_LIFE_TIME
;
857 ifa
= rtm_to_ifaddr(net
, nlh
, &valid_lft
, &prefered_lft
);
861 ifa_existing
= find_matching_ifa(ifa
);
863 /* It would be best to check for !NLM_F_CREATE here but
864 * userspace already relies on not having to provide this.
866 set_ifa_lifetime(ifa
, valid_lft
, prefered_lft
);
867 if (ifa
->ifa_flags
& IFA_F_MCAUTOJOIN
) {
868 int ret
= ip_mc_config(net
->ipv4
.mc_autojoin_sk
,
876 return __inet_insert_ifa(ifa
, nlh
, NETLINK_CB(skb
).portid
);
880 if (nlh
->nlmsg_flags
& NLM_F_EXCL
||
881 !(nlh
->nlmsg_flags
& NLM_F_REPLACE
))
884 set_ifa_lifetime(ifa
, valid_lft
, prefered_lft
);
885 cancel_delayed_work(&check_lifetime_work
);
886 queue_delayed_work(system_power_efficient_wq
,
887 &check_lifetime_work
, 0);
888 rtmsg_ifa(RTM_NEWADDR
, ifa
, nlh
, NETLINK_CB(skb
).portid
);
/*
 *	Determine a default network mask, based on the IP address.
 */

static int inet_abc_len(__be32 addr)
{
	int rc = -1;	/* Something else, probably a multicast. */

	if (ipv4_is_zeronet(addr))
		rc = 0;
	else {
		__u32 haddr = ntohl(addr);

		if (IN_CLASSA(haddr))
			rc = 8;
		else if (IN_CLASSB(haddr))
			rc = 16;
		else if (IN_CLASSC(haddr))
			rc = 24;
	}

	return rc;
}
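/*
 * Worked examples of the classful defaults above:
 *	10.0.0.1     -> class A -> 8-bit prefix  (255.0.0.0)
 *	172.16.0.1   -> class B -> 16-bit prefix (255.255.0.0)
 *	192.168.1.1  -> class C -> 24-bit prefix (255.255.255.0)
 *	0.0.0.0      -> zeronet -> 0
 *	224.0.0.1    -> multicast -> -1 (no sensible default mask)
 */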
918 int devinet_ioctl(struct net
*net
, unsigned int cmd
, void __user
*arg
)
921 struct sockaddr_in sin_orig
;
922 struct sockaddr_in
*sin
= (struct sockaddr_in
*)&ifr
.ifr_addr
;
923 struct in_device
*in_dev
;
924 struct in_ifaddr
**ifap
= NULL
;
925 struct in_ifaddr
*ifa
= NULL
;
926 struct net_device
*dev
;
929 int tryaddrmatch
= 0;
932 * Fetch the caller's info block into kernel space
935 if (copy_from_user(&ifr
, arg
, sizeof(struct ifreq
)))
937 ifr
.ifr_name
[IFNAMSIZ
- 1] = 0;
939 /* save original address for comparison */
940 memcpy(&sin_orig
, sin
, sizeof(*sin
));
942 colon
= strchr(ifr
.ifr_name
, ':');
946 dev_load(net
, ifr
.ifr_name
);
949 case SIOCGIFADDR
: /* Get interface address */
950 case SIOCGIFBRDADDR
: /* Get the broadcast address */
951 case SIOCGIFDSTADDR
: /* Get the destination address */
952 case SIOCGIFNETMASK
: /* Get the netmask for the interface */
953 /* Note that these ioctls will not sleep,
954 so that we do not impose a lock.
955 One day we will be forced to put shlock here (I mean SMP)
957 tryaddrmatch
= (sin_orig
.sin_family
== AF_INET
);
958 memset(sin
, 0, sizeof(*sin
));
959 sin
->sin_family
= AF_INET
;
964 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
967 case SIOCSIFADDR
: /* Set interface address (and family) */
968 case SIOCSIFBRDADDR
: /* Set the broadcast address */
969 case SIOCSIFDSTADDR
: /* Set the destination address */
970 case SIOCSIFNETMASK
: /* Set the netmask for the interface */
972 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
975 if (sin
->sin_family
!= AF_INET
)
986 dev
= __dev_get_by_name(net
, ifr
.ifr_name
);
993 in_dev
= __in_dev_get_rtnl(dev
);
996 /* Matthias Andree */
997 /* compare label and address (4.4BSD style) */
998 /* note: we only do this for a limited set of ioctls
999 and only if the original address family was AF_INET.
1000 This is checked above. */
1001 for (ifap
= &in_dev
->ifa_list
; (ifa
= *ifap
) != NULL
;
1002 ifap
= &ifa
->ifa_next
) {
1003 if (!strcmp(ifr
.ifr_name
, ifa
->ifa_label
) &&
1004 sin_orig
.sin_addr
.s_addr
==
1010 /* we didn't get a match, maybe the application is
1011 4.3BSD-style and passed in junk so we fall back to
1012 comparing just the label */
1014 for (ifap
= &in_dev
->ifa_list
; (ifa
= *ifap
) != NULL
;
1015 ifap
= &ifa
->ifa_next
)
1016 if (!strcmp(ifr
.ifr_name
, ifa
->ifa_label
))
1021 ret
= -EADDRNOTAVAIL
;
1022 if (!ifa
&& cmd
!= SIOCSIFADDR
&& cmd
!= SIOCSIFFLAGS
)
1026 case SIOCGIFADDR
: /* Get interface address */
1027 sin
->sin_addr
.s_addr
= ifa
->ifa_local
;
1030 case SIOCGIFBRDADDR
: /* Get the broadcast address */
1031 sin
->sin_addr
.s_addr
= ifa
->ifa_broadcast
;
1034 case SIOCGIFDSTADDR
: /* Get the destination address */
1035 sin
->sin_addr
.s_addr
= ifa
->ifa_address
;
1038 case SIOCGIFNETMASK
: /* Get the netmask for the interface */
1039 sin
->sin_addr
.s_addr
= ifa
->ifa_mask
;
1044 ret
= -EADDRNOTAVAIL
;
1048 if (!(ifr
.ifr_flags
& IFF_UP
))
1049 inet_del_ifa(in_dev
, ifap
, 1);
1052 ret
= dev_change_flags(dev
, ifr
.ifr_flags
);
1055 case SIOCSIFADDR
: /* Set interface address (and family) */
1057 if (inet_abc_len(sin
->sin_addr
.s_addr
) < 0)
1062 ifa
= inet_alloc_ifa();
1065 INIT_HLIST_NODE(&ifa
->hash
);
1067 memcpy(ifa
->ifa_label
, ifr
.ifr_name
, IFNAMSIZ
);
1069 memcpy(ifa
->ifa_label
, dev
->name
, IFNAMSIZ
);
1072 if (ifa
->ifa_local
== sin
->sin_addr
.s_addr
)
1074 inet_del_ifa(in_dev
, ifap
, 0);
1075 ifa
->ifa_broadcast
= 0;
1079 ifa
->ifa_address
= ifa
->ifa_local
= sin
->sin_addr
.s_addr
;
1081 if (!(dev
->flags
& IFF_POINTOPOINT
)) {
1082 ifa
->ifa_prefixlen
= inet_abc_len(ifa
->ifa_address
);
1083 ifa
->ifa_mask
= inet_make_mask(ifa
->ifa_prefixlen
);
1084 if ((dev
->flags
& IFF_BROADCAST
) &&
1085 ifa
->ifa_prefixlen
< 31)
1086 ifa
->ifa_broadcast
= ifa
->ifa_address
|
1089 ifa
->ifa_prefixlen
= 32;
1090 ifa
->ifa_mask
= inet_make_mask(32);
1092 set_ifa_lifetime(ifa
, INFINITY_LIFE_TIME
, INFINITY_LIFE_TIME
);
1093 ret
= inet_set_ifa(dev
, ifa
);
1096 case SIOCSIFBRDADDR
: /* Set the broadcast address */
1098 if (ifa
->ifa_broadcast
!= sin
->sin_addr
.s_addr
) {
1099 inet_del_ifa(in_dev
, ifap
, 0);
1100 ifa
->ifa_broadcast
= sin
->sin_addr
.s_addr
;
1101 inet_insert_ifa(ifa
);
1105 case SIOCSIFDSTADDR
: /* Set the destination address */
1107 if (ifa
->ifa_address
== sin
->sin_addr
.s_addr
)
1110 if (inet_abc_len(sin
->sin_addr
.s_addr
) < 0)
1113 inet_del_ifa(in_dev
, ifap
, 0);
1114 ifa
->ifa_address
= sin
->sin_addr
.s_addr
;
1115 inet_insert_ifa(ifa
);
1118 case SIOCSIFNETMASK
: /* Set the netmask for the interface */
1121 * The mask we set must be legal.
1124 if (bad_mask(sin
->sin_addr
.s_addr
, 0))
1127 if (ifa
->ifa_mask
!= sin
->sin_addr
.s_addr
) {
1128 __be32 old_mask
= ifa
->ifa_mask
;
1129 inet_del_ifa(in_dev
, ifap
, 0);
1130 ifa
->ifa_mask
= sin
->sin_addr
.s_addr
;
1131 ifa
->ifa_prefixlen
= inet_mask_len(ifa
->ifa_mask
);
1133 /* See if current broadcast address matches
1134 * with current netmask, then recalculate
1135 * the broadcast address. Otherwise it's a
1136 * funny address, so don't touch it since
1137 * the user seems to know what (s)he's doing...
1139 if ((dev
->flags
& IFF_BROADCAST
) &&
1140 (ifa
->ifa_prefixlen
< 31) &&
1141 (ifa
->ifa_broadcast
==
1142 (ifa
->ifa_local
|~old_mask
))) {
1143 ifa
->ifa_broadcast
= (ifa
->ifa_local
|
1144 ~sin
->sin_addr
.s_addr
);
1146 inet_insert_ifa(ifa
);
1156 ret
= copy_to_user(arg
, &ifr
, sizeof(struct ifreq
)) ? -EFAULT
: 0;
1160 static int inet_gifconf(struct net_device
*dev
, char __user
*buf
, int len
)
1162 struct in_device
*in_dev
= __in_dev_get_rtnl(dev
);
1163 struct in_ifaddr
*ifa
;
1170 for (ifa
= in_dev
->ifa_list
; ifa
; ifa
= ifa
->ifa_next
) {
1172 done
+= sizeof(ifr
);
1175 if (len
< (int) sizeof(ifr
))
1177 memset(&ifr
, 0, sizeof(struct ifreq
));
1178 strcpy(ifr
.ifr_name
, ifa
->ifa_label
);
1180 (*(struct sockaddr_in
*)&ifr
.ifr_addr
).sin_family
= AF_INET
;
1181 (*(struct sockaddr_in
*)&ifr
.ifr_addr
).sin_addr
.s_addr
=
1184 if (copy_to_user(buf
, &ifr
, sizeof(struct ifreq
))) {
1188 buf
+= sizeof(struct ifreq
);
1189 len
-= sizeof(struct ifreq
);
1190 done
+= sizeof(struct ifreq
);
1196 __be32
inet_select_addr(const struct net_device
*dev
, __be32 dst
, int scope
)
1199 struct in_device
*in_dev
;
1200 struct net
*net
= dev_net(dev
);
1203 in_dev
= __in_dev_get_rcu(dev
);
1207 for_primary_ifa(in_dev
) {
1208 if (ifa
->ifa_scope
> scope
)
1210 if (!dst
|| inet_ifa_match(dst
, ifa
)) {
1211 addr
= ifa
->ifa_local
;
1215 addr
= ifa
->ifa_local
;
1216 } endfor_ifa(in_dev
);
1222 /* Not loopback addresses on loopback should be preferred
1223 in this case. It is important that lo is the first interface
1226 for_each_netdev_rcu(net
, dev
) {
1227 in_dev
= __in_dev_get_rcu(dev
);
1231 for_primary_ifa(in_dev
) {
1232 if (ifa
->ifa_scope
!= RT_SCOPE_LINK
&&
1233 ifa
->ifa_scope
<= scope
) {
1234 addr
= ifa
->ifa_local
;
1237 } endfor_ifa(in_dev
);
1243 EXPORT_SYMBOL(inet_select_addr
);
1245 static __be32
confirm_addr_indev(struct in_device
*in_dev
, __be32 dst
,
1246 __be32 local
, int scope
)
1253 (local
== ifa
->ifa_local
|| !local
) &&
1254 ifa
->ifa_scope
<= scope
) {
1255 addr
= ifa
->ifa_local
;
1260 same
= (!local
|| inet_ifa_match(local
, ifa
)) &&
1261 (!dst
|| inet_ifa_match(dst
, ifa
));
1265 /* Is the selected addr into dst subnet? */
1266 if (inet_ifa_match(addr
, ifa
))
1268 /* No, then can we use new local src? */
1269 if (ifa
->ifa_scope
<= scope
) {
1270 addr
= ifa
->ifa_local
;
1273 /* search for large dst subnet for addr */
1277 } endfor_ifa(in_dev
);
1279 return same
? addr
: 0;
1283 * Confirm that local IP address exists using wildcards:
1284 * - net: netns to check, cannot be NULL
1285 * - in_dev: only on this interface, NULL=any interface
1286 * - dst: only in the same subnet as dst, 0=any dst
1287 * - local: address, 0=autoselect the local address
1288 * - scope: maximum allowed scope value for the local address
1290 __be32
inet_confirm_addr(struct net
*net
, struct in_device
*in_dev
,
1291 __be32 dst
, __be32 local
, int scope
)
1294 struct net_device
*dev
;
1297 return confirm_addr_indev(in_dev
, dst
, local
, scope
);
1300 for_each_netdev_rcu(net
, dev
) {
1301 in_dev
= __in_dev_get_rcu(dev
);
1303 addr
= confirm_addr_indev(in_dev
, dst
, local
, scope
);
1312 EXPORT_SYMBOL(inet_confirm_addr
);
int register_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(register_inetaddr_notifier);

int unregister_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(unregister_inetaddr_notifier);
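/*
 * Minimal usage sketch (hypothetical module code, not part of this file):
 * the chain passes a struct in_ifaddr * with NETDEV_UP / NETDEV_DOWN events.
 *
 *	static int my_inetaddr_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct in_ifaddr *ifa = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%pI4 added on %s\n",
 *				&ifa->ifa_local, ifa->ifa_dev->dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_inetaddr_event,
 *	};
 *
 *	register_inetaddr_notifier(&my_nb);
 */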
1330 /* Rename ifa_labels for a device name change. Make some effort to preserve
1331 * existing alias numbering and to create unique labels if possible.
1333 static void inetdev_changename(struct net_device
*dev
, struct in_device
*in_dev
)
1335 struct in_ifaddr
*ifa
;
1338 for (ifa
= in_dev
->ifa_list
; ifa
; ifa
= ifa
->ifa_next
) {
1339 char old
[IFNAMSIZ
], *dot
;
1341 memcpy(old
, ifa
->ifa_label
, IFNAMSIZ
);
1342 memcpy(ifa
->ifa_label
, dev
->name
, IFNAMSIZ
);
1345 dot
= strchr(old
, ':');
1347 sprintf(old
, ":%d", named
);
1350 if (strlen(dot
) + strlen(dev
->name
) < IFNAMSIZ
)
1351 strcat(ifa
->ifa_label
, dot
);
1353 strcpy(ifa
->ifa_label
+ (IFNAMSIZ
- strlen(dot
) - 1), dot
);
1355 rtmsg_ifa(RTM_NEWADDR
, ifa
, NULL
, 0);
static bool inetdev_valid_mtu(unsigned int mtu)
{
	return mtu >= IPV4_MIN_MTU;
}
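/*
 * IPV4_MIN_MTU is the RFC 791 minimum of 68 bytes; a smaller MTU makes the
 * device unusable for IPv4, so IP is disabled on it (see the NETDEV_CHANGEMTU
 * handling in inetdev_event()).
 */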
static void inetdev_send_gratuitous_arp(struct net_device *dev,
					struct in_device *in_dev)
{
	struct in_ifaddr *ifa;

	for (ifa = in_dev->ifa_list; ifa;
	     ifa = ifa->ifa_next) {
		arp_send(ARPOP_REQUEST, ETH_P_ARP,
			 ifa->ifa_local, dev,
			 ifa->ifa_local, NULL,
			 dev->dev_addr, NULL);
	}
}
1379 /* Called only under RTNL semaphore */
1381 static int inetdev_event(struct notifier_block
*this, unsigned long event
,
1384 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
1385 struct in_device
*in_dev
= __in_dev_get_rtnl(dev
);
1390 if (event
== NETDEV_REGISTER
) {
1391 in_dev
= inetdev_init(dev
);
1393 return notifier_from_errno(PTR_ERR(in_dev
));
1394 if (dev
->flags
& IFF_LOOPBACK
) {
1395 IN_DEV_CONF_SET(in_dev
, NOXFRM
, 1);
1396 IN_DEV_CONF_SET(in_dev
, NOPOLICY
, 1);
1398 } else if (event
== NETDEV_CHANGEMTU
) {
1399 /* Re-enabling IP */
1400 if (inetdev_valid_mtu(dev
->mtu
))
1401 in_dev
= inetdev_init(dev
);
1407 case NETDEV_REGISTER
:
1408 pr_debug("%s: bug\n", __func__
);
1409 RCU_INIT_POINTER(dev
->ip_ptr
, NULL
);
1412 if (!inetdev_valid_mtu(dev
->mtu
))
1414 if (dev
->flags
& IFF_LOOPBACK
) {
1415 struct in_ifaddr
*ifa
= inet_alloc_ifa();
1418 INIT_HLIST_NODE(&ifa
->hash
);
1420 ifa
->ifa_address
= htonl(INADDR_LOOPBACK
);
1421 ifa
->ifa_prefixlen
= 8;
1422 ifa
->ifa_mask
= inet_make_mask(8);
1423 in_dev_hold(in_dev
);
1424 ifa
->ifa_dev
= in_dev
;
1425 ifa
->ifa_scope
= RT_SCOPE_HOST
;
1426 memcpy(ifa
->ifa_label
, dev
->name
, IFNAMSIZ
);
1427 set_ifa_lifetime(ifa
, INFINITY_LIFE_TIME
,
1428 INFINITY_LIFE_TIME
);
1429 ipv4_devconf_setall(in_dev
);
1430 neigh_parms_data_state_setall(in_dev
->arp_parms
);
1431 inet_insert_ifa(ifa
);
1436 case NETDEV_CHANGEADDR
:
1437 if (!IN_DEV_ARP_NOTIFY(in_dev
))
1440 case NETDEV_NOTIFY_PEERS
:
1441 /* Send gratuitous ARP to notify of link change */
1442 inetdev_send_gratuitous_arp(dev
, in_dev
);
1447 case NETDEV_PRE_TYPE_CHANGE
:
1448 ip_mc_unmap(in_dev
);
1450 case NETDEV_POST_TYPE_CHANGE
:
1451 ip_mc_remap(in_dev
);
1453 case NETDEV_CHANGEMTU
:
1454 if (inetdev_valid_mtu(dev
->mtu
))
1456 /* disable IP when MTU is not enough */
1457 case NETDEV_UNREGISTER
:
1458 inetdev_destroy(in_dev
);
1460 case NETDEV_CHANGENAME
:
1461 /* Do not notify about label change, this event is
1462 * not interesting to applications using netlink.
1464 inetdev_changename(dev
, in_dev
);
1466 devinet_sysctl_unregister(in_dev
);
1467 devinet_sysctl_register(in_dev
);
1474 static struct notifier_block ip_netdev_notifier
= {
1475 .notifier_call
= inetdev_event
,
static size_t inet_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
	       + nla_total_size(4) /* IFA_ADDRESS */
	       + nla_total_size(4) /* IFA_LOCAL */
	       + nla_total_size(4) /* IFA_BROADCAST */
	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
	       + nla_total_size(4) /* IFA_FLAGS */
	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
}

static inline u32 cstamp_delta(unsigned long cstamp)
{
	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
}

static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
			 unsigned long tstamp, u32 preferred, u32 valid)
{
	struct ifa_cacheinfo ci;

	ci.cstamp = cstamp_delta(cstamp);
	ci.tstamp = cstamp_delta(tstamp);
	ci.ifa_prefered = preferred;
	ci.ifa_valid = valid;

	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
}
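/*
 * cstamp_delta() reports timestamps in hundredths of a second since boot:
 * e.g. with HZ == 1000, a stamp taken 2500 jiffies after INITIAL_JIFFIES
 * becomes 2500 * 100 / 1000 == 250, i.e. 2.5 seconds.
 */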
1507 static int inet_fill_ifaddr(struct sk_buff
*skb
, struct in_ifaddr
*ifa
,
1508 u32 portid
, u32 seq
, int event
, unsigned int flags
)
1510 struct ifaddrmsg
*ifm
;
1511 struct nlmsghdr
*nlh
;
1512 u32 preferred
, valid
;
1514 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(*ifm
), flags
);
1518 ifm
= nlmsg_data(nlh
);
1519 ifm
->ifa_family
= AF_INET
;
1520 ifm
->ifa_prefixlen
= ifa
->ifa_prefixlen
;
1521 ifm
->ifa_flags
= ifa
->ifa_flags
;
1522 ifm
->ifa_scope
= ifa
->ifa_scope
;
1523 ifm
->ifa_index
= ifa
->ifa_dev
->dev
->ifindex
;
1525 if (!(ifm
->ifa_flags
& IFA_F_PERMANENT
)) {
1526 preferred
= ifa
->ifa_preferred_lft
;
1527 valid
= ifa
->ifa_valid_lft
;
1528 if (preferred
!= INFINITY_LIFE_TIME
) {
1529 long tval
= (jiffies
- ifa
->ifa_tstamp
) / HZ
;
1531 if (preferred
> tval
)
1535 if (valid
!= INFINITY_LIFE_TIME
) {
1543 preferred
= INFINITY_LIFE_TIME
;
1544 valid
= INFINITY_LIFE_TIME
;
1546 if ((ifa
->ifa_address
&&
1547 nla_put_in_addr(skb
, IFA_ADDRESS
, ifa
->ifa_address
)) ||
1549 nla_put_in_addr(skb
, IFA_LOCAL
, ifa
->ifa_local
)) ||
1550 (ifa
->ifa_broadcast
&&
1551 nla_put_in_addr(skb
, IFA_BROADCAST
, ifa
->ifa_broadcast
)) ||
1552 (ifa
->ifa_label
[0] &&
1553 nla_put_string(skb
, IFA_LABEL
, ifa
->ifa_label
)) ||
1554 nla_put_u32(skb
, IFA_FLAGS
, ifa
->ifa_flags
) ||
1555 put_cacheinfo(skb
, ifa
->ifa_cstamp
, ifa
->ifa_tstamp
,
1557 goto nla_put_failure
;
1559 nlmsg_end(skb
, nlh
);
1563 nlmsg_cancel(skb
, nlh
);
1567 static int inet_dump_ifaddr(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1569 struct net
*net
= sock_net(skb
->sk
);
1572 int ip_idx
, s_ip_idx
;
1573 struct net_device
*dev
;
1574 struct in_device
*in_dev
;
1575 struct in_ifaddr
*ifa
;
1576 struct hlist_head
*head
;
1579 s_idx
= idx
= cb
->args
[1];
1580 s_ip_idx
= ip_idx
= cb
->args
[2];
1582 for (h
= s_h
; h
< NETDEV_HASHENTRIES
; h
++, s_idx
= 0) {
1584 head
= &net
->dev_index_head
[h
];
1586 cb
->seq
= atomic_read(&net
->ipv4
.dev_addr_genid
) ^
1588 hlist_for_each_entry_rcu(dev
, head
, index_hlist
) {
1591 if (h
> s_h
|| idx
> s_idx
)
1593 in_dev
= __in_dev_get_rcu(dev
);
1597 for (ifa
= in_dev
->ifa_list
, ip_idx
= 0; ifa
;
1598 ifa
= ifa
->ifa_next
, ip_idx
++) {
1599 if (ip_idx
< s_ip_idx
)
1601 if (inet_fill_ifaddr(skb
, ifa
,
1602 NETLINK_CB(cb
->skb
).portid
,
1604 RTM_NEWADDR
, NLM_F_MULTI
) < 0) {
1608 nl_dump_check_consistent(cb
, nlmsg_hdr(skb
));
1619 cb
->args
[2] = ip_idx
;
1624 static void rtmsg_ifa(int event
, struct in_ifaddr
*ifa
, struct nlmsghdr
*nlh
,
1627 struct sk_buff
*skb
;
1628 u32 seq
= nlh
? nlh
->nlmsg_seq
: 0;
1632 net
= dev_net(ifa
->ifa_dev
->dev
);
1633 skb
= nlmsg_new(inet_nlmsg_size(), GFP_KERNEL
);
1637 err
= inet_fill_ifaddr(skb
, ifa
, portid
, seq
, event
, 0);
1639 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1640 WARN_ON(err
== -EMSGSIZE
);
1644 rtnl_notify(skb
, net
, portid
, RTNLGRP_IPV4_IFADDR
, nlh
, GFP_KERNEL
);
1648 rtnl_set_sk_err(net
, RTNLGRP_IPV4_IFADDR
, err
);
1651 static size_t inet_get_link_af_size(const struct net_device
*dev
,
1652 u32 ext_filter_mask
)
1654 struct in_device
*in_dev
= rcu_dereference_rtnl(dev
->ip_ptr
);
1659 return nla_total_size(IPV4_DEVCONF_MAX
* 4); /* IFLA_INET_CONF */
1662 static int inet_fill_link_af(struct sk_buff
*skb
, const struct net_device
*dev
,
1663 u32 ext_filter_mask
)
1665 struct in_device
*in_dev
= rcu_dereference_rtnl(dev
->ip_ptr
);
1672 nla
= nla_reserve(skb
, IFLA_INET_CONF
, IPV4_DEVCONF_MAX
* 4);
1676 for (i
= 0; i
< IPV4_DEVCONF_MAX
; i
++)
1677 ((u32
*) nla_data(nla
))[i
] = in_dev
->cnf
.data
[i
];
1682 static const struct nla_policy inet_af_policy
[IFLA_INET_MAX
+1] = {
1683 [IFLA_INET_CONF
] = { .type
= NLA_NESTED
},
1686 static int inet_validate_link_af(const struct net_device
*dev
,
1687 const struct nlattr
*nla
)
1689 struct nlattr
*a
, *tb
[IFLA_INET_MAX
+1];
1692 if (dev
&& !__in_dev_get_rtnl(dev
))
1693 return -EAFNOSUPPORT
;
1695 err
= nla_parse_nested(tb
, IFLA_INET_MAX
, nla
, inet_af_policy
);
1699 if (tb
[IFLA_INET_CONF
]) {
1700 nla_for_each_nested(a
, tb
[IFLA_INET_CONF
], rem
) {
1701 int cfgid
= nla_type(a
);
1706 if (cfgid
<= 0 || cfgid
> IPV4_DEVCONF_MAX
)
1714 static int inet_set_link_af(struct net_device
*dev
, const struct nlattr
*nla
)
1716 struct in_device
*in_dev
= __in_dev_get_rtnl(dev
);
1717 struct nlattr
*a
, *tb
[IFLA_INET_MAX
+1];
1721 return -EAFNOSUPPORT
;
1723 if (nla_parse_nested(tb
, IFLA_INET_MAX
, nla
, NULL
) < 0)
1726 if (tb
[IFLA_INET_CONF
]) {
1727 nla_for_each_nested(a
, tb
[IFLA_INET_CONF
], rem
)
1728 ipv4_devconf_set(in_dev
, nla_type(a
), nla_get_u32(a
));
static int inet_netconf_msgsize_devconf(int type)
{
	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
		   + nla_total_size(4);	/* NETCONFA_IFINDEX */

	/* type -1 is used for ALL */
	if (type == -1 || type == NETCONFA_FORWARDING)
		size += nla_total_size(4);
	if (type == -1 || type == NETCONFA_RP_FILTER)
		size += nla_total_size(4);
	if (type == -1 || type == NETCONFA_MC_FORWARDING)
		size += nla_total_size(4);
	if (type == -1 || type == NETCONFA_PROXY_NEIGH)
		size += nla_total_size(4);
	if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
		size += nla_total_size(4);

	return size;
}
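/*
 * Example: for a single-attribute notification such as NETCONFA_FORWARDING
 * the budget is the aligned netconfmsg header plus two 4-byte attributes
 * (ifindex + forwarding); for type == -1 ("ALL") it grows to the header plus
 * six 4-byte attributes.
 */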
1754 static int inet_netconf_fill_devconf(struct sk_buff
*skb
, int ifindex
,
1755 struct ipv4_devconf
*devconf
, u32 portid
,
1756 u32 seq
, int event
, unsigned int flags
,
1759 struct nlmsghdr
*nlh
;
1760 struct netconfmsg
*ncm
;
1762 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(struct netconfmsg
),
1767 ncm
= nlmsg_data(nlh
);
1768 ncm
->ncm_family
= AF_INET
;
1770 if (nla_put_s32(skb
, NETCONFA_IFINDEX
, ifindex
) < 0)
1771 goto nla_put_failure
;
1773 /* type -1 is used for ALL */
1774 if ((type
== -1 || type
== NETCONFA_FORWARDING
) &&
1775 nla_put_s32(skb
, NETCONFA_FORWARDING
,
1776 IPV4_DEVCONF(*devconf
, FORWARDING
)) < 0)
1777 goto nla_put_failure
;
1778 if ((type
== -1 || type
== NETCONFA_RP_FILTER
) &&
1779 nla_put_s32(skb
, NETCONFA_RP_FILTER
,
1780 IPV4_DEVCONF(*devconf
, RP_FILTER
)) < 0)
1781 goto nla_put_failure
;
1782 if ((type
== -1 || type
== NETCONFA_MC_FORWARDING
) &&
1783 nla_put_s32(skb
, NETCONFA_MC_FORWARDING
,
1784 IPV4_DEVCONF(*devconf
, MC_FORWARDING
)) < 0)
1785 goto nla_put_failure
;
1786 if ((type
== -1 || type
== NETCONFA_PROXY_NEIGH
) &&
1787 nla_put_s32(skb
, NETCONFA_PROXY_NEIGH
,
1788 IPV4_DEVCONF(*devconf
, PROXY_ARP
)) < 0)
1789 goto nla_put_failure
;
1790 if ((type
== -1 || type
== NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN
) &&
1791 nla_put_s32(skb
, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN
,
1792 IPV4_DEVCONF(*devconf
, IGNORE_ROUTES_WITH_LINKDOWN
)) < 0)
1793 goto nla_put_failure
;
1795 nlmsg_end(skb
, nlh
);
1799 nlmsg_cancel(skb
, nlh
);
1803 void inet_netconf_notify_devconf(struct net
*net
, int type
, int ifindex
,
1804 struct ipv4_devconf
*devconf
)
1806 struct sk_buff
*skb
;
1809 skb
= nlmsg_new(inet_netconf_msgsize_devconf(type
), GFP_ATOMIC
);
1813 err
= inet_netconf_fill_devconf(skb
, ifindex
, devconf
, 0, 0,
1814 RTM_NEWNETCONF
, 0, type
);
1816 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1817 WARN_ON(err
== -EMSGSIZE
);
1821 rtnl_notify(skb
, net
, 0, RTNLGRP_IPV4_NETCONF
, NULL
, GFP_ATOMIC
);
1825 rtnl_set_sk_err(net
, RTNLGRP_IPV4_NETCONF
, err
);
static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
};
1836 static int inet_netconf_get_devconf(struct sk_buff
*in_skb
,
1837 struct nlmsghdr
*nlh
)
1839 struct net
*net
= sock_net(in_skb
->sk
);
1840 struct nlattr
*tb
[NETCONFA_MAX
+1];
1841 struct netconfmsg
*ncm
;
1842 struct sk_buff
*skb
;
1843 struct ipv4_devconf
*devconf
;
1844 struct in_device
*in_dev
;
1845 struct net_device
*dev
;
1849 err
= nlmsg_parse(nlh
, sizeof(*ncm
), tb
, NETCONFA_MAX
,
1850 devconf_ipv4_policy
);
1855 if (!tb
[NETCONFA_IFINDEX
])
1858 ifindex
= nla_get_s32(tb
[NETCONFA_IFINDEX
]);
1860 case NETCONFA_IFINDEX_ALL
:
1861 devconf
= net
->ipv4
.devconf_all
;
1863 case NETCONFA_IFINDEX_DEFAULT
:
1864 devconf
= net
->ipv4
.devconf_dflt
;
1867 dev
= __dev_get_by_index(net
, ifindex
);
1870 in_dev
= __in_dev_get_rtnl(dev
);
1873 devconf
= &in_dev
->cnf
;
1878 skb
= nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC
);
1882 err
= inet_netconf_fill_devconf(skb
, ifindex
, devconf
,
1883 NETLINK_CB(in_skb
).portid
,
1884 nlh
->nlmsg_seq
, RTM_NEWNETCONF
, 0,
1887 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1888 WARN_ON(err
== -EMSGSIZE
);
1892 err
= rtnl_unicast(skb
, net
, NETLINK_CB(in_skb
).portid
);
1897 static int inet_netconf_dump_devconf(struct sk_buff
*skb
,
1898 struct netlink_callback
*cb
)
1900 struct net
*net
= sock_net(skb
->sk
);
1903 struct net_device
*dev
;
1904 struct in_device
*in_dev
;
1905 struct hlist_head
*head
;
1908 s_idx
= idx
= cb
->args
[1];
1910 for (h
= s_h
; h
< NETDEV_HASHENTRIES
; h
++, s_idx
= 0) {
1912 head
= &net
->dev_index_head
[h
];
1914 cb
->seq
= atomic_read(&net
->ipv4
.dev_addr_genid
) ^
1916 hlist_for_each_entry_rcu(dev
, head
, index_hlist
) {
1919 in_dev
= __in_dev_get_rcu(dev
);
1923 if (inet_netconf_fill_devconf(skb
, dev
->ifindex
,
1925 NETLINK_CB(cb
->skb
).portid
,
1933 nl_dump_check_consistent(cb
, nlmsg_hdr(skb
));
1939 if (h
== NETDEV_HASHENTRIES
) {
1940 if (inet_netconf_fill_devconf(skb
, NETCONFA_IFINDEX_ALL
,
1941 net
->ipv4
.devconf_all
,
1942 NETLINK_CB(cb
->skb
).portid
,
1944 RTM_NEWNETCONF
, NLM_F_MULTI
,
1950 if (h
== NETDEV_HASHENTRIES
+ 1) {
1951 if (inet_netconf_fill_devconf(skb
, NETCONFA_IFINDEX_DEFAULT
,
1952 net
->ipv4
.devconf_dflt
,
1953 NETLINK_CB(cb
->skb
).portid
,
1955 RTM_NEWNETCONF
, NLM_F_MULTI
,
1968 #ifdef CONFIG_SYSCTL
1970 static void devinet_copy_dflt_conf(struct net
*net
, int i
)
1972 struct net_device
*dev
;
1975 for_each_netdev_rcu(net
, dev
) {
1976 struct in_device
*in_dev
;
1978 in_dev
= __in_dev_get_rcu(dev
);
1979 if (in_dev
&& !test_bit(i
, in_dev
->cnf
.state
))
1980 in_dev
->cnf
.data
[i
] = net
->ipv4
.devconf_dflt
->data
[i
];
1985 /* called with RTNL locked */
1986 static void inet_forward_change(struct net
*net
)
1988 struct net_device
*dev
;
1989 int on
= IPV4_DEVCONF_ALL(net
, FORWARDING
);
1991 IPV4_DEVCONF_ALL(net
, ACCEPT_REDIRECTS
) = !on
;
1992 IPV4_DEVCONF_DFLT(net
, FORWARDING
) = on
;
1993 inet_netconf_notify_devconf(net
, NETCONFA_FORWARDING
,
1994 NETCONFA_IFINDEX_ALL
,
1995 net
->ipv4
.devconf_all
);
1996 inet_netconf_notify_devconf(net
, NETCONFA_FORWARDING
,
1997 NETCONFA_IFINDEX_DEFAULT
,
1998 net
->ipv4
.devconf_dflt
);
2000 for_each_netdev(net
, dev
) {
2001 struct in_device
*in_dev
;
2003 dev_disable_lro(dev
);
2005 in_dev
= __in_dev_get_rcu(dev
);
2007 IN_DEV_CONF_SET(in_dev
, FORWARDING
, on
);
2008 inet_netconf_notify_devconf(net
, NETCONFA_FORWARDING
,
2009 dev
->ifindex
, &in_dev
->cnf
);
2015 static int devinet_conf_ifindex(struct net
*net
, struct ipv4_devconf
*cnf
)
2017 if (cnf
== net
->ipv4
.devconf_dflt
)
2018 return NETCONFA_IFINDEX_DEFAULT
;
2019 else if (cnf
== net
->ipv4
.devconf_all
)
2020 return NETCONFA_IFINDEX_ALL
;
2022 struct in_device
*idev
2023 = container_of(cnf
, struct in_device
, cnf
);
2024 return idev
->dev
->ifindex
;
2028 static int devinet_conf_proc(struct ctl_table
*ctl
, int write
,
2029 void __user
*buffer
,
2030 size_t *lenp
, loff_t
*ppos
)
2032 int old_value
= *(int *)ctl
->data
;
2033 int ret
= proc_dointvec(ctl
, write
, buffer
, lenp
, ppos
);
2034 int new_value
= *(int *)ctl
->data
;
2037 struct ipv4_devconf
*cnf
= ctl
->extra1
;
2038 struct net
*net
= ctl
->extra2
;
2039 int i
= (int *)ctl
->data
- cnf
->data
;
2042 set_bit(i
, cnf
->state
);
2044 if (cnf
== net
->ipv4
.devconf_dflt
)
2045 devinet_copy_dflt_conf(net
, i
);
2046 if (i
== IPV4_DEVCONF_ACCEPT_LOCAL
- 1 ||
2047 i
== IPV4_DEVCONF_ROUTE_LOCALNET
- 1)
2048 if ((new_value
== 0) && (old_value
!= 0))
2049 rt_cache_flush(net
);
2051 if (i
== IPV4_DEVCONF_RP_FILTER
- 1 &&
2052 new_value
!= old_value
) {
2053 ifindex
= devinet_conf_ifindex(net
, cnf
);
2054 inet_netconf_notify_devconf(net
, NETCONFA_RP_FILTER
,
2057 if (i
== IPV4_DEVCONF_PROXY_ARP
- 1 &&
2058 new_value
!= old_value
) {
2059 ifindex
= devinet_conf_ifindex(net
, cnf
);
2060 inet_netconf_notify_devconf(net
, NETCONFA_PROXY_NEIGH
,
2063 if (i
== IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN
- 1 &&
2064 new_value
!= old_value
) {
2065 ifindex
= devinet_conf_ifindex(net
, cnf
);
2066 inet_netconf_notify_devconf(net
, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN
,
2074 static int devinet_sysctl_forward(struct ctl_table
*ctl
, int write
,
2075 void __user
*buffer
,
2076 size_t *lenp
, loff_t
*ppos
)
2078 int *valp
= ctl
->data
;
2081 int ret
= proc_dointvec(ctl
, write
, buffer
, lenp
, ppos
);
2083 if (write
&& *valp
!= val
) {
2084 struct net
*net
= ctl
->extra2
;
2086 if (valp
!= &IPV4_DEVCONF_DFLT(net
, FORWARDING
)) {
2087 if (!rtnl_trylock()) {
2088 /* Restore the original values before restarting */
2091 return restart_syscall();
2093 if (valp
== &IPV4_DEVCONF_ALL(net
, FORWARDING
)) {
2094 inet_forward_change(net
);
2096 struct ipv4_devconf
*cnf
= ctl
->extra1
;
2097 struct in_device
*idev
=
2098 container_of(cnf
, struct in_device
, cnf
);
2100 dev_disable_lro(idev
->dev
);
2101 inet_netconf_notify_devconf(net
,
2102 NETCONFA_FORWARDING
,
2107 rt_cache_flush(net
);
2109 inet_netconf_notify_devconf(net
, NETCONFA_FORWARDING
,
2110 NETCONFA_IFINDEX_DEFAULT
,
2111 net
->ipv4
.devconf_dflt
);
2117 static int ipv4_doint_and_flush(struct ctl_table
*ctl
, int write
,
2118 void __user
*buffer
,
2119 size_t *lenp
, loff_t
*ppos
)
2121 int *valp
= ctl
->data
;
2123 int ret
= proc_dointvec(ctl
, write
, buffer
, lenp
, ppos
);
2124 struct net
*net
= ctl
->extra2
;
2126 if (write
&& *valp
!= val
)
2127 rt_cache_flush(net
);
#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
	{ \
		.procname	= name, \
		.data		= ipv4_devconf.data + \
				  IPV4_DEVCONF_ ## attr - 1, \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
		.extra1		= &ipv4_devconf, \
	}

#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)

#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)

#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)

#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
static struct devinet_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
} devinet_sysctl = {
	.devinet_vars = {
		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
					     devinet_sysctl_forward),
		DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),

		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
		DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
		DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
					"accept_source_route"),
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
		DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
		DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
		DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
		DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
					"force_igmp_version"),
		DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
					"igmpv2_unsolicited_report_interval"),
		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
					"igmpv3_unsolicited_report_interval"),
		DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
					"ignore_routes_with_linkdown"),

		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
					      "promote_secondaries"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
					      "route_localnet"),
	},
};
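/*
 * These entries are registered per device under "net/ipv4/conf/<name>/..."
 * (plus the "all" and "default" pseudo-devices), so e.g. the FORWARDING
 * entry above appears as /proc/sys/net/ipv4/conf/eth0/forwarding.
 */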
2202 static int __devinet_sysctl_register(struct net
*net
, char *dev_name
,
2203 struct ipv4_devconf
*p
)
2206 struct devinet_sysctl_table
*t
;
2207 char path
[sizeof("net/ipv4/conf/") + IFNAMSIZ
];
2209 t
= kmemdup(&devinet_sysctl
, sizeof(*t
), GFP_KERNEL
);
2213 for (i
= 0; i
< ARRAY_SIZE(t
->devinet_vars
) - 1; i
++) {
2214 t
->devinet_vars
[i
].data
+= (char *)p
- (char *)&ipv4_devconf
;
2215 t
->devinet_vars
[i
].extra1
= p
;
2216 t
->devinet_vars
[i
].extra2
= net
;
2219 snprintf(path
, sizeof(path
), "net/ipv4/conf/%s", dev_name
);
2221 t
->sysctl_header
= register_net_sysctl(net
, path
, t
->devinet_vars
);
2222 if (!t
->sysctl_header
)
2234 static void __devinet_sysctl_unregister(struct ipv4_devconf
*cnf
)
2236 struct devinet_sysctl_table
*t
= cnf
->sysctl
;
2242 unregister_net_sysctl_table(t
->sysctl_header
);
2246 static int devinet_sysctl_register(struct in_device
*idev
)
2250 if (!sysctl_dev_name_is_allowed(idev
->dev
->name
))
2253 err
= neigh_sysctl_register(idev
->dev
, idev
->arp_parms
, NULL
);
2256 err
= __devinet_sysctl_register(dev_net(idev
->dev
), idev
->dev
->name
,
2259 neigh_sysctl_unregister(idev
->arp_parms
);
2263 static void devinet_sysctl_unregister(struct in_device
*idev
)
2265 __devinet_sysctl_unregister(&idev
->cnf
);
2266 neigh_sysctl_unregister(idev
->arp_parms
);
static struct ctl_table ctl_forward_entry[] = {
	{
		.procname	= "ip_forward",
		.data		= &ipv4_devconf.data[
					IPV4_DEVCONF_FORWARDING - 1],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= devinet_sysctl_forward,
		.extra1		= &ipv4_devconf,
		.extra2		= &init_net,
	},
	{ },
};
2284 static __net_init
int devinet_init_net(struct net
*net
)
2287 struct ipv4_devconf
*all
, *dflt
;
2288 #ifdef CONFIG_SYSCTL
2289 struct ctl_table
*tbl
= ctl_forward_entry
;
2290 struct ctl_table_header
*forw_hdr
;
2294 all
= &ipv4_devconf
;
2295 dflt
= &ipv4_devconf_dflt
;
2297 if (!net_eq(net
, &init_net
)) {
2298 all
= kmemdup(all
, sizeof(ipv4_devconf
), GFP_KERNEL
);
2302 dflt
= kmemdup(dflt
, sizeof(ipv4_devconf_dflt
), GFP_KERNEL
);
2304 goto err_alloc_dflt
;
2306 #ifdef CONFIG_SYSCTL
2307 tbl
= kmemdup(tbl
, sizeof(ctl_forward_entry
), GFP_KERNEL
);
2311 tbl
[0].data
= &all
->data
[IPV4_DEVCONF_FORWARDING
- 1];
2312 tbl
[0].extra1
= all
;
2313 tbl
[0].extra2
= net
;
2317 #ifdef CONFIG_SYSCTL
2318 err
= __devinet_sysctl_register(net
, "all", all
);
2322 err
= __devinet_sysctl_register(net
, "default", dflt
);
2327 forw_hdr
= register_net_sysctl(net
, "net/ipv4", tbl
);
2330 net
->ipv4
.forw_hdr
= forw_hdr
;
2333 net
->ipv4
.devconf_all
= all
;
2334 net
->ipv4
.devconf_dflt
= dflt
;
2337 #ifdef CONFIG_SYSCTL
2339 __devinet_sysctl_unregister(dflt
);
2341 __devinet_sysctl_unregister(all
);
2343 if (tbl
!= ctl_forward_entry
)
2347 if (dflt
!= &ipv4_devconf_dflt
)
2350 if (all
!= &ipv4_devconf
)
2356 static __net_exit
void devinet_exit_net(struct net
*net
)
2358 #ifdef CONFIG_SYSCTL
2359 struct ctl_table
*tbl
;
2361 tbl
= net
->ipv4
.forw_hdr
->ctl_table_arg
;
2362 unregister_net_sysctl_table(net
->ipv4
.forw_hdr
);
2363 __devinet_sysctl_unregister(net
->ipv4
.devconf_dflt
);
2364 __devinet_sysctl_unregister(net
->ipv4
.devconf_all
);
2367 kfree(net
->ipv4
.devconf_dflt
);
2368 kfree(net
->ipv4
.devconf_all
);
static __net_initdata struct pernet_operations devinet_ops = {
	.init = devinet_init_net,
	.exit = devinet_exit_net,
};

static struct rtnl_af_ops inet_af_ops __read_mostly = {
	.family		  = AF_INET,
	.fill_link_af	  = inet_fill_link_af,
	.get_link_af_size = inet_get_link_af_size,
	.validate_link_af = inet_validate_link_af,
	.set_link_af	  = inet_set_link_af,
};
void __init devinet_init(void)
{
	int i;

	for (i = 0; i < IN4_ADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&inet_addr_lst[i]);

	register_pernet_subsys(&devinet_ops);

	register_gifconf(PF_INET, inet_gifconf);
	register_netdevice_notifier(&ip_netdev_notifier);

	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);

	rtnl_af_register(&inet_af_ops);

	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
		      inet_netconf_dump_devconf, NULL);
}