/*
 *	NET3	IP device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Changes:
 *		Alexey Kuznetsov:	pa_* fields are replaced with ifaddr
 *		Cyrus Durgin:		updated for kmod
 *		Matthias Andree:	in devinet_ioctl, compare label and
 *					address (4.4BSD alias style support),
 *					fall back to comparing just the label
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_addr.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysctl.h>
#include <linux/kmod.h>
#include <linux/netconf.h>

#include <net/route.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/addrconf.h>
static struct ipv4_devconf ipv4_devconf = {
	.data = {
		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
	},
};

static struct ipv4_devconf ipv4_devconf_dflt = {
	.data = {
		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
		[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
	},
};

#define IPV4_DEVCONF_DFLT(net, attr) \
	IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
	[IFA_LOCAL]		= { .type = NLA_U32 },
	[IFA_ADDRESS]		= { .type = NLA_U32 },
	[IFA_BROADCAST]		= { .type = NLA_U32 },
	[IFA_LABEL]		= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
	[IFA_FLAGS]		= { .type = NLA_U32 },
	[IFA_RT_PRIORITY]	= { .type = NLA_U32 },
};
#define IN4_ADDR_HSIZE_SHIFT	8
#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)

static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
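
/* Every configured IPv4 address is also kept in inet_addr_lst, hashed by its
 * local address, so that a source address can be resolved to its in_ifaddr
 * without walking all devices.  Insertions and removals happen under RTNL;
 * lookups only need rcu_read_lock().
 */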
static u32 inet_addr_hash(const struct net *net, __be32 addr)
{
	u32 val = (__force u32) addr ^ net_hash_mix(net);

	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
}

static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
{
	u32 hash = inet_addr_hash(net, ifa->ifa_local);

	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
}

static void inet_hash_remove(struct in_ifaddr *ifa)
{
	hlist_del_init_rcu(&ifa->hash);
}
/**
 * __ip_dev_find - find the first device with a given source address.
 * @net: the net namespace
 * @addr: the source address
 * @devref: if true, take a reference on the found device
 *
 * If a caller uses devref=false, it should be protected by RCU, or RTNL
 */
struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
	struct net_device *result = NULL;
	struct in_ifaddr *ifa;

	rcu_read_lock();
	ifa = inet_lookup_ifaddr_rcu(net, addr);
	if (!ifa) {
		struct flowi4 fl4 = { .daddr = addr };
		struct fib_result res = { 0 };
		struct fib_table *local;

		/* Fallback to FIB local table so that communication
		 * over loopback subnets work.
		 */
		local = fib_get_table(net, RT_TABLE_LOCAL);
		if (local &&
		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
		    res.type == RTN_LOCAL)
			result = FIB_RES_DEV(res);
	} else {
		result = ifa->ifa_dev->dev;
	}
	if (result && devref)
		dev_hold(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(__ip_dev_find);
/* called under RCU lock */
struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr)
{
	u32 hash = inet_addr_hash(net, addr);
	struct in_ifaddr *ifa;

	hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash)
		if (ifa->ifa_local == addr &&
		    net_eq(dev_net(ifa->ifa_dev->dev), net))
			return ifa;

	return NULL;
}
static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);

static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			 int destroy);
#ifdef CONFIG_SYSCTL
static int devinet_sysctl_register(struct in_device *idev);
static void devinet_sysctl_unregister(struct in_device *idev);
#else
static int devinet_sysctl_register(struct in_device *idev)
{
	return 0;
}
static void devinet_sysctl_unregister(struct in_device *idev)
{
}
#endif
/* Locks all the inet devices. */

static struct in_ifaddr *inet_alloc_ifa(void)
{
	return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
}

static void inet_rcu_free_ifa(struct rcu_head *head)
{
	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);

	if (ifa->ifa_dev)
		in_dev_put(ifa->ifa_dev);
	kfree(ifa);
}

static void inet_free_ifa(struct in_ifaddr *ifa)
{
	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
}
void in_dev_finish_destroy(struct in_device *idev)
{
	struct net_device *dev = idev->dev;

	WARN_ON(idev->ifa_list);
	WARN_ON(idev->mc_list);
	kfree(rcu_dereference_protected(idev->mc_hash, 1));
#ifdef NET_REFCNT_DEBUG
	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
	dev_put(dev);
	if (!idev->dead)
		pr_err("Freeing alive in_device %p\n", idev);
	else
		kfree(idev);
}
EXPORT_SYMBOL(in_dev_finish_destroy);
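
/* inetdev_init() attaches a new in_device to a network device.  Per-device
 * configuration, ARP parameters and sysctls are set up first; dev->ip_ptr is
 * published last because IPv4 receive processing may start using the
 * in_device as soon as that pointer becomes visible.
 */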
static struct in_device *inetdev_init(struct net_device *dev)
{
	struct in_device *in_dev;
	int err = -ENOMEM;

	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
	if (!in_dev)
		goto out;
	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
			sizeof(in_dev->cnf));
	in_dev->cnf.sysctl = NULL;
	in_dev->dev = dev;
	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
	if (!in_dev->arp_parms)
		goto out_kfree;
	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
		dev_disable_lro(dev);
	/* Reference in_dev->dev */
	dev_hold(dev);
	/* Account for reference dev->ip_ptr (below) */
	refcount_set(&in_dev->refcnt, 1);

	err = devinet_sysctl_register(in_dev);
	if (err) {
		in_dev->dead = 1;
		in_dev_put(in_dev);
		in_dev = NULL;
		goto out;
	}
	ip_mc_init_dev(in_dev);
	if (dev->flags & IFF_UP)
		ip_mc_up(in_dev);

	/* we can receive as soon as ip_ptr is set -- do this last */
	rcu_assign_pointer(dev->ip_ptr, in_dev);
out:
	return in_dev ?: ERR_PTR(err);
out_kfree:
	kfree(in_dev);
	in_dev = NULL;
	goto out;
}
static void in_dev_rcu_put(struct rcu_head *head)
{
	struct in_device *idev = container_of(head, struct in_device, rcu_head);

	in_dev_finish_destroy(idev);
}

static void inetdev_destroy(struct in_device *in_dev)
{
	struct in_ifaddr *ifa;
	struct net_device *dev;

	dev = in_dev->dev;

	in_dev->dead = 1;

	ip_mc_destroy_dev(in_dev);

	while ((ifa = in_dev->ifa_list) != NULL) {
		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
		inet_free_ifa(ifa);
	}

	RCU_INIT_POINTER(dev->ip_ptr, NULL);

	devinet_sysctl_unregister(in_dev);
	neigh_parms_release(&arp_tbl, in_dev->arp_parms);

	call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
}
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
{
	rcu_read_lock();
	for_primary_ifa(in_dev) {
		if (inet_ifa_match(a, ifa)) {
			if (!b || inet_ifa_match(b, ifa)) {
				rcu_read_unlock();
				return 1;
			}
		}
	} endfor_ifa(in_dev);
	rcu_read_unlock();
	return 0;
}
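
/* __inet_del_ifa() removes one address in three steps: drop (or promote) the
 * secondary addresses that depended on a deleted primary, unlink the address
 * from the device list and the global hash, and finally announce the deletion
 * through netlink and the inetaddr notifier chain.
 */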
static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			   int destroy, struct nlmsghdr *nlh, u32 portid)
{
	struct in_ifaddr *promote = NULL;
	struct in_ifaddr *ifa, *ifa1 = *ifap;
	struct in_ifaddr *last_prim = in_dev->ifa_list;
	struct in_ifaddr *prev_prom = NULL;
	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);

	/* 1. Deleting primary ifaddr forces deletion all secondaries
	 * unless alias promotion is set
	 **/

	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
		struct in_ifaddr **ifap1 = &ifa1->ifa_next;

		while ((ifa = *ifap1) != NULL) {
			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
			    ifa1->ifa_scope <= ifa->ifa_scope)
				last_prim = ifa;

			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
			    ifa1->ifa_mask != ifa->ifa_mask ||
			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
				ifap1 = &ifa->ifa_next;
				prev_prom = ifa;
				continue;
			}

			if (!do_promote) {
				inet_hash_remove(ifa);
				*ifap1 = ifa->ifa_next;

				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
				blocking_notifier_call_chain(&inetaddr_chain,
						NETDEV_DOWN, ifa);
				inet_free_ifa(ifa);
			} else {
				promote = ifa;
				ifap1 = &ifa->ifa_next;
			}
		}
	}

	/* On promotion all secondaries from subnet are changing
	 * the primary IP, we must remove all their routes silently
	 * and later to add them back with new prefsrc. Do this
	 * while all addresses are on the device list.
	 */
	for (ifa = promote; ifa; ifa = ifa->ifa_next) {
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa))
			fib_del_ifaddr(ifa, ifa1);
	}

	/* 2. Unlink it */

	*ifap = ifa1->ifa_next;
	inet_hash_remove(ifa1);

	/* 3. Announce address deletion */

	/* Send message first, then call notifier.
	   At first sight, FIB update triggered by notifier
	   will refer to already deleted ifaddr, that could confuse
	   netlink listeners. It is not true: look, gated sees
	   that route deleted and if it still thinks that ifaddr
	   is valid, it will try to restore deleted routes... Grr.
	   So that, this order is correct.
	 */
	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);

	if (promote) {
		struct in_ifaddr *next_sec = promote->ifa_next;

		if (prev_prom) {
			prev_prom->ifa_next = promote->ifa_next;
			promote->ifa_next = last_prim->ifa_next;
			last_prim->ifa_next = promote;
		}

		promote->ifa_flags &= ~IFA_F_SECONDARY;
		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
		blocking_notifier_call_chain(&inetaddr_chain,
				NETDEV_UP, promote);
		for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
			if (ifa1->ifa_mask != ifa->ifa_mask ||
			    !inet_ifa_match(ifa1->ifa_address, ifa))
				continue;
			fib_add_ifaddr(ifa);
		}
	}
	if (destroy)
		inet_free_ifa(ifa1);
}

static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			 int destroy)
{
	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
}
static void check_lifetime(struct work_struct *work);

static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
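
/* The check_lifetime work item (defined further down) runs on the
 * power-efficient workqueue.  It is (re)scheduled whenever an address with a
 * finite lifetime is installed or updated; it deletes addresses whose valid
 * lifetime has expired and marks those past their preferred lifetime with
 * IFA_F_DEPRECATED.
 */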
static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
			     u32 portid, struct netlink_ext_ack *extack)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct in_ifaddr *ifa1, **ifap, **last_primary;
	struct in_validator_info ivi;
	int ret;

	if (!ifa->ifa_local) {
		inet_free_ifa(ifa);
		return 0;
	}

	ifa->ifa_flags &= ~IFA_F_SECONDARY;
	last_primary = &in_dev->ifa_list;

	for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
	     ifap = &ifa1->ifa_next) {
		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
		    ifa->ifa_scope <= ifa1->ifa_scope)
			last_primary = &ifa1->ifa_next;
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa)) {
			if (ifa1->ifa_local == ifa->ifa_local) {
				inet_free_ifa(ifa);
				return -EEXIST;
			}
			if (ifa1->ifa_scope != ifa->ifa_scope) {
				inet_free_ifa(ifa);
				return -EINVAL;
			}
			ifa->ifa_flags |= IFA_F_SECONDARY;
		}
	}

	/* Allow any devices that wish to register ifaddr validators to weigh
	 * in now, before changes are committed.  The rtnl lock is serializing
	 * access here, so the state should not change between a validator call
	 * and a final notify on commit.  This isn't invoked on promotion under
	 * the assumption that validators are checking the address itself, and
	 * not the flags.
	 */
	ivi.ivi_addr = ifa->ifa_address;
	ivi.ivi_dev = ifa->ifa_dev;
	ivi.extack = extack;
	ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
					   NETDEV_UP, &ivi);
	ret = notifier_to_errno(ret);
	if (ret) {
		inet_free_ifa(ifa);
		return ret;
	}

	if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
		prandom_seed((__force u32) ifa->ifa_local);
		ifap = last_primary;
	}

	ifa->ifa_next = *ifap;
	*ifap = ifa;

	inet_hash_insert(dev_net(in_dev->dev), ifa);

	cancel_delayed_work(&check_lifetime_work);
	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);

	/* Send message first, then call notifier.
	   Notifier will trigger FIB update, so that
	   listeners of netlink will know about new ifaddr */
	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);

	return 0;
}

static int inet_insert_ifa(struct in_ifaddr *ifa)
{
	return __inet_insert_ifa(ifa, NULL, 0, NULL);
}
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
	struct in_device *in_dev = __in_dev_get_rtnl(dev);

	if (!in_dev) {
		inet_free_ifa(ifa);
		return -ENOBUFS;
	}
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	if (ifa->ifa_dev != in_dev) {
		WARN_ON(ifa->ifa_dev);
		in_dev_hold(in_dev);
		ifa->ifa_dev = in_dev;
	}
	if (ipv4_is_loopback(ifa->ifa_local))
		ifa->ifa_scope = RT_SCOPE_HOST;
	return inet_insert_ifa(ifa);
}
/* Caller must hold RCU or RTNL :
 * We dont take a reference on found in_device
 */
struct in_device *inetdev_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct in_device *in_dev = NULL;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		in_dev = rcu_dereference_rtnl(dev->ip_ptr);
	rcu_read_unlock();
	return in_dev;
}
EXPORT_SYMBOL(inetdev_by_index);
/* Called only from RTNL semaphored context. No locks. */

struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
				    __be32 mask)
{
	for_primary_ifa(in_dev) {
		if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
			return ifa;
	} endfor_ifa(in_dev);
	return NULL;
}
static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
{
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = ifa->ifa_address,
		.imr_ifindex = ifa->ifa_dev->dev->ifindex,
	};
	int ret;

	lock_sock(sk);
	if (join)
		ret = ip_mc_join_group(sk, &mreq);
	else
		ret = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);

	return ret;
}
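
/* ip_mc_config() is used by the RTM_NEWADDR/RTM_DELADDR handlers to join or
 * leave the multicast group behind an auto-joined multicast address, via the
 * per-namespace mc_autojoin_sk kernel socket.
 */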
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX+1];
	struct in_device *in_dev;
	struct ifaddrmsg *ifm;
	struct in_ifaddr *ifa, **ifap;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy,
			  extack);
	if (err < 0)
		goto errout;

	ifm = nlmsg_data(nlh);
	in_dev = inetdev_by_index(net, ifm->ifa_index);
	if (!in_dev) {
		err = -ENODEV;
		goto errout;
	}

	for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
	     ifap = &ifa->ifa_next) {
		if (tb[IFA_LOCAL] &&
		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
			continue;

		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
			continue;

		if (tb[IFA_ADDRESS] &&
		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
		    !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
			continue;

		if (ipv4_is_multicast(ifa->ifa_address))
			ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
		return 0;
	}

	err = -EADDRNOTAVAIL;
errout:
	return err;
}

#define INFINITY_LIFE_TIME	0xFFFFFFFF
653 static void check_lifetime(struct work_struct
*work
)
655 unsigned long now
, next
, next_sec
, next_sched
;
656 struct in_ifaddr
*ifa
;
657 struct hlist_node
*n
;
661 next
= round_jiffies_up(now
+ ADDR_CHECK_FREQUENCY
);
663 for (i
= 0; i
< IN4_ADDR_HSIZE
; i
++) {
664 bool change_needed
= false;
667 hlist_for_each_entry_rcu(ifa
, &inet_addr_lst
[i
], hash
) {
670 if (ifa
->ifa_flags
& IFA_F_PERMANENT
)
673 /* We try to batch several events at once. */
674 age
= (now
- ifa
->ifa_tstamp
+
675 ADDRCONF_TIMER_FUZZ_MINUS
) / HZ
;
677 if (ifa
->ifa_valid_lft
!= INFINITY_LIFE_TIME
&&
678 age
>= ifa
->ifa_valid_lft
) {
679 change_needed
= true;
680 } else if (ifa
->ifa_preferred_lft
==
681 INFINITY_LIFE_TIME
) {
683 } else if (age
>= ifa
->ifa_preferred_lft
) {
684 if (time_before(ifa
->ifa_tstamp
+
685 ifa
->ifa_valid_lft
* HZ
, next
))
686 next
= ifa
->ifa_tstamp
+
687 ifa
->ifa_valid_lft
* HZ
;
689 if (!(ifa
->ifa_flags
& IFA_F_DEPRECATED
))
690 change_needed
= true;
691 } else if (time_before(ifa
->ifa_tstamp
+
692 ifa
->ifa_preferred_lft
* HZ
,
694 next
= ifa
->ifa_tstamp
+
695 ifa
->ifa_preferred_lft
* HZ
;
702 hlist_for_each_entry_safe(ifa
, n
, &inet_addr_lst
[i
], hash
) {
705 if (ifa
->ifa_flags
& IFA_F_PERMANENT
)
708 /* We try to batch several events at once. */
709 age
= (now
- ifa
->ifa_tstamp
+
710 ADDRCONF_TIMER_FUZZ_MINUS
) / HZ
;
712 if (ifa
->ifa_valid_lft
!= INFINITY_LIFE_TIME
&&
713 age
>= ifa
->ifa_valid_lft
) {
714 struct in_ifaddr
**ifap
;
716 for (ifap
= &ifa
->ifa_dev
->ifa_list
;
717 *ifap
!= NULL
; ifap
= &(*ifap
)->ifa_next
) {
719 inet_del_ifa(ifa
->ifa_dev
,
724 } else if (ifa
->ifa_preferred_lft
!=
725 INFINITY_LIFE_TIME
&&
726 age
>= ifa
->ifa_preferred_lft
&&
727 !(ifa
->ifa_flags
& IFA_F_DEPRECATED
)) {
728 ifa
->ifa_flags
|= IFA_F_DEPRECATED
;
729 rtmsg_ifa(RTM_NEWADDR
, ifa
, NULL
, 0);
735 next_sec
= round_jiffies_up(next
);
738 /* If rounded timeout is accurate enough, accept it. */
739 if (time_before(next_sec
, next
+ ADDRCONF_TIMER_FUZZ
))
740 next_sched
= next_sec
;
743 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
744 if (time_before(next_sched
, now
+ ADDRCONF_TIMER_FUZZ_MAX
))
745 next_sched
= now
+ ADDRCONF_TIMER_FUZZ_MAX
;
747 queue_delayed_work(system_power_efficient_wq
, &check_lifetime_work
,
static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
			     __u32 prefered_lft)
{
	unsigned long timeout;

	ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);

	timeout = addrconf_timeout_fixup(valid_lft, HZ);
	if (addrconf_finite_timeout(timeout))
		ifa->ifa_valid_lft = timeout;
	else
		ifa->ifa_flags |= IFA_F_PERMANENT;

	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
	if (addrconf_finite_timeout(timeout)) {
		if (timeout == 0)
			ifa->ifa_flags |= IFA_F_DEPRECATED;
		ifa->ifa_preferred_lft = timeout;
	}
	ifa->ifa_tstamp = jiffies;
	if (!ifa->ifa_cstamp)
		ifa->ifa_cstamp = ifa->ifa_tstamp;
}
775 static struct in_ifaddr
*rtm_to_ifaddr(struct net
*net
, struct nlmsghdr
*nlh
,
776 __u32
*pvalid_lft
, __u32
*pprefered_lft
)
778 struct nlattr
*tb
[IFA_MAX
+1];
779 struct in_ifaddr
*ifa
;
780 struct ifaddrmsg
*ifm
;
781 struct net_device
*dev
;
782 struct in_device
*in_dev
;
785 err
= nlmsg_parse(nlh
, sizeof(*ifm
), tb
, IFA_MAX
, ifa_ipv4_policy
,
790 ifm
= nlmsg_data(nlh
);
792 if (ifm
->ifa_prefixlen
> 32 || !tb
[IFA_LOCAL
])
795 dev
= __dev_get_by_index(net
, ifm
->ifa_index
);
800 in_dev
= __in_dev_get_rtnl(dev
);
805 ifa
= inet_alloc_ifa();
808 * A potential indev allocation can be left alive, it stays
809 * assigned to its device and is destroy with it.
813 ipv4_devconf_setall(in_dev
);
814 neigh_parms_data_state_setall(in_dev
->arp_parms
);
817 if (!tb
[IFA_ADDRESS
])
818 tb
[IFA_ADDRESS
] = tb
[IFA_LOCAL
];
820 INIT_HLIST_NODE(&ifa
->hash
);
821 ifa
->ifa_prefixlen
= ifm
->ifa_prefixlen
;
822 ifa
->ifa_mask
= inet_make_mask(ifm
->ifa_prefixlen
);
823 ifa
->ifa_flags
= tb
[IFA_FLAGS
] ? nla_get_u32(tb
[IFA_FLAGS
]) :
825 ifa
->ifa_scope
= ifm
->ifa_scope
;
826 ifa
->ifa_dev
= in_dev
;
828 ifa
->ifa_local
= nla_get_in_addr(tb
[IFA_LOCAL
]);
829 ifa
->ifa_address
= nla_get_in_addr(tb
[IFA_ADDRESS
]);
831 if (tb
[IFA_BROADCAST
])
832 ifa
->ifa_broadcast
= nla_get_in_addr(tb
[IFA_BROADCAST
]);
835 nla_strlcpy(ifa
->ifa_label
, tb
[IFA_LABEL
], IFNAMSIZ
);
837 memcpy(ifa
->ifa_label
, dev
->name
, IFNAMSIZ
);
839 if (tb
[IFA_RT_PRIORITY
])
840 ifa
->ifa_rt_priority
= nla_get_u32(tb
[IFA_RT_PRIORITY
]);
842 if (tb
[IFA_CACHEINFO
]) {
843 struct ifa_cacheinfo
*ci
;
845 ci
= nla_data(tb
[IFA_CACHEINFO
]);
846 if (!ci
->ifa_valid
|| ci
->ifa_prefered
> ci
->ifa_valid
) {
850 *pvalid_lft
= ci
->ifa_valid
;
851 *pprefered_lft
= ci
->ifa_prefered
;
862 static struct in_ifaddr
*find_matching_ifa(struct in_ifaddr
*ifa
)
864 struct in_device
*in_dev
= ifa
->ifa_dev
;
865 struct in_ifaddr
*ifa1
, **ifap
;
870 for (ifap
= &in_dev
->ifa_list
; (ifa1
= *ifap
) != NULL
;
871 ifap
= &ifa1
->ifa_next
) {
872 if (ifa1
->ifa_mask
== ifa
->ifa_mask
&&
873 inet_ifa_match(ifa1
->ifa_address
, ifa
) &&
874 ifa1
->ifa_local
== ifa
->ifa_local
)
880 static int inet_rtm_newaddr(struct sk_buff
*skb
, struct nlmsghdr
*nlh
,
881 struct netlink_ext_ack
*extack
)
883 struct net
*net
= sock_net(skb
->sk
);
884 struct in_ifaddr
*ifa
;
885 struct in_ifaddr
*ifa_existing
;
886 __u32 valid_lft
= INFINITY_LIFE_TIME
;
887 __u32 prefered_lft
= INFINITY_LIFE_TIME
;
891 ifa
= rtm_to_ifaddr(net
, nlh
, &valid_lft
, &prefered_lft
);
895 ifa_existing
= find_matching_ifa(ifa
);
897 /* It would be best to check for !NLM_F_CREATE here but
898 * userspace already relies on not having to provide this.
900 set_ifa_lifetime(ifa
, valid_lft
, prefered_lft
);
901 if (ifa
->ifa_flags
& IFA_F_MCAUTOJOIN
) {
902 int ret
= ip_mc_config(net
->ipv4
.mc_autojoin_sk
,
910 return __inet_insert_ifa(ifa
, nlh
, NETLINK_CB(skb
).portid
,
913 u32 new_metric
= ifa
->ifa_rt_priority
;
917 if (nlh
->nlmsg_flags
& NLM_F_EXCL
||
918 !(nlh
->nlmsg_flags
& NLM_F_REPLACE
))
922 if (ifa
->ifa_rt_priority
!= new_metric
) {
923 fib_modify_prefix_metric(ifa
, new_metric
);
924 ifa
->ifa_rt_priority
= new_metric
;
927 set_ifa_lifetime(ifa
, valid_lft
, prefered_lft
);
928 cancel_delayed_work(&check_lifetime_work
);
929 queue_delayed_work(system_power_efficient_wq
,
930 &check_lifetime_work
, 0);
931 rtmsg_ifa(RTM_NEWADDR
, ifa
, nlh
, NETLINK_CB(skb
).portid
);
/*
 *	Determine a default network mask, based on the IP address.
 */

static int inet_abc_len(__be32 addr)
{
	int rc = -1;	/* Something else, probably a multicast. */

	if (ipv4_is_zeronet(addr))
		rc = 0;
	else {
		__u32 haddr = ntohl(addr);

		if (IN_CLASSA(haddr))
			rc = 8;
		else if (IN_CLASSB(haddr))
			rc = 16;
		else if (IN_CLASSC(haddr))
			rc = 24;
	}

	return rc;
}
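
/* devinet_ioctl() backs the legacy SIOCGIFADDR/SIOCSIFADDR family of ioctls.
 * As noted in the file header, the get/set handlers first match an alias by
 * label and address (4.4BSD style) and fall back to matching on the label
 * alone.
 */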
961 int devinet_ioctl(struct net
*net
, unsigned int cmd
, struct ifreq
*ifr
)
963 struct sockaddr_in sin_orig
;
964 struct sockaddr_in
*sin
= (struct sockaddr_in
*)&ifr
->ifr_addr
;
965 struct in_device
*in_dev
;
966 struct in_ifaddr
**ifap
= NULL
;
967 struct in_ifaddr
*ifa
= NULL
;
968 struct net_device
*dev
;
971 int tryaddrmatch
= 0;
973 ifr
->ifr_name
[IFNAMSIZ
- 1] = 0;
975 /* save original address for comparison */
976 memcpy(&sin_orig
, sin
, sizeof(*sin
));
978 colon
= strchr(ifr
->ifr_name
, ':');
982 dev_load(net
, ifr
->ifr_name
);
985 case SIOCGIFADDR
: /* Get interface address */
986 case SIOCGIFBRDADDR
: /* Get the broadcast address */
987 case SIOCGIFDSTADDR
: /* Get the destination address */
988 case SIOCGIFNETMASK
: /* Get the netmask for the interface */
989 /* Note that these ioctls will not sleep,
990 so that we do not impose a lock.
991 One day we will be forced to put shlock here (I mean SMP)
993 tryaddrmatch
= (sin_orig
.sin_family
== AF_INET
);
994 memset(sin
, 0, sizeof(*sin
));
995 sin
->sin_family
= AF_INET
;
1000 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
1003 case SIOCSIFADDR
: /* Set interface address (and family) */
1004 case SIOCSIFBRDADDR
: /* Set the broadcast address */
1005 case SIOCSIFDSTADDR
: /* Set the destination address */
1006 case SIOCSIFNETMASK
: /* Set the netmask for the interface */
1008 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
1011 if (sin
->sin_family
!= AF_INET
)
1022 dev
= __dev_get_by_name(net
, ifr
->ifr_name
);
1029 in_dev
= __in_dev_get_rtnl(dev
);
1032 /* Matthias Andree */
1033 /* compare label and address (4.4BSD style) */
1034 /* note: we only do this for a limited set of ioctls
1035 and only if the original address family was AF_INET.
1036 This is checked above. */
1037 for (ifap
= &in_dev
->ifa_list
; (ifa
= *ifap
) != NULL
;
1038 ifap
= &ifa
->ifa_next
) {
1039 if (!strcmp(ifr
->ifr_name
, ifa
->ifa_label
) &&
1040 sin_orig
.sin_addr
.s_addr
==
1046 /* we didn't get a match, maybe the application is
1047 4.3BSD-style and passed in junk so we fall back to
1048 comparing just the label */
1050 for (ifap
= &in_dev
->ifa_list
; (ifa
= *ifap
) != NULL
;
1051 ifap
= &ifa
->ifa_next
)
1052 if (!strcmp(ifr
->ifr_name
, ifa
->ifa_label
))
1057 ret
= -EADDRNOTAVAIL
;
1058 if (!ifa
&& cmd
!= SIOCSIFADDR
&& cmd
!= SIOCSIFFLAGS
)
1062 case SIOCGIFADDR
: /* Get interface address */
1064 sin
->sin_addr
.s_addr
= ifa
->ifa_local
;
1067 case SIOCGIFBRDADDR
: /* Get the broadcast address */
1069 sin
->sin_addr
.s_addr
= ifa
->ifa_broadcast
;
1072 case SIOCGIFDSTADDR
: /* Get the destination address */
1074 sin
->sin_addr
.s_addr
= ifa
->ifa_address
;
1077 case SIOCGIFNETMASK
: /* Get the netmask for the interface */
1079 sin
->sin_addr
.s_addr
= ifa
->ifa_mask
;
1084 ret
= -EADDRNOTAVAIL
;
1088 if (!(ifr
->ifr_flags
& IFF_UP
))
1089 inet_del_ifa(in_dev
, ifap
, 1);
1092 ret
= dev_change_flags(dev
, ifr
->ifr_flags
);
1095 case SIOCSIFADDR
: /* Set interface address (and family) */
1097 if (inet_abc_len(sin
->sin_addr
.s_addr
) < 0)
1102 ifa
= inet_alloc_ifa();
1105 INIT_HLIST_NODE(&ifa
->hash
);
1107 memcpy(ifa
->ifa_label
, ifr
->ifr_name
, IFNAMSIZ
);
1109 memcpy(ifa
->ifa_label
, dev
->name
, IFNAMSIZ
);
1112 if (ifa
->ifa_local
== sin
->sin_addr
.s_addr
)
1114 inet_del_ifa(in_dev
, ifap
, 0);
1115 ifa
->ifa_broadcast
= 0;
1119 ifa
->ifa_address
= ifa
->ifa_local
= sin
->sin_addr
.s_addr
;
1121 if (!(dev
->flags
& IFF_POINTOPOINT
)) {
1122 ifa
->ifa_prefixlen
= inet_abc_len(ifa
->ifa_address
);
1123 ifa
->ifa_mask
= inet_make_mask(ifa
->ifa_prefixlen
);
1124 if ((dev
->flags
& IFF_BROADCAST
) &&
1125 ifa
->ifa_prefixlen
< 31)
1126 ifa
->ifa_broadcast
= ifa
->ifa_address
|
1129 ifa
->ifa_prefixlen
= 32;
1130 ifa
->ifa_mask
= inet_make_mask(32);
1132 set_ifa_lifetime(ifa
, INFINITY_LIFE_TIME
, INFINITY_LIFE_TIME
);
1133 ret
= inet_set_ifa(dev
, ifa
);
1136 case SIOCSIFBRDADDR
: /* Set the broadcast address */
1138 if (ifa
->ifa_broadcast
!= sin
->sin_addr
.s_addr
) {
1139 inet_del_ifa(in_dev
, ifap
, 0);
1140 ifa
->ifa_broadcast
= sin
->sin_addr
.s_addr
;
1141 inet_insert_ifa(ifa
);
1145 case SIOCSIFDSTADDR
: /* Set the destination address */
1147 if (ifa
->ifa_address
== sin
->sin_addr
.s_addr
)
1150 if (inet_abc_len(sin
->sin_addr
.s_addr
) < 0)
1153 inet_del_ifa(in_dev
, ifap
, 0);
1154 ifa
->ifa_address
= sin
->sin_addr
.s_addr
;
1155 inet_insert_ifa(ifa
);
1158 case SIOCSIFNETMASK
: /* Set the netmask for the interface */
1161 * The mask we set must be legal.
1164 if (bad_mask(sin
->sin_addr
.s_addr
, 0))
1167 if (ifa
->ifa_mask
!= sin
->sin_addr
.s_addr
) {
1168 __be32 old_mask
= ifa
->ifa_mask
;
1169 inet_del_ifa(in_dev
, ifap
, 0);
1170 ifa
->ifa_mask
= sin
->sin_addr
.s_addr
;
1171 ifa
->ifa_prefixlen
= inet_mask_len(ifa
->ifa_mask
);
1173 /* See if current broadcast address matches
1174 * with current netmask, then recalculate
1175 * the broadcast address. Otherwise it's a
1176 * funny address, so don't touch it since
1177 * the user seems to know what (s)he's doing...
1179 if ((dev
->flags
& IFF_BROADCAST
) &&
1180 (ifa
->ifa_prefixlen
< 31) &&
1181 (ifa
->ifa_broadcast
==
1182 (ifa
->ifa_local
|~old_mask
))) {
1183 ifa
->ifa_broadcast
= (ifa
->ifa_local
|
1184 ~sin
->sin_addr
.s_addr
);
1186 inet_insert_ifa(ifa
);
1196 static int inet_gifconf(struct net_device
*dev
, char __user
*buf
, int len
, int size
)
1198 struct in_device
*in_dev
= __in_dev_get_rtnl(dev
);
1199 struct in_ifaddr
*ifa
;
1203 if (WARN_ON(size
> sizeof(struct ifreq
)))
1209 for (ifa
= in_dev
->ifa_list
; ifa
; ifa
= ifa
->ifa_next
) {
1216 memset(&ifr
, 0, sizeof(struct ifreq
));
1217 strcpy(ifr
.ifr_name
, ifa
->ifa_label
);
1219 (*(struct sockaddr_in
*)&ifr
.ifr_addr
).sin_family
= AF_INET
;
1220 (*(struct sockaddr_in
*)&ifr
.ifr_addr
).sin_addr
.s_addr
=
1223 if (copy_to_user(buf
+ done
, &ifr
, size
)) {
1234 static __be32
in_dev_select_addr(const struct in_device
*in_dev
,
1237 for_primary_ifa(in_dev
) {
1238 if (ifa
->ifa_scope
!= RT_SCOPE_LINK
&&
1239 ifa
->ifa_scope
<= scope
)
1240 return ifa
->ifa_local
;
1241 } endfor_ifa(in_dev
);
1246 __be32
inet_select_addr(const struct net_device
*dev
, __be32 dst
, int scope
)
1249 struct in_device
*in_dev
;
1250 struct net
*net
= dev_net(dev
);
1254 in_dev
= __in_dev_get_rcu(dev
);
1258 for_primary_ifa(in_dev
) {
1259 if (ifa
->ifa_scope
> scope
)
1261 if (!dst
|| inet_ifa_match(dst
, ifa
)) {
1262 addr
= ifa
->ifa_local
;
1266 addr
= ifa
->ifa_local
;
1267 } endfor_ifa(in_dev
);
1272 master_idx
= l3mdev_master_ifindex_rcu(dev
);
1274 /* For VRFs, the VRF device takes the place of the loopback device,
1275 * with addresses on it being preferred. Note in such cases the
1276 * loopback device will be among the devices that fail the master_idx
1277 * equality check in the loop below.
1280 (dev
= dev_get_by_index_rcu(net
, master_idx
)) &&
1281 (in_dev
= __in_dev_get_rcu(dev
))) {
1282 addr
= in_dev_select_addr(in_dev
, scope
);
1287 /* Not loopback addresses on loopback should be preferred
1288 in this case. It is important that lo is the first interface
1291 for_each_netdev_rcu(net
, dev
) {
1292 if (l3mdev_master_ifindex_rcu(dev
) != master_idx
)
1295 in_dev
= __in_dev_get_rcu(dev
);
1299 addr
= in_dev_select_addr(in_dev
, scope
);
1307 EXPORT_SYMBOL(inet_select_addr
);
1309 static __be32
confirm_addr_indev(struct in_device
*in_dev
, __be32 dst
,
1310 __be32 local
, int scope
)
1317 (local
== ifa
->ifa_local
|| !local
) &&
1318 ifa
->ifa_scope
<= scope
) {
1319 addr
= ifa
->ifa_local
;
1324 same
= (!local
|| inet_ifa_match(local
, ifa
)) &&
1325 (!dst
|| inet_ifa_match(dst
, ifa
));
1329 /* Is the selected addr into dst subnet? */
1330 if (inet_ifa_match(addr
, ifa
))
1332 /* No, then can we use new local src? */
1333 if (ifa
->ifa_scope
<= scope
) {
1334 addr
= ifa
->ifa_local
;
1337 /* search for large dst subnet for addr */
1341 } endfor_ifa(in_dev
);
1343 return same
? addr
: 0;
1347 * Confirm that local IP address exists using wildcards:
1348 * - net: netns to check, cannot be NULL
1349 * - in_dev: only on this interface, NULL=any interface
1350 * - dst: only in the same subnet as dst, 0=any dst
1351 * - local: address, 0=autoselect the local address
1352 * - scope: maximum allowed scope value for the local address
1354 __be32
inet_confirm_addr(struct net
*net
, struct in_device
*in_dev
,
1355 __be32 dst
, __be32 local
, int scope
)
1358 struct net_device
*dev
;
1361 return confirm_addr_indev(in_dev
, dst
, local
, scope
);
1364 for_each_netdev_rcu(net
, dev
) {
1365 in_dev
= __in_dev_get_rcu(dev
);
1367 addr
= confirm_addr_indev(in_dev
, dst
, local
, scope
);
1376 EXPORT_SYMBOL(inet_confirm_addr
);
int register_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(register_inetaddr_notifier);

int unregister_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(unregister_inetaddr_notifier);

int register_inetaddr_validator_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
}
EXPORT_SYMBOL(register_inetaddr_validator_notifier);

int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
	    nb);
}
EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
1407 /* Rename ifa_labels for a device name change. Make some effort to preserve
1408 * existing alias numbering and to create unique labels if possible.
1410 static void inetdev_changename(struct net_device
*dev
, struct in_device
*in_dev
)
1412 struct in_ifaddr
*ifa
;
1415 for (ifa
= in_dev
->ifa_list
; ifa
; ifa
= ifa
->ifa_next
) {
1416 char old
[IFNAMSIZ
], *dot
;
1418 memcpy(old
, ifa
->ifa_label
, IFNAMSIZ
);
1419 memcpy(ifa
->ifa_label
, dev
->name
, IFNAMSIZ
);
1422 dot
= strchr(old
, ':');
1424 sprintf(old
, ":%d", named
);
1427 if (strlen(dot
) + strlen(dev
->name
) < IFNAMSIZ
)
1428 strcat(ifa
->ifa_label
, dot
);
1430 strcpy(ifa
->ifa_label
+ (IFNAMSIZ
- strlen(dot
) - 1), dot
);
1432 rtmsg_ifa(RTM_NEWADDR
, ifa
, NULL
, 0);
static bool inetdev_valid_mtu(unsigned int mtu)
{
	return mtu >= IPV4_MIN_MTU;
}
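
/* inetdev_send_gratuitous_arp() announces every local address on the device
 * with an ARP request; inetdev_event() invokes it on NETDEV_NOTIFY_PEERS and,
 * when the arp_notify sysctl is enabled, on address changes.
 */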
1441 static void inetdev_send_gratuitous_arp(struct net_device
*dev
,
1442 struct in_device
*in_dev
)
1445 struct in_ifaddr
*ifa
;
1447 for (ifa
= in_dev
->ifa_list
; ifa
;
1448 ifa
= ifa
->ifa_next
) {
1449 arp_send(ARPOP_REQUEST
, ETH_P_ARP
,
1450 ifa
->ifa_local
, dev
,
1451 ifa
->ifa_local
, NULL
,
1452 dev
->dev_addr
, NULL
);
1456 /* Called only under RTNL semaphore */
1458 static int inetdev_event(struct notifier_block
*this, unsigned long event
,
1461 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
1462 struct in_device
*in_dev
= __in_dev_get_rtnl(dev
);
1467 if (event
== NETDEV_REGISTER
) {
1468 in_dev
= inetdev_init(dev
);
1470 return notifier_from_errno(PTR_ERR(in_dev
));
1471 if (dev
->flags
& IFF_LOOPBACK
) {
1472 IN_DEV_CONF_SET(in_dev
, NOXFRM
, 1);
1473 IN_DEV_CONF_SET(in_dev
, NOPOLICY
, 1);
1475 } else if (event
== NETDEV_CHANGEMTU
) {
1476 /* Re-enabling IP */
1477 if (inetdev_valid_mtu(dev
->mtu
))
1478 in_dev
= inetdev_init(dev
);
1484 case NETDEV_REGISTER
:
1485 pr_debug("%s: bug\n", __func__
);
1486 RCU_INIT_POINTER(dev
->ip_ptr
, NULL
);
1489 if (!inetdev_valid_mtu(dev
->mtu
))
1491 if (dev
->flags
& IFF_LOOPBACK
) {
1492 struct in_ifaddr
*ifa
= inet_alloc_ifa();
1495 INIT_HLIST_NODE(&ifa
->hash
);
1497 ifa
->ifa_address
= htonl(INADDR_LOOPBACK
);
1498 ifa
->ifa_prefixlen
= 8;
1499 ifa
->ifa_mask
= inet_make_mask(8);
1500 in_dev_hold(in_dev
);
1501 ifa
->ifa_dev
= in_dev
;
1502 ifa
->ifa_scope
= RT_SCOPE_HOST
;
1503 memcpy(ifa
->ifa_label
, dev
->name
, IFNAMSIZ
);
1504 set_ifa_lifetime(ifa
, INFINITY_LIFE_TIME
,
1505 INFINITY_LIFE_TIME
);
1506 ipv4_devconf_setall(in_dev
);
1507 neigh_parms_data_state_setall(in_dev
->arp_parms
);
1508 inet_insert_ifa(ifa
);
1513 case NETDEV_CHANGEADDR
:
1514 if (!IN_DEV_ARP_NOTIFY(in_dev
))
1517 case NETDEV_NOTIFY_PEERS
:
1518 /* Send gratuitous ARP to notify of link change */
1519 inetdev_send_gratuitous_arp(dev
, in_dev
);
1524 case NETDEV_PRE_TYPE_CHANGE
:
1525 ip_mc_unmap(in_dev
);
1527 case NETDEV_POST_TYPE_CHANGE
:
1528 ip_mc_remap(in_dev
);
1530 case NETDEV_CHANGEMTU
:
1531 if (inetdev_valid_mtu(dev
->mtu
))
1533 /* disable IP when MTU is not enough */
1535 case NETDEV_UNREGISTER
:
1536 inetdev_destroy(in_dev
);
1538 case NETDEV_CHANGENAME
:
1539 /* Do not notify about label change, this event is
1540 * not interesting to applications using netlink.
1542 inetdev_changename(dev
, in_dev
);
1544 devinet_sysctl_unregister(in_dev
);
1545 devinet_sysctl_register(in_dev
);
static struct notifier_block ip_netdev_notifier = {
	.notifier_call = inetdev_event,
};
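
/* inet_nlmsg_size() must account for every attribute that inet_fill_ifaddr()
 * can emit; rtmsg_ifa() treats -EMSGSIZE from the fill routine as a bug in
 * this size estimate.
 */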
static size_t inet_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
	       + nla_total_size(4) /* IFA_ADDRESS */
	       + nla_total_size(4) /* IFA_LOCAL */
	       + nla_total_size(4) /* IFA_BROADCAST */
	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
	       + nla_total_size(4)  /* IFA_FLAGS */
	       + nla_total_size(4)  /* IFA_RT_PRIORITY */
	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
}

static inline u32 cstamp_delta(unsigned long cstamp)
{
	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
}
1573 static int put_cacheinfo(struct sk_buff
*skb
, unsigned long cstamp
,
1574 unsigned long tstamp
, u32 preferred
, u32 valid
)
1576 struct ifa_cacheinfo ci
;
1578 ci
.cstamp
= cstamp_delta(cstamp
);
1579 ci
.tstamp
= cstamp_delta(tstamp
);
1580 ci
.ifa_prefered
= preferred
;
1581 ci
.ifa_valid
= valid
;
1583 return nla_put(skb
, IFA_CACHEINFO
, sizeof(ci
), &ci
);
1586 static int inet_fill_ifaddr(struct sk_buff
*skb
, struct in_ifaddr
*ifa
,
1587 u32 portid
, u32 seq
, int event
, unsigned int flags
)
1589 struct ifaddrmsg
*ifm
;
1590 struct nlmsghdr
*nlh
;
1591 u32 preferred
, valid
;
1593 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(*ifm
), flags
);
1597 ifm
= nlmsg_data(nlh
);
1598 ifm
->ifa_family
= AF_INET
;
1599 ifm
->ifa_prefixlen
= ifa
->ifa_prefixlen
;
1600 ifm
->ifa_flags
= ifa
->ifa_flags
;
1601 ifm
->ifa_scope
= ifa
->ifa_scope
;
1602 ifm
->ifa_index
= ifa
->ifa_dev
->dev
->ifindex
;
1604 if (!(ifm
->ifa_flags
& IFA_F_PERMANENT
)) {
1605 preferred
= ifa
->ifa_preferred_lft
;
1606 valid
= ifa
->ifa_valid_lft
;
1607 if (preferred
!= INFINITY_LIFE_TIME
) {
1608 long tval
= (jiffies
- ifa
->ifa_tstamp
) / HZ
;
1610 if (preferred
> tval
)
1614 if (valid
!= INFINITY_LIFE_TIME
) {
1622 preferred
= INFINITY_LIFE_TIME
;
1623 valid
= INFINITY_LIFE_TIME
;
1625 if ((ifa
->ifa_address
&&
1626 nla_put_in_addr(skb
, IFA_ADDRESS
, ifa
->ifa_address
)) ||
1628 nla_put_in_addr(skb
, IFA_LOCAL
, ifa
->ifa_local
)) ||
1629 (ifa
->ifa_broadcast
&&
1630 nla_put_in_addr(skb
, IFA_BROADCAST
, ifa
->ifa_broadcast
)) ||
1631 (ifa
->ifa_label
[0] &&
1632 nla_put_string(skb
, IFA_LABEL
, ifa
->ifa_label
)) ||
1633 nla_put_u32(skb
, IFA_FLAGS
, ifa
->ifa_flags
) ||
1634 (ifa
->ifa_rt_priority
&&
1635 nla_put_u32(skb
, IFA_RT_PRIORITY
, ifa
->ifa_rt_priority
)) ||
1636 put_cacheinfo(skb
, ifa
->ifa_cstamp
, ifa
->ifa_tstamp
,
1638 goto nla_put_failure
;
1640 nlmsg_end(skb
, nlh
);
1644 nlmsg_cancel(skb
, nlh
);
1648 static int inet_dump_ifaddr(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1650 struct net
*net
= sock_net(skb
->sk
);
1653 int ip_idx
, s_ip_idx
;
1654 struct net_device
*dev
;
1655 struct in_device
*in_dev
;
1656 struct in_ifaddr
*ifa
;
1657 struct hlist_head
*head
;
1660 s_idx
= idx
= cb
->args
[1];
1661 s_ip_idx
= ip_idx
= cb
->args
[2];
1663 for (h
= s_h
; h
< NETDEV_HASHENTRIES
; h
++, s_idx
= 0) {
1665 head
= &net
->dev_index_head
[h
];
1667 cb
->seq
= atomic_read(&net
->ipv4
.dev_addr_genid
) ^
1669 hlist_for_each_entry_rcu(dev
, head
, index_hlist
) {
1672 if (h
> s_h
|| idx
> s_idx
)
1674 in_dev
= __in_dev_get_rcu(dev
);
1678 for (ifa
= in_dev
->ifa_list
, ip_idx
= 0; ifa
;
1679 ifa
= ifa
->ifa_next
, ip_idx
++) {
1680 if (ip_idx
< s_ip_idx
)
1682 if (inet_fill_ifaddr(skb
, ifa
,
1683 NETLINK_CB(cb
->skb
).portid
,
1685 RTM_NEWADDR
, NLM_F_MULTI
) < 0) {
1689 nl_dump_check_consistent(cb
, nlmsg_hdr(skb
));
1700 cb
->args
[2] = ip_idx
;
1705 static void rtmsg_ifa(int event
, struct in_ifaddr
*ifa
, struct nlmsghdr
*nlh
,
1708 struct sk_buff
*skb
;
1709 u32 seq
= nlh
? nlh
->nlmsg_seq
: 0;
1713 net
= dev_net(ifa
->ifa_dev
->dev
);
1714 skb
= nlmsg_new(inet_nlmsg_size(), GFP_KERNEL
);
1718 err
= inet_fill_ifaddr(skb
, ifa
, portid
, seq
, event
, 0);
1720 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1721 WARN_ON(err
== -EMSGSIZE
);
1725 rtnl_notify(skb
, net
, portid
, RTNLGRP_IPV4_IFADDR
, nlh
, GFP_KERNEL
);
1729 rtnl_set_sk_err(net
, RTNLGRP_IPV4_IFADDR
, err
);
1732 static size_t inet_get_link_af_size(const struct net_device
*dev
,
1733 u32 ext_filter_mask
)
1735 struct in_device
*in_dev
= rcu_dereference_rtnl(dev
->ip_ptr
);
1740 return nla_total_size(IPV4_DEVCONF_MAX
* 4); /* IFLA_INET_CONF */
1743 static int inet_fill_link_af(struct sk_buff
*skb
, const struct net_device
*dev
,
1744 u32 ext_filter_mask
)
1746 struct in_device
*in_dev
= rcu_dereference_rtnl(dev
->ip_ptr
);
1753 nla
= nla_reserve(skb
, IFLA_INET_CONF
, IPV4_DEVCONF_MAX
* 4);
1757 for (i
= 0; i
< IPV4_DEVCONF_MAX
; i
++)
1758 ((u32
*) nla_data(nla
))[i
] = in_dev
->cnf
.data
[i
];
1763 static const struct nla_policy inet_af_policy
[IFLA_INET_MAX
+1] = {
1764 [IFLA_INET_CONF
] = { .type
= NLA_NESTED
},
1767 static int inet_validate_link_af(const struct net_device
*dev
,
1768 const struct nlattr
*nla
)
1770 struct nlattr
*a
, *tb
[IFLA_INET_MAX
+1];
1773 if (dev
&& !__in_dev_get_rcu(dev
))
1774 return -EAFNOSUPPORT
;
1776 err
= nla_parse_nested(tb
, IFLA_INET_MAX
, nla
, inet_af_policy
, NULL
);
1780 if (tb
[IFLA_INET_CONF
]) {
1781 nla_for_each_nested(a
, tb
[IFLA_INET_CONF
], rem
) {
1782 int cfgid
= nla_type(a
);
1787 if (cfgid
<= 0 || cfgid
> IPV4_DEVCONF_MAX
)
1795 static int inet_set_link_af(struct net_device
*dev
, const struct nlattr
*nla
)
1797 struct in_device
*in_dev
= __in_dev_get_rcu(dev
);
1798 struct nlattr
*a
, *tb
[IFLA_INET_MAX
+1];
1802 return -EAFNOSUPPORT
;
1804 if (nla_parse_nested(tb
, IFLA_INET_MAX
, nla
, NULL
, NULL
) < 0)
1807 if (tb
[IFLA_INET_CONF
]) {
1808 nla_for_each_nested(a
, tb
[IFLA_INET_CONF
], rem
)
1809 ipv4_devconf_set(in_dev
, nla_type(a
), nla_get_u32(a
));
1815 static int inet_netconf_msgsize_devconf(int type
)
1817 int size
= NLMSG_ALIGN(sizeof(struct netconfmsg
))
1818 + nla_total_size(4); /* NETCONFA_IFINDEX */
1821 if (type
== NETCONFA_ALL
)
1824 if (all
|| type
== NETCONFA_FORWARDING
)
1825 size
+= nla_total_size(4);
1826 if (all
|| type
== NETCONFA_RP_FILTER
)
1827 size
+= nla_total_size(4);
1828 if (all
|| type
== NETCONFA_MC_FORWARDING
)
1829 size
+= nla_total_size(4);
1830 if (all
|| type
== NETCONFA_PROXY_NEIGH
)
1831 size
+= nla_total_size(4);
1832 if (all
|| type
== NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN
)
1833 size
+= nla_total_size(4);
1838 static int inet_netconf_fill_devconf(struct sk_buff
*skb
, int ifindex
,
1839 struct ipv4_devconf
*devconf
, u32 portid
,
1840 u32 seq
, int event
, unsigned int flags
,
1843 struct nlmsghdr
*nlh
;
1844 struct netconfmsg
*ncm
;
1847 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(struct netconfmsg
),
1852 if (type
== NETCONFA_ALL
)
1855 ncm
= nlmsg_data(nlh
);
1856 ncm
->ncm_family
= AF_INET
;
1858 if (nla_put_s32(skb
, NETCONFA_IFINDEX
, ifindex
) < 0)
1859 goto nla_put_failure
;
1864 if ((all
|| type
== NETCONFA_FORWARDING
) &&
1865 nla_put_s32(skb
, NETCONFA_FORWARDING
,
1866 IPV4_DEVCONF(*devconf
, FORWARDING
)) < 0)
1867 goto nla_put_failure
;
1868 if ((all
|| type
== NETCONFA_RP_FILTER
) &&
1869 nla_put_s32(skb
, NETCONFA_RP_FILTER
,
1870 IPV4_DEVCONF(*devconf
, RP_FILTER
)) < 0)
1871 goto nla_put_failure
;
1872 if ((all
|| type
== NETCONFA_MC_FORWARDING
) &&
1873 nla_put_s32(skb
, NETCONFA_MC_FORWARDING
,
1874 IPV4_DEVCONF(*devconf
, MC_FORWARDING
)) < 0)
1875 goto nla_put_failure
;
1876 if ((all
|| type
== NETCONFA_PROXY_NEIGH
) &&
1877 nla_put_s32(skb
, NETCONFA_PROXY_NEIGH
,
1878 IPV4_DEVCONF(*devconf
, PROXY_ARP
)) < 0)
1879 goto nla_put_failure
;
1880 if ((all
|| type
== NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN
) &&
1881 nla_put_s32(skb
, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN
,
1882 IPV4_DEVCONF(*devconf
, IGNORE_ROUTES_WITH_LINKDOWN
)) < 0)
1883 goto nla_put_failure
;
1886 nlmsg_end(skb
, nlh
);
1890 nlmsg_cancel(skb
, nlh
);
1894 void inet_netconf_notify_devconf(struct net
*net
, int event
, int type
,
1895 int ifindex
, struct ipv4_devconf
*devconf
)
1897 struct sk_buff
*skb
;
1900 skb
= nlmsg_new(inet_netconf_msgsize_devconf(type
), GFP_KERNEL
);
1904 err
= inet_netconf_fill_devconf(skb
, ifindex
, devconf
, 0, 0,
1907 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1908 WARN_ON(err
== -EMSGSIZE
);
1912 rtnl_notify(skb
, net
, 0, RTNLGRP_IPV4_NETCONF
, NULL
, GFP_KERNEL
);
1916 rtnl_set_sk_err(net
, RTNLGRP_IPV4_NETCONF
, err
);
1919 static const struct nla_policy devconf_ipv4_policy
[NETCONFA_MAX
+1] = {
1920 [NETCONFA_IFINDEX
] = { .len
= sizeof(int) },
1921 [NETCONFA_FORWARDING
] = { .len
= sizeof(int) },
1922 [NETCONFA_RP_FILTER
] = { .len
= sizeof(int) },
1923 [NETCONFA_PROXY_NEIGH
] = { .len
= sizeof(int) },
1924 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN
] = { .len
= sizeof(int) },
1927 static int inet_netconf_get_devconf(struct sk_buff
*in_skb
,
1928 struct nlmsghdr
*nlh
,
1929 struct netlink_ext_ack
*extack
)
1931 struct net
*net
= sock_net(in_skb
->sk
);
1932 struct nlattr
*tb
[NETCONFA_MAX
+1];
1933 struct netconfmsg
*ncm
;
1934 struct sk_buff
*skb
;
1935 struct ipv4_devconf
*devconf
;
1936 struct in_device
*in_dev
;
1937 struct net_device
*dev
;
1941 err
= nlmsg_parse(nlh
, sizeof(*ncm
), tb
, NETCONFA_MAX
,
1942 devconf_ipv4_policy
, extack
);
1947 if (!tb
[NETCONFA_IFINDEX
])
1950 ifindex
= nla_get_s32(tb
[NETCONFA_IFINDEX
]);
1952 case NETCONFA_IFINDEX_ALL
:
1953 devconf
= net
->ipv4
.devconf_all
;
1955 case NETCONFA_IFINDEX_DEFAULT
:
1956 devconf
= net
->ipv4
.devconf_dflt
;
1959 dev
= __dev_get_by_index(net
, ifindex
);
1962 in_dev
= __in_dev_get_rtnl(dev
);
1965 devconf
= &in_dev
->cnf
;
1970 skb
= nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL
), GFP_KERNEL
);
1974 err
= inet_netconf_fill_devconf(skb
, ifindex
, devconf
,
1975 NETLINK_CB(in_skb
).portid
,
1976 nlh
->nlmsg_seq
, RTM_NEWNETCONF
, 0,
1979 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1980 WARN_ON(err
== -EMSGSIZE
);
1984 err
= rtnl_unicast(skb
, net
, NETLINK_CB(in_skb
).portid
);
1989 static int inet_netconf_dump_devconf(struct sk_buff
*skb
,
1990 struct netlink_callback
*cb
)
1992 struct net
*net
= sock_net(skb
->sk
);
1995 struct net_device
*dev
;
1996 struct in_device
*in_dev
;
1997 struct hlist_head
*head
;
2000 s_idx
= idx
= cb
->args
[1];
2002 for (h
= s_h
; h
< NETDEV_HASHENTRIES
; h
++, s_idx
= 0) {
2004 head
= &net
->dev_index_head
[h
];
2006 cb
->seq
= atomic_read(&net
->ipv4
.dev_addr_genid
) ^
2008 hlist_for_each_entry_rcu(dev
, head
, index_hlist
) {
2011 in_dev
= __in_dev_get_rcu(dev
);
2015 if (inet_netconf_fill_devconf(skb
, dev
->ifindex
,
2017 NETLINK_CB(cb
->skb
).portid
,
2021 NETCONFA_ALL
) < 0) {
2025 nl_dump_check_consistent(cb
, nlmsg_hdr(skb
));
2031 if (h
== NETDEV_HASHENTRIES
) {
2032 if (inet_netconf_fill_devconf(skb
, NETCONFA_IFINDEX_ALL
,
2033 net
->ipv4
.devconf_all
,
2034 NETLINK_CB(cb
->skb
).portid
,
2036 RTM_NEWNETCONF
, NLM_F_MULTI
,
2042 if (h
== NETDEV_HASHENTRIES
+ 1) {
2043 if (inet_netconf_fill_devconf(skb
, NETCONFA_IFINDEX_DEFAULT
,
2044 net
->ipv4
.devconf_dflt
,
2045 NETLINK_CB(cb
->skb
).portid
,
2047 RTM_NEWNETCONF
, NLM_F_MULTI
,
2060 #ifdef CONFIG_SYSCTL
2062 static void devinet_copy_dflt_conf(struct net
*net
, int i
)
2064 struct net_device
*dev
;
2067 for_each_netdev_rcu(net
, dev
) {
2068 struct in_device
*in_dev
;
2070 in_dev
= __in_dev_get_rcu(dev
);
2071 if (in_dev
&& !test_bit(i
, in_dev
->cnf
.state
))
2072 in_dev
->cnf
.data
[i
] = net
->ipv4
.devconf_dflt
->data
[i
];
2077 /* called with RTNL locked */
2078 static void inet_forward_change(struct net
*net
)
2080 struct net_device
*dev
;
2081 int on
= IPV4_DEVCONF_ALL(net
, FORWARDING
);
2083 IPV4_DEVCONF_ALL(net
, ACCEPT_REDIRECTS
) = !on
;
2084 IPV4_DEVCONF_DFLT(net
, FORWARDING
) = on
;
2085 inet_netconf_notify_devconf(net
, RTM_NEWNETCONF
,
2086 NETCONFA_FORWARDING
,
2087 NETCONFA_IFINDEX_ALL
,
2088 net
->ipv4
.devconf_all
);
2089 inet_netconf_notify_devconf(net
, RTM_NEWNETCONF
,
2090 NETCONFA_FORWARDING
,
2091 NETCONFA_IFINDEX_DEFAULT
,
2092 net
->ipv4
.devconf_dflt
);
2094 for_each_netdev(net
, dev
) {
2095 struct in_device
*in_dev
;
2098 dev_disable_lro(dev
);
2100 in_dev
= __in_dev_get_rtnl(dev
);
2102 IN_DEV_CONF_SET(in_dev
, FORWARDING
, on
);
2103 inet_netconf_notify_devconf(net
, RTM_NEWNETCONF
,
2104 NETCONFA_FORWARDING
,
2105 dev
->ifindex
, &in_dev
->cnf
);
2110 static int devinet_conf_ifindex(struct net
*net
, struct ipv4_devconf
*cnf
)
2112 if (cnf
== net
->ipv4
.devconf_dflt
)
2113 return NETCONFA_IFINDEX_DEFAULT
;
2114 else if (cnf
== net
->ipv4
.devconf_all
)
2115 return NETCONFA_IFINDEX_ALL
;
2117 struct in_device
*idev
2118 = container_of(cnf
, struct in_device
, cnf
);
2119 return idev
->dev
->ifindex
;
2123 static int devinet_conf_proc(struct ctl_table
*ctl
, int write
,
2124 void __user
*buffer
,
2125 size_t *lenp
, loff_t
*ppos
)
2127 int old_value
= *(int *)ctl
->data
;
2128 int ret
= proc_dointvec(ctl
, write
, buffer
, lenp
, ppos
);
2129 int new_value
= *(int *)ctl
->data
;
2132 struct ipv4_devconf
*cnf
= ctl
->extra1
;
2133 struct net
*net
= ctl
->extra2
;
2134 int i
= (int *)ctl
->data
- cnf
->data
;
2137 set_bit(i
, cnf
->state
);
2139 if (cnf
== net
->ipv4
.devconf_dflt
)
2140 devinet_copy_dflt_conf(net
, i
);
2141 if (i
== IPV4_DEVCONF_ACCEPT_LOCAL
- 1 ||
2142 i
== IPV4_DEVCONF_ROUTE_LOCALNET
- 1)
2143 if ((new_value
== 0) && (old_value
!= 0))
2144 rt_cache_flush(net
);
2146 if (i
== IPV4_DEVCONF_RP_FILTER
- 1 &&
2147 new_value
!= old_value
) {
2148 ifindex
= devinet_conf_ifindex(net
, cnf
);
2149 inet_netconf_notify_devconf(net
, RTM_NEWNETCONF
,
2153 if (i
== IPV4_DEVCONF_PROXY_ARP
- 1 &&
2154 new_value
!= old_value
) {
2155 ifindex
= devinet_conf_ifindex(net
, cnf
);
2156 inet_netconf_notify_devconf(net
, RTM_NEWNETCONF
,
2157 NETCONFA_PROXY_NEIGH
,
2160 if (i
== IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN
- 1 &&
2161 new_value
!= old_value
) {
2162 ifindex
= devinet_conf_ifindex(net
, cnf
);
2163 inet_netconf_notify_devconf(net
, RTM_NEWNETCONF
,
2164 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN
,
2172 static int devinet_sysctl_forward(struct ctl_table
*ctl
, int write
,
2173 void __user
*buffer
,
2174 size_t *lenp
, loff_t
*ppos
)
2176 int *valp
= ctl
->data
;
2179 int ret
= proc_dointvec(ctl
, write
, buffer
, lenp
, ppos
);
2181 if (write
&& *valp
!= val
) {
2182 struct net
*net
= ctl
->extra2
;
2184 if (valp
!= &IPV4_DEVCONF_DFLT(net
, FORWARDING
)) {
2185 if (!rtnl_trylock()) {
2186 /* Restore the original values before restarting */
2189 return restart_syscall();
2191 if (valp
== &IPV4_DEVCONF_ALL(net
, FORWARDING
)) {
2192 inet_forward_change(net
);
2194 struct ipv4_devconf
*cnf
= ctl
->extra1
;
2195 struct in_device
*idev
=
2196 container_of(cnf
, struct in_device
, cnf
);
2198 dev_disable_lro(idev
->dev
);
2199 inet_netconf_notify_devconf(net
, RTM_NEWNETCONF
,
2200 NETCONFA_FORWARDING
,
2205 rt_cache_flush(net
);
2207 inet_netconf_notify_devconf(net
, RTM_NEWNETCONF
,
2208 NETCONFA_FORWARDING
,
2209 NETCONFA_IFINDEX_DEFAULT
,
2210 net
->ipv4
.devconf_dflt
);
2216 static int ipv4_doint_and_flush(struct ctl_table
*ctl
, int write
,
2217 void __user
*buffer
,
2218 size_t *lenp
, loff_t
*ppos
)
2220 int *valp
= ctl
->data
;
2222 int ret
= proc_dointvec(ctl
, write
, buffer
, lenp
, ppos
);
2223 struct net
*net
= ctl
->extra2
;
2225 if (write
&& *valp
!= val
)
2226 rt_cache_flush(net
);
2231 #define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2234 .data = ipv4_devconf.data + \
2235 IPV4_DEVCONF_ ## attr - 1, \
2236 .maxlen = sizeof(int), \
2238 .proc_handler = proc, \
2239 .extra1 = &ipv4_devconf, \
2242 #define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2243 DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
2245 #define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2246 DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
2248 #define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2249 DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
2251 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2252 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
2254 static struct devinet_sysctl_table
{
2255 struct ctl_table_header
*sysctl_header
;
2256 struct ctl_table devinet_vars
[__IPV4_DEVCONF_MAX
];
2257 } devinet_sysctl
= {
2259 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING
, "forwarding",
2260 devinet_sysctl_forward
),
2261 DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING
, "mc_forwarding"),
2263 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS
, "accept_redirects"),
2264 DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS
, "secure_redirects"),
2265 DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA
, "shared_media"),
2266 DEVINET_SYSCTL_RW_ENTRY(RP_FILTER
, "rp_filter"),
2267 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS
, "send_redirects"),
2268 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE
,
2269 "accept_source_route"),
2270 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL
, "accept_local"),
2271 DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK
, "src_valid_mark"),
2272 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP
, "proxy_arp"),
2273 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID
, "medium_id"),
2274 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY
, "bootp_relay"),
2275 DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS
, "log_martians"),
2276 DEVINET_SYSCTL_RW_ENTRY(TAG
, "tag"),
2277 DEVINET_SYSCTL_RW_ENTRY(ARPFILTER
, "arp_filter"),
2278 DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE
, "arp_announce"),
2279 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE
, "arp_ignore"),
2280 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT
, "arp_accept"),
2281 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY
, "arp_notify"),
2282 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN
, "proxy_arp_pvlan"),
2283 DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION
,
2284 "force_igmp_version"),
2285 DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL
,
2286 "igmpv2_unsolicited_report_interval"),
2287 DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL
,
2288 "igmpv3_unsolicited_report_interval"),
2289 DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN
,
2290 "ignore_routes_with_linkdown"),
2291 DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP
,
2292 "drop_gratuitous_arp"),
2294 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM
, "disable_xfrm"),
2295 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY
, "disable_policy"),
2296 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES
,
2297 "promote_secondaries"),
2298 DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET
,
2300 DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST
,
2301 "drop_unicast_in_l2_multicast"),
2305 static int __devinet_sysctl_register(struct net
*net
, char *dev_name
,
2306 int ifindex
, struct ipv4_devconf
*p
)
2309 struct devinet_sysctl_table
*t
;
2310 char path
[sizeof("net/ipv4/conf/") + IFNAMSIZ
];
2312 t
= kmemdup(&devinet_sysctl
, sizeof(*t
), GFP_KERNEL
);
2316 for (i
= 0; i
< ARRAY_SIZE(t
->devinet_vars
) - 1; i
++) {
2317 t
->devinet_vars
[i
].data
+= (char *)p
- (char *)&ipv4_devconf
;
2318 t
->devinet_vars
[i
].extra1
= p
;
2319 t
->devinet_vars
[i
].extra2
= net
;
2322 snprintf(path
, sizeof(path
), "net/ipv4/conf/%s", dev_name
);
2324 t
->sysctl_header
= register_net_sysctl(net
, path
, t
->devinet_vars
);
2325 if (!t
->sysctl_header
)
2330 inet_netconf_notify_devconf(net
, RTM_NEWNETCONF
, NETCONFA_ALL
,
2340 static void __devinet_sysctl_unregister(struct net
*net
,
2341 struct ipv4_devconf
*cnf
, int ifindex
)
2343 struct devinet_sysctl_table
*t
= cnf
->sysctl
;
2347 unregister_net_sysctl_table(t
->sysctl_header
);
2351 inet_netconf_notify_devconf(net
, RTM_DELNETCONF
, 0, ifindex
, NULL
);
2354 static int devinet_sysctl_register(struct in_device
*idev
)
2358 if (!sysctl_dev_name_is_allowed(idev
->dev
->name
))
2361 err
= neigh_sysctl_register(idev
->dev
, idev
->arp_parms
, NULL
);
2364 err
= __devinet_sysctl_register(dev_net(idev
->dev
), idev
->dev
->name
,
2365 idev
->dev
->ifindex
, &idev
->cnf
);
2367 neigh_sysctl_unregister(idev
->arp_parms
);
2371 static void devinet_sysctl_unregister(struct in_device
*idev
)
2373 struct net
*net
= dev_net(idev
->dev
);
2375 __devinet_sysctl_unregister(net
, &idev
->cnf
, idev
->dev
->ifindex
);
2376 neigh_sysctl_unregister(idev
->arp_parms
);
2379 static struct ctl_table ctl_forward_entry
[] = {
2381 .procname
= "ip_forward",
2382 .data
= &ipv4_devconf
.data
[
2383 IPV4_DEVCONF_FORWARDING
- 1],
2384 .maxlen
= sizeof(int),
2386 .proc_handler
= devinet_sysctl_forward
,
2387 .extra1
= &ipv4_devconf
,
2388 .extra2
= &init_net
,
2394 static __net_init
int devinet_init_net(struct net
*net
)
2397 struct ipv4_devconf
*all
, *dflt
;
2398 #ifdef CONFIG_SYSCTL
2399 struct ctl_table
*tbl
= ctl_forward_entry
;
2400 struct ctl_table_header
*forw_hdr
;
2404 all
= &ipv4_devconf
;
2405 dflt
= &ipv4_devconf_dflt
;
2407 if (!net_eq(net
, &init_net
)) {
2408 all
= kmemdup(all
, sizeof(ipv4_devconf
), GFP_KERNEL
);
2412 dflt
= kmemdup(dflt
, sizeof(ipv4_devconf_dflt
), GFP_KERNEL
);
2414 goto err_alloc_dflt
;
2416 #ifdef CONFIG_SYSCTL
2417 tbl
= kmemdup(tbl
, sizeof(ctl_forward_entry
), GFP_KERNEL
);
2421 tbl
[0].data
= &all
->data
[IPV4_DEVCONF_FORWARDING
- 1];
2422 tbl
[0].extra1
= all
;
2423 tbl
[0].extra2
= net
;
2427 #ifdef CONFIG_SYSCTL
2428 err
= __devinet_sysctl_register(net
, "all", NETCONFA_IFINDEX_ALL
, all
);
2432 err
= __devinet_sysctl_register(net
, "default",
2433 NETCONFA_IFINDEX_DEFAULT
, dflt
);
2438 forw_hdr
= register_net_sysctl(net
, "net/ipv4", tbl
);
2441 net
->ipv4
.forw_hdr
= forw_hdr
;
2444 net
->ipv4
.devconf_all
= all
;
2445 net
->ipv4
.devconf_dflt
= dflt
;
2448 #ifdef CONFIG_SYSCTL
2450 __devinet_sysctl_unregister(net
, dflt
, NETCONFA_IFINDEX_DEFAULT
);
2452 __devinet_sysctl_unregister(net
, all
, NETCONFA_IFINDEX_ALL
);
2454 if (tbl
!= ctl_forward_entry
)
2458 if (dflt
!= &ipv4_devconf_dflt
)
2461 if (all
!= &ipv4_devconf
)
2467 static __net_exit
void devinet_exit_net(struct net
*net
)
2469 #ifdef CONFIG_SYSCTL
2470 struct ctl_table
*tbl
;
2472 tbl
= net
->ipv4
.forw_hdr
->ctl_table_arg
;
2473 unregister_net_sysctl_table(net
->ipv4
.forw_hdr
);
2474 __devinet_sysctl_unregister(net
, net
->ipv4
.devconf_dflt
,
2475 NETCONFA_IFINDEX_DEFAULT
);
2476 __devinet_sysctl_unregister(net
, net
->ipv4
.devconf_all
,
2477 NETCONFA_IFINDEX_ALL
);
2480 kfree(net
->ipv4
.devconf_dflt
);
2481 kfree(net
->ipv4
.devconf_all
);
2484 static __net_initdata
struct pernet_operations devinet_ops
= {
2485 .init
= devinet_init_net
,
2486 .exit
= devinet_exit_net
,
2489 static struct rtnl_af_ops inet_af_ops __read_mostly
= {
2491 .fill_link_af
= inet_fill_link_af
,
2492 .get_link_af_size
= inet_get_link_af_size
,
2493 .validate_link_af
= inet_validate_link_af
,
2494 .set_link_af
= inet_set_link_af
,
void __init devinet_init(void)
{
	int i;

	for (i = 0; i < IN4_ADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&inet_addr_lst[i]);

	register_pernet_subsys(&devinet_ops);

	register_gifconf(PF_INET, inet_gifconf);
	register_netdevice_notifier(&ip_netdev_notifier);

	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);

	rtnl_af_register(&inet_af_ops);

	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, 0);
	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
		      inet_netconf_dump_devconf, 0);
}